// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
 * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/sys_soc.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/workqueue.h>
#include <linux/completion.h>
#include <linux/soc/ti/k3-ringacc.h>
#include <linux/soc/ti/ti_sci_protocol.h>
#include <linux/soc/ti/ti_sci_inta_msi.h>
#include <linux/dma/k3-event-router.h>
#include <linux/dma/ti-cppi5.h>

#include "../virt-dma.h"
#include "k3-udma.h"
#include "k3-psil-priv.h"

struct udma_static_tr {
	u8 elsize; /* RPSTR0 */
	u16 elcnt; /* RPSTR0 */
	u16 bstcnt; /* RPSTR1 */
};

#define K3_UDMA_MAX_RFLOWS		1024
#define K3_UDMA_DEFAULT_RING_SIZE	16

/* How SRC/DST tag should be updated by UDMA in the descriptor's Word 3 */
#define UDMA_RFLOW_SRCTAG_NONE		0
#define UDMA_RFLOW_SRCTAG_CFG_TAG	1
#define UDMA_RFLOW_SRCTAG_FLOW_ID	2
#define UDMA_RFLOW_SRCTAG_SRC_TAG	4

#define UDMA_RFLOW_DSTTAG_NONE		0
#define UDMA_RFLOW_DSTTAG_CFG_TAG	1
#define UDMA_RFLOW_DSTTAG_FLOW_ID	2
#define UDMA_RFLOW_DSTTAG_DST_TAG_LO	4
#define UDMA_RFLOW_DSTTAG_DST_TAG_HI	5

struct udma_chan;

enum k3_dma_type {
	DMA_TYPE_UDMA = 0,
	DMA_TYPE_BCDMA,
	DMA_TYPE_PKTDMA,
};

enum udma_mmr {
	MMR_GCFG = 0,
	MMR_BCHANRT,
	MMR_RCHANRT,
	MMR_TCHANRT,
	MMR_LAST,
};

static const char * const mmr_names[] = {
	[MMR_GCFG] = "gcfg",
	[MMR_BCHANRT] = "bchanrt",
	[MMR_RCHANRT] = "rchanrt",
	[MMR_TCHANRT] = "tchanrt",
};

struct udma_tchan {
	void __iomem *reg_rt;

	int id;
	struct k3_ring *t_ring; /* Transmit ring */
	struct k3_ring *tc_ring; /* Transmit Completion ring */
	int tflow_id; /* applicable only for PKTDMA */

};

#define udma_bchan udma_tchan

struct udma_rflow {
	int id;
	struct k3_ring *fd_ring; /* Free Descriptor ring */
	struct k3_ring *r_ring; /* Receive ring */
};

struct udma_rchan {
	void __iomem *reg_rt;

	int id;
};

struct udma_oes_offsets {
	/* K3 UDMA Output Event Offset */
	u32 udma_rchan;

	/* BCDMA Output Event Offsets */
	u32 bcdma_bchan_data;
	u32 bcdma_bchan_ring;
	u32 bcdma_tchan_data;
	u32 bcdma_tchan_ring;
	u32 bcdma_rchan_data;
	u32 bcdma_rchan_ring;

	/* PKTDMA Output Event Offsets */
	u32 pktdma_tchan_flow;
	u32 pktdma_rchan_flow;
};

#define UDMA_FLAG_PDMA_ACC32		BIT(0)
#define UDMA_FLAG_PDMA_BURST		BIT(1)
#define UDMA_FLAG_TDTYPE		BIT(2)
#define UDMA_FLAG_BURST_SIZE		BIT(3)
#define UDMA_FLAGS_J7_CLASS		(UDMA_FLAG_PDMA_ACC32 | \
					 UDMA_FLAG_PDMA_BURST | \
					 UDMA_FLAG_TDTYPE | \
					 UDMA_FLAG_BURST_SIZE)

struct udma_match_data {
	enum k3_dma_type type;
	u32 psil_base;
	bool enable_memcpy_support;
	u32 flags;
	u32 statictr_z_mask;
	u8 burst_size[3];
	struct udma_soc_data *soc_data;
};

struct udma_soc_data {
	struct udma_oes_offsets oes;
	u32 bcdma_trigger_event_offset;
};

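/*
 * Bookkeeping for one CPPI5 hardware descriptor: its size and CPU/DMA
 * addresses, plus pointers into the TR request/response area when the
 * descriptor is a TR descriptor.
 */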
struct udma_hwdesc {
	size_t cppi5_desc_size;
	void *cppi5_desc_vaddr;
	dma_addr_t cppi5_desc_paddr;

	/* TR descriptor internal pointers */
	void *tr_req_base;
	struct cppi5_tr_resp_t *tr_resp_base;
};

struct udma_rx_flush {
	struct udma_hwdesc hwdescs[2];

	size_t buffer_size;
	void *buffer_vaddr;
	dma_addr_t buffer_paddr;
};

struct udma_tpl {
	u8 levels;
	u32 start_idx[3];
};

struct udma_dev {
	struct dma_device ddev;
	struct device *dev;
	void __iomem *mmrs[MMR_LAST];
	const struct udma_match_data *match_data;
	const struct udma_soc_data *soc_data;

	struct udma_tpl bchan_tpl;
	struct udma_tpl tchan_tpl;
	struct udma_tpl rchan_tpl;

	size_t desc_align; /* alignment to use for descriptors */

	struct udma_tisci_rm tisci_rm;

	struct k3_ringacc *ringacc;

	struct work_struct purge_work;
	struct list_head desc_to_purge;
	spinlock_t lock;

	struct udma_rx_flush rx_flush;

	int bchan_cnt;
	int tchan_cnt;
	int echan_cnt;
	int rchan_cnt;
	int rflow_cnt;
	int tflow_cnt;
	unsigned long *bchan_map;
	unsigned long *tchan_map;
	unsigned long *rchan_map;
	unsigned long *rflow_gp_map;
	unsigned long *rflow_gp_map_allocated;
	unsigned long *rflow_in_use;
	unsigned long *tflow_map;

	struct udma_bchan *bchans;
	struct udma_tchan *tchans;
	struct udma_rchan *rchans;
	struct udma_rflow *rflows;

	struct udma_chan *channels;
	u32 psil_base;
	u32 atype;
	u32 asel;
};

struct udma_desc {
	struct virt_dma_desc vd;

	bool terminated;

	enum dma_transfer_direction dir;

	struct udma_static_tr static_tr;
	u32 residue;

	unsigned int sglen;
	unsigned int desc_idx; /* Only used for cyclic in packet mode */
	unsigned int tr_idx;

	u32 metadata_size;
	void *metadata; /* pointer to provided metadata buffer (EPIB, PSdata) */

	unsigned int hwdesc_count;
	struct udma_hwdesc hwdesc[];
};

enum udma_chan_state {
	UDMA_CHAN_IS_IDLE = 0, /* not active, no teardown is in progress */
	UDMA_CHAN_IS_ACTIVE, /* Normal operation */
	UDMA_CHAN_IS_TERMINATING, /* channel is being terminated */
};

struct udma_tx_drain {
	struct delayed_work work;
	ktime_t tstamp;
	u32 residue;
};

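/*
 * Per-channel configuration, built from the PSI-L endpoint data and the
 * dma_slave_config provided by the client.
 */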
struct udma_chan_config {
	bool pkt_mode; /* TR or packet */
	bool needs_epib; /* EPIB is needed for the communication or not */
	u32 psd_size; /* size of Protocol Specific Data */
	u32 metadata_size; /* (needs_epib ? 16:0) + psd_size */
	u32 hdesc_size; /* Size of a packet descriptor in packet mode */
	bool notdpkt; /* Suppress sending TDC packet */
	int remote_thread_id;
	u32 atype;
	u32 asel;
	u32 src_thread;
	u32 dst_thread;
	enum psil_endpoint_type ep_type;
	bool enable_acc32;
	bool enable_burst;
	enum udma_tp_level channel_tpl; /* Channel Throughput Level */

	u32 tr_trigger_type;
	unsigned long tx_flags;

	/* PKTDMA mapped channel */
	int mapped_channel_id;
	/* PKTDMA default tflow or rflow for mapped channel */
	int default_flow_id;

	enum dma_transfer_direction dir;
};

struct udma_chan {
	struct virt_dma_chan vc;
	struct dma_slave_config cfg;
	struct udma_dev *ud;
	struct device *dma_dev;
	struct udma_desc *desc;
	struct udma_desc *terminated_desc;
	struct udma_static_tr static_tr;
	char *name;

	struct udma_bchan *bchan;
	struct udma_tchan *tchan;
	struct udma_rchan *rchan;
	struct udma_rflow *rflow;

	bool psil_paired;

	int irq_num_ring;
	int irq_num_udma;

	bool cyclic;
	bool paused;

	enum udma_chan_state state;
	struct completion teardown_completed;

	struct udma_tx_drain tx_drain;

	/* Channel configuration parameters */
	struct udma_chan_config config;

	/* dmapool for packet mode descriptors */
	bool use_dma_pool;
	struct dma_pool *hdesc_pool;

	u32 id;
};

static inline struct udma_dev *to_udma_dev(struct dma_device *d)
{
	return container_of(d, struct udma_dev, ddev);
}

static inline struct udma_chan *to_udma_chan(struct dma_chan *c)
{
	return container_of(c, struct udma_chan, vc.chan);
}

static inline struct udma_desc *to_udma_desc(struct dma_async_tx_descriptor *t)
{
	return container_of(t, struct udma_desc, vd.tx);
}

/* Generic register access functions */
static inline u32 udma_read(void __iomem *base, int reg)
{
	return readl(base + reg);
}

static inline void udma_write(void __iomem *base, int reg, u32 val)
{
	writel(val, base + reg);
}

static inline void udma_update_bits(void __iomem *base, int reg,
				    u32 mask, u32 val)
{
	u32 tmp, orig;

	orig = readl(base + reg);
	tmp = orig & ~mask;
	tmp |= (val & mask);

	if (tmp != orig)
		writel(tmp, base + reg);
}

/* TCHANRT */
static inline u32 udma_tchanrt_read(struct udma_chan *uc, int reg)
{
	if (!uc->tchan)
		return 0;
	return udma_read(uc->tchan->reg_rt, reg);
}

static inline void udma_tchanrt_write(struct udma_chan *uc, int reg, u32 val)
{
	if (!uc->tchan)
		return;
	udma_write(uc->tchan->reg_rt, reg, val);
}

static inline void udma_tchanrt_update_bits(struct udma_chan *uc, int reg,
					    u32 mask, u32 val)
{
	if (!uc->tchan)
		return;
	udma_update_bits(uc->tchan->reg_rt, reg, mask, val);
}

/* RCHANRT */
static inline u32 udma_rchanrt_read(struct udma_chan *uc, int reg)
{
	if (!uc->rchan)
		return 0;
	return udma_read(uc->rchan->reg_rt, reg);
}

static inline void udma_rchanrt_write(struct udma_chan *uc, int reg, u32 val)
{
	if (!uc->rchan)
		return;
	udma_write(uc->rchan->reg_rt, reg, val);
}

static inline void udma_rchanrt_update_bits(struct udma_chan *uc, int reg,
					    u32 mask, u32 val)
{
	if (!uc->rchan)
		return;
	udma_update_bits(uc->rchan->reg_rt, reg, mask, val);
}

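/* PSI-L thread pairing and unpairing is done through the TI-SCI resource manager */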
static int navss_psil_pair(struct udma_dev *ud, u32 src_thread, u32 dst_thread)
{
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;

	dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
	return tisci_rm->tisci_psil_ops->pair(tisci_rm->tisci,
					      tisci_rm->tisci_navss_dev_id,
					      src_thread, dst_thread);
}

static int navss_psil_unpair(struct udma_dev *ud, u32 src_thread,
			     u32 dst_thread)
{
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;

	dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
	return tisci_rm->tisci_psil_ops->unpair(tisci_rm->tisci,
						tisci_rm->tisci_navss_dev_id,
						src_thread, dst_thread);
}

static void k3_configure_chan_coherency(struct dma_chan *chan, u32 asel)
{
	struct device *chan_dev = &chan->dev->device;

	if (asel == 0) {
		/* No special handling for the channel */
		chan->dev->chan_dma_dev = false;

		chan_dev->dma_coherent = false;
		chan_dev->dma_parms = NULL;
	} else if (asel == 14 || asel == 15) {
		chan->dev->chan_dma_dev = true;

		chan_dev->dma_coherent = true;
		dma_coerce_mask_and_coherent(chan_dev, DMA_BIT_MASK(48));
		chan_dev->dma_parms = chan_dev->parent->dma_parms;
	} else {
		dev_warn(chan->device->dev, "Invalid ASEL value: %u\n", asel);

		chan_dev->dma_coherent = false;
		chan_dev->dma_parms = NULL;
	}
}

static u8 udma_get_chan_tpl_index(struct udma_tpl *tpl_map, int chan_id)
{
	int i;

	for (i = 0; i < tpl_map->levels; i++) {
		if (chan_id >= tpl_map->start_idx[i])
			return i;
	}

	return 0;
}

static void udma_reset_uchan(struct udma_chan *uc)
{
	memset(&uc->config, 0, sizeof(uc->config));
	uc->config.remote_thread_id = -1;
	uc->config.mapped_channel_id = -1;
	uc->config.default_flow_id = -1;
	uc->state = UDMA_CHAN_IS_IDLE;
}

static void udma_dump_chan_stdata(struct udma_chan *uc)
{
	struct device *dev = uc->ud->dev;
	u32 offset;
	int i;

	if (uc->config.dir == DMA_MEM_TO_DEV || uc->config.dir == DMA_MEM_TO_MEM) {
		dev_dbg(dev, "TCHAN State data:\n");
		for (i = 0; i < 32; i++) {
			offset = UDMA_CHAN_RT_STDATA_REG + i * 4;
			dev_dbg(dev, "TRT_STDATA[%02d]: 0x%08x\n", i,
				udma_tchanrt_read(uc, offset));
		}
	}

	if (uc->config.dir == DMA_DEV_TO_MEM || uc->config.dir == DMA_MEM_TO_MEM) {
		dev_dbg(dev, "RCHAN State data:\n");
		for (i = 0; i < 32; i++) {
			offset = UDMA_CHAN_RT_STDATA_REG + i * 4;
			dev_dbg(dev, "RRT_STDATA[%02d]: 0x%08x\n", i,
				udma_rchanrt_read(uc, offset));
		}
	}
}

static inline dma_addr_t udma_curr_cppi5_desc_paddr(struct udma_desc *d,
						    int idx)
{
	return d->hwdesc[idx].cppi5_desc_paddr;
}

static inline void *udma_curr_cppi5_desc_vaddr(struct udma_desc *d, int idx)
{
	return d->hwdesc[idx].cppi5_desc_vaddr;
}

static struct udma_desc *udma_udma_desc_from_paddr(struct udma_chan *uc,
						   dma_addr_t paddr)
{
	struct udma_desc *d = uc->terminated_desc;

	if (d) {
		dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d,
								   d->desc_idx);

		if (desc_paddr != paddr)
			d = NULL;
	}

	if (!d) {
		d = uc->desc;
		if (d) {
			dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d,
								d->desc_idx);

			if (desc_paddr != paddr)
				d = NULL;
		}
	}

	return d;
}

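/* Free the CPPI5 descriptor memory backing @d (dma_pool or coherent allocation) */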
static void udma_free_hwdesc(struct udma_chan *uc, struct udma_desc *d)
{
	if (uc->use_dma_pool) {
		int i;

		for (i = 0; i < d->hwdesc_count; i++) {
			if (!d->hwdesc[i].cppi5_desc_vaddr)
				continue;

			dma_pool_free(uc->hdesc_pool,
				      d->hwdesc[i].cppi5_desc_vaddr,
				      d->hwdesc[i].cppi5_desc_paddr);

			d->hwdesc[i].cppi5_desc_vaddr = NULL;
		}
	} else if (d->hwdesc[0].cppi5_desc_vaddr) {
		dma_free_coherent(uc->dma_dev, d->hwdesc[0].cppi5_desc_size,
				  d->hwdesc[0].cppi5_desc_vaddr,
				  d->hwdesc[0].cppi5_desc_paddr);

		d->hwdesc[0].cppi5_desc_vaddr = NULL;
	}
}

static void udma_purge_desc_work(struct work_struct *work)
{
	struct udma_dev *ud = container_of(work, typeof(*ud), purge_work);
	struct virt_dma_desc *vd, *_vd;
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&ud->lock, flags);
	list_splice_tail_init(&ud->desc_to_purge, &head);
	spin_unlock_irqrestore(&ud->lock, flags);

	list_for_each_entry_safe(vd, _vd, &head, node) {
		struct udma_chan *uc = to_udma_chan(vd->tx.chan);
		struct udma_desc *d = to_udma_desc(&vd->tx);

		udma_free_hwdesc(uc, d);
		list_del(&vd->node);
		kfree(d);
	}

	/* If more to purge, schedule the work again */
	if (!list_empty(&ud->desc_to_purge))
		schedule_work(&ud->purge_work);
}

static void udma_desc_free(struct virt_dma_desc *vd)
{
	struct udma_dev *ud = to_udma_dev(vd->tx.chan->device);
	struct udma_chan *uc = to_udma_chan(vd->tx.chan);
	struct udma_desc *d = to_udma_desc(&vd->tx);
	unsigned long flags;

	if (uc->terminated_desc == d)
		uc->terminated_desc = NULL;

	if (uc->use_dma_pool) {
		udma_free_hwdesc(uc, d);
		kfree(d);
		return;
	}

	spin_lock_irqsave(&ud->lock, flags);
	list_add_tail(&vd->node, &ud->desc_to_purge);
	spin_unlock_irqrestore(&ud->lock, flags);

	schedule_work(&ud->purge_work);
}

static bool udma_is_chan_running(struct udma_chan *uc)
{
	u32 trt_ctl = 0;
	u32 rrt_ctl = 0;

	if (uc->tchan)
		trt_ctl = udma_tchanrt_read(uc, UDMA_CHAN_RT_CTL_REG);
	if (uc->rchan)
		rrt_ctl = udma_rchanrt_read(uc, UDMA_CHAN_RT_CTL_REG);

	if (trt_ctl & UDMA_CHAN_RT_CTL_EN || rrt_ctl & UDMA_CHAN_RT_CTL_EN)
		return true;

	return false;
}

static bool udma_is_chan_paused(struct udma_chan *uc)
{
	u32 val, pause_mask;

	switch (uc->config.dir) {
	case DMA_DEV_TO_MEM:
		val = udma_rchanrt_read(uc, UDMA_CHAN_RT_PEER_RT_EN_REG);
		pause_mask = UDMA_PEER_RT_EN_PAUSE;
		break;
	case DMA_MEM_TO_DEV:
		val = udma_tchanrt_read(uc, UDMA_CHAN_RT_PEER_RT_EN_REG);
		pause_mask = UDMA_PEER_RT_EN_PAUSE;
		break;
	case DMA_MEM_TO_MEM:
		val = udma_tchanrt_read(uc, UDMA_CHAN_RT_CTL_REG);
		pause_mask = UDMA_CHAN_RT_CTL_PAUSE;
		break;
	default:
		return false;
	}

	if (val & pause_mask)
		return true;

	return false;
}

static inline dma_addr_t udma_get_rx_flush_hwdesc_paddr(struct udma_chan *uc)
{
	return uc->ud->rx_flush.hwdescs[uc->config.pkt_mode].cppi5_desc_paddr;
}

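/*
 * Queue the descriptor at @idx to the channel's transfer ring; idx == -1
 * queues the RX flush descriptor instead (DEV_TO_MEM teardown only).
 */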
static int udma_push_to_ring(struct udma_chan *uc, int idx)
{
	struct udma_desc *d = uc->desc;
	struct k3_ring *ring = NULL;
	dma_addr_t paddr;

	switch (uc->config.dir) {
	case DMA_DEV_TO_MEM:
		ring = uc->rflow->fd_ring;
		break;
	case DMA_MEM_TO_DEV:
	case DMA_MEM_TO_MEM:
		ring = uc->tchan->t_ring;
		break;
	default:
		return -EINVAL;
	}

	/* RX flush packet: idx == -1 is only passed in case of DEV_TO_MEM */
	if (idx == -1) {
		paddr = udma_get_rx_flush_hwdesc_paddr(uc);
	} else {
		paddr = udma_curr_cppi5_desc_paddr(d, idx);

		wmb(); /* Ensure that writes are not moved over this point */
	}

	return k3_ringacc_ring_push(ring, &paddr);
}

static bool udma_desc_is_rx_flush(struct udma_chan *uc, dma_addr_t addr)
{
	if (uc->config.dir != DMA_DEV_TO_MEM)
		return false;

	if (addr == udma_get_rx_flush_hwdesc_paddr(uc))
		return true;

	return false;
}

static int udma_pop_from_ring(struct udma_chan *uc, dma_addr_t *addr)
{
	struct k3_ring *ring = NULL;
	int ret;

	switch (uc->config.dir) {
	case DMA_DEV_TO_MEM:
		ring = uc->rflow->r_ring;
		break;
	case DMA_MEM_TO_DEV:
	case DMA_MEM_TO_MEM:
		ring = uc->tchan->tc_ring;
		break;
	default:
		return -ENOENT;
	}

	ret = k3_ringacc_ring_pop(ring, addr);
	if (ret)
		return ret;

	rmb(); /* Ensure that reads are not moved before this point */

	/* Teardown completion */
	if (cppi5_desc_is_tdcm(*addr))
		return 0;

	/* Check for flush descriptor */
	if (udma_desc_is_rx_flush(uc, *addr))
		return -ENOENT;

	return 0;
}

static void udma_reset_rings(struct udma_chan *uc)
{
	struct k3_ring *ring1 = NULL;
	struct k3_ring *ring2 = NULL;

	switch (uc->config.dir) {
	case DMA_DEV_TO_MEM:
		if (uc->rchan) {
			ring1 = uc->rflow->fd_ring;
			ring2 = uc->rflow->r_ring;
		}
		break;
	case DMA_MEM_TO_DEV:
	case DMA_MEM_TO_MEM:
		if (uc->tchan) {
			ring1 = uc->tchan->t_ring;
			ring2 = uc->tchan->tc_ring;
		}
		break;
	default:
		break;
	}

	if (ring1)
		k3_ringacc_ring_reset_dma(ring1,
					  k3_ringacc_ring_get_occ(ring1));
	if (ring2)
		k3_ringacc_ring_reset(ring2);

	/* make sure we are not leaking memory by stalled descriptor */
	if (uc->terminated_desc) {
		udma_desc_free(&uc->terminated_desc->vd);
		uc->terminated_desc = NULL;
	}
}

static void udma_decrement_byte_counters(struct udma_chan *uc, u32 val)
{
	if (uc->desc->dir == DMA_DEV_TO_MEM) {
		udma_rchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val);
		udma_rchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val);
		udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
	} else {
		udma_tchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val);
		udma_tchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val);
		if (!uc->bchan)
			udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
	}
}

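/*
 * The RT counters decrement by the value written to them, so writing back
 * the current readings effectively clears them.
 */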
static void udma_reset_counters(struct udma_chan *uc)
{
	u32 val;

	if (uc->tchan) {
		val = udma_tchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG);
		udma_tchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val);

		val = udma_tchanrt_read(uc, UDMA_CHAN_RT_SBCNT_REG);
		udma_tchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val);

		val = udma_tchanrt_read(uc, UDMA_CHAN_RT_PCNT_REG);
		udma_tchanrt_write(uc, UDMA_CHAN_RT_PCNT_REG, val);

		if (!uc->bchan) {
			val = udma_tchanrt_read(uc, UDMA_CHAN_RT_PEER_BCNT_REG);
			udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
		}
	}

	if (uc->rchan) {
		val = udma_rchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG);
		udma_rchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val);

		val = udma_rchanrt_read(uc, UDMA_CHAN_RT_SBCNT_REG);
		udma_rchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val);

		val = udma_rchanrt_read(uc, UDMA_CHAN_RT_PCNT_REG);
		udma_rchanrt_write(uc, UDMA_CHAN_RT_PCNT_REG, val);

		val = udma_rchanrt_read(uc, UDMA_CHAN_RT_PEER_BCNT_REG);
		udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
	}
}

static int udma_reset_chan(struct udma_chan *uc, bool hard)
{
	switch (uc->config.dir) {
	case DMA_DEV_TO_MEM:
		udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, 0);
		udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 0);
		break;
	case DMA_MEM_TO_DEV:
		udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 0);
		udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, 0);
		break;
	case DMA_MEM_TO_MEM:
		udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 0);
		udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 0);
		break;
	default:
		return -EINVAL;
	}

	/* Reset all counters */
	udma_reset_counters(uc);

	/* Hard reset: re-initialize the channel to reset */
	if (hard) {
		struct udma_chan_config ucc_backup;
		int ret;

		memcpy(&ucc_backup, &uc->config, sizeof(uc->config));
		uc->ud->ddev.device_free_chan_resources(&uc->vc.chan);

		/* restore the channel configuration */
		memcpy(&uc->config, &ucc_backup, sizeof(uc->config));
		ret = uc->ud->ddev.device_alloc_chan_resources(&uc->vc.chan);
		if (ret)
			return ret;

		/*
		 * Setting forced teardown after forced reset helps to recover
		 * the rchan.
		 */
		if (uc->config.dir == DMA_DEV_TO_MEM)
			udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
					   UDMA_CHAN_RT_CTL_EN |
					   UDMA_CHAN_RT_CTL_TDOWN |
					   UDMA_CHAN_RT_CTL_FTDOWN);
	}
	uc->state = UDMA_CHAN_IS_IDLE;

	return 0;
}

static void udma_start_desc(struct udma_chan *uc)
{
	struct udma_chan_config *ucc = &uc->config;

	if (uc->ud->match_data->type == DMA_TYPE_UDMA && ucc->pkt_mode &&
	    (uc->cyclic || ucc->dir == DMA_DEV_TO_MEM)) {
		int i;

		/*
		 * UDMA only: Push all descriptors to the ring for packet mode
		 * cyclic or RX.
		 * PKTDMA supports pre-linked descriptors and does not support
		 * cyclic.
		 */
		for (i = 0; i < uc->desc->sglen; i++)
			udma_push_to_ring(uc, i);
	} else {
		udma_push_to_ring(uc, 0);
	}
}

static bool udma_chan_needs_reconfiguration(struct udma_chan *uc)
{
	/* Only PDMAs have staticTR */
	if (uc->config.ep_type == PSIL_EP_NATIVE)
		return false;

	/* Check if the staticTR configuration has changed for TX */
	if (memcmp(&uc->static_tr, &uc->desc->static_tr, sizeof(uc->static_tr)))
		return true;

	return false;
}

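/* Take the next queued descriptor and (re)start the channel for it */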
static int udma_start(struct udma_chan *uc)
{
	struct virt_dma_desc *vd = vchan_next_desc(&uc->vc);

	if (!vd) {
		uc->desc = NULL;
		return -ENOENT;
	}

	list_del(&vd->node);

	uc->desc = to_udma_desc(&vd->tx);

	/* Channel is already running and does not need reconfiguration */
	if (udma_is_chan_running(uc) && !udma_chan_needs_reconfiguration(uc)) {
		udma_start_desc(uc);
		goto out;
	}

	/* Make sure that we clear the teardown bit, if it is set */
	udma_reset_chan(uc, false);

	/* Push descriptors before we start the channel */
	udma_start_desc(uc);

	switch (uc->desc->dir) {
	case DMA_DEV_TO_MEM:
		/* Config remote TR */
		if (uc->config.ep_type == PSIL_EP_PDMA_XY) {
			u32 val = PDMA_STATIC_TR_Y(uc->desc->static_tr.elcnt) |
				  PDMA_STATIC_TR_X(uc->desc->static_tr.elsize);
			const struct udma_match_data *match_data =
							uc->ud->match_data;

			if (uc->config.enable_acc32)
				val |= PDMA_STATIC_TR_XY_ACC32;
			if (uc->config.enable_burst)
				val |= PDMA_STATIC_TR_XY_BURST;

			udma_rchanrt_write(uc,
					   UDMA_CHAN_RT_PEER_STATIC_TR_XY_REG,
					   val);

			udma_rchanrt_write(uc,
				UDMA_CHAN_RT_PEER_STATIC_TR_Z_REG,
				PDMA_STATIC_TR_Z(uc->desc->static_tr.bstcnt,
						 match_data->statictr_z_mask));

			/* save the current staticTR configuration */
			memcpy(&uc->static_tr, &uc->desc->static_tr,
			       sizeof(uc->static_tr));
		}

		udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN);

		/* Enable remote */
		udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
				   UDMA_PEER_RT_EN_ENABLE);

		break;
	case DMA_MEM_TO_DEV:
		/* Config remote TR */
		if (uc->config.ep_type == PSIL_EP_PDMA_XY) {
			u32 val = PDMA_STATIC_TR_Y(uc->desc->static_tr.elcnt) |
				  PDMA_STATIC_TR_X(uc->desc->static_tr.elsize);

			if (uc->config.enable_acc32)
				val |= PDMA_STATIC_TR_XY_ACC32;
			if (uc->config.enable_burst)
				val |= PDMA_STATIC_TR_XY_BURST;

			udma_tchanrt_write(uc,
					   UDMA_CHAN_RT_PEER_STATIC_TR_XY_REG,
					   val);

			/* save the current staticTR configuration */
			memcpy(&uc->static_tr, &uc->desc->static_tr,
			       sizeof(uc->static_tr));
		}

		/* Enable remote */
		udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
				   UDMA_PEER_RT_EN_ENABLE);

		udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN);

		break;
	case DMA_MEM_TO_MEM:
		udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN);
		udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN);

		break;
	default:
		return -EINVAL;
	}

	uc->state = UDMA_CHAN_IS_ACTIVE;
out:

	return 0;
}

static int udma_stop(struct udma_chan *uc)
{
	enum udma_chan_state old_state = uc->state;

	uc->state = UDMA_CHAN_IS_TERMINATING;
	reinit_completion(&uc->teardown_completed);

	switch (uc->config.dir) {
	case DMA_DEV_TO_MEM:
		if (!uc->cyclic && !uc->desc)
			udma_push_to_ring(uc, -1);

		udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
				   UDMA_PEER_RT_EN_ENABLE |
				   UDMA_PEER_RT_EN_TEARDOWN);
		break;
	case DMA_MEM_TO_DEV:
		udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
				   UDMA_PEER_RT_EN_ENABLE |
				   UDMA_PEER_RT_EN_FLUSH);
		udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN |
				   UDMA_CHAN_RT_CTL_TDOWN);
		break;
	case DMA_MEM_TO_MEM:
		udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN |
				   UDMA_CHAN_RT_CTL_TDOWN);
		break;
	default:
		uc->state = old_state;
		complete_all(&uc->teardown_completed);
		return -EINVAL;
	}

	return 0;
}

static void udma_cyclic_packet_elapsed(struct udma_chan *uc)
{
	struct udma_desc *d = uc->desc;
	struct cppi5_host_desc_t *h_desc;

	h_desc = d->hwdesc[d->desc_idx].cppi5_desc_vaddr;
	cppi5_hdesc_reset_to_original(h_desc);
	udma_push_to_ring(uc, d->desc_idx);
	d->desc_idx = (d->desc_idx + 1) % d->sglen;
}

static inline void udma_fetch_epib(struct udma_chan *uc, struct udma_desc *d)
{
	struct cppi5_host_desc_t *h_desc = d->hwdesc[0].cppi5_desc_vaddr;

	memcpy(d->metadata, h_desc->epib, d->metadata_size);
}

static bool udma_is_desc_really_done(struct udma_chan *uc, struct udma_desc *d)
{
	u32 peer_bcnt, bcnt;

	/*
	 * Only TX towards PDMA is affected.
	 * If DMA_PREP_INTERRUPT is not set by consumer then skip the transfer
	 * completion calculation, consumer must ensure that there is no stale
	 * data in DMA fabric in this case.
	 */
	if (uc->config.ep_type == PSIL_EP_NATIVE ||
	    uc->config.dir != DMA_MEM_TO_DEV || !(uc->config.tx_flags & DMA_PREP_INTERRUPT))
		return true;

	peer_bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_PEER_BCNT_REG);
	bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG);

	/* Transfer is incomplete, store current residue and time stamp */
	if (peer_bcnt < bcnt) {
		uc->tx_drain.residue = bcnt - peer_bcnt;
		uc->tx_drain.tstamp = ktime_get();
		return false;
	}

	return true;
}

static void udma_check_tx_completion(struct work_struct *work)
{
	struct udma_chan *uc = container_of(work, typeof(*uc),
					    tx_drain.work.work);
	bool desc_done = true;
	u32 residue_diff;
	ktime_t time_diff;
	unsigned long delay;

	while (1) {
		if (uc->desc) {
			/* Get previous residue and time stamp */
			residue_diff = uc->tx_drain.residue;
			time_diff = uc->tx_drain.tstamp;
			/*
			 * Get current residue and time stamp or see if
			 * transfer is complete
			 */
			desc_done = udma_is_desc_really_done(uc, uc->desc);
		}

		if (!desc_done) {
			/*
			 * Find the time delta and residue delta w.r.t
			 * previous poll
			 */
			time_diff = ktime_sub(uc->tx_drain.tstamp,
					      time_diff) + 1;
			residue_diff -= uc->tx_drain.residue;
			if (residue_diff) {
				/*
				 * Try to guess when we should check
				 * next time by calculating rate at
				 * which data is being drained at the
				 * peer device
				 */
				delay = (time_diff / residue_diff) *
					uc->tx_drain.residue;
			} else {
				/* No progress, check again in 1 second */
				schedule_delayed_work(&uc->tx_drain.work, HZ);
				break;
			}

			usleep_range(ktime_to_us(delay),
				     ktime_to_us(delay) + 10);
			continue;
		}

		if (uc->desc) {
			struct udma_desc *d = uc->desc;

			udma_decrement_byte_counters(uc, d->residue);
			udma_start(uc);
			vchan_cookie_complete(&d->vd);
			break;
		}

		break;
	}
}

static irqreturn_t udma_ring_irq_handler(int irq, void *data)
{
	struct udma_chan *uc = data;
	struct udma_desc *d;
	dma_addr_t paddr = 0;

	if (udma_pop_from_ring(uc, &paddr) || !paddr)
		return IRQ_HANDLED;

	spin_lock(&uc->vc.lock);

	/* Teardown completion message */
	if (cppi5_desc_is_tdcm(paddr)) {
		complete_all(&uc->teardown_completed);

		if (uc->terminated_desc) {
			udma_desc_free(&uc->terminated_desc->vd);
			uc->terminated_desc = NULL;
		}

		if (!uc->desc)
			udma_start(uc);

		goto out;
	}

	d = udma_udma_desc_from_paddr(uc, paddr);

	if (d) {
		dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d,
								   d->desc_idx);
		if (desc_paddr != paddr) {
			dev_err(uc->ud->dev, "not matching descriptors!\n");
			goto out;
		}

		if (d == uc->desc) {
			/* active descriptor */
			if (uc->cyclic) {
				udma_cyclic_packet_elapsed(uc);
				vchan_cyclic_callback(&d->vd);
			} else {
				if (udma_is_desc_really_done(uc, d)) {
					udma_decrement_byte_counters(uc, d->residue);
					udma_start(uc);
					vchan_cookie_complete(&d->vd);
				} else {
					schedule_delayed_work(&uc->tx_drain.work,
							      0);
				}
			}
		} else {
			/*
			 * terminated descriptor, mark the descriptor as
			 * completed to update the channel's cookie marker
			 */
			dma_cookie_complete(&d->vd.tx);
		}
	}
out:
	spin_unlock(&uc->vc.lock);

	return IRQ_HANDLED;
}

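/* TR event interrupt: fired when a TR of the active descriptor has completed */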
static irqreturn_t udma_udma_irq_handler(int irq, void *data)
{
	struct udma_chan *uc = data;
	struct udma_desc *d;

	spin_lock(&uc->vc.lock);
	d = uc->desc;
	if (d) {
		d->tr_idx = (d->tr_idx + 1) % d->sglen;

		if (uc->cyclic) {
			vchan_cyclic_callback(&d->vd);
		} else {
			/* TODO: figure out the real amount of data */
			udma_decrement_byte_counters(uc, d->residue);
			udma_start(uc);
			vchan_cookie_complete(&d->vd);
		}
	}

	spin_unlock(&uc->vc.lock);

	return IRQ_HANDLED;
}

/**
 * __udma_alloc_gp_rflow_range - alloc range of GP RX flows
 * @ud: UDMA device
 * @from: Start the search from this flow id number
 * @cnt: Number of consecutive flow ids to allocate
 *
 * Allocate a range of RX flow ids for future use; those flows can be requested
 * only by explicit flow id number. If @from is set to -1 it will try to find
 * the first free range. If @from is a positive value it will force allocation
 * only of the specified range of flows.
 *
 * Returns -ENOMEM if a free range can't be found.
 * -EEXIST if the requested range is busy.
 * -EINVAL if wrong input values are passed.
 * Returns the flow id on success.
 */
static int __udma_alloc_gp_rflow_range(struct udma_dev *ud, int from, int cnt)
{
	int start, tmp_from;
	DECLARE_BITMAP(tmp, K3_UDMA_MAX_RFLOWS);

	tmp_from = from;
	if (tmp_from < 0)
		tmp_from = ud->rchan_cnt;
	/* default flows can't be allocated and are accessible only by id */
	if (tmp_from < ud->rchan_cnt)
		return -EINVAL;

	if (tmp_from + cnt > ud->rflow_cnt)
		return -EINVAL;

	bitmap_or(tmp, ud->rflow_gp_map, ud->rflow_gp_map_allocated,
		  ud->rflow_cnt);

	start = bitmap_find_next_zero_area(tmp,
					   ud->rflow_cnt,
					   tmp_from, cnt, 0);
	if (start >= ud->rflow_cnt)
		return -ENOMEM;

	if (from >= 0 && start != from)
		return -EEXIST;

	bitmap_set(ud->rflow_gp_map_allocated, start, cnt);
	return start;
}

static int __udma_free_gp_rflow_range(struct udma_dev *ud, int from, int cnt)
{
	if (from < ud->rchan_cnt)
		return -EINVAL;
	if (from + cnt > ud->rflow_cnt)
		return -EINVAL;

	bitmap_clear(ud->rflow_gp_map_allocated, from, cnt);
	return 0;
}

static struct udma_rflow *__udma_get_rflow(struct udma_dev *ud, int id)
{
	/*
	 * Attempt to request rflow by ID can be made for any rflow
	 * if not in use, with the assumption that the caller knows what it is
	 * doing. TI-SCI FW will perform an additional permission check anyway,
	 * so it's safe.
	 */

	if (id < 0 || id >= ud->rflow_cnt)
		return ERR_PTR(-ENOENT);

	if (test_bit(id, ud->rflow_in_use))
		return ERR_PTR(-ENOENT);

	if (ud->rflow_gp_map) {
		/* GP rflow has to be allocated first */
		if (!test_bit(id, ud->rflow_gp_map) &&
		    !test_bit(id, ud->rflow_gp_map_allocated))
			return ERR_PTR(-EINVAL);
	}

	dev_dbg(ud->dev, "get rflow%d\n", id);
	set_bit(id, ud->rflow_in_use);
	return &ud->rflows[id];
}

static void __udma_put_rflow(struct udma_dev *ud, struct udma_rflow *rflow)
{
	if (!test_bit(rflow->id, ud->rflow_in_use)) {
		dev_err(ud->dev, "attempt to put unused rflow%d\n", rflow->id);
		return;
	}

	dev_dbg(ud->dev, "put rflow%d\n", rflow->id);
	clear_bit(rflow->id, ud->rflow_in_use);
}

#define UDMA_RESERVE_RESOURCE(res)					\
static struct udma_##res *__udma_reserve_##res(struct udma_dev *ud,	\
					       enum udma_tp_level tpl,	\
					       int id)			\
{									\
	if (id >= 0) {							\
		if (test_bit(id, ud->res##_map)) {			\
			dev_err(ud->dev, "res##%d is in use\n", id);	\
			return ERR_PTR(-ENOENT);			\
		}							\
	} else {							\
		int start;						\
									\
		if (tpl >= ud->res##_tpl.levels)			\
			tpl = ud->res##_tpl.levels - 1;			\
									\
		start = ud->res##_tpl.start_idx[tpl];			\
									\
		id = find_next_zero_bit(ud->res##_map, ud->res##_cnt,	\
					start);				\
		if (id == ud->res##_cnt) {				\
			return ERR_PTR(-ENOENT);			\
		}							\
	}								\
									\
	set_bit(id, ud->res##_map);					\
	return &ud->res##s[id];						\
}

UDMA_RESERVE_RESOURCE(bchan);
UDMA_RESERVE_RESOURCE(tchan);
UDMA_RESERVE_RESOURCE(rchan);

static int bcdma_get_bchan(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	enum udma_tp_level tpl;
	int ret;

	if (uc->bchan) {
		dev_dbg(ud->dev, "chan%d: already have bchan%d allocated\n",
			uc->id, uc->bchan->id);
		return 0;
	}

	/*
	 * Use normal channels for peripherals, and highest TPL channel for
	 * mem2mem
	 */
	if (uc->config.tr_trigger_type)
		tpl = 0;
	else
		tpl = ud->bchan_tpl.levels - 1;

	uc->bchan = __udma_reserve_bchan(ud, tpl, -1);
	if (IS_ERR(uc->bchan)) {
		ret = PTR_ERR(uc->bchan);
		uc->bchan = NULL;
		return ret;
	}

	uc->tchan = uc->bchan;

	return 0;
}

static int udma_get_tchan(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	int ret;

	if (uc->tchan) {
		dev_dbg(ud->dev, "chan%d: already have tchan%d allocated\n",
			uc->id, uc->tchan->id);
		return 0;
	}

	/*
	 * mapped_channel_id is -1 for UDMA, BCDMA and PKTDMA unmapped channels.
	 * For PKTDMA mapped channels it is configured to a channel which must
	 * be used to service the peripheral.
	 */
	uc->tchan = __udma_reserve_tchan(ud, uc->config.channel_tpl,
					 uc->config.mapped_channel_id);
	if (IS_ERR(uc->tchan)) {
		ret = PTR_ERR(uc->tchan);
		uc->tchan = NULL;
		return ret;
	}

	if (ud->tflow_cnt) {
		int tflow_id;

		/* Only PKTDMA has support for tx flows */
		if (uc->config.default_flow_id >= 0)
			tflow_id = uc->config.default_flow_id;
		else
			tflow_id = uc->tchan->id;

		if (test_bit(tflow_id, ud->tflow_map)) {
			dev_err(ud->dev, "tflow%d is in use\n", tflow_id);
			clear_bit(uc->tchan->id, ud->tchan_map);
			uc->tchan = NULL;
			return -ENOENT;
		}

		uc->tchan->tflow_id = tflow_id;
		set_bit(tflow_id, ud->tflow_map);
	} else {
		uc->tchan->tflow_id = -1;
	}

	return 0;
}

static int udma_get_rchan(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	int ret;

	if (uc->rchan) {
		dev_dbg(ud->dev, "chan%d: already have rchan%d allocated\n",
			uc->id, uc->rchan->id);
		return 0;
	}

	/*
	 * mapped_channel_id is -1 for UDMA, BCDMA and PKTDMA unmapped channels.
	 * For PKTDMA mapped channels it is configured to a channel which must
	 * be used to service the peripheral.
	 */
	uc->rchan = __udma_reserve_rchan(ud, uc->config.channel_tpl,
					 uc->config.mapped_channel_id);
	if (IS_ERR(uc->rchan)) {
		ret = PTR_ERR(uc->rchan);
		uc->rchan = NULL;
		return ret;
	}

	return 0;
}

static int udma_get_chan_pair(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	int chan_id, end;

	if ((uc->tchan && uc->rchan) && uc->tchan->id == uc->rchan->id) {
		dev_info(ud->dev, "chan%d: already have %d pair allocated\n",
			 uc->id, uc->tchan->id);
		return 0;
	}

	if (uc->tchan) {
		dev_err(ud->dev, "chan%d: already have tchan%d allocated\n",
			uc->id, uc->tchan->id);
		return -EBUSY;
	} else if (uc->rchan) {
		dev_err(ud->dev, "chan%d: already have rchan%d allocated\n",
			uc->id, uc->rchan->id);
		return -EBUSY;
	}

	/* Can be optimized, but let's have it like this for now */
	end = min(ud->tchan_cnt, ud->rchan_cnt);
	/*
	 * Try to use the highest TPL channel pair for MEM_TO_MEM channels
	 * Note: in UDMAP the channel TPL is symmetric between tchan and rchan
	 */
	chan_id = ud->tchan_tpl.start_idx[ud->tchan_tpl.levels - 1];
	for (; chan_id < end; chan_id++) {
		if (!test_bit(chan_id, ud->tchan_map) &&
		    !test_bit(chan_id, ud->rchan_map))
			break;
	}

	if (chan_id == end)
		return -ENOENT;

	set_bit(chan_id, ud->tchan_map);
	set_bit(chan_id, ud->rchan_map);
	uc->tchan = &ud->tchans[chan_id];
	uc->rchan = &ud->rchans[chan_id];

	/* UDMA does not use tx flows */
	uc->tchan->tflow_id = -1;

	return 0;
}

static int udma_get_rflow(struct udma_chan *uc, int flow_id)
{
	struct udma_dev *ud = uc->ud;
	int ret;

	if (!uc->rchan) {
		dev_err(ud->dev, "chan%d: does not have rchan??\n", uc->id);
		return -EINVAL;
	}

	if (uc->rflow) {
		dev_dbg(ud->dev, "chan%d: already have rflow%d allocated\n",
			uc->id, uc->rflow->id);
		return 0;
	}

	uc->rflow = __udma_get_rflow(ud, flow_id);
	if (IS_ERR(uc->rflow)) {
		ret = PTR_ERR(uc->rflow);
		uc->rflow = NULL;
		return ret;
	}

	return 0;
}

static void bcdma_put_bchan(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->bchan) {
		dev_dbg(ud->dev, "chan%d: put bchan%d\n", uc->id,
			uc->bchan->id);
		clear_bit(uc->bchan->id, ud->bchan_map);
		uc->bchan = NULL;
		uc->tchan = NULL;
	}
}

static void udma_put_rchan(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->rchan) {
		dev_dbg(ud->dev, "chan%d: put rchan%d\n", uc->id,
			uc->rchan->id);
		clear_bit(uc->rchan->id, ud->rchan_map);
		uc->rchan = NULL;
	}
}

static void udma_put_tchan(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->tchan) {
		dev_dbg(ud->dev, "chan%d: put tchan%d\n", uc->id,
			uc->tchan->id);
		clear_bit(uc->tchan->id, ud->tchan_map);

		if (uc->tchan->tflow_id >= 0)
			clear_bit(uc->tchan->tflow_id, ud->tflow_map);

		uc->tchan = NULL;
	}
}

static void udma_put_rflow(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->rflow) {
		dev_dbg(ud->dev, "chan%d: put rflow%d\n", uc->id,
			uc->rflow->id);
		__udma_put_rflow(ud, uc->rflow);
		uc->rflow = NULL;
	}
}

static void bcdma_free_bchan_resources(struct udma_chan *uc)
{
	if (!uc->bchan)
		return;

	k3_ringacc_ring_free(uc->bchan->tc_ring);
	k3_ringacc_ring_free(uc->bchan->t_ring);
	uc->bchan->tc_ring = NULL;
	uc->bchan->t_ring = NULL;
	k3_configure_chan_coherency(&uc->vc.chan, 0);

	bcdma_put_bchan(uc);
}

static int bcdma_alloc_bchan_resources(struct udma_chan *uc)
{
	struct k3_ring_cfg ring_cfg;
	struct udma_dev *ud = uc->ud;
	int ret;

	ret = bcdma_get_bchan(uc);
	if (ret)
		return ret;

	ret = k3_ringacc_request_rings_pair(ud->ringacc, uc->bchan->id, -1,
					    &uc->bchan->t_ring,
					    &uc->bchan->tc_ring);
	if (ret) {
		ret = -EBUSY;
		goto err_ring;
	}

	memset(&ring_cfg, 0, sizeof(ring_cfg));
	ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
	ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
	ring_cfg.mode = K3_RINGACC_RING_MODE_RING;

	k3_configure_chan_coherency(&uc->vc.chan, ud->asel);
	ring_cfg.asel = ud->asel;
	ring_cfg.dma_dev = dmaengine_get_dma_device(&uc->vc.chan);

	ret = k3_ringacc_ring_cfg(uc->bchan->t_ring, &ring_cfg);
	if (ret)
		goto err_ringcfg;

	return 0;

err_ringcfg:
	k3_ringacc_ring_free(uc->bchan->tc_ring);
	uc->bchan->tc_ring = NULL;
	k3_ringacc_ring_free(uc->bchan->t_ring);
	uc->bchan->t_ring = NULL;
	k3_configure_chan_coherency(&uc->vc.chan, 0);
err_ring:
	bcdma_put_bchan(uc);

	return ret;
}

static void udma_free_tx_resources(struct udma_chan *uc)
{
	if (!uc->tchan)
		return;

	k3_ringacc_ring_free(uc->tchan->t_ring);
	k3_ringacc_ring_free(uc->tchan->tc_ring);
	uc->tchan->t_ring = NULL;
	uc->tchan->tc_ring = NULL;

	udma_put_tchan(uc);
}

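/* Reserve a tchan and set up its transmit/completion ring pair */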
static int udma_alloc_tx_resources(struct udma_chan *uc)
{
	struct k3_ring_cfg ring_cfg;
	struct udma_dev *ud = uc->ud;
	struct udma_tchan *tchan;
	int ring_idx, ret;

	ret = udma_get_tchan(uc);
	if (ret)
		return ret;

	tchan = uc->tchan;
	if (tchan->tflow_id >= 0)
		ring_idx = tchan->tflow_id;
	else
		ring_idx = ud->bchan_cnt + tchan->id;

	ret = k3_ringacc_request_rings_pair(ud->ringacc, ring_idx, -1,
					    &tchan->t_ring,
					    &tchan->tc_ring);
	if (ret) {
		ret = -EBUSY;
		goto err_ring;
	}

	memset(&ring_cfg, 0, sizeof(ring_cfg));
	ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
	ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
	if (ud->match_data->type == DMA_TYPE_UDMA) {
		ring_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE;
	} else {
		ring_cfg.mode = K3_RINGACC_RING_MODE_RING;

		k3_configure_chan_coherency(&uc->vc.chan, uc->config.asel);
		ring_cfg.asel = uc->config.asel;
		ring_cfg.dma_dev = dmaengine_get_dma_device(&uc->vc.chan);
	}

	ret = k3_ringacc_ring_cfg(tchan->t_ring, &ring_cfg);
	ret |= k3_ringacc_ring_cfg(tchan->tc_ring, &ring_cfg);

	if (ret)
		goto err_ringcfg;

	return 0;

err_ringcfg:
	k3_ringacc_ring_free(uc->tchan->tc_ring);
	uc->tchan->tc_ring = NULL;
	k3_ringacc_ring_free(uc->tchan->t_ring);
	uc->tchan->t_ring = NULL;
err_ring:
	udma_put_tchan(uc);

	return ret;
}

static void udma_free_rx_resources(struct udma_chan *uc)
{
	if (!uc->rchan)
		return;

	if (uc->rflow) {
		struct udma_rflow *rflow = uc->rflow;

		k3_ringacc_ring_free(rflow->fd_ring);
		k3_ringacc_ring_free(rflow->r_ring);
		rflow->fd_ring = NULL;
		rflow->r_ring = NULL;

		udma_put_rflow(uc);
	}

	udma_put_rchan(uc);
}

static int udma_alloc_rx_resources(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	struct k3_ring_cfg ring_cfg;
	struct udma_rflow *rflow;
	int fd_ring_id;
	int ret;

	ret = udma_get_rchan(uc);
	if (ret)
		return ret;

	/* For MEM_TO_MEM we don't need rflow or rings */
	if (uc->config.dir == DMA_MEM_TO_MEM)
		return 0;

	if (uc->config.default_flow_id >= 0)
		ret = udma_get_rflow(uc, uc->config.default_flow_id);
	else
		ret = udma_get_rflow(uc, uc->rchan->id);

	if (ret) {
		ret = -EBUSY;
		goto err_rflow;
	}

	rflow = uc->rflow;
	if (ud->tflow_cnt)
		fd_ring_id = ud->tflow_cnt + rflow->id;
	else
		fd_ring_id = ud->bchan_cnt + ud->tchan_cnt + ud->echan_cnt +
			     uc->rchan->id;

	ret = k3_ringacc_request_rings_pair(ud->ringacc, fd_ring_id, -1,
					    &rflow->fd_ring, &rflow->r_ring);
	if (ret) {
		ret = -EBUSY;
		goto err_ring;
	}

	memset(&ring_cfg, 0, sizeof(ring_cfg));

	ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
	if (ud->match_data->type == DMA_TYPE_UDMA) {
		if (uc->config.pkt_mode)
			ring_cfg.size = SG_MAX_SEGMENTS;
		else
			ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;

		ring_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE;
	} else {
		ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
		ring_cfg.mode = K3_RINGACC_RING_MODE_RING;

		k3_configure_chan_coherency(&uc->vc.chan, uc->config.asel);
		ring_cfg.asel = uc->config.asel;
		ring_cfg.dma_dev = dmaengine_get_dma_device(&uc->vc.chan);
	}

	ret = k3_ringacc_ring_cfg(rflow->fd_ring, &ring_cfg);

	ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
	ret |= k3_ringacc_ring_cfg(rflow->r_ring, &ring_cfg);

	if (ret)
		goto err_ringcfg;

	return 0;

err_ringcfg:
	k3_ringacc_ring_free(rflow->r_ring);
	rflow->r_ring = NULL;
	k3_ringacc_ring_free(rflow->fd_ring);
	rflow->fd_ring = NULL;
err_ring:
	udma_put_rflow(uc);
err_rflow:
	udma_put_rchan(uc);

	return ret;
}

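/* Masks selecting which fields are valid in the TI-SCI channel/flow configuration requests below */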
#define TISCI_BCDMA_BCHAN_VALID_PARAMS (			\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_EXTENDED_CH_TYPE_VALID)

#define TISCI_BCDMA_TCHAN_VALID_PARAMS (			\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID)

#define TISCI_BCDMA_RCHAN_VALID_PARAMS (			\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID)

#define TISCI_UDMA_TCHAN_VALID_PARAMS (				\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_EINFO_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_PSWORDS_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |		\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |		\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |		\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID)

#define TISCI_UDMA_RCHAN_VALID_PARAMS (				\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |		\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |		\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |		\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_SHORT_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_LONG_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID)

static int udma_tisci_m2m_channel_config(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
	const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
	struct udma_tchan *tchan = uc->tchan;
	struct udma_rchan *rchan = uc->rchan;
	u8 burst_size = 0;
	int ret;
	u8 tpl;

	/* Non synchronized - mem to mem type of transfer */
	int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring);
	struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
	struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };

	if (ud->match_data->flags & UDMA_FLAG_BURST_SIZE) {
		tpl = udma_get_chan_tpl_index(&ud->tchan_tpl, tchan->id);

		burst_size = ud->match_data->burst_size[tpl];
	}

	req_tx.valid_params = TISCI_UDMA_TCHAN_VALID_PARAMS;
	req_tx.nav_id = tisci_rm->tisci_dev_id;
	req_tx.index = tchan->id;
	req_tx.tx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
	req_tx.tx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
	req_tx.txcq_qnum = tc_ring;
	req_tx.tx_atype = ud->atype;
	if (burst_size) {
		req_tx.valid_params |= TI_SCI_MSG_VALUE_RM_UDMAP_CH_BURST_SIZE_VALID;
		req_tx.tx_burst_size = burst_size;
	}

	ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
	if (ret) {
		dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);
		return ret;
	}

	req_rx.valid_params = TISCI_UDMA_RCHAN_VALID_PARAMS;
	req_rx.nav_id = tisci_rm->tisci_dev_id;
	req_rx.index = rchan->id;
	req_rx.rx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
	req_rx.rxcq_qnum = tc_ring;
	req_rx.rx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
	req_rx.rx_atype = ud->atype;
	if (burst_size) {
		req_rx.valid_params |= TI_SCI_MSG_VALUE_RM_UDMAP_CH_BURST_SIZE_VALID;
		req_rx.rx_burst_size = burst_size;
	}

	ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
	if (ret)
		dev_err(ud->dev, "rchan%d alloc failed %d\n", rchan->id, ret);

	return ret;
}

static int bcdma_tisci_m2m_channel_config(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
	const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
	struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
	struct udma_bchan *bchan = uc->bchan;
	u8 burst_size = 0;
	int ret;
	u8 tpl;

	if (ud->match_data->flags & UDMA_FLAG_BURST_SIZE) {
		tpl = udma_get_chan_tpl_index(&ud->bchan_tpl, bchan->id);

		burst_size = ud->match_data->burst_size[tpl];
	}

	req_tx.valid_params = TISCI_BCDMA_BCHAN_VALID_PARAMS;
	req_tx.nav_id = tisci_rm->tisci_dev_id;
	req_tx.extended_ch_type = TI_SCI_RM_BCDMA_EXTENDED_CH_TYPE_BCHAN;
	req_tx.index = bchan->id;
	if (burst_size) {
		req_tx.valid_params |= TI_SCI_MSG_VALUE_RM_UDMAP_CH_BURST_SIZE_VALID;
		req_tx.tx_burst_size = burst_size;
	}

	ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
	if (ret)
		dev_err(ud->dev, "bchan%d cfg failed %d\n", bchan->id, ret);

	return ret;
}

static int udma_tisci_tx_channel_config(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
	const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
	struct udma_tchan *tchan = uc->tchan;
	int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring);
	struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
	u32 mode, fetch_size;
	int ret;

	if (uc->config.pkt_mode) {
		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
		fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib,
						   uc->config.psd_size, 0);
	} else {
		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBRR;
		fetch_size = sizeof(struct cppi5_desc_hdr_t);
	}

	req_tx.valid_params = TISCI_UDMA_TCHAN_VALID_PARAMS;
	req_tx.nav_id = tisci_rm->tisci_dev_id;
	req_tx.index = tchan->id;
	req_tx.tx_chan_type = mode;
	req_tx.tx_supr_tdpkt = uc->config.notdpkt;
	req_tx.tx_fetch_size = fetch_size >> 2;
	req_tx.txcq_qnum = tc_ring;
	req_tx.tx_atype = uc->config.atype;
	if (uc->config.ep_type == PSIL_EP_PDMA_XY &&
	    ud->match_data->flags & UDMA_FLAG_TDTYPE) {
		/* wait for peer to complete the teardown for PDMAs */
		req_tx.valid_params |=
				TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_TDTYPE_VALID;
		req_tx.tx_tdtype = 1;
	}

	ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
	if (ret)
		dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);

	return ret;
}

static int bcdma_tisci_tx_channel_config(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
	const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
	struct udma_tchan *tchan = uc->tchan;
	struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
	int ret;

	req_tx.valid_params = TISCI_BCDMA_TCHAN_VALID_PARAMS;
	req_tx.nav_id = tisci_rm->tisci_dev_id;
	req_tx.index = tchan->id;
	req_tx.tx_supr_tdpkt = uc->config.notdpkt;
	if (ud->match_data->flags & UDMA_FLAG_TDTYPE) {
		/* wait for peer to complete the teardown for PDMAs */
		req_tx.valid_params |=
				TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_TDTYPE_VALID;
		req_tx.tx_tdtype = 1;
	}

	ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
	if (ret)
		dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);

	return ret;
}

#define pktdma_tisci_tx_channel_config bcdma_tisci_tx_channel_config

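/* Configure the UDMA rchan and its default rflow through TI-SCI */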
static int udma_tisci_rx_channel_config(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
	const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
	struct udma_rchan *rchan = uc->rchan;
	int fd_ring = k3_ringacc_get_ring_id(uc->rflow->fd_ring);
	int rx_ring = k3_ringacc_get_ring_id(uc->rflow->r_ring);
	struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
	struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 };
	u32 mode, fetch_size;
	int ret;

	if (uc->config.pkt_mode) {
		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
		fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib,
						   uc->config.psd_size, 0);
	} else {
		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBRR;
		fetch_size = sizeof(struct cppi5_desc_hdr_t);
	}

	req_rx.valid_params = TISCI_UDMA_RCHAN_VALID_PARAMS;
	req_rx.nav_id = tisci_rm->tisci_dev_id;
	req_rx.index = rchan->id;
	req_rx.rx_fetch_size = fetch_size >> 2;
	req_rx.rxcq_qnum = rx_ring;
	req_rx.rx_chan_type = mode;
	req_rx.rx_atype = uc->config.atype;

	ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
	if (ret) {
		dev_err(ud->dev, "rchan%d cfg failed %d\n", rchan->id, ret);
		return ret;
	}

	flow_req.valid_params =
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;

	flow_req.nav_id = tisci_rm->tisci_dev_id;
	flow_req.flow_index = rchan->id;

	if (uc->config.needs_epib)
		flow_req.rx_einfo_present = 1;
	else
		flow_req.rx_einfo_present = 0;
	if (uc->config.psd_size)
		flow_req.rx_psinfo_present = 1;
	else
		flow_req.rx_psinfo_present = 0;
	flow_req.rx_error_handling = 1;
	flow_req.rx_dest_qnum = rx_ring;
	flow_req.rx_src_tag_hi_sel = UDMA_RFLOW_SRCTAG_NONE;
	flow_req.rx_src_tag_lo_sel = UDMA_RFLOW_SRCTAG_SRC_TAG;
	flow_req.rx_dest_tag_hi_sel = UDMA_RFLOW_DSTTAG_DST_TAG_HI;
	flow_req.rx_dest_tag_lo_sel = UDMA_RFLOW_DSTTAG_DST_TAG_LO;
	flow_req.rx_fdq0_sz0_qnum = fd_ring;
	flow_req.rx_fdq1_qnum = fd_ring;
	flow_req.rx_fdq2_qnum = fd_ring;
	flow_req.rx_fdq3_qnum = fd_ring;

	ret = tisci_ops->rx_flow_cfg(tisci_rm->tisci, &flow_req);

	if (ret)
		dev_err(ud->dev, "flow%d config failed: %d\n", rchan->id, ret);

	return 0;
}

static int bcdma_tisci_rx_channel_config(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
	const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops; 2113 struct udma_rchan *rchan = uc->rchan; 2114 struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 }; 2115 int ret; 2116 2117 req_rx.valid_params = TISCI_BCDMA_RCHAN_VALID_PARAMS; 2118 req_rx.nav_id = tisci_rm->tisci_dev_id; 2119 req_rx.index = rchan->id; 2120 2121 ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx); 2122 if (ret) 2123 dev_err(ud->dev, "rchan%d cfg failed %d\n", rchan->id, ret); 2124 2125 return ret; 2126 } 2127 2128 static int pktdma_tisci_rx_channel_config(struct udma_chan *uc) 2129 { 2130 struct udma_dev *ud = uc->ud; 2131 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; 2132 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops; 2133 struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 }; 2134 struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 }; 2135 int ret; 2136 2137 req_rx.valid_params = TISCI_BCDMA_RCHAN_VALID_PARAMS; 2138 req_rx.nav_id = tisci_rm->tisci_dev_id; 2139 req_rx.index = uc->rchan->id; 2140 2141 ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx); 2142 if (ret) { 2143 dev_err(ud->dev, "rchan%d cfg failed %d\n", uc->rchan->id, ret); 2144 return ret; 2145 } 2146 2147 flow_req.valid_params = 2148 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID | 2149 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID | 2150 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID; 2151 2152 flow_req.nav_id = tisci_rm->tisci_dev_id; 2153 flow_req.flow_index = uc->rflow->id; 2154 2155 if (uc->config.needs_epib) 2156 flow_req.rx_einfo_present = 1; 2157 else 2158 flow_req.rx_einfo_present = 0; 2159 if (uc->config.psd_size) 2160 flow_req.rx_psinfo_present = 1; 2161 else 2162 flow_req.rx_psinfo_present = 0; 2163 flow_req.rx_error_handling = 1; 2164 2165 ret = tisci_ops->rx_flow_cfg(tisci_rm->tisci, &flow_req); 2166 2167 if (ret) 2168 dev_err(ud->dev, "flow%d config failed: %d\n", uc->rflow->id, 2169 ret); 2170 2171 return ret; 2172 } 2173 2174 static int udma_alloc_chan_resources(struct dma_chan *chan) 2175 { 2176 struct udma_chan *uc = to_udma_chan(chan); 2177 struct udma_dev *ud = to_udma_dev(chan->device); 2178 const struct udma_soc_data *soc_data = ud->soc_data; 2179 struct k3_ring *irq_ring; 2180 u32 irq_udma_idx; 2181 int ret; 2182 2183 uc->dma_dev = ud->dev; 2184 2185 if (uc->config.pkt_mode || uc->config.dir == DMA_MEM_TO_MEM) { 2186 uc->use_dma_pool = true; 2187 /* in case of MEM_TO_MEM we have maximum of two TRs */ 2188 if (uc->config.dir == DMA_MEM_TO_MEM) { 2189 uc->config.hdesc_size = cppi5_trdesc_calc_size( 2190 sizeof(struct cppi5_tr_type15_t), 2); 2191 uc->config.pkt_mode = false; 2192 } 2193 } 2194 2195 if (uc->use_dma_pool) { 2196 uc->hdesc_pool = dma_pool_create(uc->name, ud->ddev.dev, 2197 uc->config.hdesc_size, 2198 ud->desc_align, 2199 0); 2200 if (!uc->hdesc_pool) { 2201 dev_err(ud->ddev.dev, 2202 "Descriptor pool allocation failed\n"); 2203 uc->use_dma_pool = false; 2204 ret = -ENOMEM; 2205 goto err_cleanup; 2206 } 2207 } 2208 2209 /* 2210 * Make sure that the completion is in a known state: 2211 * No teardown, the channel is idle 2212 */ 2213 reinit_completion(&uc->teardown_completed); 2214 complete_all(&uc->teardown_completed); 2215 uc->state = UDMA_CHAN_IS_IDLE; 2216 2217 switch (uc->config.dir) { 2218 case DMA_MEM_TO_MEM: 2219 /* Non synchronized - mem to mem type of transfer */ 2220 dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-MEM\n", __func__, 2221 uc->id); 2222 2223 ret = udma_get_chan_pair(uc); 2224 if (ret) 2225 goto err_cleanup; 2226 2227 ret = 
udma_alloc_tx_resources(uc); 2228 if (ret) { 2229 udma_put_rchan(uc); 2230 goto err_cleanup; 2231 } 2232 2233 ret = udma_alloc_rx_resources(uc); 2234 if (ret) { 2235 udma_free_tx_resources(uc); 2236 goto err_cleanup; 2237 } 2238 2239 uc->config.src_thread = ud->psil_base + uc->tchan->id; 2240 uc->config.dst_thread = (ud->psil_base + uc->rchan->id) | 2241 K3_PSIL_DST_THREAD_ID_OFFSET; 2242 2243 irq_ring = uc->tchan->tc_ring; 2244 irq_udma_idx = uc->tchan->id; 2245 2246 ret = udma_tisci_m2m_channel_config(uc); 2247 break; 2248 case DMA_MEM_TO_DEV: 2249 /* Slave transfer synchronized - mem to dev (TX) trasnfer */ 2250 dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__, 2251 uc->id); 2252 2253 ret = udma_alloc_tx_resources(uc); 2254 if (ret) 2255 goto err_cleanup; 2256 2257 uc->config.src_thread = ud->psil_base + uc->tchan->id; 2258 uc->config.dst_thread = uc->config.remote_thread_id; 2259 uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET; 2260 2261 irq_ring = uc->tchan->tc_ring; 2262 irq_udma_idx = uc->tchan->id; 2263 2264 ret = udma_tisci_tx_channel_config(uc); 2265 break; 2266 case DMA_DEV_TO_MEM: 2267 /* Slave transfer synchronized - dev to mem (RX) trasnfer */ 2268 dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__, 2269 uc->id); 2270 2271 ret = udma_alloc_rx_resources(uc); 2272 if (ret) 2273 goto err_cleanup; 2274 2275 uc->config.src_thread = uc->config.remote_thread_id; 2276 uc->config.dst_thread = (ud->psil_base + uc->rchan->id) | 2277 K3_PSIL_DST_THREAD_ID_OFFSET; 2278 2279 irq_ring = uc->rflow->r_ring; 2280 irq_udma_idx = soc_data->oes.udma_rchan + uc->rchan->id; 2281 2282 ret = udma_tisci_rx_channel_config(uc); 2283 break; 2284 default: 2285 /* Can not happen */ 2286 dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n", 2287 __func__, uc->id, uc->config.dir); 2288 ret = -EINVAL; 2289 goto err_cleanup; 2290 2291 } 2292 2293 /* check if the channel configuration was successful */ 2294 if (ret) 2295 goto err_res_free; 2296 2297 if (udma_is_chan_running(uc)) { 2298 dev_warn(ud->dev, "chan%d: is running!\n", uc->id); 2299 udma_reset_chan(uc, false); 2300 if (udma_is_chan_running(uc)) { 2301 dev_err(ud->dev, "chan%d: won't stop!\n", uc->id); 2302 ret = -EBUSY; 2303 goto err_res_free; 2304 } 2305 } 2306 2307 /* PSI-L pairing */ 2308 ret = navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread); 2309 if (ret) { 2310 dev_err(ud->dev, "PSI-L pairing failed: 0x%04x -> 0x%04x\n", 2311 uc->config.src_thread, uc->config.dst_thread); 2312 goto err_res_free; 2313 } 2314 2315 uc->psil_paired = true; 2316 2317 uc->irq_num_ring = k3_ringacc_get_ring_irq_num(irq_ring); 2318 if (uc->irq_num_ring <= 0) { 2319 dev_err(ud->dev, "Failed to get ring irq (index: %u)\n", 2320 k3_ringacc_get_ring_id(irq_ring)); 2321 ret = -EINVAL; 2322 goto err_psi_free; 2323 } 2324 2325 ret = request_irq(uc->irq_num_ring, udma_ring_irq_handler, 2326 IRQF_TRIGGER_HIGH, uc->name, uc); 2327 if (ret) { 2328 dev_err(ud->dev, "chan%d: ring irq request failed\n", uc->id); 2329 goto err_irq_free; 2330 } 2331 2332 /* Event from UDMA (TR events) only needed for slave TR mode channels */ 2333 if (is_slave_direction(uc->config.dir) && !uc->config.pkt_mode) { 2334 uc->irq_num_udma = msi_get_virq(ud->dev, irq_udma_idx); 2335 if (uc->irq_num_udma <= 0) { 2336 dev_err(ud->dev, "Failed to get udma irq (index: %u)\n", 2337 irq_udma_idx); 2338 free_irq(uc->irq_num_ring, uc); 2339 ret = -EINVAL; 2340 goto err_irq_free; 2341 } 2342 2343 ret = request_irq(uc->irq_num_udma, udma_udma_irq_handler, 0, 2344 
uc->name, uc); 2345 if (ret) { 2346 dev_err(ud->dev, "chan%d: UDMA irq request failed\n", 2347 uc->id); 2348 free_irq(uc->irq_num_ring, uc); 2349 goto err_irq_free; 2350 } 2351 } else { 2352 uc->irq_num_udma = 0; 2353 } 2354 2355 udma_reset_rings(uc); 2356 2357 return 0; 2358 2359 err_irq_free: 2360 uc->irq_num_ring = 0; 2361 uc->irq_num_udma = 0; 2362 err_psi_free: 2363 navss_psil_unpair(ud, uc->config.src_thread, uc->config.dst_thread); 2364 uc->psil_paired = false; 2365 err_res_free: 2366 udma_free_tx_resources(uc); 2367 udma_free_rx_resources(uc); 2368 err_cleanup: 2369 udma_reset_uchan(uc); 2370 2371 if (uc->use_dma_pool) { 2372 dma_pool_destroy(uc->hdesc_pool); 2373 uc->use_dma_pool = false; 2374 } 2375 2376 return ret; 2377 } 2378 2379 static int bcdma_alloc_chan_resources(struct dma_chan *chan) 2380 { 2381 struct udma_chan *uc = to_udma_chan(chan); 2382 struct udma_dev *ud = to_udma_dev(chan->device); 2383 const struct udma_oes_offsets *oes = &ud->soc_data->oes; 2384 u32 irq_udma_idx, irq_ring_idx; 2385 int ret; 2386 2387 /* Only TR mode is supported */ 2388 uc->config.pkt_mode = false; 2389 2390 /* 2391 * Make sure that the completion is in a known state: 2392 * No teardown, the channel is idle 2393 */ 2394 reinit_completion(&uc->teardown_completed); 2395 complete_all(&uc->teardown_completed); 2396 uc->state = UDMA_CHAN_IS_IDLE; 2397 2398 switch (uc->config.dir) { 2399 case DMA_MEM_TO_MEM: 2400 /* Non synchronized - mem to mem type of transfer */ 2401 dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-MEM\n", __func__, 2402 uc->id); 2403 2404 ret = bcdma_alloc_bchan_resources(uc); 2405 if (ret) 2406 return ret; 2407 2408 irq_ring_idx = uc->bchan->id + oes->bcdma_bchan_ring; 2409 irq_udma_idx = uc->bchan->id + oes->bcdma_bchan_data; 2410 2411 ret = bcdma_tisci_m2m_channel_config(uc); 2412 break; 2413 case DMA_MEM_TO_DEV: 2414 /* Slave transfer synchronized - mem to dev (TX) trasnfer */ 2415 dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__, 2416 uc->id); 2417 2418 ret = udma_alloc_tx_resources(uc); 2419 if (ret) { 2420 uc->config.remote_thread_id = -1; 2421 return ret; 2422 } 2423 2424 uc->config.src_thread = ud->psil_base + uc->tchan->id; 2425 uc->config.dst_thread = uc->config.remote_thread_id; 2426 uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET; 2427 2428 irq_ring_idx = uc->tchan->id + oes->bcdma_tchan_ring; 2429 irq_udma_idx = uc->tchan->id + oes->bcdma_tchan_data; 2430 2431 ret = bcdma_tisci_tx_channel_config(uc); 2432 break; 2433 case DMA_DEV_TO_MEM: 2434 /* Slave transfer synchronized - dev to mem (RX) trasnfer */ 2435 dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__, 2436 uc->id); 2437 2438 ret = udma_alloc_rx_resources(uc); 2439 if (ret) { 2440 uc->config.remote_thread_id = -1; 2441 return ret; 2442 } 2443 2444 uc->config.src_thread = uc->config.remote_thread_id; 2445 uc->config.dst_thread = (ud->psil_base + uc->rchan->id) | 2446 K3_PSIL_DST_THREAD_ID_OFFSET; 2447 2448 irq_ring_idx = uc->rchan->id + oes->bcdma_rchan_ring; 2449 irq_udma_idx = uc->rchan->id + oes->bcdma_rchan_data; 2450 2451 ret = bcdma_tisci_rx_channel_config(uc); 2452 break; 2453 default: 2454 /* Can not happen */ 2455 dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n", 2456 __func__, uc->id, uc->config.dir); 2457 return -EINVAL; 2458 } 2459 2460 /* check if the channel configuration was successful */ 2461 if (ret) 2462 goto err_res_free; 2463 2464 if (udma_is_chan_running(uc)) { 2465 dev_warn(ud->dev, "chan%d: is running!\n", uc->id); 2466 udma_reset_chan(uc, false); 2467 if 
(udma_is_chan_running(uc)) { 2468 dev_err(ud->dev, "chan%d: won't stop!\n", uc->id); 2469 ret = -EBUSY; 2470 goto err_res_free; 2471 } 2472 } 2473 2474 uc->dma_dev = dmaengine_get_dma_device(chan); 2475 if (uc->config.dir == DMA_MEM_TO_MEM && !uc->config.tr_trigger_type) { 2476 uc->config.hdesc_size = cppi5_trdesc_calc_size( 2477 sizeof(struct cppi5_tr_type15_t), 2); 2478 2479 uc->hdesc_pool = dma_pool_create(uc->name, ud->ddev.dev, 2480 uc->config.hdesc_size, 2481 ud->desc_align, 2482 0); 2483 if (!uc->hdesc_pool) { 2484 dev_err(ud->ddev.dev, 2485 "Descriptor pool allocation failed\n"); 2486 uc->use_dma_pool = false; 2487 ret = -ENOMEM; 2488 goto err_res_free; 2489 } 2490 2491 uc->use_dma_pool = true; 2492 } else if (uc->config.dir != DMA_MEM_TO_MEM) { 2493 /* PSI-L pairing */ 2494 ret = navss_psil_pair(ud, uc->config.src_thread, 2495 uc->config.dst_thread); 2496 if (ret) { 2497 dev_err(ud->dev, 2498 "PSI-L pairing failed: 0x%04x -> 0x%04x\n", 2499 uc->config.src_thread, uc->config.dst_thread); 2500 goto err_res_free; 2501 } 2502 2503 uc->psil_paired = true; 2504 } 2505 2506 uc->irq_num_ring = msi_get_virq(ud->dev, irq_ring_idx); 2507 if (uc->irq_num_ring <= 0) { 2508 dev_err(ud->dev, "Failed to get ring irq (index: %u)\n", 2509 irq_ring_idx); 2510 ret = -EINVAL; 2511 goto err_psi_free; 2512 } 2513 2514 ret = request_irq(uc->irq_num_ring, udma_ring_irq_handler, 2515 IRQF_TRIGGER_HIGH, uc->name, uc); 2516 if (ret) { 2517 dev_err(ud->dev, "chan%d: ring irq request failed\n", uc->id); 2518 goto err_irq_free; 2519 } 2520 2521 /* Event from BCDMA (TR events) only needed for slave channels */ 2522 if (is_slave_direction(uc->config.dir)) { 2523 uc->irq_num_udma = msi_get_virq(ud->dev, irq_udma_idx); 2524 if (uc->irq_num_udma <= 0) { 2525 dev_err(ud->dev, "Failed to get bcdma irq (index: %u)\n", 2526 irq_udma_idx); 2527 free_irq(uc->irq_num_ring, uc); 2528 ret = -EINVAL; 2529 goto err_irq_free; 2530 } 2531 2532 ret = request_irq(uc->irq_num_udma, udma_udma_irq_handler, 0, 2533 uc->name, uc); 2534 if (ret) { 2535 dev_err(ud->dev, "chan%d: BCDMA irq request failed\n", 2536 uc->id); 2537 free_irq(uc->irq_num_ring, uc); 2538 goto err_irq_free; 2539 } 2540 } else { 2541 uc->irq_num_udma = 0; 2542 } 2543 2544 udma_reset_rings(uc); 2545 2546 INIT_DELAYED_WORK_ONSTACK(&uc->tx_drain.work, 2547 udma_check_tx_completion); 2548 return 0; 2549 2550 err_irq_free: 2551 uc->irq_num_ring = 0; 2552 uc->irq_num_udma = 0; 2553 err_psi_free: 2554 if (uc->psil_paired) 2555 navss_psil_unpair(ud, uc->config.src_thread, 2556 uc->config.dst_thread); 2557 uc->psil_paired = false; 2558 err_res_free: 2559 bcdma_free_bchan_resources(uc); 2560 udma_free_tx_resources(uc); 2561 udma_free_rx_resources(uc); 2562 2563 udma_reset_uchan(uc); 2564 2565 if (uc->use_dma_pool) { 2566 dma_pool_destroy(uc->hdesc_pool); 2567 uc->use_dma_pool = false; 2568 } 2569 2570 return ret; 2571 } 2572 2573 static int bcdma_router_config(struct dma_chan *chan) 2574 { 2575 struct k3_event_route_data *router_data = chan->route_data; 2576 struct udma_chan *uc = to_udma_chan(chan); 2577 u32 trigger_event; 2578 2579 if (!uc->bchan) 2580 return -EINVAL; 2581 2582 if (uc->config.tr_trigger_type != 1 && uc->config.tr_trigger_type != 2) 2583 return -EINVAL; 2584 2585 trigger_event = uc->ud->soc_data->bcdma_trigger_event_offset; 2586 trigger_event += (uc->bchan->id * 2) + uc->config.tr_trigger_type - 1; 2587 2588 return router_data->set_event(router_data->priv, trigger_event); 2589 } 2590 2591 static int pktdma_alloc_chan_resources(struct dma_chan *chan) 2592 { 
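	/*
	 * PKTDMA channels are packet mode only: a descriptor pool is always
	 * created for them and no separate TR event interrupt is requested
	 * (irq_num_udma stays 0). The completion interrupt index is derived
	 * from the mapped tflow/rflow id plus the SoC specific
	 * pktdma_tchan_flow/pktdma_rchan_flow offset.
	 */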
	struct udma_chan *uc = to_udma_chan(chan);
	struct udma_dev *ud = to_udma_dev(chan->device);
	const struct udma_oes_offsets *oes = &ud->soc_data->oes;
	u32 irq_ring_idx;
	int ret;

	/*
	 * Make sure that the completion is in a known state:
	 * No teardown, the channel is idle
	 */
	reinit_completion(&uc->teardown_completed);
	complete_all(&uc->teardown_completed);
	uc->state = UDMA_CHAN_IS_IDLE;

	switch (uc->config.dir) {
	case DMA_MEM_TO_DEV:
		/* Slave transfer synchronized - mem to dev (TX) transfer */
		dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__,
			uc->id);

		ret = udma_alloc_tx_resources(uc);
		if (ret) {
			uc->config.remote_thread_id = -1;
			return ret;
		}

		uc->config.src_thread = ud->psil_base + uc->tchan->id;
		uc->config.dst_thread = uc->config.remote_thread_id;
		uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;

		irq_ring_idx = uc->tchan->tflow_id + oes->pktdma_tchan_flow;

		ret = pktdma_tisci_tx_channel_config(uc);
		break;
	case DMA_DEV_TO_MEM:
		/* Slave transfer synchronized - dev to mem (RX) transfer */
		dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__,
			uc->id);

		ret = udma_alloc_rx_resources(uc);
		if (ret) {
			uc->config.remote_thread_id = -1;
			return ret;
		}

		uc->config.src_thread = uc->config.remote_thread_id;
		uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
					K3_PSIL_DST_THREAD_ID_OFFSET;

		irq_ring_idx = uc->rflow->id + oes->pktdma_rchan_flow;

		ret = pktdma_tisci_rx_channel_config(uc);
		break;
	default:
		/* Can not happen */
		dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
			__func__, uc->id, uc->config.dir);
		return -EINVAL;
	}

	/* check if the channel configuration was successful */
	if (ret)
		goto err_res_free;

	if (udma_is_chan_running(uc)) {
		dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
		udma_reset_chan(uc, false);
		if (udma_is_chan_running(uc)) {
			dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
			ret = -EBUSY;
			goto err_res_free;
		}
	}

	uc->dma_dev = dmaengine_get_dma_device(chan);
	uc->hdesc_pool = dma_pool_create(uc->name, uc->dma_dev,
					 uc->config.hdesc_size, ud->desc_align,
					 0);
	if (!uc->hdesc_pool) {
		dev_err(ud->ddev.dev,
			"Descriptor pool allocation failed\n");
		uc->use_dma_pool = false;
		ret = -ENOMEM;
		goto err_res_free;
	}

	uc->use_dma_pool = true;

	/* PSI-L pairing */
	ret = navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread);
	if (ret) {
		dev_err(ud->dev, "PSI-L pairing failed: 0x%04x -> 0x%04x\n",
			uc->config.src_thread, uc->config.dst_thread);
		goto err_res_free;
	}

	uc->psil_paired = true;

	uc->irq_num_ring = msi_get_virq(ud->dev, irq_ring_idx);
	if (uc->irq_num_ring <= 0) {
		dev_err(ud->dev, "Failed to get ring irq (index: %u)\n",
			irq_ring_idx);
		ret = -EINVAL;
		goto err_psi_free;
	}

	ret = request_irq(uc->irq_num_ring, udma_ring_irq_handler,
			  IRQF_TRIGGER_HIGH, uc->name, uc);
	if (ret) {
		dev_err(ud->dev, "chan%d: ring irq request failed\n", uc->id);
		goto err_irq_free;
	}

	uc->irq_num_udma = 0;

	udma_reset_rings(uc);

	INIT_DELAYED_WORK_ONSTACK(&uc->tx_drain.work,
				  udma_check_tx_completion);

2713 if (uc->tchan) 2714 dev_dbg(ud->dev, 2715 "chan%d: tchan%d, tflow%d, Remote thread: 0x%04x\n", 2716 uc->id, uc->tchan->id, uc->tchan->tflow_id, 2717 uc->config.remote_thread_id); 2718 else if (uc->rchan) 2719 dev_dbg(ud->dev, 2720 "chan%d: rchan%d, rflow%d, Remote thread: 0x%04x\n", 2721 uc->id, uc->rchan->id, uc->rflow->id, 2722 uc->config.remote_thread_id); 2723 return 0; 2724 2725 err_irq_free: 2726 uc->irq_num_ring = 0; 2727 err_psi_free: 2728 navss_psil_unpair(ud, uc->config.src_thread, uc->config.dst_thread); 2729 uc->psil_paired = false; 2730 err_res_free: 2731 udma_free_tx_resources(uc); 2732 udma_free_rx_resources(uc); 2733 2734 udma_reset_uchan(uc); 2735 2736 dma_pool_destroy(uc->hdesc_pool); 2737 uc->use_dma_pool = false; 2738 2739 return ret; 2740 } 2741 2742 static int udma_slave_config(struct dma_chan *chan, 2743 struct dma_slave_config *cfg) 2744 { 2745 struct udma_chan *uc = to_udma_chan(chan); 2746 2747 memcpy(&uc->cfg, cfg, sizeof(uc->cfg)); 2748 2749 return 0; 2750 } 2751 2752 static struct udma_desc *udma_alloc_tr_desc(struct udma_chan *uc, 2753 size_t tr_size, int tr_count, 2754 enum dma_transfer_direction dir) 2755 { 2756 struct udma_hwdesc *hwdesc; 2757 struct cppi5_desc_hdr_t *tr_desc; 2758 struct udma_desc *d; 2759 u32 reload_count = 0; 2760 u32 ring_id; 2761 2762 switch (tr_size) { 2763 case 16: 2764 case 32: 2765 case 64: 2766 case 128: 2767 break; 2768 default: 2769 dev_err(uc->ud->dev, "Unsupported TR size of %zu\n", tr_size); 2770 return NULL; 2771 } 2772 2773 /* We have only one descriptor containing multiple TRs */ 2774 d = kzalloc(sizeof(*d) + sizeof(d->hwdesc[0]), GFP_NOWAIT); 2775 if (!d) 2776 return NULL; 2777 2778 d->sglen = tr_count; 2779 2780 d->hwdesc_count = 1; 2781 hwdesc = &d->hwdesc[0]; 2782 2783 /* Allocate memory for DMA ring descriptor */ 2784 if (uc->use_dma_pool) { 2785 hwdesc->cppi5_desc_size = uc->config.hdesc_size; 2786 hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool, 2787 GFP_NOWAIT, 2788 &hwdesc->cppi5_desc_paddr); 2789 } else { 2790 hwdesc->cppi5_desc_size = cppi5_trdesc_calc_size(tr_size, 2791 tr_count); 2792 hwdesc->cppi5_desc_size = ALIGN(hwdesc->cppi5_desc_size, 2793 uc->ud->desc_align); 2794 hwdesc->cppi5_desc_vaddr = dma_alloc_coherent(uc->ud->dev, 2795 hwdesc->cppi5_desc_size, 2796 &hwdesc->cppi5_desc_paddr, 2797 GFP_NOWAIT); 2798 } 2799 2800 if (!hwdesc->cppi5_desc_vaddr) { 2801 kfree(d); 2802 return NULL; 2803 } 2804 2805 /* Start of the TR req records */ 2806 hwdesc->tr_req_base = hwdesc->cppi5_desc_vaddr + tr_size; 2807 /* Start address of the TR response array */ 2808 hwdesc->tr_resp_base = hwdesc->tr_req_base + tr_size * tr_count; 2809 2810 tr_desc = hwdesc->cppi5_desc_vaddr; 2811 2812 if (uc->cyclic) 2813 reload_count = CPPI5_INFO0_TRDESC_RLDCNT_INFINITE; 2814 2815 if (dir == DMA_DEV_TO_MEM) 2816 ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring); 2817 else 2818 ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring); 2819 2820 cppi5_trdesc_init(tr_desc, tr_count, tr_size, 0, reload_count); 2821 cppi5_desc_set_pktids(tr_desc, uc->id, 2822 CPPI5_INFO1_DESC_FLOWID_DEFAULT); 2823 cppi5_desc_set_retpolicy(tr_desc, 0, ring_id); 2824 2825 return d; 2826 } 2827 2828 /** 2829 * udma_get_tr_counters - calculate TR counters for a given length 2830 * @len: Length of the trasnfer 2831 * @align_to: Preferred alignment 2832 * @tr0_cnt0: First TR icnt0 2833 * @tr0_cnt1: First TR icnt1 2834 * @tr1_cnt0: Second (if used) TR icnt0 2835 * 2836 * For len < SZ_64K only one TR is enough, tr1_cnt0 is not updated 2837 * For len >= 
SZ_64K two TRs are used in a simple way: 2838 * First TR: SZ_64K-alignment blocks (tr0_cnt0, tr0_cnt1) 2839 * Second TR: the remaining length (tr1_cnt0) 2840 * 2841 * Returns the number of TRs the length needs (1 or 2) 2842 * -EINVAL if the length can not be supported 2843 */ 2844 static int udma_get_tr_counters(size_t len, unsigned long align_to, 2845 u16 *tr0_cnt0, u16 *tr0_cnt1, u16 *tr1_cnt0) 2846 { 2847 if (len < SZ_64K) { 2848 *tr0_cnt0 = len; 2849 *tr0_cnt1 = 1; 2850 2851 return 1; 2852 } 2853 2854 if (align_to > 3) 2855 align_to = 3; 2856 2857 realign: 2858 *tr0_cnt0 = SZ_64K - BIT(align_to); 2859 if (len / *tr0_cnt0 >= SZ_64K) { 2860 if (align_to) { 2861 align_to--; 2862 goto realign; 2863 } 2864 return -EINVAL; 2865 } 2866 2867 *tr0_cnt1 = len / *tr0_cnt0; 2868 *tr1_cnt0 = len % *tr0_cnt0; 2869 2870 return 2; 2871 } 2872 2873 static struct udma_desc * 2874 udma_prep_slave_sg_tr(struct udma_chan *uc, struct scatterlist *sgl, 2875 unsigned int sglen, enum dma_transfer_direction dir, 2876 unsigned long tx_flags, void *context) 2877 { 2878 struct scatterlist *sgent; 2879 struct udma_desc *d; 2880 struct cppi5_tr_type1_t *tr_req = NULL; 2881 u16 tr0_cnt0, tr0_cnt1, tr1_cnt0; 2882 unsigned int i; 2883 size_t tr_size; 2884 int num_tr = 0; 2885 int tr_idx = 0; 2886 u64 asel; 2887 2888 /* estimate the number of TRs we will need */ 2889 for_each_sg(sgl, sgent, sglen, i) { 2890 if (sg_dma_len(sgent) < SZ_64K) 2891 num_tr++; 2892 else 2893 num_tr += 2; 2894 } 2895 2896 /* Now allocate and setup the descriptor. */ 2897 tr_size = sizeof(struct cppi5_tr_type1_t); 2898 d = udma_alloc_tr_desc(uc, tr_size, num_tr, dir); 2899 if (!d) 2900 return NULL; 2901 2902 d->sglen = sglen; 2903 2904 if (uc->ud->match_data->type == DMA_TYPE_UDMA) 2905 asel = 0; 2906 else 2907 asel = (u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT; 2908 2909 tr_req = d->hwdesc[0].tr_req_base; 2910 for_each_sg(sgl, sgent, sglen, i) { 2911 dma_addr_t sg_addr = sg_dma_address(sgent); 2912 2913 num_tr = udma_get_tr_counters(sg_dma_len(sgent), __ffs(sg_addr), 2914 &tr0_cnt0, &tr0_cnt1, &tr1_cnt0); 2915 if (num_tr < 0) { 2916 dev_err(uc->ud->dev, "size %u is not supported\n", 2917 sg_dma_len(sgent)); 2918 udma_free_hwdesc(uc, d); 2919 kfree(d); 2920 return NULL; 2921 } 2922 2923 cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1, false, 2924 false, CPPI5_TR_EVENT_SIZE_COMPLETION, 0); 2925 cppi5_tr_csf_set(&tr_req[tr_idx].flags, CPPI5_TR_CSF_SUPR_EVT); 2926 2927 sg_addr |= asel; 2928 tr_req[tr_idx].addr = sg_addr; 2929 tr_req[tr_idx].icnt0 = tr0_cnt0; 2930 tr_req[tr_idx].icnt1 = tr0_cnt1; 2931 tr_req[tr_idx].dim1 = tr0_cnt0; 2932 tr_idx++; 2933 2934 if (num_tr == 2) { 2935 cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1, 2936 false, false, 2937 CPPI5_TR_EVENT_SIZE_COMPLETION, 0); 2938 cppi5_tr_csf_set(&tr_req[tr_idx].flags, 2939 CPPI5_TR_CSF_SUPR_EVT); 2940 2941 tr_req[tr_idx].addr = sg_addr + tr0_cnt1 * tr0_cnt0; 2942 tr_req[tr_idx].icnt0 = tr1_cnt0; 2943 tr_req[tr_idx].icnt1 = 1; 2944 tr_req[tr_idx].dim1 = tr1_cnt0; 2945 tr_idx++; 2946 } 2947 2948 d->residue += sg_dma_len(sgent); 2949 } 2950 2951 cppi5_tr_csf_set(&tr_req[tr_idx - 1].flags, 2952 CPPI5_TR_CSF_SUPR_EVT | CPPI5_TR_CSF_EOP); 2953 2954 return d; 2955 } 2956 2957 static struct udma_desc * 2958 udma_prep_slave_sg_triggered_tr(struct udma_chan *uc, struct scatterlist *sgl, 2959 unsigned int sglen, 2960 enum dma_transfer_direction dir, 2961 unsigned long tx_flags, void *context) 2962 { 2963 struct scatterlist *sgent; 2964 struct cppi5_tr_type15_t *tr_req = NULL; 2965 enum 
dma_slave_buswidth dev_width; 2966 u16 tr_cnt0, tr_cnt1; 2967 dma_addr_t dev_addr; 2968 struct udma_desc *d; 2969 unsigned int i; 2970 size_t tr_size, sg_len; 2971 int num_tr = 0; 2972 int tr_idx = 0; 2973 u32 burst, trigger_size, port_window; 2974 u64 asel; 2975 2976 if (dir == DMA_DEV_TO_MEM) { 2977 dev_addr = uc->cfg.src_addr; 2978 dev_width = uc->cfg.src_addr_width; 2979 burst = uc->cfg.src_maxburst; 2980 port_window = uc->cfg.src_port_window_size; 2981 } else if (dir == DMA_MEM_TO_DEV) { 2982 dev_addr = uc->cfg.dst_addr; 2983 dev_width = uc->cfg.dst_addr_width; 2984 burst = uc->cfg.dst_maxburst; 2985 port_window = uc->cfg.dst_port_window_size; 2986 } else { 2987 dev_err(uc->ud->dev, "%s: bad direction?\n", __func__); 2988 return NULL; 2989 } 2990 2991 if (!burst) 2992 burst = 1; 2993 2994 if (port_window) { 2995 if (port_window != burst) { 2996 dev_err(uc->ud->dev, 2997 "The burst must be equal to port_window\n"); 2998 return NULL; 2999 } 3000 3001 tr_cnt0 = dev_width * port_window; 3002 tr_cnt1 = 1; 3003 } else { 3004 tr_cnt0 = dev_width; 3005 tr_cnt1 = burst; 3006 } 3007 trigger_size = tr_cnt0 * tr_cnt1; 3008 3009 /* estimate the number of TRs we will need */ 3010 for_each_sg(sgl, sgent, sglen, i) { 3011 sg_len = sg_dma_len(sgent); 3012 3013 if (sg_len % trigger_size) { 3014 dev_err(uc->ud->dev, 3015 "Not aligned SG entry (%zu for %u)\n", sg_len, 3016 trigger_size); 3017 return NULL; 3018 } 3019 3020 if (sg_len / trigger_size < SZ_64K) 3021 num_tr++; 3022 else 3023 num_tr += 2; 3024 } 3025 3026 /* Now allocate and setup the descriptor. */ 3027 tr_size = sizeof(struct cppi5_tr_type15_t); 3028 d = udma_alloc_tr_desc(uc, tr_size, num_tr, dir); 3029 if (!d) 3030 return NULL; 3031 3032 d->sglen = sglen; 3033 3034 if (uc->ud->match_data->type == DMA_TYPE_UDMA) { 3035 asel = 0; 3036 } else { 3037 asel = (u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT; 3038 dev_addr |= asel; 3039 } 3040 3041 tr_req = d->hwdesc[0].tr_req_base; 3042 for_each_sg(sgl, sgent, sglen, i) { 3043 u16 tr0_cnt2, tr0_cnt3, tr1_cnt2; 3044 dma_addr_t sg_addr = sg_dma_address(sgent); 3045 3046 sg_len = sg_dma_len(sgent); 3047 num_tr = udma_get_tr_counters(sg_len / trigger_size, 0, 3048 &tr0_cnt2, &tr0_cnt3, &tr1_cnt2); 3049 if (num_tr < 0) { 3050 dev_err(uc->ud->dev, "size %zu is not supported\n", 3051 sg_len); 3052 udma_free_hwdesc(uc, d); 3053 kfree(d); 3054 return NULL; 3055 } 3056 3057 cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE15, false, 3058 true, CPPI5_TR_EVENT_SIZE_COMPLETION, 0); 3059 cppi5_tr_csf_set(&tr_req[tr_idx].flags, CPPI5_TR_CSF_SUPR_EVT); 3060 cppi5_tr_set_trigger(&tr_req[tr_idx].flags, 3061 uc->config.tr_trigger_type, 3062 CPPI5_TR_TRIGGER_TYPE_ICNT2_DEC, 0, 0); 3063 3064 sg_addr |= asel; 3065 if (dir == DMA_DEV_TO_MEM) { 3066 tr_req[tr_idx].addr = dev_addr; 3067 tr_req[tr_idx].icnt0 = tr_cnt0; 3068 tr_req[tr_idx].icnt1 = tr_cnt1; 3069 tr_req[tr_idx].icnt2 = tr0_cnt2; 3070 tr_req[tr_idx].icnt3 = tr0_cnt3; 3071 tr_req[tr_idx].dim1 = (-1) * tr_cnt0; 3072 3073 tr_req[tr_idx].daddr = sg_addr; 3074 tr_req[tr_idx].dicnt0 = tr_cnt0; 3075 tr_req[tr_idx].dicnt1 = tr_cnt1; 3076 tr_req[tr_idx].dicnt2 = tr0_cnt2; 3077 tr_req[tr_idx].dicnt3 = tr0_cnt3; 3078 tr_req[tr_idx].ddim1 = tr_cnt0; 3079 tr_req[tr_idx].ddim2 = trigger_size; 3080 tr_req[tr_idx].ddim3 = trigger_size * tr0_cnt2; 3081 } else { 3082 tr_req[tr_idx].addr = sg_addr; 3083 tr_req[tr_idx].icnt0 = tr_cnt0; 3084 tr_req[tr_idx].icnt1 = tr_cnt1; 3085 tr_req[tr_idx].icnt2 = tr0_cnt2; 3086 tr_req[tr_idx].icnt3 = tr0_cnt3; 3087 tr_req[tr_idx].dim1 = 
tr_cnt0; 3088 tr_req[tr_idx].dim2 = trigger_size; 3089 tr_req[tr_idx].dim3 = trigger_size * tr0_cnt2; 3090 3091 tr_req[tr_idx].daddr = dev_addr; 3092 tr_req[tr_idx].dicnt0 = tr_cnt0; 3093 tr_req[tr_idx].dicnt1 = tr_cnt1; 3094 tr_req[tr_idx].dicnt2 = tr0_cnt2; 3095 tr_req[tr_idx].dicnt3 = tr0_cnt3; 3096 tr_req[tr_idx].ddim1 = (-1) * tr_cnt0; 3097 } 3098 3099 tr_idx++; 3100 3101 if (num_tr == 2) { 3102 cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE15, 3103 false, true, 3104 CPPI5_TR_EVENT_SIZE_COMPLETION, 0); 3105 cppi5_tr_csf_set(&tr_req[tr_idx].flags, 3106 CPPI5_TR_CSF_SUPR_EVT); 3107 cppi5_tr_set_trigger(&tr_req[tr_idx].flags, 3108 uc->config.tr_trigger_type, 3109 CPPI5_TR_TRIGGER_TYPE_ICNT2_DEC, 3110 0, 0); 3111 3112 sg_addr += trigger_size * tr0_cnt2 * tr0_cnt3; 3113 if (dir == DMA_DEV_TO_MEM) { 3114 tr_req[tr_idx].addr = dev_addr; 3115 tr_req[tr_idx].icnt0 = tr_cnt0; 3116 tr_req[tr_idx].icnt1 = tr_cnt1; 3117 tr_req[tr_idx].icnt2 = tr1_cnt2; 3118 tr_req[tr_idx].icnt3 = 1; 3119 tr_req[tr_idx].dim1 = (-1) * tr_cnt0; 3120 3121 tr_req[tr_idx].daddr = sg_addr; 3122 tr_req[tr_idx].dicnt0 = tr_cnt0; 3123 tr_req[tr_idx].dicnt1 = tr_cnt1; 3124 tr_req[tr_idx].dicnt2 = tr1_cnt2; 3125 tr_req[tr_idx].dicnt3 = 1; 3126 tr_req[tr_idx].ddim1 = tr_cnt0; 3127 tr_req[tr_idx].ddim2 = trigger_size; 3128 } else { 3129 tr_req[tr_idx].addr = sg_addr; 3130 tr_req[tr_idx].icnt0 = tr_cnt0; 3131 tr_req[tr_idx].icnt1 = tr_cnt1; 3132 tr_req[tr_idx].icnt2 = tr1_cnt2; 3133 tr_req[tr_idx].icnt3 = 1; 3134 tr_req[tr_idx].dim1 = tr_cnt0; 3135 tr_req[tr_idx].dim2 = trigger_size; 3136 3137 tr_req[tr_idx].daddr = dev_addr; 3138 tr_req[tr_idx].dicnt0 = tr_cnt0; 3139 tr_req[tr_idx].dicnt1 = tr_cnt1; 3140 tr_req[tr_idx].dicnt2 = tr1_cnt2; 3141 tr_req[tr_idx].dicnt3 = 1; 3142 tr_req[tr_idx].ddim1 = (-1) * tr_cnt0; 3143 } 3144 tr_idx++; 3145 } 3146 3147 d->residue += sg_len; 3148 } 3149 3150 cppi5_tr_csf_set(&tr_req[tr_idx - 1].flags, 3151 CPPI5_TR_CSF_SUPR_EVT | CPPI5_TR_CSF_EOP); 3152 3153 return d; 3154 } 3155 3156 static int udma_configure_statictr(struct udma_chan *uc, struct udma_desc *d, 3157 enum dma_slave_buswidth dev_width, 3158 u16 elcnt) 3159 { 3160 if (uc->config.ep_type != PSIL_EP_PDMA_XY) 3161 return 0; 3162 3163 /* Bus width translates to the element size (ES) */ 3164 switch (dev_width) { 3165 case DMA_SLAVE_BUSWIDTH_1_BYTE: 3166 d->static_tr.elsize = 0; 3167 break; 3168 case DMA_SLAVE_BUSWIDTH_2_BYTES: 3169 d->static_tr.elsize = 1; 3170 break; 3171 case DMA_SLAVE_BUSWIDTH_3_BYTES: 3172 d->static_tr.elsize = 2; 3173 break; 3174 case DMA_SLAVE_BUSWIDTH_4_BYTES: 3175 d->static_tr.elsize = 3; 3176 break; 3177 case DMA_SLAVE_BUSWIDTH_8_BYTES: 3178 d->static_tr.elsize = 4; 3179 break; 3180 default: /* not reached */ 3181 return -EINVAL; 3182 } 3183 3184 d->static_tr.elcnt = elcnt; 3185 3186 /* 3187 * PDMA must to close the packet when the channel is in packet mode. 3188 * For TR mode when the channel is not cyclic we also need PDMA to close 3189 * the packet otherwise the transfer will stall because PDMA holds on 3190 * the data it has received from the peripheral. 
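	 * For example, a non-cyclic transfer of d->residue = 4096 bytes with a
	 * 4 byte bus width and elcnt = 8 gives div = 32 and a burst count of
	 * 4096 / 32 = 128; for DEV_TO_MEM the value must also fit into the
	 * SoC specific statictr_z_mask, otherwise -EINVAL is returned below.
	 * (The numbers above are illustrative only.)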
3191 */ 3192 if (uc->config.pkt_mode || !uc->cyclic) { 3193 unsigned int div = dev_width * elcnt; 3194 3195 if (uc->cyclic) 3196 d->static_tr.bstcnt = d->residue / d->sglen / div; 3197 else 3198 d->static_tr.bstcnt = d->residue / div; 3199 3200 if (uc->config.dir == DMA_DEV_TO_MEM && 3201 d->static_tr.bstcnt > uc->ud->match_data->statictr_z_mask) 3202 return -EINVAL; 3203 } else { 3204 d->static_tr.bstcnt = 0; 3205 } 3206 3207 return 0; 3208 } 3209 3210 static struct udma_desc * 3211 udma_prep_slave_sg_pkt(struct udma_chan *uc, struct scatterlist *sgl, 3212 unsigned int sglen, enum dma_transfer_direction dir, 3213 unsigned long tx_flags, void *context) 3214 { 3215 struct scatterlist *sgent; 3216 struct cppi5_host_desc_t *h_desc = NULL; 3217 struct udma_desc *d; 3218 u32 ring_id; 3219 unsigned int i; 3220 u64 asel; 3221 3222 d = kzalloc(struct_size(d, hwdesc, sglen), GFP_NOWAIT); 3223 if (!d) 3224 return NULL; 3225 3226 d->sglen = sglen; 3227 d->hwdesc_count = sglen; 3228 3229 if (dir == DMA_DEV_TO_MEM) 3230 ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring); 3231 else 3232 ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring); 3233 3234 if (uc->ud->match_data->type == DMA_TYPE_UDMA) 3235 asel = 0; 3236 else 3237 asel = (u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT; 3238 3239 for_each_sg(sgl, sgent, sglen, i) { 3240 struct udma_hwdesc *hwdesc = &d->hwdesc[i]; 3241 dma_addr_t sg_addr = sg_dma_address(sgent); 3242 struct cppi5_host_desc_t *desc; 3243 size_t sg_len = sg_dma_len(sgent); 3244 3245 hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool, 3246 GFP_NOWAIT, 3247 &hwdesc->cppi5_desc_paddr); 3248 if (!hwdesc->cppi5_desc_vaddr) { 3249 dev_err(uc->ud->dev, 3250 "descriptor%d allocation failed\n", i); 3251 3252 udma_free_hwdesc(uc, d); 3253 kfree(d); 3254 return NULL; 3255 } 3256 3257 d->residue += sg_len; 3258 hwdesc->cppi5_desc_size = uc->config.hdesc_size; 3259 desc = hwdesc->cppi5_desc_vaddr; 3260 3261 if (i == 0) { 3262 cppi5_hdesc_init(desc, 0, 0); 3263 /* Flow and Packed ID */ 3264 cppi5_desc_set_pktids(&desc->hdr, uc->id, 3265 CPPI5_INFO1_DESC_FLOWID_DEFAULT); 3266 cppi5_desc_set_retpolicy(&desc->hdr, 0, ring_id); 3267 } else { 3268 cppi5_hdesc_reset_hbdesc(desc); 3269 cppi5_desc_set_retpolicy(&desc->hdr, 0, 0xffff); 3270 } 3271 3272 /* attach the sg buffer to the descriptor */ 3273 sg_addr |= asel; 3274 cppi5_hdesc_attach_buf(desc, sg_addr, sg_len, sg_addr, sg_len); 3275 3276 /* Attach link as host buffer descriptor */ 3277 if (h_desc) 3278 cppi5_hdesc_link_hbdesc(h_desc, 3279 hwdesc->cppi5_desc_paddr | asel); 3280 3281 if (uc->ud->match_data->type == DMA_TYPE_PKTDMA || 3282 dir == DMA_MEM_TO_DEV) 3283 h_desc = desc; 3284 } 3285 3286 if (d->residue >= SZ_4M) { 3287 dev_err(uc->ud->dev, 3288 "%s: Transfer size %u is over the supported 4M range\n", 3289 __func__, d->residue); 3290 udma_free_hwdesc(uc, d); 3291 kfree(d); 3292 return NULL; 3293 } 3294 3295 h_desc = d->hwdesc[0].cppi5_desc_vaddr; 3296 cppi5_hdesc_set_pktlen(h_desc, d->residue); 3297 3298 return d; 3299 } 3300 3301 static int udma_attach_metadata(struct dma_async_tx_descriptor *desc, 3302 void *data, size_t len) 3303 { 3304 struct udma_desc *d = to_udma_desc(desc); 3305 struct udma_chan *uc = to_udma_chan(desc->chan); 3306 struct cppi5_host_desc_t *h_desc; 3307 u32 psd_size = len; 3308 u32 flags = 0; 3309 3310 if (!uc->config.pkt_mode || !uc->config.metadata_size) 3311 return -ENOTSUPP; 3312 3313 if (!data || len > uc->config.metadata_size) 3314 return -EINVAL; 3315 3316 if (uc->config.needs_epib && len < 
CPPI5_INFO0_HDESC_EPIB_SIZE) 3317 return -EINVAL; 3318 3319 h_desc = d->hwdesc[0].cppi5_desc_vaddr; 3320 if (d->dir == DMA_MEM_TO_DEV) 3321 memcpy(h_desc->epib, data, len); 3322 3323 if (uc->config.needs_epib) 3324 psd_size -= CPPI5_INFO0_HDESC_EPIB_SIZE; 3325 3326 d->metadata = data; 3327 d->metadata_size = len; 3328 if (uc->config.needs_epib) 3329 flags |= CPPI5_INFO0_HDESC_EPIB_PRESENT; 3330 3331 cppi5_hdesc_update_flags(h_desc, flags); 3332 cppi5_hdesc_update_psdata_size(h_desc, psd_size); 3333 3334 return 0; 3335 } 3336 3337 static void *udma_get_metadata_ptr(struct dma_async_tx_descriptor *desc, 3338 size_t *payload_len, size_t *max_len) 3339 { 3340 struct udma_desc *d = to_udma_desc(desc); 3341 struct udma_chan *uc = to_udma_chan(desc->chan); 3342 struct cppi5_host_desc_t *h_desc; 3343 3344 if (!uc->config.pkt_mode || !uc->config.metadata_size) 3345 return ERR_PTR(-ENOTSUPP); 3346 3347 h_desc = d->hwdesc[0].cppi5_desc_vaddr; 3348 3349 *max_len = uc->config.metadata_size; 3350 3351 *payload_len = cppi5_hdesc_epib_present(&h_desc->hdr) ? 3352 CPPI5_INFO0_HDESC_EPIB_SIZE : 0; 3353 *payload_len += cppi5_hdesc_get_psdata_size(h_desc); 3354 3355 return h_desc->epib; 3356 } 3357 3358 static int udma_set_metadata_len(struct dma_async_tx_descriptor *desc, 3359 size_t payload_len) 3360 { 3361 struct udma_desc *d = to_udma_desc(desc); 3362 struct udma_chan *uc = to_udma_chan(desc->chan); 3363 struct cppi5_host_desc_t *h_desc; 3364 u32 psd_size = payload_len; 3365 u32 flags = 0; 3366 3367 if (!uc->config.pkt_mode || !uc->config.metadata_size) 3368 return -ENOTSUPP; 3369 3370 if (payload_len > uc->config.metadata_size) 3371 return -EINVAL; 3372 3373 if (uc->config.needs_epib && payload_len < CPPI5_INFO0_HDESC_EPIB_SIZE) 3374 return -EINVAL; 3375 3376 h_desc = d->hwdesc[0].cppi5_desc_vaddr; 3377 3378 if (uc->config.needs_epib) { 3379 psd_size -= CPPI5_INFO0_HDESC_EPIB_SIZE; 3380 flags |= CPPI5_INFO0_HDESC_EPIB_PRESENT; 3381 } 3382 3383 cppi5_hdesc_update_flags(h_desc, flags); 3384 cppi5_hdesc_update_psdata_size(h_desc, psd_size); 3385 3386 return 0; 3387 } 3388 3389 static struct dma_descriptor_metadata_ops metadata_ops = { 3390 .attach = udma_attach_metadata, 3391 .get_ptr = udma_get_metadata_ptr, 3392 .set_len = udma_set_metadata_len, 3393 }; 3394 3395 static struct dma_async_tx_descriptor * 3396 udma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, 3397 unsigned int sglen, enum dma_transfer_direction dir, 3398 unsigned long tx_flags, void *context) 3399 { 3400 struct udma_chan *uc = to_udma_chan(chan); 3401 enum dma_slave_buswidth dev_width; 3402 struct udma_desc *d; 3403 u32 burst; 3404 3405 if (dir != uc->config.dir && 3406 (uc->config.dir == DMA_MEM_TO_MEM && !uc->config.tr_trigger_type)) { 3407 dev_err(chan->device->dev, 3408 "%s: chan%d is for %s, not supporting %s\n", 3409 __func__, uc->id, 3410 dmaengine_get_direction_text(uc->config.dir), 3411 dmaengine_get_direction_text(dir)); 3412 return NULL; 3413 } 3414 3415 if (dir == DMA_DEV_TO_MEM) { 3416 dev_width = uc->cfg.src_addr_width; 3417 burst = uc->cfg.src_maxburst; 3418 } else if (dir == DMA_MEM_TO_DEV) { 3419 dev_width = uc->cfg.dst_addr_width; 3420 burst = uc->cfg.dst_maxburst; 3421 } else { 3422 dev_err(chan->device->dev, "%s: bad direction?\n", __func__); 3423 return NULL; 3424 } 3425 3426 if (!burst) 3427 burst = 1; 3428 3429 uc->config.tx_flags = tx_flags; 3430 3431 if (uc->config.pkt_mode) 3432 d = udma_prep_slave_sg_pkt(uc, sgl, sglen, dir, tx_flags, 3433 context); 3434 else if (is_slave_direction(uc->config.dir)) 
3435 d = udma_prep_slave_sg_tr(uc, sgl, sglen, dir, tx_flags, 3436 context); 3437 else 3438 d = udma_prep_slave_sg_triggered_tr(uc, sgl, sglen, dir, 3439 tx_flags, context); 3440 3441 if (!d) 3442 return NULL; 3443 3444 d->dir = dir; 3445 d->desc_idx = 0; 3446 d->tr_idx = 0; 3447 3448 /* static TR for remote PDMA */ 3449 if (udma_configure_statictr(uc, d, dev_width, burst)) { 3450 dev_err(uc->ud->dev, 3451 "%s: StaticTR Z is limited to maximum 4095 (%u)\n", 3452 __func__, d->static_tr.bstcnt); 3453 3454 udma_free_hwdesc(uc, d); 3455 kfree(d); 3456 return NULL; 3457 } 3458 3459 if (uc->config.metadata_size) 3460 d->vd.tx.metadata_ops = &metadata_ops; 3461 3462 return vchan_tx_prep(&uc->vc, &d->vd, tx_flags); 3463 } 3464 3465 static struct udma_desc * 3466 udma_prep_dma_cyclic_tr(struct udma_chan *uc, dma_addr_t buf_addr, 3467 size_t buf_len, size_t period_len, 3468 enum dma_transfer_direction dir, unsigned long flags) 3469 { 3470 struct udma_desc *d; 3471 size_t tr_size, period_addr; 3472 struct cppi5_tr_type1_t *tr_req; 3473 unsigned int periods = buf_len / period_len; 3474 u16 tr0_cnt0, tr0_cnt1, tr1_cnt0; 3475 unsigned int i; 3476 int num_tr; 3477 3478 num_tr = udma_get_tr_counters(period_len, __ffs(buf_addr), &tr0_cnt0, 3479 &tr0_cnt1, &tr1_cnt0); 3480 if (num_tr < 0) { 3481 dev_err(uc->ud->dev, "size %zu is not supported\n", 3482 period_len); 3483 return NULL; 3484 } 3485 3486 /* Now allocate and setup the descriptor. */ 3487 tr_size = sizeof(struct cppi5_tr_type1_t); 3488 d = udma_alloc_tr_desc(uc, tr_size, periods * num_tr, dir); 3489 if (!d) 3490 return NULL; 3491 3492 tr_req = d->hwdesc[0].tr_req_base; 3493 if (uc->ud->match_data->type == DMA_TYPE_UDMA) 3494 period_addr = buf_addr; 3495 else 3496 period_addr = buf_addr | 3497 ((u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT); 3498 3499 for (i = 0; i < periods; i++) { 3500 int tr_idx = i * num_tr; 3501 3502 cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1, false, 3503 false, CPPI5_TR_EVENT_SIZE_COMPLETION, 0); 3504 3505 tr_req[tr_idx].addr = period_addr; 3506 tr_req[tr_idx].icnt0 = tr0_cnt0; 3507 tr_req[tr_idx].icnt1 = tr0_cnt1; 3508 tr_req[tr_idx].dim1 = tr0_cnt0; 3509 3510 if (num_tr == 2) { 3511 cppi5_tr_csf_set(&tr_req[tr_idx].flags, 3512 CPPI5_TR_CSF_SUPR_EVT); 3513 tr_idx++; 3514 3515 cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1, 3516 false, false, 3517 CPPI5_TR_EVENT_SIZE_COMPLETION, 0); 3518 3519 tr_req[tr_idx].addr = period_addr + tr0_cnt1 * tr0_cnt0; 3520 tr_req[tr_idx].icnt0 = tr1_cnt0; 3521 tr_req[tr_idx].icnt1 = 1; 3522 tr_req[tr_idx].dim1 = tr1_cnt0; 3523 } 3524 3525 if (!(flags & DMA_PREP_INTERRUPT)) 3526 cppi5_tr_csf_set(&tr_req[tr_idx].flags, 3527 CPPI5_TR_CSF_SUPR_EVT); 3528 3529 period_addr += period_len; 3530 } 3531 3532 return d; 3533 } 3534 3535 static struct udma_desc * 3536 udma_prep_dma_cyclic_pkt(struct udma_chan *uc, dma_addr_t buf_addr, 3537 size_t buf_len, size_t period_len, 3538 enum dma_transfer_direction dir, unsigned long flags) 3539 { 3540 struct udma_desc *d; 3541 u32 ring_id; 3542 int i; 3543 int periods = buf_len / period_len; 3544 3545 if (periods > (K3_UDMA_DEFAULT_RING_SIZE - 1)) 3546 return NULL; 3547 3548 if (period_len >= SZ_4M) 3549 return NULL; 3550 3551 d = kzalloc(struct_size(d, hwdesc, periods), GFP_NOWAIT); 3552 if (!d) 3553 return NULL; 3554 3555 d->hwdesc_count = periods; 3556 3557 /* TODO: re-check this... 
	 */
*/ 3558 if (dir == DMA_DEV_TO_MEM) 3559 ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring); 3560 else 3561 ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring); 3562 3563 if (uc->ud->match_data->type != DMA_TYPE_UDMA) 3564 buf_addr |= (u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT; 3565 3566 for (i = 0; i < periods; i++) { 3567 struct udma_hwdesc *hwdesc = &d->hwdesc[i]; 3568 dma_addr_t period_addr = buf_addr + (period_len * i); 3569 struct cppi5_host_desc_t *h_desc; 3570 3571 hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool, 3572 GFP_NOWAIT, 3573 &hwdesc->cppi5_desc_paddr); 3574 if (!hwdesc->cppi5_desc_vaddr) { 3575 dev_err(uc->ud->dev, 3576 "descriptor%d allocation failed\n", i); 3577 3578 udma_free_hwdesc(uc, d); 3579 kfree(d); 3580 return NULL; 3581 } 3582 3583 hwdesc->cppi5_desc_size = uc->config.hdesc_size; 3584 h_desc = hwdesc->cppi5_desc_vaddr; 3585 3586 cppi5_hdesc_init(h_desc, 0, 0); 3587 cppi5_hdesc_set_pktlen(h_desc, period_len); 3588 3589 /* Flow and Packed ID */ 3590 cppi5_desc_set_pktids(&h_desc->hdr, uc->id, 3591 CPPI5_INFO1_DESC_FLOWID_DEFAULT); 3592 cppi5_desc_set_retpolicy(&h_desc->hdr, 0, ring_id); 3593 3594 /* attach each period to a new descriptor */ 3595 cppi5_hdesc_attach_buf(h_desc, 3596 period_addr, period_len, 3597 period_addr, period_len); 3598 } 3599 3600 return d; 3601 } 3602 3603 static struct dma_async_tx_descriptor * 3604 udma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, 3605 size_t period_len, enum dma_transfer_direction dir, 3606 unsigned long flags) 3607 { 3608 struct udma_chan *uc = to_udma_chan(chan); 3609 enum dma_slave_buswidth dev_width; 3610 struct udma_desc *d; 3611 u32 burst; 3612 3613 if (dir != uc->config.dir) { 3614 dev_err(chan->device->dev, 3615 "%s: chan%d is for %s, not supporting %s\n", 3616 __func__, uc->id, 3617 dmaengine_get_direction_text(uc->config.dir), 3618 dmaengine_get_direction_text(dir)); 3619 return NULL; 3620 } 3621 3622 uc->cyclic = true; 3623 3624 if (dir == DMA_DEV_TO_MEM) { 3625 dev_width = uc->cfg.src_addr_width; 3626 burst = uc->cfg.src_maxburst; 3627 } else if (dir == DMA_MEM_TO_DEV) { 3628 dev_width = uc->cfg.dst_addr_width; 3629 burst = uc->cfg.dst_maxburst; 3630 } else { 3631 dev_err(uc->ud->dev, "%s: bad direction?\n", __func__); 3632 return NULL; 3633 } 3634 3635 if (!burst) 3636 burst = 1; 3637 3638 if (uc->config.pkt_mode) 3639 d = udma_prep_dma_cyclic_pkt(uc, buf_addr, buf_len, period_len, 3640 dir, flags); 3641 else 3642 d = udma_prep_dma_cyclic_tr(uc, buf_addr, buf_len, period_len, 3643 dir, flags); 3644 3645 if (!d) 3646 return NULL; 3647 3648 d->sglen = buf_len / period_len; 3649 3650 d->dir = dir; 3651 d->residue = buf_len; 3652 3653 /* static TR for remote PDMA */ 3654 if (udma_configure_statictr(uc, d, dev_width, burst)) { 3655 dev_err(uc->ud->dev, 3656 "%s: StaticTR Z is limited to maximum 4095 (%u)\n", 3657 __func__, d->static_tr.bstcnt); 3658 3659 udma_free_hwdesc(uc, d); 3660 kfree(d); 3661 return NULL; 3662 } 3663 3664 if (uc->config.metadata_size) 3665 d->vd.tx.metadata_ops = &metadata_ops; 3666 3667 return vchan_tx_prep(&uc->vc, &d->vd, flags); 3668 } 3669 3670 static struct dma_async_tx_descriptor * 3671 udma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, 3672 size_t len, unsigned long tx_flags) 3673 { 3674 struct udma_chan *uc = to_udma_chan(chan); 3675 struct udma_desc *d; 3676 struct cppi5_tr_type15_t *tr_req; 3677 int num_tr; 3678 size_t tr_size = sizeof(struct cppi5_tr_type15_t); 3679 u16 tr0_cnt0, tr0_cnt1, tr1_cnt0; 3680 3681 
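	/*
	 * For illustration (example values only): copying 1 MiB between
	 * 64 byte aligned buffers has __ffs(src | dest) capped at 3 inside
	 * udma_get_tr_counters() below, so tr0_cnt0 = SZ_64K - 8 = 65528 and
	 * two TRs are used: the first moves tr0_cnt1 = 16 rows of 65528 bytes,
	 * the second moves the remaining tr1_cnt0 = 128 bytes.
	 */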
if (uc->config.dir != DMA_MEM_TO_MEM) { 3682 dev_err(chan->device->dev, 3683 "%s: chan%d is for %s, not supporting %s\n", 3684 __func__, uc->id, 3685 dmaengine_get_direction_text(uc->config.dir), 3686 dmaengine_get_direction_text(DMA_MEM_TO_MEM)); 3687 return NULL; 3688 } 3689 3690 num_tr = udma_get_tr_counters(len, __ffs(src | dest), &tr0_cnt0, 3691 &tr0_cnt1, &tr1_cnt0); 3692 if (num_tr < 0) { 3693 dev_err(uc->ud->dev, "size %zu is not supported\n", 3694 len); 3695 return NULL; 3696 } 3697 3698 d = udma_alloc_tr_desc(uc, tr_size, num_tr, DMA_MEM_TO_MEM); 3699 if (!d) 3700 return NULL; 3701 3702 d->dir = DMA_MEM_TO_MEM; 3703 d->desc_idx = 0; 3704 d->tr_idx = 0; 3705 d->residue = len; 3706 3707 if (uc->ud->match_data->type != DMA_TYPE_UDMA) { 3708 src |= (u64)uc->ud->asel << K3_ADDRESS_ASEL_SHIFT; 3709 dest |= (u64)uc->ud->asel << K3_ADDRESS_ASEL_SHIFT; 3710 } 3711 3712 tr_req = d->hwdesc[0].tr_req_base; 3713 3714 cppi5_tr_init(&tr_req[0].flags, CPPI5_TR_TYPE15, false, true, 3715 CPPI5_TR_EVENT_SIZE_COMPLETION, 0); 3716 cppi5_tr_csf_set(&tr_req[0].flags, CPPI5_TR_CSF_SUPR_EVT); 3717 3718 tr_req[0].addr = src; 3719 tr_req[0].icnt0 = tr0_cnt0; 3720 tr_req[0].icnt1 = tr0_cnt1; 3721 tr_req[0].icnt2 = 1; 3722 tr_req[0].icnt3 = 1; 3723 tr_req[0].dim1 = tr0_cnt0; 3724 3725 tr_req[0].daddr = dest; 3726 tr_req[0].dicnt0 = tr0_cnt0; 3727 tr_req[0].dicnt1 = tr0_cnt1; 3728 tr_req[0].dicnt2 = 1; 3729 tr_req[0].dicnt3 = 1; 3730 tr_req[0].ddim1 = tr0_cnt0; 3731 3732 if (num_tr == 2) { 3733 cppi5_tr_init(&tr_req[1].flags, CPPI5_TR_TYPE15, false, true, 3734 CPPI5_TR_EVENT_SIZE_COMPLETION, 0); 3735 cppi5_tr_csf_set(&tr_req[1].flags, CPPI5_TR_CSF_SUPR_EVT); 3736 3737 tr_req[1].addr = src + tr0_cnt1 * tr0_cnt0; 3738 tr_req[1].icnt0 = tr1_cnt0; 3739 tr_req[1].icnt1 = 1; 3740 tr_req[1].icnt2 = 1; 3741 tr_req[1].icnt3 = 1; 3742 3743 tr_req[1].daddr = dest + tr0_cnt1 * tr0_cnt0; 3744 tr_req[1].dicnt0 = tr1_cnt0; 3745 tr_req[1].dicnt1 = 1; 3746 tr_req[1].dicnt2 = 1; 3747 tr_req[1].dicnt3 = 1; 3748 } 3749 3750 cppi5_tr_csf_set(&tr_req[num_tr - 1].flags, 3751 CPPI5_TR_CSF_SUPR_EVT | CPPI5_TR_CSF_EOP); 3752 3753 if (uc->config.metadata_size) 3754 d->vd.tx.metadata_ops = &metadata_ops; 3755 3756 return vchan_tx_prep(&uc->vc, &d->vd, tx_flags); 3757 } 3758 3759 static void udma_issue_pending(struct dma_chan *chan) 3760 { 3761 struct udma_chan *uc = to_udma_chan(chan); 3762 unsigned long flags; 3763 3764 spin_lock_irqsave(&uc->vc.lock, flags); 3765 3766 /* If we have something pending and no active descriptor, then */ 3767 if (vchan_issue_pending(&uc->vc) && !uc->desc) { 3768 /* 3769 * start a descriptor if the channel is NOT [marked as 3770 * terminating _and_ it is still running (teardown has not 3771 * completed yet)]. 
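		 * Put differently: a channel that is terminating but has
		 * already stopped may be restarted here with the next
		 * pending descriptor.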
3772 */ 3773 if (!(uc->state == UDMA_CHAN_IS_TERMINATING && 3774 udma_is_chan_running(uc))) 3775 udma_start(uc); 3776 } 3777 3778 spin_unlock_irqrestore(&uc->vc.lock, flags); 3779 } 3780 3781 static enum dma_status udma_tx_status(struct dma_chan *chan, 3782 dma_cookie_t cookie, 3783 struct dma_tx_state *txstate) 3784 { 3785 struct udma_chan *uc = to_udma_chan(chan); 3786 enum dma_status ret; 3787 unsigned long flags; 3788 3789 spin_lock_irqsave(&uc->vc.lock, flags); 3790 3791 ret = dma_cookie_status(chan, cookie, txstate); 3792 3793 if (!udma_is_chan_running(uc)) 3794 ret = DMA_COMPLETE; 3795 3796 if (ret == DMA_IN_PROGRESS && udma_is_chan_paused(uc)) 3797 ret = DMA_PAUSED; 3798 3799 if (ret == DMA_COMPLETE || !txstate) 3800 goto out; 3801 3802 if (uc->desc && uc->desc->vd.tx.cookie == cookie) { 3803 u32 peer_bcnt = 0; 3804 u32 bcnt = 0; 3805 u32 residue = uc->desc->residue; 3806 u32 delay = 0; 3807 3808 if (uc->desc->dir == DMA_MEM_TO_DEV) { 3809 bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_SBCNT_REG); 3810 3811 if (uc->config.ep_type != PSIL_EP_NATIVE) { 3812 peer_bcnt = udma_tchanrt_read(uc, 3813 UDMA_CHAN_RT_PEER_BCNT_REG); 3814 3815 if (bcnt > peer_bcnt) 3816 delay = bcnt - peer_bcnt; 3817 } 3818 } else if (uc->desc->dir == DMA_DEV_TO_MEM) { 3819 bcnt = udma_rchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG); 3820 3821 if (uc->config.ep_type != PSIL_EP_NATIVE) { 3822 peer_bcnt = udma_rchanrt_read(uc, 3823 UDMA_CHAN_RT_PEER_BCNT_REG); 3824 3825 if (peer_bcnt > bcnt) 3826 delay = peer_bcnt - bcnt; 3827 } 3828 } else { 3829 bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG); 3830 } 3831 3832 if (bcnt && !(bcnt % uc->desc->residue)) 3833 residue = 0; 3834 else 3835 residue -= bcnt % uc->desc->residue; 3836 3837 if (!residue && (uc->config.dir == DMA_DEV_TO_MEM || !delay)) { 3838 ret = DMA_COMPLETE; 3839 delay = 0; 3840 } 3841 3842 dma_set_residue(txstate, residue); 3843 dma_set_in_flight_bytes(txstate, delay); 3844 3845 } else { 3846 ret = DMA_COMPLETE; 3847 } 3848 3849 out: 3850 spin_unlock_irqrestore(&uc->vc.lock, flags); 3851 return ret; 3852 } 3853 3854 static int udma_pause(struct dma_chan *chan) 3855 { 3856 struct udma_chan *uc = to_udma_chan(chan); 3857 3858 /* pause the channel */ 3859 switch (uc->config.dir) { 3860 case DMA_DEV_TO_MEM: 3861 udma_rchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, 3862 UDMA_PEER_RT_EN_PAUSE, 3863 UDMA_PEER_RT_EN_PAUSE); 3864 break; 3865 case DMA_MEM_TO_DEV: 3866 udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, 3867 UDMA_PEER_RT_EN_PAUSE, 3868 UDMA_PEER_RT_EN_PAUSE); 3869 break; 3870 case DMA_MEM_TO_MEM: 3871 udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_CTL_REG, 3872 UDMA_CHAN_RT_CTL_PAUSE, 3873 UDMA_CHAN_RT_CTL_PAUSE); 3874 break; 3875 default: 3876 return -EINVAL; 3877 } 3878 3879 return 0; 3880 } 3881 3882 static int udma_resume(struct dma_chan *chan) 3883 { 3884 struct udma_chan *uc = to_udma_chan(chan); 3885 3886 /* resume the channel */ 3887 switch (uc->config.dir) { 3888 case DMA_DEV_TO_MEM: 3889 udma_rchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, 3890 UDMA_PEER_RT_EN_PAUSE, 0); 3891 3892 break; 3893 case DMA_MEM_TO_DEV: 3894 udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, 3895 UDMA_PEER_RT_EN_PAUSE, 0); 3896 break; 3897 case DMA_MEM_TO_MEM: 3898 udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_CTL_REG, 3899 UDMA_CHAN_RT_CTL_PAUSE, 0); 3900 break; 3901 default: 3902 return -EINVAL; 3903 } 3904 3905 return 0; 3906 } 3907 3908 static int udma_terminate_all(struct dma_chan *chan) 3909 { 3910 struct udma_chan *uc = to_udma_chan(chan); 3911 
unsigned long flags; 3912 LIST_HEAD(head); 3913 3914 spin_lock_irqsave(&uc->vc.lock, flags); 3915 3916 if (udma_is_chan_running(uc)) 3917 udma_stop(uc); 3918 3919 if (uc->desc) { 3920 uc->terminated_desc = uc->desc; 3921 uc->desc = NULL; 3922 uc->terminated_desc->terminated = true; 3923 cancel_delayed_work(&uc->tx_drain.work); 3924 } 3925 3926 uc->paused = false; 3927 3928 vchan_get_all_descriptors(&uc->vc, &head); 3929 spin_unlock_irqrestore(&uc->vc.lock, flags); 3930 vchan_dma_desc_free_list(&uc->vc, &head); 3931 3932 return 0; 3933 } 3934 3935 static void udma_synchronize(struct dma_chan *chan) 3936 { 3937 struct udma_chan *uc = to_udma_chan(chan); 3938 unsigned long timeout = msecs_to_jiffies(1000); 3939 3940 vchan_synchronize(&uc->vc); 3941 3942 if (uc->state == UDMA_CHAN_IS_TERMINATING) { 3943 timeout = wait_for_completion_timeout(&uc->teardown_completed, 3944 timeout); 3945 if (!timeout) { 3946 dev_warn(uc->ud->dev, "chan%d teardown timeout!\n", 3947 uc->id); 3948 udma_dump_chan_stdata(uc); 3949 udma_reset_chan(uc, true); 3950 } 3951 } 3952 3953 udma_reset_chan(uc, false); 3954 if (udma_is_chan_running(uc)) 3955 dev_warn(uc->ud->dev, "chan%d refused to stop!\n", uc->id); 3956 3957 cancel_delayed_work_sync(&uc->tx_drain.work); 3958 udma_reset_rings(uc); 3959 } 3960 3961 static void udma_desc_pre_callback(struct virt_dma_chan *vc, 3962 struct virt_dma_desc *vd, 3963 struct dmaengine_result *result) 3964 { 3965 struct udma_chan *uc = to_udma_chan(&vc->chan); 3966 struct udma_desc *d; 3967 3968 if (!vd) 3969 return; 3970 3971 d = to_udma_desc(&vd->tx); 3972 3973 if (d->metadata_size) 3974 udma_fetch_epib(uc, d); 3975 3976 /* Provide residue information for the client */ 3977 if (result) { 3978 void *desc_vaddr = udma_curr_cppi5_desc_vaddr(d, d->desc_idx); 3979 3980 if (cppi5_desc_get_type(desc_vaddr) == 3981 CPPI5_INFO0_DESC_TYPE_VAL_HOST) { 3982 result->residue = d->residue - 3983 cppi5_hdesc_get_pktlen(desc_vaddr); 3984 if (result->residue) 3985 result->result = DMA_TRANS_ABORTED; 3986 else 3987 result->result = DMA_TRANS_NOERROR; 3988 } else { 3989 result->residue = 0; 3990 result->result = DMA_TRANS_NOERROR; 3991 } 3992 } 3993 } 3994 3995 /* 3996 * This tasklet handles the completion of a DMA descriptor by 3997 * calling its callback and freeing it. 
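 * A cyclic descriptor (vc->cyclic) only has its callback invoked and is not
 * freed; descriptors on the completed list get a dmaengine_result filled in
 * by udma_desc_pre_callback() before they are released with
 * vchan_vdesc_fini().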
3998 */ 3999 static void udma_vchan_complete(struct tasklet_struct *t) 4000 { 4001 struct virt_dma_chan *vc = from_tasklet(vc, t, task); 4002 struct virt_dma_desc *vd, *_vd; 4003 struct dmaengine_desc_callback cb; 4004 LIST_HEAD(head); 4005 4006 spin_lock_irq(&vc->lock); 4007 list_splice_tail_init(&vc->desc_completed, &head); 4008 vd = vc->cyclic; 4009 if (vd) { 4010 vc->cyclic = NULL; 4011 dmaengine_desc_get_callback(&vd->tx, &cb); 4012 } else { 4013 memset(&cb, 0, sizeof(cb)); 4014 } 4015 spin_unlock_irq(&vc->lock); 4016 4017 udma_desc_pre_callback(vc, vd, NULL); 4018 dmaengine_desc_callback_invoke(&cb, NULL); 4019 4020 list_for_each_entry_safe(vd, _vd, &head, node) { 4021 struct dmaengine_result result; 4022 4023 dmaengine_desc_get_callback(&vd->tx, &cb); 4024 4025 list_del(&vd->node); 4026 4027 udma_desc_pre_callback(vc, vd, &result); 4028 dmaengine_desc_callback_invoke(&cb, &result); 4029 4030 vchan_vdesc_fini(vd); 4031 } 4032 } 4033 4034 static void udma_free_chan_resources(struct dma_chan *chan) 4035 { 4036 struct udma_chan *uc = to_udma_chan(chan); 4037 struct udma_dev *ud = to_udma_dev(chan->device); 4038 4039 udma_terminate_all(chan); 4040 if (uc->terminated_desc) { 4041 udma_reset_chan(uc, false); 4042 udma_reset_rings(uc); 4043 } 4044 4045 cancel_delayed_work_sync(&uc->tx_drain.work); 4046 4047 if (uc->irq_num_ring > 0) { 4048 free_irq(uc->irq_num_ring, uc); 4049 4050 uc->irq_num_ring = 0; 4051 } 4052 if (uc->irq_num_udma > 0) { 4053 free_irq(uc->irq_num_udma, uc); 4054 4055 uc->irq_num_udma = 0; 4056 } 4057 4058 /* Release PSI-L pairing */ 4059 if (uc->psil_paired) { 4060 navss_psil_unpair(ud, uc->config.src_thread, 4061 uc->config.dst_thread); 4062 uc->psil_paired = false; 4063 } 4064 4065 vchan_free_chan_resources(&uc->vc); 4066 tasklet_kill(&uc->vc.task); 4067 4068 bcdma_free_bchan_resources(uc); 4069 udma_free_tx_resources(uc); 4070 udma_free_rx_resources(uc); 4071 udma_reset_uchan(uc); 4072 4073 if (uc->use_dma_pool) { 4074 dma_pool_destroy(uc->hdesc_pool); 4075 uc->use_dma_pool = false; 4076 } 4077 } 4078 4079 static struct platform_driver udma_driver; 4080 static struct platform_driver bcdma_driver; 4081 static struct platform_driver pktdma_driver; 4082 4083 struct udma_filter_param { 4084 int remote_thread_id; 4085 u32 atype; 4086 u32 asel; 4087 u32 tr_trigger_type; 4088 }; 4089 4090 static bool udma_dma_filter_fn(struct dma_chan *chan, void *param) 4091 { 4092 struct udma_chan_config *ucc; 4093 struct psil_endpoint_config *ep_config; 4094 struct udma_filter_param *filter_param; 4095 struct udma_chan *uc; 4096 struct udma_dev *ud; 4097 4098 if (chan->device->dev->driver != &udma_driver.driver && 4099 chan->device->dev->driver != &bcdma_driver.driver && 4100 chan->device->dev->driver != &pktdma_driver.driver) 4101 return false; 4102 4103 uc = to_udma_chan(chan); 4104 ucc = &uc->config; 4105 ud = uc->ud; 4106 filter_param = param; 4107 4108 if (filter_param->atype > 2) { 4109 dev_err(ud->dev, "Invalid channel atype: %u\n", 4110 filter_param->atype); 4111 return false; 4112 } 4113 4114 if (filter_param->asel > 15) { 4115 dev_err(ud->dev, "Invalid channel asel: %u\n", 4116 filter_param->asel); 4117 return false; 4118 } 4119 4120 ucc->remote_thread_id = filter_param->remote_thread_id; 4121 ucc->atype = filter_param->atype; 4122 ucc->asel = filter_param->asel; 4123 ucc->tr_trigger_type = filter_param->tr_trigger_type; 4124 4125 if (ucc->tr_trigger_type) { 4126 ucc->dir = DMA_MEM_TO_MEM; 4127 goto triggered_bchan; 4128 } else if (ucc->remote_thread_id & 
K3_PSIL_DST_THREAD_ID_OFFSET) {
		ucc->dir = DMA_MEM_TO_DEV;
	} else {
		ucc->dir = DMA_DEV_TO_MEM;
	}

	ep_config = psil_get_ep_config(ucc->remote_thread_id);
	if (IS_ERR(ep_config)) {
		dev_err(ud->dev, "No configuration for psi-l thread 0x%04x\n",
			ucc->remote_thread_id);
		ucc->dir = DMA_MEM_TO_MEM;
		ucc->remote_thread_id = -1;
		ucc->atype = 0;
		ucc->asel = 0;
		return false;
	}

	if (ud->match_data->type == DMA_TYPE_BCDMA &&
	    ep_config->pkt_mode) {
		dev_err(ud->dev,
			"Only TR mode is supported (psi-l thread 0x%04x)\n",
			ucc->remote_thread_id);
		ucc->dir = DMA_MEM_TO_MEM;
		ucc->remote_thread_id = -1;
		ucc->atype = 0;
		ucc->asel = 0;
		return false;
	}

	ucc->pkt_mode = ep_config->pkt_mode;
	ucc->channel_tpl = ep_config->channel_tpl;
	ucc->notdpkt = ep_config->notdpkt;
	ucc->ep_type = ep_config->ep_type;

	if (ud->match_data->type == DMA_TYPE_PKTDMA &&
	    ep_config->mapped_channel_id >= 0) {
		ucc->mapped_channel_id = ep_config->mapped_channel_id;
		ucc->default_flow_id = ep_config->default_flow_id;
	} else {
		ucc->mapped_channel_id = -1;
		ucc->default_flow_id = -1;
	}

	if (ucc->ep_type != PSIL_EP_NATIVE) {
		const struct udma_match_data *match_data = ud->match_data;

		if (match_data->flags & UDMA_FLAG_PDMA_ACC32)
			ucc->enable_acc32 = ep_config->pdma_acc32;
		if (match_data->flags & UDMA_FLAG_PDMA_BURST)
			ucc->enable_burst = ep_config->pdma_burst;
	}

	ucc->needs_epib = ep_config->needs_epib;
	ucc->psd_size = ep_config->psd_size;
	ucc->metadata_size =
		(ucc->needs_epib ? CPPI5_INFO0_HDESC_EPIB_SIZE : 0) +
		ucc->psd_size;

	if (ucc->pkt_mode)
		ucc->hdesc_size = ALIGN(sizeof(struct cppi5_host_desc_t) +
					ucc->metadata_size, ud->desc_align);

	dev_dbg(ud->dev, "chan%d: Remote thread: 0x%04x (%s)\n", uc->id,
		ucc->remote_thread_id, dmaengine_get_direction_text(ucc->dir));

	return true;

triggered_bchan:
	dev_dbg(ud->dev, "chan%d: triggered channel (type: %u)\n", uc->id,
		ucc->tr_trigger_type);

	return true;
}

/*
 * Translate a DT dma-spec into a channel request. BCDMA uses three cells
 * (TR trigger type, PSI-L thread ID, asel), while UDMA and PKTDMA use the
 * PSI-L thread ID with an optional second cell (atype on UDMA, asel on
 * PKTDMA).
 */
static struct dma_chan *udma_of_xlate(struct of_phandle_args *dma_spec,
				      struct of_dma *ofdma)
{
	struct udma_dev *ud = ofdma->of_dma_data;
	dma_cap_mask_t mask = ud->ddev.cap_mask;
	struct udma_filter_param filter_param;
	struct dma_chan *chan;

	if (ud->match_data->type == DMA_TYPE_BCDMA) {
		if (dma_spec->args_count != 3)
			return NULL;

		filter_param.tr_trigger_type = dma_spec->args[0];
		filter_param.remote_thread_id = dma_spec->args[1];
		filter_param.asel = dma_spec->args[2];
		filter_param.atype = 0;
	} else {
		if (dma_spec->args_count != 1 && dma_spec->args_count != 2)
			return NULL;

		filter_param.remote_thread_id = dma_spec->args[0];
		filter_param.tr_trigger_type = 0;
		if (dma_spec->args_count == 2) {
			if (ud->match_data->type == DMA_TYPE_UDMA) {
				filter_param.atype = dma_spec->args[1];
				filter_param.asel = 0;
			} else {
				filter_param.atype = 0;
				filter_param.asel = dma_spec->args[1];
			}
		} else {
			filter_param.atype = 0;
			filter_param.asel = 0;
		}
	}

	chan = __dma_request_channel(&mask, udma_dma_filter_fn, &filter_param,
				     ofdma->of_node);
	if (!chan) {
		dev_err(ud->dev, "failed to get channel in %s\n", __func__);
		return
ERR_PTR(-EINVAL); 4244 } 4245 4246 return chan; 4247 } 4248 4249 static struct udma_match_data am654_main_data = { 4250 .type = DMA_TYPE_UDMA, 4251 .psil_base = 0x1000, 4252 .enable_memcpy_support = true, 4253 .statictr_z_mask = GENMASK(11, 0), 4254 .burst_size = { 4255 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */ 4256 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* H Channels */ 4257 0, /* No UH Channels */ 4258 }, 4259 }; 4260 4261 static struct udma_match_data am654_mcu_data = { 4262 .type = DMA_TYPE_UDMA, 4263 .psil_base = 0x6000, 4264 .enable_memcpy_support = false, 4265 .statictr_z_mask = GENMASK(11, 0), 4266 .burst_size = { 4267 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */ 4268 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* H Channels */ 4269 0, /* No UH Channels */ 4270 }, 4271 }; 4272 4273 static struct udma_match_data j721e_main_data = { 4274 .type = DMA_TYPE_UDMA, 4275 .psil_base = 0x1000, 4276 .enable_memcpy_support = true, 4277 .flags = UDMA_FLAGS_J7_CLASS, 4278 .statictr_z_mask = GENMASK(23, 0), 4279 .burst_size = { 4280 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */ 4281 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_256_BYTES, /* H Channels */ 4282 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_256_BYTES, /* UH Channels */ 4283 }, 4284 }; 4285 4286 static struct udma_match_data j721e_mcu_data = { 4287 .type = DMA_TYPE_UDMA, 4288 .psil_base = 0x6000, 4289 .enable_memcpy_support = false, /* MEM_TO_MEM is slow via MCU UDMA */ 4290 .flags = UDMA_FLAGS_J7_CLASS, 4291 .statictr_z_mask = GENMASK(23, 0), 4292 .burst_size = { 4293 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */ 4294 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_128_BYTES, /* H Channels */ 4295 0, /* No UH Channels */ 4296 }, 4297 }; 4298 4299 static struct udma_soc_data am62a_dmss_csi_soc_data = { 4300 .oes = { 4301 .bcdma_rchan_data = 0xe00, 4302 .bcdma_rchan_ring = 0x1000, 4303 }, 4304 }; 4305 4306 static struct udma_match_data am62a_bcdma_csirx_data = { 4307 .type = DMA_TYPE_BCDMA, 4308 .psil_base = 0x3100, 4309 .enable_memcpy_support = false, 4310 .burst_size = { 4311 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */ 4312 0, /* No H Channels */ 4313 0, /* No UH Channels */ 4314 }, 4315 .soc_data = &am62a_dmss_csi_soc_data, 4316 }; 4317 4318 static struct udma_match_data am64_bcdma_data = { 4319 .type = DMA_TYPE_BCDMA, 4320 .psil_base = 0x2000, /* for tchan and rchan, not applicable to bchan */ 4321 .enable_memcpy_support = true, /* Supported via bchan */ 4322 .flags = UDMA_FLAGS_J7_CLASS, 4323 .statictr_z_mask = GENMASK(23, 0), 4324 .burst_size = { 4325 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */ 4326 0, /* No H Channels */ 4327 0, /* No UH Channels */ 4328 }, 4329 }; 4330 4331 static struct udma_match_data am64_pktdma_data = { 4332 .type = DMA_TYPE_PKTDMA, 4333 .psil_base = 0x1000, 4334 .enable_memcpy_support = false, /* PKTDMA does not support MEM_TO_MEM */ 4335 .flags = UDMA_FLAGS_J7_CLASS, 4336 .statictr_z_mask = GENMASK(23, 0), 4337 .burst_size = { 4338 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */ 4339 0, /* No H Channels */ 4340 0, /* No UH Channels */ 4341 }, 4342 }; 4343 4344 static const struct of_device_id udma_of_match[] = { 4345 { 4346 .compatible = "ti,am654-navss-main-udmap", 4347 .data = &am654_main_data, 4348 }, 4349 { 4350 .compatible = "ti,am654-navss-mcu-udmap", 4351 .data = &am654_mcu_data, 4352 }, { 4353 .compatible = "ti,j721e-navss-main-udmap", 4354 .data = &j721e_main_data, 4355 }, { 4356 .compatible = 
"ti,j721e-navss-mcu-udmap", 4357 .data = &j721e_mcu_data, 4358 }, 4359 { 4360 .compatible = "ti,am64-dmss-bcdma", 4361 .data = &am64_bcdma_data, 4362 }, 4363 { 4364 .compatible = "ti,am64-dmss-pktdma", 4365 .data = &am64_pktdma_data, 4366 }, 4367 { 4368 .compatible = "ti,am62a-dmss-bcdma-csirx", 4369 .data = &am62a_bcdma_csirx_data, 4370 }, 4371 { /* Sentinel */ }, 4372 }; 4373 4374 static struct udma_soc_data am654_soc_data = { 4375 .oes = { 4376 .udma_rchan = 0x200, 4377 }, 4378 }; 4379 4380 static struct udma_soc_data j721e_soc_data = { 4381 .oes = { 4382 .udma_rchan = 0x400, 4383 }, 4384 }; 4385 4386 static struct udma_soc_data j7200_soc_data = { 4387 .oes = { 4388 .udma_rchan = 0x80, 4389 }, 4390 }; 4391 4392 static struct udma_soc_data am64_soc_data = { 4393 .oes = { 4394 .bcdma_bchan_data = 0x2200, 4395 .bcdma_bchan_ring = 0x2400, 4396 .bcdma_tchan_data = 0x2800, 4397 .bcdma_tchan_ring = 0x2a00, 4398 .bcdma_rchan_data = 0x2e00, 4399 .bcdma_rchan_ring = 0x3000, 4400 .pktdma_tchan_flow = 0x1200, 4401 .pktdma_rchan_flow = 0x1600, 4402 }, 4403 .bcdma_trigger_event_offset = 0xc400, 4404 }; 4405 4406 static const struct soc_device_attribute k3_soc_devices[] = { 4407 { .family = "AM65X", .data = &am654_soc_data }, 4408 { .family = "J721E", .data = &j721e_soc_data }, 4409 { .family = "J7200", .data = &j7200_soc_data }, 4410 { .family = "AM64X", .data = &am64_soc_data }, 4411 { .family = "J721S2", .data = &j721e_soc_data}, 4412 { .family = "AM62X", .data = &am64_soc_data }, 4413 { .family = "AM62AX", .data = &am64_soc_data }, 4414 { /* sentinel */ } 4415 }; 4416 4417 static int udma_get_mmrs(struct platform_device *pdev, struct udma_dev *ud) 4418 { 4419 u32 cap2, cap3, cap4; 4420 int i; 4421 4422 ud->mmrs[MMR_GCFG] = devm_platform_ioremap_resource_byname(pdev, mmr_names[MMR_GCFG]); 4423 if (IS_ERR(ud->mmrs[MMR_GCFG])) 4424 return PTR_ERR(ud->mmrs[MMR_GCFG]); 4425 4426 cap2 = udma_read(ud->mmrs[MMR_GCFG], 0x28); 4427 cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c); 4428 4429 switch (ud->match_data->type) { 4430 case DMA_TYPE_UDMA: 4431 ud->rflow_cnt = UDMA_CAP3_RFLOW_CNT(cap3); 4432 ud->tchan_cnt = UDMA_CAP2_TCHAN_CNT(cap2); 4433 ud->echan_cnt = UDMA_CAP2_ECHAN_CNT(cap2); 4434 ud->rchan_cnt = UDMA_CAP2_RCHAN_CNT(cap2); 4435 break; 4436 case DMA_TYPE_BCDMA: 4437 ud->bchan_cnt = BCDMA_CAP2_BCHAN_CNT(cap2); 4438 ud->tchan_cnt = BCDMA_CAP2_TCHAN_CNT(cap2); 4439 ud->rchan_cnt = BCDMA_CAP2_RCHAN_CNT(cap2); 4440 ud->rflow_cnt = ud->rchan_cnt; 4441 break; 4442 case DMA_TYPE_PKTDMA: 4443 cap4 = udma_read(ud->mmrs[MMR_GCFG], 0x30); 4444 ud->tchan_cnt = UDMA_CAP2_TCHAN_CNT(cap2); 4445 ud->rchan_cnt = UDMA_CAP2_RCHAN_CNT(cap2); 4446 ud->rflow_cnt = UDMA_CAP3_RFLOW_CNT(cap3); 4447 ud->tflow_cnt = PKTDMA_CAP4_TFLOW_CNT(cap4); 4448 break; 4449 default: 4450 return -EINVAL; 4451 } 4452 4453 for (i = 1; i < MMR_LAST; i++) { 4454 if (i == MMR_BCHANRT && ud->bchan_cnt == 0) 4455 continue; 4456 if (i == MMR_TCHANRT && ud->tchan_cnt == 0) 4457 continue; 4458 if (i == MMR_RCHANRT && ud->rchan_cnt == 0) 4459 continue; 4460 4461 ud->mmrs[i] = devm_platform_ioremap_resource_byname(pdev, mmr_names[i]); 4462 if (IS_ERR(ud->mmrs[i])) 4463 return PTR_ERR(ud->mmrs[i]); 4464 } 4465 4466 return 0; 4467 } 4468 4469 static void udma_mark_resource_ranges(struct udma_dev *ud, unsigned long *map, 4470 struct ti_sci_resource_desc *rm_desc, 4471 char *name) 4472 { 4473 bitmap_clear(map, rm_desc->start, rm_desc->num); 4474 bitmap_clear(map, rm_desc->start_sec, rm_desc->num_sec); 4475 dev_dbg(ud->dev, "ti_sci resource range for %s: %d:%d 
| %d:%d\n", name, 4476 rm_desc->start, rm_desc->num, rm_desc->start_sec, 4477 rm_desc->num_sec); 4478 } 4479 4480 static const char * const range_names[] = { 4481 [RM_RANGE_BCHAN] = "ti,sci-rm-range-bchan", 4482 [RM_RANGE_TCHAN] = "ti,sci-rm-range-tchan", 4483 [RM_RANGE_RCHAN] = "ti,sci-rm-range-rchan", 4484 [RM_RANGE_RFLOW] = "ti,sci-rm-range-rflow", 4485 [RM_RANGE_TFLOW] = "ti,sci-rm-range-tflow", 4486 }; 4487 4488 static int udma_setup_resources(struct udma_dev *ud) 4489 { 4490 int ret, i, j; 4491 struct device *dev = ud->dev; 4492 struct ti_sci_resource *rm_res, irq_res; 4493 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; 4494 u32 cap3; 4495 4496 /* Set up the throughput level start indexes */ 4497 cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c); 4498 if (of_device_is_compatible(dev->of_node, 4499 "ti,am654-navss-main-udmap")) { 4500 ud->tchan_tpl.levels = 2; 4501 ud->tchan_tpl.start_idx[0] = 8; 4502 } else if (of_device_is_compatible(dev->of_node, 4503 "ti,am654-navss-mcu-udmap")) { 4504 ud->tchan_tpl.levels = 2; 4505 ud->tchan_tpl.start_idx[0] = 2; 4506 } else if (UDMA_CAP3_UCHAN_CNT(cap3)) { 4507 ud->tchan_tpl.levels = 3; 4508 ud->tchan_tpl.start_idx[1] = UDMA_CAP3_UCHAN_CNT(cap3); 4509 ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3); 4510 } else if (UDMA_CAP3_HCHAN_CNT(cap3)) { 4511 ud->tchan_tpl.levels = 2; 4512 ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3); 4513 } else { 4514 ud->tchan_tpl.levels = 1; 4515 } 4516 4517 ud->rchan_tpl.levels = ud->tchan_tpl.levels; 4518 ud->rchan_tpl.start_idx[0] = ud->tchan_tpl.start_idx[0]; 4519 ud->rchan_tpl.start_idx[1] = ud->tchan_tpl.start_idx[1]; 4520 4521 ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt), 4522 sizeof(unsigned long), GFP_KERNEL); 4523 ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans), 4524 GFP_KERNEL); 4525 ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt), 4526 sizeof(unsigned long), GFP_KERNEL); 4527 ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans), 4528 GFP_KERNEL); 4529 ud->rflow_gp_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rflow_cnt), 4530 sizeof(unsigned long), 4531 GFP_KERNEL); 4532 ud->rflow_gp_map_allocated = devm_kcalloc(dev, 4533 BITS_TO_LONGS(ud->rflow_cnt), 4534 sizeof(unsigned long), 4535 GFP_KERNEL); 4536 ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt), 4537 sizeof(unsigned long), 4538 GFP_KERNEL); 4539 ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows), 4540 GFP_KERNEL); 4541 4542 if (!ud->tchan_map || !ud->rchan_map || !ud->rflow_gp_map || 4543 !ud->rflow_gp_map_allocated || !ud->tchans || !ud->rchans || 4544 !ud->rflows || !ud->rflow_in_use) 4545 return -ENOMEM; 4546 4547 /* 4548 * RX flows with the same Ids as RX channels are reserved to be used 4549 * as default flows if remote HW can't generate flow_ids. Those 4550 * RX flows can be requested only explicitly by id. 
4551 */ 4552 bitmap_set(ud->rflow_gp_map_allocated, 0, ud->rchan_cnt); 4553 4554 /* by default no GP rflows are assigned to Linux */ 4555 bitmap_set(ud->rflow_gp_map, 0, ud->rflow_cnt); 4556 4557 /* Get resource ranges from tisci */ 4558 for (i = 0; i < RM_RANGE_LAST; i++) { 4559 if (i == RM_RANGE_BCHAN || i == RM_RANGE_TFLOW) 4560 continue; 4561 4562 tisci_rm->rm_ranges[i] = 4563 devm_ti_sci_get_of_resource(tisci_rm->tisci, dev, 4564 tisci_rm->tisci_dev_id, 4565 (char *)range_names[i]); 4566 } 4567 4568 /* tchan ranges */ 4569 rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN]; 4570 if (IS_ERR(rm_res)) { 4571 bitmap_zero(ud->tchan_map, ud->tchan_cnt); 4572 irq_res.sets = 1; 4573 } else { 4574 bitmap_fill(ud->tchan_map, ud->tchan_cnt); 4575 for (i = 0; i < rm_res->sets; i++) 4576 udma_mark_resource_ranges(ud, ud->tchan_map, 4577 &rm_res->desc[i], "tchan"); 4578 irq_res.sets = rm_res->sets; 4579 } 4580 4581 /* rchan and matching default flow ranges */ 4582 rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN]; 4583 if (IS_ERR(rm_res)) { 4584 bitmap_zero(ud->rchan_map, ud->rchan_cnt); 4585 irq_res.sets++; 4586 } else { 4587 bitmap_fill(ud->rchan_map, ud->rchan_cnt); 4588 for (i = 0; i < rm_res->sets; i++) 4589 udma_mark_resource_ranges(ud, ud->rchan_map, 4590 &rm_res->desc[i], "rchan"); 4591 irq_res.sets += rm_res->sets; 4592 } 4593 4594 irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL); 4595 if (!irq_res.desc) 4596 return -ENOMEM; 4597 rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN]; 4598 if (IS_ERR(rm_res)) { 4599 irq_res.desc[0].start = 0; 4600 irq_res.desc[0].num = ud->tchan_cnt; 4601 i = 1; 4602 } else { 4603 for (i = 0; i < rm_res->sets; i++) { 4604 irq_res.desc[i].start = rm_res->desc[i].start; 4605 irq_res.desc[i].num = rm_res->desc[i].num; 4606 irq_res.desc[i].start_sec = rm_res->desc[i].start_sec; 4607 irq_res.desc[i].num_sec = rm_res->desc[i].num_sec; 4608 } 4609 } 4610 rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN]; 4611 if (IS_ERR(rm_res)) { 4612 irq_res.desc[i].start = 0; 4613 irq_res.desc[i].num = ud->rchan_cnt; 4614 } else { 4615 for (j = 0; j < rm_res->sets; j++, i++) { 4616 if (rm_res->desc[j].num) { 4617 irq_res.desc[i].start = rm_res->desc[j].start + 4618 ud->soc_data->oes.udma_rchan; 4619 irq_res.desc[i].num = rm_res->desc[j].num; 4620 } 4621 if (rm_res->desc[j].num_sec) { 4622 irq_res.desc[i].start_sec = rm_res->desc[j].start_sec + 4623 ud->soc_data->oes.udma_rchan; 4624 irq_res.desc[i].num_sec = rm_res->desc[j].num_sec; 4625 } 4626 } 4627 } 4628 ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res); 4629 kfree(irq_res.desc); 4630 if (ret) { 4631 dev_err(ud->dev, "Failed to allocate MSI interrupts\n"); 4632 return ret; 4633 } 4634 4635 /* GP rflow ranges */ 4636 rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW]; 4637 if (IS_ERR(rm_res)) { 4638 /* all gp flows are assigned exclusively to Linux */ 4639 bitmap_clear(ud->rflow_gp_map, ud->rchan_cnt, 4640 ud->rflow_cnt - ud->rchan_cnt); 4641 } else { 4642 for (i = 0; i < rm_res->sets; i++) 4643 udma_mark_resource_ranges(ud, ud->rflow_gp_map, 4644 &rm_res->desc[i], "gp-rflow"); 4645 } 4646 4647 return 0; 4648 } 4649 4650 static int bcdma_setup_resources(struct udma_dev *ud) 4651 { 4652 int ret, i, j; 4653 struct device *dev = ud->dev; 4654 struct ti_sci_resource *rm_res, irq_res; 4655 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; 4656 const struct udma_oes_offsets *oes = &ud->soc_data->oes; 4657 u32 cap; 4658 4659 /* Set up the throughput level start indexes */ 4660 cap = udma_read(ud->mmrs[MMR_GCFG], 0x2c); 4661 if 
(BCDMA_CAP3_UBCHAN_CNT(cap)) {
		ud->bchan_tpl.levels = 3;
		ud->bchan_tpl.start_idx[1] = BCDMA_CAP3_UBCHAN_CNT(cap);
		ud->bchan_tpl.start_idx[0] = BCDMA_CAP3_HBCHAN_CNT(cap);
	} else if (BCDMA_CAP3_HBCHAN_CNT(cap)) {
		ud->bchan_tpl.levels = 2;
		ud->bchan_tpl.start_idx[0] = BCDMA_CAP3_HBCHAN_CNT(cap);
	} else {
		ud->bchan_tpl.levels = 1;
	}

	cap = udma_read(ud->mmrs[MMR_GCFG], 0x30);
	if (BCDMA_CAP4_URCHAN_CNT(cap)) {
		ud->rchan_tpl.levels = 3;
		ud->rchan_tpl.start_idx[1] = BCDMA_CAP4_URCHAN_CNT(cap);
		ud->rchan_tpl.start_idx[0] = BCDMA_CAP4_HRCHAN_CNT(cap);
	} else if (BCDMA_CAP4_HRCHAN_CNT(cap)) {
		ud->rchan_tpl.levels = 2;
		ud->rchan_tpl.start_idx[0] = BCDMA_CAP4_HRCHAN_CNT(cap);
	} else {
		ud->rchan_tpl.levels = 1;
	}

	if (BCDMA_CAP4_UTCHAN_CNT(cap)) {
		ud->tchan_tpl.levels = 3;
		ud->tchan_tpl.start_idx[1] = BCDMA_CAP4_UTCHAN_CNT(cap);
		ud->tchan_tpl.start_idx[0] = BCDMA_CAP4_HTCHAN_CNT(cap);
	} else if (BCDMA_CAP4_HTCHAN_CNT(cap)) {
		ud->tchan_tpl.levels = 2;
		ud->tchan_tpl.start_idx[0] = BCDMA_CAP4_HTCHAN_CNT(cap);
	} else {
		ud->tchan_tpl.levels = 1;
	}

	ud->bchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->bchan_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
	ud->bchans = devm_kcalloc(dev, ud->bchan_cnt, sizeof(*ud->bchans),
				  GFP_KERNEL);
	ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
	ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
				  GFP_KERNEL);
	ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
	ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
				  GFP_KERNEL);
	/* BCDMA does not really have flows, but the driver expects them */
	ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rchan_cnt),
					sizeof(unsigned long),
					GFP_KERNEL);
	ud->rflows = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rflows),
				  GFP_KERNEL);

	if (!ud->bchan_map || !ud->tchan_map || !ud->rchan_map ||
	    !ud->rflow_in_use || !ud->bchans || !ud->tchans || !ud->rchans ||
	    !ud->rflows)
		return -ENOMEM;

	/* Get resource ranges from tisci */
	for (i = 0; i < RM_RANGE_LAST; i++) {
		if (i == RM_RANGE_RFLOW || i == RM_RANGE_TFLOW)
			continue;
		if (i == RM_RANGE_BCHAN && ud->bchan_cnt == 0)
			continue;
		if (i == RM_RANGE_TCHAN && ud->tchan_cnt == 0)
			continue;
		if (i == RM_RANGE_RCHAN && ud->rchan_cnt == 0)
			continue;

		tisci_rm->rm_ranges[i] =
			devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
						    tisci_rm->tisci_dev_id,
						    (char *)range_names[i]);
	}

	irq_res.sets = 0;

	/* bchan ranges */
	if (ud->bchan_cnt) {
		rm_res = tisci_rm->rm_ranges[RM_RANGE_BCHAN];
		if (IS_ERR(rm_res)) {
			bitmap_zero(ud->bchan_map, ud->bchan_cnt);
			irq_res.sets++;
		} else {
			bitmap_fill(ud->bchan_map, ud->bchan_cnt);
			for (i = 0; i < rm_res->sets; i++)
				udma_mark_resource_ranges(ud, ud->bchan_map,
							  &rm_res->desc[i],
							  "bchan");
			irq_res.sets += rm_res->sets;
		}
	}

	/* tchan ranges */
	if (ud->tchan_cnt) {
		rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
		if (IS_ERR(rm_res)) {
			bitmap_zero(ud->tchan_map, ud->tchan_cnt);
			irq_res.sets += 2;
		} else {
			bitmap_fill(ud->tchan_map, ud->tchan_cnt);
			for (i = 0; i <
rm_res->sets; i++) 4763 udma_mark_resource_ranges(ud, ud->tchan_map, 4764 &rm_res->desc[i], 4765 "tchan"); 4766 irq_res.sets += rm_res->sets * 2; 4767 } 4768 } 4769 4770 /* rchan ranges */ 4771 if (ud->rchan_cnt) { 4772 rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN]; 4773 if (IS_ERR(rm_res)) { 4774 bitmap_zero(ud->rchan_map, ud->rchan_cnt); 4775 irq_res.sets += 2; 4776 } else { 4777 bitmap_fill(ud->rchan_map, ud->rchan_cnt); 4778 for (i = 0; i < rm_res->sets; i++) 4779 udma_mark_resource_ranges(ud, ud->rchan_map, 4780 &rm_res->desc[i], 4781 "rchan"); 4782 irq_res.sets += rm_res->sets * 2; 4783 } 4784 } 4785 4786 irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL); 4787 if (!irq_res.desc) 4788 return -ENOMEM; 4789 if (ud->bchan_cnt) { 4790 rm_res = tisci_rm->rm_ranges[RM_RANGE_BCHAN]; 4791 if (IS_ERR(rm_res)) { 4792 irq_res.desc[0].start = oes->bcdma_bchan_ring; 4793 irq_res.desc[0].num = ud->bchan_cnt; 4794 i = 1; 4795 } else { 4796 for (i = 0; i < rm_res->sets; i++) { 4797 irq_res.desc[i].start = rm_res->desc[i].start + 4798 oes->bcdma_bchan_ring; 4799 irq_res.desc[i].num = rm_res->desc[i].num; 4800 } 4801 } 4802 } else { 4803 i = 0; 4804 } 4805 4806 if (ud->tchan_cnt) { 4807 rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN]; 4808 if (IS_ERR(rm_res)) { 4809 irq_res.desc[i].start = oes->bcdma_tchan_data; 4810 irq_res.desc[i].num = ud->tchan_cnt; 4811 irq_res.desc[i + 1].start = oes->bcdma_tchan_ring; 4812 irq_res.desc[i + 1].num = ud->tchan_cnt; 4813 i += 2; 4814 } else { 4815 for (j = 0; j < rm_res->sets; j++, i += 2) { 4816 irq_res.desc[i].start = rm_res->desc[j].start + 4817 oes->bcdma_tchan_data; 4818 irq_res.desc[i].num = rm_res->desc[j].num; 4819 4820 irq_res.desc[i + 1].start = rm_res->desc[j].start + 4821 oes->bcdma_tchan_ring; 4822 irq_res.desc[i + 1].num = rm_res->desc[j].num; 4823 } 4824 } 4825 } 4826 if (ud->rchan_cnt) { 4827 rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN]; 4828 if (IS_ERR(rm_res)) { 4829 irq_res.desc[i].start = oes->bcdma_rchan_data; 4830 irq_res.desc[i].num = ud->rchan_cnt; 4831 irq_res.desc[i + 1].start = oes->bcdma_rchan_ring; 4832 irq_res.desc[i + 1].num = ud->rchan_cnt; 4833 i += 2; 4834 } else { 4835 for (j = 0; j < rm_res->sets; j++, i += 2) { 4836 irq_res.desc[i].start = rm_res->desc[j].start + 4837 oes->bcdma_rchan_data; 4838 irq_res.desc[i].num = rm_res->desc[j].num; 4839 4840 irq_res.desc[i + 1].start = rm_res->desc[j].start + 4841 oes->bcdma_rchan_ring; 4842 irq_res.desc[i + 1].num = rm_res->desc[j].num; 4843 } 4844 } 4845 } 4846 4847 ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res); 4848 kfree(irq_res.desc); 4849 if (ret) { 4850 dev_err(ud->dev, "Failed to allocate MSI interrupts\n"); 4851 return ret; 4852 } 4853 4854 return 0; 4855 } 4856 4857 static int pktdma_setup_resources(struct udma_dev *ud) 4858 { 4859 int ret, i, j; 4860 struct device *dev = ud->dev; 4861 struct ti_sci_resource *rm_res, irq_res; 4862 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; 4863 const struct udma_oes_offsets *oes = &ud->soc_data->oes; 4864 u32 cap3; 4865 4866 /* Set up the throughput level start indexes */ 4867 cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c); 4868 if (UDMA_CAP3_UCHAN_CNT(cap3)) { 4869 ud->tchan_tpl.levels = 3; 4870 ud->tchan_tpl.start_idx[1] = UDMA_CAP3_UCHAN_CNT(cap3); 4871 ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3); 4872 } else if (UDMA_CAP3_HCHAN_CNT(cap3)) { 4873 ud->tchan_tpl.levels = 2; 4874 ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3); 4875 } else { 4876 ud->tchan_tpl.levels = 1; 4877 } 4878 4879 
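	/*
	 * The same CAP3 channel counts are applied to both directions here,
	 * so mirror the tchan TPL layout onto the rchans.
	 */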
ud->rchan_tpl.levels = ud->tchan_tpl.levels; 4880 ud->rchan_tpl.start_idx[0] = ud->tchan_tpl.start_idx[0]; 4881 ud->rchan_tpl.start_idx[1] = ud->tchan_tpl.start_idx[1]; 4882 4883 ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt), 4884 sizeof(unsigned long), GFP_KERNEL); 4885 ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans), 4886 GFP_KERNEL); 4887 ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt), 4888 sizeof(unsigned long), GFP_KERNEL); 4889 ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans), 4890 GFP_KERNEL); 4891 ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt), 4892 sizeof(unsigned long), 4893 GFP_KERNEL); 4894 ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows), 4895 GFP_KERNEL); 4896 ud->tflow_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tflow_cnt), 4897 sizeof(unsigned long), GFP_KERNEL); 4898 4899 if (!ud->tchan_map || !ud->rchan_map || !ud->tflow_map || !ud->tchans || 4900 !ud->rchans || !ud->rflows || !ud->rflow_in_use) 4901 return -ENOMEM; 4902 4903 /* Get resource ranges from tisci */ 4904 for (i = 0; i < RM_RANGE_LAST; i++) { 4905 if (i == RM_RANGE_BCHAN) 4906 continue; 4907 4908 tisci_rm->rm_ranges[i] = 4909 devm_ti_sci_get_of_resource(tisci_rm->tisci, dev, 4910 tisci_rm->tisci_dev_id, 4911 (char *)range_names[i]); 4912 } 4913 4914 /* tchan ranges */ 4915 rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN]; 4916 if (IS_ERR(rm_res)) { 4917 bitmap_zero(ud->tchan_map, ud->tchan_cnt); 4918 } else { 4919 bitmap_fill(ud->tchan_map, ud->tchan_cnt); 4920 for (i = 0; i < rm_res->sets; i++) 4921 udma_mark_resource_ranges(ud, ud->tchan_map, 4922 &rm_res->desc[i], "tchan"); 4923 } 4924 4925 /* rchan ranges */ 4926 rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN]; 4927 if (IS_ERR(rm_res)) { 4928 bitmap_zero(ud->rchan_map, ud->rchan_cnt); 4929 } else { 4930 bitmap_fill(ud->rchan_map, ud->rchan_cnt); 4931 for (i = 0; i < rm_res->sets; i++) 4932 udma_mark_resource_ranges(ud, ud->rchan_map, 4933 &rm_res->desc[i], "rchan"); 4934 } 4935 4936 /* rflow ranges */ 4937 rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW]; 4938 if (IS_ERR(rm_res)) { 4939 /* all rflows are assigned exclusively to Linux */ 4940 bitmap_zero(ud->rflow_in_use, ud->rflow_cnt); 4941 irq_res.sets = 1; 4942 } else { 4943 bitmap_fill(ud->rflow_in_use, ud->rflow_cnt); 4944 for (i = 0; i < rm_res->sets; i++) 4945 udma_mark_resource_ranges(ud, ud->rflow_in_use, 4946 &rm_res->desc[i], "rflow"); 4947 irq_res.sets = rm_res->sets; 4948 } 4949 4950 /* tflow ranges */ 4951 rm_res = tisci_rm->rm_ranges[RM_RANGE_TFLOW]; 4952 if (IS_ERR(rm_res)) { 4953 /* all tflows are assigned exclusively to Linux */ 4954 bitmap_zero(ud->tflow_map, ud->tflow_cnt); 4955 irq_res.sets++; 4956 } else { 4957 bitmap_fill(ud->tflow_map, ud->tflow_cnt); 4958 for (i = 0; i < rm_res->sets; i++) 4959 udma_mark_resource_ranges(ud, ud->tflow_map, 4960 &rm_res->desc[i], "tflow"); 4961 irq_res.sets += rm_res->sets; 4962 } 4963 4964 irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL); 4965 if (!irq_res.desc) 4966 return -ENOMEM; 4967 rm_res = tisci_rm->rm_ranges[RM_RANGE_TFLOW]; 4968 if (IS_ERR(rm_res)) { 4969 irq_res.desc[0].start = oes->pktdma_tchan_flow; 4970 irq_res.desc[0].num = ud->tflow_cnt; 4971 i = 1; 4972 } else { 4973 for (i = 0; i < rm_res->sets; i++) { 4974 irq_res.desc[i].start = rm_res->desc[i].start + 4975 oes->pktdma_tchan_flow; 4976 irq_res.desc[i].num = rm_res->desc[i].num; 4977 } 4978 } 4979 rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW]; 4980 if 
(IS_ERR(rm_res)) { 4981 irq_res.desc[i].start = oes->pktdma_rchan_flow; 4982 irq_res.desc[i].num = ud->rflow_cnt; 4983 } else { 4984 for (j = 0; j < rm_res->sets; j++, i++) { 4985 irq_res.desc[i].start = rm_res->desc[j].start + 4986 oes->pktdma_rchan_flow; 4987 irq_res.desc[i].num = rm_res->desc[j].num; 4988 } 4989 } 4990 ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res); 4991 kfree(irq_res.desc); 4992 if (ret) { 4993 dev_err(ud->dev, "Failed to allocate MSI interrupts\n"); 4994 return ret; 4995 } 4996 4997 return 0; 4998 } 4999 5000 static int setup_resources(struct udma_dev *ud) 5001 { 5002 struct device *dev = ud->dev; 5003 int ch_count, ret; 5004 5005 switch (ud->match_data->type) { 5006 case DMA_TYPE_UDMA: 5007 ret = udma_setup_resources(ud); 5008 break; 5009 case DMA_TYPE_BCDMA: 5010 ret = bcdma_setup_resources(ud); 5011 break; 5012 case DMA_TYPE_PKTDMA: 5013 ret = pktdma_setup_resources(ud); 5014 break; 5015 default: 5016 return -EINVAL; 5017 } 5018 5019 if (ret) 5020 return ret; 5021 5022 ch_count = ud->bchan_cnt + ud->tchan_cnt + ud->rchan_cnt; 5023 if (ud->bchan_cnt) 5024 ch_count -= bitmap_weight(ud->bchan_map, ud->bchan_cnt); 5025 ch_count -= bitmap_weight(ud->tchan_map, ud->tchan_cnt); 5026 ch_count -= bitmap_weight(ud->rchan_map, ud->rchan_cnt); 5027 if (!ch_count) 5028 return -ENODEV; 5029 5030 ud->channels = devm_kcalloc(dev, ch_count, sizeof(*ud->channels), 5031 GFP_KERNEL); 5032 if (!ud->channels) 5033 return -ENOMEM; 5034 5035 switch (ud->match_data->type) { 5036 case DMA_TYPE_UDMA: 5037 dev_info(dev, 5038 "Channels: %d (tchan: %u, rchan: %u, gp-rflow: %u)\n", 5039 ch_count, 5040 ud->tchan_cnt - bitmap_weight(ud->tchan_map, 5041 ud->tchan_cnt), 5042 ud->rchan_cnt - bitmap_weight(ud->rchan_map, 5043 ud->rchan_cnt), 5044 ud->rflow_cnt - bitmap_weight(ud->rflow_gp_map, 5045 ud->rflow_cnt)); 5046 break; 5047 case DMA_TYPE_BCDMA: 5048 dev_info(dev, 5049 "Channels: %d (bchan: %u, tchan: %u, rchan: %u)\n", 5050 ch_count, 5051 ud->bchan_cnt - bitmap_weight(ud->bchan_map, 5052 ud->bchan_cnt), 5053 ud->tchan_cnt - bitmap_weight(ud->tchan_map, 5054 ud->tchan_cnt), 5055 ud->rchan_cnt - bitmap_weight(ud->rchan_map, 5056 ud->rchan_cnt)); 5057 break; 5058 case DMA_TYPE_PKTDMA: 5059 dev_info(dev, 5060 "Channels: %d (tchan: %u, rchan: %u)\n", 5061 ch_count, 5062 ud->tchan_cnt - bitmap_weight(ud->tchan_map, 5063 ud->tchan_cnt), 5064 ud->rchan_cnt - bitmap_weight(ud->rchan_map, 5065 ud->rchan_cnt)); 5066 break; 5067 default: 5068 break; 5069 } 5070 5071 return ch_count; 5072 } 5073 5074 static int udma_setup_rx_flush(struct udma_dev *ud) 5075 { 5076 struct udma_rx_flush *rx_flush = &ud->rx_flush; 5077 struct cppi5_desc_hdr_t *tr_desc; 5078 struct cppi5_tr_type1_t *tr_req; 5079 struct cppi5_host_desc_t *desc; 5080 struct device *dev = ud->dev; 5081 struct udma_hwdesc *hwdesc; 5082 size_t tr_size; 5083 5084 /* Allocate 1K buffer for discarded data on RX channel teardown */ 5085 rx_flush->buffer_size = SZ_1K; 5086 rx_flush->buffer_vaddr = devm_kzalloc(dev, rx_flush->buffer_size, 5087 GFP_KERNEL); 5088 if (!rx_flush->buffer_vaddr) 5089 return -ENOMEM; 5090 5091 rx_flush->buffer_paddr = dma_map_single(dev, rx_flush->buffer_vaddr, 5092 rx_flush->buffer_size, 5093 DMA_TO_DEVICE); 5094 if (dma_mapping_error(dev, rx_flush->buffer_paddr)) 5095 return -ENOMEM; 5096 5097 /* Set up descriptor to be used for TR mode */ 5098 hwdesc = &rx_flush->hwdescs[0]; 5099 tr_size = sizeof(struct cppi5_tr_type1_t); 5100 hwdesc->cppi5_desc_size = cppi5_trdesc_calc_size(tr_size, 1); 5101 
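	/* Round the TR descriptor size up to the device's descriptor alignment */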
hwdesc->cppi5_desc_size = ALIGN(hwdesc->cppi5_desc_size, 5102 ud->desc_align); 5103 5104 hwdesc->cppi5_desc_vaddr = devm_kzalloc(dev, hwdesc->cppi5_desc_size, 5105 GFP_KERNEL); 5106 if (!hwdesc->cppi5_desc_vaddr) 5107 return -ENOMEM; 5108 5109 hwdesc->cppi5_desc_paddr = dma_map_single(dev, hwdesc->cppi5_desc_vaddr, 5110 hwdesc->cppi5_desc_size, 5111 DMA_TO_DEVICE); 5112 if (dma_mapping_error(dev, hwdesc->cppi5_desc_paddr)) 5113 return -ENOMEM; 5114 5115 /* Start of the TR req records */ 5116 hwdesc->tr_req_base = hwdesc->cppi5_desc_vaddr + tr_size; 5117 /* Start address of the TR response array */ 5118 hwdesc->tr_resp_base = hwdesc->tr_req_base + tr_size; 5119 5120 tr_desc = hwdesc->cppi5_desc_vaddr; 5121 cppi5_trdesc_init(tr_desc, 1, tr_size, 0, 0); 5122 cppi5_desc_set_pktids(tr_desc, 0, CPPI5_INFO1_DESC_FLOWID_DEFAULT); 5123 cppi5_desc_set_retpolicy(tr_desc, 0, 0); 5124 5125 tr_req = hwdesc->tr_req_base; 5126 cppi5_tr_init(&tr_req->flags, CPPI5_TR_TYPE1, false, false, 5127 CPPI5_TR_EVENT_SIZE_COMPLETION, 0); 5128 cppi5_tr_csf_set(&tr_req->flags, CPPI5_TR_CSF_SUPR_EVT); 5129 5130 tr_req->addr = rx_flush->buffer_paddr; 5131 tr_req->icnt0 = rx_flush->buffer_size; 5132 tr_req->icnt1 = 1; 5133 5134 dma_sync_single_for_device(dev, hwdesc->cppi5_desc_paddr, 5135 hwdesc->cppi5_desc_size, DMA_TO_DEVICE); 5136 5137 /* Set up descriptor to be used for packet mode */ 5138 hwdesc = &rx_flush->hwdescs[1]; 5139 hwdesc->cppi5_desc_size = ALIGN(sizeof(struct cppi5_host_desc_t) + 5140 CPPI5_INFO0_HDESC_EPIB_SIZE + 5141 CPPI5_INFO0_HDESC_PSDATA_MAX_SIZE, 5142 ud->desc_align); 5143 5144 hwdesc->cppi5_desc_vaddr = devm_kzalloc(dev, hwdesc->cppi5_desc_size, 5145 GFP_KERNEL); 5146 if (!hwdesc->cppi5_desc_vaddr) 5147 return -ENOMEM; 5148 5149 hwdesc->cppi5_desc_paddr = dma_map_single(dev, hwdesc->cppi5_desc_vaddr, 5150 hwdesc->cppi5_desc_size, 5151 DMA_TO_DEVICE); 5152 if (dma_mapping_error(dev, hwdesc->cppi5_desc_paddr)) 5153 return -ENOMEM; 5154 5155 desc = hwdesc->cppi5_desc_vaddr; 5156 cppi5_hdesc_init(desc, 0, 0); 5157 cppi5_desc_set_pktids(&desc->hdr, 0, CPPI5_INFO1_DESC_FLOWID_DEFAULT); 5158 cppi5_desc_set_retpolicy(&desc->hdr, 0, 0); 5159 5160 cppi5_hdesc_attach_buf(desc, 5161 rx_flush->buffer_paddr, rx_flush->buffer_size, 5162 rx_flush->buffer_paddr, rx_flush->buffer_size); 5163 5164 dma_sync_single_for_device(dev, hwdesc->cppi5_desc_paddr, 5165 hwdesc->cppi5_desc_size, DMA_TO_DEVICE); 5166 return 0; 5167 } 5168 5169 #ifdef CONFIG_DEBUG_FS 5170 static void udma_dbg_summary_show_chan(struct seq_file *s, 5171 struct dma_chan *chan) 5172 { 5173 struct udma_chan *uc = to_udma_chan(chan); 5174 struct udma_chan_config *ucc = &uc->config; 5175 5176 seq_printf(s, " %-13s| %s", dma_chan_name(chan), 5177 chan->dbg_client_name ?: "in-use"); 5178 if (ucc->tr_trigger_type) 5179 seq_puts(s, " (triggered, "); 5180 else 5181 seq_printf(s, " (%s, ", 5182 dmaengine_get_direction_text(uc->config.dir)); 5183 5184 switch (uc->config.dir) { 5185 case DMA_MEM_TO_MEM: 5186 if (uc->ud->match_data->type == DMA_TYPE_BCDMA) { 5187 seq_printf(s, "bchan%d)\n", uc->bchan->id); 5188 return; 5189 } 5190 5191 seq_printf(s, "chan%d pair [0x%04x -> 0x%04x], ", uc->tchan->id, 5192 ucc->src_thread, ucc->dst_thread); 5193 break; 5194 case DMA_DEV_TO_MEM: 5195 seq_printf(s, "rchan%d [0x%04x -> 0x%04x], ", uc->rchan->id, 5196 ucc->src_thread, ucc->dst_thread); 5197 if (uc->ud->match_data->type == DMA_TYPE_PKTDMA) 5198 seq_printf(s, "rflow%d, ", uc->rflow->id); 5199 break; 5200 case DMA_MEM_TO_DEV: 5201 seq_printf(s, "tchan%d [0x%04x -> 
0x%04x], ", uc->tchan->id, 5202 ucc->src_thread, ucc->dst_thread); 5203 if (uc->ud->match_data->type == DMA_TYPE_PKTDMA) 5204 seq_printf(s, "tflow%d, ", uc->tchan->tflow_id); 5205 break; 5206 default: 5207 seq_printf(s, ")\n"); 5208 return; 5209 } 5210 5211 if (ucc->ep_type == PSIL_EP_NATIVE) { 5212 seq_printf(s, "PSI-L Native"); 5213 if (ucc->metadata_size) { 5214 seq_printf(s, "[%s", ucc->needs_epib ? " EPIB" : ""); 5215 if (ucc->psd_size) 5216 seq_printf(s, " PSDsize:%u", ucc->psd_size); 5217 seq_printf(s, " ]"); 5218 } 5219 } else { 5220 seq_printf(s, "PDMA"); 5221 if (ucc->enable_acc32 || ucc->enable_burst) 5222 seq_printf(s, "[%s%s ]", 5223 ucc->enable_acc32 ? " ACC32" : "", 5224 ucc->enable_burst ? " BURST" : ""); 5225 } 5226 5227 seq_printf(s, ", %s)\n", ucc->pkt_mode ? "Packet mode" : "TR mode"); 5228 } 5229 5230 static void udma_dbg_summary_show(struct seq_file *s, 5231 struct dma_device *dma_dev) 5232 { 5233 struct dma_chan *chan; 5234 5235 list_for_each_entry(chan, &dma_dev->channels, device_node) { 5236 if (chan->client_count) 5237 udma_dbg_summary_show_chan(s, chan); 5238 } 5239 } 5240 #endif /* CONFIG_DEBUG_FS */ 5241 5242 static enum dmaengine_alignment udma_get_copy_align(struct udma_dev *ud) 5243 { 5244 const struct udma_match_data *match_data = ud->match_data; 5245 u8 tpl; 5246 5247 if (!match_data->enable_memcpy_support) 5248 return DMAENGINE_ALIGN_8_BYTES; 5249 5250 /* Get the highest TPL level the device supports for memcpy */ 5251 if (ud->bchan_cnt) 5252 tpl = udma_get_chan_tpl_index(&ud->bchan_tpl, 0); 5253 else if (ud->tchan_cnt) 5254 tpl = udma_get_chan_tpl_index(&ud->tchan_tpl, 0); 5255 else 5256 return DMAENGINE_ALIGN_8_BYTES; 5257 5258 switch (match_data->burst_size[tpl]) { 5259 case TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_256_BYTES: 5260 return DMAENGINE_ALIGN_256_BYTES; 5261 case TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_128_BYTES: 5262 return DMAENGINE_ALIGN_128_BYTES; 5263 case TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES: 5264 fallthrough; 5265 default: 5266 return DMAENGINE_ALIGN_64_BYTES; 5267 } 5268 } 5269 5270 #define TI_UDMAC_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \ 5271 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \ 5272 BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \ 5273 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \ 5274 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES)) 5275 5276 static int udma_probe(struct platform_device *pdev) 5277 { 5278 struct device_node *navss_node = pdev->dev.parent->of_node; 5279 const struct soc_device_attribute *soc; 5280 struct device *dev = &pdev->dev; 5281 struct udma_dev *ud; 5282 const struct of_device_id *match; 5283 int i, ret; 5284 int ch_count; 5285 5286 ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(48)); 5287 if (ret) 5288 dev_err(dev, "failed to set dma mask stuff\n"); 5289 5290 ud = devm_kzalloc(dev, sizeof(*ud), GFP_KERNEL); 5291 if (!ud) 5292 return -ENOMEM; 5293 5294 match = of_match_node(udma_of_match, dev->of_node); 5295 if (!match) { 5296 dev_err(dev, "No compatible match found\n"); 5297 return -ENODEV; 5298 } 5299 ud->match_data = match->data; 5300 5301 ud->soc_data = ud->match_data->soc_data; 5302 if (!ud->soc_data) { 5303 soc = soc_device_match(k3_soc_devices); 5304 if (!soc) { 5305 dev_err(dev, "No compatible SoC found\n"); 5306 return -ENODEV; 5307 } 5308 ud->soc_data = soc->data; 5309 } 5310 5311 ret = udma_get_mmrs(pdev, ud); 5312 if (ret) 5313 return ret; 5314 5315 ud->tisci_rm.tisci = ti_sci_get_by_phandle(dev->of_node, "ti,sci"); 5316 if (IS_ERR(ud->tisci_rm.tisci)) 5317 return PTR_ERR(ud->tisci_rm.tisci); 5318 5319 ret = 
of_property_read_u32(dev->of_node, "ti,sci-dev-id", 5320 &ud->tisci_rm.tisci_dev_id); 5321 if (ret) { 5322 dev_err(dev, "ti,sci-dev-id read failure %d\n", ret); 5323 return ret; 5324 } 5325 pdev->id = ud->tisci_rm.tisci_dev_id; 5326 5327 ret = of_property_read_u32(navss_node, "ti,sci-dev-id", 5328 &ud->tisci_rm.tisci_navss_dev_id); 5329 if (ret) { 5330 dev_err(dev, "NAVSS ti,sci-dev-id read failure %d\n", ret); 5331 return ret; 5332 } 5333 5334 if (ud->match_data->type == DMA_TYPE_UDMA) { 5335 ret = of_property_read_u32(dev->of_node, "ti,udma-atype", 5336 &ud->atype); 5337 if (!ret && ud->atype > 2) { 5338 dev_err(dev, "Invalid atype: %u\n", ud->atype); 5339 return -EINVAL; 5340 } 5341 } else { 5342 ret = of_property_read_u32(dev->of_node, "ti,asel", 5343 &ud->asel); 5344 if (!ret && ud->asel > 15) { 5345 dev_err(dev, "Invalid asel: %u\n", ud->asel); 5346 return -EINVAL; 5347 } 5348 } 5349 5350 ud->tisci_rm.tisci_udmap_ops = &ud->tisci_rm.tisci->ops.rm_udmap_ops; 5351 ud->tisci_rm.tisci_psil_ops = &ud->tisci_rm.tisci->ops.rm_psil_ops; 5352 5353 if (ud->match_data->type == DMA_TYPE_UDMA) { 5354 ud->ringacc = of_k3_ringacc_get_by_phandle(dev->of_node, "ti,ringacc"); 5355 } else { 5356 struct k3_ringacc_init_data ring_init_data; 5357 5358 ring_init_data.tisci = ud->tisci_rm.tisci; 5359 ring_init_data.tisci_dev_id = ud->tisci_rm.tisci_dev_id; 5360 if (ud->match_data->type == DMA_TYPE_BCDMA) { 5361 ring_init_data.num_rings = ud->bchan_cnt + 5362 ud->tchan_cnt + 5363 ud->rchan_cnt; 5364 } else { 5365 ring_init_data.num_rings = ud->rflow_cnt + 5366 ud->tflow_cnt; 5367 } 5368 5369 ud->ringacc = k3_ringacc_dmarings_init(pdev, &ring_init_data); 5370 } 5371 5372 if (IS_ERR(ud->ringacc)) 5373 return PTR_ERR(ud->ringacc); 5374 5375 dev->msi.domain = of_msi_get_domain(dev, dev->of_node, 5376 DOMAIN_BUS_TI_SCI_INTA_MSI); 5377 if (!dev->msi.domain) { 5378 return -EPROBE_DEFER; 5379 } 5380 5381 dma_cap_set(DMA_SLAVE, ud->ddev.cap_mask); 5382 /* cyclic operation is not supported via PKTDMA */ 5383 if (ud->match_data->type != DMA_TYPE_PKTDMA) { 5384 dma_cap_set(DMA_CYCLIC, ud->ddev.cap_mask); 5385 ud->ddev.device_prep_dma_cyclic = udma_prep_dma_cyclic; 5386 } 5387 5388 ud->ddev.device_config = udma_slave_config; 5389 ud->ddev.device_prep_slave_sg = udma_prep_slave_sg; 5390 ud->ddev.device_issue_pending = udma_issue_pending; 5391 ud->ddev.device_tx_status = udma_tx_status; 5392 ud->ddev.device_pause = udma_pause; 5393 ud->ddev.device_resume = udma_resume; 5394 ud->ddev.device_terminate_all = udma_terminate_all; 5395 ud->ddev.device_synchronize = udma_synchronize; 5396 #ifdef CONFIG_DEBUG_FS 5397 ud->ddev.dbg_summary_show = udma_dbg_summary_show; 5398 #endif 5399 5400 switch (ud->match_data->type) { 5401 case DMA_TYPE_UDMA: 5402 ud->ddev.device_alloc_chan_resources = 5403 udma_alloc_chan_resources; 5404 break; 5405 case DMA_TYPE_BCDMA: 5406 ud->ddev.device_alloc_chan_resources = 5407 bcdma_alloc_chan_resources; 5408 ud->ddev.device_router_config = bcdma_router_config; 5409 break; 5410 case DMA_TYPE_PKTDMA: 5411 ud->ddev.device_alloc_chan_resources = 5412 pktdma_alloc_chan_resources; 5413 break; 5414 default: 5415 return -EINVAL; 5416 } 5417 ud->ddev.device_free_chan_resources = udma_free_chan_resources; 5418 5419 ud->ddev.src_addr_widths = TI_UDMAC_BUSWIDTHS; 5420 ud->ddev.dst_addr_widths = TI_UDMAC_BUSWIDTHS; 5421 ud->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); 5422 ud->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; 5423 ud->ddev.desc_metadata_modes = DESC_METADATA_CLIENT | 5424 
DESC_METADATA_ENGINE; 5425 if (ud->match_data->enable_memcpy_support && 5426 !(ud->match_data->type == DMA_TYPE_BCDMA && ud->bchan_cnt == 0)) { 5427 dma_cap_set(DMA_MEMCPY, ud->ddev.cap_mask); 5428 ud->ddev.device_prep_dma_memcpy = udma_prep_dma_memcpy; 5429 ud->ddev.directions |= BIT(DMA_MEM_TO_MEM); 5430 } 5431 5432 ud->ddev.dev = dev; 5433 ud->dev = dev; 5434 ud->psil_base = ud->match_data->psil_base; 5435 5436 INIT_LIST_HEAD(&ud->ddev.channels); 5437 INIT_LIST_HEAD(&ud->desc_to_purge); 5438 5439 ch_count = setup_resources(ud); 5440 if (ch_count <= 0) 5441 return ch_count; 5442 5443 spin_lock_init(&ud->lock); 5444 INIT_WORK(&ud->purge_work, udma_purge_desc_work); 5445 5446 ud->desc_align = 64; 5447 if (ud->desc_align < dma_get_cache_alignment()) 5448 ud->desc_align = dma_get_cache_alignment(); 5449 5450 ret = udma_setup_rx_flush(ud); 5451 if (ret) 5452 return ret; 5453 5454 for (i = 0; i < ud->bchan_cnt; i++) { 5455 struct udma_bchan *bchan = &ud->bchans[i]; 5456 5457 bchan->id = i; 5458 bchan->reg_rt = ud->mmrs[MMR_BCHANRT] + i * 0x1000; 5459 } 5460 5461 for (i = 0; i < ud->tchan_cnt; i++) { 5462 struct udma_tchan *tchan = &ud->tchans[i]; 5463 5464 tchan->id = i; 5465 tchan->reg_rt = ud->mmrs[MMR_TCHANRT] + i * 0x1000; 5466 } 5467 5468 for (i = 0; i < ud->rchan_cnt; i++) { 5469 struct udma_rchan *rchan = &ud->rchans[i]; 5470 5471 rchan->id = i; 5472 rchan->reg_rt = ud->mmrs[MMR_RCHANRT] + i * 0x1000; 5473 } 5474 5475 for (i = 0; i < ud->rflow_cnt; i++) { 5476 struct udma_rflow *rflow = &ud->rflows[i]; 5477 5478 rflow->id = i; 5479 } 5480 5481 for (i = 0; i < ch_count; i++) { 5482 struct udma_chan *uc = &ud->channels[i]; 5483 5484 uc->ud = ud; 5485 uc->vc.desc_free = udma_desc_free; 5486 uc->id = i; 5487 uc->bchan = NULL; 5488 uc->tchan = NULL; 5489 uc->rchan = NULL; 5490 uc->config.remote_thread_id = -1; 5491 uc->config.mapped_channel_id = -1; 5492 uc->config.default_flow_id = -1; 5493 uc->config.dir = DMA_MEM_TO_MEM; 5494 uc->name = devm_kasprintf(dev, GFP_KERNEL, "%s chan%d", 5495 dev_name(dev), i); 5496 5497 vchan_init(&uc->vc, &ud->ddev); 5498 /* Use custom vchan completion handling */ 5499 tasklet_setup(&uc->vc.task, udma_vchan_complete); 5500 init_completion(&uc->teardown_completed); 5501 INIT_DELAYED_WORK(&uc->tx_drain.work, udma_check_tx_completion); 5502 } 5503 5504 /* Configure the copy_align to the maximum burst size the device supports */ 5505 ud->ddev.copy_align = udma_get_copy_align(ud); 5506 5507 ret = dma_async_device_register(&ud->ddev); 5508 if (ret) { 5509 dev_err(dev, "failed to register slave DMA engine: %d\n", ret); 5510 return ret; 5511 } 5512 5513 platform_set_drvdata(pdev, ud); 5514 5515 ret = of_dma_controller_register(dev->of_node, udma_of_xlate, ud); 5516 if (ret) { 5517 dev_err(dev, "failed to register of_dma controller\n"); 5518 dma_async_device_unregister(&ud->ddev); 5519 } 5520 5521 return ret; 5522 } 5523 5524 static struct platform_driver udma_driver = { 5525 .driver = { 5526 .name = "ti-udma", 5527 .of_match_table = udma_of_match, 5528 .suppress_bind_attrs = true, 5529 }, 5530 .probe = udma_probe, 5531 }; 5532 5533 module_platform_driver(udma_driver); 5534 MODULE_LICENSE("GPL v2"); 5535 5536 /* Private interfaces to UDMA */ 5537 #include "k3-udma-private.c" 5538
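
/* Module description; the wording below is an illustrative placeholder. */
MODULE_DESCRIPTION("Texas Instruments K3 NAVSS/DMSS UDMA, BCDMA and PKTDMA driver");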