1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com 4 * Author: Peter Ujfalusi <peter.ujfalusi@ti.com> 5 */ 6 7 #include <linux/kernel.h> 8 #include <linux/module.h> 9 #include <linux/delay.h> 10 #include <linux/dmaengine.h> 11 #include <linux/dma-mapping.h> 12 #include <linux/dmapool.h> 13 #include <linux/err.h> 14 #include <linux/init.h> 15 #include <linux/interrupt.h> 16 #include <linux/list.h> 17 #include <linux/platform_device.h> 18 #include <linux/slab.h> 19 #include <linux/spinlock.h> 20 #include <linux/sys_soc.h> 21 #include <linux/of.h> 22 #include <linux/of_dma.h> 23 #include <linux/of_device.h> 24 #include <linux/of_irq.h> 25 #include <linux/workqueue.h> 26 #include <linux/completion.h> 27 #include <linux/soc/ti/k3-ringacc.h> 28 #include <linux/soc/ti/ti_sci_protocol.h> 29 #include <linux/soc/ti/ti_sci_inta_msi.h> 30 #include <linux/dma/k3-event-router.h> 31 #include <linux/dma/ti-cppi5.h> 32 33 #include "../virt-dma.h" 34 #include "k3-udma.h" 35 #include "k3-psil-priv.h" 36 37 struct udma_static_tr { 38 u8 elsize; /* RPSTR0 */ 39 u16 elcnt; /* RPSTR0 */ 40 u16 bstcnt; /* RPSTR1 */ 41 }; 42 43 #define K3_UDMA_MAX_RFLOWS 1024 44 #define K3_UDMA_DEFAULT_RING_SIZE 16 45 46 /* How SRC/DST tag should be updated by UDMA in the descriptor's Word 3 */ 47 #define UDMA_RFLOW_SRCTAG_NONE 0 48 #define UDMA_RFLOW_SRCTAG_CFG_TAG 1 49 #define UDMA_RFLOW_SRCTAG_FLOW_ID 2 50 #define UDMA_RFLOW_SRCTAG_SRC_TAG 4 51 52 #define UDMA_RFLOW_DSTTAG_NONE 0 53 #define UDMA_RFLOW_DSTTAG_CFG_TAG 1 54 #define UDMA_RFLOW_DSTTAG_FLOW_ID 2 55 #define UDMA_RFLOW_DSTTAG_DST_TAG_LO 4 56 #define UDMA_RFLOW_DSTTAG_DST_TAG_HI 5 57 58 struct udma_chan; 59 60 enum k3_dma_type { 61 DMA_TYPE_UDMA = 0, 62 DMA_TYPE_BCDMA, 63 DMA_TYPE_PKTDMA, 64 }; 65 66 enum udma_mmr { 67 MMR_GCFG = 0, 68 MMR_BCHANRT, 69 MMR_RCHANRT, 70 MMR_TCHANRT, 71 MMR_LAST, 72 }; 73 74 static const char * const mmr_names[] = { 75 [MMR_GCFG] = "gcfg", 76 [MMR_BCHANRT] = "bchanrt", 77 [MMR_RCHANRT] = "rchanrt", 78 [MMR_TCHANRT] = "tchanrt", 79 }; 80 81 struct udma_tchan { 82 void __iomem *reg_rt; 83 84 int id; 85 struct k3_ring *t_ring; /* Transmit ring */ 86 struct k3_ring *tc_ring; /* Transmit Completion ring */ 87 int tflow_id; /* applicable only for PKTDMA */ 88 89 }; 90 91 #define udma_bchan udma_tchan 92 93 struct udma_rflow { 94 int id; 95 struct k3_ring *fd_ring; /* Free Descriptor ring */ 96 struct k3_ring *r_ring; /* Receive ring */ 97 }; 98 99 struct udma_rchan { 100 void __iomem *reg_rt; 101 102 int id; 103 }; 104 105 struct udma_oes_offsets { 106 /* K3 UDMA Output Event Offset */ 107 u32 udma_rchan; 108 109 /* BCDMA Output Event Offsets */ 110 u32 bcdma_bchan_data; 111 u32 bcdma_bchan_ring; 112 u32 bcdma_tchan_data; 113 u32 bcdma_tchan_ring; 114 u32 bcdma_rchan_data; 115 u32 bcdma_rchan_ring; 116 117 /* PKTDMA Output Event Offsets */ 118 u32 pktdma_tchan_flow; 119 u32 pktdma_rchan_flow; 120 }; 121 122 #define UDMA_FLAG_PDMA_ACC32 BIT(0) 123 #define UDMA_FLAG_PDMA_BURST BIT(1) 124 #define UDMA_FLAG_TDTYPE BIT(2) 125 #define UDMA_FLAG_BURST_SIZE BIT(3) 126 #define UDMA_FLAGS_J7_CLASS (UDMA_FLAG_PDMA_ACC32 | \ 127 UDMA_FLAG_PDMA_BURST | \ 128 UDMA_FLAG_TDTYPE | \ 129 UDMA_FLAG_BURST_SIZE) 130 131 struct udma_match_data { 132 enum k3_dma_type type; 133 u32 psil_base; 134 bool enable_memcpy_support; 135 u32 flags; 136 u32 statictr_z_mask; 137 u8 burst_size[3]; 138 }; 139 140 struct udma_soc_data { 141 struct udma_oes_offsets oes; 142 u32 bcdma_trigger_event_offset; 143 }; 144 145 
struct udma_hwdesc { 146 size_t cppi5_desc_size; 147 void *cppi5_desc_vaddr; 148 dma_addr_t cppi5_desc_paddr; 149 150 /* TR descriptor internal pointers */ 151 void *tr_req_base; 152 struct cppi5_tr_resp_t *tr_resp_base; 153 }; 154 155 struct udma_rx_flush { 156 struct udma_hwdesc hwdescs[2]; 157 158 size_t buffer_size; 159 void *buffer_vaddr; 160 dma_addr_t buffer_paddr; 161 }; 162 163 struct udma_tpl { 164 u8 levels; 165 u32 start_idx[3]; 166 }; 167 168 struct udma_dev { 169 struct dma_device ddev; 170 struct device *dev; 171 void __iomem *mmrs[MMR_LAST]; 172 const struct udma_match_data *match_data; 173 const struct udma_soc_data *soc_data; 174 175 struct udma_tpl bchan_tpl; 176 struct udma_tpl tchan_tpl; 177 struct udma_tpl rchan_tpl; 178 179 size_t desc_align; /* alignment to use for descriptors */ 180 181 struct udma_tisci_rm tisci_rm; 182 183 struct k3_ringacc *ringacc; 184 185 struct work_struct purge_work; 186 struct list_head desc_to_purge; 187 spinlock_t lock; 188 189 struct udma_rx_flush rx_flush; 190 191 int bchan_cnt; 192 int tchan_cnt; 193 int echan_cnt; 194 int rchan_cnt; 195 int rflow_cnt; 196 int tflow_cnt; 197 unsigned long *bchan_map; 198 unsigned long *tchan_map; 199 unsigned long *rchan_map; 200 unsigned long *rflow_gp_map; 201 unsigned long *rflow_gp_map_allocated; 202 unsigned long *rflow_in_use; 203 unsigned long *tflow_map; 204 205 struct udma_bchan *bchans; 206 struct udma_tchan *tchans; 207 struct udma_rchan *rchans; 208 struct udma_rflow *rflows; 209 210 struct udma_chan *channels; 211 u32 psil_base; 212 u32 atype; 213 u32 asel; 214 }; 215 216 struct udma_desc { 217 struct virt_dma_desc vd; 218 219 bool terminated; 220 221 enum dma_transfer_direction dir; 222 223 struct udma_static_tr static_tr; 224 u32 residue; 225 226 unsigned int sglen; 227 unsigned int desc_idx; /* Only used for cyclic in packet mode */ 228 unsigned int tr_idx; 229 230 u32 metadata_size; 231 void *metadata; /* pointer to provided metadata buffer (EPIP, PSdata) */ 232 233 unsigned int hwdesc_count; 234 struct udma_hwdesc hwdesc[]; 235 }; 236 237 enum udma_chan_state { 238 UDMA_CHAN_IS_IDLE = 0, /* not active, no teardown is in progress */ 239 UDMA_CHAN_IS_ACTIVE, /* Normal operation */ 240 UDMA_CHAN_IS_TERMINATING, /* channel is being terminated */ 241 }; 242 243 struct udma_tx_drain { 244 struct delayed_work work; 245 ktime_t tstamp; 246 u32 residue; 247 }; 248 249 struct udma_chan_config { 250 bool pkt_mode; /* TR or packet */ 251 bool needs_epib; /* EPIB is needed for the communication or not */ 252 u32 psd_size; /* size of Protocol Specific Data */ 253 u32 metadata_size; /* (needs_epib ? 
16:0) + psd_size */ 254 u32 hdesc_size; /* Size of a packet descriptor in packet mode */ 255 bool notdpkt; /* Suppress sending TDC packet */ 256 int remote_thread_id; 257 u32 atype; 258 u32 asel; 259 u32 src_thread; 260 u32 dst_thread; 261 enum psil_endpoint_type ep_type; 262 bool enable_acc32; 263 bool enable_burst; 264 enum udma_tp_level channel_tpl; /* Channel Throughput Level */ 265 266 u32 tr_trigger_type; 267 unsigned long tx_flags; 268 269 /* PKDMA mapped channel */ 270 int mapped_channel_id; 271 /* PKTDMA default tflow or rflow for mapped channel */ 272 int default_flow_id; 273 274 enum dma_transfer_direction dir; 275 }; 276 277 struct udma_chan { 278 struct virt_dma_chan vc; 279 struct dma_slave_config cfg; 280 struct udma_dev *ud; 281 struct device *dma_dev; 282 struct udma_desc *desc; 283 struct udma_desc *terminated_desc; 284 struct udma_static_tr static_tr; 285 char *name; 286 287 struct udma_bchan *bchan; 288 struct udma_tchan *tchan; 289 struct udma_rchan *rchan; 290 struct udma_rflow *rflow; 291 292 bool psil_paired; 293 294 int irq_num_ring; 295 int irq_num_udma; 296 297 bool cyclic; 298 bool paused; 299 300 enum udma_chan_state state; 301 struct completion teardown_completed; 302 303 struct udma_tx_drain tx_drain; 304 305 /* Channel configuration parameters */ 306 struct udma_chan_config config; 307 308 /* dmapool for packet mode descriptors */ 309 bool use_dma_pool; 310 struct dma_pool *hdesc_pool; 311 312 u32 id; 313 }; 314 315 static inline struct udma_dev *to_udma_dev(struct dma_device *d) 316 { 317 return container_of(d, struct udma_dev, ddev); 318 } 319 320 static inline struct udma_chan *to_udma_chan(struct dma_chan *c) 321 { 322 return container_of(c, struct udma_chan, vc.chan); 323 } 324 325 static inline struct udma_desc *to_udma_desc(struct dma_async_tx_descriptor *t) 326 { 327 return container_of(t, struct udma_desc, vd.tx); 328 } 329 330 /* Generic register access functions */ 331 static inline u32 udma_read(void __iomem *base, int reg) 332 { 333 return readl(base + reg); 334 } 335 336 static inline void udma_write(void __iomem *base, int reg, u32 val) 337 { 338 writel(val, base + reg); 339 } 340 341 static inline void udma_update_bits(void __iomem *base, int reg, 342 u32 mask, u32 val) 343 { 344 u32 tmp, orig; 345 346 orig = readl(base + reg); 347 tmp = orig & ~mask; 348 tmp |= (val & mask); 349 350 if (tmp != orig) 351 writel(tmp, base + reg); 352 } 353 354 /* TCHANRT */ 355 static inline u32 udma_tchanrt_read(struct udma_chan *uc, int reg) 356 { 357 if (!uc->tchan) 358 return 0; 359 return udma_read(uc->tchan->reg_rt, reg); 360 } 361 362 static inline void udma_tchanrt_write(struct udma_chan *uc, int reg, u32 val) 363 { 364 if (!uc->tchan) 365 return; 366 udma_write(uc->tchan->reg_rt, reg, val); 367 } 368 369 static inline void udma_tchanrt_update_bits(struct udma_chan *uc, int reg, 370 u32 mask, u32 val) 371 { 372 if (!uc->tchan) 373 return; 374 udma_update_bits(uc->tchan->reg_rt, reg, mask, val); 375 } 376 377 /* RCHANRT */ 378 static inline u32 udma_rchanrt_read(struct udma_chan *uc, int reg) 379 { 380 if (!uc->rchan) 381 return 0; 382 return udma_read(uc->rchan->reg_rt, reg); 383 } 384 385 static inline void udma_rchanrt_write(struct udma_chan *uc, int reg, u32 val) 386 { 387 if (!uc->rchan) 388 return; 389 udma_write(uc->rchan->reg_rt, reg, val); 390 } 391 392 static inline void udma_rchanrt_update_bits(struct udma_chan *uc, int reg, 393 u32 mask, u32 val) 394 { 395 if (!uc->rchan) 396 return; 397 udma_update_bits(uc->rchan->reg_rt, reg, mask, val); 
398 } 399 400 static int navss_psil_pair(struct udma_dev *ud, u32 src_thread, u32 dst_thread) 401 { 402 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; 403 404 dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET; 405 return tisci_rm->tisci_psil_ops->pair(tisci_rm->tisci, 406 tisci_rm->tisci_navss_dev_id, 407 src_thread, dst_thread); 408 } 409 410 static int navss_psil_unpair(struct udma_dev *ud, u32 src_thread, 411 u32 dst_thread) 412 { 413 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; 414 415 dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET; 416 return tisci_rm->tisci_psil_ops->unpair(tisci_rm->tisci, 417 tisci_rm->tisci_navss_dev_id, 418 src_thread, dst_thread); 419 } 420 421 static void k3_configure_chan_coherency(struct dma_chan *chan, u32 asel) 422 { 423 struct device *chan_dev = &chan->dev->device; 424 425 if (asel == 0) { 426 /* No special handling for the channel */ 427 chan->dev->chan_dma_dev = false; 428 429 chan_dev->dma_coherent = false; 430 chan_dev->dma_parms = NULL; 431 } else if (asel == 14 || asel == 15) { 432 chan->dev->chan_dma_dev = true; 433 434 chan_dev->dma_coherent = true; 435 dma_coerce_mask_and_coherent(chan_dev, DMA_BIT_MASK(48)); 436 chan_dev->dma_parms = chan_dev->parent->dma_parms; 437 } else { 438 dev_warn(chan->device->dev, "Invalid ASEL value: %u\n", asel); 439 440 chan_dev->dma_coherent = false; 441 chan_dev->dma_parms = NULL; 442 } 443 } 444 445 static u8 udma_get_chan_tpl_index(struct udma_tpl *tpl_map, int chan_id) 446 { 447 int i; 448 449 for (i = 0; i < tpl_map->levels; i++) { 450 if (chan_id >= tpl_map->start_idx[i]) 451 return i; 452 } 453 454 return 0; 455 } 456 457 static void udma_reset_uchan(struct udma_chan *uc) 458 { 459 memset(&uc->config, 0, sizeof(uc->config)); 460 uc->config.remote_thread_id = -1; 461 uc->config.mapped_channel_id = -1; 462 uc->config.default_flow_id = -1; 463 uc->state = UDMA_CHAN_IS_IDLE; 464 } 465 466 static void udma_dump_chan_stdata(struct udma_chan *uc) 467 { 468 struct device *dev = uc->ud->dev; 469 u32 offset; 470 int i; 471 472 if (uc->config.dir == DMA_MEM_TO_DEV || uc->config.dir == DMA_MEM_TO_MEM) { 473 dev_dbg(dev, "TCHAN State data:\n"); 474 for (i = 0; i < 32; i++) { 475 offset = UDMA_CHAN_RT_STDATA_REG + i * 4; 476 dev_dbg(dev, "TRT_STDATA[%02d]: 0x%08x\n", i, 477 udma_tchanrt_read(uc, offset)); 478 } 479 } 480 481 if (uc->config.dir == DMA_DEV_TO_MEM || uc->config.dir == DMA_MEM_TO_MEM) { 482 dev_dbg(dev, "RCHAN State data:\n"); 483 for (i = 0; i < 32; i++) { 484 offset = UDMA_CHAN_RT_STDATA_REG + i * 4; 485 dev_dbg(dev, "RRT_STDATA[%02d]: 0x%08x\n", i, 486 udma_rchanrt_read(uc, offset)); 487 } 488 } 489 } 490 491 static inline dma_addr_t udma_curr_cppi5_desc_paddr(struct udma_desc *d, 492 int idx) 493 { 494 return d->hwdesc[idx].cppi5_desc_paddr; 495 } 496 497 static inline void *udma_curr_cppi5_desc_vaddr(struct udma_desc *d, int idx) 498 { 499 return d->hwdesc[idx].cppi5_desc_vaddr; 500 } 501 502 static struct udma_desc *udma_udma_desc_from_paddr(struct udma_chan *uc, 503 dma_addr_t paddr) 504 { 505 struct udma_desc *d = uc->terminated_desc; 506 507 if (d) { 508 dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d, 509 d->desc_idx); 510 511 if (desc_paddr != paddr) 512 d = NULL; 513 } 514 515 if (!d) { 516 d = uc->desc; 517 if (d) { 518 dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d, 519 d->desc_idx); 520 521 if (desc_paddr != paddr) 522 d = NULL; 523 } 524 } 525 526 return d; 527 } 528 529 static void udma_free_hwdesc(struct udma_chan *uc, struct udma_desc *d) 530 { 531 if (uc->use_dma_pool) { 532 int i; 533 
534 for (i = 0; i < d->hwdesc_count; i++) { 535 if (!d->hwdesc[i].cppi5_desc_vaddr) 536 continue; 537 538 dma_pool_free(uc->hdesc_pool, 539 d->hwdesc[i].cppi5_desc_vaddr, 540 d->hwdesc[i].cppi5_desc_paddr); 541 542 d->hwdesc[i].cppi5_desc_vaddr = NULL; 543 } 544 } else if (d->hwdesc[0].cppi5_desc_vaddr) { 545 dma_free_coherent(uc->dma_dev, d->hwdesc[0].cppi5_desc_size, 546 d->hwdesc[0].cppi5_desc_vaddr, 547 d->hwdesc[0].cppi5_desc_paddr); 548 549 d->hwdesc[0].cppi5_desc_vaddr = NULL; 550 } 551 } 552 553 static void udma_purge_desc_work(struct work_struct *work) 554 { 555 struct udma_dev *ud = container_of(work, typeof(*ud), purge_work); 556 struct virt_dma_desc *vd, *_vd; 557 unsigned long flags; 558 LIST_HEAD(head); 559 560 spin_lock_irqsave(&ud->lock, flags); 561 list_splice_tail_init(&ud->desc_to_purge, &head); 562 spin_unlock_irqrestore(&ud->lock, flags); 563 564 list_for_each_entry_safe(vd, _vd, &head, node) { 565 struct udma_chan *uc = to_udma_chan(vd->tx.chan); 566 struct udma_desc *d = to_udma_desc(&vd->tx); 567 568 udma_free_hwdesc(uc, d); 569 list_del(&vd->node); 570 kfree(d); 571 } 572 573 /* If more to purge, schedule the work again */ 574 if (!list_empty(&ud->desc_to_purge)) 575 schedule_work(&ud->purge_work); 576 } 577 578 static void udma_desc_free(struct virt_dma_desc *vd) 579 { 580 struct udma_dev *ud = to_udma_dev(vd->tx.chan->device); 581 struct udma_chan *uc = to_udma_chan(vd->tx.chan); 582 struct udma_desc *d = to_udma_desc(&vd->tx); 583 unsigned long flags; 584 585 if (uc->terminated_desc == d) 586 uc->terminated_desc = NULL; 587 588 if (uc->use_dma_pool) { 589 udma_free_hwdesc(uc, d); 590 kfree(d); 591 return; 592 } 593 594 spin_lock_irqsave(&ud->lock, flags); 595 list_add_tail(&vd->node, &ud->desc_to_purge); 596 spin_unlock_irqrestore(&ud->lock, flags); 597 598 schedule_work(&ud->purge_work); 599 } 600 601 static bool udma_is_chan_running(struct udma_chan *uc) 602 { 603 u32 trt_ctl = 0; 604 u32 rrt_ctl = 0; 605 606 if (uc->tchan) 607 trt_ctl = udma_tchanrt_read(uc, UDMA_CHAN_RT_CTL_REG); 608 if (uc->rchan) 609 rrt_ctl = udma_rchanrt_read(uc, UDMA_CHAN_RT_CTL_REG); 610 611 if (trt_ctl & UDMA_CHAN_RT_CTL_EN || rrt_ctl & UDMA_CHAN_RT_CTL_EN) 612 return true; 613 614 return false; 615 } 616 617 static bool udma_is_chan_paused(struct udma_chan *uc) 618 { 619 u32 val, pause_mask; 620 621 switch (uc->config.dir) { 622 case DMA_DEV_TO_MEM: 623 val = udma_rchanrt_read(uc, UDMA_CHAN_RT_PEER_RT_EN_REG); 624 pause_mask = UDMA_PEER_RT_EN_PAUSE; 625 break; 626 case DMA_MEM_TO_DEV: 627 val = udma_tchanrt_read(uc, UDMA_CHAN_RT_PEER_RT_EN_REG); 628 pause_mask = UDMA_PEER_RT_EN_PAUSE; 629 break; 630 case DMA_MEM_TO_MEM: 631 val = udma_tchanrt_read(uc, UDMA_CHAN_RT_CTL_REG); 632 pause_mask = UDMA_CHAN_RT_CTL_PAUSE; 633 break; 634 default: 635 return false; 636 } 637 638 if (val & pause_mask) 639 return true; 640 641 return false; 642 } 643 644 static inline dma_addr_t udma_get_rx_flush_hwdesc_paddr(struct udma_chan *uc) 645 { 646 return uc->ud->rx_flush.hwdescs[uc->config.pkt_mode].cppi5_desc_paddr; 647 } 648 649 static int udma_push_to_ring(struct udma_chan *uc, int idx) 650 { 651 struct udma_desc *d = uc->desc; 652 struct k3_ring *ring = NULL; 653 dma_addr_t paddr; 654 655 switch (uc->config.dir) { 656 case DMA_DEV_TO_MEM: 657 ring = uc->rflow->fd_ring; 658 break; 659 case DMA_MEM_TO_DEV: 660 case DMA_MEM_TO_MEM: 661 ring = uc->tchan->t_ring; 662 break; 663 default: 664 return -EINVAL; 665 } 666 667 /* RX flush packet: idx == -1 is only passed in case of DEV_TO_MEM */ 668 if (idx == 
-1) { 669 paddr = udma_get_rx_flush_hwdesc_paddr(uc); 670 } else { 671 paddr = udma_curr_cppi5_desc_paddr(d, idx); 672 673 wmb(); /* Ensure that writes are not moved over this point */ 674 } 675 676 return k3_ringacc_ring_push(ring, &paddr); 677 } 678 679 static bool udma_desc_is_rx_flush(struct udma_chan *uc, dma_addr_t addr) 680 { 681 if (uc->config.dir != DMA_DEV_TO_MEM) 682 return false; 683 684 if (addr == udma_get_rx_flush_hwdesc_paddr(uc)) 685 return true; 686 687 return false; 688 } 689 690 static int udma_pop_from_ring(struct udma_chan *uc, dma_addr_t *addr) 691 { 692 struct k3_ring *ring = NULL; 693 int ret; 694 695 switch (uc->config.dir) { 696 case DMA_DEV_TO_MEM: 697 ring = uc->rflow->r_ring; 698 break; 699 case DMA_MEM_TO_DEV: 700 case DMA_MEM_TO_MEM: 701 ring = uc->tchan->tc_ring; 702 break; 703 default: 704 return -ENOENT; 705 } 706 707 ret = k3_ringacc_ring_pop(ring, addr); 708 if (ret) 709 return ret; 710 711 rmb(); /* Ensure that reads are not moved before this point */ 712 713 /* Teardown completion */ 714 if (cppi5_desc_is_tdcm(*addr)) 715 return 0; 716 717 /* Check for flush descriptor */ 718 if (udma_desc_is_rx_flush(uc, *addr)) 719 return -ENOENT; 720 721 return 0; 722 } 723 724 static void udma_reset_rings(struct udma_chan *uc) 725 { 726 struct k3_ring *ring1 = NULL; 727 struct k3_ring *ring2 = NULL; 728 729 switch (uc->config.dir) { 730 case DMA_DEV_TO_MEM: 731 if (uc->rchan) { 732 ring1 = uc->rflow->fd_ring; 733 ring2 = uc->rflow->r_ring; 734 } 735 break; 736 case DMA_MEM_TO_DEV: 737 case DMA_MEM_TO_MEM: 738 if (uc->tchan) { 739 ring1 = uc->tchan->t_ring; 740 ring2 = uc->tchan->tc_ring; 741 } 742 break; 743 default: 744 break; 745 } 746 747 if (ring1) 748 k3_ringacc_ring_reset_dma(ring1, 749 k3_ringacc_ring_get_occ(ring1)); 750 if (ring2) 751 k3_ringacc_ring_reset(ring2); 752 753 /* make sure we are not leaking memory by stalled descriptor */ 754 if (uc->terminated_desc) { 755 udma_desc_free(&uc->terminated_desc->vd); 756 uc->terminated_desc = NULL; 757 } 758 } 759 760 static void udma_decrement_byte_counters(struct udma_chan *uc, u32 val) 761 { 762 if (uc->desc->dir == DMA_DEV_TO_MEM) { 763 udma_rchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val); 764 udma_rchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val); 765 udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val); 766 } else { 767 udma_tchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val); 768 udma_tchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val); 769 if (!uc->bchan) 770 udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val); 771 } 772 } 773 774 static void udma_reset_counters(struct udma_chan *uc) 775 { 776 u32 val; 777 778 if (uc->tchan) { 779 val = udma_tchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG); 780 udma_tchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val); 781 782 val = udma_tchanrt_read(uc, UDMA_CHAN_RT_SBCNT_REG); 783 udma_tchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val); 784 785 val = udma_tchanrt_read(uc, UDMA_CHAN_RT_PCNT_REG); 786 udma_tchanrt_write(uc, UDMA_CHAN_RT_PCNT_REG, val); 787 788 if (!uc->bchan) { 789 val = udma_tchanrt_read(uc, UDMA_CHAN_RT_PEER_BCNT_REG); 790 udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val); 791 } 792 } 793 794 if (uc->rchan) { 795 val = udma_rchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG); 796 udma_rchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val); 797 798 val = udma_rchanrt_read(uc, UDMA_CHAN_RT_SBCNT_REG); 799 udma_rchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val); 800 801 val = udma_rchanrt_read(uc, UDMA_CHAN_RT_PCNT_REG); 802 udma_rchanrt_write(uc, UDMA_CHAN_RT_PCNT_REG, val); 803 804 val = 
udma_rchanrt_read(uc, UDMA_CHAN_RT_PEER_BCNT_REG); 805 udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val); 806 } 807 } 808 809 static int udma_reset_chan(struct udma_chan *uc, bool hard) 810 { 811 switch (uc->config.dir) { 812 case DMA_DEV_TO_MEM: 813 udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, 0); 814 udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 0); 815 break; 816 case DMA_MEM_TO_DEV: 817 udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 0); 818 udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, 0); 819 break; 820 case DMA_MEM_TO_MEM: 821 udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 0); 822 udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 0); 823 break; 824 default: 825 return -EINVAL; 826 } 827 828 /* Reset all counters */ 829 udma_reset_counters(uc); 830 831 /* Hard reset: re-initialize the channel to reset */ 832 if (hard) { 833 struct udma_chan_config ucc_backup; 834 int ret; 835 836 memcpy(&ucc_backup, &uc->config, sizeof(uc->config)); 837 uc->ud->ddev.device_free_chan_resources(&uc->vc.chan); 838 839 /* restore the channel configuration */ 840 memcpy(&uc->config, &ucc_backup, sizeof(uc->config)); 841 ret = uc->ud->ddev.device_alloc_chan_resources(&uc->vc.chan); 842 if (ret) 843 return ret; 844 845 /* 846 * Setting forced teardown after forced reset helps recovering 847 * the rchan. 848 */ 849 if (uc->config.dir == DMA_DEV_TO_MEM) 850 udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 851 UDMA_CHAN_RT_CTL_EN | 852 UDMA_CHAN_RT_CTL_TDOWN | 853 UDMA_CHAN_RT_CTL_FTDOWN); 854 } 855 uc->state = UDMA_CHAN_IS_IDLE; 856 857 return 0; 858 } 859 860 static void udma_start_desc(struct udma_chan *uc) 861 { 862 struct udma_chan_config *ucc = &uc->config; 863 864 if (uc->ud->match_data->type == DMA_TYPE_UDMA && ucc->pkt_mode && 865 (uc->cyclic || ucc->dir == DMA_DEV_TO_MEM)) { 866 int i; 867 868 /* 869 * UDMA only: Push all descriptors to ring for packet mode 870 * cyclic or RX 871 * PKTDMA supports pre-linked descriptor and cyclic is not 872 * supported 873 */ 874 for (i = 0; i < uc->desc->sglen; i++) 875 udma_push_to_ring(uc, i); 876 } else { 877 udma_push_to_ring(uc, 0); 878 } 879 } 880 881 static bool udma_chan_needs_reconfiguration(struct udma_chan *uc) 882 { 883 /* Only PDMAs have staticTR */ 884 if (uc->config.ep_type == PSIL_EP_NATIVE) 885 return false; 886 887 /* Check if the staticTR configuration has changed for TX */ 888 if (memcmp(&uc->static_tr, &uc->desc->static_tr, sizeof(uc->static_tr))) 889 return true; 890 891 return false; 892 } 893 894 static int udma_start(struct udma_chan *uc) 895 { 896 struct virt_dma_desc *vd = vchan_next_desc(&uc->vc); 897 898 if (!vd) { 899 uc->desc = NULL; 900 return -ENOENT; 901 } 902 903 list_del(&vd->node); 904 905 uc->desc = to_udma_desc(&vd->tx); 906 907 /* Channel is already running and does not need reconfiguration */ 908 if (udma_is_chan_running(uc) && !udma_chan_needs_reconfiguration(uc)) { 909 udma_start_desc(uc); 910 goto out; 911 } 912 913 /* Make sure that we clear the teardown bit, if it is set */ 914 udma_reset_chan(uc, false); 915 916 /* Push descriptors before we start the channel */ 917 udma_start_desc(uc); 918 919 switch (uc->desc->dir) { 920 case DMA_DEV_TO_MEM: 921 /* Config remote TR */ 922 if (uc->config.ep_type == PSIL_EP_PDMA_XY) { 923 u32 val = PDMA_STATIC_TR_Y(uc->desc->static_tr.elcnt) | 924 PDMA_STATIC_TR_X(uc->desc->static_tr.elsize); 925 const struct udma_match_data *match_data = 926 uc->ud->match_data; 927 928 if (uc->config.enable_acc32) 929 val |= PDMA_STATIC_TR_XY_ACC32; 930 if (uc->config.enable_burst) 931 val |= 
PDMA_STATIC_TR_XY_BURST; 932 933 udma_rchanrt_write(uc, 934 UDMA_CHAN_RT_PEER_STATIC_TR_XY_REG, 935 val); 936 937 udma_rchanrt_write(uc, 938 UDMA_CHAN_RT_PEER_STATIC_TR_Z_REG, 939 PDMA_STATIC_TR_Z(uc->desc->static_tr.bstcnt, 940 match_data->statictr_z_mask)); 941 942 /* save the current staticTR configuration */ 943 memcpy(&uc->static_tr, &uc->desc->static_tr, 944 sizeof(uc->static_tr)); 945 } 946 947 udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 948 UDMA_CHAN_RT_CTL_EN); 949 950 /* Enable remote */ 951 udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, 952 UDMA_PEER_RT_EN_ENABLE); 953 954 break; 955 case DMA_MEM_TO_DEV: 956 /* Config remote TR */ 957 if (uc->config.ep_type == PSIL_EP_PDMA_XY) { 958 u32 val = PDMA_STATIC_TR_Y(uc->desc->static_tr.elcnt) | 959 PDMA_STATIC_TR_X(uc->desc->static_tr.elsize); 960 961 if (uc->config.enable_acc32) 962 val |= PDMA_STATIC_TR_XY_ACC32; 963 if (uc->config.enable_burst) 964 val |= PDMA_STATIC_TR_XY_BURST; 965 966 udma_tchanrt_write(uc, 967 UDMA_CHAN_RT_PEER_STATIC_TR_XY_REG, 968 val); 969 970 /* save the current staticTR configuration */ 971 memcpy(&uc->static_tr, &uc->desc->static_tr, 972 sizeof(uc->static_tr)); 973 } 974 975 /* Enable remote */ 976 udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, 977 UDMA_PEER_RT_EN_ENABLE); 978 979 udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 980 UDMA_CHAN_RT_CTL_EN); 981 982 break; 983 case DMA_MEM_TO_MEM: 984 udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 985 UDMA_CHAN_RT_CTL_EN); 986 udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 987 UDMA_CHAN_RT_CTL_EN); 988 989 break; 990 default: 991 return -EINVAL; 992 } 993 994 uc->state = UDMA_CHAN_IS_ACTIVE; 995 out: 996 997 return 0; 998 } 999 1000 static int udma_stop(struct udma_chan *uc) 1001 { 1002 enum udma_chan_state old_state = uc->state; 1003 1004 uc->state = UDMA_CHAN_IS_TERMINATING; 1005 reinit_completion(&uc->teardown_completed); 1006 1007 switch (uc->config.dir) { 1008 case DMA_DEV_TO_MEM: 1009 if (!uc->cyclic && !uc->desc) 1010 udma_push_to_ring(uc, -1); 1011 1012 udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, 1013 UDMA_PEER_RT_EN_ENABLE | 1014 UDMA_PEER_RT_EN_TEARDOWN); 1015 break; 1016 case DMA_MEM_TO_DEV: 1017 udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, 1018 UDMA_PEER_RT_EN_ENABLE | 1019 UDMA_PEER_RT_EN_FLUSH); 1020 udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 1021 UDMA_CHAN_RT_CTL_EN | 1022 UDMA_CHAN_RT_CTL_TDOWN); 1023 break; 1024 case DMA_MEM_TO_MEM: 1025 udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 1026 UDMA_CHAN_RT_CTL_EN | 1027 UDMA_CHAN_RT_CTL_TDOWN); 1028 break; 1029 default: 1030 uc->state = old_state; 1031 complete_all(&uc->teardown_completed); 1032 return -EINVAL; 1033 } 1034 1035 return 0; 1036 } 1037 1038 static void udma_cyclic_packet_elapsed(struct udma_chan *uc) 1039 { 1040 struct udma_desc *d = uc->desc; 1041 struct cppi5_host_desc_t *h_desc; 1042 1043 h_desc = d->hwdesc[d->desc_idx].cppi5_desc_vaddr; 1044 cppi5_hdesc_reset_to_original(h_desc); 1045 udma_push_to_ring(uc, d->desc_idx); 1046 d->desc_idx = (d->desc_idx + 1) % d->sglen; 1047 } 1048 1049 static inline void udma_fetch_epib(struct udma_chan *uc, struct udma_desc *d) 1050 { 1051 struct cppi5_host_desc_t *h_desc = d->hwdesc[0].cppi5_desc_vaddr; 1052 1053 memcpy(d->metadata, h_desc->epib, d->metadata_size); 1054 } 1055 1056 static bool udma_is_desc_really_done(struct udma_chan *uc, struct udma_desc *d) 1057 { 1058 u32 peer_bcnt, bcnt; 1059 1060 /* 1061 * Only TX towards PDMA is affected. 
1062 * If DMA_PREP_INTERRUPT is not set by consumer then skip the transfer 1063 * completion calculation, consumer must ensure that there is no stale 1064 * data in DMA fabric in this case. 1065 */ 1066 if (uc->config.ep_type == PSIL_EP_NATIVE || 1067 uc->config.dir != DMA_MEM_TO_DEV || !(uc->config.tx_flags & DMA_PREP_INTERRUPT)) 1068 return true; 1069 1070 peer_bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_PEER_BCNT_REG); 1071 bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG); 1072 1073 /* Transfer is incomplete, store current residue and time stamp */ 1074 if (peer_bcnt < bcnt) { 1075 uc->tx_drain.residue = bcnt - peer_bcnt; 1076 uc->tx_drain.tstamp = ktime_get(); 1077 return false; 1078 } 1079 1080 return true; 1081 } 1082 1083 static void udma_check_tx_completion(struct work_struct *work) 1084 { 1085 struct udma_chan *uc = container_of(work, typeof(*uc), 1086 tx_drain.work.work); 1087 bool desc_done = true; 1088 u32 residue_diff; 1089 ktime_t time_diff; 1090 unsigned long delay; 1091 1092 while (1) { 1093 if (uc->desc) { 1094 /* Get previous residue and time stamp */ 1095 residue_diff = uc->tx_drain.residue; 1096 time_diff = uc->tx_drain.tstamp; 1097 /* 1098 * Get current residue and time stamp or see if 1099 * transfer is complete 1100 */ 1101 desc_done = udma_is_desc_really_done(uc, uc->desc); 1102 } 1103 1104 if (!desc_done) { 1105 /* 1106 * Find the time delta and residue delta w.r.t 1107 * previous poll 1108 */ 1109 time_diff = ktime_sub(uc->tx_drain.tstamp, 1110 time_diff) + 1; 1111 residue_diff -= uc->tx_drain.residue; 1112 if (residue_diff) { 1113 /* 1114 * Try to guess when we should check 1115 * next time by calculating rate at 1116 * which data is being drained at the 1117 * peer device 1118 */ 1119 delay = (time_diff / residue_diff) * 1120 uc->tx_drain.residue; 1121 } else { 1122 /* No progress, check again in 1 second */ 1123 schedule_delayed_work(&uc->tx_drain.work, HZ); 1124 break; 1125 } 1126 1127 usleep_range(ktime_to_us(delay), 1128 ktime_to_us(delay) + 10); 1129 continue; 1130 } 1131 1132 if (uc->desc) { 1133 struct udma_desc *d = uc->desc; 1134 1135 udma_decrement_byte_counters(uc, d->residue); 1136 udma_start(uc); 1137 vchan_cookie_complete(&d->vd); 1138 break; 1139 } 1140 1141 break; 1142 } 1143 } 1144 1145 static irqreturn_t udma_ring_irq_handler(int irq, void *data) 1146 { 1147 struct udma_chan *uc = data; 1148 struct udma_desc *d; 1149 dma_addr_t paddr = 0; 1150 1151 if (udma_pop_from_ring(uc, &paddr) || !paddr) 1152 return IRQ_HANDLED; 1153 1154 spin_lock(&uc->vc.lock); 1155 1156 /* Teardown completion message */ 1157 if (cppi5_desc_is_tdcm(paddr)) { 1158 complete_all(&uc->teardown_completed); 1159 1160 if (uc->terminated_desc) { 1161 udma_desc_free(&uc->terminated_desc->vd); 1162 uc->terminated_desc = NULL; 1163 } 1164 1165 if (!uc->desc) 1166 udma_start(uc); 1167 1168 goto out; 1169 } 1170 1171 d = udma_udma_desc_from_paddr(uc, paddr); 1172 1173 if (d) { 1174 dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d, 1175 d->desc_idx); 1176 if (desc_paddr != paddr) { 1177 dev_err(uc->ud->dev, "not matching descriptors!\n"); 1178 goto out; 1179 } 1180 1181 if (d == uc->desc) { 1182 /* active descriptor */ 1183 if (uc->cyclic) { 1184 udma_cyclic_packet_elapsed(uc); 1185 vchan_cyclic_callback(&d->vd); 1186 } else { 1187 if (udma_is_desc_really_done(uc, d)) { 1188 udma_decrement_byte_counters(uc, d->residue); 1189 udma_start(uc); 1190 vchan_cookie_complete(&d->vd); 1191 } else { 1192 schedule_delayed_work(&uc->tx_drain.work, 1193 0); 1194 } 1195 } 1196 } else { 1197 
/* 1198 * terminated descriptor, mark the descriptor as 1199 * completed to update the channel's cookie marker 1200 */ 1201 dma_cookie_complete(&d->vd.tx); 1202 } 1203 } 1204 out: 1205 spin_unlock(&uc->vc.lock); 1206 1207 return IRQ_HANDLED; 1208 } 1209 1210 static irqreturn_t udma_udma_irq_handler(int irq, void *data) 1211 { 1212 struct udma_chan *uc = data; 1213 struct udma_desc *d; 1214 1215 spin_lock(&uc->vc.lock); 1216 d = uc->desc; 1217 if (d) { 1218 d->tr_idx = (d->tr_idx + 1) % d->sglen; 1219 1220 if (uc->cyclic) { 1221 vchan_cyclic_callback(&d->vd); 1222 } else { 1223 /* TODO: figure out the real amount of data */ 1224 udma_decrement_byte_counters(uc, d->residue); 1225 udma_start(uc); 1226 vchan_cookie_complete(&d->vd); 1227 } 1228 } 1229 1230 spin_unlock(&uc->vc.lock); 1231 1232 return IRQ_HANDLED; 1233 } 1234 1235 /** 1236 * __udma_alloc_gp_rflow_range - alloc range of GP RX flows 1237 * @ud: UDMA device 1238 * @from: Start the search from this flow id number 1239 * @cnt: Number of consecutive flow ids to allocate 1240 * 1241 * Allocate range of RX flow ids for future use, those flows can be requested 1242 * only using explicit flow id number. if @from is set to -1 it will try to find 1243 * first free range. if @from is positive value it will force allocation only 1244 * of the specified range of flows. 1245 * 1246 * Returns -ENOMEM if can't find free range. 1247 * -EEXIST if requested range is busy. 1248 * -EINVAL if wrong input values passed. 1249 * Returns flow id on success. 1250 */ 1251 static int __udma_alloc_gp_rflow_range(struct udma_dev *ud, int from, int cnt) 1252 { 1253 int start, tmp_from; 1254 DECLARE_BITMAP(tmp, K3_UDMA_MAX_RFLOWS); 1255 1256 tmp_from = from; 1257 if (tmp_from < 0) 1258 tmp_from = ud->rchan_cnt; 1259 /* default flows can't be allocated and accessible only by id */ 1260 if (tmp_from < ud->rchan_cnt) 1261 return -EINVAL; 1262 1263 if (tmp_from + cnt > ud->rflow_cnt) 1264 return -EINVAL; 1265 1266 bitmap_or(tmp, ud->rflow_gp_map, ud->rflow_gp_map_allocated, 1267 ud->rflow_cnt); 1268 1269 start = bitmap_find_next_zero_area(tmp, 1270 ud->rflow_cnt, 1271 tmp_from, cnt, 0); 1272 if (start >= ud->rflow_cnt) 1273 return -ENOMEM; 1274 1275 if (from >= 0 && start != from) 1276 return -EEXIST; 1277 1278 bitmap_set(ud->rflow_gp_map_allocated, start, cnt); 1279 return start; 1280 } 1281 1282 static int __udma_free_gp_rflow_range(struct udma_dev *ud, int from, int cnt) 1283 { 1284 if (from < ud->rchan_cnt) 1285 return -EINVAL; 1286 if (from + cnt > ud->rflow_cnt) 1287 return -EINVAL; 1288 1289 bitmap_clear(ud->rflow_gp_map_allocated, from, cnt); 1290 return 0; 1291 } 1292 1293 static struct udma_rflow *__udma_get_rflow(struct udma_dev *ud, int id) 1294 { 1295 /* 1296 * Attempt to request rflow by ID can be made for any rflow 1297 * if not in use with assumption that caller knows what's doing. 
1298 * TI-SCI FW will perform additional permission check ant way, it's 1299 * safe 1300 */ 1301 1302 if (id < 0 || id >= ud->rflow_cnt) 1303 return ERR_PTR(-ENOENT); 1304 1305 if (test_bit(id, ud->rflow_in_use)) 1306 return ERR_PTR(-ENOENT); 1307 1308 if (ud->rflow_gp_map) { 1309 /* GP rflow has to be allocated first */ 1310 if (!test_bit(id, ud->rflow_gp_map) && 1311 !test_bit(id, ud->rflow_gp_map_allocated)) 1312 return ERR_PTR(-EINVAL); 1313 } 1314 1315 dev_dbg(ud->dev, "get rflow%d\n", id); 1316 set_bit(id, ud->rflow_in_use); 1317 return &ud->rflows[id]; 1318 } 1319 1320 static void __udma_put_rflow(struct udma_dev *ud, struct udma_rflow *rflow) 1321 { 1322 if (!test_bit(rflow->id, ud->rflow_in_use)) { 1323 dev_err(ud->dev, "attempt to put unused rflow%d\n", rflow->id); 1324 return; 1325 } 1326 1327 dev_dbg(ud->dev, "put rflow%d\n", rflow->id); 1328 clear_bit(rflow->id, ud->rflow_in_use); 1329 } 1330 1331 #define UDMA_RESERVE_RESOURCE(res) \ 1332 static struct udma_##res *__udma_reserve_##res(struct udma_dev *ud, \ 1333 enum udma_tp_level tpl, \ 1334 int id) \ 1335 { \ 1336 if (id >= 0) { \ 1337 if (test_bit(id, ud->res##_map)) { \ 1338 dev_err(ud->dev, "res##%d is in use\n", id); \ 1339 return ERR_PTR(-ENOENT); \ 1340 } \ 1341 } else { \ 1342 int start; \ 1343 \ 1344 if (tpl >= ud->res##_tpl.levels) \ 1345 tpl = ud->res##_tpl.levels - 1; \ 1346 \ 1347 start = ud->res##_tpl.start_idx[tpl]; \ 1348 \ 1349 id = find_next_zero_bit(ud->res##_map, ud->res##_cnt, \ 1350 start); \ 1351 if (id == ud->res##_cnt) { \ 1352 return ERR_PTR(-ENOENT); \ 1353 } \ 1354 } \ 1355 \ 1356 set_bit(id, ud->res##_map); \ 1357 return &ud->res##s[id]; \ 1358 } 1359 1360 UDMA_RESERVE_RESOURCE(bchan); 1361 UDMA_RESERVE_RESOURCE(tchan); 1362 UDMA_RESERVE_RESOURCE(rchan); 1363 1364 static int bcdma_get_bchan(struct udma_chan *uc) 1365 { 1366 struct udma_dev *ud = uc->ud; 1367 enum udma_tp_level tpl; 1368 int ret; 1369 1370 if (uc->bchan) { 1371 dev_dbg(ud->dev, "chan%d: already have bchan%d allocated\n", 1372 uc->id, uc->bchan->id); 1373 return 0; 1374 } 1375 1376 /* 1377 * Use normal channels for peripherals, and highest TPL channel for 1378 * mem2mem 1379 */ 1380 if (uc->config.tr_trigger_type) 1381 tpl = 0; 1382 else 1383 tpl = ud->bchan_tpl.levels - 1; 1384 1385 uc->bchan = __udma_reserve_bchan(ud, tpl, -1); 1386 if (IS_ERR(uc->bchan)) { 1387 ret = PTR_ERR(uc->bchan); 1388 uc->bchan = NULL; 1389 return ret; 1390 } 1391 1392 uc->tchan = uc->bchan; 1393 1394 return 0; 1395 } 1396 1397 static int udma_get_tchan(struct udma_chan *uc) 1398 { 1399 struct udma_dev *ud = uc->ud; 1400 int ret; 1401 1402 if (uc->tchan) { 1403 dev_dbg(ud->dev, "chan%d: already have tchan%d allocated\n", 1404 uc->id, uc->tchan->id); 1405 return 0; 1406 } 1407 1408 /* 1409 * mapped_channel_id is -1 for UDMA, BCDMA and PKTDMA unmapped channels. 1410 * For PKTDMA mapped channels it is configured to a channel which must 1411 * be used to service the peripheral. 
1412 */ 1413 uc->tchan = __udma_reserve_tchan(ud, uc->config.channel_tpl, 1414 uc->config.mapped_channel_id); 1415 if (IS_ERR(uc->tchan)) { 1416 ret = PTR_ERR(uc->tchan); 1417 uc->tchan = NULL; 1418 return ret; 1419 } 1420 1421 if (ud->tflow_cnt) { 1422 int tflow_id; 1423 1424 /* Only PKTDMA have support for tx flows */ 1425 if (uc->config.default_flow_id >= 0) 1426 tflow_id = uc->config.default_flow_id; 1427 else 1428 tflow_id = uc->tchan->id; 1429 1430 if (test_bit(tflow_id, ud->tflow_map)) { 1431 dev_err(ud->dev, "tflow%d is in use\n", tflow_id); 1432 clear_bit(uc->tchan->id, ud->tchan_map); 1433 uc->tchan = NULL; 1434 return -ENOENT; 1435 } 1436 1437 uc->tchan->tflow_id = tflow_id; 1438 set_bit(tflow_id, ud->tflow_map); 1439 } else { 1440 uc->tchan->tflow_id = -1; 1441 } 1442 1443 return 0; 1444 } 1445 1446 static int udma_get_rchan(struct udma_chan *uc) 1447 { 1448 struct udma_dev *ud = uc->ud; 1449 int ret; 1450 1451 if (uc->rchan) { 1452 dev_dbg(ud->dev, "chan%d: already have rchan%d allocated\n", 1453 uc->id, uc->rchan->id); 1454 return 0; 1455 } 1456 1457 /* 1458 * mapped_channel_id is -1 for UDMA, BCDMA and PKTDMA unmapped channels. 1459 * For PKTDMA mapped channels it is configured to a channel which must 1460 * be used to service the peripheral. 1461 */ 1462 uc->rchan = __udma_reserve_rchan(ud, uc->config.channel_tpl, 1463 uc->config.mapped_channel_id); 1464 if (IS_ERR(uc->rchan)) { 1465 ret = PTR_ERR(uc->rchan); 1466 uc->rchan = NULL; 1467 return ret; 1468 } 1469 1470 return 0; 1471 } 1472 1473 static int udma_get_chan_pair(struct udma_chan *uc) 1474 { 1475 struct udma_dev *ud = uc->ud; 1476 int chan_id, end; 1477 1478 if ((uc->tchan && uc->rchan) && uc->tchan->id == uc->rchan->id) { 1479 dev_info(ud->dev, "chan%d: already have %d pair allocated\n", 1480 uc->id, uc->tchan->id); 1481 return 0; 1482 } 1483 1484 if (uc->tchan) { 1485 dev_err(ud->dev, "chan%d: already have tchan%d allocated\n", 1486 uc->id, uc->tchan->id); 1487 return -EBUSY; 1488 } else if (uc->rchan) { 1489 dev_err(ud->dev, "chan%d: already have rchan%d allocated\n", 1490 uc->id, uc->rchan->id); 1491 return -EBUSY; 1492 } 1493 1494 /* Can be optimized, but let's have it like this for now */ 1495 end = min(ud->tchan_cnt, ud->rchan_cnt); 1496 /* 1497 * Try to use the highest TPL channel pair for MEM_TO_MEM channels 1498 * Note: in UDMAP the channel TPL is symmetric between tchan and rchan 1499 */ 1500 chan_id = ud->tchan_tpl.start_idx[ud->tchan_tpl.levels - 1]; 1501 for (; chan_id < end; chan_id++) { 1502 if (!test_bit(chan_id, ud->tchan_map) && 1503 !test_bit(chan_id, ud->rchan_map)) 1504 break; 1505 } 1506 1507 if (chan_id == end) 1508 return -ENOENT; 1509 1510 set_bit(chan_id, ud->tchan_map); 1511 set_bit(chan_id, ud->rchan_map); 1512 uc->tchan = &ud->tchans[chan_id]; 1513 uc->rchan = &ud->rchans[chan_id]; 1514 1515 /* UDMA does not use tx flows */ 1516 uc->tchan->tflow_id = -1; 1517 1518 return 0; 1519 } 1520 1521 static int udma_get_rflow(struct udma_chan *uc, int flow_id) 1522 { 1523 struct udma_dev *ud = uc->ud; 1524 int ret; 1525 1526 if (!uc->rchan) { 1527 dev_err(ud->dev, "chan%d: does not have rchan??\n", uc->id); 1528 return -EINVAL; 1529 } 1530 1531 if (uc->rflow) { 1532 dev_dbg(ud->dev, "chan%d: already have rflow%d allocated\n", 1533 uc->id, uc->rflow->id); 1534 return 0; 1535 } 1536 1537 uc->rflow = __udma_get_rflow(ud, flow_id); 1538 if (IS_ERR(uc->rflow)) { 1539 ret = PTR_ERR(uc->rflow); 1540 uc->rflow = NULL; 1541 return ret; 1542 } 1543 1544 return 0; 1545 } 1546 1547 static void 
bcdma_put_bchan(struct udma_chan *uc) 1548 { 1549 struct udma_dev *ud = uc->ud; 1550 1551 if (uc->bchan) { 1552 dev_dbg(ud->dev, "chan%d: put bchan%d\n", uc->id, 1553 uc->bchan->id); 1554 clear_bit(uc->bchan->id, ud->bchan_map); 1555 uc->bchan = NULL; 1556 uc->tchan = NULL; 1557 } 1558 } 1559 1560 static void udma_put_rchan(struct udma_chan *uc) 1561 { 1562 struct udma_dev *ud = uc->ud; 1563 1564 if (uc->rchan) { 1565 dev_dbg(ud->dev, "chan%d: put rchan%d\n", uc->id, 1566 uc->rchan->id); 1567 clear_bit(uc->rchan->id, ud->rchan_map); 1568 uc->rchan = NULL; 1569 } 1570 } 1571 1572 static void udma_put_tchan(struct udma_chan *uc) 1573 { 1574 struct udma_dev *ud = uc->ud; 1575 1576 if (uc->tchan) { 1577 dev_dbg(ud->dev, "chan%d: put tchan%d\n", uc->id, 1578 uc->tchan->id); 1579 clear_bit(uc->tchan->id, ud->tchan_map); 1580 1581 if (uc->tchan->tflow_id >= 0) 1582 clear_bit(uc->tchan->tflow_id, ud->tflow_map); 1583 1584 uc->tchan = NULL; 1585 } 1586 } 1587 1588 static void udma_put_rflow(struct udma_chan *uc) 1589 { 1590 struct udma_dev *ud = uc->ud; 1591 1592 if (uc->rflow) { 1593 dev_dbg(ud->dev, "chan%d: put rflow%d\n", uc->id, 1594 uc->rflow->id); 1595 __udma_put_rflow(ud, uc->rflow); 1596 uc->rflow = NULL; 1597 } 1598 } 1599 1600 static void bcdma_free_bchan_resources(struct udma_chan *uc) 1601 { 1602 if (!uc->bchan) 1603 return; 1604 1605 k3_ringacc_ring_free(uc->bchan->tc_ring); 1606 k3_ringacc_ring_free(uc->bchan->t_ring); 1607 uc->bchan->tc_ring = NULL; 1608 uc->bchan->t_ring = NULL; 1609 k3_configure_chan_coherency(&uc->vc.chan, 0); 1610 1611 bcdma_put_bchan(uc); 1612 } 1613 1614 static int bcdma_alloc_bchan_resources(struct udma_chan *uc) 1615 { 1616 struct k3_ring_cfg ring_cfg; 1617 struct udma_dev *ud = uc->ud; 1618 int ret; 1619 1620 ret = bcdma_get_bchan(uc); 1621 if (ret) 1622 return ret; 1623 1624 ret = k3_ringacc_request_rings_pair(ud->ringacc, uc->bchan->id, -1, 1625 &uc->bchan->t_ring, 1626 &uc->bchan->tc_ring); 1627 if (ret) { 1628 ret = -EBUSY; 1629 goto err_ring; 1630 } 1631 1632 memset(&ring_cfg, 0, sizeof(ring_cfg)); 1633 ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE; 1634 ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8; 1635 ring_cfg.mode = K3_RINGACC_RING_MODE_RING; 1636 1637 k3_configure_chan_coherency(&uc->vc.chan, ud->asel); 1638 ring_cfg.asel = ud->asel; 1639 ring_cfg.dma_dev = dmaengine_get_dma_device(&uc->vc.chan); 1640 1641 ret = k3_ringacc_ring_cfg(uc->bchan->t_ring, &ring_cfg); 1642 if (ret) 1643 goto err_ringcfg; 1644 1645 return 0; 1646 1647 err_ringcfg: 1648 k3_ringacc_ring_free(uc->bchan->tc_ring); 1649 uc->bchan->tc_ring = NULL; 1650 k3_ringacc_ring_free(uc->bchan->t_ring); 1651 uc->bchan->t_ring = NULL; 1652 k3_configure_chan_coherency(&uc->vc.chan, 0); 1653 err_ring: 1654 bcdma_put_bchan(uc); 1655 1656 return ret; 1657 } 1658 1659 static void udma_free_tx_resources(struct udma_chan *uc) 1660 { 1661 if (!uc->tchan) 1662 return; 1663 1664 k3_ringacc_ring_free(uc->tchan->t_ring); 1665 k3_ringacc_ring_free(uc->tchan->tc_ring); 1666 uc->tchan->t_ring = NULL; 1667 uc->tchan->tc_ring = NULL; 1668 1669 udma_put_tchan(uc); 1670 } 1671 1672 static int udma_alloc_tx_resources(struct udma_chan *uc) 1673 { 1674 struct k3_ring_cfg ring_cfg; 1675 struct udma_dev *ud = uc->ud; 1676 struct udma_tchan *tchan; 1677 int ring_idx, ret; 1678 1679 ret = udma_get_tchan(uc); 1680 if (ret) 1681 return ret; 1682 1683 tchan = uc->tchan; 1684 if (tchan->tflow_id >= 0) 1685 ring_idx = tchan->tflow_id; 1686 else 1687 ring_idx = ud->bchan_cnt + tchan->id; 1688 1689 ret = 
k3_ringacc_request_rings_pair(ud->ringacc, ring_idx, -1, 1690 &tchan->t_ring, 1691 &tchan->tc_ring); 1692 if (ret) { 1693 ret = -EBUSY; 1694 goto err_ring; 1695 } 1696 1697 memset(&ring_cfg, 0, sizeof(ring_cfg)); 1698 ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE; 1699 ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8; 1700 if (ud->match_data->type == DMA_TYPE_UDMA) { 1701 ring_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE; 1702 } else { 1703 ring_cfg.mode = K3_RINGACC_RING_MODE_RING; 1704 1705 k3_configure_chan_coherency(&uc->vc.chan, uc->config.asel); 1706 ring_cfg.asel = uc->config.asel; 1707 ring_cfg.dma_dev = dmaengine_get_dma_device(&uc->vc.chan); 1708 } 1709 1710 ret = k3_ringacc_ring_cfg(tchan->t_ring, &ring_cfg); 1711 ret |= k3_ringacc_ring_cfg(tchan->tc_ring, &ring_cfg); 1712 1713 if (ret) 1714 goto err_ringcfg; 1715 1716 return 0; 1717 1718 err_ringcfg: 1719 k3_ringacc_ring_free(uc->tchan->tc_ring); 1720 uc->tchan->tc_ring = NULL; 1721 k3_ringacc_ring_free(uc->tchan->t_ring); 1722 uc->tchan->t_ring = NULL; 1723 err_ring: 1724 udma_put_tchan(uc); 1725 1726 return ret; 1727 } 1728 1729 static void udma_free_rx_resources(struct udma_chan *uc) 1730 { 1731 if (!uc->rchan) 1732 return; 1733 1734 if (uc->rflow) { 1735 struct udma_rflow *rflow = uc->rflow; 1736 1737 k3_ringacc_ring_free(rflow->fd_ring); 1738 k3_ringacc_ring_free(rflow->r_ring); 1739 rflow->fd_ring = NULL; 1740 rflow->r_ring = NULL; 1741 1742 udma_put_rflow(uc); 1743 } 1744 1745 udma_put_rchan(uc); 1746 } 1747 1748 static int udma_alloc_rx_resources(struct udma_chan *uc) 1749 { 1750 struct udma_dev *ud = uc->ud; 1751 struct k3_ring_cfg ring_cfg; 1752 struct udma_rflow *rflow; 1753 int fd_ring_id; 1754 int ret; 1755 1756 ret = udma_get_rchan(uc); 1757 if (ret) 1758 return ret; 1759 1760 /* For MEM_TO_MEM we don't need rflow or rings */ 1761 if (uc->config.dir == DMA_MEM_TO_MEM) 1762 return 0; 1763 1764 if (uc->config.default_flow_id >= 0) 1765 ret = udma_get_rflow(uc, uc->config.default_flow_id); 1766 else 1767 ret = udma_get_rflow(uc, uc->rchan->id); 1768 1769 if (ret) { 1770 ret = -EBUSY; 1771 goto err_rflow; 1772 } 1773 1774 rflow = uc->rflow; 1775 if (ud->tflow_cnt) 1776 fd_ring_id = ud->tflow_cnt + rflow->id; 1777 else 1778 fd_ring_id = ud->bchan_cnt + ud->tchan_cnt + ud->echan_cnt + 1779 uc->rchan->id; 1780 1781 ret = k3_ringacc_request_rings_pair(ud->ringacc, fd_ring_id, -1, 1782 &rflow->fd_ring, &rflow->r_ring); 1783 if (ret) { 1784 ret = -EBUSY; 1785 goto err_ring; 1786 } 1787 1788 memset(&ring_cfg, 0, sizeof(ring_cfg)); 1789 1790 ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8; 1791 if (ud->match_data->type == DMA_TYPE_UDMA) { 1792 if (uc->config.pkt_mode) 1793 ring_cfg.size = SG_MAX_SEGMENTS; 1794 else 1795 ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE; 1796 1797 ring_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE; 1798 } else { 1799 ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE; 1800 ring_cfg.mode = K3_RINGACC_RING_MODE_RING; 1801 1802 k3_configure_chan_coherency(&uc->vc.chan, uc->config.asel); 1803 ring_cfg.asel = uc->config.asel; 1804 ring_cfg.dma_dev = dmaengine_get_dma_device(&uc->vc.chan); 1805 } 1806 1807 ret = k3_ringacc_ring_cfg(rflow->fd_ring, &ring_cfg); 1808 1809 ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE; 1810 ret |= k3_ringacc_ring_cfg(rflow->r_ring, &ring_cfg); 1811 1812 if (ret) 1813 goto err_ringcfg; 1814 1815 return 0; 1816 1817 err_ringcfg: 1818 k3_ringacc_ring_free(rflow->r_ring); 1819 rflow->r_ring = NULL; 1820 k3_ringacc_ring_free(rflow->fd_ring); 1821 rflow->fd_ring = NULL; 1822 err_ring: 1823 udma_put_rflow(uc); 1824 
err_rflow: 1825 udma_put_rchan(uc); 1826 1827 return ret; 1828 } 1829 1830 #define TISCI_BCDMA_BCHAN_VALID_PARAMS ( \ 1831 TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \ 1832 TI_SCI_MSG_VALUE_RM_UDMAP_CH_EXTENDED_CH_TYPE_VALID) 1833 1834 #define TISCI_BCDMA_TCHAN_VALID_PARAMS ( \ 1835 TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \ 1836 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID) 1837 1838 #define TISCI_BCDMA_RCHAN_VALID_PARAMS ( \ 1839 TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID) 1840 1841 #define TISCI_UDMA_TCHAN_VALID_PARAMS ( \ 1842 TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \ 1843 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_EINFO_VALID | \ 1844 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_PSWORDS_VALID | \ 1845 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID | \ 1846 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID | \ 1847 TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID | \ 1848 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID | \ 1849 TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID) 1850 1851 #define TISCI_UDMA_RCHAN_VALID_PARAMS ( \ 1852 TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \ 1853 TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID | \ 1854 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID | \ 1855 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID | \ 1856 TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_SHORT_VALID | \ 1857 TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_LONG_VALID | \ 1858 TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID | \ 1859 TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID | \ 1860 TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID) 1861 1862 static int udma_tisci_m2m_channel_config(struct udma_chan *uc) 1863 { 1864 struct udma_dev *ud = uc->ud; 1865 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; 1866 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops; 1867 struct udma_tchan *tchan = uc->tchan; 1868 struct udma_rchan *rchan = uc->rchan; 1869 u8 burst_size = 0; 1870 int ret; 1871 u8 tpl; 1872 1873 /* Non synchronized - mem to mem type of transfer */ 1874 int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring); 1875 struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 }; 1876 struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 }; 1877 1878 if (ud->match_data->flags & UDMA_FLAG_BURST_SIZE) { 1879 tpl = udma_get_chan_tpl_index(&ud->tchan_tpl, tchan->id); 1880 1881 burst_size = ud->match_data->burst_size[tpl]; 1882 } 1883 1884 req_tx.valid_params = TISCI_UDMA_TCHAN_VALID_PARAMS; 1885 req_tx.nav_id = tisci_rm->tisci_dev_id; 1886 req_tx.index = tchan->id; 1887 req_tx.tx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR; 1888 req_tx.tx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2; 1889 req_tx.txcq_qnum = tc_ring; 1890 req_tx.tx_atype = ud->atype; 1891 if (burst_size) { 1892 req_tx.valid_params |= TI_SCI_MSG_VALUE_RM_UDMAP_CH_BURST_SIZE_VALID; 1893 req_tx.tx_burst_size = burst_size; 1894 } 1895 1896 ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx); 1897 if (ret) { 1898 dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret); 1899 return ret; 1900 } 1901 1902 req_rx.valid_params = TISCI_UDMA_RCHAN_VALID_PARAMS; 1903 req_rx.nav_id = tisci_rm->tisci_dev_id; 1904 req_rx.index = rchan->id; 1905 req_rx.rx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2; 1906 req_rx.rxcq_qnum = tc_ring; 1907 req_rx.rx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR; 1908 req_rx.rx_atype = ud->atype; 1909 if (burst_size) { 1910 req_rx.valid_params |= TI_SCI_MSG_VALUE_RM_UDMAP_CH_BURST_SIZE_VALID; 1911 req_rx.rx_burst_size = burst_size; 1912 } 1913 1914 ret = 
tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx); 1915 if (ret) 1916 dev_err(ud->dev, "rchan%d alloc failed %d\n", rchan->id, ret); 1917 1918 return ret; 1919 } 1920 1921 static int bcdma_tisci_m2m_channel_config(struct udma_chan *uc) 1922 { 1923 struct udma_dev *ud = uc->ud; 1924 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; 1925 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops; 1926 struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 }; 1927 struct udma_bchan *bchan = uc->bchan; 1928 u8 burst_size = 0; 1929 int ret; 1930 u8 tpl; 1931 1932 if (ud->match_data->flags & UDMA_FLAG_BURST_SIZE) { 1933 tpl = udma_get_chan_tpl_index(&ud->bchan_tpl, bchan->id); 1934 1935 burst_size = ud->match_data->burst_size[tpl]; 1936 } 1937 1938 req_tx.valid_params = TISCI_BCDMA_BCHAN_VALID_PARAMS; 1939 req_tx.nav_id = tisci_rm->tisci_dev_id; 1940 req_tx.extended_ch_type = TI_SCI_RM_BCDMA_EXTENDED_CH_TYPE_BCHAN; 1941 req_tx.index = bchan->id; 1942 if (burst_size) { 1943 req_tx.valid_params |= TI_SCI_MSG_VALUE_RM_UDMAP_CH_BURST_SIZE_VALID; 1944 req_tx.tx_burst_size = burst_size; 1945 } 1946 1947 ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx); 1948 if (ret) 1949 dev_err(ud->dev, "bchan%d cfg failed %d\n", bchan->id, ret); 1950 1951 return ret; 1952 } 1953 1954 static int udma_tisci_tx_channel_config(struct udma_chan *uc) 1955 { 1956 struct udma_dev *ud = uc->ud; 1957 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; 1958 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops; 1959 struct udma_tchan *tchan = uc->tchan; 1960 int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring); 1961 struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 }; 1962 u32 mode, fetch_size; 1963 int ret; 1964 1965 if (uc->config.pkt_mode) { 1966 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR; 1967 fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib, 1968 uc->config.psd_size, 0); 1969 } else { 1970 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBRR; 1971 fetch_size = sizeof(struct cppi5_desc_hdr_t); 1972 } 1973 1974 req_tx.valid_params = TISCI_UDMA_TCHAN_VALID_PARAMS; 1975 req_tx.nav_id = tisci_rm->tisci_dev_id; 1976 req_tx.index = tchan->id; 1977 req_tx.tx_chan_type = mode; 1978 req_tx.tx_supr_tdpkt = uc->config.notdpkt; 1979 req_tx.tx_fetch_size = fetch_size >> 2; 1980 req_tx.txcq_qnum = tc_ring; 1981 req_tx.tx_atype = uc->config.atype; 1982 if (uc->config.ep_type == PSIL_EP_PDMA_XY && 1983 ud->match_data->flags & UDMA_FLAG_TDTYPE) { 1984 /* wait for peer to complete the teardown for PDMAs */ 1985 req_tx.valid_params |= 1986 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_TDTYPE_VALID; 1987 req_tx.tx_tdtype = 1; 1988 } 1989 1990 ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx); 1991 if (ret) 1992 dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret); 1993 1994 return ret; 1995 } 1996 1997 static int bcdma_tisci_tx_channel_config(struct udma_chan *uc) 1998 { 1999 struct udma_dev *ud = uc->ud; 2000 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; 2001 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops; 2002 struct udma_tchan *tchan = uc->tchan; 2003 struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 }; 2004 int ret; 2005 2006 req_tx.valid_params = TISCI_BCDMA_TCHAN_VALID_PARAMS; 2007 req_tx.nav_id = tisci_rm->tisci_dev_id; 2008 req_tx.index = tchan->id; 2009 req_tx.tx_supr_tdpkt = uc->config.notdpkt; 2010 if (ud->match_data->flags & UDMA_FLAG_TDTYPE) { 2011 /* wait for peer to complete the teardown for PDMAs */ 2012 req_tx.valid_params |= 2013 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_TDTYPE_VALID; 2014 
req_tx.tx_tdtype = 1; 2015 } 2016 2017 ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx); 2018 if (ret) 2019 dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret); 2020 2021 return ret; 2022 } 2023 2024 #define pktdma_tisci_tx_channel_config bcdma_tisci_tx_channel_config 2025 2026 static int udma_tisci_rx_channel_config(struct udma_chan *uc) 2027 { 2028 struct udma_dev *ud = uc->ud; 2029 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; 2030 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops; 2031 struct udma_rchan *rchan = uc->rchan; 2032 int fd_ring = k3_ringacc_get_ring_id(uc->rflow->fd_ring); 2033 int rx_ring = k3_ringacc_get_ring_id(uc->rflow->r_ring); 2034 struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 }; 2035 struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 }; 2036 u32 mode, fetch_size; 2037 int ret; 2038 2039 if (uc->config.pkt_mode) { 2040 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR; 2041 fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib, 2042 uc->config.psd_size, 0); 2043 } else { 2044 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBRR; 2045 fetch_size = sizeof(struct cppi5_desc_hdr_t); 2046 } 2047 2048 req_rx.valid_params = TISCI_UDMA_RCHAN_VALID_PARAMS; 2049 req_rx.nav_id = tisci_rm->tisci_dev_id; 2050 req_rx.index = rchan->id; 2051 req_rx.rx_fetch_size = fetch_size >> 2; 2052 req_rx.rxcq_qnum = rx_ring; 2053 req_rx.rx_chan_type = mode; 2054 req_rx.rx_atype = uc->config.atype; 2055 2056 ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx); 2057 if (ret) { 2058 dev_err(ud->dev, "rchan%d cfg failed %d\n", rchan->id, ret); 2059 return ret; 2060 } 2061 2062 flow_req.valid_params = 2063 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID | 2064 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID | 2065 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID | 2066 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID | 2067 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID | 2068 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID | 2069 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID | 2070 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID | 2071 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID | 2072 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID | 2073 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID | 2074 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID | 2075 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID; 2076 2077 flow_req.nav_id = tisci_rm->tisci_dev_id; 2078 flow_req.flow_index = rchan->id; 2079 2080 if (uc->config.needs_epib) 2081 flow_req.rx_einfo_present = 1; 2082 else 2083 flow_req.rx_einfo_present = 0; 2084 if (uc->config.psd_size) 2085 flow_req.rx_psinfo_present = 1; 2086 else 2087 flow_req.rx_psinfo_present = 0; 2088 flow_req.rx_error_handling = 1; 2089 flow_req.rx_dest_qnum = rx_ring; 2090 flow_req.rx_src_tag_hi_sel = UDMA_RFLOW_SRCTAG_NONE; 2091 flow_req.rx_src_tag_lo_sel = UDMA_RFLOW_SRCTAG_SRC_TAG; 2092 flow_req.rx_dest_tag_hi_sel = UDMA_RFLOW_DSTTAG_DST_TAG_HI; 2093 flow_req.rx_dest_tag_lo_sel = UDMA_RFLOW_DSTTAG_DST_TAG_LO; 2094 flow_req.rx_fdq0_sz0_qnum = fd_ring; 2095 flow_req.rx_fdq1_qnum = fd_ring; 2096 flow_req.rx_fdq2_qnum = fd_ring; 2097 flow_req.rx_fdq3_qnum = fd_ring; 2098 2099 ret = tisci_ops->rx_flow_cfg(tisci_rm->tisci, &flow_req); 2100 2101 if (ret) 2102 dev_err(ud->dev, "flow%d config failed: %d\n", rchan->id, ret); 2103 2104 return 0; 2105 } 2106 2107 static int bcdma_tisci_rx_channel_config(struct udma_chan *uc) 2108 { 2109 struct udma_dev *ud = uc->ud; 2110 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; 2111 const 
struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops; 2112 struct udma_rchan *rchan = uc->rchan; 2113 struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 }; 2114 int ret; 2115 2116 req_rx.valid_params = TISCI_BCDMA_RCHAN_VALID_PARAMS; 2117 req_rx.nav_id = tisci_rm->tisci_dev_id; 2118 req_rx.index = rchan->id; 2119 2120 ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx); 2121 if (ret) 2122 dev_err(ud->dev, "rchan%d cfg failed %d\n", rchan->id, ret); 2123 2124 return ret; 2125 } 2126 2127 static int pktdma_tisci_rx_channel_config(struct udma_chan *uc) 2128 { 2129 struct udma_dev *ud = uc->ud; 2130 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; 2131 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops; 2132 struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 }; 2133 struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 }; 2134 int ret; 2135 2136 req_rx.valid_params = TISCI_BCDMA_RCHAN_VALID_PARAMS; 2137 req_rx.nav_id = tisci_rm->tisci_dev_id; 2138 req_rx.index = uc->rchan->id; 2139 2140 ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx); 2141 if (ret) { 2142 dev_err(ud->dev, "rchan%d cfg failed %d\n", uc->rchan->id, ret); 2143 return ret; 2144 } 2145 2146 flow_req.valid_params = 2147 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID | 2148 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID | 2149 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID; 2150 2151 flow_req.nav_id = tisci_rm->tisci_dev_id; 2152 flow_req.flow_index = uc->rflow->id; 2153 2154 if (uc->config.needs_epib) 2155 flow_req.rx_einfo_present = 1; 2156 else 2157 flow_req.rx_einfo_present = 0; 2158 if (uc->config.psd_size) 2159 flow_req.rx_psinfo_present = 1; 2160 else 2161 flow_req.rx_psinfo_present = 0; 2162 flow_req.rx_error_handling = 1; 2163 2164 ret = tisci_ops->rx_flow_cfg(tisci_rm->tisci, &flow_req); 2165 2166 if (ret) 2167 dev_err(ud->dev, "flow%d config failed: %d\n", uc->rflow->id, 2168 ret); 2169 2170 return ret; 2171 } 2172 2173 static int udma_alloc_chan_resources(struct dma_chan *chan) 2174 { 2175 struct udma_chan *uc = to_udma_chan(chan); 2176 struct udma_dev *ud = to_udma_dev(chan->device); 2177 const struct udma_soc_data *soc_data = ud->soc_data; 2178 struct k3_ring *irq_ring; 2179 u32 irq_udma_idx; 2180 int ret; 2181 2182 uc->dma_dev = ud->dev; 2183 2184 if (uc->config.pkt_mode || uc->config.dir == DMA_MEM_TO_MEM) { 2185 uc->use_dma_pool = true; 2186 /* in case of MEM_TO_MEM we have maximum of two TRs */ 2187 if (uc->config.dir == DMA_MEM_TO_MEM) { 2188 uc->config.hdesc_size = cppi5_trdesc_calc_size( 2189 sizeof(struct cppi5_tr_type15_t), 2); 2190 uc->config.pkt_mode = false; 2191 } 2192 } 2193 2194 if (uc->use_dma_pool) { 2195 uc->hdesc_pool = dma_pool_create(uc->name, ud->ddev.dev, 2196 uc->config.hdesc_size, 2197 ud->desc_align, 2198 0); 2199 if (!uc->hdesc_pool) { 2200 dev_err(ud->ddev.dev, 2201 "Descriptor pool allocation failed\n"); 2202 uc->use_dma_pool = false; 2203 ret = -ENOMEM; 2204 goto err_cleanup; 2205 } 2206 } 2207 2208 /* 2209 * Make sure that the completion is in a known state: 2210 * No teardown, the channel is idle 2211 */ 2212 reinit_completion(&uc->teardown_completed); 2213 complete_all(&uc->teardown_completed); 2214 uc->state = UDMA_CHAN_IS_IDLE; 2215 2216 switch (uc->config.dir) { 2217 case DMA_MEM_TO_MEM: 2218 /* Non synchronized - mem to mem type of transfer */ 2219 dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-MEM\n", __func__, 2220 uc->id); 2221 2222 ret = udma_get_chan_pair(uc); 2223 if (ret) 2224 goto err_cleanup; 2225 2226 ret = 
udma_alloc_tx_resources(uc); 2227 if (ret) { 2228 udma_put_rchan(uc); 2229 goto err_cleanup; 2230 } 2231 2232 ret = udma_alloc_rx_resources(uc); 2233 if (ret) { 2234 udma_free_tx_resources(uc); 2235 goto err_cleanup; 2236 } 2237 2238 uc->config.src_thread = ud->psil_base + uc->tchan->id; 2239 uc->config.dst_thread = (ud->psil_base + uc->rchan->id) | 2240 K3_PSIL_DST_THREAD_ID_OFFSET; 2241 2242 irq_ring = uc->tchan->tc_ring; 2243 irq_udma_idx = uc->tchan->id; 2244 2245 ret = udma_tisci_m2m_channel_config(uc); 2246 break; 2247 case DMA_MEM_TO_DEV: 2248 /* Slave transfer synchronized - mem to dev (TX) trasnfer */ 2249 dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__, 2250 uc->id); 2251 2252 ret = udma_alloc_tx_resources(uc); 2253 if (ret) 2254 goto err_cleanup; 2255 2256 uc->config.src_thread = ud->psil_base + uc->tchan->id; 2257 uc->config.dst_thread = uc->config.remote_thread_id; 2258 uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET; 2259 2260 irq_ring = uc->tchan->tc_ring; 2261 irq_udma_idx = uc->tchan->id; 2262 2263 ret = udma_tisci_tx_channel_config(uc); 2264 break; 2265 case DMA_DEV_TO_MEM: 2266 /* Slave transfer synchronized - dev to mem (RX) trasnfer */ 2267 dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__, 2268 uc->id); 2269 2270 ret = udma_alloc_rx_resources(uc); 2271 if (ret) 2272 goto err_cleanup; 2273 2274 uc->config.src_thread = uc->config.remote_thread_id; 2275 uc->config.dst_thread = (ud->psil_base + uc->rchan->id) | 2276 K3_PSIL_DST_THREAD_ID_OFFSET; 2277 2278 irq_ring = uc->rflow->r_ring; 2279 irq_udma_idx = soc_data->oes.udma_rchan + uc->rchan->id; 2280 2281 ret = udma_tisci_rx_channel_config(uc); 2282 break; 2283 default: 2284 /* Can not happen */ 2285 dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n", 2286 __func__, uc->id, uc->config.dir); 2287 ret = -EINVAL; 2288 goto err_cleanup; 2289 2290 } 2291 2292 /* check if the channel configuration was successful */ 2293 if (ret) 2294 goto err_res_free; 2295 2296 if (udma_is_chan_running(uc)) { 2297 dev_warn(ud->dev, "chan%d: is running!\n", uc->id); 2298 udma_reset_chan(uc, false); 2299 if (udma_is_chan_running(uc)) { 2300 dev_err(ud->dev, "chan%d: won't stop!\n", uc->id); 2301 ret = -EBUSY; 2302 goto err_res_free; 2303 } 2304 } 2305 2306 /* PSI-L pairing */ 2307 ret = navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread); 2308 if (ret) { 2309 dev_err(ud->dev, "PSI-L pairing failed: 0x%04x -> 0x%04x\n", 2310 uc->config.src_thread, uc->config.dst_thread); 2311 goto err_res_free; 2312 } 2313 2314 uc->psil_paired = true; 2315 2316 uc->irq_num_ring = k3_ringacc_get_ring_irq_num(irq_ring); 2317 if (uc->irq_num_ring <= 0) { 2318 dev_err(ud->dev, "Failed to get ring irq (index: %u)\n", 2319 k3_ringacc_get_ring_id(irq_ring)); 2320 ret = -EINVAL; 2321 goto err_psi_free; 2322 } 2323 2324 ret = request_irq(uc->irq_num_ring, udma_ring_irq_handler, 2325 IRQF_TRIGGER_HIGH, uc->name, uc); 2326 if (ret) { 2327 dev_err(ud->dev, "chan%d: ring irq request failed\n", uc->id); 2328 goto err_irq_free; 2329 } 2330 2331 /* Event from UDMA (TR events) only needed for slave TR mode channels */ 2332 if (is_slave_direction(uc->config.dir) && !uc->config.pkt_mode) { 2333 uc->irq_num_udma = msi_get_virq(ud->dev, irq_udma_idx); 2334 if (uc->irq_num_udma <= 0) { 2335 dev_err(ud->dev, "Failed to get udma irq (index: %u)\n", 2336 irq_udma_idx); 2337 free_irq(uc->irq_num_ring, uc); 2338 ret = -EINVAL; 2339 goto err_irq_free; 2340 } 2341 2342 ret = request_irq(uc->irq_num_udma, udma_udma_irq_handler, 0, 2343 
uc->name, uc); 2344 if (ret) { 2345 dev_err(ud->dev, "chan%d: UDMA irq request failed\n", 2346 uc->id); 2347 free_irq(uc->irq_num_ring, uc); 2348 goto err_irq_free; 2349 } 2350 } else { 2351 uc->irq_num_udma = 0; 2352 } 2353 2354 udma_reset_rings(uc); 2355 2356 return 0; 2357 2358 err_irq_free: 2359 uc->irq_num_ring = 0; 2360 uc->irq_num_udma = 0; 2361 err_psi_free: 2362 navss_psil_unpair(ud, uc->config.src_thread, uc->config.dst_thread); 2363 uc->psil_paired = false; 2364 err_res_free: 2365 udma_free_tx_resources(uc); 2366 udma_free_rx_resources(uc); 2367 err_cleanup: 2368 udma_reset_uchan(uc); 2369 2370 if (uc->use_dma_pool) { 2371 dma_pool_destroy(uc->hdesc_pool); 2372 uc->use_dma_pool = false; 2373 } 2374 2375 return ret; 2376 } 2377 2378 static int bcdma_alloc_chan_resources(struct dma_chan *chan) 2379 { 2380 struct udma_chan *uc = to_udma_chan(chan); 2381 struct udma_dev *ud = to_udma_dev(chan->device); 2382 const struct udma_oes_offsets *oes = &ud->soc_data->oes; 2383 u32 irq_udma_idx, irq_ring_idx; 2384 int ret; 2385 2386 /* Only TR mode is supported */ 2387 uc->config.pkt_mode = false; 2388 2389 /* 2390 * Make sure that the completion is in a known state: 2391 * No teardown, the channel is idle 2392 */ 2393 reinit_completion(&uc->teardown_completed); 2394 complete_all(&uc->teardown_completed); 2395 uc->state = UDMA_CHAN_IS_IDLE; 2396 2397 switch (uc->config.dir) { 2398 case DMA_MEM_TO_MEM: 2399 /* Non synchronized - mem to mem type of transfer */ 2400 dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-MEM\n", __func__, 2401 uc->id); 2402 2403 ret = bcdma_alloc_bchan_resources(uc); 2404 if (ret) 2405 return ret; 2406 2407 irq_ring_idx = uc->bchan->id + oes->bcdma_bchan_ring; 2408 irq_udma_idx = uc->bchan->id + oes->bcdma_bchan_data; 2409 2410 ret = bcdma_tisci_m2m_channel_config(uc); 2411 break; 2412 case DMA_MEM_TO_DEV: 2413 /* Slave transfer synchronized - mem to dev (TX) trasnfer */ 2414 dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__, 2415 uc->id); 2416 2417 ret = udma_alloc_tx_resources(uc); 2418 if (ret) { 2419 uc->config.remote_thread_id = -1; 2420 return ret; 2421 } 2422 2423 uc->config.src_thread = ud->psil_base + uc->tchan->id; 2424 uc->config.dst_thread = uc->config.remote_thread_id; 2425 uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET; 2426 2427 irq_ring_idx = uc->tchan->id + oes->bcdma_tchan_ring; 2428 irq_udma_idx = uc->tchan->id + oes->bcdma_tchan_data; 2429 2430 ret = bcdma_tisci_tx_channel_config(uc); 2431 break; 2432 case DMA_DEV_TO_MEM: 2433 /* Slave transfer synchronized - dev to mem (RX) trasnfer */ 2434 dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__, 2435 uc->id); 2436 2437 ret = udma_alloc_rx_resources(uc); 2438 if (ret) { 2439 uc->config.remote_thread_id = -1; 2440 return ret; 2441 } 2442 2443 uc->config.src_thread = uc->config.remote_thread_id; 2444 uc->config.dst_thread = (ud->psil_base + uc->rchan->id) | 2445 K3_PSIL_DST_THREAD_ID_OFFSET; 2446 2447 irq_ring_idx = uc->rchan->id + oes->bcdma_rchan_ring; 2448 irq_udma_idx = uc->rchan->id + oes->bcdma_rchan_data; 2449 2450 ret = bcdma_tisci_rx_channel_config(uc); 2451 break; 2452 default: 2453 /* Can not happen */ 2454 dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n", 2455 __func__, uc->id, uc->config.dir); 2456 return -EINVAL; 2457 } 2458 2459 /* check if the channel configuration was successful */ 2460 if (ret) 2461 goto err_res_free; 2462 2463 if (udma_is_chan_running(uc)) { 2464 dev_warn(ud->dev, "chan%d: is running!\n", uc->id); 2465 udma_reset_chan(uc, false); 2466 if 
(udma_is_chan_running(uc)) { 2467 dev_err(ud->dev, "chan%d: won't stop!\n", uc->id); 2468 ret = -EBUSY; 2469 goto err_res_free; 2470 } 2471 } 2472 2473 uc->dma_dev = dmaengine_get_dma_device(chan); 2474 if (uc->config.dir == DMA_MEM_TO_MEM && !uc->config.tr_trigger_type) { 2475 uc->config.hdesc_size = cppi5_trdesc_calc_size( 2476 sizeof(struct cppi5_tr_type15_t), 2); 2477 2478 uc->hdesc_pool = dma_pool_create(uc->name, ud->ddev.dev, 2479 uc->config.hdesc_size, 2480 ud->desc_align, 2481 0); 2482 if (!uc->hdesc_pool) { 2483 dev_err(ud->ddev.dev, 2484 "Descriptor pool allocation failed\n"); 2485 uc->use_dma_pool = false; 2486 ret = -ENOMEM; 2487 goto err_res_free; 2488 } 2489 2490 uc->use_dma_pool = true; 2491 } else if (uc->config.dir != DMA_MEM_TO_MEM) { 2492 /* PSI-L pairing */ 2493 ret = navss_psil_pair(ud, uc->config.src_thread, 2494 uc->config.dst_thread); 2495 if (ret) { 2496 dev_err(ud->dev, 2497 "PSI-L pairing failed: 0x%04x -> 0x%04x\n", 2498 uc->config.src_thread, uc->config.dst_thread); 2499 goto err_res_free; 2500 } 2501 2502 uc->psil_paired = true; 2503 } 2504 2505 uc->irq_num_ring = msi_get_virq(ud->dev, irq_ring_idx); 2506 if (uc->irq_num_ring <= 0) { 2507 dev_err(ud->dev, "Failed to get ring irq (index: %u)\n", 2508 irq_ring_idx); 2509 ret = -EINVAL; 2510 goto err_psi_free; 2511 } 2512 2513 ret = request_irq(uc->irq_num_ring, udma_ring_irq_handler, 2514 IRQF_TRIGGER_HIGH, uc->name, uc); 2515 if (ret) { 2516 dev_err(ud->dev, "chan%d: ring irq request failed\n", uc->id); 2517 goto err_irq_free; 2518 } 2519 2520 /* Event from BCDMA (TR events) only needed for slave channels */ 2521 if (is_slave_direction(uc->config.dir)) { 2522 uc->irq_num_udma = msi_get_virq(ud->dev, irq_udma_idx); 2523 if (uc->irq_num_udma <= 0) { 2524 dev_err(ud->dev, "Failed to get bcdma irq (index: %u)\n", 2525 irq_udma_idx); 2526 free_irq(uc->irq_num_ring, uc); 2527 ret = -EINVAL; 2528 goto err_irq_free; 2529 } 2530 2531 ret = request_irq(uc->irq_num_udma, udma_udma_irq_handler, 0, 2532 uc->name, uc); 2533 if (ret) { 2534 dev_err(ud->dev, "chan%d: BCDMA irq request failed\n", 2535 uc->id); 2536 free_irq(uc->irq_num_ring, uc); 2537 goto err_irq_free; 2538 } 2539 } else { 2540 uc->irq_num_udma = 0; 2541 } 2542 2543 udma_reset_rings(uc); 2544 2545 INIT_DELAYED_WORK_ONSTACK(&uc->tx_drain.work, 2546 udma_check_tx_completion); 2547 return 0; 2548 2549 err_irq_free: 2550 uc->irq_num_ring = 0; 2551 uc->irq_num_udma = 0; 2552 err_psi_free: 2553 if (uc->psil_paired) 2554 navss_psil_unpair(ud, uc->config.src_thread, 2555 uc->config.dst_thread); 2556 uc->psil_paired = false; 2557 err_res_free: 2558 bcdma_free_bchan_resources(uc); 2559 udma_free_tx_resources(uc); 2560 udma_free_rx_resources(uc); 2561 2562 udma_reset_uchan(uc); 2563 2564 if (uc->use_dma_pool) { 2565 dma_pool_destroy(uc->hdesc_pool); 2566 uc->use_dma_pool = false; 2567 } 2568 2569 return ret; 2570 } 2571 2572 static int bcdma_router_config(struct dma_chan *chan) 2573 { 2574 struct k3_event_route_data *router_data = chan->route_data; 2575 struct udma_chan *uc = to_udma_chan(chan); 2576 u32 trigger_event; 2577 2578 if (!uc->bchan) 2579 return -EINVAL; 2580 2581 if (uc->config.tr_trigger_type != 1 && uc->config.tr_trigger_type != 2) 2582 return -EINVAL; 2583 2584 trigger_event = uc->ud->soc_data->bcdma_trigger_event_offset; 2585 trigger_event += (uc->bchan->id * 2) + uc->config.tr_trigger_type - 1; 2586 2587 return router_data->set_event(router_data->priv, trigger_event); 2588 } 2589 2590 static int pktdma_alloc_chan_resources(struct dma_chan *chan) 2591 { 
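	/*
	 * PKTDMA channels only support slave transfers: depending on the
	 * direction a tchan + tflow or an rchan + rflow is reserved, the
	 * completion interrupt comes from the flow's ring event and host
	 * descriptors are always allocated from a per-channel dma_pool.
	 */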
	struct udma_chan *uc = to_udma_chan(chan);
	struct udma_dev *ud = to_udma_dev(chan->device);
	const struct udma_oes_offsets *oes = &ud->soc_data->oes;
	u32 irq_ring_idx;
	int ret;

	/*
	 * Make sure that the completion is in a known state:
	 * No teardown, the channel is idle
	 */
	reinit_completion(&uc->teardown_completed);
	complete_all(&uc->teardown_completed);
	uc->state = UDMA_CHAN_IS_IDLE;

	switch (uc->config.dir) {
	case DMA_MEM_TO_DEV:
		/* Slave transfer synchronized - mem to dev (TX) transfer */
		dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__,
			uc->id);

		ret = udma_alloc_tx_resources(uc);
		if (ret) {
			uc->config.remote_thread_id = -1;
			return ret;
		}

		uc->config.src_thread = ud->psil_base + uc->tchan->id;
		uc->config.dst_thread = uc->config.remote_thread_id;
		uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;

		irq_ring_idx = uc->tchan->tflow_id + oes->pktdma_tchan_flow;

		ret = pktdma_tisci_tx_channel_config(uc);
		break;
	case DMA_DEV_TO_MEM:
		/* Slave transfer synchronized - dev to mem (RX) transfer */
		dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__,
			uc->id);

		ret = udma_alloc_rx_resources(uc);
		if (ret) {
			uc->config.remote_thread_id = -1;
			return ret;
		}

		uc->config.src_thread = uc->config.remote_thread_id;
		uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
					K3_PSIL_DST_THREAD_ID_OFFSET;

		irq_ring_idx = uc->rflow->id + oes->pktdma_rchan_flow;

		ret = pktdma_tisci_rx_channel_config(uc);
		break;
	default:
		/* Can not happen */
		dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
			__func__, uc->id, uc->config.dir);
		return -EINVAL;
	}

	/* check if the channel configuration was successful */
	if (ret)
		goto err_res_free;

	if (udma_is_chan_running(uc)) {
		dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
		udma_reset_chan(uc, false);
		if (udma_is_chan_running(uc)) {
			dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
			ret = -EBUSY;
			goto err_res_free;
		}
	}

	uc->dma_dev = dmaengine_get_dma_device(chan);
	uc->hdesc_pool = dma_pool_create(uc->name, uc->dma_dev,
					 uc->config.hdesc_size, ud->desc_align,
					 0);
	if (!uc->hdesc_pool) {
		dev_err(ud->ddev.dev,
			"Descriptor pool allocation failed\n");
		uc->use_dma_pool = false;
		ret = -ENOMEM;
		goto err_res_free;
	}

	uc->use_dma_pool = true;

	/* PSI-L pairing */
	ret = navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread);
	if (ret) {
		dev_err(ud->dev, "PSI-L pairing failed: 0x%04x -> 0x%04x\n",
			uc->config.src_thread, uc->config.dst_thread);
		goto err_res_free;
	}

	uc->psil_paired = true;

	uc->irq_num_ring = msi_get_virq(ud->dev, irq_ring_idx);
	if (uc->irq_num_ring <= 0) {
		dev_err(ud->dev, "Failed to get ring irq (index: %u)\n",
			irq_ring_idx);
		ret = -EINVAL;
		goto err_psi_free;
	}

	ret = request_irq(uc->irq_num_ring, udma_ring_irq_handler,
			  IRQF_TRIGGER_HIGH, uc->name, uc);
	if (ret) {
		dev_err(ud->dev, "chan%d: ring irq request failed\n", uc->id);
		goto err_irq_free;
	}

	uc->irq_num_udma = 0;

	udma_reset_rings(uc);

	INIT_DELAYED_WORK_ONSTACK(&uc->tx_drain.work,
				  udma_check_tx_completion);

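	/* Report which channel/flow resources ended up backing this channel */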
	if (uc->tchan)
		dev_dbg(ud->dev,
			"chan%d: tchan%d, tflow%d, Remote thread: 0x%04x\n",
			uc->id, uc->tchan->id, uc->tchan->tflow_id,
			uc->config.remote_thread_id);
	else if (uc->rchan)
		dev_dbg(ud->dev,
			"chan%d: rchan%d, rflow%d, Remote thread: 0x%04x\n",
			uc->id, uc->rchan->id, uc->rflow->id,
			uc->config.remote_thread_id);
	return 0;

err_irq_free:
	uc->irq_num_ring = 0;
err_psi_free:
	navss_psil_unpair(ud, uc->config.src_thread, uc->config.dst_thread);
	uc->psil_paired = false;
err_res_free:
	udma_free_tx_resources(uc);
	udma_free_rx_resources(uc);

	udma_reset_uchan(uc);

	dma_pool_destroy(uc->hdesc_pool);
	uc->use_dma_pool = false;

	return ret;
}

static int udma_slave_config(struct dma_chan *chan,
			     struct dma_slave_config *cfg)
{
	struct udma_chan *uc = to_udma_chan(chan);

	memcpy(&uc->cfg, cfg, sizeof(uc->cfg));

	return 0;
}

static struct udma_desc *udma_alloc_tr_desc(struct udma_chan *uc,
					    size_t tr_size, int tr_count,
					    enum dma_transfer_direction dir)
{
	struct udma_hwdesc *hwdesc;
	struct cppi5_desc_hdr_t *tr_desc;
	struct udma_desc *d;
	u32 reload_count = 0;
	u32 ring_id;

	switch (tr_size) {
	case 16:
	case 32:
	case 64:
	case 128:
		break;
	default:
		dev_err(uc->ud->dev, "Unsupported TR size of %zu\n", tr_size);
		return NULL;
	}

	/* We have only one descriptor containing multiple TRs */
	d = kzalloc(sizeof(*d) + sizeof(d->hwdesc[0]), GFP_NOWAIT);
	if (!d)
		return NULL;

	d->sglen = tr_count;

	d->hwdesc_count = 1;
	hwdesc = &d->hwdesc[0];

	/* Allocate memory for DMA ring descriptor */
	if (uc->use_dma_pool) {
		hwdesc->cppi5_desc_size = uc->config.hdesc_size;
		hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
							   GFP_NOWAIT,
							   &hwdesc->cppi5_desc_paddr);
	} else {
		hwdesc->cppi5_desc_size = cppi5_trdesc_calc_size(tr_size,
								 tr_count);
		hwdesc->cppi5_desc_size = ALIGN(hwdesc->cppi5_desc_size,
						uc->ud->desc_align);
		hwdesc->cppi5_desc_vaddr = dma_alloc_coherent(uc->ud->dev,
							      hwdesc->cppi5_desc_size,
							      &hwdesc->cppi5_desc_paddr,
							      GFP_NOWAIT);
	}

	if (!hwdesc->cppi5_desc_vaddr) {
		kfree(d);
		return NULL;
	}

	/* Start of the TR req records */
	hwdesc->tr_req_base = hwdesc->cppi5_desc_vaddr + tr_size;
	/* Start address of the TR response array */
	hwdesc->tr_resp_base = hwdesc->tr_req_base + tr_size * tr_count;

	tr_desc = hwdesc->cppi5_desc_vaddr;

	if (uc->cyclic)
		reload_count = CPPI5_INFO0_TRDESC_RLDCNT_INFINITE;

	if (dir == DMA_DEV_TO_MEM)
		ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
	else
		ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);

	cppi5_trdesc_init(tr_desc, tr_count, tr_size, 0, reload_count);
	cppi5_desc_set_pktids(tr_desc, uc->id,
			      CPPI5_INFO1_DESC_FLOWID_DEFAULT);
	cppi5_desc_set_retpolicy(tr_desc, 0, ring_id);

	return d;
}

/**
 * udma_get_tr_counters - calculate TR counters for a given length
 * @len: Length of the transfer
 * @align_to: Preferred alignment
 * @tr0_cnt0: First TR icnt0
 * @tr0_cnt1: First TR icnt1
 * @tr1_cnt0: Second (if used) TR icnt0
 *
 * For len < SZ_64K only one TR is enough, tr1_cnt0 is not updated
 * For len >=
SZ_64K two TRs are used in a simple way: 2837 * First TR: SZ_64K-alignment blocks (tr0_cnt0, tr0_cnt1) 2838 * Second TR: the remaining length (tr1_cnt0) 2839 * 2840 * Returns the number of TRs the length needs (1 or 2) 2841 * -EINVAL if the length can not be supported 2842 */ 2843 static int udma_get_tr_counters(size_t len, unsigned long align_to, 2844 u16 *tr0_cnt0, u16 *tr0_cnt1, u16 *tr1_cnt0) 2845 { 2846 if (len < SZ_64K) { 2847 *tr0_cnt0 = len; 2848 *tr0_cnt1 = 1; 2849 2850 return 1; 2851 } 2852 2853 if (align_to > 3) 2854 align_to = 3; 2855 2856 realign: 2857 *tr0_cnt0 = SZ_64K - BIT(align_to); 2858 if (len / *tr0_cnt0 >= SZ_64K) { 2859 if (align_to) { 2860 align_to--; 2861 goto realign; 2862 } 2863 return -EINVAL; 2864 } 2865 2866 *tr0_cnt1 = len / *tr0_cnt0; 2867 *tr1_cnt0 = len % *tr0_cnt0; 2868 2869 return 2; 2870 } 2871 2872 static struct udma_desc * 2873 udma_prep_slave_sg_tr(struct udma_chan *uc, struct scatterlist *sgl, 2874 unsigned int sglen, enum dma_transfer_direction dir, 2875 unsigned long tx_flags, void *context) 2876 { 2877 struct scatterlist *sgent; 2878 struct udma_desc *d; 2879 struct cppi5_tr_type1_t *tr_req = NULL; 2880 u16 tr0_cnt0, tr0_cnt1, tr1_cnt0; 2881 unsigned int i; 2882 size_t tr_size; 2883 int num_tr = 0; 2884 int tr_idx = 0; 2885 u64 asel; 2886 2887 /* estimate the number of TRs we will need */ 2888 for_each_sg(sgl, sgent, sglen, i) { 2889 if (sg_dma_len(sgent) < SZ_64K) 2890 num_tr++; 2891 else 2892 num_tr += 2; 2893 } 2894 2895 /* Now allocate and setup the descriptor. */ 2896 tr_size = sizeof(struct cppi5_tr_type1_t); 2897 d = udma_alloc_tr_desc(uc, tr_size, num_tr, dir); 2898 if (!d) 2899 return NULL; 2900 2901 d->sglen = sglen; 2902 2903 if (uc->ud->match_data->type == DMA_TYPE_UDMA) 2904 asel = 0; 2905 else 2906 asel = (u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT; 2907 2908 tr_req = d->hwdesc[0].tr_req_base; 2909 for_each_sg(sgl, sgent, sglen, i) { 2910 dma_addr_t sg_addr = sg_dma_address(sgent); 2911 2912 num_tr = udma_get_tr_counters(sg_dma_len(sgent), __ffs(sg_addr), 2913 &tr0_cnt0, &tr0_cnt1, &tr1_cnt0); 2914 if (num_tr < 0) { 2915 dev_err(uc->ud->dev, "size %u is not supported\n", 2916 sg_dma_len(sgent)); 2917 udma_free_hwdesc(uc, d); 2918 kfree(d); 2919 return NULL; 2920 } 2921 2922 cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1, false, 2923 false, CPPI5_TR_EVENT_SIZE_COMPLETION, 0); 2924 cppi5_tr_csf_set(&tr_req[tr_idx].flags, CPPI5_TR_CSF_SUPR_EVT); 2925 2926 sg_addr |= asel; 2927 tr_req[tr_idx].addr = sg_addr; 2928 tr_req[tr_idx].icnt0 = tr0_cnt0; 2929 tr_req[tr_idx].icnt1 = tr0_cnt1; 2930 tr_req[tr_idx].dim1 = tr0_cnt0; 2931 tr_idx++; 2932 2933 if (num_tr == 2) { 2934 cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1, 2935 false, false, 2936 CPPI5_TR_EVENT_SIZE_COMPLETION, 0); 2937 cppi5_tr_csf_set(&tr_req[tr_idx].flags, 2938 CPPI5_TR_CSF_SUPR_EVT); 2939 2940 tr_req[tr_idx].addr = sg_addr + tr0_cnt1 * tr0_cnt0; 2941 tr_req[tr_idx].icnt0 = tr1_cnt0; 2942 tr_req[tr_idx].icnt1 = 1; 2943 tr_req[tr_idx].dim1 = tr1_cnt0; 2944 tr_idx++; 2945 } 2946 2947 d->residue += sg_dma_len(sgent); 2948 } 2949 2950 cppi5_tr_csf_set(&tr_req[tr_idx - 1].flags, 2951 CPPI5_TR_CSF_SUPR_EVT | CPPI5_TR_CSF_EOP); 2952 2953 return d; 2954 } 2955 2956 static struct udma_desc * 2957 udma_prep_slave_sg_triggered_tr(struct udma_chan *uc, struct scatterlist *sgl, 2958 unsigned int sglen, 2959 enum dma_transfer_direction dir, 2960 unsigned long tx_flags, void *context) 2961 { 2962 struct scatterlist *sgent; 2963 struct cppi5_tr_type15_t *tr_req = NULL; 2964 enum 
dma_slave_buswidth dev_width; 2965 u16 tr_cnt0, tr_cnt1; 2966 dma_addr_t dev_addr; 2967 struct udma_desc *d; 2968 unsigned int i; 2969 size_t tr_size, sg_len; 2970 int num_tr = 0; 2971 int tr_idx = 0; 2972 u32 burst, trigger_size, port_window; 2973 u64 asel; 2974 2975 if (dir == DMA_DEV_TO_MEM) { 2976 dev_addr = uc->cfg.src_addr; 2977 dev_width = uc->cfg.src_addr_width; 2978 burst = uc->cfg.src_maxburst; 2979 port_window = uc->cfg.src_port_window_size; 2980 } else if (dir == DMA_MEM_TO_DEV) { 2981 dev_addr = uc->cfg.dst_addr; 2982 dev_width = uc->cfg.dst_addr_width; 2983 burst = uc->cfg.dst_maxburst; 2984 port_window = uc->cfg.dst_port_window_size; 2985 } else { 2986 dev_err(uc->ud->dev, "%s: bad direction?\n", __func__); 2987 return NULL; 2988 } 2989 2990 if (!burst) 2991 burst = 1; 2992 2993 if (port_window) { 2994 if (port_window != burst) { 2995 dev_err(uc->ud->dev, 2996 "The burst must be equal to port_window\n"); 2997 return NULL; 2998 } 2999 3000 tr_cnt0 = dev_width * port_window; 3001 tr_cnt1 = 1; 3002 } else { 3003 tr_cnt0 = dev_width; 3004 tr_cnt1 = burst; 3005 } 3006 trigger_size = tr_cnt0 * tr_cnt1; 3007 3008 /* estimate the number of TRs we will need */ 3009 for_each_sg(sgl, sgent, sglen, i) { 3010 sg_len = sg_dma_len(sgent); 3011 3012 if (sg_len % trigger_size) { 3013 dev_err(uc->ud->dev, 3014 "Not aligned SG entry (%zu for %u)\n", sg_len, 3015 trigger_size); 3016 return NULL; 3017 } 3018 3019 if (sg_len / trigger_size < SZ_64K) 3020 num_tr++; 3021 else 3022 num_tr += 2; 3023 } 3024 3025 /* Now allocate and setup the descriptor. */ 3026 tr_size = sizeof(struct cppi5_tr_type15_t); 3027 d = udma_alloc_tr_desc(uc, tr_size, num_tr, dir); 3028 if (!d) 3029 return NULL; 3030 3031 d->sglen = sglen; 3032 3033 if (uc->ud->match_data->type == DMA_TYPE_UDMA) { 3034 asel = 0; 3035 } else { 3036 asel = (u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT; 3037 dev_addr |= asel; 3038 } 3039 3040 tr_req = d->hwdesc[0].tr_req_base; 3041 for_each_sg(sgl, sgent, sglen, i) { 3042 u16 tr0_cnt2, tr0_cnt3, tr1_cnt2; 3043 dma_addr_t sg_addr = sg_dma_address(sgent); 3044 3045 sg_len = sg_dma_len(sgent); 3046 num_tr = udma_get_tr_counters(sg_len / trigger_size, 0, 3047 &tr0_cnt2, &tr0_cnt3, &tr1_cnt2); 3048 if (num_tr < 0) { 3049 dev_err(uc->ud->dev, "size %zu is not supported\n", 3050 sg_len); 3051 udma_free_hwdesc(uc, d); 3052 kfree(d); 3053 return NULL; 3054 } 3055 3056 cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE15, false, 3057 true, CPPI5_TR_EVENT_SIZE_COMPLETION, 0); 3058 cppi5_tr_csf_set(&tr_req[tr_idx].flags, CPPI5_TR_CSF_SUPR_EVT); 3059 cppi5_tr_set_trigger(&tr_req[tr_idx].flags, 3060 uc->config.tr_trigger_type, 3061 CPPI5_TR_TRIGGER_TYPE_ICNT2_DEC, 0, 0); 3062 3063 sg_addr |= asel; 3064 if (dir == DMA_DEV_TO_MEM) { 3065 tr_req[tr_idx].addr = dev_addr; 3066 tr_req[tr_idx].icnt0 = tr_cnt0; 3067 tr_req[tr_idx].icnt1 = tr_cnt1; 3068 tr_req[tr_idx].icnt2 = tr0_cnt2; 3069 tr_req[tr_idx].icnt3 = tr0_cnt3; 3070 tr_req[tr_idx].dim1 = (-1) * tr_cnt0; 3071 3072 tr_req[tr_idx].daddr = sg_addr; 3073 tr_req[tr_idx].dicnt0 = tr_cnt0; 3074 tr_req[tr_idx].dicnt1 = tr_cnt1; 3075 tr_req[tr_idx].dicnt2 = tr0_cnt2; 3076 tr_req[tr_idx].dicnt3 = tr0_cnt3; 3077 tr_req[tr_idx].ddim1 = tr_cnt0; 3078 tr_req[tr_idx].ddim2 = trigger_size; 3079 tr_req[tr_idx].ddim3 = trigger_size * tr0_cnt2; 3080 } else { 3081 tr_req[tr_idx].addr = sg_addr; 3082 tr_req[tr_idx].icnt0 = tr_cnt0; 3083 tr_req[tr_idx].icnt1 = tr_cnt1; 3084 tr_req[tr_idx].icnt2 = tr0_cnt2; 3085 tr_req[tr_idx].icnt3 = tr0_cnt3; 3086 tr_req[tr_idx].dim1 = 
tr_cnt0;
			tr_req[tr_idx].dim2 = trigger_size;
			tr_req[tr_idx].dim3 = trigger_size * tr0_cnt2;

			tr_req[tr_idx].daddr = dev_addr;
			tr_req[tr_idx].dicnt0 = tr_cnt0;
			tr_req[tr_idx].dicnt1 = tr_cnt1;
			tr_req[tr_idx].dicnt2 = tr0_cnt2;
			tr_req[tr_idx].dicnt3 = tr0_cnt3;
			tr_req[tr_idx].ddim1 = (-1) * tr_cnt0;
		}

		tr_idx++;

		if (num_tr == 2) {
			cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE15,
				      false, true,
				      CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
			cppi5_tr_csf_set(&tr_req[tr_idx].flags,
					 CPPI5_TR_CSF_SUPR_EVT);
			cppi5_tr_set_trigger(&tr_req[tr_idx].flags,
					     uc->config.tr_trigger_type,
					     CPPI5_TR_TRIGGER_TYPE_ICNT2_DEC,
					     0, 0);

			sg_addr += trigger_size * tr0_cnt2 * tr0_cnt3;
			if (dir == DMA_DEV_TO_MEM) {
				tr_req[tr_idx].addr = dev_addr;
				tr_req[tr_idx].icnt0 = tr_cnt0;
				tr_req[tr_idx].icnt1 = tr_cnt1;
				tr_req[tr_idx].icnt2 = tr1_cnt2;
				tr_req[tr_idx].icnt3 = 1;
				tr_req[tr_idx].dim1 = (-1) * tr_cnt0;

				tr_req[tr_idx].daddr = sg_addr;
				tr_req[tr_idx].dicnt0 = tr_cnt0;
				tr_req[tr_idx].dicnt1 = tr_cnt1;
				tr_req[tr_idx].dicnt2 = tr1_cnt2;
				tr_req[tr_idx].dicnt3 = 1;
				tr_req[tr_idx].ddim1 = tr_cnt0;
				tr_req[tr_idx].ddim2 = trigger_size;
			} else {
				tr_req[tr_idx].addr = sg_addr;
				tr_req[tr_idx].icnt0 = tr_cnt0;
				tr_req[tr_idx].icnt1 = tr_cnt1;
				tr_req[tr_idx].icnt2 = tr1_cnt2;
				tr_req[tr_idx].icnt3 = 1;
				tr_req[tr_idx].dim1 = tr_cnt0;
				tr_req[tr_idx].dim2 = trigger_size;

				tr_req[tr_idx].daddr = dev_addr;
				tr_req[tr_idx].dicnt0 = tr_cnt0;
				tr_req[tr_idx].dicnt1 = tr_cnt1;
				tr_req[tr_idx].dicnt2 = tr1_cnt2;
				tr_req[tr_idx].dicnt3 = 1;
				tr_req[tr_idx].ddim1 = (-1) * tr_cnt0;
			}
			tr_idx++;
		}

		d->residue += sg_len;
	}

	cppi5_tr_csf_set(&tr_req[tr_idx - 1].flags,
			 CPPI5_TR_CSF_SUPR_EVT | CPPI5_TR_CSF_EOP);

	return d;
}

static int udma_configure_statictr(struct udma_chan *uc, struct udma_desc *d,
				   enum dma_slave_buswidth dev_width,
				   u16 elcnt)
{
	if (uc->config.ep_type != PSIL_EP_PDMA_XY)
		return 0;

	/* Bus width translates to the element size (ES) */
	switch (dev_width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		d->static_tr.elsize = 0;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		d->static_tr.elsize = 1;
		break;
	case DMA_SLAVE_BUSWIDTH_3_BYTES:
		d->static_tr.elsize = 2;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		d->static_tr.elsize = 3;
		break;
	case DMA_SLAVE_BUSWIDTH_8_BYTES:
		d->static_tr.elsize = 4;
		break;
	default: /* not reached */
		return -EINVAL;
	}

	d->static_tr.elcnt = elcnt;

	/*
	 * PDMA must close the packet when the channel is in packet mode.
	 * For TR mode when the channel is not cyclic we also need PDMA to close
	 * the packet otherwise the transfer will stall because PDMA holds on
	 * the data it has received from the peripheral.
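	 * The burst count programmed here is the number of bursts of
	 * dev_width * elcnt bytes after which PDMA closes the packet:
	 * one period worth for cyclic transfers, the complete transfer
	 * otherwise.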
	 */
	if (uc->config.pkt_mode || !uc->cyclic) {
		unsigned int div = dev_width * elcnt;

		if (uc->cyclic)
			d->static_tr.bstcnt = d->residue / d->sglen / div;
		else
			d->static_tr.bstcnt = d->residue / div;

		if (uc->config.dir == DMA_DEV_TO_MEM &&
		    d->static_tr.bstcnt > uc->ud->match_data->statictr_z_mask)
			return -EINVAL;
	} else {
		d->static_tr.bstcnt = 0;
	}

	return 0;
}

static struct udma_desc *
udma_prep_slave_sg_pkt(struct udma_chan *uc, struct scatterlist *sgl,
		       unsigned int sglen, enum dma_transfer_direction dir,
		       unsigned long tx_flags, void *context)
{
	struct scatterlist *sgent;
	struct cppi5_host_desc_t *h_desc = NULL;
	struct udma_desc *d;
	u32 ring_id;
	unsigned int i;
	u64 asel;

	d = kzalloc(struct_size(d, hwdesc, sglen), GFP_NOWAIT);
	if (!d)
		return NULL;

	d->sglen = sglen;
	d->hwdesc_count = sglen;

	if (dir == DMA_DEV_TO_MEM)
		ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
	else
		ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);

	if (uc->ud->match_data->type == DMA_TYPE_UDMA)
		asel = 0;
	else
		asel = (u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT;

	for_each_sg(sgl, sgent, sglen, i) {
		struct udma_hwdesc *hwdesc = &d->hwdesc[i];
		dma_addr_t sg_addr = sg_dma_address(sgent);
		struct cppi5_host_desc_t *desc;
		size_t sg_len = sg_dma_len(sgent);

		hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
							   GFP_NOWAIT,
							   &hwdesc->cppi5_desc_paddr);
		if (!hwdesc->cppi5_desc_vaddr) {
			dev_err(uc->ud->dev,
				"descriptor%d allocation failed\n", i);

			udma_free_hwdesc(uc, d);
			kfree(d);
			return NULL;
		}

		d->residue += sg_len;
		hwdesc->cppi5_desc_size = uc->config.hdesc_size;
		desc = hwdesc->cppi5_desc_vaddr;

		if (i == 0) {
			cppi5_hdesc_init(desc, 0, 0);
			/* Flow and Packet ID */
			cppi5_desc_set_pktids(&desc->hdr, uc->id,
					      CPPI5_INFO1_DESC_FLOWID_DEFAULT);
			cppi5_desc_set_retpolicy(&desc->hdr, 0, ring_id);
		} else {
			cppi5_hdesc_reset_hbdesc(desc);
			cppi5_desc_set_retpolicy(&desc->hdr, 0, 0xffff);
		}

		/* attach the sg buffer to the descriptor */
		sg_addr |= asel;
		cppi5_hdesc_attach_buf(desc, sg_addr, sg_len, sg_addr, sg_len);

		/* Attach link as host buffer descriptor */
		if (h_desc)
			cppi5_hdesc_link_hbdesc(h_desc,
						hwdesc->cppi5_desc_paddr | asel);

		if (uc->ud->match_data->type == DMA_TYPE_PKTDMA ||
		    dir == DMA_MEM_TO_DEV)
			h_desc = desc;
	}

	if (d->residue >= SZ_4M) {
		dev_err(uc->ud->dev,
			"%s: Transfer size %u is over the supported 4M range\n",
			__func__, d->residue);
		udma_free_hwdesc(uc, d);
		kfree(d);
		return NULL;
	}

	h_desc = d->hwdesc[0].cppi5_desc_vaddr;
	cppi5_hdesc_set_pktlen(h_desc, d->residue);

	return d;
}

static int udma_attach_metadata(struct dma_async_tx_descriptor *desc,
				void *data, size_t len)
{
	struct udma_desc *d = to_udma_desc(desc);
	struct udma_chan *uc = to_udma_chan(desc->chan);
	struct cppi5_host_desc_t *h_desc;
	u32 psd_size = len;
	u32 flags = 0;

	if (!uc->config.pkt_mode || !uc->config.metadata_size)
		return -ENOTSUPP;

	if (!data || len > uc->config.metadata_size)
		return -EINVAL;

	if (uc->config.needs_epib && len <
CPPI5_INFO0_HDESC_EPIB_SIZE) 3316 return -EINVAL; 3317 3318 h_desc = d->hwdesc[0].cppi5_desc_vaddr; 3319 if (d->dir == DMA_MEM_TO_DEV) 3320 memcpy(h_desc->epib, data, len); 3321 3322 if (uc->config.needs_epib) 3323 psd_size -= CPPI5_INFO0_HDESC_EPIB_SIZE; 3324 3325 d->metadata = data; 3326 d->metadata_size = len; 3327 if (uc->config.needs_epib) 3328 flags |= CPPI5_INFO0_HDESC_EPIB_PRESENT; 3329 3330 cppi5_hdesc_update_flags(h_desc, flags); 3331 cppi5_hdesc_update_psdata_size(h_desc, psd_size); 3332 3333 return 0; 3334 } 3335 3336 static void *udma_get_metadata_ptr(struct dma_async_tx_descriptor *desc, 3337 size_t *payload_len, size_t *max_len) 3338 { 3339 struct udma_desc *d = to_udma_desc(desc); 3340 struct udma_chan *uc = to_udma_chan(desc->chan); 3341 struct cppi5_host_desc_t *h_desc; 3342 3343 if (!uc->config.pkt_mode || !uc->config.metadata_size) 3344 return ERR_PTR(-ENOTSUPP); 3345 3346 h_desc = d->hwdesc[0].cppi5_desc_vaddr; 3347 3348 *max_len = uc->config.metadata_size; 3349 3350 *payload_len = cppi5_hdesc_epib_present(&h_desc->hdr) ? 3351 CPPI5_INFO0_HDESC_EPIB_SIZE : 0; 3352 *payload_len += cppi5_hdesc_get_psdata_size(h_desc); 3353 3354 return h_desc->epib; 3355 } 3356 3357 static int udma_set_metadata_len(struct dma_async_tx_descriptor *desc, 3358 size_t payload_len) 3359 { 3360 struct udma_desc *d = to_udma_desc(desc); 3361 struct udma_chan *uc = to_udma_chan(desc->chan); 3362 struct cppi5_host_desc_t *h_desc; 3363 u32 psd_size = payload_len; 3364 u32 flags = 0; 3365 3366 if (!uc->config.pkt_mode || !uc->config.metadata_size) 3367 return -ENOTSUPP; 3368 3369 if (payload_len > uc->config.metadata_size) 3370 return -EINVAL; 3371 3372 if (uc->config.needs_epib && payload_len < CPPI5_INFO0_HDESC_EPIB_SIZE) 3373 return -EINVAL; 3374 3375 h_desc = d->hwdesc[0].cppi5_desc_vaddr; 3376 3377 if (uc->config.needs_epib) { 3378 psd_size -= CPPI5_INFO0_HDESC_EPIB_SIZE; 3379 flags |= CPPI5_INFO0_HDESC_EPIB_PRESENT; 3380 } 3381 3382 cppi5_hdesc_update_flags(h_desc, flags); 3383 cppi5_hdesc_update_psdata_size(h_desc, psd_size); 3384 3385 return 0; 3386 } 3387 3388 static struct dma_descriptor_metadata_ops metadata_ops = { 3389 .attach = udma_attach_metadata, 3390 .get_ptr = udma_get_metadata_ptr, 3391 .set_len = udma_set_metadata_len, 3392 }; 3393 3394 static struct dma_async_tx_descriptor * 3395 udma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, 3396 unsigned int sglen, enum dma_transfer_direction dir, 3397 unsigned long tx_flags, void *context) 3398 { 3399 struct udma_chan *uc = to_udma_chan(chan); 3400 enum dma_slave_buswidth dev_width; 3401 struct udma_desc *d; 3402 u32 burst; 3403 3404 if (dir != uc->config.dir && 3405 (uc->config.dir == DMA_MEM_TO_MEM && !uc->config.tr_trigger_type)) { 3406 dev_err(chan->device->dev, 3407 "%s: chan%d is for %s, not supporting %s\n", 3408 __func__, uc->id, 3409 dmaengine_get_direction_text(uc->config.dir), 3410 dmaengine_get_direction_text(dir)); 3411 return NULL; 3412 } 3413 3414 if (dir == DMA_DEV_TO_MEM) { 3415 dev_width = uc->cfg.src_addr_width; 3416 burst = uc->cfg.src_maxburst; 3417 } else if (dir == DMA_MEM_TO_DEV) { 3418 dev_width = uc->cfg.dst_addr_width; 3419 burst = uc->cfg.dst_maxburst; 3420 } else { 3421 dev_err(chan->device->dev, "%s: bad direction?\n", __func__); 3422 return NULL; 3423 } 3424 3425 if (!burst) 3426 burst = 1; 3427 3428 uc->config.tx_flags = tx_flags; 3429 3430 if (uc->config.pkt_mode) 3431 d = udma_prep_slave_sg_pkt(uc, sgl, sglen, dir, tx_flags, 3432 context); 3433 else if (is_slave_direction(uc->config.dir)) 
3434 d = udma_prep_slave_sg_tr(uc, sgl, sglen, dir, tx_flags, 3435 context); 3436 else 3437 d = udma_prep_slave_sg_triggered_tr(uc, sgl, sglen, dir, 3438 tx_flags, context); 3439 3440 if (!d) 3441 return NULL; 3442 3443 d->dir = dir; 3444 d->desc_idx = 0; 3445 d->tr_idx = 0; 3446 3447 /* static TR for remote PDMA */ 3448 if (udma_configure_statictr(uc, d, dev_width, burst)) { 3449 dev_err(uc->ud->dev, 3450 "%s: StaticTR Z is limited to maximum 4095 (%u)\n", 3451 __func__, d->static_tr.bstcnt); 3452 3453 udma_free_hwdesc(uc, d); 3454 kfree(d); 3455 return NULL; 3456 } 3457 3458 if (uc->config.metadata_size) 3459 d->vd.tx.metadata_ops = &metadata_ops; 3460 3461 return vchan_tx_prep(&uc->vc, &d->vd, tx_flags); 3462 } 3463 3464 static struct udma_desc * 3465 udma_prep_dma_cyclic_tr(struct udma_chan *uc, dma_addr_t buf_addr, 3466 size_t buf_len, size_t period_len, 3467 enum dma_transfer_direction dir, unsigned long flags) 3468 { 3469 struct udma_desc *d; 3470 size_t tr_size, period_addr; 3471 struct cppi5_tr_type1_t *tr_req; 3472 unsigned int periods = buf_len / period_len; 3473 u16 tr0_cnt0, tr0_cnt1, tr1_cnt0; 3474 unsigned int i; 3475 int num_tr; 3476 3477 num_tr = udma_get_tr_counters(period_len, __ffs(buf_addr), &tr0_cnt0, 3478 &tr0_cnt1, &tr1_cnt0); 3479 if (num_tr < 0) { 3480 dev_err(uc->ud->dev, "size %zu is not supported\n", 3481 period_len); 3482 return NULL; 3483 } 3484 3485 /* Now allocate and setup the descriptor. */ 3486 tr_size = sizeof(struct cppi5_tr_type1_t); 3487 d = udma_alloc_tr_desc(uc, tr_size, periods * num_tr, dir); 3488 if (!d) 3489 return NULL; 3490 3491 tr_req = d->hwdesc[0].tr_req_base; 3492 if (uc->ud->match_data->type == DMA_TYPE_UDMA) 3493 period_addr = buf_addr; 3494 else 3495 period_addr = buf_addr | 3496 ((u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT); 3497 3498 for (i = 0; i < periods; i++) { 3499 int tr_idx = i * num_tr; 3500 3501 cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1, false, 3502 false, CPPI5_TR_EVENT_SIZE_COMPLETION, 0); 3503 3504 tr_req[tr_idx].addr = period_addr; 3505 tr_req[tr_idx].icnt0 = tr0_cnt0; 3506 tr_req[tr_idx].icnt1 = tr0_cnt1; 3507 tr_req[tr_idx].dim1 = tr0_cnt0; 3508 3509 if (num_tr == 2) { 3510 cppi5_tr_csf_set(&tr_req[tr_idx].flags, 3511 CPPI5_TR_CSF_SUPR_EVT); 3512 tr_idx++; 3513 3514 cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1, 3515 false, false, 3516 CPPI5_TR_EVENT_SIZE_COMPLETION, 0); 3517 3518 tr_req[tr_idx].addr = period_addr + tr0_cnt1 * tr0_cnt0; 3519 tr_req[tr_idx].icnt0 = tr1_cnt0; 3520 tr_req[tr_idx].icnt1 = 1; 3521 tr_req[tr_idx].dim1 = tr1_cnt0; 3522 } 3523 3524 if (!(flags & DMA_PREP_INTERRUPT)) 3525 cppi5_tr_csf_set(&tr_req[tr_idx].flags, 3526 CPPI5_TR_CSF_SUPR_EVT); 3527 3528 period_addr += period_len; 3529 } 3530 3531 return d; 3532 } 3533 3534 static struct udma_desc * 3535 udma_prep_dma_cyclic_pkt(struct udma_chan *uc, dma_addr_t buf_addr, 3536 size_t buf_len, size_t period_len, 3537 enum dma_transfer_direction dir, unsigned long flags) 3538 { 3539 struct udma_desc *d; 3540 u32 ring_id; 3541 int i; 3542 int periods = buf_len / period_len; 3543 3544 if (periods > (K3_UDMA_DEFAULT_RING_SIZE - 1)) 3545 return NULL; 3546 3547 if (period_len >= SZ_4M) 3548 return NULL; 3549 3550 d = kzalloc(struct_size(d, hwdesc, periods), GFP_NOWAIT); 3551 if (!d) 3552 return NULL; 3553 3554 d->hwdesc_count = periods; 3555 3556 /* TODO: re-check this... 
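	 * The completion ring selection below mirrors udma_prep_slave_sg_pkt():
	 * the rflow's receive ring for DEV_TO_MEM, otherwise the tchan's
	 * completion ring.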
	 */
	if (dir == DMA_DEV_TO_MEM)
		ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
	else
		ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);

	if (uc->ud->match_data->type != DMA_TYPE_UDMA)
		buf_addr |= (u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT;

	for (i = 0; i < periods; i++) {
		struct udma_hwdesc *hwdesc = &d->hwdesc[i];
		dma_addr_t period_addr = buf_addr + (period_len * i);
		struct cppi5_host_desc_t *h_desc;

		hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
							   GFP_NOWAIT,
							   &hwdesc->cppi5_desc_paddr);
		if (!hwdesc->cppi5_desc_vaddr) {
			dev_err(uc->ud->dev,
				"descriptor%d allocation failed\n", i);

			udma_free_hwdesc(uc, d);
			kfree(d);
			return NULL;
		}

		hwdesc->cppi5_desc_size = uc->config.hdesc_size;
		h_desc = hwdesc->cppi5_desc_vaddr;

		cppi5_hdesc_init(h_desc, 0, 0);
		cppi5_hdesc_set_pktlen(h_desc, period_len);

		/* Flow and Packet ID */
		cppi5_desc_set_pktids(&h_desc->hdr, uc->id,
				      CPPI5_INFO1_DESC_FLOWID_DEFAULT);
		cppi5_desc_set_retpolicy(&h_desc->hdr, 0, ring_id);

		/* attach each period to a new descriptor */
		cppi5_hdesc_attach_buf(h_desc,
				       period_addr, period_len,
				       period_addr, period_len);
	}

	return d;
}

static struct dma_async_tx_descriptor *
udma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
		     size_t period_len, enum dma_transfer_direction dir,
		     unsigned long flags)
{
	struct udma_chan *uc = to_udma_chan(chan);
	enum dma_slave_buswidth dev_width;
	struct udma_desc *d;
	u32 burst;

	if (dir != uc->config.dir) {
		dev_err(chan->device->dev,
			"%s: chan%d is for %s, not supporting %s\n",
			__func__, uc->id,
			dmaengine_get_direction_text(uc->config.dir),
			dmaengine_get_direction_text(dir));
		return NULL;
	}

	uc->cyclic = true;

	if (dir == DMA_DEV_TO_MEM) {
		dev_width = uc->cfg.src_addr_width;
		burst = uc->cfg.src_maxburst;
	} else if (dir == DMA_MEM_TO_DEV) {
		dev_width = uc->cfg.dst_addr_width;
		burst = uc->cfg.dst_maxburst;
	} else {
		dev_err(uc->ud->dev, "%s: bad direction?\n", __func__);
		return NULL;
	}

	if (!burst)
		burst = 1;

	if (uc->config.pkt_mode)
		d = udma_prep_dma_cyclic_pkt(uc, buf_addr, buf_len, period_len,
					     dir, flags);
	else
		d = udma_prep_dma_cyclic_tr(uc, buf_addr, buf_len, period_len,
					    dir, flags);

	if (!d)
		return NULL;

	d->sglen = buf_len / period_len;

	d->dir = dir;
	d->residue = buf_len;

	/* static TR for remote PDMA */
	if (udma_configure_statictr(uc, d, dev_width, burst)) {
		dev_err(uc->ud->dev,
			"%s: StaticTR Z is limited to maximum 4095 (%u)\n",
			__func__, d->static_tr.bstcnt);

		udma_free_hwdesc(uc, d);
		kfree(d);
		return NULL;
	}

	if (uc->config.metadata_size)
		d->vd.tx.metadata_ops = &metadata_ops;

	return vchan_tx_prep(&uc->vc, &d->vd, flags);
}

static struct dma_async_tx_descriptor *
udma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		     size_t len, unsigned long tx_flags)
{
	struct udma_chan *uc = to_udma_chan(chan);
	struct udma_desc *d;
	struct cppi5_tr_type15_t *tr_req;
	int num_tr;
	size_t tr_size = sizeof(struct cppi5_tr_type15_t);
	u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;

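	/*
	 * A memcpy is built from one or two type 15 TRs: a single TR covers
	 * lengths below 64K, larger copies are split by udma_get_tr_counters()
	 * into a tr0_cnt0 * tr0_cnt1 sized first TR and a second TR for the
	 * remaining tr1_cnt0 bytes.
	 */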
if (uc->config.dir != DMA_MEM_TO_MEM) { 3681 dev_err(chan->device->dev, 3682 "%s: chan%d is for %s, not supporting %s\n", 3683 __func__, uc->id, 3684 dmaengine_get_direction_text(uc->config.dir), 3685 dmaengine_get_direction_text(DMA_MEM_TO_MEM)); 3686 return NULL; 3687 } 3688 3689 num_tr = udma_get_tr_counters(len, __ffs(src | dest), &tr0_cnt0, 3690 &tr0_cnt1, &tr1_cnt0); 3691 if (num_tr < 0) { 3692 dev_err(uc->ud->dev, "size %zu is not supported\n", 3693 len); 3694 return NULL; 3695 } 3696 3697 d = udma_alloc_tr_desc(uc, tr_size, num_tr, DMA_MEM_TO_MEM); 3698 if (!d) 3699 return NULL; 3700 3701 d->dir = DMA_MEM_TO_MEM; 3702 d->desc_idx = 0; 3703 d->tr_idx = 0; 3704 d->residue = len; 3705 3706 if (uc->ud->match_data->type != DMA_TYPE_UDMA) { 3707 src |= (u64)uc->ud->asel << K3_ADDRESS_ASEL_SHIFT; 3708 dest |= (u64)uc->ud->asel << K3_ADDRESS_ASEL_SHIFT; 3709 } 3710 3711 tr_req = d->hwdesc[0].tr_req_base; 3712 3713 cppi5_tr_init(&tr_req[0].flags, CPPI5_TR_TYPE15, false, true, 3714 CPPI5_TR_EVENT_SIZE_COMPLETION, 0); 3715 cppi5_tr_csf_set(&tr_req[0].flags, CPPI5_TR_CSF_SUPR_EVT); 3716 3717 tr_req[0].addr = src; 3718 tr_req[0].icnt0 = tr0_cnt0; 3719 tr_req[0].icnt1 = tr0_cnt1; 3720 tr_req[0].icnt2 = 1; 3721 tr_req[0].icnt3 = 1; 3722 tr_req[0].dim1 = tr0_cnt0; 3723 3724 tr_req[0].daddr = dest; 3725 tr_req[0].dicnt0 = tr0_cnt0; 3726 tr_req[0].dicnt1 = tr0_cnt1; 3727 tr_req[0].dicnt2 = 1; 3728 tr_req[0].dicnt3 = 1; 3729 tr_req[0].ddim1 = tr0_cnt0; 3730 3731 if (num_tr == 2) { 3732 cppi5_tr_init(&tr_req[1].flags, CPPI5_TR_TYPE15, false, true, 3733 CPPI5_TR_EVENT_SIZE_COMPLETION, 0); 3734 cppi5_tr_csf_set(&tr_req[1].flags, CPPI5_TR_CSF_SUPR_EVT); 3735 3736 tr_req[1].addr = src + tr0_cnt1 * tr0_cnt0; 3737 tr_req[1].icnt0 = tr1_cnt0; 3738 tr_req[1].icnt1 = 1; 3739 tr_req[1].icnt2 = 1; 3740 tr_req[1].icnt3 = 1; 3741 3742 tr_req[1].daddr = dest + tr0_cnt1 * tr0_cnt0; 3743 tr_req[1].dicnt0 = tr1_cnt0; 3744 tr_req[1].dicnt1 = 1; 3745 tr_req[1].dicnt2 = 1; 3746 tr_req[1].dicnt3 = 1; 3747 } 3748 3749 cppi5_tr_csf_set(&tr_req[num_tr - 1].flags, 3750 CPPI5_TR_CSF_SUPR_EVT | CPPI5_TR_CSF_EOP); 3751 3752 if (uc->config.metadata_size) 3753 d->vd.tx.metadata_ops = &metadata_ops; 3754 3755 return vchan_tx_prep(&uc->vc, &d->vd, tx_flags); 3756 } 3757 3758 static void udma_issue_pending(struct dma_chan *chan) 3759 { 3760 struct udma_chan *uc = to_udma_chan(chan); 3761 unsigned long flags; 3762 3763 spin_lock_irqsave(&uc->vc.lock, flags); 3764 3765 /* If we have something pending and no active descriptor, then */ 3766 if (vchan_issue_pending(&uc->vc) && !uc->desc) { 3767 /* 3768 * start a descriptor if the channel is NOT [marked as 3769 * terminating _and_ it is still running (teardown has not 3770 * completed yet)]. 
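		 * In other words, do not kick the hardware while a previous
		 * teardown is still in flight; only start when the channel
		 * is idle or has already stopped.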
3771 */ 3772 if (!(uc->state == UDMA_CHAN_IS_TERMINATING && 3773 udma_is_chan_running(uc))) 3774 udma_start(uc); 3775 } 3776 3777 spin_unlock_irqrestore(&uc->vc.lock, flags); 3778 } 3779 3780 static enum dma_status udma_tx_status(struct dma_chan *chan, 3781 dma_cookie_t cookie, 3782 struct dma_tx_state *txstate) 3783 { 3784 struct udma_chan *uc = to_udma_chan(chan); 3785 enum dma_status ret; 3786 unsigned long flags; 3787 3788 spin_lock_irqsave(&uc->vc.lock, flags); 3789 3790 ret = dma_cookie_status(chan, cookie, txstate); 3791 3792 if (!udma_is_chan_running(uc)) 3793 ret = DMA_COMPLETE; 3794 3795 if (ret == DMA_IN_PROGRESS && udma_is_chan_paused(uc)) 3796 ret = DMA_PAUSED; 3797 3798 if (ret == DMA_COMPLETE || !txstate) 3799 goto out; 3800 3801 if (uc->desc && uc->desc->vd.tx.cookie == cookie) { 3802 u32 peer_bcnt = 0; 3803 u32 bcnt = 0; 3804 u32 residue = uc->desc->residue; 3805 u32 delay = 0; 3806 3807 if (uc->desc->dir == DMA_MEM_TO_DEV) { 3808 bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_SBCNT_REG); 3809 3810 if (uc->config.ep_type != PSIL_EP_NATIVE) { 3811 peer_bcnt = udma_tchanrt_read(uc, 3812 UDMA_CHAN_RT_PEER_BCNT_REG); 3813 3814 if (bcnt > peer_bcnt) 3815 delay = bcnt - peer_bcnt; 3816 } 3817 } else if (uc->desc->dir == DMA_DEV_TO_MEM) { 3818 bcnt = udma_rchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG); 3819 3820 if (uc->config.ep_type != PSIL_EP_NATIVE) { 3821 peer_bcnt = udma_rchanrt_read(uc, 3822 UDMA_CHAN_RT_PEER_BCNT_REG); 3823 3824 if (peer_bcnt > bcnt) 3825 delay = peer_bcnt - bcnt; 3826 } 3827 } else { 3828 bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG); 3829 } 3830 3831 if (bcnt && !(bcnt % uc->desc->residue)) 3832 residue = 0; 3833 else 3834 residue -= bcnt % uc->desc->residue; 3835 3836 if (!residue && (uc->config.dir == DMA_DEV_TO_MEM || !delay)) { 3837 ret = DMA_COMPLETE; 3838 delay = 0; 3839 } 3840 3841 dma_set_residue(txstate, residue); 3842 dma_set_in_flight_bytes(txstate, delay); 3843 3844 } else { 3845 ret = DMA_COMPLETE; 3846 } 3847 3848 out: 3849 spin_unlock_irqrestore(&uc->vc.lock, flags); 3850 return ret; 3851 } 3852 3853 static int udma_pause(struct dma_chan *chan) 3854 { 3855 struct udma_chan *uc = to_udma_chan(chan); 3856 3857 /* pause the channel */ 3858 switch (uc->config.dir) { 3859 case DMA_DEV_TO_MEM: 3860 udma_rchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, 3861 UDMA_PEER_RT_EN_PAUSE, 3862 UDMA_PEER_RT_EN_PAUSE); 3863 break; 3864 case DMA_MEM_TO_DEV: 3865 udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, 3866 UDMA_PEER_RT_EN_PAUSE, 3867 UDMA_PEER_RT_EN_PAUSE); 3868 break; 3869 case DMA_MEM_TO_MEM: 3870 udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_CTL_REG, 3871 UDMA_CHAN_RT_CTL_PAUSE, 3872 UDMA_CHAN_RT_CTL_PAUSE); 3873 break; 3874 default: 3875 return -EINVAL; 3876 } 3877 3878 return 0; 3879 } 3880 3881 static int udma_resume(struct dma_chan *chan) 3882 { 3883 struct udma_chan *uc = to_udma_chan(chan); 3884 3885 /* resume the channel */ 3886 switch (uc->config.dir) { 3887 case DMA_DEV_TO_MEM: 3888 udma_rchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, 3889 UDMA_PEER_RT_EN_PAUSE, 0); 3890 3891 break; 3892 case DMA_MEM_TO_DEV: 3893 udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, 3894 UDMA_PEER_RT_EN_PAUSE, 0); 3895 break; 3896 case DMA_MEM_TO_MEM: 3897 udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_CTL_REG, 3898 UDMA_CHAN_RT_CTL_PAUSE, 0); 3899 break; 3900 default: 3901 return -EINVAL; 3902 } 3903 3904 return 0; 3905 } 3906 3907 static int udma_terminate_all(struct dma_chan *chan) 3908 { 3909 struct udma_chan *uc = to_udma_chan(chan); 3910 
unsigned long flags; 3911 LIST_HEAD(head); 3912 3913 spin_lock_irqsave(&uc->vc.lock, flags); 3914 3915 if (udma_is_chan_running(uc)) 3916 udma_stop(uc); 3917 3918 if (uc->desc) { 3919 uc->terminated_desc = uc->desc; 3920 uc->desc = NULL; 3921 uc->terminated_desc->terminated = true; 3922 cancel_delayed_work(&uc->tx_drain.work); 3923 } 3924 3925 uc->paused = false; 3926 3927 vchan_get_all_descriptors(&uc->vc, &head); 3928 spin_unlock_irqrestore(&uc->vc.lock, flags); 3929 vchan_dma_desc_free_list(&uc->vc, &head); 3930 3931 return 0; 3932 } 3933 3934 static void udma_synchronize(struct dma_chan *chan) 3935 { 3936 struct udma_chan *uc = to_udma_chan(chan); 3937 unsigned long timeout = msecs_to_jiffies(1000); 3938 3939 vchan_synchronize(&uc->vc); 3940 3941 if (uc->state == UDMA_CHAN_IS_TERMINATING) { 3942 timeout = wait_for_completion_timeout(&uc->teardown_completed, 3943 timeout); 3944 if (!timeout) { 3945 dev_warn(uc->ud->dev, "chan%d teardown timeout!\n", 3946 uc->id); 3947 udma_dump_chan_stdata(uc); 3948 udma_reset_chan(uc, true); 3949 } 3950 } 3951 3952 udma_reset_chan(uc, false); 3953 if (udma_is_chan_running(uc)) 3954 dev_warn(uc->ud->dev, "chan%d refused to stop!\n", uc->id); 3955 3956 cancel_delayed_work_sync(&uc->tx_drain.work); 3957 udma_reset_rings(uc); 3958 } 3959 3960 static void udma_desc_pre_callback(struct virt_dma_chan *vc, 3961 struct virt_dma_desc *vd, 3962 struct dmaengine_result *result) 3963 { 3964 struct udma_chan *uc = to_udma_chan(&vc->chan); 3965 struct udma_desc *d; 3966 3967 if (!vd) 3968 return; 3969 3970 d = to_udma_desc(&vd->tx); 3971 3972 if (d->metadata_size) 3973 udma_fetch_epib(uc, d); 3974 3975 /* Provide residue information for the client */ 3976 if (result) { 3977 void *desc_vaddr = udma_curr_cppi5_desc_vaddr(d, d->desc_idx); 3978 3979 if (cppi5_desc_get_type(desc_vaddr) == 3980 CPPI5_INFO0_DESC_TYPE_VAL_HOST) { 3981 result->residue = d->residue - 3982 cppi5_hdesc_get_pktlen(desc_vaddr); 3983 if (result->residue) 3984 result->result = DMA_TRANS_ABORTED; 3985 else 3986 result->result = DMA_TRANS_NOERROR; 3987 } else { 3988 result->residue = 0; 3989 result->result = DMA_TRANS_NOERROR; 3990 } 3991 } 3992 } 3993 3994 /* 3995 * This tasklet handles the completion of a DMA descriptor by 3996 * calling its callback and freeing it. 
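 * Cyclic descriptors (vc->cyclic) only have their callback invoked here,
 * they are not freed as they keep running until the channel is terminated.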
3997 */ 3998 static void udma_vchan_complete(struct tasklet_struct *t) 3999 { 4000 struct virt_dma_chan *vc = from_tasklet(vc, t, task); 4001 struct virt_dma_desc *vd, *_vd; 4002 struct dmaengine_desc_callback cb; 4003 LIST_HEAD(head); 4004 4005 spin_lock_irq(&vc->lock); 4006 list_splice_tail_init(&vc->desc_completed, &head); 4007 vd = vc->cyclic; 4008 if (vd) { 4009 vc->cyclic = NULL; 4010 dmaengine_desc_get_callback(&vd->tx, &cb); 4011 } else { 4012 memset(&cb, 0, sizeof(cb)); 4013 } 4014 spin_unlock_irq(&vc->lock); 4015 4016 udma_desc_pre_callback(vc, vd, NULL); 4017 dmaengine_desc_callback_invoke(&cb, NULL); 4018 4019 list_for_each_entry_safe(vd, _vd, &head, node) { 4020 struct dmaengine_result result; 4021 4022 dmaengine_desc_get_callback(&vd->tx, &cb); 4023 4024 list_del(&vd->node); 4025 4026 udma_desc_pre_callback(vc, vd, &result); 4027 dmaengine_desc_callback_invoke(&cb, &result); 4028 4029 vchan_vdesc_fini(vd); 4030 } 4031 } 4032 4033 static void udma_free_chan_resources(struct dma_chan *chan) 4034 { 4035 struct udma_chan *uc = to_udma_chan(chan); 4036 struct udma_dev *ud = to_udma_dev(chan->device); 4037 4038 udma_terminate_all(chan); 4039 if (uc->terminated_desc) { 4040 udma_reset_chan(uc, false); 4041 udma_reset_rings(uc); 4042 } 4043 4044 cancel_delayed_work_sync(&uc->tx_drain.work); 4045 4046 if (uc->irq_num_ring > 0) { 4047 free_irq(uc->irq_num_ring, uc); 4048 4049 uc->irq_num_ring = 0; 4050 } 4051 if (uc->irq_num_udma > 0) { 4052 free_irq(uc->irq_num_udma, uc); 4053 4054 uc->irq_num_udma = 0; 4055 } 4056 4057 /* Release PSI-L pairing */ 4058 if (uc->psil_paired) { 4059 navss_psil_unpair(ud, uc->config.src_thread, 4060 uc->config.dst_thread); 4061 uc->psil_paired = false; 4062 } 4063 4064 vchan_free_chan_resources(&uc->vc); 4065 tasklet_kill(&uc->vc.task); 4066 4067 bcdma_free_bchan_resources(uc); 4068 udma_free_tx_resources(uc); 4069 udma_free_rx_resources(uc); 4070 udma_reset_uchan(uc); 4071 4072 if (uc->use_dma_pool) { 4073 dma_pool_destroy(uc->hdesc_pool); 4074 uc->use_dma_pool = false; 4075 } 4076 } 4077 4078 static struct platform_driver udma_driver; 4079 static struct platform_driver bcdma_driver; 4080 static struct platform_driver pktdma_driver; 4081 4082 struct udma_filter_param { 4083 int remote_thread_id; 4084 u32 atype; 4085 u32 asel; 4086 u32 tr_trigger_type; 4087 }; 4088 4089 static bool udma_dma_filter_fn(struct dma_chan *chan, void *param) 4090 { 4091 struct udma_chan_config *ucc; 4092 struct psil_endpoint_config *ep_config; 4093 struct udma_filter_param *filter_param; 4094 struct udma_chan *uc; 4095 struct udma_dev *ud; 4096 4097 if (chan->device->dev->driver != &udma_driver.driver && 4098 chan->device->dev->driver != &bcdma_driver.driver && 4099 chan->device->dev->driver != &pktdma_driver.driver) 4100 return false; 4101 4102 uc = to_udma_chan(chan); 4103 ucc = &uc->config; 4104 ud = uc->ud; 4105 filter_param = param; 4106 4107 if (filter_param->atype > 2) { 4108 dev_err(ud->dev, "Invalid channel atype: %u\n", 4109 filter_param->atype); 4110 return false; 4111 } 4112 4113 if (filter_param->asel > 15) { 4114 dev_err(ud->dev, "Invalid channel asel: %u\n", 4115 filter_param->asel); 4116 return false; 4117 } 4118 4119 ucc->remote_thread_id = filter_param->remote_thread_id; 4120 ucc->atype = filter_param->atype; 4121 ucc->asel = filter_param->asel; 4122 ucc->tr_trigger_type = filter_param->tr_trigger_type; 4123 4124 if (ucc->tr_trigger_type) { 4125 ucc->dir = DMA_MEM_TO_MEM; 4126 goto triggered_bchan; 4127 } else if (ucc->remote_thread_id & 
static bool udma_dma_filter_fn(struct dma_chan *chan, void *param)
{
	struct udma_chan_config *ucc;
	struct psil_endpoint_config *ep_config;
	struct udma_filter_param *filter_param;
	struct udma_chan *uc;
	struct udma_dev *ud;

	if (chan->device->dev->driver != &udma_driver.driver &&
	    chan->device->dev->driver != &bcdma_driver.driver &&
	    chan->device->dev->driver != &pktdma_driver.driver)
		return false;

	uc = to_udma_chan(chan);
	ucc = &uc->config;
	ud = uc->ud;
	filter_param = param;

	if (filter_param->atype > 2) {
		dev_err(ud->dev, "Invalid channel atype: %u\n",
			filter_param->atype);
		return false;
	}

	if (filter_param->asel > 15) {
		dev_err(ud->dev, "Invalid channel asel: %u\n",
			filter_param->asel);
		return false;
	}

	ucc->remote_thread_id = filter_param->remote_thread_id;
	ucc->atype = filter_param->atype;
	ucc->asel = filter_param->asel;
	ucc->tr_trigger_type = filter_param->tr_trigger_type;

	if (ucc->tr_trigger_type) {
		ucc->dir = DMA_MEM_TO_MEM;
		goto triggered_bchan;
	} else if (ucc->remote_thread_id & K3_PSIL_DST_THREAD_ID_OFFSET) {
		ucc->dir = DMA_MEM_TO_DEV;
	} else {
		ucc->dir = DMA_DEV_TO_MEM;
	}

	ep_config = psil_get_ep_config(ucc->remote_thread_id);
	if (IS_ERR(ep_config)) {
		dev_err(ud->dev, "No configuration for psi-l thread 0x%04x\n",
			ucc->remote_thread_id);
		ucc->dir = DMA_MEM_TO_MEM;
		ucc->remote_thread_id = -1;
		ucc->atype = 0;
		ucc->asel = 0;
		return false;
	}

	if (ud->match_data->type == DMA_TYPE_BCDMA &&
	    ep_config->pkt_mode) {
		dev_err(ud->dev,
			"Only TR mode is supported (psi-l thread 0x%04x)\n",
			ucc->remote_thread_id);
		ucc->dir = DMA_MEM_TO_MEM;
		ucc->remote_thread_id = -1;
		ucc->atype = 0;
		ucc->asel = 0;
		return false;
	}

	ucc->pkt_mode = ep_config->pkt_mode;
	ucc->channel_tpl = ep_config->channel_tpl;
	ucc->notdpkt = ep_config->notdpkt;
	ucc->ep_type = ep_config->ep_type;

	if (ud->match_data->type == DMA_TYPE_PKTDMA &&
	    ep_config->mapped_channel_id >= 0) {
		ucc->mapped_channel_id = ep_config->mapped_channel_id;
		ucc->default_flow_id = ep_config->default_flow_id;
	} else {
		ucc->mapped_channel_id = -1;
		ucc->default_flow_id = -1;
	}

	if (ucc->ep_type != PSIL_EP_NATIVE) {
		const struct udma_match_data *match_data = ud->match_data;

		if (match_data->flags & UDMA_FLAG_PDMA_ACC32)
			ucc->enable_acc32 = ep_config->pdma_acc32;
		if (match_data->flags & UDMA_FLAG_PDMA_BURST)
			ucc->enable_burst = ep_config->pdma_burst;
	}

	ucc->needs_epib = ep_config->needs_epib;
	ucc->psd_size = ep_config->psd_size;
	ucc->metadata_size =
		(ucc->needs_epib ? CPPI5_INFO0_HDESC_EPIB_SIZE : 0) +
		ucc->psd_size;

	if (ucc->pkt_mode)
		ucc->hdesc_size = ALIGN(sizeof(struct cppi5_host_desc_t) +
					ucc->metadata_size, ud->desc_align);

	dev_dbg(ud->dev, "chan%d: Remote thread: 0x%04x (%s)\n", uc->id,
		ucc->remote_thread_id, dmaengine_get_direction_text(ucc->dir));

	return true;

triggered_bchan:
	dev_dbg(ud->dev, "chan%d: triggered channel (type: %u)\n", uc->id,
		ucc->tr_trigger_type);

	return true;
}

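/*
 * #dma-cells translation: BCDMA expects three cells (trigger type, remote
 * thread ID, asel), UDMA/PKTDMA expect one or two (remote thread ID plus an
 * optional atype or asel). The parsed values are handed to
 * udma_dma_filter_fn() through a udma_filter_param on the stack.
 */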
static struct dma_chan *udma_of_xlate(struct of_phandle_args *dma_spec,
				      struct of_dma *ofdma)
{
	struct udma_dev *ud = ofdma->of_dma_data;
	dma_cap_mask_t mask = ud->ddev.cap_mask;
	struct udma_filter_param filter_param;
	struct dma_chan *chan;

	if (ud->match_data->type == DMA_TYPE_BCDMA) {
		if (dma_spec->args_count != 3)
			return NULL;

		filter_param.tr_trigger_type = dma_spec->args[0];
		filter_param.remote_thread_id = dma_spec->args[1];
		filter_param.asel = dma_spec->args[2];
		filter_param.atype = 0;
	} else {
		if (dma_spec->args_count != 1 && dma_spec->args_count != 2)
			return NULL;

		filter_param.remote_thread_id = dma_spec->args[0];
		filter_param.tr_trigger_type = 0;
		if (dma_spec->args_count == 2) {
			if (ud->match_data->type == DMA_TYPE_UDMA) {
				filter_param.atype = dma_spec->args[1];
				filter_param.asel = 0;
			} else {
				filter_param.atype = 0;
				filter_param.asel = dma_spec->args[1];
			}
		} else {
			filter_param.atype = 0;
			filter_param.asel = 0;
		}
	}

	chan = __dma_request_channel(&mask, udma_dma_filter_fn, &filter_param,
				     ofdma->of_node);
	if (!chan) {
		dev_err(ud->dev, "failed to get a channel in %s\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	return chan;
}

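/*
 * Per-compatible controller configuration. burst_size[] is indexed by
 * channel throughput level: normal, high and ultra-high capacity channels.
 */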
static struct udma_match_data am654_main_data = {
	.type = DMA_TYPE_UDMA,
	.psil_base = 0x1000,
	.enable_memcpy_support = true,
	.statictr_z_mask = GENMASK(11, 0),
	.burst_size = {
		TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
		TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* H Channels */
		0, /* No UH Channels */
	},
};

static struct udma_match_data am654_mcu_data = {
	.type = DMA_TYPE_UDMA,
	.psil_base = 0x6000,
	.enable_memcpy_support = false,
	.statictr_z_mask = GENMASK(11, 0),
	.burst_size = {
		TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
		TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* H Channels */
		0, /* No UH Channels */
	},
};

static struct udma_match_data j721e_main_data = {
	.type = DMA_TYPE_UDMA,
	.psil_base = 0x1000,
	.enable_memcpy_support = true,
	.flags = UDMA_FLAGS_J7_CLASS,
	.statictr_z_mask = GENMASK(23, 0),
	.burst_size = {
		TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
		TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_256_BYTES, /* H Channels */
		TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_256_BYTES, /* UH Channels */
	},
};

static struct udma_match_data j721e_mcu_data = {
	.type = DMA_TYPE_UDMA,
	.psil_base = 0x6000,
	.enable_memcpy_support = false, /* MEM_TO_MEM is slow via MCU UDMA */
	.flags = UDMA_FLAGS_J7_CLASS,
	.statictr_z_mask = GENMASK(23, 0),
	.burst_size = {
		TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
		TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_128_BYTES, /* H Channels */
		0, /* No UH Channels */
	},
};

static struct udma_match_data am64_bcdma_data = {
	.type = DMA_TYPE_BCDMA,
	.psil_base = 0x2000, /* for tchan and rchan, not applicable to bchan */
	.enable_memcpy_support = true, /* Supported via bchan */
	.flags = UDMA_FLAGS_J7_CLASS,
	.statictr_z_mask = GENMASK(23, 0),
	.burst_size = {
		TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
		0, /* No H Channels */
		0, /* No UH Channels */
	},
};

static struct udma_match_data am64_pktdma_data = {
	.type = DMA_TYPE_PKTDMA,
	.psil_base = 0x1000,
	.enable_memcpy_support = false, /* PKTDMA does not support MEM_TO_MEM */
	.flags = UDMA_FLAGS_J7_CLASS,
	.statictr_z_mask = GENMASK(23, 0),
	.burst_size = {
		TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
		0, /* No H Channels */
		0, /* No UH Channels */
	},
};

static const struct of_device_id udma_of_match[] = {
	{
		.compatible = "ti,am654-navss-main-udmap",
		.data = &am654_main_data,
	},
	{
		.compatible = "ti,am654-navss-mcu-udmap",
		.data = &am654_mcu_data,
	}, {
		.compatible = "ti,j721e-navss-main-udmap",
		.data = &j721e_main_data,
	}, {
		.compatible = "ti,j721e-navss-mcu-udmap",
		.data = &j721e_mcu_data,
	},
	{
		.compatible = "ti,am64-dmss-bcdma",
		.data = &am64_bcdma_data,
	},
	{
		.compatible = "ti,am64-dmss-pktdma",
		.data = &am64_pktdma_data,
	},
	{ /* Sentinel */ },
};

static struct udma_soc_data am654_soc_data = {
	.oes = {
		.udma_rchan = 0x200,
	},
};

static struct udma_soc_data j721e_soc_data = {
	.oes = {
		.udma_rchan = 0x400,
	},
};

static struct udma_soc_data j7200_soc_data = {
	.oes = {
		.udma_rchan = 0x80,
	},
};

static struct udma_soc_data am64_soc_data = {
	.oes = {
		.bcdma_bchan_data = 0x2200,
		.bcdma_bchan_ring = 0x2400,
		.bcdma_tchan_data = 0x2800,
		.bcdma_tchan_ring = 0x2a00,
		.bcdma_rchan_data = 0x2e00,
		.bcdma_rchan_ring = 0x3000,
		.pktdma_tchan_flow = 0x1200,
		.pktdma_rchan_flow = 0x1600,
	},
	.bcdma_trigger_event_offset = 0xc400,
};

static const struct soc_device_attribute k3_soc_devices[] = {
	{ .family = "AM65X", .data = &am654_soc_data },
	{ .family = "J721E", .data = &j721e_soc_data },
	{ .family = "J7200", .data = &j7200_soc_data },
	{ .family = "AM64X", .data = &am64_soc_data },
	{ .family = "J721S2", .data = &j721e_soc_data},
	{ .family = "AM62X", .data = &am64_soc_data },
	{ /* sentinel */ }
};

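/*
 * Map the "gcfg" region first and read the capability registers to learn how
 * many channels/flows this instance has, then map only the channel runtime
 * (CHANRT) regions that are actually present.
 */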
static int udma_get_mmrs(struct platform_device *pdev, struct udma_dev *ud)
{
	u32 cap2, cap3, cap4;
	int i;

	ud->mmrs[MMR_GCFG] = devm_platform_ioremap_resource_byname(pdev, mmr_names[MMR_GCFG]);
	if (IS_ERR(ud->mmrs[MMR_GCFG]))
		return PTR_ERR(ud->mmrs[MMR_GCFG]);

	cap2 = udma_read(ud->mmrs[MMR_GCFG], 0x28);
	cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c);

	switch (ud->match_data->type) {
	case DMA_TYPE_UDMA:
		ud->rflow_cnt = UDMA_CAP3_RFLOW_CNT(cap3);
		ud->tchan_cnt = UDMA_CAP2_TCHAN_CNT(cap2);
		ud->echan_cnt = UDMA_CAP2_ECHAN_CNT(cap2);
		ud->rchan_cnt = UDMA_CAP2_RCHAN_CNT(cap2);
		break;
	case DMA_TYPE_BCDMA:
		ud->bchan_cnt = BCDMA_CAP2_BCHAN_CNT(cap2);
		ud->tchan_cnt = BCDMA_CAP2_TCHAN_CNT(cap2);
		ud->rchan_cnt = BCDMA_CAP2_RCHAN_CNT(cap2);
		ud->rflow_cnt = ud->rchan_cnt;
		break;
	case DMA_TYPE_PKTDMA:
		cap4 = udma_read(ud->mmrs[MMR_GCFG], 0x30);
		ud->tchan_cnt = UDMA_CAP2_TCHAN_CNT(cap2);
		ud->rchan_cnt = UDMA_CAP2_RCHAN_CNT(cap2);
		ud->rflow_cnt = UDMA_CAP3_RFLOW_CNT(cap3);
		ud->tflow_cnt = PKTDMA_CAP4_TFLOW_CNT(cap4);
		break;
	default:
		return -EINVAL;
	}

	for (i = 1; i < MMR_LAST; i++) {
		if (i == MMR_BCHANRT && ud->bchan_cnt == 0)
			continue;
		if (i == MMR_TCHANRT && ud->tchan_cnt == 0)
			continue;
		if (i == MMR_RCHANRT && ud->rchan_cnt == 0)
			continue;

		ud->mmrs[i] = devm_platform_ioremap_resource_byname(pdev, mmr_names[i]);
		if (IS_ERR(ud->mmrs[i]))
			return PTR_ERR(ud->mmrs[i]);
	}

	return 0;
}

static void udma_mark_resource_ranges(struct udma_dev *ud, unsigned long *map,
				      struct ti_sci_resource_desc *rm_desc,
				      char *name)
{
	bitmap_clear(map, rm_desc->start, rm_desc->num);
	bitmap_clear(map, rm_desc->start_sec, rm_desc->num_sec);
	dev_dbg(ud->dev, "ti_sci resource range for %s: %d:%d | %d:%d\n", name,
		rm_desc->start, rm_desc->num, rm_desc->start_sec,
		rm_desc->num_sec);
}

static const char * const range_names[] = {
	[RM_RANGE_BCHAN] = "ti,sci-rm-range-bchan",
	[RM_RANGE_TCHAN] = "ti,sci-rm-range-tchan",
	[RM_RANGE_RCHAN] = "ti,sci-rm-range-rchan",
	[RM_RANGE_RFLOW] = "ti,sci-rm-range-rflow",
	[RM_RANGE_TFLOW] = "ti,sci-rm-range-tflow",
};

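/*
 * UDMA resource setup: read the throughput-level boundaries, allocate the
 * channel/flow bitmaps, mark the ranges assigned to this host by TISCI and
 * allocate the MSI interrupt ranges covering the tchan/rchan events.
 */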
static int udma_setup_resources(struct udma_dev *ud)
{
	int ret, i, j;
	struct device *dev = ud->dev;
	struct ti_sci_resource *rm_res, irq_res;
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
	u32 cap3;

	/* Set up the throughput level start indexes */
	cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c);
	if (of_device_is_compatible(dev->of_node,
				    "ti,am654-navss-main-udmap")) {
		ud->tchan_tpl.levels = 2;
		ud->tchan_tpl.start_idx[0] = 8;
	} else if (of_device_is_compatible(dev->of_node,
					   "ti,am654-navss-mcu-udmap")) {
		ud->tchan_tpl.levels = 2;
		ud->tchan_tpl.start_idx[0] = 2;
	} else if (UDMA_CAP3_UCHAN_CNT(cap3)) {
		ud->tchan_tpl.levels = 3;
		ud->tchan_tpl.start_idx[1] = UDMA_CAP3_UCHAN_CNT(cap3);
		ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3);
	} else if (UDMA_CAP3_HCHAN_CNT(cap3)) {
		ud->tchan_tpl.levels = 2;
		ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3);
	} else {
		ud->tchan_tpl.levels = 1;
	}

	ud->rchan_tpl.levels = ud->tchan_tpl.levels;
	ud->rchan_tpl.start_idx[0] = ud->tchan_tpl.start_idx[0];
	ud->rchan_tpl.start_idx[1] = ud->tchan_tpl.start_idx[1];

	ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
	ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
				  GFP_KERNEL);
	ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
	ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
				  GFP_KERNEL);
	ud->rflow_gp_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rflow_cnt),
					      sizeof(unsigned long),
					      GFP_KERNEL);
	ud->rflow_gp_map_allocated = devm_kcalloc(dev,
						  BITS_TO_LONGS(ud->rflow_cnt),
						  sizeof(unsigned long),
						  GFP_KERNEL);
	ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt),
					sizeof(unsigned long),
					GFP_KERNEL);
	ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows),
				  GFP_KERNEL);

	if (!ud->tchan_map || !ud->rchan_map || !ud->rflow_gp_map ||
	    !ud->rflow_gp_map_allocated || !ud->tchans || !ud->rchans ||
	    !ud->rflows || !ud->rflow_in_use)
		return -ENOMEM;

	/*
	 * RX flows with the same Ids as RX channels are reserved to be used
	 * as default flows if remote HW can't generate flow_ids. Those
	 * RX flows can be requested only explicitly by id.
	 */
	bitmap_set(ud->rflow_gp_map_allocated, 0, ud->rchan_cnt);

	/* by default no GP rflows are assigned to Linux */
	bitmap_set(ud->rflow_gp_map, 0, ud->rflow_cnt);

	/* Get resource ranges from tisci */
	for (i = 0; i < RM_RANGE_LAST; i++) {
		if (i == RM_RANGE_BCHAN || i == RM_RANGE_TFLOW)
			continue;

		tisci_rm->rm_ranges[i] =
			devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
						    tisci_rm->tisci_dev_id,
						    (char *)range_names[i]);
	}

	/* tchan ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
	if (IS_ERR(rm_res)) {
		bitmap_zero(ud->tchan_map, ud->tchan_cnt);
		irq_res.sets = 1;
	} else {
		bitmap_fill(ud->tchan_map, ud->tchan_cnt);
		for (i = 0; i < rm_res->sets; i++)
			udma_mark_resource_ranges(ud, ud->tchan_map,
						  &rm_res->desc[i], "tchan");
		irq_res.sets = rm_res->sets;
	}

	/* rchan and matching default flow ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
	if (IS_ERR(rm_res)) {
		bitmap_zero(ud->rchan_map, ud->rchan_cnt);
		irq_res.sets++;
	} else {
		bitmap_fill(ud->rchan_map, ud->rchan_cnt);
		for (i = 0; i < rm_res->sets; i++)
			udma_mark_resource_ranges(ud, ud->rchan_map,
						  &rm_res->desc[i], "rchan");
		irq_res.sets += rm_res->sets;
	}

	irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL);
	if (!irq_res.desc)
		return -ENOMEM;
	rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
	if (IS_ERR(rm_res)) {
		irq_res.desc[0].start = 0;
		irq_res.desc[0].num = ud->tchan_cnt;
		i = 1;
	} else {
		for (i = 0; i < rm_res->sets; i++) {
			irq_res.desc[i].start = rm_res->desc[i].start;
			irq_res.desc[i].num = rm_res->desc[i].num;
			irq_res.desc[i].start_sec = rm_res->desc[i].start_sec;
			irq_res.desc[i].num_sec = rm_res->desc[i].num_sec;
		}
	}
	rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
	if (IS_ERR(rm_res)) {
		irq_res.desc[i].start = 0;
		irq_res.desc[i].num = ud->rchan_cnt;
	} else {
		for (j = 0; j < rm_res->sets; j++, i++) {
			if (rm_res->desc[j].num) {
				irq_res.desc[i].start = rm_res->desc[j].start +
							ud->soc_data->oes.udma_rchan;
				irq_res.desc[i].num = rm_res->desc[j].num;
			}
			if (rm_res->desc[j].num_sec) {
				irq_res.desc[i].start_sec = rm_res->desc[j].start_sec +
							    ud->soc_data->oes.udma_rchan;
				irq_res.desc[i].num_sec = rm_res->desc[j].num_sec;
			}
		}
	}
	ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
	kfree(irq_res.desc);
	if (ret) {
		dev_err(ud->dev, "Failed to allocate MSI interrupts\n");
		return ret;
	}

	/* GP rflow ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
	if (IS_ERR(rm_res)) {
		/* all gp flows are assigned exclusively to Linux */
		bitmap_clear(ud->rflow_gp_map, ud->rchan_cnt,
			     ud->rflow_cnt - ud->rchan_cnt);
	} else {
		for (i = 0; i < rm_res->sets; i++)
			udma_mark_resource_ranges(ud, ud->rflow_gp_map,
						  &rm_res->desc[i], "gp-rflow");
	}

	return 0;
}

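/*
 * BCDMA resource setup: bchan/tchan/rchan each have their own throughput
 * levels and, except for bchans, need two MSI events per channel (data and
 * ring completion), hence the "* 2" accounting for the irq_res sets.
 */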
static int bcdma_setup_resources(struct udma_dev *ud)
{
	int ret, i, j;
	struct device *dev = ud->dev;
	struct ti_sci_resource *rm_res, irq_res;
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
	const struct udma_oes_offsets *oes = &ud->soc_data->oes;
	u32 cap;

	/* Set up the throughput level start indexes */
	cap = udma_read(ud->mmrs[MMR_GCFG], 0x2c);
	if (BCDMA_CAP3_UBCHAN_CNT(cap)) {
		ud->bchan_tpl.levels = 3;
		ud->bchan_tpl.start_idx[1] = BCDMA_CAP3_UBCHAN_CNT(cap);
		ud->bchan_tpl.start_idx[0] = BCDMA_CAP3_HBCHAN_CNT(cap);
	} else if (BCDMA_CAP3_HBCHAN_CNT(cap)) {
		ud->bchan_tpl.levels = 2;
		ud->bchan_tpl.start_idx[0] = BCDMA_CAP3_HBCHAN_CNT(cap);
	} else {
		ud->bchan_tpl.levels = 1;
	}

	cap = udma_read(ud->mmrs[MMR_GCFG], 0x30);
	if (BCDMA_CAP4_URCHAN_CNT(cap)) {
		ud->rchan_tpl.levels = 3;
		ud->rchan_tpl.start_idx[1] = BCDMA_CAP4_URCHAN_CNT(cap);
		ud->rchan_tpl.start_idx[0] = BCDMA_CAP4_HRCHAN_CNT(cap);
	} else if (BCDMA_CAP4_HRCHAN_CNT(cap)) {
		ud->rchan_tpl.levels = 2;
		ud->rchan_tpl.start_idx[0] = BCDMA_CAP4_HRCHAN_CNT(cap);
	} else {
		ud->rchan_tpl.levels = 1;
	}

	if (BCDMA_CAP4_UTCHAN_CNT(cap)) {
		ud->tchan_tpl.levels = 3;
		ud->tchan_tpl.start_idx[1] = BCDMA_CAP4_UTCHAN_CNT(cap);
		ud->tchan_tpl.start_idx[0] = BCDMA_CAP4_HTCHAN_CNT(cap);
	} else if (BCDMA_CAP4_HTCHAN_CNT(cap)) {
		ud->tchan_tpl.levels = 2;
		ud->tchan_tpl.start_idx[0] = BCDMA_CAP4_HTCHAN_CNT(cap);
	} else {
		ud->tchan_tpl.levels = 1;
	}

	ud->bchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->bchan_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
	ud->bchans = devm_kcalloc(dev, ud->bchan_cnt, sizeof(*ud->bchans),
				  GFP_KERNEL);
	ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
	ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
				  GFP_KERNEL);
	ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
	ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
				  GFP_KERNEL);
	/* BCDMA does not really have flows, but the driver expects them */
	ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rchan_cnt),
					sizeof(unsigned long),
					GFP_KERNEL);
	ud->rflows = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rflows),
				  GFP_KERNEL);

	if (!ud->bchan_map || !ud->tchan_map || !ud->rchan_map ||
	    !ud->rflow_in_use || !ud->bchans || !ud->tchans || !ud->rchans ||
	    !ud->rflows)
		return -ENOMEM;

	/* Get resource ranges from tisci */
	for (i = 0; i < RM_RANGE_LAST; i++) {
		if (i == RM_RANGE_RFLOW || i == RM_RANGE_TFLOW)
			continue;
		if (i == RM_RANGE_BCHAN && ud->bchan_cnt == 0)
			continue;
		if (i == RM_RANGE_TCHAN && ud->tchan_cnt == 0)
			continue;
		if (i == RM_RANGE_RCHAN && ud->rchan_cnt == 0)
			continue;

		tisci_rm->rm_ranges[i] =
			devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
						    tisci_rm->tisci_dev_id,
						    (char *)range_names[i]);
	}

	irq_res.sets = 0;

	/* bchan ranges */
	if (ud->bchan_cnt) {
		rm_res = tisci_rm->rm_ranges[RM_RANGE_BCHAN];
		if (IS_ERR(rm_res)) {
			bitmap_zero(ud->bchan_map, ud->bchan_cnt);
			irq_res.sets++;
		} else {
			bitmap_fill(ud->bchan_map, ud->bchan_cnt);
			for (i = 0; i < rm_res->sets; i++)
				udma_mark_resource_ranges(ud, ud->bchan_map,
							  &rm_res->desc[i],
							  "bchan");
			irq_res.sets += rm_res->sets;
		}
	}

	/* tchan ranges */
	if (ud->tchan_cnt) {
		rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
		if (IS_ERR(rm_res)) {
			bitmap_zero(ud->tchan_map, ud->tchan_cnt);
			irq_res.sets += 2;
		} else {
			bitmap_fill(ud->tchan_map, ud->tchan_cnt);
			for (i = 0; i < rm_res->sets; i++)
				udma_mark_resource_ranges(ud, ud->tchan_map,
							  &rm_res->desc[i],
							  "tchan");
			irq_res.sets += rm_res->sets * 2;
		}
	}

	/* rchan ranges */
	if (ud->rchan_cnt) {
		rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
		if (IS_ERR(rm_res)) {
			bitmap_zero(ud->rchan_map, ud->rchan_cnt);
			irq_res.sets += 2;
		} else {
			bitmap_fill(ud->rchan_map, ud->rchan_cnt);
			for (i = 0; i < rm_res->sets; i++)
				udma_mark_resource_ranges(ud, ud->rchan_map,
							  &rm_res->desc[i],
							  "rchan");
			irq_res.sets += rm_res->sets * 2;
		}
	}

	irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL);
	if (!irq_res.desc)
		return -ENOMEM;
	if (ud->bchan_cnt) {
		rm_res = tisci_rm->rm_ranges[RM_RANGE_BCHAN];
		if (IS_ERR(rm_res)) {
			irq_res.desc[0].start = oes->bcdma_bchan_ring;
			irq_res.desc[0].num = ud->bchan_cnt;
			i = 1;
		} else {
			for (i = 0; i < rm_res->sets; i++) {
				irq_res.desc[i].start = rm_res->desc[i].start +
							oes->bcdma_bchan_ring;
				irq_res.desc[i].num = rm_res->desc[i].num;
			}
		}
	}
	if (ud->tchan_cnt) {
		rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
		if (IS_ERR(rm_res)) {
			irq_res.desc[i].start = oes->bcdma_tchan_data;
			irq_res.desc[i].num = ud->tchan_cnt;
			irq_res.desc[i + 1].start = oes->bcdma_tchan_ring;
			irq_res.desc[i + 1].num = ud->tchan_cnt;
			i += 2;
		} else {
			for (j = 0; j < rm_res->sets; j++, i += 2) {
				irq_res.desc[i].start = rm_res->desc[j].start +
							oes->bcdma_tchan_data;
				irq_res.desc[i].num = rm_res->desc[j].num;

				irq_res.desc[i + 1].start = rm_res->desc[j].start +
							    oes->bcdma_tchan_ring;
				irq_res.desc[i + 1].num = rm_res->desc[j].num;
			}
		}
	}
	if (ud->rchan_cnt) {
		rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
		if (IS_ERR(rm_res)) {
			irq_res.desc[i].start = oes->bcdma_rchan_data;
			irq_res.desc[i].num = ud->rchan_cnt;
			irq_res.desc[i + 1].start = oes->bcdma_rchan_ring;
			irq_res.desc[i + 1].num = ud->rchan_cnt;
			i += 2;
		} else {
			for (j = 0; j < rm_res->sets; j++, i += 2) {
				irq_res.desc[i].start = rm_res->desc[j].start +
							oes->bcdma_rchan_data;
				irq_res.desc[i].num = rm_res->desc[j].num;

				irq_res.desc[i + 1].start = rm_res->desc[j].start +
							    oes->bcdma_rchan_ring;
				irq_res.desc[i + 1].num = rm_res->desc[j].num;
			}
		}
	}

	ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
	kfree(irq_res.desc);
	if (ret) {
		dev_err(ud->dev, "Failed to allocate MSI interrupts\n");
		return ret;
	}

	return 0;
}

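/*
 * PKTDMA resource setup: channels are paired with tflows/rflows, so the MSI
 * events are allocated against the flow ranges rather than the channels.
 */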
static int pktdma_setup_resources(struct udma_dev *ud)
{
	int ret, i, j;
	struct device *dev = ud->dev;
	struct ti_sci_resource *rm_res, irq_res;
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
	const struct udma_oes_offsets *oes = &ud->soc_data->oes;
	u32 cap3;

	/* Set up the throughput level start indexes */
	cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c);
	if (UDMA_CAP3_UCHAN_CNT(cap3)) {
		ud->tchan_tpl.levels = 3;
		ud->tchan_tpl.start_idx[1] = UDMA_CAP3_UCHAN_CNT(cap3);
		ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3);
	} else if (UDMA_CAP3_HCHAN_CNT(cap3)) {
		ud->tchan_tpl.levels = 2;
		ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3);
	} else {
		ud->tchan_tpl.levels = 1;
	}

	ud->rchan_tpl.levels = ud->tchan_tpl.levels;
	ud->rchan_tpl.start_idx[0] = ud->tchan_tpl.start_idx[0];
	ud->rchan_tpl.start_idx[1] = ud->tchan_tpl.start_idx[1];

	ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
	ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
				  GFP_KERNEL);
	ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
	ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
				  GFP_KERNEL);
	ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt),
					sizeof(unsigned long),
					GFP_KERNEL);
	ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows),
				  GFP_KERNEL);
	ud->tflow_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tflow_cnt),
					   sizeof(unsigned long), GFP_KERNEL);

	if (!ud->tchan_map || !ud->rchan_map || !ud->tflow_map || !ud->tchans ||
	    !ud->rchans || !ud->rflows || !ud->rflow_in_use)
		return -ENOMEM;

	/* Get resource ranges from tisci */
	for (i = 0; i < RM_RANGE_LAST; i++) {
		if (i == RM_RANGE_BCHAN)
			continue;

		tisci_rm->rm_ranges[i] =
			devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
						    tisci_rm->tisci_dev_id,
						    (char *)range_names[i]);
	}

	/* tchan ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
	if (IS_ERR(rm_res)) {
		bitmap_zero(ud->tchan_map, ud->tchan_cnt);
	} else {
		bitmap_fill(ud->tchan_map, ud->tchan_cnt);
		for (i = 0; i < rm_res->sets; i++)
			udma_mark_resource_ranges(ud, ud->tchan_map,
						  &rm_res->desc[i], "tchan");
	}

	/* rchan ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
	if (IS_ERR(rm_res)) {
		bitmap_zero(ud->rchan_map, ud->rchan_cnt);
	} else {
		bitmap_fill(ud->rchan_map, ud->rchan_cnt);
		for (i = 0; i < rm_res->sets; i++)
			udma_mark_resource_ranges(ud, ud->rchan_map,
						  &rm_res->desc[i], "rchan");
	}

	/* rflow ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
	if (IS_ERR(rm_res)) {
		/* all rflows are assigned exclusively to Linux */
		bitmap_zero(ud->rflow_in_use, ud->rflow_cnt);
		irq_res.sets = 1;
	} else {
		bitmap_fill(ud->rflow_in_use, ud->rflow_cnt);
		for (i = 0; i < rm_res->sets; i++)
			udma_mark_resource_ranges(ud, ud->rflow_in_use,
						  &rm_res->desc[i], "rflow");
		irq_res.sets = rm_res->sets;
	}

	/* tflow ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_TFLOW];
	if (IS_ERR(rm_res)) {
		/* all tflows are assigned exclusively to Linux */
		bitmap_zero(ud->tflow_map, ud->tflow_cnt);
		irq_res.sets++;
	} else {
		bitmap_fill(ud->tflow_map, ud->tflow_cnt);
		for (i = 0; i < rm_res->sets; i++)
			udma_mark_resource_ranges(ud, ud->tflow_map,
						  &rm_res->desc[i], "tflow");
		irq_res.sets += rm_res->sets;
	}

	irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL);
	if (!irq_res.desc)
		return -ENOMEM;
	rm_res = tisci_rm->rm_ranges[RM_RANGE_TFLOW];
	if (IS_ERR(rm_res)) {
		irq_res.desc[0].start = oes->pktdma_tchan_flow;
		irq_res.desc[0].num = ud->tflow_cnt;
		i = 1;
	} else {
		for (i = 0; i < rm_res->sets; i++) {
			irq_res.desc[i].start = rm_res->desc[i].start +
						oes->pktdma_tchan_flow;
			irq_res.desc[i].num = rm_res->desc[i].num;
		}
	}
	rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
	if (IS_ERR(rm_res)) {
		irq_res.desc[i].start = oes->pktdma_rchan_flow;
		irq_res.desc[i].num = ud->rflow_cnt;
	} else {
		for (j = 0; j < rm_res->sets; j++, i++) {
			irq_res.desc[i].start = rm_res->desc[j].start +
						oes->pktdma_rchan_flow;
			irq_res.desc[i].num = rm_res->desc[j].num;
		}
	}
	ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
	kfree(irq_res.desc);
	if (ret) {
		dev_err(ud->dev, "Failed to allocate MSI interrupts\n");
		return ret;
	}

	return 0;
}

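/*
 * Dispatch to the per-type resource setup, then count how many channels are
 * left available to this host and allocate the udma_chan array for them.
 */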
static int setup_resources(struct udma_dev *ud)
{
	struct device *dev = ud->dev;
	int ch_count, ret;

	switch (ud->match_data->type) {
	case DMA_TYPE_UDMA:
		ret = udma_setup_resources(ud);
		break;
	case DMA_TYPE_BCDMA:
		ret = bcdma_setup_resources(ud);
		break;
	case DMA_TYPE_PKTDMA:
		ret = pktdma_setup_resources(ud);
		break;
	default:
		return -EINVAL;
	}

	if (ret)
		return ret;

	ch_count = ud->bchan_cnt + ud->tchan_cnt + ud->rchan_cnt;
	if (ud->bchan_cnt)
		ch_count -= bitmap_weight(ud->bchan_map, ud->bchan_cnt);
	ch_count -= bitmap_weight(ud->tchan_map, ud->tchan_cnt);
	ch_count -= bitmap_weight(ud->rchan_map, ud->rchan_cnt);
	if (!ch_count)
		return -ENODEV;

	ud->channels = devm_kcalloc(dev, ch_count, sizeof(*ud->channels),
				    GFP_KERNEL);
	if (!ud->channels)
		return -ENOMEM;

	switch (ud->match_data->type) {
	case DMA_TYPE_UDMA:
		dev_info(dev,
			 "Channels: %d (tchan: %u, rchan: %u, gp-rflow: %u)\n",
			 ch_count,
			 ud->tchan_cnt - bitmap_weight(ud->tchan_map,
						       ud->tchan_cnt),
			 ud->rchan_cnt - bitmap_weight(ud->rchan_map,
						       ud->rchan_cnt),
			 ud->rflow_cnt - bitmap_weight(ud->rflow_gp_map,
						       ud->rflow_cnt));
		break;
	case DMA_TYPE_BCDMA:
		dev_info(dev,
			 "Channels: %d (bchan: %u, tchan: %u, rchan: %u)\n",
			 ch_count,
			 ud->bchan_cnt - bitmap_weight(ud->bchan_map,
						       ud->bchan_cnt),
			 ud->tchan_cnt - bitmap_weight(ud->tchan_map,
						       ud->tchan_cnt),
			 ud->rchan_cnt - bitmap_weight(ud->rchan_map,
						       ud->rchan_cnt));
		break;
	case DMA_TYPE_PKTDMA:
		dev_info(dev,
			 "Channels: %d (tchan: %u, rchan: %u)\n",
			 ch_count,
			 ud->tchan_cnt - bitmap_weight(ud->tchan_map,
						       ud->tchan_cnt),
			 ud->rchan_cnt - bitmap_weight(ud->rchan_map,
						       ud->rchan_cnt));
		break;
	default:
		break;
	}

	return ch_count;
}

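/*
 * Prepare the descriptors (one TR mode, one packet mode) used to flush data
 * from an RX channel into a scratch buffer during channel teardown.
 */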
static int udma_setup_rx_flush(struct udma_dev *ud)
{
	struct udma_rx_flush *rx_flush = &ud->rx_flush;
	struct cppi5_desc_hdr_t *tr_desc;
	struct cppi5_tr_type1_t *tr_req;
	struct cppi5_host_desc_t *desc;
	struct device *dev = ud->dev;
	struct udma_hwdesc *hwdesc;
	size_t tr_size;

	/* Allocate 1K buffer for discarded data on RX channel teardown */
	rx_flush->buffer_size = SZ_1K;
	rx_flush->buffer_vaddr = devm_kzalloc(dev, rx_flush->buffer_size,
					      GFP_KERNEL);
	if (!rx_flush->buffer_vaddr)
		return -ENOMEM;

	rx_flush->buffer_paddr = dma_map_single(dev, rx_flush->buffer_vaddr,
						rx_flush->buffer_size,
						DMA_TO_DEVICE);
	if (dma_mapping_error(dev, rx_flush->buffer_paddr))
		return -ENOMEM;

	/* Set up descriptor to be used for TR mode */
	hwdesc = &rx_flush->hwdescs[0];
	tr_size = sizeof(struct cppi5_tr_type1_t);
	hwdesc->cppi5_desc_size = cppi5_trdesc_calc_size(tr_size, 1);
	hwdesc->cppi5_desc_size = ALIGN(hwdesc->cppi5_desc_size,
					ud->desc_align);

	hwdesc->cppi5_desc_vaddr = devm_kzalloc(dev, hwdesc->cppi5_desc_size,
						GFP_KERNEL);
	if (!hwdesc->cppi5_desc_vaddr)
		return -ENOMEM;

	hwdesc->cppi5_desc_paddr = dma_map_single(dev, hwdesc->cppi5_desc_vaddr,
						  hwdesc->cppi5_desc_size,
						  DMA_TO_DEVICE);
	if (dma_mapping_error(dev, hwdesc->cppi5_desc_paddr))
		return -ENOMEM;

	/* Start of the TR req records */
	hwdesc->tr_req_base = hwdesc->cppi5_desc_vaddr + tr_size;
	/* Start address of the TR response array */
	hwdesc->tr_resp_base = hwdesc->tr_req_base + tr_size;

	tr_desc = hwdesc->cppi5_desc_vaddr;
	cppi5_trdesc_init(tr_desc, 1, tr_size, 0, 0);
	cppi5_desc_set_pktids(tr_desc, 0, CPPI5_INFO1_DESC_FLOWID_DEFAULT);
	cppi5_desc_set_retpolicy(tr_desc, 0, 0);

	tr_req = hwdesc->tr_req_base;
	cppi5_tr_init(&tr_req->flags, CPPI5_TR_TYPE1, false, false,
		      CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
	cppi5_tr_csf_set(&tr_req->flags, CPPI5_TR_CSF_SUPR_EVT);

	tr_req->addr = rx_flush->buffer_paddr;
	tr_req->icnt0 = rx_flush->buffer_size;
	tr_req->icnt1 = 1;

	dma_sync_single_for_device(dev, hwdesc->cppi5_desc_paddr,
				   hwdesc->cppi5_desc_size, DMA_TO_DEVICE);

	/* Set up descriptor to be used for packet mode */
	hwdesc = &rx_flush->hwdescs[1];
	hwdesc->cppi5_desc_size = ALIGN(sizeof(struct cppi5_host_desc_t) +
					CPPI5_INFO0_HDESC_EPIB_SIZE +
					CPPI5_INFO0_HDESC_PSDATA_MAX_SIZE,
					ud->desc_align);

	hwdesc->cppi5_desc_vaddr = devm_kzalloc(dev, hwdesc->cppi5_desc_size,
						GFP_KERNEL);
	if (!hwdesc->cppi5_desc_vaddr)
		return -ENOMEM;

	hwdesc->cppi5_desc_paddr = dma_map_single(dev, hwdesc->cppi5_desc_vaddr,
						  hwdesc->cppi5_desc_size,
						  DMA_TO_DEVICE);
	if (dma_mapping_error(dev, hwdesc->cppi5_desc_paddr))
		return -ENOMEM;

	desc = hwdesc->cppi5_desc_vaddr;
	cppi5_hdesc_init(desc, 0, 0);
	cppi5_desc_set_pktids(&desc->hdr, 0, CPPI5_INFO1_DESC_FLOWID_DEFAULT);
	cppi5_desc_set_retpolicy(&desc->hdr, 0, 0);

	cppi5_hdesc_attach_buf(desc,
			       rx_flush->buffer_paddr, rx_flush->buffer_size,
			       rx_flush->buffer_paddr, rx_flush->buffer_size);

	dma_sync_single_for_device(dev, hwdesc->cppi5_desc_paddr,
				   hwdesc->cppi5_desc_size, DMA_TO_DEVICE);
	return 0;
}

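/* debugfs: print a one-line summary for each channel that is in use */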
#ifdef CONFIG_DEBUG_FS
static void udma_dbg_summary_show_chan(struct seq_file *s,
				       struct dma_chan *chan)
{
	struct udma_chan *uc = to_udma_chan(chan);
	struct udma_chan_config *ucc = &uc->config;

	seq_printf(s, " %-13s| %s", dma_chan_name(chan),
		   chan->dbg_client_name ?: "in-use");
	if (ucc->tr_trigger_type)
		seq_puts(s, " (triggered, ");
	else
		seq_printf(s, " (%s, ",
			   dmaengine_get_direction_text(uc->config.dir));

	switch (uc->config.dir) {
	case DMA_MEM_TO_MEM:
		if (uc->ud->match_data->type == DMA_TYPE_BCDMA) {
			seq_printf(s, "bchan%d)\n", uc->bchan->id);
			return;
		}

		seq_printf(s, "chan%d pair [0x%04x -> 0x%04x], ", uc->tchan->id,
			   ucc->src_thread, ucc->dst_thread);
		break;
	case DMA_DEV_TO_MEM:
		seq_printf(s, "rchan%d [0x%04x -> 0x%04x], ", uc->rchan->id,
			   ucc->src_thread, ucc->dst_thread);
		if (uc->ud->match_data->type == DMA_TYPE_PKTDMA)
			seq_printf(s, "rflow%d, ", uc->rflow->id);
		break;
	case DMA_MEM_TO_DEV:
		seq_printf(s, "tchan%d [0x%04x -> 0x%04x], ", uc->tchan->id,
			   ucc->src_thread, ucc->dst_thread);
		if (uc->ud->match_data->type == DMA_TYPE_PKTDMA)
			seq_printf(s, "tflow%d, ", uc->tchan->tflow_id);
		break;
	default:
		seq_printf(s, ")\n");
		return;
	}

	if (ucc->ep_type == PSIL_EP_NATIVE) {
		seq_printf(s, "PSI-L Native");
		if (ucc->metadata_size) {
			seq_printf(s, "[%s", ucc->needs_epib ? " EPIB" : "");
			if (ucc->psd_size)
				seq_printf(s, " PSDsize:%u", ucc->psd_size);
			seq_printf(s, " ]");
		}
	} else {
		seq_printf(s, "PDMA");
		if (ucc->enable_acc32 || ucc->enable_burst)
			seq_printf(s, "[%s%s ]",
				   ucc->enable_acc32 ? " ACC32" : "",
				   ucc->enable_burst ? " BURST" : "");
	}

	seq_printf(s, ", %s)\n", ucc->pkt_mode ? "Packet mode" : "TR mode");
}

static void udma_dbg_summary_show(struct seq_file *s,
				  struct dma_device *dma_dev)
{
	struct dma_chan *chan;

	list_for_each_entry(chan, &dma_dev->channels, device_node) {
		if (chan->client_count)
			udma_dbg_summary_show_chan(s, chan);
	}
}
#endif /* CONFIG_DEBUG_FS */

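/*
 * Derive the memcpy alignment advertised to dmaengine clients from the burst
 * size of the highest throughput level usable for MEM_TO_MEM (bchan on
 * BCDMA, tchan otherwise).
 */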
static enum dmaengine_alignment udma_get_copy_align(struct udma_dev *ud)
{
	const struct udma_match_data *match_data = ud->match_data;
	u8 tpl;

	if (!match_data->enable_memcpy_support)
		return DMAENGINE_ALIGN_8_BYTES;

	/* Get the highest TPL level the device supports for memcpy */
	if (ud->bchan_cnt)
		tpl = udma_get_chan_tpl_index(&ud->bchan_tpl, 0);
	else if (ud->tchan_cnt)
		tpl = udma_get_chan_tpl_index(&ud->tchan_tpl, 0);
	else
		return DMAENGINE_ALIGN_8_BYTES;

	switch (match_data->burst_size[tpl]) {
	case TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_256_BYTES:
		return DMAENGINE_ALIGN_256_BYTES;
	case TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_128_BYTES:
		return DMAENGINE_ALIGN_128_BYTES;
	case TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES:
		fallthrough;
	default:
		return DMAENGINE_ALIGN_64_BYTES;
	}
}

#define TI_UDMAC_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
				 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))

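/*
 * Probe: read the SoC/compatible specific data, map the MMRs, acquire the
 * TISCI resource manager and ring accelerator handles, set up the dmaengine
 * device callbacks, initialize every channel and register the controller
 * with both the dmaengine core and the DT DMA helpers.
 */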
static int udma_probe(struct platform_device *pdev)
{
	struct device_node *navss_node = pdev->dev.parent->of_node;
	const struct soc_device_attribute *soc;
	struct device *dev = &pdev->dev;
	struct udma_dev *ud;
	const struct of_device_id *match;
	int i, ret;
	int ch_count;

	ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(48));
	if (ret)
		dev_err(dev, "failed to set DMA mask\n");

	ud = devm_kzalloc(dev, sizeof(*ud), GFP_KERNEL);
	if (!ud)
		return -ENOMEM;

	match = of_match_node(udma_of_match, dev->of_node);
	if (!match) {
		dev_err(dev, "No compatible match found\n");
		return -ENODEV;
	}
	ud->match_data = match->data;

	soc = soc_device_match(k3_soc_devices);
	if (!soc) {
		dev_err(dev, "No compatible SoC found\n");
		return -ENODEV;
	}
	ud->soc_data = soc->data;

	ret = udma_get_mmrs(pdev, ud);
	if (ret)
		return ret;

	ud->tisci_rm.tisci = ti_sci_get_by_phandle(dev->of_node, "ti,sci");
	if (IS_ERR(ud->tisci_rm.tisci))
		return PTR_ERR(ud->tisci_rm.tisci);

	ret = of_property_read_u32(dev->of_node, "ti,sci-dev-id",
				   &ud->tisci_rm.tisci_dev_id);
	if (ret) {
		dev_err(dev, "ti,sci-dev-id read failure %d\n", ret);
		return ret;
	}
	pdev->id = ud->tisci_rm.tisci_dev_id;

	ret = of_property_read_u32(navss_node, "ti,sci-dev-id",
				   &ud->tisci_rm.tisci_navss_dev_id);
	if (ret) {
		dev_err(dev, "NAVSS ti,sci-dev-id read failure %d\n", ret);
		return ret;
	}

	if (ud->match_data->type == DMA_TYPE_UDMA) {
		ret = of_property_read_u32(dev->of_node, "ti,udma-atype",
					   &ud->atype);
		if (!ret && ud->atype > 2) {
			dev_err(dev, "Invalid atype: %u\n", ud->atype);
			return -EINVAL;
		}
	} else {
		ret = of_property_read_u32(dev->of_node, "ti,asel",
					   &ud->asel);
		if (!ret && ud->asel > 15) {
			dev_err(dev, "Invalid asel: %u\n", ud->asel);
			return -EINVAL;
		}
	}

	ud->tisci_rm.tisci_udmap_ops = &ud->tisci_rm.tisci->ops.rm_udmap_ops;
	ud->tisci_rm.tisci_psil_ops = &ud->tisci_rm.tisci->ops.rm_psil_ops;

	if (ud->match_data->type == DMA_TYPE_UDMA) {
		ud->ringacc = of_k3_ringacc_get_by_phandle(dev->of_node, "ti,ringacc");
	} else {
		struct k3_ringacc_init_data ring_init_data;

		ring_init_data.tisci = ud->tisci_rm.tisci;
		ring_init_data.tisci_dev_id = ud->tisci_rm.tisci_dev_id;
		if (ud->match_data->type == DMA_TYPE_BCDMA) {
			ring_init_data.num_rings = ud->bchan_cnt +
						   ud->tchan_cnt +
						   ud->rchan_cnt;
		} else {
			ring_init_data.num_rings = ud->rflow_cnt +
						   ud->tflow_cnt;
		}

		ud->ringacc = k3_ringacc_dmarings_init(pdev, &ring_init_data);
	}

	if (IS_ERR(ud->ringacc))
		return PTR_ERR(ud->ringacc);

	dev->msi.domain = of_msi_get_domain(dev, dev->of_node,
					    DOMAIN_BUS_TI_SCI_INTA_MSI);
	if (!dev->msi.domain) {
		dev_err(dev, "Failed to get MSI domain\n");
		return -EPROBE_DEFER;
	}

	dma_cap_set(DMA_SLAVE, ud->ddev.cap_mask);
	/* cyclic operation is not supported via PKTDMA */
	if (ud->match_data->type != DMA_TYPE_PKTDMA) {
		dma_cap_set(DMA_CYCLIC, ud->ddev.cap_mask);
		ud->ddev.device_prep_dma_cyclic = udma_prep_dma_cyclic;
	}

	ud->ddev.device_config = udma_slave_config;
	ud->ddev.device_prep_slave_sg = udma_prep_slave_sg;
	ud->ddev.device_issue_pending = udma_issue_pending;
	ud->ddev.device_tx_status = udma_tx_status;
	ud->ddev.device_pause = udma_pause;
	ud->ddev.device_resume = udma_resume;
	ud->ddev.device_terminate_all = udma_terminate_all;
	ud->ddev.device_synchronize = udma_synchronize;
#ifdef CONFIG_DEBUG_FS
	ud->ddev.dbg_summary_show = udma_dbg_summary_show;
#endif

	switch (ud->match_data->type) {
	case DMA_TYPE_UDMA:
		ud->ddev.device_alloc_chan_resources =
					udma_alloc_chan_resources;
		break;
	case DMA_TYPE_BCDMA:
		ud->ddev.device_alloc_chan_resources =
					bcdma_alloc_chan_resources;
		ud->ddev.device_router_config = bcdma_router_config;
		break;
	case DMA_TYPE_PKTDMA:
		ud->ddev.device_alloc_chan_resources =
					pktdma_alloc_chan_resources;
		break;
	default:
		return -EINVAL;
	}
	ud->ddev.device_free_chan_resources = udma_free_chan_resources;

	ud->ddev.src_addr_widths = TI_UDMAC_BUSWIDTHS;
	ud->ddev.dst_addr_widths = TI_UDMAC_BUSWIDTHS;
	ud->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	ud->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	ud->ddev.desc_metadata_modes = DESC_METADATA_CLIENT |
				       DESC_METADATA_ENGINE;
	if (ud->match_data->enable_memcpy_support &&
	    !(ud->match_data->type == DMA_TYPE_BCDMA && ud->bchan_cnt == 0)) {
		dma_cap_set(DMA_MEMCPY, ud->ddev.cap_mask);
		ud->ddev.device_prep_dma_memcpy = udma_prep_dma_memcpy;
		ud->ddev.directions |= BIT(DMA_MEM_TO_MEM);
	}

	ud->ddev.dev = dev;
	ud->dev = dev;
	ud->psil_base = ud->match_data->psil_base;

	INIT_LIST_HEAD(&ud->ddev.channels);
	INIT_LIST_HEAD(&ud->desc_to_purge);

	ch_count = setup_resources(ud);
	if (ch_count <= 0)
		return ch_count;

	spin_lock_init(&ud->lock);
	INIT_WORK(&ud->purge_work, udma_purge_desc_work);

	ud->desc_align = 64;
	if (ud->desc_align < dma_get_cache_alignment())
		ud->desc_align = dma_get_cache_alignment();

	ret = udma_setup_rx_flush(ud);
	if (ret)
		return ret;

	for (i = 0; i < ud->bchan_cnt; i++) {
		struct udma_bchan *bchan = &ud->bchans[i];

		bchan->id = i;
		bchan->reg_rt = ud->mmrs[MMR_BCHANRT] + i * 0x1000;
	}

	for (i = 0; i < ud->tchan_cnt; i++) {
		struct udma_tchan *tchan = &ud->tchans[i];

		tchan->id = i;
		tchan->reg_rt = ud->mmrs[MMR_TCHANRT] + i * 0x1000;
	}

	for (i = 0; i < ud->rchan_cnt; i++) {
		struct udma_rchan *rchan = &ud->rchans[i];

		rchan->id = i;
		rchan->reg_rt = ud->mmrs[MMR_RCHANRT] + i * 0x1000;
	}

	for (i = 0; i < ud->rflow_cnt; i++) {
		struct udma_rflow *rflow = &ud->rflows[i];

		rflow->id = i;
	}

	for (i = 0; i < ch_count; i++) {
		struct udma_chan *uc = &ud->channels[i];

		uc->ud = ud;
		uc->vc.desc_free = udma_desc_free;
		uc->id = i;
		uc->bchan = NULL;
		uc->tchan = NULL;
		uc->rchan = NULL;
		uc->config.remote_thread_id = -1;
		uc->config.mapped_channel_id = -1;
		uc->config.default_flow_id = -1;
		uc->config.dir = DMA_MEM_TO_MEM;
		uc->name = devm_kasprintf(dev, GFP_KERNEL, "%s chan%d",
					  dev_name(dev), i);

		vchan_init(&uc->vc, &ud->ddev);
		/* Use custom vchan completion handling */
		tasklet_setup(&uc->vc.task, udma_vchan_complete);
		init_completion(&uc->teardown_completed);
		INIT_DELAYED_WORK(&uc->tx_drain.work, udma_check_tx_completion);
	}

	/* Configure the copy_align to the maximum burst size the device supports */
	ud->ddev.copy_align = udma_get_copy_align(ud);

	ret = dma_async_device_register(&ud->ddev);
	if (ret) {
		dev_err(dev, "failed to register slave DMA engine: %d\n", ret);
		return ret;
	}

	platform_set_drvdata(pdev, ud);

	ret = of_dma_controller_register(dev->of_node, udma_of_xlate, ud);
	if (ret) {
		dev_err(dev, "failed to register of_dma controller\n");
		dma_async_device_unregister(&ud->ddev);
	}

	return ret;
}

static struct platform_driver udma_driver = {
	.driver = {
		.name	= "ti-udma",
		.of_match_table = udma_of_match,
		.suppress_bind_attrs = true,
	},
	.probe		= udma_probe,
};

module_platform_driver(udma_driver);
MODULE_LICENSE("GPL v2");

/* Private interfaces to UDMA */
#include "k3-udma-private.c"