// SPDX-License-Identifier: GPL-2.0
/*
 * TI K3 NAVSS Ring Accelerator subsystem driver
 *
 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
 */

#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/sys_soc.h>
#include <linux/dma/ti-cppi5.h>
#include <linux/soc/ti/k3-ringacc.h>
#include <linux/soc/ti/ti_sci_protocol.h>
#include <linux/soc/ti/ti_sci_inta_msi.h>
#include <linux/of_irq.h>
#include <linux/irqdomain.h>

static LIST_HEAD(k3_ringacc_list);
static DEFINE_MUTEX(k3_ringacc_list_lock);

#define K3_RINGACC_CFG_RING_SIZE_ELCNT_MASK	GENMASK(19, 0)
#define K3_DMARING_CFG_RING_SIZE_ELCNT_MASK	GENMASK(15, 0)

/**
 * struct k3_ring_rt_regs - The RA realtime Control/Status Registers region
 *
 * @resv_16: Reserved
 * @db: Ring Doorbell Register
 * @resv_4: Reserved
 * @occ: Ring Occupancy Register
 * @indx: Ring Current Index Register
 * @hwocc: Ring Hardware Occupancy Register
 * @hwindx: Ring Hardware Current Index Register
 */
struct k3_ring_rt_regs {
	u32	resv_16[4];
	u32	db;
	u32	resv_4[1];
	u32	occ;
	u32	indx;
	u32	hwocc;
	u32	hwindx;
};

#define K3_RINGACC_RT_REGS_STEP			0x1000
#define K3_DMARING_RT_REGS_STEP			0x2000
#define K3_DMARING_RT_REGS_REVERSE_OFS		0x1000
#define K3_RINGACC_RT_OCC_MASK			GENMASK(20, 0)
#define K3_DMARING_RT_OCC_TDOWN_COMPLETE	BIT(31)
#define K3_DMARING_RT_DB_ENTRY_MASK		GENMASK(7, 0)
#define K3_DMARING_RT_DB_TDOWN_ACK		BIT(31)

/**
 * struct k3_ring_fifo_regs - The Ring Accelerator Queues Registers region
 *
 * @head_data: Ring Head Entry Data Registers
 * @tail_data: Ring Tail Entry Data Registers
 * @peek_head_data: Ring Peek Head Entry Data Regs
 * @peek_tail_data: Ring Peek Tail Entry Data Regs
 */
struct k3_ring_fifo_regs {
	u32	head_data[128];
	u32	tail_data[128];
	u32	peek_head_data[128];
	u32	peek_tail_data[128];
};

/**
 * struct k3_ringacc_proxy_gcfg_regs - RA Proxy Global Config MMIO Region
 *
 * @revision: Revision Register
 * @config: Config Register
 */
struct k3_ringacc_proxy_gcfg_regs {
	u32	revision;
	u32	config;
};

#define K3_RINGACC_PROXY_CFG_THREADS_MASK	GENMASK(15, 0)

/**
 * struct k3_ringacc_proxy_target_regs - Proxy Datapath MMIO Region
 *
 * @control: Proxy Control Register
 * @status: Proxy Status Register
 * @resv_512: Reserved
 * @data: Proxy Data Register
 */
struct k3_ringacc_proxy_target_regs {
	u32	control;
	u32	status;
	u8	resv_512[504];
	u32	data[128];
};

#define K3_RINGACC_PROXY_TARGET_STEP	0x1000
#define K3_RINGACC_PROXY_NOT_USED	(-1)

enum k3_ringacc_proxy_access_mode {
	PROXY_ACCESS_MODE_HEAD = 0,
	PROXY_ACCESS_MODE_TAIL = 1,
	PROXY_ACCESS_MODE_PEEK_HEAD = 2,
	PROXY_ACCESS_MODE_PEEK_TAIL = 3,
};

#define K3_RINGACC_FIFO_WINDOW_SIZE_BYTES	(512U)
#define K3_RINGACC_FIFO_REGS_STEP		0x1000
#define K3_RINGACC_MAX_DB_RING_CNT		(127U)

struct k3_ring_ops {
	int (*push_tail)(struct k3_ring *ring, void *elm);
	int (*push_head)(struct k3_ring *ring, void *elm);
	int (*pop_tail)(struct k3_ring *ring, void *elm);
	int (*pop_head)(struct k3_ring *ring, void *elm);
};
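
/*
 * A note on sizes used throughout this driver: one ring element occupies
 * (4 << elm_size) bytes, i.e. the element size is encoded as a power-of-two
 * multiple of 4 bytes (K3_RINGACC_RING_ELSIZE_8 from
 * <linux/soc/ti/k3-ringacc.h> thus describes an 8-byte element). One of the
 * mode-specific ops tables below is installed by k3_ringacc_ring_cfg(), and
 * the exported push/pop helpers dispatch through ring->ops.
 */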

/**
 * struct k3_ring_state - Internal state tracking structure
 *
 * @free: Number of free entries
 * @occ: Occupancy
 * @windex: Write index
 * @rindex: Read index
 * @tdown_complete: Tear down complete state
 */
struct k3_ring_state {
	u32 free;
	u32 occ;
	u32 windex;
	u32 rindex;
	u32 tdown_complete:1;
};

/**
 * struct k3_ring - RA Ring descriptor
 *
 * @rt: Ring control/status registers
 * @fifos: Ring queues registers
 * @proxy: Ring Proxy Datapath registers
 * @ring_mem_dma: Ring buffer dma address
 * @ring_mem_virt: Ring buffer virt address
 * @ops: Ring operations
 * @size: Ring size in elements
 * @elm_size: Size of the ring element
 * @mode: Ring mode
 * @flags: flags
 * @state: Ring state
 * @ring_id: Ring Id
 * @parent: Pointer to the parent struct k3_ringacc
 * @use_count: Use count for shared rings
 * @proxy_id: RA Ring Proxy Id (only if @K3_RINGACC_RING_USE_PROXY)
 * @dma_dev: device to be used for DMA API (allocation, mapping)
 * @asel: Address Space Select value for physical addresses
 */
struct k3_ring {
	struct k3_ring_rt_regs __iomem *rt;
	struct k3_ring_fifo_regs __iomem *fifos;
	struct k3_ringacc_proxy_target_regs __iomem *proxy;
	dma_addr_t	ring_mem_dma;
	void		*ring_mem_virt;
	struct k3_ring_ops *ops;
	u32		size;
	enum k3_ring_size elm_size;
	enum k3_ring_mode mode;
	u32		flags;
#define K3_RING_FLAG_BUSY	BIT(1)
#define K3_RING_FLAG_SHARED	BIT(2)
#define K3_RING_FLAG_REVERSE	BIT(3)
	struct k3_ring_state state;
	u32		ring_id;
	struct k3_ringacc	*parent;
	u32		use_count;
	int		proxy_id;
	struct device	*dma_dev;
	u32		asel;
#define K3_ADDRESS_ASEL_SHIFT	48
};

struct k3_ringacc_ops {
	int (*init)(struct platform_device *pdev, struct k3_ringacc *ringacc);
};

/**
 * struct k3_ringacc - Rings accelerator descriptor
 *
 * @dev: pointer to RA device
 * @proxy_gcfg: RA proxy global config registers
 * @proxy_target_base: RA proxy datapath region
 * @num_rings: number of rings in RA
 * @rings_inuse: bitfield for ring usage tracking
 * @rm_gp_range: general purpose rings range from tisci
 * @dma_ring_reset_quirk: DMA reset workaround enable
 * @num_proxies: number of RA proxies
 * @proxy_inuse: bitfield for proxy usage tracking
 * @rings: array of ring descriptors (struct @k3_ring)
 * @list: list of RAs in the system
 * @req_lock: protect rings allocation
 * @tisci: pointer to ti-sci handle
 * @tisci_ring_ops: ti-sci rings ops
 * @tisci_dev_id: ti-sci device id
 * @ops: SoC specific ringacc operation
 * @dma_rings: indicate DMA ring (dual ring within BCDMA/PKTDMA)
 */
struct k3_ringacc {
	struct device *dev;
	struct k3_ringacc_proxy_gcfg_regs __iomem *proxy_gcfg;
	void __iomem *proxy_target_base;
	u32 num_rings; /* number of rings in Ringacc module */
	unsigned long *rings_inuse;
	struct ti_sci_resource *rm_gp_range;

	bool dma_ring_reset_quirk;
	u32 num_proxies;
	unsigned long *proxy_inuse;

	struct k3_ring *rings;
	struct list_head list;
	struct mutex req_lock; /* protect rings allocation */

	const struct ti_sci_handle *tisci;
	const struct ti_sci_rm_ringacc_ops *tisci_ring_ops;
	u32 tisci_dev_id;

	const struct k3_ringacc_ops *ops;
	bool dma_rings;
};

/**
 * struct k3_ringacc_soc_data - Rings accelerator SoC data
 *
 * @dma_ring_reset_quirk: DMA reset workaround enable
 */
struct k3_ringacc_soc_data {
	unsigned dma_ring_reset_quirk:1;
};
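
/*
 * dma_ring_reset_quirk is populated from SoC match data (see
 * k3_ringacc_socinfo[] further below, which currently enables it for AM65x
 * SR1.0 only); the workaround itself lives in k3_ringacc_ring_reset_dma().
 */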

static int k3_ringacc_ring_read_occ(struct k3_ring *ring)
{
	return readl(&ring->rt->occ) & K3_RINGACC_RT_OCC_MASK;
}

static void k3_ringacc_ring_update_occ(struct k3_ring *ring)
{
	u32 val;

	val = readl(&ring->rt->occ);

	ring->state.occ = val & K3_RINGACC_RT_OCC_MASK;
	ring->state.tdown_complete = !!(val & K3_DMARING_RT_OCC_TDOWN_COMPLETE);
}

static long k3_ringacc_ring_get_fifo_pos(struct k3_ring *ring)
{
	return K3_RINGACC_FIFO_WINDOW_SIZE_BYTES -
	       (4 << ring->elm_size);
}

static void *k3_ringacc_get_elm_addr(struct k3_ring *ring, u32 idx)
{
	return (ring->ring_mem_virt + idx * (4 << ring->elm_size));
}

static int k3_ringacc_ring_push_mem(struct k3_ring *ring, void *elem);
static int k3_ringacc_ring_pop_mem(struct k3_ring *ring, void *elem);
static int k3_dmaring_fwd_pop(struct k3_ring *ring, void *elem);
static int k3_dmaring_reverse_pop(struct k3_ring *ring, void *elem);

static struct k3_ring_ops k3_ring_mode_ring_ops = {
		.push_tail = k3_ringacc_ring_push_mem,
		.pop_head = k3_ringacc_ring_pop_mem,
};

static struct k3_ring_ops k3_dmaring_fwd_ops = {
		.push_tail = k3_ringacc_ring_push_mem,
		.pop_head = k3_dmaring_fwd_pop,
};

static struct k3_ring_ops k3_dmaring_reverse_ops = {
		/* Reverse side of the DMA ring can only be popped by SW */
		.pop_head = k3_dmaring_reverse_pop,
};

static int k3_ringacc_ring_push_io(struct k3_ring *ring, void *elem);
static int k3_ringacc_ring_pop_io(struct k3_ring *ring, void *elem);
static int k3_ringacc_ring_push_head_io(struct k3_ring *ring, void *elem);
static int k3_ringacc_ring_pop_tail_io(struct k3_ring *ring, void *elem);

static struct k3_ring_ops k3_ring_mode_msg_ops = {
		.push_tail = k3_ringacc_ring_push_io,
		.push_head = k3_ringacc_ring_push_head_io,
		.pop_tail = k3_ringacc_ring_pop_tail_io,
		.pop_head = k3_ringacc_ring_pop_io,
};

static int k3_ringacc_ring_push_head_proxy(struct k3_ring *ring, void *elem);
static int k3_ringacc_ring_push_tail_proxy(struct k3_ring *ring, void *elem);
static int k3_ringacc_ring_pop_head_proxy(struct k3_ring *ring, void *elem);
static int k3_ringacc_ring_pop_tail_proxy(struct k3_ring *ring, void *elem);

static struct k3_ring_ops k3_ring_mode_proxy_ops = {
		.push_tail = k3_ringacc_ring_push_tail_proxy,
		.push_head = k3_ringacc_ring_push_head_proxy,
		.pop_tail = k3_ringacc_ring_pop_tail_proxy,
		.pop_head = k3_ringacc_ring_pop_head_proxy,
};

static void k3_ringacc_ring_dump(struct k3_ring *ring)
{
	struct device *dev = ring->parent->dev;

	dev_dbg(dev, "dump ring: %d\n", ring->ring_id);
	dev_dbg(dev, "dump mem virt %p, dma %pad\n", ring->ring_mem_virt,
		&ring->ring_mem_dma);
	dev_dbg(dev, "dump elmsize %d, size %d, mode %d, proxy_id %d\n",
		ring->elm_size, ring->size, ring->mode, ring->proxy_id);
	dev_dbg(dev, "dump flags %08X\n", ring->flags);

	dev_dbg(dev, "dump ring_rt_regs: db%08x\n", readl(&ring->rt->db));
	dev_dbg(dev, "dump occ%08x\n", readl(&ring->rt->occ));
	dev_dbg(dev, "dump indx%08x\n", readl(&ring->rt->indx));
	dev_dbg(dev, "dump hwocc%08x\n", readl(&ring->rt->hwocc));
	dev_dbg(dev, "dump hwindx%08x\n", readl(&ring->rt->hwindx));

	if (ring->ring_mem_virt)
		print_hex_dump_debug("dump ring_mem_virt ", DUMP_PREFIX_NONE,
				     16, 1, ring->ring_mem_virt, 16 * 8, false);
}

/*
 * Request a ring from the Ring Accelerator. Pass K3_RINGACC_RING_ID_ANY as
 * @id to get any free general purpose ring, or a valid ring id to request
 * that specific ring; returns NULL on failure. A requested ring must be
 * configured with k3_ringacc_ring_cfg() before use and released with
 * k3_ringacc_ring_free().
 */
struct k3_ring *k3_ringacc_request_ring(struct k3_ringacc *ringacc,
					int id, u32 flags)
{
	int proxy_id = K3_RINGACC_PROXY_NOT_USED;

	mutex_lock(&ringacc->req_lock);

	if (!try_module_get(ringacc->dev->driver->owner))
		goto err_module_get;

	if (id == K3_RINGACC_RING_ID_ANY) {
		/* Request for any general purpose ring */
		struct ti_sci_resource_desc *gp_rings =
						&ringacc->rm_gp_range->desc[0];
		unsigned long size;

		size = gp_rings->start + gp_rings->num;
		id = find_next_zero_bit(ringacc->rings_inuse, size,
					gp_rings->start);
		if (id == size)
			goto error;
	} else if (id < 0) {
		goto error;
	}

	if (test_bit(id, ringacc->rings_inuse) &&
	    !(ringacc->rings[id].flags & K3_RING_FLAG_SHARED))
		goto error;
	else if (ringacc->rings[id].flags & K3_RING_FLAG_SHARED)
		goto out;

	if (flags & K3_RINGACC_RING_USE_PROXY) {
		proxy_id = find_first_zero_bit(ringacc->proxy_inuse,
					       ringacc->num_proxies);
		if (proxy_id == ringacc->num_proxies)
			goto error;
	}

	if (proxy_id != K3_RINGACC_PROXY_NOT_USED) {
		set_bit(proxy_id, ringacc->proxy_inuse);
		ringacc->rings[id].proxy_id = proxy_id;
		dev_dbg(ringacc->dev, "Giving ring#%d proxy#%d\n", id,
			proxy_id);
	} else {
		dev_dbg(ringacc->dev, "Giving ring#%d\n", id);
	}

	set_bit(id, ringacc->rings_inuse);
out:
	ringacc->rings[id].use_count++;
	mutex_unlock(&ringacc->req_lock);
	return &ringacc->rings[id];

error:
	module_put(ringacc->dev->driver->owner);

err_module_get:
	mutex_unlock(&ringacc->req_lock);
	return NULL;
}
EXPORT_SYMBOL_GPL(k3_ringacc_request_ring);

static int k3_dmaring_request_dual_ring(struct k3_ringacc *ringacc, int fwd_id,
					struct k3_ring **fwd_ring,
					struct k3_ring **compl_ring)
{
	int ret = 0;

	/*
	 * DMA rings must be requested by ID, the completion ring is the
	 * reverse side of the forward ring.
	 */
	if (fwd_id < 0)
		return -EINVAL;

	mutex_lock(&ringacc->req_lock);

	if (!try_module_get(ringacc->dev->driver->owner)) {
		ret = -EINVAL;
		goto err_module_get;
	}

	if (test_bit(fwd_id, ringacc->rings_inuse)) {
		ret = -EBUSY;
		goto error;
	}

	*fwd_ring = &ringacc->rings[fwd_id];
	*compl_ring = &ringacc->rings[fwd_id + ringacc->num_rings];
	set_bit(fwd_id, ringacc->rings_inuse);
	ringacc->rings[fwd_id].use_count++;
	dev_dbg(ringacc->dev, "Giving ring#%d\n", fwd_id);

	mutex_unlock(&ringacc->req_lock);
	return 0;

error:
	module_put(ringacc->dev->driver->owner);
err_module_get:
	mutex_unlock(&ringacc->req_lock);
	return ret;
}

int k3_ringacc_request_rings_pair(struct k3_ringacc *ringacc,
				  int fwd_id, int compl_id,
				  struct k3_ring **fwd_ring,
				  struct k3_ring **compl_ring)
{
	int ret = 0;

	if (!fwd_ring || !compl_ring)
		return -EINVAL;

	if (ringacc->dma_rings)
		return k3_dmaring_request_dual_ring(ringacc, fwd_id,
						    fwd_ring, compl_ring);

	*fwd_ring = k3_ringacc_request_ring(ringacc, fwd_id, 0);
	if (!(*fwd_ring))
		return -ENODEV;

	*compl_ring = k3_ringacc_request_ring(ringacc, compl_id, 0);
	if (!(*compl_ring)) {
		k3_ringacc_ring_free(*fwd_ring);
		ret = -ENODEV;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(k3_ringacc_request_rings_pair);

static void k3_ringacc_ring_reset_sci(struct k3_ring *ring)
{
	struct ti_sci_msg_rm_ring_cfg ring_cfg = { 0 };
	struct k3_ringacc *ringacc = ring->parent;
	int ret;

	ring_cfg.nav_id = ringacc->tisci_dev_id;
	ring_cfg.index = ring->ring_id;
	ring_cfg.valid_params = TI_SCI_MSG_VALUE_RM_RING_COUNT_VALID;
	ring_cfg.count = ring->size;

	ret = ringacc->tisci_ring_ops->set_cfg(ringacc->tisci, &ring_cfg);
	if (ret)
		dev_err(ringacc->dev, "TISCI reset ring fail (%d) ring_idx %d\n",
			ret, ring->ring_id);
}

void k3_ringacc_ring_reset(struct k3_ring *ring)
{
	if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
		return;

	memset(&ring->state, 0, sizeof(ring->state));

	k3_ringacc_ring_reset_sci(ring);
}
EXPORT_SYMBOL_GPL(k3_ringacc_ring_reset);

static void k3_ringacc_ring_reconfig_qmode_sci(struct k3_ring *ring,
					       enum k3_ring_mode mode)
{
	struct ti_sci_msg_rm_ring_cfg ring_cfg = { 0 };
	struct k3_ringacc *ringacc = ring->parent;
	int ret;

	ring_cfg.nav_id = ringacc->tisci_dev_id;
	ring_cfg.index = ring->ring_id;
	ring_cfg.valid_params = TI_SCI_MSG_VALUE_RM_RING_MODE_VALID;
	ring_cfg.mode = mode;

	ret = ringacc->tisci_ring_ops->set_cfg(ringacc->tisci, &ring_cfg);
	if (ret)
		dev_err(ringacc->dev, "TISCI reconf qmode fail (%d) ring_idx %d\n",
			ret, ring->ring_id);
}

void k3_ringacc_ring_reset_dma(struct k3_ring *ring, u32 occ)
{
	if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
		return;

	if (!ring->parent->dma_ring_reset_quirk)
		goto reset;

	if (!occ)
		occ = k3_ringacc_ring_read_occ(ring);

	if (occ) {
		u32 db_ring_cnt, db_ring_cnt_cur;

		dev_dbg(ring->parent->dev, "%s %u occ: %u\n", __func__,
			ring->ring_id, occ);
		/* TI-SCI ring reset */
		k3_ringacc_ring_reset_sci(ring);

		/*
		 * Setup the ring in ring/doorbell mode (if not already in this
		 * mode)
		 */
		if (ring->mode != K3_RINGACC_RING_MODE_RING)
			k3_ringacc_ring_reconfig_qmode_sci(
					ring, K3_RINGACC_RING_MODE_RING);
		/*
		 * Ring the doorbell 2**22 - ringOcc times.
		 * This will wrap the internal UDMAP ring state occupancy
		 * counter (which is 21-bits wide) to 0.
		 */
		db_ring_cnt = (1U << 22) - occ;

		while (db_ring_cnt != 0) {
			/*
			 * Ring the doorbell with the maximum count each
			 * iteration if possible to minimize the total
			 * number of writes
			 */
			if (db_ring_cnt > K3_RINGACC_MAX_DB_RING_CNT)
				db_ring_cnt_cur = K3_RINGACC_MAX_DB_RING_CNT;
			else
				db_ring_cnt_cur = db_ring_cnt;

			writel(db_ring_cnt_cur, &ring->rt->db);
			db_ring_cnt -= db_ring_cnt_cur;
		}

		/* Restore the original ring mode (if not ring mode) */
		if (ring->mode != K3_RINGACC_RING_MODE_RING)
			k3_ringacc_ring_reconfig_qmode_sci(ring, ring->mode);
	}

reset:
	/* Reset the ring */
	k3_ringacc_ring_reset(ring);
}
EXPORT_SYMBOL_GPL(k3_ringacc_ring_reset_dma);

static void k3_ringacc_ring_free_sci(struct k3_ring *ring)
{
	struct ti_sci_msg_rm_ring_cfg ring_cfg = { 0 };
	struct k3_ringacc *ringacc = ring->parent;
	int ret;

	ring_cfg.nav_id = ringacc->tisci_dev_id;
	ring_cfg.index = ring->ring_id;
	ring_cfg.valid_params = TI_SCI_MSG_VALUE_RM_ALL_NO_ORDER;

	ret = ringacc->tisci_ring_ops->set_cfg(ringacc->tisci, &ring_cfg);
	if (ret)
		dev_err(ringacc->dev, "TISCI ring free fail (%d) ring_idx %d\n",
			ret, ring->ring_id);
}

int k3_ringacc_ring_free(struct k3_ring *ring)
{
	struct k3_ringacc *ringacc;

	if (!ring)
		return -EINVAL;

	ringacc = ring->parent;

	/*
	 * DMA rings: forward and reverse rings share memory and
	 * configuration, only the forward ring is configured and the
	 * reverse ring is treated as its slave.
	 */
	if (ringacc->dma_rings && (ring->flags & K3_RING_FLAG_REVERSE))
		return 0;

	dev_dbg(ring->parent->dev, "flags: 0x%08x\n", ring->flags);

	if (!test_bit(ring->ring_id, ringacc->rings_inuse))
		return -EINVAL;

	mutex_lock(&ringacc->req_lock);

	if (--ring->use_count)
		goto out;

	if (!(ring->flags & K3_RING_FLAG_BUSY))
		goto no_init;

	k3_ringacc_ring_free_sci(ring);

	dma_free_coherent(ring->dma_dev,
			  ring->size * (4 << ring->elm_size),
			  ring->ring_mem_virt, ring->ring_mem_dma);
	ring->flags = 0;
	ring->ops = NULL;
	ring->dma_dev = NULL;
	ring->asel = 0;

	if (ring->proxy_id != K3_RINGACC_PROXY_NOT_USED) {
		clear_bit(ring->proxy_id, ringacc->proxy_inuse);
		ring->proxy = NULL;
		ring->proxy_id = K3_RINGACC_PROXY_NOT_USED;
	}

no_init:
	clear_bit(ring->ring_id, ringacc->rings_inuse);

	module_put(ringacc->dev->driver->owner);

out:
	mutex_unlock(&ringacc->req_lock);
	return 0;
}
EXPORT_SYMBOL_GPL(k3_ringacc_ring_free);

u32 k3_ringacc_get_ring_id(struct k3_ring *ring)
{
	if (!ring)
		return -EINVAL;

	return ring->ring_id;
}
EXPORT_SYMBOL_GPL(k3_ringacc_get_ring_id);

u32 k3_ringacc_get_tisci_dev_id(struct k3_ring *ring)
{
	if (!ring)
		return -EINVAL;

	return ring->parent->tisci_dev_id;
}
EXPORT_SYMBOL_GPL(k3_ringacc_get_tisci_dev_id);

int k3_ringacc_get_ring_irq_num(struct k3_ring *ring)
{
	int irq_num;

	if (!ring)
		return -EINVAL;

	irq_num = msi_get_virq(ring->parent->dev, ring->ring_id);
	if (irq_num <= 0)
		irq_num = -EINVAL;
	return irq_num;
}
EXPORT_SYMBOL_GPL(k3_ringacc_get_ring_irq_num);

static int k3_ringacc_ring_cfg_sci(struct k3_ring *ring)
{
	struct ti_sci_msg_rm_ring_cfg ring_cfg = { 0 };
	struct k3_ringacc *ringacc = ring->parent;
	int ret;

	if (!ringacc->tisci)
		return -EINVAL;

	ring_cfg.nav_id = ringacc->tisci_dev_id;
	ring_cfg.index = ring->ring_id;
	ring_cfg.valid_params = TI_SCI_MSG_VALUE_RM_ALL_NO_ORDER;
	ring_cfg.addr_lo = lower_32_bits(ring->ring_mem_dma);
	ring_cfg.addr_hi = upper_32_bits(ring->ring_mem_dma);
	ring_cfg.count = ring->size;
	ring_cfg.mode = ring->mode;
	ring_cfg.size = ring->elm_size;
	ring_cfg.asel = ring->asel;

	ret = ringacc->tisci_ring_ops->set_cfg(ringacc->tisci, &ring_cfg);
	if (ret)
		dev_err(ringacc->dev, "TISCI config ring fail (%d) ring_idx %d\n",
			ret, ring->ring_id);

	return ret;
}

/*
 * Configure a dual (DMA) ring: the forward ring @ring and its reverse
 * counterpart at ringacc->rings[ring->ring_id + ringacc->num_rings] share
 * one ring memory; only the forward side allocates and programs it, the
 * reverse side is set up here as its mirror.
 */
static int k3_dmaring_cfg(struct k3_ring *ring, struct k3_ring_cfg *cfg)
{
	struct k3_ringacc *ringacc;
	struct k3_ring *reverse_ring;
	int ret = 0;

	if (cfg->elm_size != K3_RINGACC_RING_ELSIZE_8 ||
	    cfg->mode != K3_RINGACC_RING_MODE_RING ||
	    cfg->size & ~K3_DMARING_CFG_RING_SIZE_ELCNT_MASK)
		return -EINVAL;

	ringacc = ring->parent;

	/*
	 * DMA rings: forward and reverse rings share memory and
	 * configuration, only the forward ring is configured and the
	 * reverse ring is treated as its slave.
	 */
	if (ringacc->dma_rings && (ring->flags & K3_RING_FLAG_REVERSE))
		return 0;

	if (!test_bit(ring->ring_id, ringacc->rings_inuse))
		return -EINVAL;

	ring->size = cfg->size;
	ring->elm_size = cfg->elm_size;
	ring->mode = cfg->mode;
	ring->asel = cfg->asel;
	ring->dma_dev = cfg->dma_dev;
	if (!ring->dma_dev) {
		dev_warn(ringacc->dev, "dma_dev is not provided for ring%d\n",
			 ring->ring_id);
		ring->dma_dev = ringacc->dev;
	}

	memset(&ring->state, 0, sizeof(ring->state));

	ring->ops = &k3_dmaring_fwd_ops;

	ring->ring_mem_virt = dma_alloc_coherent(ring->dma_dev,
						 ring->size * (4 << ring->elm_size),
						 &ring->ring_mem_dma, GFP_KERNEL);
	if (!ring->ring_mem_virt) {
		dev_err(ringacc->dev, "Failed to alloc ring mem\n");
		ret = -ENOMEM;
		goto err_free_ops;
	}

	ret = k3_ringacc_ring_cfg_sci(ring);
	if (ret)
		goto err_free_mem;

	ring->flags |= K3_RING_FLAG_BUSY;

	k3_ringacc_ring_dump(ring);

	/* DMA rings: configure reverse ring */
	reverse_ring = &ringacc->rings[ring->ring_id + ringacc->num_rings];
	reverse_ring->size = cfg->size;
	reverse_ring->elm_size = cfg->elm_size;
	reverse_ring->mode = cfg->mode;
	reverse_ring->asel = cfg->asel;
	memset(&reverse_ring->state, 0, sizeof(reverse_ring->state));
	reverse_ring->ops = &k3_dmaring_reverse_ops;

	reverse_ring->ring_mem_virt = ring->ring_mem_virt;
	reverse_ring->ring_mem_dma = ring->ring_mem_dma;
	reverse_ring->flags |= K3_RING_FLAG_BUSY;
	k3_ringacc_ring_dump(reverse_ring);

	return 0;

err_free_mem:
	dma_free_coherent(ring->dma_dev,
			  ring->size * (4 << ring->elm_size),
			  ring->ring_mem_virt,
			  ring->ring_mem_dma);
err_free_ops:
	ring->ops = NULL;
	ring->proxy = NULL;
	ring->dma_dev = NULL;
	ring->asel = 0;
	return ret;
}

int k3_ringacc_ring_cfg(struct k3_ring *ring, struct k3_ring_cfg *cfg)
{
	struct k3_ringacc *ringacc;
	int ret = 0;

	if (!ring || !cfg)
		return -EINVAL;

	ringacc = ring->parent;

	if (ringacc->dma_rings)
		return k3_dmaring_cfg(ring, cfg);

	if (cfg->elm_size > K3_RINGACC_RING_ELSIZE_256 ||
	    cfg->mode >= K3_RINGACC_RING_MODE_INVALID ||
	    cfg->size & ~K3_RINGACC_CFG_RING_SIZE_ELCNT_MASK ||
	    !test_bit(ring->ring_id, ringacc->rings_inuse))
		return -EINVAL;

	if (cfg->mode == K3_RINGACC_RING_MODE_MESSAGE &&
	    ring->proxy_id == K3_RINGACC_PROXY_NOT_USED &&
	    cfg->elm_size > K3_RINGACC_RING_ELSIZE_8) {
		dev_err(ringacc->dev,
			"Message mode must use proxy for %u element size\n",
			4 << ring->elm_size);
		return -EINVAL;
	}

	/*
	 * In case of a shared ring only the first user (master user) can
	 * configure the ring. The expected sequence on the client side is:
	 * ring = k3_ringacc_request_ring(ringacc, ring_id, 0); # master user
	 * k3_ringacc_ring_cfg(ring, cfg); # master configuration
	 * k3_ringacc_request_ring(ringacc, ring_id, K3_RING_FLAG_SHARED);
	 * k3_ringacc_request_ring(ringacc, ring_id, K3_RING_FLAG_SHARED);
	 */
	if (ring->use_count != 1)
		return 0;

	ring->size = cfg->size;
	ring->elm_size = cfg->elm_size;
	ring->mode = cfg->mode;
	memset(&ring->state, 0, sizeof(ring->state));

	if (ring->proxy_id != K3_RINGACC_PROXY_NOT_USED)
		ring->proxy = ringacc->proxy_target_base +
			      ring->proxy_id * K3_RINGACC_PROXY_TARGET_STEP;

	switch (ring->mode) {
	case K3_RINGACC_RING_MODE_RING:
		ring->ops = &k3_ring_mode_ring_ops;
		ring->dma_dev = cfg->dma_dev;
		if (!ring->dma_dev)
			ring->dma_dev = ringacc->dev;
		break;
	case K3_RINGACC_RING_MODE_MESSAGE:
		ring->dma_dev = ringacc->dev;
		if (ring->proxy)
			ring->ops = &k3_ring_mode_proxy_ops;
		else
			ring->ops = &k3_ring_mode_msg_ops;
		break;
	default:
		ring->ops = NULL;
		ret = -EINVAL;
		goto err_free_proxy;
	}

	ring->ring_mem_virt = dma_alloc_coherent(ring->dma_dev,
						 ring->size * (4 << ring->elm_size),
						 &ring->ring_mem_dma, GFP_KERNEL);
	if (!ring->ring_mem_virt) {
		dev_err(ringacc->dev, "Failed to alloc ring mem\n");
		ret = -ENOMEM;
		goto err_free_ops;
	}

	ret = k3_ringacc_ring_cfg_sci(ring);
	if (ret)
		goto err_free_mem;

	ring->flags |= K3_RING_FLAG_BUSY;
	ring->flags |= (cfg->flags & K3_RINGACC_RING_SHARED) ?
			K3_RING_FLAG_SHARED : 0;

	k3_ringacc_ring_dump(ring);

	return 0;

err_free_mem:
	dma_free_coherent(ring->dma_dev,
			  ring->size * (4 << ring->elm_size),
			  ring->ring_mem_virt,
			  ring->ring_mem_dma);
err_free_ops:
	ring->ops = NULL;
	ring->dma_dev = NULL;
err_free_proxy:
	ring->proxy = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(k3_ringacc_ring_cfg);

u32 k3_ringacc_ring_get_size(struct k3_ring *ring)
{
	if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
		return -EINVAL;

	return ring->size;
}
EXPORT_SYMBOL_GPL(k3_ringacc_ring_get_size);

u32 k3_ringacc_ring_get_free(struct k3_ring *ring)
{
	if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
		return -EINVAL;

	if (!ring->state.free)
		ring->state.free = ring->size - k3_ringacc_ring_read_occ(ring);

	return ring->state.free;
}
EXPORT_SYMBOL_GPL(k3_ringacc_ring_get_free);

u32 k3_ringacc_ring_get_occ(struct k3_ring *ring)
{
	if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
		return -EINVAL;

	return k3_ringacc_ring_read_occ(ring);
}
EXPORT_SYMBOL_GPL(k3_ringacc_ring_get_occ);

u32 k3_ringacc_ring_is_full(struct k3_ring *ring)
{
	return !k3_ringacc_ring_get_free(ring);
}
EXPORT_SYMBOL_GPL(k3_ringacc_ring_is_full);

enum k3_ringacc_access_mode {
	K3_RINGACC_ACCESS_MODE_PUSH_HEAD,
	K3_RINGACC_ACCESS_MODE_POP_HEAD,
	K3_RINGACC_ACCESS_MODE_PUSH_TAIL,
	K3_RINGACC_ACCESS_MODE_POP_TAIL,
	K3_RINGACC_ACCESS_MODE_PEEK_HEAD,
	K3_RINGACC_ACCESS_MODE_PEEK_TAIL,
};

#define K3_RINGACC_PROXY_MODE(x)	(((x) & 0x3) << 16)
#define K3_RINGACC_PROXY_ELSIZE(x)	(((x) & 0x7) << 24)

/*
 * The proxy control word is ring_id | (access mode << 16) |
 * (elm_size << 24); e.g. ring 5 accessed in PROXY_ACCESS_MODE_TAIL (1) with
 * 8-byte elements (elm_size 1) gives 0x5 | (1 << 16) | (1 << 24) =
 * 0x01010005.
 */
static int k3_ringacc_ring_cfg_proxy(struct k3_ring *ring,
				     enum k3_ringacc_proxy_access_mode mode)
{
	u32 val;

	val = ring->ring_id;
	val |= K3_RINGACC_PROXY_MODE(mode);
	val |= K3_RINGACC_PROXY_ELSIZE(ring->elm_size);
	writel(val, &ring->proxy->control);
	return 0;
}

static int k3_ringacc_ring_access_proxy(struct k3_ring *ring, void *elem,
					enum k3_ringacc_access_mode access_mode)
{
	void __iomem *ptr;

	ptr = (void __iomem *)&ring->proxy->data;

	switch (access_mode) {
	case K3_RINGACC_ACCESS_MODE_PUSH_HEAD:
	case K3_RINGACC_ACCESS_MODE_POP_HEAD:
		k3_ringacc_ring_cfg_proxy(ring, PROXY_ACCESS_MODE_HEAD);
		break;
	case K3_RINGACC_ACCESS_MODE_PUSH_TAIL:
	case K3_RINGACC_ACCESS_MODE_POP_TAIL:
		k3_ringacc_ring_cfg_proxy(ring, PROXY_ACCESS_MODE_TAIL);
		break;
	default:
		return -EINVAL;
	}

	ptr += k3_ringacc_ring_get_fifo_pos(ring);

	switch (access_mode) {
	case K3_RINGACC_ACCESS_MODE_POP_HEAD:
	case K3_RINGACC_ACCESS_MODE_POP_TAIL:
		dev_dbg(ring->parent->dev,
			"proxy:memcpy_fromio(x): --> ptr(%p), mode:%d\n", ptr,
			access_mode);
		memcpy_fromio(elem, ptr, (4 << ring->elm_size));
		ring->state.occ--;
		break;
	case K3_RINGACC_ACCESS_MODE_PUSH_TAIL:
	case K3_RINGACC_ACCESS_MODE_PUSH_HEAD:
		dev_dbg(ring->parent->dev,
			"proxy:memcpy_toio(x): --> ptr(%p), mode:%d\n", ptr,
			access_mode);
		memcpy_toio(ptr, elem, (4 << ring->elm_size));
		ring->state.free--;
		break;
	default:
		return -EINVAL;
	}

	dev_dbg(ring->parent->dev, "proxy: free%d occ%d\n", ring->state.free,
		ring->state.occ);
	return 0;
}

static int k3_ringacc_ring_push_head_proxy(struct k3_ring *ring, void *elem)
{
	return k3_ringacc_ring_access_proxy(ring, elem,
					    K3_RINGACC_ACCESS_MODE_PUSH_HEAD);
}

static int k3_ringacc_ring_push_tail_proxy(struct k3_ring *ring, void *elem)
{
	return k3_ringacc_ring_access_proxy(ring, elem,
					    K3_RINGACC_ACCESS_MODE_PUSH_TAIL);
}

static int k3_ringacc_ring_pop_head_proxy(struct k3_ring *ring, void *elem)
{
	return k3_ringacc_ring_access_proxy(ring, elem,
					    K3_RINGACC_ACCESS_MODE_POP_HEAD);
}

static int k3_ringacc_ring_pop_tail_proxy(struct k3_ring *ring, void *elem)
{
	return k3_ringacc_ring_access_proxy(ring, elem,
					    K3_RINGACC_ACCESS_MODE_POP_TAIL);
}

static int k3_ringacc_ring_access_io(struct k3_ring *ring, void *elem,
				     enum k3_ringacc_access_mode access_mode)
{
	void __iomem *ptr;

	switch (access_mode) {
	case K3_RINGACC_ACCESS_MODE_PUSH_HEAD:
	case K3_RINGACC_ACCESS_MODE_POP_HEAD:
		ptr = (void __iomem *)&ring->fifos->head_data;
		break;
	case K3_RINGACC_ACCESS_MODE_PUSH_TAIL:
	case K3_RINGACC_ACCESS_MODE_POP_TAIL:
		ptr = (void __iomem *)&ring->fifos->tail_data;
		break;
	default:
		return -EINVAL;
	}

	ptr += k3_ringacc_ring_get_fifo_pos(ring);

	switch (access_mode) {
	case K3_RINGACC_ACCESS_MODE_POP_HEAD:
	case K3_RINGACC_ACCESS_MODE_POP_TAIL:
		dev_dbg(ring->parent->dev,
			"memcpy_fromio(x): --> ptr(%p), mode:%d\n", ptr,
			access_mode);
		memcpy_fromio(elem, ptr, (4 << ring->elm_size));
		ring->state.occ--;
		break;
	case K3_RINGACC_ACCESS_MODE_PUSH_TAIL:
	case K3_RINGACC_ACCESS_MODE_PUSH_HEAD:
		dev_dbg(ring->parent->dev,
			"memcpy_toio(x): --> ptr(%p), mode:%d\n", ptr,
			access_mode);
		memcpy_toio(ptr, elem, (4 << ring->elm_size));
		ring->state.free--;
		break;
	default:
		return -EINVAL;
	}

	dev_dbg(ring->parent->dev, "free%d index%d occ%d index%d\n",
		ring->state.free, ring->state.windex, ring->state.occ,
		ring->state.rindex);
	return 0;
}

static int k3_ringacc_ring_push_head_io(struct k3_ring *ring, void *elem)
{
	return k3_ringacc_ring_access_io(ring, elem,
					 K3_RINGACC_ACCESS_MODE_PUSH_HEAD);
}

static int k3_ringacc_ring_push_io(struct k3_ring *ring, void *elem)
{
	return k3_ringacc_ring_access_io(ring, elem,
					 K3_RINGACC_ACCESS_MODE_PUSH_TAIL);
}

static int k3_ringacc_ring_pop_io(struct k3_ring *ring, void *elem)
{
	return k3_ringacc_ring_access_io(ring, elem,
					 K3_RINGACC_ACCESS_MODE_POP_HEAD);
}

static int k3_ringacc_ring_pop_tail_io(struct k3_ring *ring, void *elem)
{
	return k3_ringacc_ring_access_io(ring, elem,
					 K3_RINGACC_ACCESS_MODE_POP_TAIL);
}

/*
 * The element is 48 bits of address + ASEL bits in the ring.
 * ASEL is used by the DMAs and must be stripped for the kernel as it is not
 * part of the physical memory address.
 */
static void k3_dmaring_remove_asel_from_elem(u64 *elem)
{
	*elem &= GENMASK_ULL(K3_ADDRESS_ASEL_SHIFT - 1, 0);
}
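
/*
 * Numeric example of the ASEL handling: with asel 15,
 * k3_ringacc_ring_push_mem() stores address 0x89abcdef0 as
 * 0x000f00089abcdef0 (15 << 48 merged in), and the mask above
 * (GENMASK_ULL(47, 0)) restores 0x000000089abcdef0 on pop.
 */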

static int k3_dmaring_fwd_pop(struct k3_ring *ring, void *elem)
{
	void *elem_ptr;
	u32 elem_idx;

	/*
	 * DMA rings: the forward ring is always tied to a DMA channel; the
	 * HW does not maintain any state data required for a POP operation
	 * and it is unknown how many elements were consumed by the HW. So,
	 * to actually do a POP, the read pointer has to be recalculated
	 * every time.
	 */
	ring->state.occ = k3_ringacc_ring_read_occ(ring);
	if (ring->state.windex >= ring->state.occ)
		elem_idx = ring->state.windex - ring->state.occ;
	else
		elem_idx = ring->size - (ring->state.occ - ring->state.windex);

	elem_ptr = k3_ringacc_get_elm_addr(ring, elem_idx);
	memcpy(elem, elem_ptr, (4 << ring->elm_size));
	k3_dmaring_remove_asel_from_elem(elem);

	ring->state.occ--;
	writel(-1, &ring->rt->db);

	dev_dbg(ring->parent->dev, "%s: occ%d Windex%d Rindex%d pos_ptr%px\n",
		__func__, ring->state.occ, ring->state.windex, elem_idx,
		elem_ptr);
	return 0;
}

static int k3_dmaring_reverse_pop(struct k3_ring *ring, void *elem)
{
	void *elem_ptr;

	elem_ptr = k3_ringacc_get_elm_addr(ring, ring->state.rindex);

	if (ring->state.occ) {
		memcpy(elem, elem_ptr, (4 << ring->elm_size));
		k3_dmaring_remove_asel_from_elem(elem);

		ring->state.rindex = (ring->state.rindex + 1) % ring->size;
		ring->state.occ--;
		writel(-1 & K3_DMARING_RT_DB_ENTRY_MASK, &ring->rt->db);
	} else if (ring->state.tdown_complete) {
		dma_addr_t *value = elem;

		*value = CPPI5_TDCM_MARKER;
		writel(K3_DMARING_RT_DB_TDOWN_ACK, &ring->rt->db);
		ring->state.tdown_complete = false;
	}

	dev_dbg(ring->parent->dev, "%s: occ%d index%d pos_ptr%px\n",
		__func__, ring->state.occ, ring->state.rindex, elem_ptr);
	return 0;
}

static int k3_ringacc_ring_push_mem(struct k3_ring *ring, void *elem)
{
	void *elem_ptr;

	elem_ptr = k3_ringacc_get_elm_addr(ring, ring->state.windex);

	memcpy(elem_ptr, elem, (4 << ring->elm_size));
	if (ring->parent->dma_rings) {
		u64 *addr = elem_ptr;

		*addr |= ((u64)ring->asel << K3_ADDRESS_ASEL_SHIFT);
	}

	ring->state.windex = (ring->state.windex + 1) % ring->size;
	ring->state.free--;
	writel(1, &ring->rt->db);

	dev_dbg(ring->parent->dev, "ring_push_mem: free%d index%d\n",
		ring->state.free, ring->state.windex);

	return 0;
}

static int k3_ringacc_ring_pop_mem(struct k3_ring *ring, void *elem)
{
	void *elem_ptr;

	elem_ptr = k3_ringacc_get_elm_addr(ring, ring->state.rindex);

	memcpy(elem, elem_ptr, (4 << ring->elm_size));

	ring->state.rindex = (ring->state.rindex + 1) % ring->size;
	ring->state.occ--;
	writel(-1, &ring->rt->db);

	dev_dbg(ring->parent->dev, "ring_pop_mem: occ%d index%d pos_ptr%p\n",
		ring->state.occ, ring->state.rindex, elem_ptr);
	return 0;
}

int k3_ringacc_ring_push(struct k3_ring *ring, void *elem)
{
	int ret = -EOPNOTSUPP;

	if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
		return -EINVAL;

	dev_dbg(ring->parent->dev, "ring_push: free%d index%d\n",
		ring->state.free, ring->state.windex);

	if (k3_ringacc_ring_is_full(ring))
		return -ENOMEM;

	if (ring->ops && ring->ops->push_tail)
		ret = ring->ops->push_tail(ring, elem);

	return ret;
}
EXPORT_SYMBOL_GPL(k3_ringacc_ring_push);
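
/*
 * Illustrative producer/consumer sketch (hypothetical client code, not part
 * of this driver); every element is copied in/out whole, (4 << elm_size)
 * bytes at a time:
 *
 *	u64 elem = desc_dma_addr;
 *
 *	ret = k3_ringacc_ring_push(fwd_ring, &elem);
 *	if (ret)
 *		return ret;
 *
 *	while (!k3_ringacc_ring_pop(compl_ring, &elem))
 *		handle_completion(elem);
 *
 * desc_dma_addr and handle_completion() are placeholders for the client's
 * descriptor address and completion handler.
 */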

int k3_ringacc_ring_push_head(struct k3_ring *ring, void *elem)
{
	int ret = -EOPNOTSUPP;

	if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
		return -EINVAL;

	dev_dbg(ring->parent->dev, "ring_push_head: free%d index%d\n",
		ring->state.free, ring->state.windex);

	if (k3_ringacc_ring_is_full(ring))
		return -ENOMEM;

	if (ring->ops && ring->ops->push_head)
		ret = ring->ops->push_head(ring, elem);

	return ret;
}
EXPORT_SYMBOL_GPL(k3_ringacc_ring_push_head);

int k3_ringacc_ring_pop(struct k3_ring *ring, void *elem)
{
	int ret = -EOPNOTSUPP;

	if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
		return -EINVAL;

	if (!ring->state.occ)
		k3_ringacc_ring_update_occ(ring);

	dev_dbg(ring->parent->dev, "ring_pop: occ%d index%d\n", ring->state.occ,
		ring->state.rindex);

	if (!ring->state.occ && !ring->state.tdown_complete)
		return -ENODATA;

	if (ring->ops && ring->ops->pop_head)
		ret = ring->ops->pop_head(ring, elem);

	return ret;
}
EXPORT_SYMBOL_GPL(k3_ringacc_ring_pop);

int k3_ringacc_ring_pop_tail(struct k3_ring *ring, void *elem)
{
	int ret = -EOPNOTSUPP;

	if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
		return -EINVAL;

	if (!ring->state.occ)
		k3_ringacc_ring_update_occ(ring);

	dev_dbg(ring->parent->dev, "ring_pop_tail: occ%d index%d\n",
		ring->state.occ, ring->state.rindex);

	if (!ring->state.occ)
		return -ENODATA;

	if (ring->ops && ring->ops->pop_tail)
		ret = ring->ops->pop_tail(ring, elem);

	return ret;
}
EXPORT_SYMBOL_GPL(k3_ringacc_ring_pop_tail);

struct k3_ringacc *of_k3_ringacc_get_by_phandle(struct device_node *np,
						const char *property)
{
	struct device_node *ringacc_np;
	struct k3_ringacc *ringacc = ERR_PTR(-EPROBE_DEFER);
	struct k3_ringacc *entry;

	ringacc_np = of_parse_phandle(np, property, 0);
	if (!ringacc_np)
		return ERR_PTR(-ENODEV);

	mutex_lock(&k3_ringacc_list_lock);
	list_for_each_entry(entry, &k3_ringacc_list, list)
		if (entry->dev->of_node == ringacc_np) {
			ringacc = entry;
			break;
		}
	mutex_unlock(&k3_ringacc_list_lock);
	of_node_put(ringacc_np);

	return ringacc;
}
EXPORT_SYMBOL_GPL(of_k3_ringacc_get_by_phandle);
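
/*
 * Illustrative only: a client device tree node references the RA through a
 * phandle property (the property name is defined by the client binding,
 * "ti,ringacc" is just an example):
 *
 *	ti,ringacc = <&ringacc>;
 *
 * which the client driver then resolves with:
 *
 *	ringacc = of_k3_ringacc_get_by_phandle(dev->of_node, "ti,ringacc");
 *	if (IS_ERR(ringacc))
 *		return PTR_ERR(ringacc);
 */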

static int k3_ringacc_probe_dt(struct k3_ringacc *ringacc)
{
	struct device_node *node = ringacc->dev->of_node;
	struct device *dev = ringacc->dev;
	struct platform_device *pdev = to_platform_device(dev);
	int ret;

	if (!node) {
		dev_err(dev, "device tree info unavailable\n");
		return -ENODEV;
	}

	ret = of_property_read_u32(node, "ti,num-rings", &ringacc->num_rings);
	if (ret) {
		dev_err(dev, "ti,num-rings read failure %d\n", ret);
		return ret;
	}

	ringacc->tisci = ti_sci_get_by_phandle(node, "ti,sci");
	if (IS_ERR(ringacc->tisci)) {
		ret = PTR_ERR(ringacc->tisci);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "ti,sci read fail %d\n", ret);
		ringacc->tisci = NULL;
		return ret;
	}

	ret = of_property_read_u32(node, "ti,sci-dev-id",
				   &ringacc->tisci_dev_id);
	if (ret) {
		dev_err(dev, "ti,sci-dev-id read fail %d\n", ret);
		return ret;
	}

	pdev->id = ringacc->tisci_dev_id;

	ringacc->rm_gp_range = devm_ti_sci_get_of_resource(ringacc->tisci, dev,
							   ringacc->tisci_dev_id,
							   "ti,sci-rm-range-gp-rings");
	if (IS_ERR(ringacc->rm_gp_range)) {
		dev_err(dev, "Failed to allocate MSI interrupts\n");
		return PTR_ERR(ringacc->rm_gp_range);
	}

	return ti_sci_inta_msi_domain_alloc_irqs(ringacc->dev,
						 ringacc->rm_gp_range);
}

static const struct k3_ringacc_soc_data k3_ringacc_soc_data_sr1 = {
	.dma_ring_reset_quirk = 1,
};

static const struct soc_device_attribute k3_ringacc_socinfo[] = {
	{ .family = "AM65X",
	  .revision = "SR1.0",
	  .data = &k3_ringacc_soc_data_sr1
	},
	{/* sentinel */}
};

static int k3_ringacc_init(struct platform_device *pdev,
			   struct k3_ringacc *ringacc)
{
	const struct soc_device_attribute *soc;
	void __iomem *base_fifo, *base_rt;
	struct device *dev = &pdev->dev;
	struct resource *res;
	int ret, i;

	dev->msi.domain = of_msi_get_domain(dev, dev->of_node,
					    DOMAIN_BUS_TI_SCI_INTA_MSI);
	if (!dev->msi.domain) {
		dev_err(dev, "Failed to get MSI domain\n");
		return -EPROBE_DEFER;
	}

	ret = k3_ringacc_probe_dt(ringacc);
	if (ret)
		return ret;

	soc = soc_device_match(k3_ringacc_socinfo);
	if (soc && soc->data) {
		const struct k3_ringacc_soc_data *soc_data = soc->data;

		ringacc->dma_ring_reset_quirk = soc_data->dma_ring_reset_quirk;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rt");
	base_rt = devm_ioremap_resource(dev, res);
	if (IS_ERR(base_rt))
		return PTR_ERR(base_rt);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fifos");
	base_fifo = devm_ioremap_resource(dev, res);
	if (IS_ERR(base_fifo))
		return PTR_ERR(base_fifo);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "proxy_gcfg");
	ringacc->proxy_gcfg = devm_ioremap_resource(dev, res);
	if (IS_ERR(ringacc->proxy_gcfg))
		return PTR_ERR(ringacc->proxy_gcfg);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
					   "proxy_target");
	ringacc->proxy_target_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(ringacc->proxy_target_base))
		return PTR_ERR(ringacc->proxy_target_base);

	ringacc->num_proxies = readl(&ringacc->proxy_gcfg->config) &
			       K3_RINGACC_PROXY_CFG_THREADS_MASK;

	ringacc->rings = devm_kzalloc(dev,
				      sizeof(*ringacc->rings) *
				      ringacc->num_rings,
				      GFP_KERNEL);
	ringacc->rings_inuse = devm_bitmap_zalloc(dev, ringacc->num_rings,
						  GFP_KERNEL);
	ringacc->proxy_inuse = devm_bitmap_zalloc(dev, ringacc->num_proxies,
						  GFP_KERNEL);

	if (!ringacc->rings || !ringacc->rings_inuse || !ringacc->proxy_inuse)
		return -ENOMEM;

	for (i = 0; i < ringacc->num_rings; i++) {
		ringacc->rings[i].rt = base_rt +
				       K3_RINGACC_RT_REGS_STEP * i;
		ringacc->rings[i].fifos = base_fifo +
					  K3_RINGACC_FIFO_REGS_STEP * i;
		ringacc->rings[i].parent = ringacc;
		ringacc->rings[i].ring_id = i;
		ringacc->rings[i].proxy_id = K3_RINGACC_PROXY_NOT_USED;
	}

	ringacc->tisci_ring_ops = &ringacc->tisci->ops.rm_ring_ops;

	dev_info(dev, "Ring Accelerator probed rings:%u, gp-rings[%u,%u] sci-dev-id:%u\n",
		 ringacc->num_rings,
		 ringacc->rm_gp_range->desc[0].start,
		 ringacc->rm_gp_range->desc[0].num,
		 ringacc->tisci_dev_id);
	dev_info(dev, "dma-ring-reset-quirk: %s\n",
		 ringacc->dma_ring_reset_quirk ? "enabled" : "disabled");
	dev_info(dev, "RA Proxy rev. %08x, num_proxies:%u\n",
		 readl(&ringacc->proxy_gcfg->revision), ringacc->num_proxies);

	return 0;
}
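
/*
 * For reference, the per-ring MMIO spacing programmed above: ring i has its
 * realtime registers at base_rt + 0x1000 * i (K3_RINGACC_RT_REGS_STEP) and
 * its FIFO window at base_fifo + 0x1000 * i (K3_RINGACC_FIFO_REGS_STEP),
 * e.g. ring 3 uses base_rt + 0x3000.
 */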

struct ringacc_match_data {
	struct k3_ringacc_ops ops;
};

static struct ringacc_match_data k3_ringacc_data = {
	.ops = {
		.init = k3_ringacc_init,
	},
};

/* Match table for of_platform binding */
static const struct of_device_id k3_ringacc_of_match[] = {
	{ .compatible = "ti,am654-navss-ringacc", .data = &k3_ringacc_data, },
	{},
};
MODULE_DEVICE_TABLE(of, k3_ringacc_of_match);

struct k3_ringacc *k3_ringacc_dmarings_init(struct platform_device *pdev,
					    struct k3_ringacc_init_data *data)
{
	struct device *dev = &pdev->dev;
	struct k3_ringacc *ringacc;
	void __iomem *base_rt;
	struct resource *res;
	int i;

	ringacc = devm_kzalloc(dev, sizeof(*ringacc), GFP_KERNEL);
	if (!ringacc)
		return ERR_PTR(-ENOMEM);

	ringacc->dev = dev;
	ringacc->dma_rings = true;
	ringacc->num_rings = data->num_rings;
	ringacc->tisci = data->tisci;
	ringacc->tisci_dev_id = data->tisci_dev_id;

	mutex_init(&ringacc->req_lock);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ringrt");
	base_rt = devm_ioremap_resource(dev, res);
	if (IS_ERR(base_rt))
		return ERR_CAST(base_rt);

	ringacc->rings = devm_kzalloc(dev,
				      sizeof(*ringacc->rings) *
				      ringacc->num_rings * 2,
				      GFP_KERNEL);
	ringacc->rings_inuse = devm_bitmap_zalloc(dev, ringacc->num_rings,
						  GFP_KERNEL);

	if (!ringacc->rings || !ringacc->rings_inuse)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < ringacc->num_rings; i++) {
		struct k3_ring *ring = &ringacc->rings[i];

		ring->rt = base_rt + K3_DMARING_RT_REGS_STEP * i;
		ring->parent = ringacc;
		ring->ring_id = i;
		ring->proxy_id = K3_RINGACC_PROXY_NOT_USED;

		ring = &ringacc->rings[ringacc->num_rings + i];
		ring->rt = base_rt + K3_DMARING_RT_REGS_STEP * i +
			   K3_DMARING_RT_REGS_REVERSE_OFS;
		ring->parent = ringacc;
		ring->ring_id = i;
		ring->proxy_id = K3_RINGACC_PROXY_NOT_USED;
		ring->flags = K3_RING_FLAG_REVERSE;
	}

	ringacc->tisci_ring_ops = &ringacc->tisci->ops.rm_ring_ops;

	dev_info(dev, "Number of rings: %u\n", ringacc->num_rings);

	return ringacc;
}
EXPORT_SYMBOL_GPL(k3_ringacc_dmarings_init);

static int k3_ringacc_probe(struct platform_device *pdev)
{
	const struct ringacc_match_data *match_data;
	struct device *dev = &pdev->dev;
	struct k3_ringacc *ringacc;
	int ret;

	match_data = of_device_get_match_data(&pdev->dev);
	if (!match_data)
		return -ENODEV;

	ringacc = devm_kzalloc(dev, sizeof(*ringacc), GFP_KERNEL);
	if (!ringacc)
		return -ENOMEM;

	ringacc->dev = dev;
	mutex_init(&ringacc->req_lock);
	ringacc->ops = &match_data->ops;

	ret = ringacc->ops->init(pdev, ringacc);
	if (ret)
		return ret;

	dev_set_drvdata(dev, ringacc);

	mutex_lock(&k3_ringacc_list_lock);
	list_add_tail(&ringacc->list, &k3_ringacc_list);
	mutex_unlock(&k3_ringacc_list_lock);

	return 0;
}

static int k3_ringacc_remove(struct platform_device *pdev)
{
	struct k3_ringacc *ringacc = dev_get_drvdata(&pdev->dev);

	mutex_lock(&k3_ringacc_list_lock);
	list_del(&ringacc->list);
	mutex_unlock(&k3_ringacc_list_lock);
	return 0;
}

static struct platform_driver k3_ringacc_driver = {
	.probe = k3_ringacc_probe,
	.remove = k3_ringacc_remove,
	.driver = {
		.name = "k3-ringacc",
		.of_match_table = k3_ringacc_of_match,
		.suppress_bind_attrs = true,
	},
};
module_platform_driver(k3_ringacc_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("TI Ringacc driver for K3 SOCs");
MODULE_AUTHOR("Grygorii Strashko <grygorii.strashko@ti.com>");