// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
 */

#include "dp_rx.h"
#include "debug.h"
#include "hif.h"

const struct ce_attr ath11k_host_ce_config_ipq8074[] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 16,
		.src_sz_max = 2048,
		.dest_nentries = 0,
	},

	/* CE1: target->host HTT + HTC control */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath11k_htc_rx_completion_handler,
	},

	/* CE2: target->host WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath11k_htc_rx_completion_handler,
	},

	/* CE3: host->target WMI (mac0) */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
	},

	/* CE4: host->target HTT */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 2048,
		.src_sz_max = 256,
		.dest_nentries = 0,
	},

	/* CE5: target->host pktlog */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath11k_dp_htt_htc_t2h_msg_handler,
	},

	/* CE6: target autonomous hif_memcpy */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE7: host->target WMI (mac1) */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
	},

	/* CE8: target autonomous hif_memcpy */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE9: host->target WMI (mac2) */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
	},

	/* CE10: target->host HTT */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath11k_htc_rx_completion_handler,
	},

	/* CE11: Not used */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},
};
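
/* QCA6390 (PCI) uses a shorter pipe layout than IPQ8074: only CE0-CE8
 * are configured, and the extra per-MAC WMI/HTT pipes (CE9/CE10) are
 * dropped. The conventions are the same in both tables: src_nentries != 0
 * makes a host->target pipe, dest_nentries != 0 makes a target->host pipe
 * with a recv_cb, and CE_ATTR_DIS_INTR selects polled completion handling.
 */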
const struct ce_attr ath11k_host_ce_config_qca6390[] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 16,
		.src_sz_max = 2048,
		.dest_nentries = 0,
	},

	/* CE1: target->host HTT + HTC control */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath11k_htc_rx_completion_handler,
	},

	/* CE2: target->host WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath11k_htc_rx_completion_handler,
	},

	/* CE3: host->target WMI (mac0) */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
	},

	/* CE4: host->target HTT */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 2048,
		.src_sz_max = 256,
		.dest_nentries = 0,
	},

	/* CE5: target->host pktlog */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath11k_dp_htt_htc_t2h_msg_handler,
	},

	/* CE6: target autonomous hif_memcpy */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE7: host->target WMI (mac1) */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
	},

	/* CE8: target autonomous hif_memcpy */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},
};

static int ath11k_ce_rx_buf_enqueue_pipe(struct ath11k_ce_pipe *pipe,
					 struct sk_buff *skb, dma_addr_t paddr)
{
	struct ath11k_base *ab = pipe->ab;
	struct ath11k_ce_ring *ring = pipe->dest_ring;
	struct hal_srng *srng;
	unsigned int write_index;
	unsigned int nentries_mask = ring->nentries_mask;
	u32 *desc;
	int ret;

	lockdep_assert_held(&ab->ce.ce_lock);

	write_index = ring->write_index;

	srng = &ab->hal.srng_list[ring->hal_ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	if (unlikely(ath11k_hal_srng_src_num_free(ab, srng, false) < 1)) {
		ret = -ENOSPC;
		goto exit;
	}

	desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
	if (!desc) {
		ret = -ENOSPC;
		goto exit;
	}

	ath11k_hal_ce_dst_set_desc(desc, paddr);

	ring->skb[write_index] = skb;
	write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
	ring->write_index = write_index;

	pipe->rx_buf_needed--;

	ret = 0;
exit:
	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	return ret;
}

static int ath11k_ce_rx_post_pipe(struct ath11k_ce_pipe *pipe)
{
	struct ath11k_base *ab = pipe->ab;
	struct sk_buff *skb;
	dma_addr_t paddr;
	int ret = 0;

	if (!(pipe->dest_ring || pipe->status_ring))
		return 0;

	spin_lock_bh(&ab->ce.ce_lock);
	while (pipe->rx_buf_needed) {
		skb = dev_alloc_skb(pipe->buf_sz);
		if (!skb) {
			ret = -ENOMEM;
			goto exit;
		}

		WARN_ON_ONCE(!IS_ALIGNED((unsigned long)skb->data, 4));

		paddr = dma_map_single(ab->dev, skb->data,
				       skb->len + skb_tailroom(skb),
				       DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(ab->dev, paddr))) {
			ath11k_warn(ab, "failed to dma map ce rx buf\n");
			dev_kfree_skb_any(skb);
			ret = -EIO;
			goto exit;
		}

		ATH11K_SKB_RXCB(skb)->paddr = paddr;

		ret = ath11k_ce_rx_buf_enqueue_pipe(pipe, skb, paddr);
		if (ret) {
			ath11k_warn(ab, "failed to enqueue rx buf: %d\n", ret);
			dma_unmap_single(ab->dev, paddr,
					 skb->len + skb_tailroom(skb),
					 DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);
			goto exit;
		}
	}

exit:
	spin_unlock_bh(&ab->ce.ce_lock);
	return ret;
}
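
/* Reap one completed RX buffer from @pipe's destination status ring.
 * Ring indices wrap with a power-of-two mask; CE_RING_IDX_INCR (defined
 * in ce.h) is expected to reduce to roughly (idx + 1) & nentries_mask,
 * e.g. with 512 entries (mask 0x1ff) index 511 wraps back to 0.
 * Returns 0 and hands back the skb and its byte count, or -EIO when the
 * status ring is empty or reports a zero-length completion.
 */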
static int ath11k_ce_completed_recv_next(struct ath11k_ce_pipe *pipe,
					 struct sk_buff **skb, int *nbytes)
{
	struct ath11k_base *ab = pipe->ab;
	struct hal_srng *srng;
	unsigned int sw_index;
	unsigned int nentries_mask;
	u32 *desc;
	int ret = 0;

	spin_lock_bh(&ab->ce.ce_lock);

	sw_index = pipe->dest_ring->sw_index;
	nentries_mask = pipe->dest_ring->nentries_mask;

	srng = &ab->hal.srng_list[pipe->status_ring->hal_ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	desc = ath11k_hal_srng_dst_get_next_entry(ab, srng);
	if (!desc) {
		ret = -EIO;
		goto err;
	}

	*nbytes = ath11k_hal_ce_dst_status_get_length(desc);
	if (*nbytes == 0) {
		ret = -EIO;
		goto err;
	}

	*skb = pipe->dest_ring->skb[sw_index];
	pipe->dest_ring->skb[sw_index] = NULL;

	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	pipe->dest_ring->sw_index = sw_index;

	pipe->rx_buf_needed++;
err:
	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	spin_unlock_bh(&ab->ce.ce_lock);

	return ret;
}

static void ath11k_ce_recv_process_cb(struct ath11k_ce_pipe *pipe)
{
	struct ath11k_base *ab = pipe->ab;
	struct sk_buff *skb;
	struct sk_buff_head list;
	unsigned int nbytes, max_nbytes;
	int ret;

	__skb_queue_head_init(&list);
	while (ath11k_ce_completed_recv_next(pipe, &skb, &nbytes) == 0) {
		max_nbytes = skb->len + skb_tailroom(skb);
		dma_unmap_single(ab->dev, ATH11K_SKB_RXCB(skb)->paddr,
				 max_nbytes, DMA_FROM_DEVICE);

		if (unlikely(max_nbytes < nbytes)) {
			ath11k_warn(ab, "rxed more than expected (nbytes %d, max %d)",
				    nbytes, max_nbytes);
			dev_kfree_skb_any(skb);
			continue;
		}

		skb_put(skb, nbytes);
		__skb_queue_tail(&list, skb);
	}

	while ((skb = __skb_dequeue(&list))) {
		ath11k_dbg(ab, ATH11K_DBG_AHB, "rx ce pipe %d len %d\n",
			   pipe->pipe_num, skb->len);
		pipe->recv_cb(ab, skb);
	}

	ret = ath11k_ce_rx_post_pipe(pipe);
	if (ret && ret != -ENOSPC) {
		ath11k_warn(ab, "failed to post rx buf to pipe: %d err: %d\n",
			    pipe->pipe_num, ret);
		mod_timer(&ab->rx_replenish_retry,
			  jiffies + ATH11K_CE_RX_POST_RETRY_JIFFIES);
	}
}

static struct sk_buff *ath11k_ce_completed_send_next(struct ath11k_ce_pipe *pipe)
{
	struct ath11k_base *ab = pipe->ab;
	struct hal_srng *srng;
	unsigned int sw_index;
	unsigned int nentries_mask;
	struct sk_buff *skb;
	u32 *desc;

	spin_lock_bh(&ab->ce.ce_lock);

	sw_index = pipe->src_ring->sw_index;
	nentries_mask = pipe->src_ring->nentries_mask;

	srng = &ab->hal.srng_list[pipe->src_ring->hal_ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	desc = ath11k_hal_srng_src_reap_next(ab, srng);
	if (!desc) {
		skb = ERR_PTR(-EIO);
		goto err_unlock;
	}

	skb = pipe->src_ring->skb[sw_index];

	pipe->src_ring->skb[sw_index] = NULL;

	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	pipe->src_ring->sw_index = sw_index;

err_unlock:
	spin_unlock_bh(&srng->lock);

	spin_unlock_bh(&ab->ce.ce_lock);

	return skb;
}

static void ath11k_ce_send_done_cb(struct ath11k_ce_pipe *pipe)
{
	struct ath11k_base *ab = pipe->ab;
	struct sk_buff *skb;

	while (!IS_ERR(skb = ath11k_ce_completed_send_next(pipe))) {
		if (!skb)
			continue;

		dma_unmap_single(ab->dev, ATH11K_SKB_CB(skb)->paddr, skb->len,
				 DMA_TO_DEVICE);
		dev_kfree_skb_any(skb);
	}
}

static void ath11k_ce_srng_msi_ring_params_setup(struct ath11k_base *ab, u32 ce_id,
						 struct hal_srng_params *ring_params)
{
	u32 msi_data_start;
	u32 msi_data_count;
	u32 msi_irq_start;
	u32 addr_lo;
	u32 addr_hi;
	int ret;

	ret = ath11k_get_user_msi_vector(ab, "CE",
					 &msi_data_count, &msi_data_start,
					 &msi_irq_start);
	if (ret)
		return;

	ath11k_get_msi_address(ab, &addr_lo, &addr_hi);

	ring_params->msi_addr = addr_lo;
	ring_params->msi_addr |= (dma_addr_t)(((uint64_t)addr_hi) << 32);
	ring_params->msi_data = (ce_id % msi_data_count) + msi_data_start;
	ring_params->flags |= HAL_SRNG_FLAGS_MSI_INTR;
}
static int ath11k_ce_init_ring(struct ath11k_base *ab,
			       struct ath11k_ce_ring *ce_ring,
			       int ce_id, enum hal_ring_type type)
{
	struct hal_srng_params params = { 0 };
	int ret;

	params.ring_base_paddr = ce_ring->base_addr_ce_space;
	params.ring_base_vaddr = ce_ring->base_addr_owner_space;
	params.num_entries = ce_ring->nentries;

	/* MSI parameters must be filled in before the ring is set up;
	 * writing them afterwards would never reach the hardware.
	 */
	if (!(CE_ATTR_DIS_INTR & ab->hw_params.host_ce_config[ce_id].flags))
		ath11k_ce_srng_msi_ring_params_setup(ab, ce_id, &params);

	switch (type) {
	case HAL_CE_SRC:
		if (!(CE_ATTR_DIS_INTR & ab->hw_params.host_ce_config[ce_id].flags))
			params.intr_batch_cntr_thres_entries = 1;
		break;
	case HAL_CE_DST:
		params.max_buffer_len = ab->hw_params.host_ce_config[ce_id].src_sz_max;
		if (!(ab->hw_params.host_ce_config[ce_id].flags & CE_ATTR_DIS_INTR)) {
			params.intr_timer_thres_us = 1024;
			params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
			params.low_threshold = ce_ring->nentries - 3;
		}
		break;
	case HAL_CE_DST_STATUS:
		if (!(ab->hw_params.host_ce_config[ce_id].flags & CE_ATTR_DIS_INTR)) {
			params.intr_batch_cntr_thres_entries = 1;
			params.intr_timer_thres_us = 0x1000;
		}
		break;
	default:
		ath11k_warn(ab, "Invalid CE ring type %d\n", type);
		return -EINVAL;
	}

	/* TODO: Init other params needed by HAL to init the ring */

	ret = ath11k_hal_srng_setup(ab, type, ce_id, 0, &params);
	if (ret < 0) {
		ath11k_warn(ab, "failed to setup srng: %d ring_id %d\n",
			    ret, ce_id);
		return ret;
	}

	ce_ring->hal_ring_id = ret;

	return 0;
}

static struct ath11k_ce_ring *
ath11k_ce_alloc_ring(struct ath11k_base *ab, int nentries, int desc_sz)
{
	struct ath11k_ce_ring *ce_ring;
	dma_addr_t base_addr;

	ce_ring = kzalloc(struct_size(ce_ring, skb, nentries), GFP_KERNEL);
	if (ce_ring == NULL)
		return ERR_PTR(-ENOMEM);

	ce_ring->nentries = nentries;
	ce_ring->nentries_mask = nentries - 1;

	/* Legacy platforms that do not support cache
	 * coherent DMA are unsupported
	 */
	ce_ring->base_addr_owner_space_unaligned =
		dma_alloc_coherent(ab->dev,
				   nentries * desc_sz + CE_DESC_RING_ALIGN,
				   &base_addr, GFP_KERNEL);
	if (!ce_ring->base_addr_owner_space_unaligned) {
		kfree(ce_ring);
		return ERR_PTR(-ENOMEM);
	}

	ce_ring->base_addr_ce_space_unaligned = base_addr;

	ce_ring->base_addr_owner_space = PTR_ALIGN(
			ce_ring->base_addr_owner_space_unaligned,
			CE_DESC_RING_ALIGN);
	ce_ring->base_addr_ce_space = ALIGN(
			ce_ring->base_addr_ce_space_unaligned,
			CE_DESC_RING_ALIGN);

	return ce_ring;
}
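
/* Ring depths are passed through roundup_pow_of_two() before allocation
 * so the mask-based index arithmetic (nentries_mask = nentries - 1)
 * stays valid; the depths in the tables above (16, 32, 512, 2048) are
 * already powers of two, so this only guards the invariant. A
 * destination pipe also gets a status ring of the same depth to carry
 * completion lengths.
 */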
static int ath11k_ce_alloc_pipe(struct ath11k_base *ab, int ce_id)
{
	struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[ce_id];
	const struct ce_attr *attr = &ab->hw_params.host_ce_config[ce_id];
	struct ath11k_ce_ring *ring;
	int nentries;
	int desc_sz;

	pipe->attr_flags = attr->flags;

	if (attr->src_nentries) {
		pipe->send_cb = ath11k_ce_send_done_cb;
		nentries = roundup_pow_of_two(attr->src_nentries);
		desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_SRC);
		ring = ath11k_ce_alloc_ring(ab, nentries, desc_sz);
		if (IS_ERR(ring))
			return PTR_ERR(ring);
		pipe->src_ring = ring;
	}

	if (attr->dest_nentries) {
		pipe->recv_cb = attr->recv_cb;
		nentries = roundup_pow_of_two(attr->dest_nentries);
		desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST);
		ring = ath11k_ce_alloc_ring(ab, nentries, desc_sz);
		if (IS_ERR(ring))
			return PTR_ERR(ring);
		pipe->dest_ring = ring;

		desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST_STATUS);
		ring = ath11k_ce_alloc_ring(ab, nentries, desc_sz);
		if (IS_ERR(ring))
			return PTR_ERR(ring);
		pipe->status_ring = ring;
	}

	return 0;
}

void ath11k_ce_per_engine_service(struct ath11k_base *ab, u16 ce_id)
{
	struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[ce_id];

	if (pipe->send_cb)
		pipe->send_cb(pipe);

	if (pipe->recv_cb)
		ath11k_ce_recv_process_cb(pipe);
}
EXPORT_SYMBOL(ath11k_ce_per_engine_service);

void ath11k_ce_poll_send_completed(struct ath11k_base *ab, u8 pipe_id)
{
	struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[pipe_id];

	if ((pipe->attr_flags & CE_ATTR_DIS_INTR) && pipe->send_cb)
		pipe->send_cb(pipe);
}

int ath11k_ce_send(struct ath11k_base *ab, struct sk_buff *skb, u8 pipe_id,
		   u16 transfer_id)
{
	struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[pipe_id];
	struct hal_srng *srng;
	u32 *desc;
	unsigned int write_index, sw_index;
	unsigned int nentries_mask;
	int ret = 0;
	u8 byte_swap_data = 0;
	int num_used;

	/* Check if some entries could be regained by handling tx completion if
	 * the CE has interrupts disabled and the used entries is more than the
	 * defined usage threshold.
	 */
	if (pipe->attr_flags & CE_ATTR_DIS_INTR) {
		spin_lock_bh(&ab->ce.ce_lock);
		write_index = pipe->src_ring->write_index;

		sw_index = pipe->src_ring->sw_index;

		if (write_index >= sw_index)
			num_used = write_index - sw_index;
		else
			num_used = pipe->src_ring->nentries - sw_index +
				   write_index;

		spin_unlock_bh(&ab->ce.ce_lock);

		if (num_used > ATH11K_CE_USAGE_THRESHOLD)
			ath11k_ce_poll_send_completed(ab, pipe->pipe_num);
	}

	if (test_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags))
		return -ESHUTDOWN;

	spin_lock_bh(&ab->ce.ce_lock);

	write_index = pipe->src_ring->write_index;
	nentries_mask = pipe->src_ring->nentries_mask;

	srng = &ab->hal.srng_list[pipe->src_ring->hal_ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	if (unlikely(ath11k_hal_srng_src_num_free(ab, srng, false) < 1)) {
		ath11k_hal_srng_access_end(ab, srng);
		ret = -ENOBUFS;
		goto err_unlock;
	}

	desc = ath11k_hal_srng_src_get_next_reaped(ab, srng);
	if (!desc) {
		ath11k_hal_srng_access_end(ab, srng);
		ret = -ENOBUFS;
		goto err_unlock;
	}

	if (pipe->attr_flags & CE_ATTR_BYTE_SWAP_DATA)
		byte_swap_data = 1;

	ath11k_hal_ce_src_set_desc(desc, ATH11K_SKB_CB(skb)->paddr,
				   skb->len, transfer_id, byte_swap_data);

	pipe->src_ring->skb[write_index] = skb;
	pipe->src_ring->write_index = CE_RING_IDX_INCR(nentries_mask,
						       write_index);

	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	spin_unlock_bh(&ab->ce.ce_lock);

	return 0;

err_unlock:
	spin_unlock_bh(&srng->lock);

	spin_unlock_bh(&ab->ce.ce_lock);

	return ret;
}
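
/* Teardown helpers: ath11k_ce_rx_pipe_cleanup() unmaps and frees any RX
 * skbs still posted to a pipe's destination ring. TX (source ring) skbs
 * are only reaped on teardown for polled pipes, via
 * ath11k_ce_poll_send_completed() in ath11k_ce_cleanup_pipes() below.
 */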
static void ath11k_ce_rx_pipe_cleanup(struct ath11k_ce_pipe *pipe)
{
	struct ath11k_base *ab = pipe->ab;
	struct ath11k_ce_ring *ring = pipe->dest_ring;
	struct sk_buff *skb;
	int i;

	if (!(ring && pipe->buf_sz))
		return;

	for (i = 0; i < ring->nentries; i++) {
		skb = ring->skb[i];
		if (!skb)
			continue;

		ring->skb[i] = NULL;
		dma_unmap_single(ab->dev, ATH11K_SKB_RXCB(skb)->paddr,
				 skb->len + skb_tailroom(skb), DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
	}
}

void ath11k_ce_cleanup_pipes(struct ath11k_base *ab)
{
	struct ath11k_ce_pipe *pipe;
	int pipe_num;

	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
		pipe = &ab->ce.ce_pipe[pipe_num];
		ath11k_ce_rx_pipe_cleanup(pipe);

		/* Cleanup any src CE's which have interrupts disabled */
		ath11k_ce_poll_send_completed(ab, pipe_num);

		/* NOTE: Should we also clean up tx buffer in all pipes? */
	}
}
EXPORT_SYMBOL(ath11k_ce_cleanup_pipes);

void ath11k_ce_rx_post_buf(struct ath11k_base *ab)
{
	struct ath11k_ce_pipe *pipe;
	int i;
	int ret;

	for (i = 0; i < CE_COUNT; i++) {
		pipe = &ab->ce.ce_pipe[i];
		ret = ath11k_ce_rx_post_pipe(pipe);
		if (ret) {
			if (ret == -ENOSPC)
				continue;

			ath11k_warn(ab, "failed to post rx buf to pipe: %d err: %d\n",
				    i, ret);
			mod_timer(&ab->rx_replenish_retry,
				  jiffies + ATH11K_CE_RX_POST_RETRY_JIFFIES);

			return;
		}
	}
}
EXPORT_SYMBOL(ath11k_ce_rx_post_buf);

void ath11k_ce_rx_replenish_retry(struct timer_list *t)
{
	struct ath11k_base *ab = from_timer(ab, t, rx_replenish_retry);

	ath11k_ce_rx_post_buf(ab);
}
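
/* Program every allocated ring into the HAL and reset the software
 * indices. rx_buf_needed is primed to nentries - 2 rather than the full
 * depth, keeping at least one entry unused (a common ring-buffer trick
 * so a completely full ring is not mistaken for an empty one).
 */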
int ath11k_ce_init_pipes(struct ath11k_base *ab)
{
	struct ath11k_ce_pipe *pipe;
	int i;
	int ret;

	for (i = 0; i < CE_COUNT; i++) {
		pipe = &ab->ce.ce_pipe[i];

		if (pipe->src_ring) {
			ret = ath11k_ce_init_ring(ab, pipe->src_ring, i,
						  HAL_CE_SRC);
			if (ret) {
				ath11k_warn(ab, "failed to init src ring: %d\n",
					    ret);
				/* Should we clear any partial init */
				return ret;
			}

			pipe->src_ring->write_index = 0;
			pipe->src_ring->sw_index = 0;
		}

		if (pipe->dest_ring) {
			ret = ath11k_ce_init_ring(ab, pipe->dest_ring, i,
						  HAL_CE_DST);
			if (ret) {
				ath11k_warn(ab, "failed to init dest ring: %d\n",
					    ret);
				/* Should we clear any partial init */
				return ret;
			}

			pipe->rx_buf_needed = pipe->dest_ring->nentries ?
					      pipe->dest_ring->nentries - 2 : 0;

			pipe->dest_ring->write_index = 0;
			pipe->dest_ring->sw_index = 0;
		}

		if (pipe->status_ring) {
			ret = ath11k_ce_init_ring(ab, pipe->status_ring, i,
						  HAL_CE_DST_STATUS);
			if (ret) {
				ath11k_warn(ab, "failed to init dest status ring: %d\n",
					    ret);
				/* Should we clear any partial init */
				return ret;
			}

			pipe->status_ring->write_index = 0;
			pipe->status_ring->sw_index = 0;
		}
	}

	return 0;
}

void ath11k_ce_free_pipes(struct ath11k_base *ab)
{
	struct ath11k_ce_pipe *pipe;
	int desc_sz;
	int i;

	for (i = 0; i < CE_COUNT; i++) {
		pipe = &ab->ce.ce_pipe[i];

		if (pipe->src_ring) {
			desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_SRC);
			dma_free_coherent(ab->dev,
					  pipe->src_ring->nentries * desc_sz +
					  CE_DESC_RING_ALIGN,
					  pipe->src_ring->base_addr_owner_space,
					  pipe->src_ring->base_addr_ce_space);
			kfree(pipe->src_ring);
			pipe->src_ring = NULL;
		}

		if (pipe->dest_ring) {
			desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST);
			dma_free_coherent(ab->dev,
					  pipe->dest_ring->nentries * desc_sz +
					  CE_DESC_RING_ALIGN,
					  pipe->dest_ring->base_addr_owner_space,
					  pipe->dest_ring->base_addr_ce_space);
			kfree(pipe->dest_ring);
			pipe->dest_ring = NULL;
		}

		if (pipe->status_ring) {
			desc_sz =
			  ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST_STATUS);
			dma_free_coherent(ab->dev,
					  pipe->status_ring->nentries * desc_sz +
					  CE_DESC_RING_ALIGN,
					  pipe->status_ring->base_addr_owner_space,
					  pipe->status_ring->base_addr_ce_space);
			kfree(pipe->status_ring);
			pipe->status_ring = NULL;
		}
	}
}
EXPORT_SYMBOL(ath11k_ce_free_pipes);

int ath11k_ce_alloc_pipes(struct ath11k_base *ab)
{
	struct ath11k_ce_pipe *pipe;
	int i;
	int ret;
	const struct ce_attr *attr;

	spin_lock_init(&ab->ce.ce_lock);

	for (i = 0; i < CE_COUNT; i++) {
		attr = &ab->hw_params.host_ce_config[i];
		pipe = &ab->ce.ce_pipe[i];
		pipe->pipe_num = i;
		pipe->ab = ab;
		pipe->buf_sz = attr->src_sz_max;

		ret = ath11k_ce_alloc_pipe(ab, i);
		if (ret) {
			/* Free any partially successful allocations */
			ath11k_ce_free_pipes(ab);
			return ret;
		}
	}

	return 0;
}
EXPORT_SYMBOL(ath11k_ce_alloc_pipes);

/* On a big-endian host the Copy Engine byte_swap is enabled. When the
 * Copy Engine does the byte_swap, the host must swap again so that it
 * gets/puts buffer content in the correct byte order.
 */
void ath11k_ce_byte_swap(void *mem, u32 len)
{
	int i;

	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) {
		if (!mem)
			return;

		for (i = 0; i < (len / 4); i++) {
			*(u32 *)mem = swab32(*(u32 *)mem);
			mem += 4;
		}
	}
}

int ath11k_ce_get_attr_flags(struct ath11k_base *ab, int ce_id)
{
	if (ce_id >= CE_COUNT)
		return -EINVAL;

	return ab->hw_params.host_ce_config[ce_id].flags;
}
EXPORT_SYMBOL(ath11k_ce_get_attr_flags);
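
/* A rough sketch of how the exported entry points fit together over the
 * driver life cycle (assuming the usual ath11k core flow; the actual
 * call sites live in the bus and core code, not in this file):
 *
 *	ath11k_ce_alloc_pipes(ab);	// probe: rings + pipe state
 *	ath11k_ce_init_pipes(ab);	// start: program rings into HAL
 *	ath11k_ce_rx_post_buf(ab);	// start: prime RX buffers
 *	...
 *	ath11k_ce_cleanup_pipes(ab);	// stop: reclaim posted buffers
 *	ath11k_ce_free_pipes(ab);	// remove: release ring memory
 */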