// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
 */

#include "dp_rx.h"
#include "debug.h"
#include "hif.h"

/* Resolves to the per-chip CE attribute table. Note that the macro
 * expands using the local "ab" pointer, so it is only valid in scopes
 * where a struct ath11k_base *ab is visible.
 */
#define host_ce_config_wlan ab->hw_params.host_ce_config

const struct ce_attr ath11k_host_ce_config_ipq8074[] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 16,
		.src_sz_max = 2048,
		.dest_nentries = 0,
	},

	/* CE1: target->host HTT + HTC control */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath11k_htc_rx_completion_handler,
	},

	/* CE2: target->host WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath11k_htc_rx_completion_handler,
	},

	/* CE3: host->target WMI (mac0) */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
	},

	/* CE4: host->target HTT */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 2048,
		.src_sz_max = 256,
		.dest_nentries = 0,
	},

	/* CE5: target->host pktlog */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath11k_dp_htt_htc_t2h_msg_handler,
	},

	/* CE6: target autonomous hif_memcpy */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE7: host->target WMI (mac1) */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
	},

	/* CE8: target autonomous hif_memcpy */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE9: host->target WMI (mac2) */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
	},

	/* CE10: target->host HTT */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath11k_htc_rx_completion_handler,
	},

	/* CE11: Not used */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},
};
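/* The attribute tables in this file are selected per chip at probe time
 * through ab->hw_params.host_ce_config (see the host_ce_config_wlan
 * macro above). A minimal sketch of how a hw_params entry might wire
 * this up -- the field names here are illustrative assumptions, not a
 * definitive listing of struct ath11k_hw_params:
 *
 *	static const struct ath11k_hw_params ipq8074_hw_params = {
 *		.host_ce_config = ath11k_host_ce_config_ipq8074,
 *		.ce_count = 12,
 *		...
 *	};
 */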
const struct ce_attr ath11k_host_ce_config_qca6390[] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 16,
		.src_sz_max = 2048,
		.dest_nentries = 0,
	},

	/* CE1: target->host HTT + HTC control */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath11k_htc_rx_completion_handler,
	},

	/* CE2: target->host WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath11k_htc_rx_completion_handler,
	},

	/* CE3: host->target WMI (mac0) */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
	},

	/* CE4: host->target HTT */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 2048,
		.src_sz_max = 256,
		.dest_nentries = 0,
	},

	/* CE5: target->host pktlog */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath11k_dp_htt_htc_t2h_msg_handler,
	},

	/* CE6: target autonomous hif_memcpy */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE7: host->target WMI (mac1) */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
	},

	/* CE8: target autonomous hif_memcpy */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},
};

static int ath11k_ce_rx_buf_enqueue_pipe(struct ath11k_ce_pipe *pipe,
					 struct sk_buff *skb, dma_addr_t paddr)
{
	struct ath11k_base *ab = pipe->ab;
	struct ath11k_ce_ring *ring = pipe->dest_ring;
	struct hal_srng *srng;
	unsigned int write_index;
	unsigned int nentries_mask = ring->nentries_mask;
	u32 *desc;
	int ret;

	lockdep_assert_held(&ab->ce.ce_lock);

	write_index = ring->write_index;

	srng = &ab->hal.srng_list[ring->hal_ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	if (unlikely(ath11k_hal_srng_src_num_free(ab, srng, false) < 1)) {
		ret = -ENOSPC;
		goto exit;
	}

	desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
	if (!desc) {
		ret = -ENOSPC;
		goto exit;
	}

	ath11k_hal_ce_dst_set_desc(desc, paddr);

	ring->skb[write_index] = skb;
	write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
	ring->write_index = write_index;

	pipe->rx_buf_needed--;

	ret = 0;
exit:
	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	return ret;
}

static int ath11k_ce_rx_post_pipe(struct ath11k_ce_pipe *pipe)
{
	struct ath11k_base *ab = pipe->ab;
	struct sk_buff *skb;
	dma_addr_t paddr;
	int ret = 0;

	if (!(pipe->dest_ring || pipe->status_ring))
		return 0;

	spin_lock_bh(&ab->ce.ce_lock);
	while (pipe->rx_buf_needed) {
		skb = dev_alloc_skb(pipe->buf_sz);
		if (!skb) {
			ret = -ENOMEM;
			goto exit;
		}

		WARN_ON_ONCE(!IS_ALIGNED((unsigned long)skb->data, 4));

		paddr = dma_map_single(ab->dev, skb->data,
				       skb->len + skb_tailroom(skb),
				       DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(ab->dev, paddr))) {
			ath11k_warn(ab, "failed to dma map ce rx buf\n");
			dev_kfree_skb_any(skb);
			ret = -EIO;
			goto exit;
		}

		ATH11K_SKB_RXCB(skb)->paddr = paddr;

		ret = ath11k_ce_rx_buf_enqueue_pipe(pipe, skb, paddr);
		if (ret) {
			ath11k_warn(ab, "failed to enqueue rx buf: %d\n", ret);
			dma_unmap_single(ab->dev, paddr,
					 skb->len + skb_tailroom(skb),
					 DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);
			goto exit;
		}
	}

exit:
	spin_unlock_bh(&ab->ce.ce_lock);
	return ret;
}
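/* Ring indices in the helpers above and below wrap with a power-of-two
 * mask rather than a modulo: CE_RING_IDX_INCR(nentries_mask, idx) is
 * effectively ((idx) + 1) & (nentries_mask). Worked example: with 512
 * entries the mask is 511, so incrementing index 511 gives
 * (511 + 1) & 511 = 0. This is why ath11k_ce_alloc_pipe() rounds the
 * configured entry counts up with roundup_pow_of_two().
 */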
static int ath11k_ce_completed_recv_next(struct ath11k_ce_pipe *pipe,
					 struct sk_buff **skb, int *nbytes)
{
	struct ath11k_base *ab = pipe->ab;
	struct hal_srng *srng;
	unsigned int sw_index;
	unsigned int nentries_mask;
	u32 *desc;
	int ret = 0;

	spin_lock_bh(&ab->ce.ce_lock);

	sw_index = pipe->dest_ring->sw_index;
	nentries_mask = pipe->dest_ring->nentries_mask;

	srng = &ab->hal.srng_list[pipe->status_ring->hal_ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	desc = ath11k_hal_srng_dst_get_next_entry(ab, srng);
	if (!desc) {
		ret = -EIO;
		goto err;
	}

	*nbytes = ath11k_hal_ce_dst_status_get_length(desc);
	if (*nbytes == 0) {
		ret = -EIO;
		goto err;
	}

	*skb = pipe->dest_ring->skb[sw_index];
	pipe->dest_ring->skb[sw_index] = NULL;

	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	pipe->dest_ring->sw_index = sw_index;

	pipe->rx_buf_needed++;
err:
	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	spin_unlock_bh(&ab->ce.ce_lock);

	return ret;
}

static void ath11k_ce_recv_process_cb(struct ath11k_ce_pipe *pipe)
{
	struct ath11k_base *ab = pipe->ab;
	struct sk_buff *skb;
	struct sk_buff_head list;
	unsigned int nbytes, max_nbytes;
	int ret;

	__skb_queue_head_init(&list);
	while (ath11k_ce_completed_recv_next(pipe, &skb, &nbytes) == 0) {
		max_nbytes = skb->len + skb_tailroom(skb);
		dma_unmap_single(ab->dev, ATH11K_SKB_RXCB(skb)->paddr,
				 max_nbytes, DMA_FROM_DEVICE);

		if (unlikely(max_nbytes < nbytes)) {
			ath11k_warn(ab, "rxed more than expected (nbytes %d, max %d)",
				    nbytes, max_nbytes);
			dev_kfree_skb_any(skb);
			continue;
		}

		skb_put(skb, nbytes);
		__skb_queue_tail(&list, skb);
	}

	while ((skb = __skb_dequeue(&list))) {
		ath11k_dbg(ab, ATH11K_DBG_AHB, "rx ce pipe %d len %d\n",
			   pipe->pipe_num, skb->len);
		pipe->recv_cb(ab, skb);
	}

	ret = ath11k_ce_rx_post_pipe(pipe);
	if (ret && ret != -ENOSPC) {
		ath11k_warn(ab, "failed to post rx buf to pipe: %d err: %d\n",
			    pipe->pipe_num, ret);
		mod_timer(&ab->rx_replenish_retry,
			  jiffies + ATH11K_CE_RX_POST_RETRY_JIFFIES);
	}
}

static struct sk_buff *ath11k_ce_completed_send_next(struct ath11k_ce_pipe *pipe)
{
	struct ath11k_base *ab = pipe->ab;
	struct hal_srng *srng;
	unsigned int sw_index;
	unsigned int nentries_mask;
	struct sk_buff *skb;
	u32 *desc;

	spin_lock_bh(&ab->ce.ce_lock);

	sw_index = pipe->src_ring->sw_index;
	nentries_mask = pipe->src_ring->nentries_mask;

	srng = &ab->hal.srng_list[pipe->src_ring->hal_ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	desc = ath11k_hal_srng_src_reap_next(ab, srng);
	if (!desc) {
		skb = ERR_PTR(-EIO);
		goto err_unlock;
	}

	skb = pipe->src_ring->skb[sw_index];
	pipe->src_ring->skb[sw_index] = NULL;

	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	pipe->src_ring->sw_index = sw_index;

err_unlock:
	spin_unlock_bh(&srng->lock);

	spin_unlock_bh(&ab->ce.ce_lock);

	return skb;
}

static void ath11k_ce_send_done_cb(struct ath11k_ce_pipe *pipe)
{
	struct ath11k_base *ab = pipe->ab;
	struct sk_buff *skb;

	while (!IS_ERR(skb = ath11k_ce_completed_send_next(pipe))) {
		if (!skb)
			continue;

		dma_unmap_single(ab->dev, ATH11K_SKB_CB(skb)->paddr, skb->len,
				 DMA_TO_DEVICE);
		dev_kfree_skb_any(skb);
	}
}
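/* Lock ordering note: the completion helpers above take ab->ce.ce_lock
 * first and the per-ring srng->lock second, releasing them in reverse
 * order. Any new helper that touches both the CE bookkeeping (sw_index,
 * write_index, skb[]) and the HAL ring should follow the same order to
 * stay deadlock free. Sketch (hypothetical helper, not driver code):
 *
 *	spin_lock_bh(&ab->ce.ce_lock);
 *	spin_lock_bh(&srng->lock);
 *	... update ring state ...
 *	spin_unlock_bh(&srng->lock);
 *	spin_unlock_bh(&ab->ce.ce_lock);
 */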
static void ath11k_ce_srng_msi_ring_params_setup(struct ath11k_base *ab, u32 ce_id,
						 struct hal_srng_params *ring_params)
{
	u32 msi_data_start;
	u32 msi_data_count;
	u32 msi_irq_start;
	u32 addr_lo;
	u32 addr_hi;
	int ret;

	ret = ath11k_get_user_msi_vector(ab, "CE",
					 &msi_data_count, &msi_data_start,
					 &msi_irq_start);
	if (ret)
		return;

	ath11k_get_msi_address(ab, &addr_lo, &addr_hi);

	ring_params->msi_addr = addr_lo;
	ring_params->msi_addr |= (dma_addr_t)(((uint64_t)addr_hi) << 32);
	ring_params->msi_data = (ce_id % msi_data_count) + msi_data_start;
	ring_params->flags |= HAL_SRNG_FLAGS_MSI_INTR;
}

static int ath11k_ce_init_ring(struct ath11k_base *ab,
			       struct ath11k_ce_ring *ce_ring,
			       int ce_id, enum hal_ring_type type)
{
	struct hal_srng_params params = { 0 };
	int ret;

	params.ring_base_paddr = ce_ring->base_addr_ce_space;
	params.ring_base_vaddr = ce_ring->base_addr_owner_space;
	params.num_entries = ce_ring->nentries;

	/* MSI params must be filled in before the ring is set up, since
	 * ath11k_hal_srng_setup() consumes them.
	 */
	if (!(CE_ATTR_DIS_INTR & host_ce_config_wlan[ce_id].flags))
		ath11k_ce_srng_msi_ring_params_setup(ab, ce_id, &params);

	switch (type) {
	case HAL_CE_SRC:
		if (!(CE_ATTR_DIS_INTR & host_ce_config_wlan[ce_id].flags))
			params.intr_batch_cntr_thres_entries = 1;
		break;
	case HAL_CE_DST:
		params.max_buffer_len = host_ce_config_wlan[ce_id].src_sz_max;
		if (!(host_ce_config_wlan[ce_id].flags & CE_ATTR_DIS_INTR)) {
			params.intr_timer_thres_us = 1024;
			params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
			params.low_threshold = ce_ring->nentries - 3;
		}
		break;
	case HAL_CE_DST_STATUS:
		if (!(host_ce_config_wlan[ce_id].flags & CE_ATTR_DIS_INTR)) {
			params.intr_batch_cntr_thres_entries = 1;
			params.intr_timer_thres_us = 0x1000;
		}
		break;
	default:
		ath11k_warn(ab, "Invalid CE ring type %d\n", type);
		return -EINVAL;
	}

	/* TODO: Init other params needed by HAL to init the ring */

	ret = ath11k_hal_srng_setup(ab, type, ce_id, 0, &params);
	if (ret < 0) {
		ath11k_warn(ab, "failed to setup srng: %d ring_id %d\n",
			    ret, ce_id);
		return ret;
	}

	ce_ring->hal_ring_id = ret;

	return 0;
}

static struct ath11k_ce_ring *
ath11k_ce_alloc_ring(struct ath11k_base *ab, int nentries, int desc_sz)
{
	struct ath11k_ce_ring *ce_ring;
	dma_addr_t base_addr;

	ce_ring = kzalloc(struct_size(ce_ring, skb, nentries), GFP_KERNEL);
	if (!ce_ring)
		return ERR_PTR(-ENOMEM);

	ce_ring->nentries = nentries;
	ce_ring->nentries_mask = nentries - 1;

	/* Legacy platforms that do not support cache
	 * coherent DMA are unsupported
	 */
	ce_ring->base_addr_owner_space_unaligned =
		dma_alloc_coherent(ab->dev,
				   nentries * desc_sz + CE_DESC_RING_ALIGN,
				   &base_addr, GFP_KERNEL);
	if (!ce_ring->base_addr_owner_space_unaligned) {
		kfree(ce_ring);
		return ERR_PTR(-ENOMEM);
	}

	ce_ring->base_addr_ce_space_unaligned = base_addr;

	ce_ring->base_addr_owner_space = PTR_ALIGN(
			ce_ring->base_addr_owner_space_unaligned,
			CE_DESC_RING_ALIGN);
	ce_ring->base_addr_ce_space = ALIGN(
			ce_ring->base_addr_ce_space_unaligned,
			CE_DESC_RING_ALIGN);

	return ce_ring;
}
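/* ath11k_ce_alloc_ring() over-allocates by CE_DESC_RING_ALIGN bytes so
 * that an aligned descriptor base can always be carved out of the
 * coherent buffer. Worked example, assuming CE_DESC_RING_ALIGN is 8: if
 * dma_alloc_coherent() returns 0x...1004, PTR_ALIGN()/ALIGN() advance
 * the owner- and CE-space bases to 0x...1008, and the spare bytes keep
 * the last descriptor inside the allocation.
 */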
static int ath11k_ce_alloc_pipe(struct ath11k_base *ab, int ce_id)
{
	struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[ce_id];
	const struct ce_attr *attr = &host_ce_config_wlan[ce_id];
	struct ath11k_ce_ring *ring;
	int nentries;
	int desc_sz;

	pipe->attr_flags = attr->flags;

	if (attr->src_nentries) {
		pipe->send_cb = ath11k_ce_send_done_cb;
		nentries = roundup_pow_of_two(attr->src_nentries);
		desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_SRC);
		ring = ath11k_ce_alloc_ring(ab, nentries, desc_sz);
		if (IS_ERR(ring))
			return PTR_ERR(ring);
		pipe->src_ring = ring;
	}

	if (attr->dest_nentries) {
		pipe->recv_cb = attr->recv_cb;
		nentries = roundup_pow_of_two(attr->dest_nentries);
		desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST);
		ring = ath11k_ce_alloc_ring(ab, nentries, desc_sz);
		if (IS_ERR(ring))
			return PTR_ERR(ring);
		pipe->dest_ring = ring;

		desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST_STATUS);
		ring = ath11k_ce_alloc_ring(ab, nentries, desc_sz);
		if (IS_ERR(ring))
			return PTR_ERR(ring);
		pipe->status_ring = ring;
	}

	return 0;
}

void ath11k_ce_per_engine_service(struct ath11k_base *ab, u16 ce_id)
{
	struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[ce_id];

	if (pipe->send_cb)
		pipe->send_cb(pipe);

	if (pipe->recv_cb)
		ath11k_ce_recv_process_cb(pipe);
}
EXPORT_SYMBOL(ath11k_ce_per_engine_service);

void ath11k_ce_poll_send_completed(struct ath11k_base *ab, u8 pipe_id)
{
	struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[pipe_id];

	if ((pipe->attr_flags & CE_ATTR_DIS_INTR) && pipe->send_cb)
		pipe->send_cb(pipe);
}

int ath11k_ce_send(struct ath11k_base *ab, struct sk_buff *skb, u8 pipe_id,
		   u16 transfer_id)
{
	struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[pipe_id];
	struct hal_srng *srng;
	u32 *desc;
	unsigned int write_index, sw_index;
	unsigned int nentries_mask;
	int ret = 0;
	u8 byte_swap_data = 0;
	int num_used;

	/* Check if some entries could be regained by handling tx completion if
	 * the CE has interrupts disabled and the used entries is more than the
	 * defined usage threshold.
	 */
	if (pipe->attr_flags & CE_ATTR_DIS_INTR) {
		spin_lock_bh(&ab->ce.ce_lock);
		write_index = pipe->src_ring->write_index;
		sw_index = pipe->src_ring->sw_index;

		if (write_index >= sw_index)
			num_used = write_index - sw_index;
		else
			num_used = pipe->src_ring->nentries - sw_index +
				   write_index;

		spin_unlock_bh(&ab->ce.ce_lock);

		if (num_used > ATH11K_CE_USAGE_THRESHOLD)
			ath11k_ce_poll_send_completed(ab, pipe->pipe_num);
	}

	if (test_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags))
		return -ESHUTDOWN;

	spin_lock_bh(&ab->ce.ce_lock);

	write_index = pipe->src_ring->write_index;
	nentries_mask = pipe->src_ring->nentries_mask;

	srng = &ab->hal.srng_list[pipe->src_ring->hal_ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	if (unlikely(ath11k_hal_srng_src_num_free(ab, srng, false) < 1)) {
		ath11k_hal_srng_access_end(ab, srng);
		ret = -ENOBUFS;
		goto err_unlock;
	}

	desc = ath11k_hal_srng_src_get_next_reaped(ab, srng);
	if (!desc) {
		ath11k_hal_srng_access_end(ab, srng);
		ret = -ENOBUFS;
		goto err_unlock;
	}

	if (pipe->attr_flags & CE_ATTR_BYTE_SWAP_DATA)
		byte_swap_data = 1;

	ath11k_hal_ce_src_set_desc(desc, ATH11K_SKB_CB(skb)->paddr,
				   skb->len, transfer_id, byte_swap_data);

	pipe->src_ring->skb[write_index] = skb;
	pipe->src_ring->write_index = CE_RING_IDX_INCR(nentries_mask,
						       write_index);

	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	spin_unlock_bh(&ab->ce.ce_lock);

	return 0;

err_unlock:
	spin_unlock_bh(&srng->lock);

	spin_unlock_bh(&ab->ce.ce_lock);

	return ret;
}
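/* The num_used computation in ath11k_ce_send() accounts for write-index
 * wraparound. Worked example: with nentries = 2048, sw_index = 2040 and
 * write_index = 8, num_used = 2048 - 2040 + 8 = 16 entries in flight.
 * Once num_used exceeds ATH11K_CE_USAGE_THRESHOLD the send path reaps
 * completions inline, since pipes flagged CE_ATTR_DIS_INTR (e.g. CE4)
 * never get a completion interrupt.
 */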
static void ath11k_ce_rx_pipe_cleanup(struct ath11k_ce_pipe *pipe)
{
	struct ath11k_base *ab = pipe->ab;
	struct ath11k_ce_ring *ring = pipe->dest_ring;
	struct sk_buff *skb;
	int i;

	if (!(ring && pipe->buf_sz))
		return;

	for (i = 0; i < ring->nentries; i++) {
		skb = ring->skb[i];
		if (!skb)
			continue;

		ring->skb[i] = NULL;
		dma_unmap_single(ab->dev, ATH11K_SKB_RXCB(skb)->paddr,
				 skb->len + skb_tailroom(skb), DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
	}
}

void ath11k_ce_cleanup_pipes(struct ath11k_base *ab)
{
	struct ath11k_ce_pipe *pipe;
	int pipe_num;

	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
		pipe = &ab->ce.ce_pipe[pipe_num];
		ath11k_ce_rx_pipe_cleanup(pipe);

		/* Cleanup any src CE's which have interrupts disabled */
		ath11k_ce_poll_send_completed(ab, pipe_num);

		/* NOTE: Should we also clean up tx buffer in all pipes? */
	}
}
EXPORT_SYMBOL(ath11k_ce_cleanup_pipes);

void ath11k_ce_rx_post_buf(struct ath11k_base *ab)
{
	struct ath11k_ce_pipe *pipe;
	int i;
	int ret;

	for (i = 0; i < CE_COUNT; i++) {
		pipe = &ab->ce.ce_pipe[i];
		ret = ath11k_ce_rx_post_pipe(pipe);
		if (ret) {
			if (ret == -ENOSPC)
				continue;

			ath11k_warn(ab, "failed to post rx buf to pipe: %d err: %d\n",
				    i, ret);
			mod_timer(&ab->rx_replenish_retry,
				  jiffies + ATH11K_CE_RX_POST_RETRY_JIFFIES);

			return;
		}
	}
}
EXPORT_SYMBOL(ath11k_ce_rx_post_buf);

void ath11k_ce_rx_replenish_retry(struct timer_list *t)
{
	struct ath11k_base *ab = from_timer(ab, t, rx_replenish_retry);

	ath11k_ce_rx_post_buf(ab);
}
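/* ath11k_ce_rx_replenish_retry() above is the timer callback that
 * retries rx buffer posting after a transient failure. A minimal sketch
 * of the one-time setup expected in core init -- the timer field name
 * comes from the from_timer() call above, but the exact init site is an
 * assumption:
 *
 *	timer_setup(&ab->rx_replenish_retry,
 *		    ath11k_ce_rx_replenish_retry, 0);
 */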
int ath11k_ce_init_pipes(struct ath11k_base *ab)
{
	struct ath11k_ce_pipe *pipe;
	int i;
	int ret;

	for (i = 0; i < CE_COUNT; i++) {
		pipe = &ab->ce.ce_pipe[i];

		if (pipe->src_ring) {
			ret = ath11k_ce_init_ring(ab, pipe->src_ring, i,
						  HAL_CE_SRC);
			if (ret) {
				ath11k_warn(ab, "failed to init src ring: %d\n",
					    ret);
				/* Should we clear any partial init */
				return ret;
			}

			pipe->src_ring->write_index = 0;
			pipe->src_ring->sw_index = 0;
		}

		if (pipe->dest_ring) {
			ret = ath11k_ce_init_ring(ab, pipe->dest_ring, i,
						  HAL_CE_DST);
			if (ret) {
				ath11k_warn(ab, "failed to init dest ring: %d\n",
					    ret);
				/* Should we clear any partial init */
				return ret;
			}

			pipe->rx_buf_needed = pipe->dest_ring->nentries ?
					      pipe->dest_ring->nentries - 2 : 0;

			pipe->dest_ring->write_index = 0;
			pipe->dest_ring->sw_index = 0;
		}

		if (pipe->status_ring) {
			ret = ath11k_ce_init_ring(ab, pipe->status_ring, i,
						  HAL_CE_DST_STATUS);
			if (ret) {
				ath11k_warn(ab, "failed to init dest status ring: %d\n",
					    ret);
				/* Should we clear any partial init */
				return ret;
			}

			pipe->status_ring->write_index = 0;
			pipe->status_ring->sw_index = 0;
		}
	}

	return 0;
}

void ath11k_ce_free_pipes(struct ath11k_base *ab)
{
	struct ath11k_ce_pipe *pipe;
	int desc_sz;
	int i;

	for (i = 0; i < CE_COUNT; i++) {
		pipe = &ab->ce.ce_pipe[i];

		if (pipe->src_ring) {
			desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_SRC);
			dma_free_coherent(ab->dev,
					  pipe->src_ring->nentries * desc_sz +
					  CE_DESC_RING_ALIGN,
					  pipe->src_ring->base_addr_owner_space,
					  pipe->src_ring->base_addr_ce_space);
			kfree(pipe->src_ring);
			pipe->src_ring = NULL;
		}

		if (pipe->dest_ring) {
			desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST);
			dma_free_coherent(ab->dev,
					  pipe->dest_ring->nentries * desc_sz +
					  CE_DESC_RING_ALIGN,
					  pipe->dest_ring->base_addr_owner_space,
					  pipe->dest_ring->base_addr_ce_space);
			kfree(pipe->dest_ring);
			pipe->dest_ring = NULL;
		}

		if (pipe->status_ring) {
			desc_sz =
			  ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST_STATUS);
			dma_free_coherent(ab->dev,
					  pipe->status_ring->nentries * desc_sz +
					  CE_DESC_RING_ALIGN,
					  pipe->status_ring->base_addr_owner_space,
					  pipe->status_ring->base_addr_ce_space);
			kfree(pipe->status_ring);
			pipe->status_ring = NULL;
		}
	}
}
EXPORT_SYMBOL(ath11k_ce_free_pipes);

int ath11k_ce_alloc_pipes(struct ath11k_base *ab)
{
	struct ath11k_ce_pipe *pipe;
	int i;
	int ret;
	const struct ce_attr *attr;

	spin_lock_init(&ab->ce.ce_lock);

	for (i = 0; i < CE_COUNT; i++) {
		attr = &host_ce_config_wlan[i];
		pipe = &ab->ce.ce_pipe[i];
		pipe->pipe_num = i;
		pipe->ab = ab;
		pipe->buf_sz = attr->src_sz_max;

		ret = ath11k_ce_alloc_pipe(ab, i);
		if (ret) {
			/* Free any partially allocated pipes */
			ath11k_ce_free_pipes(ab);
			return ret;
		}
	}

	return 0;
}
EXPORT_SYMBOL(ath11k_ce_alloc_pipes);

/* For a big endian host, Copy Engine byte_swap is enabled. When the
 * Copy Engine does the byte swap, the host must byte swap again for
 * get/put of buffer content in the correct byte order.
 */
void ath11k_ce_byte_swap(void *mem, u32 len)
{
	int i;

	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) {
		if (!mem)
			return;

		for (i = 0; i < (len / 4); i++) {
			*(u32 *)mem = swab32(*(u32 *)mem);
			mem += 4;
		}
	}
}

int ath11k_ce_get_attr_flags(struct ath11k_base *ab, int ce_id)
{
	if (ce_id >= CE_COUNT)
		return -EINVAL;

	return host_ce_config_wlan[ce_id].flags;
}
EXPORT_SYMBOL(ath11k_ce_get_attr_flags);
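/* Illustrative example of ath11k_ce_byte_swap() on a big-endian host
 * (not compiled): swab32(0x11223344) == 0x44332211, which undoes the
 * byte swap the Copy Engine applied; len is expected to be a multiple
 * of 4. On little-endian builds the whole body is compiled out via
 * IS_ENABLED(CONFIG_CPU_BIG_ENDIAN).
 */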