// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
 */

#include <crypto/hash.h>
#include "core.h"
#include "dp_tx.h"
#include "hal_tx.h"
#include "debug.h"
#include "dp_rx.h"
#include "peer.h"

static void ath11k_dp_htt_htc_tx_complete(struct ath11k_base *ab,
                                          struct sk_buff *skb)
{
        dev_kfree_skb_any(skb);
}

void ath11k_dp_peer_cleanup(struct ath11k *ar, int vdev_id, const u8 *addr)
{
        struct ath11k_base *ab = ar->ab;
        struct ath11k_peer *peer;

        /* TODO: Any other peer specific DP cleanup */

        spin_lock_bh(&ab->base_lock);
        peer = ath11k_peer_find(ab, vdev_id, addr);
        if (!peer) {
                ath11k_warn(ab, "failed to lookup peer %pM on vdev %d\n",
                            addr, vdev_id);
                spin_unlock_bh(&ab->base_lock);
                return;
        }

        ath11k_peer_rx_tid_cleanup(ar, peer);
        crypto_free_shash(peer->tfm_mmic);
        spin_unlock_bh(&ab->base_lock);
}

int ath11k_dp_peer_setup(struct ath11k *ar, int vdev_id, const u8 *addr)
{
        struct ath11k_base *ab = ar->ab;
        struct ath11k_peer *peer;
        u32 reo_dest;
        int ret = 0, tid;

        /* NOTE: reo_dest ring id starts from 1 unlike mac_id which starts from 0 */
        reo_dest = ar->dp.mac_id + 1;
        ret = ath11k_wmi_set_peer_param(ar, addr, vdev_id,
                                        WMI_PEER_SET_DEFAULT_ROUTING,
                                        DP_RX_HASH_ENABLE | (reo_dest << 1));
        if (ret) {
                ath11k_warn(ab, "failed to set default routing %d peer :%pM vdev_id :%d\n",
                            ret, addr, vdev_id);
                return ret;
        }

        for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) {
                ret = ath11k_peer_rx_tid_setup(ar, addr, vdev_id, tid, 1, 0,
                                               HAL_PN_TYPE_NONE);
                if (ret) {
                        ath11k_warn(ab, "failed to setup rxd tid queue for tid %d: %d\n",
                                    tid, ret);
                        goto peer_clean;
                }
        }

        ret = ath11k_peer_rx_frag_setup(ar, addr, vdev_id);
        if (ret) {
                ath11k_warn(ab, "failed to setup rx defrag context\n");
                /* Tear down the rx tid queues set up above; tid is one past
                 * the last tid that was successfully set up here.
                 */
                tid--;
                goto peer_clean;
        }

        /* TODO: Setup other peer specific resource used in data path */

        return 0;

peer_clean:
        spin_lock_bh(&ab->base_lock);

        peer = ath11k_peer_find(ab, vdev_id, addr);
        if (!peer) {
                ath11k_warn(ab, "failed to find the peer to del rx tid\n");
                spin_unlock_bh(&ab->base_lock);
                return -ENOENT;
        }

        for (; tid >= 0; tid--)
                ath11k_peer_rx_tid_delete(ar, peer, tid);

        spin_unlock_bh(&ab->base_lock);

        return ret;
}

void ath11k_dp_srng_cleanup(struct ath11k_base *ab, struct dp_srng *ring)
{
        if (!ring->vaddr_unaligned)
                return;

        dma_free_coherent(ab->dev, ring->size, ring->vaddr_unaligned,
                          ring->paddr_unaligned);

        ring->vaddr_unaligned = NULL;
}

int ath11k_dp_srng_setup(struct ath11k_base *ab, struct dp_srng *ring,
                         enum hal_ring_type type, int ring_num,
                         int mac_id, int num_entries)
{
        struct hal_srng_params params = { 0 };
        int entry_sz = ath11k_hal_srng_get_entrysize(type);
        int max_entries = ath11k_hal_srng_get_max_entries(type);
        int ret;

        if (max_entries < 0 || entry_sz < 0)
                return -EINVAL;

        if (num_entries > max_entries)
                num_entries = max_entries;

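        /* Over-allocate by (HAL_RING_BASE_ALIGN - 1) bytes so the ring base
         * can be aligned with PTR_ALIGN() below; the same offset that the
         * alignment adds to the virtual address is then applied to the DMA
         * address.
         */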
        ring->size = (num_entries * entry_sz) + HAL_RING_BASE_ALIGN - 1;
        ring->vaddr_unaligned = dma_alloc_coherent(ab->dev, ring->size,
                                                   &ring->paddr_unaligned,
                                                   GFP_KERNEL);
        if (!ring->vaddr_unaligned)
                return -ENOMEM;

        ring->vaddr = PTR_ALIGN(ring->vaddr_unaligned, HAL_RING_BASE_ALIGN);
        ring->paddr = ring->paddr_unaligned + ((unsigned long)ring->vaddr -
                      (unsigned long)ring->vaddr_unaligned);

        params.ring_base_vaddr = ring->vaddr;
        params.ring_base_paddr = ring->paddr;
        params.num_entries = num_entries;

        switch (type) {
        case HAL_REO_DST:
                params.intr_batch_cntr_thres_entries =
                                        HAL_SRNG_INT_BATCH_THRESHOLD_RX;
                params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
                break;
        case HAL_RXDMA_BUF:
        case HAL_RXDMA_MONITOR_BUF:
        case HAL_RXDMA_MONITOR_STATUS:
                params.low_threshold = num_entries >> 3;
                params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
                params.intr_batch_cntr_thres_entries = 0;
                params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
                break;
        case HAL_WBM2SW_RELEASE:
                if (ring_num < 3) {
                        params.intr_batch_cntr_thres_entries =
                                        HAL_SRNG_INT_BATCH_THRESHOLD_TX;
                        params.intr_timer_thres_us =
                                        HAL_SRNG_INT_TIMER_THRESHOLD_TX;
                        break;
                }
                /* fall through when ring_num >= 3 */
                /* fall through */
        case HAL_REO_EXCEPTION:
        case HAL_REO_REINJECT:
        case HAL_REO_CMD:
        case HAL_REO_STATUS:
        case HAL_TCL_DATA:
        case HAL_TCL_CMD:
        case HAL_TCL_STATUS:
        case HAL_WBM_IDLE_LINK:
        case HAL_SW2WBM_RELEASE:
        case HAL_RXDMA_DST:
        case HAL_RXDMA_MONITOR_DST:
        case HAL_RXDMA_MONITOR_DESC:
        case HAL_RXDMA_DIR_BUF:
                params.intr_batch_cntr_thres_entries =
                                        HAL_SRNG_INT_BATCH_THRESHOLD_OTHER;
                params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_OTHER;
                break;
        default:
                ath11k_warn(ab, "Not a valid ring type in dp :%d\n", type);
                return -EINVAL;
        }

        ret = ath11k_hal_srng_setup(ab, type, ring_num, mac_id, &params);
        if (ret < 0) {
                ath11k_warn(ab, "failed to setup srng: %d ring_id %d\n",
                            ret, ring_num);
                return ret;
        }

        ring->ring_id = ret;

        return 0;
}

static void ath11k_dp_srng_common_cleanup(struct ath11k_base *ab)
{
        struct ath11k_dp *dp = &ab->dp;
        int i;

        ath11k_dp_srng_cleanup(ab, &dp->wbm_desc_rel_ring);
        ath11k_dp_srng_cleanup(ab, &dp->tcl_cmd_ring);
        ath11k_dp_srng_cleanup(ab, &dp->tcl_status_ring);
        for (i = 0; i < DP_TCL_NUM_RING_MAX; i++) {
                ath11k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_data_ring);
                ath11k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_comp_ring);
        }
        ath11k_dp_srng_cleanup(ab, &dp->reo_reinject_ring);
        ath11k_dp_srng_cleanup(ab, &dp->rx_rel_ring);
        ath11k_dp_srng_cleanup(ab, &dp->reo_except_ring);
        ath11k_dp_srng_cleanup(ab, &dp->reo_cmd_ring);
        ath11k_dp_srng_cleanup(ab, &dp->reo_status_ring);
}

static int ath11k_dp_srng_common_setup(struct ath11k_base *ab)
{
        struct ath11k_dp *dp = &ab->dp;
        struct hal_srng *srng;
        int i, ret;
        u32 ring_hash_map;

        ret = ath11k_dp_srng_setup(ab, &dp->wbm_desc_rel_ring,
                                   HAL_SW2WBM_RELEASE, 0, 0,
                                   DP_WBM_RELEASE_RING_SIZE);
        if (ret) {
                ath11k_warn(ab, "failed to set up wbm2sw_release ring :%d\n",
                            ret);
                goto err;
        }

        ret = ath11k_dp_srng_setup(ab, &dp->tcl_cmd_ring, HAL_TCL_CMD, 0, 0,
                                   DP_TCL_CMD_RING_SIZE);
        if (ret) {
                ath11k_warn(ab, "failed to set up tcl_cmd ring :%d\n", ret);
                goto err;
        }

        ret = ath11k_dp_srng_setup(ab, &dp->tcl_status_ring, HAL_TCL_STATUS,
                                   0, 0, DP_TCL_STATUS_RING_SIZE);
        if (ret) {
                ath11k_warn(ab, "failed to set up tcl_status ring :%d\n", ret);
                goto err;
        }

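        /* Each TX ring pairs a TCL data ring (host to TCL) with the
         * WBM2SW release ring of the same index, which carries tx
         * completions back to the host.
         */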
        for (i = 0; i < DP_TCL_NUM_RING_MAX; i++) {
                ret = ath11k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_data_ring,
                                           HAL_TCL_DATA, i, 0,
                                           DP_TCL_DATA_RING_SIZE);
                if (ret) {
                        ath11k_warn(ab, "failed to set up tcl_data ring (%d) :%d\n",
                                    i, ret);
                        goto err;
                }

                ret = ath11k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_comp_ring,
                                           HAL_WBM2SW_RELEASE, i, 0,
                                           DP_TX_COMP_RING_SIZE);
                if (ret) {
                        ath11k_warn(ab, "failed to set up tcl_comp ring (%d) :%d\n",
                                    i, ret);
                        goto err;
                }

                srng = &ab->hal.srng_list[dp->tx_ring[i].tcl_data_ring.ring_id];
                ath11k_hal_tx_init_data_ring(ab, srng);
        }

        ret = ath11k_dp_srng_setup(ab, &dp->reo_reinject_ring, HAL_REO_REINJECT,
                                   0, 0, DP_REO_REINJECT_RING_SIZE);
        if (ret) {
                ath11k_warn(ab, "failed to set up reo_reinject ring :%d\n",
                            ret);
                goto err;
        }

        ret = ath11k_dp_srng_setup(ab, &dp->rx_rel_ring, HAL_WBM2SW_RELEASE,
                                   3, 0, DP_RX_RELEASE_RING_SIZE);
        if (ret) {
                ath11k_warn(ab, "failed to set up rx_rel ring :%d\n", ret);
                goto err;
        }

        ret = ath11k_dp_srng_setup(ab, &dp->reo_except_ring, HAL_REO_EXCEPTION,
                                   0, 0, DP_REO_EXCEPTION_RING_SIZE);
        if (ret) {
                ath11k_warn(ab, "failed to set up reo_exception ring :%d\n",
                            ret);
                goto err;
        }

        ret = ath11k_dp_srng_setup(ab, &dp->reo_cmd_ring, HAL_REO_CMD,
                                   0, 0, DP_REO_CMD_RING_SIZE);
        if (ret) {
                ath11k_warn(ab, "failed to set up reo_cmd ring :%d\n", ret);
                goto err;
        }

        srng = &ab->hal.srng_list[dp->reo_cmd_ring.ring_id];
        ath11k_hal_reo_init_cmd_ring(ab, srng);

        ret = ath11k_dp_srng_setup(ab, &dp->reo_status_ring, HAL_REO_STATUS,
                                   0, 0, DP_REO_STATUS_RING_SIZE);
        if (ret) {
                ath11k_warn(ab, "failed to set up reo_status ring :%d\n", ret);
                goto err;
        }

        /* When hash based routing of rx packets is enabled, 32 entries
         * mapping hash values to rings are configured. Each hash entry
         * uses three bits to map to a particular ring. The ring mapping is
         * 0:TCL, 1:SW1, 2:SW2, 3:SW3, 4:SW4, 5:Release, 6:FW and 7:Not used.
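         *
         * ring_hash_map below packs eight such 3-bit entries: hash value 0
         * maps to SW1 in bits [2:0], hash value 1 to SW2 in bits [5:3], and
         * so on, cycling through SW1..SW4 twice.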
         */
        ring_hash_map = HAL_HASH_ROUTING_RING_SW1 << 0 |
                        HAL_HASH_ROUTING_RING_SW2 << 3 |
                        HAL_HASH_ROUTING_RING_SW3 << 6 |
                        HAL_HASH_ROUTING_RING_SW4 << 9 |
                        HAL_HASH_ROUTING_RING_SW1 << 12 |
                        HAL_HASH_ROUTING_RING_SW2 << 15 |
                        HAL_HASH_ROUTING_RING_SW3 << 18 |
                        HAL_HASH_ROUTING_RING_SW4 << 21;

        ath11k_hal_reo_hw_setup(ab, ring_hash_map);

        return 0;

err:
        ath11k_dp_srng_common_cleanup(ab);

        return ret;
}

static void ath11k_dp_scatter_idle_link_desc_cleanup(struct ath11k_base *ab)
{
        struct ath11k_dp *dp = &ab->dp;
        struct hal_wbm_idle_scatter_list *slist = dp->scatter_list;
        int i;

        for (i = 0; i < DP_IDLE_SCATTER_BUFS_MAX; i++) {
                if (!slist[i].vaddr)
                        continue;

                dma_free_coherent(ab->dev, HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX,
                                  slist[i].vaddr, slist[i].paddr);
                slist[i].vaddr = NULL;
        }
}

static int ath11k_dp_scatter_idle_link_desc_setup(struct ath11k_base *ab,
                                                  int size,
                                                  u32 n_link_desc_bank,
                                                  u32 n_link_desc,
                                                  u32 last_bank_sz)
{
        struct ath11k_dp *dp = &ab->dp;
        struct dp_link_desc_bank *link_desc_banks = dp->link_desc_banks;
        struct hal_wbm_idle_scatter_list *slist = dp->scatter_list;
        u32 n_entries_per_buf;
        int num_scatter_buf, scatter_idx;
        struct hal_wbm_link_desc *scatter_buf;
        int align_bytes, n_entries;
        dma_addr_t paddr;
        int rem_entries;
        int i;
        int ret = 0;
        u32 end_offset;

        n_entries_per_buf = HAL_WBM_IDLE_SCATTER_BUF_SIZE /
                            ath11k_hal_srng_get_entrysize(HAL_WBM_IDLE_LINK);
        num_scatter_buf = DIV_ROUND_UP(size, HAL_WBM_IDLE_SCATTER_BUF_SIZE);

        if (num_scatter_buf > DP_IDLE_SCATTER_BUFS_MAX)
                return -EINVAL;

        for (i = 0; i < num_scatter_buf; i++) {
                slist[i].vaddr = dma_alloc_coherent(ab->dev,
                                                    HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX,
                                                    &slist[i].paddr, GFP_KERNEL);
                if (!slist[i].vaddr) {
                        ret = -ENOMEM;
                        goto err;
                }
        }

        scatter_idx = 0;
        scatter_buf = slist[scatter_idx].vaddr;
        rem_entries = n_entries_per_buf;

        for (i = 0; i < n_link_desc_bank; i++) {
                align_bytes = link_desc_banks[i].vaddr -
                              link_desc_banks[i].vaddr_unaligned;
                n_entries = (DP_LINK_DESC_ALLOC_SIZE_THRESH - align_bytes) /
                            HAL_LINK_DESC_SIZE;
                paddr = link_desc_banks[i].paddr;
                while (n_entries) {
                        ath11k_hal_set_link_desc_addr(scatter_buf, i, paddr);
                        n_entries--;
                        paddr += HAL_LINK_DESC_SIZE;
                        if (rem_entries) {
                                rem_entries--;
                                scatter_buf++;
                                continue;
                        }

                        rem_entries = n_entries_per_buf;
                        scatter_idx++;
                        scatter_buf = slist[scatter_idx].vaddr;
                }
        }

        end_offset = (scatter_buf - slist[scatter_idx].vaddr) *
                     sizeof(struct hal_wbm_link_desc);
        ath11k_hal_setup_link_idle_list(ab, slist, num_scatter_buf,
                                        n_link_desc, end_offset);

        return 0;

err:
        ath11k_dp_scatter_idle_link_desc_cleanup(ab);

        return ret;
}

static void
ath11k_dp_link_desc_bank_free(struct ath11k_base *ab,
                              struct dp_link_desc_bank *link_desc_banks)
{
        int i;

        for (i = 0; i < DP_LINK_DESC_BANKS_MAX; i++) {
                if (link_desc_banks[i].vaddr_unaligned) {
                        dma_free_coherent(ab->dev,
                                          link_desc_banks[i].size,
                                          link_desc_banks[i].vaddr_unaligned,
                                          link_desc_banks[i].paddr_unaligned);
                        link_desc_banks[i].vaddr_unaligned = NULL;
                }
        }
}

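/* Allocate the requested number of link descriptor banks. Every bank is
 * DP_LINK_DESC_ALLOC_SIZE_THRESH bytes except possibly the last one, which
 * may use the smaller last_bank_sz.
 */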
static int ath11k_dp_link_desc_bank_alloc(struct ath11k_base *ab,
                                          struct dp_link_desc_bank *desc_bank,
                                          int n_link_desc_bank,
                                          int last_bank_sz)
{
        struct ath11k_dp *dp = &ab->dp;
        int i;
        int ret = 0;
        int desc_sz = DP_LINK_DESC_ALLOC_SIZE_THRESH;

        for (i = 0; i < n_link_desc_bank; i++) {
                if (i == (n_link_desc_bank - 1) && last_bank_sz)
                        desc_sz = last_bank_sz;

                desc_bank[i].vaddr_unaligned =
                                dma_alloc_coherent(ab->dev, desc_sz,
                                                   &desc_bank[i].paddr_unaligned,
                                                   GFP_KERNEL);
                if (!desc_bank[i].vaddr_unaligned) {
                        ret = -ENOMEM;
                        goto err;
                }

                desc_bank[i].vaddr = PTR_ALIGN(desc_bank[i].vaddr_unaligned,
                                               HAL_LINK_DESC_ALIGN);
                desc_bank[i].paddr = desc_bank[i].paddr_unaligned +
                                     ((unsigned long)desc_bank[i].vaddr -
                                      (unsigned long)desc_bank[i].vaddr_unaligned);
                desc_bank[i].size = desc_sz;
        }

        return 0;

err:
        ath11k_dp_link_desc_bank_free(ab, dp->link_desc_banks);

        return ret;
}

void ath11k_dp_link_desc_cleanup(struct ath11k_base *ab,
                                 struct dp_link_desc_bank *desc_bank,
                                 u32 ring_type, struct dp_srng *ring)
{
        ath11k_dp_link_desc_bank_free(ab, desc_bank);

        if (ring_type != HAL_RXDMA_MONITOR_DESC) {
                ath11k_dp_srng_cleanup(ab, ring);
                ath11k_dp_scatter_idle_link_desc_cleanup(ab);
        }
}

static int ath11k_wbm_idle_ring_setup(struct ath11k_base *ab, u32 *n_link_desc)
{
        struct ath11k_dp *dp = &ab->dp;
        u32 n_mpdu_link_desc, n_mpdu_queue_desc;
        u32 n_tx_msdu_link_desc, n_rx_msdu_link_desc;
        int ret = 0;

        n_mpdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_MPDUS_PER_TID_MAX) /
                           HAL_NUM_MPDUS_PER_LINK_DESC;

        n_mpdu_queue_desc = n_mpdu_link_desc /
                            HAL_NUM_MPDU_LINKS_PER_QUEUE_DESC;

        n_tx_msdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_FLOWS_PER_TID *
                               DP_AVG_MSDUS_PER_FLOW) /
                              HAL_NUM_TX_MSDUS_PER_LINK_DESC;

        n_rx_msdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_MPDUS_PER_TID_MAX *
                               DP_AVG_MSDUS_PER_MPDU) /
                              HAL_NUM_RX_MSDUS_PER_LINK_DESC;

        *n_link_desc = n_mpdu_link_desc + n_mpdu_queue_desc +
                       n_tx_msdu_link_desc + n_rx_msdu_link_desc;

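        /* Round the total up to the next power of two if it is not one
         * already; fls() returns the position of the highest set bit.
         */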
        if (*n_link_desc & (*n_link_desc - 1))
                *n_link_desc = 1 << fls(*n_link_desc);

        ret = ath11k_dp_srng_setup(ab, &dp->wbm_idle_ring,
                                   HAL_WBM_IDLE_LINK, 0, 0, *n_link_desc);
        if (ret) {
                ath11k_warn(ab, "failed to setup wbm_idle_ring: %d\n", ret);
                return ret;
        }
        return ret;
}

int ath11k_dp_link_desc_setup(struct ath11k_base *ab,
                              struct dp_link_desc_bank *link_desc_banks,
                              u32 ring_type, struct hal_srng *srng,
                              u32 n_link_desc)
{
        u32 tot_mem_sz;
        u32 n_link_desc_bank, last_bank_sz;
        u32 entry_sz, align_bytes, n_entries;
        u32 paddr;
        u32 *desc;
        int i, ret;

        tot_mem_sz = n_link_desc * HAL_LINK_DESC_SIZE;
        tot_mem_sz += HAL_LINK_DESC_ALIGN;

        if (tot_mem_sz <= DP_LINK_DESC_ALLOC_SIZE_THRESH) {
                n_link_desc_bank = 1;
                last_bank_sz = tot_mem_sz;
        } else {
                n_link_desc_bank = tot_mem_sz /
                                   (DP_LINK_DESC_ALLOC_SIZE_THRESH -
                                    HAL_LINK_DESC_ALIGN);
                last_bank_sz = tot_mem_sz %
                               (DP_LINK_DESC_ALLOC_SIZE_THRESH -
                                HAL_LINK_DESC_ALIGN);

                if (last_bank_sz)
                        n_link_desc_bank += 1;
        }

        if (n_link_desc_bank > DP_LINK_DESC_BANKS_MAX)
                return -EINVAL;

        ret = ath11k_dp_link_desc_bank_alloc(ab, link_desc_banks,
                                             n_link_desc_bank, last_bank_sz);
        if (ret)
                return ret;

        /* Setup link desc idle list for HW internal usage */
        entry_sz = ath11k_hal_srng_get_entrysize(ring_type);
        tot_mem_sz = entry_sz * n_link_desc;

        /* Setup scatter desc list when the total memory requirement is more */
        if (tot_mem_sz > DP_LINK_DESC_ALLOC_SIZE_THRESH &&
            ring_type != HAL_RXDMA_MONITOR_DESC) {
                ret = ath11k_dp_scatter_idle_link_desc_setup(ab, tot_mem_sz,
                                                             n_link_desc_bank,
                                                             n_link_desc,
                                                             last_bank_sz);
                if (ret) {
                        ath11k_warn(ab, "failed to setup scatter idle list descriptor :%d\n",
                                    ret);
                        goto fail_desc_bank_free;
                }

                return 0;
        }

        spin_lock_bh(&srng->lock);

        ath11k_hal_srng_access_begin(ab, srng);

        for (i = 0; i < n_link_desc_bank; i++) {
                align_bytes = link_desc_banks[i].vaddr -
                              link_desc_banks[i].vaddr_unaligned;
                n_entries = (link_desc_banks[i].size - align_bytes) /
                            HAL_LINK_DESC_SIZE;
                paddr = link_desc_banks[i].paddr;
                while (n_entries &&
                       (desc = ath11k_hal_srng_src_get_next_entry(ab, srng))) {
                        ath11k_hal_set_link_desc_addr((struct hal_wbm_link_desc *)desc,
                                                      i, paddr);
                        n_entries--;
                        paddr += HAL_LINK_DESC_SIZE;
                }
        }

        ath11k_hal_srng_access_end(ab, srng);

        spin_unlock_bh(&srng->lock);

        return 0;

fail_desc_bank_free:
        ath11k_dp_link_desc_bank_free(ab, link_desc_banks);

        return ret;
}

int ath11k_dp_service_srng(struct ath11k_base *ab,
                           struct ath11k_ext_irq_grp *irq_grp,
                           int budget)
{
        struct napi_struct *napi = &irq_grp->napi;
        int grp_id = irq_grp->grp_id;
        int work_done = 0;
        int i = 0;
        int tot_work_done = 0;

        while (ath11k_tx_ring_mask[grp_id] >> i) {
                if (ath11k_tx_ring_mask[grp_id] & BIT(i))
                        ath11k_dp_tx_completion_handler(ab, i);
                i++;
        }

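        /* Each rx handler below consumes part of the NAPI budget; stop
         * processing once the budget is exhausted and report the total
         * work done so NAPI can poll again.
         */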
        if (ath11k_rx_err_ring_mask[grp_id]) {
                work_done = ath11k_dp_process_rx_err(ab, napi, budget);
                budget -= work_done;
                tot_work_done += work_done;
                if (budget <= 0)
                        goto done;
        }

        if (ath11k_rx_wbm_rel_ring_mask[grp_id]) {
                work_done = ath11k_dp_rx_process_wbm_err(ab,
                                                         napi,
                                                         budget);
                budget -= work_done;
                tot_work_done += work_done;

                if (budget <= 0)
                        goto done;
        }

        if (ath11k_rx_ring_mask[grp_id]) {
                i = fls(ath11k_rx_ring_mask[grp_id]) - 1;
                work_done = ath11k_dp_process_rx(ab, i, napi,
                                                 budget);
                budget -= work_done;
                tot_work_done += work_done;
                if (budget <= 0)
                        goto done;
        }

        if (rx_mon_status_ring_mask[grp_id]) {
                for (i = 0; i < ab->num_radios; i++) {
                        if (rx_mon_status_ring_mask[grp_id] & BIT(i)) {
                                work_done =
                                        ath11k_dp_rx_process_mon_rings(ab,
                                                                       i, napi,
                                                                       budget);
                                budget -= work_done;
                                tot_work_done += work_done;
                        }
                        if (budget <= 0)
                                goto done;
                }
        }

        if (ath11k_reo_status_ring_mask[grp_id])
                ath11k_dp_process_reo_status(ab);

        for (i = 0; i < ab->num_radios; i++) {
                if (ath11k_rxdma2host_ring_mask[grp_id] & BIT(i)) {
                        work_done = ath11k_dp_process_rxdma_err(ab, i, budget);
                        budget -= work_done;
                        tot_work_done += work_done;
                }

                if (budget <= 0)
                        goto done;

                if (ath11k_host2rxdma_ring_mask[grp_id] & BIT(i)) {
                        struct ath11k_pdev_dp *dp = &ab->pdevs[i].ar->dp;
                        struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;

                        ath11k_dp_rxbufs_replenish(ab, i, rx_ring, 0,
                                                   HAL_RX_BUF_RBM_SW3_BM,
                                                   GFP_ATOMIC);
                }
        }
        /* TODO: Implement handler for other interrupts */

done:
        return tot_work_done;
}

void ath11k_dp_pdev_free(struct ath11k_base *ab)
{
        struct ath11k *ar;
        int i;

        for (i = 0; i < ab->num_radios; i++) {
                ar = ab->pdevs[i].ar;
                ath11k_dp_rx_pdev_free(ab, i);
                ath11k_debug_unregister(ar);
                ath11k_dp_rx_pdev_mon_detach(ar);
        }
}

void ath11k_dp_pdev_pre_alloc(struct ath11k_base *ab)
{
        struct ath11k *ar;
        struct ath11k_pdev_dp *dp;
        int i;

        for (i = 0; i < ab->num_radios; i++) {
                ar = ab->pdevs[i].ar;
                dp = &ar->dp;
                dp->mac_id = i;
                idr_init(&dp->rx_refill_buf_ring.bufs_idr);
                spin_lock_init(&dp->rx_refill_buf_ring.idr_lock);
                atomic_set(&dp->num_tx_pending, 0);
                init_waitqueue_head(&dp->tx_empty_waitq);
                idr_init(&dp->rx_mon_status_refill_ring.bufs_idr);
                spin_lock_init(&dp->rx_mon_status_refill_ring.idr_lock);
                idr_init(&dp->rxdma_mon_buf_ring.bufs_idr);
                spin_lock_init(&dp->rxdma_mon_buf_ring.idr_lock);
        }
}

int ath11k_dp_pdev_alloc(struct ath11k_base *ab)
{
        struct ath11k *ar;
        int ret;
        int i;

        /* TODO: per-pdev rx ring, unlike tx ring which is mapped to different ACs */
        for (i = 0; i < ab->num_radios; i++) {
                ar = ab->pdevs[i].ar;
                ret = ath11k_dp_rx_pdev_alloc(ab, i);
                if (ret) {
                        ath11k_warn(ab, "failed to allocate pdev rx for pdev_id :%d\n",
                                    i);
                        goto err;
                }
                ret = ath11k_dp_rx_pdev_mon_attach(ar);
                if (ret) {
                        ath11k_warn(ab, "failed to initialize mon pdev %d\n",
                                    i);
                        goto err;
                }
        }

        return 0;

err:
        ath11k_dp_pdev_free(ab);

        return ret;
}

int ath11k_dp_htt_connect(struct ath11k_dp *dp)
{
        struct ath11k_htc_svc_conn_req conn_req;
        struct ath11k_htc_svc_conn_resp conn_resp;
        int status;

        memset(&conn_req, 0, sizeof(conn_req));
        memset(&conn_resp, 0, sizeof(conn_resp));

        conn_req.ep_ops.ep_tx_complete = ath11k_dp_htt_htc_tx_complete;
        conn_req.ep_ops.ep_rx_complete = ath11k_dp_htt_htc_t2h_msg_handler;

        /* connect to control service */
        conn_req.service_id = ATH11K_HTC_SVC_ID_HTT_DATA_MSG;

        status = ath11k_htc_connect_service(&dp->ab->htc, &conn_req,
                                            &conn_resp);
        if (status)
                return status;

        dp->eid = conn_resp.eid;

        return 0;
}

static void ath11k_dp_update_vdev_search(struct ath11k_vif *arvif)
{
        /* For STA mode, enable address search index; TCL uses the ast_hash
         * value from the descriptor.
         */
        switch (arvif->vdev_type) {
        case WMI_VDEV_TYPE_STA:
                arvif->hal_addr_search_flags = HAL_TX_ADDRX_EN;
                arvif->search_type = HAL_TX_ADDR_SEARCH_INDEX;
                break;
        case WMI_VDEV_TYPE_AP:
        case WMI_VDEV_TYPE_IBSS:
                arvif->hal_addr_search_flags = HAL_TX_ADDRX_EN;
                arvif->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
                break;
        case WMI_VDEV_TYPE_MONITOR:
        default:
                return;
        }
}

void ath11k_dp_vdev_tx_attach(struct ath11k *ar, struct ath11k_vif *arvif)
{
        arvif->tcl_metadata |= FIELD_PREP(HTT_TCL_META_DATA_TYPE, 1) |
                               FIELD_PREP(HTT_TCL_META_DATA_VDEV_ID,
                                          arvif->vdev_id) |
                               FIELD_PREP(HTT_TCL_META_DATA_PDEV_ID,
                                          ar->pdev->pdev_id);

        /* set HTT extension valid bit to 0 by default */
        arvif->tcl_metadata &= ~HTT_TCL_META_DATA_VALID_HTT;

        ath11k_dp_update_vdev_search(arvif);
}

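/* idr_for_each() callback used from ath11k_dp_free() to unmap and free any
 * tx buffers still outstanding when the datapath is torn down.
 */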
static int ath11k_dp_tx_pending_cleanup(int buf_id, void *skb, void *ctx)
{
        struct ath11k_base *ab = (struct ath11k_base *)ctx;
        struct sk_buff *msdu = skb;

        dma_unmap_single(ab->dev, ATH11K_SKB_CB(msdu)->paddr, msdu->len,
                         DMA_TO_DEVICE);

        dev_kfree_skb_any(msdu);

        return 0;
}

void ath11k_dp_free(struct ath11k_base *ab)
{
        struct ath11k_dp *dp = &ab->dp;
        int i;

        ath11k_dp_link_desc_cleanup(ab, dp->link_desc_banks,
                                    HAL_WBM_IDLE_LINK, &dp->wbm_idle_ring);

        ath11k_dp_srng_common_cleanup(ab);

        ath11k_dp_reo_cmd_list_cleanup(ab);

        for (i = 0; i < DP_TCL_NUM_RING_MAX; i++) {
                spin_lock_bh(&dp->tx_ring[i].tx_idr_lock);
                idr_for_each(&dp->tx_ring[i].txbuf_idr,
                             ath11k_dp_tx_pending_cleanup, ab);
                idr_destroy(&dp->tx_ring[i].txbuf_idr);
                spin_unlock_bh(&dp->tx_ring[i].tx_idr_lock);
                kfree(dp->tx_ring[i].tx_status);
        }

        /* Deinit any SOC level resource */
}

int ath11k_dp_alloc(struct ath11k_base *ab)
{
        struct ath11k_dp *dp = &ab->dp;
        struct hal_srng *srng = NULL;
        size_t size = 0;
        u32 n_link_desc = 0;
        int ret;
        int i;

        dp->ab = ab;

        INIT_LIST_HEAD(&dp->reo_cmd_list);
        INIT_LIST_HEAD(&dp->reo_cmd_cache_flush_list);
        spin_lock_init(&dp->reo_cmd_lock);

        ret = ath11k_wbm_idle_ring_setup(ab, &n_link_desc);
        if (ret) {
                ath11k_warn(ab, "failed to setup wbm_idle_ring: %d\n", ret);
                return ret;
        }

        srng = &ab->hal.srng_list[dp->wbm_idle_ring.ring_id];

        ret = ath11k_dp_link_desc_setup(ab, dp->link_desc_banks,
                                        HAL_WBM_IDLE_LINK, srng, n_link_desc);
        if (ret) {
                ath11k_warn(ab, "failed to setup link desc: %d\n", ret);
                return ret;
        }

        ret = ath11k_dp_srng_common_setup(ab);
        if (ret)
                goto fail_link_desc_cleanup;

        size = sizeof(struct hal_wbm_release_ring) * DP_TX_COMP_RING_SIZE;

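        /* Per TCL ring: an IDR tracks msdus handed to the hardware, and
         * tx_status is a host-side buffer of DP_TX_COMP_RING_SIZE completion
         * descriptors indexed by the head/tail values set below.
         */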
        for (i = 0; i < DP_TCL_NUM_RING_MAX; i++) {
                idr_init(&dp->tx_ring[i].txbuf_idr);
                spin_lock_init(&dp->tx_ring[i].tx_idr_lock);
                dp->tx_ring[i].tcl_data_ring_id = i;

                dp->tx_ring[i].tx_status_head = 0;
                dp->tx_ring[i].tx_status_tail = DP_TX_COMP_RING_SIZE - 1;
                dp->tx_ring[i].tx_status = kmalloc(size, GFP_KERNEL);
                if (!dp->tx_ring[i].tx_status) {
                        ret = -ENOMEM;
                        goto fail_cmn_srng_cleanup;
                }
        }

        for (i = 0; i < HAL_DSCP_TID_MAP_TBL_NUM_ENTRIES_MAX; i++)
                ath11k_hal_tx_set_dscp_tid_map(ab, i);

        /* Init any SOC level resource for DP */

        return 0;

fail_cmn_srng_cleanup:
        ath11k_dp_srng_common_cleanup(ab);

fail_link_desc_cleanup:
        ath11k_dp_link_desc_cleanup(ab, dp->link_desc_banks,
                                    HAL_WBM_IDLE_LINK, &dp->wbm_idle_ring);

        return ret;
}