// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <crypto/hash.h>
#include "core.h"
#include "dp_tx.h"
#include "hal_tx.h"
#include "hif.h"
#include "debug.h"
#include "dp_rx.h"
#include "peer.h"

static void ath11k_dp_htt_htc_tx_complete(struct ath11k_base *ab,
					  struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
}

void ath11k_dp_peer_cleanup(struct ath11k *ar, int vdev_id, const u8 *addr)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_peer *peer;

	/* TODO: Any other peer specific DP cleanup */

	spin_lock_bh(&ab->base_lock);
	peer = ath11k_peer_find(ab, vdev_id, addr);
	if (!peer) {
		ath11k_warn(ab, "failed to lookup peer %pM on vdev %d\n",
			    addr, vdev_id);
		spin_unlock_bh(&ab->base_lock);
		return;
	}

	ath11k_peer_rx_tid_cleanup(ar, peer);
	peer->dp_setup_done = false;
	crypto_free_shash(peer->tfm_mmic);
	spin_unlock_bh(&ab->base_lock);
}

int ath11k_dp_peer_setup(struct ath11k *ar, int vdev_id, const u8 *addr)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_peer *peer;
	u32 reo_dest;
	int ret = 0, tid;

	/* NOTE: reo_dest ring id starts from 1 unlike mac_id which starts from 0 */
	reo_dest = ar->dp.mac_id + 1;
	ret = ath11k_wmi_set_peer_param(ar, addr, vdev_id,
					WMI_PEER_SET_DEFAULT_ROUTING,
					DP_RX_HASH_ENABLE | (reo_dest << 1));

	if (ret) {
		ath11k_warn(ab, "failed to set default routing %d peer :%pM vdev_id :%d\n",
			    ret, addr, vdev_id);
		return ret;
	}

	for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) {
		ret = ath11k_peer_rx_tid_setup(ar, addr, vdev_id, tid, 1, 0,
					       HAL_PN_TYPE_NONE);
		if (ret) {
			ath11k_warn(ab, "failed to setup rxd tid queue for tid %d: %d\n",
				    tid, ret);
			goto peer_clean;
		}
	}

	ret = ath11k_peer_rx_frag_setup(ar, addr, vdev_id);
	if (ret) {
		ath11k_warn(ab, "failed to setup rx defrag context\n");
		tid--;
		goto peer_clean;
	}

	/* TODO: Setup other peer specific resource used in data path */

	return 0;

peer_clean:
	spin_lock_bh(&ab->base_lock);

	peer = ath11k_peer_find(ab, vdev_id, addr);
	if (!peer) {
		ath11k_warn(ab, "failed to find the peer to del rx tid\n");
		spin_unlock_bh(&ab->base_lock);
		return -ENOENT;
	}

	for (; tid >= 0; tid--)
		ath11k_peer_rx_tid_delete(ar, peer, tid);

	spin_unlock_bh(&ab->base_lock);

	return ret;
}

void ath11k_dp_srng_cleanup(struct ath11k_base *ab, struct dp_srng *ring)
{
	if (!ring->vaddr_unaligned)
		return;

	if (ring->cached)
		kfree(ring->vaddr_unaligned);
	else
		dma_free_coherent(ab->dev, ring->size, ring->vaddr_unaligned,
				  ring->paddr_unaligned);

	ring->vaddr_unaligned = NULL;
}

static int ath11k_dp_srng_find_ring_in_mask(int ring_num, const u8 *grp_mask)
{
	int ext_group_num;
	u8 mask = 1 << ring_num;

	for (ext_group_num = 0; ext_group_num < ATH11K_EXT_IRQ_GRP_NUM_MAX;
	     ext_group_num++) {
		if (mask & grp_mask[ext_group_num])
			return ext_group_num;
	}

	return -ENOENT;
}

static int ath11k_dp_srng_calculate_msi_group(struct ath11k_base *ab,
					      enum hal_ring_type type, int ring_num)
{
	const u8 *grp_mask;

	switch (type) {
	case HAL_WBM2SW_RELEASE:
		if (ring_num == DP_RX_RELEASE_RING_NUM) {
			grp_mask = &ab->hw_params.ring_mask->rx_wbm_rel[0];
			ring_num = 0;
		} else {
			grp_mask = &ab->hw_params.ring_mask->tx[0];
		}
		break;
	case HAL_REO_EXCEPTION:
		grp_mask = &ab->hw_params.ring_mask->rx_err[0];
		break;
	case HAL_REO_DST:
		grp_mask = &ab->hw_params.ring_mask->rx[0];
		break;
	case HAL_REO_STATUS:
		grp_mask = &ab->hw_params.ring_mask->reo_status[0];
		break;
	case HAL_RXDMA_MONITOR_STATUS:
	case HAL_RXDMA_MONITOR_DST:
		grp_mask = &ab->hw_params.ring_mask->rx_mon_status[0];
		break;
	case HAL_RXDMA_DST:
		grp_mask = &ab->hw_params.ring_mask->rxdma2host[0];
		break;
	case HAL_RXDMA_BUF:
		grp_mask = &ab->hw_params.ring_mask->host2rxdma[0];
		break;
	case HAL_RXDMA_MONITOR_BUF:
	case HAL_TCL_DATA:
	case HAL_TCL_CMD:
	case HAL_REO_CMD:
	case HAL_SW2WBM_RELEASE:
	case HAL_WBM_IDLE_LINK:
	case HAL_TCL_STATUS:
	case HAL_REO_REINJECT:
	case HAL_CE_SRC:
	case HAL_CE_DST:
	case HAL_CE_DST_STATUS:
	default:
		return -ENOENT;
	}

	return ath11k_dp_srng_find_ring_in_mask(ring_num, grp_mask);
}

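/* The ext irq group found by ath11k_dp_srng_calculate_msi_group() is mapped
 * onto the DP MSI vectors below with a modulo. For illustration, assuming
 * msi_data_count is 3, groups 0, 1 and 2 map to msi_data_start + 0/1/2 and
 * group 4 wraps around to msi_data_start + 1, which is the case flagged by
 * the "multiple msi_groups share one msi" debug message.
 */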
static void ath11k_dp_srng_msi_setup(struct ath11k_base *ab,
				     struct hal_srng_params *ring_params,
				     enum hal_ring_type type, int ring_num)
{
	int msi_group_number, msi_data_count;
	u32 msi_data_start, msi_irq_start, addr_lo, addr_hi;
	int ret;

	ret = ath11k_get_user_msi_vector(ab, "DP",
					 &msi_data_count, &msi_data_start,
					 &msi_irq_start);
	if (ret)
		return;

	msi_group_number = ath11k_dp_srng_calculate_msi_group(ab, type,
							      ring_num);
	if (msi_group_number < 0) {
		ath11k_dbg(ab, ATH11K_DBG_PCI,
			   "ring not part of an ext_group; ring_type: %d,ring_num %d",
			   type, ring_num);
		ring_params->msi_addr = 0;
		ring_params->msi_data = 0;
		return;
	}

	if (msi_group_number > msi_data_count) {
		ath11k_dbg(ab, ATH11K_DBG_PCI,
			   "multiple msi_groups share one msi, msi_group_num %d",
			   msi_group_number);
	}

	ath11k_get_msi_address(ab, &addr_lo, &addr_hi);

	ring_params->msi_addr = addr_lo;
	ring_params->msi_addr |= (dma_addr_t)(((uint64_t)addr_hi) << 32);
	ring_params->msi_data = (msi_group_number % msi_data_count)
				+ msi_data_start;
	ring_params->flags |= HAL_SRNG_FLAGS_MSI_INTR;
}

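/* Helper used throughout this file to allocate and register an SRNG. A
 * typical pairing, as done in ath11k_dp_srng_common_setup()/_cleanup(), is:
 *
 *	ret = ath11k_dp_srng_setup(ab, &dp->reo_cmd_ring, HAL_REO_CMD,
 *				   0, 0, DP_REO_CMD_RING_SIZE);
 *	...
 *	ath11k_dp_srng_cleanup(ab, &dp->reo_cmd_ring);
 *
 * The extra HAL_RING_BASE_ALIGN - 1 bytes added to ring->size allow
 * PTR_ALIGN() to place the ring base on a HAL_RING_BASE_ALIGN boundary
 * within the allocation.
 */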
int ath11k_dp_srng_setup(struct ath11k_base *ab, struct dp_srng *ring,
			 enum hal_ring_type type, int ring_num,
			 int mac_id, int num_entries)
{
	struct hal_srng_params params = { 0 };
	int entry_sz = ath11k_hal_srng_get_entrysize(ab, type);
	int max_entries = ath11k_hal_srng_get_max_entries(ab, type);
	int ret;
	bool cached = false;

	if (max_entries < 0 || entry_sz < 0)
		return -EINVAL;

	if (num_entries > max_entries)
		num_entries = max_entries;

	ring->size = (num_entries * entry_sz) + HAL_RING_BASE_ALIGN - 1;

	if (ab->hw_params.alloc_cacheable_memory) {
		/* Allocate the reo dst and tx completion rings from cacheable memory */
		switch (type) {
		case HAL_REO_DST:
		case HAL_WBM2SW_RELEASE:
			cached = true;
			break;
		default:
			cached = false;
		}

		if (cached) {
			ring->vaddr_unaligned = kzalloc(ring->size, GFP_KERNEL);
			ring->paddr_unaligned = virt_to_phys(ring->vaddr_unaligned);
		}
	}

	if (!cached)
		ring->vaddr_unaligned = dma_alloc_coherent(ab->dev, ring->size,
							   &ring->paddr_unaligned,
							   GFP_KERNEL);

	if (!ring->vaddr_unaligned)
		return -ENOMEM;

	ring->vaddr = PTR_ALIGN(ring->vaddr_unaligned, HAL_RING_BASE_ALIGN);
	ring->paddr = ring->paddr_unaligned + ((unsigned long)ring->vaddr -
		      (unsigned long)ring->vaddr_unaligned);

	params.ring_base_vaddr = ring->vaddr;
	params.ring_base_paddr = ring->paddr;
	params.num_entries = num_entries;
	ath11k_dp_srng_msi_setup(ab, &params, type, ring_num + mac_id);

	switch (type) {
	case HAL_REO_DST:
		params.intr_batch_cntr_thres_entries =
					HAL_SRNG_INT_BATCH_THRESHOLD_RX;
		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
		break;
	case HAL_RXDMA_BUF:
	case HAL_RXDMA_MONITOR_BUF:
	case HAL_RXDMA_MONITOR_STATUS:
		params.low_threshold = num_entries >> 3;
		params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
		params.intr_batch_cntr_thres_entries = 0;
		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
		break;
	case HAL_WBM2SW_RELEASE:
		if (ring_num < 3) {
			params.intr_batch_cntr_thres_entries =
					HAL_SRNG_INT_BATCH_THRESHOLD_TX;
			params.intr_timer_thres_us =
					HAL_SRNG_INT_TIMER_THRESHOLD_TX;
			break;
		}
		/* follow through when ring_num >= 3 */
		fallthrough;
	case HAL_REO_EXCEPTION:
	case HAL_REO_REINJECT:
	case HAL_REO_CMD:
	case HAL_REO_STATUS:
	case HAL_TCL_DATA:
	case HAL_TCL_CMD:
	case HAL_TCL_STATUS:
	case HAL_WBM_IDLE_LINK:
	case HAL_SW2WBM_RELEASE:
	case HAL_RXDMA_DST:
	case HAL_RXDMA_MONITOR_DST:
	case HAL_RXDMA_MONITOR_DESC:
		params.intr_batch_cntr_thres_entries =
					HAL_SRNG_INT_BATCH_THRESHOLD_OTHER;
		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_OTHER;
		break;
	case HAL_RXDMA_DIR_BUF:
		break;
	default:
		ath11k_warn(ab, "Not a valid ring type in dp :%d\n", type);
		return -EINVAL;
	}

	if (cached) {
		params.flags |= HAL_SRNG_FLAGS_CACHED;
		ring->cached = 1;
	}

	ret = ath11k_hal_srng_setup(ab, type, ring_num, mac_id, &params);
	if (ret < 0) {
		ath11k_warn(ab, "failed to setup srng: %d ring_id %d\n",
			    ret, ring_num);
		return ret;
	}

	ring->ring_id = ret;

	return 0;
}

void ath11k_dp_stop_shadow_timers(struct ath11k_base *ab)
{
	int i;

	if (!ab->hw_params.supports_shadow_regs)
		return;

	for (i = 0; i < ab->hw_params.max_tx_ring; i++)
		ath11k_dp_shadow_stop_timer(ab, &ab->dp.tx_ring_timer[i]);

	ath11k_dp_shadow_stop_timer(ab, &ab->dp.reo_cmd_timer);
}

static void ath11k_dp_srng_common_cleanup(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	int i;

	ath11k_dp_stop_shadow_timers(ab);
	ath11k_dp_srng_cleanup(ab, &dp->wbm_desc_rel_ring);
	ath11k_dp_srng_cleanup(ab, &dp->tcl_cmd_ring);
	ath11k_dp_srng_cleanup(ab, &dp->tcl_status_ring);
	for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
		ath11k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_data_ring);
		ath11k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_comp_ring);
	}
	ath11k_dp_srng_cleanup(ab, &dp->reo_reinject_ring);
	ath11k_dp_srng_cleanup(ab, &dp->rx_rel_ring);
	ath11k_dp_srng_cleanup(ab, &dp->reo_except_ring);
	ath11k_dp_srng_cleanup(ab, &dp->reo_cmd_ring);
	ath11k_dp_srng_cleanup(ab, &dp->reo_status_ring);
}

static int ath11k_dp_srng_common_setup(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	struct hal_srng *srng;
	int i, ret;
	u8 tcl_num, wbm_num;

	ret = ath11k_dp_srng_setup(ab, &dp->wbm_desc_rel_ring,
				   HAL_SW2WBM_RELEASE, 0, 0,
				   DP_WBM_RELEASE_RING_SIZE);
	if (ret) {
		ath11k_warn(ab, "failed to set up wbm2sw_release ring :%d\n",
			    ret);
		goto err;
	}

	ret = ath11k_dp_srng_setup(ab, &dp->tcl_cmd_ring, HAL_TCL_CMD, 0, 0,
				   DP_TCL_CMD_RING_SIZE);
	if (ret) {
		ath11k_warn(ab, "failed to set up tcl_cmd ring :%d\n", ret);
		goto err;
	}

	ret = ath11k_dp_srng_setup(ab, &dp->tcl_status_ring, HAL_TCL_STATUS,
				   0, 0, DP_TCL_STATUS_RING_SIZE);
	if (ret) {
		ath11k_warn(ab, "failed to set up tcl_status ring :%d\n", ret);
		goto err;
	}

	for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
		tcl_num = ab->hw_params.hal_params->tcl2wbm_rbm_map[i].tcl_ring_num;
		wbm_num = ab->hw_params.hal_params->tcl2wbm_rbm_map[i].wbm_ring_num;

		ret = ath11k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_data_ring,
					   HAL_TCL_DATA, tcl_num, 0,
					   ab->hw_params.tx_ring_size);
		if (ret) {
			ath11k_warn(ab, "failed to set up tcl_data ring (%d) :%d\n",
				    i, ret);
			goto err;
		}

		ret = ath11k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_comp_ring,
					   HAL_WBM2SW_RELEASE, wbm_num, 0,
					   DP_TX_COMP_RING_SIZE);
		if (ret) {
			ath11k_warn(ab, "failed to set up tcl_comp ring (%d) :%d\n",
				    i, ret);
			goto err;
		}

		srng = &ab->hal.srng_list[dp->tx_ring[i].tcl_data_ring.ring_id];
		ath11k_hal_tx_init_data_ring(ab, srng);

		ath11k_dp_shadow_init_timer(ab, &dp->tx_ring_timer[i],
					    ATH11K_SHADOW_DP_TIMER_INTERVAL,
					    dp->tx_ring[i].tcl_data_ring.ring_id);
	}

	ret = ath11k_dp_srng_setup(ab, &dp->reo_reinject_ring, HAL_REO_REINJECT,
				   0, 0, DP_REO_REINJECT_RING_SIZE);
	if (ret) {
		ath11k_warn(ab, "failed to set up reo_reinject ring :%d\n",
			    ret);
		goto err;
	}

	ret = ath11k_dp_srng_setup(ab, &dp->rx_rel_ring, HAL_WBM2SW_RELEASE,
				   DP_RX_RELEASE_RING_NUM, 0, DP_RX_RELEASE_RING_SIZE);
	if (ret) {
		ath11k_warn(ab, "failed to set up rx_rel ring :%d\n", ret);
		goto err;
	}

	ret = ath11k_dp_srng_setup(ab, &dp->reo_except_ring, HAL_REO_EXCEPTION,
				   0, 0, DP_REO_EXCEPTION_RING_SIZE);
	if (ret) {
		ath11k_warn(ab, "failed to set up reo_exception ring :%d\n",
			    ret);
		goto err;
	}

	ret = ath11k_dp_srng_setup(ab, &dp->reo_cmd_ring, HAL_REO_CMD,
				   0, 0, DP_REO_CMD_RING_SIZE);
	if (ret) {
		ath11k_warn(ab, "failed to set up reo_cmd ring :%d\n", ret);
		goto err;
	}

	srng = &ab->hal.srng_list[dp->reo_cmd_ring.ring_id];
	ath11k_hal_reo_init_cmd_ring(ab, srng);

	ath11k_dp_shadow_init_timer(ab, &dp->reo_cmd_timer,
				    ATH11K_SHADOW_CTRL_TIMER_INTERVAL,
				    dp->reo_cmd_ring.ring_id);

	ret = ath11k_dp_srng_setup(ab, &dp->reo_status_ring, HAL_REO_STATUS,
				   0, 0, DP_REO_STATUS_RING_SIZE);
	if (ret) {
		ath11k_warn(ab, "failed to set up reo_status ring :%d\n", ret);
		goto err;
	}

	/* When hash based routing of rx packet is enabled, 32 entries to map
	 * the hash values to the ring will be configured.
	 */
	ab->hw_params.hw_ops->reo_setup(ab);

	return 0;

err:
	ath11k_dp_srng_common_cleanup(ab);

	return ret;
}

static void ath11k_dp_scatter_idle_link_desc_cleanup(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	struct hal_wbm_idle_scatter_list *slist = dp->scatter_list;
	int i;

	for (i = 0; i < DP_IDLE_SCATTER_BUFS_MAX; i++) {
		if (!slist[i].vaddr)
			continue;

		dma_free_coherent(ab->dev, HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX,
				  slist[i].vaddr, slist[i].paddr);
		slist[i].vaddr = NULL;
	}
}

static int ath11k_dp_scatter_idle_link_desc_setup(struct ath11k_base *ab,
						  int size,
						  u32 n_link_desc_bank,
						  u32 n_link_desc,
						  u32 last_bank_sz)
{
	struct ath11k_dp *dp = &ab->dp;
	struct dp_link_desc_bank *link_desc_banks = dp->link_desc_banks;
	struct hal_wbm_idle_scatter_list *slist = dp->scatter_list;
	u32 n_entries_per_buf;
	int num_scatter_buf, scatter_idx;
	struct hal_wbm_link_desc *scatter_buf;
	int align_bytes, n_entries;
	dma_addr_t paddr;
	int rem_entries;
	int i;
	int ret = 0;
	u32 end_offset;

	n_entries_per_buf = HAL_WBM_IDLE_SCATTER_BUF_SIZE /
			    ath11k_hal_srng_get_entrysize(ab, HAL_WBM_IDLE_LINK);
	num_scatter_buf = DIV_ROUND_UP(size, HAL_WBM_IDLE_SCATTER_BUF_SIZE);

	if (num_scatter_buf > DP_IDLE_SCATTER_BUFS_MAX)
		return -EINVAL;

	for (i = 0; i < num_scatter_buf; i++) {
		slist[i].vaddr = dma_alloc_coherent(ab->dev,
						    HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX,
						    &slist[i].paddr, GFP_KERNEL);
		if (!slist[i].vaddr) {
			ret = -ENOMEM;
			goto err;
		}
	}

	scatter_idx = 0;
	scatter_buf = slist[scatter_idx].vaddr;
	rem_entries = n_entries_per_buf;

	for (i = 0; i < n_link_desc_bank; i++) {
		align_bytes = link_desc_banks[i].vaddr -
			      link_desc_banks[i].vaddr_unaligned;
		n_entries = (DP_LINK_DESC_ALLOC_SIZE_THRESH - align_bytes) /
			    HAL_LINK_DESC_SIZE;
		paddr = link_desc_banks[i].paddr;
		while (n_entries) {
			ath11k_hal_set_link_desc_addr(scatter_buf, i, paddr);
			n_entries--;
			paddr += HAL_LINK_DESC_SIZE;
			if (rem_entries) {
				rem_entries--;
				scatter_buf++;
				continue;
			}

			rem_entries = n_entries_per_buf;
			scatter_idx++;
			scatter_buf = slist[scatter_idx].vaddr;
		}
	}

	end_offset = (scatter_buf - slist[scatter_idx].vaddr) *
		     sizeof(struct hal_wbm_link_desc);
	ath11k_hal_setup_link_idle_list(ab, slist, num_scatter_buf,
					n_link_desc, end_offset);

	return 0;

err:
	ath11k_dp_scatter_idle_link_desc_cleanup(ab);

	return ret;
}

static void
ath11k_dp_link_desc_bank_free(struct ath11k_base *ab,
			      struct dp_link_desc_bank *link_desc_banks)
{
	int i;

	for (i = 0; i < DP_LINK_DESC_BANKS_MAX; i++) {
		if (link_desc_banks[i].vaddr_unaligned) {
			dma_free_coherent(ab->dev,
					  link_desc_banks[i].size,
					  link_desc_banks[i].vaddr_unaligned,
					  link_desc_banks[i].paddr_unaligned);
			link_desc_banks[i].vaddr_unaligned = NULL;
		}
	}
}

static int ath11k_dp_link_desc_bank_alloc(struct ath11k_base *ab,
					  struct dp_link_desc_bank *desc_bank,
					  int n_link_desc_bank,
					  int last_bank_sz)
{
	struct ath11k_dp *dp = &ab->dp;
	int i;
	int ret = 0;
	int desc_sz = DP_LINK_DESC_ALLOC_SIZE_THRESH;

	for (i = 0; i < n_link_desc_bank; i++) {
		if (i == (n_link_desc_bank - 1) && last_bank_sz)
			desc_sz = last_bank_sz;

		desc_bank[i].vaddr_unaligned =
					dma_alloc_coherent(ab->dev, desc_sz,
							   &desc_bank[i].paddr_unaligned,
							   GFP_KERNEL);
		if (!desc_bank[i].vaddr_unaligned) {
			ret = -ENOMEM;
			goto err;
		}

		desc_bank[i].vaddr = PTR_ALIGN(desc_bank[i].vaddr_unaligned,
					       HAL_LINK_DESC_ALIGN);
		desc_bank[i].paddr = desc_bank[i].paddr_unaligned +
				     ((unsigned long)desc_bank[i].vaddr -
				      (unsigned long)desc_bank[i].vaddr_unaligned);
		desc_bank[i].size = desc_sz;
	}

	return 0;

err:
	ath11k_dp_link_desc_bank_free(ab, dp->link_desc_banks);

	return ret;
}

void ath11k_dp_link_desc_cleanup(struct ath11k_base *ab,
				 struct dp_link_desc_bank *desc_bank,
				 u32 ring_type, struct dp_srng *ring)
{
	ath11k_dp_link_desc_bank_free(ab, desc_bank);

	if (ring_type != HAL_RXDMA_MONITOR_DESC) {
		ath11k_dp_srng_cleanup(ab, ring);
		ath11k_dp_scatter_idle_link_desc_cleanup(ab);
	}
}

static int ath11k_wbm_idle_ring_setup(struct ath11k_base *ab, u32 *n_link_desc)
{
	struct ath11k_dp *dp = &ab->dp;
	u32 n_mpdu_link_desc, n_mpdu_queue_desc;
	u32 n_tx_msdu_link_desc, n_rx_msdu_link_desc;
	int ret = 0;

	n_mpdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_MPDUS_PER_TID_MAX) /
			   HAL_NUM_MPDUS_PER_LINK_DESC;

	n_mpdu_queue_desc = n_mpdu_link_desc /
			    HAL_NUM_MPDU_LINKS_PER_QUEUE_DESC;

	n_tx_msdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_FLOWS_PER_TID *
			       DP_AVG_MSDUS_PER_FLOW) /
			      HAL_NUM_TX_MSDUS_PER_LINK_DESC;

	n_rx_msdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_MPDUS_PER_TID_MAX *
			       DP_AVG_MSDUS_PER_MPDU) /
			      HAL_NUM_RX_MSDUS_PER_LINK_DESC;

	*n_link_desc = n_mpdu_link_desc + n_mpdu_queue_desc +
		       n_tx_msdu_link_desc + n_rx_msdu_link_desc;

	if (*n_link_desc & (*n_link_desc - 1))
		*n_link_desc = 1 << fls(*n_link_desc);

	ret = ath11k_dp_srng_setup(ab, &dp->wbm_idle_ring,
				   HAL_WBM_IDLE_LINK, 0, 0, *n_link_desc);
	if (ret) {
		ath11k_warn(ab, "failed to setup wbm_idle_ring: %d\n", ret);
		return ret;
	}
	return ret;
}

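/* Summary of the bank sizing below: the total requirement is
 * n_link_desc * HAL_LINK_DESC_SIZE plus HAL_LINK_DESC_ALIGN bytes of slack.
 * If that fits in DP_LINK_DESC_ALLOC_SIZE_THRESH a single bank is used;
 * otherwise it is split into full banks of (DP_LINK_DESC_ALLOC_SIZE_THRESH -
 * HAL_LINK_DESC_ALIGN) bytes plus a smaller last bank for the remainder,
 * each aligned via PTR_ALIGN() in ath11k_dp_link_desc_bank_alloc().
 */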
int ath11k_dp_link_desc_setup(struct ath11k_base *ab,
			      struct dp_link_desc_bank *link_desc_banks,
			      u32 ring_type, struct hal_srng *srng,
			      u32 n_link_desc)
{
	u32 tot_mem_sz;
	u32 n_link_desc_bank, last_bank_sz;
	u32 entry_sz, align_bytes, n_entries;
	u32 paddr;
	u32 *desc;
	int i, ret;

	tot_mem_sz = n_link_desc * HAL_LINK_DESC_SIZE;
	tot_mem_sz += HAL_LINK_DESC_ALIGN;

	if (tot_mem_sz <= DP_LINK_DESC_ALLOC_SIZE_THRESH) {
		n_link_desc_bank = 1;
		last_bank_sz = tot_mem_sz;
	} else {
		n_link_desc_bank = tot_mem_sz /
				   (DP_LINK_DESC_ALLOC_SIZE_THRESH -
				    HAL_LINK_DESC_ALIGN);
		last_bank_sz = tot_mem_sz %
			       (DP_LINK_DESC_ALLOC_SIZE_THRESH -
				HAL_LINK_DESC_ALIGN);

		if (last_bank_sz)
			n_link_desc_bank += 1;
	}

	if (n_link_desc_bank > DP_LINK_DESC_BANKS_MAX)
		return -EINVAL;

	ret = ath11k_dp_link_desc_bank_alloc(ab, link_desc_banks,
					     n_link_desc_bank, last_bank_sz);
	if (ret)
		return ret;

	/* Setup link desc idle list for HW internal usage */
	entry_sz = ath11k_hal_srng_get_entrysize(ab, ring_type);
	tot_mem_sz = entry_sz * n_link_desc;

	/* Setup scatter desc list when the total memory requirement is more */
	if (tot_mem_sz > DP_LINK_DESC_ALLOC_SIZE_THRESH &&
	    ring_type != HAL_RXDMA_MONITOR_DESC) {
		ret = ath11k_dp_scatter_idle_link_desc_setup(ab, tot_mem_sz,
							     n_link_desc_bank,
							     n_link_desc,
							     last_bank_sz);
		if (ret) {
			ath11k_warn(ab, "failed to setup scatter idle list descriptor :%d\n",
				    ret);
			goto fail_desc_bank_free;
		}

		return 0;
	}

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	for (i = 0; i < n_link_desc_bank; i++) {
		align_bytes = link_desc_banks[i].vaddr -
			      link_desc_banks[i].vaddr_unaligned;
		n_entries = (link_desc_banks[i].size - align_bytes) /
			    HAL_LINK_DESC_SIZE;
		paddr = link_desc_banks[i].paddr;
		while (n_entries &&
		       (desc = ath11k_hal_srng_src_get_next_entry(ab, srng))) {
			ath11k_hal_set_link_desc_addr((struct hal_wbm_link_desc *)desc,
						      i, paddr);
			n_entries--;
			paddr += HAL_LINK_DESC_SIZE;
		}
	}

	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	return 0;

fail_desc_bank_free:
	ath11k_dp_link_desc_bank_free(ab, link_desc_banks);

	return ret;
}

int ath11k_dp_service_srng(struct ath11k_base *ab,
			   struct ath11k_ext_irq_grp *irq_grp,
			   int budget)
{
	struct napi_struct *napi = &irq_grp->napi;
	const struct ath11k_hw_hal_params *hal_params;
	int grp_id = irq_grp->grp_id;
	int work_done = 0;
	int i, j;
	int tot_work_done = 0;

	for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
		if (BIT(ab->hw_params.hal_params->tcl2wbm_rbm_map[i].wbm_ring_num) &
		    ab->hw_params.ring_mask->tx[grp_id])
			ath11k_dp_tx_completion_handler(ab, i);
	}

	if (ab->hw_params.ring_mask->rx_err[grp_id]) {
		work_done = ath11k_dp_process_rx_err(ab, napi, budget);
		budget -= work_done;
		tot_work_done += work_done;
		if (budget <= 0)
			goto done;
	}

	if (ab->hw_params.ring_mask->rx_wbm_rel[grp_id]) {
		work_done = ath11k_dp_rx_process_wbm_err(ab,
							 napi,
							 budget);
		budget -= work_done;
		tot_work_done += work_done;

		if (budget <= 0)
			goto done;
	}

	if (ab->hw_params.ring_mask->rx[grp_id]) {
		i = fls(ab->hw_params.ring_mask->rx[grp_id]) - 1;
		work_done = ath11k_dp_process_rx(ab, i, napi,
						 budget);
		budget -= work_done;
		tot_work_done += work_done;
		if (budget <= 0)
			goto done;
	}

	if (ab->hw_params.ring_mask->rx_mon_status[grp_id]) {
		for (i = 0; i < ab->num_radios; i++) {
			for (j = 0; j < ab->hw_params.num_rxmda_per_pdev; j++) {
				int id = i * ab->hw_params.num_rxmda_per_pdev + j;

				if (ab->hw_params.ring_mask->rx_mon_status[grp_id] &
				    BIT(id)) {
					work_done =
					ath11k_dp_rx_process_mon_rings(ab,
								       id,
								       napi, budget);
					budget -= work_done;
					tot_work_done += work_done;

					if (budget <= 0)
						goto done;
				}
			}
		}
	}

	if (ab->hw_params.ring_mask->reo_status[grp_id])
		ath11k_dp_process_reo_status(ab);

	for (i = 0; i < ab->num_radios; i++) {
		for (j = 0; j < ab->hw_params.num_rxmda_per_pdev; j++) {
			int id = i * ab->hw_params.num_rxmda_per_pdev + j;

			if (ab->hw_params.ring_mask->rxdma2host[grp_id] & BIT(id)) {
				work_done = ath11k_dp_process_rxdma_err(ab, id, budget);
				budget -= work_done;
				tot_work_done += work_done;
			}

			if (budget <= 0)
				goto done;

			if (ab->hw_params.ring_mask->host2rxdma[grp_id] & BIT(id)) {
				struct ath11k *ar = ath11k_ab_to_ar(ab, id);
				struct ath11k_pdev_dp *dp = &ar->dp;
				struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;

				hal_params = ab->hw_params.hal_params;
				ath11k_dp_rxbufs_replenish(ab, id, rx_ring, 0,
							   hal_params->rx_buf_rbm);
			}
		}
	}
	/* TODO: Implement handler for other interrupts */

done:
	return tot_work_done;
}
EXPORT_SYMBOL(ath11k_dp_service_srng);

void ath11k_dp_pdev_free(struct ath11k_base *ab)
{
	struct ath11k *ar;
	int i;

	del_timer_sync(&ab->mon_reap_timer);

	for (i = 0; i < ab->num_radios; i++) {
		ar = ab->pdevs[i].ar;
		ath11k_dp_rx_pdev_free(ab, i);
		ath11k_debugfs_unregister(ar);
		ath11k_dp_rx_pdev_mon_detach(ar);
	}
}

void ath11k_dp_pdev_pre_alloc(struct ath11k_base *ab)
{
	struct ath11k *ar;
	struct ath11k_pdev_dp *dp;
	int i;
	int j;

	for (i = 0; i < ab->num_radios; i++) {
		ar = ab->pdevs[i].ar;
		dp = &ar->dp;
		dp->mac_id = i;
		idr_init(&dp->rx_refill_buf_ring.bufs_idr);
		spin_lock_init(&dp->rx_refill_buf_ring.idr_lock);
		atomic_set(&dp->num_tx_pending, 0);
		init_waitqueue_head(&dp->tx_empty_waitq);
		for (j = 0; j < ab->hw_params.num_rxmda_per_pdev; j++) {
			idr_init(&dp->rx_mon_status_refill_ring[j].bufs_idr);
			spin_lock_init(&dp->rx_mon_status_refill_ring[j].idr_lock);
		}
		idr_init(&dp->rxdma_mon_buf_ring.bufs_idr);
		spin_lock_init(&dp->rxdma_mon_buf_ring.idr_lock);
	}
}

int ath11k_dp_pdev_alloc(struct ath11k_base *ab)
{
	struct ath11k *ar;
	int ret;
	int i;

	/* TODO: Per-pdev rx ring unlike tx ring which is mapped to different AC's */
	for (i = 0; i < ab->num_radios; i++) {
		ar = ab->pdevs[i].ar;
		ret = ath11k_dp_rx_pdev_alloc(ab, i);
		if (ret) {
			ath11k_warn(ab, "failed to allocate pdev rx for pdev_id :%d\n",
				    i);
			goto err;
		}
		ret = ath11k_dp_rx_pdev_mon_attach(ar);
		if (ret) {
			ath11k_warn(ab, "failed to initialize mon pdev %d\n",
				    i);
			goto err;
		}
	}

	return 0;

err:
	ath11k_dp_pdev_free(ab);

	return ret;
}

int ath11k_dp_htt_connect(struct ath11k_dp *dp)
{
	struct ath11k_htc_svc_conn_req conn_req;
	struct ath11k_htc_svc_conn_resp conn_resp;
	int status;

	memset(&conn_req, 0, sizeof(conn_req));
	memset(&conn_resp, 0, sizeof(conn_resp));

	conn_req.ep_ops.ep_tx_complete = ath11k_dp_htt_htc_tx_complete;
	conn_req.ep_ops.ep_rx_complete = ath11k_dp_htt_htc_t2h_msg_handler;

	/* connect to control service */
	conn_req.service_id = ATH11K_HTC_SVC_ID_HTT_DATA_MSG;

	status = ath11k_htc_connect_service(&dp->ab->htc, &conn_req,
					    &conn_resp);

	if (status)
		return status;

	dp->eid = conn_resp.eid;

	return 0;
}

static void ath11k_dp_update_vdev_search(struct ath11k_vif *arvif)
{
	/* When v2_map_support is true: for STA mode, enable address
	 * search index; TCL uses the ast_hash value in the descriptor.
	 * When v2_map_support is false: for STA mode, don't enable
	 * address search index.
	 */
	switch (arvif->vdev_type) {
	case WMI_VDEV_TYPE_STA:
		if (arvif->ar->ab->hw_params.htt_peer_map_v2) {
			arvif->hal_addr_search_flags = HAL_TX_ADDRX_EN;
			arvif->search_type = HAL_TX_ADDR_SEARCH_INDEX;
		} else {
			arvif->hal_addr_search_flags = HAL_TX_ADDRY_EN;
			arvif->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
		}
		break;
	case WMI_VDEV_TYPE_AP:
	case WMI_VDEV_TYPE_IBSS:
		arvif->hal_addr_search_flags = HAL_TX_ADDRX_EN;
		arvif->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
		break;
	case WMI_VDEV_TYPE_MONITOR:
	default:
		return;
	}
}

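/* Pre-computes the per-vif TCL metadata used on the tx path: the metadata
 * type, vdev id and pdev id are packed with FIELD_PREP(), and the HTT
 * extension valid bit is cleared by default (the tx path is expected to set
 * it only for frames that carry HTT metadata).
 */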
void ath11k_dp_vdev_tx_attach(struct ath11k *ar, struct ath11k_vif *arvif)
{
	arvif->tcl_metadata |= FIELD_PREP(HTT_TCL_META_DATA_TYPE, 1) |
			       FIELD_PREP(HTT_TCL_META_DATA_VDEV_ID,
					  arvif->vdev_id) |
			       FIELD_PREP(HTT_TCL_META_DATA_PDEV_ID,
					  ar->pdev->pdev_id);

	/* set HTT extension valid bit to 0 by default */
	arvif->tcl_metadata &= ~HTT_TCL_META_DATA_VALID_HTT;

	ath11k_dp_update_vdev_search(arvif);
}

static int ath11k_dp_tx_pending_cleanup(int buf_id, void *skb, void *ctx)
{
	struct ath11k_base *ab = (struct ath11k_base *)ctx;
	struct sk_buff *msdu = skb;

	dma_unmap_single(ab->dev, ATH11K_SKB_CB(msdu)->paddr, msdu->len,
			 DMA_TO_DEVICE);

	dev_kfree_skb_any(msdu);

	return 0;
}

void ath11k_dp_free(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	int i;

	ath11k_dp_link_desc_cleanup(ab, dp->link_desc_banks,
				    HAL_WBM_IDLE_LINK, &dp->wbm_idle_ring);

	ath11k_dp_srng_common_cleanup(ab);

	ath11k_dp_reo_cmd_list_cleanup(ab);

	for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
		spin_lock_bh(&dp->tx_ring[i].tx_idr_lock);
		idr_for_each(&dp->tx_ring[i].txbuf_idr,
			     ath11k_dp_tx_pending_cleanup, ab);
		idr_destroy(&dp->tx_ring[i].txbuf_idr);
		spin_unlock_bh(&dp->tx_ring[i].tx_idr_lock);
		kfree(dp->tx_ring[i].tx_status);
	}

	/* Deinit any SOC level resource */
}

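/* SoC-level DP init: sets up the WBM idle link ring and link descriptor
 * banks, the common SRNGs, and the per-ring tx completion status buffers;
 * ath11k_dp_free() above is the matching teardown.
 */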
int ath11k_dp_alloc(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	struct hal_srng *srng = NULL;
	size_t size = 0;
	u32 n_link_desc = 0;
	int ret;
	int i;

	dp->ab = ab;

	INIT_LIST_HEAD(&dp->reo_cmd_list);
	INIT_LIST_HEAD(&dp->reo_cmd_cache_flush_list);
	INIT_LIST_HEAD(&dp->dp_full_mon_mpdu_list);
	spin_lock_init(&dp->reo_cmd_lock);

	dp->reo_cmd_cache_flush_count = 0;

	ret = ath11k_wbm_idle_ring_setup(ab, &n_link_desc);
	if (ret) {
		ath11k_warn(ab, "failed to setup wbm_idle_ring: %d\n", ret);
		return ret;
	}

	srng = &ab->hal.srng_list[dp->wbm_idle_ring.ring_id];

	ret = ath11k_dp_link_desc_setup(ab, dp->link_desc_banks,
					HAL_WBM_IDLE_LINK, srng, n_link_desc);
	if (ret) {
		ath11k_warn(ab, "failed to setup link desc: %d\n", ret);
		return ret;
	}

	ret = ath11k_dp_srng_common_setup(ab);
	if (ret)
		goto fail_link_desc_cleanup;

	size = sizeof(struct hal_wbm_release_ring) * DP_TX_COMP_RING_SIZE;

	for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
		idr_init(&dp->tx_ring[i].txbuf_idr);
		spin_lock_init(&dp->tx_ring[i].tx_idr_lock);
		dp->tx_ring[i].tcl_data_ring_id = i;

		dp->tx_ring[i].tx_status_head = 0;
		dp->tx_ring[i].tx_status_tail = DP_TX_COMP_RING_SIZE - 1;
		dp->tx_ring[i].tx_status = kmalloc(size, GFP_KERNEL);
		if (!dp->tx_ring[i].tx_status) {
			ret = -ENOMEM;
			goto fail_cmn_srng_cleanup;
		}
	}

	for (i = 0; i < HAL_DSCP_TID_MAP_TBL_NUM_ENTRIES_MAX; i++)
		ath11k_hal_tx_set_dscp_tid_map(ab, i);

	/* Init any SOC level resource for DP */

	return 0;

fail_cmn_srng_cleanup:
	ath11k_dp_srng_common_cleanup(ab);

fail_link_desc_cleanup:
	ath11k_dp_link_desc_cleanup(ab, dp->link_desc_banks,
				    HAL_WBM_IDLE_LINK, &dp->wbm_idle_ring);

	return ret;
}

static void ath11k_dp_shadow_timer_handler(struct timer_list *t)
{
	struct ath11k_hp_update_timer *update_timer = from_timer(update_timer,
								 t, timer);
	struct ath11k_base *ab = update_timer->ab;
	struct hal_srng *srng = &ab->hal.srng_list[update_timer->ring_id];

	spin_lock_bh(&srng->lock);

	/* When the timer fires, the handler checks whether any new TX has
	 * happened. It updates the HP only when there were no TX operations
	 * during the timeout interval, and then stops the timer. The timer
	 * is started again when TX happens again.
	 */
	if (update_timer->timer_tx_num != update_timer->tx_num) {
		update_timer->timer_tx_num = update_timer->tx_num;
		mod_timer(&update_timer->timer, jiffies +
			  msecs_to_jiffies(update_timer->interval));
	} else {
		update_timer->started = false;
		ath11k_hal_srng_shadow_update_hp_tp(ab, srng);
	}

	spin_unlock_bh(&srng->lock);
}

void ath11k_dp_shadow_start_timer(struct ath11k_base *ab,
				  struct hal_srng *srng,
				  struct ath11k_hp_update_timer *update_timer)
{
	lockdep_assert_held(&srng->lock);

	if (!ab->hw_params.supports_shadow_regs)
		return;

	update_timer->tx_num++;

	if (update_timer->started)
		return;

	update_timer->started = true;
	update_timer->timer_tx_num = update_timer->tx_num;
	mod_timer(&update_timer->timer, jiffies +
		  msecs_to_jiffies(update_timer->interval));
}

void ath11k_dp_shadow_stop_timer(struct ath11k_base *ab,
				 struct ath11k_hp_update_timer *update_timer)
{
	if (!ab->hw_params.supports_shadow_regs)
		return;

	if (!update_timer->init)
		return;

	del_timer_sync(&update_timer->timer);
}

void ath11k_dp_shadow_init_timer(struct ath11k_base *ab,
				 struct ath11k_hp_update_timer *update_timer,
				 u32 interval, u32 ring_id)
{
	if (!ab->hw_params.supports_shadow_regs)
		return;

	update_timer->tx_num = 0;
	update_timer->timer_tx_num = 0;
	update_timer->ab = ab;
	update_timer->ring_id = ring_id;
	update_timer->interval = interval;
	update_timer->init = true;
	timer_setup(&update_timer->timer,
		    ath11k_dp_shadow_timer_handler, 0);
}