// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <crypto/hash.h>
#include "core.h"
#include "dp_tx.h"
#include "hal_tx.h"
#include "hif.h"
#include "debug.h"
#include "dp_rx.h"
#include "peer.h"

static void ath11k_dp_htt_htc_tx_complete(struct ath11k_base *ab,
					  struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
}

void ath11k_dp_peer_cleanup(struct ath11k *ar, int vdev_id, const u8 *addr)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_peer *peer;

	/* TODO: Any other peer specific DP cleanup */

	spin_lock_bh(&ab->base_lock);
	peer = ath11k_peer_find(ab, vdev_id, addr);
	if (!peer) {
		ath11k_warn(ab, "failed to lookup peer %pM on vdev %d\n",
			    addr, vdev_id);
		spin_unlock_bh(&ab->base_lock);
		return;
	}

	ath11k_peer_rx_tid_cleanup(ar, peer);
	crypto_free_shash(peer->tfm_mmic);
	spin_unlock_bh(&ab->base_lock);
}

int ath11k_dp_peer_setup(struct ath11k *ar, int vdev_id, const u8 *addr)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_peer *peer;
	u32 reo_dest;
	int ret = 0, tid;

	/* NOTE: reo_dest ring id starts from 1 unlike mac_id which starts from 0 */
	reo_dest = ar->dp.mac_id + 1;
	ret = ath11k_wmi_set_peer_param(ar, addr, vdev_id,
					WMI_PEER_SET_DEFAULT_ROUTING,
					DP_RX_HASH_ENABLE | (reo_dest << 1));

	if (ret) {
		ath11k_warn(ab, "failed to set default routing %d peer :%pM vdev_id :%d\n",
			    ret, addr, vdev_id);
		return ret;
	}

	for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) {
		ret = ath11k_peer_rx_tid_setup(ar, addr, vdev_id, tid, 1, 0,
					       HAL_PN_TYPE_NONE);
		if (ret) {
			ath11k_warn(ab, "failed to setup rxd tid queue for tid %d: %d\n",
				    tid, ret);
			goto peer_clean;
		}
	}

	ret = ath11k_peer_rx_frag_setup(ar, addr, vdev_id);
	if (ret) {
		ath11k_warn(ab, "failed to setup rx defrag context\n");
		return ret;
	}

	/* TODO: Setup other peer specific resource used in data path */

	return 0;

peer_clean:
	spin_lock_bh(&ab->base_lock);

	peer = ath11k_peer_find(ab, vdev_id, addr);
	if (!peer) {
		ath11k_warn(ab, "failed to find the peer to del rx tid\n");
		spin_unlock_bh(&ab->base_lock);
		return -ENOENT;
	}

	for (; tid >= 0; tid--)
		ath11k_peer_rx_tid_delete(ar, peer, tid);

	spin_unlock_bh(&ab->base_lock);

	return ret;
}

void ath11k_dp_srng_cleanup(struct ath11k_base *ab, struct dp_srng *ring)
{
	if (!ring->vaddr_unaligned)
		return;

	if (ring->cached)
		kfree(ring->vaddr_unaligned);
	else
		dma_free_coherent(ab->dev, ring->size, ring->vaddr_unaligned,
				  ring->paddr_unaligned);

	ring->vaddr_unaligned = NULL;
}

static int ath11k_dp_srng_find_ring_in_mask(int ring_num, const u8 *grp_mask)
{
	int ext_group_num;
	u8 mask = 1 << ring_num;

	for (ext_group_num = 0; ext_group_num < ATH11K_EXT_IRQ_GRP_NUM_MAX;
	     ext_group_num++) {
		if (mask & grp_mask[ext_group_num])
			return ext_group_num;
	}

	return -ENOENT;
}

static int ath11k_dp_srng_calculate_msi_group(struct ath11k_base *ab,
					      enum hal_ring_type type, int ring_num)
{
	const u8 *grp_mask;

	switch (type) {
	case HAL_WBM2SW_RELEASE:
		if (ring_num == DP_RX_RELEASE_RING_NUM) {
			grp_mask = &ab->hw_params.ring_mask->rx_wbm_rel[0];
			ring_num = 0;
		} else {
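			/* The remaining WBM2SW release rings carry tx
			 * completions; look them up in the tx ring mask.
			 */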
			grp_mask = &ab->hw_params.ring_mask->tx[0];
		}
		break;
	case HAL_REO_EXCEPTION:
		grp_mask = &ab->hw_params.ring_mask->rx_err[0];
		break;
	case HAL_REO_DST:
		grp_mask = &ab->hw_params.ring_mask->rx[0];
		break;
	case HAL_REO_STATUS:
		grp_mask = &ab->hw_params.ring_mask->reo_status[0];
		break;
	case HAL_RXDMA_MONITOR_STATUS:
	case HAL_RXDMA_MONITOR_DST:
		grp_mask = &ab->hw_params.ring_mask->rx_mon_status[0];
		break;
	case HAL_RXDMA_DST:
		grp_mask = &ab->hw_params.ring_mask->rxdma2host[0];
		break;
	case HAL_RXDMA_BUF:
		grp_mask = &ab->hw_params.ring_mask->host2rxdma[0];
		break;
	case HAL_RXDMA_MONITOR_BUF:
	case HAL_TCL_DATA:
	case HAL_TCL_CMD:
	case HAL_REO_CMD:
	case HAL_SW2WBM_RELEASE:
	case HAL_WBM_IDLE_LINK:
	case HAL_TCL_STATUS:
	case HAL_REO_REINJECT:
	case HAL_CE_SRC:
	case HAL_CE_DST:
	case HAL_CE_DST_STATUS:
	default:
		return -ENOENT;
	}

	return ath11k_dp_srng_find_ring_in_mask(ring_num, grp_mask);
}

static void ath11k_dp_srng_msi_setup(struct ath11k_base *ab,
				     struct hal_srng_params *ring_params,
				     enum hal_ring_type type, int ring_num)
{
	int msi_group_number, msi_data_count;
	u32 msi_data_start, msi_irq_start, addr_lo, addr_hi;
	int ret;

	ret = ath11k_get_user_msi_vector(ab, "DP",
					 &msi_data_count, &msi_data_start,
					 &msi_irq_start);
	if (ret)
		return;

	msi_group_number = ath11k_dp_srng_calculate_msi_group(ab, type,
							      ring_num);
	if (msi_group_number < 0) {
		ath11k_dbg(ab, ATH11K_DBG_PCI,
			   "ring not part of an ext_group; ring_type: %d,ring_num %d",
			   type, ring_num);
		ring_params->msi_addr = 0;
		ring_params->msi_data = 0;
		return;
	}

	if (msi_group_number > msi_data_count) {
		ath11k_dbg(ab, ATH11K_DBG_PCI,
			   "multiple msi_groups share one msi, msi_group_num %d",
			   msi_group_number);
	}

	ath11k_get_msi_address(ab, &addr_lo, &addr_hi);

	/* Compose the 64-bit MSI target address from the low and high halves */
	ring_params->msi_addr = addr_lo;
	ring_params->msi_addr |= (dma_addr_t)(((uint64_t)addr_hi) << 32);
	ring_params->msi_data = (msi_group_number % msi_data_count)
		+ msi_data_start;
	ring_params->flags |= HAL_SRNG_FLAGS_MSI_INTR;
}

int ath11k_dp_srng_setup(struct ath11k_base *ab, struct dp_srng *ring,
			 enum hal_ring_type type, int ring_num,
			 int mac_id, int num_entries)
{
	struct hal_srng_params params = { 0 };
	int entry_sz = ath11k_hal_srng_get_entrysize(ab, type);
	int max_entries = ath11k_hal_srng_get_max_entries(ab, type);
	int ret;
	bool cached = false;

	if (max_entries < 0 || entry_sz < 0)
		return -EINVAL;

	if (num_entries > max_entries)
		num_entries = max_entries;

	ring->size = (num_entries * entry_sz) + HAL_RING_BASE_ALIGN - 1;

	if (ab->hw_params.alloc_cacheable_memory) {
		/* Allocate the reo dst and tx completion rings from cacheable memory */
		switch (type) {
		case HAL_REO_DST:
		case HAL_WBM2SW_RELEASE:
			cached = true;
			break;
		default:
			cached = false;
		}

		if (cached) {
			ring->vaddr_unaligned = kzalloc(ring->size, GFP_KERNEL);
			ring->paddr_unaligned = virt_to_phys(ring->vaddr_unaligned);
		}
	}

	if (!cached)
		ring->vaddr_unaligned = dma_alloc_coherent(ab->dev, ring->size,
							   &ring->paddr_unaligned,
							   GFP_KERNEL);

	if (!ring->vaddr_unaligned)
		return -ENOMEM;

	ring->vaddr = PTR_ALIGN(ring->vaddr_unaligned, HAL_RING_BASE_ALIGN);
	ring->paddr = ring->paddr_unaligned + ((unsigned long)ring->vaddr -
		      (unsigned long)ring->vaddr_unaligned);

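	/* From here on, ring->vaddr/ring->paddr are the HAL_RING_BASE_ALIGN
	 * aligned base addresses; the unaligned values are kept only so the
	 * buffer can be freed later in ath11k_dp_srng_cleanup().
	 */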
	params.ring_base_vaddr = ring->vaddr;
	params.ring_base_paddr = ring->paddr;
	params.num_entries = num_entries;
	ath11k_dp_srng_msi_setup(ab, &params, type, ring_num + mac_id);

	switch (type) {
	case HAL_REO_DST:
		params.intr_batch_cntr_thres_entries =
					HAL_SRNG_INT_BATCH_THRESHOLD_RX;
		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
		break;
	case HAL_RXDMA_BUF:
	case HAL_RXDMA_MONITOR_BUF:
	case HAL_RXDMA_MONITOR_STATUS:
		params.low_threshold = num_entries >> 3;
		params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
		params.intr_batch_cntr_thres_entries = 0;
		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
		break;
	case HAL_WBM2SW_RELEASE:
		if (ring_num < 3) {
			params.intr_batch_cntr_thres_entries =
					HAL_SRNG_INT_BATCH_THRESHOLD_TX;
			params.intr_timer_thres_us =
					HAL_SRNG_INT_TIMER_THRESHOLD_TX;
			break;
		}
		/* fall through when ring_num >= 3 */
		fallthrough;
	case HAL_REO_EXCEPTION:
	case HAL_REO_REINJECT:
	case HAL_REO_CMD:
	case HAL_REO_STATUS:
	case HAL_TCL_DATA:
	case HAL_TCL_CMD:
	case HAL_TCL_STATUS:
	case HAL_WBM_IDLE_LINK:
	case HAL_SW2WBM_RELEASE:
	case HAL_RXDMA_DST:
	case HAL_RXDMA_MONITOR_DST:
	case HAL_RXDMA_MONITOR_DESC:
		params.intr_batch_cntr_thres_entries =
					HAL_SRNG_INT_BATCH_THRESHOLD_OTHER;
		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_OTHER;
		break;
	case HAL_RXDMA_DIR_BUF:
		break;
	default:
		ath11k_warn(ab, "Not a valid ring type in dp :%d\n", type);
		return -EINVAL;
	}

	if (cached) {
		params.flags |= HAL_SRNG_FLAGS_CACHED;
		ring->cached = 1;
	}

	ret = ath11k_hal_srng_setup(ab, type, ring_num, mac_id, &params);
	if (ret < 0) {
		ath11k_warn(ab, "failed to setup srng: %d ring_id %d\n",
			    ret, ring_num);
		return ret;
	}

	ring->ring_id = ret;

	return 0;
}

void ath11k_dp_stop_shadow_timers(struct ath11k_base *ab)
{
	int i;

	if (!ab->hw_params.supports_shadow_regs)
		return;

	for (i = 0; i < ab->hw_params.max_tx_ring; i++)
		ath11k_dp_shadow_stop_timer(ab, &ab->dp.tx_ring_timer[i]);

	ath11k_dp_shadow_stop_timer(ab, &ab->dp.reo_cmd_timer);
}

static void ath11k_dp_srng_common_cleanup(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	int i;

	ath11k_dp_stop_shadow_timers(ab);
	ath11k_dp_srng_cleanup(ab, &dp->wbm_desc_rel_ring);
	ath11k_dp_srng_cleanup(ab, &dp->tcl_cmd_ring);
	ath11k_dp_srng_cleanup(ab, &dp->tcl_status_ring);
	for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
		ath11k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_data_ring);
		ath11k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_comp_ring);
	}
	ath11k_dp_srng_cleanup(ab, &dp->reo_reinject_ring);
	ath11k_dp_srng_cleanup(ab, &dp->rx_rel_ring);
	ath11k_dp_srng_cleanup(ab, &dp->reo_except_ring);
	ath11k_dp_srng_cleanup(ab, &dp->reo_cmd_ring);
	ath11k_dp_srng_cleanup(ab, &dp->reo_status_ring);
}

static int ath11k_dp_srng_common_setup(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	struct hal_srng *srng;
	int i, ret;
	u8 tcl_num, wbm_num;

	ret = ath11k_dp_srng_setup(ab, &dp->wbm_desc_rel_ring,
				   HAL_SW2WBM_RELEASE, 0, 0,
				   DP_WBM_RELEASE_RING_SIZE);
	if (ret) {
		ath11k_warn(ab, "failed to set up wbm2sw_release ring :%d\n",
			    ret);
		goto err;
	}

	ret = ath11k_dp_srng_setup(ab, &dp->tcl_cmd_ring, HAL_TCL_CMD, 0, 0,
				   DP_TCL_CMD_RING_SIZE);
	if (ret) {
		ath11k_warn(ab, "failed to set up tcl_cmd ring :%d\n", ret);
		goto err;
	}

	ret = ath11k_dp_srng_setup(ab, &dp->tcl_status_ring, HAL_TCL_STATUS,
				   0, 0, DP_TCL_STATUS_RING_SIZE);
	if (ret) {
		ath11k_warn(ab, "failed to set up tcl_status ring :%d\n", ret);
		goto err;
	}

	for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
		tcl_num = ab->hw_params.hal_params->tcl2wbm_rbm_map[i].tcl_ring_num;
		wbm_num = ab->hw_params.hal_params->tcl2wbm_rbm_map[i].wbm_ring_num;

		ret = ath11k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_data_ring,
					   HAL_TCL_DATA, tcl_num, 0,
					   ab->hw_params.tx_ring_size);
		if (ret) {
			ath11k_warn(ab, "failed to set up tcl_data ring (%d) :%d\n",
				    i, ret);
			goto err;
		}

		ret = ath11k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_comp_ring,
					   HAL_WBM2SW_RELEASE, wbm_num, 0,
					   DP_TX_COMP_RING_SIZE);
		if (ret) {
			ath11k_warn(ab, "failed to set up tcl_comp ring (%d) :%d\n",
				    i, ret);
			goto err;
		}

		srng = &ab->hal.srng_list[dp->tx_ring[i].tcl_data_ring.ring_id];
		ath11k_hal_tx_init_data_ring(ab, srng);

		ath11k_dp_shadow_init_timer(ab, &dp->tx_ring_timer[i],
					    ATH11K_SHADOW_DP_TIMER_INTERVAL,
					    dp->tx_ring[i].tcl_data_ring.ring_id);
	}

	ret = ath11k_dp_srng_setup(ab, &dp->reo_reinject_ring, HAL_REO_REINJECT,
				   0, 0, DP_REO_REINJECT_RING_SIZE);
	if (ret) {
		ath11k_warn(ab, "failed to set up reo_reinject ring :%d\n",
			    ret);
		goto err;
	}

	ret = ath11k_dp_srng_setup(ab, &dp->rx_rel_ring, HAL_WBM2SW_RELEASE,
				   DP_RX_RELEASE_RING_NUM, 0, DP_RX_RELEASE_RING_SIZE);
	if (ret) {
		ath11k_warn(ab, "failed to set up rx_rel ring :%d\n", ret);
		goto err;
	}

	ret = ath11k_dp_srng_setup(ab, &dp->reo_except_ring, HAL_REO_EXCEPTION,
				   0, 0, DP_REO_EXCEPTION_RING_SIZE);
	if (ret) {
		ath11k_warn(ab, "failed to set up reo_exception ring :%d\n",
			    ret);
		goto err;
	}

	ret = ath11k_dp_srng_setup(ab, &dp->reo_cmd_ring, HAL_REO_CMD,
				   0, 0, DP_REO_CMD_RING_SIZE);
	if (ret) {
		ath11k_warn(ab, "failed to set up reo_cmd ring :%d\n", ret);
		goto err;
	}

	srng = &ab->hal.srng_list[dp->reo_cmd_ring.ring_id];
	ath11k_hal_reo_init_cmd_ring(ab, srng);

	ath11k_dp_shadow_init_timer(ab, &dp->reo_cmd_timer,
				    ATH11K_SHADOW_CTRL_TIMER_INTERVAL,
				    dp->reo_cmd_ring.ring_id);

	ret = ath11k_dp_srng_setup(ab, &dp->reo_status_ring, HAL_REO_STATUS,
				   0, 0, DP_REO_STATUS_RING_SIZE);
	if (ret) {
		ath11k_warn(ab, "failed to set up reo_status ring :%d\n", ret);
		goto err;
	}

	/* When hash based routing of rx packet is enabled, 32 entries to map
	 * the hash values to the ring will be configured.
	 */
	ab->hw_params.hw_ops->reo_setup(ab);

	return 0;

err:
	ath11k_dp_srng_common_cleanup(ab);

	return ret;
}

static void ath11k_dp_scatter_idle_link_desc_cleanup(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	struct hal_wbm_idle_scatter_list *slist = dp->scatter_list;
	int i;

	for (i = 0; i < DP_IDLE_SCATTER_BUFS_MAX; i++) {
		if (!slist[i].vaddr)
			continue;

		dma_free_coherent(ab->dev, HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX,
				  slist[i].vaddr, slist[i].paddr);
		slist[i].vaddr = NULL;
	}
}

static int ath11k_dp_scatter_idle_link_desc_setup(struct ath11k_base *ab,
						  int size,
						  u32 n_link_desc_bank,
						  u32 n_link_desc,
						  u32 last_bank_sz)
{
	struct ath11k_dp *dp = &ab->dp;
	struct dp_link_desc_bank *link_desc_banks = dp->link_desc_banks;
	struct hal_wbm_idle_scatter_list *slist = dp->scatter_list;
	u32 n_entries_per_buf;
	int num_scatter_buf, scatter_idx;
	struct hal_wbm_link_desc *scatter_buf;
	int align_bytes, n_entries;
	dma_addr_t paddr;
	int rem_entries;
	int i;
	int ret = 0;
	u32 end_offset;

	n_entries_per_buf = HAL_WBM_IDLE_SCATTER_BUF_SIZE /
			    ath11k_hal_srng_get_entrysize(ab, HAL_WBM_IDLE_LINK);
	num_scatter_buf = DIV_ROUND_UP(size, HAL_WBM_IDLE_SCATTER_BUF_SIZE);

	if (num_scatter_buf > DP_IDLE_SCATTER_BUFS_MAX)
		return -EINVAL;

	for (i = 0; i < num_scatter_buf; i++) {
		slist[i].vaddr = dma_alloc_coherent(ab->dev,
						    HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX,
						    &slist[i].paddr, GFP_KERNEL);
		if (!slist[i].vaddr) {
			ret = -ENOMEM;
			goto err;
		}
	}

	scatter_idx = 0;
	scatter_buf = slist[scatter_idx].vaddr;
	rem_entries = n_entries_per_buf;

	for (i = 0; i < n_link_desc_bank; i++) {
		align_bytes = link_desc_banks[i].vaddr -
			      link_desc_banks[i].vaddr_unaligned;
		n_entries = (DP_LINK_DESC_ALLOC_SIZE_THRESH - align_bytes) /
			    HAL_LINK_DESC_SIZE;
		paddr = link_desc_banks[i].paddr;
		while (n_entries) {
			ath11k_hal_set_link_desc_addr(scatter_buf, i, paddr);
			n_entries--;
			paddr += HAL_LINK_DESC_SIZE;
			if (rem_entries) {
				rem_entries--;
				scatter_buf++;
				continue;
			}

			rem_entries = n_entries_per_buf;
			scatter_idx++;
			scatter_buf = slist[scatter_idx].vaddr;
		}
	}

	end_offset = (scatter_buf - slist[scatter_idx].vaddr) *
		     sizeof(struct hal_wbm_link_desc);
	ath11k_hal_setup_link_idle_list(ab, slist, num_scatter_buf,
					n_link_desc, end_offset);

	return 0;

err:
	ath11k_dp_scatter_idle_link_desc_cleanup(ab);

	return ret;
}

static void
ath11k_dp_link_desc_bank_free(struct ath11k_base *ab,
			      struct dp_link_desc_bank *link_desc_banks)
{
	int i;

	for (i = 0; i < DP_LINK_DESC_BANKS_MAX; i++) {
		if (link_desc_banks[i].vaddr_unaligned) {
			dma_free_coherent(ab->dev,
					  link_desc_banks[i].size,
					  link_desc_banks[i].vaddr_unaligned,
					  link_desc_banks[i].paddr_unaligned);
			link_desc_banks[i].vaddr_unaligned = NULL;
		}
	}
}

static int ath11k_dp_link_desc_bank_alloc(struct ath11k_base *ab,
					  struct dp_link_desc_bank *desc_bank,
					  int n_link_desc_bank,
					  int last_bank_sz)
{
	struct ath11k_dp *dp = &ab->dp;
	int i;
	int ret = 0;
	int desc_sz = DP_LINK_DESC_ALLOC_SIZE_THRESH;

	for (i = 0; i < n_link_desc_bank; i++) {
		if (i == (n_link_desc_bank - 1) && last_bank_sz)
			desc_sz = last_bank_sz;

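		/* Allocate the bank from coherent memory; the virtual and DMA
		 * addresses are aligned to HAL_LINK_DESC_ALIGN just below.
		 */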
		desc_bank[i].vaddr_unaligned =
					dma_alloc_coherent(ab->dev, desc_sz,
							   &desc_bank[i].paddr_unaligned,
							   GFP_KERNEL);
		if (!desc_bank[i].vaddr_unaligned) {
			ret = -ENOMEM;
			goto err;
		}

		desc_bank[i].vaddr = PTR_ALIGN(desc_bank[i].vaddr_unaligned,
					       HAL_LINK_DESC_ALIGN);
		desc_bank[i].paddr = desc_bank[i].paddr_unaligned +
				     ((unsigned long)desc_bank[i].vaddr -
				      (unsigned long)desc_bank[i].vaddr_unaligned);
		desc_bank[i].size = desc_sz;
	}

	return 0;

err:
	ath11k_dp_link_desc_bank_free(ab, dp->link_desc_banks);

	return ret;
}

void ath11k_dp_link_desc_cleanup(struct ath11k_base *ab,
				 struct dp_link_desc_bank *desc_bank,
				 u32 ring_type, struct dp_srng *ring)
{
	ath11k_dp_link_desc_bank_free(ab, desc_bank);

	if (ring_type != HAL_RXDMA_MONITOR_DESC) {
		ath11k_dp_srng_cleanup(ab, ring);
		ath11k_dp_scatter_idle_link_desc_cleanup(ab);
	}
}

static int ath11k_wbm_idle_ring_setup(struct ath11k_base *ab, u32 *n_link_desc)
{
	struct ath11k_dp *dp = &ab->dp;
	u32 n_mpdu_link_desc, n_mpdu_queue_desc;
	u32 n_tx_msdu_link_desc, n_rx_msdu_link_desc;
	int ret = 0;

	n_mpdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_MPDUS_PER_TID_MAX) /
			   HAL_NUM_MPDUS_PER_LINK_DESC;

	n_mpdu_queue_desc = n_mpdu_link_desc /
			    HAL_NUM_MPDU_LINKS_PER_QUEUE_DESC;

	n_tx_msdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_FLOWS_PER_TID *
			       DP_AVG_MSDUS_PER_FLOW) /
			      HAL_NUM_TX_MSDUS_PER_LINK_DESC;

	n_rx_msdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_MPDUS_PER_TID_MAX *
			       DP_AVG_MSDUS_PER_MPDU) /
			      HAL_NUM_RX_MSDUS_PER_LINK_DESC;

	*n_link_desc = n_mpdu_link_desc + n_mpdu_queue_desc +
		       n_tx_msdu_link_desc + n_rx_msdu_link_desc;

	/* Round the total up to the next power of two if it is not one already */
	if (*n_link_desc & (*n_link_desc - 1))
		*n_link_desc = 1 << fls(*n_link_desc);

	ret = ath11k_dp_srng_setup(ab, &dp->wbm_idle_ring,
				   HAL_WBM_IDLE_LINK, 0, 0, *n_link_desc);
	if (ret) {
		ath11k_warn(ab, "failed to setup wbm_idle_ring: %d\n", ret);
		return ret;
	}
	return ret;
}

int ath11k_dp_link_desc_setup(struct ath11k_base *ab,
			      struct dp_link_desc_bank *link_desc_banks,
			      u32 ring_type, struct hal_srng *srng,
			      u32 n_link_desc)
{
	u32 tot_mem_sz;
	u32 n_link_desc_bank, last_bank_sz;
	u32 entry_sz, align_bytes, n_entries;
	u32 paddr;
	u32 *desc;
	int i, ret;

	tot_mem_sz = n_link_desc * HAL_LINK_DESC_SIZE;
	tot_mem_sz += HAL_LINK_DESC_ALIGN;

	if (tot_mem_sz <= DP_LINK_DESC_ALLOC_SIZE_THRESH) {
		n_link_desc_bank = 1;
		last_bank_sz = tot_mem_sz;
	} else {
		n_link_desc_bank = tot_mem_sz /
				   (DP_LINK_DESC_ALLOC_SIZE_THRESH -
				    HAL_LINK_DESC_ALIGN);
		last_bank_sz = tot_mem_sz %
			       (DP_LINK_DESC_ALLOC_SIZE_THRESH -
				HAL_LINK_DESC_ALIGN);

		if (last_bank_sz)
			n_link_desc_bank += 1;
	}

	if (n_link_desc_bank > DP_LINK_DESC_BANKS_MAX)
		return -EINVAL;

	ret = ath11k_dp_link_desc_bank_alloc(ab, link_desc_banks,
					     n_link_desc_bank, last_bank_sz);
	if (ret)
		return ret;

	/* Setup link desc idle list for HW internal usage */
	entry_sz = ath11k_hal_srng_get_entrysize(ab, ring_type);
	tot_mem_sz = entry_sz * n_link_desc;

	/* Setup scatter desc list when the total memory requirement is more */
	if (tot_mem_sz > DP_LINK_DESC_ALLOC_SIZE_THRESH &&
	    ring_type != HAL_RXDMA_MONITOR_DESC) {
		ret = ath11k_dp_scatter_idle_link_desc_setup(ab, tot_mem_sz,
							     n_link_desc_bank,
							     n_link_desc,
							     last_bank_sz);
		if (ret) {
			ath11k_warn(ab, "failed to setup scatter idle list descriptor :%d\n",
				    ret);
			goto fail_desc_bank_free;
		}

		return 0;
	}

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	for (i = 0; i < n_link_desc_bank; i++) {
		align_bytes = link_desc_banks[i].vaddr -
			      link_desc_banks[i].vaddr_unaligned;
		n_entries = (link_desc_banks[i].size - align_bytes) /
			    HAL_LINK_DESC_SIZE;
		paddr = link_desc_banks[i].paddr;
		while (n_entries &&
		       (desc = ath11k_hal_srng_src_get_next_entry(ab, srng))) {
			ath11k_hal_set_link_desc_addr((struct hal_wbm_link_desc *)desc,
						      i, paddr);
			n_entries--;
			paddr += HAL_LINK_DESC_SIZE;
		}
	}

	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	return 0;

fail_desc_bank_free:
	ath11k_dp_link_desc_bank_free(ab, link_desc_banks);

	return ret;
}

int ath11k_dp_service_srng(struct ath11k_base *ab,
			   struct ath11k_ext_irq_grp *irq_grp,
			   int budget)
{
	struct napi_struct *napi = &irq_grp->napi;
	const struct ath11k_hw_hal_params *hal_params;
	int grp_id = irq_grp->grp_id;
	int work_done = 0;
	int i, j;
	int tot_work_done = 0;

	for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
		if (BIT(ab->hw_params.hal_params->tcl2wbm_rbm_map[i].wbm_ring_num) &
		    ab->hw_params.ring_mask->tx[grp_id])
			ath11k_dp_tx_completion_handler(ab, i);
	}

	if (ab->hw_params.ring_mask->rx_err[grp_id]) {
		work_done = ath11k_dp_process_rx_err(ab, napi, budget);
		budget -= work_done;
		tot_work_done += work_done;
		if (budget <= 0)
			goto done;
	}

	if (ab->hw_params.ring_mask->rx_wbm_rel[grp_id]) {
		work_done = ath11k_dp_rx_process_wbm_err(ab,
							 napi,
							 budget);
		budget -= work_done;
		tot_work_done += work_done;

		if (budget <= 0)
			goto done;
	}

	if (ab->hw_params.ring_mask->rx[grp_id]) {
		i = fls(ab->hw_params.ring_mask->rx[grp_id]) - 1;
		work_done = ath11k_dp_process_rx(ab, i, napi,
						 budget);
		budget -= work_done;
		tot_work_done += work_done;
		if (budget <= 0)
			goto done;
	}

	if (ab->hw_params.ring_mask->rx_mon_status[grp_id]) {
		for (i = 0; i < ab->num_radios; i++) {
			for (j = 0; j < ab->hw_params.num_rxmda_per_pdev; j++) {
				int id = i * ab->hw_params.num_rxmda_per_pdev + j;

				if (ab->hw_params.ring_mask->rx_mon_status[grp_id] &
					BIT(id)) {
					work_done =
					ath11k_dp_rx_process_mon_rings(ab,
								       id,
								       napi, budget);
					budget -= work_done;
					tot_work_done += work_done;

					if (budget <= 0)
						goto done;
				}
			}
		}
	}

	if (ab->hw_params.ring_mask->reo_status[grp_id])
		ath11k_dp_process_reo_status(ab);

	for (i = 0; i < ab->num_radios; i++) {
		for (j = 0; j < ab->hw_params.num_rxmda_per_pdev; j++) {
			int id = i * ab->hw_params.num_rxmda_per_pdev + j;

			if (ab->hw_params.ring_mask->rxdma2host[grp_id] & BIT(id)) {
				work_done = ath11k_dp_process_rxdma_err(ab, id, budget);
				budget -= work_done;
				tot_work_done += work_done;
			}

			if (budget <= 0)
				goto done;

			if (ab->hw_params.ring_mask->host2rxdma[grp_id] & BIT(id)) {
				struct ath11k *ar = ath11k_ab_to_ar(ab, id);
				struct ath11k_pdev_dp *dp = &ar->dp;
				struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;

				hal_params = ab->hw_params.hal_params;
				ath11k_dp_rxbufs_replenish(ab, id, rx_ring, 0,
							   hal_params->rx_buf_rbm);
			}
		}
	}
	/* TODO: Implement handler for other interrupts */

done:
	return tot_work_done;
}
EXPORT_SYMBOL(ath11k_dp_service_srng);

void ath11k_dp_pdev_free(struct ath11k_base *ab)
{
	struct ath11k *ar;
	int i;

	del_timer_sync(&ab->mon_reap_timer);

	for (i = 0; i < ab->num_radios; i++) {
		ar = ab->pdevs[i].ar;
		ath11k_dp_rx_pdev_free(ab, i);
		ath11k_debugfs_unregister(ar);
		ath11k_dp_rx_pdev_mon_detach(ar);
	}
}

void ath11k_dp_pdev_pre_alloc(struct ath11k_base *ab)
{
	struct ath11k *ar;
	struct ath11k_pdev_dp *dp;
	int i;
	int j;

	for (i = 0; i < ab->num_radios; i++) {
		ar = ab->pdevs[i].ar;
		dp = &ar->dp;
		dp->mac_id = i;
		idr_init(&dp->rx_refill_buf_ring.bufs_idr);
		spin_lock_init(&dp->rx_refill_buf_ring.idr_lock);
		atomic_set(&dp->num_tx_pending, 0);
		init_waitqueue_head(&dp->tx_empty_waitq);
		for (j = 0; j < ab->hw_params.num_rxmda_per_pdev; j++) {
			idr_init(&dp->rx_mon_status_refill_ring[j].bufs_idr);
			spin_lock_init(&dp->rx_mon_status_refill_ring[j].idr_lock);
		}
		idr_init(&dp->rxdma_mon_buf_ring.bufs_idr);
		spin_lock_init(&dp->rxdma_mon_buf_ring.idr_lock);
	}
}

int ath11k_dp_pdev_alloc(struct ath11k_base *ab)
{
	struct ath11k *ar;
	int ret;
	int i;

	/* TODO: Per-pdev rx ring, unlike tx ring which is mapped to different ACs */
	for (i = 0; i < ab->num_radios; i++) {
		ar = ab->pdevs[i].ar;
		ret = ath11k_dp_rx_pdev_alloc(ab, i);
		if (ret) {
			ath11k_warn(ab, "failed to allocate pdev rx for pdev_id :%d\n",
				    i);
			goto err;
		}
		ret = ath11k_dp_rx_pdev_mon_attach(ar);
		if (ret) {
			ath11k_warn(ab, "failed to initialize mon pdev %d\n",
				    i);
			goto err;
		}
	}

	return 0;

err:
	ath11k_dp_pdev_free(ab);

	return ret;
}

int ath11k_dp_htt_connect(struct ath11k_dp *dp)
{
	struct ath11k_htc_svc_conn_req conn_req;
	struct ath11k_htc_svc_conn_resp conn_resp;
	int status;

	memset(&conn_req, 0, sizeof(conn_req));
	memset(&conn_resp, 0, sizeof(conn_resp));

	conn_req.ep_ops.ep_tx_complete = ath11k_dp_htt_htc_tx_complete;
	conn_req.ep_ops.ep_rx_complete = ath11k_dp_htt_htc_t2h_msg_handler;

	/* connect to control service */
	conn_req.service_id = ATH11K_HTC_SVC_ID_HTT_DATA_MSG;

	status = ath11k_htc_connect_service(&dp->ab->htc, &conn_req,
					    &conn_resp);

	if (status)
		return status;

	dp->eid = conn_resp.eid;

	return 0;
}

static void ath11k_dp_update_vdev_search(struct ath11k_vif *arvif)
{
	/* When v2_map_support is true: for STA mode, enable address
	 * search index, tcl uses ast_hash value in the descriptor.
	 * When v2_map_support is false: for STA mode, don't enable
	 * address search index.
	 */
	switch (arvif->vdev_type) {
	case WMI_VDEV_TYPE_STA:
		if (arvif->ar->ab->hw_params.htt_peer_map_v2) {
			arvif->hal_addr_search_flags = HAL_TX_ADDRX_EN;
			arvif->search_type = HAL_TX_ADDR_SEARCH_INDEX;
		} else {
			arvif->hal_addr_search_flags = HAL_TX_ADDRY_EN;
			arvif->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
		}
		break;
	case WMI_VDEV_TYPE_AP:
	case WMI_VDEV_TYPE_IBSS:
		arvif->hal_addr_search_flags = HAL_TX_ADDRX_EN;
		arvif->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
		break;
	case WMI_VDEV_TYPE_MONITOR:
	default:
		return;
	}
}

void ath11k_dp_vdev_tx_attach(struct ath11k *ar, struct ath11k_vif *arvif)
{
	arvif->tcl_metadata |= FIELD_PREP(HTT_TCL_META_DATA_TYPE, 1) |
			       FIELD_PREP(HTT_TCL_META_DATA_VDEV_ID,
					  arvif->vdev_id) |
			       FIELD_PREP(HTT_TCL_META_DATA_PDEV_ID,
					  ar->pdev->pdev_id);

	/* set HTT extension valid bit to 0 by default */
	arvif->tcl_metadata &= ~HTT_TCL_META_DATA_VALID_HTT;

	ath11k_dp_update_vdev_search(arvif);
}

static int ath11k_dp_tx_pending_cleanup(int buf_id, void *skb, void *ctx)
{
	struct ath11k_base *ab = (struct ath11k_base *)ctx;
	struct sk_buff *msdu = skb;

	dma_unmap_single(ab->dev, ATH11K_SKB_CB(msdu)->paddr, msdu->len,
			 DMA_TO_DEVICE);

	dev_kfree_skb_any(msdu);

	return 0;
}

void ath11k_dp_free(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	int i;

	ath11k_dp_link_desc_cleanup(ab, dp->link_desc_banks,
				    HAL_WBM_IDLE_LINK, &dp->wbm_idle_ring);

	ath11k_dp_srng_common_cleanup(ab);

	ath11k_dp_reo_cmd_list_cleanup(ab);

	for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
		spin_lock_bh(&dp->tx_ring[i].tx_idr_lock);
		idr_for_each(&dp->tx_ring[i].txbuf_idr,
			     ath11k_dp_tx_pending_cleanup, ab);
		idr_destroy(&dp->tx_ring[i].txbuf_idr);
		spin_unlock_bh(&dp->tx_ring[i].tx_idr_lock);
		kfree(dp->tx_ring[i].tx_status);
	}

	/* Deinit any SOC level resource */
}

int ath11k_dp_alloc(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	struct hal_srng *srng = NULL;
	size_t size = 0;
	u32 n_link_desc = 0;
	int ret;
	int i;

	dp->ab = ab;

	INIT_LIST_HEAD(&dp->reo_cmd_list);
	INIT_LIST_HEAD(&dp->reo_cmd_cache_flush_list);
	INIT_LIST_HEAD(&dp->dp_full_mon_mpdu_list);
	spin_lock_init(&dp->reo_cmd_lock);

	dp->reo_cmd_cache_flush_count = 0;

	ret = ath11k_wbm_idle_ring_setup(ab, &n_link_desc);
	if (ret) {
		ath11k_warn(ab, "failed to setup wbm_idle_ring: %d\n", ret);
		return ret;
	}

	srng = &ab->hal.srng_list[dp->wbm_idle_ring.ring_id];

	ret = ath11k_dp_link_desc_setup(ab, dp->link_desc_banks,
					HAL_WBM_IDLE_LINK, srng, n_link_desc);
	if (ret) {
		ath11k_warn(ab, "failed to setup link desc: %d\n", ret);
		return ret;
	}

	ret = ath11k_dp_srng_common_setup(ab);
	if (ret)
		goto fail_link_desc_cleanup;

	size = sizeof(struct hal_wbm_release_ring) * DP_TX_COMP_RING_SIZE;

	for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
		idr_init(&dp->tx_ring[i].txbuf_idr);
		spin_lock_init(&dp->tx_ring[i].tx_idr_lock);
		dp->tx_ring[i].tcl_data_ring_id = i;

		dp->tx_ring[i].tx_status_head = 0;
		dp->tx_ring[i].tx_status_tail = DP_TX_COMP_RING_SIZE - 1;
		dp->tx_ring[i].tx_status = kmalloc(size, GFP_KERNEL);
		if (!dp->tx_ring[i].tx_status) {
			ret = -ENOMEM;
			goto fail_cmn_srng_cleanup;
		}
	}

	for (i = 0; i < HAL_DSCP_TID_MAP_TBL_NUM_ENTRIES_MAX; i++)
		ath11k_hal_tx_set_dscp_tid_map(ab, i);

	/* Init any SOC level resource for DP */

	return 0;

fail_cmn_srng_cleanup:
	ath11k_dp_srng_common_cleanup(ab);

fail_link_desc_cleanup:
	ath11k_dp_link_desc_cleanup(ab, dp->link_desc_banks,
				    HAL_WBM_IDLE_LINK, &dp->wbm_idle_ring);

	return ret;
}

static void ath11k_dp_shadow_timer_handler(struct timer_list *t)
{
	struct ath11k_hp_update_timer *update_timer = from_timer(update_timer,
								 t, timer);
	struct ath11k_base *ab = update_timer->ab;
	struct hal_srng *srng = &ab->hal.srng_list[update_timer->ring_id];

	spin_lock_bh(&srng->lock);

	/* When the timer fires, the handler checks whether any new TX has
	 * happened. If it has, the timer is re-armed; the HP is updated and
	 * the timer stopped only when there were no TX operations during the
	 * timeout interval. The timer is started again when TX happens again.
	 */
	if (update_timer->timer_tx_num != update_timer->tx_num) {
		update_timer->timer_tx_num = update_timer->tx_num;
		mod_timer(&update_timer->timer, jiffies +
			  msecs_to_jiffies(update_timer->interval));
	} else {
		update_timer->started = false;
		ath11k_hal_srng_shadow_update_hp_tp(ab, srng);
	}

	spin_unlock_bh(&srng->lock);
}

void ath11k_dp_shadow_start_timer(struct ath11k_base *ab,
				  struct hal_srng *srng,
				  struct ath11k_hp_update_timer *update_timer)
{
	lockdep_assert_held(&srng->lock);

	if (!ab->hw_params.supports_shadow_regs)
		return;

	update_timer->tx_num++;

	if (update_timer->started)
		return;

	update_timer->started = true;
	update_timer->timer_tx_num = update_timer->tx_num;
	mod_timer(&update_timer->timer, jiffies +
		  msecs_to_jiffies(update_timer->interval));
}

void ath11k_dp_shadow_stop_timer(struct ath11k_base *ab,
				 struct ath11k_hp_update_timer *update_timer)
{
	if (!ab->hw_params.supports_shadow_regs)
		return;

	if (!update_timer->init)
		return;

	del_timer_sync(&update_timer->timer);
}

void ath11k_dp_shadow_init_timer(struct ath11k_base *ab,
				 struct ath11k_hp_update_timer *update_timer,
				 u32 interval, u32 ring_id)
{
	if (!ab->hw_params.supports_shadow_regs)
		return;

	update_timer->tx_num = 0;
	update_timer->timer_tx_num = 0;
	update_timer->ab = ab;
	update_timer->ring_id = ring_id;
	update_timer->interval = interval;
	update_timer->init = true;
	timer_setup(&update_timer->timer,
		    ath11k_dp_shadow_timer_handler, 0);
}