1 // SPDX-License-Identifier: BSD-3-Clause-Clear 2 /* 3 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. 4 */ 5 6 #include <linux/ieee80211.h> 7 #include "core.h" 8 #include "debug.h" 9 #include "hal_desc.h" 10 #include "hw.h" 11 #include "dp_rx.h" 12 #include "hal_rx.h" 13 #include "dp_tx.h" 14 #include "peer.h" 15 16 static u8 *ath11k_dp_rx_h_80211_hdr(struct hal_rx_desc *desc) 17 { 18 return desc->hdr_status; 19 } 20 21 static enum hal_encrypt_type ath11k_dp_rx_h_mpdu_start_enctype(struct hal_rx_desc *desc) 22 { 23 if (!(__le32_to_cpu(desc->mpdu_start.info1) & 24 RX_MPDU_START_INFO1_ENCRYPT_INFO_VALID)) 25 return HAL_ENCRYPT_TYPE_OPEN; 26 27 return FIELD_GET(RX_MPDU_START_INFO2_ENC_TYPE, 28 __le32_to_cpu(desc->mpdu_start.info2)); 29 } 30 31 static u8 ath11k_dp_rx_h_mpdu_start_decap_type(struct hal_rx_desc *desc) 32 { 33 return FIELD_GET(RX_MPDU_START_INFO5_DECAP_TYPE, 34 __le32_to_cpu(desc->mpdu_start.info5)); 35 } 36 37 static bool ath11k_dp_rx_h_attn_msdu_done(struct hal_rx_desc *desc) 38 { 39 return !!FIELD_GET(RX_ATTENTION_INFO2_MSDU_DONE, 40 __le32_to_cpu(desc->attention.info2)); 41 } 42 43 static bool ath11k_dp_rx_h_attn_first_mpdu(struct hal_rx_desc *desc) 44 { 45 return !!FIELD_GET(RX_ATTENTION_INFO1_FIRST_MPDU, 46 __le32_to_cpu(desc->attention.info1)); 47 } 48 49 static bool ath11k_dp_rx_h_attn_l4_cksum_fail(struct hal_rx_desc *desc) 50 { 51 return !!FIELD_GET(RX_ATTENTION_INFO1_TCP_UDP_CKSUM_FAIL, 52 __le32_to_cpu(desc->attention.info1)); 53 } 54 55 static bool ath11k_dp_rx_h_attn_ip_cksum_fail(struct hal_rx_desc *desc) 56 { 57 return !!FIELD_GET(RX_ATTENTION_INFO1_IP_CKSUM_FAIL, 58 __le32_to_cpu(desc->attention.info1)); 59 } 60 61 static bool ath11k_dp_rx_h_attn_is_decrypted(struct hal_rx_desc *desc) 62 { 63 return (FIELD_GET(RX_ATTENTION_INFO2_DCRYPT_STATUS_CODE, 64 __le32_to_cpu(desc->attention.info2)) == 65 RX_DESC_DECRYPT_STATUS_CODE_OK); 66 } 67 68 static u32 ath11k_dp_rx_h_attn_mpdu_err(struct hal_rx_desc *desc) 69 { 70 u32 info = __le32_to_cpu(desc->attention.info1); 71 u32 errmap = 0; 72 73 if (info & RX_ATTENTION_INFO1_FCS_ERR) 74 errmap |= DP_RX_MPDU_ERR_FCS; 75 76 if (info & RX_ATTENTION_INFO1_DECRYPT_ERR) 77 errmap |= DP_RX_MPDU_ERR_DECRYPT; 78 79 if (info & RX_ATTENTION_INFO1_TKIP_MIC_ERR) 80 errmap |= DP_RX_MPDU_ERR_TKIP_MIC; 81 82 if (info & RX_ATTENTION_INFO1_A_MSDU_ERROR) 83 errmap |= DP_RX_MPDU_ERR_AMSDU_ERR; 84 85 if (info & RX_ATTENTION_INFO1_OVERFLOW_ERR) 86 errmap |= DP_RX_MPDU_ERR_OVERFLOW; 87 88 if (info & RX_ATTENTION_INFO1_MSDU_LEN_ERR) 89 errmap |= DP_RX_MPDU_ERR_MSDU_LEN; 90 91 if (info & RX_ATTENTION_INFO1_MPDU_LEN_ERR) 92 errmap |= DP_RX_MPDU_ERR_MPDU_LEN; 93 94 return errmap; 95 } 96 97 static u16 ath11k_dp_rx_h_msdu_start_msdu_len(struct hal_rx_desc *desc) 98 { 99 return FIELD_GET(RX_MSDU_START_INFO1_MSDU_LENGTH, 100 __le32_to_cpu(desc->msdu_start.info1)); 101 } 102 103 static u8 ath11k_dp_rx_h_msdu_start_sgi(struct hal_rx_desc *desc) 104 { 105 return FIELD_GET(RX_MSDU_START_INFO3_SGI, 106 __le32_to_cpu(desc->msdu_start.info3)); 107 } 108 109 static u8 ath11k_dp_rx_h_msdu_start_rate_mcs(struct hal_rx_desc *desc) 110 { 111 return FIELD_GET(RX_MSDU_START_INFO3_RATE_MCS, 112 __le32_to_cpu(desc->msdu_start.info3)); 113 } 114 115 static u8 ath11k_dp_rx_h_msdu_start_rx_bw(struct hal_rx_desc *desc) 116 { 117 return FIELD_GET(RX_MSDU_START_INFO3_RECV_BW, 118 __le32_to_cpu(desc->msdu_start.info3)); 119 } 120 121 static u32 ath11k_dp_rx_h_msdu_start_freq(struct hal_rx_desc *desc) 122 { 123 return 
__le32_to_cpu(desc->msdu_start.phy_meta_data); 124 } 125 126 static u8 ath11k_dp_rx_h_msdu_start_pkt_type(struct hal_rx_desc *desc) 127 { 128 return FIELD_GET(RX_MSDU_START_INFO3_PKT_TYPE, 129 __le32_to_cpu(desc->msdu_start.info3)); 130 } 131 132 static u8 ath11k_dp_rx_h_msdu_start_nss(struct hal_rx_desc *desc) 133 { 134 u8 mimo_ss_bitmap = FIELD_GET(RX_MSDU_START_INFO3_MIMO_SS_BITMAP, 135 __le32_to_cpu(desc->msdu_start.info3)); 136 137 return hweight8(mimo_ss_bitmap); 138 } 139 140 static u8 ath11k_dp_rx_h_msdu_end_l3pad(struct hal_rx_desc *desc) 141 { 142 return FIELD_GET(RX_MSDU_END_INFO2_L3_HDR_PADDING, 143 __le32_to_cpu(desc->msdu_end.info2)); 144 } 145 146 static bool ath11k_dp_rx_h_msdu_end_first_msdu(struct hal_rx_desc *desc) 147 { 148 return !!FIELD_GET(RX_MSDU_END_INFO2_FIRST_MSDU, 149 __le32_to_cpu(desc->msdu_end.info2)); 150 } 151 152 static bool ath11k_dp_rx_h_msdu_end_last_msdu(struct hal_rx_desc *desc) 153 { 154 return !!FIELD_GET(RX_MSDU_END_INFO2_LAST_MSDU, 155 __le32_to_cpu(desc->msdu_end.info2)); 156 } 157 158 static void ath11k_dp_rx_desc_end_tlv_copy(struct hal_rx_desc *fdesc, 159 struct hal_rx_desc *ldesc) 160 { 161 memcpy((u8 *)&fdesc->msdu_end, (u8 *)&ldesc->msdu_end, 162 sizeof(struct rx_msdu_end)); 163 memcpy((u8 *)&fdesc->attention, (u8 *)&ldesc->attention, 164 sizeof(struct rx_attention)); 165 memcpy((u8 *)&fdesc->mpdu_end, (u8 *)&ldesc->mpdu_end, 166 sizeof(struct rx_mpdu_end)); 167 } 168 169 static u32 ath11k_dp_rxdesc_get_mpdulen_err(struct hal_rx_desc *rx_desc) 170 { 171 struct rx_attention *rx_attn; 172 173 rx_attn = &rx_desc->attention; 174 175 return FIELD_GET(RX_ATTENTION_INFO1_MPDU_LEN_ERR, 176 __le32_to_cpu(rx_attn->info1)); 177 } 178 179 static u32 ath11k_dp_rxdesc_get_decap_format(struct hal_rx_desc *rx_desc) 180 { 181 struct rx_msdu_start *rx_msdu_start; 182 183 rx_msdu_start = &rx_desc->msdu_start; 184 185 return FIELD_GET(RX_MSDU_START_INFO2_DECAP_FORMAT, 186 __le32_to_cpu(rx_msdu_start->info2)); 187 } 188 189 static u8 *ath11k_dp_rxdesc_get_80211hdr(struct hal_rx_desc *rx_desc) 190 { 191 u8 *rx_pkt_hdr; 192 193 rx_pkt_hdr = &rx_desc->msdu_payload[0]; 194 195 return rx_pkt_hdr; 196 } 197 198 static bool ath11k_dp_rxdesc_mpdu_valid(struct hal_rx_desc *rx_desc) 199 { 200 u32 tlv_tag; 201 202 tlv_tag = FIELD_GET(HAL_TLV_HDR_TAG, 203 __le32_to_cpu(rx_desc->mpdu_start_tag)); 204 205 return tlv_tag == HAL_RX_MPDU_START ? 
true : false; 206 } 207 208 static u32 ath11k_dp_rxdesc_get_ppduid(struct hal_rx_desc *rx_desc) 209 { 210 return __le16_to_cpu(rx_desc->mpdu_start.phy_ppdu_id); 211 } 212 213 /* Returns number of Rx buffers replenished */ 214 int ath11k_dp_rxbufs_replenish(struct ath11k_base *ab, int mac_id, 215 struct dp_rxdma_ring *rx_ring, 216 int req_entries, 217 enum hal_rx_buf_return_buf_manager mgr, 218 gfp_t gfp) 219 { 220 struct hal_srng *srng; 221 u32 *desc; 222 struct sk_buff *skb; 223 int num_free; 224 int num_remain; 225 int buf_id; 226 u32 cookie; 227 dma_addr_t paddr; 228 229 req_entries = min(req_entries, rx_ring->bufs_max); 230 231 srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id]; 232 233 spin_lock_bh(&srng->lock); 234 235 ath11k_hal_srng_access_begin(ab, srng); 236 237 num_free = ath11k_hal_srng_src_num_free(ab, srng, true); 238 if (!req_entries && (num_free > (rx_ring->bufs_max * 3) / 4)) 239 req_entries = num_free; 240 241 req_entries = min(num_free, req_entries); 242 num_remain = req_entries; 243 244 while (num_remain > 0) { 245 skb = dev_alloc_skb(DP_RX_BUFFER_SIZE + 246 DP_RX_BUFFER_ALIGN_SIZE); 247 if (!skb) 248 break; 249 250 if (!IS_ALIGNED((unsigned long)skb->data, 251 DP_RX_BUFFER_ALIGN_SIZE)) { 252 skb_pull(skb, 253 PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) - 254 skb->data); 255 } 256 257 paddr = dma_map_single(ab->dev, skb->data, 258 skb->len + skb_tailroom(skb), 259 DMA_FROM_DEVICE); 260 if (dma_mapping_error(ab->dev, paddr)) 261 goto fail_free_skb; 262 263 spin_lock_bh(&rx_ring->idr_lock); 264 buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0, 265 rx_ring->bufs_max * 3, gfp); 266 spin_unlock_bh(&rx_ring->idr_lock); 267 if (buf_id < 0) 268 goto fail_dma_unmap; 269 270 desc = ath11k_hal_srng_src_get_next_entry(ab, srng); 271 if (!desc) 272 goto fail_idr_remove; 273 274 ATH11K_SKB_RXCB(skb)->paddr = paddr; 275 276 cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) | 277 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id); 278 279 num_remain--; 280 281 ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr); 282 } 283 284 ath11k_hal_srng_access_end(ab, srng); 285 286 spin_unlock_bh(&srng->lock); 287 288 return req_entries - num_remain; 289 290 fail_idr_remove: 291 spin_lock_bh(&rx_ring->idr_lock); 292 idr_remove(&rx_ring->bufs_idr, buf_id); 293 spin_unlock_bh(&rx_ring->idr_lock); 294 fail_dma_unmap: 295 dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb), 296 DMA_FROM_DEVICE); 297 fail_free_skb: 298 dev_kfree_skb_any(skb); 299 300 ath11k_hal_srng_access_end(ab, srng); 301 302 spin_unlock_bh(&srng->lock); 303 304 return req_entries - num_remain; 305 } 306 307 static int ath11k_dp_rxdma_buf_ring_free(struct ath11k *ar, 308 struct dp_rxdma_ring *rx_ring) 309 { 310 struct ath11k_pdev_dp *dp = &ar->dp; 311 struct sk_buff *skb; 312 int buf_id; 313 314 spin_lock_bh(&rx_ring->idr_lock); 315 idr_for_each_entry(&rx_ring->bufs_idr, skb, buf_id) { 316 idr_remove(&rx_ring->bufs_idr, buf_id); 317 /* TODO: Understand where internal driver does this dma_unmap of 318 * of rxdma_buffer. 
	 */
		dma_unmap_single(ar->ab->dev, ATH11K_SKB_RXCB(skb)->paddr,
				 skb->len + skb_tailroom(skb), DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
	}

	idr_destroy(&rx_ring->bufs_idr);
	spin_unlock_bh(&rx_ring->idr_lock);

	rx_ring = &dp->rx_mon_status_refill_ring;

	spin_lock_bh(&rx_ring->idr_lock);
	idr_for_each_entry(&rx_ring->bufs_idr, skb, buf_id) {
		idr_remove(&rx_ring->bufs_idr, buf_id);
		/* XXX: Understand where the internal driver does this dma_unmap
		 * of the rxdma_buffer.
		 */
		dma_unmap_single(ar->ab->dev, ATH11K_SKB_RXCB(skb)->paddr,
				 skb->len + skb_tailroom(skb), DMA_BIDIRECTIONAL);
		dev_kfree_skb_any(skb);
	}

	idr_destroy(&rx_ring->bufs_idr);
	spin_unlock_bh(&rx_ring->idr_lock);
	return 0;
}

static int ath11k_dp_rxdma_pdev_buf_free(struct ath11k *ar)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;

	ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);

	rx_ring = &dp->rxdma_mon_buf_ring;
	ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);

	rx_ring = &dp->rx_mon_status_refill_ring;
	ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);
	return 0;
}

static int ath11k_dp_rxdma_ring_buf_setup(struct ath11k *ar,
					  struct dp_rxdma_ring *rx_ring,
					  u32 ringtype)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	int num_entries;

	num_entries = rx_ring->refill_buf_ring.size /
		      ath11k_hal_srng_get_entrysize(ringtype);

	rx_ring->bufs_max = num_entries;
	ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id, rx_ring, num_entries,
				   HAL_RX_BUF_RBM_SW3_BM, GFP_KERNEL);
	return 0;
}

static int ath11k_dp_rxdma_pdev_buf_setup(struct ath11k *ar)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;

	ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_BUF);

	rx_ring = &dp->rxdma_mon_buf_ring;
	ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_MONITOR_BUF);

	rx_ring = &dp->rx_mon_status_refill_ring;
	ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_MONITOR_STATUS);

	return 0;
}

static void ath11k_dp_rx_pdev_srng_free(struct ath11k *ar)
{
	struct ath11k_pdev_dp *dp = &ar->dp;

	ath11k_dp_srng_cleanup(ar->ab, &dp->rx_refill_buf_ring.refill_buf_ring);
	ath11k_dp_srng_cleanup(ar->ab, &dp->rxdma_err_dst_ring);
	ath11k_dp_srng_cleanup(ar->ab, &dp->rx_mon_status_refill_ring.refill_buf_ring);
	ath11k_dp_srng_cleanup(ar->ab, &dp->rxdma_mon_buf_ring.refill_buf_ring);
}

void ath11k_dp_pdev_reo_cleanup(struct ath11k_base *ab)
{
	struct ath11k_pdev_dp *dp;
	struct ath11k *ar;
	int i;

	for (i = 0; i < ab->num_radios; i++) {
		ar = ab->pdevs[i].ar;
		dp = &ar->dp;
		ath11k_dp_srng_cleanup(ab, &dp->reo_dst_ring);
	}
}

int ath11k_dp_pdev_reo_setup(struct ath11k_base *ab)
{
	struct ath11k *ar;
	struct ath11k_pdev_dp *dp;
	int ret;
	int i;

	for (i = 0; i < ab->num_radios; i++) {
		ar = ab->pdevs[i].ar;
		dp = &ar->dp;
		ret = ath11k_dp_srng_setup(ab, &dp->reo_dst_ring, HAL_REO_DST,
					   dp->mac_id, dp->mac_id,
					   DP_REO_DST_RING_SIZE);
		if (ret) {
			ath11k_warn(ar->ab, "failed to setup reo_dst_ring\n");
			goto err_reo_cleanup;
		}
	}

	return 0;

err_reo_cleanup:
	ath11k_dp_pdev_reo_cleanup(ab);

	return ret;
}

static int ath11k_dp_rx_pdev_srng_alloc(struct ath11k *ar)
{
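	/* Allocate the per-pdev rx SRNGs: the RXDMA refill buffer ring, the
	 * RXDMA error destination ring, and the monitor status/buf/dst/desc
	 * rings used for monitor mode.
	 */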
struct ath11k_pdev_dp *dp = &ar->dp; 446 struct dp_srng *srng = NULL; 447 int ret; 448 449 ret = ath11k_dp_srng_setup(ar->ab, 450 &dp->rx_refill_buf_ring.refill_buf_ring, 451 HAL_RXDMA_BUF, 0, 452 dp->mac_id, DP_RXDMA_BUF_RING_SIZE); 453 if (ret) { 454 ath11k_warn(ar->ab, "failed to setup rx_refill_buf_ring\n"); 455 return ret; 456 } 457 458 ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_err_dst_ring, 459 HAL_RXDMA_DST, 0, dp->mac_id, 460 DP_RXDMA_ERR_DST_RING_SIZE); 461 if (ret) { 462 ath11k_warn(ar->ab, "failed to setup rxdma_err_dst_ring\n"); 463 return ret; 464 } 465 466 srng = &dp->rx_mon_status_refill_ring.refill_buf_ring; 467 ret = ath11k_dp_srng_setup(ar->ab, 468 srng, 469 HAL_RXDMA_MONITOR_STATUS, 0, dp->mac_id, 470 DP_RXDMA_MON_STATUS_RING_SIZE); 471 if (ret) { 472 ath11k_warn(ar->ab, 473 "failed to setup rx_mon_status_refill_ring\n"); 474 return ret; 475 } 476 ret = ath11k_dp_srng_setup(ar->ab, 477 &dp->rxdma_mon_buf_ring.refill_buf_ring, 478 HAL_RXDMA_MONITOR_BUF, 0, dp->mac_id, 479 DP_RXDMA_MONITOR_BUF_RING_SIZE); 480 if (ret) { 481 ath11k_warn(ar->ab, 482 "failed to setup HAL_RXDMA_MONITOR_BUF\n"); 483 return ret; 484 } 485 486 ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_mon_dst_ring, 487 HAL_RXDMA_MONITOR_DST, 0, dp->mac_id, 488 DP_RXDMA_MONITOR_DST_RING_SIZE); 489 if (ret) { 490 ath11k_warn(ar->ab, 491 "failed to setup HAL_RXDMA_MONITOR_DST\n"); 492 return ret; 493 } 494 495 ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_mon_desc_ring, 496 HAL_RXDMA_MONITOR_DESC, 0, dp->mac_id, 497 DP_RXDMA_MONITOR_DESC_RING_SIZE); 498 if (ret) { 499 ath11k_warn(ar->ab, 500 "failed to setup HAL_RXDMA_MONITOR_DESC\n"); 501 return ret; 502 } 503 504 return 0; 505 } 506 507 void ath11k_dp_reo_cmd_list_cleanup(struct ath11k_base *ab) 508 { 509 struct ath11k_dp *dp = &ab->dp; 510 struct dp_reo_cmd *cmd, *tmp; 511 struct dp_reo_cache_flush_elem *cmd_cache, *tmp_cache; 512 513 spin_lock_bh(&dp->reo_cmd_lock); 514 list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) { 515 list_del(&cmd->list); 516 dma_unmap_single(ab->dev, cmd->data.paddr, 517 cmd->data.size, DMA_BIDIRECTIONAL); 518 kfree(cmd->data.vaddr); 519 kfree(cmd); 520 } 521 522 list_for_each_entry_safe(cmd_cache, tmp_cache, 523 &dp->reo_cmd_cache_flush_list, list) { 524 list_del(&cmd_cache->list); 525 dma_unmap_single(ab->dev, cmd_cache->data.paddr, 526 cmd_cache->data.size, DMA_BIDIRECTIONAL); 527 kfree(cmd_cache->data.vaddr); 528 kfree(cmd_cache); 529 } 530 spin_unlock_bh(&dp->reo_cmd_lock); 531 } 532 533 static void ath11k_dp_reo_cmd_free(struct ath11k_dp *dp, void *ctx, 534 enum hal_reo_cmd_status status) 535 { 536 struct dp_rx_tid *rx_tid = ctx; 537 538 if (status != HAL_REO_CMD_SUCCESS) 539 ath11k_warn(dp->ab, "failed to flush rx tid hw desc, tid %d status %d\n", 540 rx_tid->tid, status); 541 542 dma_unmap_single(dp->ab->dev, rx_tid->paddr, rx_tid->size, 543 DMA_BIDIRECTIONAL); 544 kfree(rx_tid->vaddr); 545 } 546 547 static void ath11k_dp_reo_cache_flush(struct ath11k_base *ab, 548 struct dp_rx_tid *rx_tid) 549 { 550 struct ath11k_hal_reo_cmd cmd = {0}; 551 unsigned long tot_desc_sz, desc_sz; 552 int ret; 553 554 tot_desc_sz = rx_tid->size; 555 desc_sz = ath11k_hal_reo_qdesc_size(0, HAL_DESC_REO_NON_QOS_TID); 556 557 while (tot_desc_sz > desc_sz) { 558 tot_desc_sz -= desc_sz; 559 cmd.addr_lo = lower_32_bits(rx_tid->paddr + tot_desc_sz); 560 cmd.addr_hi = upper_32_bits(rx_tid->paddr); 561 ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid, 562 HAL_REO_CMD_FLUSH_CACHE, &cmd, 563 NULL); 564 if (ret) 565 ath11k_warn(ab, 566 "failed to send 
HAL_REO_CMD_FLUSH_CACHE, tid %d (%d)\n", 567 rx_tid->tid, ret); 568 } 569 570 memset(&cmd, 0, sizeof(cmd)); 571 cmd.addr_lo = lower_32_bits(rx_tid->paddr); 572 cmd.addr_hi = upper_32_bits(rx_tid->paddr); 573 cmd.flag |= HAL_REO_CMD_FLG_NEED_STATUS; 574 ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid, 575 HAL_REO_CMD_FLUSH_CACHE, 576 &cmd, ath11k_dp_reo_cmd_free); 577 if (ret) { 578 ath11k_err(ab, "failed to send HAL_REO_CMD_FLUSH_CACHE cmd, tid %d (%d)\n", 579 rx_tid->tid, ret); 580 dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size, 581 DMA_BIDIRECTIONAL); 582 kfree(rx_tid->vaddr); 583 } 584 } 585 586 static void ath11k_dp_rx_tid_del_func(struct ath11k_dp *dp, void *ctx, 587 enum hal_reo_cmd_status status) 588 { 589 struct ath11k_base *ab = dp->ab; 590 struct dp_rx_tid *rx_tid = ctx; 591 struct dp_reo_cache_flush_elem *elem, *tmp; 592 593 if (status == HAL_REO_CMD_DRAIN) { 594 goto free_desc; 595 } else if (status != HAL_REO_CMD_SUCCESS) { 596 /* Shouldn't happen! Cleanup in case of other failure? */ 597 ath11k_warn(ab, "failed to delete rx tid %d hw descriptor %d\n", 598 rx_tid->tid, status); 599 return; 600 } 601 602 elem = kzalloc(sizeof(*elem), GFP_ATOMIC); 603 if (!elem) 604 goto free_desc; 605 606 elem->ts = jiffies; 607 memcpy(&elem->data, rx_tid, sizeof(*rx_tid)); 608 609 spin_lock_bh(&dp->reo_cmd_lock); 610 list_add_tail(&elem->list, &dp->reo_cmd_cache_flush_list); 611 spin_unlock_bh(&dp->reo_cmd_lock); 612 613 /* Flush and invalidate aged REO desc from HW cache */ 614 spin_lock_bh(&dp->reo_cmd_lock); 615 list_for_each_entry_safe(elem, tmp, &dp->reo_cmd_cache_flush_list, 616 list) { 617 if (time_after(jiffies, elem->ts + 618 msecs_to_jiffies(DP_REO_DESC_FREE_TIMEOUT_MS))) { 619 list_del(&elem->list); 620 spin_unlock_bh(&dp->reo_cmd_lock); 621 622 ath11k_dp_reo_cache_flush(ab, &elem->data); 623 kfree(elem); 624 spin_lock_bh(&dp->reo_cmd_lock); 625 } 626 } 627 spin_unlock_bh(&dp->reo_cmd_lock); 628 629 return; 630 free_desc: 631 dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size, 632 DMA_BIDIRECTIONAL); 633 kfree(rx_tid->vaddr); 634 } 635 636 static void ath11k_peer_rx_tid_delete(struct ath11k *ar, 637 struct ath11k_peer *peer, u8 tid) 638 { 639 struct ath11k_hal_reo_cmd cmd = {0}; 640 struct dp_rx_tid *rx_tid = &peer->rx_tid[tid]; 641 int ret; 642 643 if (!rx_tid->active) 644 return; 645 646 cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS; 647 cmd.addr_lo = lower_32_bits(rx_tid->paddr); 648 cmd.addr_hi = upper_32_bits(rx_tid->paddr); 649 cmd.upd0 |= HAL_REO_CMD_UPD0_VLD; 650 ret = ath11k_dp_tx_send_reo_cmd(ar->ab, rx_tid, 651 HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd, 652 ath11k_dp_rx_tid_del_func); 653 if (ret) { 654 ath11k_err(ar->ab, "failed to send HAL_REO_CMD_UPDATE_RX_QUEUE cmd, tid %d (%d)\n", 655 tid, ret); 656 dma_unmap_single(ar->ab->dev, rx_tid->paddr, rx_tid->size, 657 DMA_BIDIRECTIONAL); 658 kfree(rx_tid->vaddr); 659 } 660 661 rx_tid->active = false; 662 } 663 664 void ath11k_peer_rx_tid_cleanup(struct ath11k *ar, struct ath11k_peer *peer) 665 { 666 int i; 667 668 for (i = 0; i <= IEEE80211_NUM_TIDS; i++) 669 ath11k_peer_rx_tid_delete(ar, peer, i); 670 } 671 672 static int ath11k_peer_rx_tid_reo_update(struct ath11k *ar, 673 struct ath11k_peer *peer, 674 struct dp_rx_tid *rx_tid, 675 u32 ba_win_sz, u16 ssn, 676 bool update_ssn) 677 { 678 struct ath11k_hal_reo_cmd cmd = {0}; 679 int ret; 680 681 cmd.addr_lo = lower_32_bits(rx_tid->paddr); 682 cmd.addr_hi = upper_32_bits(rx_tid->paddr); 683 cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS; 684 cmd.upd0 = HAL_REO_CMD_UPD0_BA_WINDOW_SIZE; 685 
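	/* Resize the BA window of this REO queue; the SSN is refreshed below
	 * only when the caller requested it via update_ssn.
	 */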
	cmd.ba_window_size = ba_win_sz;

	if (update_ssn) {
		cmd.upd0 |= HAL_REO_CMD_UPD0_SSN;
		cmd.upd2 = FIELD_PREP(HAL_REO_CMD_UPD2_SSN, ssn);
	}

	ret = ath11k_dp_tx_send_reo_cmd(ar->ab, rx_tid,
					HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
					NULL);
	if (ret) {
		ath11k_warn(ar->ab, "failed to update rx tid queue, tid %d (%d)\n",
			    rx_tid->tid, ret);
		return ret;
	}

	rx_tid->ba_win_sz = ba_win_sz;

	return 0;
}

static void ath11k_dp_rx_tid_mem_free(struct ath11k_base *ab,
				      const u8 *peer_mac, int vdev_id, u8 tid)
{
	struct ath11k_peer *peer;
	struct dp_rx_tid *rx_tid;

	spin_lock_bh(&ab->base_lock);

	peer = ath11k_peer_find(ab, vdev_id, peer_mac);
	if (!peer) {
		ath11k_warn(ab, "failed to find the peer to free up rx tid mem\n");
		goto unlock_exit;
	}

	rx_tid = &peer->rx_tid[tid];
	if (!rx_tid->active)
		goto unlock_exit;

	dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
			 DMA_BIDIRECTIONAL);
	kfree(rx_tid->vaddr);

	rx_tid->active = false;

unlock_exit:
	spin_unlock_bh(&ab->base_lock);
}

int ath11k_peer_rx_tid_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id,
			     u8 tid, u32 ba_win_sz, u16 ssn)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_peer *peer;
	struct dp_rx_tid *rx_tid;
	u32 hw_desc_sz;
	u32 *addr_aligned;
	void *vaddr;
	dma_addr_t paddr;
	int ret;

	spin_lock_bh(&ab->base_lock);

	peer = ath11k_peer_find(ab, vdev_id, peer_mac);
	if (!peer) {
		ath11k_warn(ab, "failed to find the peer to set up rx tid\n");
		spin_unlock_bh(&ab->base_lock);
		return -ENOENT;
	}

	rx_tid = &peer->rx_tid[tid];
	/* Update the tid queue if it is already set up */
	if (rx_tid->active) {
		paddr = rx_tid->paddr;
		ret = ath11k_peer_rx_tid_reo_update(ar, peer, rx_tid,
						    ba_win_sz, ssn, true);
		spin_unlock_bh(&ab->base_lock);
		if (ret) {
			ath11k_warn(ab, "failed to update reo for rx tid %d\n", tid);
			return ret;
		}

		ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id,
							     peer_mac, paddr,
							     tid, 1, ba_win_sz);
		if (ret)
			ath11k_warn(ab, "failed to send wmi command to update rx reorder queue, tid %d (%d)\n",
				    tid, ret);
		return ret;
	}

	rx_tid->tid = tid;

	rx_tid->ba_win_sz = ba_win_sz;

	/* TODO: Optimize the memory allocation for qos tid based on
	 * the actual BA window size in the REO tid update path.
	 */
	if (tid == HAL_DESC_REO_NON_QOS_TID)
		hw_desc_sz = ath11k_hal_reo_qdesc_size(ba_win_sz, tid);
	else
		hw_desc_sz = ath11k_hal_reo_qdesc_size(DP_BA_WIN_SZ_MAX, tid);

	vaddr = kzalloc(hw_desc_sz + HAL_LINK_DESC_ALIGN - 1, GFP_KERNEL);
	if (!vaddr) {
		spin_unlock_bh(&ab->base_lock);
		return -ENOMEM;
	}

	addr_aligned = PTR_ALIGN(vaddr, HAL_LINK_DESC_ALIGN);

	ath11k_hal_reo_qdesc_setup(addr_aligned, tid, ba_win_sz, ssn);

	paddr = dma_map_single(ab->dev, addr_aligned, hw_desc_sz,
			       DMA_BIDIRECTIONAL);

	ret = dma_mapping_error(ab->dev, paddr);
	if (ret) {
		spin_unlock_bh(&ab->base_lock);
		goto err_mem_free;
	}

	rx_tid->vaddr = vaddr;
	rx_tid->paddr = paddr;
	rx_tid->size = hw_desc_sz;
	rx_tid->active = true;

	spin_unlock_bh(&ab->base_lock);

	ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, peer_mac,
						     paddr, tid, 1, ba_win_sz);
	if (ret) {
		ath11k_warn(ar->ab, "failed to setup rx reorder queue, tid %d (%d)\n",
			    tid, ret);
		ath11k_dp_rx_tid_mem_free(ab, peer_mac, vdev_id, tid);
	}

	return ret;

err_mem_free:
	kfree(vaddr);

	return ret;
}

int ath11k_dp_rx_ampdu_start(struct ath11k *ar,
			     struct ieee80211_ampdu_params *params)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_sta *arsta = (void *)params->sta->drv_priv;
	int vdev_id = arsta->arvif->vdev_id;
	int ret;

	ret = ath11k_peer_rx_tid_setup(ar, params->sta->addr, vdev_id,
				       params->tid, params->buf_size,
				       params->ssn);
	if (ret)
		ath11k_warn(ab, "failed to setup rx tid %d: %d\n",
			    params->tid, ret);

	return ret;
}

int ath11k_dp_rx_ampdu_stop(struct ath11k *ar,
			    struct ieee80211_ampdu_params *params)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_peer *peer;
	struct ath11k_sta *arsta = (void *)params->sta->drv_priv;
	int vdev_id = arsta->arvif->vdev_id;
	dma_addr_t paddr;
	bool active;
	int ret;

	spin_lock_bh(&ab->base_lock);

	peer = ath11k_peer_find(ab, vdev_id, params->sta->addr);
	if (!peer) {
		ath11k_warn(ab, "failed to find the peer to stop rx aggregation\n");
		spin_unlock_bh(&ab->base_lock);
		return -ENOENT;
	}

	paddr = peer->rx_tid[params->tid].paddr;
	active = peer->rx_tid[params->tid].active;

	if (!active) {
		spin_unlock_bh(&ab->base_lock);
		return 0;
	}

	ret = ath11k_peer_rx_tid_reo_update(ar, peer, &peer->rx_tid[params->tid],
					    1, 0, false);
	spin_unlock_bh(&ab->base_lock);
	if (ret) {
		ath11k_warn(ab, "failed to update reo for rx tid %d: %d\n",
			    params->tid, ret);
		return ret;
	}

	ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id,
						     params->sta->addr, paddr,
						     params->tid, 1, 1);
	if (ret)
		ath11k_warn(ab, "failed to send wmi to delete rx tid %d: %d\n",
			    params->tid, ret);

	return ret;
}

static int ath11k_get_ppdu_user_index(struct htt_ppdu_stats *ppdu_stats,
				      u16 peer_id)
{
	int i;

	for (i = 0; i < HTT_PPDU_STATS_MAX_USERS - 1; i++) {
		if (ppdu_stats->user_stats[i].is_valid_peer_id) {
			if (peer_id == ppdu_stats->user_stats[i].peer_id)
				return i;
		} else {
			return i;
		}
	}

	return -EINVAL;
}

static int ath11k_htt_tlv_ppdu_stats_parse(struct ath11k_base *ab,
					   u16 tag, u16 len, const void *ptr,
					   void *data)
{
	struct htt_ppdu_stats_info *ppdu_info;
	struct htt_ppdu_user_stats *user_stats;
	int cur_user;
	u16 peer_id;

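	/* The opaque 'data' cookie is the htt_ppdu_stats_info descriptor that
	 * ath11k_htt_pull_ppdu_stats() passed to ath11k_dp_htt_tlv_iter(), so
	 * each TLV parsed here accumulates into that PPDU's stats.
	 */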
ppdu_info = (struct htt_ppdu_stats_info *)data; 920 921 switch (tag) { 922 case HTT_PPDU_STATS_TAG_COMMON: 923 if (len < sizeof(struct htt_ppdu_stats_common)) { 924 ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n", 925 len, tag); 926 return -EINVAL; 927 } 928 memcpy((void *)&ppdu_info->ppdu_stats.common, ptr, 929 sizeof(struct htt_ppdu_stats_common)); 930 break; 931 case HTT_PPDU_STATS_TAG_USR_RATE: 932 if (len < sizeof(struct htt_ppdu_stats_user_rate)) { 933 ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n", 934 len, tag); 935 return -EINVAL; 936 } 937 938 peer_id = ((struct htt_ppdu_stats_user_rate *)ptr)->sw_peer_id; 939 cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats, 940 peer_id); 941 if (cur_user < 0) 942 return -EINVAL; 943 user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user]; 944 user_stats->peer_id = peer_id; 945 user_stats->is_valid_peer_id = true; 946 memcpy((void *)&user_stats->rate, ptr, 947 sizeof(struct htt_ppdu_stats_user_rate)); 948 user_stats->tlv_flags |= BIT(tag); 949 break; 950 case HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON: 951 if (len < sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn)) { 952 ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n", 953 len, tag); 954 return -EINVAL; 955 } 956 957 peer_id = ((struct htt_ppdu_stats_usr_cmpltn_cmn *)ptr)->sw_peer_id; 958 cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats, 959 peer_id); 960 if (cur_user < 0) 961 return -EINVAL; 962 user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user]; 963 user_stats->peer_id = peer_id; 964 user_stats->is_valid_peer_id = true; 965 memcpy((void *)&user_stats->cmpltn_cmn, ptr, 966 sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn)); 967 user_stats->tlv_flags |= BIT(tag); 968 break; 969 case HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS: 970 if (len < 971 sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status)) { 972 ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n", 973 len, tag); 974 return -EINVAL; 975 } 976 977 peer_id = 978 ((struct htt_ppdu_stats_usr_cmpltn_ack_ba_status *)ptr)->sw_peer_id; 979 cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats, 980 peer_id); 981 if (cur_user < 0) 982 return -EINVAL; 983 user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user]; 984 user_stats->peer_id = peer_id; 985 user_stats->is_valid_peer_id = true; 986 memcpy((void *)&user_stats->ack_ba, ptr, 987 sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status)); 988 user_stats->tlv_flags |= BIT(tag); 989 break; 990 } 991 return 0; 992 } 993 994 int ath11k_dp_htt_tlv_iter(struct ath11k_base *ab, const void *ptr, size_t len, 995 int (*iter)(struct ath11k_base *ar, u16 tag, u16 len, 996 const void *ptr, void *data), 997 void *data) 998 { 999 const struct htt_tlv *tlv; 1000 const void *begin = ptr; 1001 u16 tlv_tag, tlv_len; 1002 int ret = -EINVAL; 1003 1004 while (len > 0) { 1005 if (len < sizeof(*tlv)) { 1006 ath11k_err(ab, "htt tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n", 1007 ptr - begin, len, sizeof(*tlv)); 1008 return -EINVAL; 1009 } 1010 tlv = (struct htt_tlv *)ptr; 1011 tlv_tag = FIELD_GET(HTT_TLV_TAG, tlv->header); 1012 tlv_len = FIELD_GET(HTT_TLV_LEN, tlv->header); 1013 ptr += sizeof(*tlv); 1014 len -= sizeof(*tlv); 1015 1016 if (tlv_len > len) { 1017 ath11k_err(ab, "htt tlv parse failure of tag %hhu at byte %zd (%zu bytes left, %hhu expected)\n", 1018 tlv_tag, ptr - begin, len, tlv_len); 1019 return -EINVAL; 1020 } 1021 ret = iter(ab, tlv_tag, tlv_len, ptr, data); 1022 if (ret == -ENOMEM) 1023 return ret; 1024 1025 ptr += tlv_len; 1026 len -= tlv_len; 
1027 } 1028 return 0; 1029 } 1030 1031 static u32 ath11k_bw_to_mac80211_bwflags(u8 bw) 1032 { 1033 u32 bwflags = 0; 1034 1035 switch (bw) { 1036 case ATH11K_BW_40: 1037 bwflags = IEEE80211_TX_RC_40_MHZ_WIDTH; 1038 break; 1039 case ATH11K_BW_80: 1040 bwflags = IEEE80211_TX_RC_80_MHZ_WIDTH; 1041 break; 1042 case ATH11K_BW_160: 1043 bwflags = IEEE80211_TX_RC_160_MHZ_WIDTH; 1044 break; 1045 } 1046 1047 return bwflags; 1048 } 1049 1050 static void 1051 ath11k_update_per_peer_tx_stats(struct ath11k *ar, 1052 struct htt_ppdu_stats *ppdu_stats, u8 user) 1053 { 1054 struct ath11k_base *ab = ar->ab; 1055 struct ath11k_peer *peer; 1056 struct ieee80211_sta *sta; 1057 struct ath11k_sta *arsta; 1058 struct htt_ppdu_stats_user_rate *user_rate; 1059 struct ieee80211_chanctx_conf *conf = NULL; 1060 struct ath11k_per_peer_tx_stats *peer_stats = &ar->peer_tx_stats; 1061 struct htt_ppdu_user_stats *usr_stats = &ppdu_stats->user_stats[user]; 1062 struct htt_ppdu_stats_common *common = &ppdu_stats->common; 1063 int ret; 1064 u8 flags, mcs, nss, bw, sgi, rate_idx = 0; 1065 u32 succ_bytes = 0; 1066 u16 rate = 0, succ_pkts = 0; 1067 u32 tx_duration = 0; 1068 u8 tid = HTT_PPDU_STATS_NON_QOS_TID; 1069 bool is_ampdu = false; 1070 1071 if (!usr_stats) 1072 return; 1073 1074 if (!(usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_RATE))) 1075 return; 1076 1077 if (usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON)) 1078 is_ampdu = 1079 HTT_USR_CMPLTN_IS_AMPDU(usr_stats->cmpltn_cmn.flags); 1080 1081 if (usr_stats->tlv_flags & 1082 BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS)) { 1083 succ_bytes = usr_stats->ack_ba.success_bytes; 1084 succ_pkts = FIELD_GET(HTT_PPDU_STATS_ACK_BA_INFO_NUM_MSDU_M, 1085 usr_stats->ack_ba.info); 1086 tid = FIELD_GET(HTT_PPDU_STATS_ACK_BA_INFO_TID_NUM, 1087 usr_stats->ack_ba.info); 1088 } 1089 1090 if (common->fes_duration_us) 1091 tx_duration = common->fes_duration_us; 1092 1093 user_rate = &usr_stats->rate; 1094 flags = HTT_USR_RATE_PREAMBLE(user_rate->rate_flags); 1095 bw = HTT_USR_RATE_BW(user_rate->rate_flags) - 2; 1096 nss = HTT_USR_RATE_NSS(user_rate->rate_flags) + 1; 1097 mcs = HTT_USR_RATE_MCS(user_rate->rate_flags); 1098 sgi = HTT_USR_RATE_GI(user_rate->rate_flags); 1099 1100 /* Note: If host configured fixed rates and in some other special 1101 * cases, the broadcast/management frames are sent in different rates. 1102 * Firmware rate's control to be skipped for this? 
1103 */ 1104 1105 if (flags == WMI_RATE_PREAMBLE_VHT && mcs > 9) { 1106 ath11k_warn(ab, "Invalid VHT mcs %hhd peer stats", mcs); 1107 return; 1108 } 1109 1110 if (flags == WMI_RATE_PREAMBLE_HT && (mcs > 7 || nss < 1)) { 1111 ath11k_warn(ab, "Invalid HT mcs %hhd nss %hhd peer stats", 1112 mcs, nss); 1113 return; 1114 } 1115 1116 if (flags == WMI_RATE_PREAMBLE_CCK || flags == WMI_RATE_PREAMBLE_OFDM) { 1117 ret = ath11k_mac_hw_ratecode_to_legacy_rate(mcs, 1118 flags, 1119 &rate_idx, 1120 &rate); 1121 if (ret < 0) 1122 return; 1123 } 1124 1125 rcu_read_lock(); 1126 spin_lock_bh(&ab->base_lock); 1127 peer = ath11k_peer_find_by_id(ab, usr_stats->peer_id); 1128 1129 if (!peer || !peer->sta) { 1130 spin_unlock_bh(&ab->base_lock); 1131 rcu_read_unlock(); 1132 return; 1133 } 1134 1135 sta = peer->sta; 1136 arsta = (struct ath11k_sta *)sta->drv_priv; 1137 1138 memset(&arsta->txrate, 0, sizeof(arsta->txrate)); 1139 memset(&arsta->tx_info.status, 0, sizeof(arsta->tx_info.status)); 1140 1141 switch (flags) { 1142 case WMI_RATE_PREAMBLE_OFDM: 1143 arsta->txrate.legacy = rate; 1144 if (arsta->arvif && arsta->arvif->vif) 1145 conf = rcu_dereference(arsta->arvif->vif->chanctx_conf); 1146 if (conf && conf->def.chan->band == NL80211_BAND_5GHZ) 1147 arsta->tx_info.status.rates[0].idx = rate_idx - 4; 1148 break; 1149 case WMI_RATE_PREAMBLE_CCK: 1150 arsta->txrate.legacy = rate; 1151 arsta->tx_info.status.rates[0].idx = rate_idx; 1152 if (mcs > ATH11K_HW_RATE_CCK_LP_1M && 1153 mcs <= ATH11K_HW_RATE_CCK_SP_2M) 1154 arsta->tx_info.status.rates[0].flags |= 1155 IEEE80211_TX_RC_USE_SHORT_PREAMBLE; 1156 break; 1157 case WMI_RATE_PREAMBLE_HT: 1158 arsta->txrate.mcs = mcs + 8 * (nss - 1); 1159 arsta->tx_info.status.rates[0].idx = arsta->txrate.mcs; 1160 arsta->txrate.flags = RATE_INFO_FLAGS_MCS; 1161 arsta->tx_info.status.rates[0].flags |= IEEE80211_TX_RC_MCS; 1162 if (sgi) { 1163 arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI; 1164 arsta->tx_info.status.rates[0].flags |= 1165 IEEE80211_TX_RC_SHORT_GI; 1166 } 1167 break; 1168 case WMI_RATE_PREAMBLE_VHT: 1169 arsta->txrate.mcs = mcs; 1170 ieee80211_rate_set_vht(&arsta->tx_info.status.rates[0], mcs, nss); 1171 arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS; 1172 arsta->tx_info.status.rates[0].flags |= IEEE80211_TX_RC_VHT_MCS; 1173 if (sgi) { 1174 arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI; 1175 arsta->tx_info.status.rates[0].flags |= 1176 IEEE80211_TX_RC_SHORT_GI; 1177 } 1178 break; 1179 } 1180 1181 arsta->txrate.nss = nss; 1182 arsta->txrate.bw = ath11k_mac_bw_to_mac80211_bw(bw); 1183 arsta->tx_info.status.rates[0].flags |= ath11k_bw_to_mac80211_bwflags(bw); 1184 arsta->tx_duration += tx_duration; 1185 memcpy(&arsta->last_txrate, &arsta->txrate, sizeof(struct rate_info)); 1186 1187 if (succ_pkts) { 1188 arsta->tx_info.flags = IEEE80211_TX_STAT_ACK; 1189 arsta->tx_info.status.rates[0].count = 1; 1190 ieee80211_tx_rate_update(ar->hw, sta, &arsta->tx_info); 1191 } 1192 1193 /* PPDU stats reported for mgmt packet doesn't have valid tx bytes. 1194 * So skip peer stats update for mgmt packets. 
1195 */ 1196 if (tid < HTT_PPDU_STATS_NON_QOS_TID) { 1197 memset(peer_stats, 0, sizeof(*peer_stats)); 1198 peer_stats->succ_pkts = succ_pkts; 1199 peer_stats->succ_bytes = succ_bytes; 1200 peer_stats->is_ampdu = is_ampdu; 1201 peer_stats->duration = tx_duration; 1202 peer_stats->ba_fails = 1203 HTT_USR_CMPLTN_LONG_RETRY(usr_stats->cmpltn_cmn.flags) + 1204 HTT_USR_CMPLTN_SHORT_RETRY(usr_stats->cmpltn_cmn.flags); 1205 1206 if (ath11k_debug_is_extd_tx_stats_enabled(ar)) 1207 ath11k_accumulate_per_peer_tx_stats(arsta, 1208 peer_stats, rate_idx); 1209 } 1210 1211 spin_unlock_bh(&ab->base_lock); 1212 rcu_read_unlock(); 1213 } 1214 1215 static void ath11k_htt_update_ppdu_stats(struct ath11k *ar, 1216 struct htt_ppdu_stats *ppdu_stats) 1217 { 1218 u8 user; 1219 1220 for (user = 0; user < HTT_PPDU_STATS_MAX_USERS - 1; user++) 1221 ath11k_update_per_peer_tx_stats(ar, ppdu_stats, user); 1222 } 1223 1224 static 1225 struct htt_ppdu_stats_info *ath11k_dp_htt_get_ppdu_desc(struct ath11k *ar, 1226 u32 ppdu_id) 1227 { 1228 struct htt_ppdu_stats_info *ppdu_info; 1229 1230 spin_lock_bh(&ar->data_lock); 1231 if (!list_empty(&ar->ppdu_stats_info)) { 1232 list_for_each_entry(ppdu_info, &ar->ppdu_stats_info, list) { 1233 if (ppdu_info->ppdu_id == ppdu_id) { 1234 spin_unlock_bh(&ar->data_lock); 1235 return ppdu_info; 1236 } 1237 } 1238 1239 if (ar->ppdu_stat_list_depth > HTT_PPDU_DESC_MAX_DEPTH) { 1240 ppdu_info = list_first_entry(&ar->ppdu_stats_info, 1241 typeof(*ppdu_info), list); 1242 list_del(&ppdu_info->list); 1243 ar->ppdu_stat_list_depth--; 1244 ath11k_htt_update_ppdu_stats(ar, &ppdu_info->ppdu_stats); 1245 kfree(ppdu_info); 1246 } 1247 } 1248 spin_unlock_bh(&ar->data_lock); 1249 1250 ppdu_info = kzalloc(sizeof(*ppdu_info), GFP_KERNEL); 1251 if (!ppdu_info) 1252 return NULL; 1253 1254 spin_lock_bh(&ar->data_lock); 1255 list_add_tail(&ppdu_info->list, &ar->ppdu_stats_info); 1256 ar->ppdu_stat_list_depth++; 1257 spin_unlock_bh(&ar->data_lock); 1258 1259 return ppdu_info; 1260 } 1261 1262 static int ath11k_htt_pull_ppdu_stats(struct ath11k_base *ab, 1263 struct sk_buff *skb) 1264 { 1265 struct ath11k_htt_ppdu_stats_msg *msg; 1266 struct htt_ppdu_stats_info *ppdu_info; 1267 struct ath11k *ar; 1268 int ret; 1269 u8 pdev_id; 1270 u32 ppdu_id, len; 1271 1272 msg = (struct ath11k_htt_ppdu_stats_msg *)skb->data; 1273 len = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PAYLOAD_SIZE, msg->info); 1274 pdev_id = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PDEV_ID, msg->info); 1275 ppdu_id = msg->ppdu_id; 1276 1277 rcu_read_lock(); 1278 ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id); 1279 if (!ar) { 1280 ret = -EINVAL; 1281 goto exit; 1282 } 1283 1284 if (ath11k_debug_is_pktlog_lite_mode_enabled(ar)) 1285 trace_ath11k_htt_ppdu_stats(ar, skb->data, len); 1286 1287 ppdu_info = ath11k_dp_htt_get_ppdu_desc(ar, ppdu_id); 1288 if (!ppdu_info) { 1289 ret = -EINVAL; 1290 goto exit; 1291 } 1292 1293 ppdu_info->ppdu_id = ppdu_id; 1294 ret = ath11k_dp_htt_tlv_iter(ab, msg->data, len, 1295 ath11k_htt_tlv_ppdu_stats_parse, 1296 (void *)ppdu_info); 1297 if (ret) { 1298 ath11k_warn(ab, "Failed to parse tlv %d\n", ret); 1299 goto exit; 1300 } 1301 1302 exit: 1303 rcu_read_unlock(); 1304 1305 return ret; 1306 } 1307 1308 static void ath11k_htt_pktlog(struct ath11k_base *ab, struct sk_buff *skb) 1309 { 1310 struct htt_pktlog_msg *data = (struct htt_pktlog_msg *)skb->data; 1311 struct ath11k *ar; 1312 u32 len; 1313 u8 pdev_id; 1314 1315 len = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PAYLOAD_SIZE, data->hdr); 1316 if (len > ATH11K_HTT_PKTLOG_MAX_SIZE) { 1317 
ath11k_warn(ab, "htt pktlog buffer size %d, expected < %d\n", 1318 len, 1319 ATH11K_HTT_PKTLOG_MAX_SIZE); 1320 return; 1321 } 1322 1323 pdev_id = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PDEV_ID, data->hdr); 1324 ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id); 1325 if (!ar) { 1326 ath11k_warn(ab, "invalid pdev id %d on htt pktlog\n", pdev_id); 1327 return; 1328 } 1329 1330 trace_ath11k_htt_pktlog(ar, data->payload, len); 1331 } 1332 1333 void ath11k_dp_htt_htc_t2h_msg_handler(struct ath11k_base *ab, 1334 struct sk_buff *skb) 1335 { 1336 struct ath11k_dp *dp = &ab->dp; 1337 struct htt_resp_msg *resp = (struct htt_resp_msg *)skb->data; 1338 enum htt_t2h_msg_type type = FIELD_GET(HTT_T2H_MSG_TYPE, *(u32 *)resp); 1339 u16 peer_id; 1340 u8 vdev_id; 1341 u8 mac_addr[ETH_ALEN]; 1342 u16 peer_mac_h16; 1343 u16 ast_hash; 1344 1345 ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "dp_htt rx msg type :0x%0x\n", type); 1346 1347 switch (type) { 1348 case HTT_T2H_MSG_TYPE_VERSION_CONF: 1349 dp->htt_tgt_ver_major = FIELD_GET(HTT_T2H_VERSION_CONF_MAJOR, 1350 resp->version_msg.version); 1351 dp->htt_tgt_ver_minor = FIELD_GET(HTT_T2H_VERSION_CONF_MINOR, 1352 resp->version_msg.version); 1353 complete(&dp->htt_tgt_version_received); 1354 break; 1355 case HTT_T2H_MSG_TYPE_PEER_MAP: 1356 vdev_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_VDEV_ID, 1357 resp->peer_map_ev.info); 1358 peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_PEER_ID, 1359 resp->peer_map_ev.info); 1360 peer_mac_h16 = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16, 1361 resp->peer_map_ev.info1); 1362 ath11k_dp_get_mac_addr(resp->peer_map_ev.mac_addr_l32, 1363 peer_mac_h16, mac_addr); 1364 ast_hash = FIELD_GET(HTT_T2H_PEER_MAP_INFO2_AST_HASH_VAL, 1365 resp->peer_map_ev.info2); 1366 ath11k_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash); 1367 break; 1368 case HTT_T2H_MSG_TYPE_PEER_UNMAP: 1369 peer_id = FIELD_GET(HTT_T2H_PEER_UNMAP_INFO_PEER_ID, 1370 resp->peer_unmap_ev.info); 1371 ath11k_peer_unmap_event(ab, peer_id); 1372 break; 1373 case HTT_T2H_MSG_TYPE_PPDU_STATS_IND: 1374 ath11k_htt_pull_ppdu_stats(ab, skb); 1375 break; 1376 case HTT_T2H_MSG_TYPE_EXT_STATS_CONF: 1377 ath11k_dbg_htt_ext_stats_handler(ab, skb); 1378 break; 1379 case HTT_T2H_MSG_TYPE_PKTLOG: 1380 ath11k_htt_pktlog(ab, skb); 1381 break; 1382 default: 1383 ath11k_warn(ab, "htt event %d not handled\n", type); 1384 break; 1385 } 1386 1387 dev_kfree_skb_any(skb); 1388 } 1389 1390 static int ath11k_dp_rx_msdu_coalesce(struct ath11k *ar, 1391 struct sk_buff_head *msdu_list, 1392 struct sk_buff *first, struct sk_buff *last, 1393 u8 l3pad_bytes, int msdu_len) 1394 { 1395 struct sk_buff *skb; 1396 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(first); 1397 int buf_first_hdr_len, buf_first_len; 1398 struct hal_rx_desc *ldesc; 1399 int space_extra; 1400 int rem_len; 1401 int buf_len; 1402 1403 /* As the msdu is spread across multiple rx buffers, 1404 * find the offset to the start of msdu for computing 1405 * the length of the msdu in the first buffer. 
1406 */ 1407 buf_first_hdr_len = HAL_RX_DESC_SIZE + l3pad_bytes; 1408 buf_first_len = DP_RX_BUFFER_SIZE - buf_first_hdr_len; 1409 1410 if (WARN_ON_ONCE(msdu_len <= buf_first_len)) { 1411 skb_put(first, buf_first_hdr_len + msdu_len); 1412 skb_pull(first, buf_first_hdr_len); 1413 return 0; 1414 } 1415 1416 ldesc = (struct hal_rx_desc *)last->data; 1417 rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(ldesc); 1418 rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(ldesc); 1419 1420 /* MSDU spans over multiple buffers because the length of the MSDU 1421 * exceeds DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE. So assume the data 1422 * in the first buf is of length DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE. 1423 */ 1424 skb_put(first, DP_RX_BUFFER_SIZE); 1425 skb_pull(first, buf_first_hdr_len); 1426 1427 /* When an MSDU spread over multiple buffers attention, MSDU_END and 1428 * MPDU_END tlvs are valid only in the last buffer. Copy those tlvs. 1429 */ 1430 ath11k_dp_rx_desc_end_tlv_copy(rxcb->rx_desc, ldesc); 1431 1432 space_extra = msdu_len - (buf_first_len + skb_tailroom(first)); 1433 if (space_extra > 0 && 1434 (pskb_expand_head(first, 0, space_extra, GFP_ATOMIC) < 0)) { 1435 /* Free up all buffers of the MSDU */ 1436 while ((skb = __skb_dequeue(msdu_list)) != NULL) { 1437 rxcb = ATH11K_SKB_RXCB(skb); 1438 if (!rxcb->is_continuation) { 1439 dev_kfree_skb_any(skb); 1440 break; 1441 } 1442 dev_kfree_skb_any(skb); 1443 } 1444 return -ENOMEM; 1445 } 1446 1447 rem_len = msdu_len - buf_first_len; 1448 while ((skb = __skb_dequeue(msdu_list)) != NULL && rem_len > 0) { 1449 rxcb = ATH11K_SKB_RXCB(skb); 1450 if (rxcb->is_continuation) 1451 buf_len = DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE; 1452 else 1453 buf_len = rem_len; 1454 1455 if (buf_len > (DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE)) { 1456 WARN_ON_ONCE(1); 1457 dev_kfree_skb_any(skb); 1458 return -EINVAL; 1459 } 1460 1461 skb_put(skb, buf_len + HAL_RX_DESC_SIZE); 1462 skb_pull(skb, HAL_RX_DESC_SIZE); 1463 skb_copy_from_linear_data(skb, skb_put(first, buf_len), 1464 buf_len); 1465 dev_kfree_skb_any(skb); 1466 1467 rem_len -= buf_len; 1468 if (!rxcb->is_continuation) 1469 break; 1470 } 1471 1472 return 0; 1473 } 1474 1475 static struct sk_buff *ath11k_dp_rx_get_msdu_last_buf(struct sk_buff_head *msdu_list, 1476 struct sk_buff *first) 1477 { 1478 struct sk_buff *skb; 1479 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(first); 1480 1481 if (!rxcb->is_continuation) 1482 return first; 1483 1484 skb_queue_walk(msdu_list, skb) { 1485 rxcb = ATH11K_SKB_RXCB(skb); 1486 if (!rxcb->is_continuation) 1487 return skb; 1488 } 1489 1490 return NULL; 1491 } 1492 1493 static int ath11k_dp_rx_retrieve_amsdu(struct ath11k *ar, 1494 struct sk_buff_head *msdu_list, 1495 struct sk_buff_head *amsdu_list) 1496 { 1497 struct sk_buff *msdu = skb_peek(msdu_list); 1498 struct sk_buff *last_buf; 1499 struct ath11k_skb_rxcb *rxcb; 1500 struct ieee80211_hdr *hdr; 1501 struct hal_rx_desc *rx_desc, *lrx_desc; 1502 u16 msdu_len; 1503 u8 l3_pad_bytes; 1504 u8 *hdr_status; 1505 int ret; 1506 1507 if (!msdu) 1508 return -ENOENT; 1509 1510 rx_desc = (struct hal_rx_desc *)msdu->data; 1511 hdr_status = ath11k_dp_rx_h_80211_hdr(rx_desc); 1512 hdr = (struct ieee80211_hdr *)hdr_status; 1513 /* Process only data frames */ 1514 if (!ieee80211_is_data(hdr->frame_control)) { 1515 __skb_unlink(msdu, msdu_list); 1516 dev_kfree_skb_any(msdu); 1517 return -EINVAL; 1518 } 1519 1520 do { 1521 __skb_unlink(msdu, msdu_list); 1522 last_buf = ath11k_dp_rx_get_msdu_last_buf(msdu_list, msdu); 1523 if (!last_buf) 
{ 1524 ath11k_warn(ar->ab, 1525 "No valid Rx buffer to access Atten/MSDU_END/MPDU_END tlvs\n"); 1526 ret = -EIO; 1527 goto free_out; 1528 } 1529 1530 rx_desc = (struct hal_rx_desc *)msdu->data; 1531 lrx_desc = (struct hal_rx_desc *)last_buf->data; 1532 1533 if (!ath11k_dp_rx_h_attn_msdu_done(lrx_desc)) { 1534 ath11k_warn(ar->ab, "msdu_done bit in attention is not set\n"); 1535 ret = -EIO; 1536 goto free_out; 1537 } 1538 1539 rxcb = ATH11K_SKB_RXCB(msdu); 1540 rxcb->rx_desc = rx_desc; 1541 msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(rx_desc); 1542 l3_pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(lrx_desc); 1543 1544 if (!rxcb->is_continuation) { 1545 skb_put(msdu, HAL_RX_DESC_SIZE + l3_pad_bytes + msdu_len); 1546 skb_pull(msdu, HAL_RX_DESC_SIZE + l3_pad_bytes); 1547 } else { 1548 ret = ath11k_dp_rx_msdu_coalesce(ar, msdu_list, 1549 msdu, last_buf, 1550 l3_pad_bytes, msdu_len); 1551 if (ret) { 1552 ath11k_warn(ar->ab, 1553 "failed to coalesce msdu rx buffer%d\n", ret); 1554 goto free_out; 1555 } 1556 } 1557 __skb_queue_tail(amsdu_list, msdu); 1558 1559 /* Should we also consider msdu_cnt from mpdu_meta while 1560 * preparing amsdu list? 1561 */ 1562 if (rxcb->is_last_msdu) 1563 break; 1564 } while ((msdu = skb_peek(msdu_list)) != NULL); 1565 1566 return 0; 1567 1568 free_out: 1569 dev_kfree_skb_any(msdu); 1570 __skb_queue_purge(amsdu_list); 1571 1572 return ret; 1573 } 1574 1575 static void ath11k_dp_rx_h_csum_offload(struct sk_buff *msdu) 1576 { 1577 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); 1578 bool ip_csum_fail, l4_csum_fail; 1579 1580 ip_csum_fail = ath11k_dp_rx_h_attn_ip_cksum_fail(rxcb->rx_desc); 1581 l4_csum_fail = ath11k_dp_rx_h_attn_l4_cksum_fail(rxcb->rx_desc); 1582 1583 msdu->ip_summed = (ip_csum_fail || l4_csum_fail) ? 1584 CHECKSUM_NONE : CHECKSUM_UNNECESSARY; 1585 } 1586 1587 static int ath11k_dp_rx_crypto_mic_len(struct ath11k *ar, 1588 enum hal_encrypt_type enctype) 1589 { 1590 switch (enctype) { 1591 case HAL_ENCRYPT_TYPE_OPEN: 1592 case HAL_ENCRYPT_TYPE_TKIP_NO_MIC: 1593 case HAL_ENCRYPT_TYPE_TKIP_MIC: 1594 return 0; 1595 case HAL_ENCRYPT_TYPE_CCMP_128: 1596 return IEEE80211_CCMP_MIC_LEN; 1597 case HAL_ENCRYPT_TYPE_CCMP_256: 1598 return IEEE80211_CCMP_256_MIC_LEN; 1599 case HAL_ENCRYPT_TYPE_GCMP_128: 1600 case HAL_ENCRYPT_TYPE_AES_GCMP_256: 1601 return IEEE80211_GCMP_MIC_LEN; 1602 case HAL_ENCRYPT_TYPE_WEP_40: 1603 case HAL_ENCRYPT_TYPE_WEP_104: 1604 case HAL_ENCRYPT_TYPE_WEP_128: 1605 case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4: 1606 case HAL_ENCRYPT_TYPE_WAPI: 1607 break; 1608 } 1609 1610 ath11k_warn(ar->ab, "unsupported encryption type %d for mic len\n", enctype); 1611 return 0; 1612 } 1613 1614 static int ath11k_dp_rx_crypto_param_len(struct ath11k *ar, 1615 enum hal_encrypt_type enctype) 1616 { 1617 switch (enctype) { 1618 case HAL_ENCRYPT_TYPE_OPEN: 1619 return 0; 1620 case HAL_ENCRYPT_TYPE_TKIP_NO_MIC: 1621 case HAL_ENCRYPT_TYPE_TKIP_MIC: 1622 return IEEE80211_TKIP_IV_LEN; 1623 case HAL_ENCRYPT_TYPE_CCMP_128: 1624 return IEEE80211_CCMP_HDR_LEN; 1625 case HAL_ENCRYPT_TYPE_CCMP_256: 1626 return IEEE80211_CCMP_256_HDR_LEN; 1627 case HAL_ENCRYPT_TYPE_GCMP_128: 1628 case HAL_ENCRYPT_TYPE_AES_GCMP_256: 1629 return IEEE80211_GCMP_HDR_LEN; 1630 case HAL_ENCRYPT_TYPE_WEP_40: 1631 case HAL_ENCRYPT_TYPE_WEP_104: 1632 case HAL_ENCRYPT_TYPE_WEP_128: 1633 case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4: 1634 case HAL_ENCRYPT_TYPE_WAPI: 1635 break; 1636 } 1637 1638 ath11k_warn(ar->ab, "unsupported encryption type %d\n", enctype); 1639 return 0; 1640 } 1641 1642 static int 
ath11k_dp_rx_crypto_icv_len(struct ath11k *ar, 1643 enum hal_encrypt_type enctype) 1644 { 1645 switch (enctype) { 1646 case HAL_ENCRYPT_TYPE_OPEN: 1647 case HAL_ENCRYPT_TYPE_CCMP_128: 1648 case HAL_ENCRYPT_TYPE_CCMP_256: 1649 case HAL_ENCRYPT_TYPE_GCMP_128: 1650 case HAL_ENCRYPT_TYPE_AES_GCMP_256: 1651 return 0; 1652 case HAL_ENCRYPT_TYPE_TKIP_NO_MIC: 1653 case HAL_ENCRYPT_TYPE_TKIP_MIC: 1654 return IEEE80211_TKIP_ICV_LEN; 1655 case HAL_ENCRYPT_TYPE_WEP_40: 1656 case HAL_ENCRYPT_TYPE_WEP_104: 1657 case HAL_ENCRYPT_TYPE_WEP_128: 1658 case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4: 1659 case HAL_ENCRYPT_TYPE_WAPI: 1660 break; 1661 } 1662 1663 ath11k_warn(ar->ab, "unsupported encryption type %d\n", enctype); 1664 return 0; 1665 } 1666 1667 static void ath11k_dp_rx_h_undecap_nwifi(struct ath11k *ar, 1668 struct sk_buff *msdu, 1669 u8 *first_hdr, 1670 enum hal_encrypt_type enctype, 1671 struct ieee80211_rx_status *status) 1672 { 1673 struct ieee80211_hdr *hdr; 1674 size_t hdr_len; 1675 u8 da[ETH_ALEN]; 1676 u8 sa[ETH_ALEN]; 1677 1678 /* pull decapped header and copy SA & DA */ 1679 hdr = (struct ieee80211_hdr *)msdu->data; 1680 ether_addr_copy(da, ieee80211_get_DA(hdr)); 1681 ether_addr_copy(sa, ieee80211_get_SA(hdr)); 1682 skb_pull(msdu, ieee80211_hdrlen(hdr->frame_control)); 1683 1684 /* push original 802.11 header */ 1685 hdr = (struct ieee80211_hdr *)first_hdr; 1686 hdr_len = ieee80211_hdrlen(hdr->frame_control); 1687 1688 if (!(status->flag & RX_FLAG_IV_STRIPPED)) { 1689 memcpy(skb_push(msdu, 1690 ath11k_dp_rx_crypto_param_len(ar, enctype)), 1691 (void *)hdr + hdr_len, 1692 ath11k_dp_rx_crypto_param_len(ar, enctype)); 1693 } 1694 1695 memcpy(skb_push(msdu, hdr_len), hdr, hdr_len); 1696 1697 /* original 802.11 header has a different DA and in 1698 * case of 4addr it may also have different SA 1699 */ 1700 hdr = (struct ieee80211_hdr *)msdu->data; 1701 ether_addr_copy(ieee80211_get_DA(hdr), da); 1702 ether_addr_copy(ieee80211_get_SA(hdr), sa); 1703 } 1704 1705 static void ath11k_dp_rx_h_undecap_raw(struct ath11k *ar, struct sk_buff *msdu, 1706 enum hal_encrypt_type enctype, 1707 struct ieee80211_rx_status *status, 1708 bool decrypted) 1709 { 1710 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); 1711 struct ieee80211_hdr *hdr; 1712 size_t hdr_len; 1713 size_t crypto_len; 1714 1715 if (!rxcb->is_first_msdu || 1716 !(rxcb->is_first_msdu && rxcb->is_last_msdu)) { 1717 WARN_ON_ONCE(1); 1718 return; 1719 } 1720 1721 skb_trim(msdu, msdu->len - FCS_LEN); 1722 1723 if (!decrypted) 1724 return; 1725 1726 hdr = (void *)msdu->data; 1727 1728 /* Tail */ 1729 if (status->flag & RX_FLAG_IV_STRIPPED) { 1730 skb_trim(msdu, msdu->len - 1731 ath11k_dp_rx_crypto_mic_len(ar, enctype)); 1732 1733 skb_trim(msdu, msdu->len - 1734 ath11k_dp_rx_crypto_icv_len(ar, enctype)); 1735 } else { 1736 /* MIC */ 1737 if (status->flag & RX_FLAG_MIC_STRIPPED) 1738 skb_trim(msdu, msdu->len - 1739 ath11k_dp_rx_crypto_mic_len(ar, enctype)); 1740 1741 /* ICV */ 1742 if (status->flag & RX_FLAG_ICV_STRIPPED) 1743 skb_trim(msdu, msdu->len - 1744 ath11k_dp_rx_crypto_icv_len(ar, enctype)); 1745 } 1746 1747 /* MMIC */ 1748 if ((status->flag & RX_FLAG_MMIC_STRIPPED) && 1749 !ieee80211_has_morefrags(hdr->frame_control) && 1750 enctype == HAL_ENCRYPT_TYPE_TKIP_MIC) 1751 skb_trim(msdu, msdu->len - IEEE80211_CCMP_MIC_LEN); 1752 1753 /* Head */ 1754 if (status->flag & RX_FLAG_IV_STRIPPED) { 1755 hdr_len = ieee80211_hdrlen(hdr->frame_control); 1756 crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype); 1757 1758 memmove((void *)msdu->data + 
crypto_len, 1759 (void *)msdu->data, hdr_len); 1760 skb_pull(msdu, crypto_len); 1761 } 1762 } 1763 1764 static void *ath11k_dp_rx_h_find_rfc1042(struct ath11k *ar, 1765 struct sk_buff *msdu, 1766 enum hal_encrypt_type enctype) 1767 { 1768 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); 1769 struct ieee80211_hdr *hdr; 1770 size_t hdr_len, crypto_len; 1771 void *rfc1042; 1772 bool is_amsdu; 1773 1774 is_amsdu = !(rxcb->is_first_msdu && rxcb->is_last_msdu); 1775 hdr = (struct ieee80211_hdr *)ath11k_dp_rx_h_80211_hdr(rxcb->rx_desc); 1776 rfc1042 = hdr; 1777 1778 if (rxcb->is_first_msdu) { 1779 hdr_len = ieee80211_hdrlen(hdr->frame_control); 1780 crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype); 1781 1782 rfc1042 += hdr_len + crypto_len; 1783 } 1784 1785 if (is_amsdu) 1786 rfc1042 += sizeof(struct ath11k_dp_amsdu_subframe_hdr); 1787 1788 return rfc1042; 1789 } 1790 1791 static void ath11k_dp_rx_h_undecap_eth(struct ath11k *ar, 1792 struct sk_buff *msdu, 1793 u8 *first_hdr, 1794 enum hal_encrypt_type enctype, 1795 struct ieee80211_rx_status *status) 1796 { 1797 struct ieee80211_hdr *hdr; 1798 struct ethhdr *eth; 1799 size_t hdr_len; 1800 u8 da[ETH_ALEN]; 1801 u8 sa[ETH_ALEN]; 1802 void *rfc1042; 1803 1804 rfc1042 = ath11k_dp_rx_h_find_rfc1042(ar, msdu, enctype); 1805 if (WARN_ON_ONCE(!rfc1042)) 1806 return; 1807 1808 /* pull decapped header and copy SA & DA */ 1809 eth = (struct ethhdr *)msdu->data; 1810 ether_addr_copy(da, eth->h_dest); 1811 ether_addr_copy(sa, eth->h_source); 1812 skb_pull(msdu, sizeof(struct ethhdr)); 1813 1814 /* push rfc1042/llc/snap */ 1815 memcpy(skb_push(msdu, sizeof(struct ath11k_dp_rfc1042_hdr)), rfc1042, 1816 sizeof(struct ath11k_dp_rfc1042_hdr)); 1817 1818 /* push original 802.11 header */ 1819 hdr = (struct ieee80211_hdr *)first_hdr; 1820 hdr_len = ieee80211_hdrlen(hdr->frame_control); 1821 1822 if (!(status->flag & RX_FLAG_IV_STRIPPED)) { 1823 memcpy(skb_push(msdu, 1824 ath11k_dp_rx_crypto_param_len(ar, enctype)), 1825 (void *)hdr + hdr_len, 1826 ath11k_dp_rx_crypto_param_len(ar, enctype)); 1827 } 1828 1829 memcpy(skb_push(msdu, hdr_len), hdr, hdr_len); 1830 1831 /* original 802.11 header has a different DA and in 1832 * case of 4addr it may also have different SA 1833 */ 1834 hdr = (struct ieee80211_hdr *)msdu->data; 1835 ether_addr_copy(ieee80211_get_DA(hdr), da); 1836 ether_addr_copy(ieee80211_get_SA(hdr), sa); 1837 } 1838 1839 static void ath11k_dp_rx_h_undecap(struct ath11k *ar, struct sk_buff *msdu, 1840 struct hal_rx_desc *rx_desc, 1841 enum hal_encrypt_type enctype, 1842 struct ieee80211_rx_status *status, 1843 bool decrypted) 1844 { 1845 u8 *first_hdr; 1846 u8 decap; 1847 1848 first_hdr = ath11k_dp_rx_h_80211_hdr(rx_desc); 1849 decap = ath11k_dp_rx_h_mpdu_start_decap_type(rx_desc); 1850 1851 switch (decap) { 1852 case DP_RX_DECAP_TYPE_NATIVE_WIFI: 1853 ath11k_dp_rx_h_undecap_nwifi(ar, msdu, first_hdr, 1854 enctype, status); 1855 break; 1856 case DP_RX_DECAP_TYPE_RAW: 1857 ath11k_dp_rx_h_undecap_raw(ar, msdu, enctype, status, 1858 decrypted); 1859 break; 1860 case DP_RX_DECAP_TYPE_ETHERNET2_DIX: 1861 ath11k_dp_rx_h_undecap_eth(ar, msdu, first_hdr, 1862 enctype, status); 1863 break; 1864 case DP_RX_DECAP_TYPE_8023: 1865 /* TODO: Handle undecap for these formats */ 1866 break; 1867 } 1868 } 1869 1870 static void ath11k_dp_rx_h_mpdu(struct ath11k *ar, 1871 struct sk_buff_head *amsdu_list, 1872 struct hal_rx_desc *rx_desc, 1873 struct ieee80211_rx_status *rx_status) 1874 { 1875 struct ieee80211_hdr *hdr; 1876 enum hal_encrypt_type enctype; 1877 
struct sk_buff *last_msdu; 1878 struct sk_buff *msdu; 1879 struct ath11k_skb_rxcb *last_rxcb; 1880 bool is_decrypted; 1881 u32 err_bitmap; 1882 u8 *qos; 1883 1884 if (skb_queue_empty(amsdu_list)) 1885 return; 1886 1887 hdr = (struct ieee80211_hdr *)ath11k_dp_rx_h_80211_hdr(rx_desc); 1888 1889 /* Each A-MSDU subframe will use the original header as the base and be 1890 * reported as a separate MSDU so strip the A-MSDU bit from QoS Ctl. 1891 */ 1892 if (ieee80211_is_data_qos(hdr->frame_control)) { 1893 qos = ieee80211_get_qos_ctl(hdr); 1894 qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT; 1895 } 1896 1897 is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_desc); 1898 enctype = ath11k_dp_rx_h_mpdu_start_enctype(rx_desc); 1899 1900 /* Some attention flags are valid only in the last MSDU. */ 1901 last_msdu = skb_peek_tail(amsdu_list); 1902 last_rxcb = ATH11K_SKB_RXCB(last_msdu); 1903 1904 err_bitmap = ath11k_dp_rx_h_attn_mpdu_err(last_rxcb->rx_desc); 1905 1906 /* Clear per-MPDU flags while leaving per-PPDU flags intact. */ 1907 rx_status->flag &= ~(RX_FLAG_FAILED_FCS_CRC | 1908 RX_FLAG_MMIC_ERROR | 1909 RX_FLAG_DECRYPTED | 1910 RX_FLAG_IV_STRIPPED | 1911 RX_FLAG_MMIC_STRIPPED); 1912 1913 if (err_bitmap & DP_RX_MPDU_ERR_FCS) 1914 rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; 1915 1916 if (err_bitmap & DP_RX_MPDU_ERR_TKIP_MIC) 1917 rx_status->flag |= RX_FLAG_MMIC_ERROR; 1918 1919 if (is_decrypted) 1920 rx_status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MMIC_STRIPPED | 1921 RX_FLAG_MIC_STRIPPED | RX_FLAG_ICV_STRIPPED; 1922 1923 skb_queue_walk(amsdu_list, msdu) { 1924 ath11k_dp_rx_h_csum_offload(msdu); 1925 ath11k_dp_rx_h_undecap(ar, msdu, rx_desc, 1926 enctype, rx_status, is_decrypted); 1927 } 1928 } 1929 1930 static void ath11k_dp_rx_h_rate(struct ath11k *ar, struct hal_rx_desc *rx_desc, 1931 struct ieee80211_rx_status *rx_status) 1932 { 1933 struct ieee80211_supported_band *sband; 1934 enum rx_msdu_start_pkt_type pkt_type; 1935 u8 bw; 1936 u8 rate_mcs, nss; 1937 u8 sgi; 1938 bool is_cck; 1939 1940 pkt_type = ath11k_dp_rx_h_msdu_start_pkt_type(rx_desc); 1941 bw = ath11k_dp_rx_h_msdu_start_rx_bw(rx_desc); 1942 rate_mcs = ath11k_dp_rx_h_msdu_start_rate_mcs(rx_desc); 1943 nss = ath11k_dp_rx_h_msdu_start_nss(rx_desc); 1944 sgi = ath11k_dp_rx_h_msdu_start_sgi(rx_desc); 1945 1946 switch (pkt_type) { 1947 case RX_MSDU_START_PKT_TYPE_11A: 1948 case RX_MSDU_START_PKT_TYPE_11B: 1949 is_cck = (pkt_type == RX_MSDU_START_PKT_TYPE_11B); 1950 sband = &ar->mac.sbands[rx_status->band]; 1951 rx_status->rate_idx = ath11k_mac_hw_rate_to_idx(sband, rate_mcs, 1952 is_cck); 1953 break; 1954 case RX_MSDU_START_PKT_TYPE_11N: 1955 rx_status->encoding = RX_ENC_HT; 1956 if (rate_mcs > ATH11K_HT_MCS_MAX) { 1957 ath11k_warn(ar->ab, 1958 "Received with invalid mcs in HT mode %d\n", 1959 rate_mcs); 1960 break; 1961 } 1962 rx_status->rate_idx = rate_mcs + (8 * (nss - 1)); 1963 if (sgi) 1964 rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI; 1965 rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw); 1966 break; 1967 case RX_MSDU_START_PKT_TYPE_11AC: 1968 rx_status->encoding = RX_ENC_VHT; 1969 rx_status->rate_idx = rate_mcs; 1970 if (rate_mcs > ATH11K_VHT_MCS_MAX) { 1971 ath11k_warn(ar->ab, 1972 "Received with invalid mcs in VHT mode %d\n", 1973 rate_mcs); 1974 break; 1975 } 1976 rx_status->nss = nss; 1977 if (sgi) 1978 rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI; 1979 rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw); 1980 break; 1981 case RX_MSDU_START_PKT_TYPE_11AX: 1982 rx_status->rate_idx = rate_mcs; 1983 if (rate_mcs > ATH11K_HE_MCS_MAX) { 1984 
ath11k_warn(ar->ab, 1985 "Received with invalid mcs in HE mode %d\n", 1986 rate_mcs); 1987 break; 1988 } 1989 rx_status->encoding = RX_ENC_HE; 1990 rx_status->nss = nss; 1991 rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw); 1992 break; 1993 } 1994 } 1995 1996 static void ath11k_dp_rx_h_ppdu(struct ath11k *ar, struct hal_rx_desc *rx_desc, 1997 struct ieee80211_rx_status *rx_status) 1998 { 1999 u8 channel_num; 2000 2001 rx_status->freq = 0; 2002 rx_status->rate_idx = 0; 2003 rx_status->nss = 0; 2004 rx_status->encoding = RX_ENC_LEGACY; 2005 rx_status->bw = RATE_INFO_BW_20; 2006 2007 rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL; 2008 2009 channel_num = ath11k_dp_rx_h_msdu_start_freq(rx_desc); 2010 2011 if (channel_num >= 1 && channel_num <= 14) { 2012 rx_status->band = NL80211_BAND_2GHZ; 2013 } else if (channel_num >= 36 && channel_num <= 173) { 2014 rx_status->band = NL80211_BAND_5GHZ; 2015 } else { 2016 ath11k_warn(ar->ab, "Unsupported Channel info received %d\n", 2017 channel_num); 2018 return; 2019 } 2020 2021 rx_status->freq = ieee80211_channel_to_frequency(channel_num, 2022 rx_status->band); 2023 2024 ath11k_dp_rx_h_rate(ar, rx_desc, rx_status); 2025 } 2026 2027 static void ath11k_dp_rx_process_amsdu(struct ath11k *ar, 2028 struct sk_buff_head *amsdu_list, 2029 struct ieee80211_rx_status *rx_status) 2030 { 2031 struct sk_buff *first; 2032 struct ath11k_skb_rxcb *rxcb; 2033 struct hal_rx_desc *rx_desc; 2034 bool first_mpdu; 2035 2036 if (skb_queue_empty(amsdu_list)) 2037 return; 2038 2039 first = skb_peek(amsdu_list); 2040 rxcb = ATH11K_SKB_RXCB(first); 2041 rx_desc = rxcb->rx_desc; 2042 2043 first_mpdu = ath11k_dp_rx_h_attn_first_mpdu(rx_desc); 2044 if (first_mpdu) 2045 ath11k_dp_rx_h_ppdu(ar, rx_desc, rx_status); 2046 2047 ath11k_dp_rx_h_mpdu(ar, amsdu_list, rx_desc, rx_status); 2048 } 2049 2050 static char *ath11k_print_get_tid(struct ieee80211_hdr *hdr, char *out, 2051 size_t size) 2052 { 2053 u8 *qc; 2054 int tid; 2055 2056 if (!ieee80211_is_data_qos(hdr->frame_control)) 2057 return ""; 2058 2059 qc = ieee80211_get_qos_ctl(hdr); 2060 tid = *qc & IEEE80211_QOS_CTL_TID_MASK; 2061 snprintf(out, size, "tid %d", tid); 2062 2063 return out; 2064 } 2065 2066 static void ath11k_dp_rx_deliver_msdu(struct ath11k *ar, struct napi_struct *napi, 2067 struct sk_buff *msdu) 2068 { 2069 static const struct ieee80211_radiotap_he known = { 2070 .data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN | 2071 IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN), 2072 .data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN), 2073 }; 2074 struct ieee80211_rx_status *status; 2075 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data; 2076 struct ieee80211_radiotap_he *he = NULL; 2077 char tid[32]; 2078 2079 status = IEEE80211_SKB_RXCB(msdu); 2080 if (status->encoding == RX_ENC_HE) { 2081 he = skb_push(msdu, sizeof(known)); 2082 memcpy(he, &known, sizeof(known)); 2083 status->flag |= RX_FLAG_RADIOTAP_HE; 2084 } 2085 2086 ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 2087 "rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n", 2088 msdu, 2089 msdu->len, 2090 ieee80211_get_SA(hdr), 2091 ath11k_print_get_tid(hdr, tid, sizeof(tid)), 2092 is_multicast_ether_addr(ieee80211_get_DA(hdr)) ? 2093 "mcast" : "ucast", 2094 (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4, 2095 (status->encoding == RX_ENC_LEGACY) ? "legacy" : "", 2096 (status->encoding == RX_ENC_HT) ? "ht" : "", 2097 (status->encoding == RX_ENC_VHT) ? 
"vht" : "", 2098 (status->encoding == RX_ENC_HE) ? "he" : "", 2099 (status->bw == RATE_INFO_BW_40) ? "40" : "", 2100 (status->bw == RATE_INFO_BW_80) ? "80" : "", 2101 (status->bw == RATE_INFO_BW_160) ? "160" : "", 2102 status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "", 2103 status->rate_idx, 2104 status->nss, 2105 status->freq, 2106 status->band, status->flag, 2107 !!(status->flag & RX_FLAG_FAILED_FCS_CRC), 2108 !!(status->flag & RX_FLAG_MMIC_ERROR), 2109 !!(status->flag & RX_FLAG_AMSDU_MORE)); 2110 2111 /* TODO: trace rx packet */ 2112 2113 ieee80211_rx_napi(ar->hw, NULL, msdu, napi); 2114 } 2115 2116 static void ath11k_dp_rx_pre_deliver_amsdu(struct ath11k *ar, 2117 struct sk_buff_head *amsdu_list, 2118 struct ieee80211_rx_status *rxs) 2119 { 2120 struct sk_buff *msdu; 2121 struct sk_buff *first_subframe; 2122 struct ieee80211_rx_status *status; 2123 2124 first_subframe = skb_peek(amsdu_list); 2125 2126 skb_queue_walk(amsdu_list, msdu) { 2127 /* Setup per-MSDU flags */ 2128 if (skb_queue_empty(amsdu_list)) 2129 rxs->flag &= ~RX_FLAG_AMSDU_MORE; 2130 else 2131 rxs->flag |= RX_FLAG_AMSDU_MORE; 2132 2133 if (msdu == first_subframe) { 2134 first_subframe = NULL; 2135 rxs->flag &= ~RX_FLAG_ALLOW_SAME_PN; 2136 } else { 2137 rxs->flag |= RX_FLAG_ALLOW_SAME_PN; 2138 } 2139 rxs->flag |= RX_FLAG_SKIP_MONITOR; 2140 2141 status = IEEE80211_SKB_RXCB(msdu); 2142 *status = *rxs; 2143 } 2144 } 2145 2146 static void ath11k_dp_rx_process_pending_packets(struct ath11k_base *ab, 2147 struct napi_struct *napi, 2148 struct sk_buff_head *pending_q, 2149 int *quota, u8 mac_id) 2150 { 2151 struct ath11k *ar; 2152 struct sk_buff *msdu; 2153 struct ath11k_pdev *pdev; 2154 2155 if (skb_queue_empty(pending_q)) 2156 return; 2157 2158 ar = ab->pdevs[mac_id].ar; 2159 2160 rcu_read_lock(); 2161 pdev = rcu_dereference(ab->pdevs_active[mac_id]); 2162 2163 while (*quota && (msdu = __skb_dequeue(pending_q))) { 2164 if (!pdev) { 2165 dev_kfree_skb_any(msdu); 2166 continue; 2167 } 2168 2169 ath11k_dp_rx_deliver_msdu(ar, napi, msdu); 2170 (*quota)--; 2171 } 2172 rcu_read_unlock(); 2173 } 2174 2175 int ath11k_dp_process_rx(struct ath11k_base *ab, int mac_id, 2176 struct napi_struct *napi, struct sk_buff_head *pending_q, 2177 int budget) 2178 { 2179 struct ath11k *ar = ab->pdevs[mac_id].ar; 2180 struct ath11k_pdev_dp *dp = &ar->dp; 2181 struct ieee80211_rx_status *rx_status = &dp->rx_status; 2182 struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring; 2183 struct hal_srng *srng; 2184 struct sk_buff *msdu; 2185 struct sk_buff_head msdu_list; 2186 struct sk_buff_head amsdu_list; 2187 struct ath11k_skb_rxcb *rxcb; 2188 u32 *rx_desc; 2189 int buf_id; 2190 int num_buffs_reaped = 0; 2191 int quota = budget; 2192 int ret; 2193 bool done = false; 2194 2195 /* Process any pending packets from the previous napi poll. 2196 * Note: All msdu's in this pending_q corresponds to the same mac id 2197 * due to pdev based reo dest mapping and also since each irq group id 2198 * maps to specific reo dest ring. 
 */
	ath11k_dp_rx_process_pending_packets(ab, napi, pending_q, &quota,
					     mac_id);

	/* If all quota is exhausted by processing the pending_q,
	 * wait for the next napi poll to reap the new info.
	 */
	if (!quota)
		goto exit;

	__skb_queue_head_init(&msdu_list);

	srng = &ab->hal.srng_list[dp->reo_dst_ring.ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

try_again:
	while ((rx_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
		struct hal_reo_dest_ring *desc = (struct hal_reo_dest_ring *)rx_desc;
		enum hal_reo_dest_ring_push_reason push_reason;
		u32 cookie;

		cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
				   desc->buf_addr_info.info1);
		buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
				   cookie);
		spin_lock_bh(&rx_ring->idr_lock);
		msdu = idr_find(&rx_ring->bufs_idr, buf_id);
		if (!msdu) {
			ath11k_warn(ab, "frame rx with invalid buf_id %d\n",
				    buf_id);
			spin_unlock_bh(&rx_ring->idr_lock);
			continue;
		}

		idr_remove(&rx_ring->bufs_idr, buf_id);
		spin_unlock_bh(&rx_ring->idr_lock);

		rxcb = ATH11K_SKB_RXCB(msdu);
		dma_unmap_single(ab->dev, rxcb->paddr,
				 msdu->len + skb_tailroom(msdu),
				 DMA_FROM_DEVICE);

		num_buffs_reaped++;

		push_reason = FIELD_GET(HAL_REO_DEST_RING_INFO0_PUSH_REASON,
					desc->info0);
		if (push_reason !=
		    HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION) {
			/* TODO: Check if the msdu can be sent up for processing */
			dev_kfree_skb_any(msdu);
			ab->soc_stats.hal_reo_error[dp->reo_dst_ring.ring_id]++;
			continue;
		}

		rxcb->is_first_msdu = !!(desc->rx_msdu_info.info0 &
					 RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU);
		rxcb->is_last_msdu = !!(desc->rx_msdu_info.info0 &
					RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU);
		rxcb->is_continuation = !!(desc->rx_msdu_info.info0 &
					   RX_MSDU_DESC_INFO0_MSDU_CONTINUATION);
		rxcb->mac_id = mac_id;
		__skb_queue_tail(&msdu_list, msdu);

		/* Stop reaping from the ring once quota is exhausted
		 * and we've received all msdu's in the A-MSDU. The
		 * additional msdu's reaped in excess of quota here would
		 * be pushed into the pending queue to be processed during
		 * the next napi poll.
		 * Note: More profiling can be done to see the impact on
		 * pending_q and throughput during various traffic & density
		 * and how use of budget instead of remaining quota affects it.
		 */
		if (num_buffs_reaped >= quota && rxcb->is_last_msdu &&
		    !rxcb->is_continuation) {
			done = true;
			break;
		}
	}

	/* Hw might have updated the head pointer after we cached it.
	 * In this case, even though there are entries in the ring we'll
	 * get rx_desc NULL. Give the read another try with updated cached
	 * head pointer so that we can reap complete MPDU in the current
	 * rx processing.
	 */
	if (!done && ath11k_hal_srng_dst_num_free(ab, srng, true)) {
		ath11k_hal_srng_access_end(ab, srng);
		goto try_again;
	}

	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	if (!num_buffs_reaped)
		goto exit;

	/* Should we reschedule it later if we are not able to replenish all
	 * the buffers?
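	 * For now the refill below only requests as many buffers as were
	 * reaped in this poll.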
2301 */ 2302 ath11k_dp_rxbufs_replenish(ab, mac_id, rx_ring, num_buffs_reaped, 2303 HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC); 2304 2305 rcu_read_lock(); 2306 if (!rcu_dereference(ab->pdevs_active[mac_id])) { 2307 __skb_queue_purge(&msdu_list); 2308 goto rcu_unlock; 2309 } 2310 2311 if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) { 2312 __skb_queue_purge(&msdu_list); 2313 goto rcu_unlock; 2314 } 2315 2316 while (!skb_queue_empty(&msdu_list)) { 2317 __skb_queue_head_init(&amsdu_list); 2318 ret = ath11k_dp_rx_retrieve_amsdu(ar, &msdu_list, &amsdu_list); 2319 if (ret) { 2320 if (ret == -EIO) { 2321 ath11k_err(ab, "rx ring got corrupted %d\n", ret); 2322 __skb_queue_purge(&msdu_list); 2323 /* Should stop processing any more rx in 2324 * future from this ring? 2325 */ 2326 goto rcu_unlock; 2327 } 2328 2329 /* A-MSDU retrieval got failed due to non-fatal condition, 2330 * continue processing with the next msdu. 2331 */ 2332 continue; 2333 } 2334 2335 ath11k_dp_rx_process_amsdu(ar, &amsdu_list, rx_status); 2336 2337 ath11k_dp_rx_pre_deliver_amsdu(ar, &amsdu_list, rx_status); 2338 skb_queue_splice_tail(&amsdu_list, pending_q); 2339 } 2340 2341 while (quota && (msdu = __skb_dequeue(pending_q))) { 2342 ath11k_dp_rx_deliver_msdu(ar, napi, msdu); 2343 quota--; 2344 } 2345 2346 rcu_unlock: 2347 rcu_read_unlock(); 2348 exit: 2349 return budget - quota; 2350 } 2351 2352 static void ath11k_dp_rx_update_peer_stats(struct ath11k_sta *arsta, 2353 struct hal_rx_mon_ppdu_info *ppdu_info) 2354 { 2355 struct ath11k_rx_peer_stats *rx_stats = arsta->rx_stats; 2356 u32 num_msdu; 2357 2358 if (!rx_stats) 2359 return; 2360 2361 num_msdu = ppdu_info->tcp_msdu_count + ppdu_info->tcp_ack_msdu_count + 2362 ppdu_info->udp_msdu_count + ppdu_info->other_msdu_count; 2363 2364 rx_stats->num_msdu += num_msdu; 2365 rx_stats->tcp_msdu_count += ppdu_info->tcp_msdu_count + 2366 ppdu_info->tcp_ack_msdu_count; 2367 rx_stats->udp_msdu_count += ppdu_info->udp_msdu_count; 2368 rx_stats->other_msdu_count += ppdu_info->other_msdu_count; 2369 2370 if (ppdu_info->preamble_type == HAL_RX_PREAMBLE_11A || 2371 ppdu_info->preamble_type == HAL_RX_PREAMBLE_11B) { 2372 ppdu_info->nss = 1; 2373 ppdu_info->mcs = HAL_RX_MAX_MCS; 2374 ppdu_info->tid = IEEE80211_NUM_TIDS; 2375 } 2376 2377 if (ppdu_info->nss > 0 && ppdu_info->nss <= HAL_RX_MAX_NSS) 2378 rx_stats->nss_count[ppdu_info->nss - 1] += num_msdu; 2379 2380 if (ppdu_info->mcs <= HAL_RX_MAX_MCS) 2381 rx_stats->mcs_count[ppdu_info->mcs] += num_msdu; 2382 2383 if (ppdu_info->gi < HAL_RX_GI_MAX) 2384 rx_stats->gi_count[ppdu_info->gi] += num_msdu; 2385 2386 if (ppdu_info->bw < HAL_RX_BW_MAX) 2387 rx_stats->bw_count[ppdu_info->bw] += num_msdu; 2388 2389 if (ppdu_info->ldpc < HAL_RX_SU_MU_CODING_MAX) 2390 rx_stats->coding_count[ppdu_info->ldpc] += num_msdu; 2391 2392 if (ppdu_info->tid <= IEEE80211_NUM_TIDS) 2393 rx_stats->tid_count[ppdu_info->tid] += num_msdu; 2394 2395 if (ppdu_info->preamble_type < HAL_RX_PREAMBLE_MAX) 2396 rx_stats->pream_cnt[ppdu_info->preamble_type] += num_msdu; 2397 2398 if (ppdu_info->reception_type < HAL_RX_RECEPTION_TYPE_MAX) 2399 rx_stats->reception_type[ppdu_info->reception_type] += num_msdu; 2400 2401 if (ppdu_info->is_stbc) 2402 rx_stats->stbc_count += num_msdu; 2403 2404 if (ppdu_info->beamformed) 2405 rx_stats->beamformed_count += num_msdu; 2406 2407 if (ppdu_info->num_mpdu_fcs_ok > 1) 2408 rx_stats->ampdu_msdu_count += num_msdu; 2409 else 2410 rx_stats->non_ampdu_msdu_count += num_msdu; 2411 2412 rx_stats->num_mpdu_fcs_ok += ppdu_info->num_mpdu_fcs_ok; 2413 
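	/* num_mpdu_fcs_ok above and num_mpdu_fcs_err below hold the per-peer
	 * MPDU totals; a FCS error rate could be derived from them later,
	 * e.g. fcs_err * 100 / (fcs_ok + fcs_err) (illustrative only, not
	 * computed here).
	 */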
rx_stats->num_mpdu_fcs_err += ppdu_info->num_mpdu_fcs_err; 2414 2415 arsta->rssi_comb = ppdu_info->rssi_comb; 2416 rx_stats->rx_duration += ppdu_info->rx_duration; 2417 arsta->rx_duration = rx_stats->rx_duration; 2418 } 2419 2420 static struct sk_buff *ath11k_dp_rx_alloc_mon_status_buf(struct ath11k_base *ab, 2421 struct dp_rxdma_ring *rx_ring, 2422 int *buf_id, gfp_t gfp) 2423 { 2424 struct sk_buff *skb; 2425 dma_addr_t paddr; 2426 2427 skb = dev_alloc_skb(DP_RX_BUFFER_SIZE + 2428 DP_RX_BUFFER_ALIGN_SIZE); 2429 2430 if (!skb) 2431 goto fail_alloc_skb; 2432 2433 if (!IS_ALIGNED((unsigned long)skb->data, 2434 DP_RX_BUFFER_ALIGN_SIZE)) { 2435 skb_pull(skb, PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) - 2436 skb->data); 2437 } 2438 2439 paddr = dma_map_single(ab->dev, skb->data, 2440 skb->len + skb_tailroom(skb), 2441 DMA_BIDIRECTIONAL); 2442 if (unlikely(dma_mapping_error(ab->dev, paddr))) 2443 goto fail_free_skb; 2444 2445 spin_lock_bh(&rx_ring->idr_lock); 2446 *buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0, 2447 rx_ring->bufs_max, gfp); 2448 spin_unlock_bh(&rx_ring->idr_lock); 2449 if (*buf_id < 0) 2450 goto fail_dma_unmap; 2451 2452 ATH11K_SKB_RXCB(skb)->paddr = paddr; 2453 return skb; 2454 2455 fail_dma_unmap: 2456 dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb), 2457 DMA_BIDIRECTIONAL); 2458 fail_free_skb: 2459 dev_kfree_skb_any(skb); 2460 fail_alloc_skb: 2461 return NULL; 2462 } 2463 2464 int ath11k_dp_rx_mon_status_bufs_replenish(struct ath11k_base *ab, int mac_id, 2465 struct dp_rxdma_ring *rx_ring, 2466 int req_entries, 2467 enum hal_rx_buf_return_buf_manager mgr, 2468 gfp_t gfp) 2469 { 2470 struct hal_srng *srng; 2471 u32 *desc; 2472 struct sk_buff *skb; 2473 int num_free; 2474 int num_remain; 2475 int buf_id; 2476 u32 cookie; 2477 dma_addr_t paddr; 2478 2479 req_entries = min(req_entries, rx_ring->bufs_max); 2480 2481 srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id]; 2482 2483 spin_lock_bh(&srng->lock); 2484 2485 ath11k_hal_srng_access_begin(ab, srng); 2486 2487 num_free = ath11k_hal_srng_src_num_free(ab, srng, true); 2488 2489 req_entries = min(num_free, req_entries); 2490 num_remain = req_entries; 2491 2492 while (num_remain > 0) { 2493 skb = ath11k_dp_rx_alloc_mon_status_buf(ab, rx_ring, 2494 &buf_id, gfp); 2495 if (!skb) 2496 break; 2497 paddr = ATH11K_SKB_RXCB(skb)->paddr; 2498 2499 desc = ath11k_hal_srng_src_get_next_entry(ab, srng); 2500 if (!desc) 2501 goto fail_desc_get; 2502 2503 cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) | 2504 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id); 2505 2506 num_remain--; 2507 2508 ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr); 2509 } 2510 2511 ath11k_hal_srng_access_end(ab, srng); 2512 2513 spin_unlock_bh(&srng->lock); 2514 2515 return req_entries - num_remain; 2516 2517 fail_desc_get: 2518 spin_lock_bh(&rx_ring->idr_lock); 2519 idr_remove(&rx_ring->bufs_idr, buf_id); 2520 spin_unlock_bh(&rx_ring->idr_lock); 2521 dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb), 2522 DMA_BIDIRECTIONAL); 2523 dev_kfree_skb_any(skb); 2524 ath11k_hal_srng_access_end(ab, srng); 2525 spin_unlock_bh(&srng->lock); 2526 2527 return req_entries - num_remain; 2528 } 2529 2530 static int ath11k_dp_rx_reap_mon_status_ring(struct ath11k_base *ab, int mac_id, 2531 int *budget, struct sk_buff_head *skb_list) 2532 { 2533 struct ath11k *ar = ab->pdevs[mac_id].ar; 2534 struct ath11k_pdev_dp *dp = &ar->dp; 2535 struct dp_rxdma_ring *rx_ring = &dp->rx_mon_status_refill_ring; 2536 struct hal_srng *srng; 2537 void 
*rx_mon_status_desc; 2538 struct sk_buff *skb; 2539 struct ath11k_skb_rxcb *rxcb; 2540 struct hal_tlv_hdr *tlv; 2541 u32 cookie; 2542 int buf_id; 2543 dma_addr_t paddr; 2544 u8 rbm; 2545 int num_buffs_reaped = 0; 2546 2547 srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id]; 2548 2549 spin_lock_bh(&srng->lock); 2550 2551 ath11k_hal_srng_access_begin(ab, srng); 2552 while (*budget) { 2553 *budget -= 1; 2554 rx_mon_status_desc = 2555 ath11k_hal_srng_src_peek(ab, srng); 2556 if (!rx_mon_status_desc) 2557 break; 2558 2559 ath11k_hal_rx_buf_addr_info_get(rx_mon_status_desc, &paddr, 2560 &cookie, &rbm); 2561 if (paddr) { 2562 buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, cookie); 2563 2564 spin_lock_bh(&rx_ring->idr_lock); 2565 skb = idr_find(&rx_ring->bufs_idr, buf_id); 2566 if (!skb) { 2567 ath11k_warn(ab, "rx monitor status with invalid buf_id %d\n", 2568 buf_id); 2569 spin_unlock_bh(&rx_ring->idr_lock); 2570 continue; 2571 } 2572 2573 idr_remove(&rx_ring->bufs_idr, buf_id); 2574 spin_unlock_bh(&rx_ring->idr_lock); 2575 2576 rxcb = ATH11K_SKB_RXCB(skb); 2577 2578 dma_sync_single_for_cpu(ab->dev, rxcb->paddr, 2579 skb->len + skb_tailroom(skb), 2580 DMA_FROM_DEVICE); 2581 2582 dma_unmap_single(ab->dev, rxcb->paddr, 2583 skb->len + skb_tailroom(skb), 2584 DMA_BIDIRECTIONAL); 2585 2586 tlv = (struct hal_tlv_hdr *)skb->data; 2587 if (FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl) != 2588 HAL_RX_STATUS_BUFFER_DONE) { 2589 ath11k_hal_srng_src_get_next_entry(ab, srng); 2590 continue; 2591 } 2592 2593 __skb_queue_tail(skb_list, skb); 2594 } 2595 2596 skb = ath11k_dp_rx_alloc_mon_status_buf(ab, rx_ring, 2597 &buf_id, GFP_ATOMIC); 2598 2599 if (!skb) { 2600 ath11k_hal_rx_buf_addr_info_set(rx_mon_status_desc, 0, 0, 2601 HAL_RX_BUF_RBM_SW3_BM); 2602 num_buffs_reaped++; 2603 break; 2604 } 2605 rxcb = ATH11K_SKB_RXCB(skb); 2606 2607 cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) | 2608 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id); 2609 2610 ath11k_hal_rx_buf_addr_info_set(rx_mon_status_desc, rxcb->paddr, 2611 cookie, HAL_RX_BUF_RBM_SW3_BM); 2612 ath11k_hal_srng_src_get_next_entry(ab, srng); 2613 num_buffs_reaped++; 2614 } 2615 ath11k_hal_srng_access_end(ab, srng); 2616 spin_unlock_bh(&srng->lock); 2617 2618 return num_buffs_reaped; 2619 } 2620 2621 int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id, 2622 struct napi_struct *napi, int budget) 2623 { 2624 struct ath11k *ar = ab->pdevs[mac_id].ar; 2625 enum hal_rx_mon_status hal_status; 2626 struct sk_buff *skb; 2627 struct sk_buff_head skb_list; 2628 struct hal_rx_mon_ppdu_info ppdu_info; 2629 struct ath11k_peer *peer; 2630 struct ath11k_sta *arsta; 2631 int num_buffs_reaped = 0; 2632 2633 __skb_queue_head_init(&skb_list); 2634 2635 num_buffs_reaped = ath11k_dp_rx_reap_mon_status_ring(ab, mac_id, &budget, 2636 &skb_list); 2637 if (!num_buffs_reaped) 2638 goto exit; 2639 2640 while ((skb = __skb_dequeue(&skb_list))) { 2641 memset(&ppdu_info, 0, sizeof(ppdu_info)); 2642 ppdu_info.peer_id = HAL_INVALID_PEERID; 2643 2644 if (ath11k_debug_is_pktlog_rx_stats_enabled(ar)) 2645 trace_ath11k_htt_rxdesc(ar, skb->data, DP_RX_BUFFER_SIZE); 2646 2647 hal_status = ath11k_hal_rx_parse_mon_status(ab, &ppdu_info, skb); 2648 2649 if (ppdu_info.peer_id == HAL_INVALID_PEERID || 2650 hal_status != HAL_RX_MON_STATUS_PPDU_DONE) { 2651 dev_kfree_skb_any(skb); 2652 continue; 2653 } 2654 2655 rcu_read_lock(); 2656 spin_lock_bh(&ab->base_lock); 2657 peer = ath11k_peer_find_by_id(ab, ppdu_info.peer_id); 2658 2659 if (!peer || !peer->sta) { 2660 
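			/* Without a valid station entry there is nowhere to
			 * account the PPDU statistics, so drop this status
			 * skb and move on.
			 */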
ath11k_dbg(ab, ATH11K_DBG_DATA, 2661 "failed to find the peer with peer_id %d\n", 2662 ppdu_info.peer_id); 2663 spin_unlock_bh(&ab->base_lock); 2664 rcu_read_unlock(); 2665 dev_kfree_skb_any(skb); 2666 continue; 2667 } 2668 2669 arsta = (struct ath11k_sta *)peer->sta->drv_priv; 2670 ath11k_dp_rx_update_peer_stats(arsta, &ppdu_info); 2671 2672 if (ath11k_debug_is_pktlog_peer_valid(ar, peer->addr)) 2673 trace_ath11k_htt_rxdesc(ar, skb->data, DP_RX_BUFFER_SIZE); 2674 2675 spin_unlock_bh(&ab->base_lock); 2676 rcu_read_unlock(); 2677 2678 dev_kfree_skb_any(skb); 2679 } 2680 exit: 2681 return num_buffs_reaped; 2682 } 2683 2684 static int ath11k_dp_rx_link_desc_return(struct ath11k_base *ab, 2685 u32 *link_desc, 2686 enum hal_wbm_rel_bm_act action) 2687 { 2688 struct ath11k_dp *dp = &ab->dp; 2689 struct hal_srng *srng; 2690 u32 *desc; 2691 int ret = 0; 2692 2693 srng = &ab->hal.srng_list[dp->wbm_desc_rel_ring.ring_id]; 2694 2695 spin_lock_bh(&srng->lock); 2696 2697 ath11k_hal_srng_access_begin(ab, srng); 2698 2699 desc = ath11k_hal_srng_src_get_next_entry(ab, srng); 2700 if (!desc) { 2701 ret = -ENOBUFS; 2702 goto exit; 2703 } 2704 2705 ath11k_hal_rx_msdu_link_desc_set(ab, (void *)desc, (void *)link_desc, 2706 action); 2707 2708 exit: 2709 ath11k_hal_srng_access_end(ab, srng); 2710 2711 spin_unlock_bh(&srng->lock); 2712 2713 return ret; 2714 } 2715 2716 static void ath11k_dp_rx_frag_h_mpdu(struct ath11k *ar, 2717 struct sk_buff *msdu, 2718 struct hal_rx_desc *rx_desc, 2719 struct ieee80211_rx_status *rx_status) 2720 { 2721 u8 rx_channel; 2722 enum hal_encrypt_type enctype; 2723 bool is_decrypted; 2724 u32 err_bitmap; 2725 2726 is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_desc); 2727 enctype = ath11k_dp_rx_h_mpdu_start_enctype(rx_desc); 2728 err_bitmap = ath11k_dp_rx_h_attn_mpdu_err(rx_desc); 2729 2730 if (err_bitmap & DP_RX_MPDU_ERR_FCS) 2731 rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; 2732 2733 if (err_bitmap & DP_RX_MPDU_ERR_TKIP_MIC) 2734 rx_status->flag |= RX_FLAG_MMIC_ERROR; 2735 2736 rx_status->encoding = RX_ENC_LEGACY; 2737 rx_status->bw = RATE_INFO_BW_20; 2738 2739 rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL; 2740 2741 rx_channel = ath11k_dp_rx_h_msdu_start_freq(rx_desc); 2742 2743 if (rx_channel >= 1 && rx_channel <= 14) { 2744 rx_status->band = NL80211_BAND_2GHZ; 2745 } else if (rx_channel >= 36 && rx_channel <= 173) { 2746 rx_status->band = NL80211_BAND_5GHZ; 2747 } else { 2748 ath11k_warn(ar->ab, "Unsupported Channel info received %d\n", 2749 rx_channel); 2750 return; 2751 } 2752 2753 rx_status->freq = ieee80211_channel_to_frequency(rx_channel, 2754 rx_status->band); 2755 ath11k_dp_rx_h_rate(ar, rx_desc, rx_status); 2756 2757 /* Rx fragments are received in raw mode */ 2758 skb_trim(msdu, msdu->len - FCS_LEN); 2759 2760 if (is_decrypted) { 2761 rx_status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MIC_STRIPPED; 2762 skb_trim(msdu, msdu->len - 2763 ath11k_dp_rx_crypto_mic_len(ar, enctype)); 2764 } 2765 } 2766 2767 static int 2768 ath11k_dp_process_rx_err_buf(struct ath11k *ar, struct napi_struct *napi, 2769 int buf_id, bool frag) 2770 { 2771 struct ath11k_pdev_dp *dp = &ar->dp; 2772 struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring; 2773 struct ieee80211_rx_status rx_status = {0}; 2774 struct sk_buff *msdu; 2775 struct ath11k_skb_rxcb *rxcb; 2776 struct ieee80211_rx_status *status; 2777 struct hal_rx_desc *rx_desc; 2778 u16 msdu_len; 2779 2780 spin_lock_bh(&rx_ring->idr_lock); 2781 msdu = idr_find(&rx_ring->bufs_idr, buf_id); 2782 if (!msdu) { 2783 ath11k_warn(ar->ab, "rx err buf with 
invalid buf_id %d\n", 2784 buf_id); 2785 spin_unlock_bh(&rx_ring->idr_lock); 2786 return -EINVAL; 2787 } 2788 2789 idr_remove(&rx_ring->bufs_idr, buf_id); 2790 spin_unlock_bh(&rx_ring->idr_lock); 2791 2792 rxcb = ATH11K_SKB_RXCB(msdu); 2793 dma_unmap_single(ar->ab->dev, rxcb->paddr, 2794 msdu->len + skb_tailroom(msdu), 2795 DMA_FROM_DEVICE); 2796 2797 if (!frag) { 2798 /* Process only rx fragments below, and drop 2799 * msdu's indicated due to error reasons. 2800 */ 2801 dev_kfree_skb_any(msdu); 2802 return 0; 2803 } 2804 2805 rcu_read_lock(); 2806 if (!rcu_dereference(ar->ab->pdevs_active[ar->pdev_idx])) { 2807 dev_kfree_skb_any(msdu); 2808 goto exit; 2809 } 2810 2811 if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) { 2812 dev_kfree_skb_any(msdu); 2813 goto exit; 2814 } 2815 2816 rx_desc = (struct hal_rx_desc *)msdu->data; 2817 msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(rx_desc); 2818 skb_put(msdu, HAL_RX_DESC_SIZE + msdu_len); 2819 skb_pull(msdu, HAL_RX_DESC_SIZE); 2820 2821 ath11k_dp_rx_frag_h_mpdu(ar, msdu, rx_desc, &rx_status); 2822 2823 status = IEEE80211_SKB_RXCB(msdu); 2824 2825 *status = rx_status; 2826 2827 ath11k_dp_rx_deliver_msdu(ar, napi, msdu); 2828 2829 exit: 2830 rcu_read_unlock(); 2831 return 0; 2832 } 2833 2834 int ath11k_dp_process_rx_err(struct ath11k_base *ab, struct napi_struct *napi, 2835 int budget) 2836 { 2837 u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC]; 2838 struct dp_link_desc_bank *link_desc_banks; 2839 enum hal_rx_buf_return_buf_manager rbm; 2840 int tot_n_bufs_reaped, quota, ret, i; 2841 int n_bufs_reaped[MAX_RADIOS] = {0}; 2842 struct dp_rxdma_ring *rx_ring; 2843 struct dp_srng *reo_except; 2844 u32 desc_bank, num_msdus; 2845 struct hal_srng *srng; 2846 struct ath11k_dp *dp; 2847 void *link_desc_va; 2848 int buf_id, mac_id; 2849 struct ath11k *ar; 2850 dma_addr_t paddr; 2851 u32 *desc; 2852 bool is_frag; 2853 2854 tot_n_bufs_reaped = 0; 2855 quota = budget; 2856 2857 dp = &ab->dp; 2858 reo_except = &dp->reo_except_ring; 2859 link_desc_banks = dp->link_desc_banks; 2860 2861 srng = &ab->hal.srng_list[reo_except->ring_id]; 2862 2863 spin_lock_bh(&srng->lock); 2864 2865 ath11k_hal_srng_access_begin(ab, srng); 2866 2867 while (budget && 2868 (desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) { 2869 struct hal_reo_dest_ring *reo_desc = (struct hal_reo_dest_ring *)desc; 2870 2871 ab->soc_stats.err_ring_pkts++; 2872 ret = ath11k_hal_desc_reo_parse_err(ab, desc, &paddr, 2873 &desc_bank); 2874 if (ret) { 2875 ath11k_warn(ab, "failed to parse error reo desc %d\n", 2876 ret); 2877 continue; 2878 } 2879 link_desc_va = link_desc_banks[desc_bank].vaddr + 2880 (paddr - link_desc_banks[desc_bank].paddr); 2881 ath11k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, msdu_cookies, 2882 &rbm); 2883 if (rbm != HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST && 2884 rbm != HAL_RX_BUF_RBM_SW3_BM) { 2885 ab->soc_stats.invalid_rbm++; 2886 ath11k_warn(ab, "invalid return buffer manager %d\n", rbm); 2887 ath11k_dp_rx_link_desc_return(ab, desc, 2888 HAL_WBM_REL_BM_ACT_REL_MSDU); 2889 continue; 2890 } 2891 2892 is_frag = !!(reo_desc->rx_mpdu_info.info0 & RX_MPDU_DESC_INFO0_FRAG_FLAG); 2893 2894 /* Return the link desc back to wbm idle list */ 2895 ath11k_dp_rx_link_desc_return(ab, desc, 2896 HAL_WBM_REL_BM_ACT_PUT_IN_IDLE); 2897 2898 for (i = 0; i < num_msdus; i++) { 2899 buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, 2900 msdu_cookies[i]); 2901 2902 mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, 2903 msdu_cookies[i]); 2904 2905 ar = ab->pdevs[mac_id].ar; 2906 2907 if 
	    (!ath11k_dp_process_rx_err_buf(ar, napi, buf_id, is_frag)) {
				n_bufs_reaped[mac_id]++;
				tot_n_bufs_reaped++;
			}
		}

		if (tot_n_bufs_reaped >= quota) {
			tot_n_bufs_reaped = quota;
			goto exit;
		}

		budget = quota - tot_n_bufs_reaped;
	}

exit:
	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	for (i = 0; i < ab->num_radios; i++) {
		if (!n_bufs_reaped[i])
			continue;

		ar = ab->pdevs[i].ar;
		rx_ring = &ar->dp.rx_refill_buf_ring;

		ath11k_dp_rxbufs_replenish(ab, i, rx_ring, n_bufs_reaped[i],
					   HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC);
	}

	return tot_n_bufs_reaped;
}

static void ath11k_dp_rx_null_q_desc_sg_drop(struct ath11k *ar,
					     int msdu_len,
					     struct sk_buff_head *msdu_list)
{
	struct sk_buff *skb, *tmp;
	struct ath11k_skb_rxcb *rxcb;
	int n_buffs;

	n_buffs = DIV_ROUND_UP(msdu_len,
			       (DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE));

	skb_queue_walk_safe(msdu_list, skb, tmp) {
		rxcb = ATH11K_SKB_RXCB(skb);
		if (rxcb->err_rel_src == HAL_WBM_REL_SRC_MODULE_REO &&
		    rxcb->err_code == HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO) {
			if (!n_buffs)
				break;
			__skb_unlink(skb, msdu_list);
			dev_kfree_skb_any(skb);
			n_buffs--;
		}
	}
}

static int ath11k_dp_rx_h_null_q_desc(struct ath11k *ar, struct sk_buff *msdu,
				      struct ieee80211_rx_status *status,
				      struct sk_buff_head *msdu_list)
{
	struct sk_buff_head amsdu_list;
	u16 msdu_len;
	struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
	u8 l3pad_bytes;
	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);

	msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(desc);

	if ((msdu_len + HAL_RX_DESC_SIZE) > DP_RX_BUFFER_SIZE) {
		/* First buffer will be freed by the caller, so deduct its length */
		msdu_len = msdu_len - (DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE);
		ath11k_dp_rx_null_q_desc_sg_drop(ar, msdu_len, msdu_list);
		return -EINVAL;
	}

	if (!ath11k_dp_rx_h_attn_msdu_done(desc)) {
		ath11k_warn(ar->ab,
			    "msdu_done bit not set in null_q_des processing\n");
		__skb_queue_purge(msdu_list);
		return -EIO;
	}

	/* Handle NULL queue descriptor violations arising out of a missing
	 * REO queue for a given peer or a given TID. This typically
	 * may happen if a packet is received on a QoS enabled TID before the
	 * ADDBA negotiation for that TID, i.e. before the TID queue is set up.
	 * It may also happen for MC/BC frames if they are not routed to the
	 * non-QoS TID queue, in the absence of any other default TID queue.
	 * This error can show up in either a REO destination ring or a WBM
	 * release ring.
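	 * In either case the frame itself is usually intact, which is why it
	 * is fixed up here and delivered rather than dropped.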
 */

	__skb_queue_head_init(&amsdu_list);

	rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(desc);
	rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(desc);

	l3pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(desc);

	if ((HAL_RX_DESC_SIZE + l3pad_bytes + msdu_len) > DP_RX_BUFFER_SIZE)
		return -EINVAL;

	skb_put(msdu, HAL_RX_DESC_SIZE + l3pad_bytes + msdu_len);
	skb_pull(msdu, HAL_RX_DESC_SIZE + l3pad_bytes);

	ath11k_dp_rx_h_ppdu(ar, desc, status);

	__skb_queue_tail(&amsdu_list, msdu);

	ath11k_dp_rx_h_mpdu(ar, &amsdu_list, desc, status);

	/* Note that the caller still has access to the msdu and completes
	 * rx with mac80211, so there is no need to clean up amsdu_list here.
	 */

	return 0;
}

static bool ath11k_dp_rx_h_reo_err(struct ath11k *ar, struct sk_buff *msdu,
				   struct ieee80211_rx_status *status,
				   struct sk_buff_head *msdu_list)
{
	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
	bool drop = false;

	ar->ab->soc_stats.reo_error[rxcb->err_code]++;

	switch (rxcb->err_code) {
	case HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO:
		if (ath11k_dp_rx_h_null_q_desc(ar, msdu, status, msdu_list))
			drop = true;
		break;
	default:
		/* TODO: Review other errors and process them to mac80211
		 * as appropriate.
		 */
		drop = true;
		break;
	}

	return drop;
}

static void ath11k_dp_rx_h_tkip_mic_err(struct ath11k *ar, struct sk_buff *msdu,
					struct ieee80211_rx_status *status)
{
	u16 msdu_len;
	struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
	u8 l3pad_bytes;
	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);

	rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(desc);
	rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(desc);

	l3pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(desc);
	msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(desc);
	skb_put(msdu, HAL_RX_DESC_SIZE + l3pad_bytes + msdu_len);
	skb_pull(msdu, HAL_RX_DESC_SIZE + l3pad_bytes);

	ath11k_dp_rx_h_ppdu(ar, desc, status);

	status->flag |= (RX_FLAG_MMIC_STRIPPED | RX_FLAG_MMIC_ERROR |
			 RX_FLAG_DECRYPTED);

	ath11k_dp_rx_h_undecap(ar, msdu, desc,
			       HAL_ENCRYPT_TYPE_TKIP_MIC, status, false);
}

static bool ath11k_dp_rx_h_rxdma_err(struct ath11k *ar, struct sk_buff *msdu,
				     struct ieee80211_rx_status *status)
{
	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
	bool drop = false;

	ar->ab->soc_stats.rxdma_error[rxcb->err_code]++;

	switch (rxcb->err_code) {
	case HAL_REO_ENTR_RING_RXDMA_ECODE_TKIP_MIC_ERR:
		ath11k_dp_rx_h_tkip_mic_err(ar, msdu, status);
		break;
	default:
		/* TODO: Review other rxdma error codes to check if anything is
		 * worth reporting to mac80211.
		 */
		drop = true;
		break;
	}

	return drop;
}

static void ath11k_dp_rx_wbm_err(struct ath11k *ar,
				 struct napi_struct *napi,
				 struct sk_buff *msdu,
				 struct sk_buff_head *msdu_list)
{
	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
	struct ieee80211_rx_status rxs = {0};
	struct ieee80211_rx_status *status;
	bool drop = true;

	switch (rxcb->err_rel_src) {
	case HAL_WBM_REL_SRC_MODULE_REO:
		drop = ath11k_dp_rx_h_reo_err(ar, msdu, &rxs, msdu_list);
		break;
	case
HAL_WBM_REL_SRC_MODULE_RXDMA: 3114 drop = ath11k_dp_rx_h_rxdma_err(ar, msdu, &rxs); 3115 break; 3116 default: 3117 /* msdu will get freed */ 3118 break; 3119 } 3120 3121 if (drop) { 3122 dev_kfree_skb_any(msdu); 3123 return; 3124 } 3125 3126 status = IEEE80211_SKB_RXCB(msdu); 3127 *status = rxs; 3128 3129 ath11k_dp_rx_deliver_msdu(ar, napi, msdu); 3130 } 3131 3132 int ath11k_dp_rx_process_wbm_err(struct ath11k_base *ab, 3133 struct napi_struct *napi, int budget) 3134 { 3135 struct ath11k *ar; 3136 struct ath11k_dp *dp = &ab->dp; 3137 struct dp_rxdma_ring *rx_ring; 3138 struct hal_rx_wbm_rel_info err_info; 3139 struct hal_srng *srng; 3140 struct sk_buff *msdu; 3141 struct sk_buff_head msdu_list[MAX_RADIOS]; 3142 struct ath11k_skb_rxcb *rxcb; 3143 u32 *rx_desc; 3144 int buf_id, mac_id; 3145 int num_buffs_reaped[MAX_RADIOS] = {0}; 3146 int total_num_buffs_reaped = 0; 3147 int ret, i; 3148 3149 for (i = 0; i < MAX_RADIOS; i++) 3150 __skb_queue_head_init(&msdu_list[i]); 3151 3152 srng = &ab->hal.srng_list[dp->rx_rel_ring.ring_id]; 3153 3154 spin_lock_bh(&srng->lock); 3155 3156 ath11k_hal_srng_access_begin(ab, srng); 3157 3158 while (budget) { 3159 rx_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng); 3160 if (!rx_desc) 3161 break; 3162 3163 ret = ath11k_hal_wbm_desc_parse_err(ab, rx_desc, &err_info); 3164 if (ret) { 3165 ath11k_warn(ab, 3166 "failed to parse rx error in wbm_rel ring desc %d\n", 3167 ret); 3168 continue; 3169 } 3170 3171 buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, err_info.cookie); 3172 mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, err_info.cookie); 3173 3174 ar = ab->pdevs[mac_id].ar; 3175 rx_ring = &ar->dp.rx_refill_buf_ring; 3176 3177 spin_lock_bh(&rx_ring->idr_lock); 3178 msdu = idr_find(&rx_ring->bufs_idr, buf_id); 3179 if (!msdu) { 3180 ath11k_warn(ab, "frame rx with invalid buf_id %d pdev %d\n", 3181 buf_id, mac_id); 3182 spin_unlock_bh(&rx_ring->idr_lock); 3183 continue; 3184 } 3185 3186 idr_remove(&rx_ring->bufs_idr, buf_id); 3187 spin_unlock_bh(&rx_ring->idr_lock); 3188 3189 rxcb = ATH11K_SKB_RXCB(msdu); 3190 dma_unmap_single(ab->dev, rxcb->paddr, 3191 msdu->len + skb_tailroom(msdu), 3192 DMA_FROM_DEVICE); 3193 3194 num_buffs_reaped[mac_id]++; 3195 total_num_buffs_reaped++; 3196 budget--; 3197 3198 if (err_info.push_reason != 3199 HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) { 3200 dev_kfree_skb_any(msdu); 3201 continue; 3202 } 3203 3204 rxcb->err_rel_src = err_info.err_rel_src; 3205 rxcb->err_code = err_info.err_code; 3206 rxcb->rx_desc = (struct hal_rx_desc *)msdu->data; 3207 __skb_queue_tail(&msdu_list[mac_id], msdu); 3208 } 3209 3210 ath11k_hal_srng_access_end(ab, srng); 3211 3212 spin_unlock_bh(&srng->lock); 3213 3214 if (!total_num_buffs_reaped) 3215 goto done; 3216 3217 for (i = 0; i < ab->num_radios; i++) { 3218 if (!num_buffs_reaped[i]) 3219 continue; 3220 3221 ar = ab->pdevs[i].ar; 3222 rx_ring = &ar->dp.rx_refill_buf_ring; 3223 3224 ath11k_dp_rxbufs_replenish(ab, i, rx_ring, num_buffs_reaped[i], 3225 HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC); 3226 } 3227 3228 rcu_read_lock(); 3229 for (i = 0; i < ab->num_radios; i++) { 3230 if (!rcu_dereference(ab->pdevs_active[i])) { 3231 __skb_queue_purge(&msdu_list[i]); 3232 continue; 3233 } 3234 3235 ar = ab->pdevs[i].ar; 3236 3237 if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) { 3238 __skb_queue_purge(&msdu_list[i]); 3239 continue; 3240 } 3241 3242 while ((msdu = __skb_dequeue(&msdu_list[i])) != NULL) 3243 ath11k_dp_rx_wbm_err(ar, napi, msdu, &msdu_list[i]); 3244 } 3245 rcu_read_unlock(); 3246 done: 3247 return 
total_num_buffs_reaped; 3248 } 3249 3250 int ath11k_dp_process_rxdma_err(struct ath11k_base *ab, int mac_id, int budget) 3251 { 3252 struct ath11k *ar = ab->pdevs[mac_id].ar; 3253 struct dp_srng *err_ring = &ar->dp.rxdma_err_dst_ring; 3254 struct dp_rxdma_ring *rx_ring = &ar->dp.rx_refill_buf_ring; 3255 struct dp_link_desc_bank *link_desc_banks = ab->dp.link_desc_banks; 3256 struct hal_srng *srng; 3257 u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC]; 3258 enum hal_rx_buf_return_buf_manager rbm; 3259 enum hal_reo_entr_rxdma_ecode rxdma_err_code; 3260 struct ath11k_skb_rxcb *rxcb; 3261 struct sk_buff *skb; 3262 struct hal_reo_entrance_ring *entr_ring; 3263 void *desc; 3264 int num_buf_freed = 0; 3265 int quota = budget; 3266 dma_addr_t paddr; 3267 u32 desc_bank; 3268 void *link_desc_va; 3269 int num_msdus; 3270 int i; 3271 int buf_id; 3272 3273 srng = &ab->hal.srng_list[err_ring->ring_id]; 3274 3275 spin_lock_bh(&srng->lock); 3276 3277 ath11k_hal_srng_access_begin(ab, srng); 3278 3279 while (quota-- && 3280 (desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) { 3281 ath11k_hal_rx_reo_ent_paddr_get(ab, desc, &paddr, &desc_bank); 3282 3283 entr_ring = (struct hal_reo_entrance_ring *)desc; 3284 rxdma_err_code = 3285 FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE, 3286 entr_ring->info1); 3287 ab->soc_stats.rxdma_error[rxdma_err_code]++; 3288 3289 link_desc_va = link_desc_banks[desc_bank].vaddr + 3290 (paddr - link_desc_banks[desc_bank].paddr); 3291 ath11k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, 3292 msdu_cookies, &rbm); 3293 3294 for (i = 0; i < num_msdus; i++) { 3295 buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, 3296 msdu_cookies[i]); 3297 3298 spin_lock_bh(&rx_ring->idr_lock); 3299 skb = idr_find(&rx_ring->bufs_idr, buf_id); 3300 if (!skb) { 3301 ath11k_warn(ab, "rxdma error with invalid buf_id %d\n", 3302 buf_id); 3303 spin_unlock_bh(&rx_ring->idr_lock); 3304 continue; 3305 } 3306 3307 idr_remove(&rx_ring->bufs_idr, buf_id); 3308 spin_unlock_bh(&rx_ring->idr_lock); 3309 3310 rxcb = ATH11K_SKB_RXCB(skb); 3311 dma_unmap_single(ab->dev, rxcb->paddr, 3312 skb->len + skb_tailroom(skb), 3313 DMA_FROM_DEVICE); 3314 dev_kfree_skb_any(skb); 3315 3316 num_buf_freed++; 3317 } 3318 3319 ath11k_dp_rx_link_desc_return(ab, desc, 3320 HAL_WBM_REL_BM_ACT_PUT_IN_IDLE); 3321 } 3322 3323 ath11k_hal_srng_access_end(ab, srng); 3324 3325 spin_unlock_bh(&srng->lock); 3326 3327 if (num_buf_freed) 3328 ath11k_dp_rxbufs_replenish(ab, mac_id, rx_ring, num_buf_freed, 3329 HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC); 3330 3331 return budget - quota; 3332 } 3333 3334 void ath11k_dp_process_reo_status(struct ath11k_base *ab) 3335 { 3336 struct ath11k_dp *dp = &ab->dp; 3337 struct hal_srng *srng; 3338 struct dp_reo_cmd *cmd, *tmp; 3339 bool found = false; 3340 u32 *reo_desc; 3341 u16 tag; 3342 struct hal_reo_status reo_status; 3343 3344 srng = &ab->hal.srng_list[dp->reo_status_ring.ring_id]; 3345 3346 memset(&reo_status, 0, sizeof(reo_status)); 3347 3348 spin_lock_bh(&srng->lock); 3349 3350 ath11k_hal_srng_access_begin(ab, srng); 3351 3352 while ((reo_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) { 3353 tag = FIELD_GET(HAL_SRNG_TLV_HDR_TAG, *reo_desc); 3354 3355 switch (tag) { 3356 case HAL_REO_GET_QUEUE_STATS_STATUS: 3357 ath11k_hal_reo_status_queue_stats(ab, reo_desc, 3358 &reo_status); 3359 break; 3360 case HAL_REO_FLUSH_QUEUE_STATUS: 3361 ath11k_hal_reo_flush_queue_status(ab, reo_desc, 3362 &reo_status); 3363 break; 3364 case HAL_REO_FLUSH_CACHE_STATUS: 3365 ath11k_hal_reo_flush_cache_status(ab, 
reo_desc, 3366 &reo_status); 3367 break; 3368 case HAL_REO_UNBLOCK_CACHE_STATUS: 3369 ath11k_hal_reo_unblk_cache_status(ab, reo_desc, 3370 &reo_status); 3371 break; 3372 case HAL_REO_FLUSH_TIMEOUT_LIST_STATUS: 3373 ath11k_hal_reo_flush_timeout_list_status(ab, reo_desc, 3374 &reo_status); 3375 break; 3376 case HAL_REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS: 3377 ath11k_hal_reo_desc_thresh_reached_status(ab, reo_desc, 3378 &reo_status); 3379 break; 3380 case HAL_REO_UPDATE_RX_REO_QUEUE_STATUS: 3381 ath11k_hal_reo_update_rx_reo_queue_status(ab, reo_desc, 3382 &reo_status); 3383 break; 3384 default: 3385 ath11k_warn(ab, "Unknown reo status type %d\n", tag); 3386 continue; 3387 } 3388 3389 spin_lock_bh(&dp->reo_cmd_lock); 3390 list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) { 3391 if (reo_status.uniform_hdr.cmd_num == cmd->cmd_num) { 3392 found = true; 3393 list_del(&cmd->list); 3394 break; 3395 } 3396 } 3397 spin_unlock_bh(&dp->reo_cmd_lock); 3398 3399 if (found) { 3400 cmd->handler(dp, (void *)&cmd->data, 3401 reo_status.uniform_hdr.cmd_status); 3402 kfree(cmd); 3403 } 3404 3405 found = false; 3406 } 3407 3408 ath11k_hal_srng_access_end(ab, srng); 3409 3410 spin_unlock_bh(&srng->lock); 3411 } 3412 3413 void ath11k_dp_rx_pdev_free(struct ath11k_base *ab, int mac_id) 3414 { 3415 struct ath11k *ar = ab->pdevs[mac_id].ar; 3416 3417 ath11k_dp_rx_pdev_srng_free(ar); 3418 ath11k_dp_rxdma_pdev_buf_free(ar); 3419 } 3420 3421 int ath11k_dp_rx_pdev_alloc(struct ath11k_base *ab, int mac_id) 3422 { 3423 struct ath11k *ar = ab->pdevs[mac_id].ar; 3424 struct ath11k_pdev_dp *dp = &ar->dp; 3425 u32 ring_id; 3426 int ret; 3427 3428 ret = ath11k_dp_rx_pdev_srng_alloc(ar); 3429 if (ret) { 3430 ath11k_warn(ab, "failed to setup rx srngs\n"); 3431 return ret; 3432 } 3433 3434 ret = ath11k_dp_rxdma_pdev_buf_setup(ar); 3435 if (ret) { 3436 ath11k_warn(ab, "failed to setup rxdma ring\n"); 3437 return ret; 3438 } 3439 3440 ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id; 3441 ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id, HAL_RXDMA_BUF); 3442 if (ret) { 3443 ath11k_warn(ab, "failed to configure rx_refill_buf_ring %d\n", 3444 ret); 3445 return ret; 3446 } 3447 3448 ring_id = dp->rxdma_err_dst_ring.ring_id; 3449 ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id, HAL_RXDMA_DST); 3450 if (ret) { 3451 ath11k_warn(ab, "failed to configure rxdma_err_dest_ring %d\n", 3452 ret); 3453 return ret; 3454 } 3455 3456 ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id; 3457 ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, 3458 mac_id, HAL_RXDMA_MONITOR_BUF); 3459 if (ret) { 3460 ath11k_warn(ab, "failed to configure rxdma_mon_buf_ring %d\n", 3461 ret); 3462 return ret; 3463 } 3464 ret = ath11k_dp_tx_htt_srng_setup(ab, 3465 dp->rxdma_mon_dst_ring.ring_id, 3466 mac_id, HAL_RXDMA_MONITOR_DST); 3467 if (ret) { 3468 ath11k_warn(ab, "failed to configure rxdma_mon_dst_ring %d\n", 3469 ret); 3470 return ret; 3471 } 3472 ret = ath11k_dp_tx_htt_srng_setup(ab, 3473 dp->rxdma_mon_desc_ring.ring_id, 3474 mac_id, HAL_RXDMA_MONITOR_DESC); 3475 if (ret) { 3476 ath11k_warn(ab, "failed to configure rxdma_mon_dst_ring %d\n", 3477 ret); 3478 return ret; 3479 } 3480 ring_id = dp->rx_mon_status_refill_ring.refill_buf_ring.ring_id; 3481 ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id, 3482 HAL_RXDMA_MONITOR_STATUS); 3483 if (ret) { 3484 ath11k_warn(ab, 3485 "failed to configure mon_status_refill_ring %d\n", 3486 ret); 3487 return ret; 3488 } 3489 return 0; 3490 } 3491 3492 static void ath11k_dp_mon_set_frag_len(u32 
*total_len, u32 *frag_len) 3493 { 3494 if (*total_len >= (DP_RX_BUFFER_SIZE - sizeof(struct hal_rx_desc))) { 3495 *frag_len = DP_RX_BUFFER_SIZE - sizeof(struct hal_rx_desc); 3496 *total_len -= *frag_len; 3497 } else { 3498 *frag_len = *total_len; 3499 *total_len = 0; 3500 } 3501 } 3502 3503 static 3504 int ath11k_dp_rx_monitor_link_desc_return(struct ath11k *ar, 3505 void *p_last_buf_addr_info, 3506 u8 mac_id) 3507 { 3508 struct ath11k_pdev_dp *dp = &ar->dp; 3509 struct dp_srng *dp_srng; 3510 void *hal_srng; 3511 void *src_srng_desc; 3512 int ret = 0; 3513 3514 dp_srng = &dp->rxdma_mon_desc_ring; 3515 hal_srng = &ar->ab->hal.srng_list[dp_srng->ring_id]; 3516 3517 ath11k_hal_srng_access_begin(ar->ab, hal_srng); 3518 3519 src_srng_desc = ath11k_hal_srng_src_get_next_entry(ar->ab, hal_srng); 3520 3521 if (src_srng_desc) { 3522 struct ath11k_buffer_addr *src_desc = 3523 (struct ath11k_buffer_addr *)src_srng_desc; 3524 3525 *src_desc = *((struct ath11k_buffer_addr *)p_last_buf_addr_info); 3526 } else { 3527 ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 3528 "Monitor Link Desc Ring %d Full", mac_id); 3529 ret = -ENOMEM; 3530 } 3531 3532 ath11k_hal_srng_access_end(ar->ab, hal_srng); 3533 return ret; 3534 } 3535 3536 static 3537 void ath11k_dp_rx_mon_next_link_desc_get(void *rx_msdu_link_desc, 3538 dma_addr_t *paddr, u32 *sw_cookie, 3539 void **pp_buf_addr_info) 3540 { 3541 struct hal_rx_msdu_link *msdu_link = 3542 (struct hal_rx_msdu_link *)rx_msdu_link_desc; 3543 struct ath11k_buffer_addr *buf_addr_info; 3544 u8 rbm = 0; 3545 3546 buf_addr_info = (struct ath11k_buffer_addr *)&msdu_link->buf_addr_info; 3547 3548 ath11k_hal_rx_buf_addr_info_get(buf_addr_info, paddr, sw_cookie, &rbm); 3549 3550 *pp_buf_addr_info = (void *)buf_addr_info; 3551 } 3552 3553 static int ath11k_dp_pkt_set_pktlen(struct sk_buff *skb, u32 len) 3554 { 3555 if (skb->len > len) { 3556 skb_trim(skb, len); 3557 } else { 3558 if (skb_tailroom(skb) < len - skb->len) { 3559 if ((pskb_expand_head(skb, 0, 3560 len - skb->len - skb_tailroom(skb), 3561 GFP_ATOMIC))) { 3562 dev_kfree_skb_any(skb); 3563 return -ENOMEM; 3564 } 3565 } 3566 skb_put(skb, (len - skb->len)); 3567 } 3568 return 0; 3569 } 3570 3571 static void ath11k_hal_rx_msdu_list_get(struct ath11k *ar, 3572 void *msdu_link_desc, 3573 struct hal_rx_msdu_list *msdu_list, 3574 u16 *num_msdus) 3575 { 3576 struct hal_rx_msdu_details *msdu_details = NULL; 3577 struct rx_msdu_desc *msdu_desc_info = NULL; 3578 struct hal_rx_msdu_link *msdu_link = NULL; 3579 int i; 3580 u32 last = FIELD_PREP(RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU, 1); 3581 u32 first = FIELD_PREP(RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU, 1); 3582 u8 tmp = 0; 3583 3584 msdu_link = (struct hal_rx_msdu_link *)msdu_link_desc; 3585 msdu_details = &msdu_link->msdu_link[0]; 3586 3587 for (i = 0; i < HAL_RX_NUM_MSDU_DESC; i++) { 3588 if (FIELD_GET(BUFFER_ADDR_INFO0_ADDR, 3589 msdu_details[i].buf_addr_info.info0) == 0) { 3590 msdu_desc_info = &msdu_details[i - 1].rx_msdu_info; 3591 msdu_desc_info->info0 |= last; 3592 ; 3593 break; 3594 } 3595 msdu_desc_info = &msdu_details[i].rx_msdu_info; 3596 3597 if (!i) 3598 msdu_desc_info->info0 |= first; 3599 else if (i == (HAL_RX_NUM_MSDU_DESC - 1)) 3600 msdu_desc_info->info0 |= last; 3601 msdu_list->msdu_info[i].msdu_flags = msdu_desc_info->info0; 3602 msdu_list->msdu_info[i].msdu_len = 3603 HAL_RX_MSDU_PKT_LENGTH_GET(msdu_desc_info->info0); 3604 msdu_list->sw_cookie[i] = 3605 FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE, 3606 msdu_details[i].buf_addr_info.info1); 3607 tmp = 
FIELD_GET(BUFFER_ADDR_INFO1_RET_BUF_MGR, 3608 msdu_details[i].buf_addr_info.info1); 3609 msdu_list->rbm[i] = tmp; 3610 } 3611 *num_msdus = i; 3612 } 3613 3614 static u32 ath11k_dp_rx_mon_comp_ppduid(u32 msdu_ppdu_id, u32 *ppdu_id, 3615 u32 *rx_bufs_used) 3616 { 3617 u32 ret = 0; 3618 3619 if ((*ppdu_id < msdu_ppdu_id) && 3620 ((msdu_ppdu_id - *ppdu_id) < DP_NOT_PPDU_ID_WRAP_AROUND)) { 3621 *ppdu_id = msdu_ppdu_id; 3622 ret = msdu_ppdu_id; 3623 } else if ((*ppdu_id > msdu_ppdu_id) && 3624 ((*ppdu_id - msdu_ppdu_id) > DP_NOT_PPDU_ID_WRAP_AROUND)) { 3625 /* mon_dst is behind than mon_status 3626 * skip dst_ring and free it 3627 */ 3628 *rx_bufs_used += 1; 3629 *ppdu_id = msdu_ppdu_id; 3630 ret = msdu_ppdu_id; 3631 } 3632 return ret; 3633 } 3634 3635 static void ath11k_dp_mon_get_buf_len(struct hal_rx_msdu_desc_info *info, 3636 bool *is_frag, u32 *total_len, 3637 u32 *frag_len, u32 *msdu_cnt) 3638 { 3639 if (info->msdu_flags & RX_MSDU_DESC_INFO0_MSDU_CONTINUATION) { 3640 if (!*is_frag) { 3641 *total_len = info->msdu_len; 3642 *is_frag = true; 3643 } 3644 ath11k_dp_mon_set_frag_len(total_len, 3645 frag_len); 3646 } else { 3647 if (*is_frag) { 3648 ath11k_dp_mon_set_frag_len(total_len, 3649 frag_len); 3650 } else { 3651 *frag_len = info->msdu_len; 3652 } 3653 *is_frag = false; 3654 *msdu_cnt -= 1; 3655 } 3656 } 3657 3658 static u32 3659 ath11k_dp_rx_mon_mpdu_pop(struct ath11k *ar, 3660 void *ring_entry, struct sk_buff **head_msdu, 3661 struct sk_buff **tail_msdu, u32 *npackets, 3662 u32 *ppdu_id) 3663 { 3664 struct ath11k_pdev_dp *dp = &ar->dp; 3665 struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data; 3666 struct dp_rxdma_ring *rx_ring = &dp->rxdma_mon_buf_ring; 3667 struct sk_buff *msdu = NULL, *last = NULL; 3668 struct hal_rx_msdu_list msdu_list; 3669 void *p_buf_addr_info, *p_last_buf_addr_info; 3670 struct hal_rx_desc *rx_desc; 3671 void *rx_msdu_link_desc; 3672 dma_addr_t paddr; 3673 u16 num_msdus = 0; 3674 u32 rx_buf_size, rx_pkt_offset, sw_cookie; 3675 u32 rx_bufs_used = 0, i = 0; 3676 u32 msdu_ppdu_id = 0, msdu_cnt = 0; 3677 u32 total_len = 0, frag_len = 0; 3678 bool is_frag, is_first_msdu; 3679 bool drop_mpdu = false; 3680 struct ath11k_skb_rxcb *rxcb; 3681 struct hal_reo_entrance_ring *ent_desc = 3682 (struct hal_reo_entrance_ring *)ring_entry; 3683 int buf_id; 3684 3685 ath11k_hal_rx_reo_ent_buf_paddr_get(ring_entry, &paddr, 3686 &sw_cookie, &p_last_buf_addr_info, 3687 &msdu_cnt); 3688 3689 if (FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_PUSH_REASON, 3690 ent_desc->info1) == 3691 HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) { 3692 u8 rxdma_err = 3693 FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE, 3694 ent_desc->info1); 3695 if (rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_FLUSH_REQUEST_ERR || 3696 rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_MPDU_LEN_ERR || 3697 rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_OVERFLOW_ERR) { 3698 drop_mpdu = true; 3699 pmon->rx_mon_stats.dest_mpdu_drop++; 3700 } 3701 } 3702 3703 is_frag = false; 3704 is_first_msdu = true; 3705 3706 do { 3707 if (pmon->mon_last_linkdesc_paddr == paddr) { 3708 pmon->rx_mon_stats.dup_mon_linkdesc_cnt++; 3709 return rx_bufs_used; 3710 } 3711 3712 rx_msdu_link_desc = 3713 (void *)pmon->link_desc_banks[sw_cookie].vaddr + 3714 (paddr - pmon->link_desc_banks[sw_cookie].paddr); 3715 3716 ath11k_hal_rx_msdu_list_get(ar, rx_msdu_link_desc, &msdu_list, 3717 &num_msdus); 3718 3719 for (i = 0; i < num_msdus; i++) { 3720 u32 l2_hdr_offset; 3721 3722 if (pmon->mon_last_buf_cookie == msdu_list.sw_cookie[i]) { 3723 
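				/* Same sw cookie as the previously reaped
				 * buffer: the hw handed us a duplicate entry,
				 * so count it and drop the whole MPDU rather
				 * than touch a buffer that was already
				 * consumed.
				 */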
ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 3724 "i %d last_cookie %d is same\n", 3725 i, pmon->mon_last_buf_cookie); 3726 drop_mpdu = true; 3727 pmon->rx_mon_stats.dup_mon_buf_cnt++; 3728 continue; 3729 } 3730 buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, 3731 msdu_list.sw_cookie[i]); 3732 3733 spin_lock_bh(&rx_ring->idr_lock); 3734 msdu = idr_find(&rx_ring->bufs_idr, buf_id); 3735 spin_unlock_bh(&rx_ring->idr_lock); 3736 if (!msdu) { 3737 ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 3738 "msdu_pop: invalid buf_id %d\n", buf_id); 3739 break; 3740 } 3741 rxcb = ATH11K_SKB_RXCB(msdu); 3742 if (!rxcb->unmapped) { 3743 dma_unmap_single(ar->ab->dev, rxcb->paddr, 3744 msdu->len + 3745 skb_tailroom(msdu), 3746 DMA_FROM_DEVICE); 3747 rxcb->unmapped = 1; 3748 } 3749 if (drop_mpdu) { 3750 ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 3751 "i %d drop msdu %p *ppdu_id %x\n", 3752 i, msdu, *ppdu_id); 3753 dev_kfree_skb_any(msdu); 3754 msdu = NULL; 3755 goto next_msdu; 3756 } 3757 3758 rx_desc = (struct hal_rx_desc *)msdu->data; 3759 3760 rx_pkt_offset = sizeof(struct hal_rx_desc); 3761 l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad(rx_desc); 3762 3763 if (is_first_msdu) { 3764 if (!ath11k_dp_rxdesc_mpdu_valid(rx_desc)) { 3765 drop_mpdu = true; 3766 dev_kfree_skb_any(msdu); 3767 msdu = NULL; 3768 pmon->mon_last_linkdesc_paddr = paddr; 3769 goto next_msdu; 3770 } 3771 3772 msdu_ppdu_id = 3773 ath11k_dp_rxdesc_get_ppduid(rx_desc); 3774 3775 if (ath11k_dp_rx_mon_comp_ppduid(msdu_ppdu_id, 3776 ppdu_id, 3777 &rx_bufs_used)) { 3778 if (rx_bufs_used) { 3779 drop_mpdu = true; 3780 dev_kfree_skb_any(msdu); 3781 msdu = NULL; 3782 goto next_msdu; 3783 } 3784 return rx_bufs_used; 3785 } 3786 pmon->mon_last_linkdesc_paddr = paddr; 3787 is_first_msdu = false; 3788 } 3789 ath11k_dp_mon_get_buf_len(&msdu_list.msdu_info[i], 3790 &is_frag, &total_len, 3791 &frag_len, &msdu_cnt); 3792 rx_buf_size = rx_pkt_offset + l2_hdr_offset + frag_len; 3793 3794 ath11k_dp_pkt_set_pktlen(msdu, rx_buf_size); 3795 3796 if (!(*head_msdu)) 3797 *head_msdu = msdu; 3798 else if (last) 3799 last->next = msdu; 3800 3801 last = msdu; 3802 next_msdu: 3803 pmon->mon_last_buf_cookie = msdu_list.sw_cookie[i]; 3804 rx_bufs_used++; 3805 spin_lock_bh(&rx_ring->idr_lock); 3806 idr_remove(&rx_ring->bufs_idr, buf_id); 3807 spin_unlock_bh(&rx_ring->idr_lock); 3808 } 3809 3810 ath11k_dp_rx_mon_next_link_desc_get(rx_msdu_link_desc, &paddr, 3811 &sw_cookie, 3812 &p_buf_addr_info); 3813 3814 if (ath11k_dp_rx_monitor_link_desc_return(ar, 3815 p_last_buf_addr_info, 3816 dp->mac_id)) 3817 ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 3818 "dp_rx_monitor_link_desc_return failed"); 3819 3820 p_last_buf_addr_info = p_buf_addr_info; 3821 3822 } while (paddr && msdu_cnt); 3823 3824 if (last) 3825 last->next = NULL; 3826 3827 *tail_msdu = msdu; 3828 3829 if (msdu_cnt == 0) 3830 *npackets = 1; 3831 3832 return rx_bufs_used; 3833 } 3834 3835 static void ath11k_dp_rx_msdus_set_payload(struct sk_buff *msdu) 3836 { 3837 u32 rx_pkt_offset, l2_hdr_offset; 3838 3839 rx_pkt_offset = sizeof(struct hal_rx_desc); 3840 l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad((struct hal_rx_desc *)msdu->data); 3841 skb_pull(msdu, rx_pkt_offset + l2_hdr_offset); 3842 } 3843 3844 static struct sk_buff * 3845 ath11k_dp_rx_mon_merg_msdus(struct ath11k *ar, 3846 u32 mac_id, struct sk_buff *head_msdu, 3847 struct sk_buff *last_msdu, 3848 struct ieee80211_rx_status *rxs) 3849 { 3850 struct sk_buff *msdu, *mpdu_buf, *prev_buf; 3851 u32 decap_format, wifi_hdr_len; 3852 struct hal_rx_desc *rx_desc; 3853 char *hdr_desc; 3854 u8 *dest; 
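	/* Used only in the native-wifi decap branch below: hdr_desc/wh point
	 * at the received 802.11 header exposed via
	 * ath11k_dp_rxdesc_get_80211hdr().
	 */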
3855 struct ieee80211_hdr_3addr *wh; 3856 3857 mpdu_buf = NULL; 3858 3859 if (!head_msdu) 3860 goto err_merge_fail; 3861 3862 rx_desc = (struct hal_rx_desc *)head_msdu->data; 3863 3864 if (ath11k_dp_rxdesc_get_mpdulen_err(rx_desc)) 3865 return NULL; 3866 3867 decap_format = ath11k_dp_rxdesc_get_decap_format(rx_desc); 3868 3869 ath11k_dp_rx_h_ppdu(ar, rx_desc, rxs); 3870 3871 if (decap_format == DP_RX_DECAP_TYPE_RAW) { 3872 ath11k_dp_rx_msdus_set_payload(head_msdu); 3873 3874 prev_buf = head_msdu; 3875 msdu = head_msdu->next; 3876 3877 while (msdu) { 3878 ath11k_dp_rx_msdus_set_payload(msdu); 3879 3880 prev_buf = msdu; 3881 msdu = msdu->next; 3882 } 3883 3884 prev_buf->next = NULL; 3885 3886 skb_trim(prev_buf, prev_buf->len - HAL_RX_FCS_LEN); 3887 } else if (decap_format == DP_RX_DECAP_TYPE_NATIVE_WIFI) { 3888 __le16 qos_field; 3889 u8 qos_pkt = 0; 3890 3891 rx_desc = (struct hal_rx_desc *)head_msdu->data; 3892 hdr_desc = ath11k_dp_rxdesc_get_80211hdr(rx_desc); 3893 3894 /* Base size */ 3895 wifi_hdr_len = sizeof(struct ieee80211_hdr_3addr); 3896 wh = (struct ieee80211_hdr_3addr *)hdr_desc; 3897 3898 if (ieee80211_is_data_qos(wh->frame_control)) { 3899 struct ieee80211_qos_hdr *qwh = 3900 (struct ieee80211_qos_hdr *)hdr_desc; 3901 3902 qos_field = qwh->qos_ctrl; 3903 qos_pkt = 1; 3904 } 3905 msdu = head_msdu; 3906 3907 while (msdu) { 3908 rx_desc = (struct hal_rx_desc *)msdu->data; 3909 hdr_desc = ath11k_dp_rxdesc_get_80211hdr(rx_desc); 3910 3911 if (qos_pkt) { 3912 dest = skb_push(msdu, sizeof(__le16)); 3913 if (!dest) 3914 goto err_merge_fail; 3915 memcpy(dest, hdr_desc, wifi_hdr_len); 3916 memcpy(dest + wifi_hdr_len, 3917 (u8 *)&qos_field, sizeof(__le16)); 3918 } 3919 ath11k_dp_rx_msdus_set_payload(msdu); 3920 prev_buf = msdu; 3921 msdu = msdu->next; 3922 } 3923 dest = skb_put(prev_buf, HAL_RX_FCS_LEN); 3924 if (!dest) 3925 goto err_merge_fail; 3926 3927 ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 3928 "mpdu_buf %pK mpdu_buf->len %u", 3929 prev_buf, prev_buf->len); 3930 } else { 3931 ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 3932 "decap format %d is not supported!\n", 3933 decap_format); 3934 goto err_merge_fail; 3935 } 3936 3937 return head_msdu; 3938 3939 err_merge_fail: 3940 if (mpdu_buf && decap_format != DP_RX_DECAP_TYPE_RAW) { 3941 ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 3942 "err_merge_fail mpdu_buf %pK", mpdu_buf); 3943 /* Free the head buffer */ 3944 dev_kfree_skb_any(mpdu_buf); 3945 } 3946 return NULL; 3947 } 3948 3949 static int ath11k_dp_rx_mon_deliver(struct ath11k *ar, u32 mac_id, 3950 struct sk_buff *head_msdu, 3951 struct sk_buff *tail_msdu, 3952 struct napi_struct *napi) 3953 { 3954 struct ath11k_pdev_dp *dp = &ar->dp; 3955 struct sk_buff *mon_skb, *skb_next, *header; 3956 struct ieee80211_rx_status *rxs = &dp->rx_status, *status; 3957 3958 mon_skb = ath11k_dp_rx_mon_merg_msdus(ar, mac_id, head_msdu, 3959 tail_msdu, rxs); 3960 3961 if (!mon_skb) 3962 goto mon_deliver_fail; 3963 3964 header = mon_skb; 3965 3966 rxs->flag = 0; 3967 do { 3968 skb_next = mon_skb->next; 3969 if (!skb_next) 3970 rxs->flag &= ~RX_FLAG_AMSDU_MORE; 3971 else 3972 rxs->flag |= RX_FLAG_AMSDU_MORE; 3973 3974 if (mon_skb == header) { 3975 header = NULL; 3976 rxs->flag &= ~RX_FLAG_ALLOW_SAME_PN; 3977 } else { 3978 rxs->flag |= RX_FLAG_ALLOW_SAME_PN; 3979 } 3980 rxs->flag |= RX_FLAG_ONLY_MONITOR; 3981 3982 status = IEEE80211_SKB_RXCB(mon_skb); 3983 *status = *rxs; 3984 3985 ath11k_dp_rx_deliver_msdu(ar, napi, mon_skb); 3986 mon_skb = skb_next; 3987 } while (mon_skb); 3988 rxs->flag = 0; 3989 3990 return 0; 3991 3992 
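	/* Merge failed: free every buffer in the chain. The buffers were
	 * already unmapped when they were popped from the monitor
	 * destination ring, so only the skbs need to be released here.
	 */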
mon_deliver_fail:
	mon_skb = head_msdu;
	while (mon_skb) {
		skb_next = mon_skb->next;
		dev_kfree_skb_any(mon_skb);
		mon_skb = skb_next;
	}
	return -EINVAL;
}

/* Reap the monitor destination ring for the current PPDU: pop each MPDU's
 * MSDU chain, deliver it to mac80211 and replenish the rx buffers consumed.
 */
static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, u32 quota,
					  struct napi_struct *napi)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
	void *ring_entry;
	void *mon_dst_srng;
	u32 ppdu_id;
	u32 rx_bufs_used;
	struct ath11k_pdev_mon_stats *rx_mon_stats;
	u32 npackets = 0;

	mon_dst_srng = &ar->ab->hal.srng_list[dp->rxdma_mon_dst_ring.ring_id];

	if (!mon_dst_srng) {
		ath11k_warn(ar->ab,
			    "HAL Monitor Destination Ring Init Failed -- %pK",
			    mon_dst_srng);
		return;
	}

	spin_lock_bh(&pmon->mon_lock);

	ath11k_hal_srng_access_begin(ar->ab, mon_dst_srng);

	ppdu_id = pmon->mon_ppdu_info.ppdu_id;
	rx_bufs_used = 0;
	rx_mon_stats = &pmon->rx_mon_stats;

	while ((ring_entry = ath11k_hal_srng_dst_peek(ar->ab, mon_dst_srng))) {
		struct sk_buff *head_msdu, *tail_msdu;

		head_msdu = NULL;
		tail_msdu = NULL;

		rx_bufs_used += ath11k_dp_rx_mon_mpdu_pop(ar, ring_entry,
							  &head_msdu,
							  &tail_msdu,
							  &npackets, &ppdu_id);

		if (ppdu_id != pmon->mon_ppdu_info.ppdu_id) {
			pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
			ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
				   "dest_rx: new ppdu_id %x != status ppdu_id %x",
				   ppdu_id, pmon->mon_ppdu_info.ppdu_id);
			break;
		}
		if (head_msdu && tail_msdu) {
			ath11k_dp_rx_mon_deliver(ar, dp->mac_id, head_msdu,
						 tail_msdu, napi);
			rx_mon_stats->dest_mpdu_done++;
		}

		ring_entry = ath11k_hal_srng_dst_get_next_entry(ar->ab,
								mon_dst_srng);
	}
	ath11k_hal_srng_access_end(ar->ab, mon_dst_srng);

	spin_unlock_bh(&pmon->mon_lock);

	if (rx_bufs_used) {
		rx_mon_stats->dest_ppdu_done++;
		ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id,
					   &dp->rxdma_mon_buf_ring,
					   rx_bufs_used,
					   HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC);
	}
}

/* Parse queued monitor status buffers; once a complete PPDU has been seen,
 * process the matching entries of the monitor destination ring.
 */
static void ath11k_dp_rx_mon_status_process_tlv(struct ath11k *ar,
						u32 quota,
						struct napi_struct *napi)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
	struct hal_rx_mon_ppdu_info *ppdu_info;
	struct sk_buff *status_skb;
	u32 tlv_status = HAL_TLV_STATUS_BUF_DONE;
	struct ath11k_pdev_mon_stats *rx_mon_stats;

	ppdu_info = &pmon->mon_ppdu_info;
	rx_mon_stats = &pmon->rx_mon_stats;

	if (pmon->mon_ppdu_status != DP_PPDU_STATUS_START)
		return;

	while (!skb_queue_empty(&pmon->rx_status_q)) {
		status_skb = skb_dequeue(&pmon->rx_status_q);

		tlv_status = ath11k_hal_rx_parse_mon_status(ar->ab, ppdu_info,
							    status_skb);
		if (tlv_status == HAL_TLV_STATUS_PPDU_DONE) {
			rx_mon_stats->status_ppdu_done++;
			pmon->mon_ppdu_status = DP_PPDU_STATUS_DONE;
			ath11k_dp_rx_mon_dest_process(ar, quota, napi);
			pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
		}
		dev_kfree_skb_any(status_skb);
	}
}

static int ath11k_dp_mon_process_rx(struct ath11k_base *ab, int mac_id,
				    struct napi_struct *napi, int budget)
{
	struct ath11k *ar = ab->pdevs[mac_id].ar;
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
	int num_buffs_reaped = 0;

	num_buffs_reaped = ath11k_dp_rx_reap_mon_status_ring(ar->ab, dp->mac_id, &budget,
							     &pmon->rx_status_q);
	if (num_buffs_reaped)
		ath11k_dp_rx_mon_status_process_tlv(ar, budget, napi);

	return num_buffs_reaped;
}

int ath11k_dp_rx_process_mon_rings(struct ath11k_base *ab, int mac_id,
				   struct napi_struct *napi, int budget)
{
	struct ath11k *ar = ab->pdevs[mac_id].ar;
	int ret = 0;

	if (test_bit(ATH11K_FLAG_MONITOR_ENABLED, &ar->monitor_flags))
		ret = ath11k_dp_mon_process_rx(ab, mac_id, napi, budget);
	else
		ret = ath11k_dp_rx_process_mon_status(ab, mac_id, napi, budget);
	return ret;
}

static int ath11k_dp_rx_pdev_mon_status_attach(struct ath11k *ar)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;

	skb_queue_head_init(&pmon->rx_status_q);

	pmon->mon_ppdu_status = DP_PPDU_STATUS_START;

	memset(&pmon->rx_mon_stats, 0,
	       sizeof(pmon->rx_mon_stats));
	return 0;
}

/* Set up per-pdev monitor mode state: the status skb queue, monitor stats
 * and the RXDMA monitor descriptor link pool.
 */
int ath11k_dp_rx_pdev_mon_attach(struct ath11k *ar)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_mon_data *pmon = &dp->mon_data;
	struct hal_srng *mon_desc_srng = NULL;
	struct dp_srng *dp_srng;
	int ret = 0;
	u32 n_link_desc = 0;

	ret = ath11k_dp_rx_pdev_mon_status_attach(ar);
	if (ret) {
		ath11k_warn(ar->ab, "pdev_mon_status_attach() failed");
		return ret;
	}

	dp_srng = &dp->rxdma_mon_desc_ring;
	n_link_desc = dp_srng->size /
		ath11k_hal_srng_get_entrysize(HAL_RXDMA_MONITOR_DESC);
	mon_desc_srng =
		&ar->ab->hal.srng_list[dp->rxdma_mon_desc_ring.ring_id];

	ret = ath11k_dp_link_desc_setup(ar->ab, pmon->link_desc_banks,
					HAL_RXDMA_MONITOR_DESC, mon_desc_srng,
					n_link_desc);
	if (ret) {
		ath11k_warn(ar->ab, "mon_link_desc_pool_setup() failed");
		return ret;
	}
	pmon->mon_last_linkdesc_paddr = 0;
	pmon->mon_last_buf_cookie = DP_RX_DESC_COOKIE_MAX + 1;
	spin_lock_init(&pmon->mon_lock);
	return 0;
}

static int ath11k_dp_mon_link_free(struct ath11k *ar)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_mon_data *pmon = &dp->mon_data;

	ath11k_dp_link_desc_cleanup(ar->ab, pmon->link_desc_banks,
				    HAL_RXDMA_MONITOR_DESC,
				    &dp->rxdma_mon_desc_ring);
	return 0;
}

int ath11k_dp_rx_pdev_mon_detach(struct ath11k *ar)
{
	ath11k_dp_mon_link_free(ar);
	return 0;
}