1 // SPDX-License-Identifier: BSD-3-Clause-Clear 2 /* 3 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. 4 */ 5 6 #include <linux/ieee80211.h> 7 #include "core.h" 8 #include "debug.h" 9 #include "hal_desc.h" 10 #include "hw.h" 11 #include "dp_rx.h" 12 #include "hal_rx.h" 13 #include "dp_tx.h" 14 #include "peer.h" 15 16 static u8 *ath11k_dp_rx_h_80211_hdr(struct hal_rx_desc *desc) 17 { 18 return desc->hdr_status; 19 } 20 21 static enum hal_encrypt_type ath11k_dp_rx_h_mpdu_start_enctype(struct hal_rx_desc *desc) 22 { 23 if (!(__le32_to_cpu(desc->mpdu_start.info1) & 24 RX_MPDU_START_INFO1_ENCRYPT_INFO_VALID)) 25 return HAL_ENCRYPT_TYPE_OPEN; 26 27 return FIELD_GET(RX_MPDU_START_INFO2_ENC_TYPE, 28 __le32_to_cpu(desc->mpdu_start.info2)); 29 } 30 31 static u8 ath11k_dp_rx_h_mpdu_start_decap_type(struct hal_rx_desc *desc) 32 { 33 return FIELD_GET(RX_MPDU_START_INFO5_DECAP_TYPE, 34 __le32_to_cpu(desc->mpdu_start.info5)); 35 } 36 37 static bool ath11k_dp_rx_h_attn_msdu_done(struct hal_rx_desc *desc) 38 { 39 return !!FIELD_GET(RX_ATTENTION_INFO2_MSDU_DONE, 40 __le32_to_cpu(desc->attention.info2)); 41 } 42 43 static bool ath11k_dp_rx_h_attn_first_mpdu(struct hal_rx_desc *desc) 44 { 45 return !!FIELD_GET(RX_ATTENTION_INFO1_FIRST_MPDU, 46 __le32_to_cpu(desc->attention.info1)); 47 } 48 49 static bool ath11k_dp_rx_h_attn_l4_cksum_fail(struct hal_rx_desc *desc) 50 { 51 return !!FIELD_GET(RX_ATTENTION_INFO1_TCP_UDP_CKSUM_FAIL, 52 __le32_to_cpu(desc->attention.info1)); 53 } 54 55 static bool ath11k_dp_rx_h_attn_ip_cksum_fail(struct hal_rx_desc *desc) 56 { 57 return !!FIELD_GET(RX_ATTENTION_INFO1_IP_CKSUM_FAIL, 58 __le32_to_cpu(desc->attention.info1)); 59 } 60 61 static bool ath11k_dp_rx_h_attn_is_decrypted(struct hal_rx_desc *desc) 62 { 63 return (FIELD_GET(RX_ATTENTION_INFO2_DCRYPT_STATUS_CODE, 64 __le32_to_cpu(desc->attention.info2)) == 65 RX_DESC_DECRYPT_STATUS_CODE_OK); 66 } 67 68 static u32 ath11k_dp_rx_h_attn_mpdu_err(struct hal_rx_desc *desc) 69 { 70 u32 info = __le32_to_cpu(desc->attention.info1); 71 u32 errmap = 0; 72 73 if (info & RX_ATTENTION_INFO1_FCS_ERR) 74 errmap |= DP_RX_MPDU_ERR_FCS; 75 76 if (info & RX_ATTENTION_INFO1_DECRYPT_ERR) 77 errmap |= DP_RX_MPDU_ERR_DECRYPT; 78 79 if (info & RX_ATTENTION_INFO1_TKIP_MIC_ERR) 80 errmap |= DP_RX_MPDU_ERR_TKIP_MIC; 81 82 if (info & RX_ATTENTION_INFO1_A_MSDU_ERROR) 83 errmap |= DP_RX_MPDU_ERR_AMSDU_ERR; 84 85 if (info & RX_ATTENTION_INFO1_OVERFLOW_ERR) 86 errmap |= DP_RX_MPDU_ERR_OVERFLOW; 87 88 if (info & RX_ATTENTION_INFO1_MSDU_LEN_ERR) 89 errmap |= DP_RX_MPDU_ERR_MSDU_LEN; 90 91 if (info & RX_ATTENTION_INFO1_MPDU_LEN_ERR) 92 errmap |= DP_RX_MPDU_ERR_MPDU_LEN; 93 94 return errmap; 95 } 96 97 static u16 ath11k_dp_rx_h_msdu_start_msdu_len(struct hal_rx_desc *desc) 98 { 99 return FIELD_GET(RX_MSDU_START_INFO1_MSDU_LENGTH, 100 __le32_to_cpu(desc->msdu_start.info1)); 101 } 102 103 static u8 ath11k_dp_rx_h_msdu_start_sgi(struct hal_rx_desc *desc) 104 { 105 return FIELD_GET(RX_MSDU_START_INFO3_SGI, 106 __le32_to_cpu(desc->msdu_start.info3)); 107 } 108 109 static u8 ath11k_dp_rx_h_msdu_start_rate_mcs(struct hal_rx_desc *desc) 110 { 111 return FIELD_GET(RX_MSDU_START_INFO3_RATE_MCS, 112 __le32_to_cpu(desc->msdu_start.info3)); 113 } 114 115 static u8 ath11k_dp_rx_h_msdu_start_rx_bw(struct hal_rx_desc *desc) 116 { 117 return FIELD_GET(RX_MSDU_START_INFO3_RECV_BW, 118 __le32_to_cpu(desc->msdu_start.info3)); 119 } 120 121 static u32 ath11k_dp_rx_h_msdu_start_freq(struct hal_rx_desc *desc) 122 { 123 return 
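/* phy_meta_data carries the channel number of the received PPDU; ath11k_dp_rx_h_ppdu() later maps it to a band and center frequency */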
__le32_to_cpu(desc->msdu_start.phy_meta_data); 124 } 125 126 static u8 ath11k_dp_rx_h_msdu_start_pkt_type(struct hal_rx_desc *desc) 127 { 128 return FIELD_GET(RX_MSDU_START_INFO3_PKT_TYPE, 129 __le32_to_cpu(desc->msdu_start.info3)); 130 } 131 132 static u8 ath11k_dp_rx_h_msdu_start_nss(struct hal_rx_desc *desc) 133 { 134 u8 mimo_ss_bitmap = FIELD_GET(RX_MSDU_START_INFO3_MIMO_SS_BITMAP, 135 __le32_to_cpu(desc->msdu_start.info3)); 136 137 return hweight8(mimo_ss_bitmap); 138 } 139 140 static u8 ath11k_dp_rx_h_msdu_end_l3pad(struct hal_rx_desc *desc) 141 { 142 return FIELD_GET(RX_MSDU_END_INFO2_L3_HDR_PADDING, 143 __le32_to_cpu(desc->msdu_end.info2)); 144 } 145 146 static bool ath11k_dp_rx_h_msdu_end_first_msdu(struct hal_rx_desc *desc) 147 { 148 return !!FIELD_GET(RX_MSDU_END_INFO2_FIRST_MSDU, 149 __le32_to_cpu(desc->msdu_end.info2)); 150 } 151 152 static bool ath11k_dp_rx_h_msdu_end_last_msdu(struct hal_rx_desc *desc) 153 { 154 return !!FIELD_GET(RX_MSDU_END_INFO2_LAST_MSDU, 155 __le32_to_cpu(desc->msdu_end.info2)); 156 } 157 158 static void ath11k_dp_rx_desc_end_tlv_copy(struct hal_rx_desc *fdesc, 159 struct hal_rx_desc *ldesc) 160 { 161 memcpy((u8 *)&fdesc->msdu_end, (u8 *)&ldesc->msdu_end, 162 sizeof(struct rx_msdu_end)); 163 memcpy((u8 *)&fdesc->attention, (u8 *)&ldesc->attention, 164 sizeof(struct rx_attention)); 165 memcpy((u8 *)&fdesc->mpdu_end, (u8 *)&ldesc->mpdu_end, 166 sizeof(struct rx_mpdu_end)); 167 } 168 169 static u32 ath11k_dp_rxdesc_get_mpdulen_err(struct hal_rx_desc *rx_desc) 170 { 171 struct rx_attention *rx_attn; 172 173 rx_attn = &rx_desc->attention; 174 175 return FIELD_GET(RX_ATTENTION_INFO1_MPDU_LEN_ERR, 176 __le32_to_cpu(rx_attn->info1)); 177 } 178 179 static u32 ath11k_dp_rxdesc_get_decap_format(struct hal_rx_desc *rx_desc) 180 { 181 struct rx_msdu_start *rx_msdu_start; 182 183 rx_msdu_start = &rx_desc->msdu_start; 184 185 return FIELD_GET(RX_MSDU_START_INFO2_DECAP_FORMAT, 186 __le32_to_cpu(rx_msdu_start->info2)); 187 } 188 189 static u8 *ath11k_dp_rxdesc_get_80211hdr(struct hal_rx_desc *rx_desc) 190 { 191 u8 *rx_pkt_hdr; 192 193 rx_pkt_hdr = &rx_desc->msdu_payload[0]; 194 195 return rx_pkt_hdr; 196 } 197 198 static bool ath11k_dp_rxdesc_mpdu_valid(struct hal_rx_desc *rx_desc) 199 { 200 u32 tlv_tag; 201 202 tlv_tag = FIELD_GET(HAL_TLV_HDR_TAG, 203 __le32_to_cpu(rx_desc->mpdu_start_tag)); 204 205 return tlv_tag == HAL_RX_MPDU_START ? 
true : false; 206 } 207 208 static u32 ath11k_dp_rxdesc_get_ppduid(struct hal_rx_desc *rx_desc) 209 { 210 return __le16_to_cpu(rx_desc->mpdu_start.phy_ppdu_id); 211 } 212 213 /* Returns number of Rx buffers replenished */ 214 int ath11k_dp_rxbufs_replenish(struct ath11k_base *ab, int mac_id, 215 struct dp_rxdma_ring *rx_ring, 216 int req_entries, 217 enum hal_rx_buf_return_buf_manager mgr, 218 gfp_t gfp) 219 { 220 struct hal_srng *srng; 221 u32 *desc; 222 struct sk_buff *skb; 223 int num_free; 224 int num_remain; 225 int buf_id; 226 u32 cookie; 227 dma_addr_t paddr; 228 229 req_entries = min(req_entries, rx_ring->bufs_max); 230 231 srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id]; 232 233 spin_lock_bh(&srng->lock); 234 235 ath11k_hal_srng_access_begin(ab, srng); 236 237 num_free = ath11k_hal_srng_src_num_free(ab, srng, true); 238 if (!req_entries && (num_free > (rx_ring->bufs_max * 3) / 4)) 239 req_entries = num_free; 240 241 req_entries = min(num_free, req_entries); 242 num_remain = req_entries; 243 244 while (num_remain > 0) { 245 skb = dev_alloc_skb(DP_RX_BUFFER_SIZE + 246 DP_RX_BUFFER_ALIGN_SIZE); 247 if (!skb) 248 break; 249 250 if (!IS_ALIGNED((unsigned long)skb->data, 251 DP_RX_BUFFER_ALIGN_SIZE)) { 252 skb_pull(skb, 253 PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) - 254 skb->data); 255 } 256 257 paddr = dma_map_single(ab->dev, skb->data, 258 skb->len + skb_tailroom(skb), 259 DMA_FROM_DEVICE); 260 if (dma_mapping_error(ab->dev, paddr)) 261 goto fail_free_skb; 262 263 spin_lock_bh(&rx_ring->idr_lock); 264 buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0, 265 rx_ring->bufs_max * 3, gfp); 266 spin_unlock_bh(&rx_ring->idr_lock); 267 if (buf_id < 0) 268 goto fail_dma_unmap; 269 270 desc = ath11k_hal_srng_src_get_next_entry(ab, srng); 271 if (!desc) 272 goto fail_idr_remove; 273 274 ATH11K_SKB_RXCB(skb)->paddr = paddr; 275 276 cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) | 277 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id); 278 279 num_remain--; 280 281 ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr); 282 } 283 284 ath11k_hal_srng_access_end(ab, srng); 285 286 spin_unlock_bh(&srng->lock); 287 288 return req_entries - num_remain; 289 290 fail_idr_remove: 291 spin_lock_bh(&rx_ring->idr_lock); 292 idr_remove(&rx_ring->bufs_idr, buf_id); 293 spin_unlock_bh(&rx_ring->idr_lock); 294 fail_dma_unmap: 295 dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb), 296 DMA_FROM_DEVICE); 297 fail_free_skb: 298 dev_kfree_skb_any(skb); 299 300 ath11k_hal_srng_access_end(ab, srng); 301 302 spin_unlock_bh(&srng->lock); 303 304 return req_entries - num_remain; 305 } 306 307 static int ath11k_dp_rxdma_buf_ring_free(struct ath11k *ar, 308 struct dp_rxdma_ring *rx_ring) 309 { 310 struct ath11k_pdev_dp *dp = &ar->dp; 311 struct sk_buff *skb; 312 int buf_id; 313 314 spin_lock_bh(&rx_ring->idr_lock); 315 idr_for_each_entry(&rx_ring->bufs_idr, skb, buf_id) { 316 idr_remove(&rx_ring->bufs_idr, buf_id); 317 /* TODO: Understand where internal driver does this dma_unmap of 318 * of rxdma_buffer. 
319 */ 320 dma_unmap_single(ar->ab->dev, ATH11K_SKB_RXCB(skb)->paddr, 321 skb->len + skb_tailroom(skb), DMA_FROM_DEVICE); 322 dev_kfree_skb_any(skb); 323 } 324 325 idr_destroy(&rx_ring->bufs_idr); 326 spin_unlock_bh(&rx_ring->idr_lock); 327 328 rx_ring = &dp->rx_mon_status_refill_ring; 329 330 spin_lock_bh(&rx_ring->idr_lock); 331 idr_for_each_entry(&rx_ring->bufs_idr, skb, buf_id) { 332 idr_remove(&rx_ring->bufs_idr, buf_id); 333 /* XXX: Understand where internal driver does this dma_unmap of 334 * of rxdma_buffer. 335 */ 336 dma_unmap_single(ar->ab->dev, ATH11K_SKB_RXCB(skb)->paddr, 337 skb->len + skb_tailroom(skb), DMA_BIDIRECTIONAL); 338 dev_kfree_skb_any(skb); 339 } 340 341 idr_destroy(&rx_ring->bufs_idr); 342 spin_unlock_bh(&rx_ring->idr_lock); 343 return 0; 344 } 345 346 static int ath11k_dp_rxdma_pdev_buf_free(struct ath11k *ar) 347 { 348 struct ath11k_pdev_dp *dp = &ar->dp; 349 struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring; 350 351 ath11k_dp_rxdma_buf_ring_free(ar, rx_ring); 352 353 rx_ring = &dp->rxdma_mon_buf_ring; 354 ath11k_dp_rxdma_buf_ring_free(ar, rx_ring); 355 356 rx_ring = &dp->rx_mon_status_refill_ring; 357 ath11k_dp_rxdma_buf_ring_free(ar, rx_ring); 358 return 0; 359 } 360 361 static int ath11k_dp_rxdma_ring_buf_setup(struct ath11k *ar, 362 struct dp_rxdma_ring *rx_ring, 363 u32 ringtype) 364 { 365 struct ath11k_pdev_dp *dp = &ar->dp; 366 int num_entries; 367 368 num_entries = rx_ring->refill_buf_ring.size / 369 ath11k_hal_srng_get_entrysize(ringtype); 370 371 rx_ring->bufs_max = num_entries; 372 ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id, rx_ring, num_entries, 373 HAL_RX_BUF_RBM_SW3_BM, GFP_KERNEL); 374 return 0; 375 } 376 377 static int ath11k_dp_rxdma_pdev_buf_setup(struct ath11k *ar) 378 { 379 struct ath11k_pdev_dp *dp = &ar->dp; 380 struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring; 381 382 ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_BUF); 383 384 rx_ring = &dp->rxdma_mon_buf_ring; 385 ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_MONITOR_BUF); 386 387 rx_ring = &dp->rx_mon_status_refill_ring; 388 ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_MONITOR_STATUS); 389 390 return 0; 391 } 392 393 static void ath11k_dp_rx_pdev_srng_free(struct ath11k *ar) 394 { 395 struct ath11k_pdev_dp *dp = &ar->dp; 396 397 ath11k_dp_srng_cleanup(ar->ab, &dp->rx_refill_buf_ring.refill_buf_ring); 398 ath11k_dp_srng_cleanup(ar->ab, &dp->rxdma_err_dst_ring); 399 ath11k_dp_srng_cleanup(ar->ab, &dp->rx_mon_status_refill_ring.refill_buf_ring); 400 ath11k_dp_srng_cleanup(ar->ab, &dp->rxdma_mon_buf_ring.refill_buf_ring); 401 } 402 403 void ath11k_dp_pdev_reo_cleanup(struct ath11k_base *ab) 404 { 405 struct ath11k_pdev_dp *dp; 406 struct ath11k *ar; 407 int i; 408 409 for (i = 0; i < ab->num_radios; i++) { 410 ar = ab->pdevs[i].ar; 411 dp = &ar->dp; 412 ath11k_dp_srng_cleanup(ab, &dp->reo_dst_ring); 413 } 414 } 415 416 int ath11k_dp_pdev_reo_setup(struct ath11k_base *ab) 417 { 418 struct ath11k *ar; 419 struct ath11k_pdev_dp *dp; 420 int ret; 421 int i; 422 423 for (i = 0; i < ab->num_radios; i++) { 424 ar = ab->pdevs[i].ar; 425 dp = &ar->dp; 426 ret = ath11k_dp_srng_setup(ab, &dp->reo_dst_ring, HAL_REO_DST, 427 dp->mac_id, dp->mac_id, 428 DP_REO_DST_RING_SIZE); 429 if (ret) { 430 ath11k_warn(ar->ab, "failed to setup reo_dst_ring\n"); 431 goto err_reo_cleanup; 432 } 433 } 434 435 return 0; 436 437 err_reo_cleanup: 438 ath11k_dp_pdev_reo_cleanup(ab); 439 440 return ret; 441 } 442 443 static int ath11k_dp_rx_pdev_srng_alloc(struct ath11k *ar) 444 { 445 
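/* Allocate the per-pdev receive SRNGs: the RXDMA refill buffer ring, the RXDMA error destination ring, and the monitor status, buffer, destination and descriptor rings. Any setup failure is logged and returned to the caller. */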
struct ath11k_pdev_dp *dp = &ar->dp; 446 struct dp_srng *srng = NULL; 447 int ret; 448 449 ret = ath11k_dp_srng_setup(ar->ab, 450 &dp->rx_refill_buf_ring.refill_buf_ring, 451 HAL_RXDMA_BUF, 0, 452 dp->mac_id, DP_RXDMA_BUF_RING_SIZE); 453 if (ret) { 454 ath11k_warn(ar->ab, "failed to setup rx_refill_buf_ring\n"); 455 return ret; 456 } 457 458 ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_err_dst_ring, 459 HAL_RXDMA_DST, 0, dp->mac_id, 460 DP_RXDMA_ERR_DST_RING_SIZE); 461 if (ret) { 462 ath11k_warn(ar->ab, "failed to setup rxdma_err_dst_ring\n"); 463 return ret; 464 } 465 466 srng = &dp->rx_mon_status_refill_ring.refill_buf_ring; 467 ret = ath11k_dp_srng_setup(ar->ab, 468 srng, 469 HAL_RXDMA_MONITOR_STATUS, 0, dp->mac_id, 470 DP_RXDMA_MON_STATUS_RING_SIZE); 471 if (ret) { 472 ath11k_warn(ar->ab, 473 "failed to setup rx_mon_status_refill_ring\n"); 474 return ret; 475 } 476 ret = ath11k_dp_srng_setup(ar->ab, 477 &dp->rxdma_mon_buf_ring.refill_buf_ring, 478 HAL_RXDMA_MONITOR_BUF, 0, dp->mac_id, 479 DP_RXDMA_MONITOR_BUF_RING_SIZE); 480 if (ret) { 481 ath11k_warn(ar->ab, 482 "failed to setup HAL_RXDMA_MONITOR_BUF\n"); 483 return ret; 484 } 485 486 ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_mon_dst_ring, 487 HAL_RXDMA_MONITOR_DST, 0, dp->mac_id, 488 DP_RXDMA_MONITOR_DST_RING_SIZE); 489 if (ret) { 490 ath11k_warn(ar->ab, 491 "failed to setup HAL_RXDMA_MONITOR_DST\n"); 492 return ret; 493 } 494 495 ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_mon_desc_ring, 496 HAL_RXDMA_MONITOR_DESC, 0, dp->mac_id, 497 DP_RXDMA_MONITOR_DESC_RING_SIZE); 498 if (ret) { 499 ath11k_warn(ar->ab, 500 "failed to setup HAL_RXDMA_MONITOR_DESC\n"); 501 return ret; 502 } 503 504 return 0; 505 } 506 507 void ath11k_dp_reo_cmd_list_cleanup(struct ath11k_base *ab) 508 { 509 struct ath11k_dp *dp = &ab->dp; 510 struct dp_reo_cmd *cmd, *tmp; 511 struct dp_reo_cache_flush_elem *cmd_cache, *tmp_cache; 512 513 spin_lock_bh(&dp->reo_cmd_lock); 514 list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) { 515 list_del(&cmd->list); 516 dma_unmap_single(ab->dev, cmd->data.paddr, 517 cmd->data.size, DMA_BIDIRECTIONAL); 518 kfree(cmd->data.vaddr); 519 kfree(cmd); 520 } 521 522 list_for_each_entry_safe(cmd_cache, tmp_cache, 523 &dp->reo_cmd_cache_flush_list, list) { 524 list_del(&cmd_cache->list); 525 dma_unmap_single(ab->dev, cmd_cache->data.paddr, 526 cmd_cache->data.size, DMA_BIDIRECTIONAL); 527 kfree(cmd_cache->data.vaddr); 528 kfree(cmd_cache); 529 } 530 spin_unlock_bh(&dp->reo_cmd_lock); 531 } 532 533 static void ath11k_dp_reo_cmd_free(struct ath11k_dp *dp, void *ctx, 534 enum hal_reo_cmd_status status) 535 { 536 struct dp_rx_tid *rx_tid = ctx; 537 538 if (status != HAL_REO_CMD_SUCCESS) 539 ath11k_warn(dp->ab, "failed to flush rx tid hw desc, tid %d status %d\n", 540 rx_tid->tid, status); 541 542 dma_unmap_single(dp->ab->dev, rx_tid->paddr, rx_tid->size, 543 DMA_BIDIRECTIONAL); 544 kfree(rx_tid->vaddr); 545 } 546 547 static void ath11k_dp_reo_cache_flush(struct ath11k_base *ab, 548 struct dp_rx_tid *rx_tid) 549 { 550 struct ath11k_hal_reo_cmd cmd = {0}; 551 unsigned long tot_desc_sz, desc_sz; 552 int ret; 553 554 tot_desc_sz = rx_tid->size; 555 desc_sz = ath11k_hal_reo_qdesc_size(0, HAL_DESC_REO_NON_QOS_TID); 556 557 while (tot_desc_sz > desc_sz) { 558 tot_desc_sz -= desc_sz; 559 cmd.addr_lo = lower_32_bits(rx_tid->paddr + tot_desc_sz); 560 cmd.addr_hi = upper_32_bits(rx_tid->paddr); 561 ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid, 562 HAL_REO_CMD_FLUSH_CACHE, &cmd, 563 NULL); 564 if (ret) 565 ath11k_warn(ab, 566 "failed to send 
HAL_REO_CMD_FLUSH_CACHE, tid %d (%d)\n", 567 rx_tid->tid, ret); 568 } 569 570 memset(&cmd, 0, sizeof(cmd)); 571 cmd.addr_lo = lower_32_bits(rx_tid->paddr); 572 cmd.addr_hi = upper_32_bits(rx_tid->paddr); 573 cmd.flag |= HAL_REO_CMD_FLG_NEED_STATUS; 574 ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid, 575 HAL_REO_CMD_FLUSH_CACHE, 576 &cmd, ath11k_dp_reo_cmd_free); 577 if (ret) { 578 ath11k_err(ab, "failed to send HAL_REO_CMD_FLUSH_CACHE cmd, tid %d (%d)\n", 579 rx_tid->tid, ret); 580 dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size, 581 DMA_BIDIRECTIONAL); 582 kfree(rx_tid->vaddr); 583 } 584 } 585 586 static void ath11k_dp_rx_tid_del_func(struct ath11k_dp *dp, void *ctx, 587 enum hal_reo_cmd_status status) 588 { 589 struct ath11k_base *ab = dp->ab; 590 struct dp_rx_tid *rx_tid = ctx; 591 struct dp_reo_cache_flush_elem *elem, *tmp; 592 593 if (status == HAL_REO_CMD_DRAIN) { 594 goto free_desc; 595 } else if (status != HAL_REO_CMD_SUCCESS) { 596 /* Shouldn't happen! Cleanup in case of other failure? */ 597 ath11k_warn(ab, "failed to delete rx tid %d hw descriptor %d\n", 598 rx_tid->tid, status); 599 return; 600 } 601 602 elem = kzalloc(sizeof(*elem), GFP_ATOMIC); 603 if (!elem) 604 goto free_desc; 605 606 elem->ts = jiffies; 607 memcpy(&elem->data, rx_tid, sizeof(*rx_tid)); 608 609 spin_lock_bh(&dp->reo_cmd_lock); 610 list_add_tail(&elem->list, &dp->reo_cmd_cache_flush_list); 611 spin_unlock_bh(&dp->reo_cmd_lock); 612 613 /* Flush and invalidate aged REO desc from HW cache */ 614 spin_lock_bh(&dp->reo_cmd_lock); 615 list_for_each_entry_safe(elem, tmp, &dp->reo_cmd_cache_flush_list, 616 list) { 617 if (time_after(jiffies, elem->ts + 618 msecs_to_jiffies(DP_REO_DESC_FREE_TIMEOUT_MS))) { 619 list_del(&elem->list); 620 spin_unlock_bh(&dp->reo_cmd_lock); 621 622 ath11k_dp_reo_cache_flush(ab, &elem->data); 623 kfree(elem); 624 spin_lock_bh(&dp->reo_cmd_lock); 625 } 626 } 627 spin_unlock_bh(&dp->reo_cmd_lock); 628 629 return; 630 free_desc: 631 dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size, 632 DMA_BIDIRECTIONAL); 633 kfree(rx_tid->vaddr); 634 } 635 636 static void ath11k_peer_rx_tid_delete(struct ath11k *ar, 637 struct ath11k_peer *peer, u8 tid) 638 { 639 struct ath11k_hal_reo_cmd cmd = {0}; 640 struct dp_rx_tid *rx_tid = &peer->rx_tid[tid]; 641 int ret; 642 643 if (!rx_tid->active) 644 return; 645 646 cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS; 647 cmd.addr_lo = lower_32_bits(rx_tid->paddr); 648 cmd.addr_hi = upper_32_bits(rx_tid->paddr); 649 cmd.upd0 |= HAL_REO_CMD_UPD0_VLD; 650 ret = ath11k_dp_tx_send_reo_cmd(ar->ab, rx_tid, 651 HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd, 652 ath11k_dp_rx_tid_del_func); 653 if (ret) { 654 ath11k_err(ar->ab, "failed to send HAL_REO_CMD_UPDATE_RX_QUEUE cmd, tid %d (%d)\n", 655 tid, ret); 656 dma_unmap_single(ar->ab->dev, rx_tid->paddr, rx_tid->size, 657 DMA_BIDIRECTIONAL); 658 kfree(rx_tid->vaddr); 659 } 660 661 rx_tid->active = false; 662 } 663 664 void ath11k_peer_rx_tid_cleanup(struct ath11k *ar, struct ath11k_peer *peer) 665 { 666 int i; 667 668 for (i = 0; i <= IEEE80211_NUM_TIDS; i++) 669 ath11k_peer_rx_tid_delete(ar, peer, i); 670 } 671 672 static int ath11k_peer_rx_tid_reo_update(struct ath11k *ar, 673 struct ath11k_peer *peer, 674 struct dp_rx_tid *rx_tid, 675 u32 ba_win_sz, u16 ssn, 676 bool update_ssn) 677 { 678 struct ath11k_hal_reo_cmd cmd = {0}; 679 int ret; 680 681 cmd.addr_lo = lower_32_bits(rx_tid->paddr); 682 cmd.addr_hi = upper_32_bits(rx_tid->paddr); 683 cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS; 684 cmd.upd0 = HAL_REO_CMD_UPD0_BA_WINDOW_SIZE; 685 
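/* UPDATE_RX_QUEUE rewrites the BA window size in the REO queue descriptor; the SSN is refreshed as well only when update_ssn is set below */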
cmd.ba_window_size = ba_win_sz; 686 687 if (update_ssn) { 688 cmd.upd0 |= HAL_REO_CMD_UPD0_SSN; 689 cmd.upd2 = FIELD_PREP(HAL_REO_CMD_UPD2_SSN, ssn); 690 } 691 692 ret = ath11k_dp_tx_send_reo_cmd(ar->ab, rx_tid, 693 HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd, 694 NULL); 695 if (ret) { 696 ath11k_warn(ar->ab, "failed to update rx tid queue, tid %d (%d)\n", 697 rx_tid->tid, ret); 698 return ret; 699 } 700 701 rx_tid->ba_win_sz = ba_win_sz; 702 703 return 0; 704 } 705 706 static void ath11k_dp_rx_tid_mem_free(struct ath11k_base *ab, 707 const u8 *peer_mac, int vdev_id, u8 tid) 708 { 709 struct ath11k_peer *peer; 710 struct dp_rx_tid *rx_tid; 711 712 spin_lock_bh(&ab->base_lock); 713 714 peer = ath11k_peer_find(ab, vdev_id, peer_mac); 715 if (!peer) { 716 ath11k_warn(ab, "failed to find the peer to free up rx tid mem\n"); 717 goto unlock_exit; 718 } 719 720 rx_tid = &peer->rx_tid[tid]; 721 if (!rx_tid->active) 722 goto unlock_exit; 723 724 dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size, 725 DMA_BIDIRECTIONAL); 726 kfree(rx_tid->vaddr); 727 728 rx_tid->active = false; 729 730 unlock_exit: 731 spin_unlock_bh(&ab->base_lock); 732 } 733 734 int ath11k_peer_rx_tid_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id, 735 u8 tid, u32 ba_win_sz, u16 ssn) 736 { 737 struct ath11k_base *ab = ar->ab; 738 struct ath11k_peer *peer; 739 struct dp_rx_tid *rx_tid; 740 u32 hw_desc_sz; 741 u32 *addr_aligned; 742 void *vaddr; 743 dma_addr_t paddr; 744 int ret; 745 746 spin_lock_bh(&ab->base_lock); 747 748 peer = ath11k_peer_find(ab, vdev_id, peer_mac); 749 if (!peer) { 750 ath11k_warn(ab, "failed to find the peer to set up rx tid\n"); 751 spin_unlock_bh(&ab->base_lock); 752 return -ENOENT; 753 } 754 755 rx_tid = &peer->rx_tid[tid]; 756 /* Update the tid queue if it is already setup */ 757 if (rx_tid->active) { 758 paddr = rx_tid->paddr; 759 ret = ath11k_peer_rx_tid_reo_update(ar, peer, rx_tid, 760 ba_win_sz, ssn, true); 761 spin_unlock_bh(&ab->base_lock); 762 if (ret) { 763 ath11k_warn(ab, "failed to update reo for rx tid %d\n", tid); 764 return ret; 765 } 766 767 ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, 768 peer_mac, paddr, 769 tid, 1, ba_win_sz); 770 if (ret) 771 ath11k_warn(ab, "failed to send wmi command to update rx reorder queue, tid :%d (%d)\n", 772 tid, ret); 773 return ret; 774 } 775 776 rx_tid->tid = tid; 777 778 rx_tid->ba_win_sz = ba_win_sz; 779 780 /* TODO: Optimize the memory allocation for qos tid based on the 781 * the actual BA window size in REO tid update path. 
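* For now a queue descriptor sized for DP_BA_WIN_SZ_MAX is allocated for every QoS tid.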
782 */ 783 if (tid == HAL_DESC_REO_NON_QOS_TID) 784 hw_desc_sz = ath11k_hal_reo_qdesc_size(ba_win_sz, tid); 785 else 786 hw_desc_sz = ath11k_hal_reo_qdesc_size(DP_BA_WIN_SZ_MAX, tid); 787 788 vaddr = kzalloc(hw_desc_sz + HAL_LINK_DESC_ALIGN - 1, GFP_KERNEL); 789 if (!vaddr) { 790 spin_unlock_bh(&ab->base_lock); 791 return -ENOMEM; 792 } 793 794 addr_aligned = PTR_ALIGN(vaddr, HAL_LINK_DESC_ALIGN); 795 796 ath11k_hal_reo_qdesc_setup(addr_aligned, tid, ba_win_sz, ssn); 797 798 paddr = dma_map_single(ab->dev, addr_aligned, hw_desc_sz, 799 DMA_BIDIRECTIONAL); 800 801 ret = dma_mapping_error(ab->dev, paddr); 802 if (ret) { 803 spin_unlock_bh(&ab->base_lock); 804 goto err_mem_free; 805 } 806 807 rx_tid->vaddr = vaddr; 808 rx_tid->paddr = paddr; 809 rx_tid->size = hw_desc_sz; 810 rx_tid->active = true; 811 812 spin_unlock_bh(&ab->base_lock); 813 814 ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, peer_mac, 815 paddr, tid, 1, ba_win_sz); 816 if (ret) { 817 ath11k_warn(ar->ab, "failed to setup rx reorder queue, tid :%d (%d)\n", 818 tid, ret); 819 ath11k_dp_rx_tid_mem_free(ab, peer_mac, vdev_id, tid); 820 } 821 822 return ret; 823 824 err_mem_free: 825 kfree(vaddr); 826 827 return ret; 828 } 829 830 int ath11k_dp_rx_ampdu_start(struct ath11k *ar, 831 struct ieee80211_ampdu_params *params) 832 { 833 struct ath11k_base *ab = ar->ab; 834 struct ath11k_sta *arsta = (void *)params->sta->drv_priv; 835 int vdev_id = arsta->arvif->vdev_id; 836 int ret; 837 838 ret = ath11k_peer_rx_tid_setup(ar, params->sta->addr, vdev_id, 839 params->tid, params->buf_size, 840 params->ssn); 841 if (ret) 842 ath11k_warn(ab, "failed to setup rx tid %d\n", ret); 843 844 return ret; 845 } 846 847 int ath11k_dp_rx_ampdu_stop(struct ath11k *ar, 848 struct ieee80211_ampdu_params *params) 849 { 850 struct ath11k_base *ab = ar->ab; 851 struct ath11k_peer *peer; 852 struct ath11k_sta *arsta = (void *)params->sta->drv_priv; 853 int vdev_id = arsta->arvif->vdev_id; 854 dma_addr_t paddr; 855 bool active; 856 int ret; 857 858 spin_lock_bh(&ab->base_lock); 859 860 peer = ath11k_peer_find(ab, vdev_id, params->sta->addr); 861 if (!peer) { 862 ath11k_warn(ab, "failed to find the peer to stop rx aggregation\n"); 863 spin_unlock_bh(&ab->base_lock); 864 return -ENOENT; 865 } 866 867 paddr = peer->rx_tid[params->tid].paddr; 868 active = peer->rx_tid[params->tid].active; 869 870 if (!active) { 871 spin_unlock_bh(&ab->base_lock); 872 return 0; 873 } 874 875 ret = ath11k_peer_rx_tid_reo_update(ar, peer, peer->rx_tid, 1, 0, false); 876 spin_unlock_bh(&ab->base_lock); 877 if (ret) { 878 ath11k_warn(ab, "failed to update reo for rx tid %d: %d\n", 879 params->tid, ret); 880 return ret; 881 } 882 883 ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, 884 params->sta->addr, paddr, 885 params->tid, 1, 1); 886 if (ret) 887 ath11k_warn(ab, "failed to send wmi to delete rx tid %d\n", 888 ret); 889 890 return ret; 891 } 892 893 static int ath11k_get_ppdu_user_index(struct htt_ppdu_stats *ppdu_stats, 894 u16 peer_id) 895 { 896 int i; 897 898 for (i = 0; i < HTT_PPDU_STATS_MAX_USERS - 1; i++) { 899 if (ppdu_stats->user_stats[i].is_valid_peer_id) { 900 if (peer_id == ppdu_stats->user_stats[i].peer_id) 901 return i; 902 } else { 903 return i; 904 } 905 } 906 907 return -EINVAL; 908 } 909 910 static int ath11k_htt_tlv_ppdu_stats_parse(struct ath11k_base *ab, 911 u16 tag, u16 len, const void *ptr, 912 void *data) 913 { 914 struct htt_ppdu_stats_info *ppdu_info; 915 struct htt_ppdu_user_stats *user_stats; 916 int cur_user; 917 u16 peer_id; 918 919 
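/* Each recognized TLV is length-checked against its struct size before being copied into ppdu_info; per-user TLVs are placed in a slot found (or newly claimed) by sw_peer_id via ath11k_get_ppdu_user_index() */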
ppdu_info = (struct htt_ppdu_stats_info *)data; 920 921 switch (tag) { 922 case HTT_PPDU_STATS_TAG_COMMON: 923 if (len < sizeof(struct htt_ppdu_stats_common)) { 924 ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n", 925 len, tag); 926 return -EINVAL; 927 } 928 memcpy((void *)&ppdu_info->ppdu_stats.common, ptr, 929 sizeof(struct htt_ppdu_stats_common)); 930 break; 931 case HTT_PPDU_STATS_TAG_USR_RATE: 932 if (len < sizeof(struct htt_ppdu_stats_user_rate)) { 933 ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n", 934 len, tag); 935 return -EINVAL; 936 } 937 938 peer_id = ((struct htt_ppdu_stats_user_rate *)ptr)->sw_peer_id; 939 cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats, 940 peer_id); 941 if (cur_user < 0) 942 return -EINVAL; 943 user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user]; 944 user_stats->peer_id = peer_id; 945 user_stats->is_valid_peer_id = true; 946 memcpy((void *)&user_stats->rate, ptr, 947 sizeof(struct htt_ppdu_stats_user_rate)); 948 user_stats->tlv_flags |= BIT(tag); 949 break; 950 case HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON: 951 if (len < sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn)) { 952 ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n", 953 len, tag); 954 return -EINVAL; 955 } 956 957 peer_id = ((struct htt_ppdu_stats_usr_cmpltn_cmn *)ptr)->sw_peer_id; 958 cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats, 959 peer_id); 960 if (cur_user < 0) 961 return -EINVAL; 962 user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user]; 963 user_stats->peer_id = peer_id; 964 user_stats->is_valid_peer_id = true; 965 memcpy((void *)&user_stats->cmpltn_cmn, ptr, 966 sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn)); 967 user_stats->tlv_flags |= BIT(tag); 968 break; 969 case HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS: 970 if (len < 971 sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status)) { 972 ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n", 973 len, tag); 974 return -EINVAL; 975 } 976 977 peer_id = 978 ((struct htt_ppdu_stats_usr_cmpltn_ack_ba_status *)ptr)->sw_peer_id; 979 cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats, 980 peer_id); 981 if (cur_user < 0) 982 return -EINVAL; 983 user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user]; 984 user_stats->peer_id = peer_id; 985 user_stats->is_valid_peer_id = true; 986 memcpy((void *)&user_stats->ack_ba, ptr, 987 sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status)); 988 user_stats->tlv_flags |= BIT(tag); 989 break; 990 } 991 return 0; 992 } 993 994 int ath11k_dp_htt_tlv_iter(struct ath11k_base *ab, const void *ptr, size_t len, 995 int (*iter)(struct ath11k_base *ar, u16 tag, u16 len, 996 const void *ptr, void *data), 997 void *data) 998 { 999 const struct htt_tlv *tlv; 1000 const void *begin = ptr; 1001 u16 tlv_tag, tlv_len; 1002 int ret = -EINVAL; 1003 1004 while (len > 0) { 1005 if (len < sizeof(*tlv)) { 1006 ath11k_err(ab, "htt tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n", 1007 ptr - begin, len, sizeof(*tlv)); 1008 return -EINVAL; 1009 } 1010 tlv = (struct htt_tlv *)ptr; 1011 tlv_tag = FIELD_GET(HTT_TLV_TAG, tlv->header); 1012 tlv_len = FIELD_GET(HTT_TLV_LEN, tlv->header); 1013 ptr += sizeof(*tlv); 1014 len -= sizeof(*tlv); 1015 1016 if (tlv_len > len) { 1017 ath11k_err(ab, "htt tlv parse failure of tag %hhu at byte %zd (%zu bytes left, %hhu expected)\n", 1018 tlv_tag, ptr - begin, len, tlv_len); 1019 return -EINVAL; 1020 } 1021 ret = iter(ab, tlv_tag, tlv_len, ptr, data); 1022 if (ret == -ENOMEM) 1023 return ret; 1024 1025 ptr += tlv_len; 1026 len -= tlv_len; 
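/* ptr/len have now been advanced past both the TLV header and its tlv_len bytes of payload; continue with the next TLV */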
1027 } 1028 return 0; 1029 } 1030 1031 static u32 ath11k_bw_to_mac80211_bwflags(u8 bw) 1032 { 1033 u32 bwflags = 0; 1034 1035 switch (bw) { 1036 case ATH11K_BW_40: 1037 bwflags = IEEE80211_TX_RC_40_MHZ_WIDTH; 1038 break; 1039 case ATH11K_BW_80: 1040 bwflags = IEEE80211_TX_RC_80_MHZ_WIDTH; 1041 break; 1042 case ATH11K_BW_160: 1043 bwflags = IEEE80211_TX_RC_160_MHZ_WIDTH; 1044 break; 1045 } 1046 1047 return bwflags; 1048 } 1049 1050 static void 1051 ath11k_update_per_peer_tx_stats(struct ath11k *ar, 1052 struct htt_ppdu_stats *ppdu_stats, u8 user) 1053 { 1054 struct ath11k_base *ab = ar->ab; 1055 struct ath11k_peer *peer; 1056 struct ieee80211_sta *sta; 1057 struct ath11k_sta *arsta; 1058 struct htt_ppdu_stats_user_rate *user_rate; 1059 struct ieee80211_chanctx_conf *conf = NULL; 1060 struct ath11k_per_peer_tx_stats *peer_stats = &ar->peer_tx_stats; 1061 struct htt_ppdu_user_stats *usr_stats = &ppdu_stats->user_stats[user]; 1062 struct htt_ppdu_stats_common *common = &ppdu_stats->common; 1063 int ret; 1064 u8 flags, mcs, nss, bw, sgi, rate_idx = 0; 1065 u32 succ_bytes = 0; 1066 u16 rate = 0, succ_pkts = 0; 1067 u32 tx_duration = 0; 1068 u8 tid = HTT_PPDU_STATS_NON_QOS_TID; 1069 bool is_ampdu = false; 1070 1071 if (!usr_stats) 1072 return; 1073 1074 if (!(usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_RATE))) 1075 return; 1076 1077 if (usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON)) 1078 is_ampdu = 1079 HTT_USR_CMPLTN_IS_AMPDU(usr_stats->cmpltn_cmn.flags); 1080 1081 if (usr_stats->tlv_flags & 1082 BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS)) { 1083 succ_bytes = usr_stats->ack_ba.success_bytes; 1084 succ_pkts = FIELD_GET(HTT_PPDU_STATS_ACK_BA_INFO_NUM_MSDU_M, 1085 usr_stats->ack_ba.info); 1086 tid = FIELD_GET(HTT_PPDU_STATS_ACK_BA_INFO_TID_NUM, 1087 usr_stats->ack_ba.info); 1088 } 1089 1090 if (common->fes_duration_us) 1091 tx_duration = common->fes_duration_us; 1092 1093 user_rate = &usr_stats->rate; 1094 flags = HTT_USR_RATE_PREAMBLE(user_rate->rate_flags); 1095 bw = HTT_USR_RATE_BW(user_rate->rate_flags) - 2; 1096 nss = HTT_USR_RATE_NSS(user_rate->rate_flags) + 1; 1097 mcs = HTT_USR_RATE_MCS(user_rate->rate_flags); 1098 sgi = HTT_USR_RATE_GI(user_rate->rate_flags); 1099 1100 /* Note: If host configured fixed rates and in some other special 1101 * cases, the broadcast/management frames are sent in different rates. 1102 * Firmware rate's control to be skipped for this? 
1103 */ 1104 1105 if (flags == WMI_RATE_PREAMBLE_VHT && mcs > 9) { 1106 ath11k_warn(ab, "Invalid VHT mcs %hhd peer stats", mcs); 1107 return; 1108 } 1109 1110 if (flags == WMI_RATE_PREAMBLE_HT && (mcs > 7 || nss < 1)) { 1111 ath11k_warn(ab, "Invalid HT mcs %hhd nss %hhd peer stats", 1112 mcs, nss); 1113 return; 1114 } 1115 1116 if (flags == WMI_RATE_PREAMBLE_CCK || flags == WMI_RATE_PREAMBLE_OFDM) { 1117 ret = ath11k_mac_hw_ratecode_to_legacy_rate(mcs, 1118 flags, 1119 &rate_idx, 1120 &rate); 1121 if (ret < 0) 1122 return; 1123 } 1124 1125 rcu_read_lock(); 1126 spin_lock_bh(&ab->base_lock); 1127 peer = ath11k_peer_find_by_id(ab, usr_stats->peer_id); 1128 1129 if (!peer || !peer->sta) { 1130 spin_unlock_bh(&ab->base_lock); 1131 rcu_read_unlock(); 1132 return; 1133 } 1134 1135 sta = peer->sta; 1136 arsta = (struct ath11k_sta *)sta->drv_priv; 1137 1138 memset(&arsta->txrate, 0, sizeof(arsta->txrate)); 1139 memset(&arsta->tx_info.status, 0, sizeof(arsta->tx_info.status)); 1140 1141 switch (flags) { 1142 case WMI_RATE_PREAMBLE_OFDM: 1143 arsta->txrate.legacy = rate; 1144 if (arsta->arvif && arsta->arvif->vif) 1145 conf = rcu_dereference(arsta->arvif->vif->chanctx_conf); 1146 if (conf && conf->def.chan->band == NL80211_BAND_5GHZ) 1147 arsta->tx_info.status.rates[0].idx = rate_idx - 4; 1148 break; 1149 case WMI_RATE_PREAMBLE_CCK: 1150 arsta->txrate.legacy = rate; 1151 arsta->tx_info.status.rates[0].idx = rate_idx; 1152 if (mcs > ATH11K_HW_RATE_CCK_LP_1M && 1153 mcs <= ATH11K_HW_RATE_CCK_SP_2M) 1154 arsta->tx_info.status.rates[0].flags |= 1155 IEEE80211_TX_RC_USE_SHORT_PREAMBLE; 1156 break; 1157 case WMI_RATE_PREAMBLE_HT: 1158 arsta->txrate.mcs = mcs + 8 * (nss - 1); 1159 arsta->tx_info.status.rates[0].idx = arsta->txrate.mcs; 1160 arsta->txrate.flags = RATE_INFO_FLAGS_MCS; 1161 arsta->tx_info.status.rates[0].flags |= IEEE80211_TX_RC_MCS; 1162 if (sgi) { 1163 arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI; 1164 arsta->tx_info.status.rates[0].flags |= 1165 IEEE80211_TX_RC_SHORT_GI; 1166 } 1167 break; 1168 case WMI_RATE_PREAMBLE_VHT: 1169 arsta->txrate.mcs = mcs; 1170 ieee80211_rate_set_vht(&arsta->tx_info.status.rates[0], mcs, nss); 1171 arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS; 1172 arsta->tx_info.status.rates[0].flags |= IEEE80211_TX_RC_VHT_MCS; 1173 if (sgi) { 1174 arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI; 1175 arsta->tx_info.status.rates[0].flags |= 1176 IEEE80211_TX_RC_SHORT_GI; 1177 } 1178 break; 1179 } 1180 1181 arsta->txrate.nss = nss; 1182 arsta->txrate.bw = ath11k_mac_bw_to_mac80211_bw(bw); 1183 arsta->tx_info.status.rates[0].flags |= ath11k_bw_to_mac80211_bwflags(bw); 1184 arsta->tx_duration += tx_duration; 1185 memcpy(&arsta->last_txrate, &arsta->txrate, sizeof(struct rate_info)); 1186 1187 if (succ_pkts) { 1188 arsta->tx_info.flags = IEEE80211_TX_STAT_ACK; 1189 arsta->tx_info.status.rates[0].count = 1; 1190 ieee80211_tx_rate_update(ar->hw, sta, &arsta->tx_info); 1191 } 1192 1193 /* PPDU stats reported for mgmt packet doesn't have valid tx bytes. 1194 * So skip peer stats update for mgmt packets. 
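* Such frames are expected to keep the default tid of HTT_PPDU_STATS_NON_QOS_TID, which is what the tid check below filters on.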
1195 */ 1196 if (tid < HTT_PPDU_STATS_NON_QOS_TID) { 1197 memset(peer_stats, 0, sizeof(*peer_stats)); 1198 peer_stats->succ_pkts = succ_pkts; 1199 peer_stats->succ_bytes = succ_bytes; 1200 peer_stats->is_ampdu = is_ampdu; 1201 peer_stats->duration = tx_duration; 1202 peer_stats->ba_fails = 1203 HTT_USR_CMPLTN_LONG_RETRY(usr_stats->cmpltn_cmn.flags) + 1204 HTT_USR_CMPLTN_SHORT_RETRY(usr_stats->cmpltn_cmn.flags); 1205 1206 if (ath11k_debug_is_extd_tx_stats_enabled(ar)) 1207 ath11k_accumulate_per_peer_tx_stats(arsta, 1208 peer_stats, rate_idx); 1209 } 1210 1211 spin_unlock_bh(&ab->base_lock); 1212 rcu_read_unlock(); 1213 } 1214 1215 static void ath11k_htt_update_ppdu_stats(struct ath11k *ar, 1216 struct htt_ppdu_stats *ppdu_stats) 1217 { 1218 u8 user; 1219 1220 for (user = 0; user < HTT_PPDU_STATS_MAX_USERS - 1; user++) 1221 ath11k_update_per_peer_tx_stats(ar, ppdu_stats, user); 1222 } 1223 1224 static 1225 struct htt_ppdu_stats_info *ath11k_dp_htt_get_ppdu_desc(struct ath11k *ar, 1226 u32 ppdu_id) 1227 { 1228 struct htt_ppdu_stats_info *ppdu_info = NULL; 1229 1230 spin_lock_bh(&ar->data_lock); 1231 if (!list_empty(&ar->ppdu_stats_info)) { 1232 list_for_each_entry(ppdu_info, &ar->ppdu_stats_info, list) { 1233 if (ppdu_info && ppdu_info->ppdu_id == ppdu_id) { 1234 spin_unlock_bh(&ar->data_lock); 1235 return ppdu_info; 1236 } 1237 } 1238 1239 if (ar->ppdu_stat_list_depth > HTT_PPDU_DESC_MAX_DEPTH) { 1240 ppdu_info = list_first_entry(&ar->ppdu_stats_info, 1241 typeof(*ppdu_info), list); 1242 list_del(&ppdu_info->list); 1243 ar->ppdu_stat_list_depth--; 1244 ath11k_htt_update_ppdu_stats(ar, &ppdu_info->ppdu_stats); 1245 kfree(ppdu_info); 1246 } 1247 } 1248 spin_unlock_bh(&ar->data_lock); 1249 1250 ppdu_info = kzalloc(sizeof(*ppdu_info), GFP_KERNEL); 1251 if (!ppdu_info) 1252 return NULL; 1253 1254 spin_lock_bh(&ar->data_lock); 1255 list_add_tail(&ppdu_info->list, &ar->ppdu_stats_info); 1256 ar->ppdu_stat_list_depth++; 1257 spin_unlock_bh(&ar->data_lock); 1258 1259 return ppdu_info; 1260 } 1261 1262 static int ath11k_htt_pull_ppdu_stats(struct ath11k_base *ab, 1263 struct sk_buff *skb) 1264 { 1265 struct ath11k_htt_ppdu_stats_msg *msg; 1266 struct htt_ppdu_stats_info *ppdu_info; 1267 struct ath11k *ar; 1268 int ret; 1269 u8 pdev_id; 1270 u32 ppdu_id, len; 1271 1272 msg = (struct ath11k_htt_ppdu_stats_msg *)skb->data; 1273 len = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PAYLOAD_SIZE, msg->info); 1274 pdev_id = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PDEV_ID, msg->info); 1275 ppdu_id = msg->ppdu_id; 1276 1277 rcu_read_lock(); 1278 ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id); 1279 if (!ar) { 1280 ret = -EINVAL; 1281 goto exit; 1282 } 1283 1284 if (ath11k_debug_is_pktlog_lite_mode_enabled(ar)) 1285 trace_ath11k_htt_ppdu_stats(ar, skb->data, len); 1286 1287 ppdu_info = ath11k_dp_htt_get_ppdu_desc(ar, ppdu_id); 1288 if (!ppdu_info) { 1289 ret = -EINVAL; 1290 goto exit; 1291 } 1292 1293 ppdu_info->ppdu_id = ppdu_id; 1294 ret = ath11k_dp_htt_tlv_iter(ab, msg->data, len, 1295 ath11k_htt_tlv_ppdu_stats_parse, 1296 (void *)ppdu_info); 1297 if (ret) { 1298 ath11k_warn(ab, "Failed to parse tlv %d\n", ret); 1299 goto exit; 1300 } 1301 1302 exit: 1303 rcu_read_unlock(); 1304 1305 return ret; 1306 } 1307 1308 static void ath11k_htt_pktlog(struct ath11k_base *ab, struct sk_buff *skb) 1309 { 1310 struct htt_pktlog_msg *data = (struct htt_pktlog_msg *)skb->data; 1311 struct ath11k *ar; 1312 u32 len; 1313 u8 pdev_id; 1314 1315 len = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PAYLOAD_SIZE, data->hdr); 1316 if (len > 
ATH11K_HTT_PKTLOG_MAX_SIZE) { 1317 ath11k_warn(ab, "htt pktlog buffer size %d, expected < %d\n", 1318 len, 1319 ATH11K_HTT_PKTLOG_MAX_SIZE); 1320 return; 1321 } 1322 1323 pdev_id = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PDEV_ID, data->hdr); 1324 ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id); 1325 if (!ar) { 1326 ath11k_warn(ab, "invalid pdev id %d on htt pktlog\n", pdev_id); 1327 return; 1328 } 1329 1330 trace_ath11k_htt_pktlog(ar, data->payload, len); 1331 } 1332 1333 void ath11k_dp_htt_htc_t2h_msg_handler(struct ath11k_base *ab, 1334 struct sk_buff *skb) 1335 { 1336 struct ath11k_dp *dp = &ab->dp; 1337 struct htt_resp_msg *resp = (struct htt_resp_msg *)skb->data; 1338 enum htt_t2h_msg_type type = FIELD_GET(HTT_T2H_MSG_TYPE, *(u32 *)resp); 1339 u16 peer_id; 1340 u8 vdev_id; 1341 u8 mac_addr[ETH_ALEN]; 1342 u16 peer_mac_h16; 1343 u16 ast_hash; 1344 1345 ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "dp_htt rx msg type :0x%0x\n", type); 1346 1347 switch (type) { 1348 case HTT_T2H_MSG_TYPE_VERSION_CONF: 1349 dp->htt_tgt_ver_major = FIELD_GET(HTT_T2H_VERSION_CONF_MAJOR, 1350 resp->version_msg.version); 1351 dp->htt_tgt_ver_minor = FIELD_GET(HTT_T2H_VERSION_CONF_MINOR, 1352 resp->version_msg.version); 1353 complete(&dp->htt_tgt_version_received); 1354 break; 1355 case HTT_T2H_MSG_TYPE_PEER_MAP: 1356 vdev_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_VDEV_ID, 1357 resp->peer_map_ev.info); 1358 peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_PEER_ID, 1359 resp->peer_map_ev.info); 1360 peer_mac_h16 = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16, 1361 resp->peer_map_ev.info1); 1362 ath11k_dp_get_mac_addr(resp->peer_map_ev.mac_addr_l32, 1363 peer_mac_h16, mac_addr); 1364 ast_hash = FIELD_GET(HTT_T2H_PEER_MAP_INFO2_AST_HASH_VAL, 1365 resp->peer_map_ev.info2); 1366 ath11k_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash); 1367 break; 1368 case HTT_T2H_MSG_TYPE_PEER_UNMAP: 1369 peer_id = FIELD_GET(HTT_T2H_PEER_UNMAP_INFO_PEER_ID, 1370 resp->peer_unmap_ev.info); 1371 ath11k_peer_unmap_event(ab, peer_id); 1372 break; 1373 case HTT_T2H_MSG_TYPE_PPDU_STATS_IND: 1374 ath11k_htt_pull_ppdu_stats(ab, skb); 1375 break; 1376 case HTT_T2H_MSG_TYPE_EXT_STATS_CONF: 1377 ath11k_dbg_htt_ext_stats_handler(ab, skb); 1378 break; 1379 case HTT_T2H_MSG_TYPE_PKTLOG: 1380 ath11k_htt_pktlog(ab, skb); 1381 break; 1382 default: 1383 ath11k_warn(ab, "htt event %d not handled\n", type); 1384 break; 1385 } 1386 1387 dev_kfree_skb_any(skb); 1388 } 1389 1390 static int ath11k_dp_rx_msdu_coalesce(struct ath11k *ar, 1391 struct sk_buff_head *msdu_list, 1392 struct sk_buff *first, struct sk_buff *last, 1393 u8 l3pad_bytes, int msdu_len) 1394 { 1395 struct sk_buff *skb; 1396 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(first); 1397 int buf_first_hdr_len, buf_first_len; 1398 struct hal_rx_desc *ldesc; 1399 int space_extra; 1400 int rem_len; 1401 int buf_len; 1402 1403 /* As the msdu is spread across multiple rx buffers, 1404 * find the offset to the start of msdu for computing 1405 * the length of the msdu in the first buffer. 
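* In the first buffer, HAL_RX_DESC_SIZE bytes of descriptor and l3pad_bytes of padding precede the payload.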
1406 */ 1407 buf_first_hdr_len = HAL_RX_DESC_SIZE + l3pad_bytes; 1408 buf_first_len = DP_RX_BUFFER_SIZE - buf_first_hdr_len; 1409 1410 if (WARN_ON_ONCE(msdu_len <= buf_first_len)) { 1411 skb_put(first, buf_first_hdr_len + msdu_len); 1412 skb_pull(first, buf_first_hdr_len); 1413 return 0; 1414 } 1415 1416 ldesc = (struct hal_rx_desc *)last->data; 1417 rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(ldesc); 1418 rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(ldesc); 1419 1420 /* MSDU spans over multiple buffers because the length of the MSDU 1421 * exceeds DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE. So assume the data 1422 * in the first buf is of length DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE. 1423 */ 1424 skb_put(first, DP_RX_BUFFER_SIZE); 1425 skb_pull(first, buf_first_hdr_len); 1426 1427 /* When an MSDU spread over multiple buffers attention, MSDU_END and 1428 * MPDU_END tlvs are valid only in the last buffer. Copy those tlvs. 1429 */ 1430 ath11k_dp_rx_desc_end_tlv_copy(rxcb->rx_desc, ldesc); 1431 1432 space_extra = msdu_len - (buf_first_len + skb_tailroom(first)); 1433 if (space_extra > 0 && 1434 (pskb_expand_head(first, 0, space_extra, GFP_ATOMIC) < 0)) { 1435 /* Free up all buffers of the MSDU */ 1436 while ((skb = __skb_dequeue(msdu_list)) != NULL) { 1437 rxcb = ATH11K_SKB_RXCB(skb); 1438 if (!rxcb->is_continuation) { 1439 dev_kfree_skb_any(skb); 1440 break; 1441 } 1442 dev_kfree_skb_any(skb); 1443 } 1444 return -ENOMEM; 1445 } 1446 1447 rem_len = msdu_len - buf_first_len; 1448 while ((skb = __skb_dequeue(msdu_list)) != NULL && rem_len > 0) { 1449 rxcb = ATH11K_SKB_RXCB(skb); 1450 if (rxcb->is_continuation) 1451 buf_len = DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE; 1452 else 1453 buf_len = rem_len; 1454 1455 if (buf_len > (DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE)) { 1456 WARN_ON_ONCE(1); 1457 dev_kfree_skb_any(skb); 1458 return -EINVAL; 1459 } 1460 1461 skb_put(skb, buf_len + HAL_RX_DESC_SIZE); 1462 skb_pull(skb, HAL_RX_DESC_SIZE); 1463 skb_copy_from_linear_data(skb, skb_put(first, buf_len), 1464 buf_len); 1465 dev_kfree_skb_any(skb); 1466 1467 rem_len -= buf_len; 1468 if (!rxcb->is_continuation) 1469 break; 1470 } 1471 1472 return 0; 1473 } 1474 1475 static struct sk_buff *ath11k_dp_rx_get_msdu_last_buf(struct sk_buff_head *msdu_list, 1476 struct sk_buff *first) 1477 { 1478 struct sk_buff *skb; 1479 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(first); 1480 1481 if (!rxcb->is_continuation) 1482 return first; 1483 1484 skb_queue_walk(msdu_list, skb) { 1485 rxcb = ATH11K_SKB_RXCB(skb); 1486 if (!rxcb->is_continuation) 1487 return skb; 1488 } 1489 1490 return NULL; 1491 } 1492 1493 static int ath11k_dp_rx_retrieve_amsdu(struct ath11k *ar, 1494 struct sk_buff_head *msdu_list, 1495 struct sk_buff_head *amsdu_list) 1496 { 1497 struct sk_buff *msdu = skb_peek(msdu_list); 1498 struct sk_buff *last_buf; 1499 struct ath11k_skb_rxcb *rxcb; 1500 struct ieee80211_hdr *hdr; 1501 struct hal_rx_desc *rx_desc, *lrx_desc; 1502 u16 msdu_len; 1503 u8 l3_pad_bytes; 1504 u8 *hdr_status; 1505 int ret; 1506 1507 if (!msdu) 1508 return -ENOENT; 1509 1510 rx_desc = (struct hal_rx_desc *)msdu->data; 1511 hdr_status = ath11k_dp_rx_h_80211_hdr(rx_desc); 1512 hdr = (struct ieee80211_hdr *)hdr_status; 1513 /* Process only data frames */ 1514 if (!ieee80211_is_data(hdr->frame_control)) { 1515 __skb_unlink(msdu, msdu_list); 1516 dev_kfree_skb_any(msdu); 1517 return -EINVAL; 1518 } 1519 1520 do { 1521 __skb_unlink(msdu, msdu_list); 1522 last_buf = ath11k_dp_rx_get_msdu_last_buf(msdu_list, msdu); 1523 if (!last_buf) 
{ 1524 ath11k_warn(ar->ab, 1525 "No valid Rx buffer to access Atten/MSDU_END/MPDU_END tlvs\n"); 1526 ret = -EIO; 1527 goto free_out; 1528 } 1529 1530 rx_desc = (struct hal_rx_desc *)msdu->data; 1531 lrx_desc = (struct hal_rx_desc *)last_buf->data; 1532 1533 if (!ath11k_dp_rx_h_attn_msdu_done(lrx_desc)) { 1534 ath11k_warn(ar->ab, "msdu_done bit in attention is not set\n"); 1535 ret = -EIO; 1536 goto free_out; 1537 } 1538 1539 rxcb = ATH11K_SKB_RXCB(msdu); 1540 rxcb->rx_desc = rx_desc; 1541 msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(rx_desc); 1542 l3_pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(lrx_desc); 1543 1544 if (!rxcb->is_continuation) { 1545 skb_put(msdu, HAL_RX_DESC_SIZE + l3_pad_bytes + msdu_len); 1546 skb_pull(msdu, HAL_RX_DESC_SIZE + l3_pad_bytes); 1547 } else { 1548 ret = ath11k_dp_rx_msdu_coalesce(ar, msdu_list, 1549 msdu, last_buf, 1550 l3_pad_bytes, msdu_len); 1551 if (ret) { 1552 ath11k_warn(ar->ab, 1553 "failed to coalesce msdu rx buffer%d\n", ret); 1554 goto free_out; 1555 } 1556 } 1557 __skb_queue_tail(amsdu_list, msdu); 1558 1559 /* Should we also consider msdu_cnt from mpdu_meta while 1560 * preparing amsdu list? 1561 */ 1562 if (rxcb->is_last_msdu) 1563 break; 1564 } while ((msdu = skb_peek(msdu_list)) != NULL); 1565 1566 return 0; 1567 1568 free_out: 1569 dev_kfree_skb_any(msdu); 1570 __skb_queue_purge(amsdu_list); 1571 1572 return ret; 1573 } 1574 1575 static void ath11k_dp_rx_h_csum_offload(struct sk_buff *msdu) 1576 { 1577 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); 1578 bool ip_csum_fail, l4_csum_fail; 1579 1580 ip_csum_fail = ath11k_dp_rx_h_attn_ip_cksum_fail(rxcb->rx_desc); 1581 l4_csum_fail = ath11k_dp_rx_h_attn_l4_cksum_fail(rxcb->rx_desc); 1582 1583 msdu->ip_summed = (ip_csum_fail || l4_csum_fail) ? 1584 CHECKSUM_NONE : CHECKSUM_UNNECESSARY; 1585 } 1586 1587 static int ath11k_dp_rx_crypto_mic_len(struct ath11k *ar, 1588 enum hal_encrypt_type enctype) 1589 { 1590 switch (enctype) { 1591 case HAL_ENCRYPT_TYPE_OPEN: 1592 case HAL_ENCRYPT_TYPE_TKIP_NO_MIC: 1593 case HAL_ENCRYPT_TYPE_TKIP_MIC: 1594 return 0; 1595 case HAL_ENCRYPT_TYPE_CCMP_128: 1596 return IEEE80211_CCMP_MIC_LEN; 1597 case HAL_ENCRYPT_TYPE_CCMP_256: 1598 return IEEE80211_CCMP_256_MIC_LEN; 1599 case HAL_ENCRYPT_TYPE_GCMP_128: 1600 case HAL_ENCRYPT_TYPE_AES_GCMP_256: 1601 return IEEE80211_GCMP_MIC_LEN; 1602 case HAL_ENCRYPT_TYPE_WEP_40: 1603 case HAL_ENCRYPT_TYPE_WEP_104: 1604 case HAL_ENCRYPT_TYPE_WEP_128: 1605 case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4: 1606 case HAL_ENCRYPT_TYPE_WAPI: 1607 break; 1608 } 1609 1610 ath11k_warn(ar->ab, "unsupported encryption type %d for mic len\n", enctype); 1611 return 0; 1612 } 1613 1614 static int ath11k_dp_rx_crypto_param_len(struct ath11k *ar, 1615 enum hal_encrypt_type enctype) 1616 { 1617 switch (enctype) { 1618 case HAL_ENCRYPT_TYPE_OPEN: 1619 return 0; 1620 case HAL_ENCRYPT_TYPE_TKIP_NO_MIC: 1621 case HAL_ENCRYPT_TYPE_TKIP_MIC: 1622 return IEEE80211_TKIP_IV_LEN; 1623 case HAL_ENCRYPT_TYPE_CCMP_128: 1624 return IEEE80211_CCMP_HDR_LEN; 1625 case HAL_ENCRYPT_TYPE_CCMP_256: 1626 return IEEE80211_CCMP_256_HDR_LEN; 1627 case HAL_ENCRYPT_TYPE_GCMP_128: 1628 case HAL_ENCRYPT_TYPE_AES_GCMP_256: 1629 return IEEE80211_GCMP_HDR_LEN; 1630 case HAL_ENCRYPT_TYPE_WEP_40: 1631 case HAL_ENCRYPT_TYPE_WEP_104: 1632 case HAL_ENCRYPT_TYPE_WEP_128: 1633 case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4: 1634 case HAL_ENCRYPT_TYPE_WAPI: 1635 break; 1636 } 1637 1638 ath11k_warn(ar->ab, "unsupported encryption type %d\n", enctype); 1639 return 0; 1640 } 1641 1642 static int 
ath11k_dp_rx_crypto_icv_len(struct ath11k *ar, 1643 enum hal_encrypt_type enctype) 1644 { 1645 switch (enctype) { 1646 case HAL_ENCRYPT_TYPE_OPEN: 1647 case HAL_ENCRYPT_TYPE_CCMP_128: 1648 case HAL_ENCRYPT_TYPE_CCMP_256: 1649 case HAL_ENCRYPT_TYPE_GCMP_128: 1650 case HAL_ENCRYPT_TYPE_AES_GCMP_256: 1651 return 0; 1652 case HAL_ENCRYPT_TYPE_TKIP_NO_MIC: 1653 case HAL_ENCRYPT_TYPE_TKIP_MIC: 1654 return IEEE80211_TKIP_ICV_LEN; 1655 case HAL_ENCRYPT_TYPE_WEP_40: 1656 case HAL_ENCRYPT_TYPE_WEP_104: 1657 case HAL_ENCRYPT_TYPE_WEP_128: 1658 case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4: 1659 case HAL_ENCRYPT_TYPE_WAPI: 1660 break; 1661 } 1662 1663 ath11k_warn(ar->ab, "unsupported encryption type %d\n", enctype); 1664 return 0; 1665 } 1666 1667 static void ath11k_dp_rx_h_undecap_nwifi(struct ath11k *ar, 1668 struct sk_buff *msdu, 1669 u8 *first_hdr, 1670 enum hal_encrypt_type enctype, 1671 struct ieee80211_rx_status *status) 1672 { 1673 struct ieee80211_hdr *hdr; 1674 size_t hdr_len; 1675 u8 da[ETH_ALEN]; 1676 u8 sa[ETH_ALEN]; 1677 1678 /* pull decapped header and copy SA & DA */ 1679 hdr = (struct ieee80211_hdr *)msdu->data; 1680 ether_addr_copy(da, ieee80211_get_DA(hdr)); 1681 ether_addr_copy(sa, ieee80211_get_SA(hdr)); 1682 skb_pull(msdu, ieee80211_hdrlen(hdr->frame_control)); 1683 1684 /* push original 802.11 header */ 1685 hdr = (struct ieee80211_hdr *)first_hdr; 1686 hdr_len = ieee80211_hdrlen(hdr->frame_control); 1687 1688 if (!(status->flag & RX_FLAG_IV_STRIPPED)) { 1689 memcpy(skb_push(msdu, 1690 ath11k_dp_rx_crypto_param_len(ar, enctype)), 1691 (void *)hdr + hdr_len, 1692 ath11k_dp_rx_crypto_param_len(ar, enctype)); 1693 } 1694 1695 memcpy(skb_push(msdu, hdr_len), hdr, hdr_len); 1696 1697 /* original 802.11 header has a different DA and in 1698 * case of 4addr it may also have different SA 1699 */ 1700 hdr = (struct ieee80211_hdr *)msdu->data; 1701 ether_addr_copy(ieee80211_get_DA(hdr), da); 1702 ether_addr_copy(ieee80211_get_SA(hdr), sa); 1703 } 1704 1705 static void ath11k_dp_rx_h_undecap_raw(struct ath11k *ar, struct sk_buff *msdu, 1706 enum hal_encrypt_type enctype, 1707 struct ieee80211_rx_status *status, 1708 bool decrypted) 1709 { 1710 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); 1711 struct ieee80211_hdr *hdr; 1712 size_t hdr_len; 1713 size_t crypto_len; 1714 1715 if (!rxcb->is_first_msdu || 1716 !(rxcb->is_first_msdu && rxcb->is_last_msdu)) { 1717 WARN_ON_ONCE(1); 1718 return; 1719 } 1720 1721 skb_trim(msdu, msdu->len - FCS_LEN); 1722 1723 if (!decrypted) 1724 return; 1725 1726 hdr = (void *)msdu->data; 1727 1728 /* Tail */ 1729 if (status->flag & RX_FLAG_IV_STRIPPED) { 1730 skb_trim(msdu, msdu->len - 1731 ath11k_dp_rx_crypto_mic_len(ar, enctype)); 1732 1733 skb_trim(msdu, msdu->len - 1734 ath11k_dp_rx_crypto_icv_len(ar, enctype)); 1735 } else { 1736 /* MIC */ 1737 if (status->flag & RX_FLAG_MIC_STRIPPED) 1738 skb_trim(msdu, msdu->len - 1739 ath11k_dp_rx_crypto_mic_len(ar, enctype)); 1740 1741 /* ICV */ 1742 if (status->flag & RX_FLAG_ICV_STRIPPED) 1743 skb_trim(msdu, msdu->len - 1744 ath11k_dp_rx_crypto_icv_len(ar, enctype)); 1745 } 1746 1747 /* MMIC */ 1748 if ((status->flag & RX_FLAG_MMIC_STRIPPED) && 1749 !ieee80211_has_morefrags(hdr->frame_control) && 1750 enctype == HAL_ENCRYPT_TYPE_TKIP_MIC) 1751 skb_trim(msdu, msdu->len - IEEE80211_CCMP_MIC_LEN); 1752 1753 /* Head */ 1754 if (status->flag & RX_FLAG_IV_STRIPPED) { 1755 hdr_len = ieee80211_hdrlen(hdr->frame_control); 1756 crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype); 1757 1758 memmove((void *)msdu->data + 
crypto_len, 1759 (void *)msdu->data, hdr_len); 1760 skb_pull(msdu, crypto_len); 1761 } 1762 } 1763 1764 static void *ath11k_dp_rx_h_find_rfc1042(struct ath11k *ar, 1765 struct sk_buff *msdu, 1766 enum hal_encrypt_type enctype) 1767 { 1768 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); 1769 struct ieee80211_hdr *hdr; 1770 size_t hdr_len, crypto_len; 1771 void *rfc1042; 1772 bool is_amsdu; 1773 1774 is_amsdu = !(rxcb->is_first_msdu && rxcb->is_last_msdu); 1775 hdr = (struct ieee80211_hdr *)ath11k_dp_rx_h_80211_hdr(rxcb->rx_desc); 1776 rfc1042 = hdr; 1777 1778 if (rxcb->is_first_msdu) { 1779 hdr_len = ieee80211_hdrlen(hdr->frame_control); 1780 crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype); 1781 1782 rfc1042 += hdr_len + crypto_len; 1783 } 1784 1785 if (is_amsdu) 1786 rfc1042 += sizeof(struct ath11k_dp_amsdu_subframe_hdr); 1787 1788 return rfc1042; 1789 } 1790 1791 static void ath11k_dp_rx_h_undecap_eth(struct ath11k *ar, 1792 struct sk_buff *msdu, 1793 u8 *first_hdr, 1794 enum hal_encrypt_type enctype, 1795 struct ieee80211_rx_status *status) 1796 { 1797 struct ieee80211_hdr *hdr; 1798 struct ethhdr *eth; 1799 size_t hdr_len; 1800 u8 da[ETH_ALEN]; 1801 u8 sa[ETH_ALEN]; 1802 void *rfc1042; 1803 1804 rfc1042 = ath11k_dp_rx_h_find_rfc1042(ar, msdu, enctype); 1805 if (WARN_ON_ONCE(!rfc1042)) 1806 return; 1807 1808 /* pull decapped header and copy SA & DA */ 1809 eth = (struct ethhdr *)msdu->data; 1810 ether_addr_copy(da, eth->h_dest); 1811 ether_addr_copy(sa, eth->h_source); 1812 skb_pull(msdu, sizeof(struct ethhdr)); 1813 1814 /* push rfc1042/llc/snap */ 1815 memcpy(skb_push(msdu, sizeof(struct ath11k_dp_rfc1042_hdr)), rfc1042, 1816 sizeof(struct ath11k_dp_rfc1042_hdr)); 1817 1818 /* push original 802.11 header */ 1819 hdr = (struct ieee80211_hdr *)first_hdr; 1820 hdr_len = ieee80211_hdrlen(hdr->frame_control); 1821 1822 if (!(status->flag & RX_FLAG_IV_STRIPPED)) { 1823 memcpy(skb_push(msdu, 1824 ath11k_dp_rx_crypto_param_len(ar, enctype)), 1825 (void *)hdr + hdr_len, 1826 ath11k_dp_rx_crypto_param_len(ar, enctype)); 1827 } 1828 1829 memcpy(skb_push(msdu, hdr_len), hdr, hdr_len); 1830 1831 /* original 802.11 header has a different DA and in 1832 * case of 4addr it may also have different SA 1833 */ 1834 hdr = (struct ieee80211_hdr *)msdu->data; 1835 ether_addr_copy(ieee80211_get_DA(hdr), da); 1836 ether_addr_copy(ieee80211_get_SA(hdr), sa); 1837 } 1838 1839 static void ath11k_dp_rx_h_undecap(struct ath11k *ar, struct sk_buff *msdu, 1840 struct hal_rx_desc *rx_desc, 1841 enum hal_encrypt_type enctype, 1842 struct ieee80211_rx_status *status, 1843 bool decrypted) 1844 { 1845 u8 *first_hdr; 1846 u8 decap; 1847 1848 first_hdr = ath11k_dp_rx_h_80211_hdr(rx_desc); 1849 decap = ath11k_dp_rx_h_mpdu_start_decap_type(rx_desc); 1850 1851 switch (decap) { 1852 case DP_RX_DECAP_TYPE_NATIVE_WIFI: 1853 ath11k_dp_rx_h_undecap_nwifi(ar, msdu, first_hdr, 1854 enctype, status); 1855 break; 1856 case DP_RX_DECAP_TYPE_RAW: 1857 ath11k_dp_rx_h_undecap_raw(ar, msdu, enctype, status, 1858 decrypted); 1859 break; 1860 case DP_RX_DECAP_TYPE_ETHERNET2_DIX: 1861 ath11k_dp_rx_h_undecap_eth(ar, msdu, first_hdr, 1862 enctype, status); 1863 break; 1864 case DP_RX_DECAP_TYPE_8023: 1865 /* TODO: Handle undecap for these formats */ 1866 break; 1867 } 1868 } 1869 1870 static void ath11k_dp_rx_h_mpdu(struct ath11k *ar, 1871 struct sk_buff_head *amsdu_list, 1872 struct hal_rx_desc *rx_desc, 1873 struct ieee80211_rx_status *rx_status) 1874 { 1875 struct ieee80211_hdr *hdr; 1876 enum hal_encrypt_type enctype; 1877 
struct sk_buff *last_msdu; 1878 struct sk_buff *msdu; 1879 struct ath11k_skb_rxcb *last_rxcb; 1880 bool is_decrypted; 1881 u32 err_bitmap; 1882 u8 *qos; 1883 1884 if (skb_queue_empty(amsdu_list)) 1885 return; 1886 1887 hdr = (struct ieee80211_hdr *)ath11k_dp_rx_h_80211_hdr(rx_desc); 1888 1889 /* Each A-MSDU subframe will use the original header as the base and be 1890 * reported as a separate MSDU so strip the A-MSDU bit from QoS Ctl. 1891 */ 1892 if (ieee80211_is_data_qos(hdr->frame_control)) { 1893 qos = ieee80211_get_qos_ctl(hdr); 1894 qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT; 1895 } 1896 1897 is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_desc); 1898 enctype = ath11k_dp_rx_h_mpdu_start_enctype(rx_desc); 1899 1900 /* Some attention flags are valid only in the last MSDU. */ 1901 last_msdu = skb_peek_tail(amsdu_list); 1902 last_rxcb = ATH11K_SKB_RXCB(last_msdu); 1903 1904 err_bitmap = ath11k_dp_rx_h_attn_mpdu_err(last_rxcb->rx_desc); 1905 1906 /* Clear per-MPDU flags while leaving per-PPDU flags intact. */ 1907 rx_status->flag &= ~(RX_FLAG_FAILED_FCS_CRC | 1908 RX_FLAG_MMIC_ERROR | 1909 RX_FLAG_DECRYPTED | 1910 RX_FLAG_IV_STRIPPED | 1911 RX_FLAG_MMIC_STRIPPED); 1912 1913 if (err_bitmap & DP_RX_MPDU_ERR_FCS) 1914 rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; 1915 1916 if (err_bitmap & DP_RX_MPDU_ERR_TKIP_MIC) 1917 rx_status->flag |= RX_FLAG_MMIC_ERROR; 1918 1919 if (is_decrypted) 1920 rx_status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MMIC_STRIPPED | 1921 RX_FLAG_MIC_STRIPPED | RX_FLAG_ICV_STRIPPED; 1922 1923 skb_queue_walk(amsdu_list, msdu) { 1924 ath11k_dp_rx_h_csum_offload(msdu); 1925 ath11k_dp_rx_h_undecap(ar, msdu, rx_desc, 1926 enctype, rx_status, is_decrypted); 1927 } 1928 } 1929 1930 static void ath11k_dp_rx_h_rate(struct ath11k *ar, struct hal_rx_desc *rx_desc, 1931 struct ieee80211_rx_status *rx_status) 1932 { 1933 struct ieee80211_supported_band *sband; 1934 enum rx_msdu_start_pkt_type pkt_type; 1935 u8 bw; 1936 u8 rate_mcs, nss; 1937 u8 sgi; 1938 bool is_cck; 1939 1940 pkt_type = ath11k_dp_rx_h_msdu_start_pkt_type(rx_desc); 1941 bw = ath11k_dp_rx_h_msdu_start_rx_bw(rx_desc); 1942 rate_mcs = ath11k_dp_rx_h_msdu_start_rate_mcs(rx_desc); 1943 nss = ath11k_dp_rx_h_msdu_start_nss(rx_desc); 1944 sgi = ath11k_dp_rx_h_msdu_start_sgi(rx_desc); 1945 1946 switch (pkt_type) { 1947 case RX_MSDU_START_PKT_TYPE_11A: 1948 case RX_MSDU_START_PKT_TYPE_11B: 1949 is_cck = (pkt_type == RX_MSDU_START_PKT_TYPE_11B); 1950 sband = &ar->mac.sbands[rx_status->band]; 1951 rx_status->rate_idx = ath11k_mac_hw_rate_to_idx(sband, rate_mcs, 1952 is_cck); 1953 break; 1954 case RX_MSDU_START_PKT_TYPE_11N: 1955 rx_status->encoding = RX_ENC_HT; 1956 if (rate_mcs > ATH11K_HT_MCS_MAX) { 1957 ath11k_warn(ar->ab, 1958 "Received with invalid mcs in HT mode %d\n", 1959 rate_mcs); 1960 break; 1961 } 1962 rx_status->rate_idx = rate_mcs + (8 * (nss - 1)); 1963 if (sgi) 1964 rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI; 1965 rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw); 1966 break; 1967 case RX_MSDU_START_PKT_TYPE_11AC: 1968 rx_status->encoding = RX_ENC_VHT; 1969 rx_status->rate_idx = rate_mcs; 1970 if (rate_mcs > ATH11K_VHT_MCS_MAX) { 1971 ath11k_warn(ar->ab, 1972 "Received with invalid mcs in VHT mode %d\n", 1973 rate_mcs); 1974 break; 1975 } 1976 rx_status->nss = nss; 1977 if (sgi) 1978 rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI; 1979 rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw); 1980 break; 1981 case RX_MSDU_START_PKT_TYPE_11AX: 1982 rx_status->rate_idx = rate_mcs; 1983 if (rate_mcs > ATH11K_HE_MCS_MAX) { 1984 
ath11k_warn(ar->ab, 1985 "Received with invalid mcs in HE mode %d\n", 1986 rate_mcs); 1987 break; 1988 } 1989 rx_status->encoding = RX_ENC_HE; 1990 rx_status->nss = nss; 1991 rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw); 1992 break; 1993 } 1994 } 1995 1996 static void ath11k_dp_rx_h_ppdu(struct ath11k *ar, struct hal_rx_desc *rx_desc, 1997 struct ieee80211_rx_status *rx_status) 1998 { 1999 u8 channel_num; 2000 2001 rx_status->freq = 0; 2002 rx_status->rate_idx = 0; 2003 rx_status->nss = 0; 2004 rx_status->encoding = RX_ENC_LEGACY; 2005 rx_status->bw = RATE_INFO_BW_20; 2006 2007 rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL; 2008 2009 channel_num = ath11k_dp_rx_h_msdu_start_freq(rx_desc); 2010 2011 if (channel_num >= 1 && channel_num <= 14) { 2012 rx_status->band = NL80211_BAND_2GHZ; 2013 } else if (channel_num >= 36 && channel_num <= 173) { 2014 rx_status->band = NL80211_BAND_5GHZ; 2015 } else { 2016 ath11k_warn(ar->ab, "Unsupported Channel info received %d\n", 2017 channel_num); 2018 return; 2019 } 2020 2021 rx_status->freq = ieee80211_channel_to_frequency(channel_num, 2022 rx_status->band); 2023 2024 ath11k_dp_rx_h_rate(ar, rx_desc, rx_status); 2025 } 2026 2027 static void ath11k_dp_rx_process_amsdu(struct ath11k *ar, 2028 struct sk_buff_head *amsdu_list, 2029 struct ieee80211_rx_status *rx_status) 2030 { 2031 struct sk_buff *first; 2032 struct ath11k_skb_rxcb *rxcb; 2033 struct hal_rx_desc *rx_desc; 2034 bool first_mpdu; 2035 2036 if (skb_queue_empty(amsdu_list)) 2037 return; 2038 2039 first = skb_peek(amsdu_list); 2040 rxcb = ATH11K_SKB_RXCB(first); 2041 rx_desc = rxcb->rx_desc; 2042 2043 first_mpdu = ath11k_dp_rx_h_attn_first_mpdu(rx_desc); 2044 if (first_mpdu) 2045 ath11k_dp_rx_h_ppdu(ar, rx_desc, rx_status); 2046 2047 ath11k_dp_rx_h_mpdu(ar, amsdu_list, rx_desc, rx_status); 2048 } 2049 2050 static char *ath11k_print_get_tid(struct ieee80211_hdr *hdr, char *out, 2051 size_t size) 2052 { 2053 u8 *qc; 2054 int tid; 2055 2056 if (!ieee80211_is_data_qos(hdr->frame_control)) 2057 return ""; 2058 2059 qc = ieee80211_get_qos_ctl(hdr); 2060 tid = *qc & IEEE80211_QOS_CTL_TID_MASK; 2061 snprintf(out, size, "tid %d", tid); 2062 2063 return out; 2064 } 2065 2066 static void ath11k_dp_rx_deliver_msdu(struct ath11k *ar, struct napi_struct *napi, 2067 struct sk_buff *msdu) 2068 { 2069 static const struct ieee80211_radiotap_he known = { 2070 .data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN), 2071 .data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN), 2072 }; 2073 struct ieee80211_rx_status *status; 2074 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data; 2075 struct ieee80211_radiotap_he *he = NULL; 2076 char tid[32]; 2077 2078 status = IEEE80211_SKB_RXCB(msdu); 2079 if (status->encoding == RX_ENC_HE) { 2080 he = skb_push(msdu, sizeof(known)); 2081 memcpy(he, &known, sizeof(known)); 2082 status->flag |= RX_FLAG_RADIOTAP_HE; 2083 } 2084 2085 ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 2086 "rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n", 2087 msdu, 2088 msdu->len, 2089 ieee80211_get_SA(hdr), 2090 ath11k_print_get_tid(hdr, tid, sizeof(tid)), 2091 is_multicast_ether_addr(ieee80211_get_DA(hdr)) ? 2092 "mcast" : "ucast", 2093 (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4, 2094 (status->encoding == RX_ENC_LEGACY) ? "legacy" : "", 2095 (status->encoding == RX_ENC_HT) ? "ht" : "", 2096 (status->encoding == RX_ENC_VHT) ? "vht" : "", 2097 (status->encoding == RX_ENC_HE) ? 
"he" : "", 2098 (status->bw == RATE_INFO_BW_40) ? "40" : "", 2099 (status->bw == RATE_INFO_BW_80) ? "80" : "", 2100 (status->bw == RATE_INFO_BW_160) ? "160" : "", 2101 status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "", 2102 status->rate_idx, 2103 status->nss, 2104 status->freq, 2105 status->band, status->flag, 2106 !!(status->flag & RX_FLAG_FAILED_FCS_CRC), 2107 !!(status->flag & RX_FLAG_MMIC_ERROR), 2108 !!(status->flag & RX_FLAG_AMSDU_MORE)); 2109 2110 /* TODO: trace rx packet */ 2111 2112 ieee80211_rx_napi(ar->hw, NULL, msdu, napi); 2113 } 2114 2115 static void ath11k_dp_rx_pre_deliver_amsdu(struct ath11k *ar, 2116 struct sk_buff_head *amsdu_list, 2117 struct ieee80211_rx_status *rxs) 2118 { 2119 struct sk_buff *msdu; 2120 struct sk_buff *first_subframe; 2121 struct ieee80211_rx_status *status; 2122 2123 first_subframe = skb_peek(amsdu_list); 2124 2125 skb_queue_walk(amsdu_list, msdu) { 2126 /* Setup per-MSDU flags */ 2127 if (skb_queue_empty(amsdu_list)) 2128 rxs->flag &= ~RX_FLAG_AMSDU_MORE; 2129 else 2130 rxs->flag |= RX_FLAG_AMSDU_MORE; 2131 2132 if (msdu == first_subframe) { 2133 first_subframe = NULL; 2134 rxs->flag &= ~RX_FLAG_ALLOW_SAME_PN; 2135 } else { 2136 rxs->flag |= RX_FLAG_ALLOW_SAME_PN; 2137 } 2138 rxs->flag |= RX_FLAG_SKIP_MONITOR; 2139 2140 status = IEEE80211_SKB_RXCB(msdu); 2141 *status = *rxs; 2142 } 2143 } 2144 2145 static void ath11k_dp_rx_process_pending_packets(struct ath11k_base *ab, 2146 struct napi_struct *napi, 2147 struct sk_buff_head *pending_q, 2148 int *quota, u8 mac_id) 2149 { 2150 struct ath11k *ar; 2151 struct sk_buff *msdu; 2152 struct ath11k_pdev *pdev; 2153 2154 if (skb_queue_empty(pending_q)) 2155 return; 2156 2157 ar = ab->pdevs[mac_id].ar; 2158 2159 rcu_read_lock(); 2160 pdev = rcu_dereference(ab->pdevs_active[mac_id]); 2161 2162 while (*quota && (msdu = __skb_dequeue(pending_q))) { 2163 if (!pdev) { 2164 dev_kfree_skb_any(msdu); 2165 continue; 2166 } 2167 2168 ath11k_dp_rx_deliver_msdu(ar, napi, msdu); 2169 (*quota)--; 2170 } 2171 rcu_read_unlock(); 2172 } 2173 2174 int ath11k_dp_process_rx(struct ath11k_base *ab, int mac_id, 2175 struct napi_struct *napi, struct sk_buff_head *pending_q, 2176 int budget) 2177 { 2178 struct ath11k *ar = ab->pdevs[mac_id].ar; 2179 struct ath11k_pdev_dp *dp = &ar->dp; 2180 struct ieee80211_rx_status *rx_status = &dp->rx_status; 2181 struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring; 2182 struct hal_srng *srng; 2183 struct sk_buff *msdu; 2184 struct sk_buff_head msdu_list; 2185 struct sk_buff_head amsdu_list; 2186 struct ath11k_skb_rxcb *rxcb; 2187 u32 *rx_desc; 2188 int buf_id; 2189 int num_buffs_reaped = 0; 2190 int quota = budget; 2191 int ret; 2192 bool done = false; 2193 2194 /* Process any pending packets from the previous napi poll. 2195 * Note: All msdu's in this pending_q corresponds to the same mac id 2196 * due to pdev based reo dest mapping and also since each irq group id 2197 * maps to specific reo dest ring. 
	 */
	ath11k_dp_rx_process_pending_packets(ab, napi, pending_q, &quota,
					     mac_id);

	/* If all quota is exhausted by processing the pending_q,
	 * wait for the next napi poll to reap the new info
	 */
	if (!quota)
		goto exit;

	__skb_queue_head_init(&msdu_list);

	srng = &ab->hal.srng_list[dp->reo_dst_ring.ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

try_again:
	while ((rx_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
		struct hal_reo_dest_ring *desc = (struct hal_reo_dest_ring *)rx_desc;
		enum hal_reo_dest_ring_push_reason push_reason;
		u32 cookie;

		cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
				   desc->buf_addr_info.info1);
		buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
				   cookie);
		spin_lock_bh(&rx_ring->idr_lock);
		msdu = idr_find(&rx_ring->bufs_idr, buf_id);
		if (!msdu) {
			ath11k_warn(ab, "frame rx with invalid buf_id %d\n",
				    buf_id);
			spin_unlock_bh(&rx_ring->idr_lock);
			continue;
		}

		idr_remove(&rx_ring->bufs_idr, buf_id);
		spin_unlock_bh(&rx_ring->idr_lock);

		rxcb = ATH11K_SKB_RXCB(msdu);
		dma_unmap_single(ab->dev, rxcb->paddr,
				 msdu->len + skb_tailroom(msdu),
				 DMA_FROM_DEVICE);

		num_buffs_reaped++;

		push_reason = FIELD_GET(HAL_REO_DEST_RING_INFO0_PUSH_REASON,
					desc->info0);
		if (push_reason !=
		    HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION) {
			/* TODO: Check if the msdu can be sent up for processing */
			dev_kfree_skb_any(msdu);
			ab->soc_stats.hal_reo_error[dp->reo_dst_ring.ring_id]++;
			continue;
		}

		rxcb->is_first_msdu = !!(desc->rx_msdu_info.info0 &
					 RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU);
		rxcb->is_last_msdu = !!(desc->rx_msdu_info.info0 &
					RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU);
		rxcb->is_continuation = !!(desc->rx_msdu_info.info0 &
					   RX_MSDU_DESC_INFO0_MSDU_CONTINUATION);
		rxcb->mac_id = mac_id;
		__skb_queue_tail(&msdu_list, msdu);

		/* Stop reaping from the ring once quota is exhausted
		 * and we've received all msdu's in the AMSDU. The
		 * additional msdu's reaped in excess of quota here would
		 * be pushed into the pending queue to be processed during
		 * the next napi poll.
		 * Note: More profiling can be done to see the impact on
		 * pending_q and throughput during various traffic & density
		 * and how use of budget instead of remaining quota affects it.
		 */
		if (num_buffs_reaped >= quota && rxcb->is_last_msdu &&
		    !rxcb->is_continuation) {
			done = true;
			break;
		}
	}

	/* Hw might have updated the head pointer after we cached it.
	 * In this case, even though there are entries in the ring we'll
	 * get rx_desc NULL. Give the read another try with updated cached
	 * head pointer so that we can reap complete MPDU in the current
	 * rx processing.
	 */
	if (!done && ath11k_hal_srng_dst_num_free(ab, srng, true)) {
		ath11k_hal_srng_access_end(ab, srng);
		goto try_again;
	}

	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	if (!num_buffs_reaped)
		goto exit;

	/* Should we reschedule it later if we are not able to replenish all
	 * the buffers?
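	 * Note: the replenish below runs with GFP_ATOMIC, so it can
	 * legitimately end up providing fewer buffers than were reaped.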
2300 */ 2301 ath11k_dp_rxbufs_replenish(ab, mac_id, rx_ring, num_buffs_reaped, 2302 HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC); 2303 2304 rcu_read_lock(); 2305 if (!rcu_dereference(ab->pdevs_active[mac_id])) { 2306 __skb_queue_purge(&msdu_list); 2307 goto rcu_unlock; 2308 } 2309 2310 if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) { 2311 __skb_queue_purge(&msdu_list); 2312 goto rcu_unlock; 2313 } 2314 2315 while (!skb_queue_empty(&msdu_list)) { 2316 __skb_queue_head_init(&amsdu_list); 2317 ret = ath11k_dp_rx_retrieve_amsdu(ar, &msdu_list, &amsdu_list); 2318 if (ret) { 2319 if (ret == -EIO) { 2320 ath11k_err(ab, "rx ring got corrupted %d\n", ret); 2321 __skb_queue_purge(&msdu_list); 2322 /* Should stop processing any more rx in 2323 * future from this ring? 2324 */ 2325 goto rcu_unlock; 2326 } 2327 2328 /* A-MSDU retrieval got failed due to non-fatal condition, 2329 * continue processing with the next msdu. 2330 */ 2331 continue; 2332 } 2333 2334 ath11k_dp_rx_process_amsdu(ar, &amsdu_list, rx_status); 2335 2336 ath11k_dp_rx_pre_deliver_amsdu(ar, &amsdu_list, rx_status); 2337 skb_queue_splice_tail(&amsdu_list, pending_q); 2338 } 2339 2340 while (quota && (msdu = __skb_dequeue(pending_q))) { 2341 ath11k_dp_rx_deliver_msdu(ar, napi, msdu); 2342 quota--; 2343 } 2344 2345 rcu_unlock: 2346 rcu_read_unlock(); 2347 exit: 2348 return budget - quota; 2349 } 2350 2351 static void ath11k_dp_rx_update_peer_stats(struct ath11k_sta *arsta, 2352 struct hal_rx_mon_ppdu_info *ppdu_info) 2353 { 2354 struct ath11k_rx_peer_stats *rx_stats = arsta->rx_stats; 2355 u32 num_msdu; 2356 2357 if (!rx_stats) 2358 return; 2359 2360 num_msdu = ppdu_info->tcp_msdu_count + ppdu_info->tcp_ack_msdu_count + 2361 ppdu_info->udp_msdu_count + ppdu_info->other_msdu_count; 2362 2363 rx_stats->num_msdu += num_msdu; 2364 rx_stats->tcp_msdu_count += ppdu_info->tcp_msdu_count + 2365 ppdu_info->tcp_ack_msdu_count; 2366 rx_stats->udp_msdu_count += ppdu_info->udp_msdu_count; 2367 rx_stats->other_msdu_count += ppdu_info->other_msdu_count; 2368 2369 if (ppdu_info->preamble_type == HAL_RX_PREAMBLE_11A || 2370 ppdu_info->preamble_type == HAL_RX_PREAMBLE_11B) { 2371 ppdu_info->nss = 1; 2372 ppdu_info->mcs = HAL_RX_MAX_MCS; 2373 ppdu_info->tid = IEEE80211_NUM_TIDS; 2374 } 2375 2376 if (ppdu_info->nss > 0 && ppdu_info->nss <= HAL_RX_MAX_NSS) 2377 rx_stats->nss_count[ppdu_info->nss - 1] += num_msdu; 2378 2379 if (ppdu_info->mcs <= HAL_RX_MAX_MCS) 2380 rx_stats->mcs_count[ppdu_info->mcs] += num_msdu; 2381 2382 if (ppdu_info->gi < HAL_RX_GI_MAX) 2383 rx_stats->gi_count[ppdu_info->gi] += num_msdu; 2384 2385 if (ppdu_info->bw < HAL_RX_BW_MAX) 2386 rx_stats->bw_count[ppdu_info->bw] += num_msdu; 2387 2388 if (ppdu_info->ldpc < HAL_RX_SU_MU_CODING_MAX) 2389 rx_stats->coding_count[ppdu_info->ldpc] += num_msdu; 2390 2391 if (ppdu_info->tid <= IEEE80211_NUM_TIDS) 2392 rx_stats->tid_count[ppdu_info->tid] += num_msdu; 2393 2394 if (ppdu_info->preamble_type < HAL_RX_PREAMBLE_MAX) 2395 rx_stats->pream_cnt[ppdu_info->preamble_type] += num_msdu; 2396 2397 if (ppdu_info->reception_type < HAL_RX_RECEPTION_TYPE_MAX) 2398 rx_stats->reception_type[ppdu_info->reception_type] += num_msdu; 2399 2400 if (ppdu_info->is_stbc) 2401 rx_stats->stbc_count += num_msdu; 2402 2403 if (ppdu_info->beamformed) 2404 rx_stats->beamformed_count += num_msdu; 2405 2406 if (ppdu_info->num_mpdu_fcs_ok > 1) 2407 rx_stats->ampdu_msdu_count += num_msdu; 2408 else 2409 rx_stats->non_ampdu_msdu_count += num_msdu; 2410 2411 rx_stats->num_mpdu_fcs_ok += ppdu_info->num_mpdu_fcs_ok; 2412 
rx_stats->num_mpdu_fcs_err += ppdu_info->num_mpdu_fcs_err; 2413 2414 arsta->rssi_comb = ppdu_info->rssi_comb; 2415 rx_stats->rx_duration += ppdu_info->rx_duration; 2416 arsta->rx_duration = rx_stats->rx_duration; 2417 } 2418 2419 static struct sk_buff *ath11k_dp_rx_alloc_mon_status_buf(struct ath11k_base *ab, 2420 struct dp_rxdma_ring *rx_ring, 2421 int *buf_id, gfp_t gfp) 2422 { 2423 struct sk_buff *skb; 2424 dma_addr_t paddr; 2425 2426 skb = dev_alloc_skb(DP_RX_BUFFER_SIZE + 2427 DP_RX_BUFFER_ALIGN_SIZE); 2428 2429 if (!skb) 2430 goto fail_alloc_skb; 2431 2432 if (!IS_ALIGNED((unsigned long)skb->data, 2433 DP_RX_BUFFER_ALIGN_SIZE)) { 2434 skb_pull(skb, PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) - 2435 skb->data); 2436 } 2437 2438 paddr = dma_map_single(ab->dev, skb->data, 2439 skb->len + skb_tailroom(skb), 2440 DMA_BIDIRECTIONAL); 2441 if (unlikely(dma_mapping_error(ab->dev, paddr))) 2442 goto fail_free_skb; 2443 2444 spin_lock_bh(&rx_ring->idr_lock); 2445 *buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0, 2446 rx_ring->bufs_max, gfp); 2447 spin_unlock_bh(&rx_ring->idr_lock); 2448 if (*buf_id < 0) 2449 goto fail_dma_unmap; 2450 2451 ATH11K_SKB_RXCB(skb)->paddr = paddr; 2452 return skb; 2453 2454 fail_dma_unmap: 2455 dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb), 2456 DMA_BIDIRECTIONAL); 2457 fail_free_skb: 2458 dev_kfree_skb_any(skb); 2459 fail_alloc_skb: 2460 return NULL; 2461 } 2462 2463 int ath11k_dp_rx_mon_status_bufs_replenish(struct ath11k_base *ab, int mac_id, 2464 struct dp_rxdma_ring *rx_ring, 2465 int req_entries, 2466 enum hal_rx_buf_return_buf_manager mgr, 2467 gfp_t gfp) 2468 { 2469 struct hal_srng *srng; 2470 u32 *desc; 2471 struct sk_buff *skb; 2472 int num_free; 2473 int num_remain; 2474 int buf_id; 2475 u32 cookie; 2476 dma_addr_t paddr; 2477 2478 req_entries = min(req_entries, rx_ring->bufs_max); 2479 2480 srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id]; 2481 2482 spin_lock_bh(&srng->lock); 2483 2484 ath11k_hal_srng_access_begin(ab, srng); 2485 2486 num_free = ath11k_hal_srng_src_num_free(ab, srng, true); 2487 2488 req_entries = min(num_free, req_entries); 2489 num_remain = req_entries; 2490 2491 while (num_remain > 0) { 2492 skb = ath11k_dp_rx_alloc_mon_status_buf(ab, rx_ring, 2493 &buf_id, gfp); 2494 if (!skb) 2495 break; 2496 paddr = ATH11K_SKB_RXCB(skb)->paddr; 2497 2498 desc = ath11k_hal_srng_src_get_next_entry(ab, srng); 2499 if (!desc) 2500 goto fail_desc_get; 2501 2502 cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) | 2503 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id); 2504 2505 num_remain--; 2506 2507 ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr); 2508 } 2509 2510 ath11k_hal_srng_access_end(ab, srng); 2511 2512 spin_unlock_bh(&srng->lock); 2513 2514 return req_entries - num_remain; 2515 2516 fail_desc_get: 2517 spin_lock_bh(&rx_ring->idr_lock); 2518 idr_remove(&rx_ring->bufs_idr, buf_id); 2519 spin_unlock_bh(&rx_ring->idr_lock); 2520 dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb), 2521 DMA_BIDIRECTIONAL); 2522 dev_kfree_skb_any(skb); 2523 ath11k_hal_srng_access_end(ab, srng); 2524 spin_unlock_bh(&srng->lock); 2525 2526 return req_entries - num_remain; 2527 } 2528 2529 static int ath11k_dp_rx_reap_mon_status_ring(struct ath11k_base *ab, int mac_id, 2530 int *budget, struct sk_buff_head *skb_list) 2531 { 2532 struct ath11k *ar = ab->pdevs[mac_id].ar; 2533 struct ath11k_pdev_dp *dp = &ar->dp; 2534 struct dp_rxdma_ring *rx_ring = &dp->rx_mon_status_refill_ring; 2535 struct hal_srng *srng; 2536 void 
*rx_mon_status_desc; 2537 struct sk_buff *skb; 2538 struct ath11k_skb_rxcb *rxcb; 2539 struct hal_tlv_hdr *tlv; 2540 u32 cookie; 2541 int buf_id; 2542 dma_addr_t paddr; 2543 u8 rbm; 2544 int num_buffs_reaped = 0; 2545 2546 srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id]; 2547 2548 spin_lock_bh(&srng->lock); 2549 2550 ath11k_hal_srng_access_begin(ab, srng); 2551 while (*budget) { 2552 *budget -= 1; 2553 rx_mon_status_desc = 2554 ath11k_hal_srng_src_peek(ab, srng); 2555 if (!rx_mon_status_desc) 2556 break; 2557 2558 ath11k_hal_rx_buf_addr_info_get(rx_mon_status_desc, &paddr, 2559 &cookie, &rbm); 2560 if (paddr) { 2561 buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, cookie); 2562 2563 spin_lock_bh(&rx_ring->idr_lock); 2564 skb = idr_find(&rx_ring->bufs_idr, buf_id); 2565 if (!skb) { 2566 ath11k_warn(ab, "rx monitor status with invalid buf_id %d\n", 2567 buf_id); 2568 spin_unlock_bh(&rx_ring->idr_lock); 2569 continue; 2570 } 2571 2572 idr_remove(&rx_ring->bufs_idr, buf_id); 2573 spin_unlock_bh(&rx_ring->idr_lock); 2574 2575 rxcb = ATH11K_SKB_RXCB(skb); 2576 2577 dma_sync_single_for_cpu(ab->dev, rxcb->paddr, 2578 skb->len + skb_tailroom(skb), 2579 DMA_FROM_DEVICE); 2580 2581 dma_unmap_single(ab->dev, rxcb->paddr, 2582 skb->len + skb_tailroom(skb), 2583 DMA_BIDIRECTIONAL); 2584 2585 tlv = (struct hal_tlv_hdr *)skb->data; 2586 if (FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl) != 2587 HAL_RX_STATUS_BUFFER_DONE) { 2588 ath11k_hal_srng_src_get_next_entry(ab, srng); 2589 continue; 2590 } 2591 2592 __skb_queue_tail(skb_list, skb); 2593 } 2594 2595 skb = ath11k_dp_rx_alloc_mon_status_buf(ab, rx_ring, 2596 &buf_id, GFP_ATOMIC); 2597 2598 if (!skb) { 2599 ath11k_hal_rx_buf_addr_info_set(rx_mon_status_desc, 0, 0, 2600 HAL_RX_BUF_RBM_SW3_BM); 2601 num_buffs_reaped++; 2602 break; 2603 } 2604 rxcb = ATH11K_SKB_RXCB(skb); 2605 2606 cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) | 2607 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id); 2608 2609 ath11k_hal_rx_buf_addr_info_set(rx_mon_status_desc, rxcb->paddr, 2610 cookie, HAL_RX_BUF_RBM_SW3_BM); 2611 ath11k_hal_srng_src_get_next_entry(ab, srng); 2612 num_buffs_reaped++; 2613 } 2614 ath11k_hal_srng_access_end(ab, srng); 2615 spin_unlock_bh(&srng->lock); 2616 2617 return num_buffs_reaped; 2618 } 2619 2620 int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id, 2621 struct napi_struct *napi, int budget) 2622 { 2623 struct ath11k *ar = ab->pdevs[mac_id].ar; 2624 enum hal_rx_mon_status hal_status; 2625 struct sk_buff *skb; 2626 struct sk_buff_head skb_list; 2627 struct hal_rx_mon_ppdu_info ppdu_info; 2628 struct ath11k_peer *peer; 2629 struct ath11k_sta *arsta; 2630 int num_buffs_reaped = 0; 2631 2632 __skb_queue_head_init(&skb_list); 2633 2634 num_buffs_reaped = ath11k_dp_rx_reap_mon_status_ring(ab, mac_id, &budget, 2635 &skb_list); 2636 if (!num_buffs_reaped) 2637 goto exit; 2638 2639 while ((skb = __skb_dequeue(&skb_list))) { 2640 memset(&ppdu_info, 0, sizeof(ppdu_info)); 2641 ppdu_info.peer_id = HAL_INVALID_PEERID; 2642 2643 if (ath11k_debug_is_pktlog_rx_stats_enabled(ar)) 2644 trace_ath11k_htt_rxdesc(ar, skb->data, DP_RX_BUFFER_SIZE); 2645 2646 hal_status = ath11k_hal_rx_parse_mon_status(ab, &ppdu_info, skb); 2647 2648 if (ppdu_info.peer_id == HAL_INVALID_PEERID || 2649 hal_status != HAL_RX_MON_STATUS_PPDU_DONE) { 2650 dev_kfree_skb_any(skb); 2651 continue; 2652 } 2653 2654 rcu_read_lock(); 2655 spin_lock_bh(&ab->base_lock); 2656 peer = ath11k_peer_find_by_id(ab, ppdu_info.peer_id); 2657 2658 if (!peer || !peer->sta) { 2659 
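			/* No peer/sta to attribute this PPDU to; drop the
			 * status skb and carry on with the next one.
			 */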
ath11k_dbg(ab, ATH11K_DBG_DATA, 2660 "failed to find the peer with peer_id %d\n", 2661 ppdu_info.peer_id); 2662 spin_unlock_bh(&ab->base_lock); 2663 rcu_read_unlock(); 2664 dev_kfree_skb_any(skb); 2665 continue; 2666 } 2667 2668 arsta = (struct ath11k_sta *)peer->sta->drv_priv; 2669 ath11k_dp_rx_update_peer_stats(arsta, &ppdu_info); 2670 2671 if (ath11k_debug_is_pktlog_peer_valid(ar, peer->addr)) 2672 trace_ath11k_htt_rxdesc(ar, skb->data, DP_RX_BUFFER_SIZE); 2673 2674 spin_unlock_bh(&ab->base_lock); 2675 rcu_read_unlock(); 2676 2677 dev_kfree_skb_any(skb); 2678 } 2679 exit: 2680 return num_buffs_reaped; 2681 } 2682 2683 static int ath11k_dp_rx_link_desc_return(struct ath11k_base *ab, 2684 u32 *link_desc, 2685 enum hal_wbm_rel_bm_act action) 2686 { 2687 struct ath11k_dp *dp = &ab->dp; 2688 struct hal_srng *srng; 2689 u32 *desc; 2690 int ret = 0; 2691 2692 srng = &ab->hal.srng_list[dp->wbm_desc_rel_ring.ring_id]; 2693 2694 spin_lock_bh(&srng->lock); 2695 2696 ath11k_hal_srng_access_begin(ab, srng); 2697 2698 desc = ath11k_hal_srng_src_get_next_entry(ab, srng); 2699 if (!desc) { 2700 ret = -ENOBUFS; 2701 goto exit; 2702 } 2703 2704 ath11k_hal_rx_msdu_link_desc_set(ab, (void *)desc, (void *)link_desc, 2705 action); 2706 2707 exit: 2708 ath11k_hal_srng_access_end(ab, srng); 2709 2710 spin_unlock_bh(&srng->lock); 2711 2712 return ret; 2713 } 2714 2715 static void ath11k_dp_rx_frag_h_mpdu(struct ath11k *ar, 2716 struct sk_buff *msdu, 2717 struct hal_rx_desc *rx_desc, 2718 struct ieee80211_rx_status *rx_status) 2719 { 2720 u8 rx_channel; 2721 enum hal_encrypt_type enctype; 2722 bool is_decrypted; 2723 u32 err_bitmap; 2724 2725 is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_desc); 2726 enctype = ath11k_dp_rx_h_mpdu_start_enctype(rx_desc); 2727 err_bitmap = ath11k_dp_rx_h_attn_mpdu_err(rx_desc); 2728 2729 if (err_bitmap & DP_RX_MPDU_ERR_FCS) 2730 rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; 2731 2732 if (err_bitmap & DP_RX_MPDU_ERR_TKIP_MIC) 2733 rx_status->flag |= RX_FLAG_MMIC_ERROR; 2734 2735 rx_status->encoding = RX_ENC_LEGACY; 2736 rx_status->bw = RATE_INFO_BW_20; 2737 2738 rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL; 2739 2740 rx_channel = ath11k_dp_rx_h_msdu_start_freq(rx_desc); 2741 2742 if (rx_channel >= 1 && rx_channel <= 14) { 2743 rx_status->band = NL80211_BAND_2GHZ; 2744 } else if (rx_channel >= 36 && rx_channel <= 173) { 2745 rx_status->band = NL80211_BAND_5GHZ; 2746 } else { 2747 ath11k_warn(ar->ab, "Unsupported Channel info received %d\n", 2748 rx_channel); 2749 return; 2750 } 2751 2752 rx_status->freq = ieee80211_channel_to_frequency(rx_channel, 2753 rx_status->band); 2754 ath11k_dp_rx_h_rate(ar, rx_desc, rx_status); 2755 2756 /* Rx fragments are received in raw mode */ 2757 skb_trim(msdu, msdu->len - FCS_LEN); 2758 2759 if (is_decrypted) { 2760 rx_status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MIC_STRIPPED; 2761 skb_trim(msdu, msdu->len - 2762 ath11k_dp_rx_crypto_mic_len(ar, enctype)); 2763 } 2764 } 2765 2766 static int 2767 ath11k_dp_process_rx_err_buf(struct ath11k *ar, struct napi_struct *napi, 2768 int buf_id, bool frag) 2769 { 2770 struct ath11k_pdev_dp *dp = &ar->dp; 2771 struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring; 2772 struct ieee80211_rx_status rx_status = {0}; 2773 struct sk_buff *msdu; 2774 struct ath11k_skb_rxcb *rxcb; 2775 struct ieee80211_rx_status *status; 2776 struct hal_rx_desc *rx_desc; 2777 u16 msdu_len; 2778 2779 spin_lock_bh(&rx_ring->idr_lock); 2780 msdu = idr_find(&rx_ring->bufs_idr, buf_id); 2781 if (!msdu) { 2782 ath11k_warn(ar->ab, "rx err buf with 
invalid buf_id %d\n", 2783 buf_id); 2784 spin_unlock_bh(&rx_ring->idr_lock); 2785 return -EINVAL; 2786 } 2787 2788 idr_remove(&rx_ring->bufs_idr, buf_id); 2789 spin_unlock_bh(&rx_ring->idr_lock); 2790 2791 rxcb = ATH11K_SKB_RXCB(msdu); 2792 dma_unmap_single(ar->ab->dev, rxcb->paddr, 2793 msdu->len + skb_tailroom(msdu), 2794 DMA_FROM_DEVICE); 2795 2796 if (!frag) { 2797 /* Process only rx fragments below, and drop 2798 * msdu's indicated due to error reasons. 2799 */ 2800 dev_kfree_skb_any(msdu); 2801 return 0; 2802 } 2803 2804 rcu_read_lock(); 2805 if (!rcu_dereference(ar->ab->pdevs_active[ar->pdev_idx])) { 2806 dev_kfree_skb_any(msdu); 2807 goto exit; 2808 } 2809 2810 if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) { 2811 dev_kfree_skb_any(msdu); 2812 goto exit; 2813 } 2814 2815 rx_desc = (struct hal_rx_desc *)msdu->data; 2816 msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(rx_desc); 2817 skb_put(msdu, HAL_RX_DESC_SIZE + msdu_len); 2818 skb_pull(msdu, HAL_RX_DESC_SIZE); 2819 2820 ath11k_dp_rx_frag_h_mpdu(ar, msdu, rx_desc, &rx_status); 2821 2822 status = IEEE80211_SKB_RXCB(msdu); 2823 2824 *status = rx_status; 2825 2826 ath11k_dp_rx_deliver_msdu(ar, napi, msdu); 2827 2828 exit: 2829 rcu_read_unlock(); 2830 return 0; 2831 } 2832 2833 int ath11k_dp_process_rx_err(struct ath11k_base *ab, struct napi_struct *napi, 2834 int budget) 2835 { 2836 u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC]; 2837 struct dp_link_desc_bank *link_desc_banks; 2838 enum hal_rx_buf_return_buf_manager rbm; 2839 int tot_n_bufs_reaped, quota, ret, i; 2840 int n_bufs_reaped[MAX_RADIOS] = {0}; 2841 struct dp_rxdma_ring *rx_ring; 2842 struct dp_srng *reo_except; 2843 u32 desc_bank, num_msdus; 2844 struct hal_srng *srng; 2845 struct ath11k_dp *dp; 2846 void *link_desc_va; 2847 int buf_id, mac_id; 2848 struct ath11k *ar; 2849 dma_addr_t paddr; 2850 u32 *desc; 2851 bool is_frag; 2852 2853 tot_n_bufs_reaped = 0; 2854 quota = budget; 2855 2856 dp = &ab->dp; 2857 reo_except = &dp->reo_except_ring; 2858 link_desc_banks = dp->link_desc_banks; 2859 2860 srng = &ab->hal.srng_list[reo_except->ring_id]; 2861 2862 spin_lock_bh(&srng->lock); 2863 2864 ath11k_hal_srng_access_begin(ab, srng); 2865 2866 while (budget && 2867 (desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) { 2868 struct hal_reo_dest_ring *reo_desc = (struct hal_reo_dest_ring *)desc; 2869 2870 ab->soc_stats.err_ring_pkts++; 2871 ret = ath11k_hal_desc_reo_parse_err(ab, desc, &paddr, 2872 &desc_bank); 2873 if (ret) { 2874 ath11k_warn(ab, "failed to parse error reo desc %d\n", 2875 ret); 2876 continue; 2877 } 2878 link_desc_va = link_desc_banks[desc_bank].vaddr + 2879 (paddr - link_desc_banks[desc_bank].paddr); 2880 ath11k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, msdu_cookies, 2881 &rbm); 2882 if (rbm != HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST && 2883 rbm != HAL_RX_BUF_RBM_SW3_BM) { 2884 ab->soc_stats.invalid_rbm++; 2885 ath11k_warn(ab, "invalid return buffer manager %d\n", rbm); 2886 ath11k_dp_rx_link_desc_return(ab, desc, 2887 HAL_WBM_REL_BM_ACT_REL_MSDU); 2888 continue; 2889 } 2890 2891 is_frag = !!(reo_desc->rx_mpdu_info.info0 & RX_MPDU_DESC_INFO0_FRAG_FLAG); 2892 2893 /* Return the link desc back to wbm idle list */ 2894 ath11k_dp_rx_link_desc_return(ab, desc, 2895 HAL_WBM_REL_BM_ACT_PUT_IN_IDLE); 2896 2897 for (i = 0; i < num_msdus; i++) { 2898 buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, 2899 msdu_cookies[i]); 2900 2901 mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, 2902 msdu_cookies[i]); 2903 2904 ar = ab->pdevs[mac_id].ar; 2905 2906 if 
(!ath11k_dp_process_rx_err_buf(ar, napi, buf_id, 2907 is_frag)) { 2908 n_bufs_reaped[mac_id]++; 2909 tot_n_bufs_reaped++; 2910 } 2911 } 2912 2913 if (tot_n_bufs_reaped >= quota) { 2914 tot_n_bufs_reaped = quota; 2915 goto exit; 2916 } 2917 2918 budget = quota - tot_n_bufs_reaped; 2919 } 2920 2921 exit: 2922 ath11k_hal_srng_access_end(ab, srng); 2923 2924 spin_unlock_bh(&srng->lock); 2925 2926 for (i = 0; i < ab->num_radios; i++) { 2927 if (!n_bufs_reaped[i]) 2928 continue; 2929 2930 ar = ab->pdevs[i].ar; 2931 rx_ring = &ar->dp.rx_refill_buf_ring; 2932 2933 ath11k_dp_rxbufs_replenish(ab, i, rx_ring, n_bufs_reaped[i], 2934 HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC); 2935 } 2936 2937 return tot_n_bufs_reaped; 2938 } 2939 2940 static void ath11k_dp_rx_null_q_desc_sg_drop(struct ath11k *ar, 2941 int msdu_len, 2942 struct sk_buff_head *msdu_list) 2943 { 2944 struct sk_buff *skb, *tmp; 2945 struct ath11k_skb_rxcb *rxcb; 2946 int n_buffs; 2947 2948 n_buffs = DIV_ROUND_UP(msdu_len, 2949 (DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE)); 2950 2951 skb_queue_walk_safe(msdu_list, skb, tmp) { 2952 rxcb = ATH11K_SKB_RXCB(skb); 2953 if (rxcb->err_rel_src == HAL_WBM_REL_SRC_MODULE_REO && 2954 rxcb->err_code == HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO) { 2955 if (!n_buffs) 2956 break; 2957 __skb_unlink(skb, msdu_list); 2958 dev_kfree_skb_any(skb); 2959 n_buffs--; 2960 } 2961 } 2962 } 2963 2964 static int ath11k_dp_rx_h_null_q_desc(struct ath11k *ar, struct sk_buff *msdu, 2965 struct ieee80211_rx_status *status, 2966 struct sk_buff_head *msdu_list) 2967 { 2968 struct sk_buff_head amsdu_list; 2969 u16 msdu_len; 2970 struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data; 2971 u8 l3pad_bytes; 2972 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); 2973 2974 msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(desc); 2975 2976 if ((msdu_len + HAL_RX_DESC_SIZE) > DP_RX_BUFFER_SIZE) { 2977 /* First buffer will be freed by the caller, so deduct it's length */ 2978 msdu_len = msdu_len - (DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE); 2979 ath11k_dp_rx_null_q_desc_sg_drop(ar, msdu_len, msdu_list); 2980 return -EINVAL; 2981 } 2982 2983 if (!ath11k_dp_rx_h_attn_msdu_done(desc)) { 2984 ath11k_warn(ar->ab, 2985 "msdu_done bit not set in null_q_des processing\n"); 2986 __skb_queue_purge(msdu_list); 2987 return -EIO; 2988 } 2989 2990 /* Handle NULL queue descriptor violations arising out a missing 2991 * REO queue for a given peer or a given TID. This typically 2992 * may happen if a packet is received on a QOS enabled TID before the 2993 * ADDBA negotiation for that TID, when the TID queue is setup. Or 2994 * it may also happen for MC/BC frames if they are not routed to the 2995 * non-QOS TID queue, in the absence of any other default TID queue. 2996 * This error can show up both in a REO destination or WBM release ring. 
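	 * The frame itself is normally intact, so rather than dropping it the
	 * msdu is trimmed below, run through the regular ppdu/mpdu handlers
	 * and handed back to the caller for delivery.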
2997 */ 2998 2999 __skb_queue_head_init(&amsdu_list); 3000 3001 rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(desc); 3002 rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(desc); 3003 3004 l3pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(desc); 3005 3006 if ((HAL_RX_DESC_SIZE + l3pad_bytes + msdu_len) > DP_RX_BUFFER_SIZE) 3007 return -EINVAL; 3008 3009 skb_put(msdu, HAL_RX_DESC_SIZE + l3pad_bytes + msdu_len); 3010 skb_pull(msdu, HAL_RX_DESC_SIZE + l3pad_bytes); 3011 3012 ath11k_dp_rx_h_ppdu(ar, desc, status); 3013 3014 __skb_queue_tail(&amsdu_list, msdu); 3015 3016 ath11k_dp_rx_h_mpdu(ar, &amsdu_list, desc, status); 3017 3018 /* Please note that caller will having the access to msdu and completing 3019 * rx with mac80211. Need not worry about cleaning up amsdu_list. 3020 */ 3021 3022 return 0; 3023 } 3024 3025 static bool ath11k_dp_rx_h_reo_err(struct ath11k *ar, struct sk_buff *msdu, 3026 struct ieee80211_rx_status *status, 3027 struct sk_buff_head *msdu_list) 3028 { 3029 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); 3030 bool drop = false; 3031 3032 ar->ab->soc_stats.reo_error[rxcb->err_code]++; 3033 3034 switch (rxcb->err_code) { 3035 case HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO: 3036 if (ath11k_dp_rx_h_null_q_desc(ar, msdu, status, msdu_list)) 3037 drop = true; 3038 break; 3039 default: 3040 /* TODO: Review other errors and process them to mac80211 3041 * as appropriate. 3042 */ 3043 drop = true; 3044 break; 3045 } 3046 3047 return drop; 3048 } 3049 3050 static void ath11k_dp_rx_h_tkip_mic_err(struct ath11k *ar, struct sk_buff *msdu, 3051 struct ieee80211_rx_status *status) 3052 { 3053 u16 msdu_len; 3054 struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data; 3055 u8 l3pad_bytes; 3056 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); 3057 3058 rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(desc); 3059 rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(desc); 3060 3061 l3pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(desc); 3062 msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(desc); 3063 skb_put(msdu, HAL_RX_DESC_SIZE + l3pad_bytes + msdu_len); 3064 skb_pull(msdu, HAL_RX_DESC_SIZE + l3pad_bytes); 3065 3066 ath11k_dp_rx_h_ppdu(ar, desc, status); 3067 3068 status->flag |= (RX_FLAG_MMIC_STRIPPED | RX_FLAG_MMIC_ERROR | 3069 RX_FLAG_DECRYPTED); 3070 3071 ath11k_dp_rx_h_undecap(ar, msdu, desc, 3072 HAL_ENCRYPT_TYPE_TKIP_MIC, status, false); 3073 } 3074 3075 static bool ath11k_dp_rx_h_rxdma_err(struct ath11k *ar, struct sk_buff *msdu, 3076 struct ieee80211_rx_status *status) 3077 { 3078 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); 3079 bool drop = false; 3080 3081 ar->ab->soc_stats.rxdma_error[rxcb->err_code]++; 3082 3083 switch (rxcb->err_code) { 3084 case HAL_REO_ENTR_RING_RXDMA_ECODE_TKIP_MIC_ERR: 3085 ath11k_dp_rx_h_tkip_mic_err(ar, msdu, status); 3086 break; 3087 default: 3088 /* TODO: Review other rxdma error code to check if anything is 3089 * worth reporting to mac80211 3090 */ 3091 drop = true; 3092 break; 3093 } 3094 3095 return drop; 3096 } 3097 3098 static void ath11k_dp_rx_wbm_err(struct ath11k *ar, 3099 struct napi_struct *napi, 3100 struct sk_buff *msdu, 3101 struct sk_buff_head *msdu_list) 3102 { 3103 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); 3104 struct ieee80211_rx_status rxs = {0}; 3105 struct ieee80211_rx_status *status; 3106 bool drop = true; 3107 3108 switch (rxcb->err_rel_src) { 3109 case HAL_WBM_REL_SRC_MODULE_REO: 3110 drop = ath11k_dp_rx_h_reo_err(ar, msdu, &rxs, msdu_list); 3111 break; 3112 case 
HAL_WBM_REL_SRC_MODULE_RXDMA: 3113 drop = ath11k_dp_rx_h_rxdma_err(ar, msdu, &rxs); 3114 break; 3115 default: 3116 /* msdu will get freed */ 3117 break; 3118 } 3119 3120 if (drop) { 3121 dev_kfree_skb_any(msdu); 3122 return; 3123 } 3124 3125 status = IEEE80211_SKB_RXCB(msdu); 3126 *status = rxs; 3127 3128 ath11k_dp_rx_deliver_msdu(ar, napi, msdu); 3129 } 3130 3131 int ath11k_dp_rx_process_wbm_err(struct ath11k_base *ab, 3132 struct napi_struct *napi, int budget) 3133 { 3134 struct ath11k *ar; 3135 struct ath11k_dp *dp = &ab->dp; 3136 struct dp_rxdma_ring *rx_ring; 3137 struct hal_rx_wbm_rel_info err_info; 3138 struct hal_srng *srng; 3139 struct sk_buff *msdu; 3140 struct sk_buff_head msdu_list[MAX_RADIOS]; 3141 struct ath11k_skb_rxcb *rxcb; 3142 u32 *rx_desc; 3143 int buf_id, mac_id; 3144 int num_buffs_reaped[MAX_RADIOS] = {0}; 3145 int total_num_buffs_reaped = 0; 3146 int ret, i; 3147 3148 for (i = 0; i < MAX_RADIOS; i++) 3149 __skb_queue_head_init(&msdu_list[i]); 3150 3151 srng = &ab->hal.srng_list[dp->rx_rel_ring.ring_id]; 3152 3153 spin_lock_bh(&srng->lock); 3154 3155 ath11k_hal_srng_access_begin(ab, srng); 3156 3157 while (budget) { 3158 rx_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng); 3159 if (!rx_desc) 3160 break; 3161 3162 ret = ath11k_hal_wbm_desc_parse_err(ab, rx_desc, &err_info); 3163 if (ret) { 3164 ath11k_warn(ab, 3165 "failed to parse rx error in wbm_rel ring desc %d\n", 3166 ret); 3167 continue; 3168 } 3169 3170 buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, err_info.cookie); 3171 mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, err_info.cookie); 3172 3173 ar = ab->pdevs[mac_id].ar; 3174 rx_ring = &ar->dp.rx_refill_buf_ring; 3175 3176 spin_lock_bh(&rx_ring->idr_lock); 3177 msdu = idr_find(&rx_ring->bufs_idr, buf_id); 3178 if (!msdu) { 3179 ath11k_warn(ab, "frame rx with invalid buf_id %d pdev %d\n", 3180 buf_id, mac_id); 3181 spin_unlock_bh(&rx_ring->idr_lock); 3182 continue; 3183 } 3184 3185 idr_remove(&rx_ring->bufs_idr, buf_id); 3186 spin_unlock_bh(&rx_ring->idr_lock); 3187 3188 rxcb = ATH11K_SKB_RXCB(msdu); 3189 dma_unmap_single(ab->dev, rxcb->paddr, 3190 msdu->len + skb_tailroom(msdu), 3191 DMA_FROM_DEVICE); 3192 3193 num_buffs_reaped[mac_id]++; 3194 total_num_buffs_reaped++; 3195 budget--; 3196 3197 if (err_info.push_reason != 3198 HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) { 3199 dev_kfree_skb_any(msdu); 3200 continue; 3201 } 3202 3203 rxcb->err_rel_src = err_info.err_rel_src; 3204 rxcb->err_code = err_info.err_code; 3205 rxcb->rx_desc = (struct hal_rx_desc *)msdu->data; 3206 __skb_queue_tail(&msdu_list[mac_id], msdu); 3207 } 3208 3209 ath11k_hal_srng_access_end(ab, srng); 3210 3211 spin_unlock_bh(&srng->lock); 3212 3213 if (!total_num_buffs_reaped) 3214 goto done; 3215 3216 for (i = 0; i < ab->num_radios; i++) { 3217 if (!num_buffs_reaped[i]) 3218 continue; 3219 3220 ar = ab->pdevs[i].ar; 3221 rx_ring = &ar->dp.rx_refill_buf_ring; 3222 3223 ath11k_dp_rxbufs_replenish(ab, i, rx_ring, num_buffs_reaped[i], 3224 HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC); 3225 } 3226 3227 rcu_read_lock(); 3228 for (i = 0; i < ab->num_radios; i++) { 3229 if (!rcu_dereference(ab->pdevs_active[i])) { 3230 __skb_queue_purge(&msdu_list[i]); 3231 continue; 3232 } 3233 3234 ar = ab->pdevs[i].ar; 3235 3236 if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) { 3237 __skb_queue_purge(&msdu_list[i]); 3238 continue; 3239 } 3240 3241 while ((msdu = __skb_dequeue(&msdu_list[i])) != NULL) 3242 ath11k_dp_rx_wbm_err(ar, napi, msdu, &msdu_list[i]); 3243 } 3244 rcu_read_unlock(); 3245 done: 3246 return 
total_num_buffs_reaped; 3247 } 3248 3249 int ath11k_dp_process_rxdma_err(struct ath11k_base *ab, int mac_id, int budget) 3250 { 3251 struct ath11k *ar = ab->pdevs[mac_id].ar; 3252 struct dp_srng *err_ring = &ar->dp.rxdma_err_dst_ring; 3253 struct dp_rxdma_ring *rx_ring = &ar->dp.rx_refill_buf_ring; 3254 struct dp_link_desc_bank *link_desc_banks = ab->dp.link_desc_banks; 3255 struct hal_srng *srng; 3256 u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC]; 3257 enum hal_rx_buf_return_buf_manager rbm; 3258 enum hal_reo_entr_rxdma_ecode rxdma_err_code; 3259 struct ath11k_skb_rxcb *rxcb; 3260 struct sk_buff *skb; 3261 struct hal_reo_entrance_ring *entr_ring; 3262 void *desc; 3263 int num_buf_freed = 0; 3264 int quota = budget; 3265 dma_addr_t paddr; 3266 u32 desc_bank; 3267 void *link_desc_va; 3268 int num_msdus; 3269 int i; 3270 int buf_id; 3271 3272 srng = &ab->hal.srng_list[err_ring->ring_id]; 3273 3274 spin_lock_bh(&srng->lock); 3275 3276 ath11k_hal_srng_access_begin(ab, srng); 3277 3278 while (quota-- && 3279 (desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) { 3280 ath11k_hal_rx_reo_ent_paddr_get(ab, desc, &paddr, &desc_bank); 3281 3282 entr_ring = (struct hal_reo_entrance_ring *)desc; 3283 rxdma_err_code = 3284 FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE, 3285 entr_ring->info1); 3286 ab->soc_stats.rxdma_error[rxdma_err_code]++; 3287 3288 link_desc_va = link_desc_banks[desc_bank].vaddr + 3289 (paddr - link_desc_banks[desc_bank].paddr); 3290 ath11k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, 3291 msdu_cookies, &rbm); 3292 3293 for (i = 0; i < num_msdus; i++) { 3294 buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, 3295 msdu_cookies[i]); 3296 3297 spin_lock_bh(&rx_ring->idr_lock); 3298 skb = idr_find(&rx_ring->bufs_idr, buf_id); 3299 if (!skb) { 3300 ath11k_warn(ab, "rxdma error with invalid buf_id %d\n", 3301 buf_id); 3302 spin_unlock_bh(&rx_ring->idr_lock); 3303 continue; 3304 } 3305 3306 idr_remove(&rx_ring->bufs_idr, buf_id); 3307 spin_unlock_bh(&rx_ring->idr_lock); 3308 3309 rxcb = ATH11K_SKB_RXCB(skb); 3310 dma_unmap_single(ab->dev, rxcb->paddr, 3311 skb->len + skb_tailroom(skb), 3312 DMA_FROM_DEVICE); 3313 dev_kfree_skb_any(skb); 3314 3315 num_buf_freed++; 3316 } 3317 3318 ath11k_dp_rx_link_desc_return(ab, desc, 3319 HAL_WBM_REL_BM_ACT_PUT_IN_IDLE); 3320 } 3321 3322 ath11k_hal_srng_access_end(ab, srng); 3323 3324 spin_unlock_bh(&srng->lock); 3325 3326 if (num_buf_freed) 3327 ath11k_dp_rxbufs_replenish(ab, mac_id, rx_ring, num_buf_freed, 3328 HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC); 3329 3330 return budget - quota; 3331 } 3332 3333 void ath11k_dp_process_reo_status(struct ath11k_base *ab) 3334 { 3335 struct ath11k_dp *dp = &ab->dp; 3336 struct hal_srng *srng; 3337 struct dp_reo_cmd *cmd, *tmp; 3338 bool found = false; 3339 u32 *reo_desc; 3340 u16 tag; 3341 struct hal_reo_status reo_status; 3342 3343 srng = &ab->hal.srng_list[dp->reo_status_ring.ring_id]; 3344 3345 memset(&reo_status, 0, sizeof(reo_status)); 3346 3347 spin_lock_bh(&srng->lock); 3348 3349 ath11k_hal_srng_access_begin(ab, srng); 3350 3351 while ((reo_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) { 3352 tag = FIELD_GET(HAL_SRNG_TLV_HDR_TAG, *reo_desc); 3353 3354 switch (tag) { 3355 case HAL_REO_GET_QUEUE_STATS_STATUS: 3356 ath11k_hal_reo_status_queue_stats(ab, reo_desc, 3357 &reo_status); 3358 break; 3359 case HAL_REO_FLUSH_QUEUE_STATUS: 3360 ath11k_hal_reo_flush_queue_status(ab, reo_desc, 3361 &reo_status); 3362 break; 3363 case HAL_REO_FLUSH_CACHE_STATUS: 3364 ath11k_hal_reo_flush_cache_status(ab, 
reo_desc, 3365 &reo_status); 3366 break; 3367 case HAL_REO_UNBLOCK_CACHE_STATUS: 3368 ath11k_hal_reo_unblk_cache_status(ab, reo_desc, 3369 &reo_status); 3370 break; 3371 case HAL_REO_FLUSH_TIMEOUT_LIST_STATUS: 3372 ath11k_hal_reo_flush_timeout_list_status(ab, reo_desc, 3373 &reo_status); 3374 break; 3375 case HAL_REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS: 3376 ath11k_hal_reo_desc_thresh_reached_status(ab, reo_desc, 3377 &reo_status); 3378 break; 3379 case HAL_REO_UPDATE_RX_REO_QUEUE_STATUS: 3380 ath11k_hal_reo_update_rx_reo_queue_status(ab, reo_desc, 3381 &reo_status); 3382 break; 3383 default: 3384 ath11k_warn(ab, "Unknown reo status type %d\n", tag); 3385 continue; 3386 } 3387 3388 spin_lock_bh(&dp->reo_cmd_lock); 3389 list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) { 3390 if (reo_status.uniform_hdr.cmd_num == cmd->cmd_num) { 3391 found = true; 3392 list_del(&cmd->list); 3393 break; 3394 } 3395 } 3396 spin_unlock_bh(&dp->reo_cmd_lock); 3397 3398 if (found) { 3399 cmd->handler(dp, (void *)&cmd->data, 3400 reo_status.uniform_hdr.cmd_status); 3401 kfree(cmd); 3402 } 3403 3404 found = false; 3405 } 3406 3407 ath11k_hal_srng_access_end(ab, srng); 3408 3409 spin_unlock_bh(&srng->lock); 3410 } 3411 3412 void ath11k_dp_rx_pdev_free(struct ath11k_base *ab, int mac_id) 3413 { 3414 struct ath11k *ar = ab->pdevs[mac_id].ar; 3415 3416 ath11k_dp_rx_pdev_srng_free(ar); 3417 ath11k_dp_rxdma_pdev_buf_free(ar); 3418 } 3419 3420 int ath11k_dp_rx_pdev_alloc(struct ath11k_base *ab, int mac_id) 3421 { 3422 struct ath11k *ar = ab->pdevs[mac_id].ar; 3423 struct ath11k_pdev_dp *dp = &ar->dp; 3424 u32 ring_id; 3425 int ret; 3426 3427 ret = ath11k_dp_rx_pdev_srng_alloc(ar); 3428 if (ret) { 3429 ath11k_warn(ab, "failed to setup rx srngs\n"); 3430 return ret; 3431 } 3432 3433 ret = ath11k_dp_rxdma_pdev_buf_setup(ar); 3434 if (ret) { 3435 ath11k_warn(ab, "failed to setup rxdma ring\n"); 3436 return ret; 3437 } 3438 3439 ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id; 3440 ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id, HAL_RXDMA_BUF); 3441 if (ret) { 3442 ath11k_warn(ab, "failed to configure rx_refill_buf_ring %d\n", 3443 ret); 3444 return ret; 3445 } 3446 3447 ring_id = dp->rxdma_err_dst_ring.ring_id; 3448 ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id, HAL_RXDMA_DST); 3449 if (ret) { 3450 ath11k_warn(ab, "failed to configure rxdma_err_dest_ring %d\n", 3451 ret); 3452 return ret; 3453 } 3454 3455 ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id; 3456 ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, 3457 mac_id, HAL_RXDMA_MONITOR_BUF); 3458 if (ret) { 3459 ath11k_warn(ab, "failed to configure rxdma_mon_buf_ring %d\n", 3460 ret); 3461 return ret; 3462 } 3463 ret = ath11k_dp_tx_htt_srng_setup(ab, 3464 dp->rxdma_mon_dst_ring.ring_id, 3465 mac_id, HAL_RXDMA_MONITOR_DST); 3466 if (ret) { 3467 ath11k_warn(ab, "failed to configure rxdma_mon_dst_ring %d\n", 3468 ret); 3469 return ret; 3470 } 3471 ret = ath11k_dp_tx_htt_srng_setup(ab, 3472 dp->rxdma_mon_desc_ring.ring_id, 3473 mac_id, HAL_RXDMA_MONITOR_DESC); 3474 if (ret) { 3475 ath11k_warn(ab, "failed to configure rxdma_mon_dst_ring %d\n", 3476 ret); 3477 return ret; 3478 } 3479 ring_id = dp->rx_mon_status_refill_ring.refill_buf_ring.ring_id; 3480 ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id, 3481 HAL_RXDMA_MONITOR_STATUS); 3482 if (ret) { 3483 ath11k_warn(ab, 3484 "failed to configure mon_status_refill_ring %d\n", 3485 ret); 3486 return ret; 3487 } 3488 return 0; 3489 } 3490 3491 static void ath11k_dp_mon_set_frag_len(u32 
*total_len, u32 *frag_len) 3492 { 3493 if (*total_len >= (DP_RX_BUFFER_SIZE - sizeof(struct hal_rx_desc))) { 3494 *frag_len = DP_RX_BUFFER_SIZE - sizeof(struct hal_rx_desc); 3495 *total_len -= *frag_len; 3496 } else { 3497 *frag_len = *total_len; 3498 *total_len = 0; 3499 } 3500 } 3501 3502 static 3503 int ath11k_dp_rx_monitor_link_desc_return(struct ath11k *ar, 3504 void *p_last_buf_addr_info, 3505 u8 mac_id) 3506 { 3507 struct ath11k_pdev_dp *dp = &ar->dp; 3508 struct dp_srng *dp_srng; 3509 void *hal_srng; 3510 void *src_srng_desc; 3511 int ret = 0; 3512 3513 dp_srng = &dp->rxdma_mon_desc_ring; 3514 hal_srng = &ar->ab->hal.srng_list[dp_srng->ring_id]; 3515 3516 ath11k_hal_srng_access_begin(ar->ab, hal_srng); 3517 3518 src_srng_desc = ath11k_hal_srng_src_get_next_entry(ar->ab, hal_srng); 3519 3520 if (src_srng_desc) { 3521 struct ath11k_buffer_addr *src_desc = 3522 (struct ath11k_buffer_addr *)src_srng_desc; 3523 3524 *src_desc = *((struct ath11k_buffer_addr *)p_last_buf_addr_info); 3525 } else { 3526 ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 3527 "Monitor Link Desc Ring %d Full", mac_id); 3528 ret = -ENOMEM; 3529 } 3530 3531 ath11k_hal_srng_access_end(ar->ab, hal_srng); 3532 return ret; 3533 } 3534 3535 static 3536 void ath11k_dp_rx_mon_next_link_desc_get(void *rx_msdu_link_desc, 3537 dma_addr_t *paddr, u32 *sw_cookie, 3538 void **pp_buf_addr_info) 3539 { 3540 struct hal_rx_msdu_link *msdu_link = 3541 (struct hal_rx_msdu_link *)rx_msdu_link_desc; 3542 struct ath11k_buffer_addr *buf_addr_info; 3543 u8 rbm = 0; 3544 3545 buf_addr_info = (struct ath11k_buffer_addr *)&msdu_link->buf_addr_info; 3546 3547 ath11k_hal_rx_buf_addr_info_get(buf_addr_info, paddr, sw_cookie, &rbm); 3548 3549 *pp_buf_addr_info = (void *)buf_addr_info; 3550 } 3551 3552 static int ath11k_dp_pkt_set_pktlen(struct sk_buff *skb, u32 len) 3553 { 3554 if (skb->len > len) { 3555 skb_trim(skb, len); 3556 } else { 3557 if (skb_tailroom(skb) < len - skb->len) { 3558 if ((pskb_expand_head(skb, 0, 3559 len - skb->len - skb_tailroom(skb), 3560 GFP_ATOMIC))) { 3561 dev_kfree_skb_any(skb); 3562 return -ENOMEM; 3563 } 3564 } 3565 skb_put(skb, (len - skb->len)); 3566 } 3567 return 0; 3568 } 3569 3570 static void ath11k_hal_rx_msdu_list_get(struct ath11k *ar, 3571 void *msdu_link_desc, 3572 struct hal_rx_msdu_list *msdu_list, 3573 u16 *num_msdus) 3574 { 3575 struct hal_rx_msdu_details *msdu_details = NULL; 3576 struct rx_msdu_desc *msdu_desc_info = NULL; 3577 struct hal_rx_msdu_link *msdu_link = NULL; 3578 int i; 3579 u32 last = FIELD_PREP(RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU, 1); 3580 u32 first = FIELD_PREP(RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU, 1); 3581 u8 tmp = 0; 3582 3583 msdu_link = (struct hal_rx_msdu_link *)msdu_link_desc; 3584 msdu_details = &msdu_link->msdu_link[0]; 3585 3586 for (i = 0; i < HAL_RX_NUM_MSDU_DESC; i++) { 3587 if (FIELD_GET(BUFFER_ADDR_INFO0_ADDR, 3588 msdu_details[i].buf_addr_info.info0) == 0) { 3589 msdu_desc_info = &msdu_details[i - 1].rx_msdu_info; 3590 msdu_desc_info->info0 |= last; 3591 ; 3592 break; 3593 } 3594 msdu_desc_info = &msdu_details[i].rx_msdu_info; 3595 3596 if (!i) 3597 msdu_desc_info->info0 |= first; 3598 else if (i == (HAL_RX_NUM_MSDU_DESC - 1)) 3599 msdu_desc_info->info0 |= last; 3600 msdu_list->msdu_info[i].msdu_flags = msdu_desc_info->info0; 3601 msdu_list->msdu_info[i].msdu_len = 3602 HAL_RX_MSDU_PKT_LENGTH_GET(msdu_desc_info->info0); 3603 msdu_list->sw_cookie[i] = 3604 FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE, 3605 msdu_details[i].buf_addr_info.info1); 3606 tmp = 
FIELD_GET(BUFFER_ADDR_INFO1_RET_BUF_MGR, 3607 msdu_details[i].buf_addr_info.info1); 3608 msdu_list->rbm[i] = tmp; 3609 } 3610 *num_msdus = i; 3611 } 3612 3613 static u32 ath11k_dp_rx_mon_comp_ppduid(u32 msdu_ppdu_id, u32 *ppdu_id, 3614 u32 *rx_bufs_used) 3615 { 3616 u32 ret = 0; 3617 3618 if ((*ppdu_id < msdu_ppdu_id) && 3619 ((msdu_ppdu_id - *ppdu_id) < DP_NOT_PPDU_ID_WRAP_AROUND)) { 3620 *ppdu_id = msdu_ppdu_id; 3621 ret = msdu_ppdu_id; 3622 } else if ((*ppdu_id > msdu_ppdu_id) && 3623 ((*ppdu_id - msdu_ppdu_id) > DP_NOT_PPDU_ID_WRAP_AROUND)) { 3624 /* mon_dst is behind than mon_status 3625 * skip dst_ring and free it 3626 */ 3627 *rx_bufs_used += 1; 3628 *ppdu_id = msdu_ppdu_id; 3629 ret = msdu_ppdu_id; 3630 } 3631 return ret; 3632 } 3633 3634 static void ath11k_dp_mon_get_buf_len(struct hal_rx_msdu_desc_info *info, 3635 bool *is_frag, u32 *total_len, 3636 u32 *frag_len, u32 *msdu_cnt) 3637 { 3638 if (info->msdu_flags & RX_MSDU_DESC_INFO0_MSDU_CONTINUATION) { 3639 if (!*is_frag) { 3640 *total_len = info->msdu_len; 3641 *is_frag = true; 3642 } 3643 ath11k_dp_mon_set_frag_len(total_len, 3644 frag_len); 3645 } else { 3646 if (*is_frag) { 3647 ath11k_dp_mon_set_frag_len(total_len, 3648 frag_len); 3649 } else { 3650 *frag_len = info->msdu_len; 3651 } 3652 *is_frag = false; 3653 *msdu_cnt -= 1; 3654 } 3655 } 3656 3657 static u32 3658 ath11k_dp_rx_mon_mpdu_pop(struct ath11k *ar, 3659 void *ring_entry, struct sk_buff **head_msdu, 3660 struct sk_buff **tail_msdu, u32 *npackets, 3661 u32 *ppdu_id) 3662 { 3663 struct ath11k_pdev_dp *dp = &ar->dp; 3664 struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data; 3665 struct dp_rxdma_ring *rx_ring = &dp->rxdma_mon_buf_ring; 3666 struct sk_buff *msdu = NULL, *last = NULL; 3667 struct hal_rx_msdu_list msdu_list; 3668 void *p_buf_addr_info, *p_last_buf_addr_info; 3669 struct hal_rx_desc *rx_desc; 3670 void *rx_msdu_link_desc; 3671 dma_addr_t paddr; 3672 u16 num_msdus = 0; 3673 u32 rx_buf_size, rx_pkt_offset, sw_cookie; 3674 u32 rx_bufs_used = 0, i = 0; 3675 u32 msdu_ppdu_id = 0, msdu_cnt = 0; 3676 u32 total_len = 0, frag_len = 0; 3677 bool is_frag, is_first_msdu; 3678 bool drop_mpdu = false; 3679 struct ath11k_skb_rxcb *rxcb; 3680 struct hal_reo_entrance_ring *ent_desc = 3681 (struct hal_reo_entrance_ring *)ring_entry; 3682 int buf_id; 3683 3684 ath11k_hal_rx_reo_ent_buf_paddr_get(ring_entry, &paddr, 3685 &sw_cookie, &p_last_buf_addr_info, 3686 &msdu_cnt); 3687 3688 if (FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_PUSH_REASON, 3689 ent_desc->info1) == 3690 HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) { 3691 u8 rxdma_err = 3692 FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE, 3693 ent_desc->info1); 3694 if (rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_FLUSH_REQUEST_ERR || 3695 rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_MPDU_LEN_ERR || 3696 rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_OVERFLOW_ERR) { 3697 drop_mpdu = true; 3698 pmon->rx_mon_stats.dest_mpdu_drop++; 3699 } 3700 } 3701 3702 is_frag = false; 3703 is_first_msdu = true; 3704 3705 do { 3706 if (pmon->mon_last_linkdesc_paddr == paddr) { 3707 pmon->rx_mon_stats.dup_mon_linkdesc_cnt++; 3708 return rx_bufs_used; 3709 } 3710 3711 rx_msdu_link_desc = 3712 (void *)pmon->link_desc_banks[sw_cookie].vaddr + 3713 (paddr - pmon->link_desc_banks[sw_cookie].paddr); 3714 3715 ath11k_hal_rx_msdu_list_get(ar, rx_msdu_link_desc, &msdu_list, 3716 &num_msdus); 3717 3718 for (i = 0; i < num_msdus; i++) { 3719 u32 l2_hdr_offset; 3720 3721 if (pmon->mon_last_buf_cookie == msdu_list.sw_cookie[i]) { 3722 
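				/* Seeing the same sw cookie again means this
				 * monitor buffer was already consumed: mark the
				 * MPDU for dropping and count the duplicate.
				 */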
ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 3723 "i %d last_cookie %d is same\n", 3724 i, pmon->mon_last_buf_cookie); 3725 drop_mpdu = true; 3726 pmon->rx_mon_stats.dup_mon_buf_cnt++; 3727 continue; 3728 } 3729 buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, 3730 msdu_list.sw_cookie[i]); 3731 3732 spin_lock_bh(&rx_ring->idr_lock); 3733 msdu = idr_find(&rx_ring->bufs_idr, buf_id); 3734 spin_unlock_bh(&rx_ring->idr_lock); 3735 if (!msdu) { 3736 ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 3737 "msdu_pop: invalid buf_id %d\n", buf_id); 3738 break; 3739 } 3740 rxcb = ATH11K_SKB_RXCB(msdu); 3741 if (!rxcb->unmapped) { 3742 dma_unmap_single(ar->ab->dev, rxcb->paddr, 3743 msdu->len + 3744 skb_tailroom(msdu), 3745 DMA_FROM_DEVICE); 3746 rxcb->unmapped = 1; 3747 } 3748 if (drop_mpdu) { 3749 ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 3750 "i %d drop msdu %p *ppdu_id %x\n", 3751 i, msdu, *ppdu_id); 3752 dev_kfree_skb_any(msdu); 3753 msdu = NULL; 3754 goto next_msdu; 3755 } 3756 3757 rx_desc = (struct hal_rx_desc *)msdu->data; 3758 3759 rx_pkt_offset = sizeof(struct hal_rx_desc); 3760 l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad(rx_desc); 3761 3762 if (is_first_msdu) { 3763 if (!ath11k_dp_rxdesc_mpdu_valid(rx_desc)) { 3764 drop_mpdu = true; 3765 dev_kfree_skb_any(msdu); 3766 msdu = NULL; 3767 pmon->mon_last_linkdesc_paddr = paddr; 3768 goto next_msdu; 3769 } 3770 3771 msdu_ppdu_id = 3772 ath11k_dp_rxdesc_get_ppduid(rx_desc); 3773 3774 if (ath11k_dp_rx_mon_comp_ppduid(msdu_ppdu_id, 3775 ppdu_id, 3776 &rx_bufs_used)) { 3777 if (rx_bufs_used) { 3778 drop_mpdu = true; 3779 dev_kfree_skb_any(msdu); 3780 msdu = NULL; 3781 goto next_msdu; 3782 } 3783 return rx_bufs_used; 3784 } 3785 pmon->mon_last_linkdesc_paddr = paddr; 3786 is_first_msdu = false; 3787 } 3788 ath11k_dp_mon_get_buf_len(&msdu_list.msdu_info[i], 3789 &is_frag, &total_len, 3790 &frag_len, &msdu_cnt); 3791 rx_buf_size = rx_pkt_offset + l2_hdr_offset + frag_len; 3792 3793 ath11k_dp_pkt_set_pktlen(msdu, rx_buf_size); 3794 3795 if (!(*head_msdu)) 3796 *head_msdu = msdu; 3797 else if (last) 3798 last->next = msdu; 3799 3800 last = msdu; 3801 next_msdu: 3802 pmon->mon_last_buf_cookie = msdu_list.sw_cookie[i]; 3803 rx_bufs_used++; 3804 spin_lock_bh(&rx_ring->idr_lock); 3805 idr_remove(&rx_ring->bufs_idr, buf_id); 3806 spin_unlock_bh(&rx_ring->idr_lock); 3807 } 3808 3809 ath11k_dp_rx_mon_next_link_desc_get(rx_msdu_link_desc, &paddr, 3810 &sw_cookie, 3811 &p_buf_addr_info); 3812 3813 if (ath11k_dp_rx_monitor_link_desc_return(ar, 3814 p_last_buf_addr_info, 3815 dp->mac_id)) 3816 ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 3817 "dp_rx_monitor_link_desc_return failed"); 3818 3819 p_last_buf_addr_info = p_buf_addr_info; 3820 3821 } while (paddr && msdu_cnt); 3822 3823 if (last) 3824 last->next = NULL; 3825 3826 *tail_msdu = msdu; 3827 3828 if (msdu_cnt == 0) 3829 *npackets = 1; 3830 3831 return rx_bufs_used; 3832 } 3833 3834 static void ath11k_dp_rx_msdus_set_payload(struct sk_buff *msdu) 3835 { 3836 u32 rx_pkt_offset, l2_hdr_offset; 3837 3838 rx_pkt_offset = sizeof(struct hal_rx_desc); 3839 l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad((struct hal_rx_desc *)msdu->data); 3840 skb_pull(msdu, rx_pkt_offset + l2_hdr_offset); 3841 } 3842 3843 static struct sk_buff * 3844 ath11k_dp_rx_mon_merg_msdus(struct ath11k *ar, 3845 u32 mac_id, struct sk_buff *head_msdu, 3846 struct sk_buff *last_msdu, 3847 struct ieee80211_rx_status *rxs) 3848 { 3849 struct sk_buff *msdu, *mpdu_buf, *prev_buf; 3850 u32 decap_format, wifi_hdr_len; 3851 struct hal_rx_desc *rx_desc; 3852 char *hdr_desc; 3853 u8 *dest; 
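	/* hdr_desc/wh reference the 802.11 header kept in the rx descriptor;
	 * dest is the destination for header bytes when the frame is rebuilt.
	 */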
3854 struct ieee80211_hdr_3addr *wh; 3855 3856 mpdu_buf = NULL; 3857 3858 if (!head_msdu) 3859 goto err_merge_fail; 3860 3861 rx_desc = (struct hal_rx_desc *)head_msdu->data; 3862 3863 if (ath11k_dp_rxdesc_get_mpdulen_err(rx_desc)) 3864 return NULL; 3865 3866 decap_format = ath11k_dp_rxdesc_get_decap_format(rx_desc); 3867 3868 ath11k_dp_rx_h_ppdu(ar, rx_desc, rxs); 3869 3870 if (decap_format == DP_RX_DECAP_TYPE_RAW) { 3871 ath11k_dp_rx_msdus_set_payload(head_msdu); 3872 3873 prev_buf = head_msdu; 3874 msdu = head_msdu->next; 3875 3876 while (msdu) { 3877 ath11k_dp_rx_msdus_set_payload(msdu); 3878 3879 prev_buf = msdu; 3880 msdu = msdu->next; 3881 } 3882 3883 prev_buf->next = NULL; 3884 3885 skb_trim(prev_buf, prev_buf->len - HAL_RX_FCS_LEN); 3886 } else if (decap_format == DP_RX_DECAP_TYPE_NATIVE_WIFI) { 3887 __le16 qos_field; 3888 u8 qos_pkt = 0; 3889 3890 rx_desc = (struct hal_rx_desc *)head_msdu->data; 3891 hdr_desc = ath11k_dp_rxdesc_get_80211hdr(rx_desc); 3892 3893 /* Base size */ 3894 wifi_hdr_len = sizeof(struct ieee80211_hdr_3addr); 3895 wh = (struct ieee80211_hdr_3addr *)hdr_desc; 3896 3897 if (ieee80211_is_data_qos(wh->frame_control)) { 3898 struct ieee80211_qos_hdr *qwh = 3899 (struct ieee80211_qos_hdr *)hdr_desc; 3900 3901 qos_field = qwh->qos_ctrl; 3902 qos_pkt = 1; 3903 } 3904 msdu = head_msdu; 3905 3906 while (msdu) { 3907 rx_desc = (struct hal_rx_desc *)msdu->data; 3908 hdr_desc = ath11k_dp_rxdesc_get_80211hdr(rx_desc); 3909 3910 if (qos_pkt) { 3911 dest = skb_push(msdu, sizeof(__le16)); 3912 if (!dest) 3913 goto err_merge_fail; 3914 memcpy(dest, hdr_desc, wifi_hdr_len); 3915 memcpy(dest + wifi_hdr_len, 3916 (u8 *)&qos_field, sizeof(__le16)); 3917 } 3918 ath11k_dp_rx_msdus_set_payload(msdu); 3919 prev_buf = msdu; 3920 msdu = msdu->next; 3921 } 3922 dest = skb_put(prev_buf, HAL_RX_FCS_LEN); 3923 if (!dest) 3924 goto err_merge_fail; 3925 3926 ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 3927 "mpdu_buf %pK mpdu_buf->len %u", 3928 prev_buf, prev_buf->len); 3929 } else { 3930 ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 3931 "decap format %d is not supported!\n", 3932 decap_format); 3933 goto err_merge_fail; 3934 } 3935 3936 return head_msdu; 3937 3938 err_merge_fail: 3939 if (mpdu_buf && decap_format != DP_RX_DECAP_TYPE_RAW) { 3940 ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 3941 "err_merge_fail mpdu_buf %pK", mpdu_buf); 3942 /* Free the head buffer */ 3943 dev_kfree_skb_any(mpdu_buf); 3944 } 3945 return NULL; 3946 } 3947 3948 static int ath11k_dp_rx_mon_deliver(struct ath11k *ar, u32 mac_id, 3949 struct sk_buff *head_msdu, 3950 struct sk_buff *tail_msdu, 3951 struct napi_struct *napi) 3952 { 3953 struct ath11k_pdev_dp *dp = &ar->dp; 3954 struct sk_buff *mon_skb, *skb_next, *header; 3955 struct ieee80211_rx_status *rxs = &dp->rx_status, *status; 3956 3957 mon_skb = ath11k_dp_rx_mon_merg_msdus(ar, mac_id, head_msdu, 3958 tail_msdu, rxs); 3959 3960 if (!mon_skb) 3961 goto mon_deliver_fail; 3962 3963 header = mon_skb; 3964 3965 rxs->flag = 0; 3966 do { 3967 skb_next = mon_skb->next; 3968 if (!skb_next) 3969 rxs->flag &= ~RX_FLAG_AMSDU_MORE; 3970 else 3971 rxs->flag |= RX_FLAG_AMSDU_MORE; 3972 3973 if (mon_skb == header) { 3974 header = NULL; 3975 rxs->flag &= ~RX_FLAG_ALLOW_SAME_PN; 3976 } else { 3977 rxs->flag |= RX_FLAG_ALLOW_SAME_PN; 3978 } 3979 rxs->flag |= RX_FLAG_ONLY_MONITOR; 3980 3981 status = IEEE80211_SKB_RXCB(mon_skb); 3982 *status = *rxs; 3983 3984 ath11k_dp_rx_deliver_msdu(ar, napi, mon_skb); 3985 mon_skb = skb_next; 3986 } while (mon_skb); 3987 rxs->flag = 0; 3988 3989 return 0; 3990 3991 
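	/* Merge failed: nothing was handed to mac80211, so free the whole
	 * msdu chain below.
	 */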
mon_deliver_fail:
	mon_skb = head_msdu;
	while (mon_skb) {
		skb_next = mon_skb->next;
		dev_kfree_skb_any(mon_skb);
		mon_skb = skb_next;
	}
	return -EINVAL;
}

/* Reap the RXDMA monitor destination ring: pop the MSDU chain of each
 * MPDU, deliver complete MPDUs to mac80211 and replenish the consumed
 * monitor buffers.
 */
static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, u32 quota,
					  struct napi_struct *napi)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
	void *ring_entry;
	void *mon_dst_srng;
	u32 ppdu_id;
	u32 rx_bufs_used;
	struct ath11k_pdev_mon_stats *rx_mon_stats;
	u32 npackets = 0;

	mon_dst_srng = &ar->ab->hal.srng_list[dp->rxdma_mon_dst_ring.ring_id];

	if (!mon_dst_srng) {
		ath11k_warn(ar->ab,
			    "HAL Monitor Destination Ring Init Failed -- %pK",
			    mon_dst_srng);
		return;
	}

	spin_lock_bh(&pmon->mon_lock);

	ath11k_hal_srng_access_begin(ar->ab, mon_dst_srng);

	ppdu_id = pmon->mon_ppdu_info.ppdu_id;
	rx_bufs_used = 0;
	rx_mon_stats = &pmon->rx_mon_stats;

	while ((ring_entry = ath11k_hal_srng_dst_peek(ar->ab, mon_dst_srng))) {
		struct sk_buff *head_msdu, *tail_msdu;

		head_msdu = NULL;
		tail_msdu = NULL;

		rx_bufs_used += ath11k_dp_rx_mon_mpdu_pop(ar, ring_entry,
							  &head_msdu,
							  &tail_msdu,
							  &npackets, &ppdu_id);

		if (ppdu_id != pmon->mon_ppdu_info.ppdu_id) {
			pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
			ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
				   "dest_rx: new ppdu_id %x != status ppdu_id %x",
				   ppdu_id, pmon->mon_ppdu_info.ppdu_id);
			break;
		}
		if (head_msdu && tail_msdu) {
			ath11k_dp_rx_mon_deliver(ar, dp->mac_id, head_msdu,
						 tail_msdu, napi);
			rx_mon_stats->dest_mpdu_done++;
		}

		ring_entry = ath11k_hal_srng_dst_get_next_entry(ar->ab,
								mon_dst_srng);
	}
	ath11k_hal_srng_access_end(ar->ab, mon_dst_srng);

	spin_unlock_bh(&pmon->mon_lock);

	if (rx_bufs_used) {
		rx_mon_stats->dest_ppdu_done++;
		ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id,
					   &dp->rxdma_mon_buf_ring,
					   rx_bufs_used,
					   HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC);
	}
}

/* Parse the queued monitor status buffers; once a complete PPDU has been
 * seen, process the corresponding entries of the monitor destination ring.
 */
static void ath11k_dp_rx_mon_status_process_tlv(struct ath11k *ar,
						u32 quota,
						struct napi_struct *napi)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
	struct hal_rx_mon_ppdu_info *ppdu_info;
	struct sk_buff *status_skb;
	u32 tlv_status = HAL_TLV_STATUS_BUF_DONE;
	struct ath11k_pdev_mon_stats *rx_mon_stats;

	ppdu_info = &pmon->mon_ppdu_info;
	rx_mon_stats = &pmon->rx_mon_stats;

	if (pmon->mon_ppdu_status != DP_PPDU_STATUS_START)
		return;

	while (!skb_queue_empty(&pmon->rx_status_q)) {
		status_skb = skb_dequeue(&pmon->rx_status_q);

		tlv_status = ath11k_hal_rx_parse_mon_status(ar->ab, ppdu_info,
							    status_skb);
		if (tlv_status == HAL_TLV_STATUS_PPDU_DONE) {
			rx_mon_stats->status_ppdu_done++;
			pmon->mon_ppdu_status = DP_PPDU_STATUS_DONE;
			ath11k_dp_rx_mon_dest_process(ar, quota, napi);
			pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
		}
		dev_kfree_skb_any(status_skb);
	}
}

static int ath11k_dp_mon_process_rx(struct ath11k_base *ab, int mac_id,
				    struct napi_struct *napi, int budget)
{
	struct ath11k *ar = ab->pdevs[mac_id].ar;
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
	int num_buffs_reaped = 0;

	num_buffs_reaped = ath11k_dp_rx_reap_mon_status_ring(ar->ab, dp->mac_id, &budget,
							     &pmon->rx_status_q);
	if (num_buffs_reaped)
		ath11k_dp_rx_mon_status_process_tlv(ar, budget, napi);

	return num_buffs_reaped;
}

int ath11k_dp_rx_process_mon_rings(struct ath11k_base *ab, int mac_id,
				   struct napi_struct *napi, int budget)
{
	struct ath11k *ar = ab->pdevs[mac_id].ar;
	int ret = 0;

	if (test_bit(ATH11K_FLAG_MONITOR_ENABLED, &ar->monitor_flags))
		ret = ath11k_dp_mon_process_rx(ab, mac_id, napi, budget);
	else
		ret = ath11k_dp_rx_process_mon_status(ab, mac_id, napi, budget);
	return ret;
}

static int ath11k_dp_rx_pdev_mon_status_attach(struct ath11k *ar)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;

	skb_queue_head_init(&pmon->rx_status_q);

	pmon->mon_ppdu_status = DP_PPDU_STATUS_START;

	memset(&pmon->rx_mon_stats, 0,
	       sizeof(pmon->rx_mon_stats));
	return 0;
}

int ath11k_dp_rx_pdev_mon_attach(struct ath11k *ar)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_mon_data *pmon = &dp->mon_data;
	struct hal_srng *mon_desc_srng = NULL;
	struct dp_srng *dp_srng;
	int ret = 0;
	u32 n_link_desc = 0;

	ret = ath11k_dp_rx_pdev_mon_status_attach(ar);
	if (ret) {
		ath11k_warn(ar->ab, "pdev_mon_status_attach() failed");
		return ret;
	}

	dp_srng = &dp->rxdma_mon_desc_ring;
	n_link_desc = dp_srng->size /
		ath11k_hal_srng_get_entrysize(HAL_RXDMA_MONITOR_DESC);
	mon_desc_srng =
		&ar->ab->hal.srng_list[dp->rxdma_mon_desc_ring.ring_id];

	ret = ath11k_dp_link_desc_setup(ar->ab, pmon->link_desc_banks,
					HAL_RXDMA_MONITOR_DESC, mon_desc_srng,
					n_link_desc);
	if (ret) {
		ath11k_warn(ar->ab, "mon_link_desc_pool_setup() failed");
		return ret;
	}
	pmon->mon_last_linkdesc_paddr = 0;
	pmon->mon_last_buf_cookie = DP_RX_DESC_COOKIE_MAX + 1;
	spin_lock_init(&pmon->mon_lock);
	return 0;
}

static int ath11k_dp_mon_link_free(struct ath11k *ar)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_mon_data *pmon = &dp->mon_data;

	ath11k_dp_link_desc_cleanup(ar->ab, pmon->link_desc_banks,
				    HAL_RXDMA_MONITOR_DESC,
				    &dp->rxdma_mon_desc_ring);
	return 0;
}

int ath11k_dp_rx_pdev_mon_detach(struct ath11k *ar)
{
	ath11k_dp_mon_link_free(ar);
	return 0;
}