// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
 */

#include <linux/ieee80211.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <crypto/hash.h>
#include "core.h"
#include "debug.h"
#include "debugfs_htt_stats.h"
#include "debugfs_sta.h"
#include "hal_desc.h"
#include "hw.h"
#include "dp_rx.h"
#include "hal_rx.h"
#include "dp_tx.h"
#include "peer.h"

#define ATH11K_DP_RX_FRAGMENT_TIMEOUT_MS (2 * HZ)

static u8 *ath11k_dp_rx_h_80211_hdr(struct ath11k_base *ab, struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_hdr_status(desc);
}

static enum hal_encrypt_type ath11k_dp_rx_h_mpdu_start_enctype(struct ath11k_base *ab,
								struct hal_rx_desc *desc)
{
	if (!ab->hw_params.hw_ops->rx_desc_encrypt_valid(desc))
		return HAL_ENCRYPT_TYPE_OPEN;

	return ab->hw_params.hw_ops->rx_desc_get_encrypt_type(desc);
}

static u8 ath11k_dp_rx_h_msdu_start_decap_type(struct ath11k_base *ab,
					       struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_decap_type(desc);
}

static u8 ath11k_dp_rx_h_msdu_start_mesh_ctl_present(struct ath11k_base *ab,
						     struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_mesh_ctl(desc);
}

static bool ath11k_dp_rx_h_mpdu_start_seq_ctrl_valid(struct ath11k_base *ab,
						     struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_mpdu_seq_ctl_vld(desc);
}

static bool ath11k_dp_rx_h_mpdu_start_fc_valid(struct ath11k_base *ab,
					       struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_mpdu_fc_valid(desc);
}

static bool ath11k_dp_rx_h_mpdu_start_more_frags(struct ath11k_base *ab,
						 struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;

	hdr = (struct ieee80211_hdr *)(skb->data + ab->hw_params.hal_desc_sz);
	return ieee80211_has_morefrags(hdr->frame_control);
}

static u16 ath11k_dp_rx_h_mpdu_start_frag_no(struct ath11k_base *ab,
					     struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;

	hdr = (struct ieee80211_hdr *)(skb->data + ab->hw_params.hal_desc_sz);
	return le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
}

static u16 ath11k_dp_rx_h_mpdu_start_seq_no(struct ath11k_base *ab,
					    struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_mpdu_start_seq_no(desc);
}

static void *ath11k_dp_rx_get_attention(struct ath11k_base *ab,
					struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_attention(desc);
}

static bool ath11k_dp_rx_h_attn_msdu_done(struct rx_attention *attn)
{
	return !!FIELD_GET(RX_ATTENTION_INFO2_MSDU_DONE,
			   __le32_to_cpu(attn->info2));
}

static bool ath11k_dp_rx_h_attn_l4_cksum_fail(struct rx_attention *attn)
{
	return !!FIELD_GET(RX_ATTENTION_INFO1_TCP_UDP_CKSUM_FAIL,
			   __le32_to_cpu(attn->info1));
}

static bool ath11k_dp_rx_h_attn_ip_cksum_fail(struct rx_attention *attn)
{
	return !!FIELD_GET(RX_ATTENTION_INFO1_IP_CKSUM_FAIL,
			   __le32_to_cpu(attn->info1));
}

static bool ath11k_dp_rx_h_attn_is_decrypted(struct rx_attention *attn)
{
	return (FIELD_GET(RX_ATTENTION_INFO2_DCRYPT_STATUS_CODE,
			  __le32_to_cpu(attn->info2)) ==
		RX_DESC_DECRYPT_STATUS_CODE_OK);
}

static u32 ath11k_dp_rx_h_attn_mpdu_err(struct rx_attention *attn)
{
	u32 info = __le32_to_cpu(attn->info1);
	u32 errmap = 0;

	if (info & RX_ATTENTION_INFO1_FCS_ERR)
		errmap |= DP_RX_MPDU_ERR_FCS;

	if (info & RX_ATTENTION_INFO1_DECRYPT_ERR)
		errmap |= DP_RX_MPDU_ERR_DECRYPT;

	if (info & RX_ATTENTION_INFO1_TKIP_MIC_ERR)
		errmap |= DP_RX_MPDU_ERR_TKIP_MIC;

	if (info & RX_ATTENTION_INFO1_A_MSDU_ERROR)
		errmap |= DP_RX_MPDU_ERR_AMSDU_ERR;

	if (info & RX_ATTENTION_INFO1_OVERFLOW_ERR)
		errmap |= DP_RX_MPDU_ERR_OVERFLOW;

	if (info & RX_ATTENTION_INFO1_MSDU_LEN_ERR)
		errmap |= DP_RX_MPDU_ERR_MSDU_LEN;

	if (info & RX_ATTENTION_INFO1_MPDU_LEN_ERR)
		errmap |= DP_RX_MPDU_ERR_MPDU_LEN;

	return errmap;
}

static u16 ath11k_dp_rx_h_msdu_start_msdu_len(struct ath11k_base *ab,
					      struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_msdu_len(desc);
}

static u8 ath11k_dp_rx_h_msdu_start_sgi(struct ath11k_base *ab,
					struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_msdu_sgi(desc);
}

static u8 ath11k_dp_rx_h_msdu_start_rate_mcs(struct ath11k_base *ab,
					     struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_msdu_rate_mcs(desc);
}

static u8 ath11k_dp_rx_h_msdu_start_rx_bw(struct ath11k_base *ab,
					  struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_msdu_rx_bw(desc);
}

static u32 ath11k_dp_rx_h_msdu_start_freq(struct ath11k_base *ab,
					  struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_msdu_freq(desc);
}

static u8 ath11k_dp_rx_h_msdu_start_pkt_type(struct ath11k_base *ab,
					     struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_msdu_pkt_type(desc);
}

static u8 ath11k_dp_rx_h_msdu_start_nss(struct ath11k_base *ab,
					struct hal_rx_desc *desc)
{
	return hweight8(ab->hw_params.hw_ops->rx_desc_get_msdu_nss(desc));
}

static u8 ath11k_dp_rx_h_mpdu_start_tid(struct ath11k_base *ab,
					struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_mpdu_tid(desc);
}

static u16 ath11k_dp_rx_h_mpdu_start_peer_id(struct ath11k_base *ab,
					     struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_mpdu_peer_id(desc);
}

static u8 ath11k_dp_rx_h_msdu_end_l3pad(struct ath11k_base *ab,
					struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_l3_pad_bytes(desc);
}

static bool ath11k_dp_rx_h_msdu_end_first_msdu(struct ath11k_base *ab,
					       struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_first_msdu(desc);
}

static bool ath11k_dp_rx_h_msdu_end_last_msdu(struct ath11k_base *ab,
					      struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_last_msdu(desc);
}

static void ath11k_dp_rx_desc_end_tlv_copy(struct ath11k_base *ab,
					   struct hal_rx_desc *fdesc,
					   struct hal_rx_desc *ldesc)
{
	ab->hw_params.hw_ops->rx_desc_copy_attn_end_tlv(fdesc, ldesc);
}

static u32 ath11k_dp_rxdesc_get_mpdulen_err(struct rx_attention *attn)
{
	return FIELD_GET(RX_ATTENTION_INFO1_MPDU_LEN_ERR,
			 __le32_to_cpu(attn->info1));
}

static u8 *ath11k_dp_rxdesc_get_80211hdr(struct ath11k_base *ab,
					 struct hal_rx_desc *rx_desc)
{
	u8 *rx_pkt_hdr;

	rx_pkt_hdr = ab->hw_params.hw_ops->rx_desc_get_msdu_payload(rx_desc);

	return rx_pkt_hdr;
}

static bool ath11k_dp_rxdesc_mpdu_valid(struct ath11k_base *ab,
					struct hal_rx_desc *rx_desc)
{
	u32 tlv_tag;

	tlv_tag = ab->hw_params.hw_ops->rx_desc_get_mpdu_start_tag(rx_desc);

	return tlv_tag == HAL_RX_MPDU_START;
}

static u32 ath11k_dp_rxdesc_get_ppduid(struct ath11k_base *ab,
				       struct hal_rx_desc *rx_desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_mpdu_ppdu_id(rx_desc);
}

static void ath11k_dp_rxdesc_set_msdu_len(struct ath11k_base *ab,
					  struct hal_rx_desc *desc,
					  u16 len)
{
	ab->hw_params.hw_ops->rx_desc_set_msdu_len(desc, len);
}

static void ath11k_dp_service_mon_ring(struct timer_list *t)
{
	struct ath11k_base *ab = from_timer(ab, t, mon_reap_timer);
	int i;

	for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++)
		ath11k_dp_rx_process_mon_rings(ab, i, NULL, DP_MON_SERVICE_BUDGET);

	mod_timer(&ab->mon_reap_timer, jiffies +
		  msecs_to_jiffies(ATH11K_MON_TIMER_INTERVAL));
}

static int ath11k_dp_purge_mon_ring(struct ath11k_base *ab)
{
	int i, reaped = 0;
	unsigned long timeout = jiffies + msecs_to_jiffies(DP_MON_PURGE_TIMEOUT_MS);

	do {
		for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++)
			reaped += ath11k_dp_rx_process_mon_rings(ab, i,
								 NULL,
								 DP_MON_SERVICE_BUDGET);

		/* nothing more to reap */
		if (reaped < DP_MON_SERVICE_BUDGET)
			return 0;

	} while (time_before(jiffies, timeout));

	ath11k_warn(ab, "dp mon ring purge timeout");

	return -ETIMEDOUT;
}

/* Returns number of Rx buffers replenished */
int ath11k_dp_rxbufs_replenish(struct ath11k_base *ab, int mac_id,
			       struct dp_rxdma_ring *rx_ring,
			       int req_entries,
			       enum hal_rx_buf_return_buf_manager mgr)
{
	struct hal_srng *srng;
	u32 *desc;
	struct sk_buff *skb;
	int num_free;
	int num_remain;
	int buf_id;
	u32 cookie;
	dma_addr_t paddr;

	req_entries = min(req_entries, rx_ring->bufs_max);

	srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	num_free = ath11k_hal_srng_src_num_free(ab, srng, true);
	if (!req_entries && (num_free > (rx_ring->bufs_max * 3) / 4))
		req_entries = num_free;

	req_entries = min(num_free, req_entries);
	num_remain = req_entries;

	while (num_remain > 0) {
		skb = dev_alloc_skb(DP_RX_BUFFER_SIZE +
				    DP_RX_BUFFER_ALIGN_SIZE);
		if (!skb)
			break;

		if (!IS_ALIGNED((unsigned long)skb->data,
				DP_RX_BUFFER_ALIGN_SIZE)) {
			skb_pull(skb,
				 PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) -
				 skb->data);
		}

		paddr = dma_map_single(ab->dev, skb->data,
				       skb->len + skb_tailroom(skb),
				       DMA_FROM_DEVICE);
		if (dma_mapping_error(ab->dev, paddr))
			goto fail_free_skb;

		spin_lock_bh(&rx_ring->idr_lock);
		buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0,
				   rx_ring->bufs_max * 3, GFP_ATOMIC);
		spin_unlock_bh(&rx_ring->idr_lock);
		if (buf_id < 0)
			goto fail_dma_unmap;

		desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
		if (!desc)
			goto fail_idr_remove;

		ATH11K_SKB_RXCB(skb)->paddr = paddr;

		cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) |
			 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);

		num_remain--;

		ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr);
	}

	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	return req_entries - num_remain;

fail_idr_remove:
	spin_lock_bh(&rx_ring->idr_lock);
	idr_remove(&rx_ring->bufs_idr, buf_id);
	spin_unlock_bh(&rx_ring->idr_lock);
fail_dma_unmap:
	dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
			 DMA_FROM_DEVICE);
fail_free_skb:
	dev_kfree_skb_any(skb);

	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	return req_entries - num_remain;
}

static int ath11k_dp_rxdma_buf_ring_free(struct ath11k *ar,
					 struct dp_rxdma_ring *rx_ring)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct sk_buff *skb;
	int buf_id;

	spin_lock_bh(&rx_ring->idr_lock);
	idr_for_each_entry(&rx_ring->bufs_idr, skb, buf_id) {
		idr_remove(&rx_ring->bufs_idr, buf_id);
		/* TODO: Understand where internal driver does this dma_unmap
		 * of rxdma_buffer.
		 */
		dma_unmap_single(ar->ab->dev, ATH11K_SKB_RXCB(skb)->paddr,
				 skb->len + skb_tailroom(skb), DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
	}

	idr_destroy(&rx_ring->bufs_idr);
	spin_unlock_bh(&rx_ring->idr_lock);

	/* if rxdma1_enable is false, mon_status_refill_ring
	 * isn't setup, so don't clean.
	 */
	if (!ar->ab->hw_params.rxdma1_enable)
		return 0;

	rx_ring = &dp->rx_mon_status_refill_ring[0];

	spin_lock_bh(&rx_ring->idr_lock);
	idr_for_each_entry(&rx_ring->bufs_idr, skb, buf_id) {
		idr_remove(&rx_ring->bufs_idr, buf_id);
		/* XXX: Understand where internal driver does this dma_unmap
		 * of rxdma_buffer.
		 */
		dma_unmap_single(ar->ab->dev, ATH11K_SKB_RXCB(skb)->paddr,
				 skb->len + skb_tailroom(skb), DMA_BIDIRECTIONAL);
		dev_kfree_skb_any(skb);
	}

	idr_destroy(&rx_ring->bufs_idr);
	spin_unlock_bh(&rx_ring->idr_lock);

	return 0;
}

static int ath11k_dp_rxdma_pdev_buf_free(struct ath11k *ar)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_base *ab = ar->ab;
	struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
	int i;

	ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);

	rx_ring = &dp->rxdma_mon_buf_ring;
	ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);

	for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
		rx_ring = &dp->rx_mon_status_refill_ring[i];
		ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);
	}

	return 0;
}

static int ath11k_dp_rxdma_ring_buf_setup(struct ath11k *ar,
					  struct dp_rxdma_ring *rx_ring,
					  u32 ringtype)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	int num_entries;

	num_entries = rx_ring->refill_buf_ring.size /
		      ath11k_hal_srng_get_entrysize(ar->ab, ringtype);

	rx_ring->bufs_max = num_entries;
	ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id, rx_ring, num_entries,
				   HAL_RX_BUF_RBM_SW3_BM);
	return 0;
}

static int ath11k_dp_rxdma_pdev_buf_setup(struct ath11k *ar)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_base *ab = ar->ab;
	struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
	int i;

	ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_BUF);

	if (ar->ab->hw_params.rxdma1_enable) {
		rx_ring = &dp->rxdma_mon_buf_ring;
		ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_MONITOR_BUF);
	}

	for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
		rx_ring = &dp->rx_mon_status_refill_ring[i];
		ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_MONITOR_STATUS);
	}

	return 0;
}

static void ath11k_dp_rx_pdev_srng_free(struct ath11k *ar)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_base *ab = ar->ab;
	int i;

	ath11k_dp_srng_cleanup(ab, &dp->rx_refill_buf_ring.refill_buf_ring);

	for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
		if (ab->hw_params.rx_mac_buf_ring)
			ath11k_dp_srng_cleanup(ab, &dp->rx_mac_buf_ring[i]);

		ath11k_dp_srng_cleanup(ab, &dp->rxdma_err_dst_ring[i]);
		ath11k_dp_srng_cleanup(ab,
				       &dp->rx_mon_status_refill_ring[i].refill_buf_ring);
	}

	ath11k_dp_srng_cleanup(ab, &dp->rxdma_mon_buf_ring.refill_buf_ring);
}

void ath11k_dp_pdev_reo_cleanup(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	int i;

	for (i = 0; i < DP_REO_DST_RING_MAX; i++)
		ath11k_dp_srng_cleanup(ab, &dp->reo_dst_ring[i]);
}

int ath11k_dp_pdev_reo_setup(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	int ret;
	int i;

	for (i = 0; i < DP_REO_DST_RING_MAX; i++) {
		ret = ath11k_dp_srng_setup(ab, &dp->reo_dst_ring[i],
					   HAL_REO_DST, i, 0,
					   DP_REO_DST_RING_SIZE);
		if (ret) {
			ath11k_warn(ab, "failed to setup reo_dst_ring\n");
			goto err_reo_cleanup;
		}
	}

	return 0;

err_reo_cleanup:
	ath11k_dp_pdev_reo_cleanup(ab);

	return ret;
}

static int ath11k_dp_rx_pdev_srng_alloc(struct ath11k *ar)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_base *ab = ar->ab;
	struct dp_srng *srng = NULL;
	int i;
	int ret;

	ret = ath11k_dp_srng_setup(ar->ab,
				   &dp->rx_refill_buf_ring.refill_buf_ring,
				   HAL_RXDMA_BUF, 0,
				   dp->mac_id, DP_RXDMA_BUF_RING_SIZE);
	if (ret) {
		ath11k_warn(ar->ab, "failed to setup rx_refill_buf_ring\n");
		return ret;
	}

	if (ar->ab->hw_params.rx_mac_buf_ring) {
		for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
			ret = ath11k_dp_srng_setup(ar->ab,
						   &dp->rx_mac_buf_ring[i],
						   HAL_RXDMA_BUF, 1,
						   dp->mac_id + i, 1024);
			if (ret) {
				ath11k_warn(ar->ab, "failed to setup rx_mac_buf_ring %d\n",
					    i);
				return ret;
			}
		}
	}

	for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
		ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_err_dst_ring[i],
					   HAL_RXDMA_DST, 0, dp->mac_id + i,
					   DP_RXDMA_ERR_DST_RING_SIZE);
		if (ret) {
			ath11k_warn(ar->ab, "failed to setup rxdma_err_dst_ring %d\n", i);
			return ret;
		}
	}

	for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
		srng = &dp->rx_mon_status_refill_ring[i].refill_buf_ring;
		ret = ath11k_dp_srng_setup(ar->ab,
					   srng,
					   HAL_RXDMA_MONITOR_STATUS, 0, dp->mac_id + i,
					   DP_RXDMA_MON_STATUS_RING_SIZE);
		if (ret) {
			ath11k_warn(ar->ab,
				    "failed to setup rx_mon_status_refill_ring %d\n", i);
			return ret;
		}
	}

	/* if rxdma1_enable is false, then it doesn't need
	 * to setup rxdma_mon_buf_ring, rxdma_mon_dst_ring
	 * and rxdma_mon_desc_ring.
	 * init reap timer for QCA6390.
	 */
	if (!ar->ab->hw_params.rxdma1_enable) {
		/* init mon status buffer reap timer */
		timer_setup(&ar->ab->mon_reap_timer,
			    ath11k_dp_service_mon_ring, 0);
		return 0;
	}

	ret = ath11k_dp_srng_setup(ar->ab,
				   &dp->rxdma_mon_buf_ring.refill_buf_ring,
				   HAL_RXDMA_MONITOR_BUF, 0, dp->mac_id,
				   DP_RXDMA_MONITOR_BUF_RING_SIZE);
	if (ret) {
		ath11k_warn(ar->ab,
			    "failed to setup HAL_RXDMA_MONITOR_BUF\n");
		return ret;
	}

	ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_mon_dst_ring,
				   HAL_RXDMA_MONITOR_DST, 0, dp->mac_id,
				   DP_RXDMA_MONITOR_DST_RING_SIZE);
	if (ret) {
		ath11k_warn(ar->ab,
			    "failed to setup HAL_RXDMA_MONITOR_DST\n");
		return ret;
	}

	ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_mon_desc_ring,
				   HAL_RXDMA_MONITOR_DESC, 0, dp->mac_id,
				   DP_RXDMA_MONITOR_DESC_RING_SIZE);
	if (ret) {
		ath11k_warn(ar->ab,
			    "failed to setup HAL_RXDMA_MONITOR_DESC\n");
		return ret;
	}

	return 0;
}

void ath11k_dp_reo_cmd_list_cleanup(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	struct dp_reo_cmd *cmd, *tmp;
	struct dp_reo_cache_flush_elem *cmd_cache, *tmp_cache;

	spin_lock_bh(&dp->reo_cmd_lock);
	list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
		list_del(&cmd->list);
		dma_unmap_single(ab->dev, cmd->data.paddr,
				 cmd->data.size, DMA_BIDIRECTIONAL);
		kfree(cmd->data.vaddr);
		kfree(cmd);
	}

	list_for_each_entry_safe(cmd_cache, tmp_cache,
				 &dp->reo_cmd_cache_flush_list, list) {
		list_del(&cmd_cache->list);
		dp->reo_cmd_cache_flush_count--;
		dma_unmap_single(ab->dev, cmd_cache->data.paddr,
				 cmd_cache->data.size, DMA_BIDIRECTIONAL);
		kfree(cmd_cache->data.vaddr);
		kfree(cmd_cache);
	}
	spin_unlock_bh(&dp->reo_cmd_lock);
}

static void ath11k_dp_reo_cmd_free(struct ath11k_dp *dp, void *ctx,
				   enum hal_reo_cmd_status status)
{
	struct dp_rx_tid *rx_tid = ctx;

	if (status != HAL_REO_CMD_SUCCESS)
		ath11k_warn(dp->ab, "failed to flush rx tid hw desc, tid %d status %d\n",
			    rx_tid->tid, status);

	dma_unmap_single(dp->ab->dev, rx_tid->paddr, rx_tid->size,
			 DMA_BIDIRECTIONAL);
	kfree(rx_tid->vaddr);
}

static void ath11k_dp_reo_cache_flush(struct ath11k_base *ab,
				      struct dp_rx_tid *rx_tid)
{
	struct ath11k_hal_reo_cmd cmd = {0};
	unsigned long tot_desc_sz, desc_sz;
	int ret;

	tot_desc_sz = rx_tid->size;
	desc_sz = ath11k_hal_reo_qdesc_size(0, HAL_DESC_REO_NON_QOS_TID);

	while (tot_desc_sz > desc_sz) {
		tot_desc_sz -= desc_sz;
		cmd.addr_lo = lower_32_bits(rx_tid->paddr + tot_desc_sz);
		cmd.addr_hi = upper_32_bits(rx_tid->paddr);
		ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid,
						HAL_REO_CMD_FLUSH_CACHE, &cmd,
						NULL);
		if (ret)
			ath11k_warn(ab,
				    "failed to send HAL_REO_CMD_FLUSH_CACHE, tid %d (%d)\n",
				    rx_tid->tid, ret);
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.addr_lo = lower_32_bits(rx_tid->paddr);
	cmd.addr_hi = upper_32_bits(rx_tid->paddr);
	cmd.flag |= HAL_REO_CMD_FLG_NEED_STATUS;
	ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid,
					HAL_REO_CMD_FLUSH_CACHE,
					&cmd, ath11k_dp_reo_cmd_free);
	if (ret) {
		ath11k_err(ab, "failed to send HAL_REO_CMD_FLUSH_CACHE cmd, tid %d (%d)\n",
			   rx_tid->tid, ret);
		dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
				 DMA_BIDIRECTIONAL);
		kfree(rx_tid->vaddr);
	}
}

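/* Completion handler for the HAL_REO_CMD_UPDATE_RX_QUEUE command sent from
 * ath11k_peer_rx_tid_delete(). On success the TID queue descriptor is moved
 * to reo_cmd_cache_flush_list and aged entries are flushed from the HW
 * cache; on HAL_REO_CMD_DRAIN or allocation failure the descriptor is
 * unmapped and freed immediately.
 */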
static void ath11k_dp_rx_tid_del_func(struct ath11k_dp *dp, void *ctx,
				      enum hal_reo_cmd_status status)
{
	struct ath11k_base *ab = dp->ab;
	struct dp_rx_tid *rx_tid = ctx;
	struct dp_reo_cache_flush_elem *elem, *tmp;

	if (status == HAL_REO_CMD_DRAIN) {
		goto free_desc;
	} else if (status != HAL_REO_CMD_SUCCESS) {
		/* Shouldn't happen! Cleanup in case of other failure? */
		ath11k_warn(ab, "failed to delete rx tid %d hw descriptor %d\n",
			    rx_tid->tid, status);
		return;
	}

	elem = kzalloc(sizeof(*elem), GFP_ATOMIC);
	if (!elem)
		goto free_desc;

	elem->ts = jiffies;
	memcpy(&elem->data, rx_tid, sizeof(*rx_tid));

	spin_lock_bh(&dp->reo_cmd_lock);
	list_add_tail(&elem->list, &dp->reo_cmd_cache_flush_list);
	dp->reo_cmd_cache_flush_count++;

	/* Flush and invalidate aged REO desc from HW cache */
	list_for_each_entry_safe(elem, tmp, &dp->reo_cmd_cache_flush_list,
				 list) {
		if (dp->reo_cmd_cache_flush_count > DP_REO_DESC_FREE_THRESHOLD ||
		    time_after(jiffies, elem->ts +
			       msecs_to_jiffies(DP_REO_DESC_FREE_TIMEOUT_MS))) {
			list_del(&elem->list);
			dp->reo_cmd_cache_flush_count--;
			spin_unlock_bh(&dp->reo_cmd_lock);

			ath11k_dp_reo_cache_flush(ab, &elem->data);
			kfree(elem);
			spin_lock_bh(&dp->reo_cmd_lock);
		}
	}
	spin_unlock_bh(&dp->reo_cmd_lock);

	return;
free_desc:
	dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
			 DMA_BIDIRECTIONAL);
	kfree(rx_tid->vaddr);
}

void ath11k_peer_rx_tid_delete(struct ath11k *ar,
			       struct ath11k_peer *peer, u8 tid)
{
	struct ath11k_hal_reo_cmd cmd = {0};
	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
	int ret;

	if (!rx_tid->active)
		return;

	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
	cmd.addr_lo = lower_32_bits(rx_tid->paddr);
	cmd.addr_hi = upper_32_bits(rx_tid->paddr);
	cmd.upd0 |= HAL_REO_CMD_UPD0_VLD;
	ret = ath11k_dp_tx_send_reo_cmd(ar->ab, rx_tid,
					HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
					ath11k_dp_rx_tid_del_func);
	if (ret) {
		ath11k_err(ar->ab, "failed to send HAL_REO_CMD_UPDATE_RX_QUEUE cmd, tid %d (%d)\n",
			   tid, ret);
		dma_unmap_single(ar->ab->dev, rx_tid->paddr, rx_tid->size,
				 DMA_BIDIRECTIONAL);
		kfree(rx_tid->vaddr);
	}

	rx_tid->active = false;
}

static int ath11k_dp_rx_link_desc_return(struct ath11k_base *ab,
					 u32 *link_desc,
					 enum hal_wbm_rel_bm_act action)
{
	struct ath11k_dp *dp = &ab->dp;
	struct hal_srng *srng;
	u32 *desc;
	int ret = 0;

	srng = &ab->hal.srng_list[dp->wbm_desc_rel_ring.ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
	if (!desc) {
		ret = -ENOBUFS;
		goto exit;
	}

	ath11k_hal_rx_msdu_link_desc_set(ab, (void *)desc, (void *)link_desc,
					 action);

exit:
	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	return ret;
}

static void ath11k_dp_rx_frags_cleanup(struct dp_rx_tid *rx_tid, bool rel_link_desc)
{
	struct ath11k_base *ab = rx_tid->ab;

	lockdep_assert_held(&ab->base_lock);

	if (rx_tid->dst_ring_desc) {
		if (rel_link_desc)
			ath11k_dp_rx_link_desc_return(ab, (u32 *)rx_tid->dst_ring_desc,
						      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
		kfree(rx_tid->dst_ring_desc);
		rx_tid->dst_ring_desc = NULL;
	}

	rx_tid->cur_sn = 0;
	rx_tid->last_frag_no = 0;
	rx_tid->rx_frag_bitmap = 0;
	__skb_queue_purge(&rx_tid->rx_frags);
}

void ath11k_peer_rx_tid_cleanup(struct ath11k *ar, struct ath11k_peer *peer)
{
	struct dp_rx_tid *rx_tid;
	int i;

	lockdep_assert_held(&ar->ab->base_lock);

	for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
		rx_tid = &peer->rx_tid[i];

		ath11k_peer_rx_tid_delete(ar, peer, i);
		ath11k_dp_rx_frags_cleanup(rx_tid, true);

		spin_unlock_bh(&ar->ab->base_lock);
		del_timer_sync(&rx_tid->frag_timer);
		spin_lock_bh(&ar->ab->base_lock);
	}
}

static int ath11k_peer_rx_tid_reo_update(struct ath11k *ar,
					 struct ath11k_peer *peer,
					 struct dp_rx_tid *rx_tid,
					 u32 ba_win_sz, u16 ssn,
					 bool update_ssn)
{
	struct ath11k_hal_reo_cmd cmd = {0};
	int ret;

	cmd.addr_lo = lower_32_bits(rx_tid->paddr);
	cmd.addr_hi = upper_32_bits(rx_tid->paddr);
	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
	cmd.upd0 = HAL_REO_CMD_UPD0_BA_WINDOW_SIZE;
	cmd.ba_window_size = ba_win_sz;

	if (update_ssn) {
		cmd.upd0 |= HAL_REO_CMD_UPD0_SSN;
		cmd.upd2 = FIELD_PREP(HAL_REO_CMD_UPD2_SSN, ssn);
	}

	ret = ath11k_dp_tx_send_reo_cmd(ar->ab, rx_tid,
					HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
					NULL);
	if (ret) {
		ath11k_warn(ar->ab, "failed to update rx tid queue, tid %d (%d)\n",
			    rx_tid->tid, ret);
		return ret;
	}

	rx_tid->ba_win_sz = ba_win_sz;

	return 0;
}

static void ath11k_dp_rx_tid_mem_free(struct ath11k_base *ab,
				      const u8 *peer_mac, int vdev_id, u8 tid)
{
	struct ath11k_peer *peer;
	struct dp_rx_tid *rx_tid;

	spin_lock_bh(&ab->base_lock);

	peer = ath11k_peer_find(ab, vdev_id, peer_mac);
	if (!peer) {
		ath11k_warn(ab, "failed to find the peer to free up rx tid mem\n");
		goto unlock_exit;
	}

	rx_tid = &peer->rx_tid[tid];
	if (!rx_tid->active)
		goto unlock_exit;

	dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
			 DMA_BIDIRECTIONAL);
	kfree(rx_tid->vaddr);

	rx_tid->active = false;

unlock_exit:
	spin_unlock_bh(&ab->base_lock);
}

int ath11k_peer_rx_tid_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id,
			     u8 tid, u32 ba_win_sz, u16 ssn,
			     enum hal_pn_type pn_type)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_peer *peer;
	struct dp_rx_tid *rx_tid;
	u32 hw_desc_sz;
	u32 *addr_aligned;
	void *vaddr;
	dma_addr_t paddr;
	int ret;

	spin_lock_bh(&ab->base_lock);

	peer = ath11k_peer_find(ab, vdev_id, peer_mac);
	if (!peer) {
		ath11k_warn(ab, "failed to find the peer to set up rx tid\n");
		spin_unlock_bh(&ab->base_lock);
		return -ENOENT;
	}

	rx_tid = &peer->rx_tid[tid];
	/* Update the tid queue if it is already setup */
	if (rx_tid->active) {
		paddr = rx_tid->paddr;
		ret = ath11k_peer_rx_tid_reo_update(ar, peer, rx_tid,
						    ba_win_sz, ssn, true);
		spin_unlock_bh(&ab->base_lock);
		if (ret) {
			ath11k_warn(ab, "failed to update reo for rx tid %d\n", tid);
			return ret;
		}

		ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id,
							     peer_mac, paddr,
							     tid, 1, ba_win_sz);
		if (ret)
			ath11k_warn(ab, "failed to send wmi command to update rx reorder queue, tid :%d (%d)\n",
				    tid, ret);
		return ret;
	}

	rx_tid->tid = tid;

	rx_tid->ba_win_sz = ba_win_sz;

	/* TODO: Optimize the memory allocation for qos tid based on
	 * the actual BA window size in REO tid update path.
	 */
	if (tid == HAL_DESC_REO_NON_QOS_TID)
		hw_desc_sz = ath11k_hal_reo_qdesc_size(ba_win_sz, tid);
	else
		hw_desc_sz = ath11k_hal_reo_qdesc_size(DP_BA_WIN_SZ_MAX, tid);

	vaddr = kzalloc(hw_desc_sz + HAL_LINK_DESC_ALIGN - 1, GFP_ATOMIC);
	if (!vaddr) {
		spin_unlock_bh(&ab->base_lock);
		return -ENOMEM;
	}

	addr_aligned = PTR_ALIGN(vaddr, HAL_LINK_DESC_ALIGN);

	ath11k_hal_reo_qdesc_setup(addr_aligned, tid, ba_win_sz,
				   ssn, pn_type);

	paddr = dma_map_single(ab->dev, addr_aligned, hw_desc_sz,
			       DMA_BIDIRECTIONAL);

	ret = dma_mapping_error(ab->dev, paddr);
	if (ret) {
		spin_unlock_bh(&ab->base_lock);
		goto err_mem_free;
	}

	rx_tid->vaddr = vaddr;
	rx_tid->paddr = paddr;
	rx_tid->size = hw_desc_sz;
	rx_tid->active = true;

	spin_unlock_bh(&ab->base_lock);

	ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, peer_mac,
						     paddr, tid, 1, ba_win_sz);
	if (ret) {
		ath11k_warn(ar->ab, "failed to setup rx reorder queue, tid :%d (%d)\n",
			    tid, ret);
		ath11k_dp_rx_tid_mem_free(ab, peer_mac, vdev_id, tid);
	}

	return ret;

err_mem_free:
	kfree(vaddr);

	return ret;
}

int ath11k_dp_rx_ampdu_start(struct ath11k *ar,
			     struct ieee80211_ampdu_params *params)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_sta *arsta = (void *)params->sta->drv_priv;
	int vdev_id = arsta->arvif->vdev_id;
	int ret;

	ret = ath11k_peer_rx_tid_setup(ar, params->sta->addr, vdev_id,
				       params->tid, params->buf_size,
				       params->ssn, arsta->pn_type);
	if (ret)
		ath11k_warn(ab, "failed to setup rx tid %d\n", ret);

	return ret;
}

int ath11k_dp_rx_ampdu_stop(struct ath11k *ar,
			    struct ieee80211_ampdu_params *params)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_peer *peer;
	struct ath11k_sta *arsta = (void *)params->sta->drv_priv;
	int vdev_id = arsta->arvif->vdev_id;
	dma_addr_t paddr;
	bool active;
	int ret;

	spin_lock_bh(&ab->base_lock);

	peer = ath11k_peer_find(ab, vdev_id, params->sta->addr);
	if (!peer) {
		ath11k_warn(ab, "failed to find the peer to stop rx aggregation\n");
		spin_unlock_bh(&ab->base_lock);
		return -ENOENT;
	}

	paddr = peer->rx_tid[params->tid].paddr;
	active = peer->rx_tid[params->tid].active;

	if (!active) {
		spin_unlock_bh(&ab->base_lock);
		return 0;
	}

	ret = ath11k_peer_rx_tid_reo_update(ar, peer, peer->rx_tid, 1, 0, false);
	spin_unlock_bh(&ab->base_lock);
	if (ret) {
		ath11k_warn(ab, "failed to update reo for rx tid %d: %d\n",
			    params->tid, ret);
		return ret;
	}

	ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id,
						     params->sta->addr, paddr,
						     params->tid, 1, 1);
	if (ret)
		ath11k_warn(ab, "failed to send wmi to delete rx tid %d\n",
			    ret);

	return ret;
}

int ath11k_dp_peer_rx_pn_replay_config(struct ath11k_vif *arvif,
				       const u8 *peer_addr,
				       enum set_key_cmd key_cmd,
				       struct ieee80211_key_conf *key)
{
	struct ath11k *ar = arvif->ar;
	struct ath11k_base *ab = ar->ab;
	struct ath11k_hal_reo_cmd cmd = {0};
	struct ath11k_peer *peer;
	struct dp_rx_tid *rx_tid;
	u8 tid;
	int ret = 0;

	/* NOTE: Enable PN/TSC replay check offload only for unicast frames.
	 * We use mac80211 PN/TSC replay check functionality for bcast/mcast
	 * for now.
	 */
	if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
		return 0;

	cmd.flag |= HAL_REO_CMD_FLG_NEED_STATUS;
	cmd.upd0 |= HAL_REO_CMD_UPD0_PN |
		    HAL_REO_CMD_UPD0_PN_SIZE |
		    HAL_REO_CMD_UPD0_PN_VALID |
		    HAL_REO_CMD_UPD0_PN_CHECK |
		    HAL_REO_CMD_UPD0_SVLD;

	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_TKIP:
	case WLAN_CIPHER_SUITE_CCMP:
	case WLAN_CIPHER_SUITE_CCMP_256:
	case WLAN_CIPHER_SUITE_GCMP:
	case WLAN_CIPHER_SUITE_GCMP_256:
		if (key_cmd == SET_KEY) {
			cmd.upd1 |= HAL_REO_CMD_UPD1_PN_CHECK;
			cmd.pn_size = 48;
		}
		break;
	default:
		break;
	}

	spin_lock_bh(&ab->base_lock);

	peer = ath11k_peer_find(ab, arvif->vdev_id, peer_addr);
	if (!peer) {
		ath11k_warn(ab, "failed to find the peer to configure pn replay detection\n");
		spin_unlock_bh(&ab->base_lock);
		return -ENOENT;
	}

	for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) {
		rx_tid = &peer->rx_tid[tid];
		if (!rx_tid->active)
			continue;
		cmd.addr_lo = lower_32_bits(rx_tid->paddr);
		cmd.addr_hi = upper_32_bits(rx_tid->paddr);
		ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid,
						HAL_REO_CMD_UPDATE_RX_QUEUE,
						&cmd, NULL);
		if (ret) {
			ath11k_warn(ab, "failed to configure rx tid %d queue for pn replay detection %d\n",
				    tid, ret);
			break;
		}
	}

	spin_unlock_bh(&ab->base_lock);

	return ret;
}

static inline int ath11k_get_ppdu_user_index(struct htt_ppdu_stats *ppdu_stats,
					     u16 peer_id)
{
	int i;

	for (i = 0; i < HTT_PPDU_STATS_MAX_USERS - 1; i++) {
		if (ppdu_stats->user_stats[i].is_valid_peer_id) {
			if (peer_id == ppdu_stats->user_stats[i].peer_id)
				return i;
		} else {
			return i;
		}
	}

	return -EINVAL;
}

static int ath11k_htt_tlv_ppdu_stats_parse(struct ath11k_base *ab,
					   u16 tag, u16 len, const void *ptr,
					   void *data)
{
	struct htt_ppdu_stats_info *ppdu_info;
	struct htt_ppdu_user_stats *user_stats;
	int cur_user;
	u16 peer_id;

	ppdu_info = (struct htt_ppdu_stats_info *)data;

	switch (tag) {
	case HTT_PPDU_STATS_TAG_COMMON:
		if (len < sizeof(struct htt_ppdu_stats_common)) {
			ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}
		memcpy((void *)&ppdu_info->ppdu_stats.common, ptr,
		       sizeof(struct htt_ppdu_stats_common));
		break;
	case HTT_PPDU_STATS_TAG_USR_RATE:
		if (len < sizeof(struct htt_ppdu_stats_user_rate)) {
			ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}

		peer_id = ((struct htt_ppdu_stats_user_rate *)ptr)->sw_peer_id;
		cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
						      peer_id);
		if (cur_user < 0)
			return -EINVAL;
		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
		user_stats->peer_id = peer_id;
		user_stats->is_valid_peer_id = true;
		memcpy((void *)&user_stats->rate, ptr,
		       sizeof(struct htt_ppdu_stats_user_rate));
		user_stats->tlv_flags |= BIT(tag);
		break;
	case HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON:
		if (len < sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn)) {
			ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}

		peer_id = ((struct htt_ppdu_stats_usr_cmpltn_cmn *)ptr)->sw_peer_id;
		cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
						      peer_id);
		if (cur_user < 0)
			return -EINVAL;
		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
		user_stats->peer_id = peer_id;
		user_stats->is_valid_peer_id = true;
		memcpy((void *)&user_stats->cmpltn_cmn, ptr,
		       sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn));
		user_stats->tlv_flags |= BIT(tag);
		break;
	case HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS:
		if (len <
		    sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status)) {
			ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}

		peer_id =
		((struct htt_ppdu_stats_usr_cmpltn_ack_ba_status *)ptr)->sw_peer_id;
		cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
						      peer_id);
		if (cur_user < 0)
			return -EINVAL;
		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
		user_stats->peer_id = peer_id;
		user_stats->is_valid_peer_id = true;
		memcpy((void *)&user_stats->ack_ba, ptr,
		       sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status));
		user_stats->tlv_flags |= BIT(tag);
		break;
	}
	return 0;
}

int ath11k_dp_htt_tlv_iter(struct ath11k_base *ab, const void *ptr, size_t len,
			   int (*iter)(struct ath11k_base *ar, u16 tag, u16 len,
				       const void *ptr, void *data),
			   void *data)
{
	const struct htt_tlv *tlv;
	const void *begin = ptr;
	u16 tlv_tag, tlv_len;
	int ret = -EINVAL;

	while (len > 0) {
		if (len < sizeof(*tlv)) {
			ath11k_err(ab, "htt tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
				   ptr - begin, len, sizeof(*tlv));
			return -EINVAL;
		}
		tlv = (struct htt_tlv *)ptr;
		tlv_tag = FIELD_GET(HTT_TLV_TAG, tlv->header);
		tlv_len = FIELD_GET(HTT_TLV_LEN, tlv->header);
		ptr += sizeof(*tlv);
		len -= sizeof(*tlv);

		if (tlv_len > len) {
			ath11k_err(ab, "htt tlv parse failure of tag %u at byte %zd (%zu bytes left, %u expected)\n",
				   tlv_tag, ptr - begin, len, tlv_len);
			return -EINVAL;
		}
		ret = iter(ab, tlv_tag, tlv_len, ptr, data);
		if (ret == -ENOMEM)
			return ret;

		ptr += tlv_len;
		len -= tlv_len;
	}
	return 0;
}

static inline u32 ath11k_he_gi_to_nl80211_he_gi(u8 sgi)
{
	u32 ret = 0;

	switch (sgi) {
	case RX_MSDU_START_SGI_0_8_US:
		ret = NL80211_RATE_INFO_HE_GI_0_8;
		break;
	case RX_MSDU_START_SGI_1_6_US:
		ret = NL80211_RATE_INFO_HE_GI_1_6;
		break;
	case RX_MSDU_START_SGI_3_2_US:
		ret = NL80211_RATE_INFO_HE_GI_3_2;
		break;
	}

	return ret;
}

static void
ath11k_update_per_peer_tx_stats(struct ath11k *ar,
				struct htt_ppdu_stats *ppdu_stats, u8 user)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_peer *peer;
	struct ieee80211_sta *sta;
	struct ath11k_sta *arsta;
	struct htt_ppdu_stats_user_rate *user_rate;
	struct ath11k_per_peer_tx_stats *peer_stats = &ar->peer_tx_stats;
	struct htt_ppdu_user_stats *usr_stats = &ppdu_stats->user_stats[user];
	struct htt_ppdu_stats_common *common = &ppdu_stats->common;
	int ret;
	u8 flags, mcs, nss, bw, sgi, dcm, rate_idx = 0;
	u32 succ_bytes = 0;
	u16 rate = 0, succ_pkts = 0;
	u32 tx_duration = 0;
	u8 tid = HTT_PPDU_STATS_NON_QOS_TID;
	bool is_ampdu = false;

	if (!usr_stats)
		return;

	if (!(usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_RATE)))
		return;

	if (usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON))
		is_ampdu =
			HTT_USR_CMPLTN_IS_AMPDU(usr_stats->cmpltn_cmn.flags);

	if (usr_stats->tlv_flags &
	    BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS)) {
		succ_bytes = usr_stats->ack_ba.success_bytes;
		succ_pkts = FIELD_GET(HTT_PPDU_STATS_ACK_BA_INFO_NUM_MSDU_M,
				      usr_stats->ack_ba.info);
		tid = FIELD_GET(HTT_PPDU_STATS_ACK_BA_INFO_TID_NUM,
				usr_stats->ack_ba.info);
	}

	if (common->fes_duration_us)
		tx_duration = common->fes_duration_us;

	user_rate = &usr_stats->rate;
	flags = HTT_USR_RATE_PREAMBLE(user_rate->rate_flags);
	bw = HTT_USR_RATE_BW(user_rate->rate_flags) - 2;
	nss = HTT_USR_RATE_NSS(user_rate->rate_flags) + 1;
	mcs = HTT_USR_RATE_MCS(user_rate->rate_flags);
	sgi = HTT_USR_RATE_GI(user_rate->rate_flags);
	dcm = HTT_USR_RATE_DCM(user_rate->rate_flags);

	/* Note: If host configured fixed rates and in some other special
	 * cases, the broadcast/management frames are sent in different rates.
	 * Firmware rate's control to be skipped for this?
	 */

	if (flags == WMI_RATE_PREAMBLE_HE && mcs > ATH11K_HE_MCS_MAX) {
		ath11k_warn(ab, "Invalid HE mcs %d peer stats", mcs);
		return;
	}

	if (flags == WMI_RATE_PREAMBLE_VHT && mcs > ATH11K_VHT_MCS_MAX) {
		ath11k_warn(ab, "Invalid VHT mcs %d peer stats", mcs);
		return;
	}

	if (flags == WMI_RATE_PREAMBLE_HT && (mcs > ATH11K_HT_MCS_MAX || nss < 1)) {
		ath11k_warn(ab, "Invalid HT mcs %d nss %d peer stats",
			    mcs, nss);
		return;
	}

	if (flags == WMI_RATE_PREAMBLE_CCK || flags == WMI_RATE_PREAMBLE_OFDM) {
		ret = ath11k_mac_hw_ratecode_to_legacy_rate(mcs,
							    flags,
							    &rate_idx,
							    &rate);
		if (ret < 0)
			return;
	}

	rcu_read_lock();
	spin_lock_bh(&ab->base_lock);
	peer = ath11k_peer_find_by_id(ab, usr_stats->peer_id);

	if (!peer || !peer->sta) {
		spin_unlock_bh(&ab->base_lock);
		rcu_read_unlock();
		return;
	}

	sta = peer->sta;
	arsta = (struct ath11k_sta *)sta->drv_priv;

	memset(&arsta->txrate, 0, sizeof(arsta->txrate));

	switch (flags) {
	case WMI_RATE_PREAMBLE_OFDM:
		arsta->txrate.legacy = rate;
		break;
	case WMI_RATE_PREAMBLE_CCK:
		arsta->txrate.legacy = rate;
		break;
	case WMI_RATE_PREAMBLE_HT:
		arsta->txrate.mcs = mcs + 8 * (nss - 1);
		arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
		if (sgi)
			arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
		break;
	case WMI_RATE_PREAMBLE_VHT:
		arsta->txrate.mcs = mcs;
		arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
		if (sgi)
			arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
		break;
	case WMI_RATE_PREAMBLE_HE:
		arsta->txrate.mcs = mcs;
		arsta->txrate.flags = RATE_INFO_FLAGS_HE_MCS;
		arsta->txrate.he_dcm = dcm;
		arsta->txrate.he_gi = ath11k_he_gi_to_nl80211_he_gi(sgi);
		arsta->txrate.he_ru_alloc = ath11k_he_ru_tones_to_nl80211_he_ru_alloc(
						(user_rate->ru_end -
						 user_rate->ru_start) + 1);
		break;
	}

	arsta->txrate.nss = nss;
	arsta->txrate.bw = ath11k_mac_bw_to_mac80211_bw(bw);
	arsta->tx_duration += tx_duration;
	memcpy(&arsta->last_txrate, &arsta->txrate, sizeof(struct rate_info));

	/* PPDU stats reported for mgmt packet doesn't have valid tx bytes.
	 * So skip peer stats update for mgmt packets.
	 */
	if (tid < HTT_PPDU_STATS_NON_QOS_TID) {
		memset(peer_stats, 0, sizeof(*peer_stats));
		peer_stats->succ_pkts = succ_pkts;
		peer_stats->succ_bytes = succ_bytes;
		peer_stats->is_ampdu = is_ampdu;
		peer_stats->duration = tx_duration;
		peer_stats->ba_fails =
			HTT_USR_CMPLTN_LONG_RETRY(usr_stats->cmpltn_cmn.flags) +
			HTT_USR_CMPLTN_SHORT_RETRY(usr_stats->cmpltn_cmn.flags);

		if (ath11k_debugfs_is_extd_tx_stats_enabled(ar))
			ath11k_debugfs_sta_add_tx_stats(arsta, peer_stats, rate_idx);
	}

	spin_unlock_bh(&ab->base_lock);
	rcu_read_unlock();
}

static void ath11k_htt_update_ppdu_stats(struct ath11k *ar,
					 struct htt_ppdu_stats *ppdu_stats)
{
	u8 user;

	for (user = 0; user < HTT_PPDU_STATS_MAX_USERS - 1; user++)
		ath11k_update_per_peer_tx_stats(ar, ppdu_stats, user);
}

static
struct htt_ppdu_stats_info *ath11k_dp_htt_get_ppdu_desc(struct ath11k *ar,
							u32 ppdu_id)
{
	struct htt_ppdu_stats_info *ppdu_info;

	spin_lock_bh(&ar->data_lock);
	if (!list_empty(&ar->ppdu_stats_info)) {
		list_for_each_entry(ppdu_info, &ar->ppdu_stats_info, list) {
			if (ppdu_info->ppdu_id == ppdu_id) {
				spin_unlock_bh(&ar->data_lock);
				return ppdu_info;
			}
		}

		if (ar->ppdu_stat_list_depth > HTT_PPDU_DESC_MAX_DEPTH) {
			ppdu_info = list_first_entry(&ar->ppdu_stats_info,
						     typeof(*ppdu_info), list);
			list_del(&ppdu_info->list);
			ar->ppdu_stat_list_depth--;
			ath11k_htt_update_ppdu_stats(ar, &ppdu_info->ppdu_stats);
			kfree(ppdu_info);
		}
	}
	spin_unlock_bh(&ar->data_lock);

	ppdu_info = kzalloc(sizeof(*ppdu_info), GFP_ATOMIC);
	if (!ppdu_info)
		return NULL;

	spin_lock_bh(&ar->data_lock);
	list_add_tail(&ppdu_info->list, &ar->ppdu_stats_info);
	ar->ppdu_stat_list_depth++;
	spin_unlock_bh(&ar->data_lock);

	return ppdu_info;
}

static int ath11k_htt_pull_ppdu_stats(struct ath11k_base *ab,
				      struct sk_buff *skb)
{
	struct ath11k_htt_ppdu_stats_msg *msg;
	struct htt_ppdu_stats_info *ppdu_info;
	struct ath11k *ar;
	int ret;
	u8 pdev_id;
	u32 ppdu_id, len;

	msg = (struct ath11k_htt_ppdu_stats_msg *)skb->data;
	len = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PAYLOAD_SIZE, msg->info);
	pdev_id = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PDEV_ID, msg->info);
	ppdu_id = msg->ppdu_id;

	rcu_read_lock();
	ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id);
	if (!ar) {
		ret = -EINVAL;
		goto exit;
	}

	if (ath11k_debugfs_is_pktlog_lite_mode_enabled(ar))
		trace_ath11k_htt_ppdu_stats(ar, skb->data, len);

	ppdu_info = ath11k_dp_htt_get_ppdu_desc(ar, ppdu_id);
	if (!ppdu_info) {
		ret = -EINVAL;
		goto exit;
	}

	ppdu_info->ppdu_id = ppdu_id;
	ret = ath11k_dp_htt_tlv_iter(ab, msg->data, len,
				     ath11k_htt_tlv_ppdu_stats_parse,
				     (void *)ppdu_info);
	if (ret) {
		ath11k_warn(ab, "Failed to parse tlv %d\n", ret);
		goto exit;
	}

exit:
	rcu_read_unlock();

	return ret;
}

static void ath11k_htt_pktlog(struct ath11k_base *ab, struct sk_buff *skb)
{
	struct htt_pktlog_msg *data = (struct htt_pktlog_msg *)skb->data;
	struct ath_pktlog_hdr *hdr = (struct ath_pktlog_hdr *)data;
	struct ath11k *ar;
	u8 pdev_id;

	pdev_id = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PDEV_ID, data->hdr);
	ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id);
	if (!ar) {
		ath11k_warn(ab, "invalid pdev id %d on htt pktlog\n", pdev_id);
		return;
	}

	trace_ath11k_htt_pktlog(ar, data->payload, hdr->size,
				ar->ab->pktlog_defs_checksum);
}

static void ath11k_htt_backpressure_event_handler(struct ath11k_base *ab,
						  struct sk_buff *skb)
{
	u32 *data = (u32 *)skb->data;
	u8 pdev_id, ring_type, ring_id, pdev_idx;
	u16 hp, tp;
	u32 backpressure_time;
	struct ath11k_bp_stats *bp_stats;

	pdev_id = FIELD_GET(HTT_BACKPRESSURE_EVENT_PDEV_ID_M, *data);
	ring_type = FIELD_GET(HTT_BACKPRESSURE_EVENT_RING_TYPE_M, *data);
	ring_id = FIELD_GET(HTT_BACKPRESSURE_EVENT_RING_ID_M, *data);
	++data;

	hp = FIELD_GET(HTT_BACKPRESSURE_EVENT_HP_M, *data);
	tp = FIELD_GET(HTT_BACKPRESSURE_EVENT_TP_M, *data);
	++data;

	backpressure_time = *data;

	ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "htt backpressure event, pdev %d, ring type %d, ring id %d, hp %d tp %d, backpressure time %d\n",
		   pdev_id, ring_type, ring_id, hp, tp, backpressure_time);

	if (ring_type == HTT_BACKPRESSURE_UMAC_RING_TYPE) {
		if (ring_id >= HTT_SW_UMAC_RING_IDX_MAX)
			return;

		bp_stats = &ab->soc_stats.bp_stats.umac_ring_bp_stats[ring_id];
	} else if (ring_type == HTT_BACKPRESSURE_LMAC_RING_TYPE) {
		pdev_idx = DP_HW2SW_MACID(pdev_id);

		if (ring_id >= HTT_SW_LMAC_RING_IDX_MAX || pdev_idx >= MAX_RADIOS)
			return;

		bp_stats = &ab->soc_stats.bp_stats.lmac_ring_bp_stats[ring_id][pdev_idx];
	} else {
		ath11k_warn(ab, "unknown ring type received in htt bp event %d\n",
			    ring_type);
		return;
	}

	spin_lock_bh(&ab->base_lock);
	bp_stats->hp = hp;
	bp_stats->tp = tp;
	bp_stats->count++;
	bp_stats->jiffies = jiffies;
	spin_unlock_bh(&ab->base_lock);
}

void ath11k_dp_htt_htc_t2h_msg_handler(struct ath11k_base *ab,
				       struct sk_buff *skb)
{
	struct ath11k_dp *dp = &ab->dp;
	struct htt_resp_msg *resp = (struct htt_resp_msg *)skb->data;
	enum htt_t2h_msg_type type = FIELD_GET(HTT_T2H_MSG_TYPE, *(u32 *)resp);
	u16 peer_id;
	u8 vdev_id;
	u8 mac_addr[ETH_ALEN];
	u16 peer_mac_h16;
	u16 ast_hash;
	u16 hw_peer_id;

	ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "dp_htt rx msg type :0x%0x\n", type);

	switch (type) {
	case HTT_T2H_MSG_TYPE_VERSION_CONF:
		dp->htt_tgt_ver_major = FIELD_GET(HTT_T2H_VERSION_CONF_MAJOR,
						  resp->version_msg.version);
		dp->htt_tgt_ver_minor = FIELD_GET(HTT_T2H_VERSION_CONF_MINOR,
						  resp->version_msg.version);
		complete(&dp->htt_tgt_version_received);
		break;
	case HTT_T2H_MSG_TYPE_PEER_MAP:
		vdev_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_VDEV_ID,
				    resp->peer_map_ev.info);
		peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_PEER_ID,
				    resp->peer_map_ev.info);
		peer_mac_h16 = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16,
					 resp->peer_map_ev.info1);
		ath11k_dp_get_mac_addr(resp->peer_map_ev.mac_addr_l32,
				       peer_mac_h16, mac_addr);
		ath11k_peer_map_event(ab, vdev_id, peer_id, mac_addr, 0, 0);
		break;
	case HTT_T2H_MSG_TYPE_PEER_MAP2:
		vdev_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_VDEV_ID,
				    resp->peer_map_ev.info);
		peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_PEER_ID,
				    resp->peer_map_ev.info);
		peer_mac_h16 = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16,
					 resp->peer_map_ev.info1);
		ath11k_dp_get_mac_addr(resp->peer_map_ev.mac_addr_l32,
				       peer_mac_h16, mac_addr);
		ast_hash = FIELD_GET(HTT_T2H_PEER_MAP_INFO2_AST_HASH_VAL,
				     resp->peer_map_ev.info2);
		hw_peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_HW_PEER_ID,
				       resp->peer_map_ev.info1);
		ath11k_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash,
				      hw_peer_id);
		break;
	case HTT_T2H_MSG_TYPE_PEER_UNMAP:
	case HTT_T2H_MSG_TYPE_PEER_UNMAP2:
		peer_id = FIELD_GET(HTT_T2H_PEER_UNMAP_INFO_PEER_ID,
				    resp->peer_unmap_ev.info);
		ath11k_peer_unmap_event(ab, peer_id);
		break;
	case HTT_T2H_MSG_TYPE_PPDU_STATS_IND:
		ath11k_htt_pull_ppdu_stats(ab, skb);
		break;
	case HTT_T2H_MSG_TYPE_EXT_STATS_CONF:
		ath11k_debugfs_htt_ext_stats_handler(ab, skb);
		break;
	case HTT_T2H_MSG_TYPE_PKTLOG:
		ath11k_htt_pktlog(ab, skb);
		break;
	case HTT_T2H_MSG_TYPE_BKPRESSURE_EVENT_IND:
		ath11k_htt_backpressure_event_handler(ab, skb);
		break;
	default:
		ath11k_warn(ab, "htt event %d not handled\n", type);
		break;
	}

	dev_kfree_skb_any(skb);
}

static int ath11k_dp_rx_msdu_coalesce(struct ath11k *ar,
				      struct sk_buff_head *msdu_list,
				      struct sk_buff *first, struct sk_buff *last,
				      u8 l3pad_bytes, int msdu_len)
{
	struct ath11k_base *ab = ar->ab;
	struct sk_buff *skb;
	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(first);
	int buf_first_hdr_len, buf_first_len;
	struct hal_rx_desc *ldesc;
	int space_extra, rem_len, buf_len;
	u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;

	/* As the msdu is spread across multiple rx buffers,
	 * find the offset to the start of msdu for computing
	 * the length of the msdu in the first buffer.
	 */
	buf_first_hdr_len = hal_rx_desc_sz + l3pad_bytes;
	buf_first_len = DP_RX_BUFFER_SIZE - buf_first_hdr_len;

	if (WARN_ON_ONCE(msdu_len <= buf_first_len)) {
		skb_put(first, buf_first_hdr_len + msdu_len);
		skb_pull(first, buf_first_hdr_len);
		return 0;
	}

	ldesc = (struct hal_rx_desc *)last->data;
	rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(ab, ldesc);
	rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(ab, ldesc);

	/* MSDU spans over multiple buffers because the length of the MSDU
	 * exceeds DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE. So assume the data
	 * in the first buf is of length DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE.
	 */
	skb_put(first, DP_RX_BUFFER_SIZE);
	skb_pull(first, buf_first_hdr_len);

	/* When an MSDU spreads over multiple buffers, the attention, MSDU_END
	 * and MPDU_END tlvs are valid only in the last buffer. Copy those tlvs.
	 */
	ath11k_dp_rx_desc_end_tlv_copy(ab, rxcb->rx_desc, ldesc);

	space_extra = msdu_len - (buf_first_len + skb_tailroom(first));
	if (space_extra > 0 &&
	    (pskb_expand_head(first, 0, space_extra, GFP_ATOMIC) < 0)) {
		/* Free up all buffers of the MSDU */
		while ((skb = __skb_dequeue(msdu_list)) != NULL) {
			rxcb = ATH11K_SKB_RXCB(skb);
			if (!rxcb->is_continuation) {
				dev_kfree_skb_any(skb);
				break;
			}
			dev_kfree_skb_any(skb);
		}
		return -ENOMEM;
	}

	rem_len = msdu_len - buf_first_len;
	while ((skb = __skb_dequeue(msdu_list)) != NULL && rem_len > 0) {
		rxcb = ATH11K_SKB_RXCB(skb);
		if (rxcb->is_continuation)
			buf_len = DP_RX_BUFFER_SIZE - hal_rx_desc_sz;
		else
			buf_len = rem_len;

		if (buf_len > (DP_RX_BUFFER_SIZE - hal_rx_desc_sz)) {
			WARN_ON_ONCE(1);
			dev_kfree_skb_any(skb);
			return -EINVAL;
		}

		skb_put(skb, buf_len + hal_rx_desc_sz);
		skb_pull(skb, hal_rx_desc_sz);
		skb_copy_from_linear_data(skb, skb_put(first, buf_len),
					  buf_len);
		dev_kfree_skb_any(skb);

		rem_len -= buf_len;
		if (!rxcb->is_continuation)
			break;
	}

	return 0;
}

static struct sk_buff *ath11k_dp_rx_get_msdu_last_buf(struct sk_buff_head *msdu_list,
						      struct sk_buff *first)
{
	struct sk_buff *skb;
	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(first);

	if (!rxcb->is_continuation)
		return first;

	skb_queue_walk(msdu_list, skb) {
		rxcb = ATH11K_SKB_RXCB(skb);
		if (!rxcb->is_continuation)
			return skb;
	}

	return NULL;
}

static void ath11k_dp_rx_h_csum_offload(struct ath11k *ar, struct sk_buff *msdu)
{
	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
	struct rx_attention *rx_attention;
	bool ip_csum_fail, l4_csum_fail;

	rx_attention = ath11k_dp_rx_get_attention(ar->ab, rxcb->rx_desc);
	ip_csum_fail = ath11k_dp_rx_h_attn_ip_cksum_fail(rx_attention);
	l4_csum_fail = ath11k_dp_rx_h_attn_l4_cksum_fail(rx_attention);

	msdu->ip_summed = (ip_csum_fail || l4_csum_fail) ?
			  CHECKSUM_NONE : CHECKSUM_UNNECESSARY;
}

static int ath11k_dp_rx_crypto_mic_len(struct ath11k *ar,
				       enum hal_encrypt_type enctype)
{
	switch (enctype) {
	case HAL_ENCRYPT_TYPE_OPEN:
	case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
	case HAL_ENCRYPT_TYPE_TKIP_MIC:
		return 0;
	case HAL_ENCRYPT_TYPE_CCMP_128:
		return IEEE80211_CCMP_MIC_LEN;
	case HAL_ENCRYPT_TYPE_CCMP_256:
		return IEEE80211_CCMP_256_MIC_LEN;
	case HAL_ENCRYPT_TYPE_GCMP_128:
	case HAL_ENCRYPT_TYPE_AES_GCMP_256:
		return IEEE80211_GCMP_MIC_LEN;
	case HAL_ENCRYPT_TYPE_WEP_40:
	case HAL_ENCRYPT_TYPE_WEP_104:
	case HAL_ENCRYPT_TYPE_WEP_128:
	case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
	case HAL_ENCRYPT_TYPE_WAPI:
		break;
	}

	ath11k_warn(ar->ab, "unsupported encryption type %d for mic len\n", enctype);
	return 0;
}

static int ath11k_dp_rx_crypto_param_len(struct ath11k *ar,
					 enum hal_encrypt_type enctype)
{
	switch (enctype) {
	case HAL_ENCRYPT_TYPE_OPEN:
		return 0;
	case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
	case HAL_ENCRYPT_TYPE_TKIP_MIC:
		return IEEE80211_TKIP_IV_LEN;
	case HAL_ENCRYPT_TYPE_CCMP_128:
		return IEEE80211_CCMP_HDR_LEN;
	case HAL_ENCRYPT_TYPE_CCMP_256:
		return IEEE80211_CCMP_256_HDR_LEN;
	case HAL_ENCRYPT_TYPE_GCMP_128:
	case HAL_ENCRYPT_TYPE_AES_GCMP_256:
		return IEEE80211_GCMP_HDR_LEN;
	case HAL_ENCRYPT_TYPE_WEP_40:
	case HAL_ENCRYPT_TYPE_WEP_104:
	case HAL_ENCRYPT_TYPE_WEP_128:
	case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
	case HAL_ENCRYPT_TYPE_WAPI:
		break;
	}

	ath11k_warn(ar->ab, "unsupported encryption type %d\n", enctype);
	return 0;
}

static int ath11k_dp_rx_crypto_icv_len(struct ath11k *ar,
				       enum hal_encrypt_type enctype)
{
	switch (enctype) {
	case HAL_ENCRYPT_TYPE_OPEN:
	case HAL_ENCRYPT_TYPE_CCMP_128:
	case HAL_ENCRYPT_TYPE_CCMP_256:
	case HAL_ENCRYPT_TYPE_GCMP_128:
	case HAL_ENCRYPT_TYPE_AES_GCMP_256:
		return 0;
	case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
	case HAL_ENCRYPT_TYPE_TKIP_MIC:
		return IEEE80211_TKIP_ICV_LEN;
	case HAL_ENCRYPT_TYPE_WEP_40:
	case HAL_ENCRYPT_TYPE_WEP_104:
	case HAL_ENCRYPT_TYPE_WEP_128:
	case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
	case HAL_ENCRYPT_TYPE_WAPI:
		break;
	}

	ath11k_warn(ar->ab, "unsupported encryption type %d\n", enctype);
	return 0;
}

static void ath11k_dp_rx_h_undecap_nwifi(struct ath11k *ar,
					 struct sk_buff *msdu,
					 u8 *first_hdr,
					 enum hal_encrypt_type enctype,
					 struct ieee80211_rx_status *status)
{
	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
	u8 decap_hdr[DP_MAX_NWIFI_HDR_LEN];
	struct ieee80211_hdr *hdr;
	size_t hdr_len;
	u8 da[ETH_ALEN];
	u8 sa[ETH_ALEN];
	u16 qos_ctl = 0;
	u8 *qos;

	/* copy SA & DA and pull decapped header */
	hdr = (struct ieee80211_hdr *)msdu->data;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);
	ether_addr_copy(da, ieee80211_get_DA(hdr));
	ether_addr_copy(sa, ieee80211_get_SA(hdr));
	skb_pull(msdu, ieee80211_hdrlen(hdr->frame_control));

	if (rxcb->is_first_msdu) {
		/* original 802.11 header is valid for the first msdu
		 * hence we can reuse the same header
		 */
		hdr = (struct ieee80211_hdr *)first_hdr;
		hdr_len = ieee80211_hdrlen(hdr->frame_control);

		/* Each A-MSDU subframe will be reported as a separate MSDU,
* so strip the A-MSDU bit from QoS Ctl. 1946 */ 1947 if (ieee80211_is_data_qos(hdr->frame_control)) { 1948 qos = ieee80211_get_qos_ctl(hdr); 1949 qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT; 1950 } 1951 } else { 1952 /* Rebuild qos header if this is a middle/last msdu */ 1953 hdr->frame_control |= __cpu_to_le16(IEEE80211_STYPE_QOS_DATA); 1954 1955 /* Reset the order bit as the HT_Control header is stripped */ 1956 hdr->frame_control &= ~(__cpu_to_le16(IEEE80211_FCTL_ORDER)); 1957 1958 qos_ctl = rxcb->tid; 1959 1960 if (ath11k_dp_rx_h_msdu_start_mesh_ctl_present(ar->ab, rxcb->rx_desc)) 1961 qos_ctl |= IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT; 1962 1963 /* TODO Add other QoS ctl fields when required */ 1964 1965 /* copy decap header before overwriting for reuse below */ 1966 memcpy(decap_hdr, (uint8_t *)hdr, hdr_len); 1967 } 1968 1969 if (!(status->flag & RX_FLAG_IV_STRIPPED)) { 1970 memcpy(skb_push(msdu, 1971 ath11k_dp_rx_crypto_param_len(ar, enctype)), 1972 (void *)hdr + hdr_len, 1973 ath11k_dp_rx_crypto_param_len(ar, enctype)); 1974 } 1975 1976 if (!rxcb->is_first_msdu) { 1977 memcpy(skb_push(msdu, 1978 IEEE80211_QOS_CTL_LEN), &qos_ctl, 1979 IEEE80211_QOS_CTL_LEN); 1980 memcpy(skb_push(msdu, hdr_len), decap_hdr, hdr_len); 1981 return; 1982 } 1983 1984 memcpy(skb_push(msdu, hdr_len), hdr, hdr_len); 1985 1986 /* original 802.11 header has a different DA and in 1987 * case of 4addr it may also have different SA 1988 */ 1989 hdr = (struct ieee80211_hdr *)msdu->data; 1990 ether_addr_copy(ieee80211_get_DA(hdr), da); 1991 ether_addr_copy(ieee80211_get_SA(hdr), sa); 1992 } 1993 1994 static void ath11k_dp_rx_h_undecap_raw(struct ath11k *ar, struct sk_buff *msdu, 1995 enum hal_encrypt_type enctype, 1996 struct ieee80211_rx_status *status, 1997 bool decrypted) 1998 { 1999 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); 2000 struct ieee80211_hdr *hdr; 2001 size_t hdr_len; 2002 size_t crypto_len; 2003 2004 if (!rxcb->is_first_msdu || 2005 !(rxcb->is_first_msdu && rxcb->is_last_msdu)) { 2006 WARN_ON_ONCE(1); 2007 return; 2008 } 2009 2010 skb_trim(msdu, msdu->len - FCS_LEN); 2011 2012 if (!decrypted) 2013 return; 2014 2015 hdr = (void *)msdu->data; 2016 2017 /* Tail */ 2018 if (status->flag & RX_FLAG_IV_STRIPPED) { 2019 skb_trim(msdu, msdu->len - 2020 ath11k_dp_rx_crypto_mic_len(ar, enctype)); 2021 2022 skb_trim(msdu, msdu->len - 2023 ath11k_dp_rx_crypto_icv_len(ar, enctype)); 2024 } else { 2025 /* MIC */ 2026 if (status->flag & RX_FLAG_MIC_STRIPPED) 2027 skb_trim(msdu, msdu->len - 2028 ath11k_dp_rx_crypto_mic_len(ar, enctype)); 2029 2030 /* ICV */ 2031 if (status->flag & RX_FLAG_ICV_STRIPPED) 2032 skb_trim(msdu, msdu->len - 2033 ath11k_dp_rx_crypto_icv_len(ar, enctype)); 2034 } 2035 2036 /* MMIC */ 2037 if ((status->flag & RX_FLAG_MMIC_STRIPPED) && 2038 !ieee80211_has_morefrags(hdr->frame_control) && 2039 enctype == HAL_ENCRYPT_TYPE_TKIP_MIC) 2040 skb_trim(msdu, msdu->len - IEEE80211_CCMP_MIC_LEN); 2041 2042 /* Head */ 2043 if (status->flag & RX_FLAG_IV_STRIPPED) { 2044 hdr_len = ieee80211_hdrlen(hdr->frame_control); 2045 crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype); 2046 2047 memmove((void *)msdu->data + crypto_len, 2048 (void *)msdu->data, hdr_len); 2049 skb_pull(msdu, crypto_len); 2050 } 2051 } 2052 2053 static void *ath11k_dp_rx_h_find_rfc1042(struct ath11k *ar, 2054 struct sk_buff *msdu, 2055 enum hal_encrypt_type enctype) 2056 { 2057 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); 2058 struct ieee80211_hdr *hdr; 2059 size_t hdr_len, crypto_len; 2060 void *rfc1042; 2061 bool 
is_amsdu; 2062 2063 is_amsdu = !(rxcb->is_first_msdu && rxcb->is_last_msdu); 2064 hdr = (struct ieee80211_hdr *)ath11k_dp_rx_h_80211_hdr(ar->ab, rxcb->rx_desc); 2065 rfc1042 = hdr; 2066 2067 if (rxcb->is_first_msdu) { 2068 hdr_len = ieee80211_hdrlen(hdr->frame_control); 2069 crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype); 2070 2071 rfc1042 += hdr_len + crypto_len; 2072 } 2073 2074 if (is_amsdu) 2075 rfc1042 += sizeof(struct ath11k_dp_amsdu_subframe_hdr); 2076 2077 return rfc1042; 2078 } 2079 2080 static void ath11k_dp_rx_h_undecap_eth(struct ath11k *ar, 2081 struct sk_buff *msdu, 2082 u8 *first_hdr, 2083 enum hal_encrypt_type enctype, 2084 struct ieee80211_rx_status *status) 2085 { 2086 struct ieee80211_hdr *hdr; 2087 struct ethhdr *eth; 2088 size_t hdr_len; 2089 u8 da[ETH_ALEN]; 2090 u8 sa[ETH_ALEN]; 2091 void *rfc1042; 2092 2093 rfc1042 = ath11k_dp_rx_h_find_rfc1042(ar, msdu, enctype); 2094 if (WARN_ON_ONCE(!rfc1042)) 2095 return; 2096 2097 /* pull decapped header and copy SA & DA */ 2098 eth = (struct ethhdr *)msdu->data; 2099 ether_addr_copy(da, eth->h_dest); 2100 ether_addr_copy(sa, eth->h_source); 2101 skb_pull(msdu, sizeof(struct ethhdr)); 2102 2103 /* push rfc1042/llc/snap */ 2104 memcpy(skb_push(msdu, sizeof(struct ath11k_dp_rfc1042_hdr)), rfc1042, 2105 sizeof(struct ath11k_dp_rfc1042_hdr)); 2106 2107 /* push original 802.11 header */ 2108 hdr = (struct ieee80211_hdr *)first_hdr; 2109 hdr_len = ieee80211_hdrlen(hdr->frame_control); 2110 2111 if (!(status->flag & RX_FLAG_IV_STRIPPED)) { 2112 memcpy(skb_push(msdu, 2113 ath11k_dp_rx_crypto_param_len(ar, enctype)), 2114 (void *)hdr + hdr_len, 2115 ath11k_dp_rx_crypto_param_len(ar, enctype)); 2116 } 2117 2118 memcpy(skb_push(msdu, hdr_len), hdr, hdr_len); 2119 2120 /* original 802.11 header has a different DA and in 2121 * case of 4addr it may also have different SA 2122 */ 2123 hdr = (struct ieee80211_hdr *)msdu->data; 2124 ether_addr_copy(ieee80211_get_DA(hdr), da); 2125 ether_addr_copy(ieee80211_get_SA(hdr), sa); 2126 } 2127 2128 static void ath11k_dp_rx_h_undecap(struct ath11k *ar, struct sk_buff *msdu, 2129 struct hal_rx_desc *rx_desc, 2130 enum hal_encrypt_type enctype, 2131 struct ieee80211_rx_status *status, 2132 bool decrypted) 2133 { 2134 u8 *first_hdr; 2135 u8 decap; 2136 2137 first_hdr = ath11k_dp_rx_h_80211_hdr(ar->ab, rx_desc); 2138 decap = ath11k_dp_rx_h_msdu_start_decap_type(ar->ab, rx_desc); 2139 2140 switch (decap) { 2141 case DP_RX_DECAP_TYPE_NATIVE_WIFI: 2142 ath11k_dp_rx_h_undecap_nwifi(ar, msdu, first_hdr, 2143 enctype, status); 2144 break; 2145 case DP_RX_DECAP_TYPE_RAW: 2146 ath11k_dp_rx_h_undecap_raw(ar, msdu, enctype, status, 2147 decrypted); 2148 break; 2149 case DP_RX_DECAP_TYPE_ETHERNET2_DIX: 2150 /* TODO undecap support for middle/last msdu's of amsdu */ 2151 ath11k_dp_rx_h_undecap_eth(ar, msdu, first_hdr, 2152 enctype, status); 2153 break; 2154 case DP_RX_DECAP_TYPE_8023: 2155 /* TODO: Handle undecap for these formats */ 2156 break; 2157 } 2158 } 2159 2160 static void ath11k_dp_rx_h_mpdu(struct ath11k *ar, 2161 struct sk_buff *msdu, 2162 struct hal_rx_desc *rx_desc, 2163 struct ieee80211_rx_status *rx_status) 2164 { 2165 bool fill_crypto_hdr, mcast; 2166 enum hal_encrypt_type enctype; 2167 bool is_decrypted = false; 2168 struct ieee80211_hdr *hdr; 2169 struct ath11k_peer *peer; 2170 struct rx_attention *rx_attention; 2171 u32 err_bitmap; 2172 2173 hdr = (struct ieee80211_hdr *)msdu->data; 2174 2175 /* PN for multicast packets will be checked in mac80211 */ 2176 2177 mcast = 
is_multicast_ether_addr(hdr->addr1); 2178 fill_crypto_hdr = mcast; 2179 2180 spin_lock_bh(&ar->ab->base_lock); 2181 peer = ath11k_peer_find_by_addr(ar->ab, hdr->addr2); 2182 if (peer) { 2183 if (mcast) 2184 enctype = peer->sec_type_grp; 2185 else 2186 enctype = peer->sec_type; 2187 } else { 2188 enctype = HAL_ENCRYPT_TYPE_OPEN; 2189 } 2190 spin_unlock_bh(&ar->ab->base_lock); 2191 2192 rx_attention = ath11k_dp_rx_get_attention(ar->ab, rx_desc); 2193 err_bitmap = ath11k_dp_rx_h_attn_mpdu_err(rx_attention); 2194 if (enctype != HAL_ENCRYPT_TYPE_OPEN && !err_bitmap) 2195 is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_attention); 2196 2197 /* Clear per-MPDU flags while leaving per-PPDU flags intact */ 2198 rx_status->flag &= ~(RX_FLAG_FAILED_FCS_CRC | 2199 RX_FLAG_MMIC_ERROR | 2200 RX_FLAG_DECRYPTED | 2201 RX_FLAG_IV_STRIPPED | 2202 RX_FLAG_MMIC_STRIPPED); 2203 2204 if (err_bitmap & DP_RX_MPDU_ERR_FCS) 2205 rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; 2206 if (err_bitmap & DP_RX_MPDU_ERR_TKIP_MIC) 2207 rx_status->flag |= RX_FLAG_MMIC_ERROR; 2208 2209 if (is_decrypted) { 2210 rx_status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MMIC_STRIPPED; 2211 2212 if (fill_crypto_hdr) 2213 rx_status->flag |= RX_FLAG_MIC_STRIPPED | 2214 RX_FLAG_ICV_STRIPPED; 2215 else 2216 rx_status->flag |= RX_FLAG_IV_STRIPPED | 2217 RX_FLAG_PN_VALIDATED; 2218 } 2219 2220 ath11k_dp_rx_h_csum_offload(ar, msdu); 2221 ath11k_dp_rx_h_undecap(ar, msdu, rx_desc, 2222 enctype, rx_status, is_decrypted); 2223 2224 if (!is_decrypted || fill_crypto_hdr) 2225 return; 2226 2227 hdr = (void *)msdu->data; 2228 hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED); 2229 } 2230 2231 static void ath11k_dp_rx_h_rate(struct ath11k *ar, struct hal_rx_desc *rx_desc, 2232 struct ieee80211_rx_status *rx_status) 2233 { 2234 struct ieee80211_supported_band *sband; 2235 enum rx_msdu_start_pkt_type pkt_type; 2236 u8 bw; 2237 u8 rate_mcs, nss; 2238 u8 sgi; 2239 bool is_cck; 2240 2241 pkt_type = ath11k_dp_rx_h_msdu_start_pkt_type(ar->ab, rx_desc); 2242 bw = ath11k_dp_rx_h_msdu_start_rx_bw(ar->ab, rx_desc); 2243 rate_mcs = ath11k_dp_rx_h_msdu_start_rate_mcs(ar->ab, rx_desc); 2244 nss = ath11k_dp_rx_h_msdu_start_nss(ar->ab, rx_desc); 2245 sgi = ath11k_dp_rx_h_msdu_start_sgi(ar->ab, rx_desc); 2246 2247 switch (pkt_type) { 2248 case RX_MSDU_START_PKT_TYPE_11A: 2249 case RX_MSDU_START_PKT_TYPE_11B: 2250 is_cck = (pkt_type == RX_MSDU_START_PKT_TYPE_11B); 2251 sband = &ar->mac.sbands[rx_status->band]; 2252 rx_status->rate_idx = ath11k_mac_hw_rate_to_idx(sband, rate_mcs, 2253 is_cck); 2254 break; 2255 case RX_MSDU_START_PKT_TYPE_11N: 2256 rx_status->encoding = RX_ENC_HT; 2257 if (rate_mcs > ATH11K_HT_MCS_MAX) { 2258 ath11k_warn(ar->ab, 2259 "Received with invalid mcs in HT mode %d\n", 2260 rate_mcs); 2261 break; 2262 } 2263 rx_status->rate_idx = rate_mcs + (8 * (nss - 1)); 2264 if (sgi) 2265 rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI; 2266 rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw); 2267 break; 2268 case RX_MSDU_START_PKT_TYPE_11AC: 2269 rx_status->encoding = RX_ENC_VHT; 2270 rx_status->rate_idx = rate_mcs; 2271 if (rate_mcs > ATH11K_VHT_MCS_MAX) { 2272 ath11k_warn(ar->ab, 2273 "Received with invalid mcs in VHT mode %d\n", 2274 rate_mcs); 2275 break; 2276 } 2277 rx_status->nss = nss; 2278 if (sgi) 2279 rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI; 2280 rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw); 2281 break; 2282 case RX_MSDU_START_PKT_TYPE_11AX: 2283 rx_status->rate_idx = rate_mcs; 2284 if (rate_mcs > ATH11K_HE_MCS_MAX) { 2285 
ath11k_warn(ar->ab, 2286 "Received with invalid mcs in HE mode %d\n", 2287 rate_mcs); 2288 break; 2289 } 2290 rx_status->encoding = RX_ENC_HE; 2291 rx_status->nss = nss; 2292 rx_status->he_gi = ath11k_he_gi_to_nl80211_he_gi(sgi); 2293 rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw); 2294 break; 2295 } 2296 } 2297 2298 static void ath11k_dp_rx_h_ppdu(struct ath11k *ar, struct hal_rx_desc *rx_desc, 2299 struct ieee80211_rx_status *rx_status) 2300 { 2301 u8 channel_num; 2302 u32 center_freq, meta_data; 2303 struct ieee80211_channel *channel; 2304 2305 rx_status->freq = 0; 2306 rx_status->rate_idx = 0; 2307 rx_status->nss = 0; 2308 rx_status->encoding = RX_ENC_LEGACY; 2309 rx_status->bw = RATE_INFO_BW_20; 2310 2311 rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL; 2312 2313 meta_data = ath11k_dp_rx_h_msdu_start_freq(ar->ab, rx_desc); 2314 channel_num = meta_data; 2315 center_freq = meta_data >> 16; 2316 2317 if (center_freq >= 5935 && center_freq <= 7105) { 2318 rx_status->band = NL80211_BAND_6GHZ; 2319 } else if (channel_num >= 1 && channel_num <= 14) { 2320 rx_status->band = NL80211_BAND_2GHZ; 2321 } else if (channel_num >= 36 && channel_num <= 173) { 2322 rx_status->band = NL80211_BAND_5GHZ; 2323 } else { 2324 spin_lock_bh(&ar->data_lock); 2325 channel = ar->rx_channel; 2326 if (channel) { 2327 rx_status->band = channel->band; 2328 channel_num = 2329 ieee80211_frequency_to_channel(channel->center_freq); 2330 } 2331 spin_unlock_bh(&ar->data_lock); 2332 ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "rx_desc: ", 2333 rx_desc, sizeof(struct hal_rx_desc)); 2334 } 2335 2336 rx_status->freq = ieee80211_channel_to_frequency(channel_num, 2337 rx_status->band); 2338 2339 ath11k_dp_rx_h_rate(ar, rx_desc, rx_status); 2340 } 2341 2342 static char *ath11k_print_get_tid(struct ieee80211_hdr *hdr, char *out, 2343 size_t size) 2344 { 2345 u8 *qc; 2346 int tid; 2347 2348 if (!ieee80211_is_data_qos(hdr->frame_control)) 2349 return ""; 2350 2351 qc = ieee80211_get_qos_ctl(hdr); 2352 tid = *qc & IEEE80211_QOS_CTL_TID_MASK; 2353 snprintf(out, size, "tid %d", tid); 2354 2355 return out; 2356 } 2357 2358 static void ath11k_dp_rx_deliver_msdu(struct ath11k *ar, struct napi_struct *napi, 2359 struct sk_buff *msdu) 2360 { 2361 static const struct ieee80211_radiotap_he known = { 2362 .data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN | 2363 IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN), 2364 .data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN), 2365 }; 2366 struct ieee80211_rx_status *status; 2367 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data; 2368 struct ieee80211_radiotap_he *he = NULL; 2369 char tid[32]; 2370 2371 status = IEEE80211_SKB_RXCB(msdu); 2372 if (status->encoding == RX_ENC_HE) { 2373 he = skb_push(msdu, sizeof(known)); 2374 memcpy(he, &known, sizeof(known)); 2375 status->flag |= RX_FLAG_RADIOTAP_HE; 2376 } 2377 2378 ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 2379 "rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n", 2380 msdu, 2381 msdu->len, 2382 ieee80211_get_SA(hdr), 2383 ath11k_print_get_tid(hdr, tid, sizeof(tid)), 2384 is_multicast_ether_addr(ieee80211_get_DA(hdr)) ? 2385 "mcast" : "ucast", 2386 (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4, 2387 (status->encoding == RX_ENC_LEGACY) ? "legacy" : "", 2388 (status->encoding == RX_ENC_HT) ? "ht" : "", 2389 (status->encoding == RX_ENC_VHT) ? "vht" : "", 2390 (status->encoding == RX_ENC_HE) ? 
"he" : "", 2391 (status->bw == RATE_INFO_BW_40) ? "40" : "", 2392 (status->bw == RATE_INFO_BW_80) ? "80" : "", 2393 (status->bw == RATE_INFO_BW_160) ? "160" : "", 2394 status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "", 2395 status->rate_idx, 2396 status->nss, 2397 status->freq, 2398 status->band, status->flag, 2399 !!(status->flag & RX_FLAG_FAILED_FCS_CRC), 2400 !!(status->flag & RX_FLAG_MMIC_ERROR), 2401 !!(status->flag & RX_FLAG_AMSDU_MORE)); 2402 2403 ath11k_dbg_dump(ar->ab, ATH11K_DBG_DP_RX, NULL, "dp rx msdu: ", 2404 msdu->data, msdu->len); 2405 2406 /* TODO: trace rx packet */ 2407 2408 ieee80211_rx_napi(ar->hw, NULL, msdu, napi); 2409 } 2410 2411 static int ath11k_dp_rx_process_msdu(struct ath11k *ar, 2412 struct sk_buff *msdu, 2413 struct sk_buff_head *msdu_list) 2414 { 2415 struct ath11k_base *ab = ar->ab; 2416 struct hal_rx_desc *rx_desc, *lrx_desc; 2417 struct rx_attention *rx_attention; 2418 struct ieee80211_rx_status rx_status = {0}; 2419 struct ieee80211_rx_status *status; 2420 struct ath11k_skb_rxcb *rxcb; 2421 struct ieee80211_hdr *hdr; 2422 struct sk_buff *last_buf; 2423 u8 l3_pad_bytes; 2424 u8 *hdr_status; 2425 u16 msdu_len; 2426 int ret; 2427 u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz; 2428 2429 last_buf = ath11k_dp_rx_get_msdu_last_buf(msdu_list, msdu); 2430 if (!last_buf) { 2431 ath11k_warn(ab, 2432 "No valid Rx buffer to access Atten/MSDU_END/MPDU_END tlvs\n"); 2433 ret = -EIO; 2434 goto free_out; 2435 } 2436 2437 rx_desc = (struct hal_rx_desc *)msdu->data; 2438 lrx_desc = (struct hal_rx_desc *)last_buf->data; 2439 rx_attention = ath11k_dp_rx_get_attention(ab, lrx_desc); 2440 if (!ath11k_dp_rx_h_attn_msdu_done(rx_attention)) { 2441 ath11k_warn(ab, "msdu_done bit in attention is not set\n"); 2442 ret = -EIO; 2443 goto free_out; 2444 } 2445 2446 rxcb = ATH11K_SKB_RXCB(msdu); 2447 rxcb->rx_desc = rx_desc; 2448 msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(ab, rx_desc); 2449 l3_pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(ab, lrx_desc); 2450 2451 if (rxcb->is_frag) { 2452 skb_pull(msdu, hal_rx_desc_sz); 2453 } else if (!rxcb->is_continuation) { 2454 if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) { 2455 hdr_status = ath11k_dp_rx_h_80211_hdr(ab, rx_desc); 2456 ret = -EINVAL; 2457 ath11k_warn(ab, "invalid msdu len %u\n", msdu_len); 2458 ath11k_dbg_dump(ab, ATH11K_DBG_DATA, NULL, "", hdr_status, 2459 sizeof(struct ieee80211_hdr)); 2460 ath11k_dbg_dump(ab, ATH11K_DBG_DATA, NULL, "", rx_desc, 2461 sizeof(struct hal_rx_desc)); 2462 goto free_out; 2463 } 2464 skb_put(msdu, hal_rx_desc_sz + l3_pad_bytes + msdu_len); 2465 skb_pull(msdu, hal_rx_desc_sz + l3_pad_bytes); 2466 } else { 2467 ret = ath11k_dp_rx_msdu_coalesce(ar, msdu_list, 2468 msdu, last_buf, 2469 l3_pad_bytes, msdu_len); 2470 if (ret) { 2471 ath11k_warn(ab, 2472 "failed to coalesce msdu rx buffer%d\n", ret); 2473 goto free_out; 2474 } 2475 } 2476 2477 hdr = (struct ieee80211_hdr *)msdu->data; 2478 2479 /* Process only data frames */ 2480 if (!ieee80211_is_data(hdr->frame_control)) 2481 return -EINVAL; 2482 2483 ath11k_dp_rx_h_ppdu(ar, rx_desc, &rx_status); 2484 ath11k_dp_rx_h_mpdu(ar, msdu, rx_desc, &rx_status); 2485 2486 rx_status.flag |= RX_FLAG_SKIP_MONITOR | RX_FLAG_DUP_VALIDATED; 2487 2488 status = IEEE80211_SKB_RXCB(msdu); 2489 *status = rx_status; 2490 return 0; 2491 2492 free_out: 2493 return ret; 2494 } 2495 2496 static void ath11k_dp_rx_process_received_packets(struct ath11k_base *ab, 2497 struct napi_struct *napi, 2498 struct sk_buff_head *msdu_list, 2499 int *quota, int ring_id) 2500 { 2501 
struct ath11k_skb_rxcb *rxcb; 2502 struct sk_buff *msdu; 2503 struct ath11k *ar; 2504 u8 mac_id; 2505 int ret; 2506 2507 if (skb_queue_empty(msdu_list)) 2508 return; 2509 2510 rcu_read_lock(); 2511 2512 while (*quota && (msdu = __skb_dequeue(msdu_list))) { 2513 rxcb = ATH11K_SKB_RXCB(msdu); 2514 mac_id = rxcb->mac_id; 2515 ar = ab->pdevs[mac_id].ar; 2516 if (!rcu_dereference(ab->pdevs_active[mac_id])) { 2517 dev_kfree_skb_any(msdu); 2518 continue; 2519 } 2520 2521 if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) { 2522 dev_kfree_skb_any(msdu); 2523 continue; 2524 } 2525 2526 ret = ath11k_dp_rx_process_msdu(ar, msdu, msdu_list); 2527 if (ret) { 2528 ath11k_dbg(ab, ATH11K_DBG_DATA, 2529 "Unable to process msdu %d", ret); 2530 dev_kfree_skb_any(msdu); 2531 continue; 2532 } 2533 2534 ath11k_dp_rx_deliver_msdu(ar, napi, msdu); 2535 (*quota)--; 2536 } 2537 2538 rcu_read_unlock(); 2539 } 2540 2541 int ath11k_dp_process_rx(struct ath11k_base *ab, int ring_id, 2542 struct napi_struct *napi, int budget) 2543 { 2544 struct ath11k_dp *dp = &ab->dp; 2545 struct dp_rxdma_ring *rx_ring; 2546 int num_buffs_reaped[MAX_RADIOS] = {0}; 2547 struct sk_buff_head msdu_list; 2548 struct ath11k_skb_rxcb *rxcb; 2549 int total_msdu_reaped = 0; 2550 struct hal_srng *srng; 2551 struct sk_buff *msdu; 2552 int quota = budget; 2553 bool done = false; 2554 int buf_id, mac_id; 2555 struct ath11k *ar; 2556 u32 *rx_desc; 2557 int i; 2558 2559 __skb_queue_head_init(&msdu_list); 2560 2561 srng = &ab->hal.srng_list[dp->reo_dst_ring[ring_id].ring_id]; 2562 2563 spin_lock_bh(&srng->lock); 2564 2565 ath11k_hal_srng_access_begin(ab, srng); 2566 2567 try_again: 2568 while ((rx_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) { 2569 struct hal_reo_dest_ring desc = *(struct hal_reo_dest_ring *)rx_desc; 2570 enum hal_reo_dest_ring_push_reason push_reason; 2571 u32 cookie; 2572 2573 cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE, 2574 desc.buf_addr_info.info1); 2575 buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, 2576 cookie); 2577 mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, cookie); 2578 2579 ar = ab->pdevs[mac_id].ar; 2580 rx_ring = &ar->dp.rx_refill_buf_ring; 2581 spin_lock_bh(&rx_ring->idr_lock); 2582 msdu = idr_find(&rx_ring->bufs_idr, buf_id); 2583 if (!msdu) { 2584 ath11k_warn(ab, "frame rx with invalid buf_id %d\n", 2585 buf_id); 2586 spin_unlock_bh(&rx_ring->idr_lock); 2587 continue; 2588 } 2589 2590 idr_remove(&rx_ring->bufs_idr, buf_id); 2591 spin_unlock_bh(&rx_ring->idr_lock); 2592 2593 rxcb = ATH11K_SKB_RXCB(msdu); 2594 dma_unmap_single(ab->dev, rxcb->paddr, 2595 msdu->len + skb_tailroom(msdu), 2596 DMA_FROM_DEVICE); 2597 2598 num_buffs_reaped[mac_id]++; 2599 total_msdu_reaped++; 2600 2601 push_reason = FIELD_GET(HAL_REO_DEST_RING_INFO0_PUSH_REASON, 2602 desc.info0); 2603 if (push_reason != 2604 HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION) { 2605 dev_kfree_skb_any(msdu); 2606 ab->soc_stats.hal_reo_error[dp->reo_dst_ring[ring_id].ring_id]++; 2607 continue; 2608 } 2609 2610 rxcb->is_first_msdu = !!(desc.rx_msdu_info.info0 & 2611 RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU); 2612 rxcb->is_last_msdu = !!(desc.rx_msdu_info.info0 & 2613 RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU); 2614 rxcb->is_continuation = !!(desc.rx_msdu_info.info0 & 2615 RX_MSDU_DESC_INFO0_MSDU_CONTINUATION); 2616 rxcb->mac_id = mac_id; 2617 rxcb->tid = FIELD_GET(HAL_REO_DEST_RING_INFO0_RX_QUEUE_NUM, 2618 desc.info0); 2619 2620 __skb_queue_tail(&msdu_list, msdu); 2621 2622 if (total_msdu_reaped >= quota && !rxcb->is_continuation) { 2623 done = true; 2624 
break; 2625 } 2626 } 2627 2628 /* Hw might have updated the head pointer after we cached it. 2629 * In this case, even though there are entries in the ring we'll 2630 * get rx_desc NULL. Give the read another try with updated cached 2631 * head pointer so that we can reap complete MPDU in the current 2632 * rx processing. 2633 */ 2634 if (!done && ath11k_hal_srng_dst_num_free(ab, srng, true)) { 2635 ath11k_hal_srng_access_end(ab, srng); 2636 goto try_again; 2637 } 2638 2639 ath11k_hal_srng_access_end(ab, srng); 2640 2641 spin_unlock_bh(&srng->lock); 2642 2643 if (!total_msdu_reaped) 2644 goto exit; 2645 2646 for (i = 0; i < ab->num_radios; i++) { 2647 if (!num_buffs_reaped[i]) 2648 continue; 2649 2650 ar = ab->pdevs[i].ar; 2651 rx_ring = &ar->dp.rx_refill_buf_ring; 2652 2653 ath11k_dp_rxbufs_replenish(ab, i, rx_ring, num_buffs_reaped[i], 2654 HAL_RX_BUF_RBM_SW3_BM); 2655 } 2656 2657 ath11k_dp_rx_process_received_packets(ab, napi, &msdu_list, 2658 "a, ring_id); 2659 2660 exit: 2661 return budget - quota; 2662 } 2663 2664 static void ath11k_dp_rx_update_peer_stats(struct ath11k_sta *arsta, 2665 struct hal_rx_mon_ppdu_info *ppdu_info) 2666 { 2667 struct ath11k_rx_peer_stats *rx_stats = arsta->rx_stats; 2668 u32 num_msdu; 2669 2670 if (!rx_stats) 2671 return; 2672 2673 num_msdu = ppdu_info->tcp_msdu_count + ppdu_info->tcp_ack_msdu_count + 2674 ppdu_info->udp_msdu_count + ppdu_info->other_msdu_count; 2675 2676 rx_stats->num_msdu += num_msdu; 2677 rx_stats->tcp_msdu_count += ppdu_info->tcp_msdu_count + 2678 ppdu_info->tcp_ack_msdu_count; 2679 rx_stats->udp_msdu_count += ppdu_info->udp_msdu_count; 2680 rx_stats->other_msdu_count += ppdu_info->other_msdu_count; 2681 2682 if (ppdu_info->preamble_type == HAL_RX_PREAMBLE_11A || 2683 ppdu_info->preamble_type == HAL_RX_PREAMBLE_11B) { 2684 ppdu_info->nss = 1; 2685 ppdu_info->mcs = HAL_RX_MAX_MCS; 2686 ppdu_info->tid = IEEE80211_NUM_TIDS; 2687 } 2688 2689 if (ppdu_info->nss > 0 && ppdu_info->nss <= HAL_RX_MAX_NSS) 2690 rx_stats->nss_count[ppdu_info->nss - 1] += num_msdu; 2691 2692 if (ppdu_info->mcs <= HAL_RX_MAX_MCS) 2693 rx_stats->mcs_count[ppdu_info->mcs] += num_msdu; 2694 2695 if (ppdu_info->gi < HAL_RX_GI_MAX) 2696 rx_stats->gi_count[ppdu_info->gi] += num_msdu; 2697 2698 if (ppdu_info->bw < HAL_RX_BW_MAX) 2699 rx_stats->bw_count[ppdu_info->bw] += num_msdu; 2700 2701 if (ppdu_info->ldpc < HAL_RX_SU_MU_CODING_MAX) 2702 rx_stats->coding_count[ppdu_info->ldpc] += num_msdu; 2703 2704 if (ppdu_info->tid <= IEEE80211_NUM_TIDS) 2705 rx_stats->tid_count[ppdu_info->tid] += num_msdu; 2706 2707 if (ppdu_info->preamble_type < HAL_RX_PREAMBLE_MAX) 2708 rx_stats->pream_cnt[ppdu_info->preamble_type] += num_msdu; 2709 2710 if (ppdu_info->reception_type < HAL_RX_RECEPTION_TYPE_MAX) 2711 rx_stats->reception_type[ppdu_info->reception_type] += num_msdu; 2712 2713 if (ppdu_info->is_stbc) 2714 rx_stats->stbc_count += num_msdu; 2715 2716 if (ppdu_info->beamformed) 2717 rx_stats->beamformed_count += num_msdu; 2718 2719 if (ppdu_info->num_mpdu_fcs_ok > 1) 2720 rx_stats->ampdu_msdu_count += num_msdu; 2721 else 2722 rx_stats->non_ampdu_msdu_count += num_msdu; 2723 2724 rx_stats->num_mpdu_fcs_ok += ppdu_info->num_mpdu_fcs_ok; 2725 rx_stats->num_mpdu_fcs_err += ppdu_info->num_mpdu_fcs_err; 2726 rx_stats->dcm_count += ppdu_info->dcm; 2727 rx_stats->ru_alloc_cnt[ppdu_info->ru_alloc] += num_msdu; 2728 2729 arsta->rssi_comb = ppdu_info->rssi_comb; 2730 rx_stats->rx_duration += ppdu_info->rx_duration; 2731 arsta->rx_duration = rx_stats->rx_duration; 2732 } 2733 2734 static struct 
sk_buff *ath11k_dp_rx_alloc_mon_status_buf(struct ath11k_base *ab, 2735 struct dp_rxdma_ring *rx_ring, 2736 int *buf_id) 2737 { 2738 struct sk_buff *skb; 2739 dma_addr_t paddr; 2740 2741 skb = dev_alloc_skb(DP_RX_BUFFER_SIZE + 2742 DP_RX_BUFFER_ALIGN_SIZE); 2743 2744 if (!skb) 2745 goto fail_alloc_skb; 2746 2747 if (!IS_ALIGNED((unsigned long)skb->data, 2748 DP_RX_BUFFER_ALIGN_SIZE)) { 2749 skb_pull(skb, PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) - 2750 skb->data); 2751 } 2752 2753 paddr = dma_map_single(ab->dev, skb->data, 2754 skb->len + skb_tailroom(skb), 2755 DMA_FROM_DEVICE); 2756 if (unlikely(dma_mapping_error(ab->dev, paddr))) 2757 goto fail_free_skb; 2758 2759 spin_lock_bh(&rx_ring->idr_lock); 2760 *buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0, 2761 rx_ring->bufs_max, GFP_ATOMIC); 2762 spin_unlock_bh(&rx_ring->idr_lock); 2763 if (*buf_id < 0) 2764 goto fail_dma_unmap; 2765 2766 ATH11K_SKB_RXCB(skb)->paddr = paddr; 2767 return skb; 2768 2769 fail_dma_unmap: 2770 dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb), 2771 DMA_FROM_DEVICE); 2772 fail_free_skb: 2773 dev_kfree_skb_any(skb); 2774 fail_alloc_skb: 2775 return NULL; 2776 } 2777 2778 int ath11k_dp_rx_mon_status_bufs_replenish(struct ath11k_base *ab, int mac_id, 2779 struct dp_rxdma_ring *rx_ring, 2780 int req_entries, 2781 enum hal_rx_buf_return_buf_manager mgr) 2782 { 2783 struct hal_srng *srng; 2784 u32 *desc; 2785 struct sk_buff *skb; 2786 int num_free; 2787 int num_remain; 2788 int buf_id; 2789 u32 cookie; 2790 dma_addr_t paddr; 2791 2792 req_entries = min(req_entries, rx_ring->bufs_max); 2793 2794 srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id]; 2795 2796 spin_lock_bh(&srng->lock); 2797 2798 ath11k_hal_srng_access_begin(ab, srng); 2799 2800 num_free = ath11k_hal_srng_src_num_free(ab, srng, true); 2801 2802 req_entries = min(num_free, req_entries); 2803 num_remain = req_entries; 2804 2805 while (num_remain > 0) { 2806 skb = ath11k_dp_rx_alloc_mon_status_buf(ab, rx_ring, 2807 &buf_id); 2808 if (!skb) 2809 break; 2810 paddr = ATH11K_SKB_RXCB(skb)->paddr; 2811 2812 desc = ath11k_hal_srng_src_get_next_entry(ab, srng); 2813 if (!desc) 2814 goto fail_desc_get; 2815 2816 cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) | 2817 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id); 2818 2819 num_remain--; 2820 2821 ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr); 2822 } 2823 2824 ath11k_hal_srng_access_end(ab, srng); 2825 2826 spin_unlock_bh(&srng->lock); 2827 2828 return req_entries - num_remain; 2829 2830 fail_desc_get: 2831 spin_lock_bh(&rx_ring->idr_lock); 2832 idr_remove(&rx_ring->bufs_idr, buf_id); 2833 spin_unlock_bh(&rx_ring->idr_lock); 2834 dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb), 2835 DMA_FROM_DEVICE); 2836 dev_kfree_skb_any(skb); 2837 ath11k_hal_srng_access_end(ab, srng); 2838 spin_unlock_bh(&srng->lock); 2839 2840 return req_entries - num_remain; 2841 } 2842 2843 static int ath11k_dp_rx_reap_mon_status_ring(struct ath11k_base *ab, int mac_id, 2844 int *budget, struct sk_buff_head *skb_list) 2845 { 2846 struct ath11k *ar; 2847 struct ath11k_pdev_dp *dp; 2848 struct dp_rxdma_ring *rx_ring; 2849 struct hal_srng *srng; 2850 void *rx_mon_status_desc; 2851 struct sk_buff *skb; 2852 struct ath11k_skb_rxcb *rxcb; 2853 struct hal_tlv_hdr *tlv; 2854 u32 cookie; 2855 int buf_id, srng_id; 2856 dma_addr_t paddr; 2857 u8 rbm; 2858 int num_buffs_reaped = 0; 2859 2860 ar = ab->pdevs[ath11k_hw_mac_id_to_pdev_id(&ab->hw_params, mac_id)].ar; 2861 dp = &ar->dp; 2862 srng_id = 
ath11k_hw_mac_id_to_srng_id(&ab->hw_params, mac_id); 2863 rx_ring = &dp->rx_mon_status_refill_ring[srng_id]; 2864 2865 srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id]; 2866 2867 spin_lock_bh(&srng->lock); 2868 2869 ath11k_hal_srng_access_begin(ab, srng); 2870 while (*budget) { 2871 *budget -= 1; 2872 rx_mon_status_desc = 2873 ath11k_hal_srng_src_peek(ab, srng); 2874 if (!rx_mon_status_desc) 2875 break; 2876 2877 ath11k_hal_rx_buf_addr_info_get(rx_mon_status_desc, &paddr, 2878 &cookie, &rbm); 2879 if (paddr) { 2880 buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, cookie); 2881 2882 spin_lock_bh(&rx_ring->idr_lock); 2883 skb = idr_find(&rx_ring->bufs_idr, buf_id); 2884 if (!skb) { 2885 ath11k_warn(ab, "rx monitor status with invalid buf_id %d\n", 2886 buf_id); 2887 spin_unlock_bh(&rx_ring->idr_lock); 2888 goto move_next; 2889 } 2890 2891 idr_remove(&rx_ring->bufs_idr, buf_id); 2892 spin_unlock_bh(&rx_ring->idr_lock); 2893 2894 rxcb = ATH11K_SKB_RXCB(skb); 2895 2896 dma_unmap_single(ab->dev, rxcb->paddr, 2897 skb->len + skb_tailroom(skb), 2898 DMA_FROM_DEVICE); 2899 2900 tlv = (struct hal_tlv_hdr *)skb->data; 2901 if (FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl) != 2902 HAL_RX_STATUS_BUFFER_DONE) { 2903 ath11k_warn(ab, "mon status DONE not set %lx\n", 2904 FIELD_GET(HAL_TLV_HDR_TAG, 2905 tlv->tl)); 2906 dev_kfree_skb_any(skb); 2907 goto move_next; 2908 } 2909 2910 __skb_queue_tail(skb_list, skb); 2911 } 2912 move_next: 2913 skb = ath11k_dp_rx_alloc_mon_status_buf(ab, rx_ring, 2914 &buf_id); 2915 2916 if (!skb) { 2917 ath11k_hal_rx_buf_addr_info_set(rx_mon_status_desc, 0, 0, 2918 HAL_RX_BUF_RBM_SW3_BM); 2919 num_buffs_reaped++; 2920 break; 2921 } 2922 rxcb = ATH11K_SKB_RXCB(skb); 2923 2924 cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) | 2925 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id); 2926 2927 ath11k_hal_rx_buf_addr_info_set(rx_mon_status_desc, rxcb->paddr, 2928 cookie, HAL_RX_BUF_RBM_SW3_BM); 2929 ath11k_hal_srng_src_get_next_entry(ab, srng); 2930 num_buffs_reaped++; 2931 } 2932 ath11k_hal_srng_access_end(ab, srng); 2933 spin_unlock_bh(&srng->lock); 2934 2935 return num_buffs_reaped; 2936 } 2937 2938 int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id, 2939 struct napi_struct *napi, int budget) 2940 { 2941 struct ath11k *ar = ath11k_ab_to_ar(ab, mac_id); 2942 enum hal_rx_mon_status hal_status; 2943 struct sk_buff *skb; 2944 struct sk_buff_head skb_list; 2945 struct hal_rx_mon_ppdu_info ppdu_info; 2946 struct ath11k_peer *peer; 2947 struct ath11k_sta *arsta; 2948 int num_buffs_reaped = 0; 2949 2950 __skb_queue_head_init(&skb_list); 2951 2952 num_buffs_reaped = ath11k_dp_rx_reap_mon_status_ring(ab, mac_id, &budget, 2953 &skb_list); 2954 if (!num_buffs_reaped) 2955 goto exit; 2956 2957 while ((skb = __skb_dequeue(&skb_list))) { 2958 memset(&ppdu_info, 0, sizeof(ppdu_info)); 2959 ppdu_info.peer_id = HAL_INVALID_PEERID; 2960 2961 if (ath11k_debugfs_is_pktlog_rx_stats_enabled(ar)) 2962 trace_ath11k_htt_rxdesc(ar, skb->data, DP_RX_BUFFER_SIZE); 2963 2964 hal_status = ath11k_hal_rx_parse_mon_status(ab, &ppdu_info, skb); 2965 2966 if (ppdu_info.peer_id == HAL_INVALID_PEERID || 2967 hal_status != HAL_RX_MON_STATUS_PPDU_DONE) { 2968 dev_kfree_skb_any(skb); 2969 continue; 2970 } 2971 2972 rcu_read_lock(); 2973 spin_lock_bh(&ab->base_lock); 2974 peer = ath11k_peer_find_by_id(ab, ppdu_info.peer_id); 2975 2976 if (!peer || !peer->sta) { 2977 ath11k_dbg(ab, ATH11K_DBG_DATA, 2978 "failed to find the peer with peer_id %d\n", 2979 ppdu_info.peer_id); 2980 
spin_unlock_bh(&ab->base_lock);
2981 rcu_read_unlock();
2982 dev_kfree_skb_any(skb);
2983 continue;
2984 }
2985
2986 arsta = (struct ath11k_sta *)peer->sta->drv_priv;
2987 ath11k_dp_rx_update_peer_stats(arsta, &ppdu_info);
2988
2989 if (ath11k_debugfs_is_pktlog_peer_valid(ar, peer->addr))
2990 trace_ath11k_htt_rxdesc(ar, skb->data, DP_RX_BUFFER_SIZE);
2991
2992 spin_unlock_bh(&ab->base_lock);
2993 rcu_read_unlock();
2994
2995 dev_kfree_skb_any(skb);
2996 }
2997 exit:
2998 return num_buffs_reaped;
2999 }
3000
3001 static void ath11k_dp_rx_frag_timer(struct timer_list *timer)
3002 {
3003 struct dp_rx_tid *rx_tid = from_timer(rx_tid, timer, frag_timer);
3004
3005 spin_lock_bh(&rx_tid->ab->base_lock);
3006 if (rx_tid->last_frag_no &&
3007 rx_tid->rx_frag_bitmap == GENMASK(rx_tid->last_frag_no, 0)) {
3008 spin_unlock_bh(&rx_tid->ab->base_lock);
3009 return;
3010 }
3011 ath11k_dp_rx_frags_cleanup(rx_tid, true);
3012 spin_unlock_bh(&rx_tid->ab->base_lock);
3013 }
3014
3015 int ath11k_peer_rx_frag_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id)
3016 {
3017 struct ath11k_base *ab = ar->ab;
3018 struct crypto_shash *tfm;
3019 struct ath11k_peer *peer;
3020 struct dp_rx_tid *rx_tid;
3021 int i;
3022
3023 tfm = crypto_alloc_shash("michael_mic", 0, 0);
3024 if (IS_ERR(tfm))
3025 return PTR_ERR(tfm);
3026
3027 spin_lock_bh(&ab->base_lock);
3028
3029 peer = ath11k_peer_find(ab, vdev_id, peer_mac);
3030 if (!peer) {
3031 ath11k_warn(ab, "failed to find the peer to set up fragment info\n");
3032 spin_unlock_bh(&ab->base_lock);
/* free the unused michael_mic transform instead of leaking it */
crypto_free_shash(tfm);
3033 return -ENOENT;
3034 }
3035
3036 for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
3037 rx_tid = &peer->rx_tid[i];
3038 rx_tid->ab = ab;
3039 timer_setup(&rx_tid->frag_timer, ath11k_dp_rx_frag_timer, 0);
3040 skb_queue_head_init(&rx_tid->rx_frags);
3041 }
3042
3043 peer->tfm_mmic = tfm;
3044 spin_unlock_bh(&ab->base_lock);
3045
3046 return 0;
3047 }
3048
3049 static int ath11k_dp_rx_h_michael_mic(struct crypto_shash *tfm, u8 *key,
3050 struct ieee80211_hdr *hdr, u8 *data,
3051 size_t data_len, u8 *mic)
3052 {
3053 SHASH_DESC_ON_STACK(desc, tfm);
3054 u8 mic_hdr[16] = {0};
3055 u8 tid = 0;
3056 int ret;
3057
3058 if (!tfm)
3059 return -EINVAL;
3060
3061 desc->tfm = tfm;
3062
3063 ret = crypto_shash_setkey(tfm, key, 8);
3064 if (ret)
3065 goto out;
3066
3067 ret = crypto_shash_init(desc);
3068 if (ret)
3069 goto out;
3070
3071 /* TKIP MIC header */
3072 memcpy(mic_hdr, ieee80211_get_DA(hdr), ETH_ALEN);
3073 memcpy(mic_hdr + ETH_ALEN, ieee80211_get_SA(hdr), ETH_ALEN);
3074 if (ieee80211_is_data_qos(hdr->frame_control))
3075 tid = ieee80211_get_tid(hdr);
3076 mic_hdr[12] = tid;
3077
3078 ret = crypto_shash_update(desc, mic_hdr, 16);
3079 if (ret)
3080 goto out;
3081 ret = crypto_shash_update(desc, data, data_len);
3082 if (ret)
3083 goto out;
3084 ret = crypto_shash_final(desc, mic);
3085 out:
3086 shash_desc_zero(desc);
3087 return ret;
3088 }
3089
3090 static int ath11k_dp_rx_h_verify_tkip_mic(struct ath11k *ar, struct ath11k_peer *peer,
3091 struct sk_buff *msdu)
3092 {
3093 struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)msdu->data;
3094 struct ieee80211_rx_status *rxs = IEEE80211_SKB_RXCB(msdu);
3095 struct ieee80211_key_conf *key_conf;
3096 struct ieee80211_hdr *hdr;
3097 u8 mic[IEEE80211_CCMP_MIC_LEN];
3098 int head_len, tail_len, ret;
3099 size_t data_len;
3100 u32 hdr_len, hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
3101 u8 *key, *data;
3102 u8 key_idx;
3103
3104 if (ath11k_dp_rx_h_mpdu_start_enctype(ar->ab, rx_desc) !=
3105 HAL_ENCRYPT_TYPE_TKIP_MIC)
3106
return 0; 3107 3108 hdr = (struct ieee80211_hdr *)(msdu->data + hal_rx_desc_sz); 3109 hdr_len = ieee80211_hdrlen(hdr->frame_control); 3110 head_len = hdr_len + hal_rx_desc_sz + IEEE80211_TKIP_IV_LEN; 3111 tail_len = IEEE80211_CCMP_MIC_LEN + IEEE80211_TKIP_ICV_LEN + FCS_LEN; 3112 3113 if (!is_multicast_ether_addr(hdr->addr1)) 3114 key_idx = peer->ucast_keyidx; 3115 else 3116 key_idx = peer->mcast_keyidx; 3117 3118 key_conf = peer->keys[key_idx]; 3119 3120 data = msdu->data + head_len; 3121 data_len = msdu->len - head_len - tail_len; 3122 key = &key_conf->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY]; 3123 3124 ret = ath11k_dp_rx_h_michael_mic(peer->tfm_mmic, key, hdr, data, data_len, mic); 3125 if (ret || memcmp(mic, data + data_len, IEEE80211_CCMP_MIC_LEN)) 3126 goto mic_fail; 3127 3128 return 0; 3129 3130 mic_fail: 3131 (ATH11K_SKB_RXCB(msdu))->is_first_msdu = true; 3132 (ATH11K_SKB_RXCB(msdu))->is_last_msdu = true; 3133 3134 rxs->flag |= RX_FLAG_MMIC_ERROR | RX_FLAG_MMIC_STRIPPED | 3135 RX_FLAG_IV_STRIPPED | RX_FLAG_DECRYPTED; 3136 skb_pull(msdu, hal_rx_desc_sz); 3137 3138 ath11k_dp_rx_h_ppdu(ar, rx_desc, rxs); 3139 ath11k_dp_rx_h_undecap(ar, msdu, rx_desc, 3140 HAL_ENCRYPT_TYPE_TKIP_MIC, rxs, true); 3141 ieee80211_rx(ar->hw, msdu); 3142 return -EINVAL; 3143 } 3144 3145 static void ath11k_dp_rx_h_undecap_frag(struct ath11k *ar, struct sk_buff *msdu, 3146 enum hal_encrypt_type enctype, u32 flags) 3147 { 3148 struct ieee80211_hdr *hdr; 3149 size_t hdr_len; 3150 size_t crypto_len; 3151 u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz; 3152 3153 if (!flags) 3154 return; 3155 3156 hdr = (struct ieee80211_hdr *)(msdu->data + hal_rx_desc_sz); 3157 3158 if (flags & RX_FLAG_MIC_STRIPPED) 3159 skb_trim(msdu, msdu->len - 3160 ath11k_dp_rx_crypto_mic_len(ar, enctype)); 3161 3162 if (flags & RX_FLAG_ICV_STRIPPED) 3163 skb_trim(msdu, msdu->len - 3164 ath11k_dp_rx_crypto_icv_len(ar, enctype)); 3165 3166 if (flags & RX_FLAG_IV_STRIPPED) { 3167 hdr_len = ieee80211_hdrlen(hdr->frame_control); 3168 crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype); 3169 3170 memmove((void *)msdu->data + hal_rx_desc_sz + crypto_len, 3171 (void *)msdu->data + hal_rx_desc_sz, hdr_len); 3172 skb_pull(msdu, crypto_len); 3173 } 3174 } 3175 3176 static int ath11k_dp_rx_h_defrag(struct ath11k *ar, 3177 struct ath11k_peer *peer, 3178 struct dp_rx_tid *rx_tid, 3179 struct sk_buff **defrag_skb) 3180 { 3181 struct hal_rx_desc *rx_desc; 3182 struct sk_buff *skb, *first_frag, *last_frag; 3183 struct ieee80211_hdr *hdr; 3184 struct rx_attention *rx_attention; 3185 enum hal_encrypt_type enctype; 3186 bool is_decrypted = false; 3187 int msdu_len = 0; 3188 int extra_space; 3189 u32 flags, hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz; 3190 3191 first_frag = skb_peek(&rx_tid->rx_frags); 3192 last_frag = skb_peek_tail(&rx_tid->rx_frags); 3193 3194 skb_queue_walk(&rx_tid->rx_frags, skb) { 3195 flags = 0; 3196 rx_desc = (struct hal_rx_desc *)skb->data; 3197 hdr = (struct ieee80211_hdr *)(skb->data + hal_rx_desc_sz); 3198 3199 enctype = ath11k_dp_rx_h_mpdu_start_enctype(ar->ab, rx_desc); 3200 if (enctype != HAL_ENCRYPT_TYPE_OPEN) { 3201 rx_attention = ath11k_dp_rx_get_attention(ar->ab, rx_desc); 3202 is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_attention); 3203 } 3204 3205 if (is_decrypted) { 3206 if (skb != first_frag) 3207 flags |= RX_FLAG_IV_STRIPPED; 3208 if (skb != last_frag) 3209 flags |= RX_FLAG_ICV_STRIPPED | 3210 RX_FLAG_MIC_STRIPPED; 3211 } 3212 3213 /* RX fragments are always raw packets */ 3214 if (skb != last_frag) 3215 
skb_trim(skb, skb->len - FCS_LEN); 3216 ath11k_dp_rx_h_undecap_frag(ar, skb, enctype, flags); 3217 3218 if (skb != first_frag) 3219 skb_pull(skb, hal_rx_desc_sz + 3220 ieee80211_hdrlen(hdr->frame_control)); 3221 msdu_len += skb->len; 3222 } 3223 3224 extra_space = msdu_len - (DP_RX_BUFFER_SIZE + skb_tailroom(first_frag)); 3225 if (extra_space > 0 && 3226 (pskb_expand_head(first_frag, 0, extra_space, GFP_ATOMIC) < 0)) 3227 return -ENOMEM; 3228 3229 __skb_unlink(first_frag, &rx_tid->rx_frags); 3230 while ((skb = __skb_dequeue(&rx_tid->rx_frags))) { 3231 skb_put_data(first_frag, skb->data, skb->len); 3232 dev_kfree_skb_any(skb); 3233 } 3234 3235 hdr = (struct ieee80211_hdr *)(first_frag->data + hal_rx_desc_sz); 3236 hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_MOREFRAGS); 3237 ATH11K_SKB_RXCB(first_frag)->is_frag = 1; 3238 3239 if (ath11k_dp_rx_h_verify_tkip_mic(ar, peer, first_frag)) 3240 first_frag = NULL; 3241 3242 *defrag_skb = first_frag; 3243 return 0; 3244 } 3245 3246 static int ath11k_dp_rx_h_defrag_reo_reinject(struct ath11k *ar, struct dp_rx_tid *rx_tid, 3247 struct sk_buff *defrag_skb) 3248 { 3249 struct ath11k_base *ab = ar->ab; 3250 struct ath11k_pdev_dp *dp = &ar->dp; 3251 struct dp_rxdma_ring *rx_refill_ring = &dp->rx_refill_buf_ring; 3252 struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)defrag_skb->data; 3253 struct hal_reo_entrance_ring *reo_ent_ring; 3254 struct hal_reo_dest_ring *reo_dest_ring; 3255 struct dp_link_desc_bank *link_desc_banks; 3256 struct hal_rx_msdu_link *msdu_link; 3257 struct hal_rx_msdu_details *msdu0; 3258 struct hal_srng *srng; 3259 dma_addr_t paddr; 3260 u32 desc_bank, msdu_info, mpdu_info; 3261 u32 dst_idx, cookie, hal_rx_desc_sz; 3262 int ret, buf_id; 3263 3264 hal_rx_desc_sz = ab->hw_params.hal_desc_sz; 3265 link_desc_banks = ab->dp.link_desc_banks; 3266 reo_dest_ring = rx_tid->dst_ring_desc; 3267 3268 ath11k_hal_rx_reo_ent_paddr_get(ab, reo_dest_ring, &paddr, &desc_bank); 3269 msdu_link = (struct hal_rx_msdu_link *)(link_desc_banks[desc_bank].vaddr + 3270 (paddr - link_desc_banks[desc_bank].paddr)); 3271 msdu0 = &msdu_link->msdu_link[0]; 3272 dst_idx = FIELD_GET(RX_MSDU_DESC_INFO0_REO_DEST_IND, msdu0->rx_msdu_info.info0); 3273 memset(msdu0, 0, sizeof(*msdu0)); 3274 3275 msdu_info = FIELD_PREP(RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU, 1) | 3276 FIELD_PREP(RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU, 1) | 3277 FIELD_PREP(RX_MSDU_DESC_INFO0_MSDU_CONTINUATION, 0) | 3278 FIELD_PREP(RX_MSDU_DESC_INFO0_MSDU_LENGTH, 3279 defrag_skb->len - hal_rx_desc_sz) | 3280 FIELD_PREP(RX_MSDU_DESC_INFO0_REO_DEST_IND, dst_idx) | 3281 FIELD_PREP(RX_MSDU_DESC_INFO0_VALID_SA, 1) | 3282 FIELD_PREP(RX_MSDU_DESC_INFO0_VALID_DA, 1); 3283 msdu0->rx_msdu_info.info0 = msdu_info; 3284 3285 /* change msdu len in hal rx desc */ 3286 ath11k_dp_rxdesc_set_msdu_len(ab, rx_desc, defrag_skb->len - hal_rx_desc_sz); 3287 3288 paddr = dma_map_single(ab->dev, defrag_skb->data, 3289 defrag_skb->len + skb_tailroom(defrag_skb), 3290 DMA_FROM_DEVICE); 3291 if (dma_mapping_error(ab->dev, paddr)) 3292 return -ENOMEM; 3293 3294 spin_lock_bh(&rx_refill_ring->idr_lock); 3295 buf_id = idr_alloc(&rx_refill_ring->bufs_idr, defrag_skb, 0, 3296 rx_refill_ring->bufs_max * 3, GFP_ATOMIC); 3297 spin_unlock_bh(&rx_refill_ring->idr_lock); 3298 if (buf_id < 0) { 3299 ret = -ENOMEM; 3300 goto err_unmap_dma; 3301 } 3302 3303 ATH11K_SKB_RXCB(defrag_skb)->paddr = paddr; 3304 cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, dp->mac_id) | 3305 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id); 3306 3307 
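/* Point the single MSDU entry of the link descriptor at the
 * defragmented buffer so REO can deliver it as one complete MSDU.
 */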
ath11k_hal_rx_buf_addr_info_set(msdu0, paddr, cookie, HAL_RX_BUF_RBM_SW3_BM); 3308 3309 /* Fill mpdu details into reo entrace ring */ 3310 srng = &ab->hal.srng_list[ab->dp.reo_reinject_ring.ring_id]; 3311 3312 spin_lock_bh(&srng->lock); 3313 ath11k_hal_srng_access_begin(ab, srng); 3314 3315 reo_ent_ring = (struct hal_reo_entrance_ring *) 3316 ath11k_hal_srng_src_get_next_entry(ab, srng); 3317 if (!reo_ent_ring) { 3318 ath11k_hal_srng_access_end(ab, srng); 3319 spin_unlock_bh(&srng->lock); 3320 ret = -ENOSPC; 3321 goto err_free_idr; 3322 } 3323 memset(reo_ent_ring, 0, sizeof(*reo_ent_ring)); 3324 3325 ath11k_hal_rx_reo_ent_paddr_get(ab, reo_dest_ring, &paddr, &desc_bank); 3326 ath11k_hal_rx_buf_addr_info_set(reo_ent_ring, paddr, desc_bank, 3327 HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST); 3328 3329 mpdu_info = FIELD_PREP(RX_MPDU_DESC_INFO0_MSDU_COUNT, 1) | 3330 FIELD_PREP(RX_MPDU_DESC_INFO0_SEQ_NUM, rx_tid->cur_sn) | 3331 FIELD_PREP(RX_MPDU_DESC_INFO0_FRAG_FLAG, 0) | 3332 FIELD_PREP(RX_MPDU_DESC_INFO0_VALID_SA, 1) | 3333 FIELD_PREP(RX_MPDU_DESC_INFO0_VALID_DA, 1) | 3334 FIELD_PREP(RX_MPDU_DESC_INFO0_RAW_MPDU, 1) | 3335 FIELD_PREP(RX_MPDU_DESC_INFO0_VALID_PN, 1); 3336 3337 reo_ent_ring->rx_mpdu_info.info0 = mpdu_info; 3338 reo_ent_ring->rx_mpdu_info.meta_data = reo_dest_ring->rx_mpdu_info.meta_data; 3339 reo_ent_ring->queue_addr_lo = reo_dest_ring->queue_addr_lo; 3340 reo_ent_ring->info0 = FIELD_PREP(HAL_REO_ENTR_RING_INFO0_QUEUE_ADDR_HI, 3341 FIELD_GET(HAL_REO_DEST_RING_INFO0_QUEUE_ADDR_HI, 3342 reo_dest_ring->info0)) | 3343 FIELD_PREP(HAL_REO_ENTR_RING_INFO0_DEST_IND, dst_idx); 3344 ath11k_hal_srng_access_end(ab, srng); 3345 spin_unlock_bh(&srng->lock); 3346 3347 return 0; 3348 3349 err_free_idr: 3350 spin_lock_bh(&rx_refill_ring->idr_lock); 3351 idr_remove(&rx_refill_ring->bufs_idr, buf_id); 3352 spin_unlock_bh(&rx_refill_ring->idr_lock); 3353 err_unmap_dma: 3354 dma_unmap_single(ab->dev, paddr, defrag_skb->len + skb_tailroom(defrag_skb), 3355 DMA_FROM_DEVICE); 3356 return ret; 3357 } 3358 3359 static int ath11k_dp_rx_h_cmp_frags(struct ath11k *ar, 3360 struct sk_buff *a, struct sk_buff *b) 3361 { 3362 int frag1, frag2; 3363 3364 frag1 = ath11k_dp_rx_h_mpdu_start_frag_no(ar->ab, a); 3365 frag2 = ath11k_dp_rx_h_mpdu_start_frag_no(ar->ab, b); 3366 3367 return frag1 - frag2; 3368 } 3369 3370 static void ath11k_dp_rx_h_sort_frags(struct ath11k *ar, 3371 struct sk_buff_head *frag_list, 3372 struct sk_buff *cur_frag) 3373 { 3374 struct sk_buff *skb; 3375 int cmp; 3376 3377 skb_queue_walk(frag_list, skb) { 3378 cmp = ath11k_dp_rx_h_cmp_frags(ar, skb, cur_frag); 3379 if (cmp < 0) 3380 continue; 3381 __skb_queue_before(frag_list, skb, cur_frag); 3382 return; 3383 } 3384 __skb_queue_tail(frag_list, cur_frag); 3385 } 3386 3387 static u64 ath11k_dp_rx_h_get_pn(struct ath11k *ar, struct sk_buff *skb) 3388 { 3389 struct ieee80211_hdr *hdr; 3390 u64 pn = 0; 3391 u8 *ehdr; 3392 u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz; 3393 3394 hdr = (struct ieee80211_hdr *)(skb->data + hal_rx_desc_sz); 3395 ehdr = skb->data + hal_rx_desc_sz + ieee80211_hdrlen(hdr->frame_control); 3396 3397 pn = ehdr[0]; 3398 pn |= (u64)ehdr[1] << 8; 3399 pn |= (u64)ehdr[4] << 16; 3400 pn |= (u64)ehdr[5] << 24; 3401 pn |= (u64)ehdr[6] << 32; 3402 pn |= (u64)ehdr[7] << 40; 3403 3404 return pn; 3405 } 3406 3407 static bool 3408 ath11k_dp_rx_h_defrag_validate_incr_pn(struct ath11k *ar, struct dp_rx_tid *rx_tid) 3409 { 3410 enum hal_encrypt_type encrypt_type; 3411 struct sk_buff *first_frag, *skb; 3412 struct hal_rx_desc *desc; 3413 u64 
last_pn; 3414 u64 cur_pn; 3415 3416 first_frag = skb_peek(&rx_tid->rx_frags); 3417 desc = (struct hal_rx_desc *)first_frag->data; 3418 3419 encrypt_type = ath11k_dp_rx_h_mpdu_start_enctype(ar->ab, desc); 3420 if (encrypt_type != HAL_ENCRYPT_TYPE_CCMP_128 && 3421 encrypt_type != HAL_ENCRYPT_TYPE_CCMP_256 && 3422 encrypt_type != HAL_ENCRYPT_TYPE_GCMP_128 && 3423 encrypt_type != HAL_ENCRYPT_TYPE_AES_GCMP_256) 3424 return true; 3425 3426 last_pn = ath11k_dp_rx_h_get_pn(ar, first_frag); 3427 skb_queue_walk(&rx_tid->rx_frags, skb) { 3428 if (skb == first_frag) 3429 continue; 3430 3431 cur_pn = ath11k_dp_rx_h_get_pn(ar, skb); 3432 if (cur_pn != last_pn + 1) 3433 return false; 3434 last_pn = cur_pn; 3435 } 3436 return true; 3437 } 3438 3439 static int ath11k_dp_rx_frag_h_mpdu(struct ath11k *ar, 3440 struct sk_buff *msdu, 3441 u32 *ring_desc) 3442 { 3443 struct ath11k_base *ab = ar->ab; 3444 struct hal_rx_desc *rx_desc; 3445 struct ath11k_peer *peer; 3446 struct dp_rx_tid *rx_tid; 3447 struct sk_buff *defrag_skb = NULL; 3448 u32 peer_id; 3449 u16 seqno, frag_no; 3450 u8 tid; 3451 int ret = 0; 3452 bool more_frags; 3453 3454 rx_desc = (struct hal_rx_desc *)msdu->data; 3455 peer_id = ath11k_dp_rx_h_mpdu_start_peer_id(ar->ab, rx_desc); 3456 tid = ath11k_dp_rx_h_mpdu_start_tid(ar->ab, rx_desc); 3457 seqno = ath11k_dp_rx_h_mpdu_start_seq_no(ar->ab, rx_desc); 3458 frag_no = ath11k_dp_rx_h_mpdu_start_frag_no(ar->ab, msdu); 3459 more_frags = ath11k_dp_rx_h_mpdu_start_more_frags(ar->ab, msdu); 3460 3461 if (!ath11k_dp_rx_h_mpdu_start_seq_ctrl_valid(ar->ab, rx_desc) || 3462 !ath11k_dp_rx_h_mpdu_start_fc_valid(ar->ab, rx_desc) || 3463 tid > IEEE80211_NUM_TIDS) 3464 return -EINVAL; 3465 3466 /* received unfragmented packet in reo 3467 * exception ring, this shouldn't happen 3468 * as these packets typically come from 3469 * reo2sw srngs. 
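* Warn once and return an error so the caller drops the frame.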
3470 */ 3471 if (WARN_ON_ONCE(!frag_no && !more_frags)) 3472 return -EINVAL; 3473 3474 spin_lock_bh(&ab->base_lock); 3475 peer = ath11k_peer_find_by_id(ab, peer_id); 3476 if (!peer) { 3477 ath11k_warn(ab, "failed to find the peer to de-fragment received fragment peer_id %d\n", 3478 peer_id); 3479 ret = -ENOENT; 3480 goto out_unlock; 3481 } 3482 rx_tid = &peer->rx_tid[tid]; 3483 3484 if ((!skb_queue_empty(&rx_tid->rx_frags) && seqno != rx_tid->cur_sn) || 3485 skb_queue_empty(&rx_tid->rx_frags)) { 3486 /* Flush stored fragments and start a new sequence */ 3487 ath11k_dp_rx_frags_cleanup(rx_tid, true); 3488 rx_tid->cur_sn = seqno; 3489 } 3490 3491 if (rx_tid->rx_frag_bitmap & BIT(frag_no)) { 3492 /* Fragment already present */ 3493 ret = -EINVAL; 3494 goto out_unlock; 3495 } 3496 3497 if (frag_no > __fls(rx_tid->rx_frag_bitmap)) 3498 __skb_queue_tail(&rx_tid->rx_frags, msdu); 3499 else 3500 ath11k_dp_rx_h_sort_frags(ar, &rx_tid->rx_frags, msdu); 3501 3502 rx_tid->rx_frag_bitmap |= BIT(frag_no); 3503 if (!more_frags) 3504 rx_tid->last_frag_no = frag_no; 3505 3506 if (frag_no == 0) { 3507 rx_tid->dst_ring_desc = kmemdup(ring_desc, 3508 sizeof(*rx_tid->dst_ring_desc), 3509 GFP_ATOMIC); 3510 if (!rx_tid->dst_ring_desc) { 3511 ret = -ENOMEM; 3512 goto out_unlock; 3513 } 3514 } else { 3515 ath11k_dp_rx_link_desc_return(ab, ring_desc, 3516 HAL_WBM_REL_BM_ACT_PUT_IN_IDLE); 3517 } 3518 3519 if (!rx_tid->last_frag_no || 3520 rx_tid->rx_frag_bitmap != GENMASK(rx_tid->last_frag_no, 0)) { 3521 mod_timer(&rx_tid->frag_timer, jiffies + 3522 ATH11K_DP_RX_FRAGMENT_TIMEOUT_MS); 3523 goto out_unlock; 3524 } 3525 3526 spin_unlock_bh(&ab->base_lock); 3527 del_timer_sync(&rx_tid->frag_timer); 3528 spin_lock_bh(&ab->base_lock); 3529 3530 peer = ath11k_peer_find_by_id(ab, peer_id); 3531 if (!peer) 3532 goto err_frags_cleanup; 3533 3534 if (!ath11k_dp_rx_h_defrag_validate_incr_pn(ar, rx_tid)) 3535 goto err_frags_cleanup; 3536 3537 if (ath11k_dp_rx_h_defrag(ar, peer, rx_tid, &defrag_skb)) 3538 goto err_frags_cleanup; 3539 3540 if (!defrag_skb) 3541 goto err_frags_cleanup; 3542 3543 if (ath11k_dp_rx_h_defrag_reo_reinject(ar, rx_tid, defrag_skb)) 3544 goto err_frags_cleanup; 3545 3546 ath11k_dp_rx_frags_cleanup(rx_tid, false); 3547 goto out_unlock; 3548 3549 err_frags_cleanup: 3550 dev_kfree_skb_any(defrag_skb); 3551 ath11k_dp_rx_frags_cleanup(rx_tid, true); 3552 out_unlock: 3553 spin_unlock_bh(&ab->base_lock); 3554 return ret; 3555 } 3556 3557 static int 3558 ath11k_dp_process_rx_err_buf(struct ath11k *ar, u32 *ring_desc, int buf_id, bool drop) 3559 { 3560 struct ath11k_pdev_dp *dp = &ar->dp; 3561 struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring; 3562 struct sk_buff *msdu; 3563 struct ath11k_skb_rxcb *rxcb; 3564 struct hal_rx_desc *rx_desc; 3565 u8 *hdr_status; 3566 u16 msdu_len; 3567 u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz; 3568 3569 spin_lock_bh(&rx_ring->idr_lock); 3570 msdu = idr_find(&rx_ring->bufs_idr, buf_id); 3571 if (!msdu) { 3572 ath11k_warn(ar->ab, "rx err buf with invalid buf_id %d\n", 3573 buf_id); 3574 spin_unlock_bh(&rx_ring->idr_lock); 3575 return -EINVAL; 3576 } 3577 3578 idr_remove(&rx_ring->bufs_idr, buf_id); 3579 spin_unlock_bh(&rx_ring->idr_lock); 3580 3581 rxcb = ATH11K_SKB_RXCB(msdu); 3582 dma_unmap_single(ar->ab->dev, rxcb->paddr, 3583 msdu->len + skb_tailroom(msdu), 3584 DMA_FROM_DEVICE); 3585 3586 if (drop) { 3587 dev_kfree_skb_any(msdu); 3588 return 0; 3589 } 3590 3591 rcu_read_lock(); 3592 if (!rcu_dereference(ar->ab->pdevs_active[ar->pdev_idx])) { 3593 
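/* pdev is no longer active; drop the frame */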
dev_kfree_skb_any(msdu);
3594 goto exit;
3595 }
3596
3597 if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) {
3598 dev_kfree_skb_any(msdu);
3599 goto exit;
3600 }
3601
3602 rx_desc = (struct hal_rx_desc *)msdu->data;
3603 msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(ar->ab, rx_desc);
3604 if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) {
3605 hdr_status = ath11k_dp_rx_h_80211_hdr(ar->ab, rx_desc);
3606 ath11k_warn(ar->ab, "invalid msdu len %u", msdu_len);
3607 ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "", hdr_status,
3608 sizeof(struct ieee80211_hdr));
3609 ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "", rx_desc,
3610 sizeof(struct hal_rx_desc));
3611 dev_kfree_skb_any(msdu);
3612 goto exit;
3613 }
3614
3615 skb_put(msdu, hal_rx_desc_sz + msdu_len);
3616
3617 if (ath11k_dp_rx_frag_h_mpdu(ar, msdu, ring_desc)) {
3618 dev_kfree_skb_any(msdu);
3619 ath11k_dp_rx_link_desc_return(ar->ab, ring_desc,
3620 HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
3621 }
3622 exit:
3623 rcu_read_unlock();
3624 return 0;
3625 }
3626
3627 int ath11k_dp_process_rx_err(struct ath11k_base *ab, struct napi_struct *napi,
3628 int budget)
3629 {
3630 u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC];
3631 struct dp_link_desc_bank *link_desc_banks;
3632 enum hal_rx_buf_return_buf_manager rbm;
3633 int tot_n_bufs_reaped, quota, ret, i;
3634 int n_bufs_reaped[MAX_RADIOS] = {0};
3635 struct dp_rxdma_ring *rx_ring;
3636 struct dp_srng *reo_except;
3637 u32 desc_bank, num_msdus;
3638 struct hal_srng *srng;
3639 struct ath11k_dp *dp;
3640 void *link_desc_va;
3641 int buf_id, mac_id;
3642 struct ath11k *ar;
3643 dma_addr_t paddr;
3644 u32 *desc;
3645 bool is_frag;
3646 u8 drop = 0;
3647
3648 tot_n_bufs_reaped = 0;
3649 quota = budget;
3650
3651 dp = &ab->dp;
3652 reo_except = &dp->reo_except_ring;
3653 link_desc_banks = dp->link_desc_banks;
3654
3655 srng = &ab->hal.srng_list[reo_except->ring_id];
3656
3657 spin_lock_bh(&srng->lock);
3658
3659 ath11k_hal_srng_access_begin(ab, srng);
3660
3661 while (budget &&
3662 (desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
3663 struct hal_reo_dest_ring *reo_desc = (struct hal_reo_dest_ring *)desc;
3664
3665 ab->soc_stats.err_ring_pkts++;
3666 ret = ath11k_hal_desc_reo_parse_err(ab, desc, &paddr,
3667 &desc_bank);
3668 if (ret) {
3669 ath11k_warn(ab, "failed to parse error reo desc %d\n",
3670 ret);
3671 continue;
3672 }
3673 link_desc_va = link_desc_banks[desc_bank].vaddr +
3674 (paddr - link_desc_banks[desc_bank].paddr);
3675 ath11k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, msdu_cookies,
3676 &rbm);
3677 if (rbm != HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST &&
3678 rbm != HAL_RX_BUF_RBM_SW3_BM) {
3679 ab->soc_stats.invalid_rbm++;
3680 ath11k_warn(ab, "invalid return buffer manager %d\n", rbm);
3681 ath11k_dp_rx_link_desc_return(ab, desc,
3682 HAL_WBM_REL_BM_ACT_REL_MSDU);
3683 continue;
3684 }
3685
3686 is_frag = !!(reo_desc->rx_mpdu_info.info0 & RX_MPDU_DESC_INFO0_FRAG_FLAG);
3687
3688 /* Process only rx fragments with one msdu per link desc below, and drop
3689 * msdus indicated due to error reasons.
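* When drop is set the link descriptor is returned to the WBM idle
* list right away.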
3690 */
3691 if (!is_frag || num_msdus > 1) {
3692 drop = 1;
3693 /* Return the link desc back to wbm idle list */
3694 ath11k_dp_rx_link_desc_return(ab, desc,
3695 HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
3696 }
3697
3698 for (i = 0; i < num_msdus; i++) {
3699 buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
3700 msdu_cookies[i]);
3701
3702 mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID,
3703 msdu_cookies[i]);
3704
3705 ar = ab->pdevs[mac_id].ar;
3706
3707 if (!ath11k_dp_process_rx_err_buf(ar, desc, buf_id, drop)) {
3708 n_bufs_reaped[mac_id]++;
3709 tot_n_bufs_reaped++;
3710 }
3711 }
3712
3713 if (tot_n_bufs_reaped >= quota) {
3714 tot_n_bufs_reaped = quota;
3715 goto exit;
3716 }
3717
3718 budget = quota - tot_n_bufs_reaped;
3719 }
3720
3721 exit:
3722 ath11k_hal_srng_access_end(ab, srng);
3723
3724 spin_unlock_bh(&srng->lock);
3725
3726 for (i = 0; i < ab->num_radios; i++) {
3727 if (!n_bufs_reaped[i])
3728 continue;
3729
3730 ar = ab->pdevs[i].ar;
3731 rx_ring = &ar->dp.rx_refill_buf_ring;
3732
3733 ath11k_dp_rxbufs_replenish(ab, i, rx_ring, n_bufs_reaped[i],
3734 HAL_RX_BUF_RBM_SW3_BM);
3735 }
3736
3737 return tot_n_bufs_reaped;
3738 }
3739
3740 static void ath11k_dp_rx_null_q_desc_sg_drop(struct ath11k *ar,
3741 int msdu_len,
3742 struct sk_buff_head *msdu_list)
3743 {
3744 struct sk_buff *skb, *tmp;
3745 struct ath11k_skb_rxcb *rxcb;
3746 int n_buffs;
3747
3748 n_buffs = DIV_ROUND_UP(msdu_len,
3749 (DP_RX_BUFFER_SIZE - ar->ab->hw_params.hal_desc_sz));
3750
3751 skb_queue_walk_safe(msdu_list, skb, tmp) {
3752 rxcb = ATH11K_SKB_RXCB(skb);
3753 if (rxcb->err_rel_src == HAL_WBM_REL_SRC_MODULE_REO &&
3754 rxcb->err_code == HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO) {
3755 if (!n_buffs)
3756 break;
3757 __skb_unlink(skb, msdu_list);
3758 dev_kfree_skb_any(skb);
3759 n_buffs--;
3760 }
3761 }
3762 }
3763
3764 static int ath11k_dp_rx_h_null_q_desc(struct ath11k *ar, struct sk_buff *msdu,
3765 struct ieee80211_rx_status *status,
3766 struct sk_buff_head *msdu_list)
3767 {
3768 u16 msdu_len;
3769 struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
3770 struct rx_attention *rx_attention;
3771 u8 l3pad_bytes;
3772 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
3773 u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
3774
3775 msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(ar->ab, desc);
3776
3777 if (!rxcb->is_frag && ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE)) {
3778 /* First buffer will be freed by the caller, so deduct its length */
3779 msdu_len = msdu_len - (DP_RX_BUFFER_SIZE - hal_rx_desc_sz);
3780 ath11k_dp_rx_null_q_desc_sg_drop(ar, msdu_len, msdu_list);
3781 return -EINVAL;
3782 }
3783
3784 rx_attention = ath11k_dp_rx_get_attention(ar->ab, desc);
3785 if (!ath11k_dp_rx_h_attn_msdu_done(rx_attention)) {
3786 ath11k_warn(ar->ab,
3787 "msdu_done bit not set in null_q_desc processing\n");
3788 __skb_queue_purge(msdu_list);
3789 return -EIO;
3790 }
3791
3792 /* Handle NULL queue descriptor violations arising out of a missing
3793 * REO queue for a given peer or a given TID. This typically
3794 * may happen if a packet is received on a QoS enabled TID before the
3795 * ADDBA negotiation for that TID has set up the TID queue. It
3796 * may also happen for MC/BC frames if they are not routed to the
3797 * non-QoS TID queue, in the absence of any other default TID queue.
3798 * This error can show up in both the REO destination and WBM release rings.
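* For such frames only the rx descriptor (and any L3 padding) is
* stripped here before the MSDU goes through the regular rx handlers.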
3799 */ 3800 3801 rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(ar->ab, desc); 3802 rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(ar->ab, desc); 3803 3804 if (rxcb->is_frag) { 3805 skb_pull(msdu, hal_rx_desc_sz); 3806 } else { 3807 l3pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, desc); 3808 3809 if ((hal_rx_desc_sz + l3pad_bytes + msdu_len) > DP_RX_BUFFER_SIZE) 3810 return -EINVAL; 3811 3812 skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len); 3813 skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes); 3814 } 3815 ath11k_dp_rx_h_ppdu(ar, desc, status); 3816 3817 ath11k_dp_rx_h_mpdu(ar, msdu, desc, status); 3818 3819 rxcb->tid = ath11k_dp_rx_h_mpdu_start_tid(ar->ab, desc); 3820 3821 /* Please note that caller will having the access to msdu and completing 3822 * rx with mac80211. Need not worry about cleaning up amsdu_list. 3823 */ 3824 3825 return 0; 3826 } 3827 3828 static bool ath11k_dp_rx_h_reo_err(struct ath11k *ar, struct sk_buff *msdu, 3829 struct ieee80211_rx_status *status, 3830 struct sk_buff_head *msdu_list) 3831 { 3832 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); 3833 bool drop = false; 3834 3835 ar->ab->soc_stats.reo_error[rxcb->err_code]++; 3836 3837 switch (rxcb->err_code) { 3838 case HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO: 3839 if (ath11k_dp_rx_h_null_q_desc(ar, msdu, status, msdu_list)) 3840 drop = true; 3841 break; 3842 case HAL_REO_DEST_RING_ERROR_CODE_PN_CHECK_FAILED: 3843 /* TODO: Do not drop PN failed packets in the driver; 3844 * instead, it is good to drop such packets in mac80211 3845 * after incrementing the replay counters. 3846 */ 3847 fallthrough; 3848 default: 3849 /* TODO: Review other errors and process them to mac80211 3850 * as appropriate. 3851 */ 3852 drop = true; 3853 break; 3854 } 3855 3856 return drop; 3857 } 3858 3859 static void ath11k_dp_rx_h_tkip_mic_err(struct ath11k *ar, struct sk_buff *msdu, 3860 struct ieee80211_rx_status *status) 3861 { 3862 u16 msdu_len; 3863 struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data; 3864 u8 l3pad_bytes; 3865 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); 3866 u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz; 3867 3868 rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(ar->ab, desc); 3869 rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(ar->ab, desc); 3870 3871 l3pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, desc); 3872 msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(ar->ab, desc); 3873 skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len); 3874 skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes); 3875 3876 ath11k_dp_rx_h_ppdu(ar, desc, status); 3877 3878 status->flag |= (RX_FLAG_MMIC_STRIPPED | RX_FLAG_MMIC_ERROR | 3879 RX_FLAG_DECRYPTED); 3880 3881 ath11k_dp_rx_h_undecap(ar, msdu, desc, 3882 HAL_ENCRYPT_TYPE_TKIP_MIC, status, false); 3883 } 3884 3885 static bool ath11k_dp_rx_h_rxdma_err(struct ath11k *ar, struct sk_buff *msdu, 3886 struct ieee80211_rx_status *status) 3887 { 3888 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); 3889 bool drop = false; 3890 3891 ar->ab->soc_stats.rxdma_error[rxcb->err_code]++; 3892 3893 switch (rxcb->err_code) { 3894 case HAL_REO_ENTR_RING_RXDMA_ECODE_TKIP_MIC_ERR: 3895 ath11k_dp_rx_h_tkip_mic_err(ar, msdu, status); 3896 break; 3897 default: 3898 /* TODO: Review other rxdma error code to check if anything is 3899 * worth reporting to mac80211 3900 */ 3901 drop = true; 3902 break; 3903 } 3904 3905 return drop; 3906 } 3907 3908 static void ath11k_dp_rx_wbm_err(struct ath11k *ar, 3909 struct napi_struct *napi, 3910 struct 
sk_buff *msdu, 3911 struct sk_buff_head *msdu_list) 3912 { 3913 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); 3914 struct ieee80211_rx_status rxs = {0}; 3915 struct ieee80211_rx_status *status; 3916 bool drop = true; 3917 3918 switch (rxcb->err_rel_src) { 3919 case HAL_WBM_REL_SRC_MODULE_REO: 3920 drop = ath11k_dp_rx_h_reo_err(ar, msdu, &rxs, msdu_list); 3921 break; 3922 case HAL_WBM_REL_SRC_MODULE_RXDMA: 3923 drop = ath11k_dp_rx_h_rxdma_err(ar, msdu, &rxs); 3924 break; 3925 default: 3926 /* msdu will get freed */ 3927 break; 3928 } 3929 3930 if (drop) { 3931 dev_kfree_skb_any(msdu); 3932 return; 3933 } 3934 3935 status = IEEE80211_SKB_RXCB(msdu); 3936 *status = rxs; 3937 3938 ath11k_dp_rx_deliver_msdu(ar, napi, msdu); 3939 } 3940 3941 int ath11k_dp_rx_process_wbm_err(struct ath11k_base *ab, 3942 struct napi_struct *napi, int budget) 3943 { 3944 struct ath11k *ar; 3945 struct ath11k_dp *dp = &ab->dp; 3946 struct dp_rxdma_ring *rx_ring; 3947 struct hal_rx_wbm_rel_info err_info; 3948 struct hal_srng *srng; 3949 struct sk_buff *msdu; 3950 struct sk_buff_head msdu_list[MAX_RADIOS]; 3951 struct ath11k_skb_rxcb *rxcb; 3952 u32 *rx_desc; 3953 int buf_id, mac_id; 3954 int num_buffs_reaped[MAX_RADIOS] = {0}; 3955 int total_num_buffs_reaped = 0; 3956 int ret, i; 3957 3958 for (i = 0; i < ab->num_radios; i++) 3959 __skb_queue_head_init(&msdu_list[i]); 3960 3961 srng = &ab->hal.srng_list[dp->rx_rel_ring.ring_id]; 3962 3963 spin_lock_bh(&srng->lock); 3964 3965 ath11k_hal_srng_access_begin(ab, srng); 3966 3967 while (budget) { 3968 rx_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng); 3969 if (!rx_desc) 3970 break; 3971 3972 ret = ath11k_hal_wbm_desc_parse_err(ab, rx_desc, &err_info); 3973 if (ret) { 3974 ath11k_warn(ab, 3975 "failed to parse rx error in wbm_rel ring desc %d\n", 3976 ret); 3977 continue; 3978 } 3979 3980 buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, err_info.cookie); 3981 mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, err_info.cookie); 3982 3983 ar = ab->pdevs[mac_id].ar; 3984 rx_ring = &ar->dp.rx_refill_buf_ring; 3985 3986 spin_lock_bh(&rx_ring->idr_lock); 3987 msdu = idr_find(&rx_ring->bufs_idr, buf_id); 3988 if (!msdu) { 3989 ath11k_warn(ab, "frame rx with invalid buf_id %d pdev %d\n", 3990 buf_id, mac_id); 3991 spin_unlock_bh(&rx_ring->idr_lock); 3992 continue; 3993 } 3994 3995 idr_remove(&rx_ring->bufs_idr, buf_id); 3996 spin_unlock_bh(&rx_ring->idr_lock); 3997 3998 rxcb = ATH11K_SKB_RXCB(msdu); 3999 dma_unmap_single(ab->dev, rxcb->paddr, 4000 msdu->len + skb_tailroom(msdu), 4001 DMA_FROM_DEVICE); 4002 4003 num_buffs_reaped[mac_id]++; 4004 total_num_buffs_reaped++; 4005 budget--; 4006 4007 if (err_info.push_reason != 4008 HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) { 4009 dev_kfree_skb_any(msdu); 4010 continue; 4011 } 4012 4013 rxcb->err_rel_src = err_info.err_rel_src; 4014 rxcb->err_code = err_info.err_code; 4015 rxcb->rx_desc = (struct hal_rx_desc *)msdu->data; 4016 __skb_queue_tail(&msdu_list[mac_id], msdu); 4017 } 4018 4019 ath11k_hal_srng_access_end(ab, srng); 4020 4021 spin_unlock_bh(&srng->lock); 4022 4023 if (!total_num_buffs_reaped) 4024 goto done; 4025 4026 for (i = 0; i < ab->num_radios; i++) { 4027 if (!num_buffs_reaped[i]) 4028 continue; 4029 4030 ar = ab->pdevs[i].ar; 4031 rx_ring = &ar->dp.rx_refill_buf_ring; 4032 4033 ath11k_dp_rxbufs_replenish(ab, i, rx_ring, num_buffs_reaped[i], 4034 HAL_RX_BUF_RBM_SW3_BM); 4035 } 4036 4037 rcu_read_lock(); 4038 for (i = 0; i < ab->num_radios; i++) { 4039 if (!rcu_dereference(ab->pdevs_active[i])) { 4040 
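			/* This pdev is no longer active, so drop the MSDUs
			 * queued for it instead of delivering them.
			 */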
__skb_queue_purge(&msdu_list[i]); 4041 continue; 4042 } 4043 4044 ar = ab->pdevs[i].ar; 4045 4046 if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) { 4047 __skb_queue_purge(&msdu_list[i]); 4048 continue; 4049 } 4050 4051 while ((msdu = __skb_dequeue(&msdu_list[i])) != NULL) 4052 ath11k_dp_rx_wbm_err(ar, napi, msdu, &msdu_list[i]); 4053 } 4054 rcu_read_unlock(); 4055 done: 4056 return total_num_buffs_reaped; 4057 } 4058 4059 int ath11k_dp_process_rxdma_err(struct ath11k_base *ab, int mac_id, int budget) 4060 { 4061 struct ath11k *ar; 4062 struct dp_srng *err_ring; 4063 struct dp_rxdma_ring *rx_ring; 4064 struct dp_link_desc_bank *link_desc_banks = ab->dp.link_desc_banks; 4065 struct hal_srng *srng; 4066 u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC]; 4067 enum hal_rx_buf_return_buf_manager rbm; 4068 enum hal_reo_entr_rxdma_ecode rxdma_err_code; 4069 struct ath11k_skb_rxcb *rxcb; 4070 struct sk_buff *skb; 4071 struct hal_reo_entrance_ring *entr_ring; 4072 void *desc; 4073 int num_buf_freed = 0; 4074 int quota = budget; 4075 dma_addr_t paddr; 4076 u32 desc_bank; 4077 void *link_desc_va; 4078 int num_msdus; 4079 int i; 4080 int buf_id; 4081 4082 ar = ab->pdevs[ath11k_hw_mac_id_to_pdev_id(&ab->hw_params, mac_id)].ar; 4083 err_ring = &ar->dp.rxdma_err_dst_ring[ath11k_hw_mac_id_to_srng_id(&ab->hw_params, 4084 mac_id)]; 4085 rx_ring = &ar->dp.rx_refill_buf_ring; 4086 4087 srng = &ab->hal.srng_list[err_ring->ring_id]; 4088 4089 spin_lock_bh(&srng->lock); 4090 4091 ath11k_hal_srng_access_begin(ab, srng); 4092 4093 while (quota-- && 4094 (desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) { 4095 ath11k_hal_rx_reo_ent_paddr_get(ab, desc, &paddr, &desc_bank); 4096 4097 entr_ring = (struct hal_reo_entrance_ring *)desc; 4098 rxdma_err_code = 4099 FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE, 4100 entr_ring->info1); 4101 ab->soc_stats.rxdma_error[rxdma_err_code]++; 4102 4103 link_desc_va = link_desc_banks[desc_bank].vaddr + 4104 (paddr - link_desc_banks[desc_bank].paddr); 4105 ath11k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, 4106 msdu_cookies, &rbm); 4107 4108 for (i = 0; i < num_msdus; i++) { 4109 buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, 4110 msdu_cookies[i]); 4111 4112 spin_lock_bh(&rx_ring->idr_lock); 4113 skb = idr_find(&rx_ring->bufs_idr, buf_id); 4114 if (!skb) { 4115 ath11k_warn(ab, "rxdma error with invalid buf_id %d\n", 4116 buf_id); 4117 spin_unlock_bh(&rx_ring->idr_lock); 4118 continue; 4119 } 4120 4121 idr_remove(&rx_ring->bufs_idr, buf_id); 4122 spin_unlock_bh(&rx_ring->idr_lock); 4123 4124 rxcb = ATH11K_SKB_RXCB(skb); 4125 dma_unmap_single(ab->dev, rxcb->paddr, 4126 skb->len + skb_tailroom(skb), 4127 DMA_FROM_DEVICE); 4128 dev_kfree_skb_any(skb); 4129 4130 num_buf_freed++; 4131 } 4132 4133 ath11k_dp_rx_link_desc_return(ab, desc, 4134 HAL_WBM_REL_BM_ACT_PUT_IN_IDLE); 4135 } 4136 4137 ath11k_hal_srng_access_end(ab, srng); 4138 4139 spin_unlock_bh(&srng->lock); 4140 4141 if (num_buf_freed) 4142 ath11k_dp_rxbufs_replenish(ab, mac_id, rx_ring, num_buf_freed, 4143 HAL_RX_BUF_RBM_SW3_BM); 4144 4145 return budget - quota; 4146 } 4147 4148 void ath11k_dp_process_reo_status(struct ath11k_base *ab) 4149 { 4150 struct ath11k_dp *dp = &ab->dp; 4151 struct hal_srng *srng; 4152 struct dp_reo_cmd *cmd, *tmp; 4153 bool found = false; 4154 u32 *reo_desc; 4155 u16 tag; 4156 struct hal_reo_status reo_status; 4157 4158 srng = &ab->hal.srng_list[dp->reo_status_ring.ring_id]; 4159 4160 memset(&reo_status, 0, sizeof(reo_status)); 4161 4162 spin_lock_bh(&srng->lock); 4163 4164 
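	/* Reap all available REO status entries and complete the matching
	 * commands waiting on dp->reo_cmd_list.
	 */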
ath11k_hal_srng_access_begin(ab, srng); 4165 4166 while ((reo_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) { 4167 tag = FIELD_GET(HAL_SRNG_TLV_HDR_TAG, *reo_desc); 4168 4169 switch (tag) { 4170 case HAL_REO_GET_QUEUE_STATS_STATUS: 4171 ath11k_hal_reo_status_queue_stats(ab, reo_desc, 4172 &reo_status); 4173 break; 4174 case HAL_REO_FLUSH_QUEUE_STATUS: 4175 ath11k_hal_reo_flush_queue_status(ab, reo_desc, 4176 &reo_status); 4177 break; 4178 case HAL_REO_FLUSH_CACHE_STATUS: 4179 ath11k_hal_reo_flush_cache_status(ab, reo_desc, 4180 &reo_status); 4181 break; 4182 case HAL_REO_UNBLOCK_CACHE_STATUS: 4183 ath11k_hal_reo_unblk_cache_status(ab, reo_desc, 4184 &reo_status); 4185 break; 4186 case HAL_REO_FLUSH_TIMEOUT_LIST_STATUS: 4187 ath11k_hal_reo_flush_timeout_list_status(ab, reo_desc, 4188 &reo_status); 4189 break; 4190 case HAL_REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS: 4191 ath11k_hal_reo_desc_thresh_reached_status(ab, reo_desc, 4192 &reo_status); 4193 break; 4194 case HAL_REO_UPDATE_RX_REO_QUEUE_STATUS: 4195 ath11k_hal_reo_update_rx_reo_queue_status(ab, reo_desc, 4196 &reo_status); 4197 break; 4198 default: 4199 ath11k_warn(ab, "Unknown reo status type %d\n", tag); 4200 continue; 4201 } 4202 4203 spin_lock_bh(&dp->reo_cmd_lock); 4204 list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) { 4205 if (reo_status.uniform_hdr.cmd_num == cmd->cmd_num) { 4206 found = true; 4207 list_del(&cmd->list); 4208 break; 4209 } 4210 } 4211 spin_unlock_bh(&dp->reo_cmd_lock); 4212 4213 if (found) { 4214 cmd->handler(dp, (void *)&cmd->data, 4215 reo_status.uniform_hdr.cmd_status); 4216 kfree(cmd); 4217 } 4218 4219 found = false; 4220 } 4221 4222 ath11k_hal_srng_access_end(ab, srng); 4223 4224 spin_unlock_bh(&srng->lock); 4225 } 4226 4227 void ath11k_dp_rx_pdev_free(struct ath11k_base *ab, int mac_id) 4228 { 4229 struct ath11k *ar = ab->pdevs[mac_id].ar; 4230 4231 ath11k_dp_rx_pdev_srng_free(ar); 4232 ath11k_dp_rxdma_pdev_buf_free(ar); 4233 } 4234 4235 int ath11k_dp_rx_pdev_alloc(struct ath11k_base *ab, int mac_id) 4236 { 4237 struct ath11k *ar = ab->pdevs[mac_id].ar; 4238 struct ath11k_pdev_dp *dp = &ar->dp; 4239 u32 ring_id; 4240 int i; 4241 int ret; 4242 4243 ret = ath11k_dp_rx_pdev_srng_alloc(ar); 4244 if (ret) { 4245 ath11k_warn(ab, "failed to setup rx srngs\n"); 4246 return ret; 4247 } 4248 4249 ret = ath11k_dp_rxdma_pdev_buf_setup(ar); 4250 if (ret) { 4251 ath11k_warn(ab, "failed to setup rxdma ring\n"); 4252 return ret; 4253 } 4254 4255 ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id; 4256 ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id, HAL_RXDMA_BUF); 4257 if (ret) { 4258 ath11k_warn(ab, "failed to configure rx_refill_buf_ring %d\n", 4259 ret); 4260 return ret; 4261 } 4262 4263 if (ab->hw_params.rx_mac_buf_ring) { 4264 for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) { 4265 ring_id = dp->rx_mac_buf_ring[i].ring_id; 4266 ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, 4267 mac_id + i, HAL_RXDMA_BUF); 4268 if (ret) { 4269 ath11k_warn(ab, "failed to configure rx_mac_buf_ring%d %d\n", 4270 i, ret); 4271 return ret; 4272 } 4273 } 4274 } 4275 4276 for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) { 4277 ring_id = dp->rxdma_err_dst_ring[i].ring_id; 4278 ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, 4279 mac_id + i, HAL_RXDMA_DST); 4280 if (ret) { 4281 ath11k_warn(ab, "failed to configure rxdma_err_dest_ring%d %d\n", 4282 i, ret); 4283 return ret; 4284 } 4285 } 4286 4287 if (!ab->hw_params.rxdma1_enable) 4288 goto config_refill_ring; 4289 4290 ring_id = 
dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id; 4291 ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, 4292 mac_id, HAL_RXDMA_MONITOR_BUF); 4293 if (ret) { 4294 ath11k_warn(ab, "failed to configure rxdma_mon_buf_ring %d\n", 4295 ret); 4296 return ret; 4297 } 4298 ret = ath11k_dp_tx_htt_srng_setup(ab, 4299 dp->rxdma_mon_dst_ring.ring_id, 4300 mac_id, HAL_RXDMA_MONITOR_DST); 4301 if (ret) { 4302 ath11k_warn(ab, "failed to configure rxdma_mon_dst_ring %d\n", 4303 ret); 4304 return ret; 4305 } 4306 ret = ath11k_dp_tx_htt_srng_setup(ab, 4307 dp->rxdma_mon_desc_ring.ring_id, 4308 mac_id, HAL_RXDMA_MONITOR_DESC); 4309 if (ret) { 4310 ath11k_warn(ab, "failed to configure rxdma_mon_dst_ring %d\n", 4311 ret); 4312 return ret; 4313 } 4314 4315 config_refill_ring: 4316 for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) { 4317 ring_id = dp->rx_mon_status_refill_ring[i].refill_buf_ring.ring_id; 4318 ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id + i, 4319 HAL_RXDMA_MONITOR_STATUS); 4320 if (ret) { 4321 ath11k_warn(ab, 4322 "failed to configure mon_status_refill_ring%d %d\n", 4323 i, ret); 4324 return ret; 4325 } 4326 } 4327 4328 return 0; 4329 } 4330 4331 static void ath11k_dp_mon_set_frag_len(u32 *total_len, u32 *frag_len) 4332 { 4333 if (*total_len >= (DP_RX_BUFFER_SIZE - sizeof(struct hal_rx_desc))) { 4334 *frag_len = DP_RX_BUFFER_SIZE - sizeof(struct hal_rx_desc); 4335 *total_len -= *frag_len; 4336 } else { 4337 *frag_len = *total_len; 4338 *total_len = 0; 4339 } 4340 } 4341 4342 static 4343 int ath11k_dp_rx_monitor_link_desc_return(struct ath11k *ar, 4344 void *p_last_buf_addr_info, 4345 u8 mac_id) 4346 { 4347 struct ath11k_pdev_dp *dp = &ar->dp; 4348 struct dp_srng *dp_srng; 4349 void *hal_srng; 4350 void *src_srng_desc; 4351 int ret = 0; 4352 4353 if (ar->ab->hw_params.rxdma1_enable) { 4354 dp_srng = &dp->rxdma_mon_desc_ring; 4355 hal_srng = &ar->ab->hal.srng_list[dp_srng->ring_id]; 4356 } else { 4357 dp_srng = &ar->ab->dp.wbm_desc_rel_ring; 4358 hal_srng = &ar->ab->hal.srng_list[dp_srng->ring_id]; 4359 } 4360 4361 ath11k_hal_srng_access_begin(ar->ab, hal_srng); 4362 4363 src_srng_desc = ath11k_hal_srng_src_get_next_entry(ar->ab, hal_srng); 4364 4365 if (src_srng_desc) { 4366 struct ath11k_buffer_addr *src_desc = 4367 (struct ath11k_buffer_addr *)src_srng_desc; 4368 4369 *src_desc = *((struct ath11k_buffer_addr *)p_last_buf_addr_info); 4370 } else { 4371 ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 4372 "Monitor Link Desc Ring %d Full", mac_id); 4373 ret = -ENOMEM; 4374 } 4375 4376 ath11k_hal_srng_access_end(ar->ab, hal_srng); 4377 return ret; 4378 } 4379 4380 static 4381 void ath11k_dp_rx_mon_next_link_desc_get(void *rx_msdu_link_desc, 4382 dma_addr_t *paddr, u32 *sw_cookie, 4383 u8 *rbm, 4384 void **pp_buf_addr_info) 4385 { 4386 struct hal_rx_msdu_link *msdu_link = 4387 (struct hal_rx_msdu_link *)rx_msdu_link_desc; 4388 struct ath11k_buffer_addr *buf_addr_info; 4389 4390 buf_addr_info = (struct ath11k_buffer_addr *)&msdu_link->buf_addr_info; 4391 4392 ath11k_hal_rx_buf_addr_info_get(buf_addr_info, paddr, sw_cookie, rbm); 4393 4394 *pp_buf_addr_info = (void *)buf_addr_info; 4395 } 4396 4397 static int ath11k_dp_pkt_set_pktlen(struct sk_buff *skb, u32 len) 4398 { 4399 if (skb->len > len) { 4400 skb_trim(skb, len); 4401 } else { 4402 if (skb_tailroom(skb) < len - skb->len) { 4403 if ((pskb_expand_head(skb, 0, 4404 len - skb->len - skb_tailroom(skb), 4405 GFP_ATOMIC))) { 4406 dev_kfree_skb_any(skb); 4407 return -ENOMEM; 4408 } 4409 } 4410 skb_put(skb, (len - skb->len)); 4411 } 4412 return 0; 4413 } 
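/* Collect the flags, length, SW cookie and return buffer manager of every
 * MSDU referenced by a monitor link descriptor. The walk stops at the first
 * entry with a zero buffer address, marking the previous entry as the last
 * MSDU of the MPDU.
 */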
4414 4415 static void ath11k_hal_rx_msdu_list_get(struct ath11k *ar, 4416 void *msdu_link_desc, 4417 struct hal_rx_msdu_list *msdu_list, 4418 u16 *num_msdus) 4419 { 4420 struct hal_rx_msdu_details *msdu_details = NULL; 4421 struct rx_msdu_desc *msdu_desc_info = NULL; 4422 struct hal_rx_msdu_link *msdu_link = NULL; 4423 int i; 4424 u32 last = FIELD_PREP(RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU, 1); 4425 u32 first = FIELD_PREP(RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU, 1); 4426 u8 tmp = 0; 4427 4428 msdu_link = (struct hal_rx_msdu_link *)msdu_link_desc; 4429 msdu_details = &msdu_link->msdu_link[0]; 4430 4431 for (i = 0; i < HAL_RX_NUM_MSDU_DESC; i++) { 4432 if (FIELD_GET(BUFFER_ADDR_INFO0_ADDR, 4433 msdu_details[i].buf_addr_info.info0) == 0) { 4434 msdu_desc_info = &msdu_details[i - 1].rx_msdu_info; 4435 msdu_desc_info->info0 |= last; 4436 ; 4437 break; 4438 } 4439 msdu_desc_info = &msdu_details[i].rx_msdu_info; 4440 4441 if (!i) 4442 msdu_desc_info->info0 |= first; 4443 else if (i == (HAL_RX_NUM_MSDU_DESC - 1)) 4444 msdu_desc_info->info0 |= last; 4445 msdu_list->msdu_info[i].msdu_flags = msdu_desc_info->info0; 4446 msdu_list->msdu_info[i].msdu_len = 4447 HAL_RX_MSDU_PKT_LENGTH_GET(msdu_desc_info->info0); 4448 msdu_list->sw_cookie[i] = 4449 FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE, 4450 msdu_details[i].buf_addr_info.info1); 4451 tmp = FIELD_GET(BUFFER_ADDR_INFO1_RET_BUF_MGR, 4452 msdu_details[i].buf_addr_info.info1); 4453 msdu_list->rbm[i] = tmp; 4454 } 4455 *num_msdus = i; 4456 } 4457 4458 static u32 ath11k_dp_rx_mon_comp_ppduid(u32 msdu_ppdu_id, u32 *ppdu_id, 4459 u32 *rx_bufs_used) 4460 { 4461 u32 ret = 0; 4462 4463 if ((*ppdu_id < msdu_ppdu_id) && 4464 ((msdu_ppdu_id - *ppdu_id) < DP_NOT_PPDU_ID_WRAP_AROUND)) { 4465 *ppdu_id = msdu_ppdu_id; 4466 ret = msdu_ppdu_id; 4467 } else if ((*ppdu_id > msdu_ppdu_id) && 4468 ((*ppdu_id - msdu_ppdu_id) > DP_NOT_PPDU_ID_WRAP_AROUND)) { 4469 /* mon_dst is behind than mon_status 4470 * skip dst_ring and free it 4471 */ 4472 *rx_bufs_used += 1; 4473 *ppdu_id = msdu_ppdu_id; 4474 ret = msdu_ppdu_id; 4475 } 4476 return ret; 4477 } 4478 4479 static void ath11k_dp_mon_get_buf_len(struct hal_rx_msdu_desc_info *info, 4480 bool *is_frag, u32 *total_len, 4481 u32 *frag_len, u32 *msdu_cnt) 4482 { 4483 if (info->msdu_flags & RX_MSDU_DESC_INFO0_MSDU_CONTINUATION) { 4484 if (!*is_frag) { 4485 *total_len = info->msdu_len; 4486 *is_frag = true; 4487 } 4488 ath11k_dp_mon_set_frag_len(total_len, 4489 frag_len); 4490 } else { 4491 if (*is_frag) { 4492 ath11k_dp_mon_set_frag_len(total_len, 4493 frag_len); 4494 } else { 4495 *frag_len = info->msdu_len; 4496 } 4497 *is_frag = false; 4498 *msdu_cnt -= 1; 4499 } 4500 } 4501 4502 static u32 4503 ath11k_dp_rx_mon_mpdu_pop(struct ath11k *ar, int mac_id, 4504 void *ring_entry, struct sk_buff **head_msdu, 4505 struct sk_buff **tail_msdu, u32 *npackets, 4506 u32 *ppdu_id) 4507 { 4508 struct ath11k_pdev_dp *dp = &ar->dp; 4509 struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data; 4510 struct dp_rxdma_ring *rx_ring = &dp->rxdma_mon_buf_ring; 4511 struct sk_buff *msdu = NULL, *last = NULL; 4512 struct hal_rx_msdu_list msdu_list; 4513 void *p_buf_addr_info, *p_last_buf_addr_info; 4514 struct hal_rx_desc *rx_desc; 4515 void *rx_msdu_link_desc; 4516 dma_addr_t paddr; 4517 u16 num_msdus = 0; 4518 u32 rx_buf_size, rx_pkt_offset, sw_cookie; 4519 u32 rx_bufs_used = 0, i = 0; 4520 u32 msdu_ppdu_id = 0, msdu_cnt = 0; 4521 u32 total_len = 0, frag_len = 0; 4522 bool is_frag, is_first_msdu; 4523 bool drop_mpdu = false; 4524 struct 
ath11k_skb_rxcb *rxcb; 4525 struct hal_reo_entrance_ring *ent_desc = 4526 (struct hal_reo_entrance_ring *)ring_entry; 4527 int buf_id; 4528 u32 rx_link_buf_info[2]; 4529 u8 rbm; 4530 4531 if (!ar->ab->hw_params.rxdma1_enable) 4532 rx_ring = &dp->rx_refill_buf_ring; 4533 4534 ath11k_hal_rx_reo_ent_buf_paddr_get(ring_entry, &paddr, 4535 &sw_cookie, 4536 &p_last_buf_addr_info, &rbm, 4537 &msdu_cnt); 4538 4539 if (FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_PUSH_REASON, 4540 ent_desc->info1) == 4541 HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) { 4542 u8 rxdma_err = 4543 FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE, 4544 ent_desc->info1); 4545 if (rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_FLUSH_REQUEST_ERR || 4546 rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_MPDU_LEN_ERR || 4547 rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_OVERFLOW_ERR) { 4548 drop_mpdu = true; 4549 pmon->rx_mon_stats.dest_mpdu_drop++; 4550 } 4551 } 4552 4553 is_frag = false; 4554 is_first_msdu = true; 4555 4556 do { 4557 if (pmon->mon_last_linkdesc_paddr == paddr) { 4558 pmon->rx_mon_stats.dup_mon_linkdesc_cnt++; 4559 return rx_bufs_used; 4560 } 4561 4562 if (ar->ab->hw_params.rxdma1_enable) 4563 rx_msdu_link_desc = 4564 (void *)pmon->link_desc_banks[sw_cookie].vaddr + 4565 (paddr - pmon->link_desc_banks[sw_cookie].paddr); 4566 else 4567 rx_msdu_link_desc = 4568 (void *)ar->ab->dp.link_desc_banks[sw_cookie].vaddr + 4569 (paddr - ar->ab->dp.link_desc_banks[sw_cookie].paddr); 4570 4571 ath11k_hal_rx_msdu_list_get(ar, rx_msdu_link_desc, &msdu_list, 4572 &num_msdus); 4573 4574 for (i = 0; i < num_msdus; i++) { 4575 u32 l2_hdr_offset; 4576 4577 if (pmon->mon_last_buf_cookie == msdu_list.sw_cookie[i]) { 4578 ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 4579 "i %d last_cookie %d is same\n", 4580 i, pmon->mon_last_buf_cookie); 4581 drop_mpdu = true; 4582 pmon->rx_mon_stats.dup_mon_buf_cnt++; 4583 continue; 4584 } 4585 buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, 4586 msdu_list.sw_cookie[i]); 4587 4588 spin_lock_bh(&rx_ring->idr_lock); 4589 msdu = idr_find(&rx_ring->bufs_idr, buf_id); 4590 spin_unlock_bh(&rx_ring->idr_lock); 4591 if (!msdu) { 4592 ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 4593 "msdu_pop: invalid buf_id %d\n", buf_id); 4594 break; 4595 } 4596 rxcb = ATH11K_SKB_RXCB(msdu); 4597 if (!rxcb->unmapped) { 4598 dma_unmap_single(ar->ab->dev, rxcb->paddr, 4599 msdu->len + 4600 skb_tailroom(msdu), 4601 DMA_FROM_DEVICE); 4602 rxcb->unmapped = 1; 4603 } 4604 if (drop_mpdu) { 4605 ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 4606 "i %d drop msdu %p *ppdu_id %x\n", 4607 i, msdu, *ppdu_id); 4608 dev_kfree_skb_any(msdu); 4609 msdu = NULL; 4610 goto next_msdu; 4611 } 4612 4613 rx_desc = (struct hal_rx_desc *)msdu->data; 4614 4615 rx_pkt_offset = sizeof(struct hal_rx_desc); 4616 l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, rx_desc); 4617 4618 if (is_first_msdu) { 4619 if (!ath11k_dp_rxdesc_mpdu_valid(ar->ab, rx_desc)) { 4620 drop_mpdu = true; 4621 dev_kfree_skb_any(msdu); 4622 msdu = NULL; 4623 pmon->mon_last_linkdesc_paddr = paddr; 4624 goto next_msdu; 4625 } 4626 4627 msdu_ppdu_id = 4628 ath11k_dp_rxdesc_get_ppduid(ar->ab, rx_desc); 4629 4630 if (ath11k_dp_rx_mon_comp_ppduid(msdu_ppdu_id, 4631 ppdu_id, 4632 &rx_bufs_used)) { 4633 if (rx_bufs_used) { 4634 drop_mpdu = true; 4635 dev_kfree_skb_any(msdu); 4636 msdu = NULL; 4637 goto next_msdu; 4638 } 4639 return rx_bufs_used; 4640 } 4641 pmon->mon_last_linkdesc_paddr = paddr; 4642 is_first_msdu = false; 4643 } 4644 ath11k_dp_mon_get_buf_len(&msdu_list.msdu_info[i], 4645 &is_frag, &total_len, 4646 
&frag_len, &msdu_cnt); 4647 rx_buf_size = rx_pkt_offset + l2_hdr_offset + frag_len; 4648 4649 ath11k_dp_pkt_set_pktlen(msdu, rx_buf_size); 4650 4651 if (!(*head_msdu)) 4652 *head_msdu = msdu; 4653 else if (last) 4654 last->next = msdu; 4655 4656 last = msdu; 4657 next_msdu: 4658 pmon->mon_last_buf_cookie = msdu_list.sw_cookie[i]; 4659 rx_bufs_used++; 4660 spin_lock_bh(&rx_ring->idr_lock); 4661 idr_remove(&rx_ring->bufs_idr, buf_id); 4662 spin_unlock_bh(&rx_ring->idr_lock); 4663 } 4664 4665 ath11k_hal_rx_buf_addr_info_set(rx_link_buf_info, paddr, sw_cookie, rbm); 4666 4667 ath11k_dp_rx_mon_next_link_desc_get(rx_msdu_link_desc, &paddr, 4668 &sw_cookie, &rbm, 4669 &p_buf_addr_info); 4670 4671 if (ar->ab->hw_params.rxdma1_enable) { 4672 if (ath11k_dp_rx_monitor_link_desc_return(ar, 4673 p_last_buf_addr_info, 4674 dp->mac_id)) 4675 ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 4676 "dp_rx_monitor_link_desc_return failed"); 4677 } else { 4678 ath11k_dp_rx_link_desc_return(ar->ab, rx_link_buf_info, 4679 HAL_WBM_REL_BM_ACT_PUT_IN_IDLE); 4680 } 4681 4682 p_last_buf_addr_info = p_buf_addr_info; 4683 4684 } while (paddr && msdu_cnt); 4685 4686 if (last) 4687 last->next = NULL; 4688 4689 *tail_msdu = msdu; 4690 4691 if (msdu_cnt == 0) 4692 *npackets = 1; 4693 4694 return rx_bufs_used; 4695 } 4696 4697 static void ath11k_dp_rx_msdus_set_payload(struct ath11k *ar, struct sk_buff *msdu) 4698 { 4699 u32 rx_pkt_offset, l2_hdr_offset; 4700 4701 rx_pkt_offset = ar->ab->hw_params.hal_desc_sz; 4702 l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, 4703 (struct hal_rx_desc *)msdu->data); 4704 skb_pull(msdu, rx_pkt_offset + l2_hdr_offset); 4705 } 4706 4707 static struct sk_buff * 4708 ath11k_dp_rx_mon_merg_msdus(struct ath11k *ar, 4709 u32 mac_id, struct sk_buff *head_msdu, 4710 struct sk_buff *last_msdu, 4711 struct ieee80211_rx_status *rxs) 4712 { 4713 struct ath11k_base *ab = ar->ab; 4714 struct sk_buff *msdu, *mpdu_buf, *prev_buf; 4715 u32 wifi_hdr_len; 4716 struct hal_rx_desc *rx_desc; 4717 char *hdr_desc; 4718 u8 *dest, decap_format; 4719 struct ieee80211_hdr_3addr *wh; 4720 struct rx_attention *rx_attention; 4721 4722 mpdu_buf = NULL; 4723 4724 if (!head_msdu) 4725 goto err_merge_fail; 4726 4727 rx_desc = (struct hal_rx_desc *)head_msdu->data; 4728 rx_attention = ath11k_dp_rx_get_attention(ab, rx_desc); 4729 4730 if (ath11k_dp_rxdesc_get_mpdulen_err(rx_attention)) 4731 return NULL; 4732 4733 decap_format = ath11k_dp_rx_h_msdu_start_decap_type(ab, rx_desc); 4734 4735 ath11k_dp_rx_h_ppdu(ar, rx_desc, rxs); 4736 4737 if (decap_format == DP_RX_DECAP_TYPE_RAW) { 4738 ath11k_dp_rx_msdus_set_payload(ar, head_msdu); 4739 4740 prev_buf = head_msdu; 4741 msdu = head_msdu->next; 4742 4743 while (msdu) { 4744 ath11k_dp_rx_msdus_set_payload(ar, msdu); 4745 4746 prev_buf = msdu; 4747 msdu = msdu->next; 4748 } 4749 4750 prev_buf->next = NULL; 4751 4752 skb_trim(prev_buf, prev_buf->len - HAL_RX_FCS_LEN); 4753 } else if (decap_format == DP_RX_DECAP_TYPE_NATIVE_WIFI) { 4754 __le16 qos_field; 4755 u8 qos_pkt = 0; 4756 4757 rx_desc = (struct hal_rx_desc *)head_msdu->data; 4758 hdr_desc = ath11k_dp_rxdesc_get_80211hdr(ab, rx_desc); 4759 4760 /* Base size */ 4761 wifi_hdr_len = sizeof(struct ieee80211_hdr_3addr); 4762 wh = (struct ieee80211_hdr_3addr *)hdr_desc; 4763 4764 if (ieee80211_is_data_qos(wh->frame_control)) { 4765 struct ieee80211_qos_hdr *qwh = 4766 (struct ieee80211_qos_hdr *)hdr_desc; 4767 4768 qos_field = qwh->qos_ctrl; 4769 qos_pkt = 1; 4770 } 4771 msdu = head_msdu; 4772 4773 while (msdu) { 4774 rx_desc = (struct 
hal_rx_desc *)msdu->data; 4775 hdr_desc = ath11k_dp_rxdesc_get_80211hdr(ab, rx_desc); 4776 4777 if (qos_pkt) { 4778 dest = skb_push(msdu, sizeof(__le16)); 4779 if (!dest) 4780 goto err_merge_fail; 4781 memcpy(dest, hdr_desc, wifi_hdr_len); 4782 memcpy(dest + wifi_hdr_len, 4783 (u8 *)&qos_field, sizeof(__le16)); 4784 } 4785 ath11k_dp_rx_msdus_set_payload(ar, msdu); 4786 prev_buf = msdu; 4787 msdu = msdu->next; 4788 } 4789 dest = skb_put(prev_buf, HAL_RX_FCS_LEN); 4790 if (!dest) 4791 goto err_merge_fail; 4792 4793 ath11k_dbg(ab, ATH11K_DBG_DATA, 4794 "mpdu_buf %pK mpdu_buf->len %u", 4795 prev_buf, prev_buf->len); 4796 } else { 4797 ath11k_dbg(ab, ATH11K_DBG_DATA, 4798 "decap format %d is not supported!\n", 4799 decap_format); 4800 goto err_merge_fail; 4801 } 4802 4803 return head_msdu; 4804 4805 err_merge_fail: 4806 if (mpdu_buf && decap_format != DP_RX_DECAP_TYPE_RAW) { 4807 ath11k_dbg(ab, ATH11K_DBG_DATA, 4808 "err_merge_fail mpdu_buf %pK", mpdu_buf); 4809 /* Free the head buffer */ 4810 dev_kfree_skb_any(mpdu_buf); 4811 } 4812 return NULL; 4813 } 4814 4815 static int ath11k_dp_rx_mon_deliver(struct ath11k *ar, u32 mac_id, 4816 struct sk_buff *head_msdu, 4817 struct sk_buff *tail_msdu, 4818 struct napi_struct *napi) 4819 { 4820 struct ath11k_pdev_dp *dp = &ar->dp; 4821 struct sk_buff *mon_skb, *skb_next, *header; 4822 struct ieee80211_rx_status *rxs = &dp->rx_status, *status; 4823 4824 mon_skb = ath11k_dp_rx_mon_merg_msdus(ar, mac_id, head_msdu, 4825 tail_msdu, rxs); 4826 4827 if (!mon_skb) 4828 goto mon_deliver_fail; 4829 4830 header = mon_skb; 4831 4832 rxs->flag = 0; 4833 do { 4834 skb_next = mon_skb->next; 4835 if (!skb_next) 4836 rxs->flag &= ~RX_FLAG_AMSDU_MORE; 4837 else 4838 rxs->flag |= RX_FLAG_AMSDU_MORE; 4839 4840 if (mon_skb == header) { 4841 header = NULL; 4842 rxs->flag &= ~RX_FLAG_ALLOW_SAME_PN; 4843 } else { 4844 rxs->flag |= RX_FLAG_ALLOW_SAME_PN; 4845 } 4846 rxs->flag |= RX_FLAG_ONLY_MONITOR; 4847 4848 status = IEEE80211_SKB_RXCB(mon_skb); 4849 *status = *rxs; 4850 4851 ath11k_dp_rx_deliver_msdu(ar, napi, mon_skb); 4852 mon_skb = skb_next; 4853 } while (mon_skb); 4854 rxs->flag = 0; 4855 4856 return 0; 4857 4858 mon_deliver_fail: 4859 mon_skb = head_msdu; 4860 while (mon_skb) { 4861 skb_next = mon_skb->next; 4862 dev_kfree_skb_any(mon_skb); 4863 mon_skb = skb_next; 4864 } 4865 return -EINVAL; 4866 } 4867 4868 static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, int mac_id, 4869 u32 quota, struct napi_struct *napi) 4870 { 4871 struct ath11k_pdev_dp *dp = &ar->dp; 4872 struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data; 4873 void *ring_entry; 4874 void *mon_dst_srng; 4875 u32 ppdu_id; 4876 u32 rx_bufs_used; 4877 u32 ring_id; 4878 struct ath11k_pdev_mon_stats *rx_mon_stats; 4879 u32 npackets = 0; 4880 4881 if (ar->ab->hw_params.rxdma1_enable) 4882 ring_id = dp->rxdma_mon_dst_ring.ring_id; 4883 else 4884 ring_id = dp->rxdma_err_dst_ring[mac_id].ring_id; 4885 4886 mon_dst_srng = &ar->ab->hal.srng_list[ring_id]; 4887 4888 if (!mon_dst_srng) { 4889 ath11k_warn(ar->ab, 4890 "HAL Monitor Destination Ring Init Failed -- %pK", 4891 mon_dst_srng); 4892 return; 4893 } 4894 4895 spin_lock_bh(&pmon->mon_lock); 4896 4897 ath11k_hal_srng_access_begin(ar->ab, mon_dst_srng); 4898 4899 ppdu_id = pmon->mon_ppdu_info.ppdu_id; 4900 rx_bufs_used = 0; 4901 rx_mon_stats = &pmon->rx_mon_stats; 4902 4903 while ((ring_entry = ath11k_hal_srng_dst_peek(ar->ab, mon_dst_srng))) { 4904 struct sk_buff *head_msdu, *tail_msdu; 4905 4906 head_msdu = NULL; 4907 tail_msdu = NULL; 
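		/* Pop the MSDUs of one MPDU from the monitor destination ring
		 * and chain them into a head_msdu..tail_msdu list.
		 */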
4908 4909 rx_bufs_used += ath11k_dp_rx_mon_mpdu_pop(ar, mac_id, ring_entry, 4910 &head_msdu, 4911 &tail_msdu, 4912 &npackets, &ppdu_id); 4913 4914 if (ppdu_id != pmon->mon_ppdu_info.ppdu_id) { 4915 pmon->mon_ppdu_status = DP_PPDU_STATUS_START; 4916 ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 4917 "dest_rx: new ppdu_id %x != status ppdu_id %x", 4918 ppdu_id, pmon->mon_ppdu_info.ppdu_id); 4919 break; 4920 } 4921 if (head_msdu && tail_msdu) { 4922 ath11k_dp_rx_mon_deliver(ar, dp->mac_id, head_msdu, 4923 tail_msdu, napi); 4924 rx_mon_stats->dest_mpdu_done++; 4925 } 4926 4927 ring_entry = ath11k_hal_srng_dst_get_next_entry(ar->ab, 4928 mon_dst_srng); 4929 } 4930 ath11k_hal_srng_access_end(ar->ab, mon_dst_srng); 4931 4932 spin_unlock_bh(&pmon->mon_lock); 4933 4934 if (rx_bufs_used) { 4935 rx_mon_stats->dest_ppdu_done++; 4936 if (ar->ab->hw_params.rxdma1_enable) 4937 ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id, 4938 &dp->rxdma_mon_buf_ring, 4939 rx_bufs_used, 4940 HAL_RX_BUF_RBM_SW3_BM); 4941 else 4942 ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id, 4943 &dp->rx_refill_buf_ring, 4944 rx_bufs_used, 4945 HAL_RX_BUF_RBM_SW3_BM); 4946 } 4947 } 4948 4949 static void ath11k_dp_rx_mon_status_process_tlv(struct ath11k *ar, 4950 int mac_id, u32 quota, 4951 struct napi_struct *napi) 4952 { 4953 struct ath11k_pdev_dp *dp = &ar->dp; 4954 struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data; 4955 struct hal_rx_mon_ppdu_info *ppdu_info; 4956 struct sk_buff *status_skb; 4957 u32 tlv_status = HAL_TLV_STATUS_BUF_DONE; 4958 struct ath11k_pdev_mon_stats *rx_mon_stats; 4959 4960 ppdu_info = &pmon->mon_ppdu_info; 4961 rx_mon_stats = &pmon->rx_mon_stats; 4962 4963 if (pmon->mon_ppdu_status != DP_PPDU_STATUS_START) 4964 return; 4965 4966 while (!skb_queue_empty(&pmon->rx_status_q)) { 4967 status_skb = skb_dequeue(&pmon->rx_status_q); 4968 4969 tlv_status = ath11k_hal_rx_parse_mon_status(ar->ab, ppdu_info, 4970 status_skb); 4971 if (tlv_status == HAL_TLV_STATUS_PPDU_DONE) { 4972 rx_mon_stats->status_ppdu_done++; 4973 pmon->mon_ppdu_status = DP_PPDU_STATUS_DONE; 4974 ath11k_dp_rx_mon_dest_process(ar, mac_id, quota, napi); 4975 pmon->mon_ppdu_status = DP_PPDU_STATUS_START; 4976 } 4977 dev_kfree_skb_any(status_skb); 4978 } 4979 } 4980 4981 static int ath11k_dp_mon_process_rx(struct ath11k_base *ab, int mac_id, 4982 struct napi_struct *napi, int budget) 4983 { 4984 struct ath11k *ar = ath11k_ab_to_ar(ab, mac_id); 4985 struct ath11k_pdev_dp *dp = &ar->dp; 4986 struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data; 4987 int num_buffs_reaped = 0; 4988 4989 num_buffs_reaped = ath11k_dp_rx_reap_mon_status_ring(ar->ab, mac_id, &budget, 4990 &pmon->rx_status_q); 4991 if (num_buffs_reaped) 4992 ath11k_dp_rx_mon_status_process_tlv(ar, mac_id, budget, napi); 4993 4994 return num_buffs_reaped; 4995 } 4996 4997 int ath11k_dp_rx_process_mon_rings(struct ath11k_base *ab, int mac_id, 4998 struct napi_struct *napi, int budget) 4999 { 5000 struct ath11k *ar = ath11k_ab_to_ar(ab, mac_id); 5001 int ret = 0; 5002 5003 if (test_bit(ATH11K_FLAG_MONITOR_ENABLED, &ar->monitor_flags)) 5004 ret = ath11k_dp_mon_process_rx(ab, mac_id, napi, budget); 5005 else 5006 ret = ath11k_dp_rx_process_mon_status(ab, mac_id, napi, budget); 5007 return ret; 5008 } 5009 5010 static int ath11k_dp_rx_pdev_mon_status_attach(struct ath11k *ar) 5011 { 5012 struct ath11k_pdev_dp *dp = &ar->dp; 5013 struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data; 5014 5015 skb_queue_head_init(&pmon->rx_status_q); 5016 5017 
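	/* Start in the DP_PPDU_STATUS_START state with cleared monitor
	 * statistics; status TLV processing moves to DP_PPDU_STATUS_DONE
	 * once a complete PPDU has been parsed.
	 */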
pmon->mon_ppdu_status = DP_PPDU_STATUS_START; 5018 5019 memset(&pmon->rx_mon_stats, 0, 5020 sizeof(pmon->rx_mon_stats)); 5021 return 0; 5022 } 5023 5024 int ath11k_dp_rx_pdev_mon_attach(struct ath11k *ar) 5025 { 5026 struct ath11k_pdev_dp *dp = &ar->dp; 5027 struct ath11k_mon_data *pmon = &dp->mon_data; 5028 struct hal_srng *mon_desc_srng = NULL; 5029 struct dp_srng *dp_srng; 5030 int ret = 0; 5031 u32 n_link_desc = 0; 5032 5033 ret = ath11k_dp_rx_pdev_mon_status_attach(ar); 5034 if (ret) { 5035 ath11k_warn(ar->ab, "pdev_mon_status_attach() failed"); 5036 return ret; 5037 } 5038 5039 /* if rxdma1_enable is false, no need to setup 5040 * rxdma_mon_desc_ring. 5041 */ 5042 if (!ar->ab->hw_params.rxdma1_enable) 5043 return 0; 5044 5045 dp_srng = &dp->rxdma_mon_desc_ring; 5046 n_link_desc = dp_srng->size / 5047 ath11k_hal_srng_get_entrysize(ar->ab, HAL_RXDMA_MONITOR_DESC); 5048 mon_desc_srng = 5049 &ar->ab->hal.srng_list[dp->rxdma_mon_desc_ring.ring_id]; 5050 5051 ret = ath11k_dp_link_desc_setup(ar->ab, pmon->link_desc_banks, 5052 HAL_RXDMA_MONITOR_DESC, mon_desc_srng, 5053 n_link_desc); 5054 if (ret) { 5055 ath11k_warn(ar->ab, "mon_link_desc_pool_setup() failed"); 5056 return ret; 5057 } 5058 pmon->mon_last_linkdesc_paddr = 0; 5059 pmon->mon_last_buf_cookie = DP_RX_DESC_COOKIE_MAX + 1; 5060 spin_lock_init(&pmon->mon_lock); 5061 5062 return 0; 5063 } 5064 5065 static int ath11k_dp_mon_link_free(struct ath11k *ar) 5066 { 5067 struct ath11k_pdev_dp *dp = &ar->dp; 5068 struct ath11k_mon_data *pmon = &dp->mon_data; 5069 5070 ath11k_dp_link_desc_cleanup(ar->ab, pmon->link_desc_banks, 5071 HAL_RXDMA_MONITOR_DESC, 5072 &dp->rxdma_mon_desc_ring); 5073 return 0; 5074 } 5075 5076 int ath11k_dp_rx_pdev_mon_detach(struct ath11k *ar) 5077 { 5078 ath11k_dp_mon_link_free(ar); 5079 return 0; 5080 } 5081 5082 int ath11k_dp_rx_pktlog_start(struct ath11k_base *ab) 5083 { 5084 /* start reap timer */ 5085 mod_timer(&ab->mon_reap_timer, 5086 jiffies + msecs_to_jiffies(ATH11K_MON_TIMER_INTERVAL)); 5087 5088 return 0; 5089 } 5090 5091 int ath11k_dp_rx_pktlog_stop(struct ath11k_base *ab, bool stop_timer) 5092 { 5093 int ret; 5094 5095 if (stop_timer) 5096 del_timer_sync(&ab->mon_reap_timer); 5097 5098 /* reap all the monitor related rings */ 5099 ret = ath11k_dp_purge_mon_ring(ab); 5100 if (ret) { 5101 ath11k_warn(ab, "failed to purge dp mon ring: %d\n", ret); 5102 return ret; 5103 } 5104 5105 return 0; 5106 } 5107