// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
 */

#include <linux/ieee80211.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <crypto/hash.h>
#include "core.h"
#include "debug.h"
#include "debugfs_htt_stats.h"
#include "debugfs_sta.h"
#include "hal_desc.h"
#include "hw.h"
#include "dp_rx.h"
#include "hal_rx.h"
#include "dp_tx.h"
#include "peer.h"

#define ATH11K_DP_RX_FRAGMENT_TIMEOUT_MS (2 * HZ)

static u8 *ath11k_dp_rx_h_80211_hdr(struct hal_rx_desc *desc)
{
	return desc->hdr_status;
}

static enum hal_encrypt_type ath11k_dp_rx_h_mpdu_start_enctype(struct hal_rx_desc *desc)
{
	if (!(__le32_to_cpu(desc->mpdu_start.info1) &
	      RX_MPDU_START_INFO1_ENCRYPT_INFO_VALID))
		return HAL_ENCRYPT_TYPE_OPEN;

	return FIELD_GET(RX_MPDU_START_INFO2_ENC_TYPE,
			 __le32_to_cpu(desc->mpdu_start.info2));
}

static u8 ath11k_dp_rx_h_msdu_start_decap_type(struct hal_rx_desc *desc)
{
	return FIELD_GET(RX_MSDU_START_INFO2_DECAP_FORMAT,
			 __le32_to_cpu(desc->msdu_start.info2));
}

static u8 ath11k_dp_rx_h_msdu_start_mesh_ctl_present(struct hal_rx_desc *desc)
{
	return FIELD_GET(RX_MSDU_START_INFO2_MESH_CTRL_PRESENT,
			 __le32_to_cpu(desc->msdu_start.info2));
}

static bool ath11k_dp_rx_h_mpdu_start_seq_ctrl_valid(struct hal_rx_desc *desc)
{
	return !!FIELD_GET(RX_MPDU_START_INFO1_MPDU_SEQ_CTRL_VALID,
			   __le32_to_cpu(desc->mpdu_start.info1));
}

static bool ath11k_dp_rx_h_mpdu_start_fc_valid(struct hal_rx_desc *desc)
{
	return !!FIELD_GET(RX_MPDU_START_INFO1_MPDU_FCTRL_VALID,
			   __le32_to_cpu(desc->mpdu_start.info1));
}

static bool ath11k_dp_rx_h_mpdu_start_more_frags(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;

	hdr = (struct ieee80211_hdr *)(skb->data + HAL_RX_DESC_SIZE);
	return ieee80211_has_morefrags(hdr->frame_control);
}

static u16 ath11k_dp_rx_h_mpdu_start_frag_no(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;

	hdr = (struct ieee80211_hdr *)(skb->data + HAL_RX_DESC_SIZE);
	return le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
}

static u16 ath11k_dp_rx_h_mpdu_start_seq_no(struct hal_rx_desc *desc)
{
	return FIELD_GET(RX_MPDU_START_INFO1_MPDU_SEQ_NUM,
			 __le32_to_cpu(desc->mpdu_start.info1));
}

static bool ath11k_dp_rx_h_attn_msdu_done(struct hal_rx_desc *desc)
{
	return !!FIELD_GET(RX_ATTENTION_INFO2_MSDU_DONE,
			   __le32_to_cpu(desc->attention.info2));
}

static bool ath11k_dp_rx_h_attn_l4_cksum_fail(struct hal_rx_desc *desc)
{
	return !!FIELD_GET(RX_ATTENTION_INFO1_TCP_UDP_CKSUM_FAIL,
			   __le32_to_cpu(desc->attention.info1));
}

static bool ath11k_dp_rx_h_attn_ip_cksum_fail(struct hal_rx_desc *desc)
{
	return !!FIELD_GET(RX_ATTENTION_INFO1_IP_CKSUM_FAIL,
			   __le32_to_cpu(desc->attention.info1));
}

static bool ath11k_dp_rx_h_attn_is_decrypted(struct hal_rx_desc *desc)
{
	return (FIELD_GET(RX_ATTENTION_INFO2_DCRYPT_STATUS_CODE,
			  __le32_to_cpu(desc->attention.info2)) ==
		RX_DESC_DECRYPT_STATUS_CODE_OK);
}

static u32 ath11k_dp_rx_h_attn_mpdu_err(struct hal_rx_desc *desc)
{
	u32 info = __le32_to_cpu(desc->attention.info1);
	u32 errmap = 0;

	if (info & RX_ATTENTION_INFO1_FCS_ERR)
		errmap |= DP_RX_MPDU_ERR_FCS;

	if (info & RX_ATTENTION_INFO1_DECRYPT_ERR)
		errmap |= DP_RX_MPDU_ERR_DECRYPT;

	if (info & RX_ATTENTION_INFO1_TKIP_MIC_ERR)
		errmap |= DP_RX_MPDU_ERR_TKIP_MIC;

	if (info & RX_ATTENTION_INFO1_A_MSDU_ERROR)
		errmap |= DP_RX_MPDU_ERR_AMSDU_ERR;

	if (info & RX_ATTENTION_INFO1_OVERFLOW_ERR)
		errmap |= DP_RX_MPDU_ERR_OVERFLOW;

	if (info & RX_ATTENTION_INFO1_MSDU_LEN_ERR)
		errmap |= DP_RX_MPDU_ERR_MSDU_LEN;

	if (info & RX_ATTENTION_INFO1_MPDU_LEN_ERR)
		errmap |= DP_RX_MPDU_ERR_MPDU_LEN;

	return errmap;
}

static u16 ath11k_dp_rx_h_msdu_start_msdu_len(struct hal_rx_desc *desc)
{
	return FIELD_GET(RX_MSDU_START_INFO1_MSDU_LENGTH,
			 __le32_to_cpu(desc->msdu_start.info1));
}

static u8 ath11k_dp_rx_h_msdu_start_sgi(struct hal_rx_desc *desc)
{
	return FIELD_GET(RX_MSDU_START_INFO3_SGI,
			 __le32_to_cpu(desc->msdu_start.info3));
}

static u8 ath11k_dp_rx_h_msdu_start_rate_mcs(struct hal_rx_desc *desc)
{
	return FIELD_GET(RX_MSDU_START_INFO3_RATE_MCS,
			 __le32_to_cpu(desc->msdu_start.info3));
}

static u8 ath11k_dp_rx_h_msdu_start_rx_bw(struct hal_rx_desc *desc)
{
	return FIELD_GET(RX_MSDU_START_INFO3_RECV_BW,
			 __le32_to_cpu(desc->msdu_start.info3));
}

static u32 ath11k_dp_rx_h_msdu_start_freq(struct hal_rx_desc *desc)
{
	return __le32_to_cpu(desc->msdu_start.phy_meta_data);
}

static u8 ath11k_dp_rx_h_msdu_start_pkt_type(struct hal_rx_desc *desc)
{
	return FIELD_GET(RX_MSDU_START_INFO3_PKT_TYPE,
			 __le32_to_cpu(desc->msdu_start.info3));
}

static u8 ath11k_dp_rx_h_msdu_start_nss(struct hal_rx_desc *desc)
{
	u8 mimo_ss_bitmap = FIELD_GET(RX_MSDU_START_INFO3_MIMO_SS_BITMAP,
				      __le32_to_cpu(desc->msdu_start.info3));

	return hweight8(mimo_ss_bitmap);
}

static u8 ath11k_dp_rx_h_mpdu_start_tid(struct hal_rx_desc *desc)
{
	return FIELD_GET(RX_MPDU_START_INFO2_TID,
			 __le32_to_cpu(desc->mpdu_start.info2));
}

static u16 ath11k_dp_rx_h_mpdu_start_peer_id(struct hal_rx_desc *desc)
{
	return __le16_to_cpu(desc->mpdu_start.sw_peer_id);
}

static u8 ath11k_dp_rx_h_msdu_end_l3pad(struct hal_rx_desc *desc)
{
	return FIELD_GET(RX_MSDU_END_INFO2_L3_HDR_PADDING,
			 __le32_to_cpu(desc->msdu_end.info2));
}

static bool ath11k_dp_rx_h_msdu_end_first_msdu(struct hal_rx_desc *desc)
{
	return !!FIELD_GET(RX_MSDU_END_INFO2_FIRST_MSDU,
			   __le32_to_cpu(desc->msdu_end.info2));
}

static bool ath11k_dp_rx_h_msdu_end_last_msdu(struct hal_rx_desc *desc)
{
	return !!FIELD_GET(RX_MSDU_END_INFO2_LAST_MSDU,
			   __le32_to_cpu(desc->msdu_end.info2));
}

static void ath11k_dp_rx_desc_end_tlv_copy(struct hal_rx_desc *fdesc,
					   struct hal_rx_desc *ldesc)
{
	memcpy((u8 *)&fdesc->msdu_end, (u8 *)&ldesc->msdu_end,
	       sizeof(struct rx_msdu_end));
	memcpy((u8 *)&fdesc->attention, (u8 *)&ldesc->attention,
	       sizeof(struct rx_attention));
	memcpy((u8 *)&fdesc->mpdu_end, (u8 *)&ldesc->mpdu_end,
	       sizeof(struct rx_mpdu_end));
}

static u32 ath11k_dp_rxdesc_get_mpdulen_err(struct hal_rx_desc *rx_desc)
{
	struct rx_attention *rx_attn;

	rx_attn = &rx_desc->attention;

	return FIELD_GET(RX_ATTENTION_INFO1_MPDU_LEN_ERR,
			 __le32_to_cpu(rx_attn->info1));
}

static u32 ath11k_dp_rxdesc_get_decap_format(struct hal_rx_desc *rx_desc)
{
	struct rx_msdu_start *rx_msdu_start;

	rx_msdu_start = &rx_desc->msdu_start;

	return FIELD_GET(RX_MSDU_START_INFO2_DECAP_FORMAT,
			 __le32_to_cpu(rx_msdu_start->info2));
}

static u8 *ath11k_dp_rxdesc_get_80211hdr(struct hal_rx_desc *rx_desc)
{
	u8 *rx_pkt_hdr;

	rx_pkt_hdr = &rx_desc->msdu_payload[0];

	return rx_pkt_hdr;
}

static bool ath11k_dp_rxdesc_mpdu_valid(struct hal_rx_desc *rx_desc)
{
	u32 tlv_tag;

	tlv_tag = FIELD_GET(HAL_TLV_HDR_TAG,
			    __le32_to_cpu(rx_desc->mpdu_start_tag));

	return tlv_tag == HAL_RX_MPDU_START;
}

static u32 ath11k_dp_rxdesc_get_ppduid(struct hal_rx_desc *rx_desc)
{
	return __le16_to_cpu(rx_desc->mpdu_start.phy_ppdu_id);
}

static void ath11k_dp_service_mon_ring(struct timer_list *t)
{
	struct ath11k_base *ab = from_timer(ab, t, mon_reap_timer);
	int i;

	for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++)
		ath11k_dp_rx_process_mon_rings(ab, i, NULL, DP_MON_SERVICE_BUDGET);

	mod_timer(&ab->mon_reap_timer, jiffies +
		  msecs_to_jiffies(ATH11K_MON_TIMER_INTERVAL));
}

static int ath11k_dp_purge_mon_ring(struct ath11k_base *ab)
{
	int i, reaped = 0;
	unsigned long timeout = jiffies + msecs_to_jiffies(DP_MON_PURGE_TIMEOUT_MS);

	do {
		for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++)
			reaped += ath11k_dp_rx_process_mon_rings(ab, i,
								 NULL,
								 DP_MON_SERVICE_BUDGET);

		/* nothing more to reap */
		if (reaped < DP_MON_SERVICE_BUDGET)
			return 0;

	} while (time_before(jiffies, timeout));

	ath11k_warn(ab, "dp mon ring purge timeout");

	return -ETIMEDOUT;
}

/* Returns number of Rx buffers replenished */
int ath11k_dp_rxbufs_replenish(struct ath11k_base *ab, int mac_id,
			       struct dp_rxdma_ring *rx_ring,
			       int req_entries,
			       enum hal_rx_buf_return_buf_manager mgr)
{
	struct hal_srng *srng;
	u32 *desc;
	struct sk_buff *skb;
	int num_free;
	int num_remain;
	int buf_id;
	u32 cookie;
	dma_addr_t paddr;

	req_entries = min(req_entries, rx_ring->bufs_max);

	srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	num_free = ath11k_hal_srng_src_num_free(ab, srng, true);
	if (!req_entries && (num_free > (rx_ring->bufs_max * 3) / 4))
		req_entries = num_free;

	req_entries = min(num_free, req_entries);
	num_remain = req_entries;

	while (num_remain > 0) {
		skb = dev_alloc_skb(DP_RX_BUFFER_SIZE +
				    DP_RX_BUFFER_ALIGN_SIZE);
		if (!skb)
			break;

		if (!IS_ALIGNED((unsigned long)skb->data,
				DP_RX_BUFFER_ALIGN_SIZE)) {
			skb_pull(skb,
				 PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) -
				 skb->data);
		}

		paddr = dma_map_single(ab->dev, skb->data,
				       skb->len + skb_tailroom(skb),
				       DMA_FROM_DEVICE);
		if (dma_mapping_error(ab->dev, paddr))
			goto fail_free_skb;

		spin_lock_bh(&rx_ring->idr_lock);
		buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0,
				   rx_ring->bufs_max * 3, GFP_ATOMIC);
		spin_unlock_bh(&rx_ring->idr_lock);
		if (buf_id < 0)
			goto fail_dma_unmap;

		desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
		if (!desc)
			goto fail_idr_remove;

		ATH11K_SKB_RXCB(skb)->paddr = paddr;

		cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) |
			 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);

		num_remain--;

		ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr);
	}

	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

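	/* Report how many buffers were actually posted to the refill ring. */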
	return req_entries - num_remain;

fail_idr_remove:
	spin_lock_bh(&rx_ring->idr_lock);
	idr_remove(&rx_ring->bufs_idr, buf_id);
	spin_unlock_bh(&rx_ring->idr_lock);
fail_dma_unmap:
	dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
			 DMA_FROM_DEVICE);
fail_free_skb:
	dev_kfree_skb_any(skb);

	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	return req_entries - num_remain;
}

static int ath11k_dp_rxdma_buf_ring_free(struct ath11k *ar,
					 struct dp_rxdma_ring *rx_ring)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct sk_buff *skb;
	int buf_id;

	spin_lock_bh(&rx_ring->idr_lock);
	idr_for_each_entry(&rx_ring->bufs_idr, skb, buf_id) {
		idr_remove(&rx_ring->bufs_idr, buf_id);
		/* TODO: Understand where internal driver does this dma_unmap
		 * of rxdma_buffer.
		 */
		dma_unmap_single(ar->ab->dev, ATH11K_SKB_RXCB(skb)->paddr,
				 skb->len + skb_tailroom(skb), DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
	}

	idr_destroy(&rx_ring->bufs_idr);
	spin_unlock_bh(&rx_ring->idr_lock);

	/* if rxdma1_enable is false, mon_status_refill_ring
	 * isn't setup, so don't clean.
	 */
	if (!ar->ab->hw_params.rxdma1_enable)
		return 0;

	rx_ring = &dp->rx_mon_status_refill_ring[0];

	spin_lock_bh(&rx_ring->idr_lock);
	idr_for_each_entry(&rx_ring->bufs_idr, skb, buf_id) {
		idr_remove(&rx_ring->bufs_idr, buf_id);
		/* XXX: Understand where internal driver does this dma_unmap
		 * of rxdma_buffer.
		 */
		dma_unmap_single(ar->ab->dev, ATH11K_SKB_RXCB(skb)->paddr,
				 skb->len + skb_tailroom(skb), DMA_BIDIRECTIONAL);
		dev_kfree_skb_any(skb);
	}

	idr_destroy(&rx_ring->bufs_idr);
	spin_unlock_bh(&rx_ring->idr_lock);

	return 0;
}

static int ath11k_dp_rxdma_pdev_buf_free(struct ath11k *ar)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_base *ab = ar->ab;
	struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
	int i;

	ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);

	rx_ring = &dp->rxdma_mon_buf_ring;
	ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);

	for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
		rx_ring = &dp->rx_mon_status_refill_ring[i];
		ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);
	}

	return 0;
}

static int ath11k_dp_rxdma_ring_buf_setup(struct ath11k *ar,
					  struct dp_rxdma_ring *rx_ring,
					  u32 ringtype)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	int num_entries;

	num_entries = rx_ring->refill_buf_ring.size /
		      ath11k_hal_srng_get_entrysize(ar->ab, ringtype);

	rx_ring->bufs_max = num_entries;
	ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id, rx_ring, num_entries,
				   HAL_RX_BUF_RBM_SW3_BM);
	return 0;
}

static int ath11k_dp_rxdma_pdev_buf_setup(struct ath11k *ar)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_base *ab = ar->ab;
	struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
	int i;

	ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_BUF);

	if (ar->ab->hw_params.rxdma1_enable) {
		rx_ring = &dp->rxdma_mon_buf_ring;
		ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_MONITOR_BUF);
	}

	for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
		rx_ring = &dp->rx_mon_status_refill_ring[i];
		ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_MONITOR_STATUS);
	}

	return 0;
}

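/* Clean up the per-pdev rx srngs: the refill buf ring, per-MAC buf rings,
 * rxdma error dst rings, monitor status refill rings and the monitor buf ring.
 */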
static void ath11k_dp_rx_pdev_srng_free(struct ath11k *ar)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_base *ab = ar->ab;
	int i;

	ath11k_dp_srng_cleanup(ab, &dp->rx_refill_buf_ring.refill_buf_ring);

	for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
		if (ab->hw_params.rx_mac_buf_ring)
			ath11k_dp_srng_cleanup(ab, &dp->rx_mac_buf_ring[i]);

		ath11k_dp_srng_cleanup(ab, &dp->rxdma_err_dst_ring[i]);
		ath11k_dp_srng_cleanup(ab,
				       &dp->rx_mon_status_refill_ring[i].refill_buf_ring);
	}

	ath11k_dp_srng_cleanup(ab, &dp->rxdma_mon_buf_ring.refill_buf_ring);
}

void ath11k_dp_pdev_reo_cleanup(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	int i;

	for (i = 0; i < DP_REO_DST_RING_MAX; i++)
		ath11k_dp_srng_cleanup(ab, &dp->reo_dst_ring[i]);
}

int ath11k_dp_pdev_reo_setup(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	int ret;
	int i;

	for (i = 0; i < DP_REO_DST_RING_MAX; i++) {
		ret = ath11k_dp_srng_setup(ab, &dp->reo_dst_ring[i],
					   HAL_REO_DST, i, 0,
					   DP_REO_DST_RING_SIZE);
		if (ret) {
			ath11k_warn(ab, "failed to setup reo_dst_ring\n");
			goto err_reo_cleanup;
		}
	}

	return 0;

err_reo_cleanup:
	ath11k_dp_pdev_reo_cleanup(ab);

	return ret;
}

static int ath11k_dp_rx_pdev_srng_alloc(struct ath11k *ar)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_base *ab = ar->ab;
	struct dp_srng *srng = NULL;
	int i;
	int ret;

	ret = ath11k_dp_srng_setup(ar->ab,
				   &dp->rx_refill_buf_ring.refill_buf_ring,
				   HAL_RXDMA_BUF, 0,
				   dp->mac_id, DP_RXDMA_BUF_RING_SIZE);
	if (ret) {
		ath11k_warn(ar->ab, "failed to setup rx_refill_buf_ring\n");
		return ret;
	}

	if (ar->ab->hw_params.rx_mac_buf_ring) {
		for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
			ret = ath11k_dp_srng_setup(ar->ab,
						   &dp->rx_mac_buf_ring[i],
						   HAL_RXDMA_BUF, 1,
						   dp->mac_id + i, 1024);
			if (ret) {
				ath11k_warn(ar->ab, "failed to setup rx_mac_buf_ring %d\n",
					    i);
				return ret;
			}
		}
	}

	for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
		ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_err_dst_ring[i],
					   HAL_RXDMA_DST, 0, dp->mac_id + i,
					   DP_RXDMA_ERR_DST_RING_SIZE);
		if (ret) {
			ath11k_warn(ar->ab, "failed to setup rxdma_err_dst_ring %d\n", i);
			return ret;
		}
	}

	for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
		srng = &dp->rx_mon_status_refill_ring[i].refill_buf_ring;
		ret = ath11k_dp_srng_setup(ar->ab,
					   srng,
					   HAL_RXDMA_MONITOR_STATUS, 0, dp->mac_id + i,
					   DP_RXDMA_MON_STATUS_RING_SIZE);
		if (ret) {
			ath11k_warn(ar->ab,
				    "failed to setup rx_mon_status_refill_ring %d\n", i);
			return ret;
		}
	}

	/* if rxdma1_enable is false, then it doesn't need
	 * to setup rxdma_mon_buf_ring, rxdma_mon_dst_ring
	 * and rxdma_mon_desc_ring.
	 * init reap timer for QCA6390.
	 */
	if (!ar->ab->hw_params.rxdma1_enable) {
		/* init mon status buffer reap timer */
		timer_setup(&ar->ab->mon_reap_timer,
			    ath11k_dp_service_mon_ring, 0);
		return 0;
	}

	ret = ath11k_dp_srng_setup(ar->ab,
				   &dp->rxdma_mon_buf_ring.refill_buf_ring,
				   HAL_RXDMA_MONITOR_BUF, 0, dp->mac_id,
				   DP_RXDMA_MONITOR_BUF_RING_SIZE);
	if (ret) {
		ath11k_warn(ar->ab,
			    "failed to setup HAL_RXDMA_MONITOR_BUF\n");
		return ret;
	}

	ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_mon_dst_ring,
				   HAL_RXDMA_MONITOR_DST, 0, dp->mac_id,
				   DP_RXDMA_MONITOR_DST_RING_SIZE);
	if (ret) {
		ath11k_warn(ar->ab,
			    "failed to setup HAL_RXDMA_MONITOR_DST\n");
		return ret;
	}

	ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_mon_desc_ring,
				   HAL_RXDMA_MONITOR_DESC, 0, dp->mac_id,
				   DP_RXDMA_MONITOR_DESC_RING_SIZE);
	if (ret) {
		ath11k_warn(ar->ab,
			    "failed to setup HAL_RXDMA_MONITOR_DESC\n");
		return ret;
	}

	return 0;
}

void ath11k_dp_reo_cmd_list_cleanup(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	struct dp_reo_cmd *cmd, *tmp;
	struct dp_reo_cache_flush_elem *cmd_cache, *tmp_cache;

	spin_lock_bh(&dp->reo_cmd_lock);
	list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
		list_del(&cmd->list);
		dma_unmap_single(ab->dev, cmd->data.paddr,
				 cmd->data.size, DMA_BIDIRECTIONAL);
		kfree(cmd->data.vaddr);
		kfree(cmd);
	}

	list_for_each_entry_safe(cmd_cache, tmp_cache,
				 &dp->reo_cmd_cache_flush_list, list) {
		list_del(&cmd_cache->list);
		dp->reo_cmd_cache_flush_count--;
		dma_unmap_single(ab->dev, cmd_cache->data.paddr,
				 cmd_cache->data.size, DMA_BIDIRECTIONAL);
		kfree(cmd_cache->data.vaddr);
		kfree(cmd_cache);
	}
	spin_unlock_bh(&dp->reo_cmd_lock);
}

static void ath11k_dp_reo_cmd_free(struct ath11k_dp *dp, void *ctx,
				   enum hal_reo_cmd_status status)
{
	struct dp_rx_tid *rx_tid = ctx;

	if (status != HAL_REO_CMD_SUCCESS)
		ath11k_warn(dp->ab, "failed to flush rx tid hw desc, tid %d status %d\n",
			    rx_tid->tid, status);

	dma_unmap_single(dp->ab->dev, rx_tid->paddr, rx_tid->size,
			 DMA_BIDIRECTIONAL);
	kfree(rx_tid->vaddr);
}

static void ath11k_dp_reo_cache_flush(struct ath11k_base *ab,
				      struct dp_rx_tid *rx_tid)
{
	struct ath11k_hal_reo_cmd cmd = {0};
	unsigned long tot_desc_sz, desc_sz;
	int ret;

	tot_desc_sz = rx_tid->size;
	desc_sz = ath11k_hal_reo_qdesc_size(0, HAL_DESC_REO_NON_QOS_TID);

	while (tot_desc_sz > desc_sz) {
		tot_desc_sz -= desc_sz;
		cmd.addr_lo = lower_32_bits(rx_tid->paddr + tot_desc_sz);
		cmd.addr_hi = upper_32_bits(rx_tid->paddr);
		ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid,
						HAL_REO_CMD_FLUSH_CACHE, &cmd,
						NULL);
		if (ret)
			ath11k_warn(ab,
				    "failed to send HAL_REO_CMD_FLUSH_CACHE, tid %d (%d)\n",
				    rx_tid->tid, ret);
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.addr_lo = lower_32_bits(rx_tid->paddr);
	cmd.addr_hi = upper_32_bits(rx_tid->paddr);
	cmd.flag |= HAL_REO_CMD_FLG_NEED_STATUS;
	ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid,
					HAL_REO_CMD_FLUSH_CACHE,
					&cmd, ath11k_dp_reo_cmd_free);
	if (ret) {
		ath11k_err(ab, "failed to send HAL_REO_CMD_FLUSH_CACHE cmd, tid %d (%d)\n",
			   rx_tid->tid, ret);
		dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
				 DMA_BIDIRECTIONAL);
		kfree(rx_tid->vaddr);
	}
}

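/* REO command completion handler for rx TID deletion: on success the queue
 * descriptor is parked on reo_cmd_cache_flush_list and aged-out entries are
 * flushed from the HW cache and freed; on drain or allocation failure the
 * descriptor is unmapped and freed immediately.
 */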
static void ath11k_dp_rx_tid_del_func(struct ath11k_dp *dp, void *ctx,
				      enum hal_reo_cmd_status status)
{
	struct ath11k_base *ab = dp->ab;
	struct dp_rx_tid *rx_tid = ctx;
	struct dp_reo_cache_flush_elem *elem, *tmp;

	if (status == HAL_REO_CMD_DRAIN) {
		goto free_desc;
	} else if (status != HAL_REO_CMD_SUCCESS) {
		/* Shouldn't happen! Cleanup in case of other failure? */
		ath11k_warn(ab, "failed to delete rx tid %d hw descriptor %d\n",
			    rx_tid->tid, status);
		return;
	}

	elem = kzalloc(sizeof(*elem), GFP_ATOMIC);
	if (!elem)
		goto free_desc;

	elem->ts = jiffies;
	memcpy(&elem->data, rx_tid, sizeof(*rx_tid));

	spin_lock_bh(&dp->reo_cmd_lock);
	list_add_tail(&elem->list, &dp->reo_cmd_cache_flush_list);
	dp->reo_cmd_cache_flush_count++;

	/* Flush and invalidate aged REO desc from HW cache */
	list_for_each_entry_safe(elem, tmp, &dp->reo_cmd_cache_flush_list,
				 list) {
		if (dp->reo_cmd_cache_flush_count > DP_REO_DESC_FREE_THRESHOLD ||
		    time_after(jiffies, elem->ts +
			       msecs_to_jiffies(DP_REO_DESC_FREE_TIMEOUT_MS))) {
			list_del(&elem->list);
			dp->reo_cmd_cache_flush_count--;
			spin_unlock_bh(&dp->reo_cmd_lock);

			ath11k_dp_reo_cache_flush(ab, &elem->data);
			kfree(elem);
			spin_lock_bh(&dp->reo_cmd_lock);
		}
	}
	spin_unlock_bh(&dp->reo_cmd_lock);

	return;
free_desc:
	dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
			 DMA_BIDIRECTIONAL);
	kfree(rx_tid->vaddr);
}

void ath11k_peer_rx_tid_delete(struct ath11k *ar,
			       struct ath11k_peer *peer, u8 tid)
{
	struct ath11k_hal_reo_cmd cmd = {0};
	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
	int ret;

	if (!rx_tid->active)
		return;

	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
	cmd.addr_lo = lower_32_bits(rx_tid->paddr);
	cmd.addr_hi = upper_32_bits(rx_tid->paddr);
	cmd.upd0 |= HAL_REO_CMD_UPD0_VLD;
	ret = ath11k_dp_tx_send_reo_cmd(ar->ab, rx_tid,
					HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
					ath11k_dp_rx_tid_del_func);
	if (ret) {
		ath11k_err(ar->ab, "failed to send HAL_REO_CMD_UPDATE_RX_QUEUE cmd, tid %d (%d)\n",
			   tid, ret);
		dma_unmap_single(ar->ab->dev, rx_tid->paddr, rx_tid->size,
				 DMA_BIDIRECTIONAL);
		kfree(rx_tid->vaddr);
	}

	rx_tid->active = false;
}

static int ath11k_dp_rx_link_desc_return(struct ath11k_base *ab,
					 u32 *link_desc,
					 enum hal_wbm_rel_bm_act action)
{
	struct ath11k_dp *dp = &ab->dp;
	struct hal_srng *srng;
	u32 *desc;
	int ret = 0;

	srng = &ab->hal.srng_list[dp->wbm_desc_rel_ring.ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
	if (!desc) {
		ret = -ENOBUFS;
		goto exit;
	}

	ath11k_hal_rx_msdu_link_desc_set(ab, (void *)desc, (void *)link_desc,
					 action);

exit:
	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	return ret;
}

static void ath11k_dp_rx_frags_cleanup(struct dp_rx_tid *rx_tid, bool rel_link_desc)
{
	struct ath11k_base *ab = rx_tid->ab;

	lockdep_assert_held(&ab->base_lock);

	if (rx_tid->dst_ring_desc) {
		if (rel_link_desc)
			ath11k_dp_rx_link_desc_return(ab, (u32 *)rx_tid->dst_ring_desc,
						      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
		kfree(rx_tid->dst_ring_desc);
		rx_tid->dst_ring_desc = NULL;
	}

	rx_tid->cur_sn = 0;
	rx_tid->last_frag_no = 0;
	rx_tid->rx_frag_bitmap = 0;
	__skb_queue_purge(&rx_tid->rx_frags);
}

void ath11k_peer_rx_tid_cleanup(struct ath11k *ar, struct ath11k_peer *peer)
{
	struct dp_rx_tid *rx_tid;
	int i;

	lockdep_assert_held(&ar->ab->base_lock);

	for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
		rx_tid = &peer->rx_tid[i];

		ath11k_peer_rx_tid_delete(ar, peer, i);
		ath11k_dp_rx_frags_cleanup(rx_tid, true);

		spin_unlock_bh(&ar->ab->base_lock);
		del_timer_sync(&rx_tid->frag_timer);
		spin_lock_bh(&ar->ab->base_lock);
	}
}

static int ath11k_peer_rx_tid_reo_update(struct ath11k *ar,
					 struct ath11k_peer *peer,
					 struct dp_rx_tid *rx_tid,
					 u32 ba_win_sz, u16 ssn,
					 bool update_ssn)
{
	struct ath11k_hal_reo_cmd cmd = {0};
	int ret;

	cmd.addr_lo = lower_32_bits(rx_tid->paddr);
	cmd.addr_hi = upper_32_bits(rx_tid->paddr);
	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
	cmd.upd0 = HAL_REO_CMD_UPD0_BA_WINDOW_SIZE;
	cmd.ba_window_size = ba_win_sz;

	if (update_ssn) {
		cmd.upd0 |= HAL_REO_CMD_UPD0_SSN;
		cmd.upd2 = FIELD_PREP(HAL_REO_CMD_UPD2_SSN, ssn);
	}

	ret = ath11k_dp_tx_send_reo_cmd(ar->ab, rx_tid,
					HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
					NULL);
	if (ret) {
		ath11k_warn(ar->ab, "failed to update rx tid queue, tid %d (%d)\n",
			    rx_tid->tid, ret);
		return ret;
	}

	rx_tid->ba_win_sz = ba_win_sz;

	return 0;
}

static void ath11k_dp_rx_tid_mem_free(struct ath11k_base *ab,
				      const u8 *peer_mac, int vdev_id, u8 tid)
{
	struct ath11k_peer *peer;
	struct dp_rx_tid *rx_tid;

	spin_lock_bh(&ab->base_lock);

	peer = ath11k_peer_find(ab, vdev_id, peer_mac);
	if (!peer) {
		ath11k_warn(ab, "failed to find the peer to free up rx tid mem\n");
		goto unlock_exit;
	}

	rx_tid = &peer->rx_tid[tid];
	if (!rx_tid->active)
		goto unlock_exit;

	dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
			 DMA_BIDIRECTIONAL);
	kfree(rx_tid->vaddr);

	rx_tid->active = false;

unlock_exit:
	spin_unlock_bh(&ab->base_lock);
}

int ath11k_peer_rx_tid_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id,
			     u8 tid, u32 ba_win_sz, u16 ssn,
			     enum hal_pn_type pn_type)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_peer *peer;
	struct dp_rx_tid *rx_tid;
	u32 hw_desc_sz;
	u32 *addr_aligned;
	void *vaddr;
	dma_addr_t paddr;
	int ret;

	spin_lock_bh(&ab->base_lock);

	peer = ath11k_peer_find(ab, vdev_id, peer_mac);
	if (!peer) {
		ath11k_warn(ab, "failed to find the peer to set up rx tid\n");
		spin_unlock_bh(&ab->base_lock);
		return -ENOENT;
	}

	rx_tid = &peer->rx_tid[tid];
	/* Update the tid queue if it is already setup */
	if (rx_tid->active) {
		paddr = rx_tid->paddr;
		ret = ath11k_peer_rx_tid_reo_update(ar, peer, rx_tid,
						    ba_win_sz, ssn, true);
		spin_unlock_bh(&ab->base_lock);
		if (ret) {
			ath11k_warn(ab, "failed to update reo for rx tid %d\n", tid);
			return ret;
		}

		ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id,
							     peer_mac, paddr,
							     tid, 1, ba_win_sz);
		if (ret)
			ath11k_warn(ab, "failed to send wmi command to update rx reorder queue, tid :%d (%d)\n",
				    tid, ret);
		return ret;
	}

	rx_tid->tid = tid;

	rx_tid->ba_win_sz = ba_win_sz;

	/* TODO: Optimize the memory allocation for qos tid based on
	 * the actual BA window size in REO tid update path.
	 */
	if (tid == HAL_DESC_REO_NON_QOS_TID)
		hw_desc_sz = ath11k_hal_reo_qdesc_size(ba_win_sz, tid);
	else
		hw_desc_sz = ath11k_hal_reo_qdesc_size(DP_BA_WIN_SZ_MAX, tid);

	vaddr = kzalloc(hw_desc_sz + HAL_LINK_DESC_ALIGN - 1, GFP_ATOMIC);
	if (!vaddr) {
		spin_unlock_bh(&ab->base_lock);
		return -ENOMEM;
	}

	addr_aligned = PTR_ALIGN(vaddr, HAL_LINK_DESC_ALIGN);

	ath11k_hal_reo_qdesc_setup(addr_aligned, tid, ba_win_sz,
				   ssn, pn_type);

	paddr = dma_map_single(ab->dev, addr_aligned, hw_desc_sz,
			       DMA_BIDIRECTIONAL);

	ret = dma_mapping_error(ab->dev, paddr);
	if (ret) {
		spin_unlock_bh(&ab->base_lock);
		goto err_mem_free;
	}

	rx_tid->vaddr = vaddr;
	rx_tid->paddr = paddr;
	rx_tid->size = hw_desc_sz;
	rx_tid->active = true;

	spin_unlock_bh(&ab->base_lock);

	ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, peer_mac,
						     paddr, tid, 1, ba_win_sz);
	if (ret) {
		ath11k_warn(ar->ab, "failed to setup rx reorder queue, tid :%d (%d)\n",
			    tid, ret);
		ath11k_dp_rx_tid_mem_free(ab, peer_mac, vdev_id, tid);
	}

	return ret;

err_mem_free:
	kfree(vaddr);

	return ret;
}

int ath11k_dp_rx_ampdu_start(struct ath11k *ar,
			     struct ieee80211_ampdu_params *params)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_sta *arsta = (void *)params->sta->drv_priv;
	int vdev_id = arsta->arvif->vdev_id;
	int ret;

	ret = ath11k_peer_rx_tid_setup(ar, params->sta->addr, vdev_id,
				       params->tid, params->buf_size,
				       params->ssn, arsta->pn_type);
	if (ret)
		ath11k_warn(ab, "failed to setup rx tid %d\n", ret);

	return ret;
}

int ath11k_dp_rx_ampdu_stop(struct ath11k *ar,
			    struct ieee80211_ampdu_params *params)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_peer *peer;
	struct ath11k_sta *arsta = (void *)params->sta->drv_priv;
	int vdev_id = arsta->arvif->vdev_id;
	dma_addr_t paddr;
	bool active;
	int ret;

	spin_lock_bh(&ab->base_lock);

	peer = ath11k_peer_find(ab, vdev_id, params->sta->addr);
	if (!peer) {
		ath11k_warn(ab, "failed to find the peer to stop rx aggregation\n");
		spin_unlock_bh(&ab->base_lock);
		return -ENOENT;
	}

	paddr = peer->rx_tid[params->tid].paddr;
	active = peer->rx_tid[params->tid].active;

	if (!active) {
		spin_unlock_bh(&ab->base_lock);
		return 0;
	}

	ret = ath11k_peer_rx_tid_reo_update(ar, peer, peer->rx_tid, 1, 0, false);
	spin_unlock_bh(&ab->base_lock);
	if (ret) {
		ath11k_warn(ab, "failed to update reo for rx tid %d: %d\n",
			    params->tid, ret);
		return ret;
	}

	ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id,
						     params->sta->addr, paddr,
						     params->tid, 1, 1);
	if (ret)
		ath11k_warn(ab, "failed to send wmi to delete rx tid %d\n",
			    ret);

	return ret;
}

int ath11k_dp_peer_rx_pn_replay_config(struct ath11k_vif *arvif,
				       const u8 *peer_addr,
				       enum set_key_cmd key_cmd,
				       struct ieee80211_key_conf *key)
{
	struct ath11k *ar = arvif->ar;
	struct ath11k_base *ab = ar->ab;
	struct ath11k_hal_reo_cmd cmd = {0};
	struct ath11k_peer *peer;
	struct dp_rx_tid *rx_tid;
	u8 tid;
	int ret = 0;

	/* NOTE: Enable PN/TSC replay check offload only for unicast frames.
	 * We use mac80211 PN/TSC replay check functionality for bcast/mcast
	 * for now.
	 */
	if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
		return 0;

	cmd.flag |= HAL_REO_CMD_FLG_NEED_STATUS;
	cmd.upd0 |= HAL_REO_CMD_UPD0_PN |
		    HAL_REO_CMD_UPD0_PN_SIZE |
		    HAL_REO_CMD_UPD0_PN_VALID |
		    HAL_REO_CMD_UPD0_PN_CHECK |
		    HAL_REO_CMD_UPD0_SVLD;

	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_TKIP:
	case WLAN_CIPHER_SUITE_CCMP:
	case WLAN_CIPHER_SUITE_CCMP_256:
	case WLAN_CIPHER_SUITE_GCMP:
	case WLAN_CIPHER_SUITE_GCMP_256:
		if (key_cmd == SET_KEY) {
			cmd.upd1 |= HAL_REO_CMD_UPD1_PN_CHECK;
			cmd.pn_size = 48;
		}
		break;
	default:
		break;
	}

	spin_lock_bh(&ab->base_lock);

	peer = ath11k_peer_find(ab, arvif->vdev_id, peer_addr);
	if (!peer) {
		ath11k_warn(ab, "failed to find the peer to configure pn replay detection\n");
		spin_unlock_bh(&ab->base_lock);
		return -ENOENT;
	}

	for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) {
		rx_tid = &peer->rx_tid[tid];
		if (!rx_tid->active)
			continue;
		cmd.addr_lo = lower_32_bits(rx_tid->paddr);
		cmd.addr_hi = upper_32_bits(rx_tid->paddr);
		ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid,
						HAL_REO_CMD_UPDATE_RX_QUEUE,
						&cmd, NULL);
		if (ret) {
			ath11k_warn(ab, "failed to configure rx tid %d queue for pn replay detection %d\n",
				    tid, ret);
			break;
		}
	}

	spin_unlock_bh(&ab->base_lock);

	return ret;
}

static inline int ath11k_get_ppdu_user_index(struct htt_ppdu_stats *ppdu_stats,
					     u16 peer_id)
{
	int i;

	for (i = 0; i < HTT_PPDU_STATS_MAX_USERS - 1; i++) {
		if (ppdu_stats->user_stats[i].is_valid_peer_id) {
			if (peer_id == ppdu_stats->user_stats[i].peer_id)
				return i;
		} else {
			return i;
		}
	}

	return -EINVAL;
}

static int ath11k_htt_tlv_ppdu_stats_parse(struct ath11k_base *ab,
					   u16 tag, u16 len, const void *ptr,
					   void *data)
{
	struct htt_ppdu_stats_info *ppdu_info;
	struct htt_ppdu_user_stats *user_stats;
	int cur_user;
	u16 peer_id;

	ppdu_info = (struct htt_ppdu_stats_info *)data;

	switch (tag) {
	case HTT_PPDU_STATS_TAG_COMMON:
		if (len < sizeof(struct htt_ppdu_stats_common)) {
			ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}
		memcpy((void *)&ppdu_info->ppdu_stats.common, ptr,
		       sizeof(struct htt_ppdu_stats_common));
		break;
	case HTT_PPDU_STATS_TAG_USR_RATE:
		if (len < sizeof(struct htt_ppdu_stats_user_rate)) {
			ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}

		peer_id = ((struct htt_ppdu_stats_user_rate *)ptr)->sw_peer_id;
		cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
						      peer_id);
		if (cur_user < 0)
			return -EINVAL;
		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
		user_stats->peer_id = peer_id;
		user_stats->is_valid_peer_id = true;
		memcpy((void *)&user_stats->rate, ptr,
		       sizeof(struct htt_ppdu_stats_user_rate));
		user_stats->tlv_flags |= BIT(tag);
		break;
	case HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON:
		if (len < sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn)) {
			ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}

		peer_id = ((struct htt_ppdu_stats_usr_cmpltn_cmn *)ptr)->sw_peer_id;
		cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
						      peer_id);
		if (cur_user < 0)
			return -EINVAL;
		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
		user_stats->peer_id = peer_id;
		user_stats->is_valid_peer_id = true;
		memcpy((void *)&user_stats->cmpltn_cmn, ptr,
		       sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn));
		user_stats->tlv_flags |= BIT(tag);
		break;
	case HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS:
		if (len <
		    sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status)) {
			ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}

		peer_id =
		((struct htt_ppdu_stats_usr_cmpltn_ack_ba_status *)ptr)->sw_peer_id;
		cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
						      peer_id);
		if (cur_user < 0)
			return -EINVAL;
		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
		user_stats->peer_id = peer_id;
		user_stats->is_valid_peer_id = true;
		memcpy((void *)&user_stats->ack_ba, ptr,
		       sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status));
		user_stats->tlv_flags |= BIT(tag);
		break;
	}
	return 0;
}

int ath11k_dp_htt_tlv_iter(struct ath11k_base *ab, const void *ptr, size_t len,
			   int (*iter)(struct ath11k_base *ar, u16 tag, u16 len,
				       const void *ptr, void *data),
			   void *data)
{
	const struct htt_tlv *tlv;
	const void *begin = ptr;
	u16 tlv_tag, tlv_len;
	int ret = -EINVAL;

	while (len > 0) {
		if (len < sizeof(*tlv)) {
			ath11k_err(ab, "htt tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
				   ptr - begin, len, sizeof(*tlv));
			return -EINVAL;
		}
		tlv = (struct htt_tlv *)ptr;
		tlv_tag = FIELD_GET(HTT_TLV_TAG, tlv->header);
		tlv_len = FIELD_GET(HTT_TLV_LEN, tlv->header);
		ptr += sizeof(*tlv);
		len -= sizeof(*tlv);

		if (tlv_len > len) {
			ath11k_err(ab, "htt tlv parse failure of tag %u at byte %zd (%zu bytes left, %u expected)\n",
				   tlv_tag, ptr - begin, len, tlv_len);
			return -EINVAL;
		}
		ret = iter(ab, tlv_tag, tlv_len, ptr, data);
		if (ret == -ENOMEM)
			return ret;

		ptr += tlv_len;
		len -= tlv_len;
	}
	return 0;
}

static inline u32 ath11k_he_gi_to_nl80211_he_gi(u8 sgi)
{
	u32 ret = 0;

	switch (sgi) {
	case RX_MSDU_START_SGI_0_8_US:
		ret = NL80211_RATE_INFO_HE_GI_0_8;
		break;
	case RX_MSDU_START_SGI_1_6_US:
		ret = NL80211_RATE_INFO_HE_GI_1_6;
		break;
	case RX_MSDU_START_SGI_3_2_US:
		ret = NL80211_RATE_INFO_HE_GI_3_2;
		break;
	}

	return ret;
}

static void
ath11k_update_per_peer_tx_stats(struct ath11k *ar,
				struct htt_ppdu_stats *ppdu_stats, u8 user)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_peer *peer;
	struct ieee80211_sta *sta;
	struct ath11k_sta *arsta;
	struct htt_ppdu_stats_user_rate *user_rate;
	struct ath11k_per_peer_tx_stats *peer_stats = &ar->peer_tx_stats;
	struct htt_ppdu_user_stats *usr_stats = &ppdu_stats->user_stats[user];
	struct htt_ppdu_stats_common *common = &ppdu_stats->common;
	int ret;
	u8 flags, mcs, nss, bw, sgi, dcm, rate_idx = 0;
	u32 succ_bytes = 0;
	u16 rate = 0, succ_pkts = 0;
	u32 tx_duration = 0;
	u8 tid = HTT_PPDU_STATS_NON_QOS_TID;
	bool is_ampdu = false;

	if (!usr_stats)
		return;

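	/* The user rate TLV is required for deriving the tx rate below. */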
	if (!(usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_RATE)))
		return;

	if (usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON))
		is_ampdu =
			HTT_USR_CMPLTN_IS_AMPDU(usr_stats->cmpltn_cmn.flags);

	if (usr_stats->tlv_flags &
	    BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS)) {
		succ_bytes = usr_stats->ack_ba.success_bytes;
		succ_pkts = FIELD_GET(HTT_PPDU_STATS_ACK_BA_INFO_NUM_MSDU_M,
				      usr_stats->ack_ba.info);
		tid = FIELD_GET(HTT_PPDU_STATS_ACK_BA_INFO_TID_NUM,
				usr_stats->ack_ba.info);
	}

	if (common->fes_duration_us)
		tx_duration = common->fes_duration_us;

	user_rate = &usr_stats->rate;
	flags = HTT_USR_RATE_PREAMBLE(user_rate->rate_flags);
	bw = HTT_USR_RATE_BW(user_rate->rate_flags) - 2;
	nss = HTT_USR_RATE_NSS(user_rate->rate_flags) + 1;
	mcs = HTT_USR_RATE_MCS(user_rate->rate_flags);
	sgi = HTT_USR_RATE_GI(user_rate->rate_flags);
	dcm = HTT_USR_RATE_DCM(user_rate->rate_flags);

	/* Note: If host configured fixed rates and in some other special
	 * cases, the broadcast/management frames are sent in different rates.
	 * Firmware rate's control to be skipped for this?
	 */

	if (flags == WMI_RATE_PREAMBLE_HE && mcs > ATH11K_HE_MCS_MAX) {
		ath11k_warn(ab, "Invalid HE mcs %d peer stats", mcs);
		return;
	}

	if (flags == WMI_RATE_PREAMBLE_VHT && mcs > ATH11K_VHT_MCS_MAX) {
		ath11k_warn(ab, "Invalid VHT mcs %d peer stats", mcs);
		return;
	}

	if (flags == WMI_RATE_PREAMBLE_HT && (mcs > ATH11K_HT_MCS_MAX || nss < 1)) {
		ath11k_warn(ab, "Invalid HT mcs %d nss %d peer stats",
			    mcs, nss);
		return;
	}

	if (flags == WMI_RATE_PREAMBLE_CCK || flags == WMI_RATE_PREAMBLE_OFDM) {
		ret = ath11k_mac_hw_ratecode_to_legacy_rate(mcs,
							    flags,
							    &rate_idx,
							    &rate);
		if (ret < 0)
			return;
	}

	rcu_read_lock();
	spin_lock_bh(&ab->base_lock);
	peer = ath11k_peer_find_by_id(ab, usr_stats->peer_id);

	if (!peer || !peer->sta) {
		spin_unlock_bh(&ab->base_lock);
		rcu_read_unlock();
		return;
	}

	sta = peer->sta;
	arsta = (struct ath11k_sta *)sta->drv_priv;

	memset(&arsta->txrate, 0, sizeof(arsta->txrate));

	switch (flags) {
	case WMI_RATE_PREAMBLE_OFDM:
		arsta->txrate.legacy = rate;
		break;
	case WMI_RATE_PREAMBLE_CCK:
		arsta->txrate.legacy = rate;
		break;
	case WMI_RATE_PREAMBLE_HT:
		arsta->txrate.mcs = mcs + 8 * (nss - 1);
		arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
		if (sgi)
			arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
		break;
	case WMI_RATE_PREAMBLE_VHT:
		arsta->txrate.mcs = mcs;
		arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
		if (sgi)
			arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
		break;
	case WMI_RATE_PREAMBLE_HE:
		arsta->txrate.mcs = mcs;
		arsta->txrate.flags = RATE_INFO_FLAGS_HE_MCS;
		arsta->txrate.he_dcm = dcm;
		arsta->txrate.he_gi = ath11k_he_gi_to_nl80211_he_gi(sgi);
		arsta->txrate.he_ru_alloc = ath11k_he_ru_tones_to_nl80211_he_ru_alloc(
						(user_rate->ru_end -
						 user_rate->ru_start) + 1);
		break;
	}

	arsta->txrate.nss = nss;
	arsta->txrate.bw = ath11k_mac_bw_to_mac80211_bw(bw);
	arsta->tx_duration += tx_duration;
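	/* Remember the last reported rate; arsta->txrate is reset on every PPDU. */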
	memcpy(&arsta->last_txrate, &arsta->txrate, sizeof(struct rate_info));

	/* PPDU stats reported for mgmt packet doesn't have valid tx bytes.
	 * So skip peer stats update for mgmt packets.
	 */
	if (tid < HTT_PPDU_STATS_NON_QOS_TID) {
		memset(peer_stats, 0, sizeof(*peer_stats));
		peer_stats->succ_pkts = succ_pkts;
		peer_stats->succ_bytes = succ_bytes;
		peer_stats->is_ampdu = is_ampdu;
		peer_stats->duration = tx_duration;
		peer_stats->ba_fails =
			HTT_USR_CMPLTN_LONG_RETRY(usr_stats->cmpltn_cmn.flags) +
			HTT_USR_CMPLTN_SHORT_RETRY(usr_stats->cmpltn_cmn.flags);

		if (ath11k_debugfs_is_extd_tx_stats_enabled(ar))
			ath11k_debugfs_sta_add_tx_stats(arsta, peer_stats, rate_idx);
	}

	spin_unlock_bh(&ab->base_lock);
	rcu_read_unlock();
}

static void ath11k_htt_update_ppdu_stats(struct ath11k *ar,
					 struct htt_ppdu_stats *ppdu_stats)
{
	u8 user;

	for (user = 0; user < HTT_PPDU_STATS_MAX_USERS - 1; user++)
		ath11k_update_per_peer_tx_stats(ar, ppdu_stats, user);
}

static
struct htt_ppdu_stats_info *ath11k_dp_htt_get_ppdu_desc(struct ath11k *ar,
							 u32 ppdu_id)
{
	struct htt_ppdu_stats_info *ppdu_info;

	spin_lock_bh(&ar->data_lock);
	if (!list_empty(&ar->ppdu_stats_info)) {
		list_for_each_entry(ppdu_info, &ar->ppdu_stats_info, list) {
			if (ppdu_info->ppdu_id == ppdu_id) {
				spin_unlock_bh(&ar->data_lock);
				return ppdu_info;
			}
		}

		if (ar->ppdu_stat_list_depth > HTT_PPDU_DESC_MAX_DEPTH) {
			ppdu_info = list_first_entry(&ar->ppdu_stats_info,
						     typeof(*ppdu_info), list);
			list_del(&ppdu_info->list);
			ar->ppdu_stat_list_depth--;
			ath11k_htt_update_ppdu_stats(ar, &ppdu_info->ppdu_stats);
			kfree(ppdu_info);
		}
	}
	spin_unlock_bh(&ar->data_lock);

	ppdu_info = kzalloc(sizeof(*ppdu_info), GFP_ATOMIC);
	if (!ppdu_info)
		return NULL;

	spin_lock_bh(&ar->data_lock);
	list_add_tail(&ppdu_info->list, &ar->ppdu_stats_info);
	ar->ppdu_stat_list_depth++;
	spin_unlock_bh(&ar->data_lock);

	return ppdu_info;
}

static int ath11k_htt_pull_ppdu_stats(struct ath11k_base *ab,
				      struct sk_buff *skb)
{
	struct ath11k_htt_ppdu_stats_msg *msg;
	struct htt_ppdu_stats_info *ppdu_info;
	struct ath11k *ar;
	int ret;
	u8 pdev_id;
	u32 ppdu_id, len;

	msg = (struct ath11k_htt_ppdu_stats_msg *)skb->data;
	len = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PAYLOAD_SIZE, msg->info);
	pdev_id = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PDEV_ID, msg->info);
	ppdu_id = msg->ppdu_id;

	rcu_read_lock();
	ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id);
	if (!ar) {
		ret = -EINVAL;
		goto exit;
	}

	if (ath11k_debugfs_is_pktlog_lite_mode_enabled(ar))
		trace_ath11k_htt_ppdu_stats(ar, skb->data, len);

	ppdu_info = ath11k_dp_htt_get_ppdu_desc(ar, ppdu_id);
	if (!ppdu_info) {
		ret = -EINVAL;
		goto exit;
	}

	ppdu_info->ppdu_id = ppdu_id;
	ret = ath11k_dp_htt_tlv_iter(ab, msg->data, len,
				     ath11k_htt_tlv_ppdu_stats_parse,
				     (void *)ppdu_info);
	if (ret) {
		ath11k_warn(ab, "Failed to parse tlv %d\n", ret);
		goto exit;
	}

exit:
	rcu_read_unlock();

	return ret;
}

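/* Hand a pktlog message from the target over to the ath11k_htt_pktlog
 * tracepoint for the pdev it belongs to.
 */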
static void ath11k_htt_pktlog(struct ath11k_base *ab, struct sk_buff *skb)
{
	struct htt_pktlog_msg *data = (struct htt_pktlog_msg *)skb->data;
	struct ath_pktlog_hdr *hdr = (struct ath_pktlog_hdr *)data;
	struct ath11k *ar;
	u8 pdev_id;

	pdev_id = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PDEV_ID, data->hdr);
	ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id);
	if (!ar) {
		ath11k_warn(ab, "invalid pdev id %d on htt pktlog\n", pdev_id);
		return;
	}

	trace_ath11k_htt_pktlog(ar, data->payload, hdr->size,
				ar->ab->pktlog_defs_checksum);
}

static void ath11k_htt_backpressure_event_handler(struct ath11k_base *ab,
						  struct sk_buff *skb)
{
	u32 *data = (u32 *)skb->data;
	u8 pdev_id, ring_type, ring_id, pdev_idx;
	u16 hp, tp;
	u32 backpressure_time;
	struct ath11k_bp_stats *bp_stats;

	pdev_id = FIELD_GET(HTT_BACKPRESSURE_EVENT_PDEV_ID_M, *data);
	ring_type = FIELD_GET(HTT_BACKPRESSURE_EVENT_RING_TYPE_M, *data);
	ring_id = FIELD_GET(HTT_BACKPRESSURE_EVENT_RING_ID_M, *data);
	++data;

	hp = FIELD_GET(HTT_BACKPRESSURE_EVENT_HP_M, *data);
	tp = FIELD_GET(HTT_BACKPRESSURE_EVENT_TP_M, *data);
	++data;

	backpressure_time = *data;

	ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "htt backpressure event, pdev %d, ring type %d, ring id %d, hp %d tp %d, backpressure time %d\n",
		   pdev_id, ring_type, ring_id, hp, tp, backpressure_time);

	if (ring_type == HTT_BACKPRESSURE_UMAC_RING_TYPE) {
		if (ring_id >= HTT_SW_UMAC_RING_IDX_MAX)
			return;

		bp_stats = &ab->soc_stats.bp_stats.umac_ring_bp_stats[ring_id];
	} else if (ring_type == HTT_BACKPRESSURE_LMAC_RING_TYPE) {
		pdev_idx = DP_HW2SW_MACID(pdev_id);

		if (ring_id >= HTT_SW_LMAC_RING_IDX_MAX || pdev_idx >= MAX_RADIOS)
			return;

		bp_stats = &ab->soc_stats.bp_stats.lmac_ring_bp_stats[ring_id][pdev_idx];
	} else {
		ath11k_warn(ab, "unknown ring type received in htt bp event %d\n",
			    ring_type);
		return;
	}

	spin_lock_bh(&ab->base_lock);
	bp_stats->hp = hp;
	bp_stats->tp = tp;
	bp_stats->count++;
	bp_stats->jiffies = jiffies;
	spin_unlock_bh(&ab->base_lock);
}

void ath11k_dp_htt_htc_t2h_msg_handler(struct ath11k_base *ab,
				       struct sk_buff *skb)
{
	struct ath11k_dp *dp = &ab->dp;
	struct htt_resp_msg *resp = (struct htt_resp_msg *)skb->data;
	enum htt_t2h_msg_type type = FIELD_GET(HTT_T2H_MSG_TYPE, *(u32 *)resp);
	u16 peer_id;
	u8 vdev_id;
	u8 mac_addr[ETH_ALEN];
	u16 peer_mac_h16;
	u16 ast_hash;
	u16 hw_peer_id;

	ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "dp_htt rx msg type :0x%0x\n", type);

	switch (type) {
	case HTT_T2H_MSG_TYPE_VERSION_CONF:
		dp->htt_tgt_ver_major = FIELD_GET(HTT_T2H_VERSION_CONF_MAJOR,
						  resp->version_msg.version);
		dp->htt_tgt_ver_minor = FIELD_GET(HTT_T2H_VERSION_CONF_MINOR,
						  resp->version_msg.version);
		complete(&dp->htt_tgt_version_received);
		break;
	case HTT_T2H_MSG_TYPE_PEER_MAP:
		vdev_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_VDEV_ID,
				    resp->peer_map_ev.info);
		peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_PEER_ID,
				    resp->peer_map_ev.info);
		peer_mac_h16 = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16,
					 resp->peer_map_ev.info1);
		ath11k_dp_get_mac_addr(resp->peer_map_ev.mac_addr_l32,
				       peer_mac_h16, mac_addr);
		ath11k_peer_map_event(ab, vdev_id, peer_id, mac_addr, 0, 0);
		break;
	case HTT_T2H_MSG_TYPE_PEER_MAP2:
		vdev_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_VDEV_ID,
				    resp->peer_map_ev.info);
		peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_PEER_ID,
				    resp->peer_map_ev.info);
		peer_mac_h16 = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16,
					 resp->peer_map_ev.info1);
		ath11k_dp_get_mac_addr(resp->peer_map_ev.mac_addr_l32,
				       peer_mac_h16, mac_addr);
		ast_hash = FIELD_GET(HTT_T2H_PEER_MAP_INFO2_AST_HASH_VAL,
				     resp->peer_map_ev.info2);
		hw_peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_HW_PEER_ID,
				       resp->peer_map_ev.info1);
		ath11k_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash,
				      hw_peer_id);
		break;
	case HTT_T2H_MSG_TYPE_PEER_UNMAP:
	case HTT_T2H_MSG_TYPE_PEER_UNMAP2:
		peer_id = FIELD_GET(HTT_T2H_PEER_UNMAP_INFO_PEER_ID,
				    resp->peer_unmap_ev.info);
		ath11k_peer_unmap_event(ab, peer_id);
		break;
	case HTT_T2H_MSG_TYPE_PPDU_STATS_IND:
		ath11k_htt_pull_ppdu_stats(ab, skb);
		break;
	case HTT_T2H_MSG_TYPE_EXT_STATS_CONF:
		ath11k_debugfs_htt_ext_stats_handler(ab, skb);
		break;
	case HTT_T2H_MSG_TYPE_PKTLOG:
		ath11k_htt_pktlog(ab, skb);
		break;
	case HTT_T2H_MSG_TYPE_BKPRESSURE_EVENT_IND:
		ath11k_htt_backpressure_event_handler(ab, skb);
		break;
	default:
		ath11k_warn(ab, "htt event %d not handled\n", type);
		break;
	}

	dev_kfree_skb_any(skb);
}

static int ath11k_dp_rx_msdu_coalesce(struct ath11k *ar,
				      struct sk_buff_head *msdu_list,
				      struct sk_buff *first, struct sk_buff *last,
				      u8 l3pad_bytes, int msdu_len)
{
	struct sk_buff *skb;
	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(first);
	int buf_first_hdr_len, buf_first_len;
	struct hal_rx_desc *ldesc;
	int space_extra;
	int rem_len;
	int buf_len;

	/* As the msdu is spread across multiple rx buffers,
	 * find the offset to the start of msdu for computing
	 * the length of the msdu in the first buffer.
	 */
	buf_first_hdr_len = HAL_RX_DESC_SIZE + l3pad_bytes;
	buf_first_len = DP_RX_BUFFER_SIZE - buf_first_hdr_len;

	if (WARN_ON_ONCE(msdu_len <= buf_first_len)) {
		skb_put(first, buf_first_hdr_len + msdu_len);
		skb_pull(first, buf_first_hdr_len);
		return 0;
	}

	ldesc = (struct hal_rx_desc *)last->data;
	rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(ldesc);
	rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(ldesc);

	/* MSDU spans over multiple buffers because the length of the MSDU
	 * exceeds DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE. So assume the data
	 * in the first buf is of length DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE.
	 */
	skb_put(first, DP_RX_BUFFER_SIZE);
	skb_pull(first, buf_first_hdr_len);

	/* When an MSDU is spread over multiple buffers, the attention, MSDU_END
	 * and MPDU_END TLVs are valid only in the last buffer. Copy those TLVs.
	 */
	ath11k_dp_rx_desc_end_tlv_copy(rxcb->rx_desc, ldesc);

	space_extra = msdu_len - (buf_first_len + skb_tailroom(first));
	if (space_extra > 0 &&
	    (pskb_expand_head(first, 0, space_extra, GFP_ATOMIC) < 0)) {
		/* Free up all buffers of the MSDU */
		while ((skb = __skb_dequeue(msdu_list)) != NULL) {
			rxcb = ATH11K_SKB_RXCB(skb);
			if (!rxcb->is_continuation) {
				dev_kfree_skb_any(skb);
				break;
			}
			dev_kfree_skb_any(skb);
		}
		return -ENOMEM;
	}

	rem_len = msdu_len - buf_first_len;
	while ((skb = __skb_dequeue(msdu_list)) != NULL && rem_len > 0) {
		rxcb = ATH11K_SKB_RXCB(skb);
		if (rxcb->is_continuation)
			buf_len = DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE;
		else
			buf_len = rem_len;

		if (buf_len > (DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE)) {
			WARN_ON_ONCE(1);
			dev_kfree_skb_any(skb);
			return -EINVAL;
		}

		skb_put(skb, buf_len + HAL_RX_DESC_SIZE);
		skb_pull(skb, HAL_RX_DESC_SIZE);
		skb_copy_from_linear_data(skb, skb_put(first, buf_len),
					  buf_len);
		dev_kfree_skb_any(skb);

		rem_len -= buf_len;
		if (!rxcb->is_continuation)
			break;
	}

	return 0;
}

static struct sk_buff *ath11k_dp_rx_get_msdu_last_buf(struct sk_buff_head *msdu_list,
						      struct sk_buff *first)
{
	struct sk_buff *skb;
	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(first);

	if (!rxcb->is_continuation)
		return first;

	skb_queue_walk(msdu_list, skb) {
		rxcb = ATH11K_SKB_RXCB(skb);
		if (!rxcb->is_continuation)
			return skb;
	}

	return NULL;
}

static void ath11k_dp_rx_h_csum_offload(struct sk_buff *msdu)
{
	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
	bool ip_csum_fail, l4_csum_fail;

	ip_csum_fail = ath11k_dp_rx_h_attn_ip_cksum_fail(rxcb->rx_desc);
	l4_csum_fail = ath11k_dp_rx_h_attn_l4_cksum_fail(rxcb->rx_desc);

	msdu->ip_summed = (ip_csum_fail || l4_csum_fail) ?
			  CHECKSUM_NONE : CHECKSUM_UNNECESSARY;
}

static int ath11k_dp_rx_crypto_mic_len(struct ath11k *ar,
				       enum hal_encrypt_type enctype)
{
	switch (enctype) {
	case HAL_ENCRYPT_TYPE_OPEN:
	case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
	case HAL_ENCRYPT_TYPE_TKIP_MIC:
		return 0;
	case HAL_ENCRYPT_TYPE_CCMP_128:
		return IEEE80211_CCMP_MIC_LEN;
	case HAL_ENCRYPT_TYPE_CCMP_256:
		return IEEE80211_CCMP_256_MIC_LEN;
	case HAL_ENCRYPT_TYPE_GCMP_128:
	case HAL_ENCRYPT_TYPE_AES_GCMP_256:
		return IEEE80211_GCMP_MIC_LEN;
	case HAL_ENCRYPT_TYPE_WEP_40:
	case HAL_ENCRYPT_TYPE_WEP_104:
	case HAL_ENCRYPT_TYPE_WEP_128:
	case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
	case HAL_ENCRYPT_TYPE_WAPI:
		break;
	}

	ath11k_warn(ar->ab, "unsupported encryption type %d for mic len\n", enctype);
	return 0;
}

static int ath11k_dp_rx_crypto_param_len(struct ath11k *ar,
					 enum hal_encrypt_type enctype)
{
	switch (enctype) {
	case HAL_ENCRYPT_TYPE_OPEN:
		return 0;
	case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
	case HAL_ENCRYPT_TYPE_TKIP_MIC:
		return IEEE80211_TKIP_IV_LEN;
	case HAL_ENCRYPT_TYPE_CCMP_128:
		return IEEE80211_CCMP_HDR_LEN;
	case HAL_ENCRYPT_TYPE_CCMP_256:
		return IEEE80211_CCMP_256_HDR_LEN;
	case HAL_ENCRYPT_TYPE_GCMP_128:
	case HAL_ENCRYPT_TYPE_AES_GCMP_256:
		return IEEE80211_GCMP_HDR_LEN;
	case HAL_ENCRYPT_TYPE_WEP_40:
	case HAL_ENCRYPT_TYPE_WEP_104:
	case HAL_ENCRYPT_TYPE_WEP_128:
	case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
	case HAL_ENCRYPT_TYPE_WAPI:
		break;
	}

	ath11k_warn(ar->ab, "unsupported encryption type %d\n", enctype);
	return 0;
}

static int ath11k_dp_rx_crypto_icv_len(struct ath11k *ar,
				       enum hal_encrypt_type enctype)
{
	switch (enctype) {
	case HAL_ENCRYPT_TYPE_OPEN:
	case HAL_ENCRYPT_TYPE_CCMP_128:
	case HAL_ENCRYPT_TYPE_CCMP_256:
	case HAL_ENCRYPT_TYPE_GCMP_128:
	case HAL_ENCRYPT_TYPE_AES_GCMP_256:
		return 0;
	case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
	case HAL_ENCRYPT_TYPE_TKIP_MIC:
		return IEEE80211_TKIP_ICV_LEN;
	case HAL_ENCRYPT_TYPE_WEP_40:
	case HAL_ENCRYPT_TYPE_WEP_104:
	case HAL_ENCRYPT_TYPE_WEP_128:
	case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
	case HAL_ENCRYPT_TYPE_WAPI:
		break;
	}

	ath11k_warn(ar->ab, "unsupported encryption type %d\n", enctype);
	return 0;
}

static void ath11k_dp_rx_h_undecap_nwifi(struct ath11k *ar,
					 struct sk_buff *msdu,
					 u8 *first_hdr,
					 enum hal_encrypt_type enctype,
					 struct ieee80211_rx_status *status)
{
	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
	u8 decap_hdr[DP_MAX_NWIFI_HDR_LEN];
	struct ieee80211_hdr *hdr;
	size_t hdr_len;
	u8 da[ETH_ALEN];
	u8 sa[ETH_ALEN];
	u16 qos_ctl = 0;
	u8 *qos;

	/* copy SA & DA and pull decapped header */
	hdr = (struct ieee80211_hdr *)msdu->data;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);
	ether_addr_copy(da, ieee80211_get_DA(hdr));
	ether_addr_copy(sa, ieee80211_get_SA(hdr));
	skb_pull(msdu, ieee80211_hdrlen(hdr->frame_control));

	if (rxcb->is_first_msdu) {
		/* original 802.11 header is valid for the first msdu
		 * hence we can reuse the same header
		 */
		hdr = (struct ieee80211_hdr *)first_hdr;
		hdr_len = ieee80211_hdrlen(hdr->frame_control);

		/* Each A-MSDU subframe will be reported as a separate MSDU,
		 * so strip the A-MSDU bit from QoS Ctl.
		 */
* so strip the A-MSDU bit from QoS Ctl. 1946 */ 1947 if (ieee80211_is_data_qos(hdr->frame_control)) { 1948 qos = ieee80211_get_qos_ctl(hdr); 1949 qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT; 1950 } 1951 } else { 1952 /* Rebuild qos header if this is a middle/last msdu */ 1953 hdr->frame_control |= __cpu_to_le16(IEEE80211_STYPE_QOS_DATA); 1954 1955 /* Reset the order bit as the HT_Control header is stripped */ 1956 hdr->frame_control &= ~(__cpu_to_le16(IEEE80211_FCTL_ORDER)); 1957 1958 qos_ctl = rxcb->tid; 1959 1960 if (ath11k_dp_rx_h_msdu_start_mesh_ctl_present(rxcb->rx_desc)) 1961 qos_ctl |= IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT; 1962 1963 /* TODO Add other QoS ctl fields when required */ 1964 1965 /* copy decap header before overwriting for reuse below */ 1966 memcpy(decap_hdr, (uint8_t *)hdr, hdr_len); 1967 } 1968 1969 if (!(status->flag & RX_FLAG_IV_STRIPPED)) { 1970 memcpy(skb_push(msdu, 1971 ath11k_dp_rx_crypto_param_len(ar, enctype)), 1972 (void *)hdr + hdr_len, 1973 ath11k_dp_rx_crypto_param_len(ar, enctype)); 1974 } 1975 1976 if (!rxcb->is_first_msdu) { 1977 memcpy(skb_push(msdu, 1978 IEEE80211_QOS_CTL_LEN), &qos_ctl, 1979 IEEE80211_QOS_CTL_LEN); 1980 memcpy(skb_push(msdu, hdr_len), decap_hdr, hdr_len); 1981 return; 1982 } 1983 1984 memcpy(skb_push(msdu, hdr_len), hdr, hdr_len); 1985 1986 /* original 802.11 header has a different DA and in 1987 * case of 4addr it may also have different SA 1988 */ 1989 hdr = (struct ieee80211_hdr *)msdu->data; 1990 ether_addr_copy(ieee80211_get_DA(hdr), da); 1991 ether_addr_copy(ieee80211_get_SA(hdr), sa); 1992 } 1993 1994 static void ath11k_dp_rx_h_undecap_raw(struct ath11k *ar, struct sk_buff *msdu, 1995 enum hal_encrypt_type enctype, 1996 struct ieee80211_rx_status *status, 1997 bool decrypted) 1998 { 1999 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); 2000 struct ieee80211_hdr *hdr; 2001 size_t hdr_len; 2002 size_t crypto_len; 2003 2004 if (!rxcb->is_first_msdu || 2005 !(rxcb->is_first_msdu && rxcb->is_last_msdu)) { 2006 WARN_ON_ONCE(1); 2007 return; 2008 } 2009 2010 skb_trim(msdu, msdu->len - FCS_LEN); 2011 2012 if (!decrypted) 2013 return; 2014 2015 hdr = (void *)msdu->data; 2016 2017 /* Tail */ 2018 if (status->flag & RX_FLAG_IV_STRIPPED) { 2019 skb_trim(msdu, msdu->len - 2020 ath11k_dp_rx_crypto_mic_len(ar, enctype)); 2021 2022 skb_trim(msdu, msdu->len - 2023 ath11k_dp_rx_crypto_icv_len(ar, enctype)); 2024 } else { 2025 /* MIC */ 2026 if (status->flag & RX_FLAG_MIC_STRIPPED) 2027 skb_trim(msdu, msdu->len - 2028 ath11k_dp_rx_crypto_mic_len(ar, enctype)); 2029 2030 /* ICV */ 2031 if (status->flag & RX_FLAG_ICV_STRIPPED) 2032 skb_trim(msdu, msdu->len - 2033 ath11k_dp_rx_crypto_icv_len(ar, enctype)); 2034 } 2035 2036 /* MMIC */ 2037 if ((status->flag & RX_FLAG_MMIC_STRIPPED) && 2038 !ieee80211_has_morefrags(hdr->frame_control) && 2039 enctype == HAL_ENCRYPT_TYPE_TKIP_MIC) 2040 skb_trim(msdu, msdu->len - IEEE80211_CCMP_MIC_LEN); 2041 2042 /* Head */ 2043 if (status->flag & RX_FLAG_IV_STRIPPED) { 2044 hdr_len = ieee80211_hdrlen(hdr->frame_control); 2045 crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype); 2046 2047 memmove((void *)msdu->data + crypto_len, 2048 (void *)msdu->data, hdr_len); 2049 skb_pull(msdu, crypto_len); 2050 } 2051 } 2052 2053 static void *ath11k_dp_rx_h_find_rfc1042(struct ath11k *ar, 2054 struct sk_buff *msdu, 2055 enum hal_encrypt_type enctype) 2056 { 2057 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); 2058 struct ieee80211_hdr *hdr; 2059 size_t hdr_len, crypto_len; 2060 void *rfc1042; 2061 bool is_amsdu; 
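/* Descriptive note (added): locate the LLC/SNAP (RFC 1042) header of the subframe by starting from the 802.11 header saved in the rx descriptor, skipping that header and the crypto IV for the first MSDU, and additionally the A-MSDU subframe header when the frame is part of an A-MSDU. */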
2062 2063 is_amsdu = !(rxcb->is_first_msdu && rxcb->is_last_msdu); 2064 hdr = (struct ieee80211_hdr *)ath11k_dp_rx_h_80211_hdr(rxcb->rx_desc); 2065 rfc1042 = hdr; 2066 2067 if (rxcb->is_first_msdu) { 2068 hdr_len = ieee80211_hdrlen(hdr->frame_control); 2069 crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype); 2070 2071 rfc1042 += hdr_len + crypto_len; 2072 } 2073 2074 if (is_amsdu) 2075 rfc1042 += sizeof(struct ath11k_dp_amsdu_subframe_hdr); 2076 2077 return rfc1042; 2078 } 2079 2080 static void ath11k_dp_rx_h_undecap_eth(struct ath11k *ar, 2081 struct sk_buff *msdu, 2082 u8 *first_hdr, 2083 enum hal_encrypt_type enctype, 2084 struct ieee80211_rx_status *status) 2085 { 2086 struct ieee80211_hdr *hdr; 2087 struct ethhdr *eth; 2088 size_t hdr_len; 2089 u8 da[ETH_ALEN]; 2090 u8 sa[ETH_ALEN]; 2091 void *rfc1042; 2092 2093 rfc1042 = ath11k_dp_rx_h_find_rfc1042(ar, msdu, enctype); 2094 if (WARN_ON_ONCE(!rfc1042)) 2095 return; 2096 2097 /* pull decapped header and copy SA & DA */ 2098 eth = (struct ethhdr *)msdu->data; 2099 ether_addr_copy(da, eth->h_dest); 2100 ether_addr_copy(sa, eth->h_source); 2101 skb_pull(msdu, sizeof(struct ethhdr)); 2102 2103 /* push rfc1042/llc/snap */ 2104 memcpy(skb_push(msdu, sizeof(struct ath11k_dp_rfc1042_hdr)), rfc1042, 2105 sizeof(struct ath11k_dp_rfc1042_hdr)); 2106 2107 /* push original 802.11 header */ 2108 hdr = (struct ieee80211_hdr *)first_hdr; 2109 hdr_len = ieee80211_hdrlen(hdr->frame_control); 2110 2111 if (!(status->flag & RX_FLAG_IV_STRIPPED)) { 2112 memcpy(skb_push(msdu, 2113 ath11k_dp_rx_crypto_param_len(ar, enctype)), 2114 (void *)hdr + hdr_len, 2115 ath11k_dp_rx_crypto_param_len(ar, enctype)); 2116 } 2117 2118 memcpy(skb_push(msdu, hdr_len), hdr, hdr_len); 2119 2120 /* original 802.11 header has a different DA and in 2121 * case of 4addr it may also have different SA 2122 */ 2123 hdr = (struct ieee80211_hdr *)msdu->data; 2124 ether_addr_copy(ieee80211_get_DA(hdr), da); 2125 ether_addr_copy(ieee80211_get_SA(hdr), sa); 2126 } 2127 2128 static void ath11k_dp_rx_h_undecap(struct ath11k *ar, struct sk_buff *msdu, 2129 struct hal_rx_desc *rx_desc, 2130 enum hal_encrypt_type enctype, 2131 struct ieee80211_rx_status *status, 2132 bool decrypted) 2133 { 2134 u8 *first_hdr; 2135 u8 decap; 2136 2137 first_hdr = ath11k_dp_rx_h_80211_hdr(rx_desc); 2138 decap = ath11k_dp_rx_h_msdu_start_decap_type(rx_desc); 2139 2140 switch (decap) { 2141 case DP_RX_DECAP_TYPE_NATIVE_WIFI: 2142 ath11k_dp_rx_h_undecap_nwifi(ar, msdu, first_hdr, 2143 enctype, status); 2144 break; 2145 case DP_RX_DECAP_TYPE_RAW: 2146 ath11k_dp_rx_h_undecap_raw(ar, msdu, enctype, status, 2147 decrypted); 2148 break; 2149 case DP_RX_DECAP_TYPE_ETHERNET2_DIX: 2150 /* TODO undecap support for middle/last msdu's of amsdu */ 2151 ath11k_dp_rx_h_undecap_eth(ar, msdu, first_hdr, 2152 enctype, status); 2153 break; 2154 case DP_RX_DECAP_TYPE_8023: 2155 /* TODO: Handle undecap for these formats */ 2156 break; 2157 } 2158 } 2159 2160 static void ath11k_dp_rx_h_mpdu(struct ath11k *ar, 2161 struct sk_buff *msdu, 2162 struct hal_rx_desc *rx_desc, 2163 struct ieee80211_rx_status *rx_status) 2164 { 2165 bool fill_crypto_hdr, mcast; 2166 enum hal_encrypt_type enctype; 2167 bool is_decrypted = false; 2168 struct ieee80211_hdr *hdr; 2169 struct ath11k_peer *peer; 2170 u32 err_bitmap; 2171 2172 hdr = (struct ieee80211_hdr *)msdu->data; 2173 2174 /* PN for multicast packets will be checked in mac80211 */ 2175 2176 mcast = is_multicast_ether_addr(hdr->addr1); 2177 fill_crypto_hdr = mcast; 2178 2179 
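/* Descriptive note (added): look up the transmitting peer under base_lock to pick the pairwise or group cipher for this MPDU; fall back to open (unencrypted) handling when the peer is unknown. */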
spin_lock_bh(&ar->ab->base_lock); 2180 peer = ath11k_peer_find_by_addr(ar->ab, hdr->addr2); 2181 if (peer) { 2182 if (mcast) 2183 enctype = peer->sec_type_grp; 2184 else 2185 enctype = peer->sec_type; 2186 } else { 2187 enctype = HAL_ENCRYPT_TYPE_OPEN; 2188 } 2189 spin_unlock_bh(&ar->ab->base_lock); 2190 2191 err_bitmap = ath11k_dp_rx_h_attn_mpdu_err(rx_desc); 2192 if (enctype != HAL_ENCRYPT_TYPE_OPEN && !err_bitmap) 2193 is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_desc); 2194 2195 /* Clear per-MPDU flags while leaving per-PPDU flags intact */ 2196 rx_status->flag &= ~(RX_FLAG_FAILED_FCS_CRC | 2197 RX_FLAG_MMIC_ERROR | 2198 RX_FLAG_DECRYPTED | 2199 RX_FLAG_IV_STRIPPED | 2200 RX_FLAG_MMIC_STRIPPED); 2201 2202 if (err_bitmap & DP_RX_MPDU_ERR_FCS) 2203 rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; 2204 if (err_bitmap & DP_RX_MPDU_ERR_TKIP_MIC) 2205 rx_status->flag |= RX_FLAG_MMIC_ERROR; 2206 2207 if (is_decrypted) { 2208 rx_status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MMIC_STRIPPED; 2209 2210 if (fill_crypto_hdr) 2211 rx_status->flag |= RX_FLAG_MIC_STRIPPED | 2212 RX_FLAG_ICV_STRIPPED; 2213 else 2214 rx_status->flag |= RX_FLAG_IV_STRIPPED | 2215 RX_FLAG_PN_VALIDATED; 2216 } 2217 2218 ath11k_dp_rx_h_csum_offload(msdu); 2219 ath11k_dp_rx_h_undecap(ar, msdu, rx_desc, 2220 enctype, rx_status, is_decrypted); 2221 2222 if (!is_decrypted || fill_crypto_hdr) 2223 return; 2224 2225 hdr = (void *)msdu->data; 2226 hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED); 2227 } 2228 2229 static void ath11k_dp_rx_h_rate(struct ath11k *ar, struct hal_rx_desc *rx_desc, 2230 struct ieee80211_rx_status *rx_status) 2231 { 2232 struct ieee80211_supported_band *sband; 2233 enum rx_msdu_start_pkt_type pkt_type; 2234 u8 bw; 2235 u8 rate_mcs, nss; 2236 u8 sgi; 2237 bool is_cck; 2238 2239 pkt_type = ath11k_dp_rx_h_msdu_start_pkt_type(rx_desc); 2240 bw = ath11k_dp_rx_h_msdu_start_rx_bw(rx_desc); 2241 rate_mcs = ath11k_dp_rx_h_msdu_start_rate_mcs(rx_desc); 2242 nss = ath11k_dp_rx_h_msdu_start_nss(rx_desc); 2243 sgi = ath11k_dp_rx_h_msdu_start_sgi(rx_desc); 2244 2245 switch (pkt_type) { 2246 case RX_MSDU_START_PKT_TYPE_11A: 2247 case RX_MSDU_START_PKT_TYPE_11B: 2248 is_cck = (pkt_type == RX_MSDU_START_PKT_TYPE_11B); 2249 sband = &ar->mac.sbands[rx_status->band]; 2250 rx_status->rate_idx = ath11k_mac_hw_rate_to_idx(sband, rate_mcs, 2251 is_cck); 2252 break; 2253 case RX_MSDU_START_PKT_TYPE_11N: 2254 rx_status->encoding = RX_ENC_HT; 2255 if (rate_mcs > ATH11K_HT_MCS_MAX) { 2256 ath11k_warn(ar->ab, 2257 "Received with invalid mcs in HT mode %d\n", 2258 rate_mcs); 2259 break; 2260 } 2261 rx_status->rate_idx = rate_mcs + (8 * (nss - 1)); 2262 if (sgi) 2263 rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI; 2264 rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw); 2265 break; 2266 case RX_MSDU_START_PKT_TYPE_11AC: 2267 rx_status->encoding = RX_ENC_VHT; 2268 rx_status->rate_idx = rate_mcs; 2269 if (rate_mcs > ATH11K_VHT_MCS_MAX) { 2270 ath11k_warn(ar->ab, 2271 "Received with invalid mcs in VHT mode %d\n", 2272 rate_mcs); 2273 break; 2274 } 2275 rx_status->nss = nss; 2276 if (sgi) 2277 rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI; 2278 rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw); 2279 break; 2280 case RX_MSDU_START_PKT_TYPE_11AX: 2281 rx_status->rate_idx = rate_mcs; 2282 if (rate_mcs > ATH11K_HE_MCS_MAX) { 2283 ath11k_warn(ar->ab, 2284 "Received with invalid mcs in HE mode %d\n", 2285 rate_mcs); 2286 break; 2287 } 2288 rx_status->encoding = RX_ENC_HE; 2289 rx_status->nss = nss; 2290 rx_status->he_gi = 
ath11k_he_gi_to_nl80211_he_gi(sgi); 2291 rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw); 2292 break; 2293 } 2294 } 2295 2296 static void ath11k_dp_rx_h_ppdu(struct ath11k *ar, struct hal_rx_desc *rx_desc, 2297 struct ieee80211_rx_status *rx_status) 2298 { 2299 u8 channel_num; 2300 u32 center_freq; 2301 struct ieee80211_channel *channel; 2302 2303 rx_status->freq = 0; 2304 rx_status->rate_idx = 0; 2305 rx_status->nss = 0; 2306 rx_status->encoding = RX_ENC_LEGACY; 2307 rx_status->bw = RATE_INFO_BW_20; 2308 2309 rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL; 2310 2311 channel_num = ath11k_dp_rx_h_msdu_start_freq(rx_desc); 2312 center_freq = ath11k_dp_rx_h_msdu_start_freq(rx_desc) >> 16; 2313 2314 if (center_freq >= 5935 && center_freq <= 7105) { 2315 rx_status->band = NL80211_BAND_6GHZ; 2316 } else if (channel_num >= 1 && channel_num <= 14) { 2317 rx_status->band = NL80211_BAND_2GHZ; 2318 } else if (channel_num >= 36 && channel_num <= 173) { 2319 rx_status->band = NL80211_BAND_5GHZ; 2320 } else { 2321 spin_lock_bh(&ar->data_lock); 2322 channel = ar->rx_channel; 2323 if (channel) { 2324 rx_status->band = channel->band; 2325 channel_num = 2326 ieee80211_frequency_to_channel(channel->center_freq); 2327 } 2328 spin_unlock_bh(&ar->data_lock); 2329 ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "rx_desc: ", 2330 rx_desc, sizeof(struct hal_rx_desc)); 2331 } 2332 2333 rx_status->freq = ieee80211_channel_to_frequency(channel_num, 2334 rx_status->band); 2335 2336 ath11k_dp_rx_h_rate(ar, rx_desc, rx_status); 2337 } 2338 2339 static char *ath11k_print_get_tid(struct ieee80211_hdr *hdr, char *out, 2340 size_t size) 2341 { 2342 u8 *qc; 2343 int tid; 2344 2345 if (!ieee80211_is_data_qos(hdr->frame_control)) 2346 return ""; 2347 2348 qc = ieee80211_get_qos_ctl(hdr); 2349 tid = *qc & IEEE80211_QOS_CTL_TID_MASK; 2350 snprintf(out, size, "tid %d", tid); 2351 2352 return out; 2353 } 2354 2355 static void ath11k_dp_rx_deliver_msdu(struct ath11k *ar, struct napi_struct *napi, 2356 struct sk_buff *msdu) 2357 { 2358 static const struct ieee80211_radiotap_he known = { 2359 .data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN | 2360 IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN), 2361 .data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN), 2362 }; 2363 struct ieee80211_rx_status *status; 2364 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data; 2365 struct ieee80211_radiotap_he *he = NULL; 2366 char tid[32]; 2367 2368 status = IEEE80211_SKB_RXCB(msdu); 2369 if (status->encoding == RX_ENC_HE) { 2370 he = skb_push(msdu, sizeof(known)); 2371 memcpy(he, &known, sizeof(known)); 2372 status->flag |= RX_FLAG_RADIOTAP_HE; 2373 } 2374 2375 ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 2376 "rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n", 2377 msdu, 2378 msdu->len, 2379 ieee80211_get_SA(hdr), 2380 ath11k_print_get_tid(hdr, tid, sizeof(tid)), 2381 is_multicast_ether_addr(ieee80211_get_DA(hdr)) ? 2382 "mcast" : "ucast", 2383 (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4, 2384 (status->encoding == RX_ENC_LEGACY) ? "legacy" : "", 2385 (status->encoding == RX_ENC_HT) ? "ht" : "", 2386 (status->encoding == RX_ENC_VHT) ? "vht" : "", 2387 (status->encoding == RX_ENC_HE) ? "he" : "", 2388 (status->bw == RATE_INFO_BW_40) ? "40" : "", 2389 (status->bw == RATE_INFO_BW_80) ? "80" : "", 2390 (status->bw == RATE_INFO_BW_160) ? "160" : "", 2391 status->enc_flags & RX_ENC_FLAG_SHORT_GI ? 
"sgi " : "", 2392 status->rate_idx, 2393 status->nss, 2394 status->freq, 2395 status->band, status->flag, 2396 !!(status->flag & RX_FLAG_FAILED_FCS_CRC), 2397 !!(status->flag & RX_FLAG_MMIC_ERROR), 2398 !!(status->flag & RX_FLAG_AMSDU_MORE)); 2399 2400 ath11k_dbg_dump(ar->ab, ATH11K_DBG_DP_RX, NULL, "dp rx msdu: ", 2401 msdu->data, msdu->len); 2402 2403 /* TODO: trace rx packet */ 2404 2405 ieee80211_rx_napi(ar->hw, NULL, msdu, napi); 2406 } 2407 2408 static int ath11k_dp_rx_process_msdu(struct ath11k *ar, 2409 struct sk_buff *msdu, 2410 struct sk_buff_head *msdu_list) 2411 { 2412 struct hal_rx_desc *rx_desc, *lrx_desc; 2413 struct ieee80211_rx_status rx_status = {0}; 2414 struct ieee80211_rx_status *status; 2415 struct ath11k_skb_rxcb *rxcb; 2416 struct ieee80211_hdr *hdr; 2417 struct sk_buff *last_buf; 2418 u8 l3_pad_bytes; 2419 u8 *hdr_status; 2420 u16 msdu_len; 2421 int ret; 2422 2423 last_buf = ath11k_dp_rx_get_msdu_last_buf(msdu_list, msdu); 2424 if (!last_buf) { 2425 ath11k_warn(ar->ab, 2426 "No valid Rx buffer to access Atten/MSDU_END/MPDU_END tlvs\n"); 2427 ret = -EIO; 2428 goto free_out; 2429 } 2430 2431 rx_desc = (struct hal_rx_desc *)msdu->data; 2432 lrx_desc = (struct hal_rx_desc *)last_buf->data; 2433 if (!ath11k_dp_rx_h_attn_msdu_done(lrx_desc)) { 2434 ath11k_warn(ar->ab, "msdu_done bit in attention is not set\n"); 2435 ret = -EIO; 2436 goto free_out; 2437 } 2438 2439 rxcb = ATH11K_SKB_RXCB(msdu); 2440 rxcb->rx_desc = rx_desc; 2441 msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(rx_desc); 2442 l3_pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(lrx_desc); 2443 2444 if (rxcb->is_frag) { 2445 skb_pull(msdu, HAL_RX_DESC_SIZE); 2446 } else if (!rxcb->is_continuation) { 2447 if ((msdu_len + HAL_RX_DESC_SIZE) > DP_RX_BUFFER_SIZE) { 2448 hdr_status = ath11k_dp_rx_h_80211_hdr(rx_desc); 2449 ret = -EINVAL; 2450 ath11k_warn(ar->ab, "invalid msdu len %u\n", msdu_len); 2451 ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "", hdr_status, 2452 sizeof(struct ieee80211_hdr)); 2453 ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "", rx_desc, 2454 sizeof(struct hal_rx_desc)); 2455 goto free_out; 2456 } 2457 skb_put(msdu, HAL_RX_DESC_SIZE + l3_pad_bytes + msdu_len); 2458 skb_pull(msdu, HAL_RX_DESC_SIZE + l3_pad_bytes); 2459 } else { 2460 ret = ath11k_dp_rx_msdu_coalesce(ar, msdu_list, 2461 msdu, last_buf, 2462 l3_pad_bytes, msdu_len); 2463 if (ret) { 2464 ath11k_warn(ar->ab, 2465 "failed to coalesce msdu rx buffer%d\n", ret); 2466 goto free_out; 2467 } 2468 } 2469 2470 hdr = (struct ieee80211_hdr *)msdu->data; 2471 2472 /* Process only data frames */ 2473 if (!ieee80211_is_data(hdr->frame_control)) 2474 return -EINVAL; 2475 2476 ath11k_dp_rx_h_ppdu(ar, rx_desc, &rx_status); 2477 ath11k_dp_rx_h_mpdu(ar, msdu, rx_desc, &rx_status); 2478 2479 rx_status.flag |= RX_FLAG_SKIP_MONITOR | RX_FLAG_DUP_VALIDATED; 2480 2481 status = IEEE80211_SKB_RXCB(msdu); 2482 *status = rx_status; 2483 return 0; 2484 2485 free_out: 2486 return ret; 2487 } 2488 2489 static void ath11k_dp_rx_process_received_packets(struct ath11k_base *ab, 2490 struct napi_struct *napi, 2491 struct sk_buff_head *msdu_list, 2492 int *quota, int ring_id) 2493 { 2494 struct ath11k_skb_rxcb *rxcb; 2495 struct sk_buff *msdu; 2496 struct ath11k *ar; 2497 u8 mac_id; 2498 int ret; 2499 2500 if (skb_queue_empty(msdu_list)) 2501 return; 2502 2503 rcu_read_lock(); 2504 2505 while (*quota && (msdu = __skb_dequeue(msdu_list))) { 2506 rxcb = ATH11K_SKB_RXCB(msdu); 2507 mac_id = rxcb->mac_id; 2508 ar = ab->pdevs[mac_id].ar; 2509 if 
(!rcu_dereference(ab->pdevs_active[mac_id])) { 2510 dev_kfree_skb_any(msdu); 2511 continue; 2512 } 2513 2514 if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) { 2515 dev_kfree_skb_any(msdu); 2516 continue; 2517 } 2518 2519 ret = ath11k_dp_rx_process_msdu(ar, msdu, msdu_list); 2520 if (ret) { 2521 ath11k_dbg(ab, ATH11K_DBG_DATA, 2522 "Unable to process msdu %d", ret); 2523 dev_kfree_skb_any(msdu); 2524 continue; 2525 } 2526 2527 ath11k_dp_rx_deliver_msdu(ar, napi, msdu); 2528 (*quota)--; 2529 } 2530 2531 rcu_read_unlock(); 2532 } 2533 2534 int ath11k_dp_process_rx(struct ath11k_base *ab, int ring_id, 2535 struct napi_struct *napi, int budget) 2536 { 2537 struct ath11k_dp *dp = &ab->dp; 2538 struct dp_rxdma_ring *rx_ring; 2539 int num_buffs_reaped[MAX_RADIOS] = {0}; 2540 struct sk_buff_head msdu_list; 2541 struct ath11k_skb_rxcb *rxcb; 2542 int total_msdu_reaped = 0; 2543 struct hal_srng *srng; 2544 struct sk_buff *msdu; 2545 int quota = budget; 2546 bool done = false; 2547 int buf_id, mac_id; 2548 struct ath11k *ar; 2549 u32 *rx_desc; 2550 int i; 2551 2552 __skb_queue_head_init(&msdu_list); 2553 2554 srng = &ab->hal.srng_list[dp->reo_dst_ring[ring_id].ring_id]; 2555 2556 spin_lock_bh(&srng->lock); 2557 2558 ath11k_hal_srng_access_begin(ab, srng); 2559 2560 try_again: 2561 while ((rx_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) { 2562 struct hal_reo_dest_ring desc = *(struct hal_reo_dest_ring *)rx_desc; 2563 enum hal_reo_dest_ring_push_reason push_reason; 2564 u32 cookie; 2565 2566 cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE, 2567 desc.buf_addr_info.info1); 2568 buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, 2569 cookie); 2570 mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, cookie); 2571 2572 ar = ab->pdevs[mac_id].ar; 2573 rx_ring = &ar->dp.rx_refill_buf_ring; 2574 spin_lock_bh(&rx_ring->idr_lock); 2575 msdu = idr_find(&rx_ring->bufs_idr, buf_id); 2576 if (!msdu) { 2577 ath11k_warn(ab, "frame rx with invalid buf_id %d\n", 2578 buf_id); 2579 spin_unlock_bh(&rx_ring->idr_lock); 2580 continue; 2581 } 2582 2583 idr_remove(&rx_ring->bufs_idr, buf_id); 2584 spin_unlock_bh(&rx_ring->idr_lock); 2585 2586 rxcb = ATH11K_SKB_RXCB(msdu); 2587 dma_unmap_single(ab->dev, rxcb->paddr, 2588 msdu->len + skb_tailroom(msdu), 2589 DMA_FROM_DEVICE); 2590 2591 num_buffs_reaped[mac_id]++; 2592 total_msdu_reaped++; 2593 2594 push_reason = FIELD_GET(HAL_REO_DEST_RING_INFO0_PUSH_REASON, 2595 desc.info0); 2596 if (push_reason != 2597 HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION) { 2598 dev_kfree_skb_any(msdu); 2599 ab->soc_stats.hal_reo_error[dp->reo_dst_ring[ring_id].ring_id]++; 2600 continue; 2601 } 2602 2603 rxcb->is_first_msdu = !!(desc.rx_msdu_info.info0 & 2604 RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU); 2605 rxcb->is_last_msdu = !!(desc.rx_msdu_info.info0 & 2606 RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU); 2607 rxcb->is_continuation = !!(desc.rx_msdu_info.info0 & 2608 RX_MSDU_DESC_INFO0_MSDU_CONTINUATION); 2609 rxcb->mac_id = mac_id; 2610 rxcb->tid = FIELD_GET(HAL_REO_DEST_RING_INFO0_RX_QUEUE_NUM, 2611 desc.info0); 2612 2613 __skb_queue_tail(&msdu_list, msdu); 2614 2615 if (total_msdu_reaped >= quota && !rxcb->is_continuation) { 2616 done = true; 2617 break; 2618 } 2619 } 2620 2621 /* Hw might have updated the head pointer after we cached it. 2622 * In this case, even though there are entries in the ring we'll 2623 * get rx_desc NULL. Give the read another try with updated cached 2624 * head pointer so that we can reap complete MPDU in the current 2625 * rx processing. 
2626 */ 2627 if (!done && ath11k_hal_srng_dst_num_free(ab, srng, true)) { 2628 ath11k_hal_srng_access_end(ab, srng); 2629 goto try_again; 2630 } 2631 2632 ath11k_hal_srng_access_end(ab, srng); 2633 2634 spin_unlock_bh(&srng->lock); 2635 2636 if (!total_msdu_reaped) 2637 goto exit; 2638 2639 for (i = 0; i < ab->num_radios; i++) { 2640 if (!num_buffs_reaped[i]) 2641 continue; 2642 2643 ar = ab->pdevs[i].ar; 2644 rx_ring = &ar->dp.rx_refill_buf_ring; 2645 2646 ath11k_dp_rxbufs_replenish(ab, i, rx_ring, num_buffs_reaped[i], 2647 HAL_RX_BUF_RBM_SW3_BM); 2648 } 2649 2650 ath11k_dp_rx_process_received_packets(ab, napi, &msdu_list, 2651 &quota, ring_id); 2652 2653 exit: 2654 return budget - quota; 2655 } 2656 2657 static void ath11k_dp_rx_update_peer_stats(struct ath11k_sta *arsta, 2658 struct hal_rx_mon_ppdu_info *ppdu_info) 2659 { 2660 struct ath11k_rx_peer_stats *rx_stats = arsta->rx_stats; 2661 u32 num_msdu; 2662 2663 if (!rx_stats) 2664 return; 2665 2666 num_msdu = ppdu_info->tcp_msdu_count + ppdu_info->tcp_ack_msdu_count + 2667 ppdu_info->udp_msdu_count + ppdu_info->other_msdu_count; 2668 2669 rx_stats->num_msdu += num_msdu; 2670 rx_stats->tcp_msdu_count += ppdu_info->tcp_msdu_count + 2671 ppdu_info->tcp_ack_msdu_count; 2672 rx_stats->udp_msdu_count += ppdu_info->udp_msdu_count; 2673 rx_stats->other_msdu_count += ppdu_info->other_msdu_count; 2674 2675 if (ppdu_info->preamble_type == HAL_RX_PREAMBLE_11A || 2676 ppdu_info->preamble_type == HAL_RX_PREAMBLE_11B) { 2677 ppdu_info->nss = 1; 2678 ppdu_info->mcs = HAL_RX_MAX_MCS; 2679 ppdu_info->tid = IEEE80211_NUM_TIDS; 2680 } 2681 2682 if (ppdu_info->nss > 0 && ppdu_info->nss <= HAL_RX_MAX_NSS) 2683 rx_stats->nss_count[ppdu_info->nss - 1] += num_msdu; 2684 2685 if (ppdu_info->mcs <= HAL_RX_MAX_MCS) 2686 rx_stats->mcs_count[ppdu_info->mcs] += num_msdu; 2687 2688 if (ppdu_info->gi < HAL_RX_GI_MAX) 2689 rx_stats->gi_count[ppdu_info->gi] += num_msdu; 2690 2691 if (ppdu_info->bw < HAL_RX_BW_MAX) 2692 rx_stats->bw_count[ppdu_info->bw] += num_msdu; 2693 2694 if (ppdu_info->ldpc < HAL_RX_SU_MU_CODING_MAX) 2695 rx_stats->coding_count[ppdu_info->ldpc] += num_msdu; 2696 2697 if (ppdu_info->tid <= IEEE80211_NUM_TIDS) 2698 rx_stats->tid_count[ppdu_info->tid] += num_msdu; 2699 2700 if (ppdu_info->preamble_type < HAL_RX_PREAMBLE_MAX) 2701 rx_stats->pream_cnt[ppdu_info->preamble_type] += num_msdu; 2702 2703 if (ppdu_info->reception_type < HAL_RX_RECEPTION_TYPE_MAX) 2704 rx_stats->reception_type[ppdu_info->reception_type] += num_msdu; 2705 2706 if (ppdu_info->is_stbc) 2707 rx_stats->stbc_count += num_msdu; 2708 2709 if (ppdu_info->beamformed) 2710 rx_stats->beamformed_count += num_msdu; 2711 2712 if (ppdu_info->num_mpdu_fcs_ok > 1) 2713 rx_stats->ampdu_msdu_count += num_msdu; 2714 else 2715 rx_stats->non_ampdu_msdu_count += num_msdu; 2716 2717 rx_stats->num_mpdu_fcs_ok += ppdu_info->num_mpdu_fcs_ok; 2718 rx_stats->num_mpdu_fcs_err += ppdu_info->num_mpdu_fcs_err; 2719 rx_stats->dcm_count += ppdu_info->dcm; 2720 rx_stats->ru_alloc_cnt[ppdu_info->ru_alloc] += num_msdu; 2721 2722 arsta->rssi_comb = ppdu_info->rssi_comb; 2723 rx_stats->rx_duration += ppdu_info->rx_duration; 2724 arsta->rx_duration = rx_stats->rx_duration; 2725 } 2726 2727 static struct sk_buff *ath11k_dp_rx_alloc_mon_status_buf(struct ath11k_base *ab, 2728 struct dp_rxdma_ring *rx_ring, 2729 int *buf_id) 2730 { 2731 struct sk_buff *skb; 2732 dma_addr_t paddr; 2733 2734 skb = dev_alloc_skb(DP_RX_BUFFER_SIZE + 2735 DP_RX_BUFFER_ALIGN_SIZE); 2736 2737 if (!skb) 2738 goto fail_alloc_skb; 2739 2740 if
(!IS_ALIGNED((unsigned long)skb->data, 2741 DP_RX_BUFFER_ALIGN_SIZE)) { 2742 skb_pull(skb, PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) - 2743 skb->data); 2744 } 2745 2746 paddr = dma_map_single(ab->dev, skb->data, 2747 skb->len + skb_tailroom(skb), 2748 DMA_FROM_DEVICE); 2749 if (unlikely(dma_mapping_error(ab->dev, paddr))) 2750 goto fail_free_skb; 2751 2752 spin_lock_bh(&rx_ring->idr_lock); 2753 *buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0, 2754 rx_ring->bufs_max, GFP_ATOMIC); 2755 spin_unlock_bh(&rx_ring->idr_lock); 2756 if (*buf_id < 0) 2757 goto fail_dma_unmap; 2758 2759 ATH11K_SKB_RXCB(skb)->paddr = paddr; 2760 return skb; 2761 2762 fail_dma_unmap: 2763 dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb), 2764 DMA_FROM_DEVICE); 2765 fail_free_skb: 2766 dev_kfree_skb_any(skb); 2767 fail_alloc_skb: 2768 return NULL; 2769 } 2770 2771 int ath11k_dp_rx_mon_status_bufs_replenish(struct ath11k_base *ab, int mac_id, 2772 struct dp_rxdma_ring *rx_ring, 2773 int req_entries, 2774 enum hal_rx_buf_return_buf_manager mgr) 2775 { 2776 struct hal_srng *srng; 2777 u32 *desc; 2778 struct sk_buff *skb; 2779 int num_free; 2780 int num_remain; 2781 int buf_id; 2782 u32 cookie; 2783 dma_addr_t paddr; 2784 2785 req_entries = min(req_entries, rx_ring->bufs_max); 2786 2787 srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id]; 2788 2789 spin_lock_bh(&srng->lock); 2790 2791 ath11k_hal_srng_access_begin(ab, srng); 2792 2793 num_free = ath11k_hal_srng_src_num_free(ab, srng, true); 2794 2795 req_entries = min(num_free, req_entries); 2796 num_remain = req_entries; 2797 2798 while (num_remain > 0) { 2799 skb = ath11k_dp_rx_alloc_mon_status_buf(ab, rx_ring, 2800 &buf_id); 2801 if (!skb) 2802 break; 2803 paddr = ATH11K_SKB_RXCB(skb)->paddr; 2804 2805 desc = ath11k_hal_srng_src_get_next_entry(ab, srng); 2806 if (!desc) 2807 goto fail_desc_get; 2808 2809 cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) | 2810 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id); 2811 2812 num_remain--; 2813 2814 ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr); 2815 } 2816 2817 ath11k_hal_srng_access_end(ab, srng); 2818 2819 spin_unlock_bh(&srng->lock); 2820 2821 return req_entries - num_remain; 2822 2823 fail_desc_get: 2824 spin_lock_bh(&rx_ring->idr_lock); 2825 idr_remove(&rx_ring->bufs_idr, buf_id); 2826 spin_unlock_bh(&rx_ring->idr_lock); 2827 dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb), 2828 DMA_FROM_DEVICE); 2829 dev_kfree_skb_any(skb); 2830 ath11k_hal_srng_access_end(ab, srng); 2831 spin_unlock_bh(&srng->lock); 2832 2833 return req_entries - num_remain; 2834 } 2835 2836 static int ath11k_dp_rx_reap_mon_status_ring(struct ath11k_base *ab, int mac_id, 2837 int *budget, struct sk_buff_head *skb_list) 2838 { 2839 struct ath11k *ar; 2840 struct ath11k_pdev_dp *dp; 2841 struct dp_rxdma_ring *rx_ring; 2842 struct hal_srng *srng; 2843 void *rx_mon_status_desc; 2844 struct sk_buff *skb; 2845 struct ath11k_skb_rxcb *rxcb; 2846 struct hal_tlv_hdr *tlv; 2847 u32 cookie; 2848 int buf_id, srng_id; 2849 dma_addr_t paddr; 2850 u8 rbm; 2851 int num_buffs_reaped = 0; 2852 2853 ar = ab->pdevs[ath11k_hw_mac_id_to_pdev_id(&ab->hw_params, mac_id)].ar; 2854 dp = &ar->dp; 2855 srng_id = ath11k_hw_mac_id_to_srng_id(&ab->hw_params, mac_id); 2856 rx_ring = &dp->rx_mon_status_refill_ring[srng_id]; 2857 2858 srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id]; 2859 2860 spin_lock_bh(&srng->lock); 2861 2862 ath11k_hal_srng_access_begin(ab, srng); 2863 while (*budget) { 2864 *budget -= 1; 2865 
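/* Descriptive note (added): peek at the next status ring entry without moving the ring pointer; the cursor is only advanced further below once a replacement buffer has been programmed into the entry. */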
rx_mon_status_desc = 2866 ath11k_hal_srng_src_peek(ab, srng); 2867 if (!rx_mon_status_desc) 2868 break; 2869 2870 ath11k_hal_rx_buf_addr_info_get(rx_mon_status_desc, &paddr, 2871 &cookie, &rbm); 2872 if (paddr) { 2873 buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, cookie); 2874 2875 spin_lock_bh(&rx_ring->idr_lock); 2876 skb = idr_find(&rx_ring->bufs_idr, buf_id); 2877 if (!skb) { 2878 ath11k_warn(ab, "rx monitor status with invalid buf_id %d\n", 2879 buf_id); 2880 spin_unlock_bh(&rx_ring->idr_lock); 2881 goto move_next; 2882 } 2883 2884 idr_remove(&rx_ring->bufs_idr, buf_id); 2885 spin_unlock_bh(&rx_ring->idr_lock); 2886 2887 rxcb = ATH11K_SKB_RXCB(skb); 2888 2889 dma_unmap_single(ab->dev, rxcb->paddr, 2890 skb->len + skb_tailroom(skb), 2891 DMA_FROM_DEVICE); 2892 2893 tlv = (struct hal_tlv_hdr *)skb->data; 2894 if (FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl) != 2895 HAL_RX_STATUS_BUFFER_DONE) { 2896 ath11k_warn(ab, "mon status DONE not set %lx\n", 2897 FIELD_GET(HAL_TLV_HDR_TAG, 2898 tlv->tl)); 2899 dev_kfree_skb_any(skb); 2900 goto move_next; 2901 } 2902 2903 __skb_queue_tail(skb_list, skb); 2904 } 2905 move_next: 2906 skb = ath11k_dp_rx_alloc_mon_status_buf(ab, rx_ring, 2907 &buf_id); 2908 2909 if (!skb) { 2910 ath11k_hal_rx_buf_addr_info_set(rx_mon_status_desc, 0, 0, 2911 HAL_RX_BUF_RBM_SW3_BM); 2912 num_buffs_reaped++; 2913 break; 2914 } 2915 rxcb = ATH11K_SKB_RXCB(skb); 2916 2917 cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) | 2918 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id); 2919 2920 ath11k_hal_rx_buf_addr_info_set(rx_mon_status_desc, rxcb->paddr, 2921 cookie, HAL_RX_BUF_RBM_SW3_BM); 2922 ath11k_hal_srng_src_get_next_entry(ab, srng); 2923 num_buffs_reaped++; 2924 } 2925 ath11k_hal_srng_access_end(ab, srng); 2926 spin_unlock_bh(&srng->lock); 2927 2928 return num_buffs_reaped; 2929 } 2930 2931 int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id, 2932 struct napi_struct *napi, int budget) 2933 { 2934 struct ath11k *ar = ath11k_ab_to_ar(ab, mac_id); 2935 enum hal_rx_mon_status hal_status; 2936 struct sk_buff *skb; 2937 struct sk_buff_head skb_list; 2938 struct hal_rx_mon_ppdu_info ppdu_info; 2939 struct ath11k_peer *peer; 2940 struct ath11k_sta *arsta; 2941 int num_buffs_reaped = 0; 2942 2943 __skb_queue_head_init(&skb_list); 2944 2945 num_buffs_reaped = ath11k_dp_rx_reap_mon_status_ring(ab, mac_id, &budget, 2946 &skb_list); 2947 if (!num_buffs_reaped) 2948 goto exit; 2949 2950 while ((skb = __skb_dequeue(&skb_list))) { 2951 memset(&ppdu_info, 0, sizeof(ppdu_info)); 2952 ppdu_info.peer_id = HAL_INVALID_PEERID; 2953 2954 if (ath11k_debugfs_is_pktlog_rx_stats_enabled(ar)) 2955 trace_ath11k_htt_rxdesc(ar, skb->data, DP_RX_BUFFER_SIZE); 2956 2957 hal_status = ath11k_hal_rx_parse_mon_status(ab, &ppdu_info, skb); 2958 2959 if (ppdu_info.peer_id == HAL_INVALID_PEERID || 2960 hal_status != HAL_RX_MON_STATUS_PPDU_DONE) { 2961 dev_kfree_skb_any(skb); 2962 continue; 2963 } 2964 2965 rcu_read_lock(); 2966 spin_lock_bh(&ab->base_lock); 2967 peer = ath11k_peer_find_by_id(ab, ppdu_info.peer_id); 2968 2969 if (!peer || !peer->sta) { 2970 ath11k_dbg(ab, ATH11K_DBG_DATA, 2971 "failed to find the peer with peer_id %d\n", 2972 ppdu_info.peer_id); 2973 spin_unlock_bh(&ab->base_lock); 2974 rcu_read_unlock(); 2975 dev_kfree_skb_any(skb); 2976 continue; 2977 } 2978 2979 arsta = (struct ath11k_sta *)peer->sta->drv_priv; 2980 ath11k_dp_rx_update_peer_stats(arsta, &ppdu_info); 2981 2982 if (ath11k_debugfs_is_pktlog_peer_valid(ar, peer->addr)) 2983 trace_ath11k_htt_rxdesc(ar, skb->data, 
DP_RX_BUFFER_SIZE); 2984 2985 spin_unlock_bh(&ab->base_lock); 2986 rcu_read_unlock(); 2987 2988 dev_kfree_skb_any(skb); 2989 } 2990 exit: 2991 return num_buffs_reaped; 2992 } 2993 2994 static void ath11k_dp_rx_frag_timer(struct timer_list *timer) 2995 { 2996 struct dp_rx_tid *rx_tid = from_timer(rx_tid, timer, frag_timer); 2997 2998 spin_lock_bh(&rx_tid->ab->base_lock); 2999 if (rx_tid->last_frag_no && 3000 rx_tid->rx_frag_bitmap == GENMASK(rx_tid->last_frag_no, 0)) { 3001 spin_unlock_bh(&rx_tid->ab->base_lock); 3002 return; 3003 } 3004 ath11k_dp_rx_frags_cleanup(rx_tid, true); 3005 spin_unlock_bh(&rx_tid->ab->base_lock); 3006 } 3007 3008 int ath11k_peer_rx_frag_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id) 3009 { 3010 struct ath11k_base *ab = ar->ab; 3011 struct crypto_shash *tfm; 3012 struct ath11k_peer *peer; 3013 struct dp_rx_tid *rx_tid; 3014 int i; 3015 3016 tfm = crypto_alloc_shash("michael_mic", 0, 0); 3017 if (IS_ERR(tfm)) 3018 return PTR_ERR(tfm); 3019 3020 spin_lock_bh(&ab->base_lock); 3021 3022 peer = ath11k_peer_find(ab, vdev_id, peer_mac); 3023 if (!peer) { 3024 ath11k_warn(ab, "failed to find the peer to set up fragment info\n"); 3025 spin_unlock_bh(&ab->base_lock); 3026 return -ENOENT; 3027 } 3028 3029 for (i = 0; i <= IEEE80211_NUM_TIDS; i++) { 3030 rx_tid = &peer->rx_tid[i]; 3031 rx_tid->ab = ab; 3032 timer_setup(&rx_tid->frag_timer, ath11k_dp_rx_frag_timer, 0); 3033 skb_queue_head_init(&rx_tid->rx_frags); 3034 } 3035 3036 peer->tfm_mmic = tfm; 3037 spin_unlock_bh(&ab->base_lock); 3038 3039 return 0; 3040 } 3041 3042 static int ath11k_dp_rx_h_michael_mic(struct crypto_shash *tfm, u8 *key, 3043 struct ieee80211_hdr *hdr, u8 *data, 3044 size_t data_len, u8 *mic) 3045 { 3046 SHASH_DESC_ON_STACK(desc, tfm); 3047 u8 mic_hdr[16] = {0}; 3048 u8 tid = 0; 3049 int ret; 3050 3051 if (!tfm) 3052 return -EINVAL; 3053 3054 desc->tfm = tfm; 3055 3056 ret = crypto_shash_setkey(tfm, key, 8); 3057 if (ret) 3058 goto out; 3059 3060 ret = crypto_shash_init(desc); 3061 if (ret) 3062 goto out; 3063 3064 /* TKIP MIC header */ 3065 memcpy(mic_hdr, ieee80211_get_DA(hdr), ETH_ALEN); 3066 memcpy(mic_hdr + ETH_ALEN, ieee80211_get_SA(hdr), ETH_ALEN); 3067 if (ieee80211_is_data_qos(hdr->frame_control)) 3068 tid = ieee80211_get_tid(hdr); 3069 mic_hdr[12] = tid; 3070 3071 ret = crypto_shash_update(desc, mic_hdr, 16); 3072 if (ret) 3073 goto out; 3074 ret = crypto_shash_update(desc, data, data_len); 3075 if (ret) 3076 goto out; 3077 ret = crypto_shash_final(desc, mic); 3078 out: 3079 shash_desc_zero(desc); 3080 return ret; 3081 } 3082 3083 static int ath11k_dp_rx_h_verify_tkip_mic(struct ath11k *ar, struct ath11k_peer *peer, 3084 struct sk_buff *msdu) 3085 { 3086 struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)msdu->data; 3087 struct ieee80211_rx_status *rxs = IEEE80211_SKB_RXCB(msdu); 3088 struct ieee80211_key_conf *key_conf; 3089 struct ieee80211_hdr *hdr; 3090 u8 mic[IEEE80211_CCMP_MIC_LEN]; 3091 int head_len, tail_len, ret; 3092 size_t data_len; 3093 u32 hdr_len; 3094 u8 *key, *data; 3095 u8 key_idx; 3096 3097 if (ath11k_dp_rx_h_mpdu_start_enctype(rx_desc) != HAL_ENCRYPT_TYPE_TKIP_MIC) 3098 return 0; 3099 3100 hdr = (struct ieee80211_hdr *)(msdu->data + HAL_RX_DESC_SIZE); 3101 hdr_len = ieee80211_hdrlen(hdr->frame_control); 3102 head_len = hdr_len + HAL_RX_DESC_SIZE + IEEE80211_TKIP_IV_LEN; 3103 tail_len = IEEE80211_CCMP_MIC_LEN + IEEE80211_TKIP_ICV_LEN + FCS_LEN; 3104 3105 if (!is_multicast_ether_addr(hdr->addr1)) 3106 key_idx = peer->ucast_keyidx; 3107 else 3108 key_idx = 
peer->mcast_keyidx; 3109 3110 key_conf = peer->keys[key_idx]; 3111 3112 data = msdu->data + head_len; 3113 data_len = msdu->len - head_len - tail_len; 3114 key = &key_conf->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY]; 3115 3116 ret = ath11k_dp_rx_h_michael_mic(peer->tfm_mmic, key, hdr, data, data_len, mic); 3117 if (ret || memcmp(mic, data + data_len, IEEE80211_CCMP_MIC_LEN)) 3118 goto mic_fail; 3119 3120 return 0; 3121 3122 mic_fail: 3123 (ATH11K_SKB_RXCB(msdu))->is_first_msdu = true; 3124 (ATH11K_SKB_RXCB(msdu))->is_last_msdu = true; 3125 3126 rxs->flag |= RX_FLAG_MMIC_ERROR | RX_FLAG_MMIC_STRIPPED | 3127 RX_FLAG_IV_STRIPPED | RX_FLAG_DECRYPTED; 3128 skb_pull(msdu, HAL_RX_DESC_SIZE); 3129 3130 ath11k_dp_rx_h_ppdu(ar, rx_desc, rxs); 3131 ath11k_dp_rx_h_undecap(ar, msdu, rx_desc, 3132 HAL_ENCRYPT_TYPE_TKIP_MIC, rxs, true); 3133 ieee80211_rx(ar->hw, msdu); 3134 return -EINVAL; 3135 } 3136 3137 static void ath11k_dp_rx_h_undecap_frag(struct ath11k *ar, struct sk_buff *msdu, 3138 enum hal_encrypt_type enctype, u32 flags) 3139 { 3140 struct ieee80211_hdr *hdr; 3141 size_t hdr_len; 3142 size_t crypto_len; 3143 3144 if (!flags) 3145 return; 3146 3147 hdr = (struct ieee80211_hdr *)(msdu->data + HAL_RX_DESC_SIZE); 3148 3149 if (flags & RX_FLAG_MIC_STRIPPED) 3150 skb_trim(msdu, msdu->len - 3151 ath11k_dp_rx_crypto_mic_len(ar, enctype)); 3152 3153 if (flags & RX_FLAG_ICV_STRIPPED) 3154 skb_trim(msdu, msdu->len - 3155 ath11k_dp_rx_crypto_icv_len(ar, enctype)); 3156 3157 if (flags & RX_FLAG_IV_STRIPPED) { 3158 hdr_len = ieee80211_hdrlen(hdr->frame_control); 3159 crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype); 3160 3161 memmove((void *)msdu->data + HAL_RX_DESC_SIZE + crypto_len, 3162 (void *)msdu->data + HAL_RX_DESC_SIZE, hdr_len); 3163 skb_pull(msdu, crypto_len); 3164 } 3165 } 3166 3167 static int ath11k_dp_rx_h_defrag(struct ath11k *ar, 3168 struct ath11k_peer *peer, 3169 struct dp_rx_tid *rx_tid, 3170 struct sk_buff **defrag_skb) 3171 { 3172 struct hal_rx_desc *rx_desc; 3173 struct sk_buff *skb, *first_frag, *last_frag; 3174 struct ieee80211_hdr *hdr; 3175 enum hal_encrypt_type enctype; 3176 bool is_decrypted = false; 3177 int msdu_len = 0; 3178 int extra_space; 3179 u32 flags; 3180 3181 first_frag = skb_peek(&rx_tid->rx_frags); 3182 last_frag = skb_peek_tail(&rx_tid->rx_frags); 3183 3184 skb_queue_walk(&rx_tid->rx_frags, skb) { 3185 flags = 0; 3186 rx_desc = (struct hal_rx_desc *)skb->data; 3187 hdr = (struct ieee80211_hdr *)(skb->data + HAL_RX_DESC_SIZE); 3188 3189 enctype = ath11k_dp_rx_h_mpdu_start_enctype(rx_desc); 3190 if (enctype != HAL_ENCRYPT_TYPE_OPEN) 3191 is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_desc); 3192 3193 if (is_decrypted) { 3194 if (skb != first_frag) 3195 flags |= RX_FLAG_IV_STRIPPED; 3196 if (skb != last_frag) 3197 flags |= RX_FLAG_ICV_STRIPPED | 3198 RX_FLAG_MIC_STRIPPED; 3199 } 3200 3201 /* RX fragments are always raw packets */ 3202 if (skb != last_frag) 3203 skb_trim(skb, skb->len - FCS_LEN); 3204 ath11k_dp_rx_h_undecap_frag(ar, skb, enctype, flags); 3205 3206 if (skb != first_frag) 3207 skb_pull(skb, HAL_RX_DESC_SIZE + 3208 ieee80211_hdrlen(hdr->frame_control)); 3209 msdu_len += skb->len; 3210 } 3211 3212 extra_space = msdu_len - (DP_RX_BUFFER_SIZE + skb_tailroom(first_frag)); 3213 if (extra_space > 0 && 3214 (pskb_expand_head(first_frag, 0, extra_space, GFP_ATOMIC) < 0)) 3215 return -ENOMEM; 3216 3217 __skb_unlink(first_frag, &rx_tid->rx_frags); 3218 while ((skb = __skb_dequeue(&rx_tid->rx_frags))) { 3219 skb_put_data(first_frag, skb->data, skb->len); 3220 
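/* Descriptive note (added): the fragment payload has been appended to first_frag, so the source skb is no longer needed and can be freed. */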
dev_kfree_skb_any(skb); 3221 } 3222 3223 hdr = (struct ieee80211_hdr *)(first_frag->data + HAL_RX_DESC_SIZE); 3224 hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_MOREFRAGS); 3225 ATH11K_SKB_RXCB(first_frag)->is_frag = 1; 3226 3227 if (ath11k_dp_rx_h_verify_tkip_mic(ar, peer, first_frag)) 3228 first_frag = NULL; 3229 3230 *defrag_skb = first_frag; 3231 return 0; 3232 } 3233 3234 static int ath11k_dp_rx_h_defrag_reo_reinject(struct ath11k *ar, struct dp_rx_tid *rx_tid, 3235 struct sk_buff *defrag_skb) 3236 { 3237 struct ath11k_base *ab = ar->ab; 3238 struct ath11k_pdev_dp *dp = &ar->dp; 3239 struct dp_rxdma_ring *rx_refill_ring = &dp->rx_refill_buf_ring; 3240 struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)defrag_skb->data; 3241 struct hal_reo_entrance_ring *reo_ent_ring; 3242 struct hal_reo_dest_ring *reo_dest_ring; 3243 struct dp_link_desc_bank *link_desc_banks; 3244 struct hal_rx_msdu_link *msdu_link; 3245 struct hal_rx_msdu_details *msdu0; 3246 struct hal_srng *srng; 3247 dma_addr_t paddr; 3248 u32 desc_bank, msdu_info, mpdu_info; 3249 u32 dst_idx, cookie; 3250 u32 *msdu_len_offset; 3251 int ret, buf_id; 3252 3253 link_desc_banks = ab->dp.link_desc_banks; 3254 reo_dest_ring = rx_tid->dst_ring_desc; 3255 3256 ath11k_hal_rx_reo_ent_paddr_get(ab, reo_dest_ring, &paddr, &desc_bank); 3257 msdu_link = (struct hal_rx_msdu_link *)(link_desc_banks[desc_bank].vaddr + 3258 (paddr - link_desc_banks[desc_bank].paddr)); 3259 msdu0 = &msdu_link->msdu_link[0]; 3260 dst_idx = FIELD_GET(RX_MSDU_DESC_INFO0_REO_DEST_IND, msdu0->rx_msdu_info.info0); 3261 memset(msdu0, 0, sizeof(*msdu0)); 3262 3263 msdu_info = FIELD_PREP(RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU, 1) | 3264 FIELD_PREP(RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU, 1) | 3265 FIELD_PREP(RX_MSDU_DESC_INFO0_MSDU_CONTINUATION, 0) | 3266 FIELD_PREP(RX_MSDU_DESC_INFO0_MSDU_LENGTH, 3267 defrag_skb->len - HAL_RX_DESC_SIZE) | 3268 FIELD_PREP(RX_MSDU_DESC_INFO0_REO_DEST_IND, dst_idx) | 3269 FIELD_PREP(RX_MSDU_DESC_INFO0_VALID_SA, 1) | 3270 FIELD_PREP(RX_MSDU_DESC_INFO0_VALID_DA, 1); 3271 msdu0->rx_msdu_info.info0 = msdu_info; 3272 3273 /* change msdu len in hal rx desc */ 3274 msdu_len_offset = (u32 *)&rx_desc->msdu_start; 3275 *msdu_len_offset &= ~(RX_MSDU_START_INFO1_MSDU_LENGTH); 3276 *msdu_len_offset |= defrag_skb->len - HAL_RX_DESC_SIZE; 3277 3278 paddr = dma_map_single(ab->dev, defrag_skb->data, 3279 defrag_skb->len + skb_tailroom(defrag_skb), 3280 DMA_FROM_DEVICE); 3281 if (dma_mapping_error(ab->dev, paddr)) 3282 return -ENOMEM; 3283 3284 spin_lock_bh(&rx_refill_ring->idr_lock); 3285 buf_id = idr_alloc(&rx_refill_ring->bufs_idr, defrag_skb, 0, 3286 rx_refill_ring->bufs_max * 3, GFP_ATOMIC); 3287 spin_unlock_bh(&rx_refill_ring->idr_lock); 3288 if (buf_id < 0) { 3289 ret = -ENOMEM; 3290 goto err_unmap_dma; 3291 } 3292 3293 ATH11K_SKB_RXCB(defrag_skb)->paddr = paddr; 3294 cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, dp->mac_id) | 3295 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id); 3296 3297 ath11k_hal_rx_buf_addr_info_set(msdu0, paddr, cookie, HAL_RX_BUF_RBM_SW3_BM); 3298 3299 /* Fill mpdu details into reo entrace ring */ 3300 srng = &ab->hal.srng_list[ab->dp.reo_reinject_ring.ring_id]; 3301 3302 spin_lock_bh(&srng->lock); 3303 ath11k_hal_srng_access_begin(ab, srng); 3304 3305 reo_ent_ring = (struct hal_reo_entrance_ring *) 3306 ath11k_hal_srng_src_get_next_entry(ab, srng); 3307 if (!reo_ent_ring) { 3308 ath11k_hal_srng_access_end(ab, srng); 3309 spin_unlock_bh(&srng->lock); 3310 ret = -ENOSPC; 3311 goto err_free_idr; 3312 } 3313 
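/* Descriptive note (added): populate the freshly obtained entrance ring descriptor so the reassembled MPDU is handed back to REO and flows through the normal rx path again. */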
memset(reo_ent_ring, 0, sizeof(*reo_ent_ring)); 3314 3315 ath11k_hal_rx_reo_ent_paddr_get(ab, reo_dest_ring, &paddr, &desc_bank); 3316 ath11k_hal_rx_buf_addr_info_set(reo_ent_ring, paddr, desc_bank, 3317 HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST); 3318 3319 mpdu_info = FIELD_PREP(RX_MPDU_DESC_INFO0_MSDU_COUNT, 1) | 3320 FIELD_PREP(RX_MPDU_DESC_INFO0_SEQ_NUM, rx_tid->cur_sn) | 3321 FIELD_PREP(RX_MPDU_DESC_INFO0_FRAG_FLAG, 0) | 3322 FIELD_PREP(RX_MPDU_DESC_INFO0_VALID_SA, 1) | 3323 FIELD_PREP(RX_MPDU_DESC_INFO0_VALID_DA, 1) | 3324 FIELD_PREP(RX_MPDU_DESC_INFO0_RAW_MPDU, 1) | 3325 FIELD_PREP(RX_MPDU_DESC_INFO0_VALID_PN, 1); 3326 3327 reo_ent_ring->rx_mpdu_info.info0 = mpdu_info; 3328 reo_ent_ring->rx_mpdu_info.meta_data = reo_dest_ring->rx_mpdu_info.meta_data; 3329 reo_ent_ring->queue_addr_lo = reo_dest_ring->queue_addr_lo; 3330 reo_ent_ring->info0 = FIELD_PREP(HAL_REO_ENTR_RING_INFO0_QUEUE_ADDR_HI, 3331 FIELD_GET(HAL_REO_DEST_RING_INFO0_QUEUE_ADDR_HI, 3332 reo_dest_ring->info0)) | 3333 FIELD_PREP(HAL_REO_ENTR_RING_INFO0_DEST_IND, dst_idx); 3334 ath11k_hal_srng_access_end(ab, srng); 3335 spin_unlock_bh(&srng->lock); 3336 3337 return 0; 3338 3339 err_free_idr: 3340 spin_lock_bh(&rx_refill_ring->idr_lock); 3341 idr_remove(&rx_refill_ring->bufs_idr, buf_id); 3342 spin_unlock_bh(&rx_refill_ring->idr_lock); 3343 err_unmap_dma: 3344 dma_unmap_single(ab->dev, paddr, defrag_skb->len + skb_tailroom(defrag_skb), 3345 DMA_FROM_DEVICE); 3346 return ret; 3347 } 3348 3349 static int ath11k_dp_rx_h_cmp_frags(struct sk_buff *a, struct sk_buff *b) 3350 { 3351 int frag1, frag2; 3352 3353 frag1 = ath11k_dp_rx_h_mpdu_start_frag_no(a); 3354 frag2 = ath11k_dp_rx_h_mpdu_start_frag_no(b); 3355 3356 return frag1 - frag2; 3357 } 3358 3359 static void ath11k_dp_rx_h_sort_frags(struct sk_buff_head *frag_list, 3360 struct sk_buff *cur_frag) 3361 { 3362 struct sk_buff *skb; 3363 int cmp; 3364 3365 skb_queue_walk(frag_list, skb) { 3366 cmp = ath11k_dp_rx_h_cmp_frags(skb, cur_frag); 3367 if (cmp < 0) 3368 continue; 3369 __skb_queue_before(frag_list, skb, cur_frag); 3370 return; 3371 } 3372 __skb_queue_tail(frag_list, cur_frag); 3373 } 3374 3375 static u64 ath11k_dp_rx_h_get_pn(struct sk_buff *skb) 3376 { 3377 struct ieee80211_hdr *hdr; 3378 u64 pn = 0; 3379 u8 *ehdr; 3380 3381 hdr = (struct ieee80211_hdr *)(skb->data + HAL_RX_DESC_SIZE); 3382 ehdr = skb->data + HAL_RX_DESC_SIZE + ieee80211_hdrlen(hdr->frame_control); 3383 3384 pn = ehdr[0]; 3385 pn |= (u64)ehdr[1] << 8; 3386 pn |= (u64)ehdr[4] << 16; 3387 pn |= (u64)ehdr[5] << 24; 3388 pn |= (u64)ehdr[6] << 32; 3389 pn |= (u64)ehdr[7] << 40; 3390 3391 return pn; 3392 } 3393 3394 static bool 3395 ath11k_dp_rx_h_defrag_validate_incr_pn(struct ath11k *ar, struct dp_rx_tid *rx_tid) 3396 { 3397 enum hal_encrypt_type encrypt_type; 3398 struct sk_buff *first_frag, *skb; 3399 struct hal_rx_desc *desc; 3400 u64 last_pn; 3401 u64 cur_pn; 3402 3403 first_frag = skb_peek(&rx_tid->rx_frags); 3404 desc = (struct hal_rx_desc *)first_frag->data; 3405 3406 encrypt_type = ath11k_dp_rx_h_mpdu_start_enctype(desc); 3407 if (encrypt_type != HAL_ENCRYPT_TYPE_CCMP_128 && 3408 encrypt_type != HAL_ENCRYPT_TYPE_CCMP_256 && 3409 encrypt_type != HAL_ENCRYPT_TYPE_GCMP_128 && 3410 encrypt_type != HAL_ENCRYPT_TYPE_AES_GCMP_256) 3411 return true; 3412 3413 last_pn = ath11k_dp_rx_h_get_pn(first_frag); 3414 skb_queue_walk(&rx_tid->rx_frags, skb) { 3415 if (skb == first_frag) 3416 continue; 3417 3418 cur_pn = ath11k_dp_rx_h_get_pn(skb); 3419 if (cur_pn != last_pn + 1) 3420 return false; 3421 last_pn = cur_pn; 3422 } 
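/* Descriptive note (added): every fragment's PN incremented by exactly one relative to the previous fragment, so the sequence is considered valid. */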
3423 return true; 3424 } 3425 3426 static int ath11k_dp_rx_frag_h_mpdu(struct ath11k *ar, 3427 struct sk_buff *msdu, 3428 u32 *ring_desc) 3429 { 3430 struct ath11k_base *ab = ar->ab; 3431 struct hal_rx_desc *rx_desc; 3432 struct ath11k_peer *peer; 3433 struct dp_rx_tid *rx_tid; 3434 struct sk_buff *defrag_skb = NULL; 3435 u32 peer_id; 3436 u16 seqno, frag_no; 3437 u8 tid; 3438 int ret = 0; 3439 bool more_frags; 3440 3441 rx_desc = (struct hal_rx_desc *)msdu->data; 3442 peer_id = ath11k_dp_rx_h_mpdu_start_peer_id(rx_desc); 3443 tid = ath11k_dp_rx_h_mpdu_start_tid(rx_desc); 3444 seqno = ath11k_dp_rx_h_mpdu_start_seq_no(rx_desc); 3445 frag_no = ath11k_dp_rx_h_mpdu_start_frag_no(msdu); 3446 more_frags = ath11k_dp_rx_h_mpdu_start_more_frags(msdu); 3447 3448 if (!ath11k_dp_rx_h_mpdu_start_seq_ctrl_valid(rx_desc) || 3449 !ath11k_dp_rx_h_mpdu_start_fc_valid(rx_desc) || 3450 tid > IEEE80211_NUM_TIDS) 3451 return -EINVAL; 3452 3453 /* received unfragmented packet in reo 3454 * exception ring, this shouldn't happen 3455 * as these packets typically come from 3456 * reo2sw srngs. 3457 */ 3458 if (WARN_ON_ONCE(!frag_no && !more_frags)) 3459 return -EINVAL; 3460 3461 spin_lock_bh(&ab->base_lock); 3462 peer = ath11k_peer_find_by_id(ab, peer_id); 3463 if (!peer) { 3464 ath11k_warn(ab, "failed to find the peer to de-fragment received fragment peer_id %d\n", 3465 peer_id); 3466 ret = -ENOENT; 3467 goto out_unlock; 3468 } 3469 rx_tid = &peer->rx_tid[tid]; 3470 3471 if ((!skb_queue_empty(&rx_tid->rx_frags) && seqno != rx_tid->cur_sn) || 3472 skb_queue_empty(&rx_tid->rx_frags)) { 3473 /* Flush stored fragments and start a new sequence */ 3474 ath11k_dp_rx_frags_cleanup(rx_tid, true); 3475 rx_tid->cur_sn = seqno; 3476 } 3477 3478 if (rx_tid->rx_frag_bitmap & BIT(frag_no)) { 3479 /* Fragment already present */ 3480 ret = -EINVAL; 3481 goto out_unlock; 3482 } 3483 3484 if (frag_no > __fls(rx_tid->rx_frag_bitmap)) 3485 __skb_queue_tail(&rx_tid->rx_frags, msdu); 3486 else 3487 ath11k_dp_rx_h_sort_frags(&rx_tid->rx_frags, msdu); 3488 3489 rx_tid->rx_frag_bitmap |= BIT(frag_no); 3490 if (!more_frags) 3491 rx_tid->last_frag_no = frag_no; 3492 3493 if (frag_no == 0) { 3494 rx_tid->dst_ring_desc = kmemdup(ring_desc, 3495 sizeof(*rx_tid->dst_ring_desc), 3496 GFP_ATOMIC); 3497 if (!rx_tid->dst_ring_desc) { 3498 ret = -ENOMEM; 3499 goto out_unlock; 3500 } 3501 } else { 3502 ath11k_dp_rx_link_desc_return(ab, ring_desc, 3503 HAL_WBM_REL_BM_ACT_PUT_IN_IDLE); 3504 } 3505 3506 if (!rx_tid->last_frag_no || 3507 rx_tid->rx_frag_bitmap != GENMASK(rx_tid->last_frag_no, 0)) { 3508 mod_timer(&rx_tid->frag_timer, jiffies + 3509 ATH11K_DP_RX_FRAGMENT_TIMEOUT_MS); 3510 goto out_unlock; 3511 } 3512 3513 spin_unlock_bh(&ab->base_lock); 3514 del_timer_sync(&rx_tid->frag_timer); 3515 spin_lock_bh(&ab->base_lock); 3516 3517 peer = ath11k_peer_find_by_id(ab, peer_id); 3518 if (!peer) 3519 goto err_frags_cleanup; 3520 3521 if (!ath11k_dp_rx_h_defrag_validate_incr_pn(ar, rx_tid)) 3522 goto err_frags_cleanup; 3523 3524 if (ath11k_dp_rx_h_defrag(ar, peer, rx_tid, &defrag_skb)) 3525 goto err_frags_cleanup; 3526 3527 if (!defrag_skb) 3528 goto err_frags_cleanup; 3529 3530 if (ath11k_dp_rx_h_defrag_reo_reinject(ar, rx_tid, defrag_skb)) 3531 goto err_frags_cleanup; 3532 3533 ath11k_dp_rx_frags_cleanup(rx_tid, false); 3534 goto out_unlock; 3535 3536 err_frags_cleanup: 3537 dev_kfree_skb_any(defrag_skb); 3538 ath11k_dp_rx_frags_cleanup(rx_tid, true); 3539 out_unlock: 3540 spin_unlock_bh(&ab->base_lock); 3541 return ret; 3542 } 3543 3544 static int 3545 
ath11k_dp_process_rx_err_buf(struct ath11k *ar, u32 *ring_desc, int buf_id, bool drop) 3546 { 3547 struct ath11k_pdev_dp *dp = &ar->dp; 3548 struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring; 3549 struct sk_buff *msdu; 3550 struct ath11k_skb_rxcb *rxcb; 3551 struct hal_rx_desc *rx_desc; 3552 u8 *hdr_status; 3553 u16 msdu_len; 3554 3555 spin_lock_bh(&rx_ring->idr_lock); 3556 msdu = idr_find(&rx_ring->bufs_idr, buf_id); 3557 if (!msdu) { 3558 ath11k_warn(ar->ab, "rx err buf with invalid buf_id %d\n", 3559 buf_id); 3560 spin_unlock_bh(&rx_ring->idr_lock); 3561 return -EINVAL; 3562 } 3563 3564 idr_remove(&rx_ring->bufs_idr, buf_id); 3565 spin_unlock_bh(&rx_ring->idr_lock); 3566 3567 rxcb = ATH11K_SKB_RXCB(msdu); 3568 dma_unmap_single(ar->ab->dev, rxcb->paddr, 3569 msdu->len + skb_tailroom(msdu), 3570 DMA_FROM_DEVICE); 3571 3572 if (drop) { 3573 dev_kfree_skb_any(msdu); 3574 return 0; 3575 } 3576 3577 rcu_read_lock(); 3578 if (!rcu_dereference(ar->ab->pdevs_active[ar->pdev_idx])) { 3579 dev_kfree_skb_any(msdu); 3580 goto exit; 3581 } 3582 3583 if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) { 3584 dev_kfree_skb_any(msdu); 3585 goto exit; 3586 } 3587 3588 rx_desc = (struct hal_rx_desc *)msdu->data; 3589 msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(rx_desc); 3590 if ((msdu_len + HAL_RX_DESC_SIZE) > DP_RX_BUFFER_SIZE) { 3591 hdr_status = ath11k_dp_rx_h_80211_hdr(rx_desc); 3592 ath11k_warn(ar->ab, "invalid msdu leng %u", msdu_len); 3593 ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "", hdr_status, 3594 sizeof(struct ieee80211_hdr)); 3595 ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "", rx_desc, 3596 sizeof(struct hal_rx_desc)); 3597 dev_kfree_skb_any(msdu); 3598 goto exit; 3599 } 3600 3601 skb_put(msdu, HAL_RX_DESC_SIZE + msdu_len); 3602 3603 if (ath11k_dp_rx_frag_h_mpdu(ar, msdu, ring_desc)) { 3604 dev_kfree_skb_any(msdu); 3605 ath11k_dp_rx_link_desc_return(ar->ab, ring_desc, 3606 HAL_WBM_REL_BM_ACT_PUT_IN_IDLE); 3607 } 3608 exit: 3609 rcu_read_unlock(); 3610 return 0; 3611 } 3612 3613 int ath11k_dp_process_rx_err(struct ath11k_base *ab, struct napi_struct *napi, 3614 int budget) 3615 { 3616 u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC]; 3617 struct dp_link_desc_bank *link_desc_banks; 3618 enum hal_rx_buf_return_buf_manager rbm; 3619 int tot_n_bufs_reaped, quota, ret, i; 3620 int n_bufs_reaped[MAX_RADIOS] = {0}; 3621 struct dp_rxdma_ring *rx_ring; 3622 struct dp_srng *reo_except; 3623 u32 desc_bank, num_msdus; 3624 struct hal_srng *srng; 3625 struct ath11k_dp *dp; 3626 void *link_desc_va; 3627 int buf_id, mac_id; 3628 struct ath11k *ar; 3629 dma_addr_t paddr; 3630 u32 *desc; 3631 bool is_frag; 3632 u8 drop = 0; 3633 3634 tot_n_bufs_reaped = 0; 3635 quota = budget; 3636 3637 dp = &ab->dp; 3638 reo_except = &dp->reo_except_ring; 3639 link_desc_banks = dp->link_desc_banks; 3640 3641 srng = &ab->hal.srng_list[reo_except->ring_id]; 3642 3643 spin_lock_bh(&srng->lock); 3644 3645 ath11k_hal_srng_access_begin(ab, srng); 3646 3647 while (budget && 3648 (desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) { 3649 struct hal_reo_dest_ring *reo_desc = (struct hal_reo_dest_ring *)desc; 3650 3651 ab->soc_stats.err_ring_pkts++; 3652 ret = ath11k_hal_desc_reo_parse_err(ab, desc, &paddr, 3653 &desc_bank); 3654 if (ret) { 3655 ath11k_warn(ab, "failed to parse error reo desc %d\n", 3656 ret); 3657 continue; 3658 } 3659 link_desc_va = link_desc_banks[desc_bank].vaddr + 3660 (paddr - link_desc_banks[desc_bank].paddr); 3661 ath11k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, msdu_cookies, 3662 
&rbm); 3663 if (rbm != HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST && 3664 rbm != HAL_RX_BUF_RBM_SW3_BM) { 3665 ab->soc_stats.invalid_rbm++; 3666 ath11k_warn(ab, "invalid return buffer manager %d\n", rbm); 3667 ath11k_dp_rx_link_desc_return(ab, desc, 3668 HAL_WBM_REL_BM_ACT_REL_MSDU); 3669 continue; 3670 } 3671 3672 is_frag = !!(reo_desc->rx_mpdu_info.info0 & RX_MPDU_DESC_INFO0_FRAG_FLAG); 3673 3674 /* Process only rx fragments with one msdu per link desc below, and drop 3675 * msdu's indicated due to error reasons. 3676 */ 3677 if (!is_frag || num_msdus > 1) { 3678 drop = 1; 3679 /* Return the link desc back to wbm idle list */ 3680 ath11k_dp_rx_link_desc_return(ab, desc, 3681 HAL_WBM_REL_BM_ACT_PUT_IN_IDLE); 3682 } 3683 3684 for (i = 0; i < num_msdus; i++) { 3685 buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, 3686 msdu_cookies[i]); 3687 3688 mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, 3689 msdu_cookies[i]); 3690 3691 ar = ab->pdevs[mac_id].ar; 3692 3693 if (!ath11k_dp_process_rx_err_buf(ar, desc, buf_id, drop)) { 3694 n_bufs_reaped[mac_id]++; 3695 tot_n_bufs_reaped++; 3696 } 3697 } 3698 3699 if (tot_n_bufs_reaped >= quota) { 3700 tot_n_bufs_reaped = quota; 3701 goto exit; 3702 } 3703 3704 budget = quota - tot_n_bufs_reaped; 3705 } 3706 3707 exit: 3708 ath11k_hal_srng_access_end(ab, srng); 3709 3710 spin_unlock_bh(&srng->lock); 3711 3712 for (i = 0; i < ab->num_radios; i++) { 3713 if (!n_bufs_reaped[i]) 3714 continue; 3715 3716 ar = ab->pdevs[i].ar; 3717 rx_ring = &ar->dp.rx_refill_buf_ring; 3718 3719 ath11k_dp_rxbufs_replenish(ab, i, rx_ring, n_bufs_reaped[i], 3720 HAL_RX_BUF_RBM_SW3_BM); 3721 } 3722 3723 return tot_n_bufs_reaped; 3724 } 3725 3726 static void ath11k_dp_rx_null_q_desc_sg_drop(struct ath11k *ar, 3727 int msdu_len, 3728 struct sk_buff_head *msdu_list) 3729 { 3730 struct sk_buff *skb, *tmp; 3731 struct ath11k_skb_rxcb *rxcb; 3732 int n_buffs; 3733 3734 n_buffs = DIV_ROUND_UP(msdu_len, 3735 (DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE)); 3736 3737 skb_queue_walk_safe(msdu_list, skb, tmp) { 3738 rxcb = ATH11K_SKB_RXCB(skb); 3739 if (rxcb->err_rel_src == HAL_WBM_REL_SRC_MODULE_REO && 3740 rxcb->err_code == HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO) { 3741 if (!n_buffs) 3742 break; 3743 __skb_unlink(skb, msdu_list); 3744 dev_kfree_skb_any(skb); 3745 n_buffs--; 3746 } 3747 } 3748 } 3749 3750 static int ath11k_dp_rx_h_null_q_desc(struct ath11k *ar, struct sk_buff *msdu, 3751 struct ieee80211_rx_status *status, 3752 struct sk_buff_head *msdu_list) 3753 { 3754 u16 msdu_len; 3755 struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data; 3756 u8 l3pad_bytes; 3757 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); 3758 3759 msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(desc); 3760 3761 if (!rxcb->is_frag && ((msdu_len + HAL_RX_DESC_SIZE) > DP_RX_BUFFER_SIZE)) { 3762 /* First buffer will be freed by the caller, so deduct it's length */ 3763 msdu_len = msdu_len - (DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE); 3764 ath11k_dp_rx_null_q_desc_sg_drop(ar, msdu_len, msdu_list); 3765 return -EINVAL; 3766 } 3767 3768 if (!ath11k_dp_rx_h_attn_msdu_done(desc)) { 3769 ath11k_warn(ar->ab, 3770 "msdu_done bit not set in null_q_des processing\n"); 3771 __skb_queue_purge(msdu_list); 3772 return -EIO; 3773 } 3774 3775 /* Handle NULL queue descriptor violations arising out a missing 3776 * REO queue for a given peer or a given TID. This typically 3777 * may happen if a packet is received on a QOS enabled TID before the 3778 * ADDBA negotiation for that TID, when the TID queue is setup. 
Or 3779 * it may also happen for MC/BC frames if they are not routed to the 3780 * non-QOS TID queue, in the absence of any other default TID queue. 3781 * This error can show up in both the REO destination ring and the WBM release ring. 3782 */ 3783 3784 rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(desc); 3785 rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(desc); 3786 3787 if (rxcb->is_frag) { 3788 skb_pull(msdu, HAL_RX_DESC_SIZE); 3789 } else { 3790 l3pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(desc); 3791 3792 if ((HAL_RX_DESC_SIZE + l3pad_bytes + msdu_len) > DP_RX_BUFFER_SIZE) 3793 return -EINVAL; 3794 3795 skb_put(msdu, HAL_RX_DESC_SIZE + l3pad_bytes + msdu_len); 3796 skb_pull(msdu, HAL_RX_DESC_SIZE + l3pad_bytes); 3797 } 3798 ath11k_dp_rx_h_ppdu(ar, desc, status); 3799 3800 ath11k_dp_rx_h_mpdu(ar, msdu, desc, status); 3801 3802 rxcb->tid = ath11k_dp_rx_h_mpdu_start_tid(desc); 3803 3804 /* Please note that the caller will have access to the msdu and will complete 3805 * rx with mac80211. There is no need to worry about cleaning up amsdu_list. 3806 */ 3807 3808 return 0; 3809 } 3810 3811 static bool ath11k_dp_rx_h_reo_err(struct ath11k *ar, struct sk_buff *msdu, 3812 struct ieee80211_rx_status *status, 3813 struct sk_buff_head *msdu_list) 3814 { 3815 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); 3816 bool drop = false; 3817 3818 ar->ab->soc_stats.reo_error[rxcb->err_code]++; 3819 3820 switch (rxcb->err_code) { 3821 case HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO: 3822 if (ath11k_dp_rx_h_null_q_desc(ar, msdu, status, msdu_list)) 3823 drop = true; 3824 break; 3825 case HAL_REO_DEST_RING_ERROR_CODE_PN_CHECK_FAILED: 3826 /* TODO: Do not drop PN failed packets in the driver; 3827 * instead, it is good to drop such packets in mac80211 3828 * after incrementing the replay counters. 3829 */ 3830 fallthrough; 3831 default: 3832 /* TODO: Review other errors and process them to mac80211 3833 * as appropriate.
3834 */ 3835 drop = true; 3836 break; 3837 } 3838 3839 return drop; 3840 } 3841 3842 static void ath11k_dp_rx_h_tkip_mic_err(struct ath11k *ar, struct sk_buff *msdu, 3843 struct ieee80211_rx_status *status) 3844 { 3845 u16 msdu_len; 3846 struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data; 3847 u8 l3pad_bytes; 3848 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); 3849 3850 rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(desc); 3851 rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(desc); 3852 3853 l3pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(desc); 3854 msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(desc); 3855 skb_put(msdu, HAL_RX_DESC_SIZE + l3pad_bytes + msdu_len); 3856 skb_pull(msdu, HAL_RX_DESC_SIZE + l3pad_bytes); 3857 3858 ath11k_dp_rx_h_ppdu(ar, desc, status); 3859 3860 status->flag |= (RX_FLAG_MMIC_STRIPPED | RX_FLAG_MMIC_ERROR | 3861 RX_FLAG_DECRYPTED); 3862 3863 ath11k_dp_rx_h_undecap(ar, msdu, desc, 3864 HAL_ENCRYPT_TYPE_TKIP_MIC, status, false); 3865 } 3866 3867 static bool ath11k_dp_rx_h_rxdma_err(struct ath11k *ar, struct sk_buff *msdu, 3868 struct ieee80211_rx_status *status) 3869 { 3870 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); 3871 bool drop = false; 3872 3873 ar->ab->soc_stats.rxdma_error[rxcb->err_code]++; 3874 3875 switch (rxcb->err_code) { 3876 case HAL_REO_ENTR_RING_RXDMA_ECODE_TKIP_MIC_ERR: 3877 ath11k_dp_rx_h_tkip_mic_err(ar, msdu, status); 3878 break; 3879 default: 3880 /* TODO: Review other rxdma error code to check if anything is 3881 * worth reporting to mac80211 3882 */ 3883 drop = true; 3884 break; 3885 } 3886 3887 return drop; 3888 } 3889 3890 static void ath11k_dp_rx_wbm_err(struct ath11k *ar, 3891 struct napi_struct *napi, 3892 struct sk_buff *msdu, 3893 struct sk_buff_head *msdu_list) 3894 { 3895 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); 3896 struct ieee80211_rx_status rxs = {0}; 3897 struct ieee80211_rx_status *status; 3898 bool drop = true; 3899 3900 switch (rxcb->err_rel_src) { 3901 case HAL_WBM_REL_SRC_MODULE_REO: 3902 drop = ath11k_dp_rx_h_reo_err(ar, msdu, &rxs, msdu_list); 3903 break; 3904 case HAL_WBM_REL_SRC_MODULE_RXDMA: 3905 drop = ath11k_dp_rx_h_rxdma_err(ar, msdu, &rxs); 3906 break; 3907 default: 3908 /* msdu will get freed */ 3909 break; 3910 } 3911 3912 if (drop) { 3913 dev_kfree_skb_any(msdu); 3914 return; 3915 } 3916 3917 status = IEEE80211_SKB_RXCB(msdu); 3918 *status = rxs; 3919 3920 ath11k_dp_rx_deliver_msdu(ar, napi, msdu); 3921 } 3922 3923 int ath11k_dp_rx_process_wbm_err(struct ath11k_base *ab, 3924 struct napi_struct *napi, int budget) 3925 { 3926 struct ath11k *ar; 3927 struct ath11k_dp *dp = &ab->dp; 3928 struct dp_rxdma_ring *rx_ring; 3929 struct hal_rx_wbm_rel_info err_info; 3930 struct hal_srng *srng; 3931 struct sk_buff *msdu; 3932 struct sk_buff_head msdu_list[MAX_RADIOS]; 3933 struct ath11k_skb_rxcb *rxcb; 3934 u32 *rx_desc; 3935 int buf_id, mac_id; 3936 int num_buffs_reaped[MAX_RADIOS] = {0}; 3937 int total_num_buffs_reaped = 0; 3938 int ret, i; 3939 3940 for (i = 0; i < ab->num_radios; i++) 3941 __skb_queue_head_init(&msdu_list[i]); 3942 3943 srng = &ab->hal.srng_list[dp->rx_rel_ring.ring_id]; 3944 3945 spin_lock_bh(&srng->lock); 3946 3947 ath11k_hal_srng_access_begin(ab, srng); 3948 3949 while (budget) { 3950 rx_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng); 3951 if (!rx_desc) 3952 break; 3953 3954 ret = ath11k_hal_wbm_desc_parse_err(ab, rx_desc, &err_info); 3955 if (ret) { 3956 ath11k_warn(ab, 3957 "failed to parse rx error in wbm_rel ring desc %d\n", 3958 ret); 
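/* The ring pointer was already advanced by ath11k_hal_srng_dst_get_next_entry() above, so a descriptor that fails to parse is simply skipped here. */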
3959 continue; 3960 } 3961 3962 buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, err_info.cookie); 3963 mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, err_info.cookie); 3964 3965 ar = ab->pdevs[mac_id].ar; 3966 rx_ring = &ar->dp.rx_refill_buf_ring; 3967 3968 spin_lock_bh(&rx_ring->idr_lock); 3969 msdu = idr_find(&rx_ring->bufs_idr, buf_id); 3970 if (!msdu) { 3971 ath11k_warn(ab, "frame rx with invalid buf_id %d pdev %d\n", 3972 buf_id, mac_id); 3973 spin_unlock_bh(&rx_ring->idr_lock); 3974 continue; 3975 } 3976 3977 idr_remove(&rx_ring->bufs_idr, buf_id); 3978 spin_unlock_bh(&rx_ring->idr_lock); 3979 3980 rxcb = ATH11K_SKB_RXCB(msdu); 3981 dma_unmap_single(ab->dev, rxcb->paddr, 3982 msdu->len + skb_tailroom(msdu), 3983 DMA_FROM_DEVICE); 3984 3985 num_buffs_reaped[mac_id]++; 3986 total_num_buffs_reaped++; 3987 budget--; 3988 3989 if (err_info.push_reason != 3990 HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) { 3991 dev_kfree_skb_any(msdu); 3992 continue; 3993 } 3994 3995 rxcb->err_rel_src = err_info.err_rel_src; 3996 rxcb->err_code = err_info.err_code; 3997 rxcb->rx_desc = (struct hal_rx_desc *)msdu->data; 3998 __skb_queue_tail(&msdu_list[mac_id], msdu); 3999 } 4000 4001 ath11k_hal_srng_access_end(ab, srng); 4002 4003 spin_unlock_bh(&srng->lock); 4004 4005 if (!total_num_buffs_reaped) 4006 goto done; 4007 4008 for (i = 0; i < ab->num_radios; i++) { 4009 if (!num_buffs_reaped[i]) 4010 continue; 4011 4012 ar = ab->pdevs[i].ar; 4013 rx_ring = &ar->dp.rx_refill_buf_ring; 4014 4015 ath11k_dp_rxbufs_replenish(ab, i, rx_ring, num_buffs_reaped[i], 4016 HAL_RX_BUF_RBM_SW3_BM); 4017 } 4018 4019 rcu_read_lock(); 4020 for (i = 0; i < ab->num_radios; i++) { 4021 if (!rcu_dereference(ab->pdevs_active[i])) { 4022 __skb_queue_purge(&msdu_list[i]); 4023 continue; 4024 } 4025 4026 ar = ab->pdevs[i].ar; 4027 4028 if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) { 4029 __skb_queue_purge(&msdu_list[i]); 4030 continue; 4031 } 4032 4033 while ((msdu = __skb_dequeue(&msdu_list[i])) != NULL) 4034 ath11k_dp_rx_wbm_err(ar, napi, msdu, &msdu_list[i]); 4035 } 4036 rcu_read_unlock(); 4037 done: 4038 return total_num_buffs_reaped; 4039 } 4040 4041 int ath11k_dp_process_rxdma_err(struct ath11k_base *ab, int mac_id, int budget) 4042 { 4043 struct ath11k *ar; 4044 struct dp_srng *err_ring; 4045 struct dp_rxdma_ring *rx_ring; 4046 struct dp_link_desc_bank *link_desc_banks = ab->dp.link_desc_banks; 4047 struct hal_srng *srng; 4048 u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC]; 4049 enum hal_rx_buf_return_buf_manager rbm; 4050 enum hal_reo_entr_rxdma_ecode rxdma_err_code; 4051 struct ath11k_skb_rxcb *rxcb; 4052 struct sk_buff *skb; 4053 struct hal_reo_entrance_ring *entr_ring; 4054 void *desc; 4055 int num_buf_freed = 0; 4056 int quota = budget; 4057 dma_addr_t paddr; 4058 u32 desc_bank; 4059 void *link_desc_va; 4060 int num_msdus; 4061 int i; 4062 int buf_id; 4063 4064 ar = ab->pdevs[ath11k_hw_mac_id_to_pdev_id(&ab->hw_params, mac_id)].ar; 4065 err_ring = &ar->dp.rxdma_err_dst_ring[ath11k_hw_mac_id_to_srng_id(&ab->hw_params, 4066 mac_id)]; 4067 rx_ring = &ar->dp.rx_refill_buf_ring; 4068 4069 srng = &ab->hal.srng_list[err_ring->ring_id]; 4070 4071 spin_lock_bh(&srng->lock); 4072 4073 ath11k_hal_srng_access_begin(ab, srng); 4074 4075 while (quota-- && 4076 (desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) { 4077 ath11k_hal_rx_reo_ent_paddr_get(ab, desc, &paddr, &desc_bank); 4078 4079 entr_ring = (struct hal_reo_entrance_ring *)desc; 4080 rxdma_err_code = 4081 FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE, 4082 
entr_ring->info1); 4083 ab->soc_stats.rxdma_error[rxdma_err_code]++; 4084 4085 link_desc_va = link_desc_banks[desc_bank].vaddr + 4086 (paddr - link_desc_banks[desc_bank].paddr); 4087 ath11k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, 4088 msdu_cookies, &rbm); 4089 4090 for (i = 0; i < num_msdus; i++) { 4091 buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, 4092 msdu_cookies[i]); 4093 4094 spin_lock_bh(&rx_ring->idr_lock); 4095 skb = idr_find(&rx_ring->bufs_idr, buf_id); 4096 if (!skb) { 4097 ath11k_warn(ab, "rxdma error with invalid buf_id %d\n", 4098 buf_id); 4099 spin_unlock_bh(&rx_ring->idr_lock); 4100 continue; 4101 } 4102 4103 idr_remove(&rx_ring->bufs_idr, buf_id); 4104 spin_unlock_bh(&rx_ring->idr_lock); 4105 4106 rxcb = ATH11K_SKB_RXCB(skb); 4107 dma_unmap_single(ab->dev, rxcb->paddr, 4108 skb->len + skb_tailroom(skb), 4109 DMA_FROM_DEVICE); 4110 dev_kfree_skb_any(skb); 4111 4112 num_buf_freed++; 4113 } 4114 4115 ath11k_dp_rx_link_desc_return(ab, desc, 4116 HAL_WBM_REL_BM_ACT_PUT_IN_IDLE); 4117 } 4118 4119 ath11k_hal_srng_access_end(ab, srng); 4120 4121 spin_unlock_bh(&srng->lock); 4122 4123 if (num_buf_freed) 4124 ath11k_dp_rxbufs_replenish(ab, mac_id, rx_ring, num_buf_freed, 4125 HAL_RX_BUF_RBM_SW3_BM); 4126 4127 return budget - quota; 4128 } 4129 4130 void ath11k_dp_process_reo_status(struct ath11k_base *ab) 4131 { 4132 struct ath11k_dp *dp = &ab->dp; 4133 struct hal_srng *srng; 4134 struct dp_reo_cmd *cmd, *tmp; 4135 bool found = false; 4136 u32 *reo_desc; 4137 u16 tag; 4138 struct hal_reo_status reo_status; 4139 4140 srng = &ab->hal.srng_list[dp->reo_status_ring.ring_id]; 4141 4142 memset(&reo_status, 0, sizeof(reo_status)); 4143 4144 spin_lock_bh(&srng->lock); 4145 4146 ath11k_hal_srng_access_begin(ab, srng); 4147 4148 while ((reo_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) { 4149 tag = FIELD_GET(HAL_SRNG_TLV_HDR_TAG, *reo_desc); 4150 4151 switch (tag) { 4152 case HAL_REO_GET_QUEUE_STATS_STATUS: 4153 ath11k_hal_reo_status_queue_stats(ab, reo_desc, 4154 &reo_status); 4155 break; 4156 case HAL_REO_FLUSH_QUEUE_STATUS: 4157 ath11k_hal_reo_flush_queue_status(ab, reo_desc, 4158 &reo_status); 4159 break; 4160 case HAL_REO_FLUSH_CACHE_STATUS: 4161 ath11k_hal_reo_flush_cache_status(ab, reo_desc, 4162 &reo_status); 4163 break; 4164 case HAL_REO_UNBLOCK_CACHE_STATUS: 4165 ath11k_hal_reo_unblk_cache_status(ab, reo_desc, 4166 &reo_status); 4167 break; 4168 case HAL_REO_FLUSH_TIMEOUT_LIST_STATUS: 4169 ath11k_hal_reo_flush_timeout_list_status(ab, reo_desc, 4170 &reo_status); 4171 break; 4172 case HAL_REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS: 4173 ath11k_hal_reo_desc_thresh_reached_status(ab, reo_desc, 4174 &reo_status); 4175 break; 4176 case HAL_REO_UPDATE_RX_REO_QUEUE_STATUS: 4177 ath11k_hal_reo_update_rx_reo_queue_status(ab, reo_desc, 4178 &reo_status); 4179 break; 4180 default: 4181 ath11k_warn(ab, "Unknown reo status type %d\n", tag); 4182 continue; 4183 } 4184 4185 spin_lock_bh(&dp->reo_cmd_lock); 4186 list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) { 4187 if (reo_status.uniform_hdr.cmd_num == cmd->cmd_num) { 4188 found = true; 4189 list_del(&cmd->list); 4190 break; 4191 } 4192 } 4193 spin_unlock_bh(&dp->reo_cmd_lock); 4194 4195 if (found) { 4196 cmd->handler(dp, (void *)&cmd->data, 4197 reo_status.uniform_hdr.cmd_status); 4198 kfree(cmd); 4199 } 4200 4201 found = false; 4202 } 4203 4204 ath11k_hal_srng_access_end(ab, srng); 4205 4206 spin_unlock_bh(&srng->lock); 4207 } 4208 4209 void ath11k_dp_rx_pdev_free(struct ath11k_base *ab, int mac_id) 4210 { 4211 
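/* Release the per-pdev rx srngs and rxdma buffers that were set up by ath11k_dp_rx_pdev_alloc() below. */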
struct ath11k *ar = ab->pdevs[mac_id].ar; 4212 4213 ath11k_dp_rx_pdev_srng_free(ar); 4214 ath11k_dp_rxdma_pdev_buf_free(ar); 4215 } 4216 4217 int ath11k_dp_rx_pdev_alloc(struct ath11k_base *ab, int mac_id) 4218 { 4219 struct ath11k *ar = ab->pdevs[mac_id].ar; 4220 struct ath11k_pdev_dp *dp = &ar->dp; 4221 u32 ring_id; 4222 int i; 4223 int ret; 4224 4225 ret = ath11k_dp_rx_pdev_srng_alloc(ar); 4226 if (ret) { 4227 ath11k_warn(ab, "failed to setup rx srngs\n"); 4228 return ret; 4229 } 4230 4231 ret = ath11k_dp_rxdma_pdev_buf_setup(ar); 4232 if (ret) { 4233 ath11k_warn(ab, "failed to setup rxdma ring\n"); 4234 return ret; 4235 } 4236 4237 ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id; 4238 ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id, HAL_RXDMA_BUF); 4239 if (ret) { 4240 ath11k_warn(ab, "failed to configure rx_refill_buf_ring %d\n", 4241 ret); 4242 return ret; 4243 } 4244 4245 if (ab->hw_params.rx_mac_buf_ring) { 4246 for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) { 4247 ring_id = dp->rx_mac_buf_ring[i].ring_id; 4248 ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, 4249 mac_id + i, HAL_RXDMA_BUF); 4250 if (ret) { 4251 ath11k_warn(ab, "failed to configure rx_mac_buf_ring%d %d\n", 4252 i, ret); 4253 return ret; 4254 } 4255 } 4256 } 4257 4258 for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) { 4259 ring_id = dp->rxdma_err_dst_ring[i].ring_id; 4260 ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, 4261 mac_id + i, HAL_RXDMA_DST); 4262 if (ret) { 4263 ath11k_warn(ab, "failed to configure rxdma_err_dest_ring%d %d\n", 4264 i, ret); 4265 return ret; 4266 } 4267 } 4268 4269 if (!ab->hw_params.rxdma1_enable) 4270 goto config_refill_ring; 4271 4272 ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id; 4273 ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, 4274 mac_id, HAL_RXDMA_MONITOR_BUF); 4275 if (ret) { 4276 ath11k_warn(ab, "failed to configure rxdma_mon_buf_ring %d\n", 4277 ret); 4278 return ret; 4279 } 4280 ret = ath11k_dp_tx_htt_srng_setup(ab, 4281 dp->rxdma_mon_dst_ring.ring_id, 4282 mac_id, HAL_RXDMA_MONITOR_DST); 4283 if (ret) { 4284 ath11k_warn(ab, "failed to configure rxdma_mon_dst_ring %d\n", 4285 ret); 4286 return ret; 4287 } 4288 ret = ath11k_dp_tx_htt_srng_setup(ab, 4289 dp->rxdma_mon_desc_ring.ring_id, 4290 mac_id, HAL_RXDMA_MONITOR_DESC); 4291 if (ret) { 4292 ath11k_warn(ab, "failed to configure rxdma_mon_desc_ring %d\n", 4293 ret); 4294 return ret; 4295 } 4296 4297 config_refill_ring: 4298 for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) { 4299 ring_id = dp->rx_mon_status_refill_ring[i].refill_buf_ring.ring_id; 4300 ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id + i, 4301 HAL_RXDMA_MONITOR_STATUS); 4302 if (ret) { 4303 ath11k_warn(ab, 4304 "failed to configure mon_status_refill_ring%d %d\n", 4305 i, ret); 4306 return ret; 4307 } 4308 } 4309 4310 return 0; 4311 } 4312 4313 static void ath11k_dp_mon_set_frag_len(u32 *total_len, u32 *frag_len) 4314 { 4315 if (*total_len >= (DP_RX_BUFFER_SIZE - sizeof(struct hal_rx_desc))) { 4316 *frag_len = DP_RX_BUFFER_SIZE - sizeof(struct hal_rx_desc); 4317 *total_len -= *frag_len; 4318 } else { 4319 *frag_len = *total_len; 4320 *total_len = 0; 4321 } 4322 } 4323 4324 static 4325 int ath11k_dp_rx_monitor_link_desc_return(struct ath11k *ar, 4326 void *p_last_buf_addr_info, 4327 u8 mac_id) 4328 { 4329 struct ath11k_pdev_dp *dp = &ar->dp; 4330 struct dp_srng *dp_srng; 4331 void *hal_srng; 4332 void *src_srng_desc; 4333 int ret = 0; 4334 4335 if (ar->ab->hw_params.rxdma1_enable) { 4336 dp_srng =
&dp->rxdma_mon_desc_ring; 4337 hal_srng = &ar->ab->hal.srng_list[dp_srng->ring_id]; 4338 } else { 4339 dp_srng = &ar->ab->dp.wbm_desc_rel_ring; 4340 hal_srng = &ar->ab->hal.srng_list[dp_srng->ring_id]; 4341 } 4342 4343 ath11k_hal_srng_access_begin(ar->ab, hal_srng); 4344 4345 src_srng_desc = ath11k_hal_srng_src_get_next_entry(ar->ab, hal_srng); 4346 4347 if (src_srng_desc) { 4348 struct ath11k_buffer_addr *src_desc = 4349 (struct ath11k_buffer_addr *)src_srng_desc; 4350 4351 *src_desc = *((struct ath11k_buffer_addr *)p_last_buf_addr_info); 4352 } else { 4353 ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 4354 "Monitor Link Desc Ring %d Full", mac_id); 4355 ret = -ENOMEM; 4356 } 4357 4358 ath11k_hal_srng_access_end(ar->ab, hal_srng); 4359 return ret; 4360 } 4361 4362 static 4363 void ath11k_dp_rx_mon_next_link_desc_get(void *rx_msdu_link_desc, 4364 dma_addr_t *paddr, u32 *sw_cookie, 4365 u8 *rbm, 4366 void **pp_buf_addr_info) 4367 { 4368 struct hal_rx_msdu_link *msdu_link = 4369 (struct hal_rx_msdu_link *)rx_msdu_link_desc; 4370 struct ath11k_buffer_addr *buf_addr_info; 4371 4372 buf_addr_info = (struct ath11k_buffer_addr *)&msdu_link->buf_addr_info; 4373 4374 ath11k_hal_rx_buf_addr_info_get(buf_addr_info, paddr, sw_cookie, rbm); 4375 4376 *pp_buf_addr_info = (void *)buf_addr_info; 4377 } 4378 4379 static int ath11k_dp_pkt_set_pktlen(struct sk_buff *skb, u32 len) 4380 { 4381 if (skb->len > len) { 4382 skb_trim(skb, len); 4383 } else { 4384 if (skb_tailroom(skb) < len - skb->len) { 4385 if ((pskb_expand_head(skb, 0, 4386 len - skb->len - skb_tailroom(skb), 4387 GFP_ATOMIC))) { 4388 dev_kfree_skb_any(skb); 4389 return -ENOMEM; 4390 } 4391 } 4392 skb_put(skb, (len - skb->len)); 4393 } 4394 return 0; 4395 } 4396 4397 static void ath11k_hal_rx_msdu_list_get(struct ath11k *ar, 4398 void *msdu_link_desc, 4399 struct hal_rx_msdu_list *msdu_list, 4400 u16 *num_msdus) 4401 { 4402 struct hal_rx_msdu_details *msdu_details = NULL; 4403 struct rx_msdu_desc *msdu_desc_info = NULL; 4404 struct hal_rx_msdu_link *msdu_link = NULL; 4405 int i; 4406 u32 last = FIELD_PREP(RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU, 1); 4407 u32 first = FIELD_PREP(RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU, 1); 4408 u8 tmp = 0; 4409 4410 msdu_link = (struct hal_rx_msdu_link *)msdu_link_desc; 4411 msdu_details = &msdu_link->msdu_link[0]; 4412 4413 for (i = 0; i < HAL_RX_NUM_MSDU_DESC; i++) { 4414 if (FIELD_GET(BUFFER_ADDR_INFO0_ADDR, 4415 msdu_details[i].buf_addr_info.info0) == 0) { 4416 msdu_desc_info = &msdu_details[i - 1].rx_msdu_info; 4417 msdu_desc_info->info0 |= last; 4418 ; 4419 break; 4420 } 4421 msdu_desc_info = &msdu_details[i].rx_msdu_info; 4422 4423 if (!i) 4424 msdu_desc_info->info0 |= first; 4425 else if (i == (HAL_RX_NUM_MSDU_DESC - 1)) 4426 msdu_desc_info->info0 |= last; 4427 msdu_list->msdu_info[i].msdu_flags = msdu_desc_info->info0; 4428 msdu_list->msdu_info[i].msdu_len = 4429 HAL_RX_MSDU_PKT_LENGTH_GET(msdu_desc_info->info0); 4430 msdu_list->sw_cookie[i] = 4431 FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE, 4432 msdu_details[i].buf_addr_info.info1); 4433 tmp = FIELD_GET(BUFFER_ADDR_INFO1_RET_BUF_MGR, 4434 msdu_details[i].buf_addr_info.info1); 4435 msdu_list->rbm[i] = tmp; 4436 } 4437 *num_msdus = i; 4438 } 4439 4440 static u32 ath11k_dp_rx_mon_comp_ppduid(u32 msdu_ppdu_id, u32 *ppdu_id, 4441 u32 *rx_bufs_used) 4442 { 4443 u32 ret = 0; 4444 4445 if ((*ppdu_id < msdu_ppdu_id) && 4446 ((msdu_ppdu_id - *ppdu_id) < DP_NOT_PPDU_ID_WRAP_AROUND)) { 4447 *ppdu_id = msdu_ppdu_id; 4448 ret = msdu_ppdu_id; 4449 } else if ((*ppdu_id > msdu_ppdu_id) && 
4450 ((*ppdu_id - msdu_ppdu_id) > DP_NOT_PPDU_ID_WRAP_AROUND)) { 4451 /* mon_dst is behind than mon_status 4452 * skip dst_ring and free it 4453 */ 4454 *rx_bufs_used += 1; 4455 *ppdu_id = msdu_ppdu_id; 4456 ret = msdu_ppdu_id; 4457 } 4458 return ret; 4459 } 4460 4461 static void ath11k_dp_mon_get_buf_len(struct hal_rx_msdu_desc_info *info, 4462 bool *is_frag, u32 *total_len, 4463 u32 *frag_len, u32 *msdu_cnt) 4464 { 4465 if (info->msdu_flags & RX_MSDU_DESC_INFO0_MSDU_CONTINUATION) { 4466 if (!*is_frag) { 4467 *total_len = info->msdu_len; 4468 *is_frag = true; 4469 } 4470 ath11k_dp_mon_set_frag_len(total_len, 4471 frag_len); 4472 } else { 4473 if (*is_frag) { 4474 ath11k_dp_mon_set_frag_len(total_len, 4475 frag_len); 4476 } else { 4477 *frag_len = info->msdu_len; 4478 } 4479 *is_frag = false; 4480 *msdu_cnt -= 1; 4481 } 4482 } 4483 4484 static u32 4485 ath11k_dp_rx_mon_mpdu_pop(struct ath11k *ar, int mac_id, 4486 void *ring_entry, struct sk_buff **head_msdu, 4487 struct sk_buff **tail_msdu, u32 *npackets, 4488 u32 *ppdu_id) 4489 { 4490 struct ath11k_pdev_dp *dp = &ar->dp; 4491 struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data; 4492 struct dp_rxdma_ring *rx_ring = &dp->rxdma_mon_buf_ring; 4493 struct sk_buff *msdu = NULL, *last = NULL; 4494 struct hal_rx_msdu_list msdu_list; 4495 void *p_buf_addr_info, *p_last_buf_addr_info; 4496 struct hal_rx_desc *rx_desc; 4497 void *rx_msdu_link_desc; 4498 dma_addr_t paddr; 4499 u16 num_msdus = 0; 4500 u32 rx_buf_size, rx_pkt_offset, sw_cookie; 4501 u32 rx_bufs_used = 0, i = 0; 4502 u32 msdu_ppdu_id = 0, msdu_cnt = 0; 4503 u32 total_len = 0, frag_len = 0; 4504 bool is_frag, is_first_msdu; 4505 bool drop_mpdu = false; 4506 struct ath11k_skb_rxcb *rxcb; 4507 struct hal_reo_entrance_ring *ent_desc = 4508 (struct hal_reo_entrance_ring *)ring_entry; 4509 int buf_id; 4510 u32 rx_link_buf_info[2]; 4511 u8 rbm; 4512 4513 if (!ar->ab->hw_params.rxdma1_enable) 4514 rx_ring = &dp->rx_refill_buf_ring; 4515 4516 ath11k_hal_rx_reo_ent_buf_paddr_get(ring_entry, &paddr, 4517 &sw_cookie, 4518 &p_last_buf_addr_info, &rbm, 4519 &msdu_cnt); 4520 4521 if (FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_PUSH_REASON, 4522 ent_desc->info1) == 4523 HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) { 4524 u8 rxdma_err = 4525 FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE, 4526 ent_desc->info1); 4527 if (rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_FLUSH_REQUEST_ERR || 4528 rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_MPDU_LEN_ERR || 4529 rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_OVERFLOW_ERR) { 4530 drop_mpdu = true; 4531 pmon->rx_mon_stats.dest_mpdu_drop++; 4532 } 4533 } 4534 4535 is_frag = false; 4536 is_first_msdu = true; 4537 4538 do { 4539 if (pmon->mon_last_linkdesc_paddr == paddr) { 4540 pmon->rx_mon_stats.dup_mon_linkdesc_cnt++; 4541 return rx_bufs_used; 4542 } 4543 4544 if (ar->ab->hw_params.rxdma1_enable) 4545 rx_msdu_link_desc = 4546 (void *)pmon->link_desc_banks[sw_cookie].vaddr + 4547 (paddr - pmon->link_desc_banks[sw_cookie].paddr); 4548 else 4549 rx_msdu_link_desc = 4550 (void *)ar->ab->dp.link_desc_banks[sw_cookie].vaddr + 4551 (paddr - ar->ab->dp.link_desc_banks[sw_cookie].paddr); 4552 4553 ath11k_hal_rx_msdu_list_get(ar, rx_msdu_link_desc, &msdu_list, 4554 &num_msdus); 4555 4556 for (i = 0; i < num_msdus; i++) { 4557 u32 l2_hdr_offset; 4558 4559 if (pmon->mon_last_buf_cookie == msdu_list.sw_cookie[i]) { 4560 ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 4561 "i %d last_cookie %d is same\n", 4562 i, pmon->mon_last_buf_cookie); 4563 drop_mpdu = true; 4564 
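/* Duplicate sw_cookie: this monitor buffer was already processed, so count it and skip the entry. */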
pmon->rx_mon_stats.dup_mon_buf_cnt++; 4565 continue; 4566 } 4567 buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, 4568 msdu_list.sw_cookie[i]); 4569 4570 spin_lock_bh(&rx_ring->idr_lock); 4571 msdu = idr_find(&rx_ring->bufs_idr, buf_id); 4572 spin_unlock_bh(&rx_ring->idr_lock); 4573 if (!msdu) { 4574 ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 4575 "msdu_pop: invalid buf_id %d\n", buf_id); 4576 break; 4577 } 4578 rxcb = ATH11K_SKB_RXCB(msdu); 4579 if (!rxcb->unmapped) { 4580 dma_unmap_single(ar->ab->dev, rxcb->paddr, 4581 msdu->len + 4582 skb_tailroom(msdu), 4583 DMA_FROM_DEVICE); 4584 rxcb->unmapped = 1; 4585 } 4586 if (drop_mpdu) { 4587 ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 4588 "i %d drop msdu %p *ppdu_id %x\n", 4589 i, msdu, *ppdu_id); 4590 dev_kfree_skb_any(msdu); 4591 msdu = NULL; 4592 goto next_msdu; 4593 } 4594 4595 rx_desc = (struct hal_rx_desc *)msdu->data; 4596 4597 rx_pkt_offset = sizeof(struct hal_rx_desc); 4598 l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad(rx_desc); 4599 4600 if (is_first_msdu) { 4601 if (!ath11k_dp_rxdesc_mpdu_valid(rx_desc)) { 4602 drop_mpdu = true; 4603 dev_kfree_skb_any(msdu); 4604 msdu = NULL; 4605 pmon->mon_last_linkdesc_paddr = paddr; 4606 goto next_msdu; 4607 } 4608 4609 msdu_ppdu_id = 4610 ath11k_dp_rxdesc_get_ppduid(rx_desc); 4611 4612 if (ath11k_dp_rx_mon_comp_ppduid(msdu_ppdu_id, 4613 ppdu_id, 4614 &rx_bufs_used)) { 4615 if (rx_bufs_used) { 4616 drop_mpdu = true; 4617 dev_kfree_skb_any(msdu); 4618 msdu = NULL; 4619 goto next_msdu; 4620 } 4621 return rx_bufs_used; 4622 } 4623 pmon->mon_last_linkdesc_paddr = paddr; 4624 is_first_msdu = false; 4625 } 4626 ath11k_dp_mon_get_buf_len(&msdu_list.msdu_info[i], 4627 &is_frag, &total_len, 4628 &frag_len, &msdu_cnt); 4629 rx_buf_size = rx_pkt_offset + l2_hdr_offset + frag_len; 4630 4631 ath11k_dp_pkt_set_pktlen(msdu, rx_buf_size); 4632 4633 if (!(*head_msdu)) 4634 *head_msdu = msdu; 4635 else if (last) 4636 last->next = msdu; 4637 4638 last = msdu; 4639 next_msdu: 4640 pmon->mon_last_buf_cookie = msdu_list.sw_cookie[i]; 4641 rx_bufs_used++; 4642 spin_lock_bh(&rx_ring->idr_lock); 4643 idr_remove(&rx_ring->bufs_idr, buf_id); 4644 spin_unlock_bh(&rx_ring->idr_lock); 4645 } 4646 4647 ath11k_hal_rx_buf_addr_info_set(rx_link_buf_info, paddr, sw_cookie, rbm); 4648 4649 ath11k_dp_rx_mon_next_link_desc_get(rx_msdu_link_desc, &paddr, 4650 &sw_cookie, &rbm, 4651 &p_buf_addr_info); 4652 4653 if (ar->ab->hw_params.rxdma1_enable) { 4654 if (ath11k_dp_rx_monitor_link_desc_return(ar, 4655 p_last_buf_addr_info, 4656 dp->mac_id)) 4657 ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 4658 "dp_rx_monitor_link_desc_return failed"); 4659 } else { 4660 ath11k_dp_rx_link_desc_return(ar->ab, rx_link_buf_info, 4661 HAL_WBM_REL_BM_ACT_PUT_IN_IDLE); 4662 } 4663 4664 p_last_buf_addr_info = p_buf_addr_info; 4665 4666 } while (paddr && msdu_cnt); 4667 4668 if (last) 4669 last->next = NULL; 4670 4671 *tail_msdu = msdu; 4672 4673 if (msdu_cnt == 0) 4674 *npackets = 1; 4675 4676 return rx_bufs_used; 4677 } 4678 4679 static void ath11k_dp_rx_msdus_set_payload(struct sk_buff *msdu) 4680 { 4681 u32 rx_pkt_offset, l2_hdr_offset; 4682 4683 rx_pkt_offset = sizeof(struct hal_rx_desc); 4684 l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad((struct hal_rx_desc *)msdu->data); 4685 skb_pull(msdu, rx_pkt_offset + l2_hdr_offset); 4686 } 4687 4688 static struct sk_buff * 4689 ath11k_dp_rx_mon_merg_msdus(struct ath11k *ar, 4690 u32 mac_id, struct sk_buff *head_msdu, 4691 struct sk_buff *last_msdu, 4692 struct ieee80211_rx_status *rxs) 4693 { 4694 struct sk_buff *msdu, *mpdu_buf, 
*prev_buf; 4695 u32 decap_format, wifi_hdr_len; 4696 struct hal_rx_desc *rx_desc; 4697 char *hdr_desc; 4698 u8 *dest; 4699 struct ieee80211_hdr_3addr *wh; 4700 4701 mpdu_buf = NULL; 4702 4703 if (!head_msdu) 4704 goto err_merge_fail; 4705 4706 rx_desc = (struct hal_rx_desc *)head_msdu->data; 4707 4708 if (ath11k_dp_rxdesc_get_mpdulen_err(rx_desc)) 4709 return NULL; 4710 4711 decap_format = ath11k_dp_rxdesc_get_decap_format(rx_desc); 4712 4713 ath11k_dp_rx_h_ppdu(ar, rx_desc, rxs); 4714 4715 if (decap_format == DP_RX_DECAP_TYPE_RAW) { 4716 ath11k_dp_rx_msdus_set_payload(head_msdu); 4717 4718 prev_buf = head_msdu; 4719 msdu = head_msdu->next; 4720 4721 while (msdu) { 4722 ath11k_dp_rx_msdus_set_payload(msdu); 4723 4724 prev_buf = msdu; 4725 msdu = msdu->next; 4726 } 4727 4728 prev_buf->next = NULL; 4729 4730 skb_trim(prev_buf, prev_buf->len - HAL_RX_FCS_LEN); 4731 } else if (decap_format == DP_RX_DECAP_TYPE_NATIVE_WIFI) { 4732 __le16 qos_field; 4733 u8 qos_pkt = 0; 4734 4735 rx_desc = (struct hal_rx_desc *)head_msdu->data; 4736 hdr_desc = ath11k_dp_rxdesc_get_80211hdr(rx_desc); 4737 4738 /* Base size */ 4739 wifi_hdr_len = sizeof(struct ieee80211_hdr_3addr); 4740 wh = (struct ieee80211_hdr_3addr *)hdr_desc; 4741 4742 if (ieee80211_is_data_qos(wh->frame_control)) { 4743 struct ieee80211_qos_hdr *qwh = 4744 (struct ieee80211_qos_hdr *)hdr_desc; 4745 4746 qos_field = qwh->qos_ctrl; 4747 qos_pkt = 1; 4748 } 4749 msdu = head_msdu; 4750 4751 while (msdu) { 4752 rx_desc = (struct hal_rx_desc *)msdu->data; 4753 hdr_desc = ath11k_dp_rxdesc_get_80211hdr(rx_desc); 4754 4755 if (qos_pkt) { 4756 dest = skb_push(msdu, sizeof(__le16)); 4757 if (!dest) 4758 goto err_merge_fail; 4759 memcpy(dest, hdr_desc, wifi_hdr_len); 4760 memcpy(dest + wifi_hdr_len, 4761 (u8 *)&qos_field, sizeof(__le16)); 4762 } 4763 ath11k_dp_rx_msdus_set_payload(msdu); 4764 prev_buf = msdu; 4765 msdu = msdu->next; 4766 } 4767 dest = skb_put(prev_buf, HAL_RX_FCS_LEN); 4768 if (!dest) 4769 goto err_merge_fail; 4770 4771 ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 4772 "mpdu_buf %pK mpdu_buf->len %u", 4773 prev_buf, prev_buf->len); 4774 } else { 4775 ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 4776 "decap format %d is not supported!\n", 4777 decap_format); 4778 goto err_merge_fail; 4779 } 4780 4781 return head_msdu; 4782 4783 err_merge_fail: 4784 if (mpdu_buf && decap_format != DP_RX_DECAP_TYPE_RAW) { 4785 ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 4786 "err_merge_fail mpdu_buf %pK", mpdu_buf); 4787 /* Free the head buffer */ 4788 dev_kfree_skb_any(mpdu_buf); 4789 } 4790 return NULL; 4791 } 4792 4793 static int ath11k_dp_rx_mon_deliver(struct ath11k *ar, u32 mac_id, 4794 struct sk_buff *head_msdu, 4795 struct sk_buff *tail_msdu, 4796 struct napi_struct *napi) 4797 { 4798 struct ath11k_pdev_dp *dp = &ar->dp; 4799 struct sk_buff *mon_skb, *skb_next, *header; 4800 struct ieee80211_rx_status *rxs = &dp->rx_status, *status; 4801 4802 mon_skb = ath11k_dp_rx_mon_merg_msdus(ar, mac_id, head_msdu, 4803 tail_msdu, rxs); 4804 4805 if (!mon_skb) 4806 goto mon_deliver_fail; 4807 4808 header = mon_skb; 4809 4810 rxs->flag = 0; 4811 do { 4812 skb_next = mon_skb->next; 4813 if (!skb_next) 4814 rxs->flag &= ~RX_FLAG_AMSDU_MORE; 4815 else 4816 rxs->flag |= RX_FLAG_AMSDU_MORE; 4817 4818 if (mon_skb == header) { 4819 header = NULL; 4820 rxs->flag &= ~RX_FLAG_ALLOW_SAME_PN; 4821 } else { 4822 rxs->flag |= RX_FLAG_ALLOW_SAME_PN; 4823 } 4824 rxs->flag |= RX_FLAG_ONLY_MONITOR; 4825 4826 status = IEEE80211_SKB_RXCB(mon_skb); 4827 *status = *rxs; 4828 4829 ath11k_dp_rx_deliver_msdu(ar, 
napi, mon_skb); 4830 mon_skb = skb_next; 4831 } while (mon_skb); 4832 rxs->flag = 0; 4833 4834 return 0; 4835 4836 mon_deliver_fail: 4837 mon_skb = head_msdu; 4838 while (mon_skb) { 4839 skb_next = mon_skb->next; 4840 dev_kfree_skb_any(mon_skb); 4841 mon_skb = skb_next; 4842 } 4843 return -EINVAL; 4844 } 4845 4846 static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, int mac_id, 4847 u32 quota, struct napi_struct *napi) 4848 { 4849 struct ath11k_pdev_dp *dp = &ar->dp; 4850 struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data; 4851 void *ring_entry; 4852 void *mon_dst_srng; 4853 u32 ppdu_id; 4854 u32 rx_bufs_used; 4855 u32 ring_id; 4856 struct ath11k_pdev_mon_stats *rx_mon_stats; 4857 u32 npackets = 0; 4858 4859 if (ar->ab->hw_params.rxdma1_enable) 4860 ring_id = dp->rxdma_mon_dst_ring.ring_id; 4861 else 4862 ring_id = dp->rxdma_err_dst_ring[mac_id].ring_id; 4863 4864 mon_dst_srng = &ar->ab->hal.srng_list[ring_id]; 4865 4866 if (!mon_dst_srng) { 4867 ath11k_warn(ar->ab, 4868 "HAL Monitor Destination Ring Init Failed -- %pK", 4869 mon_dst_srng); 4870 return; 4871 } 4872 4873 spin_lock_bh(&pmon->mon_lock); 4874 4875 ath11k_hal_srng_access_begin(ar->ab, mon_dst_srng); 4876 4877 ppdu_id = pmon->mon_ppdu_info.ppdu_id; 4878 rx_bufs_used = 0; 4879 rx_mon_stats = &pmon->rx_mon_stats; 4880 4881 while ((ring_entry = ath11k_hal_srng_dst_peek(ar->ab, mon_dst_srng))) { 4882 struct sk_buff *head_msdu, *tail_msdu; 4883 4884 head_msdu = NULL; 4885 tail_msdu = NULL; 4886 4887 rx_bufs_used += ath11k_dp_rx_mon_mpdu_pop(ar, mac_id, ring_entry, 4888 &head_msdu, 4889 &tail_msdu, 4890 &npackets, &ppdu_id); 4891 4892 if (ppdu_id != pmon->mon_ppdu_info.ppdu_id) { 4893 pmon->mon_ppdu_status = DP_PPDU_STATUS_START; 4894 ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 4895 "dest_rx: new ppdu_id %x != status ppdu_id %x", 4896 ppdu_id, pmon->mon_ppdu_info.ppdu_id); 4897 break; 4898 } 4899 if (head_msdu && tail_msdu) { 4900 ath11k_dp_rx_mon_deliver(ar, dp->mac_id, head_msdu, 4901 tail_msdu, napi); 4902 rx_mon_stats->dest_mpdu_done++; 4903 } 4904 4905 ring_entry = ath11k_hal_srng_dst_get_next_entry(ar->ab, 4906 mon_dst_srng); 4907 } 4908 ath11k_hal_srng_access_end(ar->ab, mon_dst_srng); 4909 4910 spin_unlock_bh(&pmon->mon_lock); 4911 4912 if (rx_bufs_used) { 4913 rx_mon_stats->dest_ppdu_done++; 4914 if (ar->ab->hw_params.rxdma1_enable) 4915 ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id, 4916 &dp->rxdma_mon_buf_ring, 4917 rx_bufs_used, 4918 HAL_RX_BUF_RBM_SW3_BM); 4919 else 4920 ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id, 4921 &dp->rx_refill_buf_ring, 4922 rx_bufs_used, 4923 HAL_RX_BUF_RBM_SW3_BM); 4924 } 4925 } 4926 4927 static void ath11k_dp_rx_mon_status_process_tlv(struct ath11k *ar, 4928 int mac_id, u32 quota, 4929 struct napi_struct *napi) 4930 { 4931 struct ath11k_pdev_dp *dp = &ar->dp; 4932 struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data; 4933 struct hal_rx_mon_ppdu_info *ppdu_info; 4934 struct sk_buff *status_skb; 4935 u32 tlv_status = HAL_TLV_STATUS_BUF_DONE; 4936 struct ath11k_pdev_mon_stats *rx_mon_stats; 4937 4938 ppdu_info = &pmon->mon_ppdu_info; 4939 rx_mon_stats = &pmon->rx_mon_stats; 4940 4941 if (pmon->mon_ppdu_status != DP_PPDU_STATUS_START) 4942 return; 4943 4944 while (!skb_queue_empty(&pmon->rx_status_q)) { 4945 status_skb = skb_dequeue(&pmon->rx_status_q); 4946 4947 tlv_status = ath11k_hal_rx_parse_mon_status(ar->ab, ppdu_info, 4948 status_skb); 4949 if (tlv_status == HAL_TLV_STATUS_PPDU_DONE) { 4950 rx_mon_stats->status_ppdu_done++; 4951 pmon->mon_ppdu_status = 
DP_PPDU_STATUS_DONE; 4952 ath11k_dp_rx_mon_dest_process(ar, mac_id, quota, napi); 4953 pmon->mon_ppdu_status = DP_PPDU_STATUS_START; 4954 } 4955 dev_kfree_skb_any(status_skb); 4956 } 4957 } 4958 4959 static int ath11k_dp_mon_process_rx(struct ath11k_base *ab, int mac_id, 4960 struct napi_struct *napi, int budget) 4961 { 4962 struct ath11k *ar = ath11k_ab_to_ar(ab, mac_id); 4963 struct ath11k_pdev_dp *dp = &ar->dp; 4964 struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data; 4965 int num_buffs_reaped = 0; 4966 4967 num_buffs_reaped = ath11k_dp_rx_reap_mon_status_ring(ar->ab, mac_id, &budget, 4968 &pmon->rx_status_q); 4969 if (num_buffs_reaped) 4970 ath11k_dp_rx_mon_status_process_tlv(ar, mac_id, budget, napi); 4971 4972 return num_buffs_reaped; 4973 } 4974 4975 int ath11k_dp_rx_process_mon_rings(struct ath11k_base *ab, int mac_id, 4976 struct napi_struct *napi, int budget) 4977 { 4978 struct ath11k *ar = ath11k_ab_to_ar(ab, mac_id); 4979 int ret = 0; 4980 4981 if (test_bit(ATH11K_FLAG_MONITOR_ENABLED, &ar->monitor_flags)) 4982 ret = ath11k_dp_mon_process_rx(ab, mac_id, napi, budget); 4983 else 4984 ret = ath11k_dp_rx_process_mon_status(ab, mac_id, napi, budget); 4985 return ret; 4986 } 4987 4988 static int ath11k_dp_rx_pdev_mon_status_attach(struct ath11k *ar) 4989 { 4990 struct ath11k_pdev_dp *dp = &ar->dp; 4991 struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data; 4992 4993 skb_queue_head_init(&pmon->rx_status_q); 4994 4995 pmon->mon_ppdu_status = DP_PPDU_STATUS_START; 4996 4997 memset(&pmon->rx_mon_stats, 0, 4998 sizeof(pmon->rx_mon_stats)); 4999 return 0; 5000 } 5001 5002 int ath11k_dp_rx_pdev_mon_attach(struct ath11k *ar) 5003 { 5004 struct ath11k_pdev_dp *dp = &ar->dp; 5005 struct ath11k_mon_data *pmon = &dp->mon_data; 5006 struct hal_srng *mon_desc_srng = NULL; 5007 struct dp_srng *dp_srng; 5008 int ret = 0; 5009 u32 n_link_desc = 0; 5010 5011 ret = ath11k_dp_rx_pdev_mon_status_attach(ar); 5012 if (ret) { 5013 ath11k_warn(ar->ab, "pdev_mon_status_attach() failed"); 5014 return ret; 5015 } 5016 5017 /* if rxdma1_enable is false, no need to setup 5018 * rxdma_mon_desc_ring. 
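* In that case the monitor destination path uses the rxdma_err_dst_ring and replenishes buffers from the rx_refill_buf_ring instead (see ath11k_dp_rx_mon_dest_process() and ath11k_dp_rx_mon_mpdu_pop()).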
5019 */ 5020 if (!ar->ab->hw_params.rxdma1_enable) 5021 return 0; 5022 5023 dp_srng = &dp->rxdma_mon_desc_ring; 5024 n_link_desc = dp_srng->size / 5025 ath11k_hal_srng_get_entrysize(ar->ab, HAL_RXDMA_MONITOR_DESC); 5026 mon_desc_srng = 5027 &ar->ab->hal.srng_list[dp->rxdma_mon_desc_ring.ring_id]; 5028 5029 ret = ath11k_dp_link_desc_setup(ar->ab, pmon->link_desc_banks, 5030 HAL_RXDMA_MONITOR_DESC, mon_desc_srng, 5031 n_link_desc); 5032 if (ret) { 5033 ath11k_warn(ar->ab, "mon_link_desc_pool_setup() failed"); 5034 return ret; 5035 } 5036 pmon->mon_last_linkdesc_paddr = 0; 5037 pmon->mon_last_buf_cookie = DP_RX_DESC_COOKIE_MAX + 1; 5038 spin_lock_init(&pmon->mon_lock); 5039 5040 return 0; 5041 } 5042 5043 static int ath11k_dp_mon_link_free(struct ath11k *ar) 5044 { 5045 struct ath11k_pdev_dp *dp = &ar->dp; 5046 struct ath11k_mon_data *pmon = &dp->mon_data; 5047 5048 ath11k_dp_link_desc_cleanup(ar->ab, pmon->link_desc_banks, 5049 HAL_RXDMA_MONITOR_DESC, 5050 &dp->rxdma_mon_desc_ring); 5051 return 0; 5052 } 5053 5054 int ath11k_dp_rx_pdev_mon_detach(struct ath11k *ar) 5055 { 5056 ath11k_dp_mon_link_free(ar); 5057 return 0; 5058 } 5059 5060 int ath11k_dp_rx_pktlog_start(struct ath11k_base *ab) 5061 { 5062 /* start reap timer */ 5063 mod_timer(&ab->mon_reap_timer, 5064 jiffies + msecs_to_jiffies(ATH11K_MON_TIMER_INTERVAL)); 5065 5066 return 0; 5067 } 5068 5069 int ath11k_dp_rx_pktlog_stop(struct ath11k_base *ab, bool stop_timer) 5070 { 5071 int ret; 5072 5073 if (stop_timer) 5074 del_timer_sync(&ab->mon_reap_timer); 5075 5076 /* reap all the monitor related rings */ 5077 ret = ath11k_dp_purge_mon_ring(ab); 5078 if (ret) { 5079 ath11k_warn(ab, "failed to purge dp mon ring: %d\n", ret); 5080 return ret; 5081 } 5082 5083 return 0; 5084 } 5085