// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
 */

#include <linux/ieee80211.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <crypto/hash.h>
#include "core.h"
#include "debug.h"
#include "hal_desc.h"
#include "hw.h"
#include "dp_rx.h"
#include "hal_rx.h"
#include "dp_tx.h"
#include "peer.h"

#define ATH11K_DP_RX_FRAGMENT_TIMEOUT_MS (2 * HZ)

static u8 *ath11k_dp_rx_h_80211_hdr(struct hal_rx_desc *desc)
{
	return desc->hdr_status;
}

static enum hal_encrypt_type ath11k_dp_rx_h_mpdu_start_enctype(struct hal_rx_desc *desc)
{
	if (!(__le32_to_cpu(desc->mpdu_start.info1) &
	      RX_MPDU_START_INFO1_ENCRYPT_INFO_VALID))
		return HAL_ENCRYPT_TYPE_OPEN;

	return FIELD_GET(RX_MPDU_START_INFO2_ENC_TYPE,
			 __le32_to_cpu(desc->mpdu_start.info2));
}

static u8 ath11k_dp_rx_h_msdu_start_decap_type(struct hal_rx_desc *desc)
{
	return FIELD_GET(RX_MSDU_START_INFO2_DECAP_FORMAT,
			 __le32_to_cpu(desc->msdu_start.info2));
}

static u8 ath11k_dp_rx_h_msdu_start_mesh_ctl_present(struct hal_rx_desc *desc)
{
	return FIELD_GET(RX_MSDU_START_INFO2_MESH_CTRL_PRESENT,
			 __le32_to_cpu(desc->msdu_start.info2));
}

static bool ath11k_dp_rx_h_mpdu_start_seq_ctrl_valid(struct hal_rx_desc *desc)
{
	return !!FIELD_GET(RX_MPDU_START_INFO1_MPDU_SEQ_CTRL_VALID,
			   __le32_to_cpu(desc->mpdu_start.info1));
}

static bool ath11k_dp_rx_h_mpdu_start_fc_valid(struct hal_rx_desc *desc)
{
	return !!FIELD_GET(RX_MPDU_START_INFO1_MPDU_FCTRL_VALID,
			   __le32_to_cpu(desc->mpdu_start.info1));
}

static bool ath11k_dp_rx_h_mpdu_start_more_frags(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;

	hdr = (struct ieee80211_hdr *)(skb->data + HAL_RX_DESC_SIZE);
	return ieee80211_has_morefrags(hdr->frame_control);
}

static u16 ath11k_dp_rx_h_mpdu_start_frag_no(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;

	hdr = (struct ieee80211_hdr *)(skb->data + HAL_RX_DESC_SIZE);
	return le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
}

static u16 ath11k_dp_rx_h_mpdu_start_seq_no(struct hal_rx_desc *desc)
{
	return FIELD_GET(RX_MPDU_START_INFO1_MPDU_SEQ_NUM,
			 __le32_to_cpu(desc->mpdu_start.info1));
}

static bool ath11k_dp_rx_h_attn_msdu_done(struct hal_rx_desc *desc)
{
	return !!FIELD_GET(RX_ATTENTION_INFO2_MSDU_DONE,
			   __le32_to_cpu(desc->attention.info2));
}

static bool ath11k_dp_rx_h_attn_l4_cksum_fail(struct hal_rx_desc *desc)
{
	return !!FIELD_GET(RX_ATTENTION_INFO1_TCP_UDP_CKSUM_FAIL,
			   __le32_to_cpu(desc->attention.info1));
}

static bool ath11k_dp_rx_h_attn_ip_cksum_fail(struct hal_rx_desc *desc)
{
	return !!FIELD_GET(RX_ATTENTION_INFO1_IP_CKSUM_FAIL,
			   __le32_to_cpu(desc->attention.info1));
}

static bool ath11k_dp_rx_h_attn_is_decrypted(struct hal_rx_desc *desc)
{
	return (FIELD_GET(RX_ATTENTION_INFO2_DCRYPT_STATUS_CODE,
			  __le32_to_cpu(desc->attention.info2)) ==
		RX_DESC_DECRYPT_STATUS_CODE_OK);
}

static u32 ath11k_dp_rx_h_attn_mpdu_err(struct hal_rx_desc *desc)
{
	u32 info = __le32_to_cpu(desc->attention.info1);
	u32 errmap = 0;

	if (info & RX_ATTENTION_INFO1_FCS_ERR)
		errmap |= DP_RX_MPDU_ERR_FCS;

	if (info & RX_ATTENTION_INFO1_DECRYPT_ERR)
		errmap |= DP_RX_MPDU_ERR_DECRYPT;

	if (info & RX_ATTENTION_INFO1_TKIP_MIC_ERR)
		errmap |= DP_RX_MPDU_ERR_TKIP_MIC;
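	/* The remaining attention bits are folded into the same error
	 * bitmap: A-MSDU, ring overflow, MSDU length and MPDU length
	 * errors each map to their DP_RX_MPDU_ERR_* counterpart below.
	 */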
121 if (info & RX_ATTENTION_INFO1_A_MSDU_ERROR) 122 errmap |= DP_RX_MPDU_ERR_AMSDU_ERR; 123 124 if (info & RX_ATTENTION_INFO1_OVERFLOW_ERR) 125 errmap |= DP_RX_MPDU_ERR_OVERFLOW; 126 127 if (info & RX_ATTENTION_INFO1_MSDU_LEN_ERR) 128 errmap |= DP_RX_MPDU_ERR_MSDU_LEN; 129 130 if (info & RX_ATTENTION_INFO1_MPDU_LEN_ERR) 131 errmap |= DP_RX_MPDU_ERR_MPDU_LEN; 132 133 return errmap; 134 } 135 136 static u16 ath11k_dp_rx_h_msdu_start_msdu_len(struct hal_rx_desc *desc) 137 { 138 return FIELD_GET(RX_MSDU_START_INFO1_MSDU_LENGTH, 139 __le32_to_cpu(desc->msdu_start.info1)); 140 } 141 142 static u8 ath11k_dp_rx_h_msdu_start_sgi(struct hal_rx_desc *desc) 143 { 144 return FIELD_GET(RX_MSDU_START_INFO3_SGI, 145 __le32_to_cpu(desc->msdu_start.info3)); 146 } 147 148 static u8 ath11k_dp_rx_h_msdu_start_rate_mcs(struct hal_rx_desc *desc) 149 { 150 return FIELD_GET(RX_MSDU_START_INFO3_RATE_MCS, 151 __le32_to_cpu(desc->msdu_start.info3)); 152 } 153 154 static u8 ath11k_dp_rx_h_msdu_start_rx_bw(struct hal_rx_desc *desc) 155 { 156 return FIELD_GET(RX_MSDU_START_INFO3_RECV_BW, 157 __le32_to_cpu(desc->msdu_start.info3)); 158 } 159 160 static u32 ath11k_dp_rx_h_msdu_start_freq(struct hal_rx_desc *desc) 161 { 162 return __le32_to_cpu(desc->msdu_start.phy_meta_data); 163 } 164 165 static u8 ath11k_dp_rx_h_msdu_start_pkt_type(struct hal_rx_desc *desc) 166 { 167 return FIELD_GET(RX_MSDU_START_INFO3_PKT_TYPE, 168 __le32_to_cpu(desc->msdu_start.info3)); 169 } 170 171 static u8 ath11k_dp_rx_h_msdu_start_nss(struct hal_rx_desc *desc) 172 { 173 u8 mimo_ss_bitmap = FIELD_GET(RX_MSDU_START_INFO3_MIMO_SS_BITMAP, 174 __le32_to_cpu(desc->msdu_start.info3)); 175 176 return hweight8(mimo_ss_bitmap); 177 } 178 179 static u8 ath11k_dp_rx_h_mpdu_start_tid(struct hal_rx_desc *desc) 180 { 181 return FIELD_GET(RX_MPDU_START_INFO2_TID, 182 __le32_to_cpu(desc->mpdu_start.info2)); 183 } 184 185 static u16 ath11k_dp_rx_h_mpdu_start_peer_id(struct hal_rx_desc *desc) 186 { 187 return __le16_to_cpu(desc->mpdu_start.sw_peer_id); 188 } 189 190 static u8 ath11k_dp_rx_h_msdu_end_l3pad(struct hal_rx_desc *desc) 191 { 192 return FIELD_GET(RX_MSDU_END_INFO2_L3_HDR_PADDING, 193 __le32_to_cpu(desc->msdu_end.info2)); 194 } 195 196 static bool ath11k_dp_rx_h_msdu_end_first_msdu(struct hal_rx_desc *desc) 197 { 198 return !!FIELD_GET(RX_MSDU_END_INFO2_FIRST_MSDU, 199 __le32_to_cpu(desc->msdu_end.info2)); 200 } 201 202 static bool ath11k_dp_rx_h_msdu_end_last_msdu(struct hal_rx_desc *desc) 203 { 204 return !!FIELD_GET(RX_MSDU_END_INFO2_LAST_MSDU, 205 __le32_to_cpu(desc->msdu_end.info2)); 206 } 207 208 static void ath11k_dp_rx_desc_end_tlv_copy(struct hal_rx_desc *fdesc, 209 struct hal_rx_desc *ldesc) 210 { 211 memcpy((u8 *)&fdesc->msdu_end, (u8 *)&ldesc->msdu_end, 212 sizeof(struct rx_msdu_end)); 213 memcpy((u8 *)&fdesc->attention, (u8 *)&ldesc->attention, 214 sizeof(struct rx_attention)); 215 memcpy((u8 *)&fdesc->mpdu_end, (u8 *)&ldesc->mpdu_end, 216 sizeof(struct rx_mpdu_end)); 217 } 218 219 static u32 ath11k_dp_rxdesc_get_mpdulen_err(struct hal_rx_desc *rx_desc) 220 { 221 struct rx_attention *rx_attn; 222 223 rx_attn = &rx_desc->attention; 224 225 return FIELD_GET(RX_ATTENTION_INFO1_MPDU_LEN_ERR, 226 __le32_to_cpu(rx_attn->info1)); 227 } 228 229 static u32 ath11k_dp_rxdesc_get_decap_format(struct hal_rx_desc *rx_desc) 230 { 231 struct rx_msdu_start *rx_msdu_start; 232 233 rx_msdu_start = &rx_desc->msdu_start; 234 235 return FIELD_GET(RX_MSDU_START_INFO2_DECAP_FORMAT, 236 __le32_to_cpu(rx_msdu_start->info2)); 237 } 238 239 static u8 
*ath11k_dp_rxdesc_get_80211hdr(struct hal_rx_desc *rx_desc) 240 { 241 u8 *rx_pkt_hdr; 242 243 rx_pkt_hdr = &rx_desc->msdu_payload[0]; 244 245 return rx_pkt_hdr; 246 } 247 248 static bool ath11k_dp_rxdesc_mpdu_valid(struct hal_rx_desc *rx_desc) 249 { 250 u32 tlv_tag; 251 252 tlv_tag = FIELD_GET(HAL_TLV_HDR_TAG, 253 __le32_to_cpu(rx_desc->mpdu_start_tag)); 254 255 return tlv_tag == HAL_RX_MPDU_START; 256 } 257 258 static u32 ath11k_dp_rxdesc_get_ppduid(struct hal_rx_desc *rx_desc) 259 { 260 return __le16_to_cpu(rx_desc->mpdu_start.phy_ppdu_id); 261 } 262 263 /* Returns number of Rx buffers replenished */ 264 int ath11k_dp_rxbufs_replenish(struct ath11k_base *ab, int mac_id, 265 struct dp_rxdma_ring *rx_ring, 266 int req_entries, 267 enum hal_rx_buf_return_buf_manager mgr, 268 gfp_t gfp) 269 { 270 struct hal_srng *srng; 271 u32 *desc; 272 struct sk_buff *skb; 273 int num_free; 274 int num_remain; 275 int buf_id; 276 u32 cookie; 277 dma_addr_t paddr; 278 279 req_entries = min(req_entries, rx_ring->bufs_max); 280 281 srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id]; 282 283 spin_lock_bh(&srng->lock); 284 285 ath11k_hal_srng_access_begin(ab, srng); 286 287 num_free = ath11k_hal_srng_src_num_free(ab, srng, true); 288 if (!req_entries && (num_free > (rx_ring->bufs_max * 3) / 4)) 289 req_entries = num_free; 290 291 req_entries = min(num_free, req_entries); 292 num_remain = req_entries; 293 294 while (num_remain > 0) { 295 skb = dev_alloc_skb(DP_RX_BUFFER_SIZE + 296 DP_RX_BUFFER_ALIGN_SIZE); 297 if (!skb) 298 break; 299 300 if (!IS_ALIGNED((unsigned long)skb->data, 301 DP_RX_BUFFER_ALIGN_SIZE)) { 302 skb_pull(skb, 303 PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) - 304 skb->data); 305 } 306 307 paddr = dma_map_single(ab->dev, skb->data, 308 skb->len + skb_tailroom(skb), 309 DMA_FROM_DEVICE); 310 if (dma_mapping_error(ab->dev, paddr)) 311 goto fail_free_skb; 312 313 spin_lock_bh(&rx_ring->idr_lock); 314 buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0, 315 rx_ring->bufs_max * 3, gfp); 316 spin_unlock_bh(&rx_ring->idr_lock); 317 if (buf_id < 0) 318 goto fail_dma_unmap; 319 320 desc = ath11k_hal_srng_src_get_next_entry(ab, srng); 321 if (!desc) 322 goto fail_idr_remove; 323 324 ATH11K_SKB_RXCB(skb)->paddr = paddr; 325 326 cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) | 327 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id); 328 329 num_remain--; 330 331 ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr); 332 } 333 334 ath11k_hal_srng_access_end(ab, srng); 335 336 spin_unlock_bh(&srng->lock); 337 338 return req_entries - num_remain; 339 340 fail_idr_remove: 341 spin_lock_bh(&rx_ring->idr_lock); 342 idr_remove(&rx_ring->bufs_idr, buf_id); 343 spin_unlock_bh(&rx_ring->idr_lock); 344 fail_dma_unmap: 345 dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb), 346 DMA_FROM_DEVICE); 347 fail_free_skb: 348 dev_kfree_skb_any(skb); 349 350 ath11k_hal_srng_access_end(ab, srng); 351 352 spin_unlock_bh(&srng->lock); 353 354 return req_entries - num_remain; 355 } 356 357 static int ath11k_dp_rxdma_buf_ring_free(struct ath11k *ar, 358 struct dp_rxdma_ring *rx_ring) 359 { 360 struct ath11k_pdev_dp *dp = &ar->dp; 361 struct sk_buff *skb; 362 int buf_id; 363 364 spin_lock_bh(&rx_ring->idr_lock); 365 idr_for_each_entry(&rx_ring->bufs_idr, skb, buf_id) { 366 idr_remove(&rx_ring->bufs_idr, buf_id); 367 /* TODO: Understand where internal driver does this dma_unmap of 368 * of rxdma_buffer. 
369 */ 370 dma_unmap_single(ar->ab->dev, ATH11K_SKB_RXCB(skb)->paddr, 371 skb->len + skb_tailroom(skb), DMA_FROM_DEVICE); 372 dev_kfree_skb_any(skb); 373 } 374 375 idr_destroy(&rx_ring->bufs_idr); 376 spin_unlock_bh(&rx_ring->idr_lock); 377 378 /* if rxdma1_enable is false, mon_status_refill_ring 379 * isn't setup, so don't clean. 380 */ 381 if (!ar->ab->hw_params.rxdma1_enable) 382 return 0; 383 384 rx_ring = &dp->rx_mon_status_refill_ring[0]; 385 386 spin_lock_bh(&rx_ring->idr_lock); 387 idr_for_each_entry(&rx_ring->bufs_idr, skb, buf_id) { 388 idr_remove(&rx_ring->bufs_idr, buf_id); 389 /* XXX: Understand where internal driver does this dma_unmap of 390 * of rxdma_buffer. 391 */ 392 dma_unmap_single(ar->ab->dev, ATH11K_SKB_RXCB(skb)->paddr, 393 skb->len + skb_tailroom(skb), DMA_BIDIRECTIONAL); 394 dev_kfree_skb_any(skb); 395 } 396 397 idr_destroy(&rx_ring->bufs_idr); 398 spin_unlock_bh(&rx_ring->idr_lock); 399 400 return 0; 401 } 402 403 static int ath11k_dp_rxdma_pdev_buf_free(struct ath11k *ar) 404 { 405 struct ath11k_pdev_dp *dp = &ar->dp; 406 struct ath11k_base *ab = ar->ab; 407 struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring; 408 int i; 409 410 ath11k_dp_rxdma_buf_ring_free(ar, rx_ring); 411 412 rx_ring = &dp->rxdma_mon_buf_ring; 413 ath11k_dp_rxdma_buf_ring_free(ar, rx_ring); 414 415 for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) { 416 rx_ring = &dp->rx_mon_status_refill_ring[i]; 417 ath11k_dp_rxdma_buf_ring_free(ar, rx_ring); 418 } 419 420 return 0; 421 } 422 423 static int ath11k_dp_rxdma_ring_buf_setup(struct ath11k *ar, 424 struct dp_rxdma_ring *rx_ring, 425 u32 ringtype) 426 { 427 struct ath11k_pdev_dp *dp = &ar->dp; 428 int num_entries; 429 430 num_entries = rx_ring->refill_buf_ring.size / 431 ath11k_hal_srng_get_entrysize(ar->ab, ringtype); 432 433 rx_ring->bufs_max = num_entries; 434 ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id, rx_ring, num_entries, 435 HAL_RX_BUF_RBM_SW3_BM, GFP_KERNEL); 436 return 0; 437 } 438 439 static int ath11k_dp_rxdma_pdev_buf_setup(struct ath11k *ar) 440 { 441 struct ath11k_pdev_dp *dp = &ar->dp; 442 struct ath11k_base *ab = ar->ab; 443 struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring; 444 int i; 445 446 ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_BUF); 447 448 if (ar->ab->hw_params.rxdma1_enable) { 449 rx_ring = &dp->rxdma_mon_buf_ring; 450 ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_MONITOR_BUF); 451 } 452 453 for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) { 454 rx_ring = &dp->rx_mon_status_refill_ring[i]; 455 ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_MONITOR_STATUS); 456 } 457 458 return 0; 459 } 460 461 static void ath11k_dp_rx_pdev_srng_free(struct ath11k *ar) 462 { 463 struct ath11k_pdev_dp *dp = &ar->dp; 464 struct ath11k_base *ab = ar->ab; 465 int i; 466 467 ath11k_dp_srng_cleanup(ab, &dp->rx_refill_buf_ring.refill_buf_ring); 468 469 for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) { 470 if (ab->hw_params.rx_mac_buf_ring) 471 ath11k_dp_srng_cleanup(ab, &dp->rx_mac_buf_ring[i]); 472 473 ath11k_dp_srng_cleanup(ab, &dp->rxdma_err_dst_ring[i]); 474 ath11k_dp_srng_cleanup(ab, 475 &dp->rx_mon_status_refill_ring[i].refill_buf_ring); 476 } 477 478 ath11k_dp_srng_cleanup(ab, &dp->rxdma_mon_buf_ring.refill_buf_ring); 479 } 480 481 void ath11k_dp_pdev_reo_cleanup(struct ath11k_base *ab) 482 { 483 struct ath11k_dp *dp = &ab->dp; 484 int i; 485 486 for (i = 0; i < DP_REO_DST_RING_MAX; i++) 487 ath11k_dp_srng_cleanup(ab, &dp->reo_dst_ring[i]); 488 } 489 490 int 
ath11k_dp_pdev_reo_setup(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	int ret;
	int i;

	for (i = 0; i < DP_REO_DST_RING_MAX; i++) {
		ret = ath11k_dp_srng_setup(ab, &dp->reo_dst_ring[i],
					   HAL_REO_DST, i, 0,
					   DP_REO_DST_RING_SIZE);
		if (ret) {
			ath11k_warn(ab, "failed to setup reo_dst_ring\n");
			goto err_reo_cleanup;
		}
	}

	return 0;

err_reo_cleanup:
	ath11k_dp_pdev_reo_cleanup(ab);

	return ret;
}

static int ath11k_dp_rx_pdev_srng_alloc(struct ath11k *ar)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_base *ab = ar->ab;
	struct dp_srng *srng = NULL;
	int i;
	int ret;

	ret = ath11k_dp_srng_setup(ar->ab,
				   &dp->rx_refill_buf_ring.refill_buf_ring,
				   HAL_RXDMA_BUF, 0,
				   dp->mac_id, DP_RXDMA_BUF_RING_SIZE);
	if (ret) {
		ath11k_warn(ar->ab, "failed to setup rx_refill_buf_ring\n");
		return ret;
	}

	if (ar->ab->hw_params.rx_mac_buf_ring) {
		for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
			ret = ath11k_dp_srng_setup(ar->ab,
						   &dp->rx_mac_buf_ring[i],
						   HAL_RXDMA_BUF, 1,
						   dp->mac_id + i, 1024);
			if (ret) {
				ath11k_warn(ar->ab, "failed to setup rx_mac_buf_ring %d\n",
					    i);
				return ret;
			}
		}
	}

	for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
		ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_err_dst_ring[i],
					   HAL_RXDMA_DST, 0, dp->mac_id + i,
					   DP_RXDMA_ERR_DST_RING_SIZE);
		if (ret) {
			ath11k_warn(ar->ab, "failed to setup rxdma_err_dst_ring %d\n", i);
			return ret;
		}
	}

	for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
		srng = &dp->rx_mon_status_refill_ring[i].refill_buf_ring;
		ret = ath11k_dp_srng_setup(ar->ab,
					   srng,
					   HAL_RXDMA_MONITOR_STATUS, 0, dp->mac_id + i,
					   DP_RXDMA_MON_STATUS_RING_SIZE);
		if (ret) {
			ath11k_warn(ar->ab,
				    "failed to setup rx_mon_status_refill_ring %d\n", i);
			return ret;
		}
	}

	/* if rxdma1_enable is false, there is no need to set up
	 * rxdma_mon_buf_ring, rxdma_mon_dst_ring
	 * and rxdma_mon_desc_ring.
571 */ 572 if (!ar->ab->hw_params.rxdma1_enable) 573 return 0; 574 575 ret = ath11k_dp_srng_setup(ar->ab, 576 &dp->rxdma_mon_buf_ring.refill_buf_ring, 577 HAL_RXDMA_MONITOR_BUF, 0, dp->mac_id, 578 DP_RXDMA_MONITOR_BUF_RING_SIZE); 579 if (ret) { 580 ath11k_warn(ar->ab, 581 "failed to setup HAL_RXDMA_MONITOR_BUF\n"); 582 return ret; 583 } 584 585 ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_mon_dst_ring, 586 HAL_RXDMA_MONITOR_DST, 0, dp->mac_id, 587 DP_RXDMA_MONITOR_DST_RING_SIZE); 588 if (ret) { 589 ath11k_warn(ar->ab, 590 "failed to setup HAL_RXDMA_MONITOR_DST\n"); 591 return ret; 592 } 593 594 ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_mon_desc_ring, 595 HAL_RXDMA_MONITOR_DESC, 0, dp->mac_id, 596 DP_RXDMA_MONITOR_DESC_RING_SIZE); 597 if (ret) { 598 ath11k_warn(ar->ab, 599 "failed to setup HAL_RXDMA_MONITOR_DESC\n"); 600 return ret; 601 } 602 603 return 0; 604 } 605 606 void ath11k_dp_reo_cmd_list_cleanup(struct ath11k_base *ab) 607 { 608 struct ath11k_dp *dp = &ab->dp; 609 struct dp_reo_cmd *cmd, *tmp; 610 struct dp_reo_cache_flush_elem *cmd_cache, *tmp_cache; 611 612 spin_lock_bh(&dp->reo_cmd_lock); 613 list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) { 614 list_del(&cmd->list); 615 dma_unmap_single(ab->dev, cmd->data.paddr, 616 cmd->data.size, DMA_BIDIRECTIONAL); 617 kfree(cmd->data.vaddr); 618 kfree(cmd); 619 } 620 621 list_for_each_entry_safe(cmd_cache, tmp_cache, 622 &dp->reo_cmd_cache_flush_list, list) { 623 list_del(&cmd_cache->list); 624 dp->reo_cmd_cache_flush_count--; 625 dma_unmap_single(ab->dev, cmd_cache->data.paddr, 626 cmd_cache->data.size, DMA_BIDIRECTIONAL); 627 kfree(cmd_cache->data.vaddr); 628 kfree(cmd_cache); 629 } 630 spin_unlock_bh(&dp->reo_cmd_lock); 631 } 632 633 static void ath11k_dp_reo_cmd_free(struct ath11k_dp *dp, void *ctx, 634 enum hal_reo_cmd_status status) 635 { 636 struct dp_rx_tid *rx_tid = ctx; 637 638 if (status != HAL_REO_CMD_SUCCESS) 639 ath11k_warn(dp->ab, "failed to flush rx tid hw desc, tid %d status %d\n", 640 rx_tid->tid, status); 641 642 dma_unmap_single(dp->ab->dev, rx_tid->paddr, rx_tid->size, 643 DMA_BIDIRECTIONAL); 644 kfree(rx_tid->vaddr); 645 } 646 647 static void ath11k_dp_reo_cache_flush(struct ath11k_base *ab, 648 struct dp_rx_tid *rx_tid) 649 { 650 struct ath11k_hal_reo_cmd cmd = {0}; 651 unsigned long tot_desc_sz, desc_sz; 652 int ret; 653 654 tot_desc_sz = rx_tid->size; 655 desc_sz = ath11k_hal_reo_qdesc_size(0, HAL_DESC_REO_NON_QOS_TID); 656 657 while (tot_desc_sz > desc_sz) { 658 tot_desc_sz -= desc_sz; 659 cmd.addr_lo = lower_32_bits(rx_tid->paddr + tot_desc_sz); 660 cmd.addr_hi = upper_32_bits(rx_tid->paddr); 661 ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid, 662 HAL_REO_CMD_FLUSH_CACHE, &cmd, 663 NULL); 664 if (ret) 665 ath11k_warn(ab, 666 "failed to send HAL_REO_CMD_FLUSH_CACHE, tid %d (%d)\n", 667 rx_tid->tid, ret); 668 } 669 670 memset(&cmd, 0, sizeof(cmd)); 671 cmd.addr_lo = lower_32_bits(rx_tid->paddr); 672 cmd.addr_hi = upper_32_bits(rx_tid->paddr); 673 cmd.flag |= HAL_REO_CMD_FLG_NEED_STATUS; 674 ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid, 675 HAL_REO_CMD_FLUSH_CACHE, 676 &cmd, ath11k_dp_reo_cmd_free); 677 if (ret) { 678 ath11k_err(ab, "failed to send HAL_REO_CMD_FLUSH_CACHE cmd, tid %d (%d)\n", 679 rx_tid->tid, ret); 680 dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size, 681 DMA_BIDIRECTIONAL); 682 kfree(rx_tid->vaddr); 683 } 684 } 685 686 static void ath11k_dp_rx_tid_del_func(struct ath11k_dp *dp, void *ctx, 687 enum hal_reo_cmd_status status) 688 { 689 struct ath11k_base *ab = dp->ab; 690 struct 
dp_rx_tid *rx_tid = ctx; 691 struct dp_reo_cache_flush_elem *elem, *tmp; 692 693 if (status == HAL_REO_CMD_DRAIN) { 694 goto free_desc; 695 } else if (status != HAL_REO_CMD_SUCCESS) { 696 /* Shouldn't happen! Cleanup in case of other failure? */ 697 ath11k_warn(ab, "failed to delete rx tid %d hw descriptor %d\n", 698 rx_tid->tid, status); 699 return; 700 } 701 702 elem = kzalloc(sizeof(*elem), GFP_ATOMIC); 703 if (!elem) 704 goto free_desc; 705 706 elem->ts = jiffies; 707 memcpy(&elem->data, rx_tid, sizeof(*rx_tid)); 708 709 spin_lock_bh(&dp->reo_cmd_lock); 710 list_add_tail(&elem->list, &dp->reo_cmd_cache_flush_list); 711 dp->reo_cmd_cache_flush_count++; 712 713 /* Flush and invalidate aged REO desc from HW cache */ 714 list_for_each_entry_safe(elem, tmp, &dp->reo_cmd_cache_flush_list, 715 list) { 716 if (dp->reo_cmd_cache_flush_count > DP_REO_DESC_FREE_THRESHOLD || 717 time_after(jiffies, elem->ts + 718 msecs_to_jiffies(DP_REO_DESC_FREE_TIMEOUT_MS))) { 719 list_del(&elem->list); 720 dp->reo_cmd_cache_flush_count--; 721 spin_unlock_bh(&dp->reo_cmd_lock); 722 723 ath11k_dp_reo_cache_flush(ab, &elem->data); 724 kfree(elem); 725 spin_lock_bh(&dp->reo_cmd_lock); 726 } 727 } 728 spin_unlock_bh(&dp->reo_cmd_lock); 729 730 return; 731 free_desc: 732 dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size, 733 DMA_BIDIRECTIONAL); 734 kfree(rx_tid->vaddr); 735 } 736 737 void ath11k_peer_rx_tid_delete(struct ath11k *ar, 738 struct ath11k_peer *peer, u8 tid) 739 { 740 struct ath11k_hal_reo_cmd cmd = {0}; 741 struct dp_rx_tid *rx_tid = &peer->rx_tid[tid]; 742 int ret; 743 744 if (!rx_tid->active) 745 return; 746 747 cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS; 748 cmd.addr_lo = lower_32_bits(rx_tid->paddr); 749 cmd.addr_hi = upper_32_bits(rx_tid->paddr); 750 cmd.upd0 |= HAL_REO_CMD_UPD0_VLD; 751 ret = ath11k_dp_tx_send_reo_cmd(ar->ab, rx_tid, 752 HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd, 753 ath11k_dp_rx_tid_del_func); 754 if (ret) { 755 ath11k_err(ar->ab, "failed to send HAL_REO_CMD_UPDATE_RX_QUEUE cmd, tid %d (%d)\n", 756 tid, ret); 757 dma_unmap_single(ar->ab->dev, rx_tid->paddr, rx_tid->size, 758 DMA_BIDIRECTIONAL); 759 kfree(rx_tid->vaddr); 760 } 761 762 rx_tid->active = false; 763 } 764 765 static int ath11k_dp_rx_link_desc_return(struct ath11k_base *ab, 766 u32 *link_desc, 767 enum hal_wbm_rel_bm_act action) 768 { 769 struct ath11k_dp *dp = &ab->dp; 770 struct hal_srng *srng; 771 u32 *desc; 772 int ret = 0; 773 774 srng = &ab->hal.srng_list[dp->wbm_desc_rel_ring.ring_id]; 775 776 spin_lock_bh(&srng->lock); 777 778 ath11k_hal_srng_access_begin(ab, srng); 779 780 desc = ath11k_hal_srng_src_get_next_entry(ab, srng); 781 if (!desc) { 782 ret = -ENOBUFS; 783 goto exit; 784 } 785 786 ath11k_hal_rx_msdu_link_desc_set(ab, (void *)desc, (void *)link_desc, 787 action); 788 789 exit: 790 ath11k_hal_srng_access_end(ab, srng); 791 792 spin_unlock_bh(&srng->lock); 793 794 return ret; 795 } 796 797 static void ath11k_dp_rx_frags_cleanup(struct dp_rx_tid *rx_tid, bool rel_link_desc) 798 { 799 struct ath11k_base *ab = rx_tid->ab; 800 801 lockdep_assert_held(&ab->base_lock); 802 803 if (rx_tid->dst_ring_desc) { 804 if (rel_link_desc) 805 ath11k_dp_rx_link_desc_return(ab, (u32 *)rx_tid->dst_ring_desc, 806 HAL_WBM_REL_BM_ACT_PUT_IN_IDLE); 807 kfree(rx_tid->dst_ring_desc); 808 rx_tid->dst_ring_desc = NULL; 809 } 810 811 rx_tid->cur_sn = 0; 812 rx_tid->last_frag_no = 0; 813 rx_tid->rx_frag_bitmap = 0; 814 __skb_queue_purge(&rx_tid->rx_frags); 815 } 816 817 void ath11k_peer_rx_tid_cleanup(struct ath11k *ar, struct ath11k_peer 
				*peer)
{
	struct dp_rx_tid *rx_tid;
	int i;

	lockdep_assert_held(&ar->ab->base_lock);

	for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
		rx_tid = &peer->rx_tid[i];

		ath11k_peer_rx_tid_delete(ar, peer, i);
		ath11k_dp_rx_frags_cleanup(rx_tid, true);

		spin_unlock_bh(&ar->ab->base_lock);
		del_timer_sync(&rx_tid->frag_timer);
		spin_lock_bh(&ar->ab->base_lock);
	}
}

static int ath11k_peer_rx_tid_reo_update(struct ath11k *ar,
					 struct ath11k_peer *peer,
					 struct dp_rx_tid *rx_tid,
					 u32 ba_win_sz, u16 ssn,
					 bool update_ssn)
{
	struct ath11k_hal_reo_cmd cmd = {0};
	int ret;

	cmd.addr_lo = lower_32_bits(rx_tid->paddr);
	cmd.addr_hi = upper_32_bits(rx_tid->paddr);
	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
	cmd.upd0 = HAL_REO_CMD_UPD0_BA_WINDOW_SIZE;
	cmd.ba_window_size = ba_win_sz;

	if (update_ssn) {
		cmd.upd0 |= HAL_REO_CMD_UPD0_SSN;
		cmd.upd2 = FIELD_PREP(HAL_REO_CMD_UPD2_SSN, ssn);
	}

	ret = ath11k_dp_tx_send_reo_cmd(ar->ab, rx_tid,
					HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
					NULL);
	if (ret) {
		ath11k_warn(ar->ab, "failed to update rx tid queue, tid %d (%d)\n",
			    rx_tid->tid, ret);
		return ret;
	}

	rx_tid->ba_win_sz = ba_win_sz;

	return 0;
}

static void ath11k_dp_rx_tid_mem_free(struct ath11k_base *ab,
				      const u8 *peer_mac, int vdev_id, u8 tid)
{
	struct ath11k_peer *peer;
	struct dp_rx_tid *rx_tid;

	spin_lock_bh(&ab->base_lock);

	peer = ath11k_peer_find(ab, vdev_id, peer_mac);
	if (!peer) {
		ath11k_warn(ab, "failed to find the peer to free up rx tid mem\n");
		goto unlock_exit;
	}

	rx_tid = &peer->rx_tid[tid];
	if (!rx_tid->active)
		goto unlock_exit;

	dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
			 DMA_BIDIRECTIONAL);
	kfree(rx_tid->vaddr);

	rx_tid->active = false;

unlock_exit:
	spin_unlock_bh(&ab->base_lock);
}

int ath11k_peer_rx_tid_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id,
			     u8 tid, u32 ba_win_sz, u16 ssn,
			     enum hal_pn_type pn_type)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_peer *peer;
	struct dp_rx_tid *rx_tid;
	u32 hw_desc_sz;
	u32 *addr_aligned;
	void *vaddr;
	dma_addr_t paddr;
	int ret;

	spin_lock_bh(&ab->base_lock);

	peer = ath11k_peer_find(ab, vdev_id, peer_mac);
	if (!peer) {
		ath11k_warn(ab, "failed to find the peer to set up rx tid\n");
		spin_unlock_bh(&ab->base_lock);
		return -ENOENT;
	}

	rx_tid = &peer->rx_tid[tid];
	/* Update the tid queue if it is already setup */
	if (rx_tid->active) {
		paddr = rx_tid->paddr;
		ret = ath11k_peer_rx_tid_reo_update(ar, peer, rx_tid,
						    ba_win_sz, ssn, true);
		spin_unlock_bh(&ab->base_lock);
		if (ret) {
			ath11k_warn(ab, "failed to update reo for rx tid %d\n", tid);
			return ret;
		}

		ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id,
							     peer_mac, paddr,
							     tid, 1, ba_win_sz);
		if (ret)
			ath11k_warn(ab, "failed to send wmi command to update rx reorder queue, tid :%d (%d)\n",
				    tid, ret);
		return ret;
	}

	rx_tid->tid = tid;

	rx_tid->ba_win_sz = ba_win_sz;

	/* TODO: Optimize the memory allocation for qos tid based on
	 * the actual BA window size in REO tid update path.
947 */ 948 if (tid == HAL_DESC_REO_NON_QOS_TID) 949 hw_desc_sz = ath11k_hal_reo_qdesc_size(ba_win_sz, tid); 950 else 951 hw_desc_sz = ath11k_hal_reo_qdesc_size(DP_BA_WIN_SZ_MAX, tid); 952 953 vaddr = kzalloc(hw_desc_sz + HAL_LINK_DESC_ALIGN - 1, GFP_ATOMIC); 954 if (!vaddr) { 955 spin_unlock_bh(&ab->base_lock); 956 return -ENOMEM; 957 } 958 959 addr_aligned = PTR_ALIGN(vaddr, HAL_LINK_DESC_ALIGN); 960 961 ath11k_hal_reo_qdesc_setup(addr_aligned, tid, ba_win_sz, 962 ssn, pn_type); 963 964 paddr = dma_map_single(ab->dev, addr_aligned, hw_desc_sz, 965 DMA_BIDIRECTIONAL); 966 967 ret = dma_mapping_error(ab->dev, paddr); 968 if (ret) { 969 spin_unlock_bh(&ab->base_lock); 970 goto err_mem_free; 971 } 972 973 rx_tid->vaddr = vaddr; 974 rx_tid->paddr = paddr; 975 rx_tid->size = hw_desc_sz; 976 rx_tid->active = true; 977 978 spin_unlock_bh(&ab->base_lock); 979 980 ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, peer_mac, 981 paddr, tid, 1, ba_win_sz); 982 if (ret) { 983 ath11k_warn(ar->ab, "failed to setup rx reorder queue, tid :%d (%d)\n", 984 tid, ret); 985 ath11k_dp_rx_tid_mem_free(ab, peer_mac, vdev_id, tid); 986 } 987 988 return ret; 989 990 err_mem_free: 991 kfree(vaddr); 992 993 return ret; 994 } 995 996 int ath11k_dp_rx_ampdu_start(struct ath11k *ar, 997 struct ieee80211_ampdu_params *params) 998 { 999 struct ath11k_base *ab = ar->ab; 1000 struct ath11k_sta *arsta = (void *)params->sta->drv_priv; 1001 int vdev_id = arsta->arvif->vdev_id; 1002 int ret; 1003 1004 ret = ath11k_peer_rx_tid_setup(ar, params->sta->addr, vdev_id, 1005 params->tid, params->buf_size, 1006 params->ssn, arsta->pn_type); 1007 if (ret) 1008 ath11k_warn(ab, "failed to setup rx tid %d\n", ret); 1009 1010 return ret; 1011 } 1012 1013 int ath11k_dp_rx_ampdu_stop(struct ath11k *ar, 1014 struct ieee80211_ampdu_params *params) 1015 { 1016 struct ath11k_base *ab = ar->ab; 1017 struct ath11k_peer *peer; 1018 struct ath11k_sta *arsta = (void *)params->sta->drv_priv; 1019 int vdev_id = arsta->arvif->vdev_id; 1020 dma_addr_t paddr; 1021 bool active; 1022 int ret; 1023 1024 spin_lock_bh(&ab->base_lock); 1025 1026 peer = ath11k_peer_find(ab, vdev_id, params->sta->addr); 1027 if (!peer) { 1028 ath11k_warn(ab, "failed to find the peer to stop rx aggregation\n"); 1029 spin_unlock_bh(&ab->base_lock); 1030 return -ENOENT; 1031 } 1032 1033 paddr = peer->rx_tid[params->tid].paddr; 1034 active = peer->rx_tid[params->tid].active; 1035 1036 if (!active) { 1037 spin_unlock_bh(&ab->base_lock); 1038 return 0; 1039 } 1040 1041 ret = ath11k_peer_rx_tid_reo_update(ar, peer, peer->rx_tid, 1, 0, false); 1042 spin_unlock_bh(&ab->base_lock); 1043 if (ret) { 1044 ath11k_warn(ab, "failed to update reo for rx tid %d: %d\n", 1045 params->tid, ret); 1046 return ret; 1047 } 1048 1049 ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, 1050 params->sta->addr, paddr, 1051 params->tid, 1, 1); 1052 if (ret) 1053 ath11k_warn(ab, "failed to send wmi to delete rx tid %d\n", 1054 ret); 1055 1056 return ret; 1057 } 1058 1059 int ath11k_dp_peer_rx_pn_replay_config(struct ath11k_vif *arvif, 1060 const u8 *peer_addr, 1061 enum set_key_cmd key_cmd, 1062 struct ieee80211_key_conf *key) 1063 { 1064 struct ath11k *ar = arvif->ar; 1065 struct ath11k_base *ab = ar->ab; 1066 struct ath11k_hal_reo_cmd cmd = {0}; 1067 struct ath11k_peer *peer; 1068 struct dp_rx_tid *rx_tid; 1069 u8 tid; 1070 int ret = 0; 1071 1072 /* NOTE: Enable PN/TSC replay check offload only for unicast frames. 
1073 * We use mac80211 PN/TSC replay check functionality for bcast/mcast 1074 * for now. 1075 */ 1076 if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) 1077 return 0; 1078 1079 cmd.flag |= HAL_REO_CMD_FLG_NEED_STATUS; 1080 cmd.upd0 |= HAL_REO_CMD_UPD0_PN | 1081 HAL_REO_CMD_UPD0_PN_SIZE | 1082 HAL_REO_CMD_UPD0_PN_VALID | 1083 HAL_REO_CMD_UPD0_PN_CHECK | 1084 HAL_REO_CMD_UPD0_SVLD; 1085 1086 switch (key->cipher) { 1087 case WLAN_CIPHER_SUITE_TKIP: 1088 case WLAN_CIPHER_SUITE_CCMP: 1089 case WLAN_CIPHER_SUITE_CCMP_256: 1090 case WLAN_CIPHER_SUITE_GCMP: 1091 case WLAN_CIPHER_SUITE_GCMP_256: 1092 if (key_cmd == SET_KEY) { 1093 cmd.upd1 |= HAL_REO_CMD_UPD1_PN_CHECK; 1094 cmd.pn_size = 48; 1095 } 1096 break; 1097 default: 1098 break; 1099 } 1100 1101 spin_lock_bh(&ab->base_lock); 1102 1103 peer = ath11k_peer_find(ab, arvif->vdev_id, peer_addr); 1104 if (!peer) { 1105 ath11k_warn(ab, "failed to find the peer to configure pn replay detection\n"); 1106 spin_unlock_bh(&ab->base_lock); 1107 return -ENOENT; 1108 } 1109 1110 for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) { 1111 rx_tid = &peer->rx_tid[tid]; 1112 if (!rx_tid->active) 1113 continue; 1114 cmd.addr_lo = lower_32_bits(rx_tid->paddr); 1115 cmd.addr_hi = upper_32_bits(rx_tid->paddr); 1116 ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid, 1117 HAL_REO_CMD_UPDATE_RX_QUEUE, 1118 &cmd, NULL); 1119 if (ret) { 1120 ath11k_warn(ab, "failed to configure rx tid %d queue for pn replay detection %d\n", 1121 tid, ret); 1122 break; 1123 } 1124 } 1125 1126 spin_unlock_bh(&ar->ab->base_lock); 1127 1128 return ret; 1129 } 1130 1131 static inline int ath11k_get_ppdu_user_index(struct htt_ppdu_stats *ppdu_stats, 1132 u16 peer_id) 1133 { 1134 int i; 1135 1136 for (i = 0; i < HTT_PPDU_STATS_MAX_USERS - 1; i++) { 1137 if (ppdu_stats->user_stats[i].is_valid_peer_id) { 1138 if (peer_id == ppdu_stats->user_stats[i].peer_id) 1139 return i; 1140 } else { 1141 return i; 1142 } 1143 } 1144 1145 return -EINVAL; 1146 } 1147 1148 static int ath11k_htt_tlv_ppdu_stats_parse(struct ath11k_base *ab, 1149 u16 tag, u16 len, const void *ptr, 1150 void *data) 1151 { 1152 struct htt_ppdu_stats_info *ppdu_info; 1153 struct htt_ppdu_user_stats *user_stats; 1154 int cur_user; 1155 u16 peer_id; 1156 1157 ppdu_info = (struct htt_ppdu_stats_info *)data; 1158 1159 switch (tag) { 1160 case HTT_PPDU_STATS_TAG_COMMON: 1161 if (len < sizeof(struct htt_ppdu_stats_common)) { 1162 ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n", 1163 len, tag); 1164 return -EINVAL; 1165 } 1166 memcpy((void *)&ppdu_info->ppdu_stats.common, ptr, 1167 sizeof(struct htt_ppdu_stats_common)); 1168 break; 1169 case HTT_PPDU_STATS_TAG_USR_RATE: 1170 if (len < sizeof(struct htt_ppdu_stats_user_rate)) { 1171 ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n", 1172 len, tag); 1173 return -EINVAL; 1174 } 1175 1176 peer_id = ((struct htt_ppdu_stats_user_rate *)ptr)->sw_peer_id; 1177 cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats, 1178 peer_id); 1179 if (cur_user < 0) 1180 return -EINVAL; 1181 user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user]; 1182 user_stats->peer_id = peer_id; 1183 user_stats->is_valid_peer_id = true; 1184 memcpy((void *)&user_stats->rate, ptr, 1185 sizeof(struct htt_ppdu_stats_user_rate)); 1186 user_stats->tlv_flags |= BIT(tag); 1187 break; 1188 case HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON: 1189 if (len < sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn)) { 1190 ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n", 1191 len, tag); 1192 return -EINVAL; 1193 } 1194 1195 peer_id = ((struct 
htt_ppdu_stats_usr_cmpltn_cmn *)ptr)->sw_peer_id; 1196 cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats, 1197 peer_id); 1198 if (cur_user < 0) 1199 return -EINVAL; 1200 user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user]; 1201 user_stats->peer_id = peer_id; 1202 user_stats->is_valid_peer_id = true; 1203 memcpy((void *)&user_stats->cmpltn_cmn, ptr, 1204 sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn)); 1205 user_stats->tlv_flags |= BIT(tag); 1206 break; 1207 case HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS: 1208 if (len < 1209 sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status)) { 1210 ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n", 1211 len, tag); 1212 return -EINVAL; 1213 } 1214 1215 peer_id = 1216 ((struct htt_ppdu_stats_usr_cmpltn_ack_ba_status *)ptr)->sw_peer_id; 1217 cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats, 1218 peer_id); 1219 if (cur_user < 0) 1220 return -EINVAL; 1221 user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user]; 1222 user_stats->peer_id = peer_id; 1223 user_stats->is_valid_peer_id = true; 1224 memcpy((void *)&user_stats->ack_ba, ptr, 1225 sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status)); 1226 user_stats->tlv_flags |= BIT(tag); 1227 break; 1228 } 1229 return 0; 1230 } 1231 1232 int ath11k_dp_htt_tlv_iter(struct ath11k_base *ab, const void *ptr, size_t len, 1233 int (*iter)(struct ath11k_base *ar, u16 tag, u16 len, 1234 const void *ptr, void *data), 1235 void *data) 1236 { 1237 const struct htt_tlv *tlv; 1238 const void *begin = ptr; 1239 u16 tlv_tag, tlv_len; 1240 int ret = -EINVAL; 1241 1242 while (len > 0) { 1243 if (len < sizeof(*tlv)) { 1244 ath11k_err(ab, "htt tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n", 1245 ptr - begin, len, sizeof(*tlv)); 1246 return -EINVAL; 1247 } 1248 tlv = (struct htt_tlv *)ptr; 1249 tlv_tag = FIELD_GET(HTT_TLV_TAG, tlv->header); 1250 tlv_len = FIELD_GET(HTT_TLV_LEN, tlv->header); 1251 ptr += sizeof(*tlv); 1252 len -= sizeof(*tlv); 1253 1254 if (tlv_len > len) { 1255 ath11k_err(ab, "htt tlv parse failure of tag %hhu at byte %zd (%zu bytes left, %hhu expected)\n", 1256 tlv_tag, ptr - begin, len, tlv_len); 1257 return -EINVAL; 1258 } 1259 ret = iter(ab, tlv_tag, tlv_len, ptr, data); 1260 if (ret == -ENOMEM) 1261 return ret; 1262 1263 ptr += tlv_len; 1264 len -= tlv_len; 1265 } 1266 return 0; 1267 } 1268 1269 static inline u32 ath11k_he_gi_to_nl80211_he_gi(u8 sgi) 1270 { 1271 u32 ret = 0; 1272 1273 switch (sgi) { 1274 case RX_MSDU_START_SGI_0_8_US: 1275 ret = NL80211_RATE_INFO_HE_GI_0_8; 1276 break; 1277 case RX_MSDU_START_SGI_1_6_US: 1278 ret = NL80211_RATE_INFO_HE_GI_1_6; 1279 break; 1280 case RX_MSDU_START_SGI_3_2_US: 1281 ret = NL80211_RATE_INFO_HE_GI_3_2; 1282 break; 1283 } 1284 1285 return ret; 1286 } 1287 1288 static void 1289 ath11k_update_per_peer_tx_stats(struct ath11k *ar, 1290 struct htt_ppdu_stats *ppdu_stats, u8 user) 1291 { 1292 struct ath11k_base *ab = ar->ab; 1293 struct ath11k_peer *peer; 1294 struct ieee80211_sta *sta; 1295 struct ath11k_sta *arsta; 1296 struct htt_ppdu_stats_user_rate *user_rate; 1297 struct ath11k_per_peer_tx_stats *peer_stats = &ar->peer_tx_stats; 1298 struct htt_ppdu_user_stats *usr_stats = &ppdu_stats->user_stats[user]; 1299 struct htt_ppdu_stats_common *common = &ppdu_stats->common; 1300 int ret; 1301 u8 flags, mcs, nss, bw, sgi, dcm, rate_idx = 0; 1302 u32 succ_bytes = 0; 1303 u16 rate = 0, succ_pkts = 0; 1304 u32 tx_duration = 0; 1305 u8 tid = HTT_PPDU_STATS_NON_QOS_TID; 1306 bool is_ampdu = false; 1307 1308 if (!usr_stats) 1309 
		return;

	if (!(usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_RATE)))
		return;

	if (usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON))
		is_ampdu =
			HTT_USR_CMPLTN_IS_AMPDU(usr_stats->cmpltn_cmn.flags);

	if (usr_stats->tlv_flags &
	    BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS)) {
		succ_bytes = usr_stats->ack_ba.success_bytes;
		succ_pkts = FIELD_GET(HTT_PPDU_STATS_ACK_BA_INFO_NUM_MSDU_M,
				      usr_stats->ack_ba.info);
		tid = FIELD_GET(HTT_PPDU_STATS_ACK_BA_INFO_TID_NUM,
				usr_stats->ack_ba.info);
	}

	if (common->fes_duration_us)
		tx_duration = common->fes_duration_us;

	user_rate = &usr_stats->rate;
	flags = HTT_USR_RATE_PREAMBLE(user_rate->rate_flags);
	bw = HTT_USR_RATE_BW(user_rate->rate_flags) - 2;
	nss = HTT_USR_RATE_NSS(user_rate->rate_flags) + 1;
	mcs = HTT_USR_RATE_MCS(user_rate->rate_flags);
	sgi = HTT_USR_RATE_GI(user_rate->rate_flags);
	dcm = HTT_USR_RATE_DCM(user_rate->rate_flags);

	/* Note: If the host has configured fixed rates, and in some other
	 * special cases, broadcast/management frames are sent at different
	 * rates. Should firmware rate control be skipped for those?
	 */

	if (flags == WMI_RATE_PREAMBLE_HE && mcs > ATH11K_HE_MCS_MAX) {
		ath11k_warn(ab, "Invalid HE mcs %hhd peer stats", mcs);
		return;
	}

	if (flags == WMI_RATE_PREAMBLE_VHT && mcs > ATH11K_VHT_MCS_MAX) {
		ath11k_warn(ab, "Invalid VHT mcs %hhd peer stats", mcs);
		return;
	}

	if (flags == WMI_RATE_PREAMBLE_HT && (mcs > ATH11K_HT_MCS_MAX || nss < 1)) {
		ath11k_warn(ab, "Invalid HT mcs %hhd nss %hhd peer stats",
			    mcs, nss);
		return;
	}

	if (flags == WMI_RATE_PREAMBLE_CCK || flags == WMI_RATE_PREAMBLE_OFDM) {
		ret = ath11k_mac_hw_ratecode_to_legacy_rate(mcs,
							    flags,
							    &rate_idx,
							    &rate);
		if (ret < 0)
			return;
	}

	rcu_read_lock();
	spin_lock_bh(&ab->base_lock);
	peer = ath11k_peer_find_by_id(ab, usr_stats->peer_id);

	if (!peer || !peer->sta) {
		spin_unlock_bh(&ab->base_lock);
		rcu_read_unlock();
		return;
	}

	sta = peer->sta;
	arsta = (struct ath11k_sta *)sta->drv_priv;

	memset(&arsta->txrate, 0, sizeof(arsta->txrate));

	switch (flags) {
	case WMI_RATE_PREAMBLE_OFDM:
		arsta->txrate.legacy = rate;
		break;
	case WMI_RATE_PREAMBLE_CCK:
		arsta->txrate.legacy = rate;
		break;
	case WMI_RATE_PREAMBLE_HT:
		arsta->txrate.mcs = mcs + 8 * (nss - 1);
		arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
		if (sgi)
			arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
		break;
	case WMI_RATE_PREAMBLE_VHT:
		arsta->txrate.mcs = mcs;
		arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
		if (sgi)
			arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
		break;
	case WMI_RATE_PREAMBLE_HE:
		arsta->txrate.mcs = mcs;
		arsta->txrate.flags = RATE_INFO_FLAGS_HE_MCS;
		arsta->txrate.he_dcm = dcm;
		arsta->txrate.he_gi = ath11k_he_gi_to_nl80211_he_gi(sgi);
		arsta->txrate.he_ru_alloc = ath11k_he_ru_tones_to_nl80211_he_ru_alloc(
						(user_rate->ru_end -
						 user_rate->ru_start) + 1);
		break;
	}

	arsta->txrate.nss = nss;
	arsta->txrate.bw = ath11k_mac_bw_to_mac80211_bw(bw);
	arsta->tx_duration +=
tx_duration; 1421 memcpy(&arsta->last_txrate, &arsta->txrate, sizeof(struct rate_info)); 1422 1423 /* PPDU stats reported for mgmt packet doesn't have valid tx bytes. 1424 * So skip peer stats update for mgmt packets. 1425 */ 1426 if (tid < HTT_PPDU_STATS_NON_QOS_TID) { 1427 memset(peer_stats, 0, sizeof(*peer_stats)); 1428 peer_stats->succ_pkts = succ_pkts; 1429 peer_stats->succ_bytes = succ_bytes; 1430 peer_stats->is_ampdu = is_ampdu; 1431 peer_stats->duration = tx_duration; 1432 peer_stats->ba_fails = 1433 HTT_USR_CMPLTN_LONG_RETRY(usr_stats->cmpltn_cmn.flags) + 1434 HTT_USR_CMPLTN_SHORT_RETRY(usr_stats->cmpltn_cmn.flags); 1435 1436 if (ath11k_debug_is_extd_tx_stats_enabled(ar)) 1437 ath11k_accumulate_per_peer_tx_stats(arsta, 1438 peer_stats, rate_idx); 1439 } 1440 1441 spin_unlock_bh(&ab->base_lock); 1442 rcu_read_unlock(); 1443 } 1444 1445 static void ath11k_htt_update_ppdu_stats(struct ath11k *ar, 1446 struct htt_ppdu_stats *ppdu_stats) 1447 { 1448 u8 user; 1449 1450 for (user = 0; user < HTT_PPDU_STATS_MAX_USERS - 1; user++) 1451 ath11k_update_per_peer_tx_stats(ar, ppdu_stats, user); 1452 } 1453 1454 static 1455 struct htt_ppdu_stats_info *ath11k_dp_htt_get_ppdu_desc(struct ath11k *ar, 1456 u32 ppdu_id) 1457 { 1458 struct htt_ppdu_stats_info *ppdu_info; 1459 1460 spin_lock_bh(&ar->data_lock); 1461 if (!list_empty(&ar->ppdu_stats_info)) { 1462 list_for_each_entry(ppdu_info, &ar->ppdu_stats_info, list) { 1463 if (ppdu_info->ppdu_id == ppdu_id) { 1464 spin_unlock_bh(&ar->data_lock); 1465 return ppdu_info; 1466 } 1467 } 1468 1469 if (ar->ppdu_stat_list_depth > HTT_PPDU_DESC_MAX_DEPTH) { 1470 ppdu_info = list_first_entry(&ar->ppdu_stats_info, 1471 typeof(*ppdu_info), list); 1472 list_del(&ppdu_info->list); 1473 ar->ppdu_stat_list_depth--; 1474 ath11k_htt_update_ppdu_stats(ar, &ppdu_info->ppdu_stats); 1475 kfree(ppdu_info); 1476 } 1477 } 1478 spin_unlock_bh(&ar->data_lock); 1479 1480 ppdu_info = kzalloc(sizeof(*ppdu_info), GFP_KERNEL); 1481 if (!ppdu_info) 1482 return NULL; 1483 1484 spin_lock_bh(&ar->data_lock); 1485 list_add_tail(&ppdu_info->list, &ar->ppdu_stats_info); 1486 ar->ppdu_stat_list_depth++; 1487 spin_unlock_bh(&ar->data_lock); 1488 1489 return ppdu_info; 1490 } 1491 1492 static int ath11k_htt_pull_ppdu_stats(struct ath11k_base *ab, 1493 struct sk_buff *skb) 1494 { 1495 struct ath11k_htt_ppdu_stats_msg *msg; 1496 struct htt_ppdu_stats_info *ppdu_info; 1497 struct ath11k *ar; 1498 int ret; 1499 u8 pdev_id; 1500 u32 ppdu_id, len; 1501 1502 msg = (struct ath11k_htt_ppdu_stats_msg *)skb->data; 1503 len = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PAYLOAD_SIZE, msg->info); 1504 pdev_id = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PDEV_ID, msg->info); 1505 ppdu_id = msg->ppdu_id; 1506 1507 rcu_read_lock(); 1508 ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id); 1509 if (!ar) { 1510 ret = -EINVAL; 1511 goto exit; 1512 } 1513 1514 if (ath11k_debug_is_pktlog_lite_mode_enabled(ar)) 1515 trace_ath11k_htt_ppdu_stats(ar, skb->data, len); 1516 1517 ppdu_info = ath11k_dp_htt_get_ppdu_desc(ar, ppdu_id); 1518 if (!ppdu_info) { 1519 ret = -EINVAL; 1520 goto exit; 1521 } 1522 1523 ppdu_info->ppdu_id = ppdu_id; 1524 ret = ath11k_dp_htt_tlv_iter(ab, msg->data, len, 1525 ath11k_htt_tlv_ppdu_stats_parse, 1526 (void *)ppdu_info); 1527 if (ret) { 1528 ath11k_warn(ab, "Failed to parse tlv %d\n", ret); 1529 goto exit; 1530 } 1531 1532 exit: 1533 rcu_read_unlock(); 1534 1535 return ret; 1536 } 1537 1538 static void ath11k_htt_pktlog(struct ath11k_base *ab, struct sk_buff *skb) 1539 { 1540 struct htt_pktlog_msg *data = 
(struct htt_pktlog_msg *)skb->data; 1541 struct ath_pktlog_hdr *hdr = (struct ath_pktlog_hdr *)data; 1542 struct ath11k *ar; 1543 u8 pdev_id; 1544 1545 pdev_id = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PDEV_ID, data->hdr); 1546 ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id); 1547 if (!ar) { 1548 ath11k_warn(ab, "invalid pdev id %d on htt pktlog\n", pdev_id); 1549 return; 1550 } 1551 1552 trace_ath11k_htt_pktlog(ar, data->payload, hdr->size, 1553 ar->ab->pktlog_defs_checksum); 1554 } 1555 1556 static void ath11k_htt_backpressure_event_handler(struct ath11k_base *ab, 1557 struct sk_buff *skb) 1558 { 1559 u32 *data = (u32 *)skb->data; 1560 u8 pdev_id, ring_type, ring_id, pdev_idx; 1561 u16 hp, tp; 1562 u32 backpressure_time; 1563 struct ath11k_bp_stats *bp_stats; 1564 1565 pdev_id = FIELD_GET(HTT_BACKPRESSURE_EVENT_PDEV_ID_M, *data); 1566 ring_type = FIELD_GET(HTT_BACKPRESSURE_EVENT_RING_TYPE_M, *data); 1567 ring_id = FIELD_GET(HTT_BACKPRESSURE_EVENT_RING_ID_M, *data); 1568 ++data; 1569 1570 hp = FIELD_GET(HTT_BACKPRESSURE_EVENT_HP_M, *data); 1571 tp = FIELD_GET(HTT_BACKPRESSURE_EVENT_TP_M, *data); 1572 ++data; 1573 1574 backpressure_time = *data; 1575 1576 ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "htt backpressure event, pdev %d, ring type %d,ring id %d, hp %d tp %d, backpressure time %d\n", 1577 pdev_id, ring_type, ring_id, hp, tp, backpressure_time); 1578 1579 if (ring_type == HTT_BACKPRESSURE_UMAC_RING_TYPE) { 1580 if (ring_id >= HTT_SW_UMAC_RING_IDX_MAX) 1581 return; 1582 1583 bp_stats = &ab->soc_stats.bp_stats.umac_ring_bp_stats[ring_id]; 1584 } else if (ring_type == HTT_BACKPRESSURE_LMAC_RING_TYPE) { 1585 pdev_idx = DP_HW2SW_MACID(pdev_id); 1586 1587 if (ring_id >= HTT_SW_LMAC_RING_IDX_MAX || pdev_idx >= MAX_RADIOS) 1588 return; 1589 1590 bp_stats = &ab->soc_stats.bp_stats.lmac_ring_bp_stats[ring_id][pdev_idx]; 1591 } else { 1592 ath11k_warn(ab, "unknown ring type received in htt bp event %d\n", 1593 ring_type); 1594 return; 1595 } 1596 1597 spin_lock_bh(&ab->base_lock); 1598 bp_stats->hp = hp; 1599 bp_stats->tp = tp; 1600 bp_stats->count++; 1601 bp_stats->jiffies = jiffies; 1602 spin_unlock_bh(&ab->base_lock); 1603 } 1604 1605 void ath11k_dp_htt_htc_t2h_msg_handler(struct ath11k_base *ab, 1606 struct sk_buff *skb) 1607 { 1608 struct ath11k_dp *dp = &ab->dp; 1609 struct htt_resp_msg *resp = (struct htt_resp_msg *)skb->data; 1610 enum htt_t2h_msg_type type = FIELD_GET(HTT_T2H_MSG_TYPE, *(u32 *)resp); 1611 u16 peer_id; 1612 u8 vdev_id; 1613 u8 mac_addr[ETH_ALEN]; 1614 u16 peer_mac_h16; 1615 u16 ast_hash; 1616 1617 ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "dp_htt rx msg type :0x%0x\n", type); 1618 1619 switch (type) { 1620 case HTT_T2H_MSG_TYPE_VERSION_CONF: 1621 dp->htt_tgt_ver_major = FIELD_GET(HTT_T2H_VERSION_CONF_MAJOR, 1622 resp->version_msg.version); 1623 dp->htt_tgt_ver_minor = FIELD_GET(HTT_T2H_VERSION_CONF_MINOR, 1624 resp->version_msg.version); 1625 complete(&dp->htt_tgt_version_received); 1626 break; 1627 case HTT_T2H_MSG_TYPE_PEER_MAP: 1628 vdev_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_VDEV_ID, 1629 resp->peer_map_ev.info); 1630 peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_PEER_ID, 1631 resp->peer_map_ev.info); 1632 peer_mac_h16 = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16, 1633 resp->peer_map_ev.info1); 1634 ath11k_dp_get_mac_addr(resp->peer_map_ev.mac_addr_l32, 1635 peer_mac_h16, mac_addr); 1636 ath11k_peer_map_event(ab, vdev_id, peer_id, mac_addr, 0); 1637 break; 1638 case HTT_T2H_MSG_TYPE_PEER_MAP2: 1639 vdev_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_VDEV_ID, 1640 resp->peer_map_ev.info); 1641 peer_id 
= FIELD_GET(HTT_T2H_PEER_MAP_INFO_PEER_ID, 1642 resp->peer_map_ev.info); 1643 peer_mac_h16 = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16, 1644 resp->peer_map_ev.info1); 1645 ath11k_dp_get_mac_addr(resp->peer_map_ev.mac_addr_l32, 1646 peer_mac_h16, mac_addr); 1647 ast_hash = FIELD_GET(HTT_T2H_PEER_MAP_INFO2_AST_HASH_VAL, 1648 resp->peer_map_ev.info2); 1649 ath11k_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash); 1650 break; 1651 case HTT_T2H_MSG_TYPE_PEER_UNMAP: 1652 case HTT_T2H_MSG_TYPE_PEER_UNMAP2: 1653 peer_id = FIELD_GET(HTT_T2H_PEER_UNMAP_INFO_PEER_ID, 1654 resp->peer_unmap_ev.info); 1655 ath11k_peer_unmap_event(ab, peer_id); 1656 break; 1657 case HTT_T2H_MSG_TYPE_PPDU_STATS_IND: 1658 ath11k_htt_pull_ppdu_stats(ab, skb); 1659 break; 1660 case HTT_T2H_MSG_TYPE_EXT_STATS_CONF: 1661 ath11k_dbg_htt_ext_stats_handler(ab, skb); 1662 break; 1663 case HTT_T2H_MSG_TYPE_PKTLOG: 1664 ath11k_htt_pktlog(ab, skb); 1665 break; 1666 case HTT_T2H_MSG_TYPE_BKPRESSURE_EVENT_IND: 1667 ath11k_htt_backpressure_event_handler(ab, skb); 1668 break; 1669 default: 1670 ath11k_warn(ab, "htt event %d not handled\n", type); 1671 break; 1672 } 1673 1674 dev_kfree_skb_any(skb); 1675 } 1676 1677 static int ath11k_dp_rx_msdu_coalesce(struct ath11k *ar, 1678 struct sk_buff_head *msdu_list, 1679 struct sk_buff *first, struct sk_buff *last, 1680 u8 l3pad_bytes, int msdu_len) 1681 { 1682 struct sk_buff *skb; 1683 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(first); 1684 int buf_first_hdr_len, buf_first_len; 1685 struct hal_rx_desc *ldesc; 1686 int space_extra; 1687 int rem_len; 1688 int buf_len; 1689 1690 /* As the msdu is spread across multiple rx buffers, 1691 * find the offset to the start of msdu for computing 1692 * the length of the msdu in the first buffer. 1693 */ 1694 buf_first_hdr_len = HAL_RX_DESC_SIZE + l3pad_bytes; 1695 buf_first_len = DP_RX_BUFFER_SIZE - buf_first_hdr_len; 1696 1697 if (WARN_ON_ONCE(msdu_len <= buf_first_len)) { 1698 skb_put(first, buf_first_hdr_len + msdu_len); 1699 skb_pull(first, buf_first_hdr_len); 1700 return 0; 1701 } 1702 1703 ldesc = (struct hal_rx_desc *)last->data; 1704 rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(ldesc); 1705 rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(ldesc); 1706 1707 /* MSDU spans over multiple buffers because the length of the MSDU 1708 * exceeds DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE. So assume the data 1709 * in the first buf is of length DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE. 1710 */ 1711 skb_put(first, DP_RX_BUFFER_SIZE); 1712 skb_pull(first, buf_first_hdr_len); 1713 1714 /* When an MSDU spread over multiple buffers attention, MSDU_END and 1715 * MPDU_END tlvs are valid only in the last buffer. Copy those tlvs. 
1716 */ 1717 ath11k_dp_rx_desc_end_tlv_copy(rxcb->rx_desc, ldesc); 1718 1719 space_extra = msdu_len - (buf_first_len + skb_tailroom(first)); 1720 if (space_extra > 0 && 1721 (pskb_expand_head(first, 0, space_extra, GFP_ATOMIC) < 0)) { 1722 /* Free up all buffers of the MSDU */ 1723 while ((skb = __skb_dequeue(msdu_list)) != NULL) { 1724 rxcb = ATH11K_SKB_RXCB(skb); 1725 if (!rxcb->is_continuation) { 1726 dev_kfree_skb_any(skb); 1727 break; 1728 } 1729 dev_kfree_skb_any(skb); 1730 } 1731 return -ENOMEM; 1732 } 1733 1734 rem_len = msdu_len - buf_first_len; 1735 while ((skb = __skb_dequeue(msdu_list)) != NULL && rem_len > 0) { 1736 rxcb = ATH11K_SKB_RXCB(skb); 1737 if (rxcb->is_continuation) 1738 buf_len = DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE; 1739 else 1740 buf_len = rem_len; 1741 1742 if (buf_len > (DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE)) { 1743 WARN_ON_ONCE(1); 1744 dev_kfree_skb_any(skb); 1745 return -EINVAL; 1746 } 1747 1748 skb_put(skb, buf_len + HAL_RX_DESC_SIZE); 1749 skb_pull(skb, HAL_RX_DESC_SIZE); 1750 skb_copy_from_linear_data(skb, skb_put(first, buf_len), 1751 buf_len); 1752 dev_kfree_skb_any(skb); 1753 1754 rem_len -= buf_len; 1755 if (!rxcb->is_continuation) 1756 break; 1757 } 1758 1759 return 0; 1760 } 1761 1762 static struct sk_buff *ath11k_dp_rx_get_msdu_last_buf(struct sk_buff_head *msdu_list, 1763 struct sk_buff *first) 1764 { 1765 struct sk_buff *skb; 1766 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(first); 1767 1768 if (!rxcb->is_continuation) 1769 return first; 1770 1771 skb_queue_walk(msdu_list, skb) { 1772 rxcb = ATH11K_SKB_RXCB(skb); 1773 if (!rxcb->is_continuation) 1774 return skb; 1775 } 1776 1777 return NULL; 1778 } 1779 1780 static void ath11k_dp_rx_h_csum_offload(struct sk_buff *msdu) 1781 { 1782 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); 1783 bool ip_csum_fail, l4_csum_fail; 1784 1785 ip_csum_fail = ath11k_dp_rx_h_attn_ip_cksum_fail(rxcb->rx_desc); 1786 l4_csum_fail = ath11k_dp_rx_h_attn_l4_cksum_fail(rxcb->rx_desc); 1787 1788 msdu->ip_summed = (ip_csum_fail || l4_csum_fail) ? 
1789 CHECKSUM_NONE : CHECKSUM_UNNECESSARY; 1790 } 1791 1792 static int ath11k_dp_rx_crypto_mic_len(struct ath11k *ar, 1793 enum hal_encrypt_type enctype) 1794 { 1795 switch (enctype) { 1796 case HAL_ENCRYPT_TYPE_OPEN: 1797 case HAL_ENCRYPT_TYPE_TKIP_NO_MIC: 1798 case HAL_ENCRYPT_TYPE_TKIP_MIC: 1799 return 0; 1800 case HAL_ENCRYPT_TYPE_CCMP_128: 1801 return IEEE80211_CCMP_MIC_LEN; 1802 case HAL_ENCRYPT_TYPE_CCMP_256: 1803 return IEEE80211_CCMP_256_MIC_LEN; 1804 case HAL_ENCRYPT_TYPE_GCMP_128: 1805 case HAL_ENCRYPT_TYPE_AES_GCMP_256: 1806 return IEEE80211_GCMP_MIC_LEN; 1807 case HAL_ENCRYPT_TYPE_WEP_40: 1808 case HAL_ENCRYPT_TYPE_WEP_104: 1809 case HAL_ENCRYPT_TYPE_WEP_128: 1810 case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4: 1811 case HAL_ENCRYPT_TYPE_WAPI: 1812 break; 1813 } 1814 1815 ath11k_warn(ar->ab, "unsupported encryption type %d for mic len\n", enctype); 1816 return 0; 1817 } 1818 1819 static int ath11k_dp_rx_crypto_param_len(struct ath11k *ar, 1820 enum hal_encrypt_type enctype) 1821 { 1822 switch (enctype) { 1823 case HAL_ENCRYPT_TYPE_OPEN: 1824 return 0; 1825 case HAL_ENCRYPT_TYPE_TKIP_NO_MIC: 1826 case HAL_ENCRYPT_TYPE_TKIP_MIC: 1827 return IEEE80211_TKIP_IV_LEN; 1828 case HAL_ENCRYPT_TYPE_CCMP_128: 1829 return IEEE80211_CCMP_HDR_LEN; 1830 case HAL_ENCRYPT_TYPE_CCMP_256: 1831 return IEEE80211_CCMP_256_HDR_LEN; 1832 case HAL_ENCRYPT_TYPE_GCMP_128: 1833 case HAL_ENCRYPT_TYPE_AES_GCMP_256: 1834 return IEEE80211_GCMP_HDR_LEN; 1835 case HAL_ENCRYPT_TYPE_WEP_40: 1836 case HAL_ENCRYPT_TYPE_WEP_104: 1837 case HAL_ENCRYPT_TYPE_WEP_128: 1838 case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4: 1839 case HAL_ENCRYPT_TYPE_WAPI: 1840 break; 1841 } 1842 1843 ath11k_warn(ar->ab, "unsupported encryption type %d\n", enctype); 1844 return 0; 1845 } 1846 1847 static int ath11k_dp_rx_crypto_icv_len(struct ath11k *ar, 1848 enum hal_encrypt_type enctype) 1849 { 1850 switch (enctype) { 1851 case HAL_ENCRYPT_TYPE_OPEN: 1852 case HAL_ENCRYPT_TYPE_CCMP_128: 1853 case HAL_ENCRYPT_TYPE_CCMP_256: 1854 case HAL_ENCRYPT_TYPE_GCMP_128: 1855 case HAL_ENCRYPT_TYPE_AES_GCMP_256: 1856 return 0; 1857 case HAL_ENCRYPT_TYPE_TKIP_NO_MIC: 1858 case HAL_ENCRYPT_TYPE_TKIP_MIC: 1859 return IEEE80211_TKIP_ICV_LEN; 1860 case HAL_ENCRYPT_TYPE_WEP_40: 1861 case HAL_ENCRYPT_TYPE_WEP_104: 1862 case HAL_ENCRYPT_TYPE_WEP_128: 1863 case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4: 1864 case HAL_ENCRYPT_TYPE_WAPI: 1865 break; 1866 } 1867 1868 ath11k_warn(ar->ab, "unsupported encryption type %d\n", enctype); 1869 return 0; 1870 } 1871 1872 static void ath11k_dp_rx_h_undecap_nwifi(struct ath11k *ar, 1873 struct sk_buff *msdu, 1874 u8 *first_hdr, 1875 enum hal_encrypt_type enctype, 1876 struct ieee80211_rx_status *status) 1877 { 1878 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); 1879 u8 decap_hdr[DP_MAX_NWIFI_HDR_LEN]; 1880 struct ieee80211_hdr *hdr; 1881 size_t hdr_len; 1882 u8 da[ETH_ALEN]; 1883 u8 sa[ETH_ALEN]; 1884 u16 qos_ctl = 0; 1885 u8 *qos; 1886 1887 /* copy SA & DA and pull decapped header */ 1888 hdr = (struct ieee80211_hdr *)msdu->data; 1889 hdr_len = ieee80211_hdrlen(hdr->frame_control); 1890 ether_addr_copy(da, ieee80211_get_DA(hdr)); 1891 ether_addr_copy(sa, ieee80211_get_SA(hdr)); 1892 skb_pull(msdu, ieee80211_hdrlen(hdr->frame_control)); 1893 1894 if (rxcb->is_first_msdu) { 1895 /* original 802.11 header is valid for the first msdu 1896 * hence we can reuse the same header 1897 */ 1898 hdr = (struct ieee80211_hdr *)first_hdr; 1899 hdr_len = ieee80211_hdrlen(hdr->frame_control); 1900 1901 /* Each A-MSDU subframe will be reported as a separate MSDU, 1902 
* so strip the A-MSDU bit from QoS Ctl. 1903 */ 1904 if (ieee80211_is_data_qos(hdr->frame_control)) { 1905 qos = ieee80211_get_qos_ctl(hdr); 1906 qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT; 1907 } 1908 } else { 1909 /* Rebuild qos header if this is a middle/last msdu */ 1910 hdr->frame_control |= __cpu_to_le16(IEEE80211_STYPE_QOS_DATA); 1911 1912 /* Reset the order bit as the HT_Control header is stripped */ 1913 hdr->frame_control &= ~(__cpu_to_le16(IEEE80211_FCTL_ORDER)); 1914 1915 qos_ctl = rxcb->tid; 1916 1917 if (ath11k_dp_rx_h_msdu_start_mesh_ctl_present(rxcb->rx_desc)) 1918 qos_ctl |= IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT; 1919 1920 /* TODO Add other QoS ctl fields when required */ 1921 1922 /* copy decap header before overwriting for reuse below */ 1923 memcpy(decap_hdr, (uint8_t *)hdr, hdr_len); 1924 } 1925 1926 if (!(status->flag & RX_FLAG_IV_STRIPPED)) { 1927 memcpy(skb_push(msdu, 1928 ath11k_dp_rx_crypto_param_len(ar, enctype)), 1929 (void *)hdr + hdr_len, 1930 ath11k_dp_rx_crypto_param_len(ar, enctype)); 1931 } 1932 1933 if (!rxcb->is_first_msdu) { 1934 memcpy(skb_push(msdu, 1935 IEEE80211_QOS_CTL_LEN), &qos_ctl, 1936 IEEE80211_QOS_CTL_LEN); 1937 memcpy(skb_push(msdu, hdr_len), decap_hdr, hdr_len); 1938 return; 1939 } 1940 1941 memcpy(skb_push(msdu, hdr_len), hdr, hdr_len); 1942 1943 /* original 802.11 header has a different DA and in 1944 * case of 4addr it may also have different SA 1945 */ 1946 hdr = (struct ieee80211_hdr *)msdu->data; 1947 ether_addr_copy(ieee80211_get_DA(hdr), da); 1948 ether_addr_copy(ieee80211_get_SA(hdr), sa); 1949 } 1950 1951 static void ath11k_dp_rx_h_undecap_raw(struct ath11k *ar, struct sk_buff *msdu, 1952 enum hal_encrypt_type enctype, 1953 struct ieee80211_rx_status *status, 1954 bool decrypted) 1955 { 1956 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); 1957 struct ieee80211_hdr *hdr; 1958 size_t hdr_len; 1959 size_t crypto_len; 1960 1961 if (!rxcb->is_first_msdu || 1962 !(rxcb->is_first_msdu && rxcb->is_last_msdu)) { 1963 WARN_ON_ONCE(1); 1964 return; 1965 } 1966 1967 skb_trim(msdu, msdu->len - FCS_LEN); 1968 1969 if (!decrypted) 1970 return; 1971 1972 hdr = (void *)msdu->data; 1973 1974 /* Tail */ 1975 if (status->flag & RX_FLAG_IV_STRIPPED) { 1976 skb_trim(msdu, msdu->len - 1977 ath11k_dp_rx_crypto_mic_len(ar, enctype)); 1978 1979 skb_trim(msdu, msdu->len - 1980 ath11k_dp_rx_crypto_icv_len(ar, enctype)); 1981 } else { 1982 /* MIC */ 1983 if (status->flag & RX_FLAG_MIC_STRIPPED) 1984 skb_trim(msdu, msdu->len - 1985 ath11k_dp_rx_crypto_mic_len(ar, enctype)); 1986 1987 /* ICV */ 1988 if (status->flag & RX_FLAG_ICV_STRIPPED) 1989 skb_trim(msdu, msdu->len - 1990 ath11k_dp_rx_crypto_icv_len(ar, enctype)); 1991 } 1992 1993 /* MMIC */ 1994 if ((status->flag & RX_FLAG_MMIC_STRIPPED) && 1995 !ieee80211_has_morefrags(hdr->frame_control) && 1996 enctype == HAL_ENCRYPT_TYPE_TKIP_MIC) 1997 skb_trim(msdu, msdu->len - IEEE80211_CCMP_MIC_LEN); 1998 1999 /* Head */ 2000 if (status->flag & RX_FLAG_IV_STRIPPED) { 2001 hdr_len = ieee80211_hdrlen(hdr->frame_control); 2002 crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype); 2003 2004 memmove((void *)msdu->data + crypto_len, 2005 (void *)msdu->data, hdr_len); 2006 skb_pull(msdu, crypto_len); 2007 } 2008 } 2009 2010 static void *ath11k_dp_rx_h_find_rfc1042(struct ath11k *ar, 2011 struct sk_buff *msdu, 2012 enum hal_encrypt_type enctype) 2013 { 2014 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); 2015 struct ieee80211_hdr *hdr; 2016 size_t hdr_len, crypto_len; 2017 void *rfc1042; 2018 bool is_amsdu; 
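	/* The LLC/SNAP (RFC 1042) header follows the 802.11 header and any
	 * crypto parameters of the first MSDU; when this MSDU is a subframe
	 * of an A-MSDU it is additionally offset by the A-MSDU subframe
	 * header.
	 */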
2019 2020 is_amsdu = !(rxcb->is_first_msdu && rxcb->is_last_msdu); 2021 hdr = (struct ieee80211_hdr *)ath11k_dp_rx_h_80211_hdr(rxcb->rx_desc); 2022 rfc1042 = hdr; 2023 2024 if (rxcb->is_first_msdu) { 2025 hdr_len = ieee80211_hdrlen(hdr->frame_control); 2026 crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype); 2027 2028 rfc1042 += hdr_len + crypto_len; 2029 } 2030 2031 if (is_amsdu) 2032 rfc1042 += sizeof(struct ath11k_dp_amsdu_subframe_hdr); 2033 2034 return rfc1042; 2035 } 2036 2037 static void ath11k_dp_rx_h_undecap_eth(struct ath11k *ar, 2038 struct sk_buff *msdu, 2039 u8 *first_hdr, 2040 enum hal_encrypt_type enctype, 2041 struct ieee80211_rx_status *status) 2042 { 2043 struct ieee80211_hdr *hdr; 2044 struct ethhdr *eth; 2045 size_t hdr_len; 2046 u8 da[ETH_ALEN]; 2047 u8 sa[ETH_ALEN]; 2048 void *rfc1042; 2049 2050 rfc1042 = ath11k_dp_rx_h_find_rfc1042(ar, msdu, enctype); 2051 if (WARN_ON_ONCE(!rfc1042)) 2052 return; 2053 2054 /* pull decapped header and copy SA & DA */ 2055 eth = (struct ethhdr *)msdu->data; 2056 ether_addr_copy(da, eth->h_dest); 2057 ether_addr_copy(sa, eth->h_source); 2058 skb_pull(msdu, sizeof(struct ethhdr)); 2059 2060 /* push rfc1042/llc/snap */ 2061 memcpy(skb_push(msdu, sizeof(struct ath11k_dp_rfc1042_hdr)), rfc1042, 2062 sizeof(struct ath11k_dp_rfc1042_hdr)); 2063 2064 /* push original 802.11 header */ 2065 hdr = (struct ieee80211_hdr *)first_hdr; 2066 hdr_len = ieee80211_hdrlen(hdr->frame_control); 2067 2068 if (!(status->flag & RX_FLAG_IV_STRIPPED)) { 2069 memcpy(skb_push(msdu, 2070 ath11k_dp_rx_crypto_param_len(ar, enctype)), 2071 (void *)hdr + hdr_len, 2072 ath11k_dp_rx_crypto_param_len(ar, enctype)); 2073 } 2074 2075 memcpy(skb_push(msdu, hdr_len), hdr, hdr_len); 2076 2077 /* original 802.11 header has a different DA and in 2078 * case of 4addr it may also have different SA 2079 */ 2080 hdr = (struct ieee80211_hdr *)msdu->data; 2081 ether_addr_copy(ieee80211_get_DA(hdr), da); 2082 ether_addr_copy(ieee80211_get_SA(hdr), sa); 2083 } 2084 2085 static void ath11k_dp_rx_h_undecap(struct ath11k *ar, struct sk_buff *msdu, 2086 struct hal_rx_desc *rx_desc, 2087 enum hal_encrypt_type enctype, 2088 struct ieee80211_rx_status *status, 2089 bool decrypted) 2090 { 2091 u8 *first_hdr; 2092 u8 decap; 2093 2094 first_hdr = ath11k_dp_rx_h_80211_hdr(rx_desc); 2095 decap = ath11k_dp_rx_h_msdu_start_decap_type(rx_desc); 2096 2097 switch (decap) { 2098 case DP_RX_DECAP_TYPE_NATIVE_WIFI: 2099 ath11k_dp_rx_h_undecap_nwifi(ar, msdu, first_hdr, 2100 enctype, status); 2101 break; 2102 case DP_RX_DECAP_TYPE_RAW: 2103 ath11k_dp_rx_h_undecap_raw(ar, msdu, enctype, status, 2104 decrypted); 2105 break; 2106 case DP_RX_DECAP_TYPE_ETHERNET2_DIX: 2107 /* TODO undecap support for middle/last msdu's of amsdu */ 2108 ath11k_dp_rx_h_undecap_eth(ar, msdu, first_hdr, 2109 enctype, status); 2110 break; 2111 case DP_RX_DECAP_TYPE_8023: 2112 /* TODO: Handle undecap for these formats */ 2113 break; 2114 } 2115 } 2116 2117 static void ath11k_dp_rx_h_mpdu(struct ath11k *ar, 2118 struct sk_buff *msdu, 2119 struct hal_rx_desc *rx_desc, 2120 struct ieee80211_rx_status *rx_status) 2121 { 2122 bool fill_crypto_hdr, mcast; 2123 enum hal_encrypt_type enctype; 2124 bool is_decrypted = false; 2125 struct ieee80211_hdr *hdr; 2126 struct ath11k_peer *peer; 2127 u32 err_bitmap; 2128 2129 hdr = (struct ieee80211_hdr *)msdu->data; 2130 2131 /* PN for multicast packets will be checked in mac80211 */ 2132 2133 mcast = is_multicast_ether_addr(hdr->addr1); 2134 fill_crypto_hdr = mcast; 2135 2136 is_decrypted = 
ath11k_dp_rx_h_attn_is_decrypted(rx_desc); 2137 2138 spin_lock_bh(&ar->ab->base_lock); 2139 peer = ath11k_peer_find_by_addr(ar->ab, hdr->addr2); 2140 if (peer) { 2141 if (mcast) 2142 enctype = peer->sec_type_grp; 2143 else 2144 enctype = peer->sec_type; 2145 } else { 2146 enctype = HAL_ENCRYPT_TYPE_OPEN; 2147 } 2148 spin_unlock_bh(&ar->ab->base_lock); 2149 2150 err_bitmap = ath11k_dp_rx_h_attn_mpdu_err(rx_desc); 2151 2152 /* Clear per-MPDU flags while leaving per-PPDU flags intact */ 2153 rx_status->flag &= ~(RX_FLAG_FAILED_FCS_CRC | 2154 RX_FLAG_MMIC_ERROR | 2155 RX_FLAG_DECRYPTED | 2156 RX_FLAG_IV_STRIPPED | 2157 RX_FLAG_MMIC_STRIPPED); 2158 2159 if (err_bitmap & DP_RX_MPDU_ERR_FCS) 2160 rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; 2161 if (err_bitmap & DP_RX_MPDU_ERR_TKIP_MIC) 2162 rx_status->flag |= RX_FLAG_MMIC_ERROR; 2163 2164 if (is_decrypted) { 2165 rx_status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MMIC_STRIPPED; 2166 2167 if (fill_crypto_hdr) 2168 rx_status->flag |= RX_FLAG_MIC_STRIPPED | 2169 RX_FLAG_ICV_STRIPPED; 2170 else 2171 rx_status->flag |= RX_FLAG_IV_STRIPPED | 2172 RX_FLAG_PN_VALIDATED; 2173 } 2174 2175 ath11k_dp_rx_h_csum_offload(msdu); 2176 ath11k_dp_rx_h_undecap(ar, msdu, rx_desc, 2177 enctype, rx_status, is_decrypted); 2178 2179 if (!is_decrypted || fill_crypto_hdr) 2180 return; 2181 2182 hdr = (void *)msdu->data; 2183 hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED); 2184 } 2185 2186 static void ath11k_dp_rx_h_rate(struct ath11k *ar, struct hal_rx_desc *rx_desc, 2187 struct ieee80211_rx_status *rx_status) 2188 { 2189 struct ieee80211_supported_band *sband; 2190 enum rx_msdu_start_pkt_type pkt_type; 2191 u8 bw; 2192 u8 rate_mcs, nss; 2193 u8 sgi; 2194 bool is_cck; 2195 2196 pkt_type = ath11k_dp_rx_h_msdu_start_pkt_type(rx_desc); 2197 bw = ath11k_dp_rx_h_msdu_start_rx_bw(rx_desc); 2198 rate_mcs = ath11k_dp_rx_h_msdu_start_rate_mcs(rx_desc); 2199 nss = ath11k_dp_rx_h_msdu_start_nss(rx_desc); 2200 sgi = ath11k_dp_rx_h_msdu_start_sgi(rx_desc); 2201 2202 switch (pkt_type) { 2203 case RX_MSDU_START_PKT_TYPE_11A: 2204 case RX_MSDU_START_PKT_TYPE_11B: 2205 is_cck = (pkt_type == RX_MSDU_START_PKT_TYPE_11B); 2206 sband = &ar->mac.sbands[rx_status->band]; 2207 rx_status->rate_idx = ath11k_mac_hw_rate_to_idx(sband, rate_mcs, 2208 is_cck); 2209 break; 2210 case RX_MSDU_START_PKT_TYPE_11N: 2211 rx_status->encoding = RX_ENC_HT; 2212 if (rate_mcs > ATH11K_HT_MCS_MAX) { 2213 ath11k_warn(ar->ab, 2214 "Received with invalid mcs in HT mode %d\n", 2215 rate_mcs); 2216 break; 2217 } 2218 rx_status->rate_idx = rate_mcs + (8 * (nss - 1)); 2219 if (sgi) 2220 rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI; 2221 rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw); 2222 break; 2223 case RX_MSDU_START_PKT_TYPE_11AC: 2224 rx_status->encoding = RX_ENC_VHT; 2225 rx_status->rate_idx = rate_mcs; 2226 if (rate_mcs > ATH11K_VHT_MCS_MAX) { 2227 ath11k_warn(ar->ab, 2228 "Received with invalid mcs in VHT mode %d\n", 2229 rate_mcs); 2230 break; 2231 } 2232 rx_status->nss = nss; 2233 if (sgi) 2234 rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI; 2235 rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw); 2236 break; 2237 case RX_MSDU_START_PKT_TYPE_11AX: 2238 rx_status->rate_idx = rate_mcs; 2239 if (rate_mcs > ATH11K_HE_MCS_MAX) { 2240 ath11k_warn(ar->ab, 2241 "Received with invalid mcs in HE mode %d\n", 2242 rate_mcs); 2243 break; 2244 } 2245 rx_status->encoding = RX_ENC_HE; 2246 rx_status->nss = nss; 2247 rx_status->he_gi = ath11k_he_gi_to_nl80211_he_gi(sgi); 2248 rx_status->bw = 
ath11k_mac_bw_to_mac80211_bw(bw); 2249 break; 2250 } 2251 } 2252 2253 static void ath11k_dp_rx_h_ppdu(struct ath11k *ar, struct hal_rx_desc *rx_desc, 2254 struct ieee80211_rx_status *rx_status) 2255 { 2256 u8 channel_num; 2257 u32 center_freq; 2258 2259 rx_status->freq = 0; 2260 rx_status->rate_idx = 0; 2261 rx_status->nss = 0; 2262 rx_status->encoding = RX_ENC_LEGACY; 2263 rx_status->bw = RATE_INFO_BW_20; 2264 2265 rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL; 2266 2267 channel_num = ath11k_dp_rx_h_msdu_start_freq(rx_desc); 2268 center_freq = ath11k_dp_rx_h_msdu_start_freq(rx_desc) >> 16; 2269 2270 if (center_freq >= 5935 && center_freq <= 7105) { 2271 rx_status->band = NL80211_BAND_6GHZ; 2272 } else if (channel_num >= 1 && channel_num <= 14) { 2273 rx_status->band = NL80211_BAND_2GHZ; 2274 } else if (channel_num >= 36 && channel_num <= 173) { 2275 rx_status->band = NL80211_BAND_5GHZ; 2276 } else { 2277 spin_lock_bh(&ar->data_lock); 2278 rx_status->band = ar->rx_channel->band; 2279 channel_num = 2280 ieee80211_frequency_to_channel(ar->rx_channel->center_freq); 2281 spin_unlock_bh(&ar->data_lock); 2282 ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "rx_desc: ", 2283 rx_desc, sizeof(struct hal_rx_desc)); 2284 } 2285 2286 rx_status->freq = ieee80211_channel_to_frequency(channel_num, 2287 rx_status->band); 2288 2289 ath11k_dp_rx_h_rate(ar, rx_desc, rx_status); 2290 } 2291 2292 static char *ath11k_print_get_tid(struct ieee80211_hdr *hdr, char *out, 2293 size_t size) 2294 { 2295 u8 *qc; 2296 int tid; 2297 2298 if (!ieee80211_is_data_qos(hdr->frame_control)) 2299 return ""; 2300 2301 qc = ieee80211_get_qos_ctl(hdr); 2302 tid = *qc & IEEE80211_QOS_CTL_TID_MASK; 2303 snprintf(out, size, "tid %d", tid); 2304 2305 return out; 2306 } 2307 2308 static void ath11k_dp_rx_deliver_msdu(struct ath11k *ar, struct napi_struct *napi, 2309 struct sk_buff *msdu) 2310 { 2311 static const struct ieee80211_radiotap_he known = { 2312 .data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN | 2313 IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN), 2314 .data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN), 2315 }; 2316 struct ieee80211_rx_status *status; 2317 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data; 2318 struct ieee80211_radiotap_he *he = NULL; 2319 char tid[32]; 2320 2321 status = IEEE80211_SKB_RXCB(msdu); 2322 if (status->encoding == RX_ENC_HE) { 2323 he = skb_push(msdu, sizeof(known)); 2324 memcpy(he, &known, sizeof(known)); 2325 status->flag |= RX_FLAG_RADIOTAP_HE; 2326 } 2327 2328 ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 2329 "rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n", 2330 msdu, 2331 msdu->len, 2332 ieee80211_get_SA(hdr), 2333 ath11k_print_get_tid(hdr, tid, sizeof(tid)), 2334 is_multicast_ether_addr(ieee80211_get_DA(hdr)) ? 2335 "mcast" : "ucast", 2336 (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4, 2337 (status->encoding == RX_ENC_LEGACY) ? "legacy" : "", 2338 (status->encoding == RX_ENC_HT) ? "ht" : "", 2339 (status->encoding == RX_ENC_VHT) ? "vht" : "", 2340 (status->encoding == RX_ENC_HE) ? "he" : "", 2341 (status->bw == RATE_INFO_BW_40) ? "40" : "", 2342 (status->bw == RATE_INFO_BW_80) ? "80" : "", 2343 (status->bw == RATE_INFO_BW_160) ? "160" : "", 2344 status->enc_flags & RX_ENC_FLAG_SHORT_GI ? 
"sgi " : "", 2345 status->rate_idx, 2346 status->nss, 2347 status->freq, 2348 status->band, status->flag, 2349 !!(status->flag & RX_FLAG_FAILED_FCS_CRC), 2350 !!(status->flag & RX_FLAG_MMIC_ERROR), 2351 !!(status->flag & RX_FLAG_AMSDU_MORE)); 2352 2353 /* TODO: trace rx packet */ 2354 2355 ieee80211_rx_napi(ar->hw, NULL, msdu, napi); 2356 } 2357 2358 static int ath11k_dp_rx_process_msdu(struct ath11k *ar, 2359 struct sk_buff *msdu, 2360 struct sk_buff_head *msdu_list) 2361 { 2362 struct hal_rx_desc *rx_desc, *lrx_desc; 2363 struct ieee80211_rx_status rx_status = {0}; 2364 struct ieee80211_rx_status *status; 2365 struct ath11k_skb_rxcb *rxcb; 2366 struct ieee80211_hdr *hdr; 2367 struct sk_buff *last_buf; 2368 u8 l3_pad_bytes; 2369 u8 *hdr_status; 2370 u16 msdu_len; 2371 int ret; 2372 2373 last_buf = ath11k_dp_rx_get_msdu_last_buf(msdu_list, msdu); 2374 if (!last_buf) { 2375 ath11k_warn(ar->ab, 2376 "No valid Rx buffer to access Atten/MSDU_END/MPDU_END tlvs\n"); 2377 ret = -EIO; 2378 goto free_out; 2379 } 2380 2381 rx_desc = (struct hal_rx_desc *)msdu->data; 2382 lrx_desc = (struct hal_rx_desc *)last_buf->data; 2383 if (!ath11k_dp_rx_h_attn_msdu_done(lrx_desc)) { 2384 ath11k_warn(ar->ab, "msdu_done bit in attention is not set\n"); 2385 ret = -EIO; 2386 goto free_out; 2387 } 2388 2389 rxcb = ATH11K_SKB_RXCB(msdu); 2390 rxcb->rx_desc = rx_desc; 2391 msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(rx_desc); 2392 l3_pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(lrx_desc); 2393 2394 if (rxcb->is_frag) { 2395 skb_pull(msdu, HAL_RX_DESC_SIZE); 2396 } else if (!rxcb->is_continuation) { 2397 if ((msdu_len + HAL_RX_DESC_SIZE) > DP_RX_BUFFER_SIZE) { 2398 hdr_status = ath11k_dp_rx_h_80211_hdr(rx_desc); 2399 ret = -EINVAL; 2400 ath11k_warn(ar->ab, "invalid msdu len %u\n", msdu_len); 2401 ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "", hdr_status, 2402 sizeof(struct ieee80211_hdr)); 2403 ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "", rx_desc, 2404 sizeof(struct hal_rx_desc)); 2405 goto free_out; 2406 } 2407 skb_put(msdu, HAL_RX_DESC_SIZE + l3_pad_bytes + msdu_len); 2408 skb_pull(msdu, HAL_RX_DESC_SIZE + l3_pad_bytes); 2409 } else { 2410 ret = ath11k_dp_rx_msdu_coalesce(ar, msdu_list, 2411 msdu, last_buf, 2412 l3_pad_bytes, msdu_len); 2413 if (ret) { 2414 ath11k_warn(ar->ab, 2415 "failed to coalesce msdu rx buffer%d\n", ret); 2416 goto free_out; 2417 } 2418 } 2419 2420 hdr = (struct ieee80211_hdr *)msdu->data; 2421 2422 /* Process only data frames */ 2423 if (!ieee80211_is_data(hdr->frame_control)) 2424 return -EINVAL; 2425 2426 ath11k_dp_rx_h_ppdu(ar, rx_desc, &rx_status); 2427 ath11k_dp_rx_h_mpdu(ar, msdu, rx_desc, &rx_status); 2428 2429 rx_status.flag |= RX_FLAG_SKIP_MONITOR | RX_FLAG_DUP_VALIDATED; 2430 2431 status = IEEE80211_SKB_RXCB(msdu); 2432 *status = rx_status; 2433 return 0; 2434 2435 free_out: 2436 return ret; 2437 } 2438 2439 static void ath11k_dp_rx_process_received_packets(struct ath11k_base *ab, 2440 struct napi_struct *napi, 2441 struct sk_buff_head *msdu_list, 2442 int *quota, int ring_id) 2443 { 2444 struct ath11k_skb_rxcb *rxcb; 2445 struct sk_buff *msdu; 2446 struct ath11k *ar; 2447 u8 mac_id; 2448 int ret; 2449 2450 if (skb_queue_empty(msdu_list)) 2451 return; 2452 2453 rcu_read_lock(); 2454 2455 while (*quota && (msdu = __skb_dequeue(msdu_list))) { 2456 rxcb = ATH11K_SKB_RXCB(msdu); 2457 mac_id = rxcb->mac_id; 2458 ar = ab->pdevs[mac_id].ar; 2459 if (!rcu_dereference(ab->pdevs_active[mac_id])) { 2460 dev_kfree_skb_any(msdu); 2461 continue; 2462 } 2463 2464 if 
(test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) { 2465 dev_kfree_skb_any(msdu); 2466 continue; 2467 } 2468 2469 ret = ath11k_dp_rx_process_msdu(ar, msdu, msdu_list); 2470 if (ret) { 2471 ath11k_dbg(ab, ATH11K_DBG_DATA, 2472 "Unable to process msdu %d", ret); 2473 dev_kfree_skb_any(msdu); 2474 continue; 2475 } 2476 2477 ath11k_dp_rx_deliver_msdu(ar, napi, msdu); 2478 (*quota)--; 2479 } 2480 2481 rcu_read_unlock(); 2482 } 2483 2484 int ath11k_dp_process_rx(struct ath11k_base *ab, int ring_id, 2485 struct napi_struct *napi, int budget) 2486 { 2487 struct ath11k_dp *dp = &ab->dp; 2488 struct dp_rxdma_ring *rx_ring; 2489 int num_buffs_reaped[MAX_RADIOS] = {0}; 2490 struct sk_buff_head msdu_list; 2491 struct ath11k_skb_rxcb *rxcb; 2492 int total_msdu_reaped = 0; 2493 struct hal_srng *srng; 2494 struct sk_buff *msdu; 2495 int quota = budget; 2496 bool done = false; 2497 int buf_id, mac_id; 2498 struct ath11k *ar; 2499 u32 *rx_desc; 2500 int i; 2501 2502 __skb_queue_head_init(&msdu_list); 2503 2504 srng = &ab->hal.srng_list[dp->reo_dst_ring[ring_id].ring_id]; 2505 2506 spin_lock_bh(&srng->lock); 2507 2508 ath11k_hal_srng_access_begin(ab, srng); 2509 2510 try_again: 2511 while ((rx_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) { 2512 struct hal_reo_dest_ring desc = *(struct hal_reo_dest_ring *)rx_desc; 2513 enum hal_reo_dest_ring_push_reason push_reason; 2514 u32 cookie; 2515 2516 cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE, 2517 desc.buf_addr_info.info1); 2518 buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, 2519 cookie); 2520 mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, cookie); 2521 2522 ar = ab->pdevs[mac_id].ar; 2523 rx_ring = &ar->dp.rx_refill_buf_ring; 2524 spin_lock_bh(&rx_ring->idr_lock); 2525 msdu = idr_find(&rx_ring->bufs_idr, buf_id); 2526 if (!msdu) { 2527 ath11k_warn(ab, "frame rx with invalid buf_id %d\n", 2528 buf_id); 2529 spin_unlock_bh(&rx_ring->idr_lock); 2530 continue; 2531 } 2532 2533 idr_remove(&rx_ring->bufs_idr, buf_id); 2534 spin_unlock_bh(&rx_ring->idr_lock); 2535 2536 rxcb = ATH11K_SKB_RXCB(msdu); 2537 dma_unmap_single(ab->dev, rxcb->paddr, 2538 msdu->len + skb_tailroom(msdu), 2539 DMA_FROM_DEVICE); 2540 2541 num_buffs_reaped[mac_id]++; 2542 total_msdu_reaped++; 2543 2544 push_reason = FIELD_GET(HAL_REO_DEST_RING_INFO0_PUSH_REASON, 2545 desc.info0); 2546 if (push_reason != 2547 HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION) { 2548 dev_kfree_skb_any(msdu); 2549 ab->soc_stats.hal_reo_error[dp->reo_dst_ring[ring_id].ring_id]++; 2550 continue; 2551 } 2552 2553 rxcb->is_first_msdu = !!(desc.rx_msdu_info.info0 & 2554 RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU); 2555 rxcb->is_last_msdu = !!(desc.rx_msdu_info.info0 & 2556 RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU); 2557 rxcb->is_continuation = !!(desc.rx_msdu_info.info0 & 2558 RX_MSDU_DESC_INFO0_MSDU_CONTINUATION); 2559 rxcb->mac_id = mac_id; 2560 rxcb->tid = FIELD_GET(HAL_REO_DEST_RING_INFO0_RX_QUEUE_NUM, 2561 desc.info0); 2562 2563 __skb_queue_tail(&msdu_list, msdu); 2564 2565 if (total_msdu_reaped >= quota && !rxcb->is_continuation) { 2566 done = true; 2567 break; 2568 } 2569 } 2570 2571 /* Hw might have updated the head pointer after we cached it. 2572 * In this case, even though there are entries in the ring we'll 2573 * get rx_desc NULL. Give the read another try with updated cached 2574 * head pointer so that we can reap complete MPDU in the current 2575 * rx processing. 
2576 */ 2577 if (!done && ath11k_hal_srng_dst_num_free(ab, srng, true)) { 2578 ath11k_hal_srng_access_end(ab, srng); 2579 goto try_again; 2580 } 2581 2582 ath11k_hal_srng_access_end(ab, srng); 2583 2584 spin_unlock_bh(&srng->lock); 2585 2586 if (!total_msdu_reaped) 2587 goto exit; 2588 2589 for (i = 0; i < ab->num_radios; i++) { 2590 if (!num_buffs_reaped[i]) 2591 continue; 2592 2593 ar = ab->pdevs[i].ar; 2594 rx_ring = &ar->dp.rx_refill_buf_ring; 2595 2596 ath11k_dp_rxbufs_replenish(ab, i, rx_ring, num_buffs_reaped[i], 2597 HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC); 2598 } 2599 2600 ath11k_dp_rx_process_received_packets(ab, napi, &msdu_list, 2601 "a, ring_id); 2602 2603 exit: 2604 return budget - quota; 2605 } 2606 2607 static void ath11k_dp_rx_update_peer_stats(struct ath11k_sta *arsta, 2608 struct hal_rx_mon_ppdu_info *ppdu_info) 2609 { 2610 struct ath11k_rx_peer_stats *rx_stats = arsta->rx_stats; 2611 u32 num_msdu; 2612 2613 if (!rx_stats) 2614 return; 2615 2616 num_msdu = ppdu_info->tcp_msdu_count + ppdu_info->tcp_ack_msdu_count + 2617 ppdu_info->udp_msdu_count + ppdu_info->other_msdu_count; 2618 2619 rx_stats->num_msdu += num_msdu; 2620 rx_stats->tcp_msdu_count += ppdu_info->tcp_msdu_count + 2621 ppdu_info->tcp_ack_msdu_count; 2622 rx_stats->udp_msdu_count += ppdu_info->udp_msdu_count; 2623 rx_stats->other_msdu_count += ppdu_info->other_msdu_count; 2624 2625 if (ppdu_info->preamble_type == HAL_RX_PREAMBLE_11A || 2626 ppdu_info->preamble_type == HAL_RX_PREAMBLE_11B) { 2627 ppdu_info->nss = 1; 2628 ppdu_info->mcs = HAL_RX_MAX_MCS; 2629 ppdu_info->tid = IEEE80211_NUM_TIDS; 2630 } 2631 2632 if (ppdu_info->nss > 0 && ppdu_info->nss <= HAL_RX_MAX_NSS) 2633 rx_stats->nss_count[ppdu_info->nss - 1] += num_msdu; 2634 2635 if (ppdu_info->mcs <= HAL_RX_MAX_MCS) 2636 rx_stats->mcs_count[ppdu_info->mcs] += num_msdu; 2637 2638 if (ppdu_info->gi < HAL_RX_GI_MAX) 2639 rx_stats->gi_count[ppdu_info->gi] += num_msdu; 2640 2641 if (ppdu_info->bw < HAL_RX_BW_MAX) 2642 rx_stats->bw_count[ppdu_info->bw] += num_msdu; 2643 2644 if (ppdu_info->ldpc < HAL_RX_SU_MU_CODING_MAX) 2645 rx_stats->coding_count[ppdu_info->ldpc] += num_msdu; 2646 2647 if (ppdu_info->tid <= IEEE80211_NUM_TIDS) 2648 rx_stats->tid_count[ppdu_info->tid] += num_msdu; 2649 2650 if (ppdu_info->preamble_type < HAL_RX_PREAMBLE_MAX) 2651 rx_stats->pream_cnt[ppdu_info->preamble_type] += num_msdu; 2652 2653 if (ppdu_info->reception_type < HAL_RX_RECEPTION_TYPE_MAX) 2654 rx_stats->reception_type[ppdu_info->reception_type] += num_msdu; 2655 2656 if (ppdu_info->is_stbc) 2657 rx_stats->stbc_count += num_msdu; 2658 2659 if (ppdu_info->beamformed) 2660 rx_stats->beamformed_count += num_msdu; 2661 2662 if (ppdu_info->num_mpdu_fcs_ok > 1) 2663 rx_stats->ampdu_msdu_count += num_msdu; 2664 else 2665 rx_stats->non_ampdu_msdu_count += num_msdu; 2666 2667 rx_stats->num_mpdu_fcs_ok += ppdu_info->num_mpdu_fcs_ok; 2668 rx_stats->num_mpdu_fcs_err += ppdu_info->num_mpdu_fcs_err; 2669 rx_stats->dcm_count += ppdu_info->dcm; 2670 rx_stats->ru_alloc_cnt[ppdu_info->ru_alloc] += num_msdu; 2671 2672 arsta->rssi_comb = ppdu_info->rssi_comb; 2673 rx_stats->rx_duration += ppdu_info->rx_duration; 2674 arsta->rx_duration = rx_stats->rx_duration; 2675 } 2676 2677 static struct sk_buff *ath11k_dp_rx_alloc_mon_status_buf(struct ath11k_base *ab, 2678 struct dp_rxdma_ring *rx_ring, 2679 int *buf_id, gfp_t gfp) 2680 { 2681 struct sk_buff *skb; 2682 dma_addr_t paddr; 2683 2684 skb = dev_alloc_skb(DP_RX_BUFFER_SIZE + 2685 DP_RX_BUFFER_ALIGN_SIZE); 2686 2687 if (!skb) 2688 goto fail_alloc_skb; 
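/* Align skb->data to DP_RX_BUFFER_ALIGN_SIZE before DMA-mapping the status buffer; the skb_pull() below simply discards the leading bytes needed to reach the next aligned address. */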
2689 2690 if (!IS_ALIGNED((unsigned long)skb->data, 2691 DP_RX_BUFFER_ALIGN_SIZE)) { 2692 skb_pull(skb, PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) - 2693 skb->data); 2694 } 2695 2696 paddr = dma_map_single(ab->dev, skb->data, 2697 skb->len + skb_tailroom(skb), 2698 DMA_BIDIRECTIONAL); 2699 if (unlikely(dma_mapping_error(ab->dev, paddr))) 2700 goto fail_free_skb; 2701 2702 spin_lock_bh(&rx_ring->idr_lock); 2703 *buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0, 2704 rx_ring->bufs_max, gfp); 2705 spin_unlock_bh(&rx_ring->idr_lock); 2706 if (*buf_id < 0) 2707 goto fail_dma_unmap; 2708 2709 ATH11K_SKB_RXCB(skb)->paddr = paddr; 2710 return skb; 2711 2712 fail_dma_unmap: 2713 dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb), 2714 DMA_BIDIRECTIONAL); 2715 fail_free_skb: 2716 dev_kfree_skb_any(skb); 2717 fail_alloc_skb: 2718 return NULL; 2719 } 2720 2721 int ath11k_dp_rx_mon_status_bufs_replenish(struct ath11k_base *ab, int mac_id, 2722 struct dp_rxdma_ring *rx_ring, 2723 int req_entries, 2724 enum hal_rx_buf_return_buf_manager mgr, 2725 gfp_t gfp) 2726 { 2727 struct hal_srng *srng; 2728 u32 *desc; 2729 struct sk_buff *skb; 2730 int num_free; 2731 int num_remain; 2732 int buf_id; 2733 u32 cookie; 2734 dma_addr_t paddr; 2735 2736 req_entries = min(req_entries, rx_ring->bufs_max); 2737 2738 srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id]; 2739 2740 spin_lock_bh(&srng->lock); 2741 2742 ath11k_hal_srng_access_begin(ab, srng); 2743 2744 num_free = ath11k_hal_srng_src_num_free(ab, srng, true); 2745 2746 req_entries = min(num_free, req_entries); 2747 num_remain = req_entries; 2748 2749 while (num_remain > 0) { 2750 skb = ath11k_dp_rx_alloc_mon_status_buf(ab, rx_ring, 2751 &buf_id, gfp); 2752 if (!skb) 2753 break; 2754 paddr = ATH11K_SKB_RXCB(skb)->paddr; 2755 2756 desc = ath11k_hal_srng_src_get_next_entry(ab, srng); 2757 if (!desc) 2758 goto fail_desc_get; 2759 2760 cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) | 2761 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id); 2762 2763 num_remain--; 2764 2765 ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr); 2766 } 2767 2768 ath11k_hal_srng_access_end(ab, srng); 2769 2770 spin_unlock_bh(&srng->lock); 2771 2772 return req_entries - num_remain; 2773 2774 fail_desc_get: 2775 spin_lock_bh(&rx_ring->idr_lock); 2776 idr_remove(&rx_ring->bufs_idr, buf_id); 2777 spin_unlock_bh(&rx_ring->idr_lock); 2778 dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb), 2779 DMA_BIDIRECTIONAL); 2780 dev_kfree_skb_any(skb); 2781 ath11k_hal_srng_access_end(ab, srng); 2782 spin_unlock_bh(&srng->lock); 2783 2784 return req_entries - num_remain; 2785 } 2786 2787 static int ath11k_dp_rx_reap_mon_status_ring(struct ath11k_base *ab, int mac_id, 2788 int *budget, struct sk_buff_head *skb_list) 2789 { 2790 struct ath11k *ar; 2791 struct ath11k_pdev_dp *dp; 2792 struct dp_rxdma_ring *rx_ring; 2793 struct hal_srng *srng; 2794 void *rx_mon_status_desc; 2795 struct sk_buff *skb; 2796 struct ath11k_skb_rxcb *rxcb; 2797 struct hal_tlv_hdr *tlv; 2798 u32 cookie; 2799 int buf_id, srng_id; 2800 dma_addr_t paddr; 2801 u8 rbm; 2802 int num_buffs_reaped = 0; 2803 2804 ar = ab->pdevs[ath11k_hw_mac_id_to_pdev_id(&ab->hw_params, mac_id)].ar; 2805 dp = &ar->dp; 2806 srng_id = ath11k_hw_mac_id_to_srng_id(&ab->hw_params, mac_id); 2807 rx_ring = &dp->rx_mon_status_refill_ring[srng_id]; 2808 2809 srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id]; 2810 2811 spin_lock_bh(&srng->lock); 2812 2813 ath11k_hal_srng_access_begin(ab, srng); 2814 while (*budget) { 
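/* Reap one status buffer per iteration: look up the skb for this ring entry, unmap it and, once the status DONE tag checks out, queue it on skb_list for the caller, then refill the same entry with a freshly allocated status buffer. */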
2815 *budget -= 1; 2816 rx_mon_status_desc = 2817 ath11k_hal_srng_src_peek(ab, srng); 2818 if (!rx_mon_status_desc) 2819 break; 2820 2821 ath11k_hal_rx_buf_addr_info_get(rx_mon_status_desc, &paddr, 2822 &cookie, &rbm); 2823 if (paddr) { 2824 buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, cookie); 2825 2826 spin_lock_bh(&rx_ring->idr_lock); 2827 skb = idr_find(&rx_ring->bufs_idr, buf_id); 2828 if (!skb) { 2829 ath11k_warn(ab, "rx monitor status with invalid buf_id %d\n", 2830 buf_id); 2831 spin_unlock_bh(&rx_ring->idr_lock); 2832 goto move_next; 2833 } 2834 2835 idr_remove(&rx_ring->bufs_idr, buf_id); 2836 spin_unlock_bh(&rx_ring->idr_lock); 2837 2838 rxcb = ATH11K_SKB_RXCB(skb); 2839 2840 dma_sync_single_for_cpu(ab->dev, rxcb->paddr, 2841 skb->len + skb_tailroom(skb), 2842 DMA_FROM_DEVICE); 2843 2844 dma_unmap_single(ab->dev, rxcb->paddr, 2845 skb->len + skb_tailroom(skb), 2846 DMA_BIDIRECTIONAL); 2847 2848 tlv = (struct hal_tlv_hdr *)skb->data; 2849 if (FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl) != 2850 HAL_RX_STATUS_BUFFER_DONE) { 2851 ath11k_warn(ab, "mon status DONE not set %lx\n", 2852 FIELD_GET(HAL_TLV_HDR_TAG, 2853 tlv->tl)); 2854 dev_kfree_skb_any(skb); 2855 goto move_next; 2856 } 2857 2858 __skb_queue_tail(skb_list, skb); 2859 } 2860 move_next: 2861 skb = ath11k_dp_rx_alloc_mon_status_buf(ab, rx_ring, 2862 &buf_id, GFP_ATOMIC); 2863 2864 if (!skb) { 2865 ath11k_hal_rx_buf_addr_info_set(rx_mon_status_desc, 0, 0, 2866 HAL_RX_BUF_RBM_SW3_BM); 2867 num_buffs_reaped++; 2868 break; 2869 } 2870 rxcb = ATH11K_SKB_RXCB(skb); 2871 2872 cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) | 2873 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id); 2874 2875 ath11k_hal_rx_buf_addr_info_set(rx_mon_status_desc, rxcb->paddr, 2876 cookie, HAL_RX_BUF_RBM_SW3_BM); 2877 ath11k_hal_srng_src_get_next_entry(ab, srng); 2878 num_buffs_reaped++; 2879 } 2880 ath11k_hal_srng_access_end(ab, srng); 2881 spin_unlock_bh(&srng->lock); 2882 2883 return num_buffs_reaped; 2884 } 2885 2886 int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id, 2887 struct napi_struct *napi, int budget) 2888 { 2889 struct ath11k *ar = ath11k_ab_to_ar(ab, mac_id); 2890 enum hal_rx_mon_status hal_status; 2891 struct sk_buff *skb; 2892 struct sk_buff_head skb_list; 2893 struct hal_rx_mon_ppdu_info ppdu_info; 2894 struct ath11k_peer *peer; 2895 struct ath11k_sta *arsta; 2896 int num_buffs_reaped = 0; 2897 2898 __skb_queue_head_init(&skb_list); 2899 2900 num_buffs_reaped = ath11k_dp_rx_reap_mon_status_ring(ab, mac_id, &budget, 2901 &skb_list); 2902 if (!num_buffs_reaped) 2903 goto exit; 2904 2905 while ((skb = __skb_dequeue(&skb_list))) { 2906 memset(&ppdu_info, 0, sizeof(ppdu_info)); 2907 ppdu_info.peer_id = HAL_INVALID_PEERID; 2908 2909 if (ath11k_debug_is_pktlog_rx_stats_enabled(ar)) 2910 trace_ath11k_htt_rxdesc(ar, skb->data, DP_RX_BUFFER_SIZE); 2911 2912 hal_status = ath11k_hal_rx_parse_mon_status(ab, &ppdu_info, skb); 2913 2914 if (ppdu_info.peer_id == HAL_INVALID_PEERID || 2915 hal_status != HAL_RX_MON_STATUS_PPDU_DONE) { 2916 dev_kfree_skb_any(skb); 2917 continue; 2918 } 2919 2920 rcu_read_lock(); 2921 spin_lock_bh(&ab->base_lock); 2922 peer = ath11k_peer_find_by_id(ab, ppdu_info.peer_id); 2923 2924 if (!peer || !peer->sta) { 2925 ath11k_dbg(ab, ATH11K_DBG_DATA, 2926 "failed to find the peer with peer_id %d\n", 2927 ppdu_info.peer_id); 2928 spin_unlock_bh(&ab->base_lock); 2929 rcu_read_unlock(); 2930 dev_kfree_skb_any(skb); 2931 continue; 2932 } 2933 2934 arsta = (struct ath11k_sta *)peer->sta->drv_priv; 2935 
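/* Update the per-station rx counters from the parsed PPDU info and, when peer pktlog is enabled, trace the raw status buffer as well. */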
ath11k_dp_rx_update_peer_stats(arsta, &ppdu_info); 2936 2937 if (ath11k_debug_is_pktlog_peer_valid(ar, peer->addr)) 2938 trace_ath11k_htt_rxdesc(ar, skb->data, DP_RX_BUFFER_SIZE); 2939 2940 spin_unlock_bh(&ab->base_lock); 2941 rcu_read_unlock(); 2942 2943 dev_kfree_skb_any(skb); 2944 } 2945 exit: 2946 return num_buffs_reaped; 2947 } 2948 2949 static void ath11k_dp_rx_frag_timer(struct timer_list *timer) 2950 { 2951 struct dp_rx_tid *rx_tid = from_timer(rx_tid, timer, frag_timer); 2952 2953 spin_lock_bh(&rx_tid->ab->base_lock); 2954 if (rx_tid->last_frag_no && 2955 rx_tid->rx_frag_bitmap == GENMASK(rx_tid->last_frag_no, 0)) { 2956 spin_unlock_bh(&rx_tid->ab->base_lock); 2957 return; 2958 } 2959 ath11k_dp_rx_frags_cleanup(rx_tid, true); 2960 spin_unlock_bh(&rx_tid->ab->base_lock); 2961 } 2962 2963 int ath11k_peer_rx_frag_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id) 2964 { 2965 struct ath11k_base *ab = ar->ab; 2966 struct crypto_shash *tfm; 2967 struct ath11k_peer *peer; 2968 struct dp_rx_tid *rx_tid; 2969 int i; 2970 2971 tfm = crypto_alloc_shash("michael_mic", 0, 0); 2972 if (IS_ERR(tfm)) 2973 return PTR_ERR(tfm); 2974 2975 spin_lock_bh(&ab->base_lock); 2976 2977 peer = ath11k_peer_find(ab, vdev_id, peer_mac); 2978 if (!peer) { 2979 ath11k_warn(ab, "failed to find the peer to set up fragment info\n"); 2980 spin_unlock_bh(&ab->base_lock); 2981 return -ENOENT; 2982 } 2983 2984 for (i = 0; i <= IEEE80211_NUM_TIDS; i++) { 2985 rx_tid = &peer->rx_tid[i]; 2986 rx_tid->ab = ab; 2987 timer_setup(&rx_tid->frag_timer, ath11k_dp_rx_frag_timer, 0); 2988 skb_queue_head_init(&rx_tid->rx_frags); 2989 } 2990 2991 peer->tfm_mmic = tfm; 2992 spin_unlock_bh(&ab->base_lock); 2993 2994 return 0; 2995 } 2996 2997 static int ath11k_dp_rx_h_michael_mic(struct crypto_shash *tfm, u8 *key, 2998 struct ieee80211_hdr *hdr, u8 *data, 2999 size_t data_len, u8 *mic) 3000 { 3001 SHASH_DESC_ON_STACK(desc, tfm); 3002 u8 mic_hdr[16] = {0}; 3003 u8 tid = 0; 3004 int ret; 3005 3006 if (!tfm) 3007 return -EINVAL; 3008 3009 desc->tfm = tfm; 3010 3011 ret = crypto_shash_setkey(tfm, key, 8); 3012 if (ret) 3013 goto out; 3014 3015 ret = crypto_shash_init(desc); 3016 if (ret) 3017 goto out; 3018 3019 /* TKIP MIC header */ 3020 memcpy(mic_hdr, ieee80211_get_DA(hdr), ETH_ALEN); 3021 memcpy(mic_hdr + ETH_ALEN, ieee80211_get_SA(hdr), ETH_ALEN); 3022 if (ieee80211_is_data_qos(hdr->frame_control)) 3023 tid = ieee80211_get_tid(hdr); 3024 mic_hdr[12] = tid; 3025 3026 ret = crypto_shash_update(desc, mic_hdr, 16); 3027 if (ret) 3028 goto out; 3029 ret = crypto_shash_update(desc, data, data_len); 3030 if (ret) 3031 goto out; 3032 ret = crypto_shash_final(desc, mic); 3033 out: 3034 shash_desc_zero(desc); 3035 return ret; 3036 } 3037 3038 static int ath11k_dp_rx_h_verify_tkip_mic(struct ath11k *ar, struct ath11k_peer *peer, 3039 struct sk_buff *msdu) 3040 { 3041 struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)msdu->data; 3042 struct ieee80211_rx_status *rxs = IEEE80211_SKB_RXCB(msdu); 3043 struct ieee80211_key_conf *key_conf; 3044 struct ieee80211_hdr *hdr; 3045 u8 mic[IEEE80211_CCMP_MIC_LEN]; 3046 int head_len, tail_len, ret; 3047 size_t data_len; 3048 u32 hdr_len; 3049 u8 *key, *data; 3050 u8 key_idx; 3051 3052 if (ath11k_dp_rx_h_mpdu_start_enctype(rx_desc) != HAL_ENCRYPT_TYPE_TKIP_MIC) 3053 return 0; 3054 3055 hdr = (struct ieee80211_hdr *)(msdu->data + HAL_RX_DESC_SIZE); 3056 hdr_len = ieee80211_hdrlen(hdr->frame_control); 3057 head_len = hdr_len + HAL_RX_DESC_SIZE + IEEE80211_TKIP_IV_LEN; 3058 tail_len = 
IEEE80211_CCMP_MIC_LEN + IEEE80211_TKIP_ICV_LEN + FCS_LEN; 3059 3060 if (!is_multicast_ether_addr(hdr->addr1)) 3061 key_idx = peer->ucast_keyidx; 3062 else 3063 key_idx = peer->mcast_keyidx; 3064 3065 key_conf = peer->keys[key_idx]; 3066 3067 data = msdu->data + head_len; 3068 data_len = msdu->len - head_len - tail_len; 3069 key = &key_conf->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY]; 3070 3071 ret = ath11k_dp_rx_h_michael_mic(peer->tfm_mmic, key, hdr, data, data_len, mic); 3072 if (ret || memcmp(mic, data + data_len, IEEE80211_CCMP_MIC_LEN)) 3073 goto mic_fail; 3074 3075 return 0; 3076 3077 mic_fail: 3078 (ATH11K_SKB_RXCB(msdu))->is_first_msdu = true; 3079 (ATH11K_SKB_RXCB(msdu))->is_last_msdu = true; 3080 3081 rxs->flag |= RX_FLAG_MMIC_ERROR | RX_FLAG_MMIC_STRIPPED | 3082 RX_FLAG_IV_STRIPPED | RX_FLAG_DECRYPTED; 3083 skb_pull(msdu, HAL_RX_DESC_SIZE); 3084 3085 ath11k_dp_rx_h_ppdu(ar, rx_desc, rxs); 3086 ath11k_dp_rx_h_undecap(ar, msdu, rx_desc, 3087 HAL_ENCRYPT_TYPE_TKIP_MIC, rxs, true); 3088 ieee80211_rx(ar->hw, msdu); 3089 return -EINVAL; 3090 } 3091 3092 static void ath11k_dp_rx_h_undecap_frag(struct ath11k *ar, struct sk_buff *msdu, 3093 enum hal_encrypt_type enctype, u32 flags) 3094 { 3095 struct ieee80211_hdr *hdr; 3096 size_t hdr_len; 3097 size_t crypto_len; 3098 3099 if (!flags) 3100 return; 3101 3102 hdr = (struct ieee80211_hdr *)(msdu->data + HAL_RX_DESC_SIZE); 3103 3104 if (flags & RX_FLAG_MIC_STRIPPED) 3105 skb_trim(msdu, msdu->len - 3106 ath11k_dp_rx_crypto_mic_len(ar, enctype)); 3107 3108 if (flags & RX_FLAG_ICV_STRIPPED) 3109 skb_trim(msdu, msdu->len - 3110 ath11k_dp_rx_crypto_icv_len(ar, enctype)); 3111 3112 if (flags & RX_FLAG_IV_STRIPPED) { 3113 hdr_len = ieee80211_hdrlen(hdr->frame_control); 3114 crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype); 3115 3116 memmove((void *)msdu->data + HAL_RX_DESC_SIZE + crypto_len, 3117 (void *)msdu->data + HAL_RX_DESC_SIZE, hdr_len); 3118 skb_pull(msdu, crypto_len); 3119 } 3120 } 3121 3122 static int ath11k_dp_rx_h_defrag(struct ath11k *ar, 3123 struct ath11k_peer *peer, 3124 struct dp_rx_tid *rx_tid, 3125 struct sk_buff **defrag_skb) 3126 { 3127 struct hal_rx_desc *rx_desc; 3128 struct sk_buff *skb, *first_frag, *last_frag; 3129 struct ieee80211_hdr *hdr; 3130 enum hal_encrypt_type enctype; 3131 bool is_decrypted = false; 3132 int msdu_len = 0; 3133 int extra_space; 3134 u32 flags; 3135 3136 first_frag = skb_peek(&rx_tid->rx_frags); 3137 last_frag = skb_peek_tail(&rx_tid->rx_frags); 3138 3139 skb_queue_walk(&rx_tid->rx_frags, skb) { 3140 flags = 0; 3141 rx_desc = (struct hal_rx_desc *)skb->data; 3142 hdr = (struct ieee80211_hdr *)(skb->data + HAL_RX_DESC_SIZE); 3143 3144 enctype = ath11k_dp_rx_h_mpdu_start_enctype(rx_desc); 3145 if (enctype != HAL_ENCRYPT_TYPE_OPEN) 3146 is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_desc); 3147 3148 if (is_decrypted) { 3149 if (skb != first_frag) 3150 flags |= RX_FLAG_IV_STRIPPED; 3151 if (skb != last_frag) 3152 flags |= RX_FLAG_ICV_STRIPPED | 3153 RX_FLAG_MIC_STRIPPED; 3154 } 3155 3156 /* RX fragments are always raw packets */ 3157 if (skb != last_frag) 3158 skb_trim(skb, skb->len - FCS_LEN); 3159 ath11k_dp_rx_h_undecap_frag(ar, skb, enctype, flags); 3160 3161 if (skb != first_frag) 3162 skb_pull(skb, HAL_RX_DESC_SIZE + 3163 ieee80211_hdrlen(hdr->frame_control)); 3164 msdu_len += skb->len; 3165 } 3166 3167 extra_space = msdu_len - (DP_RX_BUFFER_SIZE + skb_tailroom(first_frag)); 3168 if (extra_space > 0 && 3169 (pskb_expand_head(first_frag, 0, extra_space, GFP_ATOMIC) < 0)) 3170 return 
-ENOMEM; 3171 3172 __skb_unlink(first_frag, &rx_tid->rx_frags); 3173 while ((skb = __skb_dequeue(&rx_tid->rx_frags))) { 3174 skb_put_data(first_frag, skb->data, skb->len); 3175 dev_kfree_skb_any(skb); 3176 } 3177 3178 hdr = (struct ieee80211_hdr *)(first_frag->data + HAL_RX_DESC_SIZE); 3179 hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_MOREFRAGS); 3180 ATH11K_SKB_RXCB(first_frag)->is_frag = 1; 3181 3182 if (ath11k_dp_rx_h_verify_tkip_mic(ar, peer, first_frag)) 3183 first_frag = NULL; 3184 3185 *defrag_skb = first_frag; 3186 return 0; 3187 } 3188 3189 static int ath11k_dp_rx_h_defrag_reo_reinject(struct ath11k *ar, struct dp_rx_tid *rx_tid, 3190 struct sk_buff *defrag_skb) 3191 { 3192 struct ath11k_base *ab = ar->ab; 3193 struct ath11k_pdev_dp *dp = &ar->dp; 3194 struct dp_rxdma_ring *rx_refill_ring = &dp->rx_refill_buf_ring; 3195 struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)defrag_skb->data; 3196 struct hal_reo_entrance_ring *reo_ent_ring; 3197 struct hal_reo_dest_ring *reo_dest_ring; 3198 struct dp_link_desc_bank *link_desc_banks; 3199 struct hal_rx_msdu_link *msdu_link; 3200 struct hal_rx_msdu_details *msdu0; 3201 struct hal_srng *srng; 3202 dma_addr_t paddr; 3203 u32 desc_bank, msdu_info, mpdu_info; 3204 u32 dst_idx, cookie; 3205 u32 *msdu_len_offset; 3206 int ret, buf_id; 3207 3208 link_desc_banks = ab->dp.link_desc_banks; 3209 reo_dest_ring = rx_tid->dst_ring_desc; 3210 3211 ath11k_hal_rx_reo_ent_paddr_get(ab, reo_dest_ring, &paddr, &desc_bank); 3212 msdu_link = (struct hal_rx_msdu_link *)(link_desc_banks[desc_bank].vaddr + 3213 (paddr - link_desc_banks[desc_bank].paddr)); 3214 msdu0 = &msdu_link->msdu_link[0]; 3215 dst_idx = FIELD_GET(RX_MSDU_DESC_INFO0_REO_DEST_IND, msdu0->rx_msdu_info.info0); 3216 memset(msdu0, 0, sizeof(*msdu0)); 3217 3218 msdu_info = FIELD_PREP(RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU, 1) | 3219 FIELD_PREP(RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU, 1) | 3220 FIELD_PREP(RX_MSDU_DESC_INFO0_MSDU_CONTINUATION, 0) | 3221 FIELD_PREP(RX_MSDU_DESC_INFO0_MSDU_LENGTH, 3222 defrag_skb->len - HAL_RX_DESC_SIZE) | 3223 FIELD_PREP(RX_MSDU_DESC_INFO0_REO_DEST_IND, dst_idx) | 3224 FIELD_PREP(RX_MSDU_DESC_INFO0_VALID_SA, 1) | 3225 FIELD_PREP(RX_MSDU_DESC_INFO0_VALID_DA, 1); 3226 msdu0->rx_msdu_info.info0 = msdu_info; 3227 3228 /* change msdu len in hal rx desc */ 3229 msdu_len_offset = (u32 *)&rx_desc->msdu_start; 3230 *msdu_len_offset &= ~(RX_MSDU_START_INFO1_MSDU_LENGTH); 3231 *msdu_len_offset |= defrag_skb->len - HAL_RX_DESC_SIZE; 3232 3233 paddr = dma_map_single(ab->dev, defrag_skb->data, 3234 defrag_skb->len + skb_tailroom(defrag_skb), 3235 DMA_FROM_DEVICE); 3236 if (dma_mapping_error(ab->dev, paddr)) 3237 return -ENOMEM; 3238 3239 spin_lock_bh(&rx_refill_ring->idr_lock); 3240 buf_id = idr_alloc(&rx_refill_ring->bufs_idr, defrag_skb, 0, 3241 rx_refill_ring->bufs_max * 3, GFP_ATOMIC); 3242 spin_unlock_bh(&rx_refill_ring->idr_lock); 3243 if (buf_id < 0) { 3244 ret = -ENOMEM; 3245 goto err_unmap_dma; 3246 } 3247 3248 ATH11K_SKB_RXCB(defrag_skb)->paddr = paddr; 3249 cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, dp->mac_id) | 3250 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id); 3251 3252 ath11k_hal_rx_buf_addr_info_set(msdu0, paddr, cookie, HAL_RX_BUF_RBM_SW3_BM); 3253 3254 /* Fill mpdu details into reo entrance ring */ 3255 srng = &ab->hal.srng_list[ab->dp.reo_reinject_ring.ring_id]; 3256 3257 spin_lock_bh(&srng->lock); 3258 ath11k_hal_srng_access_begin(ab, srng); 3259 3260 reo_ent_ring = (struct hal_reo_entrance_ring *) 3261 ath11k_hal_srng_src_get_next_entry(ab, srng);
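/* A NULL entry means the REO reinject ring has no free slot; undo the idr and DMA mapping set up above and fail the reinjection. */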
3262 if (!reo_ent_ring) { 3263 ath11k_hal_srng_access_end(ab, srng); 3264 spin_unlock_bh(&srng->lock); 3265 ret = -ENOSPC; 3266 goto err_free_idr; 3267 } 3268 memset(reo_ent_ring, 0, sizeof(*reo_ent_ring)); 3269 3270 ath11k_hal_rx_reo_ent_paddr_get(ab, reo_dest_ring, &paddr, &desc_bank); 3271 ath11k_hal_rx_buf_addr_info_set(reo_ent_ring, paddr, desc_bank, 3272 HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST); 3273 3274 mpdu_info = FIELD_PREP(RX_MPDU_DESC_INFO0_MSDU_COUNT, 1) | 3275 FIELD_PREP(RX_MPDU_DESC_INFO0_SEQ_NUM, rx_tid->cur_sn) | 3276 FIELD_PREP(RX_MPDU_DESC_INFO0_FRAG_FLAG, 0) | 3277 FIELD_PREP(RX_MPDU_DESC_INFO0_VALID_SA, 1) | 3278 FIELD_PREP(RX_MPDU_DESC_INFO0_VALID_DA, 1) | 3279 FIELD_PREP(RX_MPDU_DESC_INFO0_RAW_MPDU, 1) | 3280 FIELD_PREP(RX_MPDU_DESC_INFO0_VALID_PN, 1); 3281 3282 reo_ent_ring->rx_mpdu_info.info0 = mpdu_info; 3283 reo_ent_ring->rx_mpdu_info.meta_data = reo_dest_ring->rx_mpdu_info.meta_data; 3284 reo_ent_ring->queue_addr_lo = reo_dest_ring->queue_addr_lo; 3285 reo_ent_ring->info0 = FIELD_PREP(HAL_REO_ENTR_RING_INFO0_QUEUE_ADDR_HI, 3286 FIELD_GET(HAL_REO_DEST_RING_INFO0_QUEUE_ADDR_HI, 3287 reo_dest_ring->info0)) | 3288 FIELD_PREP(HAL_REO_ENTR_RING_INFO0_DEST_IND, dst_idx); 3289 ath11k_hal_srng_access_end(ab, srng); 3290 spin_unlock_bh(&srng->lock); 3291 3292 return 0; 3293 3294 err_free_idr: 3295 spin_lock_bh(&rx_refill_ring->idr_lock); 3296 idr_remove(&rx_refill_ring->bufs_idr, buf_id); 3297 spin_unlock_bh(&rx_refill_ring->idr_lock); 3298 err_unmap_dma: 3299 dma_unmap_single(ab->dev, paddr, defrag_skb->len + skb_tailroom(defrag_skb), 3300 DMA_FROM_DEVICE); 3301 return ret; 3302 } 3303 3304 static int ath11k_dp_rx_h_cmp_frags(struct sk_buff *a, struct sk_buff *b) 3305 { 3306 int frag1, frag2; 3307 3308 frag1 = ath11k_dp_rx_h_mpdu_start_frag_no(a); 3309 frag2 = ath11k_dp_rx_h_mpdu_start_frag_no(b); 3310 3311 return frag1 - frag2; 3312 } 3313 3314 static void ath11k_dp_rx_h_sort_frags(struct sk_buff_head *frag_list, 3315 struct sk_buff *cur_frag) 3316 { 3317 struct sk_buff *skb; 3318 int cmp; 3319 3320 skb_queue_walk(frag_list, skb) { 3321 cmp = ath11k_dp_rx_h_cmp_frags(skb, cur_frag); 3322 if (cmp < 0) 3323 continue; 3324 __skb_queue_before(frag_list, skb, cur_frag); 3325 return; 3326 } 3327 __skb_queue_tail(frag_list, cur_frag); 3328 } 3329 3330 static u64 ath11k_dp_rx_h_get_pn(struct sk_buff *skb) 3331 { 3332 struct ieee80211_hdr *hdr; 3333 u64 pn = 0; 3334 u8 *ehdr; 3335 3336 hdr = (struct ieee80211_hdr *)(skb->data + HAL_RX_DESC_SIZE); 3337 ehdr = skb->data + HAL_RX_DESC_SIZE + ieee80211_hdrlen(hdr->frame_control); 3338 3339 pn = ehdr[0]; 3340 pn |= (u64)ehdr[1] << 8; 3341 pn |= (u64)ehdr[4] << 16; 3342 pn |= (u64)ehdr[5] << 24; 3343 pn |= (u64)ehdr[6] << 32; 3344 pn |= (u64)ehdr[7] << 40; 3345 3346 return pn; 3347 } 3348 3349 static bool 3350 ath11k_dp_rx_h_defrag_validate_incr_pn(struct ath11k *ar, struct dp_rx_tid *rx_tid) 3351 { 3352 enum hal_encrypt_type encrypt_type; 3353 struct sk_buff *first_frag, *skb; 3354 struct hal_rx_desc *desc; 3355 u64 last_pn; 3356 u64 cur_pn; 3357 3358 first_frag = skb_peek(&rx_tid->rx_frags); 3359 desc = (struct hal_rx_desc *)first_frag->data; 3360 3361 encrypt_type = ath11k_dp_rx_h_mpdu_start_enctype(desc); 3362 if (encrypt_type != HAL_ENCRYPT_TYPE_CCMP_128 && 3363 encrypt_type != HAL_ENCRYPT_TYPE_CCMP_256 && 3364 encrypt_type != HAL_ENCRYPT_TYPE_GCMP_128 && 3365 encrypt_type != HAL_ENCRYPT_TYPE_AES_GCMP_256) 3366 return true; 3367 3368 last_pn = ath11k_dp_rx_h_get_pn(first_frag); 3369 skb_queue_walk(&rx_tid->rx_frags, skb) { 3370 if (skb 
== first_frag) 3371 continue; 3372 3373 cur_pn = ath11k_dp_rx_h_get_pn(skb); 3374 if (cur_pn != last_pn + 1) 3375 return false; 3376 last_pn = cur_pn; 3377 } 3378 return true; 3379 } 3380 3381 static int ath11k_dp_rx_frag_h_mpdu(struct ath11k *ar, 3382 struct sk_buff *msdu, 3383 u32 *ring_desc) 3384 { 3385 struct ath11k_base *ab = ar->ab; 3386 struct hal_rx_desc *rx_desc; 3387 struct ath11k_peer *peer; 3388 struct dp_rx_tid *rx_tid; 3389 struct sk_buff *defrag_skb = NULL; 3390 u32 peer_id; 3391 u16 seqno, frag_no; 3392 u8 tid; 3393 int ret = 0; 3394 bool more_frags; 3395 3396 rx_desc = (struct hal_rx_desc *)msdu->data; 3397 peer_id = ath11k_dp_rx_h_mpdu_start_peer_id(rx_desc); 3398 tid = ath11k_dp_rx_h_mpdu_start_tid(rx_desc); 3399 seqno = ath11k_dp_rx_h_mpdu_start_seq_no(rx_desc); 3400 frag_no = ath11k_dp_rx_h_mpdu_start_frag_no(msdu); 3401 more_frags = ath11k_dp_rx_h_mpdu_start_more_frags(msdu); 3402 3403 if (!ath11k_dp_rx_h_mpdu_start_seq_ctrl_valid(rx_desc) || 3404 !ath11k_dp_rx_h_mpdu_start_fc_valid(rx_desc) || 3405 tid > IEEE80211_NUM_TIDS) 3406 return -EINVAL; 3407 3408 /* received unfragmented packet in reo 3409 * exception ring, this shouldn't happen 3410 * as these packets typically come from 3411 * reo2sw srngs. 3412 */ 3413 if (WARN_ON_ONCE(!frag_no && !more_frags)) 3414 return -EINVAL; 3415 3416 spin_lock_bh(&ab->base_lock); 3417 peer = ath11k_peer_find_by_id(ab, peer_id); 3418 if (!peer) { 3419 ath11k_warn(ab, "failed to find the peer to de-fragment received fragment peer_id %d\n", 3420 peer_id); 3421 ret = -ENOENT; 3422 goto out_unlock; 3423 } 3424 rx_tid = &peer->rx_tid[tid]; 3425 3426 if ((!skb_queue_empty(&rx_tid->rx_frags) && seqno != rx_tid->cur_sn) || 3427 skb_queue_empty(&rx_tid->rx_frags)) { 3428 /* Flush stored fragments and start a new sequence */ 3429 ath11k_dp_rx_frags_cleanup(rx_tid, true); 3430 rx_tid->cur_sn = seqno; 3431 } 3432 3433 if (rx_tid->rx_frag_bitmap & BIT(frag_no)) { 3434 /* Fragment already present */ 3435 ret = -EINVAL; 3436 goto out_unlock; 3437 } 3438 3439 if (frag_no > __fls(rx_tid->rx_frag_bitmap)) 3440 __skb_queue_tail(&rx_tid->rx_frags, msdu); 3441 else 3442 ath11k_dp_rx_h_sort_frags(&rx_tid->rx_frags, msdu); 3443 3444 rx_tid->rx_frag_bitmap |= BIT(frag_no); 3445 if (!more_frags) 3446 rx_tid->last_frag_no = frag_no; 3447 3448 if (frag_no == 0) { 3449 rx_tid->dst_ring_desc = kmemdup(ring_desc, 3450 sizeof(*rx_tid->dst_ring_desc), 3451 GFP_ATOMIC); 3452 if (!rx_tid->dst_ring_desc) { 3453 ret = -ENOMEM; 3454 goto out_unlock; 3455 } 3456 } else { 3457 ath11k_dp_rx_link_desc_return(ab, ring_desc, 3458 HAL_WBM_REL_BM_ACT_PUT_IN_IDLE); 3459 } 3460 3461 if (!rx_tid->last_frag_no || 3462 rx_tid->rx_frag_bitmap != GENMASK(rx_tid->last_frag_no, 0)) { 3463 mod_timer(&rx_tid->frag_timer, jiffies + 3464 ATH11K_DP_RX_FRAGMENT_TIMEOUT_MS); 3465 goto out_unlock; 3466 } 3467 3468 spin_unlock_bh(&ab->base_lock); 3469 del_timer_sync(&rx_tid->frag_timer); 3470 spin_lock_bh(&ab->base_lock); 3471 3472 peer = ath11k_peer_find_by_id(ab, peer_id); 3473 if (!peer) 3474 goto err_frags_cleanup; 3475 3476 if (!ath11k_dp_rx_h_defrag_validate_incr_pn(ar, rx_tid)) 3477 goto err_frags_cleanup; 3478 3479 if (ath11k_dp_rx_h_defrag(ar, peer, rx_tid, &defrag_skb)) 3480 goto err_frags_cleanup; 3481 3482 if (!defrag_skb) 3483 goto err_frags_cleanup; 3484 3485 if (ath11k_dp_rx_h_defrag_reo_reinject(ar, rx_tid, defrag_skb)) 3486 goto err_frags_cleanup; 3487 3488 ath11k_dp_rx_frags_cleanup(rx_tid, false); 3489 goto out_unlock; 3490 3491 err_frags_cleanup: 3492 
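/* Error path: free the reassembled MPDU (if any) and flush all fragments collected for this sequence number. */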
dev_kfree_skb_any(defrag_skb); 3493 ath11k_dp_rx_frags_cleanup(rx_tid, true); 3494 out_unlock: 3495 spin_unlock_bh(&ab->base_lock); 3496 return ret; 3497 } 3498 3499 static int 3500 ath11k_dp_process_rx_err_buf(struct ath11k *ar, u32 *ring_desc, int buf_id, bool drop) 3501 { 3502 struct ath11k_pdev_dp *dp = &ar->dp; 3503 struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring; 3504 struct sk_buff *msdu; 3505 struct ath11k_skb_rxcb *rxcb; 3506 struct hal_rx_desc *rx_desc; 3507 u8 *hdr_status; 3508 u16 msdu_len; 3509 3510 spin_lock_bh(&rx_ring->idr_lock); 3511 msdu = idr_find(&rx_ring->bufs_idr, buf_id); 3512 if (!msdu) { 3513 ath11k_warn(ar->ab, "rx err buf with invalid buf_id %d\n", 3514 buf_id); 3515 spin_unlock_bh(&rx_ring->idr_lock); 3516 return -EINVAL; 3517 } 3518 3519 idr_remove(&rx_ring->bufs_idr, buf_id); 3520 spin_unlock_bh(&rx_ring->idr_lock); 3521 3522 rxcb = ATH11K_SKB_RXCB(msdu); 3523 dma_unmap_single(ar->ab->dev, rxcb->paddr, 3524 msdu->len + skb_tailroom(msdu), 3525 DMA_FROM_DEVICE); 3526 3527 if (drop) { 3528 dev_kfree_skb_any(msdu); 3529 return 0; 3530 } 3531 3532 rcu_read_lock(); 3533 if (!rcu_dereference(ar->ab->pdevs_active[ar->pdev_idx])) { 3534 dev_kfree_skb_any(msdu); 3535 goto exit; 3536 } 3537 3538 if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) { 3539 dev_kfree_skb_any(msdu); 3540 goto exit; 3541 } 3542 3543 rx_desc = (struct hal_rx_desc *)msdu->data; 3544 msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(rx_desc); 3545 if ((msdu_len + HAL_RX_DESC_SIZE) > DP_RX_BUFFER_SIZE) { 3546 hdr_status = ath11k_dp_rx_h_80211_hdr(rx_desc); 3547 ath11k_warn(ar->ab, "invalid msdu len %u\n", msdu_len); 3548 ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "", hdr_status, 3549 sizeof(struct ieee80211_hdr)); 3550 ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "", rx_desc, 3551 sizeof(struct hal_rx_desc)); 3552 dev_kfree_skb_any(msdu); 3553 goto exit; 3554 } 3555 3556 skb_put(msdu, HAL_RX_DESC_SIZE + msdu_len); 3557 3558 if (ath11k_dp_rx_frag_h_mpdu(ar, msdu, ring_desc)) { 3559 dev_kfree_skb_any(msdu); 3560 ath11k_dp_rx_link_desc_return(ar->ab, ring_desc, 3561 HAL_WBM_REL_BM_ACT_PUT_IN_IDLE); 3562 } 3563 exit: 3564 rcu_read_unlock(); 3565 return 0; 3566 } 3567 3568 int ath11k_dp_process_rx_err(struct ath11k_base *ab, struct napi_struct *napi, 3569 int budget) 3570 { 3571 u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC]; 3572 struct dp_link_desc_bank *link_desc_banks; 3573 enum hal_rx_buf_return_buf_manager rbm; 3574 int tot_n_bufs_reaped, quota, ret, i; 3575 int n_bufs_reaped[MAX_RADIOS] = {0}; 3576 struct dp_rxdma_ring *rx_ring; 3577 struct dp_srng *reo_except; 3578 u32 desc_bank, num_msdus; 3579 struct hal_srng *srng; 3580 struct ath11k_dp *dp; 3581 void *link_desc_va; 3582 int buf_id, mac_id; 3583 struct ath11k *ar; 3584 dma_addr_t paddr; 3585 u32 *desc; 3586 bool is_frag; 3587 u8 drop = 0; 3588 3589 tot_n_bufs_reaped = 0; 3590 quota = budget; 3591 3592 dp = &ab->dp; 3593 reo_except = &dp->reo_except_ring; 3594 link_desc_banks = dp->link_desc_banks; 3595 3596 srng = &ab->hal.srng_list[reo_except->ring_id]; 3597 3598 spin_lock_bh(&srng->lock); 3599 3600 ath11k_hal_srng_access_begin(ab, srng); 3601 3602 while (budget && 3603 (desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) { 3604 struct hal_reo_dest_ring *reo_desc = (struct hal_reo_dest_ring *)desc; 3605 3606 ab->soc_stats.err_ring_pkts++; 3607 ret = ath11k_hal_desc_reo_parse_err(ab, desc, &paddr, 3608 &desc_bank); 3609 if (ret) { 3610 ath11k_warn(ab, "failed to parse error reo desc %d\n", 3611 ret); 3612 continue; 3613 } 3614
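/* Translate the link descriptor's bus address into its CPU address within the matching link descriptor bank so the MSDU cookies and RBM can be read out of it. */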
link_desc_va = link_desc_banks[desc_bank].vaddr + 3615 (paddr - link_desc_banks[desc_bank].paddr); 3616 ath11k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, msdu_cookies, 3617 &rbm); 3618 if (rbm != HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST && 3619 rbm != HAL_RX_BUF_RBM_SW3_BM) { 3620 ab->soc_stats.invalid_rbm++; 3621 ath11k_warn(ab, "invalid return buffer manager %d\n", rbm); 3622 ath11k_dp_rx_link_desc_return(ab, desc, 3623 HAL_WBM_REL_BM_ACT_REL_MSDU); 3624 continue; 3625 } 3626 3627 is_frag = !!(reo_desc->rx_mpdu_info.info0 & RX_MPDU_DESC_INFO0_FRAG_FLAG); 3628 3629 /* Process only rx fragments with one msdu per link desc below, and drop 3630 * msdus indicated due to errors. 3631 */ 3632 if (!is_frag || num_msdus > 1) { 3633 drop = 1; 3634 /* Return the link desc back to wbm idle list */ 3635 ath11k_dp_rx_link_desc_return(ab, desc, 3636 HAL_WBM_REL_BM_ACT_PUT_IN_IDLE); 3637 } 3638 3639 for (i = 0; i < num_msdus; i++) { 3640 buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, 3641 msdu_cookies[i]); 3642 3643 mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, 3644 msdu_cookies[i]); 3645 3646 ar = ab->pdevs[mac_id].ar; 3647 3648 if (!ath11k_dp_process_rx_err_buf(ar, desc, buf_id, drop)) { 3649 n_bufs_reaped[mac_id]++; 3650 tot_n_bufs_reaped++; 3651 } 3652 } 3653 3654 if (tot_n_bufs_reaped >= quota) { 3655 tot_n_bufs_reaped = quota; 3656 goto exit; 3657 } 3658 3659 budget = quota - tot_n_bufs_reaped; 3660 } 3661 3662 exit: 3663 ath11k_hal_srng_access_end(ab, srng); 3664 3665 spin_unlock_bh(&srng->lock); 3666 3667 for (i = 0; i < ab->num_radios; i++) { 3668 if (!n_bufs_reaped[i]) 3669 continue; 3670 3671 ar = ab->pdevs[i].ar; 3672 rx_ring = &ar->dp.rx_refill_buf_ring; 3673 3674 ath11k_dp_rxbufs_replenish(ab, i, rx_ring, n_bufs_reaped[i], 3675 HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC); 3676 } 3677 3678 return tot_n_bufs_reaped; 3679 } 3680 3681 static void ath11k_dp_rx_null_q_desc_sg_drop(struct ath11k *ar, 3682 int msdu_len, 3683 struct sk_buff_head *msdu_list) 3684 { 3685 struct sk_buff *skb, *tmp; 3686 struct ath11k_skb_rxcb *rxcb; 3687 int n_buffs; 3688 3689 n_buffs = DIV_ROUND_UP(msdu_len, 3690 (DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE)); 3691 3692 skb_queue_walk_safe(msdu_list, skb, tmp) { 3693 rxcb = ATH11K_SKB_RXCB(skb); 3694 if (rxcb->err_rel_src == HAL_WBM_REL_SRC_MODULE_REO && 3695 rxcb->err_code == HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO) { 3696 if (!n_buffs) 3697 break; 3698 __skb_unlink(skb, msdu_list); 3699 dev_kfree_skb_any(skb); 3700 n_buffs--; 3701 } 3702 } 3703 } 3704 3705 static int ath11k_dp_rx_h_null_q_desc(struct ath11k *ar, struct sk_buff *msdu, 3706 struct ieee80211_rx_status *status, 3707 struct sk_buff_head *msdu_list) 3708 { 3709 u16 msdu_len; 3710 struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data; 3711 u8 l3pad_bytes; 3712 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); 3713 3714 msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(desc); 3715 3716 if (!rxcb->is_frag && ((msdu_len + HAL_RX_DESC_SIZE) > DP_RX_BUFFER_SIZE)) { 3717 /* First buffer will be freed by the caller, so deduct its length */ 3718 msdu_len = msdu_len - (DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE); 3719 ath11k_dp_rx_null_q_desc_sg_drop(ar, msdu_len, msdu_list); 3720 return -EINVAL; 3721 } 3722 3723 if (!ath11k_dp_rx_h_attn_msdu_done(desc)) { 3724 ath11k_warn(ar->ab, 3725 "msdu_done bit not set in null_q_desc processing\n"); 3726 __skb_queue_purge(msdu_list); 3727 return -EIO; 3728 } 3729 3730 /* Handle NULL queue descriptor violations arising out of a missing 3731 * REO queue for a given peer or a
given TID. This typically 3732 * may happen if a packet is received on a QOS enabled TID before the 3733 * ADDBA negotiation for that TID, which is when the TID queue is set up. Or 3734 * it may also happen for MC/BC frames if they are not routed to the 3735 * non-QOS TID queue, in the absence of any other default TID queue. 3736 * This error can show up in either a REO destination ring or a WBM release ring. 3737 */ 3738 3739 rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(desc); 3740 rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(desc); 3741 3742 if (rxcb->is_frag) { 3743 skb_pull(msdu, HAL_RX_DESC_SIZE); 3744 } else { 3745 l3pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(desc); 3746 3747 if ((HAL_RX_DESC_SIZE + l3pad_bytes + msdu_len) > DP_RX_BUFFER_SIZE) 3748 return -EINVAL; 3749 3750 skb_put(msdu, HAL_RX_DESC_SIZE + l3pad_bytes + msdu_len); 3751 skb_pull(msdu, HAL_RX_DESC_SIZE + l3pad_bytes); 3752 } 3753 ath11k_dp_rx_h_ppdu(ar, desc, status); 3754 3755 ath11k_dp_rx_h_mpdu(ar, msdu, desc, status); 3756 3757 rxcb->tid = ath11k_dp_rx_h_mpdu_start_tid(desc); 3758 3759 /* Please note that the caller will have access to the msdu and will complete 3760 * rx with mac80211. There is no need to worry about cleaning up the amsdu_list. 3761 */ 3762 3763 return 0; 3764 } 3765 3766 static bool ath11k_dp_rx_h_reo_err(struct ath11k *ar, struct sk_buff *msdu, 3767 struct ieee80211_rx_status *status, 3768 struct sk_buff_head *msdu_list) 3769 { 3770 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); 3771 bool drop = false; 3772 3773 ar->ab->soc_stats.reo_error[rxcb->err_code]++; 3774 3775 switch (rxcb->err_code) { 3776 case HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO: 3777 if (ath11k_dp_rx_h_null_q_desc(ar, msdu, status, msdu_list)) 3778 drop = true; 3779 break; 3780 case HAL_REO_DEST_RING_ERROR_CODE_PN_CHECK_FAILED: 3781 /* TODO: Do not drop PN failed packets in the driver; 3782 * instead, it is good to drop such packets in mac80211 3783 * after incrementing the replay counters. 3784 */ 3785 fallthrough; 3786 default: 3787 /* TODO: Review other errors and pass them up to mac80211 3788 * as appropriate.
3789 */ 3790 drop = true; 3791 break; 3792 } 3793 3794 return drop; 3795 } 3796 3797 static void ath11k_dp_rx_h_tkip_mic_err(struct ath11k *ar, struct sk_buff *msdu, 3798 struct ieee80211_rx_status *status) 3799 { 3800 u16 msdu_len; 3801 struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data; 3802 u8 l3pad_bytes; 3803 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); 3804 3805 rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(desc); 3806 rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(desc); 3807 3808 l3pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(desc); 3809 msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(desc); 3810 skb_put(msdu, HAL_RX_DESC_SIZE + l3pad_bytes + msdu_len); 3811 skb_pull(msdu, HAL_RX_DESC_SIZE + l3pad_bytes); 3812 3813 ath11k_dp_rx_h_ppdu(ar, desc, status); 3814 3815 status->flag |= (RX_FLAG_MMIC_STRIPPED | RX_FLAG_MMIC_ERROR | 3816 RX_FLAG_DECRYPTED); 3817 3818 ath11k_dp_rx_h_undecap(ar, msdu, desc, 3819 HAL_ENCRYPT_TYPE_TKIP_MIC, status, false); 3820 } 3821 3822 static bool ath11k_dp_rx_h_rxdma_err(struct ath11k *ar, struct sk_buff *msdu, 3823 struct ieee80211_rx_status *status) 3824 { 3825 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); 3826 bool drop = false; 3827 3828 ar->ab->soc_stats.rxdma_error[rxcb->err_code]++; 3829 3830 switch (rxcb->err_code) { 3831 case HAL_REO_ENTR_RING_RXDMA_ECODE_TKIP_MIC_ERR: 3832 ath11k_dp_rx_h_tkip_mic_err(ar, msdu, status); 3833 break; 3834 default: 3835 /* TODO: Review other rxdma error code to check if anything is 3836 * worth reporting to mac80211 3837 */ 3838 drop = true; 3839 break; 3840 } 3841 3842 return drop; 3843 } 3844 3845 static void ath11k_dp_rx_wbm_err(struct ath11k *ar, 3846 struct napi_struct *napi, 3847 struct sk_buff *msdu, 3848 struct sk_buff_head *msdu_list) 3849 { 3850 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); 3851 struct ieee80211_rx_status rxs = {0}; 3852 struct ieee80211_rx_status *status; 3853 bool drop = true; 3854 3855 switch (rxcb->err_rel_src) { 3856 case HAL_WBM_REL_SRC_MODULE_REO: 3857 drop = ath11k_dp_rx_h_reo_err(ar, msdu, &rxs, msdu_list); 3858 break; 3859 case HAL_WBM_REL_SRC_MODULE_RXDMA: 3860 drop = ath11k_dp_rx_h_rxdma_err(ar, msdu, &rxs); 3861 break; 3862 default: 3863 /* msdu will get freed */ 3864 break; 3865 } 3866 3867 if (drop) { 3868 dev_kfree_skb_any(msdu); 3869 return; 3870 } 3871 3872 status = IEEE80211_SKB_RXCB(msdu); 3873 *status = rxs; 3874 3875 ath11k_dp_rx_deliver_msdu(ar, napi, msdu); 3876 } 3877 3878 int ath11k_dp_rx_process_wbm_err(struct ath11k_base *ab, 3879 struct napi_struct *napi, int budget) 3880 { 3881 struct ath11k *ar; 3882 struct ath11k_dp *dp = &ab->dp; 3883 struct dp_rxdma_ring *rx_ring; 3884 struct hal_rx_wbm_rel_info err_info; 3885 struct hal_srng *srng; 3886 struct sk_buff *msdu; 3887 struct sk_buff_head msdu_list[MAX_RADIOS]; 3888 struct ath11k_skb_rxcb *rxcb; 3889 u32 *rx_desc; 3890 int buf_id, mac_id; 3891 int num_buffs_reaped[MAX_RADIOS] = {0}; 3892 int total_num_buffs_reaped = 0; 3893 int ret, i; 3894 3895 for (i = 0; i < ab->num_radios; i++) 3896 __skb_queue_head_init(&msdu_list[i]); 3897 3898 srng = &ab->hal.srng_list[dp->rx_rel_ring.ring_id]; 3899 3900 spin_lock_bh(&srng->lock); 3901 3902 ath11k_hal_srng_access_begin(ab, srng); 3903 3904 while (budget) { 3905 rx_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng); 3906 if (!rx_desc) 3907 break; 3908 3909 ret = ath11k_hal_wbm_desc_parse_err(ab, rx_desc, &err_info); 3910 if (ret) { 3911 ath11k_warn(ab, 3912 "failed to parse rx error in wbm_rel ring desc %d\n", 3913 ret); 
3914 continue; 3915 } 3916 3917 buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, err_info.cookie); 3918 mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, err_info.cookie); 3919 3920 ar = ab->pdevs[mac_id].ar; 3921 rx_ring = &ar->dp.rx_refill_buf_ring; 3922 3923 spin_lock_bh(&rx_ring->idr_lock); 3924 msdu = idr_find(&rx_ring->bufs_idr, buf_id); 3925 if (!msdu) { 3926 ath11k_warn(ab, "frame rx with invalid buf_id %d pdev %d\n", 3927 buf_id, mac_id); 3928 spin_unlock_bh(&rx_ring->idr_lock); 3929 continue; 3930 } 3931 3932 idr_remove(&rx_ring->bufs_idr, buf_id); 3933 spin_unlock_bh(&rx_ring->idr_lock); 3934 3935 rxcb = ATH11K_SKB_RXCB(msdu); 3936 dma_unmap_single(ab->dev, rxcb->paddr, 3937 msdu->len + skb_tailroom(msdu), 3938 DMA_FROM_DEVICE); 3939 3940 num_buffs_reaped[mac_id]++; 3941 total_num_buffs_reaped++; 3942 budget--; 3943 3944 if (err_info.push_reason != 3945 HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) { 3946 dev_kfree_skb_any(msdu); 3947 continue; 3948 } 3949 3950 rxcb->err_rel_src = err_info.err_rel_src; 3951 rxcb->err_code = err_info.err_code; 3952 rxcb->rx_desc = (struct hal_rx_desc *)msdu->data; 3953 __skb_queue_tail(&msdu_list[mac_id], msdu); 3954 } 3955 3956 ath11k_hal_srng_access_end(ab, srng); 3957 3958 spin_unlock_bh(&srng->lock); 3959 3960 if (!total_num_buffs_reaped) 3961 goto done; 3962 3963 for (i = 0; i < ab->num_radios; i++) { 3964 if (!num_buffs_reaped[i]) 3965 continue; 3966 3967 ar = ab->pdevs[i].ar; 3968 rx_ring = &ar->dp.rx_refill_buf_ring; 3969 3970 ath11k_dp_rxbufs_replenish(ab, i, rx_ring, num_buffs_reaped[i], 3971 HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC); 3972 } 3973 3974 rcu_read_lock(); 3975 for (i = 0; i < ab->num_radios; i++) { 3976 if (!rcu_dereference(ab->pdevs_active[i])) { 3977 __skb_queue_purge(&msdu_list[i]); 3978 continue; 3979 } 3980 3981 ar = ab->pdevs[i].ar; 3982 3983 if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) { 3984 __skb_queue_purge(&msdu_list[i]); 3985 continue; 3986 } 3987 3988 while ((msdu = __skb_dequeue(&msdu_list[i])) != NULL) 3989 ath11k_dp_rx_wbm_err(ar, napi, msdu, &msdu_list[i]); 3990 } 3991 rcu_read_unlock(); 3992 done: 3993 return total_num_buffs_reaped; 3994 } 3995 3996 int ath11k_dp_process_rxdma_err(struct ath11k_base *ab, int mac_id, int budget) 3997 { 3998 struct ath11k *ar; 3999 struct dp_srng *err_ring; 4000 struct dp_rxdma_ring *rx_ring; 4001 struct dp_link_desc_bank *link_desc_banks = ab->dp.link_desc_banks; 4002 struct hal_srng *srng; 4003 u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC]; 4004 enum hal_rx_buf_return_buf_manager rbm; 4005 enum hal_reo_entr_rxdma_ecode rxdma_err_code; 4006 struct ath11k_skb_rxcb *rxcb; 4007 struct sk_buff *skb; 4008 struct hal_reo_entrance_ring *entr_ring; 4009 void *desc; 4010 int num_buf_freed = 0; 4011 int quota = budget; 4012 dma_addr_t paddr; 4013 u32 desc_bank; 4014 void *link_desc_va; 4015 int num_msdus; 4016 int i; 4017 int buf_id; 4018 4019 ar = ab->pdevs[ath11k_hw_mac_id_to_pdev_id(&ab->hw_params, mac_id)].ar; 4020 err_ring = &ar->dp.rxdma_err_dst_ring[ath11k_hw_mac_id_to_srng_id(&ab->hw_params, 4021 mac_id)]; 4022 rx_ring = &ar->dp.rx_refill_buf_ring; 4023 4024 srng = &ab->hal.srng_list[err_ring->ring_id]; 4025 4026 spin_lock_bh(&srng->lock); 4027 4028 ath11k_hal_srng_access_begin(ab, srng); 4029 4030 while (quota-- && 4031 (desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) { 4032 ath11k_hal_rx_reo_ent_paddr_get(ab, desc, &paddr, &desc_bank); 4033 4034 entr_ring = (struct hal_reo_entrance_ring *)desc; 4035 rxdma_err_code = 4036 FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE, 
4037 entr_ring->info1); 4038 ab->soc_stats.rxdma_error[rxdma_err_code]++; 4039 4040 link_desc_va = link_desc_banks[desc_bank].vaddr + 4041 (paddr - link_desc_banks[desc_bank].paddr); 4042 ath11k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, 4043 msdu_cookies, &rbm); 4044 4045 for (i = 0; i < num_msdus; i++) { 4046 buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, 4047 msdu_cookies[i]); 4048 4049 spin_lock_bh(&rx_ring->idr_lock); 4050 skb = idr_find(&rx_ring->bufs_idr, buf_id); 4051 if (!skb) { 4052 ath11k_warn(ab, "rxdma error with invalid buf_id %d\n", 4053 buf_id); 4054 spin_unlock_bh(&rx_ring->idr_lock); 4055 continue; 4056 } 4057 4058 idr_remove(&rx_ring->bufs_idr, buf_id); 4059 spin_unlock_bh(&rx_ring->idr_lock); 4060 4061 rxcb = ATH11K_SKB_RXCB(skb); 4062 dma_unmap_single(ab->dev, rxcb->paddr, 4063 skb->len + skb_tailroom(skb), 4064 DMA_FROM_DEVICE); 4065 dev_kfree_skb_any(skb); 4066 4067 num_buf_freed++; 4068 } 4069 4070 ath11k_dp_rx_link_desc_return(ab, desc, 4071 HAL_WBM_REL_BM_ACT_PUT_IN_IDLE); 4072 } 4073 4074 ath11k_hal_srng_access_end(ab, srng); 4075 4076 spin_unlock_bh(&srng->lock); 4077 4078 if (num_buf_freed) 4079 ath11k_dp_rxbufs_replenish(ab, mac_id, rx_ring, num_buf_freed, 4080 HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC); 4081 4082 return budget - quota; 4083 } 4084 4085 void ath11k_dp_process_reo_status(struct ath11k_base *ab) 4086 { 4087 struct ath11k_dp *dp = &ab->dp; 4088 struct hal_srng *srng; 4089 struct dp_reo_cmd *cmd, *tmp; 4090 bool found = false; 4091 u32 *reo_desc; 4092 u16 tag; 4093 struct hal_reo_status reo_status; 4094 4095 srng = &ab->hal.srng_list[dp->reo_status_ring.ring_id]; 4096 4097 memset(&reo_status, 0, sizeof(reo_status)); 4098 4099 spin_lock_bh(&srng->lock); 4100 4101 ath11k_hal_srng_access_begin(ab, srng); 4102 4103 while ((reo_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) { 4104 tag = FIELD_GET(HAL_SRNG_TLV_HDR_TAG, *reo_desc); 4105 4106 switch (tag) { 4107 case HAL_REO_GET_QUEUE_STATS_STATUS: 4108 ath11k_hal_reo_status_queue_stats(ab, reo_desc, 4109 &reo_status); 4110 break; 4111 case HAL_REO_FLUSH_QUEUE_STATUS: 4112 ath11k_hal_reo_flush_queue_status(ab, reo_desc, 4113 &reo_status); 4114 break; 4115 case HAL_REO_FLUSH_CACHE_STATUS: 4116 ath11k_hal_reo_flush_cache_status(ab, reo_desc, 4117 &reo_status); 4118 break; 4119 case HAL_REO_UNBLOCK_CACHE_STATUS: 4120 ath11k_hal_reo_unblk_cache_status(ab, reo_desc, 4121 &reo_status); 4122 break; 4123 case HAL_REO_FLUSH_TIMEOUT_LIST_STATUS: 4124 ath11k_hal_reo_flush_timeout_list_status(ab, reo_desc, 4125 &reo_status); 4126 break; 4127 case HAL_REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS: 4128 ath11k_hal_reo_desc_thresh_reached_status(ab, reo_desc, 4129 &reo_status); 4130 break; 4131 case HAL_REO_UPDATE_RX_REO_QUEUE_STATUS: 4132 ath11k_hal_reo_update_rx_reo_queue_status(ab, reo_desc, 4133 &reo_status); 4134 break; 4135 default: 4136 ath11k_warn(ab, "Unknown reo status type %d\n", tag); 4137 continue; 4138 } 4139 4140 spin_lock_bh(&dp->reo_cmd_lock); 4141 list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) { 4142 if (reo_status.uniform_hdr.cmd_num == cmd->cmd_num) { 4143 found = true; 4144 list_del(&cmd->list); 4145 break; 4146 } 4147 } 4148 spin_unlock_bh(&dp->reo_cmd_lock); 4149 4150 if (found) { 4151 cmd->handler(dp, (void *)&cmd->data, 4152 reo_status.uniform_hdr.cmd_status); 4153 kfree(cmd); 4154 } 4155 4156 found = false; 4157 } 4158 4159 ath11k_hal_srng_access_end(ab, srng); 4160 4161 spin_unlock_bh(&srng->lock); 4162 } 4163 4164 void ath11k_dp_rx_pdev_free(struct ath11k_base *ab, int mac_id) 
4165 { 4166 struct ath11k *ar = ab->pdevs[mac_id].ar; 4167 4168 ath11k_dp_rx_pdev_srng_free(ar); 4169 ath11k_dp_rxdma_pdev_buf_free(ar); 4170 } 4171 4172 int ath11k_dp_rx_pdev_alloc(struct ath11k_base *ab, int mac_id) 4173 { 4174 struct ath11k *ar = ab->pdevs[mac_id].ar; 4175 struct ath11k_pdev_dp *dp = &ar->dp; 4176 u32 ring_id; 4177 int i; 4178 int ret; 4179 4180 ret = ath11k_dp_rx_pdev_srng_alloc(ar); 4181 if (ret) { 4182 ath11k_warn(ab, "failed to setup rx srngs\n"); 4183 return ret; 4184 } 4185 4186 ret = ath11k_dp_rxdma_pdev_buf_setup(ar); 4187 if (ret) { 4188 ath11k_warn(ab, "failed to setup rxdma ring\n"); 4189 return ret; 4190 } 4191 4192 ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id; 4193 ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id, HAL_RXDMA_BUF); 4194 if (ret) { 4195 ath11k_warn(ab, "failed to configure rx_refill_buf_ring %d\n", 4196 ret); 4197 return ret; 4198 } 4199 4200 if (ab->hw_params.rx_mac_buf_ring) { 4201 for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) { 4202 ring_id = dp->rx_mac_buf_ring[i].ring_id; 4203 ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, 4204 mac_id + i, HAL_RXDMA_BUF); 4205 if (ret) { 4206 ath11k_warn(ab, "failed to configure rx_mac_buf_ring%d %d\n", 4207 i, ret); 4208 return ret; 4209 } 4210 } 4211 } 4212 4213 for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) { 4214 ring_id = dp->rxdma_err_dst_ring[i].ring_id; 4215 ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, 4216 mac_id + i, HAL_RXDMA_DST); 4217 if (ret) { 4218 ath11k_warn(ab, "failed to configure rxdma_err_dest_ring%d %d\n", 4219 i, ret); 4220 return ret; 4221 } 4222 } 4223 4224 if (!ab->hw_params.rxdma1_enable) 4225 goto config_refill_ring; 4226 4227 ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id; 4228 ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, 4229 mac_id, HAL_RXDMA_MONITOR_BUF); 4230 if (ret) { 4231 ath11k_warn(ab, "failed to configure rxdma_mon_buf_ring %d\n", 4232 ret); 4233 return ret; 4234 } 4235 ret = ath11k_dp_tx_htt_srng_setup(ab, 4236 dp->rxdma_mon_dst_ring.ring_id, 4237 mac_id, HAL_RXDMA_MONITOR_DST); 4238 if (ret) { 4239 ath11k_warn(ab, "failed to configure rxdma_mon_dst_ring %d\n", 4240 ret); 4241 return ret; 4242 } 4243 ret = ath11k_dp_tx_htt_srng_setup(ab, 4244 dp->rxdma_mon_desc_ring.ring_id, 4245 mac_id, HAL_RXDMA_MONITOR_DESC); 4246 if (ret) { 4247 ath11k_warn(ab, "failed to configure rxdma_mon_dst_ring %d\n", 4248 ret); 4249 return ret; 4250 } 4251 4252 config_refill_ring: 4253 for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) { 4254 ring_id = dp->rx_mon_status_refill_ring[i].refill_buf_ring.ring_id; 4255 ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id + i, 4256 HAL_RXDMA_MONITOR_STATUS); 4257 if (ret) { 4258 ath11k_warn(ab, 4259 "failed to configure mon_status_refill_ring%d %d\n", 4260 i, ret); 4261 return ret; 4262 } 4263 } 4264 4265 return 0; 4266 } 4267 4268 static void ath11k_dp_mon_set_frag_len(u32 *total_len, u32 *frag_len) 4269 { 4270 if (*total_len >= (DP_RX_BUFFER_SIZE - sizeof(struct hal_rx_desc))) { 4271 *frag_len = DP_RX_BUFFER_SIZE - sizeof(struct hal_rx_desc); 4272 *total_len -= *frag_len; 4273 } else { 4274 *frag_len = *total_len; 4275 *total_len = 0; 4276 } 4277 } 4278 4279 static 4280 int ath11k_dp_rx_monitor_link_desc_return(struct ath11k *ar, 4281 void *p_last_buf_addr_info, 4282 u8 mac_id) 4283 { 4284 struct ath11k_pdev_dp *dp = &ar->dp; 4285 struct dp_srng *dp_srng; 4286 void *hal_srng; 4287 void *src_srng_desc; 4288 int ret = 0; 4289 4290 dp_srng = &dp->rxdma_mon_desc_ring; 4291 hal_srng = 
&ar->ab->hal.srng_list[dp_srng->ring_id]; 4292 4293 ath11k_hal_srng_access_begin(ar->ab, hal_srng); 4294 4295 src_srng_desc = ath11k_hal_srng_src_get_next_entry(ar->ab, hal_srng); 4296 4297 if (src_srng_desc) { 4298 struct ath11k_buffer_addr *src_desc = 4299 (struct ath11k_buffer_addr *)src_srng_desc; 4300 4301 *src_desc = *((struct ath11k_buffer_addr *)p_last_buf_addr_info); 4302 } else { 4303 ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 4304 "Monitor Link Desc Ring %d Full", mac_id); 4305 ret = -ENOMEM; 4306 } 4307 4308 ath11k_hal_srng_access_end(ar->ab, hal_srng); 4309 return ret; 4310 } 4311 4312 static 4313 void ath11k_dp_rx_mon_next_link_desc_get(void *rx_msdu_link_desc, 4314 dma_addr_t *paddr, u32 *sw_cookie, 4315 void **pp_buf_addr_info) 4316 { 4317 struct hal_rx_msdu_link *msdu_link = 4318 (struct hal_rx_msdu_link *)rx_msdu_link_desc; 4319 struct ath11k_buffer_addr *buf_addr_info; 4320 u8 rbm = 0; 4321 4322 buf_addr_info = (struct ath11k_buffer_addr *)&msdu_link->buf_addr_info; 4323 4324 ath11k_hal_rx_buf_addr_info_get(buf_addr_info, paddr, sw_cookie, &rbm); 4325 4326 *pp_buf_addr_info = (void *)buf_addr_info; 4327 } 4328 4329 static int ath11k_dp_pkt_set_pktlen(struct sk_buff *skb, u32 len) 4330 { 4331 if (skb->len > len) { 4332 skb_trim(skb, len); 4333 } else { 4334 if (skb_tailroom(skb) < len - skb->len) { 4335 if ((pskb_expand_head(skb, 0, 4336 len - skb->len - skb_tailroom(skb), 4337 GFP_ATOMIC))) { 4338 dev_kfree_skb_any(skb); 4339 return -ENOMEM; 4340 } 4341 } 4342 skb_put(skb, (len - skb->len)); 4343 } 4344 return 0; 4345 } 4346 4347 static void ath11k_hal_rx_msdu_list_get(struct ath11k *ar, 4348 void *msdu_link_desc, 4349 struct hal_rx_msdu_list *msdu_list, 4350 u16 *num_msdus) 4351 { 4352 struct hal_rx_msdu_details *msdu_details = NULL; 4353 struct rx_msdu_desc *msdu_desc_info = NULL; 4354 struct hal_rx_msdu_link *msdu_link = NULL; 4355 int i; 4356 u32 last = FIELD_PREP(RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU, 1); 4357 u32 first = FIELD_PREP(RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU, 1); 4358 u8 tmp = 0; 4359 4360 msdu_link = (struct hal_rx_msdu_link *)msdu_link_desc; 4361 msdu_details = &msdu_link->msdu_link[0]; 4362 4363 for (i = 0; i < HAL_RX_NUM_MSDU_DESC; i++) { 4364 if (FIELD_GET(BUFFER_ADDR_INFO0_ADDR, 4365 msdu_details[i].buf_addr_info.info0) == 0) { 4366 msdu_desc_info = &msdu_details[i - 1].rx_msdu_info; 4367 msdu_desc_info->info0 |= last; 4368 ; 4369 break; 4370 } 4371 msdu_desc_info = &msdu_details[i].rx_msdu_info; 4372 4373 if (!i) 4374 msdu_desc_info->info0 |= first; 4375 else if (i == (HAL_RX_NUM_MSDU_DESC - 1)) 4376 msdu_desc_info->info0 |= last; 4377 msdu_list->msdu_info[i].msdu_flags = msdu_desc_info->info0; 4378 msdu_list->msdu_info[i].msdu_len = 4379 HAL_RX_MSDU_PKT_LENGTH_GET(msdu_desc_info->info0); 4380 msdu_list->sw_cookie[i] = 4381 FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE, 4382 msdu_details[i].buf_addr_info.info1); 4383 tmp = FIELD_GET(BUFFER_ADDR_INFO1_RET_BUF_MGR, 4384 msdu_details[i].buf_addr_info.info1); 4385 msdu_list->rbm[i] = tmp; 4386 } 4387 *num_msdus = i; 4388 } 4389 4390 static u32 ath11k_dp_rx_mon_comp_ppduid(u32 msdu_ppdu_id, u32 *ppdu_id, 4391 u32 *rx_bufs_used) 4392 { 4393 u32 ret = 0; 4394 4395 if ((*ppdu_id < msdu_ppdu_id) && 4396 ((msdu_ppdu_id - *ppdu_id) < DP_NOT_PPDU_ID_WRAP_AROUND)) { 4397 *ppdu_id = msdu_ppdu_id; 4398 ret = msdu_ppdu_id; 4399 } else if ((*ppdu_id > msdu_ppdu_id) && 4400 ((*ppdu_id - msdu_ppdu_id) > DP_NOT_PPDU_ID_WRAP_AROUND)) { 4401 /* mon_dst is behind than mon_status 4402 * skip dst_ring and free it 4403 */ 4404 
*rx_bufs_used += 1; 4405 *ppdu_id = msdu_ppdu_id; 4406 ret = msdu_ppdu_id; 4407 } 4408 return ret; 4409 } 4410 4411 static void ath11k_dp_mon_get_buf_len(struct hal_rx_msdu_desc_info *info, 4412 bool *is_frag, u32 *total_len, 4413 u32 *frag_len, u32 *msdu_cnt) 4414 { 4415 if (info->msdu_flags & RX_MSDU_DESC_INFO0_MSDU_CONTINUATION) { 4416 if (!*is_frag) { 4417 *total_len = info->msdu_len; 4418 *is_frag = true; 4419 } 4420 ath11k_dp_mon_set_frag_len(total_len, 4421 frag_len); 4422 } else { 4423 if (*is_frag) { 4424 ath11k_dp_mon_set_frag_len(total_len, 4425 frag_len); 4426 } else { 4427 *frag_len = info->msdu_len; 4428 } 4429 *is_frag = false; 4430 *msdu_cnt -= 1; 4431 } 4432 } 4433 4434 static u32 4435 ath11k_dp_rx_mon_mpdu_pop(struct ath11k *ar, 4436 void *ring_entry, struct sk_buff **head_msdu, 4437 struct sk_buff **tail_msdu, u32 *npackets, 4438 u32 *ppdu_id) 4439 { 4440 struct ath11k_pdev_dp *dp = &ar->dp; 4441 struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data; 4442 struct dp_rxdma_ring *rx_ring = &dp->rxdma_mon_buf_ring; 4443 struct sk_buff *msdu = NULL, *last = NULL; 4444 struct hal_rx_msdu_list msdu_list; 4445 void *p_buf_addr_info, *p_last_buf_addr_info; 4446 struct hal_rx_desc *rx_desc; 4447 void *rx_msdu_link_desc; 4448 dma_addr_t paddr; 4449 u16 num_msdus = 0; 4450 u32 rx_buf_size, rx_pkt_offset, sw_cookie; 4451 u32 rx_bufs_used = 0, i = 0; 4452 u32 msdu_ppdu_id = 0, msdu_cnt = 0; 4453 u32 total_len = 0, frag_len = 0; 4454 bool is_frag, is_first_msdu; 4455 bool drop_mpdu = false; 4456 struct ath11k_skb_rxcb *rxcb; 4457 struct hal_reo_entrance_ring *ent_desc = 4458 (struct hal_reo_entrance_ring *)ring_entry; 4459 int buf_id; 4460 4461 ath11k_hal_rx_reo_ent_buf_paddr_get(ring_entry, &paddr, 4462 &sw_cookie, &p_last_buf_addr_info, 4463 &msdu_cnt); 4464 4465 if (FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_PUSH_REASON, 4466 ent_desc->info1) == 4467 HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) { 4468 u8 rxdma_err = 4469 FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE, 4470 ent_desc->info1); 4471 if (rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_FLUSH_REQUEST_ERR || 4472 rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_MPDU_LEN_ERR || 4473 rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_OVERFLOW_ERR) { 4474 drop_mpdu = true; 4475 pmon->rx_mon_stats.dest_mpdu_drop++; 4476 } 4477 } 4478 4479 is_frag = false; 4480 is_first_msdu = true; 4481 4482 do { 4483 if (pmon->mon_last_linkdesc_paddr == paddr) { 4484 pmon->rx_mon_stats.dup_mon_linkdesc_cnt++; 4485 return rx_bufs_used; 4486 } 4487 4488 rx_msdu_link_desc = 4489 (void *)pmon->link_desc_banks[sw_cookie].vaddr + 4490 (paddr - pmon->link_desc_banks[sw_cookie].paddr); 4491 4492 ath11k_hal_rx_msdu_list_get(ar, rx_msdu_link_desc, &msdu_list, 4493 &num_msdus); 4494 4495 for (i = 0; i < num_msdus; i++) { 4496 u32 l2_hdr_offset; 4497 4498 if (pmon->mon_last_buf_cookie == msdu_list.sw_cookie[i]) { 4499 ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 4500 "i %d last_cookie %d is same\n", 4501 i, pmon->mon_last_buf_cookie); 4502 drop_mpdu = true; 4503 pmon->rx_mon_stats.dup_mon_buf_cnt++; 4504 continue; 4505 } 4506 buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, 4507 msdu_list.sw_cookie[i]); 4508 4509 spin_lock_bh(&rx_ring->idr_lock); 4510 msdu = idr_find(&rx_ring->bufs_idr, buf_id); 4511 spin_unlock_bh(&rx_ring->idr_lock); 4512 if (!msdu) { 4513 ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 4514 "msdu_pop: invalid buf_id %d\n", buf_id); 4515 break; 4516 } 4517 rxcb = ATH11K_SKB_RXCB(msdu); 4518 if (!rxcb->unmapped) { 4519 dma_unmap_single(ar->ab->dev, rxcb->paddr, 
4520 msdu->len + 4521 skb_tailroom(msdu), 4522 DMA_FROM_DEVICE); 4523 rxcb->unmapped = 1; 4524 } 4525 if (drop_mpdu) { 4526 ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 4527 "i %d drop msdu %p *ppdu_id %x\n", 4528 i, msdu, *ppdu_id); 4529 dev_kfree_skb_any(msdu); 4530 msdu = NULL; 4531 goto next_msdu; 4532 } 4533 4534 rx_desc = (struct hal_rx_desc *)msdu->data; 4535 4536 rx_pkt_offset = sizeof(struct hal_rx_desc); 4537 l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad(rx_desc); 4538 4539 if (is_first_msdu) { 4540 if (!ath11k_dp_rxdesc_mpdu_valid(rx_desc)) { 4541 drop_mpdu = true; 4542 dev_kfree_skb_any(msdu); 4543 msdu = NULL; 4544 pmon->mon_last_linkdesc_paddr = paddr; 4545 goto next_msdu; 4546 } 4547 4548 msdu_ppdu_id = 4549 ath11k_dp_rxdesc_get_ppduid(rx_desc); 4550 4551 if (ath11k_dp_rx_mon_comp_ppduid(msdu_ppdu_id, 4552 ppdu_id, 4553 &rx_bufs_used)) { 4554 if (rx_bufs_used) { 4555 drop_mpdu = true; 4556 dev_kfree_skb_any(msdu); 4557 msdu = NULL; 4558 goto next_msdu; 4559 } 4560 return rx_bufs_used; 4561 } 4562 pmon->mon_last_linkdesc_paddr = paddr; 4563 is_first_msdu = false; 4564 } 4565 ath11k_dp_mon_get_buf_len(&msdu_list.msdu_info[i], 4566 &is_frag, &total_len, 4567 &frag_len, &msdu_cnt); 4568 rx_buf_size = rx_pkt_offset + l2_hdr_offset + frag_len; 4569 4570 ath11k_dp_pkt_set_pktlen(msdu, rx_buf_size); 4571 4572 if (!(*head_msdu)) 4573 *head_msdu = msdu; 4574 else if (last) 4575 last->next = msdu; 4576 4577 last = msdu; 4578 next_msdu: 4579 pmon->mon_last_buf_cookie = msdu_list.sw_cookie[i]; 4580 rx_bufs_used++; 4581 spin_lock_bh(&rx_ring->idr_lock); 4582 idr_remove(&rx_ring->bufs_idr, buf_id); 4583 spin_unlock_bh(&rx_ring->idr_lock); 4584 } 4585 4586 ath11k_dp_rx_mon_next_link_desc_get(rx_msdu_link_desc, &paddr, 4587 &sw_cookie, 4588 &p_buf_addr_info); 4589 4590 if (ath11k_dp_rx_monitor_link_desc_return(ar, 4591 p_last_buf_addr_info, 4592 dp->mac_id)) 4593 ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 4594 "dp_rx_monitor_link_desc_return failed"); 4595 4596 p_last_buf_addr_info = p_buf_addr_info; 4597 4598 } while (paddr && msdu_cnt); 4599 4600 if (last) 4601 last->next = NULL; 4602 4603 *tail_msdu = msdu; 4604 4605 if (msdu_cnt == 0) 4606 *npackets = 1; 4607 4608 return rx_bufs_used; 4609 } 4610 4611 static void ath11k_dp_rx_msdus_set_payload(struct sk_buff *msdu) 4612 { 4613 u32 rx_pkt_offset, l2_hdr_offset; 4614 4615 rx_pkt_offset = sizeof(struct hal_rx_desc); 4616 l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad((struct hal_rx_desc *)msdu->data); 4617 skb_pull(msdu, rx_pkt_offset + l2_hdr_offset); 4618 } 4619 4620 static struct sk_buff * 4621 ath11k_dp_rx_mon_merg_msdus(struct ath11k *ar, 4622 u32 mac_id, struct sk_buff *head_msdu, 4623 struct sk_buff *last_msdu, 4624 struct ieee80211_rx_status *rxs) 4625 { 4626 struct sk_buff *msdu, *mpdu_buf, *prev_buf; 4627 u32 decap_format, wifi_hdr_len; 4628 struct hal_rx_desc *rx_desc; 4629 char *hdr_desc; 4630 u8 *dest; 4631 struct ieee80211_hdr_3addr *wh; 4632 4633 mpdu_buf = NULL; 4634 4635 if (!head_msdu) 4636 goto err_merge_fail; 4637 4638 rx_desc = (struct hal_rx_desc *)head_msdu->data; 4639 4640 if (ath11k_dp_rxdesc_get_mpdulen_err(rx_desc)) 4641 return NULL; 4642 4643 decap_format = ath11k_dp_rxdesc_get_decap_format(rx_desc); 4644 4645 ath11k_dp_rx_h_ppdu(ar, rx_desc, rxs); 4646 4647 if (decap_format == DP_RX_DECAP_TYPE_RAW) { 4648 ath11k_dp_rx_msdus_set_payload(head_msdu); 4649 4650 prev_buf = head_msdu; 4651 msdu = head_msdu->next; 4652 4653 while (msdu) { 4654 ath11k_dp_rx_msdus_set_payload(msdu); 4655 4656 prev_buf = msdu; 4657 msdu = msdu->next; 4658 
} 4659 4660 prev_buf->next = NULL; 4661 4662 skb_trim(prev_buf, prev_buf->len - HAL_RX_FCS_LEN); 4663 } else if (decap_format == DP_RX_DECAP_TYPE_NATIVE_WIFI) { 4664 __le16 qos_field; 4665 u8 qos_pkt = 0; 4666 4667 rx_desc = (struct hal_rx_desc *)head_msdu->data; 4668 hdr_desc = ath11k_dp_rxdesc_get_80211hdr(rx_desc); 4669 4670 /* Base size */ 4671 wifi_hdr_len = sizeof(struct ieee80211_hdr_3addr); 4672 wh = (struct ieee80211_hdr_3addr *)hdr_desc; 4673 4674 if (ieee80211_is_data_qos(wh->frame_control)) { 4675 struct ieee80211_qos_hdr *qwh = 4676 (struct ieee80211_qos_hdr *)hdr_desc; 4677 4678 qos_field = qwh->qos_ctrl; 4679 qos_pkt = 1; 4680 } 4681 msdu = head_msdu; 4682 4683 while (msdu) { 4684 rx_desc = (struct hal_rx_desc *)msdu->data; 4685 hdr_desc = ath11k_dp_rxdesc_get_80211hdr(rx_desc); 4686 4687 if (qos_pkt) { 4688 dest = skb_push(msdu, sizeof(__le16)); 4689 if (!dest) 4690 goto err_merge_fail; 4691 memcpy(dest, hdr_desc, wifi_hdr_len); 4692 memcpy(dest + wifi_hdr_len, 4693 (u8 *)&qos_field, sizeof(__le16)); 4694 } 4695 ath11k_dp_rx_msdus_set_payload(msdu); 4696 prev_buf = msdu; 4697 msdu = msdu->next; 4698 } 4699 dest = skb_put(prev_buf, HAL_RX_FCS_LEN); 4700 if (!dest) 4701 goto err_merge_fail; 4702 4703 ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 4704 "mpdu_buf %pK mpdu_buf->len %u", 4705 prev_buf, prev_buf->len); 4706 } else { 4707 ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 4708 "decap format %d is not supported!\n", 4709 decap_format); 4710 goto err_merge_fail; 4711 } 4712 4713 return head_msdu; 4714 4715 err_merge_fail: 4716 if (mpdu_buf && decap_format != DP_RX_DECAP_TYPE_RAW) { 4717 ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 4718 "err_merge_fail mpdu_buf %pK", mpdu_buf); 4719 /* Free the head buffer */ 4720 dev_kfree_skb_any(mpdu_buf); 4721 } 4722 return NULL; 4723 } 4724 4725 static int ath11k_dp_rx_mon_deliver(struct ath11k *ar, u32 mac_id, 4726 struct sk_buff *head_msdu, 4727 struct sk_buff *tail_msdu, 4728 struct napi_struct *napi) 4729 { 4730 struct ath11k_pdev_dp *dp = &ar->dp; 4731 struct sk_buff *mon_skb, *skb_next, *header; 4732 struct ieee80211_rx_status *rxs = &dp->rx_status, *status; 4733 4734 mon_skb = ath11k_dp_rx_mon_merg_msdus(ar, mac_id, head_msdu, 4735 tail_msdu, rxs); 4736 4737 if (!mon_skb) 4738 goto mon_deliver_fail; 4739 4740 header = mon_skb; 4741 4742 rxs->flag = 0; 4743 do { 4744 skb_next = mon_skb->next; 4745 if (!skb_next) 4746 rxs->flag &= ~RX_FLAG_AMSDU_MORE; 4747 else 4748 rxs->flag |= RX_FLAG_AMSDU_MORE; 4749 4750 if (mon_skb == header) { 4751 header = NULL; 4752 rxs->flag &= ~RX_FLAG_ALLOW_SAME_PN; 4753 } else { 4754 rxs->flag |= RX_FLAG_ALLOW_SAME_PN; 4755 } 4756 rxs->flag |= RX_FLAG_ONLY_MONITOR; 4757 4758 status = IEEE80211_SKB_RXCB(mon_skb); 4759 *status = *rxs; 4760 4761 ath11k_dp_rx_deliver_msdu(ar, napi, mon_skb); 4762 mon_skb = skb_next; 4763 } while (mon_skb); 4764 rxs->flag = 0; 4765 4766 return 0; 4767 4768 mon_deliver_fail: 4769 mon_skb = head_msdu; 4770 while (mon_skb) { 4771 skb_next = mon_skb->next; 4772 dev_kfree_skb_any(mon_skb); 4773 mon_skb = skb_next; 4774 } 4775 return -EINVAL; 4776 } 4777 4778 static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, u32 quota, 4779 struct napi_struct *napi) 4780 { 4781 struct ath11k_pdev_dp *dp = &ar->dp; 4782 struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data; 4783 void *ring_entry; 4784 void *mon_dst_srng; 4785 u32 ppdu_id; 4786 u32 rx_bufs_used; 4787 struct ath11k_pdev_mon_stats *rx_mon_stats; 4788 u32 npackets = 0; 4789 4790 mon_dst_srng = 
&ar->ab->hal.srng_list[dp->rxdma_mon_dst_ring.ring_id]; 4791 4792 if (!mon_dst_srng) { 4793 ath11k_warn(ar->ab, 4794 "HAL Monitor Destination Ring Init Failed -- %pK", 4795 mon_dst_srng); 4796 return; 4797 } 4798 4799 spin_lock_bh(&pmon->mon_lock); 4800 4801 ath11k_hal_srng_access_begin(ar->ab, mon_dst_srng); 4802 4803 ppdu_id = pmon->mon_ppdu_info.ppdu_id; 4804 rx_bufs_used = 0; 4805 rx_mon_stats = &pmon->rx_mon_stats; 4806 4807 while ((ring_entry = ath11k_hal_srng_dst_peek(ar->ab, mon_dst_srng))) { 4808 struct sk_buff *head_msdu, *tail_msdu; 4809 4810 head_msdu = NULL; 4811 tail_msdu = NULL; 4812 4813 rx_bufs_used += ath11k_dp_rx_mon_mpdu_pop(ar, ring_entry, 4814 &head_msdu, 4815 &tail_msdu, 4816 &npackets, &ppdu_id); 4817 4818 if (ppdu_id != pmon->mon_ppdu_info.ppdu_id) { 4819 pmon->mon_ppdu_status = DP_PPDU_STATUS_START; 4820 ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 4821 "dest_rx: new ppdu_id %x != status ppdu_id %x", 4822 ppdu_id, pmon->mon_ppdu_info.ppdu_id); 4823 break; 4824 } 4825 if (head_msdu && tail_msdu) { 4826 ath11k_dp_rx_mon_deliver(ar, dp->mac_id, head_msdu, 4827 tail_msdu, napi); 4828 rx_mon_stats->dest_mpdu_done++; 4829 } 4830 4831 ring_entry = ath11k_hal_srng_dst_get_next_entry(ar->ab, 4832 mon_dst_srng); 4833 } 4834 ath11k_hal_srng_access_end(ar->ab, mon_dst_srng); 4835 4836 spin_unlock_bh(&pmon->mon_lock); 4837 4838 if (rx_bufs_used) { 4839 rx_mon_stats->dest_ppdu_done++; 4840 ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id, 4841 &dp->rxdma_mon_buf_ring, 4842 rx_bufs_used, 4843 HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC); 4844 } 4845 } 4846 4847 static void ath11k_dp_rx_mon_status_process_tlv(struct ath11k *ar, 4848 u32 quota, 4849 struct napi_struct *napi) 4850 { 4851 struct ath11k_pdev_dp *dp = &ar->dp; 4852 struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data; 4853 struct hal_rx_mon_ppdu_info *ppdu_info; 4854 struct sk_buff *status_skb; 4855 u32 tlv_status = HAL_TLV_STATUS_BUF_DONE; 4856 struct ath11k_pdev_mon_stats *rx_mon_stats; 4857 4858 ppdu_info = &pmon->mon_ppdu_info; 4859 rx_mon_stats = &pmon->rx_mon_stats; 4860 4861 if (pmon->mon_ppdu_status != DP_PPDU_STATUS_START) 4862 return; 4863 4864 while (!skb_queue_empty(&pmon->rx_status_q)) { 4865 status_skb = skb_dequeue(&pmon->rx_status_q); 4866 4867 tlv_status = ath11k_hal_rx_parse_mon_status(ar->ab, ppdu_info, 4868 status_skb); 4869 if (tlv_status == HAL_TLV_STATUS_PPDU_DONE) { 4870 rx_mon_stats->status_ppdu_done++; 4871 pmon->mon_ppdu_status = DP_PPDU_STATUS_DONE; 4872 ath11k_dp_rx_mon_dest_process(ar, quota, napi); 4873 pmon->mon_ppdu_status = DP_PPDU_STATUS_START; 4874 } 4875 dev_kfree_skb_any(status_skb); 4876 } 4877 } 4878 4879 static int ath11k_dp_mon_process_rx(struct ath11k_base *ab, int mac_id, 4880 struct napi_struct *napi, int budget) 4881 { 4882 struct ath11k *ar = ath11k_ab_to_ar(ab, mac_id); 4883 struct ath11k_pdev_dp *dp = &ar->dp; 4884 struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data; 4885 int num_buffs_reaped = 0; 4886 4887 num_buffs_reaped = ath11k_dp_rx_reap_mon_status_ring(ar->ab, dp->mac_id, &budget, 4888 &pmon->rx_status_q); 4889 if (num_buffs_reaped) 4890 ath11k_dp_rx_mon_status_process_tlv(ar, budget, napi); 4891 4892 return num_buffs_reaped; 4893 } 4894 4895 int ath11k_dp_rx_process_mon_rings(struct ath11k_base *ab, int mac_id, 4896 struct napi_struct *napi, int budget) 4897 { 4898 struct ath11k *ar = ath11k_ab_to_ar(ab, mac_id); 4899 int ret = 0; 4900 4901 if (test_bit(ATH11K_FLAG_MONITOR_ENABLED, &ar->monitor_flags)) 4902 ret = ath11k_dp_mon_process_rx(ab, 
mac_id, napi, budget); 4903 else 4904 ret = ath11k_dp_rx_process_mon_status(ab, mac_id, napi, budget); 4905 return ret; 4906 } 4907 4908 static int ath11k_dp_rx_pdev_mon_status_attach(struct ath11k *ar) 4909 { 4910 struct ath11k_pdev_dp *dp = &ar->dp; 4911 struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data; 4912 4913 skb_queue_head_init(&pmon->rx_status_q); 4914 4915 pmon->mon_ppdu_status = DP_PPDU_STATUS_START; 4916 4917 memset(&pmon->rx_mon_stats, 0, 4918 sizeof(pmon->rx_mon_stats)); 4919 return 0; 4920 } 4921 4922 int ath11k_dp_rx_pdev_mon_attach(struct ath11k *ar) 4923 { 4924 struct ath11k_pdev_dp *dp = &ar->dp; 4925 struct ath11k_mon_data *pmon = &dp->mon_data; 4926 struct hal_srng *mon_desc_srng = NULL; 4927 struct dp_srng *dp_srng; 4928 int ret = 0; 4929 u32 n_link_desc = 0; 4930 4931 ret = ath11k_dp_rx_pdev_mon_status_attach(ar); 4932 if (ret) { 4933 ath11k_warn(ar->ab, "pdev_mon_status_attach() failed"); 4934 return ret; 4935 } 4936 4937 /* if rxdma1_enable is false, no need to setup 4938 * rxdma_mon_desc_ring. 4939 */ 4940 if (!ar->ab->hw_params.rxdma1_enable) 4941 return 0; 4942 4943 dp_srng = &dp->rxdma_mon_desc_ring; 4944 n_link_desc = dp_srng->size / 4945 ath11k_hal_srng_get_entrysize(ar->ab, HAL_RXDMA_MONITOR_DESC); 4946 mon_desc_srng = 4947 &ar->ab->hal.srng_list[dp->rxdma_mon_desc_ring.ring_id]; 4948 4949 ret = ath11k_dp_link_desc_setup(ar->ab, pmon->link_desc_banks, 4950 HAL_RXDMA_MONITOR_DESC, mon_desc_srng, 4951 n_link_desc); 4952 if (ret) { 4953 ath11k_warn(ar->ab, "mon_link_desc_pool_setup() failed"); 4954 return ret; 4955 } 4956 pmon->mon_last_linkdesc_paddr = 0; 4957 pmon->mon_last_buf_cookie = DP_RX_DESC_COOKIE_MAX + 1; 4958 spin_lock_init(&pmon->mon_lock); 4959 4960 return 0; 4961 } 4962 4963 static int ath11k_dp_mon_link_free(struct ath11k *ar) 4964 { 4965 struct ath11k_pdev_dp *dp = &ar->dp; 4966 struct ath11k_mon_data *pmon = &dp->mon_data; 4967 4968 ath11k_dp_link_desc_cleanup(ar->ab, pmon->link_desc_banks, 4969 HAL_RXDMA_MONITOR_DESC, 4970 &dp->rxdma_mon_desc_ring); 4971 return 0; 4972 } 4973 4974 int ath11k_dp_rx_pdev_mon_detach(struct ath11k *ar) 4975 { 4976 ath11k_dp_mon_link_free(ar); 4977 return 0; 4978 } 4979