1 // SPDX-License-Identifier: BSD-3-Clause-Clear 2 /* 3 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. 4 */ 5 6 #include <linux/ieee80211.h> 7 #include <linux/kernel.h> 8 #include <linux/skbuff.h> 9 #include <crypto/hash.h> 10 #include "core.h" 11 #include "debug.h" 12 #include "debugfs_htt_stats.h" 13 #include "debugfs_sta.h" 14 #include "hal_desc.h" 15 #include "hw.h" 16 #include "dp_rx.h" 17 #include "hal_rx.h" 18 #include "dp_tx.h" 19 #include "peer.h" 20 21 #define ATH11K_DP_RX_FRAGMENT_TIMEOUT_MS (2 * HZ) 22 23 static u8 *ath11k_dp_rx_h_80211_hdr(struct ath11k_base *ab, struct hal_rx_desc *desc) 24 { 25 return ab->hw_params.hw_ops->rx_desc_get_hdr_status(desc); 26 } 27 28 static enum hal_encrypt_type ath11k_dp_rx_h_mpdu_start_enctype(struct ath11k_base *ab, 29 struct hal_rx_desc *desc) 30 { 31 if (!ab->hw_params.hw_ops->rx_desc_encrypt_valid(desc)) 32 return HAL_ENCRYPT_TYPE_OPEN; 33 34 return ab->hw_params.hw_ops->rx_desc_get_encrypt_type(desc); 35 } 36 37 static u8 ath11k_dp_rx_h_msdu_start_decap_type(struct ath11k_base *ab, 38 struct hal_rx_desc *desc) 39 { 40 return ab->hw_params.hw_ops->rx_desc_get_decap_type(desc); 41 } 42 43 static u8 ath11k_dp_rx_h_msdu_start_mesh_ctl_present(struct ath11k_base *ab, 44 struct hal_rx_desc *desc) 45 { 46 return ab->hw_params.hw_ops->rx_desc_get_mesh_ctl(desc); 47 } 48 49 static bool ath11k_dp_rx_h_mpdu_start_seq_ctrl_valid(struct ath11k_base *ab, 50 struct hal_rx_desc *desc) 51 { 52 return ab->hw_params.hw_ops->rx_desc_get_mpdu_seq_ctl_vld(desc); 53 } 54 55 static bool ath11k_dp_rx_h_mpdu_start_fc_valid(struct ath11k_base *ab, 56 struct hal_rx_desc *desc) 57 { 58 return ab->hw_params.hw_ops->rx_desc_get_mpdu_fc_valid(desc); 59 } 60 61 static bool ath11k_dp_rx_h_mpdu_start_more_frags(struct ath11k_base *ab, 62 struct sk_buff *skb) 63 { 64 struct ieee80211_hdr *hdr; 65 66 hdr = (struct ieee80211_hdr *)(skb->data + ab->hw_params.hal_desc_sz); 67 return ieee80211_has_morefrags(hdr->frame_control); 68 } 69 70 static u16 ath11k_dp_rx_h_mpdu_start_frag_no(struct ath11k_base *ab, 71 struct sk_buff *skb) 72 { 73 struct ieee80211_hdr *hdr; 74 75 hdr = (struct ieee80211_hdr *)(skb->data + ab->hw_params.hal_desc_sz); 76 return le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG; 77 } 78 79 static u16 ath11k_dp_rx_h_mpdu_start_seq_no(struct ath11k_base *ab, 80 struct hal_rx_desc *desc) 81 { 82 return ab->hw_params.hw_ops->rx_desc_get_mpdu_start_seq_no(desc); 83 } 84 85 static void *ath11k_dp_rx_get_attention(struct ath11k_base *ab, 86 struct hal_rx_desc *desc) 87 { 88 return ab->hw_params.hw_ops->rx_desc_get_attention(desc); 89 } 90 91 static bool ath11k_dp_rx_h_attn_msdu_done(struct rx_attention *attn) 92 { 93 return !!FIELD_GET(RX_ATTENTION_INFO2_MSDU_DONE, 94 __le32_to_cpu(attn->info2)); 95 } 96 97 static bool ath11k_dp_rx_h_attn_l4_cksum_fail(struct rx_attention *attn) 98 { 99 return !!FIELD_GET(RX_ATTENTION_INFO1_TCP_UDP_CKSUM_FAIL, 100 __le32_to_cpu(attn->info1)); 101 } 102 103 static bool ath11k_dp_rx_h_attn_ip_cksum_fail(struct rx_attention *attn) 104 { 105 return !!FIELD_GET(RX_ATTENTION_INFO1_IP_CKSUM_FAIL, 106 __le32_to_cpu(attn->info1)); 107 } 108 109 static bool ath11k_dp_rx_h_attn_is_decrypted(struct rx_attention *attn) 110 { 111 return (FIELD_GET(RX_ATTENTION_INFO2_DCRYPT_STATUS_CODE, 112 __le32_to_cpu(attn->info2)) == 113 RX_DESC_DECRYPT_STATUS_CODE_OK); 114 } 115 116 static u32 ath11k_dp_rx_h_attn_mpdu_err(struct rx_attention *attn) 117 { 118 u32 info = __le32_to_cpu(attn->info1); 119 u32 errmap = 0; 120 121 if (info & 
RX_ATTENTION_INFO1_FCS_ERR) 122 errmap |= DP_RX_MPDU_ERR_FCS; 123 124 if (info & RX_ATTENTION_INFO1_DECRYPT_ERR) 125 errmap |= DP_RX_MPDU_ERR_DECRYPT; 126 127 if (info & RX_ATTENTION_INFO1_TKIP_MIC_ERR) 128 errmap |= DP_RX_MPDU_ERR_TKIP_MIC; 129 130 if (info & RX_ATTENTION_INFO1_A_MSDU_ERROR) 131 errmap |= DP_RX_MPDU_ERR_AMSDU_ERR; 132 133 if (info & RX_ATTENTION_INFO1_OVERFLOW_ERR) 134 errmap |= DP_RX_MPDU_ERR_OVERFLOW; 135 136 if (info & RX_ATTENTION_INFO1_MSDU_LEN_ERR) 137 errmap |= DP_RX_MPDU_ERR_MSDU_LEN; 138 139 if (info & RX_ATTENTION_INFO1_MPDU_LEN_ERR) 140 errmap |= DP_RX_MPDU_ERR_MPDU_LEN; 141 142 return errmap; 143 } 144 145 static bool ath11k_dp_rx_h_attn_msdu_len_err(struct ath11k_base *ab, 146 struct hal_rx_desc *desc) 147 { 148 struct rx_attention *rx_attention; 149 u32 errmap; 150 151 rx_attention = ath11k_dp_rx_get_attention(ab, desc); 152 errmap = ath11k_dp_rx_h_attn_mpdu_err(rx_attention); 153 154 return errmap & DP_RX_MPDU_ERR_MSDU_LEN; 155 } 156 157 static u16 ath11k_dp_rx_h_msdu_start_msdu_len(struct ath11k_base *ab, 158 struct hal_rx_desc *desc) 159 { 160 return ab->hw_params.hw_ops->rx_desc_get_msdu_len(desc); 161 } 162 163 static u8 ath11k_dp_rx_h_msdu_start_sgi(struct ath11k_base *ab, 164 struct hal_rx_desc *desc) 165 { 166 return ab->hw_params.hw_ops->rx_desc_get_msdu_sgi(desc); 167 } 168 169 static u8 ath11k_dp_rx_h_msdu_start_rate_mcs(struct ath11k_base *ab, 170 struct hal_rx_desc *desc) 171 { 172 return ab->hw_params.hw_ops->rx_desc_get_msdu_rate_mcs(desc); 173 } 174 175 static u8 ath11k_dp_rx_h_msdu_start_rx_bw(struct ath11k_base *ab, 176 struct hal_rx_desc *desc) 177 { 178 return ab->hw_params.hw_ops->rx_desc_get_msdu_rx_bw(desc); 179 } 180 181 static u32 ath11k_dp_rx_h_msdu_start_freq(struct ath11k_base *ab, 182 struct hal_rx_desc *desc) 183 { 184 return ab->hw_params.hw_ops->rx_desc_get_msdu_freq(desc); 185 } 186 187 static u8 ath11k_dp_rx_h_msdu_start_pkt_type(struct ath11k_base *ab, 188 struct hal_rx_desc *desc) 189 { 190 return ab->hw_params.hw_ops->rx_desc_get_msdu_pkt_type(desc); 191 } 192 193 static u8 ath11k_dp_rx_h_msdu_start_nss(struct ath11k_base *ab, 194 struct hal_rx_desc *desc) 195 { 196 return hweight8(ab->hw_params.hw_ops->rx_desc_get_msdu_nss(desc)); 197 } 198 199 static u8 ath11k_dp_rx_h_mpdu_start_tid(struct ath11k_base *ab, 200 struct hal_rx_desc *desc) 201 { 202 return ab->hw_params.hw_ops->rx_desc_get_mpdu_tid(desc); 203 } 204 205 static u16 ath11k_dp_rx_h_mpdu_start_peer_id(struct ath11k_base *ab, 206 struct hal_rx_desc *desc) 207 { 208 return ab->hw_params.hw_ops->rx_desc_get_mpdu_peer_id(desc); 209 } 210 211 static u8 ath11k_dp_rx_h_msdu_end_l3pad(struct ath11k_base *ab, 212 struct hal_rx_desc *desc) 213 { 214 return ab->hw_params.hw_ops->rx_desc_get_l3_pad_bytes(desc); 215 } 216 217 static bool ath11k_dp_rx_h_msdu_end_first_msdu(struct ath11k_base *ab, 218 struct hal_rx_desc *desc) 219 { 220 return ab->hw_params.hw_ops->rx_desc_get_first_msdu(desc); 221 } 222 223 static bool ath11k_dp_rx_h_msdu_end_last_msdu(struct ath11k_base *ab, 224 struct hal_rx_desc *desc) 225 { 226 return ab->hw_params.hw_ops->rx_desc_get_last_msdu(desc); 227 } 228 229 static void ath11k_dp_rx_desc_end_tlv_copy(struct ath11k_base *ab, 230 struct hal_rx_desc *fdesc, 231 struct hal_rx_desc *ldesc) 232 { 233 ab->hw_params.hw_ops->rx_desc_copy_attn_end_tlv(fdesc, ldesc); 234 } 235 236 static u32 ath11k_dp_rxdesc_get_mpdulen_err(struct rx_attention *attn) 237 { 238 return FIELD_GET(RX_ATTENTION_INFO1_MPDU_LEN_ERR, 239 __le32_to_cpu(attn->info1)); 240 } 241 242 
static u8 *ath11k_dp_rxdesc_get_80211hdr(struct ath11k_base *ab, 243 struct hal_rx_desc *rx_desc) 244 { 245 u8 *rx_pkt_hdr; 246 247 rx_pkt_hdr = ab->hw_params.hw_ops->rx_desc_get_msdu_payload(rx_desc); 248 249 return rx_pkt_hdr; 250 } 251 252 static bool ath11k_dp_rxdesc_mpdu_valid(struct ath11k_base *ab, 253 struct hal_rx_desc *rx_desc) 254 { 255 u32 tlv_tag; 256 257 tlv_tag = ab->hw_params.hw_ops->rx_desc_get_mpdu_start_tag(rx_desc); 258 259 return tlv_tag == HAL_RX_MPDU_START; 260 } 261 262 static u32 ath11k_dp_rxdesc_get_ppduid(struct ath11k_base *ab, 263 struct hal_rx_desc *rx_desc) 264 { 265 return ab->hw_params.hw_ops->rx_desc_get_mpdu_ppdu_id(rx_desc); 266 } 267 268 static void ath11k_dp_rxdesc_set_msdu_len(struct ath11k_base *ab, 269 struct hal_rx_desc *desc, 270 u16 len) 271 { 272 ab->hw_params.hw_ops->rx_desc_set_msdu_len(desc, len); 273 } 274 275 static bool ath11k_dp_rx_h_attn_is_mcbc(struct ath11k_base *ab, 276 struct hal_rx_desc *desc) 277 { 278 struct rx_attention *attn = ath11k_dp_rx_get_attention(ab, desc); 279 280 return ath11k_dp_rx_h_msdu_end_first_msdu(ab, desc) && 281 (!!FIELD_GET(RX_ATTENTION_INFO1_MCAST_BCAST, 282 __le32_to_cpu(attn->info1))); 283 } 284 285 static bool ath11k_dp_rxdesc_mac_addr2_valid(struct ath11k_base *ab, 286 struct hal_rx_desc *desc) 287 { 288 return ab->hw_params.hw_ops->rx_desc_mac_addr2_valid(desc); 289 } 290 291 static u8 *ath11k_dp_rxdesc_mpdu_start_addr2(struct ath11k_base *ab, 292 struct hal_rx_desc *desc) 293 { 294 return ab->hw_params.hw_ops->rx_desc_mpdu_start_addr2(desc); 295 } 296 297 static void ath11k_dp_service_mon_ring(struct timer_list *t) 298 { 299 struct ath11k_base *ab = from_timer(ab, t, mon_reap_timer); 300 int i; 301 302 for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) 303 ath11k_dp_rx_process_mon_rings(ab, i, NULL, DP_MON_SERVICE_BUDGET); 304 305 mod_timer(&ab->mon_reap_timer, jiffies + 306 msecs_to_jiffies(ATH11K_MON_TIMER_INTERVAL)); 307 } 308 309 static int ath11k_dp_purge_mon_ring(struct ath11k_base *ab) 310 { 311 int i, reaped = 0; 312 unsigned long timeout = jiffies + msecs_to_jiffies(DP_MON_PURGE_TIMEOUT_MS); 313 314 do { 315 for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) 316 reaped += ath11k_dp_rx_process_mon_rings(ab, i, 317 NULL, 318 DP_MON_SERVICE_BUDGET); 319 320 /* nothing more to reap */ 321 if (reaped < DP_MON_SERVICE_BUDGET) 322 return 0; 323 324 } while (time_before(jiffies, timeout)); 325 326 ath11k_warn(ab, "dp mon ring purge timeout"); 327 328 return -ETIMEDOUT; 329 } 330 331 /* Returns number of Rx buffers replenished */ 332 int ath11k_dp_rxbufs_replenish(struct ath11k_base *ab, int mac_id, 333 struct dp_rxdma_ring *rx_ring, 334 int req_entries, 335 enum hal_rx_buf_return_buf_manager mgr) 336 { 337 struct hal_srng *srng; 338 u32 *desc; 339 struct sk_buff *skb; 340 int num_free; 341 int num_remain; 342 int buf_id; 343 u32 cookie; 344 dma_addr_t paddr; 345 346 req_entries = min(req_entries, rx_ring->bufs_max); 347 348 srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id]; 349 350 spin_lock_bh(&srng->lock); 351 352 ath11k_hal_srng_access_begin(ab, srng); 353 354 num_free = ath11k_hal_srng_src_num_free(ab, srng, true); 355 if (!req_entries && (num_free > (rx_ring->bufs_max * 3) / 4)) 356 req_entries = num_free; 357 358 req_entries = min(num_free, req_entries); 359 num_remain = req_entries; 360 361 while (num_remain > 0) { 362 skb = dev_alloc_skb(DP_RX_BUFFER_SIZE + 363 DP_RX_BUFFER_ALIGN_SIZE); 364 if (!skb) 365 break; 366 367 if (!IS_ALIGNED((unsigned long)skb->data, 368 
DP_RX_BUFFER_ALIGN_SIZE)) { 369 skb_pull(skb, 370 PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) - 371 skb->data); 372 } 373 374 paddr = dma_map_single(ab->dev, skb->data, 375 skb->len + skb_tailroom(skb), 376 DMA_FROM_DEVICE); 377 if (dma_mapping_error(ab->dev, paddr)) 378 goto fail_free_skb; 379 380 spin_lock_bh(&rx_ring->idr_lock); 381 buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0, 382 rx_ring->bufs_max * 3, GFP_ATOMIC); 383 spin_unlock_bh(&rx_ring->idr_lock); 384 if (buf_id < 0) 385 goto fail_dma_unmap; 386 387 desc = ath11k_hal_srng_src_get_next_entry(ab, srng); 388 if (!desc) 389 goto fail_idr_remove; 390 391 ATH11K_SKB_RXCB(skb)->paddr = paddr; 392 393 cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) | 394 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id); 395 396 num_remain--; 397 398 ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr); 399 } 400 401 ath11k_hal_srng_access_end(ab, srng); 402 403 spin_unlock_bh(&srng->lock); 404 405 return req_entries - num_remain; 406 407 fail_idr_remove: 408 spin_lock_bh(&rx_ring->idr_lock); 409 idr_remove(&rx_ring->bufs_idr, buf_id); 410 spin_unlock_bh(&rx_ring->idr_lock); 411 fail_dma_unmap: 412 dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb), 413 DMA_FROM_DEVICE); 414 fail_free_skb: 415 dev_kfree_skb_any(skb); 416 417 ath11k_hal_srng_access_end(ab, srng); 418 419 spin_unlock_bh(&srng->lock); 420 421 return req_entries - num_remain; 422 } 423 424 static int ath11k_dp_rxdma_buf_ring_free(struct ath11k *ar, 425 struct dp_rxdma_ring *rx_ring) 426 { 427 struct ath11k_pdev_dp *dp = &ar->dp; 428 struct sk_buff *skb; 429 int buf_id; 430 431 spin_lock_bh(&rx_ring->idr_lock); 432 idr_for_each_entry(&rx_ring->bufs_idr, skb, buf_id) { 433 idr_remove(&rx_ring->bufs_idr, buf_id); 434 /* TODO: Understand where internal driver does this dma_unmap 435 * of rxdma_buffer. 436 */ 437 dma_unmap_single(ar->ab->dev, ATH11K_SKB_RXCB(skb)->paddr, 438 skb->len + skb_tailroom(skb), DMA_FROM_DEVICE); 439 dev_kfree_skb_any(skb); 440 } 441 442 idr_destroy(&rx_ring->bufs_idr); 443 spin_unlock_bh(&rx_ring->idr_lock); 444 445 /* if rxdma1_enable is false, mon_status_refill_ring 446 * isn't setup, so don't clean. 447 */ 448 if (!ar->ab->hw_params.rxdma1_enable) 449 return 0; 450 451 rx_ring = &dp->rx_mon_status_refill_ring[0]; 452 453 spin_lock_bh(&rx_ring->idr_lock); 454 idr_for_each_entry(&rx_ring->bufs_idr, skb, buf_id) { 455 idr_remove(&rx_ring->bufs_idr, buf_id); 456 /* XXX: Understand where internal driver does this dma_unmap 457 * of rxdma_buffer. 
458 */ 459 dma_unmap_single(ar->ab->dev, ATH11K_SKB_RXCB(skb)->paddr, 460 skb->len + skb_tailroom(skb), DMA_BIDIRECTIONAL); 461 dev_kfree_skb_any(skb); 462 } 463 464 idr_destroy(&rx_ring->bufs_idr); 465 spin_unlock_bh(&rx_ring->idr_lock); 466 467 return 0; 468 } 469 470 static int ath11k_dp_rxdma_pdev_buf_free(struct ath11k *ar) 471 { 472 struct ath11k_pdev_dp *dp = &ar->dp; 473 struct ath11k_base *ab = ar->ab; 474 struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring; 475 int i; 476 477 ath11k_dp_rxdma_buf_ring_free(ar, rx_ring); 478 479 rx_ring = &dp->rxdma_mon_buf_ring; 480 ath11k_dp_rxdma_buf_ring_free(ar, rx_ring); 481 482 for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) { 483 rx_ring = &dp->rx_mon_status_refill_ring[i]; 484 ath11k_dp_rxdma_buf_ring_free(ar, rx_ring); 485 } 486 487 return 0; 488 } 489 490 static int ath11k_dp_rxdma_ring_buf_setup(struct ath11k *ar, 491 struct dp_rxdma_ring *rx_ring, 492 u32 ringtype) 493 { 494 struct ath11k_pdev_dp *dp = &ar->dp; 495 int num_entries; 496 497 num_entries = rx_ring->refill_buf_ring.size / 498 ath11k_hal_srng_get_entrysize(ar->ab, ringtype); 499 500 rx_ring->bufs_max = num_entries; 501 ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id, rx_ring, num_entries, 502 ar->ab->hw_params.hal_params->rx_buf_rbm); 503 return 0; 504 } 505 506 static int ath11k_dp_rxdma_pdev_buf_setup(struct ath11k *ar) 507 { 508 struct ath11k_pdev_dp *dp = &ar->dp; 509 struct ath11k_base *ab = ar->ab; 510 struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring; 511 int i; 512 513 ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_BUF); 514 515 if (ar->ab->hw_params.rxdma1_enable) { 516 rx_ring = &dp->rxdma_mon_buf_ring; 517 ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_MONITOR_BUF); 518 } 519 520 for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) { 521 rx_ring = &dp->rx_mon_status_refill_ring[i]; 522 ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_MONITOR_STATUS); 523 } 524 525 return 0; 526 } 527 528 static void ath11k_dp_rx_pdev_srng_free(struct ath11k *ar) 529 { 530 struct ath11k_pdev_dp *dp = &ar->dp; 531 struct ath11k_base *ab = ar->ab; 532 int i; 533 534 ath11k_dp_srng_cleanup(ab, &dp->rx_refill_buf_ring.refill_buf_ring); 535 536 for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) { 537 if (ab->hw_params.rx_mac_buf_ring) 538 ath11k_dp_srng_cleanup(ab, &dp->rx_mac_buf_ring[i]); 539 540 ath11k_dp_srng_cleanup(ab, &dp->rxdma_err_dst_ring[i]); 541 ath11k_dp_srng_cleanup(ab, 542 &dp->rx_mon_status_refill_ring[i].refill_buf_ring); 543 } 544 545 ath11k_dp_srng_cleanup(ab, &dp->rxdma_mon_buf_ring.refill_buf_ring); 546 } 547 548 void ath11k_dp_pdev_reo_cleanup(struct ath11k_base *ab) 549 { 550 struct ath11k_dp *dp = &ab->dp; 551 int i; 552 553 for (i = 0; i < DP_REO_DST_RING_MAX; i++) 554 ath11k_dp_srng_cleanup(ab, &dp->reo_dst_ring[i]); 555 } 556 557 int ath11k_dp_pdev_reo_setup(struct ath11k_base *ab) 558 { 559 struct ath11k_dp *dp = &ab->dp; 560 int ret; 561 int i; 562 563 for (i = 0; i < DP_REO_DST_RING_MAX; i++) { 564 ret = ath11k_dp_srng_setup(ab, &dp->reo_dst_ring[i], 565 HAL_REO_DST, i, 0, 566 DP_REO_DST_RING_SIZE); 567 if (ret) { 568 ath11k_warn(ab, "failed to setup reo_dst_ring\n"); 569 goto err_reo_cleanup; 570 } 571 } 572 573 return 0; 574 575 err_reo_cleanup: 576 ath11k_dp_pdev_reo_cleanup(ab); 577 578 return ret; 579 } 580 581 static int ath11k_dp_rx_pdev_srng_alloc(struct ath11k *ar) 582 { 583 struct ath11k_pdev_dp *dp = &ar->dp; 584 struct ath11k_base *ab = ar->ab; 585 struct dp_srng *srng = NULL; 586 int i; 587 int ret; 588 
589 ret = ath11k_dp_srng_setup(ar->ab, 590 &dp->rx_refill_buf_ring.refill_buf_ring, 591 HAL_RXDMA_BUF, 0, 592 dp->mac_id, DP_RXDMA_BUF_RING_SIZE); 593 if (ret) { 594 ath11k_warn(ar->ab, "failed to setup rx_refill_buf_ring\n"); 595 return ret; 596 } 597 598 if (ar->ab->hw_params.rx_mac_buf_ring) { 599 for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) { 600 ret = ath11k_dp_srng_setup(ar->ab, 601 &dp->rx_mac_buf_ring[i], 602 HAL_RXDMA_BUF, 1, 603 dp->mac_id + i, 1024); 604 if (ret) { 605 ath11k_warn(ar->ab, "failed to setup rx_mac_buf_ring %d\n", 606 i); 607 return ret; 608 } 609 } 610 } 611 612 for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) { 613 ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_err_dst_ring[i], 614 HAL_RXDMA_DST, 0, dp->mac_id + i, 615 DP_RXDMA_ERR_DST_RING_SIZE); 616 if (ret) { 617 ath11k_warn(ar->ab, "failed to setup rxdma_err_dst_ring %d\n", i); 618 return ret; 619 } 620 } 621 622 for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) { 623 srng = &dp->rx_mon_status_refill_ring[i].refill_buf_ring; 624 ret = ath11k_dp_srng_setup(ar->ab, 625 srng, 626 HAL_RXDMA_MONITOR_STATUS, 0, dp->mac_id + i, 627 DP_RXDMA_MON_STATUS_RING_SIZE); 628 if (ret) { 629 ath11k_warn(ar->ab, 630 "failed to setup rx_mon_status_refill_ring %d\n", i); 631 return ret; 632 } 633 } 634 635 /* if rxdma1_enable is false, then it doesn't need 636 * to setup rxdam_mon_buf_ring, rxdma_mon_dst_ring 637 * and rxdma_mon_desc_ring. 638 * init reap timer for QCA6390. 639 */ 640 if (!ar->ab->hw_params.rxdma1_enable) { 641 //init mon status buffer reap timer 642 timer_setup(&ar->ab->mon_reap_timer, 643 ath11k_dp_service_mon_ring, 0); 644 return 0; 645 } 646 647 ret = ath11k_dp_srng_setup(ar->ab, 648 &dp->rxdma_mon_buf_ring.refill_buf_ring, 649 HAL_RXDMA_MONITOR_BUF, 0, dp->mac_id, 650 DP_RXDMA_MONITOR_BUF_RING_SIZE); 651 if (ret) { 652 ath11k_warn(ar->ab, 653 "failed to setup HAL_RXDMA_MONITOR_BUF\n"); 654 return ret; 655 } 656 657 ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_mon_dst_ring, 658 HAL_RXDMA_MONITOR_DST, 0, dp->mac_id, 659 DP_RXDMA_MONITOR_DST_RING_SIZE); 660 if (ret) { 661 ath11k_warn(ar->ab, 662 "failed to setup HAL_RXDMA_MONITOR_DST\n"); 663 return ret; 664 } 665 666 ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_mon_desc_ring, 667 HAL_RXDMA_MONITOR_DESC, 0, dp->mac_id, 668 DP_RXDMA_MONITOR_DESC_RING_SIZE); 669 if (ret) { 670 ath11k_warn(ar->ab, 671 "failed to setup HAL_RXDMA_MONITOR_DESC\n"); 672 return ret; 673 } 674 675 return 0; 676 } 677 678 void ath11k_dp_reo_cmd_list_cleanup(struct ath11k_base *ab) 679 { 680 struct ath11k_dp *dp = &ab->dp; 681 struct dp_reo_cmd *cmd, *tmp; 682 struct dp_reo_cache_flush_elem *cmd_cache, *tmp_cache; 683 684 spin_lock_bh(&dp->reo_cmd_lock); 685 list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) { 686 list_del(&cmd->list); 687 dma_unmap_single(ab->dev, cmd->data.paddr, 688 cmd->data.size, DMA_BIDIRECTIONAL); 689 kfree(cmd->data.vaddr); 690 kfree(cmd); 691 } 692 693 list_for_each_entry_safe(cmd_cache, tmp_cache, 694 &dp->reo_cmd_cache_flush_list, list) { 695 list_del(&cmd_cache->list); 696 dp->reo_cmd_cache_flush_count--; 697 dma_unmap_single(ab->dev, cmd_cache->data.paddr, 698 cmd_cache->data.size, DMA_BIDIRECTIONAL); 699 kfree(cmd_cache->data.vaddr); 700 kfree(cmd_cache); 701 } 702 spin_unlock_bh(&dp->reo_cmd_lock); 703 } 704 705 static void ath11k_dp_reo_cmd_free(struct ath11k_dp *dp, void *ctx, 706 enum hal_reo_cmd_status status) 707 { 708 struct dp_rx_tid *rx_tid = ctx; 709 710 if (status != HAL_REO_CMD_SUCCESS) 711 ath11k_warn(dp->ab, 
"failed to flush rx tid hw desc, tid %d status %d\n", 712 rx_tid->tid, status); 713 714 dma_unmap_single(dp->ab->dev, rx_tid->paddr, rx_tid->size, 715 DMA_BIDIRECTIONAL); 716 kfree(rx_tid->vaddr); 717 } 718 719 static void ath11k_dp_reo_cache_flush(struct ath11k_base *ab, 720 struct dp_rx_tid *rx_tid) 721 { 722 struct ath11k_hal_reo_cmd cmd = {0}; 723 unsigned long tot_desc_sz, desc_sz; 724 int ret; 725 726 tot_desc_sz = rx_tid->size; 727 desc_sz = ath11k_hal_reo_qdesc_size(0, HAL_DESC_REO_NON_QOS_TID); 728 729 while (tot_desc_sz > desc_sz) { 730 tot_desc_sz -= desc_sz; 731 cmd.addr_lo = lower_32_bits(rx_tid->paddr + tot_desc_sz); 732 cmd.addr_hi = upper_32_bits(rx_tid->paddr); 733 ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid, 734 HAL_REO_CMD_FLUSH_CACHE, &cmd, 735 NULL); 736 if (ret) 737 ath11k_warn(ab, 738 "failed to send HAL_REO_CMD_FLUSH_CACHE, tid %d (%d)\n", 739 rx_tid->tid, ret); 740 } 741 742 memset(&cmd, 0, sizeof(cmd)); 743 cmd.addr_lo = lower_32_bits(rx_tid->paddr); 744 cmd.addr_hi = upper_32_bits(rx_tid->paddr); 745 cmd.flag |= HAL_REO_CMD_FLG_NEED_STATUS; 746 ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid, 747 HAL_REO_CMD_FLUSH_CACHE, 748 &cmd, ath11k_dp_reo_cmd_free); 749 if (ret) { 750 ath11k_err(ab, "failed to send HAL_REO_CMD_FLUSH_CACHE cmd, tid %d (%d)\n", 751 rx_tid->tid, ret); 752 dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size, 753 DMA_BIDIRECTIONAL); 754 kfree(rx_tid->vaddr); 755 } 756 } 757 758 static void ath11k_dp_rx_tid_del_func(struct ath11k_dp *dp, void *ctx, 759 enum hal_reo_cmd_status status) 760 { 761 struct ath11k_base *ab = dp->ab; 762 struct dp_rx_tid *rx_tid = ctx; 763 struct dp_reo_cache_flush_elem *elem, *tmp; 764 765 if (status == HAL_REO_CMD_DRAIN) { 766 goto free_desc; 767 } else if (status != HAL_REO_CMD_SUCCESS) { 768 /* Shouldn't happen! Cleanup in case of other failure? 
*/ 769 ath11k_warn(ab, "failed to delete rx tid %d hw descriptor %d\n", 770 rx_tid->tid, status); 771 return; 772 } 773 774 elem = kzalloc(sizeof(*elem), GFP_ATOMIC); 775 if (!elem) 776 goto free_desc; 777 778 elem->ts = jiffies; 779 memcpy(&elem->data, rx_tid, sizeof(*rx_tid)); 780 781 spin_lock_bh(&dp->reo_cmd_lock); 782 list_add_tail(&elem->list, &dp->reo_cmd_cache_flush_list); 783 dp->reo_cmd_cache_flush_count++; 784 785 /* Flush and invalidate aged REO desc from HW cache */ 786 list_for_each_entry_safe(elem, tmp, &dp->reo_cmd_cache_flush_list, 787 list) { 788 if (dp->reo_cmd_cache_flush_count > DP_REO_DESC_FREE_THRESHOLD || 789 time_after(jiffies, elem->ts + 790 msecs_to_jiffies(DP_REO_DESC_FREE_TIMEOUT_MS))) { 791 list_del(&elem->list); 792 dp->reo_cmd_cache_flush_count--; 793 spin_unlock_bh(&dp->reo_cmd_lock); 794 795 ath11k_dp_reo_cache_flush(ab, &elem->data); 796 kfree(elem); 797 spin_lock_bh(&dp->reo_cmd_lock); 798 } 799 } 800 spin_unlock_bh(&dp->reo_cmd_lock); 801 802 return; 803 free_desc: 804 dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size, 805 DMA_BIDIRECTIONAL); 806 kfree(rx_tid->vaddr); 807 } 808 809 void ath11k_peer_rx_tid_delete(struct ath11k *ar, 810 struct ath11k_peer *peer, u8 tid) 811 { 812 struct ath11k_hal_reo_cmd cmd = {0}; 813 struct dp_rx_tid *rx_tid = &peer->rx_tid[tid]; 814 int ret; 815 816 if (!rx_tid->active) 817 return; 818 819 cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS; 820 cmd.addr_lo = lower_32_bits(rx_tid->paddr); 821 cmd.addr_hi = upper_32_bits(rx_tid->paddr); 822 cmd.upd0 |= HAL_REO_CMD_UPD0_VLD; 823 ret = ath11k_dp_tx_send_reo_cmd(ar->ab, rx_tid, 824 HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd, 825 ath11k_dp_rx_tid_del_func); 826 if (ret) { 827 ath11k_err(ar->ab, "failed to send HAL_REO_CMD_UPDATE_RX_QUEUE cmd, tid %d (%d)\n", 828 tid, ret); 829 dma_unmap_single(ar->ab->dev, rx_tid->paddr, rx_tid->size, 830 DMA_BIDIRECTIONAL); 831 kfree(rx_tid->vaddr); 832 } 833 834 rx_tid->active = false; 835 } 836 837 static int ath11k_dp_rx_link_desc_return(struct ath11k_base *ab, 838 u32 *link_desc, 839 enum hal_wbm_rel_bm_act action) 840 { 841 struct ath11k_dp *dp = &ab->dp; 842 struct hal_srng *srng; 843 u32 *desc; 844 int ret = 0; 845 846 srng = &ab->hal.srng_list[dp->wbm_desc_rel_ring.ring_id]; 847 848 spin_lock_bh(&srng->lock); 849 850 ath11k_hal_srng_access_begin(ab, srng); 851 852 desc = ath11k_hal_srng_src_get_next_entry(ab, srng); 853 if (!desc) { 854 ret = -ENOBUFS; 855 goto exit; 856 } 857 858 ath11k_hal_rx_msdu_link_desc_set(ab, (void *)desc, (void *)link_desc, 859 action); 860 861 exit: 862 ath11k_hal_srng_access_end(ab, srng); 863 864 spin_unlock_bh(&srng->lock); 865 866 return ret; 867 } 868 869 static void ath11k_dp_rx_frags_cleanup(struct dp_rx_tid *rx_tid, bool rel_link_desc) 870 { 871 struct ath11k_base *ab = rx_tid->ab; 872 873 lockdep_assert_held(&ab->base_lock); 874 875 if (rx_tid->dst_ring_desc) { 876 if (rel_link_desc) 877 ath11k_dp_rx_link_desc_return(ab, (u32 *)rx_tid->dst_ring_desc, 878 HAL_WBM_REL_BM_ACT_PUT_IN_IDLE); 879 kfree(rx_tid->dst_ring_desc); 880 rx_tid->dst_ring_desc = NULL; 881 } 882 883 rx_tid->cur_sn = 0; 884 rx_tid->last_frag_no = 0; 885 rx_tid->rx_frag_bitmap = 0; 886 __skb_queue_purge(&rx_tid->rx_frags); 887 } 888 889 void ath11k_peer_frags_flush(struct ath11k *ar, struct ath11k_peer *peer) 890 { 891 struct dp_rx_tid *rx_tid; 892 int i; 893 894 lockdep_assert_held(&ar->ab->base_lock); 895 896 for (i = 0; i <= IEEE80211_NUM_TIDS; i++) { 897 rx_tid = &peer->rx_tid[i]; 898 899 spin_unlock_bh(&ar->ab->base_lock); 900 
del_timer_sync(&rx_tid->frag_timer); 901 spin_lock_bh(&ar->ab->base_lock); 902 903 ath11k_dp_rx_frags_cleanup(rx_tid, true); 904 } 905 } 906 907 void ath11k_peer_rx_tid_cleanup(struct ath11k *ar, struct ath11k_peer *peer) 908 { 909 struct dp_rx_tid *rx_tid; 910 int i; 911 912 lockdep_assert_held(&ar->ab->base_lock); 913 914 for (i = 0; i <= IEEE80211_NUM_TIDS; i++) { 915 rx_tid = &peer->rx_tid[i]; 916 917 ath11k_peer_rx_tid_delete(ar, peer, i); 918 ath11k_dp_rx_frags_cleanup(rx_tid, true); 919 920 spin_unlock_bh(&ar->ab->base_lock); 921 del_timer_sync(&rx_tid->frag_timer); 922 spin_lock_bh(&ar->ab->base_lock); 923 } 924 } 925 926 static int ath11k_peer_rx_tid_reo_update(struct ath11k *ar, 927 struct ath11k_peer *peer, 928 struct dp_rx_tid *rx_tid, 929 u32 ba_win_sz, u16 ssn, 930 bool update_ssn) 931 { 932 struct ath11k_hal_reo_cmd cmd = {0}; 933 int ret; 934 935 cmd.addr_lo = lower_32_bits(rx_tid->paddr); 936 cmd.addr_hi = upper_32_bits(rx_tid->paddr); 937 cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS; 938 cmd.upd0 = HAL_REO_CMD_UPD0_BA_WINDOW_SIZE; 939 cmd.ba_window_size = ba_win_sz; 940 941 if (update_ssn) { 942 cmd.upd0 |= HAL_REO_CMD_UPD0_SSN; 943 cmd.upd2 = FIELD_PREP(HAL_REO_CMD_UPD2_SSN, ssn); 944 } 945 946 ret = ath11k_dp_tx_send_reo_cmd(ar->ab, rx_tid, 947 HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd, 948 NULL); 949 if (ret) { 950 ath11k_warn(ar->ab, "failed to update rx tid queue, tid %d (%d)\n", 951 rx_tid->tid, ret); 952 return ret; 953 } 954 955 rx_tid->ba_win_sz = ba_win_sz; 956 957 return 0; 958 } 959 960 static void ath11k_dp_rx_tid_mem_free(struct ath11k_base *ab, 961 const u8 *peer_mac, int vdev_id, u8 tid) 962 { 963 struct ath11k_peer *peer; 964 struct dp_rx_tid *rx_tid; 965 966 spin_lock_bh(&ab->base_lock); 967 968 peer = ath11k_peer_find(ab, vdev_id, peer_mac); 969 if (!peer) { 970 ath11k_warn(ab, "failed to find the peer to free up rx tid mem\n"); 971 goto unlock_exit; 972 } 973 974 rx_tid = &peer->rx_tid[tid]; 975 if (!rx_tid->active) 976 goto unlock_exit; 977 978 dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size, 979 DMA_BIDIRECTIONAL); 980 kfree(rx_tid->vaddr); 981 982 rx_tid->active = false; 983 984 unlock_exit: 985 spin_unlock_bh(&ab->base_lock); 986 } 987 988 int ath11k_peer_rx_tid_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id, 989 u8 tid, u32 ba_win_sz, u16 ssn, 990 enum hal_pn_type pn_type) 991 { 992 struct ath11k_base *ab = ar->ab; 993 struct ath11k_peer *peer; 994 struct dp_rx_tid *rx_tid; 995 u32 hw_desc_sz; 996 u32 *addr_aligned; 997 void *vaddr; 998 dma_addr_t paddr; 999 int ret; 1000 1001 spin_lock_bh(&ab->base_lock); 1002 1003 peer = ath11k_peer_find(ab, vdev_id, peer_mac); 1004 if (!peer) { 1005 ath11k_warn(ab, "failed to find the peer to set up rx tid\n"); 1006 spin_unlock_bh(&ab->base_lock); 1007 return -ENOENT; 1008 } 1009 1010 rx_tid = &peer->rx_tid[tid]; 1011 /* Update the tid queue if it is already setup */ 1012 if (rx_tid->active) { 1013 paddr = rx_tid->paddr; 1014 ret = ath11k_peer_rx_tid_reo_update(ar, peer, rx_tid, 1015 ba_win_sz, ssn, true); 1016 spin_unlock_bh(&ab->base_lock); 1017 if (ret) { 1018 ath11k_warn(ab, "failed to update reo for rx tid %d\n", tid); 1019 return ret; 1020 } 1021 1022 ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, 1023 peer_mac, paddr, 1024 tid, 1, ba_win_sz); 1025 if (ret) 1026 ath11k_warn(ab, "failed to send wmi command to update rx reorder queue, tid :%d (%d)\n", 1027 tid, ret); 1028 return ret; 1029 } 1030 1031 rx_tid->tid = tid; 1032 1033 rx_tid->ba_win_sz = ba_win_sz; 1034 1035 /* TODO: Optimize the 
memory allocation for qos tid based on 1036 * the actual BA window size in REO tid update path. 1037 */ 1038 if (tid == HAL_DESC_REO_NON_QOS_TID) 1039 hw_desc_sz = ath11k_hal_reo_qdesc_size(ba_win_sz, tid); 1040 else 1041 hw_desc_sz = ath11k_hal_reo_qdesc_size(DP_BA_WIN_SZ_MAX, tid); 1042 1043 vaddr = kzalloc(hw_desc_sz + HAL_LINK_DESC_ALIGN - 1, GFP_ATOMIC); 1044 if (!vaddr) { 1045 spin_unlock_bh(&ab->base_lock); 1046 return -ENOMEM; 1047 } 1048 1049 addr_aligned = PTR_ALIGN(vaddr, HAL_LINK_DESC_ALIGN); 1050 1051 ath11k_hal_reo_qdesc_setup(addr_aligned, tid, ba_win_sz, 1052 ssn, pn_type); 1053 1054 paddr = dma_map_single(ab->dev, addr_aligned, hw_desc_sz, 1055 DMA_BIDIRECTIONAL); 1056 1057 ret = dma_mapping_error(ab->dev, paddr); 1058 if (ret) { 1059 spin_unlock_bh(&ab->base_lock); 1060 goto err_mem_free; 1061 } 1062 1063 rx_tid->vaddr = vaddr; 1064 rx_tid->paddr = paddr; 1065 rx_tid->size = hw_desc_sz; 1066 rx_tid->active = true; 1067 1068 spin_unlock_bh(&ab->base_lock); 1069 1070 ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, peer_mac, 1071 paddr, tid, 1, ba_win_sz); 1072 if (ret) { 1073 ath11k_warn(ar->ab, "failed to setup rx reorder queue, tid :%d (%d)\n", 1074 tid, ret); 1075 ath11k_dp_rx_tid_mem_free(ab, peer_mac, vdev_id, tid); 1076 } 1077 1078 return ret; 1079 1080 err_mem_free: 1081 kfree(vaddr); 1082 1083 return ret; 1084 } 1085 1086 int ath11k_dp_rx_ampdu_start(struct ath11k *ar, 1087 struct ieee80211_ampdu_params *params) 1088 { 1089 struct ath11k_base *ab = ar->ab; 1090 struct ath11k_sta *arsta = (void *)params->sta->drv_priv; 1091 int vdev_id = arsta->arvif->vdev_id; 1092 int ret; 1093 1094 ret = ath11k_peer_rx_tid_setup(ar, params->sta->addr, vdev_id, 1095 params->tid, params->buf_size, 1096 params->ssn, arsta->pn_type); 1097 if (ret) 1098 ath11k_warn(ab, "failed to setup rx tid %d\n", ret); 1099 1100 return ret; 1101 } 1102 1103 int ath11k_dp_rx_ampdu_stop(struct ath11k *ar, 1104 struct ieee80211_ampdu_params *params) 1105 { 1106 struct ath11k_base *ab = ar->ab; 1107 struct ath11k_peer *peer; 1108 struct ath11k_sta *arsta = (void *)params->sta->drv_priv; 1109 int vdev_id = arsta->arvif->vdev_id; 1110 dma_addr_t paddr; 1111 bool active; 1112 int ret; 1113 1114 spin_lock_bh(&ab->base_lock); 1115 1116 peer = ath11k_peer_find(ab, vdev_id, params->sta->addr); 1117 if (!peer) { 1118 ath11k_warn(ab, "failed to find the peer to stop rx aggregation\n"); 1119 spin_unlock_bh(&ab->base_lock); 1120 return -ENOENT; 1121 } 1122 1123 paddr = peer->rx_tid[params->tid].paddr; 1124 active = peer->rx_tid[params->tid].active; 1125 1126 if (!active) { 1127 spin_unlock_bh(&ab->base_lock); 1128 return 0; 1129 } 1130 1131 ret = ath11k_peer_rx_tid_reo_update(ar, peer, peer->rx_tid, 1, 0, false); 1132 spin_unlock_bh(&ab->base_lock); 1133 if (ret) { 1134 ath11k_warn(ab, "failed to update reo for rx tid %d: %d\n", 1135 params->tid, ret); 1136 return ret; 1137 } 1138 1139 ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, 1140 params->sta->addr, paddr, 1141 params->tid, 1, 1); 1142 if (ret) 1143 ath11k_warn(ab, "failed to send wmi to delete rx tid %d\n", 1144 ret); 1145 1146 return ret; 1147 } 1148 1149 int ath11k_dp_peer_rx_pn_replay_config(struct ath11k_vif *arvif, 1150 const u8 *peer_addr, 1151 enum set_key_cmd key_cmd, 1152 struct ieee80211_key_conf *key) 1153 { 1154 struct ath11k *ar = arvif->ar; 1155 struct ath11k_base *ab = ar->ab; 1156 struct ath11k_hal_reo_cmd cmd = {0}; 1157 struct ath11k_peer *peer; 1158 struct dp_rx_tid *rx_tid; 1159 u8 tid; 1160 int ret = 0; 1161 1162 /* 
NOTE: Enable PN/TSC replay check offload only for unicast frames. 1163 * We use mac80211 PN/TSC replay check functionality for bcast/mcast 1164 * for now. 1165 */ 1166 if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) 1167 return 0; 1168 1169 cmd.flag |= HAL_REO_CMD_FLG_NEED_STATUS; 1170 cmd.upd0 |= HAL_REO_CMD_UPD0_PN | 1171 HAL_REO_CMD_UPD0_PN_SIZE | 1172 HAL_REO_CMD_UPD0_PN_VALID | 1173 HAL_REO_CMD_UPD0_PN_CHECK | 1174 HAL_REO_CMD_UPD0_SVLD; 1175 1176 switch (key->cipher) { 1177 case WLAN_CIPHER_SUITE_TKIP: 1178 case WLAN_CIPHER_SUITE_CCMP: 1179 case WLAN_CIPHER_SUITE_CCMP_256: 1180 case WLAN_CIPHER_SUITE_GCMP: 1181 case WLAN_CIPHER_SUITE_GCMP_256: 1182 if (key_cmd == SET_KEY) { 1183 cmd.upd1 |= HAL_REO_CMD_UPD1_PN_CHECK; 1184 cmd.pn_size = 48; 1185 } 1186 break; 1187 default: 1188 break; 1189 } 1190 1191 spin_lock_bh(&ab->base_lock); 1192 1193 peer = ath11k_peer_find(ab, arvif->vdev_id, peer_addr); 1194 if (!peer) { 1195 ath11k_warn(ab, "failed to find the peer to configure pn replay detection\n"); 1196 spin_unlock_bh(&ab->base_lock); 1197 return -ENOENT; 1198 } 1199 1200 for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) { 1201 rx_tid = &peer->rx_tid[tid]; 1202 if (!rx_tid->active) 1203 continue; 1204 cmd.addr_lo = lower_32_bits(rx_tid->paddr); 1205 cmd.addr_hi = upper_32_bits(rx_tid->paddr); 1206 ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid, 1207 HAL_REO_CMD_UPDATE_RX_QUEUE, 1208 &cmd, NULL); 1209 if (ret) { 1210 ath11k_warn(ab, "failed to configure rx tid %d queue for pn replay detection %d\n", 1211 tid, ret); 1212 break; 1213 } 1214 } 1215 1216 spin_unlock_bh(&ab->base_lock); 1217 1218 return ret; 1219 } 1220 1221 static inline int ath11k_get_ppdu_user_index(struct htt_ppdu_stats *ppdu_stats, 1222 u16 peer_id) 1223 { 1224 int i; 1225 1226 for (i = 0; i < HTT_PPDU_STATS_MAX_USERS - 1; i++) { 1227 if (ppdu_stats->user_stats[i].is_valid_peer_id) { 1228 if (peer_id == ppdu_stats->user_stats[i].peer_id) 1229 return i; 1230 } else { 1231 return i; 1232 } 1233 } 1234 1235 return -EINVAL; 1236 } 1237 1238 static int ath11k_htt_tlv_ppdu_stats_parse(struct ath11k_base *ab, 1239 u16 tag, u16 len, const void *ptr, 1240 void *data) 1241 { 1242 struct htt_ppdu_stats_info *ppdu_info; 1243 struct htt_ppdu_user_stats *user_stats; 1244 int cur_user; 1245 u16 peer_id; 1246 1247 ppdu_info = (struct htt_ppdu_stats_info *)data; 1248 1249 switch (tag) { 1250 case HTT_PPDU_STATS_TAG_COMMON: 1251 if (len < sizeof(struct htt_ppdu_stats_common)) { 1252 ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n", 1253 len, tag); 1254 return -EINVAL; 1255 } 1256 memcpy((void *)&ppdu_info->ppdu_stats.common, ptr, 1257 sizeof(struct htt_ppdu_stats_common)); 1258 break; 1259 case HTT_PPDU_STATS_TAG_USR_RATE: 1260 if (len < sizeof(struct htt_ppdu_stats_user_rate)) { 1261 ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n", 1262 len, tag); 1263 return -EINVAL; 1264 } 1265 1266 peer_id = ((struct htt_ppdu_stats_user_rate *)ptr)->sw_peer_id; 1267 cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats, 1268 peer_id); 1269 if (cur_user < 0) 1270 return -EINVAL; 1271 user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user]; 1272 user_stats->peer_id = peer_id; 1273 user_stats->is_valid_peer_id = true; 1274 memcpy((void *)&user_stats->rate, ptr, 1275 sizeof(struct htt_ppdu_stats_user_rate)); 1276 user_stats->tlv_flags |= BIT(tag); 1277 break; 1278 case HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON: 1279 if (len < sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn)) { 1280 ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n", 1281 len, tag); 1282 
return -EINVAL; 1283 } 1284 1285 peer_id = ((struct htt_ppdu_stats_usr_cmpltn_cmn *)ptr)->sw_peer_id; 1286 cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats, 1287 peer_id); 1288 if (cur_user < 0) 1289 return -EINVAL; 1290 user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user]; 1291 user_stats->peer_id = peer_id; 1292 user_stats->is_valid_peer_id = true; 1293 memcpy((void *)&user_stats->cmpltn_cmn, ptr, 1294 sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn)); 1295 user_stats->tlv_flags |= BIT(tag); 1296 break; 1297 case HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS: 1298 if (len < 1299 sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status)) { 1300 ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n", 1301 len, tag); 1302 return -EINVAL; 1303 } 1304 1305 peer_id = 1306 ((struct htt_ppdu_stats_usr_cmpltn_ack_ba_status *)ptr)->sw_peer_id; 1307 cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats, 1308 peer_id); 1309 if (cur_user < 0) 1310 return -EINVAL; 1311 user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user]; 1312 user_stats->peer_id = peer_id; 1313 user_stats->is_valid_peer_id = true; 1314 memcpy((void *)&user_stats->ack_ba, ptr, 1315 sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status)); 1316 user_stats->tlv_flags |= BIT(tag); 1317 break; 1318 } 1319 return 0; 1320 } 1321 1322 int ath11k_dp_htt_tlv_iter(struct ath11k_base *ab, const void *ptr, size_t len, 1323 int (*iter)(struct ath11k_base *ar, u16 tag, u16 len, 1324 const void *ptr, void *data), 1325 void *data) 1326 { 1327 const struct htt_tlv *tlv; 1328 const void *begin = ptr; 1329 u16 tlv_tag, tlv_len; 1330 int ret = -EINVAL; 1331 1332 while (len > 0) { 1333 if (len < sizeof(*tlv)) { 1334 ath11k_err(ab, "htt tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n", 1335 ptr - begin, len, sizeof(*tlv)); 1336 return -EINVAL; 1337 } 1338 tlv = (struct htt_tlv *)ptr; 1339 tlv_tag = FIELD_GET(HTT_TLV_TAG, tlv->header); 1340 tlv_len = FIELD_GET(HTT_TLV_LEN, tlv->header); 1341 ptr += sizeof(*tlv); 1342 len -= sizeof(*tlv); 1343 1344 if (tlv_len > len) { 1345 ath11k_err(ab, "htt tlv parse failure of tag %u at byte %zd (%zu bytes left, %u expected)\n", 1346 tlv_tag, ptr - begin, len, tlv_len); 1347 return -EINVAL; 1348 } 1349 ret = iter(ab, tlv_tag, tlv_len, ptr, data); 1350 if (ret == -ENOMEM) 1351 return ret; 1352 1353 ptr += tlv_len; 1354 len -= tlv_len; 1355 } 1356 return 0; 1357 } 1358 1359 static inline u32 ath11k_he_gi_to_nl80211_he_gi(u8 sgi) 1360 { 1361 u32 ret = 0; 1362 1363 switch (sgi) { 1364 case RX_MSDU_START_SGI_0_8_US: 1365 ret = NL80211_RATE_INFO_HE_GI_0_8; 1366 break; 1367 case RX_MSDU_START_SGI_1_6_US: 1368 ret = NL80211_RATE_INFO_HE_GI_1_6; 1369 break; 1370 case RX_MSDU_START_SGI_3_2_US: 1371 ret = NL80211_RATE_INFO_HE_GI_3_2; 1372 break; 1373 } 1374 1375 return ret; 1376 } 1377 1378 static void 1379 ath11k_update_per_peer_tx_stats(struct ath11k *ar, 1380 struct htt_ppdu_stats *ppdu_stats, u8 user) 1381 { 1382 struct ath11k_base *ab = ar->ab; 1383 struct ath11k_peer *peer; 1384 struct ieee80211_sta *sta; 1385 struct ath11k_sta *arsta; 1386 struct htt_ppdu_stats_user_rate *user_rate; 1387 struct ath11k_per_peer_tx_stats *peer_stats = &ar->peer_tx_stats; 1388 struct htt_ppdu_user_stats *usr_stats = &ppdu_stats->user_stats[user]; 1389 struct htt_ppdu_stats_common *common = &ppdu_stats->common; 1390 int ret; 1391 u8 flags, mcs, nss, bw, sgi, dcm, rate_idx = 0; 1392 u32 succ_bytes = 0; 1393 u16 rate = 0, succ_pkts = 0; 1394 u32 tx_duration = 0; 1395 u8 tid = HTT_PPDU_STATS_NON_QOS_TID; 1396 bool 
is_ampdu = false; 1397 1398 if (!usr_stats) 1399 return; 1400 1401 if (!(usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_RATE))) 1402 return; 1403 1404 if (usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON)) 1405 is_ampdu = 1406 HTT_USR_CMPLTN_IS_AMPDU(usr_stats->cmpltn_cmn.flags); 1407 1408 if (usr_stats->tlv_flags & 1409 BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS)) { 1410 succ_bytes = usr_stats->ack_ba.success_bytes; 1411 succ_pkts = FIELD_GET(HTT_PPDU_STATS_ACK_BA_INFO_NUM_MSDU_M, 1412 usr_stats->ack_ba.info); 1413 tid = FIELD_GET(HTT_PPDU_STATS_ACK_BA_INFO_TID_NUM, 1414 usr_stats->ack_ba.info); 1415 } 1416 1417 if (common->fes_duration_us) 1418 tx_duration = common->fes_duration_us; 1419 1420 user_rate = &usr_stats->rate; 1421 flags = HTT_USR_RATE_PREAMBLE(user_rate->rate_flags); 1422 bw = HTT_USR_RATE_BW(user_rate->rate_flags) - 2; 1423 nss = HTT_USR_RATE_NSS(user_rate->rate_flags) + 1; 1424 mcs = HTT_USR_RATE_MCS(user_rate->rate_flags); 1425 sgi = HTT_USR_RATE_GI(user_rate->rate_flags); 1426 dcm = HTT_USR_RATE_DCM(user_rate->rate_flags); 1427 1428 /* Note: If host configured fixed rates and in some other special 1429 * cases, the broadcast/management frames are sent in different rates. 1430 * Firmware rate's control to be skipped for this? 1431 */ 1432 1433 if (flags == WMI_RATE_PREAMBLE_HE && mcs > ATH11K_HE_MCS_MAX) { 1434 ath11k_warn(ab, "Invalid HE mcs %d peer stats", mcs); 1435 return; 1436 } 1437 1438 if (flags == WMI_RATE_PREAMBLE_VHT && mcs > ATH11K_VHT_MCS_MAX) { 1439 ath11k_warn(ab, "Invalid VHT mcs %d peer stats", mcs); 1440 return; 1441 } 1442 1443 if (flags == WMI_RATE_PREAMBLE_HT && (mcs > ATH11K_HT_MCS_MAX || nss < 1)) { 1444 ath11k_warn(ab, "Invalid HT mcs %d nss %d peer stats", 1445 mcs, nss); 1446 return; 1447 } 1448 1449 if (flags == WMI_RATE_PREAMBLE_CCK || flags == WMI_RATE_PREAMBLE_OFDM) { 1450 ret = ath11k_mac_hw_ratecode_to_legacy_rate(mcs, 1451 flags, 1452 &rate_idx, 1453 &rate); 1454 if (ret < 0) 1455 return; 1456 } 1457 1458 rcu_read_lock(); 1459 spin_lock_bh(&ab->base_lock); 1460 peer = ath11k_peer_find_by_id(ab, usr_stats->peer_id); 1461 1462 if (!peer || !peer->sta) { 1463 spin_unlock_bh(&ab->base_lock); 1464 rcu_read_unlock(); 1465 return; 1466 } 1467 1468 sta = peer->sta; 1469 arsta = (struct ath11k_sta *)sta->drv_priv; 1470 1471 memset(&arsta->txrate, 0, sizeof(arsta->txrate)); 1472 1473 switch (flags) { 1474 case WMI_RATE_PREAMBLE_OFDM: 1475 arsta->txrate.legacy = rate; 1476 break; 1477 case WMI_RATE_PREAMBLE_CCK: 1478 arsta->txrate.legacy = rate; 1479 break; 1480 case WMI_RATE_PREAMBLE_HT: 1481 arsta->txrate.mcs = mcs + 8 * (nss - 1); 1482 arsta->txrate.flags = RATE_INFO_FLAGS_MCS; 1483 if (sgi) 1484 arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI; 1485 break; 1486 case WMI_RATE_PREAMBLE_VHT: 1487 arsta->txrate.mcs = mcs; 1488 arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS; 1489 if (sgi) 1490 arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI; 1491 break; 1492 case WMI_RATE_PREAMBLE_HE: 1493 arsta->txrate.mcs = mcs; 1494 arsta->txrate.flags = RATE_INFO_FLAGS_HE_MCS; 1495 arsta->txrate.he_dcm = dcm; 1496 arsta->txrate.he_gi = ath11k_he_gi_to_nl80211_he_gi(sgi); 1497 arsta->txrate.he_ru_alloc = ath11k_he_ru_tones_to_nl80211_he_ru_alloc( 1498 (user_rate->ru_end - 1499 user_rate->ru_start) + 1); 1500 break; 1501 } 1502 1503 arsta->txrate.nss = nss; 1504 arsta->txrate.bw = ath11k_mac_bw_to_mac80211_bw(bw); 1505 arsta->tx_duration += tx_duration; 1506 memcpy(&arsta->last_txrate, &arsta->txrate, sizeof(struct rate_info)); 1507 1508 /* 
PPDU stats reported for mgmt packet doesn't have valid tx bytes. 1509 * So skip peer stats update for mgmt packets. 1510 */ 1511 if (tid < HTT_PPDU_STATS_NON_QOS_TID) { 1512 memset(peer_stats, 0, sizeof(*peer_stats)); 1513 peer_stats->succ_pkts = succ_pkts; 1514 peer_stats->succ_bytes = succ_bytes; 1515 peer_stats->is_ampdu = is_ampdu; 1516 peer_stats->duration = tx_duration; 1517 peer_stats->ba_fails = 1518 HTT_USR_CMPLTN_LONG_RETRY(usr_stats->cmpltn_cmn.flags) + 1519 HTT_USR_CMPLTN_SHORT_RETRY(usr_stats->cmpltn_cmn.flags); 1520 1521 if (ath11k_debugfs_is_extd_tx_stats_enabled(ar)) 1522 ath11k_debugfs_sta_add_tx_stats(arsta, peer_stats, rate_idx); 1523 } 1524 1525 spin_unlock_bh(&ab->base_lock); 1526 rcu_read_unlock(); 1527 } 1528 1529 static void ath11k_htt_update_ppdu_stats(struct ath11k *ar, 1530 struct htt_ppdu_stats *ppdu_stats) 1531 { 1532 u8 user; 1533 1534 for (user = 0; user < HTT_PPDU_STATS_MAX_USERS - 1; user++) 1535 ath11k_update_per_peer_tx_stats(ar, ppdu_stats, user); 1536 } 1537 1538 static 1539 struct htt_ppdu_stats_info *ath11k_dp_htt_get_ppdu_desc(struct ath11k *ar, 1540 u32 ppdu_id) 1541 { 1542 struct htt_ppdu_stats_info *ppdu_info; 1543 1544 spin_lock_bh(&ar->data_lock); 1545 if (!list_empty(&ar->ppdu_stats_info)) { 1546 list_for_each_entry(ppdu_info, &ar->ppdu_stats_info, list) { 1547 if (ppdu_info->ppdu_id == ppdu_id) { 1548 spin_unlock_bh(&ar->data_lock); 1549 return ppdu_info; 1550 } 1551 } 1552 1553 if (ar->ppdu_stat_list_depth > HTT_PPDU_DESC_MAX_DEPTH) { 1554 ppdu_info = list_first_entry(&ar->ppdu_stats_info, 1555 typeof(*ppdu_info), list); 1556 list_del(&ppdu_info->list); 1557 ar->ppdu_stat_list_depth--; 1558 ath11k_htt_update_ppdu_stats(ar, &ppdu_info->ppdu_stats); 1559 kfree(ppdu_info); 1560 } 1561 } 1562 spin_unlock_bh(&ar->data_lock); 1563 1564 ppdu_info = kzalloc(sizeof(*ppdu_info), GFP_ATOMIC); 1565 if (!ppdu_info) 1566 return NULL; 1567 1568 spin_lock_bh(&ar->data_lock); 1569 list_add_tail(&ppdu_info->list, &ar->ppdu_stats_info); 1570 ar->ppdu_stat_list_depth++; 1571 spin_unlock_bh(&ar->data_lock); 1572 1573 return ppdu_info; 1574 } 1575 1576 static int ath11k_htt_pull_ppdu_stats(struct ath11k_base *ab, 1577 struct sk_buff *skb) 1578 { 1579 struct ath11k_htt_ppdu_stats_msg *msg; 1580 struct htt_ppdu_stats_info *ppdu_info; 1581 struct ath11k *ar; 1582 int ret; 1583 u8 pdev_id; 1584 u32 ppdu_id, len; 1585 1586 msg = (struct ath11k_htt_ppdu_stats_msg *)skb->data; 1587 len = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PAYLOAD_SIZE, msg->info); 1588 pdev_id = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PDEV_ID, msg->info); 1589 ppdu_id = msg->ppdu_id; 1590 1591 rcu_read_lock(); 1592 ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id); 1593 if (!ar) { 1594 ret = -EINVAL; 1595 goto exit; 1596 } 1597 1598 if (ath11k_debugfs_is_pktlog_lite_mode_enabled(ar)) 1599 trace_ath11k_htt_ppdu_stats(ar, skb->data, len); 1600 1601 ppdu_info = ath11k_dp_htt_get_ppdu_desc(ar, ppdu_id); 1602 if (!ppdu_info) { 1603 ret = -EINVAL; 1604 goto exit; 1605 } 1606 1607 ppdu_info->ppdu_id = ppdu_id; 1608 ret = ath11k_dp_htt_tlv_iter(ab, msg->data, len, 1609 ath11k_htt_tlv_ppdu_stats_parse, 1610 (void *)ppdu_info); 1611 if (ret) { 1612 ath11k_warn(ab, "Failed to parse tlv %d\n", ret); 1613 goto exit; 1614 } 1615 1616 exit: 1617 rcu_read_unlock(); 1618 1619 return ret; 1620 } 1621 1622 static void ath11k_htt_pktlog(struct ath11k_base *ab, struct sk_buff *skb) 1623 { 1624 struct htt_pktlog_msg *data = (struct htt_pktlog_msg *)skb->data; 1625 struct ath_pktlog_hdr *hdr = (struct ath_pktlog_hdr *)data; 1626 struct 
ath11k *ar; 1627 u8 pdev_id; 1628 1629 pdev_id = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PDEV_ID, data->hdr); 1630 ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id); 1631 if (!ar) { 1632 ath11k_warn(ab, "invalid pdev id %d on htt pktlog\n", pdev_id); 1633 return; 1634 } 1635 1636 trace_ath11k_htt_pktlog(ar, data->payload, hdr->size, 1637 ar->ab->pktlog_defs_checksum); 1638 } 1639 1640 static void ath11k_htt_backpressure_event_handler(struct ath11k_base *ab, 1641 struct sk_buff *skb) 1642 { 1643 u32 *data = (u32 *)skb->data; 1644 u8 pdev_id, ring_type, ring_id, pdev_idx; 1645 u16 hp, tp; 1646 u32 backpressure_time; 1647 struct ath11k_bp_stats *bp_stats; 1648 1649 pdev_id = FIELD_GET(HTT_BACKPRESSURE_EVENT_PDEV_ID_M, *data); 1650 ring_type = FIELD_GET(HTT_BACKPRESSURE_EVENT_RING_TYPE_M, *data); 1651 ring_id = FIELD_GET(HTT_BACKPRESSURE_EVENT_RING_ID_M, *data); 1652 ++data; 1653 1654 hp = FIELD_GET(HTT_BACKPRESSURE_EVENT_HP_M, *data); 1655 tp = FIELD_GET(HTT_BACKPRESSURE_EVENT_TP_M, *data); 1656 ++data; 1657 1658 backpressure_time = *data; 1659 1660 ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "htt backpressure event, pdev %d, ring type %d,ring id %d, hp %d tp %d, backpressure time %d\n", 1661 pdev_id, ring_type, ring_id, hp, tp, backpressure_time); 1662 1663 if (ring_type == HTT_BACKPRESSURE_UMAC_RING_TYPE) { 1664 if (ring_id >= HTT_SW_UMAC_RING_IDX_MAX) 1665 return; 1666 1667 bp_stats = &ab->soc_stats.bp_stats.umac_ring_bp_stats[ring_id]; 1668 } else if (ring_type == HTT_BACKPRESSURE_LMAC_RING_TYPE) { 1669 pdev_idx = DP_HW2SW_MACID(pdev_id); 1670 1671 if (ring_id >= HTT_SW_LMAC_RING_IDX_MAX || pdev_idx >= MAX_RADIOS) 1672 return; 1673 1674 bp_stats = &ab->soc_stats.bp_stats.lmac_ring_bp_stats[ring_id][pdev_idx]; 1675 } else { 1676 ath11k_warn(ab, "unknown ring type received in htt bp event %d\n", 1677 ring_type); 1678 return; 1679 } 1680 1681 spin_lock_bh(&ab->base_lock); 1682 bp_stats->hp = hp; 1683 bp_stats->tp = tp; 1684 bp_stats->count++; 1685 bp_stats->jiffies = jiffies; 1686 spin_unlock_bh(&ab->base_lock); 1687 } 1688 1689 void ath11k_dp_htt_htc_t2h_msg_handler(struct ath11k_base *ab, 1690 struct sk_buff *skb) 1691 { 1692 struct ath11k_dp *dp = &ab->dp; 1693 struct htt_resp_msg *resp = (struct htt_resp_msg *)skb->data; 1694 enum htt_t2h_msg_type type = FIELD_GET(HTT_T2H_MSG_TYPE, *(u32 *)resp); 1695 u16 peer_id; 1696 u8 vdev_id; 1697 u8 mac_addr[ETH_ALEN]; 1698 u16 peer_mac_h16; 1699 u16 ast_hash; 1700 u16 hw_peer_id; 1701 1702 ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "dp_htt rx msg type :0x%0x\n", type); 1703 1704 switch (type) { 1705 case HTT_T2H_MSG_TYPE_VERSION_CONF: 1706 dp->htt_tgt_ver_major = FIELD_GET(HTT_T2H_VERSION_CONF_MAJOR, 1707 resp->version_msg.version); 1708 dp->htt_tgt_ver_minor = FIELD_GET(HTT_T2H_VERSION_CONF_MINOR, 1709 resp->version_msg.version); 1710 complete(&dp->htt_tgt_version_received); 1711 break; 1712 case HTT_T2H_MSG_TYPE_PEER_MAP: 1713 vdev_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_VDEV_ID, 1714 resp->peer_map_ev.info); 1715 peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_PEER_ID, 1716 resp->peer_map_ev.info); 1717 peer_mac_h16 = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16, 1718 resp->peer_map_ev.info1); 1719 ath11k_dp_get_mac_addr(resp->peer_map_ev.mac_addr_l32, 1720 peer_mac_h16, mac_addr); 1721 ath11k_peer_map_event(ab, vdev_id, peer_id, mac_addr, 0, 0); 1722 break; 1723 case HTT_T2H_MSG_TYPE_PEER_MAP2: 1724 vdev_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_VDEV_ID, 1725 resp->peer_map_ev.info); 1726 peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_PEER_ID, 1727 resp->peer_map_ev.info); 1728 
peer_mac_h16 = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16, 1729 resp->peer_map_ev.info1); 1730 ath11k_dp_get_mac_addr(resp->peer_map_ev.mac_addr_l32, 1731 peer_mac_h16, mac_addr); 1732 ast_hash = FIELD_GET(HTT_T2H_PEER_MAP_INFO2_AST_HASH_VAL, 1733 resp->peer_map_ev.info2); 1734 hw_peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_HW_PEER_ID, 1735 resp->peer_map_ev.info1); 1736 ath11k_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash, 1737 hw_peer_id); 1738 break; 1739 case HTT_T2H_MSG_TYPE_PEER_UNMAP: 1740 case HTT_T2H_MSG_TYPE_PEER_UNMAP2: 1741 peer_id = FIELD_GET(HTT_T2H_PEER_UNMAP_INFO_PEER_ID, 1742 resp->peer_unmap_ev.info); 1743 ath11k_peer_unmap_event(ab, peer_id); 1744 break; 1745 case HTT_T2H_MSG_TYPE_PPDU_STATS_IND: 1746 ath11k_htt_pull_ppdu_stats(ab, skb); 1747 break; 1748 case HTT_T2H_MSG_TYPE_EXT_STATS_CONF: 1749 ath11k_debugfs_htt_ext_stats_handler(ab, skb); 1750 break; 1751 case HTT_T2H_MSG_TYPE_PKTLOG: 1752 ath11k_htt_pktlog(ab, skb); 1753 break; 1754 case HTT_T2H_MSG_TYPE_BKPRESSURE_EVENT_IND: 1755 ath11k_htt_backpressure_event_handler(ab, skb); 1756 break; 1757 default: 1758 ath11k_warn(ab, "htt event %d not handled\n", type); 1759 break; 1760 } 1761 1762 dev_kfree_skb_any(skb); 1763 } 1764 1765 static int ath11k_dp_rx_msdu_coalesce(struct ath11k *ar, 1766 struct sk_buff_head *msdu_list, 1767 struct sk_buff *first, struct sk_buff *last, 1768 u8 l3pad_bytes, int msdu_len) 1769 { 1770 struct ath11k_base *ab = ar->ab; 1771 struct sk_buff *skb; 1772 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(first); 1773 int buf_first_hdr_len, buf_first_len; 1774 struct hal_rx_desc *ldesc; 1775 int space_extra, rem_len, buf_len; 1776 u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz; 1777 1778 /* As the msdu is spread across multiple rx buffers, 1779 * find the offset to the start of msdu for computing 1780 * the length of the msdu in the first buffer. 1781 */ 1782 buf_first_hdr_len = hal_rx_desc_sz + l3pad_bytes; 1783 buf_first_len = DP_RX_BUFFER_SIZE - buf_first_hdr_len; 1784 1785 if (WARN_ON_ONCE(msdu_len <= buf_first_len)) { 1786 skb_put(first, buf_first_hdr_len + msdu_len); 1787 skb_pull(first, buf_first_hdr_len); 1788 return 0; 1789 } 1790 1791 ldesc = (struct hal_rx_desc *)last->data; 1792 rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(ab, ldesc); 1793 rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(ab, ldesc); 1794 1795 /* MSDU spans over multiple buffers because the length of the MSDU 1796 * exceeds DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE. So assume the data 1797 * in the first buf is of length DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE. 1798 */ 1799 skb_put(first, DP_RX_BUFFER_SIZE); 1800 skb_pull(first, buf_first_hdr_len); 1801 1802 /* When an MSDU spread over multiple buffers attention, MSDU_END and 1803 * MPDU_END tlvs are valid only in the last buffer. Copy those tlvs. 
1804 */ 1805 ath11k_dp_rx_desc_end_tlv_copy(ab, rxcb->rx_desc, ldesc); 1806 1807 space_extra = msdu_len - (buf_first_len + skb_tailroom(first)); 1808 if (space_extra > 0 && 1809 (pskb_expand_head(first, 0, space_extra, GFP_ATOMIC) < 0)) { 1810 /* Free up all buffers of the MSDU */ 1811 while ((skb = __skb_dequeue(msdu_list)) != NULL) { 1812 rxcb = ATH11K_SKB_RXCB(skb); 1813 if (!rxcb->is_continuation) { 1814 dev_kfree_skb_any(skb); 1815 break; 1816 } 1817 dev_kfree_skb_any(skb); 1818 } 1819 return -ENOMEM; 1820 } 1821 1822 rem_len = msdu_len - buf_first_len; 1823 while ((skb = __skb_dequeue(msdu_list)) != NULL && rem_len > 0) { 1824 rxcb = ATH11K_SKB_RXCB(skb); 1825 if (rxcb->is_continuation) 1826 buf_len = DP_RX_BUFFER_SIZE - hal_rx_desc_sz; 1827 else 1828 buf_len = rem_len; 1829 1830 if (buf_len > (DP_RX_BUFFER_SIZE - hal_rx_desc_sz)) { 1831 WARN_ON_ONCE(1); 1832 dev_kfree_skb_any(skb); 1833 return -EINVAL; 1834 } 1835 1836 skb_put(skb, buf_len + hal_rx_desc_sz); 1837 skb_pull(skb, hal_rx_desc_sz); 1838 skb_copy_from_linear_data(skb, skb_put(first, buf_len), 1839 buf_len); 1840 dev_kfree_skb_any(skb); 1841 1842 rem_len -= buf_len; 1843 if (!rxcb->is_continuation) 1844 break; 1845 } 1846 1847 return 0; 1848 } 1849 1850 static struct sk_buff *ath11k_dp_rx_get_msdu_last_buf(struct sk_buff_head *msdu_list, 1851 struct sk_buff *first) 1852 { 1853 struct sk_buff *skb; 1854 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(first); 1855 1856 if (!rxcb->is_continuation) 1857 return first; 1858 1859 skb_queue_walk(msdu_list, skb) { 1860 rxcb = ATH11K_SKB_RXCB(skb); 1861 if (!rxcb->is_continuation) 1862 return skb; 1863 } 1864 1865 return NULL; 1866 } 1867 1868 static void ath11k_dp_rx_h_csum_offload(struct ath11k *ar, struct sk_buff *msdu) 1869 { 1870 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); 1871 struct rx_attention *rx_attention; 1872 bool ip_csum_fail, l4_csum_fail; 1873 1874 rx_attention = ath11k_dp_rx_get_attention(ar->ab, rxcb->rx_desc); 1875 ip_csum_fail = ath11k_dp_rx_h_attn_ip_cksum_fail(rx_attention); 1876 l4_csum_fail = ath11k_dp_rx_h_attn_l4_cksum_fail(rx_attention); 1877 1878 msdu->ip_summed = (ip_csum_fail || l4_csum_fail) ? 
1879 CHECKSUM_NONE : CHECKSUM_UNNECESSARY; 1880 } 1881 1882 static int ath11k_dp_rx_crypto_mic_len(struct ath11k *ar, 1883 enum hal_encrypt_type enctype) 1884 { 1885 switch (enctype) { 1886 case HAL_ENCRYPT_TYPE_OPEN: 1887 case HAL_ENCRYPT_TYPE_TKIP_NO_MIC: 1888 case HAL_ENCRYPT_TYPE_TKIP_MIC: 1889 return 0; 1890 case HAL_ENCRYPT_TYPE_CCMP_128: 1891 return IEEE80211_CCMP_MIC_LEN; 1892 case HAL_ENCRYPT_TYPE_CCMP_256: 1893 return IEEE80211_CCMP_256_MIC_LEN; 1894 case HAL_ENCRYPT_TYPE_GCMP_128: 1895 case HAL_ENCRYPT_TYPE_AES_GCMP_256: 1896 return IEEE80211_GCMP_MIC_LEN; 1897 case HAL_ENCRYPT_TYPE_WEP_40: 1898 case HAL_ENCRYPT_TYPE_WEP_104: 1899 case HAL_ENCRYPT_TYPE_WEP_128: 1900 case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4: 1901 case HAL_ENCRYPT_TYPE_WAPI: 1902 break; 1903 } 1904 1905 ath11k_warn(ar->ab, "unsupported encryption type %d for mic len\n", enctype); 1906 return 0; 1907 } 1908 1909 static int ath11k_dp_rx_crypto_param_len(struct ath11k *ar, 1910 enum hal_encrypt_type enctype) 1911 { 1912 switch (enctype) { 1913 case HAL_ENCRYPT_TYPE_OPEN: 1914 return 0; 1915 case HAL_ENCRYPT_TYPE_TKIP_NO_MIC: 1916 case HAL_ENCRYPT_TYPE_TKIP_MIC: 1917 return IEEE80211_TKIP_IV_LEN; 1918 case HAL_ENCRYPT_TYPE_CCMP_128: 1919 return IEEE80211_CCMP_HDR_LEN; 1920 case HAL_ENCRYPT_TYPE_CCMP_256: 1921 return IEEE80211_CCMP_256_HDR_LEN; 1922 case HAL_ENCRYPT_TYPE_GCMP_128: 1923 case HAL_ENCRYPT_TYPE_AES_GCMP_256: 1924 return IEEE80211_GCMP_HDR_LEN; 1925 case HAL_ENCRYPT_TYPE_WEP_40: 1926 case HAL_ENCRYPT_TYPE_WEP_104: 1927 case HAL_ENCRYPT_TYPE_WEP_128: 1928 case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4: 1929 case HAL_ENCRYPT_TYPE_WAPI: 1930 break; 1931 } 1932 1933 ath11k_warn(ar->ab, "unsupported encryption type %d\n", enctype); 1934 return 0; 1935 } 1936 1937 static int ath11k_dp_rx_crypto_icv_len(struct ath11k *ar, 1938 enum hal_encrypt_type enctype) 1939 { 1940 switch (enctype) { 1941 case HAL_ENCRYPT_TYPE_OPEN: 1942 case HAL_ENCRYPT_TYPE_CCMP_128: 1943 case HAL_ENCRYPT_TYPE_CCMP_256: 1944 case HAL_ENCRYPT_TYPE_GCMP_128: 1945 case HAL_ENCRYPT_TYPE_AES_GCMP_256: 1946 return 0; 1947 case HAL_ENCRYPT_TYPE_TKIP_NO_MIC: 1948 case HAL_ENCRYPT_TYPE_TKIP_MIC: 1949 return IEEE80211_TKIP_ICV_LEN; 1950 case HAL_ENCRYPT_TYPE_WEP_40: 1951 case HAL_ENCRYPT_TYPE_WEP_104: 1952 case HAL_ENCRYPT_TYPE_WEP_128: 1953 case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4: 1954 case HAL_ENCRYPT_TYPE_WAPI: 1955 break; 1956 } 1957 1958 ath11k_warn(ar->ab, "unsupported encryption type %d\n", enctype); 1959 return 0; 1960 } 1961 1962 static void ath11k_dp_rx_h_undecap_nwifi(struct ath11k *ar, 1963 struct sk_buff *msdu, 1964 u8 *first_hdr, 1965 enum hal_encrypt_type enctype, 1966 struct ieee80211_rx_status *status) 1967 { 1968 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); 1969 u8 decap_hdr[DP_MAX_NWIFI_HDR_LEN]; 1970 struct ieee80211_hdr *hdr; 1971 size_t hdr_len; 1972 u8 da[ETH_ALEN]; 1973 u8 sa[ETH_ALEN]; 1974 u16 qos_ctl = 0; 1975 u8 *qos; 1976 1977 /* copy SA & DA and pull decapped header */ 1978 hdr = (struct ieee80211_hdr *)msdu->data; 1979 hdr_len = ieee80211_hdrlen(hdr->frame_control); 1980 ether_addr_copy(da, ieee80211_get_DA(hdr)); 1981 ether_addr_copy(sa, ieee80211_get_SA(hdr)); 1982 skb_pull(msdu, ieee80211_hdrlen(hdr->frame_control)); 1983 1984 if (rxcb->is_first_msdu) { 1985 /* original 802.11 header is valid for the first msdu 1986 * hence we can reuse the same header 1987 */ 1988 hdr = (struct ieee80211_hdr *)first_hdr; 1989 hdr_len = ieee80211_hdrlen(hdr->frame_control); 1990 1991 /* Each A-MSDU subframe will be reported as a separate MSDU, 1992 
* so strip the A-MSDU bit from QoS Ctl. 1993 */ 1994 if (ieee80211_is_data_qos(hdr->frame_control)) { 1995 qos = ieee80211_get_qos_ctl(hdr); 1996 qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT; 1997 } 1998 } else { 1999 /* Rebuild qos header if this is a middle/last msdu */ 2000 hdr->frame_control |= __cpu_to_le16(IEEE80211_STYPE_QOS_DATA); 2001 2002 /* Reset the order bit as the HT_Control header is stripped */ 2003 hdr->frame_control &= ~(__cpu_to_le16(IEEE80211_FCTL_ORDER)); 2004 2005 qos_ctl = rxcb->tid; 2006 2007 if (ath11k_dp_rx_h_msdu_start_mesh_ctl_present(ar->ab, rxcb->rx_desc)) 2008 qos_ctl |= IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT; 2009 2010 /* TODO Add other QoS ctl fields when required */ 2011 2012 /* copy decap header before overwriting for reuse below */ 2013 memcpy(decap_hdr, (uint8_t *)hdr, hdr_len); 2014 } 2015 2016 if (!(status->flag & RX_FLAG_IV_STRIPPED)) { 2017 memcpy(skb_push(msdu, 2018 ath11k_dp_rx_crypto_param_len(ar, enctype)), 2019 (void *)hdr + hdr_len, 2020 ath11k_dp_rx_crypto_param_len(ar, enctype)); 2021 } 2022 2023 if (!rxcb->is_first_msdu) { 2024 memcpy(skb_push(msdu, 2025 IEEE80211_QOS_CTL_LEN), &qos_ctl, 2026 IEEE80211_QOS_CTL_LEN); 2027 memcpy(skb_push(msdu, hdr_len), decap_hdr, hdr_len); 2028 return; 2029 } 2030 2031 memcpy(skb_push(msdu, hdr_len), hdr, hdr_len); 2032 2033 /* original 802.11 header has a different DA and in 2034 * case of 4addr it may also have different SA 2035 */ 2036 hdr = (struct ieee80211_hdr *)msdu->data; 2037 ether_addr_copy(ieee80211_get_DA(hdr), da); 2038 ether_addr_copy(ieee80211_get_SA(hdr), sa); 2039 } 2040 2041 static void ath11k_dp_rx_h_undecap_raw(struct ath11k *ar, struct sk_buff *msdu, 2042 enum hal_encrypt_type enctype, 2043 struct ieee80211_rx_status *status, 2044 bool decrypted) 2045 { 2046 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); 2047 struct ieee80211_hdr *hdr; 2048 size_t hdr_len; 2049 size_t crypto_len; 2050 2051 if (!rxcb->is_first_msdu || 2052 !(rxcb->is_first_msdu && rxcb->is_last_msdu)) { 2053 WARN_ON_ONCE(1); 2054 return; 2055 } 2056 2057 skb_trim(msdu, msdu->len - FCS_LEN); 2058 2059 if (!decrypted) 2060 return; 2061 2062 hdr = (void *)msdu->data; 2063 2064 /* Tail */ 2065 if (status->flag & RX_FLAG_IV_STRIPPED) { 2066 skb_trim(msdu, msdu->len - 2067 ath11k_dp_rx_crypto_mic_len(ar, enctype)); 2068 2069 skb_trim(msdu, msdu->len - 2070 ath11k_dp_rx_crypto_icv_len(ar, enctype)); 2071 } else { 2072 /* MIC */ 2073 if (status->flag & RX_FLAG_MIC_STRIPPED) 2074 skb_trim(msdu, msdu->len - 2075 ath11k_dp_rx_crypto_mic_len(ar, enctype)); 2076 2077 /* ICV */ 2078 if (status->flag & RX_FLAG_ICV_STRIPPED) 2079 skb_trim(msdu, msdu->len - 2080 ath11k_dp_rx_crypto_icv_len(ar, enctype)); 2081 } 2082 2083 /* MMIC */ 2084 if ((status->flag & RX_FLAG_MMIC_STRIPPED) && 2085 !ieee80211_has_morefrags(hdr->frame_control) && 2086 enctype == HAL_ENCRYPT_TYPE_TKIP_MIC) 2087 skb_trim(msdu, msdu->len - IEEE80211_CCMP_MIC_LEN); 2088 2089 /* Head */ 2090 if (status->flag & RX_FLAG_IV_STRIPPED) { 2091 hdr_len = ieee80211_hdrlen(hdr->frame_control); 2092 crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype); 2093 2094 memmove((void *)msdu->data + crypto_len, 2095 (void *)msdu->data, hdr_len); 2096 skb_pull(msdu, crypto_len); 2097 } 2098 } 2099 2100 static void *ath11k_dp_rx_h_find_rfc1042(struct ath11k *ar, 2101 struct sk_buff *msdu, 2102 enum hal_encrypt_type enctype) 2103 { 2104 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); 2105 struct ieee80211_hdr *hdr; 2106 size_t hdr_len, crypto_len; 2107 void *rfc1042; 2108 bool 
is_amsdu; 2109 2110 is_amsdu = !(rxcb->is_first_msdu && rxcb->is_last_msdu); 2111 hdr = (struct ieee80211_hdr *)ath11k_dp_rx_h_80211_hdr(ar->ab, rxcb->rx_desc); 2112 rfc1042 = hdr; 2113 2114 if (rxcb->is_first_msdu) { 2115 hdr_len = ieee80211_hdrlen(hdr->frame_control); 2116 crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype); 2117 2118 rfc1042 += hdr_len + crypto_len; 2119 } 2120 2121 if (is_amsdu) 2122 rfc1042 += sizeof(struct ath11k_dp_amsdu_subframe_hdr); 2123 2124 return rfc1042; 2125 } 2126 2127 static void ath11k_dp_rx_h_undecap_eth(struct ath11k *ar, 2128 struct sk_buff *msdu, 2129 u8 *first_hdr, 2130 enum hal_encrypt_type enctype, 2131 struct ieee80211_rx_status *status) 2132 { 2133 struct ieee80211_hdr *hdr; 2134 struct ethhdr *eth; 2135 size_t hdr_len; 2136 u8 da[ETH_ALEN]; 2137 u8 sa[ETH_ALEN]; 2138 void *rfc1042; 2139 2140 rfc1042 = ath11k_dp_rx_h_find_rfc1042(ar, msdu, enctype); 2141 if (WARN_ON_ONCE(!rfc1042)) 2142 return; 2143 2144 /* pull decapped header and copy SA & DA */ 2145 eth = (struct ethhdr *)msdu->data; 2146 ether_addr_copy(da, eth->h_dest); 2147 ether_addr_copy(sa, eth->h_source); 2148 skb_pull(msdu, sizeof(struct ethhdr)); 2149 2150 /* push rfc1042/llc/snap */ 2151 memcpy(skb_push(msdu, sizeof(struct ath11k_dp_rfc1042_hdr)), rfc1042, 2152 sizeof(struct ath11k_dp_rfc1042_hdr)); 2153 2154 /* push original 802.11 header */ 2155 hdr = (struct ieee80211_hdr *)first_hdr; 2156 hdr_len = ieee80211_hdrlen(hdr->frame_control); 2157 2158 if (!(status->flag & RX_FLAG_IV_STRIPPED)) { 2159 memcpy(skb_push(msdu, 2160 ath11k_dp_rx_crypto_param_len(ar, enctype)), 2161 (void *)hdr + hdr_len, 2162 ath11k_dp_rx_crypto_param_len(ar, enctype)); 2163 } 2164 2165 memcpy(skb_push(msdu, hdr_len), hdr, hdr_len); 2166 2167 /* original 802.11 header has a different DA and in 2168 * case of 4addr it may also have different SA 2169 */ 2170 hdr = (struct ieee80211_hdr *)msdu->data; 2171 ether_addr_copy(ieee80211_get_DA(hdr), da); 2172 ether_addr_copy(ieee80211_get_SA(hdr), sa); 2173 } 2174 2175 static void ath11k_dp_rx_h_undecap(struct ath11k *ar, struct sk_buff *msdu, 2176 struct hal_rx_desc *rx_desc, 2177 enum hal_encrypt_type enctype, 2178 struct ieee80211_rx_status *status, 2179 bool decrypted) 2180 { 2181 u8 *first_hdr; 2182 u8 decap; 2183 struct ethhdr *ehdr; 2184 2185 first_hdr = ath11k_dp_rx_h_80211_hdr(ar->ab, rx_desc); 2186 decap = ath11k_dp_rx_h_msdu_start_decap_type(ar->ab, rx_desc); 2187 2188 switch (decap) { 2189 case DP_RX_DECAP_TYPE_NATIVE_WIFI: 2190 ath11k_dp_rx_h_undecap_nwifi(ar, msdu, first_hdr, 2191 enctype, status); 2192 break; 2193 case DP_RX_DECAP_TYPE_RAW: 2194 ath11k_dp_rx_h_undecap_raw(ar, msdu, enctype, status, 2195 decrypted); 2196 break; 2197 case DP_RX_DECAP_TYPE_ETHERNET2_DIX: 2198 ehdr = (struct ethhdr *)msdu->data; 2199 2200 /* mac80211 allows fast path only for authorized STA */ 2201 if (ehdr->h_proto == cpu_to_be16(ETH_P_PAE)) { 2202 ATH11K_SKB_RXCB(msdu)->is_eapol = true; 2203 ath11k_dp_rx_h_undecap_eth(ar, msdu, first_hdr, 2204 enctype, status); 2205 break; 2206 } 2207 2208 /* PN for mcast packets will be validated in mac80211; 2209 * remove eth header and add 802.11 header. 
2210 */ 2211 if (ATH11K_SKB_RXCB(msdu)->is_mcbc && decrypted) 2212 ath11k_dp_rx_h_undecap_eth(ar, msdu, first_hdr, 2213 enctype, status); 2214 break; 2215 case DP_RX_DECAP_TYPE_8023: 2216 /* TODO: Handle undecap for these formats */ 2217 break; 2218 } 2219 } 2220 2221 static struct ath11k_peer * 2222 ath11k_dp_rx_h_find_peer(struct ath11k_base *ab, struct sk_buff *msdu) 2223 { 2224 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); 2225 struct hal_rx_desc *rx_desc = rxcb->rx_desc; 2226 struct ath11k_peer *peer = NULL; 2227 2228 lockdep_assert_held(&ab->base_lock); 2229 2230 if (rxcb->peer_id) 2231 peer = ath11k_peer_find_by_id(ab, rxcb->peer_id); 2232 2233 if (peer) 2234 return peer; 2235 2236 if (!rx_desc || !(ath11k_dp_rxdesc_mac_addr2_valid(ab, rx_desc))) 2237 return NULL; 2238 2239 peer = ath11k_peer_find_by_addr(ab, 2240 ath11k_dp_rxdesc_mpdu_start_addr2(ab, rx_desc)); 2241 return peer; 2242 } 2243 2244 static void ath11k_dp_rx_h_mpdu(struct ath11k *ar, 2245 struct sk_buff *msdu, 2246 struct hal_rx_desc *rx_desc, 2247 struct ieee80211_rx_status *rx_status) 2248 { 2249 bool fill_crypto_hdr; 2250 enum hal_encrypt_type enctype; 2251 bool is_decrypted = false; 2252 struct ath11k_skb_rxcb *rxcb; 2253 struct ieee80211_hdr *hdr; 2254 struct ath11k_peer *peer; 2255 struct rx_attention *rx_attention; 2256 u32 err_bitmap; 2257 2258 /* PN for multicast packets will be checked in mac80211 */ 2259 rxcb = ATH11K_SKB_RXCB(msdu); 2260 fill_crypto_hdr = ath11k_dp_rx_h_attn_is_mcbc(ar->ab, rx_desc); 2261 rxcb->is_mcbc = fill_crypto_hdr; 2262 2263 if (rxcb->is_mcbc) { 2264 rxcb->peer_id = ath11k_dp_rx_h_mpdu_start_peer_id(ar->ab, rx_desc); 2265 rxcb->seq_no = ath11k_dp_rx_h_mpdu_start_seq_no(ar->ab, rx_desc); 2266 } 2267 2268 spin_lock_bh(&ar->ab->base_lock); 2269 peer = ath11k_dp_rx_h_find_peer(ar->ab, msdu); 2270 if (peer) { 2271 if (rxcb->is_mcbc) 2272 enctype = peer->sec_type_grp; 2273 else 2274 enctype = peer->sec_type; 2275 } else { 2276 enctype = ath11k_dp_rx_h_mpdu_start_enctype(ar->ab, rx_desc); 2277 } 2278 spin_unlock_bh(&ar->ab->base_lock); 2279 2280 rx_attention = ath11k_dp_rx_get_attention(ar->ab, rx_desc); 2281 err_bitmap = ath11k_dp_rx_h_attn_mpdu_err(rx_attention); 2282 if (enctype != HAL_ENCRYPT_TYPE_OPEN && !err_bitmap) 2283 is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_attention); 2284 2285 /* Clear per-MPDU flags while leaving per-PPDU flags intact */ 2286 rx_status->flag &= ~(RX_FLAG_FAILED_FCS_CRC | 2287 RX_FLAG_MMIC_ERROR | 2288 RX_FLAG_DECRYPTED | 2289 RX_FLAG_IV_STRIPPED | 2290 RX_FLAG_MMIC_STRIPPED); 2291 2292 if (err_bitmap & DP_RX_MPDU_ERR_FCS) 2293 rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; 2294 if (err_bitmap & DP_RX_MPDU_ERR_TKIP_MIC) 2295 rx_status->flag |= RX_FLAG_MMIC_ERROR; 2296 2297 if (is_decrypted) { 2298 rx_status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MMIC_STRIPPED; 2299 2300 if (fill_crypto_hdr) 2301 rx_status->flag |= RX_FLAG_MIC_STRIPPED | 2302 RX_FLAG_ICV_STRIPPED; 2303 else 2304 rx_status->flag |= RX_FLAG_IV_STRIPPED | 2305 RX_FLAG_PN_VALIDATED; 2306 } 2307 2308 ath11k_dp_rx_h_csum_offload(ar, msdu); 2309 ath11k_dp_rx_h_undecap(ar, msdu, rx_desc, 2310 enctype, rx_status, is_decrypted); 2311 2312 if (!is_decrypted || fill_crypto_hdr) 2313 return; 2314 2315 if (ath11k_dp_rx_h_msdu_start_decap_type(ar->ab, rx_desc) != 2316 DP_RX_DECAP_TYPE_ETHERNET2_DIX) { 2317 hdr = (void *)msdu->data; 2318 hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED); 2319 } 2320 } 2321 2322 static void ath11k_dp_rx_h_rate(struct ath11k *ar, struct hal_rx_desc *rx_desc, 
2323 struct ieee80211_rx_status *rx_status) 2324 { 2325 struct ieee80211_supported_band *sband; 2326 enum rx_msdu_start_pkt_type pkt_type; 2327 u8 bw; 2328 u8 rate_mcs, nss; 2329 u8 sgi; 2330 bool is_cck; 2331 2332 pkt_type = ath11k_dp_rx_h_msdu_start_pkt_type(ar->ab, rx_desc); 2333 bw = ath11k_dp_rx_h_msdu_start_rx_bw(ar->ab, rx_desc); 2334 rate_mcs = ath11k_dp_rx_h_msdu_start_rate_mcs(ar->ab, rx_desc); 2335 nss = ath11k_dp_rx_h_msdu_start_nss(ar->ab, rx_desc); 2336 sgi = ath11k_dp_rx_h_msdu_start_sgi(ar->ab, rx_desc); 2337 2338 switch (pkt_type) { 2339 case RX_MSDU_START_PKT_TYPE_11A: 2340 case RX_MSDU_START_PKT_TYPE_11B: 2341 is_cck = (pkt_type == RX_MSDU_START_PKT_TYPE_11B); 2342 sband = &ar->mac.sbands[rx_status->band]; 2343 rx_status->rate_idx = ath11k_mac_hw_rate_to_idx(sband, rate_mcs, 2344 is_cck); 2345 break; 2346 case RX_MSDU_START_PKT_TYPE_11N: 2347 rx_status->encoding = RX_ENC_HT; 2348 if (rate_mcs > ATH11K_HT_MCS_MAX) { 2349 ath11k_warn(ar->ab, 2350 "Received with invalid mcs in HT mode %d\n", 2351 rate_mcs); 2352 break; 2353 } 2354 rx_status->rate_idx = rate_mcs + (8 * (nss - 1)); 2355 if (sgi) 2356 rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI; 2357 rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw); 2358 break; 2359 case RX_MSDU_START_PKT_TYPE_11AC: 2360 rx_status->encoding = RX_ENC_VHT; 2361 rx_status->rate_idx = rate_mcs; 2362 if (rate_mcs > ATH11K_VHT_MCS_MAX) { 2363 ath11k_warn(ar->ab, 2364 "Received with invalid mcs in VHT mode %d\n", 2365 rate_mcs); 2366 break; 2367 } 2368 rx_status->nss = nss; 2369 if (sgi) 2370 rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI; 2371 rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw); 2372 break; 2373 case RX_MSDU_START_PKT_TYPE_11AX: 2374 rx_status->rate_idx = rate_mcs; 2375 if (rate_mcs > ATH11K_HE_MCS_MAX) { 2376 ath11k_warn(ar->ab, 2377 "Received with invalid mcs in HE mode %d\n", 2378 rate_mcs); 2379 break; 2380 } 2381 rx_status->encoding = RX_ENC_HE; 2382 rx_status->nss = nss; 2383 rx_status->he_gi = ath11k_he_gi_to_nl80211_he_gi(sgi); 2384 rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw); 2385 break; 2386 } 2387 } 2388 2389 static void ath11k_dp_rx_h_ppdu(struct ath11k *ar, struct hal_rx_desc *rx_desc, 2390 struct ieee80211_rx_status *rx_status) 2391 { 2392 u8 channel_num; 2393 u32 center_freq, meta_data; 2394 struct ieee80211_channel *channel; 2395 2396 rx_status->freq = 0; 2397 rx_status->rate_idx = 0; 2398 rx_status->nss = 0; 2399 rx_status->encoding = RX_ENC_LEGACY; 2400 rx_status->bw = RATE_INFO_BW_20; 2401 2402 rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL; 2403 2404 meta_data = ath11k_dp_rx_h_msdu_start_freq(ar->ab, rx_desc); 2405 channel_num = meta_data; 2406 center_freq = meta_data >> 16; 2407 2408 if (center_freq >= ATH11K_MIN_6G_FREQ && 2409 center_freq <= ATH11K_MAX_6G_FREQ) { 2410 rx_status->band = NL80211_BAND_6GHZ; 2411 rx_status->freq = center_freq; 2412 } else if (channel_num >= 1 && channel_num <= 14) { 2413 rx_status->band = NL80211_BAND_2GHZ; 2414 } else if (channel_num >= 36 && channel_num <= 173) { 2415 rx_status->band = NL80211_BAND_5GHZ; 2416 } else { 2417 spin_lock_bh(&ar->data_lock); 2418 channel = ar->rx_channel; 2419 if (channel) { 2420 rx_status->band = channel->band; 2421 channel_num = 2422 ieee80211_frequency_to_channel(channel->center_freq); 2423 } 2424 spin_unlock_bh(&ar->data_lock); 2425 ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "rx_desc: ", 2426 rx_desc, sizeof(struct hal_rx_desc)); 2427 } 2428 2429 if (rx_status->band != NL80211_BAND_6GHZ) 2430 rx_status->freq = 
ieee80211_channel_to_frequency(channel_num, 2431 rx_status->band); 2432 2433 ath11k_dp_rx_h_rate(ar, rx_desc, rx_status); 2434 } 2435 2436 static void ath11k_dp_rx_deliver_msdu(struct ath11k *ar, struct napi_struct *napi, 2437 struct sk_buff *msdu, 2438 struct ieee80211_rx_status *status) 2439 { 2440 static const struct ieee80211_radiotap_he known = { 2441 .data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN | 2442 IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN), 2443 .data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN), 2444 }; 2445 struct ieee80211_rx_status *rx_status; 2446 struct ieee80211_radiotap_he *he = NULL; 2447 struct ieee80211_sta *pubsta = NULL; 2448 struct ath11k_peer *peer; 2449 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); 2450 u8 decap = DP_RX_DECAP_TYPE_RAW; 2451 bool is_mcbc = rxcb->is_mcbc; 2452 bool is_eapol = rxcb->is_eapol; 2453 2454 if (status->encoding == RX_ENC_HE && 2455 !(status->flag & RX_FLAG_RADIOTAP_HE) && 2456 !(status->flag & RX_FLAG_SKIP_MONITOR)) { 2457 he = skb_push(msdu, sizeof(known)); 2458 memcpy(he, &known, sizeof(known)); 2459 status->flag |= RX_FLAG_RADIOTAP_HE; 2460 } 2461 2462 if (!(status->flag & RX_FLAG_ONLY_MONITOR)) 2463 decap = ath11k_dp_rx_h_msdu_start_decap_type(ar->ab, rxcb->rx_desc); 2464 2465 spin_lock_bh(&ar->ab->base_lock); 2466 peer = ath11k_dp_rx_h_find_peer(ar->ab, msdu); 2467 if (peer && peer->sta) 2468 pubsta = peer->sta; 2469 spin_unlock_bh(&ar->ab->base_lock); 2470 2471 ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 2472 "rx skb %pK len %u peer %pM %d %s sn %u %s%s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n", 2473 msdu, 2474 msdu->len, 2475 peer ? peer->addr : NULL, 2476 rxcb->tid, 2477 is_mcbc ? "mcast" : "ucast", 2478 rxcb->seq_no, 2479 (status->encoding == RX_ENC_LEGACY) ? "legacy" : "", 2480 (status->encoding == RX_ENC_HT) ? "ht" : "", 2481 (status->encoding == RX_ENC_VHT) ? "vht" : "", 2482 (status->encoding == RX_ENC_HE) ? "he" : "", 2483 (status->bw == RATE_INFO_BW_40) ? "40" : "", 2484 (status->bw == RATE_INFO_BW_80) ? "80" : "", 2485 (status->bw == RATE_INFO_BW_160) ? "160" : "", 2486 status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "", 2487 status->rate_idx, 2488 status->nss, 2489 status->freq, 2490 status->band, status->flag, 2491 !!(status->flag & RX_FLAG_FAILED_FCS_CRC), 2492 !!(status->flag & RX_FLAG_MMIC_ERROR), 2493 !!(status->flag & RX_FLAG_AMSDU_MORE)); 2494 2495 ath11k_dbg_dump(ar->ab, ATH11K_DBG_DP_RX, NULL, "dp rx msdu: ", 2496 msdu->data, msdu->len); 2497 2498 rx_status = IEEE80211_SKB_RXCB(msdu); 2499 *rx_status = *status; 2500 2501 /* TODO: trace rx packet */ 2502 2503 /* The PN for multicast packets is not validated in HW, 2504 * so skip the 802.3 rx path. 2505 * Also, fast_rx expects the STA to be authorized, hence 2506 * EAPOL packets are sent in the slow path.
2507 */ 2508 if (decap == DP_RX_DECAP_TYPE_ETHERNET2_DIX && !is_eapol && 2509 !(is_mcbc && rx_status->flag & RX_FLAG_DECRYPTED)) 2510 rx_status->flag |= RX_FLAG_8023; 2511 2512 ieee80211_rx_napi(ar->hw, pubsta, msdu, napi); 2513 } 2514 2515 static int ath11k_dp_rx_process_msdu(struct ath11k *ar, 2516 struct sk_buff *msdu, 2517 struct sk_buff_head *msdu_list, 2518 struct ieee80211_rx_status *rx_status) 2519 { 2520 struct ath11k_base *ab = ar->ab; 2521 struct hal_rx_desc *rx_desc, *lrx_desc; 2522 struct rx_attention *rx_attention; 2523 struct ath11k_skb_rxcb *rxcb; 2524 struct sk_buff *last_buf; 2525 u8 l3_pad_bytes; 2526 u8 *hdr_status; 2527 u16 msdu_len; 2528 int ret; 2529 u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz; 2530 2531 last_buf = ath11k_dp_rx_get_msdu_last_buf(msdu_list, msdu); 2532 if (!last_buf) { 2533 ath11k_warn(ab, 2534 "No valid Rx buffer to access Atten/MSDU_END/MPDU_END tlvs\n"); 2535 ret = -EIO; 2536 goto free_out; 2537 } 2538 2539 rx_desc = (struct hal_rx_desc *)msdu->data; 2540 if (ath11k_dp_rx_h_attn_msdu_len_err(ab, rx_desc)) { 2541 ath11k_warn(ar->ab, "msdu len not valid\n"); 2542 ret = -EIO; 2543 goto free_out; 2544 } 2545 2546 lrx_desc = (struct hal_rx_desc *)last_buf->data; 2547 rx_attention = ath11k_dp_rx_get_attention(ab, lrx_desc); 2548 if (!ath11k_dp_rx_h_attn_msdu_done(rx_attention)) { 2549 ath11k_warn(ab, "msdu_done bit in attention is not set\n"); 2550 ret = -EIO; 2551 goto free_out; 2552 } 2553 2554 rxcb = ATH11K_SKB_RXCB(msdu); 2555 rxcb->rx_desc = rx_desc; 2556 msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(ab, rx_desc); 2557 l3_pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(ab, lrx_desc); 2558 2559 if (rxcb->is_frag) { 2560 skb_pull(msdu, hal_rx_desc_sz); 2561 } else if (!rxcb->is_continuation) { 2562 if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) { 2563 hdr_status = ath11k_dp_rx_h_80211_hdr(ab, rx_desc); 2564 ret = -EINVAL; 2565 ath11k_warn(ab, "invalid msdu len %u\n", msdu_len); 2566 ath11k_dbg_dump(ab, ATH11K_DBG_DATA, NULL, "", hdr_status, 2567 sizeof(struct ieee80211_hdr)); 2568 ath11k_dbg_dump(ab, ATH11K_DBG_DATA, NULL, "", rx_desc, 2569 sizeof(struct hal_rx_desc)); 2570 goto free_out; 2571 } 2572 skb_put(msdu, hal_rx_desc_sz + l3_pad_bytes + msdu_len); 2573 skb_pull(msdu, hal_rx_desc_sz + l3_pad_bytes); 2574 } else { 2575 ret = ath11k_dp_rx_msdu_coalesce(ar, msdu_list, 2576 msdu, last_buf, 2577 l3_pad_bytes, msdu_len); 2578 if (ret) { 2579 ath11k_warn(ab, 2580 "failed to coalesce msdu rx buffer%d\n", ret); 2581 goto free_out; 2582 } 2583 } 2584 2585 ath11k_dp_rx_h_ppdu(ar, rx_desc, rx_status); 2586 ath11k_dp_rx_h_mpdu(ar, msdu, rx_desc, rx_status); 2587 2588 rx_status->flag |= RX_FLAG_SKIP_MONITOR | RX_FLAG_DUP_VALIDATED; 2589 2590 return 0; 2591 2592 free_out: 2593 return ret; 2594 } 2595 2596 static void ath11k_dp_rx_process_received_packets(struct ath11k_base *ab, 2597 struct napi_struct *napi, 2598 struct sk_buff_head *msdu_list, 2599 int *quota, int ring_id) 2600 { 2601 struct ath11k_skb_rxcb *rxcb; 2602 struct sk_buff *msdu; 2603 struct ath11k *ar; 2604 struct ieee80211_rx_status rx_status = {0}; 2605 u8 mac_id; 2606 int ret; 2607 2608 if (skb_queue_empty(msdu_list)) 2609 return; 2610 2611 rcu_read_lock(); 2612 2613 while (*quota && (msdu = __skb_dequeue(msdu_list))) { 2614 rxcb = ATH11K_SKB_RXCB(msdu); 2615 mac_id = rxcb->mac_id; 2616 ar = ab->pdevs[mac_id].ar; 2617 if (!rcu_dereference(ab->pdevs_active[mac_id])) { 2618 dev_kfree_skb_any(msdu); 2619 continue; 2620 } 2621 2622 if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) { 2623 
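/* A Channel Availability Check is in progress on this radio, so data
 * frames must not be delivered to mac80211; drop the MSDU instead.
 */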
dev_kfree_skb_any(msdu); 2624 continue; 2625 } 2626 2627 ret = ath11k_dp_rx_process_msdu(ar, msdu, msdu_list, &rx_status); 2628 if (ret) { 2629 ath11k_dbg(ab, ATH11K_DBG_DATA, 2630 "Unable to process msdu %d", ret); 2631 dev_kfree_skb_any(msdu); 2632 continue; 2633 } 2634 2635 ath11k_dp_rx_deliver_msdu(ar, napi, msdu, &rx_status); 2636 (*quota)--; 2637 } 2638 2639 rcu_read_unlock(); 2640 } 2641 2642 int ath11k_dp_process_rx(struct ath11k_base *ab, int ring_id, 2643 struct napi_struct *napi, int budget) 2644 { 2645 struct ath11k_dp *dp = &ab->dp; 2646 struct dp_rxdma_ring *rx_ring; 2647 int num_buffs_reaped[MAX_RADIOS] = {0}; 2648 struct sk_buff_head msdu_list; 2649 struct ath11k_skb_rxcb *rxcb; 2650 int total_msdu_reaped = 0; 2651 struct hal_srng *srng; 2652 struct sk_buff *msdu; 2653 int quota = budget; 2654 bool done = false; 2655 int buf_id, mac_id; 2656 struct ath11k *ar; 2657 u32 *rx_desc; 2658 int i; 2659 2660 __skb_queue_head_init(&msdu_list); 2661 2662 srng = &ab->hal.srng_list[dp->reo_dst_ring[ring_id].ring_id]; 2663 2664 spin_lock_bh(&srng->lock); 2665 2666 ath11k_hal_srng_access_begin(ab, srng); 2667 2668 try_again: 2669 while ((rx_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) { 2670 struct hal_reo_dest_ring desc = *(struct hal_reo_dest_ring *)rx_desc; 2671 enum hal_reo_dest_ring_push_reason push_reason; 2672 u32 cookie; 2673 2674 cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE, 2675 desc.buf_addr_info.info1); 2676 buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, 2677 cookie); 2678 mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, cookie); 2679 2680 ar = ab->pdevs[mac_id].ar; 2681 rx_ring = &ar->dp.rx_refill_buf_ring; 2682 spin_lock_bh(&rx_ring->idr_lock); 2683 msdu = idr_find(&rx_ring->bufs_idr, buf_id); 2684 if (!msdu) { 2685 ath11k_warn(ab, "frame rx with invalid buf_id %d\n", 2686 buf_id); 2687 spin_unlock_bh(&rx_ring->idr_lock); 2688 continue; 2689 } 2690 2691 idr_remove(&rx_ring->bufs_idr, buf_id); 2692 spin_unlock_bh(&rx_ring->idr_lock); 2693 2694 rxcb = ATH11K_SKB_RXCB(msdu); 2695 dma_unmap_single(ab->dev, rxcb->paddr, 2696 msdu->len + skb_tailroom(msdu), 2697 DMA_FROM_DEVICE); 2698 2699 num_buffs_reaped[mac_id]++; 2700 total_msdu_reaped++; 2701 2702 push_reason = FIELD_GET(HAL_REO_DEST_RING_INFO0_PUSH_REASON, 2703 desc.info0); 2704 if (push_reason != 2705 HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION) { 2706 dev_kfree_skb_any(msdu); 2707 ab->soc_stats.hal_reo_error[dp->reo_dst_ring[ring_id].ring_id]++; 2708 continue; 2709 } 2710 2711 rxcb->is_first_msdu = !!(desc.rx_msdu_info.info0 & 2712 RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU); 2713 rxcb->is_last_msdu = !!(desc.rx_msdu_info.info0 & 2714 RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU); 2715 rxcb->is_continuation = !!(desc.rx_msdu_info.info0 & 2716 RX_MSDU_DESC_INFO0_MSDU_CONTINUATION); 2717 rxcb->peer_id = FIELD_GET(RX_MPDU_DESC_META_DATA_PEER_ID, 2718 desc.rx_mpdu_info.meta_data); 2719 rxcb->seq_no = FIELD_GET(RX_MPDU_DESC_INFO0_SEQ_NUM, 2720 desc.rx_mpdu_info.info0); 2721 rxcb->tid = FIELD_GET(HAL_REO_DEST_RING_INFO0_RX_QUEUE_NUM, 2722 desc.info0); 2723 2724 rxcb->mac_id = mac_id; 2725 __skb_queue_tail(&msdu_list, msdu); 2726 2727 if (total_msdu_reaped >= quota && !rxcb->is_continuation) { 2728 done = true; 2729 break; 2730 } 2731 } 2732 2733 /* Hw might have updated the head pointer after we cached it. 2734 * In this case, even though there are entries in the ring we'll 2735 * get rx_desc NULL. 
Give the read another try with updated cached 2736 * head pointer so that we can reap complete MPDU in the current 2737 * rx processing. 2738 */ 2739 if (!done && ath11k_hal_srng_dst_num_free(ab, srng, true)) { 2740 ath11k_hal_srng_access_end(ab, srng); 2741 goto try_again; 2742 } 2743 2744 ath11k_hal_srng_access_end(ab, srng); 2745 2746 spin_unlock_bh(&srng->lock); 2747 2748 if (!total_msdu_reaped) 2749 goto exit; 2750 2751 for (i = 0; i < ab->num_radios; i++) { 2752 if (!num_buffs_reaped[i]) 2753 continue; 2754 2755 ar = ab->pdevs[i].ar; 2756 rx_ring = &ar->dp.rx_refill_buf_ring; 2757 2758 ath11k_dp_rxbufs_replenish(ab, i, rx_ring, num_buffs_reaped[i], 2759 ab->hw_params.hal_params->rx_buf_rbm); 2760 } 2761 2762 ath11k_dp_rx_process_received_packets(ab, napi, &msdu_list, 2763 "a, ring_id); 2764 2765 exit: 2766 return budget - quota; 2767 } 2768 2769 static void ath11k_dp_rx_update_peer_stats(struct ath11k_sta *arsta, 2770 struct hal_rx_mon_ppdu_info *ppdu_info) 2771 { 2772 struct ath11k_rx_peer_stats *rx_stats = arsta->rx_stats; 2773 u32 num_msdu; 2774 2775 if (!rx_stats) 2776 return; 2777 2778 num_msdu = ppdu_info->tcp_msdu_count + ppdu_info->tcp_ack_msdu_count + 2779 ppdu_info->udp_msdu_count + ppdu_info->other_msdu_count; 2780 2781 rx_stats->num_msdu += num_msdu; 2782 rx_stats->tcp_msdu_count += ppdu_info->tcp_msdu_count + 2783 ppdu_info->tcp_ack_msdu_count; 2784 rx_stats->udp_msdu_count += ppdu_info->udp_msdu_count; 2785 rx_stats->other_msdu_count += ppdu_info->other_msdu_count; 2786 2787 if (ppdu_info->preamble_type == HAL_RX_PREAMBLE_11A || 2788 ppdu_info->preamble_type == HAL_RX_PREAMBLE_11B) { 2789 ppdu_info->nss = 1; 2790 ppdu_info->mcs = HAL_RX_MAX_MCS; 2791 ppdu_info->tid = IEEE80211_NUM_TIDS; 2792 } 2793 2794 if (ppdu_info->nss > 0 && ppdu_info->nss <= HAL_RX_MAX_NSS) 2795 rx_stats->nss_count[ppdu_info->nss - 1] += num_msdu; 2796 2797 if (ppdu_info->mcs <= HAL_RX_MAX_MCS) 2798 rx_stats->mcs_count[ppdu_info->mcs] += num_msdu; 2799 2800 if (ppdu_info->gi < HAL_RX_GI_MAX) 2801 rx_stats->gi_count[ppdu_info->gi] += num_msdu; 2802 2803 if (ppdu_info->bw < HAL_RX_BW_MAX) 2804 rx_stats->bw_count[ppdu_info->bw] += num_msdu; 2805 2806 if (ppdu_info->ldpc < HAL_RX_SU_MU_CODING_MAX) 2807 rx_stats->coding_count[ppdu_info->ldpc] += num_msdu; 2808 2809 if (ppdu_info->tid <= IEEE80211_NUM_TIDS) 2810 rx_stats->tid_count[ppdu_info->tid] += num_msdu; 2811 2812 if (ppdu_info->preamble_type < HAL_RX_PREAMBLE_MAX) 2813 rx_stats->pream_cnt[ppdu_info->preamble_type] += num_msdu; 2814 2815 if (ppdu_info->reception_type < HAL_RX_RECEPTION_TYPE_MAX) 2816 rx_stats->reception_type[ppdu_info->reception_type] += num_msdu; 2817 2818 if (ppdu_info->is_stbc) 2819 rx_stats->stbc_count += num_msdu; 2820 2821 if (ppdu_info->beamformed) 2822 rx_stats->beamformed_count += num_msdu; 2823 2824 if (ppdu_info->num_mpdu_fcs_ok > 1) 2825 rx_stats->ampdu_msdu_count += num_msdu; 2826 else 2827 rx_stats->non_ampdu_msdu_count += num_msdu; 2828 2829 rx_stats->num_mpdu_fcs_ok += ppdu_info->num_mpdu_fcs_ok; 2830 rx_stats->num_mpdu_fcs_err += ppdu_info->num_mpdu_fcs_err; 2831 rx_stats->dcm_count += ppdu_info->dcm; 2832 rx_stats->ru_alloc_cnt[ppdu_info->ru_alloc] += num_msdu; 2833 2834 arsta->rssi_comb = ppdu_info->rssi_comb; 2835 rx_stats->rx_duration += ppdu_info->rx_duration; 2836 arsta->rx_duration = rx_stats->rx_duration; 2837 } 2838 2839 static struct sk_buff *ath11k_dp_rx_alloc_mon_status_buf(struct ath11k_base *ab, 2840 struct dp_rxdma_ring *rx_ring, 2841 int *buf_id) 2842 { 2843 struct sk_buff *skb; 2844 dma_addr_t 
paddr; 2845 2846 skb = dev_alloc_skb(DP_RX_BUFFER_SIZE + 2847 DP_RX_BUFFER_ALIGN_SIZE); 2848 2849 if (!skb) 2850 goto fail_alloc_skb; 2851 2852 if (!IS_ALIGNED((unsigned long)skb->data, 2853 DP_RX_BUFFER_ALIGN_SIZE)) { 2854 skb_pull(skb, PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) - 2855 skb->data); 2856 } 2857 2858 paddr = dma_map_single(ab->dev, skb->data, 2859 skb->len + skb_tailroom(skb), 2860 DMA_FROM_DEVICE); 2861 if (unlikely(dma_mapping_error(ab->dev, paddr))) 2862 goto fail_free_skb; 2863 2864 spin_lock_bh(&rx_ring->idr_lock); 2865 *buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0, 2866 rx_ring->bufs_max, GFP_ATOMIC); 2867 spin_unlock_bh(&rx_ring->idr_lock); 2868 if (*buf_id < 0) 2869 goto fail_dma_unmap; 2870 2871 ATH11K_SKB_RXCB(skb)->paddr = paddr; 2872 return skb; 2873 2874 fail_dma_unmap: 2875 dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb), 2876 DMA_FROM_DEVICE); 2877 fail_free_skb: 2878 dev_kfree_skb_any(skb); 2879 fail_alloc_skb: 2880 return NULL; 2881 } 2882 2883 int ath11k_dp_rx_mon_status_bufs_replenish(struct ath11k_base *ab, int mac_id, 2884 struct dp_rxdma_ring *rx_ring, 2885 int req_entries, 2886 enum hal_rx_buf_return_buf_manager mgr) 2887 { 2888 struct hal_srng *srng; 2889 u32 *desc; 2890 struct sk_buff *skb; 2891 int num_free; 2892 int num_remain; 2893 int buf_id; 2894 u32 cookie; 2895 dma_addr_t paddr; 2896 2897 req_entries = min(req_entries, rx_ring->bufs_max); 2898 2899 srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id]; 2900 2901 spin_lock_bh(&srng->lock); 2902 2903 ath11k_hal_srng_access_begin(ab, srng); 2904 2905 num_free = ath11k_hal_srng_src_num_free(ab, srng, true); 2906 2907 req_entries = min(num_free, req_entries); 2908 num_remain = req_entries; 2909 2910 while (num_remain > 0) { 2911 skb = ath11k_dp_rx_alloc_mon_status_buf(ab, rx_ring, 2912 &buf_id); 2913 if (!skb) 2914 break; 2915 paddr = ATH11K_SKB_RXCB(skb)->paddr; 2916 2917 desc = ath11k_hal_srng_src_get_next_entry(ab, srng); 2918 if (!desc) 2919 goto fail_desc_get; 2920 2921 cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) | 2922 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id); 2923 2924 num_remain--; 2925 2926 ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr); 2927 } 2928 2929 ath11k_hal_srng_access_end(ab, srng); 2930 2931 spin_unlock_bh(&srng->lock); 2932 2933 return req_entries - num_remain; 2934 2935 fail_desc_get: 2936 spin_lock_bh(&rx_ring->idr_lock); 2937 idr_remove(&rx_ring->bufs_idr, buf_id); 2938 spin_unlock_bh(&rx_ring->idr_lock); 2939 dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb), 2940 DMA_FROM_DEVICE); 2941 dev_kfree_skb_any(skb); 2942 ath11k_hal_srng_access_end(ab, srng); 2943 spin_unlock_bh(&srng->lock); 2944 2945 return req_entries - num_remain; 2946 } 2947 2948 static int ath11k_dp_rx_reap_mon_status_ring(struct ath11k_base *ab, int mac_id, 2949 int *budget, struct sk_buff_head *skb_list) 2950 { 2951 struct ath11k *ar; 2952 const struct ath11k_hw_hal_params *hal_params; 2953 struct ath11k_pdev_dp *dp; 2954 struct dp_rxdma_ring *rx_ring; 2955 struct hal_srng *srng; 2956 void *rx_mon_status_desc; 2957 struct sk_buff *skb; 2958 struct ath11k_skb_rxcb *rxcb; 2959 struct hal_tlv_hdr *tlv; 2960 u32 cookie; 2961 int buf_id, srng_id; 2962 dma_addr_t paddr; 2963 u8 rbm; 2964 int num_buffs_reaped = 0; 2965 2966 ar = ab->pdevs[ath11k_hw_mac_id_to_pdev_id(&ab->hw_params, mac_id)].ar; 2967 dp = &ar->dp; 2968 srng_id = ath11k_hw_mac_id_to_srng_id(&ab->hw_params, mac_id); 2969 rx_ring = &dp->rx_mon_status_refill_ring[srng_id]; 2970 2971 srng 
= &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id]; 2972 2973 spin_lock_bh(&srng->lock); 2974 2975 ath11k_hal_srng_access_begin(ab, srng); 2976 while (*budget) { 2977 *budget -= 1; 2978 rx_mon_status_desc = 2979 ath11k_hal_srng_src_peek(ab, srng); 2980 if (!rx_mon_status_desc) 2981 break; 2982 2983 ath11k_hal_rx_buf_addr_info_get(rx_mon_status_desc, &paddr, 2984 &cookie, &rbm); 2985 if (paddr) { 2986 buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, cookie); 2987 2988 spin_lock_bh(&rx_ring->idr_lock); 2989 skb = idr_find(&rx_ring->bufs_idr, buf_id); 2990 if (!skb) { 2991 ath11k_warn(ab, "rx monitor status with invalid buf_id %d\n", 2992 buf_id); 2993 spin_unlock_bh(&rx_ring->idr_lock); 2994 goto move_next; 2995 } 2996 2997 idr_remove(&rx_ring->bufs_idr, buf_id); 2998 spin_unlock_bh(&rx_ring->idr_lock); 2999 3000 rxcb = ATH11K_SKB_RXCB(skb); 3001 3002 dma_unmap_single(ab->dev, rxcb->paddr, 3003 skb->len + skb_tailroom(skb), 3004 DMA_FROM_DEVICE); 3005 3006 tlv = (struct hal_tlv_hdr *)skb->data; 3007 if (FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl) != 3008 HAL_RX_STATUS_BUFFER_DONE) { 3009 ath11k_warn(ab, "mon status DONE not set %lx\n", 3010 FIELD_GET(HAL_TLV_HDR_TAG, 3011 tlv->tl)); 3012 dev_kfree_skb_any(skb); 3013 goto move_next; 3014 } 3015 3016 __skb_queue_tail(skb_list, skb); 3017 } 3018 move_next: 3019 skb = ath11k_dp_rx_alloc_mon_status_buf(ab, rx_ring, 3020 &buf_id); 3021 3022 if (!skb) { 3023 hal_params = ab->hw_params.hal_params; 3024 ath11k_hal_rx_buf_addr_info_set(rx_mon_status_desc, 0, 0, 3025 hal_params->rx_buf_rbm); 3026 num_buffs_reaped++; 3027 break; 3028 } 3029 rxcb = ATH11K_SKB_RXCB(skb); 3030 3031 cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) | 3032 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id); 3033 3034 ath11k_hal_rx_buf_addr_info_set(rx_mon_status_desc, rxcb->paddr, 3035 cookie, 3036 ab->hw_params.hal_params->rx_buf_rbm); 3037 ath11k_hal_srng_src_get_next_entry(ab, srng); 3038 num_buffs_reaped++; 3039 } 3040 ath11k_hal_srng_access_end(ab, srng); 3041 spin_unlock_bh(&srng->lock); 3042 3043 return num_buffs_reaped; 3044 } 3045 3046 int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id, 3047 struct napi_struct *napi, int budget) 3048 { 3049 struct ath11k *ar = ath11k_ab_to_ar(ab, mac_id); 3050 enum hal_rx_mon_status hal_status; 3051 struct sk_buff *skb; 3052 struct sk_buff_head skb_list; 3053 struct hal_rx_mon_ppdu_info ppdu_info; 3054 struct ath11k_peer *peer; 3055 struct ath11k_sta *arsta; 3056 int num_buffs_reaped = 0; 3057 u32 rx_buf_sz; 3058 u16 log_type = 0; 3059 3060 __skb_queue_head_init(&skb_list); 3061 3062 num_buffs_reaped = ath11k_dp_rx_reap_mon_status_ring(ab, mac_id, &budget, 3063 &skb_list); 3064 if (!num_buffs_reaped) 3065 goto exit; 3066 3067 while ((skb = __skb_dequeue(&skb_list))) { 3068 memset(&ppdu_info, 0, sizeof(ppdu_info)); 3069 ppdu_info.peer_id = HAL_INVALID_PEERID; 3070 3071 if (ath11k_debugfs_is_pktlog_lite_mode_enabled(ar)) { 3072 log_type = ATH11K_PKTLOG_TYPE_LITE_RX; 3073 rx_buf_sz = DP_RX_BUFFER_SIZE_LITE; 3074 } else if (ath11k_debugfs_is_pktlog_rx_stats_enabled(ar)) { 3075 log_type = ATH11K_PKTLOG_TYPE_RX_STATBUF; 3076 rx_buf_sz = DP_RX_BUFFER_SIZE; 3077 } 3078 3079 if (log_type) 3080 trace_ath11k_htt_rxdesc(ar, skb->data, log_type, rx_buf_sz); 3081 3082 hal_status = ath11k_hal_rx_parse_mon_status(ab, &ppdu_info, skb); 3083 3084 if (ppdu_info.peer_id == HAL_INVALID_PEERID || 3085 hal_status != HAL_RX_MON_STATUS_PPDU_DONE) { 3086 dev_kfree_skb_any(skb); 3087 continue; 3088 } 3089 3090 rcu_read_lock(); 3091 
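/* Look up the peer reported in the parsed PPDU info and credit the
 * per-station rx statistics to it; base_lock protects the peer table.
 */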
spin_lock_bh(&ab->base_lock); 3092 peer = ath11k_peer_find_by_id(ab, ppdu_info.peer_id); 3093 3094 if (!peer || !peer->sta) { 3095 ath11k_dbg(ab, ATH11K_DBG_DATA, 3096 "failed to find the peer with peer_id %d\n", 3097 ppdu_info.peer_id); 3098 spin_unlock_bh(&ab->base_lock); 3099 rcu_read_unlock(); 3100 dev_kfree_skb_any(skb); 3101 continue; 3102 } 3103 3104 arsta = (struct ath11k_sta *)peer->sta->drv_priv; 3105 ath11k_dp_rx_update_peer_stats(arsta, &ppdu_info); 3106 3107 if (ath11k_debugfs_is_pktlog_peer_valid(ar, peer->addr)) 3108 trace_ath11k_htt_rxdesc(ar, skb->data, log_type, rx_buf_sz); 3109 3110 spin_unlock_bh(&ab->base_lock); 3111 rcu_read_unlock(); 3112 3113 dev_kfree_skb_any(skb); 3114 } 3115 exit: 3116 return num_buffs_reaped; 3117 } 3118 3119 static void ath11k_dp_rx_frag_timer(struct timer_list *timer) 3120 { 3121 struct dp_rx_tid *rx_tid = from_timer(rx_tid, timer, frag_timer); 3122 3123 spin_lock_bh(&rx_tid->ab->base_lock); 3124 if (rx_tid->last_frag_no && 3125 rx_tid->rx_frag_bitmap == GENMASK(rx_tid->last_frag_no, 0)) { 3126 spin_unlock_bh(&rx_tid->ab->base_lock); 3127 return; 3128 } 3129 ath11k_dp_rx_frags_cleanup(rx_tid, true); 3130 spin_unlock_bh(&rx_tid->ab->base_lock); 3131 } 3132 3133 int ath11k_peer_rx_frag_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id) 3134 { 3135 struct ath11k_base *ab = ar->ab; 3136 struct crypto_shash *tfm; 3137 struct ath11k_peer *peer; 3138 struct dp_rx_tid *rx_tid; 3139 int i; 3140 3141 tfm = crypto_alloc_shash("michael_mic", 0, 0); 3142 if (IS_ERR(tfm)) 3143 return PTR_ERR(tfm); 3144 3145 spin_lock_bh(&ab->base_lock); 3146 3147 peer = ath11k_peer_find(ab, vdev_id, peer_mac); 3148 if (!peer) { 3149 ath11k_warn(ab, "failed to find the peer to set up fragment info\n"); 3150 spin_unlock_bh(&ab->base_lock); 3151 return -ENOENT; 3152 } 3153 3154 for (i = 0; i <= IEEE80211_NUM_TIDS; i++) { 3155 rx_tid = &peer->rx_tid[i]; 3156 rx_tid->ab = ab; 3157 timer_setup(&rx_tid->frag_timer, ath11k_dp_rx_frag_timer, 0); 3158 skb_queue_head_init(&rx_tid->rx_frags); 3159 } 3160 3161 peer->tfm_mmic = tfm; 3162 spin_unlock_bh(&ab->base_lock); 3163 3164 return 0; 3165 } 3166 3167 static int ath11k_dp_rx_h_michael_mic(struct crypto_shash *tfm, u8 *key, 3168 struct ieee80211_hdr *hdr, u8 *data, 3169 size_t data_len, u8 *mic) 3170 { 3171 SHASH_DESC_ON_STACK(desc, tfm); 3172 u8 mic_hdr[16] = {0}; 3173 u8 tid = 0; 3174 int ret; 3175 3176 if (!tfm) 3177 return -EINVAL; 3178 3179 desc->tfm = tfm; 3180 3181 ret = crypto_shash_setkey(tfm, key, 8); 3182 if (ret) 3183 goto out; 3184 3185 ret = crypto_shash_init(desc); 3186 if (ret) 3187 goto out; 3188 3189 /* TKIP MIC header */ 3190 memcpy(mic_hdr, ieee80211_get_DA(hdr), ETH_ALEN); 3191 memcpy(mic_hdr + ETH_ALEN, ieee80211_get_SA(hdr), ETH_ALEN); 3192 if (ieee80211_is_data_qos(hdr->frame_control)) 3193 tid = ieee80211_get_tid(hdr); 3194 mic_hdr[12] = tid; 3195 3196 ret = crypto_shash_update(desc, mic_hdr, 16); 3197 if (ret) 3198 goto out; 3199 ret = crypto_shash_update(desc, data, data_len); 3200 if (ret) 3201 goto out; 3202 ret = crypto_shash_final(desc, mic); 3203 out: 3204 shash_desc_zero(desc); 3205 return ret; 3206 } 3207 3208 static int ath11k_dp_rx_h_verify_tkip_mic(struct ath11k *ar, struct ath11k_peer *peer, 3209 struct sk_buff *msdu) 3210 { 3211 struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)msdu->data; 3212 struct ieee80211_rx_status *rxs = IEEE80211_SKB_RXCB(msdu); 3213 struct ieee80211_key_conf *key_conf; 3214 struct ieee80211_hdr *hdr; 3215 u8 mic[IEEE80211_CCMP_MIC_LEN]; 3216 int head_len, 
tail_len, ret; 3217 size_t data_len; 3218 u32 hdr_len, hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz; 3219 u8 *key, *data; 3220 u8 key_idx; 3221 3222 if (ath11k_dp_rx_h_mpdu_start_enctype(ar->ab, rx_desc) != 3223 HAL_ENCRYPT_TYPE_TKIP_MIC) 3224 return 0; 3225 3226 hdr = (struct ieee80211_hdr *)(msdu->data + hal_rx_desc_sz); 3227 hdr_len = ieee80211_hdrlen(hdr->frame_control); 3228 head_len = hdr_len + hal_rx_desc_sz + IEEE80211_TKIP_IV_LEN; 3229 tail_len = IEEE80211_CCMP_MIC_LEN + IEEE80211_TKIP_ICV_LEN + FCS_LEN; 3230 3231 if (!is_multicast_ether_addr(hdr->addr1)) 3232 key_idx = peer->ucast_keyidx; 3233 else 3234 key_idx = peer->mcast_keyidx; 3235 3236 key_conf = peer->keys[key_idx]; 3237 3238 data = msdu->data + head_len; 3239 data_len = msdu->len - head_len - tail_len; 3240 key = &key_conf->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY]; 3241 3242 ret = ath11k_dp_rx_h_michael_mic(peer->tfm_mmic, key, hdr, data, data_len, mic); 3243 if (ret || memcmp(mic, data + data_len, IEEE80211_CCMP_MIC_LEN)) 3244 goto mic_fail; 3245 3246 return 0; 3247 3248 mic_fail: 3249 (ATH11K_SKB_RXCB(msdu))->is_first_msdu = true; 3250 (ATH11K_SKB_RXCB(msdu))->is_last_msdu = true; 3251 3252 rxs->flag |= RX_FLAG_MMIC_ERROR | RX_FLAG_MMIC_STRIPPED | 3253 RX_FLAG_IV_STRIPPED | RX_FLAG_DECRYPTED; 3254 skb_pull(msdu, hal_rx_desc_sz); 3255 3256 ath11k_dp_rx_h_ppdu(ar, rx_desc, rxs); 3257 ath11k_dp_rx_h_undecap(ar, msdu, rx_desc, 3258 HAL_ENCRYPT_TYPE_TKIP_MIC, rxs, true); 3259 ieee80211_rx(ar->hw, msdu); 3260 return -EINVAL; 3261 } 3262 3263 static void ath11k_dp_rx_h_undecap_frag(struct ath11k *ar, struct sk_buff *msdu, 3264 enum hal_encrypt_type enctype, u32 flags) 3265 { 3266 struct ieee80211_hdr *hdr; 3267 size_t hdr_len; 3268 size_t crypto_len; 3269 u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz; 3270 3271 if (!flags) 3272 return; 3273 3274 hdr = (struct ieee80211_hdr *)(msdu->data + hal_rx_desc_sz); 3275 3276 if (flags & RX_FLAG_MIC_STRIPPED) 3277 skb_trim(msdu, msdu->len - 3278 ath11k_dp_rx_crypto_mic_len(ar, enctype)); 3279 3280 if (flags & RX_FLAG_ICV_STRIPPED) 3281 skb_trim(msdu, msdu->len - 3282 ath11k_dp_rx_crypto_icv_len(ar, enctype)); 3283 3284 if (flags & RX_FLAG_IV_STRIPPED) { 3285 hdr_len = ieee80211_hdrlen(hdr->frame_control); 3286 crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype); 3287 3288 memmove((void *)msdu->data + hal_rx_desc_sz + crypto_len, 3289 (void *)msdu->data + hal_rx_desc_sz, hdr_len); 3290 skb_pull(msdu, crypto_len); 3291 } 3292 } 3293 3294 static int ath11k_dp_rx_h_defrag(struct ath11k *ar, 3295 struct ath11k_peer *peer, 3296 struct dp_rx_tid *rx_tid, 3297 struct sk_buff **defrag_skb) 3298 { 3299 struct hal_rx_desc *rx_desc; 3300 struct sk_buff *skb, *first_frag, *last_frag; 3301 struct ieee80211_hdr *hdr; 3302 struct rx_attention *rx_attention; 3303 enum hal_encrypt_type enctype; 3304 bool is_decrypted = false; 3305 int msdu_len = 0; 3306 int extra_space; 3307 u32 flags, hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz; 3308 3309 first_frag = skb_peek(&rx_tid->rx_frags); 3310 last_frag = skb_peek_tail(&rx_tid->rx_frags); 3311 3312 skb_queue_walk(&rx_tid->rx_frags, skb) { 3313 flags = 0; 3314 rx_desc = (struct hal_rx_desc *)skb->data; 3315 hdr = (struct ieee80211_hdr *)(skb->data + hal_rx_desc_sz); 3316 3317 enctype = ath11k_dp_rx_h_mpdu_start_enctype(ar->ab, rx_desc); 3318 if (enctype != HAL_ENCRYPT_TYPE_OPEN) { 3319 rx_attention = ath11k_dp_rx_get_attention(ar->ab, rx_desc); 3320 is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_attention); 3321 } 3322 3323 if (is_decrypted) { 
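/* For decrypted fragments, mark the IV as stripped on all but the
 * first fragment and the MIC/ICV as stripped on all but the last,
 * so the reassembled frame keeps exactly one copy of each.
 */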
3324 if (skb != first_frag) 3325 flags |= RX_FLAG_IV_STRIPPED; 3326 if (skb != last_frag) 3327 flags |= RX_FLAG_ICV_STRIPPED | 3328 RX_FLAG_MIC_STRIPPED; 3329 } 3330 3331 /* RX fragments are always raw packets */ 3332 if (skb != last_frag) 3333 skb_trim(skb, skb->len - FCS_LEN); 3334 ath11k_dp_rx_h_undecap_frag(ar, skb, enctype, flags); 3335 3336 if (skb != first_frag) 3337 skb_pull(skb, hal_rx_desc_sz + 3338 ieee80211_hdrlen(hdr->frame_control)); 3339 msdu_len += skb->len; 3340 } 3341 3342 extra_space = msdu_len - (DP_RX_BUFFER_SIZE + skb_tailroom(first_frag)); 3343 if (extra_space > 0 && 3344 (pskb_expand_head(first_frag, 0, extra_space, GFP_ATOMIC) < 0)) 3345 return -ENOMEM; 3346 3347 __skb_unlink(first_frag, &rx_tid->rx_frags); 3348 while ((skb = __skb_dequeue(&rx_tid->rx_frags))) { 3349 skb_put_data(first_frag, skb->data, skb->len); 3350 dev_kfree_skb_any(skb); 3351 } 3352 3353 hdr = (struct ieee80211_hdr *)(first_frag->data + hal_rx_desc_sz); 3354 hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_MOREFRAGS); 3355 ATH11K_SKB_RXCB(first_frag)->is_frag = 1; 3356 3357 if (ath11k_dp_rx_h_verify_tkip_mic(ar, peer, first_frag)) 3358 first_frag = NULL; 3359 3360 *defrag_skb = first_frag; 3361 return 0; 3362 } 3363 3364 static int ath11k_dp_rx_h_defrag_reo_reinject(struct ath11k *ar, struct dp_rx_tid *rx_tid, 3365 struct sk_buff *defrag_skb) 3366 { 3367 struct ath11k_base *ab = ar->ab; 3368 struct ath11k_pdev_dp *dp = &ar->dp; 3369 struct dp_rxdma_ring *rx_refill_ring = &dp->rx_refill_buf_ring; 3370 struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)defrag_skb->data; 3371 struct hal_reo_entrance_ring *reo_ent_ring; 3372 struct hal_reo_dest_ring *reo_dest_ring; 3373 struct dp_link_desc_bank *link_desc_banks; 3374 struct hal_rx_msdu_link *msdu_link; 3375 struct hal_rx_msdu_details *msdu0; 3376 struct hal_srng *srng; 3377 dma_addr_t paddr; 3378 u32 desc_bank, msdu_info, mpdu_info; 3379 u32 dst_idx, cookie, hal_rx_desc_sz; 3380 int ret, buf_id; 3381 3382 hal_rx_desc_sz = ab->hw_params.hal_desc_sz; 3383 link_desc_banks = ab->dp.link_desc_banks; 3384 reo_dest_ring = rx_tid->dst_ring_desc; 3385 3386 ath11k_hal_rx_reo_ent_paddr_get(ab, reo_dest_ring, &paddr, &desc_bank); 3387 msdu_link = (struct hal_rx_msdu_link *)(link_desc_banks[desc_bank].vaddr + 3388 (paddr - link_desc_banks[desc_bank].paddr)); 3389 msdu0 = &msdu_link->msdu_link[0]; 3390 dst_idx = FIELD_GET(RX_MSDU_DESC_INFO0_REO_DEST_IND, msdu0->rx_msdu_info.info0); 3391 memset(msdu0, 0, sizeof(*msdu0)); 3392 3393 msdu_info = FIELD_PREP(RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU, 1) | 3394 FIELD_PREP(RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU, 1) | 3395 FIELD_PREP(RX_MSDU_DESC_INFO0_MSDU_CONTINUATION, 0) | 3396 FIELD_PREP(RX_MSDU_DESC_INFO0_MSDU_LENGTH, 3397 defrag_skb->len - hal_rx_desc_sz) | 3398 FIELD_PREP(RX_MSDU_DESC_INFO0_REO_DEST_IND, dst_idx) | 3399 FIELD_PREP(RX_MSDU_DESC_INFO0_VALID_SA, 1) | 3400 FIELD_PREP(RX_MSDU_DESC_INFO0_VALID_DA, 1); 3401 msdu0->rx_msdu_info.info0 = msdu_info; 3402 3403 /* change msdu len in hal rx desc */ 3404 ath11k_dp_rxdesc_set_msdu_len(ab, rx_desc, defrag_skb->len - hal_rx_desc_sz); 3405 3406 paddr = dma_map_single(ab->dev, defrag_skb->data, 3407 defrag_skb->len + skb_tailroom(defrag_skb), 3408 DMA_TO_DEVICE); 3409 if (dma_mapping_error(ab->dev, paddr)) 3410 return -ENOMEM; 3411 3412 spin_lock_bh(&rx_refill_ring->idr_lock); 3413 buf_id = idr_alloc(&rx_refill_ring->bufs_idr, defrag_skb, 0, 3414 rx_refill_ring->bufs_max * 3, GFP_ATOMIC); 3415 spin_unlock_bh(&rx_refill_ring->idr_lock); 3416 if (buf_id < 0) { 3417 ret = 
-ENOMEM; 3418 goto err_unmap_dma; 3419 } 3420 3421 ATH11K_SKB_RXCB(defrag_skb)->paddr = paddr; 3422 cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, dp->mac_id) | 3423 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id); 3424 3425 ath11k_hal_rx_buf_addr_info_set(msdu0, paddr, cookie, 3426 ab->hw_params.hal_params->rx_buf_rbm); 3427 3428 /* Fill mpdu details into reo entrace ring */ 3429 srng = &ab->hal.srng_list[ab->dp.reo_reinject_ring.ring_id]; 3430 3431 spin_lock_bh(&srng->lock); 3432 ath11k_hal_srng_access_begin(ab, srng); 3433 3434 reo_ent_ring = (struct hal_reo_entrance_ring *) 3435 ath11k_hal_srng_src_get_next_entry(ab, srng); 3436 if (!reo_ent_ring) { 3437 ath11k_hal_srng_access_end(ab, srng); 3438 spin_unlock_bh(&srng->lock); 3439 ret = -ENOSPC; 3440 goto err_free_idr; 3441 } 3442 memset(reo_ent_ring, 0, sizeof(*reo_ent_ring)); 3443 3444 ath11k_hal_rx_reo_ent_paddr_get(ab, reo_dest_ring, &paddr, &desc_bank); 3445 ath11k_hal_rx_buf_addr_info_set(reo_ent_ring, paddr, desc_bank, 3446 HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST); 3447 3448 mpdu_info = FIELD_PREP(RX_MPDU_DESC_INFO0_MSDU_COUNT, 1) | 3449 FIELD_PREP(RX_MPDU_DESC_INFO0_SEQ_NUM, rx_tid->cur_sn) | 3450 FIELD_PREP(RX_MPDU_DESC_INFO0_FRAG_FLAG, 0) | 3451 FIELD_PREP(RX_MPDU_DESC_INFO0_VALID_SA, 1) | 3452 FIELD_PREP(RX_MPDU_DESC_INFO0_VALID_DA, 1) | 3453 FIELD_PREP(RX_MPDU_DESC_INFO0_RAW_MPDU, 1) | 3454 FIELD_PREP(RX_MPDU_DESC_INFO0_VALID_PN, 1); 3455 3456 reo_ent_ring->rx_mpdu_info.info0 = mpdu_info; 3457 reo_ent_ring->rx_mpdu_info.meta_data = reo_dest_ring->rx_mpdu_info.meta_data; 3458 reo_ent_ring->queue_addr_lo = reo_dest_ring->queue_addr_lo; 3459 reo_ent_ring->info0 = FIELD_PREP(HAL_REO_ENTR_RING_INFO0_QUEUE_ADDR_HI, 3460 FIELD_GET(HAL_REO_DEST_RING_INFO0_QUEUE_ADDR_HI, 3461 reo_dest_ring->info0)) | 3462 FIELD_PREP(HAL_REO_ENTR_RING_INFO0_DEST_IND, dst_idx); 3463 ath11k_hal_srng_access_end(ab, srng); 3464 spin_unlock_bh(&srng->lock); 3465 3466 return 0; 3467 3468 err_free_idr: 3469 spin_lock_bh(&rx_refill_ring->idr_lock); 3470 idr_remove(&rx_refill_ring->bufs_idr, buf_id); 3471 spin_unlock_bh(&rx_refill_ring->idr_lock); 3472 err_unmap_dma: 3473 dma_unmap_single(ab->dev, paddr, defrag_skb->len + skb_tailroom(defrag_skb), 3474 DMA_TO_DEVICE); 3475 return ret; 3476 } 3477 3478 static int ath11k_dp_rx_h_cmp_frags(struct ath11k *ar, 3479 struct sk_buff *a, struct sk_buff *b) 3480 { 3481 int frag1, frag2; 3482 3483 frag1 = ath11k_dp_rx_h_mpdu_start_frag_no(ar->ab, a); 3484 frag2 = ath11k_dp_rx_h_mpdu_start_frag_no(ar->ab, b); 3485 3486 return frag1 - frag2; 3487 } 3488 3489 static void ath11k_dp_rx_h_sort_frags(struct ath11k *ar, 3490 struct sk_buff_head *frag_list, 3491 struct sk_buff *cur_frag) 3492 { 3493 struct sk_buff *skb; 3494 int cmp; 3495 3496 skb_queue_walk(frag_list, skb) { 3497 cmp = ath11k_dp_rx_h_cmp_frags(ar, skb, cur_frag); 3498 if (cmp < 0) 3499 continue; 3500 __skb_queue_before(frag_list, skb, cur_frag); 3501 return; 3502 } 3503 __skb_queue_tail(frag_list, cur_frag); 3504 } 3505 3506 static u64 ath11k_dp_rx_h_get_pn(struct ath11k *ar, struct sk_buff *skb) 3507 { 3508 struct ieee80211_hdr *hdr; 3509 u64 pn = 0; 3510 u8 *ehdr; 3511 u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz; 3512 3513 hdr = (struct ieee80211_hdr *)(skb->data + hal_rx_desc_sz); 3514 ehdr = skb->data + hal_rx_desc_sz + ieee80211_hdrlen(hdr->frame_control); 3515 3516 pn = ehdr[0]; 3517 pn |= (u64)ehdr[1] << 8; 3518 pn |= (u64)ehdr[4] << 16; 3519 pn |= (u64)ehdr[5] << 24; 3520 pn |= (u64)ehdr[6] << 32; 3521 pn |= (u64)ehdr[7] << 40; 3522 3523 return pn; 
3524 } 3525 3526 static bool 3527 ath11k_dp_rx_h_defrag_validate_incr_pn(struct ath11k *ar, struct dp_rx_tid *rx_tid) 3528 { 3529 enum hal_encrypt_type encrypt_type; 3530 struct sk_buff *first_frag, *skb; 3531 struct hal_rx_desc *desc; 3532 u64 last_pn; 3533 u64 cur_pn; 3534 3535 first_frag = skb_peek(&rx_tid->rx_frags); 3536 desc = (struct hal_rx_desc *)first_frag->data; 3537 3538 encrypt_type = ath11k_dp_rx_h_mpdu_start_enctype(ar->ab, desc); 3539 if (encrypt_type != HAL_ENCRYPT_TYPE_CCMP_128 && 3540 encrypt_type != HAL_ENCRYPT_TYPE_CCMP_256 && 3541 encrypt_type != HAL_ENCRYPT_TYPE_GCMP_128 && 3542 encrypt_type != HAL_ENCRYPT_TYPE_AES_GCMP_256) 3543 return true; 3544 3545 last_pn = ath11k_dp_rx_h_get_pn(ar, first_frag); 3546 skb_queue_walk(&rx_tid->rx_frags, skb) { 3547 if (skb == first_frag) 3548 continue; 3549 3550 cur_pn = ath11k_dp_rx_h_get_pn(ar, skb); 3551 if (cur_pn != last_pn + 1) 3552 return false; 3553 last_pn = cur_pn; 3554 } 3555 return true; 3556 } 3557 3558 static int ath11k_dp_rx_frag_h_mpdu(struct ath11k *ar, 3559 struct sk_buff *msdu, 3560 u32 *ring_desc) 3561 { 3562 struct ath11k_base *ab = ar->ab; 3563 struct hal_rx_desc *rx_desc; 3564 struct ath11k_peer *peer; 3565 struct dp_rx_tid *rx_tid; 3566 struct sk_buff *defrag_skb = NULL; 3567 u32 peer_id; 3568 u16 seqno, frag_no; 3569 u8 tid; 3570 int ret = 0; 3571 bool more_frags; 3572 bool is_mcbc; 3573 3574 rx_desc = (struct hal_rx_desc *)msdu->data; 3575 peer_id = ath11k_dp_rx_h_mpdu_start_peer_id(ar->ab, rx_desc); 3576 tid = ath11k_dp_rx_h_mpdu_start_tid(ar->ab, rx_desc); 3577 seqno = ath11k_dp_rx_h_mpdu_start_seq_no(ar->ab, rx_desc); 3578 frag_no = ath11k_dp_rx_h_mpdu_start_frag_no(ar->ab, msdu); 3579 more_frags = ath11k_dp_rx_h_mpdu_start_more_frags(ar->ab, msdu); 3580 is_mcbc = ath11k_dp_rx_h_attn_is_mcbc(ar->ab, rx_desc); 3581 3582 /* Multicast/Broadcast fragments are not expected */ 3583 if (is_mcbc) 3584 return -EINVAL; 3585 3586 if (!ath11k_dp_rx_h_mpdu_start_seq_ctrl_valid(ar->ab, rx_desc) || 3587 !ath11k_dp_rx_h_mpdu_start_fc_valid(ar->ab, rx_desc) || 3588 tid > IEEE80211_NUM_TIDS) 3589 return -EINVAL; 3590 3591 /* received unfragmented packet in reo 3592 * exception ring, this shouldn't happen 3593 * as these packets typically come from 3594 * reo2sw srngs. 
3595 */ 3596 if (WARN_ON_ONCE(!frag_no && !more_frags)) 3597 return -EINVAL; 3598 3599 spin_lock_bh(&ab->base_lock); 3600 peer = ath11k_peer_find_by_id(ab, peer_id); 3601 if (!peer) { 3602 ath11k_warn(ab, "failed to find the peer to de-fragment received fragment peer_id %d\n", 3603 peer_id); 3604 ret = -ENOENT; 3605 goto out_unlock; 3606 } 3607 rx_tid = &peer->rx_tid[tid]; 3608 3609 if ((!skb_queue_empty(&rx_tid->rx_frags) && seqno != rx_tid->cur_sn) || 3610 skb_queue_empty(&rx_tid->rx_frags)) { 3611 /* Flush stored fragments and start a new sequence */ 3612 ath11k_dp_rx_frags_cleanup(rx_tid, true); 3613 rx_tid->cur_sn = seqno; 3614 } 3615 3616 if (rx_tid->rx_frag_bitmap & BIT(frag_no)) { 3617 /* Fragment already present */ 3618 ret = -EINVAL; 3619 goto out_unlock; 3620 } 3621 3622 if (frag_no > __fls(rx_tid->rx_frag_bitmap)) 3623 __skb_queue_tail(&rx_tid->rx_frags, msdu); 3624 else 3625 ath11k_dp_rx_h_sort_frags(ar, &rx_tid->rx_frags, msdu); 3626 3627 rx_tid->rx_frag_bitmap |= BIT(frag_no); 3628 if (!more_frags) 3629 rx_tid->last_frag_no = frag_no; 3630 3631 if (frag_no == 0) { 3632 rx_tid->dst_ring_desc = kmemdup(ring_desc, 3633 sizeof(*rx_tid->dst_ring_desc), 3634 GFP_ATOMIC); 3635 if (!rx_tid->dst_ring_desc) { 3636 ret = -ENOMEM; 3637 goto out_unlock; 3638 } 3639 } else { 3640 ath11k_dp_rx_link_desc_return(ab, ring_desc, 3641 HAL_WBM_REL_BM_ACT_PUT_IN_IDLE); 3642 } 3643 3644 if (!rx_tid->last_frag_no || 3645 rx_tid->rx_frag_bitmap != GENMASK(rx_tid->last_frag_no, 0)) { 3646 mod_timer(&rx_tid->frag_timer, jiffies + 3647 ATH11K_DP_RX_FRAGMENT_TIMEOUT_MS); 3648 goto out_unlock; 3649 } 3650 3651 spin_unlock_bh(&ab->base_lock); 3652 del_timer_sync(&rx_tid->frag_timer); 3653 spin_lock_bh(&ab->base_lock); 3654 3655 peer = ath11k_peer_find_by_id(ab, peer_id); 3656 if (!peer) 3657 goto err_frags_cleanup; 3658 3659 if (!ath11k_dp_rx_h_defrag_validate_incr_pn(ar, rx_tid)) 3660 goto err_frags_cleanup; 3661 3662 if (ath11k_dp_rx_h_defrag(ar, peer, rx_tid, &defrag_skb)) 3663 goto err_frags_cleanup; 3664 3665 if (!defrag_skb) 3666 goto err_frags_cleanup; 3667 3668 if (ath11k_dp_rx_h_defrag_reo_reinject(ar, rx_tid, defrag_skb)) 3669 goto err_frags_cleanup; 3670 3671 ath11k_dp_rx_frags_cleanup(rx_tid, false); 3672 goto out_unlock; 3673 3674 err_frags_cleanup: 3675 dev_kfree_skb_any(defrag_skb); 3676 ath11k_dp_rx_frags_cleanup(rx_tid, true); 3677 out_unlock: 3678 spin_unlock_bh(&ab->base_lock); 3679 return ret; 3680 } 3681 3682 static int 3683 ath11k_dp_process_rx_err_buf(struct ath11k *ar, u32 *ring_desc, int buf_id, bool drop) 3684 { 3685 struct ath11k_pdev_dp *dp = &ar->dp; 3686 struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring; 3687 struct sk_buff *msdu; 3688 struct ath11k_skb_rxcb *rxcb; 3689 struct hal_rx_desc *rx_desc; 3690 u8 *hdr_status; 3691 u16 msdu_len; 3692 u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz; 3693 3694 spin_lock_bh(&rx_ring->idr_lock); 3695 msdu = idr_find(&rx_ring->bufs_idr, buf_id); 3696 if (!msdu) { 3697 ath11k_warn(ar->ab, "rx err buf with invalid buf_id %d\n", 3698 buf_id); 3699 spin_unlock_bh(&rx_ring->idr_lock); 3700 return -EINVAL; 3701 } 3702 3703 idr_remove(&rx_ring->bufs_idr, buf_id); 3704 spin_unlock_bh(&rx_ring->idr_lock); 3705 3706 rxcb = ATH11K_SKB_RXCB(msdu); 3707 dma_unmap_single(ar->ab->dev, rxcb->paddr, 3708 msdu->len + skb_tailroom(msdu), 3709 DMA_FROM_DEVICE); 3710 3711 if (drop) { 3712 dev_kfree_skb_any(msdu); 3713 return 0; 3714 } 3715 3716 rcu_read_lock(); 3717 if (!rcu_dereference(ar->ab->pdevs_active[ar->pdev_idx])) { 3718 
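/* The pdev is no longer marked active (e.g. it is being torn down),
 * so this frame cannot be processed; free it.
 */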
dev_kfree_skb_any(msdu); 3719 goto exit; 3720 } 3721 3722 if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) { 3723 dev_kfree_skb_any(msdu); 3724 goto exit; 3725 } 3726 3727 rx_desc = (struct hal_rx_desc *)msdu->data; 3728 msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(ar->ab, rx_desc); 3729 if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) { 3730 hdr_status = ath11k_dp_rx_h_80211_hdr(ar->ab, rx_desc); 3731 ath11k_warn(ar->ab, "invalid msdu len %u", msdu_len); 3732 ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "", hdr_status, 3733 sizeof(struct ieee80211_hdr)); 3734 ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "", rx_desc, 3735 sizeof(struct hal_rx_desc)); 3736 dev_kfree_skb_any(msdu); 3737 goto exit; 3738 } 3739 3740 skb_put(msdu, hal_rx_desc_sz + msdu_len); 3741 3742 if (ath11k_dp_rx_frag_h_mpdu(ar, msdu, ring_desc)) { 3743 dev_kfree_skb_any(msdu); 3744 ath11k_dp_rx_link_desc_return(ar->ab, ring_desc, 3745 HAL_WBM_REL_BM_ACT_PUT_IN_IDLE); 3746 } 3747 exit: 3748 rcu_read_unlock(); 3749 return 0; 3750 } 3751 3752 int ath11k_dp_process_rx_err(struct ath11k_base *ab, struct napi_struct *napi, 3753 int budget) 3754 { 3755 u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC]; 3756 struct dp_link_desc_bank *link_desc_banks; 3757 enum hal_rx_buf_return_buf_manager rbm; 3758 int tot_n_bufs_reaped, quota, ret, i; 3759 int n_bufs_reaped[MAX_RADIOS] = {0}; 3760 struct dp_rxdma_ring *rx_ring; 3761 struct dp_srng *reo_except; 3762 u32 desc_bank, num_msdus; 3763 struct hal_srng *srng; 3764 struct ath11k_dp *dp; 3765 void *link_desc_va; 3766 int buf_id, mac_id; 3767 struct ath11k *ar; 3768 dma_addr_t paddr; 3769 u32 *desc; 3770 bool is_frag; 3771 u8 drop = 0; 3772 3773 tot_n_bufs_reaped = 0; 3774 quota = budget; 3775 3776 dp = &ab->dp; 3777 reo_except = &dp->reo_except_ring; 3778 link_desc_banks = dp->link_desc_banks; 3779 3780 srng = &ab->hal.srng_list[reo_except->ring_id]; 3781 3782 spin_lock_bh(&srng->lock); 3783 3784 ath11k_hal_srng_access_begin(ab, srng); 3785 3786 while (budget && 3787 (desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) { 3788 struct hal_reo_dest_ring *reo_desc = (struct hal_reo_dest_ring *)desc; 3789 3790 ab->soc_stats.err_ring_pkts++; 3791 ret = ath11k_hal_desc_reo_parse_err(ab, desc, &paddr, 3792 &desc_bank); 3793 if (ret) { 3794 ath11k_warn(ab, "failed to parse error reo desc %d\n", 3795 ret); 3796 continue; 3797 } 3798 link_desc_va = link_desc_banks[desc_bank].vaddr + 3799 (paddr - link_desc_banks[desc_bank].paddr); 3800 ath11k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, msdu_cookies, 3801 &rbm); 3802 if (rbm != HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST && 3803 rbm != ab->hw_params.hal_params->rx_buf_rbm) { 3804 ab->soc_stats.invalid_rbm++; 3805 ath11k_warn(ab, "invalid return buffer manager %d\n", rbm); 3806 ath11k_dp_rx_link_desc_return(ab, desc, 3807 HAL_WBM_REL_BM_ACT_REL_MSDU); 3808 continue; 3809 } 3810 3811 is_frag = !!(reo_desc->rx_mpdu_info.info0 & RX_MPDU_DESC_INFO0_FRAG_FLAG); 3812 3813 /* Process only rx fragments with one msdu per link desc below, and drop 3814 * msdus indicated due to error reasons.
3815 */ 3816 if (!is_frag || num_msdus > 1) { 3817 drop = 1; 3818 /* Return the link desc back to wbm idle list */ 3819 ath11k_dp_rx_link_desc_return(ab, desc, 3820 HAL_WBM_REL_BM_ACT_PUT_IN_IDLE); 3821 } 3822 3823 for (i = 0; i < num_msdus; i++) { 3824 buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, 3825 msdu_cookies[i]); 3826 3827 mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, 3828 msdu_cookies[i]); 3829 3830 ar = ab->pdevs[mac_id].ar; 3831 3832 if (!ath11k_dp_process_rx_err_buf(ar, desc, buf_id, drop)) { 3833 n_bufs_reaped[mac_id]++; 3834 tot_n_bufs_reaped++; 3835 } 3836 } 3837 3838 if (tot_n_bufs_reaped >= quota) { 3839 tot_n_bufs_reaped = quota; 3840 goto exit; 3841 } 3842 3843 budget = quota - tot_n_bufs_reaped; 3844 } 3845 3846 exit: 3847 ath11k_hal_srng_access_end(ab, srng); 3848 3849 spin_unlock_bh(&srng->lock); 3850 3851 for (i = 0; i < ab->num_radios; i++) { 3852 if (!n_bufs_reaped[i]) 3853 continue; 3854 3855 ar = ab->pdevs[i].ar; 3856 rx_ring = &ar->dp.rx_refill_buf_ring; 3857 3858 ath11k_dp_rxbufs_replenish(ab, i, rx_ring, n_bufs_reaped[i], 3859 ab->hw_params.hal_params->rx_buf_rbm); 3860 } 3861 3862 return tot_n_bufs_reaped; 3863 } 3864 3865 static void ath11k_dp_rx_null_q_desc_sg_drop(struct ath11k *ar, 3866 int msdu_len, 3867 struct sk_buff_head *msdu_list) 3868 { 3869 struct sk_buff *skb, *tmp; 3870 struct ath11k_skb_rxcb *rxcb; 3871 int n_buffs; 3872 3873 n_buffs = DIV_ROUND_UP(msdu_len, 3874 (DP_RX_BUFFER_SIZE - ar->ab->hw_params.hal_desc_sz)); 3875 3876 skb_queue_walk_safe(msdu_list, skb, tmp) { 3877 rxcb = ATH11K_SKB_RXCB(skb); 3878 if (rxcb->err_rel_src == HAL_WBM_REL_SRC_MODULE_REO && 3879 rxcb->err_code == HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO) { 3880 if (!n_buffs) 3881 break; 3882 __skb_unlink(skb, msdu_list); 3883 dev_kfree_skb_any(skb); 3884 n_buffs--; 3885 } 3886 } 3887 } 3888 3889 static int ath11k_dp_rx_h_null_q_desc(struct ath11k *ar, struct sk_buff *msdu, 3890 struct ieee80211_rx_status *status, 3891 struct sk_buff_head *msdu_list) 3892 { 3893 u16 msdu_len; 3894 struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data; 3895 struct rx_attention *rx_attention; 3896 u8 l3pad_bytes; 3897 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); 3898 u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz; 3899 3900 msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(ar->ab, desc); 3901 3902 if (!rxcb->is_frag && ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE)) { 3903 /* First buffer will be freed by the caller, so deduct its length */ 3904 msdu_len = msdu_len - (DP_RX_BUFFER_SIZE - hal_rx_desc_sz); 3905 ath11k_dp_rx_null_q_desc_sg_drop(ar, msdu_len, msdu_list); 3906 return -EINVAL; 3907 } 3908 3909 rx_attention = ath11k_dp_rx_get_attention(ar->ab, desc); 3910 if (!ath11k_dp_rx_h_attn_msdu_done(rx_attention)) { 3911 ath11k_warn(ar->ab, 3912 "msdu_done bit not set in null_q_desc processing\n"); 3913 __skb_queue_purge(msdu_list); 3914 return -EIO; 3915 } 3916 3917 /* Handle NULL queue descriptor violations arising out of a missing 3918 * REO queue for a given peer or a given TID. This typically 3919 * happens when a packet is received on a QoS-enabled TID before the 3920 * ADDBA negotiation for that TID, i.e. before the TID queue is set up. 3921 * It may also happen for MC/BC frames if they are not routed to the 3922 * non-QoS TID queue, in the absence of any other default TID queue. 3923 * This error can show up in both the REO destination and WBM release rings.
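 * The frame itself is otherwise valid, so below it is decapped and handed to mac80211 instead of being dropped.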
3924 */ 3925 3926 rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(ar->ab, desc); 3927 rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(ar->ab, desc); 3928 3929 if (rxcb->is_frag) { 3930 skb_pull(msdu, hal_rx_desc_sz); 3931 } else { 3932 l3pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, desc); 3933 3934 if ((hal_rx_desc_sz + l3pad_bytes + msdu_len) > DP_RX_BUFFER_SIZE) 3935 return -EINVAL; 3936 3937 skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len); 3938 skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes); 3939 } 3940 ath11k_dp_rx_h_ppdu(ar, desc, status); 3941 3942 ath11k_dp_rx_h_mpdu(ar, msdu, desc, status); 3943 3944 rxcb->tid = ath11k_dp_rx_h_mpdu_start_tid(ar->ab, desc); 3945 3946 /* Please note that the caller will have access to the msdu and will 3947 * complete rx with mac80211. There is no need to clean up amsdu_list here. 3948 */ 3949 3950 return 0; 3951 } 3952 3953 static bool ath11k_dp_rx_h_reo_err(struct ath11k *ar, struct sk_buff *msdu, 3954 struct ieee80211_rx_status *status, 3955 struct sk_buff_head *msdu_list) 3956 { 3957 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); 3958 bool drop = false; 3959 3960 ar->ab->soc_stats.reo_error[rxcb->err_code]++; 3961 3962 switch (rxcb->err_code) { 3963 case HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO: 3964 if (ath11k_dp_rx_h_null_q_desc(ar, msdu, status, msdu_list)) 3965 drop = true; 3966 break; 3967 case HAL_REO_DEST_RING_ERROR_CODE_PN_CHECK_FAILED: 3968 /* TODO: Do not drop PN failed packets in the driver; 3969 * instead, it is good to drop such packets in mac80211 3970 * after incrementing the replay counters. 3971 */ 3972 fallthrough; 3973 default: 3974 /* TODO: Review other errors and pass them to mac80211 3975 * as appropriate. 3976 */ 3977 drop = true; 3978 break; 3979 } 3980 3981 return drop; 3982 } 3983 3984 static void ath11k_dp_rx_h_tkip_mic_err(struct ath11k *ar, struct sk_buff *msdu, 3985 struct ieee80211_rx_status *status) 3986 { 3987 u16 msdu_len; 3988 struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data; 3989 u8 l3pad_bytes; 3990 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); 3991 u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz; 3992 3993 rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(ar->ab, desc); 3994 rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(ar->ab, desc); 3995 3996 l3pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, desc); 3997 msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(ar->ab, desc); 3998 skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len); 3999 skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes); 4000 4001 ath11k_dp_rx_h_ppdu(ar, desc, status); 4002 4003 status->flag |= (RX_FLAG_MMIC_STRIPPED | RX_FLAG_MMIC_ERROR | 4004 RX_FLAG_DECRYPTED); 4005 4006 ath11k_dp_rx_h_undecap(ar, msdu, desc, 4007 HAL_ENCRYPT_TYPE_TKIP_MIC, status, false); 4008 } 4009 4010 static bool ath11k_dp_rx_h_rxdma_err(struct ath11k *ar, struct sk_buff *msdu, 4011 struct ieee80211_rx_status *status) 4012 { 4013 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); 4014 bool drop = false; 4015 4016 ar->ab->soc_stats.rxdma_error[rxcb->err_code]++; 4017 4018 switch (rxcb->err_code) { 4019 case HAL_REO_ENTR_RING_RXDMA_ECODE_TKIP_MIC_ERR: 4020 ath11k_dp_rx_h_tkip_mic_err(ar, msdu, status); 4021 break; 4022 default: 4023 /* TODO: Review other rxdma error codes to check if anything is 4024 * worth reporting to mac80211 4025 */ 4026 drop = true; 4027 break; 4028 } 4029 4030 return drop; 4031 } 4032 4033 static void ath11k_dp_rx_wbm_err(struct ath11k *ar, 4034 struct napi_struct *napi, 4035 struct
sk_buff *msdu, 4036 struct sk_buff_head *msdu_list) 4037 { 4038 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); 4039 struct ieee80211_rx_status rxs = {0}; 4040 bool drop = true; 4041 4042 switch (rxcb->err_rel_src) { 4043 case HAL_WBM_REL_SRC_MODULE_REO: 4044 drop = ath11k_dp_rx_h_reo_err(ar, msdu, &rxs, msdu_list); 4045 break; 4046 case HAL_WBM_REL_SRC_MODULE_RXDMA: 4047 drop = ath11k_dp_rx_h_rxdma_err(ar, msdu, &rxs); 4048 break; 4049 default: 4050 /* msdu will get freed */ 4051 break; 4052 } 4053 4054 if (drop) { 4055 dev_kfree_skb_any(msdu); 4056 return; 4057 } 4058 4059 ath11k_dp_rx_deliver_msdu(ar, napi, msdu, &rxs); 4060 } 4061 4062 int ath11k_dp_rx_process_wbm_err(struct ath11k_base *ab, 4063 struct napi_struct *napi, int budget) 4064 { 4065 struct ath11k *ar; 4066 struct ath11k_dp *dp = &ab->dp; 4067 struct dp_rxdma_ring *rx_ring; 4068 struct hal_rx_wbm_rel_info err_info; 4069 struct hal_srng *srng; 4070 struct sk_buff *msdu; 4071 struct sk_buff_head msdu_list[MAX_RADIOS]; 4072 struct ath11k_skb_rxcb *rxcb; 4073 u32 *rx_desc; 4074 int buf_id, mac_id; 4075 int num_buffs_reaped[MAX_RADIOS] = {0}; 4076 int total_num_buffs_reaped = 0; 4077 int ret, i; 4078 4079 for (i = 0; i < ab->num_radios; i++) 4080 __skb_queue_head_init(&msdu_list[i]); 4081 4082 srng = &ab->hal.srng_list[dp->rx_rel_ring.ring_id]; 4083 4084 spin_lock_bh(&srng->lock); 4085 4086 ath11k_hal_srng_access_begin(ab, srng); 4087 4088 while (budget) { 4089 rx_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng); 4090 if (!rx_desc) 4091 break; 4092 4093 ret = ath11k_hal_wbm_desc_parse_err(ab, rx_desc, &err_info); 4094 if (ret) { 4095 ath11k_warn(ab, 4096 "failed to parse rx error in wbm_rel ring desc %d\n", 4097 ret); 4098 continue; 4099 } 4100 4101 buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, err_info.cookie); 4102 mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, err_info.cookie); 4103 4104 ar = ab->pdevs[mac_id].ar; 4105 rx_ring = &ar->dp.rx_refill_buf_ring; 4106 4107 spin_lock_bh(&rx_ring->idr_lock); 4108 msdu = idr_find(&rx_ring->bufs_idr, buf_id); 4109 if (!msdu) { 4110 ath11k_warn(ab, "frame rx with invalid buf_id %d pdev %d\n", 4111 buf_id, mac_id); 4112 spin_unlock_bh(&rx_ring->idr_lock); 4113 continue; 4114 } 4115 4116 idr_remove(&rx_ring->bufs_idr, buf_id); 4117 spin_unlock_bh(&rx_ring->idr_lock); 4118 4119 rxcb = ATH11K_SKB_RXCB(msdu); 4120 dma_unmap_single(ab->dev, rxcb->paddr, 4121 msdu->len + skb_tailroom(msdu), 4122 DMA_FROM_DEVICE); 4123 4124 num_buffs_reaped[mac_id]++; 4125 total_num_buffs_reaped++; 4126 budget--; 4127 4128 if (err_info.push_reason != 4129 HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) { 4130 dev_kfree_skb_any(msdu); 4131 continue; 4132 } 4133 4134 rxcb->err_rel_src = err_info.err_rel_src; 4135 rxcb->err_code = err_info.err_code; 4136 rxcb->rx_desc = (struct hal_rx_desc *)msdu->data; 4137 __skb_queue_tail(&msdu_list[mac_id], msdu); 4138 } 4139 4140 ath11k_hal_srng_access_end(ab, srng); 4141 4142 spin_unlock_bh(&srng->lock); 4143 4144 if (!total_num_buffs_reaped) 4145 goto done; 4146 4147 for (i = 0; i < ab->num_radios; i++) { 4148 if (!num_buffs_reaped[i]) 4149 continue; 4150 4151 ar = ab->pdevs[i].ar; 4152 rx_ring = &ar->dp.rx_refill_buf_ring; 4153 4154 ath11k_dp_rxbufs_replenish(ab, i, rx_ring, num_buffs_reaped[i], 4155 ab->hw_params.hal_params->rx_buf_rbm); 4156 } 4157 4158 rcu_read_lock(); 4159 for (i = 0; i < ab->num_radios; i++) { 4160 if (!rcu_dereference(ab->pdevs_active[i])) { 4161 __skb_queue_purge(&msdu_list[i]); 4162 continue; 4163 } 4164 4165 ar = ab->pdevs[i].ar; 4166 4167 
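/* Frames reaped while the channel availability check (CAC) is running on this pdev must not be delivered; purge them */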
if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) { 4168 __skb_queue_purge(&msdu_list[i]); 4169 continue; 4170 } 4171 4172 while ((msdu = __skb_dequeue(&msdu_list[i])) != NULL) 4173 ath11k_dp_rx_wbm_err(ar, napi, msdu, &msdu_list[i]); 4174 } 4175 rcu_read_unlock(); 4176 done: 4177 return total_num_buffs_reaped; 4178 } 4179 4180 int ath11k_dp_process_rxdma_err(struct ath11k_base *ab, int mac_id, int budget) 4181 { 4182 struct ath11k *ar; 4183 struct dp_srng *err_ring; 4184 struct dp_rxdma_ring *rx_ring; 4185 struct dp_link_desc_bank *link_desc_banks = ab->dp.link_desc_banks; 4186 struct hal_srng *srng; 4187 u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC]; 4188 enum hal_rx_buf_return_buf_manager rbm; 4189 enum hal_reo_entr_rxdma_ecode rxdma_err_code; 4190 struct ath11k_skb_rxcb *rxcb; 4191 struct sk_buff *skb; 4192 struct hal_reo_entrance_ring *entr_ring; 4193 void *desc; 4194 int num_buf_freed = 0; 4195 int quota = budget; 4196 dma_addr_t paddr; 4197 u32 desc_bank; 4198 void *link_desc_va; 4199 int num_msdus; 4200 int i; 4201 int buf_id; 4202 4203 ar = ab->pdevs[ath11k_hw_mac_id_to_pdev_id(&ab->hw_params, mac_id)].ar; 4204 err_ring = &ar->dp.rxdma_err_dst_ring[ath11k_hw_mac_id_to_srng_id(&ab->hw_params, 4205 mac_id)]; 4206 rx_ring = &ar->dp.rx_refill_buf_ring; 4207 4208 srng = &ab->hal.srng_list[err_ring->ring_id]; 4209 4210 spin_lock_bh(&srng->lock); 4211 4212 ath11k_hal_srng_access_begin(ab, srng); 4213 4214 while (quota-- && 4215 (desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) { 4216 ath11k_hal_rx_reo_ent_paddr_get(ab, desc, &paddr, &desc_bank); 4217 4218 entr_ring = (struct hal_reo_entrance_ring *)desc; 4219 rxdma_err_code = 4220 FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE, 4221 entr_ring->info1); 4222 ab->soc_stats.rxdma_error[rxdma_err_code]++; 4223 4224 link_desc_va = link_desc_banks[desc_bank].vaddr + 4225 (paddr - link_desc_banks[desc_bank].paddr); 4226 ath11k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, 4227 msdu_cookies, &rbm); 4228 4229 for (i = 0; i < num_msdus; i++) { 4230 buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, 4231 msdu_cookies[i]); 4232 4233 spin_lock_bh(&rx_ring->idr_lock); 4234 skb = idr_find(&rx_ring->bufs_idr, buf_id); 4235 if (!skb) { 4236 ath11k_warn(ab, "rxdma error with invalid buf_id %d\n", 4237 buf_id); 4238 spin_unlock_bh(&rx_ring->idr_lock); 4239 continue; 4240 } 4241 4242 idr_remove(&rx_ring->bufs_idr, buf_id); 4243 spin_unlock_bh(&rx_ring->idr_lock); 4244 4245 rxcb = ATH11K_SKB_RXCB(skb); 4246 dma_unmap_single(ab->dev, rxcb->paddr, 4247 skb->len + skb_tailroom(skb), 4248 DMA_FROM_DEVICE); 4249 dev_kfree_skb_any(skb); 4250 4251 num_buf_freed++; 4252 } 4253 4254 ath11k_dp_rx_link_desc_return(ab, desc, 4255 HAL_WBM_REL_BM_ACT_PUT_IN_IDLE); 4256 } 4257 4258 ath11k_hal_srng_access_end(ab, srng); 4259 4260 spin_unlock_bh(&srng->lock); 4261 4262 if (num_buf_freed) 4263 ath11k_dp_rxbufs_replenish(ab, mac_id, rx_ring, num_buf_freed, 4264 ab->hw_params.hal_params->rx_buf_rbm); 4265 4266 return budget - quota; 4267 } 4268 4269 void ath11k_dp_process_reo_status(struct ath11k_base *ab) 4270 { 4271 struct ath11k_dp *dp = &ab->dp; 4272 struct hal_srng *srng; 4273 struct dp_reo_cmd *cmd, *tmp; 4274 bool found = false; 4275 u32 *reo_desc; 4276 u16 tag; 4277 struct hal_reo_status reo_status; 4278 4279 srng = &ab->hal.srng_list[dp->reo_status_ring.ring_id]; 4280 4281 memset(&reo_status, 0, sizeof(reo_status)); 4282 4283 spin_lock_bh(&srng->lock); 4284 4285 ath11k_hal_srng_access_begin(ab, srng); 4286 4287 while ((reo_desc = 
ath11k_hal_srng_dst_get_next_entry(ab, srng))) { 4288 tag = FIELD_GET(HAL_SRNG_TLV_HDR_TAG, *reo_desc); 4289 4290 switch (tag) { 4291 case HAL_REO_GET_QUEUE_STATS_STATUS: 4292 ath11k_hal_reo_status_queue_stats(ab, reo_desc, 4293 &reo_status); 4294 break; 4295 case HAL_REO_FLUSH_QUEUE_STATUS: 4296 ath11k_hal_reo_flush_queue_status(ab, reo_desc, 4297 &reo_status); 4298 break; 4299 case HAL_REO_FLUSH_CACHE_STATUS: 4300 ath11k_hal_reo_flush_cache_status(ab, reo_desc, 4301 &reo_status); 4302 break; 4303 case HAL_REO_UNBLOCK_CACHE_STATUS: 4304 ath11k_hal_reo_unblk_cache_status(ab, reo_desc, 4305 &reo_status); 4306 break; 4307 case HAL_REO_FLUSH_TIMEOUT_LIST_STATUS: 4308 ath11k_hal_reo_flush_timeout_list_status(ab, reo_desc, 4309 &reo_status); 4310 break; 4311 case HAL_REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS: 4312 ath11k_hal_reo_desc_thresh_reached_status(ab, reo_desc, 4313 &reo_status); 4314 break; 4315 case HAL_REO_UPDATE_RX_REO_QUEUE_STATUS: 4316 ath11k_hal_reo_update_rx_reo_queue_status(ab, reo_desc, 4317 &reo_status); 4318 break; 4319 default: 4320 ath11k_warn(ab, "Unknown reo status type %d\n", tag); 4321 continue; 4322 } 4323 4324 spin_lock_bh(&dp->reo_cmd_lock); 4325 list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) { 4326 if (reo_status.uniform_hdr.cmd_num == cmd->cmd_num) { 4327 found = true; 4328 list_del(&cmd->list); 4329 break; 4330 } 4331 } 4332 spin_unlock_bh(&dp->reo_cmd_lock); 4333 4334 if (found) { 4335 cmd->handler(dp, (void *)&cmd->data, 4336 reo_status.uniform_hdr.cmd_status); 4337 kfree(cmd); 4338 } 4339 4340 found = false; 4341 } 4342 4343 ath11k_hal_srng_access_end(ab, srng); 4344 4345 spin_unlock_bh(&srng->lock); 4346 } 4347 4348 void ath11k_dp_rx_pdev_free(struct ath11k_base *ab, int mac_id) 4349 { 4350 struct ath11k *ar = ab->pdevs[mac_id].ar; 4351 4352 ath11k_dp_rx_pdev_srng_free(ar); 4353 ath11k_dp_rxdma_pdev_buf_free(ar); 4354 } 4355 4356 int ath11k_dp_rx_pdev_alloc(struct ath11k_base *ab, int mac_id) 4357 { 4358 struct ath11k *ar = ab->pdevs[mac_id].ar; 4359 struct ath11k_pdev_dp *dp = &ar->dp; 4360 u32 ring_id; 4361 int i; 4362 int ret; 4363 4364 ret = ath11k_dp_rx_pdev_srng_alloc(ar); 4365 if (ret) { 4366 ath11k_warn(ab, "failed to setup rx srngs\n"); 4367 return ret; 4368 } 4369 4370 ret = ath11k_dp_rxdma_pdev_buf_setup(ar); 4371 if (ret) { 4372 ath11k_warn(ab, "failed to setup rxdma ring\n"); 4373 return ret; 4374 } 4375 4376 ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id; 4377 ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id, HAL_RXDMA_BUF); 4378 if (ret) { 4379 ath11k_warn(ab, "failed to configure rx_refill_buf_ring %d\n", 4380 ret); 4381 return ret; 4382 } 4383 4384 if (ab->hw_params.rx_mac_buf_ring) { 4385 for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) { 4386 ring_id = dp->rx_mac_buf_ring[i].ring_id; 4387 ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, 4388 mac_id + i, HAL_RXDMA_BUF); 4389 if (ret) { 4390 ath11k_warn(ab, "failed to configure rx_mac_buf_ring%d %d\n", 4391 i, ret); 4392 return ret; 4393 } 4394 } 4395 } 4396 4397 for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) { 4398 ring_id = dp->rxdma_err_dst_ring[i].ring_id; 4399 ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, 4400 mac_id + i, HAL_RXDMA_DST); 4401 if (ret) { 4402 ath11k_warn(ab, "failed to configure rxdma_err_dest_ring%d %d\n", 4403 i, ret); 4404 return ret; 4405 } 4406 } 4407 4408 if (!ab->hw_params.rxdma1_enable) 4409 goto config_refill_ring; 4410 4411 ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id; 4412 ret = ath11k_dp_tx_htt_srng_setup(ab, 
ring_id, 4413 mac_id, HAL_RXDMA_MONITOR_BUF); 4414 if (ret) { 4415 ath11k_warn(ab, "failed to configure rxdma_mon_buf_ring %d\n", 4416 ret); 4417 return ret; 4418 } 4419 ret = ath11k_dp_tx_htt_srng_setup(ab, 4420 dp->rxdma_mon_dst_ring.ring_id, 4421 mac_id, HAL_RXDMA_MONITOR_DST); 4422 if (ret) { 4423 ath11k_warn(ab, "failed to configure rxdma_mon_dst_ring %d\n", 4424 ret); 4425 return ret; 4426 } 4427 ret = ath11k_dp_tx_htt_srng_setup(ab, 4428 dp->rxdma_mon_desc_ring.ring_id, 4429 mac_id, HAL_RXDMA_MONITOR_DESC); 4430 if (ret) { 4431 ath11k_warn(ab, "failed to configure rxdma_mon_desc_ring %d\n", 4432 ret); 4433 return ret; 4434 } 4435 4436 config_refill_ring: 4437 for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) { 4438 ring_id = dp->rx_mon_status_refill_ring[i].refill_buf_ring.ring_id; 4439 ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id + i, 4440 HAL_RXDMA_MONITOR_STATUS); 4441 if (ret) { 4442 ath11k_warn(ab, 4443 "failed to configure mon_status_refill_ring%d %d\n", 4444 i, ret); 4445 return ret; 4446 } 4447 } 4448 4449 return 0; 4450 } 4451 4452 static void ath11k_dp_mon_set_frag_len(u32 *total_len, u32 *frag_len) 4453 { 4454 if (*total_len >= (DP_RX_BUFFER_SIZE - sizeof(struct hal_rx_desc))) { 4455 *frag_len = DP_RX_BUFFER_SIZE - sizeof(struct hal_rx_desc); 4456 *total_len -= *frag_len; 4457 } else { 4458 *frag_len = *total_len; 4459 *total_len = 0; 4460 } 4461 } 4462 4463 static 4464 int ath11k_dp_rx_monitor_link_desc_return(struct ath11k *ar, 4465 void *p_last_buf_addr_info, 4466 u8 mac_id) 4467 { 4468 struct ath11k_pdev_dp *dp = &ar->dp; 4469 struct dp_srng *dp_srng; 4470 void *hal_srng; 4471 void *src_srng_desc; 4472 int ret = 0; 4473 4474 if (ar->ab->hw_params.rxdma1_enable) { 4475 dp_srng = &dp->rxdma_mon_desc_ring; 4476 hal_srng = &ar->ab->hal.srng_list[dp_srng->ring_id]; 4477 } else { 4478 dp_srng = &ar->ab->dp.wbm_desc_rel_ring; 4479 hal_srng = &ar->ab->hal.srng_list[dp_srng->ring_id]; 4480 } 4481 4482 ath11k_hal_srng_access_begin(ar->ab, hal_srng); 4483 4484 src_srng_desc = ath11k_hal_srng_src_get_next_entry(ar->ab, hal_srng); 4485 4486 if (src_srng_desc) { 4487 struct ath11k_buffer_addr *src_desc = 4488 (struct ath11k_buffer_addr *)src_srng_desc; 4489 4490 *src_desc = *((struct ath11k_buffer_addr *)p_last_buf_addr_info); 4491 } else { 4492 ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 4493 "Monitor Link Desc Ring %d Full", mac_id); 4494 ret = -ENOMEM; 4495 } 4496 4497 ath11k_hal_srng_access_end(ar->ab, hal_srng); 4498 return ret; 4499 } 4500 4501 static 4502 void ath11k_dp_rx_mon_next_link_desc_get(void *rx_msdu_link_desc, 4503 dma_addr_t *paddr, u32 *sw_cookie, 4504 u8 *rbm, 4505 void **pp_buf_addr_info) 4506 { 4507 struct hal_rx_msdu_link *msdu_link = 4508 (struct hal_rx_msdu_link *)rx_msdu_link_desc; 4509 struct ath11k_buffer_addr *buf_addr_info; 4510 4511 buf_addr_info = (struct ath11k_buffer_addr *)&msdu_link->buf_addr_info; 4512 4513 ath11k_hal_rx_buf_addr_info_get(buf_addr_info, paddr, sw_cookie, rbm); 4514 4515 *pp_buf_addr_info = (void *)buf_addr_info; 4516 } 4517 4518 static int ath11k_dp_pkt_set_pktlen(struct sk_buff *skb, u32 len) 4519 { 4520 if (skb->len > len) { 4521 skb_trim(skb, len); 4522 } else { 4523 if (skb_tailroom(skb) < len - skb->len) { 4524 if ((pskb_expand_head(skb, 0, 4525 len - skb->len - skb_tailroom(skb), 4526 GFP_ATOMIC))) { 4527 dev_kfree_skb_any(skb); 4528 return -ENOMEM; 4529 } 4530 } 4531 skb_put(skb, (len - skb->len)); 4532 } 4533 return 0; 4534 } 4535 4536 static void ath11k_hal_rx_msdu_list_get(struct ath11k *ar, 4537 void
*msdu_link_desc, 4538 struct hal_rx_msdu_list *msdu_list, 4539 u16 *num_msdus) 4540 { 4541 struct hal_rx_msdu_details *msdu_details = NULL; 4542 struct rx_msdu_desc *msdu_desc_info = NULL; 4543 struct hal_rx_msdu_link *msdu_link = NULL; 4544 int i; 4545 u32 last = FIELD_PREP(RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU, 1); 4546 u32 first = FIELD_PREP(RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU, 1); 4547 u8 tmp = 0; 4548 4549 msdu_link = (struct hal_rx_msdu_link *)msdu_link_desc; 4550 msdu_details = &msdu_link->msdu_link[0]; 4551 4552 for (i = 0; i < HAL_RX_NUM_MSDU_DESC; i++) { 4553 if (FIELD_GET(BUFFER_ADDR_INFO0_ADDR, 4554 msdu_details[i].buf_addr_info.info0) == 0) { 4555 msdu_desc_info = &msdu_details[i - 1].rx_msdu_info; 4556 msdu_desc_info->info0 |= last; 4557 4558 break; 4559 } 4560 msdu_desc_info = &msdu_details[i].rx_msdu_info; 4561 4562 if (!i) 4563 msdu_desc_info->info0 |= first; 4564 else if (i == (HAL_RX_NUM_MSDU_DESC - 1)) 4565 msdu_desc_info->info0 |= last; 4566 msdu_list->msdu_info[i].msdu_flags = msdu_desc_info->info0; 4567 msdu_list->msdu_info[i].msdu_len = 4568 HAL_RX_MSDU_PKT_LENGTH_GET(msdu_desc_info->info0); 4569 msdu_list->sw_cookie[i] = 4570 FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE, 4571 msdu_details[i].buf_addr_info.info1); 4572 tmp = FIELD_GET(BUFFER_ADDR_INFO1_RET_BUF_MGR, 4573 msdu_details[i].buf_addr_info.info1); 4574 msdu_list->rbm[i] = tmp; 4575 } 4576 *num_msdus = i; 4577 } 4578 4579 static u32 ath11k_dp_rx_mon_comp_ppduid(u32 msdu_ppdu_id, u32 *ppdu_id, 4580 u32 *rx_bufs_used) 4581 { 4582 u32 ret = 0; 4583 4584 if ((*ppdu_id < msdu_ppdu_id) && 4585 ((msdu_ppdu_id - *ppdu_id) < DP_NOT_PPDU_ID_WRAP_AROUND)) { 4586 *ppdu_id = msdu_ppdu_id; 4587 ret = msdu_ppdu_id; 4588 } else if ((*ppdu_id > msdu_ppdu_id) && 4589 ((*ppdu_id - msdu_ppdu_id) > DP_NOT_PPDU_ID_WRAP_AROUND)) { 4590 /* mon_dst is lagging behind mon_status, 4591 * so skip this dst_ring entry and free it 4592 */ 4593 *rx_bufs_used += 1; 4594 *ppdu_id = msdu_ppdu_id; 4595 ret = msdu_ppdu_id; 4596 } 4597 return ret; 4598 } 4599 4600 static void ath11k_dp_mon_get_buf_len(struct hal_rx_msdu_desc_info *info, 4601 bool *is_frag, u32 *total_len, 4602 u32 *frag_len, u32 *msdu_cnt) 4603 { 4604 if (info->msdu_flags & RX_MSDU_DESC_INFO0_MSDU_CONTINUATION) { 4605 if (!*is_frag) { 4606 *total_len = info->msdu_len; 4607 *is_frag = true; 4608 } 4609 ath11k_dp_mon_set_frag_len(total_len, 4610 frag_len); 4611 } else { 4612 if (*is_frag) { 4613 ath11k_dp_mon_set_frag_len(total_len, 4614 frag_len); 4615 } else { 4616 *frag_len = info->msdu_len; 4617 } 4618 *is_frag = false; 4619 *msdu_cnt -= 1; 4620 } 4621 } 4622 4623 static u32 4624 ath11k_dp_rx_mon_mpdu_pop(struct ath11k *ar, int mac_id, 4625 void *ring_entry, struct sk_buff **head_msdu, 4626 struct sk_buff **tail_msdu, u32 *npackets, 4627 u32 *ppdu_id) 4628 { 4629 struct ath11k_pdev_dp *dp = &ar->dp; 4630 struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data; 4631 struct dp_rxdma_ring *rx_ring = &dp->rxdma_mon_buf_ring; 4632 struct sk_buff *msdu = NULL, *last = NULL; 4633 struct hal_rx_msdu_list msdu_list; 4634 void *p_buf_addr_info, *p_last_buf_addr_info; 4635 struct hal_rx_desc *rx_desc; 4636 void *rx_msdu_link_desc; 4637 dma_addr_t paddr; 4638 u16 num_msdus = 0; 4639 u32 rx_buf_size, rx_pkt_offset, sw_cookie; 4640 u32 rx_bufs_used = 0, i = 0; 4641 u32 msdu_ppdu_id = 0, msdu_cnt = 0; 4642 u32 total_len = 0, frag_len = 0; 4643 bool is_frag, is_first_msdu; 4644 bool drop_mpdu = false; 4645 struct ath11k_skb_rxcb *rxcb; 4646 struct hal_reo_entrance_ring *ent_desc = 4647 (struct
hal_reo_entrance_ring *)ring_entry; 4648 int buf_id; 4649 u32 rx_link_buf_info[2]; 4650 u8 rbm; 4651 4652 if (!ar->ab->hw_params.rxdma1_enable) 4653 rx_ring = &dp->rx_refill_buf_ring; 4654 4655 ath11k_hal_rx_reo_ent_buf_paddr_get(ring_entry, &paddr, 4656 &sw_cookie, 4657 &p_last_buf_addr_info, &rbm, 4658 &msdu_cnt); 4659 4660 if (FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_PUSH_REASON, 4661 ent_desc->info1) == 4662 HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) { 4663 u8 rxdma_err = 4664 FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE, 4665 ent_desc->info1); 4666 if (rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_FLUSH_REQUEST_ERR || 4667 rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_MPDU_LEN_ERR || 4668 rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_OVERFLOW_ERR) { 4669 drop_mpdu = true; 4670 pmon->rx_mon_stats.dest_mpdu_drop++; 4671 } 4672 } 4673 4674 is_frag = false; 4675 is_first_msdu = true; 4676 4677 do { 4678 if (pmon->mon_last_linkdesc_paddr == paddr) { 4679 pmon->rx_mon_stats.dup_mon_linkdesc_cnt++; 4680 return rx_bufs_used; 4681 } 4682 4683 if (ar->ab->hw_params.rxdma1_enable) 4684 rx_msdu_link_desc = 4685 (void *)pmon->link_desc_banks[sw_cookie].vaddr + 4686 (paddr - pmon->link_desc_banks[sw_cookie].paddr); 4687 else 4688 rx_msdu_link_desc = 4689 (void *)ar->ab->dp.link_desc_banks[sw_cookie].vaddr + 4690 (paddr - ar->ab->dp.link_desc_banks[sw_cookie].paddr); 4691 4692 ath11k_hal_rx_msdu_list_get(ar, rx_msdu_link_desc, &msdu_list, 4693 &num_msdus); 4694 4695 for (i = 0; i < num_msdus; i++) { 4696 u32 l2_hdr_offset; 4697 4698 if (pmon->mon_last_buf_cookie == msdu_list.sw_cookie[i]) { 4699 ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 4700 "i %d last_cookie %d is same\n", 4701 i, pmon->mon_last_buf_cookie); 4702 drop_mpdu = true; 4703 pmon->rx_mon_stats.dup_mon_buf_cnt++; 4704 continue; 4705 } 4706 buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, 4707 msdu_list.sw_cookie[i]); 4708 4709 spin_lock_bh(&rx_ring->idr_lock); 4710 msdu = idr_find(&rx_ring->bufs_idr, buf_id); 4711 spin_unlock_bh(&rx_ring->idr_lock); 4712 if (!msdu) { 4713 ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 4714 "msdu_pop: invalid buf_id %d\n", buf_id); 4715 break; 4716 } 4717 rxcb = ATH11K_SKB_RXCB(msdu); 4718 if (!rxcb->unmapped) { 4719 dma_unmap_single(ar->ab->dev, rxcb->paddr, 4720 msdu->len + 4721 skb_tailroom(msdu), 4722 DMA_FROM_DEVICE); 4723 rxcb->unmapped = 1; 4724 } 4725 if (drop_mpdu) { 4726 ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 4727 "i %d drop msdu %p *ppdu_id %x\n", 4728 i, msdu, *ppdu_id); 4729 dev_kfree_skb_any(msdu); 4730 msdu = NULL; 4731 goto next_msdu; 4732 } 4733 4734 rx_desc = (struct hal_rx_desc *)msdu->data; 4735 4736 rx_pkt_offset = sizeof(struct hal_rx_desc); 4737 l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, rx_desc); 4738 4739 if (is_first_msdu) { 4740 if (!ath11k_dp_rxdesc_mpdu_valid(ar->ab, rx_desc)) { 4741 drop_mpdu = true; 4742 dev_kfree_skb_any(msdu); 4743 msdu = NULL; 4744 pmon->mon_last_linkdesc_paddr = paddr; 4745 goto next_msdu; 4746 } 4747 4748 msdu_ppdu_id = 4749 ath11k_dp_rxdesc_get_ppduid(ar->ab, rx_desc); 4750 4751 if (ath11k_dp_rx_mon_comp_ppduid(msdu_ppdu_id, 4752 ppdu_id, 4753 &rx_bufs_used)) { 4754 if (rx_bufs_used) { 4755 drop_mpdu = true; 4756 dev_kfree_skb_any(msdu); 4757 msdu = NULL; 4758 goto next_msdu; 4759 } 4760 return rx_bufs_used; 4761 } 4762 pmon->mon_last_linkdesc_paddr = paddr; 4763 is_first_msdu = false; 4764 } 4765 ath11k_dp_mon_get_buf_len(&msdu_list.msdu_info[i], 4766 &is_frag, &total_len, 4767 &frag_len, &msdu_cnt); 4768 rx_buf_size = rx_pkt_offset + l2_hdr_offset + frag_len; 4769 
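/* Resize the skb so it covers exactly the rx descriptor, the L3 pad bytes and this fragment's share of the payload */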
4770 ath11k_dp_pkt_set_pktlen(msdu, rx_buf_size); 4771 4772 if (!(*head_msdu)) 4773 *head_msdu = msdu; 4774 else if (last) 4775 last->next = msdu; 4776 4777 last = msdu; 4778 next_msdu: 4779 pmon->mon_last_buf_cookie = msdu_list.sw_cookie[i]; 4780 rx_bufs_used++; 4781 spin_lock_bh(&rx_ring->idr_lock); 4782 idr_remove(&rx_ring->bufs_idr, buf_id); 4783 spin_unlock_bh(&rx_ring->idr_lock); 4784 } 4785 4786 ath11k_hal_rx_buf_addr_info_set(rx_link_buf_info, paddr, sw_cookie, rbm); 4787 4788 ath11k_dp_rx_mon_next_link_desc_get(rx_msdu_link_desc, &paddr, 4789 &sw_cookie, &rbm, 4790 &p_buf_addr_info); 4791 4792 if (ar->ab->hw_params.rxdma1_enable) { 4793 if (ath11k_dp_rx_monitor_link_desc_return(ar, 4794 p_last_buf_addr_info, 4795 dp->mac_id)) 4796 ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 4797 "dp_rx_monitor_link_desc_return failed"); 4798 } else { 4799 ath11k_dp_rx_link_desc_return(ar->ab, rx_link_buf_info, 4800 HAL_WBM_REL_BM_ACT_PUT_IN_IDLE); 4801 } 4802 4803 p_last_buf_addr_info = p_buf_addr_info; 4804 4805 } while (paddr && msdu_cnt); 4806 4807 if (last) 4808 last->next = NULL; 4809 4810 *tail_msdu = msdu; 4811 4812 if (msdu_cnt == 0) 4813 *npackets = 1; 4814 4815 return rx_bufs_used; 4816 } 4817 4818 static void ath11k_dp_rx_msdus_set_payload(struct ath11k *ar, struct sk_buff *msdu) 4819 { 4820 u32 rx_pkt_offset, l2_hdr_offset; 4821 4822 rx_pkt_offset = ar->ab->hw_params.hal_desc_sz; 4823 l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, 4824 (struct hal_rx_desc *)msdu->data); 4825 skb_pull(msdu, rx_pkt_offset + l2_hdr_offset); 4826 } 4827 4828 static struct sk_buff * 4829 ath11k_dp_rx_mon_merg_msdus(struct ath11k *ar, 4830 u32 mac_id, struct sk_buff *head_msdu, 4831 struct sk_buff *last_msdu, 4832 struct ieee80211_rx_status *rxs) 4833 { 4834 struct ath11k_base *ab = ar->ab; 4835 struct sk_buff *msdu, *prev_buf; 4836 u32 wifi_hdr_len; 4837 struct hal_rx_desc *rx_desc; 4838 char *hdr_desc; 4839 u8 *dest, decap_format; 4840 struct ieee80211_hdr_3addr *wh; 4841 struct rx_attention *rx_attention; 4842 4843 if (!head_msdu) 4844 goto err_merge_fail; 4845 4846 rx_desc = (struct hal_rx_desc *)head_msdu->data; 4847 rx_attention = ath11k_dp_rx_get_attention(ab, rx_desc); 4848 4849 if (ath11k_dp_rxdesc_get_mpdulen_err(rx_attention)) 4850 return NULL; 4851 4852 decap_format = ath11k_dp_rx_h_msdu_start_decap_type(ab, rx_desc); 4853 4854 ath11k_dp_rx_h_ppdu(ar, rx_desc, rxs); 4855 4856 if (decap_format == DP_RX_DECAP_TYPE_RAW) { 4857 ath11k_dp_rx_msdus_set_payload(ar, head_msdu); 4858 4859 prev_buf = head_msdu; 4860 msdu = head_msdu->next; 4861 4862 while (msdu) { 4863 ath11k_dp_rx_msdus_set_payload(ar, msdu); 4864 4865 prev_buf = msdu; 4866 msdu = msdu->next; 4867 } 4868 4869 prev_buf->next = NULL; 4870 4871 skb_trim(prev_buf, prev_buf->len - HAL_RX_FCS_LEN); 4872 } else if (decap_format == DP_RX_DECAP_TYPE_NATIVE_WIFI) { 4873 __le16 qos_field; 4874 u8 qos_pkt = 0; 4875 4876 rx_desc = (struct hal_rx_desc *)head_msdu->data; 4877 hdr_desc = ath11k_dp_rxdesc_get_80211hdr(ab, rx_desc); 4878 4879 /* Base size */ 4880 wifi_hdr_len = sizeof(struct ieee80211_hdr_3addr); 4881 wh = (struct ieee80211_hdr_3addr *)hdr_desc; 4882 4883 if (ieee80211_is_data_qos(wh->frame_control)) { 4884 struct ieee80211_qos_hdr *qwh = 4885 (struct ieee80211_qos_hdr *)hdr_desc; 4886 4887 qos_field = qwh->qos_ctrl; 4888 qos_pkt = 1; 4889 } 4890 msdu = head_msdu; 4891 4892 while (msdu) { 4893 rx_desc = (struct hal_rx_desc *)msdu->data; 4894 hdr_desc = ath11k_dp_rxdesc_get_80211hdr(ab, rx_desc); 4895 4896 if (qos_pkt) { 4897 dest = 
skb_push(msdu, sizeof(__le16)); 4898 if (!dest) 4899 goto err_merge_fail; 4900 memcpy(dest, hdr_desc, wifi_hdr_len); 4901 memcpy(dest + wifi_hdr_len, 4902 (u8 *)&qos_field, sizeof(__le16)); 4903 } 4904 ath11k_dp_rx_msdus_set_payload(ar, msdu); 4905 prev_buf = msdu; 4906 msdu = msdu->next; 4907 } 4908 dest = skb_put(prev_buf, HAL_RX_FCS_LEN); 4909 if (!dest) 4910 goto err_merge_fail; 4911 4912 ath11k_dbg(ab, ATH11K_DBG_DATA, 4913 "mpdu_buf %pK mpdu_buf->len %u", 4914 prev_buf, prev_buf->len); 4915 } else { 4916 ath11k_dbg(ab, ATH11K_DBG_DATA, 4917 "decap format %d is not supported!\n", 4918 decap_format); 4919 goto err_merge_fail; 4920 } 4921 4922 return head_msdu; 4923 4924 err_merge_fail: 4925 return NULL; 4926 } 4927 4928 static int ath11k_dp_rx_mon_deliver(struct ath11k *ar, u32 mac_id, 4929 struct sk_buff *head_msdu, 4930 struct sk_buff *tail_msdu, 4931 struct napi_struct *napi) 4932 { 4933 struct ath11k_pdev_dp *dp = &ar->dp; 4934 struct sk_buff *mon_skb, *skb_next, *header; 4935 struct ieee80211_rx_status *rxs = &dp->rx_status; 4936 4937 mon_skb = ath11k_dp_rx_mon_merg_msdus(ar, mac_id, head_msdu, 4938 tail_msdu, rxs); 4939 4940 if (!mon_skb) 4941 goto mon_deliver_fail; 4942 4943 header = mon_skb; 4944 4945 rxs->flag = 0; 4946 do { 4947 skb_next = mon_skb->next; 4948 if (!skb_next) 4949 rxs->flag &= ~RX_FLAG_AMSDU_MORE; 4950 else 4951 rxs->flag |= RX_FLAG_AMSDU_MORE; 4952 4953 if (mon_skb == header) { 4954 header = NULL; 4955 rxs->flag &= ~RX_FLAG_ALLOW_SAME_PN; 4956 } else { 4957 rxs->flag |= RX_FLAG_ALLOW_SAME_PN; 4958 } 4959 rxs->flag |= RX_FLAG_ONLY_MONITOR; 4960 4961 ath11k_dp_rx_deliver_msdu(ar, napi, mon_skb, rxs); 4962 mon_skb = skb_next; 4963 } while (mon_skb); 4964 rxs->flag = 0; 4965 4966 return 0; 4967 4968 mon_deliver_fail: 4969 mon_skb = head_msdu; 4970 while (mon_skb) { 4971 skb_next = mon_skb->next; 4972 dev_kfree_skb_any(mon_skb); 4973 mon_skb = skb_next; 4974 } 4975 return -EINVAL; 4976 } 4977 4978 static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, int mac_id, 4979 u32 quota, struct napi_struct *napi) 4980 { 4981 struct ath11k_pdev_dp *dp = &ar->dp; 4982 struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data; 4983 const struct ath11k_hw_hal_params *hal_params; 4984 void *ring_entry; 4985 void *mon_dst_srng; 4986 u32 ppdu_id; 4987 u32 rx_bufs_used; 4988 u32 ring_id; 4989 struct ath11k_pdev_mon_stats *rx_mon_stats; 4990 u32 npackets = 0; 4991 4992 if (ar->ab->hw_params.rxdma1_enable) 4993 ring_id = dp->rxdma_mon_dst_ring.ring_id; 4994 else 4995 ring_id = dp->rxdma_err_dst_ring[mac_id].ring_id; 4996 4997 mon_dst_srng = &ar->ab->hal.srng_list[ring_id]; 4998 4999 if (!mon_dst_srng) { 5000 ath11k_warn(ar->ab, 5001 "HAL Monitor Destination Ring Init Failed -- %pK", 5002 mon_dst_srng); 5003 return; 5004 } 5005 5006 spin_lock_bh(&pmon->mon_lock); 5007 5008 ath11k_hal_srng_access_begin(ar->ab, mon_dst_srng); 5009 5010 ppdu_id = pmon->mon_ppdu_info.ppdu_id; 5011 rx_bufs_used = 0; 5012 rx_mon_stats = &pmon->rx_mon_stats; 5013 5014 while ((ring_entry = ath11k_hal_srng_dst_peek(ar->ab, mon_dst_srng))) { 5015 struct sk_buff *head_msdu, *tail_msdu; 5016 5017 head_msdu = NULL; 5018 tail_msdu = NULL; 5019 5020 rx_bufs_used += ath11k_dp_rx_mon_mpdu_pop(ar, mac_id, ring_entry, 5021 &head_msdu, 5022 &tail_msdu, 5023 &npackets, &ppdu_id); 5024 5025 if (ppdu_id != pmon->mon_ppdu_info.ppdu_id) { 5026 pmon->mon_ppdu_status = DP_PPDU_STATUS_START; 5027 ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 5028 "dest_rx: new ppdu_id %x != status ppdu_id %x", 5029 ppdu_id, 
pmon->mon_ppdu_info.ppdu_id); 5030 break; 5031 } 5032 if (head_msdu && tail_msdu) { 5033 ath11k_dp_rx_mon_deliver(ar, dp->mac_id, head_msdu, 5034 tail_msdu, napi); 5035 rx_mon_stats->dest_mpdu_done++; 5036 } 5037 5038 ring_entry = ath11k_hal_srng_dst_get_next_entry(ar->ab, 5039 mon_dst_srng); 5040 } 5041 ath11k_hal_srng_access_end(ar->ab, mon_dst_srng); 5042 5043 spin_unlock_bh(&pmon->mon_lock); 5044 5045 if (rx_bufs_used) { 5046 rx_mon_stats->dest_ppdu_done++; 5047 hal_params = ar->ab->hw_params.hal_params; 5048 5049 if (ar->ab->hw_params.rxdma1_enable) 5050 ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id, 5051 &dp->rxdma_mon_buf_ring, 5052 rx_bufs_used, 5053 hal_params->rx_buf_rbm); 5054 else 5055 ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id, 5056 &dp->rx_refill_buf_ring, 5057 rx_bufs_used, 5058 hal_params->rx_buf_rbm); 5059 } 5060 } 5061 5062 static void ath11k_dp_rx_mon_status_process_tlv(struct ath11k *ar, 5063 int mac_id, u32 quota, 5064 struct napi_struct *napi) 5065 { 5066 struct ath11k_pdev_dp *dp = &ar->dp; 5067 struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data; 5068 struct hal_rx_mon_ppdu_info *ppdu_info; 5069 struct sk_buff *status_skb; 5070 u32 tlv_status = HAL_TLV_STATUS_BUF_DONE; 5071 struct ath11k_pdev_mon_stats *rx_mon_stats; 5072 5073 ppdu_info = &pmon->mon_ppdu_info; 5074 rx_mon_stats = &pmon->rx_mon_stats; 5075 5076 if (pmon->mon_ppdu_status != DP_PPDU_STATUS_START) 5077 return; 5078 5079 while (!skb_queue_empty(&pmon->rx_status_q)) { 5080 status_skb = skb_dequeue(&pmon->rx_status_q); 5081 5082 tlv_status = ath11k_hal_rx_parse_mon_status(ar->ab, ppdu_info, 5083 status_skb); 5084 if (tlv_status == HAL_TLV_STATUS_PPDU_DONE) { 5085 rx_mon_stats->status_ppdu_done++; 5086 pmon->mon_ppdu_status = DP_PPDU_STATUS_DONE; 5087 ath11k_dp_rx_mon_dest_process(ar, mac_id, quota, napi); 5088 pmon->mon_ppdu_status = DP_PPDU_STATUS_START; 5089 } 5090 dev_kfree_skb_any(status_skb); 5091 } 5092 } 5093 5094 static int ath11k_dp_mon_process_rx(struct ath11k_base *ab, int mac_id, 5095 struct napi_struct *napi, int budget) 5096 { 5097 struct ath11k *ar = ath11k_ab_to_ar(ab, mac_id); 5098 struct ath11k_pdev_dp *dp = &ar->dp; 5099 struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data; 5100 int num_buffs_reaped = 0; 5101 5102 num_buffs_reaped = ath11k_dp_rx_reap_mon_status_ring(ar->ab, mac_id, &budget, 5103 &pmon->rx_status_q); 5104 if (num_buffs_reaped) 5105 ath11k_dp_rx_mon_status_process_tlv(ar, mac_id, budget, napi); 5106 5107 return num_buffs_reaped; 5108 } 5109 5110 int ath11k_dp_rx_process_mon_rings(struct ath11k_base *ab, int mac_id, 5111 struct napi_struct *napi, int budget) 5112 { 5113 struct ath11k *ar = ath11k_ab_to_ar(ab, mac_id); 5114 int ret = 0; 5115 5116 if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags)) 5117 ret = ath11k_dp_mon_process_rx(ab, mac_id, napi, budget); 5118 else 5119 ret = ath11k_dp_rx_process_mon_status(ab, mac_id, napi, budget); 5120 return ret; 5121 } 5122 5123 static int ath11k_dp_rx_pdev_mon_status_attach(struct ath11k *ar) 5124 { 5125 struct ath11k_pdev_dp *dp = &ar->dp; 5126 struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data; 5127 5128 skb_queue_head_init(&pmon->rx_status_q); 5129 5130 pmon->mon_ppdu_status = DP_PPDU_STATUS_START; 5131 5132 memset(&pmon->rx_mon_stats, 0, 5133 sizeof(pmon->rx_mon_stats)); 5134 return 0; 5135 } 5136 5137 int ath11k_dp_rx_pdev_mon_attach(struct ath11k *ar) 5138 { 5139 struct ath11k_pdev_dp *dp = &ar->dp; 5140 struct ath11k_mon_data *pmon = &dp->mon_data; 5141 
struct hal_srng *mon_desc_srng = NULL; 5142 struct dp_srng *dp_srng; 5143 int ret = 0; 5144 u32 n_link_desc = 0; 5145 5146 ret = ath11k_dp_rx_pdev_mon_status_attach(ar); 5147 if (ret) { 5148 ath11k_warn(ar->ab, "pdev_mon_status_attach() failed"); 5149 return ret; 5150 } 5151 5152 /* if rxdma1_enable is false, no need to setup 5153 * rxdma_mon_desc_ring. 5154 */ 5155 if (!ar->ab->hw_params.rxdma1_enable) 5156 return 0; 5157 5158 dp_srng = &dp->rxdma_mon_desc_ring; 5159 n_link_desc = dp_srng->size / 5160 ath11k_hal_srng_get_entrysize(ar->ab, HAL_RXDMA_MONITOR_DESC); 5161 mon_desc_srng = 5162 &ar->ab->hal.srng_list[dp->rxdma_mon_desc_ring.ring_id]; 5163 5164 ret = ath11k_dp_link_desc_setup(ar->ab, pmon->link_desc_banks, 5165 HAL_RXDMA_MONITOR_DESC, mon_desc_srng, 5166 n_link_desc); 5167 if (ret) { 5168 ath11k_warn(ar->ab, "mon_link_desc_pool_setup() failed"); 5169 return ret; 5170 } 5171 pmon->mon_last_linkdesc_paddr = 0; 5172 pmon->mon_last_buf_cookie = DP_RX_DESC_COOKIE_MAX + 1; 5173 spin_lock_init(&pmon->mon_lock); 5174 5175 return 0; 5176 } 5177 5178 static int ath11k_dp_mon_link_free(struct ath11k *ar) 5179 { 5180 struct ath11k_pdev_dp *dp = &ar->dp; 5181 struct ath11k_mon_data *pmon = &dp->mon_data; 5182 5183 ath11k_dp_link_desc_cleanup(ar->ab, pmon->link_desc_banks, 5184 HAL_RXDMA_MONITOR_DESC, 5185 &dp->rxdma_mon_desc_ring); 5186 return 0; 5187 } 5188 5189 int ath11k_dp_rx_pdev_mon_detach(struct ath11k *ar) 5190 { 5191 ath11k_dp_mon_link_free(ar); 5192 return 0; 5193 } 5194 5195 int ath11k_dp_rx_pktlog_start(struct ath11k_base *ab) 5196 { 5197 /* start reap timer */ 5198 mod_timer(&ab->mon_reap_timer, 5199 jiffies + msecs_to_jiffies(ATH11K_MON_TIMER_INTERVAL)); 5200 5201 return 0; 5202 } 5203 5204 int ath11k_dp_rx_pktlog_stop(struct ath11k_base *ab, bool stop_timer) 5205 { 5206 int ret; 5207 5208 if (stop_timer) 5209 del_timer_sync(&ab->mon_reap_timer); 5210 5211 /* reap all the monitor related rings */ 5212 ret = ath11k_dp_purge_mon_ring(ab); 5213 if (ret) { 5214 ath11k_warn(ab, "failed to purge dp mon ring: %d\n", ret); 5215 return ret; 5216 } 5217 5218 return 0; 5219 } 5220