// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
 */

#include <linux/ieee80211.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <crypto/hash.h>
#include "core.h"
#include "debug.h"
#include "debugfs_htt_stats.h"
#include "debugfs_sta.h"
#include "hal_desc.h"
#include "hw.h"
#include "dp_rx.h"
#include "hal_rx.h"
#include "dp_tx.h"
#include "peer.h"

#define ATH11K_DP_RX_FRAGMENT_TIMEOUT_MS (2 * HZ)

static inline
u8 *ath11k_dp_rx_h_80211_hdr(struct ath11k_base *ab, struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_hdr_status(desc);
}

static inline
enum hal_encrypt_type ath11k_dp_rx_h_mpdu_start_enctype(struct ath11k_base *ab,
							struct hal_rx_desc *desc)
{
	if (!ab->hw_params.hw_ops->rx_desc_encrypt_valid(desc))
		return HAL_ENCRYPT_TYPE_OPEN;

	return ab->hw_params.hw_ops->rx_desc_get_encrypt_type(desc);
}

static inline u8 ath11k_dp_rx_h_msdu_start_decap_type(struct ath11k_base *ab,
						      struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_decap_type(desc);
}

static inline
bool ath11k_dp_rx_h_msdu_start_ldpc_support(struct ath11k_base *ab,
					    struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_ldpc_support(desc);
}

static inline
u8 ath11k_dp_rx_h_msdu_start_mesh_ctl_present(struct ath11k_base *ab,
					      struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_mesh_ctl(desc);
}

static inline
bool ath11k_dp_rx_h_mpdu_start_seq_ctrl_valid(struct ath11k_base *ab,
					      struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_mpdu_seq_ctl_vld(desc);
}

static inline bool ath11k_dp_rx_h_mpdu_start_fc_valid(struct ath11k_base *ab,
						      struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_mpdu_fc_valid(desc);
}

static inline bool ath11k_dp_rx_h_mpdu_start_more_frags(struct ath11k_base *ab,
							struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;

	hdr = (struct ieee80211_hdr *)(skb->data + ab->hw_params.hal_desc_sz);
	return ieee80211_has_morefrags(hdr->frame_control);
}

static inline u16 ath11k_dp_rx_h_mpdu_start_frag_no(struct ath11k_base *ab,
						    struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;

	hdr = (struct ieee80211_hdr *)(skb->data + ab->hw_params.hal_desc_sz);
	return le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
}

static inline u16 ath11k_dp_rx_h_mpdu_start_seq_no(struct ath11k_base *ab,
						   struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_mpdu_start_seq_no(desc);
}

static inline void *ath11k_dp_rx_get_attention(struct ath11k_base *ab,
					       struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_attention(desc);
}

static inline bool ath11k_dp_rx_h_attn_msdu_done(struct rx_attention *attn)
{
	return !!FIELD_GET(RX_ATTENTION_INFO2_MSDU_DONE,
			   __le32_to_cpu(attn->info2));
}

static inline bool ath11k_dp_rx_h_attn_l4_cksum_fail(struct rx_attention *attn)
{
	return !!FIELD_GET(RX_ATTENTION_INFO1_TCP_UDP_CKSUM_FAIL,
			   __le32_to_cpu(attn->info1));
}

static inline bool ath11k_dp_rx_h_attn_ip_cksum_fail(struct rx_attention *attn)
{
	return !!FIELD_GET(RX_ATTENTION_INFO1_IP_CKSUM_FAIL,
			   __le32_to_cpu(attn->info1));
}

static inline bool ath11k_dp_rx_h_attn_is_decrypted(struct rx_attention
						    *attn)
{
	return (FIELD_GET(RX_ATTENTION_INFO2_DCRYPT_STATUS_CODE,
			  __le32_to_cpu(attn->info2)) ==
		RX_DESC_DECRYPT_STATUS_CODE_OK);
}

static u32 ath11k_dp_rx_h_attn_mpdu_err(struct rx_attention *attn)
{
	u32 info = __le32_to_cpu(attn->info1);
	u32 errmap = 0;

	if (info & RX_ATTENTION_INFO1_FCS_ERR)
		errmap |= DP_RX_MPDU_ERR_FCS;

	if (info & RX_ATTENTION_INFO1_DECRYPT_ERR)
		errmap |= DP_RX_MPDU_ERR_DECRYPT;

	if (info & RX_ATTENTION_INFO1_TKIP_MIC_ERR)
		errmap |= DP_RX_MPDU_ERR_TKIP_MIC;

	if (info & RX_ATTENTION_INFO1_A_MSDU_ERROR)
		errmap |= DP_RX_MPDU_ERR_AMSDU_ERR;

	if (info & RX_ATTENTION_INFO1_OVERFLOW_ERR)
		errmap |= DP_RX_MPDU_ERR_OVERFLOW;

	if (info & RX_ATTENTION_INFO1_MSDU_LEN_ERR)
		errmap |= DP_RX_MPDU_ERR_MSDU_LEN;

	if (info & RX_ATTENTION_INFO1_MPDU_LEN_ERR)
		errmap |= DP_RX_MPDU_ERR_MPDU_LEN;

	return errmap;
}

static bool ath11k_dp_rx_h_attn_msdu_len_err(struct ath11k_base *ab,
					     struct hal_rx_desc *desc)
{
	struct rx_attention *rx_attention;
	u32 errmap;

	rx_attention = ath11k_dp_rx_get_attention(ab, desc);
	errmap = ath11k_dp_rx_h_attn_mpdu_err(rx_attention);

	return errmap & DP_RX_MPDU_ERR_MSDU_LEN;
}

static inline u16 ath11k_dp_rx_h_msdu_start_msdu_len(struct ath11k_base *ab,
						     struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_msdu_len(desc);
}

static inline u8 ath11k_dp_rx_h_msdu_start_sgi(struct ath11k_base *ab,
					       struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_msdu_sgi(desc);
}

static inline u8 ath11k_dp_rx_h_msdu_start_rate_mcs(struct ath11k_base *ab,
						    struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_msdu_rate_mcs(desc);
}

static inline u8 ath11k_dp_rx_h_msdu_start_rx_bw(struct ath11k_base *ab,
						 struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_msdu_rx_bw(desc);
}

static inline u32 ath11k_dp_rx_h_msdu_start_freq(struct ath11k_base *ab,
						 struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_msdu_freq(desc);
}

static inline u8 ath11k_dp_rx_h_msdu_start_pkt_type(struct ath11k_base *ab,
						    struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_msdu_pkt_type(desc);
}

static inline u8 ath11k_dp_rx_h_msdu_start_nss(struct ath11k_base *ab,
					       struct hal_rx_desc *desc)
{
	return hweight8(ab->hw_params.hw_ops->rx_desc_get_msdu_nss(desc));
}

static inline u8 ath11k_dp_rx_h_mpdu_start_tid(struct ath11k_base *ab,
					       struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_mpdu_tid(desc);
}

static inline u16 ath11k_dp_rx_h_mpdu_start_peer_id(struct ath11k_base *ab,
						    struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_mpdu_peer_id(desc);
}

static inline u8 ath11k_dp_rx_h_msdu_end_l3pad(struct ath11k_base *ab,
					       struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_l3_pad_bytes(desc);
}

static inline bool ath11k_dp_rx_h_msdu_end_first_msdu(struct ath11k_base *ab,
						      struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_first_msdu(desc);
}

static bool ath11k_dp_rx_h_msdu_end_last_msdu(struct ath11k_base *ab,
					      struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_last_msdu(desc);
}

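/* Note: all of the rx descriptor accessors above are thin wrappers around
 * ab->hw_params.hw_ops, which keeps this file independent of the
 * chip-specific hal_rx_desc layout.  As a rough illustration (the struct
 * member names match the callbacks used above; the implementation names
 * below are placeholders, not the real per-chip symbols):
 *
 *	static const struct ath11k_hw_ops some_chip_hw_ops = {
 *		.rx_desc_get_encrypt_type = some_chip_rx_desc_get_encrypt_type,
 *		.rx_desc_get_decap_type = some_chip_rx_desc_get_decap_type,
 *		.rx_desc_get_attention = some_chip_rx_desc_get_attention,
 *		...
 *	};
 */
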
static void ath11k_dp_rx_desc_end_tlv_copy(struct ath11k_base *ab, 241 struct hal_rx_desc *fdesc, 242 struct hal_rx_desc *ldesc) 243 { 244 ab->hw_params.hw_ops->rx_desc_copy_attn_end_tlv(fdesc, ldesc); 245 } 246 247 static inline u32 ath11k_dp_rxdesc_get_mpdulen_err(struct rx_attention *attn) 248 { 249 return FIELD_GET(RX_ATTENTION_INFO1_MPDU_LEN_ERR, 250 __le32_to_cpu(attn->info1)); 251 } 252 253 static inline u8 *ath11k_dp_rxdesc_get_80211hdr(struct ath11k_base *ab, 254 struct hal_rx_desc *rx_desc) 255 { 256 u8 *rx_pkt_hdr; 257 258 rx_pkt_hdr = ab->hw_params.hw_ops->rx_desc_get_msdu_payload(rx_desc); 259 260 return rx_pkt_hdr; 261 } 262 263 static inline bool ath11k_dp_rxdesc_mpdu_valid(struct ath11k_base *ab, 264 struct hal_rx_desc *rx_desc) 265 { 266 u32 tlv_tag; 267 268 tlv_tag = ab->hw_params.hw_ops->rx_desc_get_mpdu_start_tag(rx_desc); 269 270 return tlv_tag == HAL_RX_MPDU_START; 271 } 272 273 static inline u32 ath11k_dp_rxdesc_get_ppduid(struct ath11k_base *ab, 274 struct hal_rx_desc *rx_desc) 275 { 276 return ab->hw_params.hw_ops->rx_desc_get_mpdu_ppdu_id(rx_desc); 277 } 278 279 static inline void ath11k_dp_rxdesc_set_msdu_len(struct ath11k_base *ab, 280 struct hal_rx_desc *desc, 281 u16 len) 282 { 283 ab->hw_params.hw_ops->rx_desc_set_msdu_len(desc, len); 284 } 285 286 static bool ath11k_dp_rx_h_attn_is_mcbc(struct ath11k_base *ab, 287 struct hal_rx_desc *desc) 288 { 289 struct rx_attention *attn = ath11k_dp_rx_get_attention(ab, desc); 290 291 return ath11k_dp_rx_h_msdu_end_first_msdu(ab, desc) && 292 (!!FIELD_GET(RX_ATTENTION_INFO1_MCAST_BCAST, 293 __le32_to_cpu(attn->info1))); 294 } 295 296 static bool ath11k_dp_rxdesc_mac_addr2_valid(struct ath11k_base *ab, 297 struct hal_rx_desc *desc) 298 { 299 return ab->hw_params.hw_ops->rx_desc_mac_addr2_valid(desc); 300 } 301 302 static u8 *ath11k_dp_rxdesc_mpdu_start_addr2(struct ath11k_base *ab, 303 struct hal_rx_desc *desc) 304 { 305 return ab->hw_params.hw_ops->rx_desc_mpdu_start_addr2(desc); 306 } 307 308 static void ath11k_dp_service_mon_ring(struct timer_list *t) 309 { 310 struct ath11k_base *ab = from_timer(ab, t, mon_reap_timer); 311 int i; 312 313 for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) 314 ath11k_dp_rx_process_mon_rings(ab, i, NULL, DP_MON_SERVICE_BUDGET); 315 316 mod_timer(&ab->mon_reap_timer, jiffies + 317 msecs_to_jiffies(ATH11K_MON_TIMER_INTERVAL)); 318 } 319 320 static int ath11k_dp_purge_mon_ring(struct ath11k_base *ab) 321 { 322 int i, reaped = 0; 323 unsigned long timeout = jiffies + msecs_to_jiffies(DP_MON_PURGE_TIMEOUT_MS); 324 325 do { 326 for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) 327 reaped += ath11k_dp_rx_process_mon_rings(ab, i, 328 NULL, 329 DP_MON_SERVICE_BUDGET); 330 331 /* nothing more to reap */ 332 if (reaped < DP_MON_SERVICE_BUDGET) 333 return 0; 334 335 } while (time_before(jiffies, timeout)); 336 337 ath11k_warn(ab, "dp mon ring purge timeout"); 338 339 return -ETIMEDOUT; 340 } 341 342 /* Returns number of Rx buffers replenished */ 343 int ath11k_dp_rxbufs_replenish(struct ath11k_base *ab, int mac_id, 344 struct dp_rxdma_ring *rx_ring, 345 int req_entries, 346 enum hal_rx_buf_return_buf_manager mgr) 347 { 348 struct hal_srng *srng; 349 u32 *desc; 350 struct sk_buff *skb; 351 int num_free; 352 int num_remain; 353 int buf_id; 354 u32 cookie; 355 dma_addr_t paddr; 356 357 req_entries = min(req_entries, rx_ring->bufs_max); 358 359 srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id]; 360 361 spin_lock_bh(&srng->lock); 362 363 ath11k_hal_srng_access_begin(ab, srng); 364 365 
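	/* Replenish policy: when the caller passes req_entries == 0 this is
	 * an opportunistic top-up, so buffers are only refilled once more
	 * than 3/4 of the ring is free.  Each buffer pushed to the ring
	 * carries a cookie so the completion/error path can look the skb up
	 * in the idr again; illustratively, for mac_id 0 and buf_id 5:
	 *
	 *	cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, 0) |
	 *		 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, 5);
	 */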
num_free = ath11k_hal_srng_src_num_free(ab, srng, true); 366 if (!req_entries && (num_free > (rx_ring->bufs_max * 3) / 4)) 367 req_entries = num_free; 368 369 req_entries = min(num_free, req_entries); 370 num_remain = req_entries; 371 372 while (num_remain > 0) { 373 skb = dev_alloc_skb(DP_RX_BUFFER_SIZE + 374 DP_RX_BUFFER_ALIGN_SIZE); 375 if (!skb) 376 break; 377 378 if (!IS_ALIGNED((unsigned long)skb->data, 379 DP_RX_BUFFER_ALIGN_SIZE)) { 380 skb_pull(skb, 381 PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) - 382 skb->data); 383 } 384 385 paddr = dma_map_single(ab->dev, skb->data, 386 skb->len + skb_tailroom(skb), 387 DMA_FROM_DEVICE); 388 if (dma_mapping_error(ab->dev, paddr)) 389 goto fail_free_skb; 390 391 spin_lock_bh(&rx_ring->idr_lock); 392 buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 1, 393 (rx_ring->bufs_max * 3) + 1, GFP_ATOMIC); 394 spin_unlock_bh(&rx_ring->idr_lock); 395 if (buf_id <= 0) 396 goto fail_dma_unmap; 397 398 desc = ath11k_hal_srng_src_get_next_entry(ab, srng); 399 if (!desc) 400 goto fail_idr_remove; 401 402 ATH11K_SKB_RXCB(skb)->paddr = paddr; 403 404 cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) | 405 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id); 406 407 num_remain--; 408 409 ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr); 410 } 411 412 ath11k_hal_srng_access_end(ab, srng); 413 414 spin_unlock_bh(&srng->lock); 415 416 return req_entries - num_remain; 417 418 fail_idr_remove: 419 spin_lock_bh(&rx_ring->idr_lock); 420 idr_remove(&rx_ring->bufs_idr, buf_id); 421 spin_unlock_bh(&rx_ring->idr_lock); 422 fail_dma_unmap: 423 dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb), 424 DMA_FROM_DEVICE); 425 fail_free_skb: 426 dev_kfree_skb_any(skb); 427 428 ath11k_hal_srng_access_end(ab, srng); 429 430 spin_unlock_bh(&srng->lock); 431 432 return req_entries - num_remain; 433 } 434 435 static int ath11k_dp_rxdma_buf_ring_free(struct ath11k *ar, 436 struct dp_rxdma_ring *rx_ring) 437 { 438 struct sk_buff *skb; 439 int buf_id; 440 441 spin_lock_bh(&rx_ring->idr_lock); 442 idr_for_each_entry(&rx_ring->bufs_idr, skb, buf_id) { 443 idr_remove(&rx_ring->bufs_idr, buf_id); 444 /* TODO: Understand where internal driver does this dma_unmap 445 * of rxdma_buffer. 
446 */ 447 dma_unmap_single(ar->ab->dev, ATH11K_SKB_RXCB(skb)->paddr, 448 skb->len + skb_tailroom(skb), DMA_FROM_DEVICE); 449 dev_kfree_skb_any(skb); 450 } 451 452 idr_destroy(&rx_ring->bufs_idr); 453 spin_unlock_bh(&rx_ring->idr_lock); 454 455 return 0; 456 } 457 458 static int ath11k_dp_rxdma_pdev_buf_free(struct ath11k *ar) 459 { 460 struct ath11k_pdev_dp *dp = &ar->dp; 461 struct ath11k_base *ab = ar->ab; 462 struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring; 463 int i; 464 465 ath11k_dp_rxdma_buf_ring_free(ar, rx_ring); 466 467 rx_ring = &dp->rxdma_mon_buf_ring; 468 ath11k_dp_rxdma_buf_ring_free(ar, rx_ring); 469 470 for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) { 471 rx_ring = &dp->rx_mon_status_refill_ring[i]; 472 ath11k_dp_rxdma_buf_ring_free(ar, rx_ring); 473 } 474 475 return 0; 476 } 477 478 static int ath11k_dp_rxdma_ring_buf_setup(struct ath11k *ar, 479 struct dp_rxdma_ring *rx_ring, 480 u32 ringtype) 481 { 482 struct ath11k_pdev_dp *dp = &ar->dp; 483 int num_entries; 484 485 num_entries = rx_ring->refill_buf_ring.size / 486 ath11k_hal_srng_get_entrysize(ar->ab, ringtype); 487 488 rx_ring->bufs_max = num_entries; 489 ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id, rx_ring, num_entries, 490 ar->ab->hw_params.hal_params->rx_buf_rbm); 491 return 0; 492 } 493 494 static int ath11k_dp_rxdma_pdev_buf_setup(struct ath11k *ar) 495 { 496 struct ath11k_pdev_dp *dp = &ar->dp; 497 struct ath11k_base *ab = ar->ab; 498 struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring; 499 int i; 500 501 ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_BUF); 502 503 if (ar->ab->hw_params.rxdma1_enable) { 504 rx_ring = &dp->rxdma_mon_buf_ring; 505 ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_MONITOR_BUF); 506 } 507 508 for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) { 509 rx_ring = &dp->rx_mon_status_refill_ring[i]; 510 ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_MONITOR_STATUS); 511 } 512 513 return 0; 514 } 515 516 static void ath11k_dp_rx_pdev_srng_free(struct ath11k *ar) 517 { 518 struct ath11k_pdev_dp *dp = &ar->dp; 519 struct ath11k_base *ab = ar->ab; 520 int i; 521 522 ath11k_dp_srng_cleanup(ab, &dp->rx_refill_buf_ring.refill_buf_ring); 523 524 for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) { 525 if (ab->hw_params.rx_mac_buf_ring) 526 ath11k_dp_srng_cleanup(ab, &dp->rx_mac_buf_ring[i]); 527 528 ath11k_dp_srng_cleanup(ab, &dp->rxdma_err_dst_ring[i]); 529 ath11k_dp_srng_cleanup(ab, 530 &dp->rx_mon_status_refill_ring[i].refill_buf_ring); 531 } 532 533 ath11k_dp_srng_cleanup(ab, &dp->rxdma_mon_buf_ring.refill_buf_ring); 534 } 535 536 void ath11k_dp_pdev_reo_cleanup(struct ath11k_base *ab) 537 { 538 struct ath11k_dp *dp = &ab->dp; 539 int i; 540 541 for (i = 0; i < DP_REO_DST_RING_MAX; i++) 542 ath11k_dp_srng_cleanup(ab, &dp->reo_dst_ring[i]); 543 } 544 545 int ath11k_dp_pdev_reo_setup(struct ath11k_base *ab) 546 { 547 struct ath11k_dp *dp = &ab->dp; 548 int ret; 549 int i; 550 551 for (i = 0; i < DP_REO_DST_RING_MAX; i++) { 552 ret = ath11k_dp_srng_setup(ab, &dp->reo_dst_ring[i], 553 HAL_REO_DST, i, 0, 554 DP_REO_DST_RING_SIZE); 555 if (ret) { 556 ath11k_warn(ab, "failed to setup reo_dst_ring\n"); 557 goto err_reo_cleanup; 558 } 559 } 560 561 return 0; 562 563 err_reo_cleanup: 564 ath11k_dp_pdev_reo_cleanup(ab); 565 566 return ret; 567 } 568 569 static int ath11k_dp_rx_pdev_srng_alloc(struct ath11k *ar) 570 { 571 struct ath11k_pdev_dp *dp = &ar->dp; 572 struct ath11k_base *ab = ar->ab; 573 struct dp_srng *srng = NULL; 574 int i; 575 int ret; 576 
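	/* Set up the per-pdev rx rings: the refill buffer ring, the optional
	 * per-MAC buffer rings, the rxdma error destination rings and the
	 * monitor status refill rings.  The full monitor rings further down
	 * are only needed when hw_params.rxdma1_enable is set.
	 */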
	ret = ath11k_dp_srng_setup(ar->ab,
				   &dp->rx_refill_buf_ring.refill_buf_ring,
				   HAL_RXDMA_BUF, 0,
				   dp->mac_id, DP_RXDMA_BUF_RING_SIZE);
	if (ret) {
		ath11k_warn(ar->ab, "failed to setup rx_refill_buf_ring\n");
		return ret;
	}

	if (ar->ab->hw_params.rx_mac_buf_ring) {
		for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
			ret = ath11k_dp_srng_setup(ar->ab,
						   &dp->rx_mac_buf_ring[i],
						   HAL_RXDMA_BUF, 1,
						   dp->mac_id + i, 1024);
			if (ret) {
				ath11k_warn(ar->ab, "failed to setup rx_mac_buf_ring %d\n",
					    i);
				return ret;
			}
		}
	}

	for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
		ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_err_dst_ring[i],
					   HAL_RXDMA_DST, 0, dp->mac_id + i,
					   DP_RXDMA_ERR_DST_RING_SIZE);
		if (ret) {
			ath11k_warn(ar->ab, "failed to setup rxdma_err_dst_ring %d\n", i);
			return ret;
		}
	}

	for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
		srng = &dp->rx_mon_status_refill_ring[i].refill_buf_ring;
		ret = ath11k_dp_srng_setup(ar->ab,
					   srng,
					   HAL_RXDMA_MONITOR_STATUS, 0, dp->mac_id + i,
					   DP_RXDMA_MON_STATUS_RING_SIZE);
		if (ret) {
			ath11k_warn(ar->ab,
				    "failed to setup rx_mon_status_refill_ring %d\n", i);
			return ret;
		}
	}

	/* If rxdma1_enable is false there is no need to set up the
	 * rxdma_mon_buf_ring, rxdma_mon_dst_ring and rxdma_mon_desc_ring;
	 * only the monitor status reap timer is initialized (e.g. QCA6390).
	 */
	if (!ar->ab->hw_params.rxdma1_enable) {
		/* init mon status buffer reap timer */
		timer_setup(&ar->ab->mon_reap_timer,
			    ath11k_dp_service_mon_ring, 0);
		return 0;
	}

	ret = ath11k_dp_srng_setup(ar->ab,
				   &dp->rxdma_mon_buf_ring.refill_buf_ring,
				   HAL_RXDMA_MONITOR_BUF, 0, dp->mac_id,
				   DP_RXDMA_MONITOR_BUF_RING_SIZE);
	if (ret) {
		ath11k_warn(ar->ab,
			    "failed to setup HAL_RXDMA_MONITOR_BUF\n");
		return ret;
	}

	ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_mon_dst_ring,
				   HAL_RXDMA_MONITOR_DST, 0, dp->mac_id,
				   DP_RXDMA_MONITOR_DST_RING_SIZE);
	if (ret) {
		ath11k_warn(ar->ab,
			    "failed to setup HAL_RXDMA_MONITOR_DST\n");
		return ret;
	}

	ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_mon_desc_ring,
				   HAL_RXDMA_MONITOR_DESC, 0, dp->mac_id,
				   DP_RXDMA_MONITOR_DESC_RING_SIZE);
	if (ret) {
		ath11k_warn(ar->ab,
			    "failed to setup HAL_RXDMA_MONITOR_DESC\n");
		return ret;
	}

	return 0;
}

void ath11k_dp_reo_cmd_list_cleanup(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	struct dp_reo_cmd *cmd, *tmp;
	struct dp_reo_cache_flush_elem *cmd_cache, *tmp_cache;
	struct dp_rx_tid *rx_tid;

	spin_lock_bh(&dp->reo_cmd_lock);
	list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
		list_del(&cmd->list);
		rx_tid = &cmd->data;
		if (rx_tid->vaddr) {
			dma_unmap_single(ab->dev, rx_tid->paddr,
					 rx_tid->size, DMA_BIDIRECTIONAL);
			kfree(rx_tid->vaddr);
			rx_tid->vaddr = NULL;
		}
		kfree(cmd);
	}

	list_for_each_entry_safe(cmd_cache, tmp_cache,
				 &dp->reo_cmd_cache_flush_list, list) {
		list_del(&cmd_cache->list);
		dp->reo_cmd_cache_flush_count--;
		rx_tid = &cmd_cache->data;
		if (rx_tid->vaddr) {
			dma_unmap_single(ab->dev, rx_tid->paddr,
					 rx_tid->size, DMA_BIDIRECTIONAL);
			kfree(rx_tid->vaddr);
			rx_tid->vaddr = NULL;
		}
		kfree(cmd_cache);
	}
	spin_unlock_bh(&dp->reo_cmd_lock);
}

static void ath11k_dp_reo_cmd_free(struct
ath11k_dp *dp, void *ctx, 703 enum hal_reo_cmd_status status) 704 { 705 struct dp_rx_tid *rx_tid = ctx; 706 707 if (status != HAL_REO_CMD_SUCCESS) 708 ath11k_warn(dp->ab, "failed to flush rx tid hw desc, tid %d status %d\n", 709 rx_tid->tid, status); 710 if (rx_tid->vaddr) { 711 dma_unmap_single(dp->ab->dev, rx_tid->paddr, rx_tid->size, 712 DMA_BIDIRECTIONAL); 713 kfree(rx_tid->vaddr); 714 rx_tid->vaddr = NULL; 715 } 716 } 717 718 static void ath11k_dp_reo_cache_flush(struct ath11k_base *ab, 719 struct dp_rx_tid *rx_tid) 720 { 721 struct ath11k_hal_reo_cmd cmd = {0}; 722 unsigned long tot_desc_sz, desc_sz; 723 int ret; 724 725 tot_desc_sz = rx_tid->size; 726 desc_sz = ath11k_hal_reo_qdesc_size(0, HAL_DESC_REO_NON_QOS_TID); 727 728 while (tot_desc_sz > desc_sz) { 729 tot_desc_sz -= desc_sz; 730 cmd.addr_lo = lower_32_bits(rx_tid->paddr + tot_desc_sz); 731 cmd.addr_hi = upper_32_bits(rx_tid->paddr); 732 ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid, 733 HAL_REO_CMD_FLUSH_CACHE, &cmd, 734 NULL); 735 if (ret) 736 ath11k_warn(ab, 737 "failed to send HAL_REO_CMD_FLUSH_CACHE, tid %d (%d)\n", 738 rx_tid->tid, ret); 739 } 740 741 memset(&cmd, 0, sizeof(cmd)); 742 cmd.addr_lo = lower_32_bits(rx_tid->paddr); 743 cmd.addr_hi = upper_32_bits(rx_tid->paddr); 744 cmd.flag |= HAL_REO_CMD_FLG_NEED_STATUS; 745 ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid, 746 HAL_REO_CMD_FLUSH_CACHE, 747 &cmd, ath11k_dp_reo_cmd_free); 748 if (ret) { 749 ath11k_err(ab, "failed to send HAL_REO_CMD_FLUSH_CACHE cmd, tid %d (%d)\n", 750 rx_tid->tid, ret); 751 dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size, 752 DMA_BIDIRECTIONAL); 753 kfree(rx_tid->vaddr); 754 rx_tid->vaddr = NULL; 755 } 756 } 757 758 static void ath11k_dp_rx_tid_del_func(struct ath11k_dp *dp, void *ctx, 759 enum hal_reo_cmd_status status) 760 { 761 struct ath11k_base *ab = dp->ab; 762 struct dp_rx_tid *rx_tid = ctx; 763 struct dp_reo_cache_flush_elem *elem, *tmp; 764 765 if (status == HAL_REO_CMD_DRAIN) { 766 goto free_desc; 767 } else if (status != HAL_REO_CMD_SUCCESS) { 768 /* Shouldn't happen! Cleanup in case of other failure? 
*/ 769 ath11k_warn(ab, "failed to delete rx tid %d hw descriptor %d\n", 770 rx_tid->tid, status); 771 return; 772 } 773 774 elem = kzalloc(sizeof(*elem), GFP_ATOMIC); 775 if (!elem) 776 goto free_desc; 777 778 elem->ts = jiffies; 779 memcpy(&elem->data, rx_tid, sizeof(*rx_tid)); 780 781 spin_lock_bh(&dp->reo_cmd_lock); 782 list_add_tail(&elem->list, &dp->reo_cmd_cache_flush_list); 783 dp->reo_cmd_cache_flush_count++; 784 785 /* Flush and invalidate aged REO desc from HW cache */ 786 list_for_each_entry_safe(elem, tmp, &dp->reo_cmd_cache_flush_list, 787 list) { 788 if (dp->reo_cmd_cache_flush_count > DP_REO_DESC_FREE_THRESHOLD || 789 time_after(jiffies, elem->ts + 790 msecs_to_jiffies(DP_REO_DESC_FREE_TIMEOUT_MS))) { 791 list_del(&elem->list); 792 dp->reo_cmd_cache_flush_count--; 793 spin_unlock_bh(&dp->reo_cmd_lock); 794 795 ath11k_dp_reo_cache_flush(ab, &elem->data); 796 kfree(elem); 797 spin_lock_bh(&dp->reo_cmd_lock); 798 } 799 } 800 spin_unlock_bh(&dp->reo_cmd_lock); 801 802 return; 803 free_desc: 804 dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size, 805 DMA_BIDIRECTIONAL); 806 kfree(rx_tid->vaddr); 807 rx_tid->vaddr = NULL; 808 } 809 810 void ath11k_peer_rx_tid_delete(struct ath11k *ar, 811 struct ath11k_peer *peer, u8 tid) 812 { 813 struct ath11k_hal_reo_cmd cmd = {0}; 814 struct dp_rx_tid *rx_tid = &peer->rx_tid[tid]; 815 int ret; 816 817 if (!rx_tid->active) 818 return; 819 820 rx_tid->active = false; 821 822 cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS; 823 cmd.addr_lo = lower_32_bits(rx_tid->paddr); 824 cmd.addr_hi = upper_32_bits(rx_tid->paddr); 825 cmd.upd0 |= HAL_REO_CMD_UPD0_VLD; 826 ret = ath11k_dp_tx_send_reo_cmd(ar->ab, rx_tid, 827 HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd, 828 ath11k_dp_rx_tid_del_func); 829 if (ret) { 830 if (ret != -ESHUTDOWN) 831 ath11k_err(ar->ab, "failed to send HAL_REO_CMD_UPDATE_RX_QUEUE cmd, tid %d (%d)\n", 832 tid, ret); 833 dma_unmap_single(ar->ab->dev, rx_tid->paddr, rx_tid->size, 834 DMA_BIDIRECTIONAL); 835 kfree(rx_tid->vaddr); 836 rx_tid->vaddr = NULL; 837 } 838 839 rx_tid->paddr = 0; 840 rx_tid->size = 0; 841 } 842 843 static int ath11k_dp_rx_link_desc_return(struct ath11k_base *ab, 844 u32 *link_desc, 845 enum hal_wbm_rel_bm_act action) 846 { 847 struct ath11k_dp *dp = &ab->dp; 848 struct hal_srng *srng; 849 u32 *desc; 850 int ret = 0; 851 852 srng = &ab->hal.srng_list[dp->wbm_desc_rel_ring.ring_id]; 853 854 spin_lock_bh(&srng->lock); 855 856 ath11k_hal_srng_access_begin(ab, srng); 857 858 desc = ath11k_hal_srng_src_get_next_entry(ab, srng); 859 if (!desc) { 860 ret = -ENOBUFS; 861 goto exit; 862 } 863 864 ath11k_hal_rx_msdu_link_desc_set(ab, (void *)desc, (void *)link_desc, 865 action); 866 867 exit: 868 ath11k_hal_srng_access_end(ab, srng); 869 870 spin_unlock_bh(&srng->lock); 871 872 return ret; 873 } 874 875 static void ath11k_dp_rx_frags_cleanup(struct dp_rx_tid *rx_tid, bool rel_link_desc) 876 { 877 struct ath11k_base *ab = rx_tid->ab; 878 879 lockdep_assert_held(&ab->base_lock); 880 881 if (rx_tid->dst_ring_desc) { 882 if (rel_link_desc) 883 ath11k_dp_rx_link_desc_return(ab, (u32 *)rx_tid->dst_ring_desc, 884 HAL_WBM_REL_BM_ACT_PUT_IN_IDLE); 885 kfree(rx_tid->dst_ring_desc); 886 rx_tid->dst_ring_desc = NULL; 887 } 888 889 rx_tid->cur_sn = 0; 890 rx_tid->last_frag_no = 0; 891 rx_tid->rx_frag_bitmap = 0; 892 __skb_queue_purge(&rx_tid->rx_frags); 893 } 894 895 void ath11k_peer_frags_flush(struct ath11k *ar, struct ath11k_peer *peer) 896 { 897 struct dp_rx_tid *rx_tid; 898 int i; 899 900 lockdep_assert_held(&ar->ab->base_lock); 901 902 for (i = 
0; i <= IEEE80211_NUM_TIDS; i++) { 903 rx_tid = &peer->rx_tid[i]; 904 905 spin_unlock_bh(&ar->ab->base_lock); 906 del_timer_sync(&rx_tid->frag_timer); 907 spin_lock_bh(&ar->ab->base_lock); 908 909 ath11k_dp_rx_frags_cleanup(rx_tid, true); 910 } 911 } 912 913 void ath11k_peer_rx_tid_cleanup(struct ath11k *ar, struct ath11k_peer *peer) 914 { 915 struct dp_rx_tid *rx_tid; 916 int i; 917 918 lockdep_assert_held(&ar->ab->base_lock); 919 920 for (i = 0; i <= IEEE80211_NUM_TIDS; i++) { 921 rx_tid = &peer->rx_tid[i]; 922 923 ath11k_peer_rx_tid_delete(ar, peer, i); 924 ath11k_dp_rx_frags_cleanup(rx_tid, true); 925 926 spin_unlock_bh(&ar->ab->base_lock); 927 del_timer_sync(&rx_tid->frag_timer); 928 spin_lock_bh(&ar->ab->base_lock); 929 } 930 } 931 932 static int ath11k_peer_rx_tid_reo_update(struct ath11k *ar, 933 struct ath11k_peer *peer, 934 struct dp_rx_tid *rx_tid, 935 u32 ba_win_sz, u16 ssn, 936 bool update_ssn) 937 { 938 struct ath11k_hal_reo_cmd cmd = {0}; 939 int ret; 940 941 cmd.addr_lo = lower_32_bits(rx_tid->paddr); 942 cmd.addr_hi = upper_32_bits(rx_tid->paddr); 943 cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS; 944 cmd.upd0 = HAL_REO_CMD_UPD0_BA_WINDOW_SIZE; 945 cmd.ba_window_size = ba_win_sz; 946 947 if (update_ssn) { 948 cmd.upd0 |= HAL_REO_CMD_UPD0_SSN; 949 cmd.upd2 = FIELD_PREP(HAL_REO_CMD_UPD2_SSN, ssn); 950 } 951 952 ret = ath11k_dp_tx_send_reo_cmd(ar->ab, rx_tid, 953 HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd, 954 NULL); 955 if (ret) { 956 ath11k_warn(ar->ab, "failed to update rx tid queue, tid %d (%d)\n", 957 rx_tid->tid, ret); 958 return ret; 959 } 960 961 rx_tid->ba_win_sz = ba_win_sz; 962 963 return 0; 964 } 965 966 static void ath11k_dp_rx_tid_mem_free(struct ath11k_base *ab, 967 const u8 *peer_mac, int vdev_id, u8 tid) 968 { 969 struct ath11k_peer *peer; 970 struct dp_rx_tid *rx_tid; 971 972 spin_lock_bh(&ab->base_lock); 973 974 peer = ath11k_peer_find(ab, vdev_id, peer_mac); 975 if (!peer) { 976 ath11k_warn(ab, "failed to find the peer to free up rx tid mem\n"); 977 goto unlock_exit; 978 } 979 980 rx_tid = &peer->rx_tid[tid]; 981 if (!rx_tid->active) 982 goto unlock_exit; 983 984 dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size, 985 DMA_BIDIRECTIONAL); 986 kfree(rx_tid->vaddr); 987 rx_tid->vaddr = NULL; 988 989 rx_tid->active = false; 990 991 unlock_exit: 992 spin_unlock_bh(&ab->base_lock); 993 } 994 995 int ath11k_peer_rx_tid_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id, 996 u8 tid, u32 ba_win_sz, u16 ssn, 997 enum hal_pn_type pn_type) 998 { 999 struct ath11k_base *ab = ar->ab; 1000 struct ath11k_peer *peer; 1001 struct dp_rx_tid *rx_tid; 1002 u32 hw_desc_sz; 1003 u32 *addr_aligned; 1004 void *vaddr; 1005 dma_addr_t paddr; 1006 int ret; 1007 1008 spin_lock_bh(&ab->base_lock); 1009 1010 peer = ath11k_peer_find(ab, vdev_id, peer_mac); 1011 if (!peer) { 1012 ath11k_warn(ab, "failed to find the peer %pM to set up rx tid\n", 1013 peer_mac); 1014 spin_unlock_bh(&ab->base_lock); 1015 return -ENOENT; 1016 } 1017 1018 rx_tid = &peer->rx_tid[tid]; 1019 /* Update the tid queue if it is already setup */ 1020 if (rx_tid->active) { 1021 paddr = rx_tid->paddr; 1022 ret = ath11k_peer_rx_tid_reo_update(ar, peer, rx_tid, 1023 ba_win_sz, ssn, true); 1024 spin_unlock_bh(&ab->base_lock); 1025 if (ret) { 1026 ath11k_warn(ab, "failed to update reo for peer %pM rx tid %d\n: %d", 1027 peer_mac, tid, ret); 1028 return ret; 1029 } 1030 1031 ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, 1032 peer_mac, paddr, 1033 tid, 1, ba_win_sz); 1034 if (ret) 1035 ath11k_warn(ab, "failed to send wmi 
rx reorder queue for peer %pM tid %d: %d\n", 1036 peer_mac, tid, ret); 1037 return ret; 1038 } 1039 1040 rx_tid->tid = tid; 1041 1042 rx_tid->ba_win_sz = ba_win_sz; 1043 1044 /* TODO: Optimize the memory allocation for qos tid based on 1045 * the actual BA window size in REO tid update path. 1046 */ 1047 if (tid == HAL_DESC_REO_NON_QOS_TID) 1048 hw_desc_sz = ath11k_hal_reo_qdesc_size(ba_win_sz, tid); 1049 else 1050 hw_desc_sz = ath11k_hal_reo_qdesc_size(DP_BA_WIN_SZ_MAX, tid); 1051 1052 vaddr = kzalloc(hw_desc_sz + HAL_LINK_DESC_ALIGN - 1, GFP_ATOMIC); 1053 if (!vaddr) { 1054 spin_unlock_bh(&ab->base_lock); 1055 return -ENOMEM; 1056 } 1057 1058 addr_aligned = PTR_ALIGN(vaddr, HAL_LINK_DESC_ALIGN); 1059 1060 ath11k_hal_reo_qdesc_setup(addr_aligned, tid, ba_win_sz, 1061 ssn, pn_type); 1062 1063 paddr = dma_map_single(ab->dev, addr_aligned, hw_desc_sz, 1064 DMA_BIDIRECTIONAL); 1065 1066 ret = dma_mapping_error(ab->dev, paddr); 1067 if (ret) { 1068 spin_unlock_bh(&ab->base_lock); 1069 ath11k_warn(ab, "failed to setup dma map for peer %pM rx tid %d: %d\n", 1070 peer_mac, tid, ret); 1071 goto err_mem_free; 1072 } 1073 1074 rx_tid->vaddr = vaddr; 1075 rx_tid->paddr = paddr; 1076 rx_tid->size = hw_desc_sz; 1077 rx_tid->active = true; 1078 1079 spin_unlock_bh(&ab->base_lock); 1080 1081 ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, peer_mac, 1082 paddr, tid, 1, ba_win_sz); 1083 if (ret) { 1084 ath11k_warn(ar->ab, "failed to setup rx reorder queue for peer %pM tid %d: %d\n", 1085 peer_mac, tid, ret); 1086 ath11k_dp_rx_tid_mem_free(ab, peer_mac, vdev_id, tid); 1087 } 1088 1089 return ret; 1090 1091 err_mem_free: 1092 kfree(rx_tid->vaddr); 1093 rx_tid->vaddr = NULL; 1094 1095 return ret; 1096 } 1097 1098 int ath11k_dp_rx_ampdu_start(struct ath11k *ar, 1099 struct ieee80211_ampdu_params *params) 1100 { 1101 struct ath11k_base *ab = ar->ab; 1102 struct ath11k_sta *arsta = (void *)params->sta->drv_priv; 1103 int vdev_id = arsta->arvif->vdev_id; 1104 int ret; 1105 1106 ret = ath11k_peer_rx_tid_setup(ar, params->sta->addr, vdev_id, 1107 params->tid, params->buf_size, 1108 params->ssn, arsta->pn_type); 1109 if (ret) 1110 ath11k_warn(ab, "failed to setup rx tid %d\n", ret); 1111 1112 return ret; 1113 } 1114 1115 int ath11k_dp_rx_ampdu_stop(struct ath11k *ar, 1116 struct ieee80211_ampdu_params *params) 1117 { 1118 struct ath11k_base *ab = ar->ab; 1119 struct ath11k_peer *peer; 1120 struct ath11k_sta *arsta = (void *)params->sta->drv_priv; 1121 int vdev_id = arsta->arvif->vdev_id; 1122 dma_addr_t paddr; 1123 bool active; 1124 int ret; 1125 1126 spin_lock_bh(&ab->base_lock); 1127 1128 peer = ath11k_peer_find(ab, vdev_id, params->sta->addr); 1129 if (!peer) { 1130 ath11k_warn(ab, "failed to find the peer to stop rx aggregation\n"); 1131 spin_unlock_bh(&ab->base_lock); 1132 return -ENOENT; 1133 } 1134 1135 paddr = peer->rx_tid[params->tid].paddr; 1136 active = peer->rx_tid[params->tid].active; 1137 1138 if (!active) { 1139 spin_unlock_bh(&ab->base_lock); 1140 return 0; 1141 } 1142 1143 ret = ath11k_peer_rx_tid_reo_update(ar, peer, peer->rx_tid, 1, 0, false); 1144 spin_unlock_bh(&ab->base_lock); 1145 if (ret) { 1146 ath11k_warn(ab, "failed to update reo for rx tid %d: %d\n", 1147 params->tid, ret); 1148 return ret; 1149 } 1150 1151 ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, 1152 params->sta->addr, paddr, 1153 params->tid, 1, 1); 1154 if (ret) 1155 ath11k_warn(ab, "failed to send wmi to delete rx tid %d\n", 1156 ret); 1157 1158 return ret; 1159 } 1160 1161 int 
ath11k_dp_peer_rx_pn_replay_config(struct ath11k_vif *arvif, 1162 const u8 *peer_addr, 1163 enum set_key_cmd key_cmd, 1164 struct ieee80211_key_conf *key) 1165 { 1166 struct ath11k *ar = arvif->ar; 1167 struct ath11k_base *ab = ar->ab; 1168 struct ath11k_hal_reo_cmd cmd = {0}; 1169 struct ath11k_peer *peer; 1170 struct dp_rx_tid *rx_tid; 1171 u8 tid; 1172 int ret = 0; 1173 1174 /* NOTE: Enable PN/TSC replay check offload only for unicast frames. 1175 * We use mac80211 PN/TSC replay check functionality for bcast/mcast 1176 * for now. 1177 */ 1178 if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) 1179 return 0; 1180 1181 cmd.flag |= HAL_REO_CMD_FLG_NEED_STATUS; 1182 cmd.upd0 |= HAL_REO_CMD_UPD0_PN | 1183 HAL_REO_CMD_UPD0_PN_SIZE | 1184 HAL_REO_CMD_UPD0_PN_VALID | 1185 HAL_REO_CMD_UPD0_PN_CHECK | 1186 HAL_REO_CMD_UPD0_SVLD; 1187 1188 switch (key->cipher) { 1189 case WLAN_CIPHER_SUITE_TKIP: 1190 case WLAN_CIPHER_SUITE_CCMP: 1191 case WLAN_CIPHER_SUITE_CCMP_256: 1192 case WLAN_CIPHER_SUITE_GCMP: 1193 case WLAN_CIPHER_SUITE_GCMP_256: 1194 if (key_cmd == SET_KEY) { 1195 cmd.upd1 |= HAL_REO_CMD_UPD1_PN_CHECK; 1196 cmd.pn_size = 48; 1197 } 1198 break; 1199 default: 1200 break; 1201 } 1202 1203 spin_lock_bh(&ab->base_lock); 1204 1205 peer = ath11k_peer_find(ab, arvif->vdev_id, peer_addr); 1206 if (!peer) { 1207 ath11k_warn(ab, "failed to find the peer to configure pn replay detection\n"); 1208 spin_unlock_bh(&ab->base_lock); 1209 return -ENOENT; 1210 } 1211 1212 for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) { 1213 rx_tid = &peer->rx_tid[tid]; 1214 if (!rx_tid->active) 1215 continue; 1216 cmd.addr_lo = lower_32_bits(rx_tid->paddr); 1217 cmd.addr_hi = upper_32_bits(rx_tid->paddr); 1218 ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid, 1219 HAL_REO_CMD_UPDATE_RX_QUEUE, 1220 &cmd, NULL); 1221 if (ret) { 1222 ath11k_warn(ab, "failed to configure rx tid %d queue for pn replay detection %d\n", 1223 tid, ret); 1224 break; 1225 } 1226 } 1227 1228 spin_unlock_bh(&ab->base_lock); 1229 1230 return ret; 1231 } 1232 1233 static inline int ath11k_get_ppdu_user_index(struct htt_ppdu_stats *ppdu_stats, 1234 u16 peer_id) 1235 { 1236 int i; 1237 1238 for (i = 0; i < HTT_PPDU_STATS_MAX_USERS - 1; i++) { 1239 if (ppdu_stats->user_stats[i].is_valid_peer_id) { 1240 if (peer_id == ppdu_stats->user_stats[i].peer_id) 1241 return i; 1242 } else { 1243 return i; 1244 } 1245 } 1246 1247 return -EINVAL; 1248 } 1249 1250 static int ath11k_htt_tlv_ppdu_stats_parse(struct ath11k_base *ab, 1251 u16 tag, u16 len, const void *ptr, 1252 void *data) 1253 { 1254 struct htt_ppdu_stats_info *ppdu_info; 1255 struct htt_ppdu_user_stats *user_stats; 1256 int cur_user; 1257 u16 peer_id; 1258 1259 ppdu_info = (struct htt_ppdu_stats_info *)data; 1260 1261 switch (tag) { 1262 case HTT_PPDU_STATS_TAG_COMMON: 1263 if (len < sizeof(struct htt_ppdu_stats_common)) { 1264 ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n", 1265 len, tag); 1266 return -EINVAL; 1267 } 1268 memcpy((void *)&ppdu_info->ppdu_stats.common, ptr, 1269 sizeof(struct htt_ppdu_stats_common)); 1270 break; 1271 case HTT_PPDU_STATS_TAG_USR_RATE: 1272 if (len < sizeof(struct htt_ppdu_stats_user_rate)) { 1273 ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n", 1274 len, tag); 1275 return -EINVAL; 1276 } 1277 1278 peer_id = ((struct htt_ppdu_stats_user_rate *)ptr)->sw_peer_id; 1279 cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats, 1280 peer_id); 1281 if (cur_user < 0) 1282 return -EINVAL; 1283 user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user]; 1284 user_stats->peer_id = 
peer_id; 1285 user_stats->is_valid_peer_id = true; 1286 memcpy((void *)&user_stats->rate, ptr, 1287 sizeof(struct htt_ppdu_stats_user_rate)); 1288 user_stats->tlv_flags |= BIT(tag); 1289 break; 1290 case HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON: 1291 if (len < sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn)) { 1292 ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n", 1293 len, tag); 1294 return -EINVAL; 1295 } 1296 1297 peer_id = ((struct htt_ppdu_stats_usr_cmpltn_cmn *)ptr)->sw_peer_id; 1298 cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats, 1299 peer_id); 1300 if (cur_user < 0) 1301 return -EINVAL; 1302 user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user]; 1303 user_stats->peer_id = peer_id; 1304 user_stats->is_valid_peer_id = true; 1305 memcpy((void *)&user_stats->cmpltn_cmn, ptr, 1306 sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn)); 1307 user_stats->tlv_flags |= BIT(tag); 1308 break; 1309 case HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS: 1310 if (len < 1311 sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status)) { 1312 ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n", 1313 len, tag); 1314 return -EINVAL; 1315 } 1316 1317 peer_id = 1318 ((struct htt_ppdu_stats_usr_cmpltn_ack_ba_status *)ptr)->sw_peer_id; 1319 cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats, 1320 peer_id); 1321 if (cur_user < 0) 1322 return -EINVAL; 1323 user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user]; 1324 user_stats->peer_id = peer_id; 1325 user_stats->is_valid_peer_id = true; 1326 memcpy((void *)&user_stats->ack_ba, ptr, 1327 sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status)); 1328 user_stats->tlv_flags |= BIT(tag); 1329 break; 1330 } 1331 return 0; 1332 } 1333 1334 int ath11k_dp_htt_tlv_iter(struct ath11k_base *ab, const void *ptr, size_t len, 1335 int (*iter)(struct ath11k_base *ar, u16 tag, u16 len, 1336 const void *ptr, void *data), 1337 void *data) 1338 { 1339 const struct htt_tlv *tlv; 1340 const void *begin = ptr; 1341 u16 tlv_tag, tlv_len; 1342 int ret = -EINVAL; 1343 1344 while (len > 0) { 1345 if (len < sizeof(*tlv)) { 1346 ath11k_err(ab, "htt tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n", 1347 ptr - begin, len, sizeof(*tlv)); 1348 return -EINVAL; 1349 } 1350 tlv = (struct htt_tlv *)ptr; 1351 tlv_tag = FIELD_GET(HTT_TLV_TAG, tlv->header); 1352 tlv_len = FIELD_GET(HTT_TLV_LEN, tlv->header); 1353 ptr += sizeof(*tlv); 1354 len -= sizeof(*tlv); 1355 1356 if (tlv_len > len) { 1357 ath11k_err(ab, "htt tlv parse failure of tag %u at byte %zd (%zu bytes left, %u expected)\n", 1358 tlv_tag, ptr - begin, len, tlv_len); 1359 return -EINVAL; 1360 } 1361 ret = iter(ab, tlv_tag, tlv_len, ptr, data); 1362 if (ret == -ENOMEM) 1363 return ret; 1364 1365 ptr += tlv_len; 1366 len -= tlv_len; 1367 } 1368 return 0; 1369 } 1370 1371 static void 1372 ath11k_update_per_peer_tx_stats(struct ath11k *ar, 1373 struct htt_ppdu_stats *ppdu_stats, u8 user) 1374 { 1375 struct ath11k_base *ab = ar->ab; 1376 struct ath11k_peer *peer; 1377 struct ieee80211_sta *sta; 1378 struct ath11k_sta *arsta; 1379 struct htt_ppdu_stats_user_rate *user_rate; 1380 struct ath11k_per_peer_tx_stats *peer_stats = &ar->peer_tx_stats; 1381 struct htt_ppdu_user_stats *usr_stats = &ppdu_stats->user_stats[user]; 1382 struct htt_ppdu_stats_common *common = &ppdu_stats->common; 1383 int ret; 1384 u8 flags, mcs, nss, bw, sgi, dcm, rate_idx = 0; 1385 u32 succ_bytes = 0; 1386 u16 rate = 0, succ_pkts = 0; 1387 u32 tx_duration = 0; 1388 u8 tid = HTT_PPDU_STATS_NON_QOS_TID; 1389 bool is_ampdu = false; 1390 1391 if 
(!usr_stats) 1392 return; 1393 1394 if (!(usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_RATE))) 1395 return; 1396 1397 if (usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON)) 1398 is_ampdu = 1399 HTT_USR_CMPLTN_IS_AMPDU(usr_stats->cmpltn_cmn.flags); 1400 1401 if (usr_stats->tlv_flags & 1402 BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS)) { 1403 succ_bytes = usr_stats->ack_ba.success_bytes; 1404 succ_pkts = FIELD_GET(HTT_PPDU_STATS_ACK_BA_INFO_NUM_MSDU_M, 1405 usr_stats->ack_ba.info); 1406 tid = FIELD_GET(HTT_PPDU_STATS_ACK_BA_INFO_TID_NUM, 1407 usr_stats->ack_ba.info); 1408 } 1409 1410 if (common->fes_duration_us) 1411 tx_duration = common->fes_duration_us; 1412 1413 user_rate = &usr_stats->rate; 1414 flags = HTT_USR_RATE_PREAMBLE(user_rate->rate_flags); 1415 bw = HTT_USR_RATE_BW(user_rate->rate_flags) - 2; 1416 nss = HTT_USR_RATE_NSS(user_rate->rate_flags) + 1; 1417 mcs = HTT_USR_RATE_MCS(user_rate->rate_flags); 1418 sgi = HTT_USR_RATE_GI(user_rate->rate_flags); 1419 dcm = HTT_USR_RATE_DCM(user_rate->rate_flags); 1420 1421 /* Note: If host configured fixed rates and in some other special 1422 * cases, the broadcast/management frames are sent in different rates. 1423 * Firmware rate's control to be skipped for this? 1424 */ 1425 1426 if (flags == WMI_RATE_PREAMBLE_HE && mcs > ATH11K_HE_MCS_MAX) { 1427 ath11k_warn(ab, "Invalid HE mcs %d peer stats", mcs); 1428 return; 1429 } 1430 1431 if (flags == WMI_RATE_PREAMBLE_VHT && mcs > ATH11K_VHT_MCS_MAX) { 1432 ath11k_warn(ab, "Invalid VHT mcs %d peer stats", mcs); 1433 return; 1434 } 1435 1436 if (flags == WMI_RATE_PREAMBLE_HT && (mcs > ATH11K_HT_MCS_MAX || nss < 1)) { 1437 ath11k_warn(ab, "Invalid HT mcs %d nss %d peer stats", 1438 mcs, nss); 1439 return; 1440 } 1441 1442 if (flags == WMI_RATE_PREAMBLE_CCK || flags == WMI_RATE_PREAMBLE_OFDM) { 1443 ret = ath11k_mac_hw_ratecode_to_legacy_rate(mcs, 1444 flags, 1445 &rate_idx, 1446 &rate); 1447 if (ret < 0) 1448 return; 1449 } 1450 1451 rcu_read_lock(); 1452 spin_lock_bh(&ab->base_lock); 1453 peer = ath11k_peer_find_by_id(ab, usr_stats->peer_id); 1454 1455 if (!peer || !peer->sta) { 1456 spin_unlock_bh(&ab->base_lock); 1457 rcu_read_unlock(); 1458 return; 1459 } 1460 1461 sta = peer->sta; 1462 arsta = (struct ath11k_sta *)sta->drv_priv; 1463 1464 memset(&arsta->txrate, 0, sizeof(arsta->txrate)); 1465 1466 switch (flags) { 1467 case WMI_RATE_PREAMBLE_OFDM: 1468 arsta->txrate.legacy = rate; 1469 break; 1470 case WMI_RATE_PREAMBLE_CCK: 1471 arsta->txrate.legacy = rate; 1472 break; 1473 case WMI_RATE_PREAMBLE_HT: 1474 arsta->txrate.mcs = mcs + 8 * (nss - 1); 1475 arsta->txrate.flags = RATE_INFO_FLAGS_MCS; 1476 if (sgi) 1477 arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI; 1478 break; 1479 case WMI_RATE_PREAMBLE_VHT: 1480 arsta->txrate.mcs = mcs; 1481 arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS; 1482 if (sgi) 1483 arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI; 1484 break; 1485 case WMI_RATE_PREAMBLE_HE: 1486 arsta->txrate.mcs = mcs; 1487 arsta->txrate.flags = RATE_INFO_FLAGS_HE_MCS; 1488 arsta->txrate.he_dcm = dcm; 1489 arsta->txrate.he_gi = ath11k_mac_he_gi_to_nl80211_he_gi(sgi); 1490 arsta->txrate.he_ru_alloc = ath11k_mac_phy_he_ru_to_nl80211_he_ru_alloc 1491 ((user_rate->ru_end - 1492 user_rate->ru_start) + 1); 1493 break; 1494 } 1495 1496 arsta->txrate.nss = nss; 1497 1498 arsta->txrate.bw = ath11k_mac_bw_to_mac80211_bw(bw); 1499 arsta->tx_duration += tx_duration; 1500 memcpy(&arsta->last_txrate, &arsta->txrate, sizeof(struct rate_info)); 1501 1502 /* PPDU stats reported 
for mgmt packet doesn't have valid tx bytes. 1503 * So skip peer stats update for mgmt packets. 1504 */ 1505 if (tid < HTT_PPDU_STATS_NON_QOS_TID) { 1506 memset(peer_stats, 0, sizeof(*peer_stats)); 1507 peer_stats->succ_pkts = succ_pkts; 1508 peer_stats->succ_bytes = succ_bytes; 1509 peer_stats->is_ampdu = is_ampdu; 1510 peer_stats->duration = tx_duration; 1511 peer_stats->ba_fails = 1512 HTT_USR_CMPLTN_LONG_RETRY(usr_stats->cmpltn_cmn.flags) + 1513 HTT_USR_CMPLTN_SHORT_RETRY(usr_stats->cmpltn_cmn.flags); 1514 1515 if (ath11k_debugfs_is_extd_tx_stats_enabled(ar)) 1516 ath11k_debugfs_sta_add_tx_stats(arsta, peer_stats, rate_idx); 1517 } 1518 1519 spin_unlock_bh(&ab->base_lock); 1520 rcu_read_unlock(); 1521 } 1522 1523 static void ath11k_htt_update_ppdu_stats(struct ath11k *ar, 1524 struct htt_ppdu_stats *ppdu_stats) 1525 { 1526 u8 user; 1527 1528 for (user = 0; user < HTT_PPDU_STATS_MAX_USERS - 1; user++) 1529 ath11k_update_per_peer_tx_stats(ar, ppdu_stats, user); 1530 } 1531 1532 static 1533 struct htt_ppdu_stats_info *ath11k_dp_htt_get_ppdu_desc(struct ath11k *ar, 1534 u32 ppdu_id) 1535 { 1536 struct htt_ppdu_stats_info *ppdu_info; 1537 1538 lockdep_assert_held(&ar->data_lock); 1539 1540 if (!list_empty(&ar->ppdu_stats_info)) { 1541 list_for_each_entry(ppdu_info, &ar->ppdu_stats_info, list) { 1542 if (ppdu_info->ppdu_id == ppdu_id) 1543 return ppdu_info; 1544 } 1545 1546 if (ar->ppdu_stat_list_depth > HTT_PPDU_DESC_MAX_DEPTH) { 1547 ppdu_info = list_first_entry(&ar->ppdu_stats_info, 1548 typeof(*ppdu_info), list); 1549 list_del(&ppdu_info->list); 1550 ar->ppdu_stat_list_depth--; 1551 ath11k_htt_update_ppdu_stats(ar, &ppdu_info->ppdu_stats); 1552 kfree(ppdu_info); 1553 } 1554 } 1555 1556 ppdu_info = kzalloc(sizeof(*ppdu_info), GFP_ATOMIC); 1557 if (!ppdu_info) 1558 return NULL; 1559 1560 list_add_tail(&ppdu_info->list, &ar->ppdu_stats_info); 1561 ar->ppdu_stat_list_depth++; 1562 1563 return ppdu_info; 1564 } 1565 1566 static int ath11k_htt_pull_ppdu_stats(struct ath11k_base *ab, 1567 struct sk_buff *skb) 1568 { 1569 struct ath11k_htt_ppdu_stats_msg *msg; 1570 struct htt_ppdu_stats_info *ppdu_info; 1571 struct ath11k *ar; 1572 int ret; 1573 u8 pdev_id; 1574 u32 ppdu_id, len; 1575 1576 msg = (struct ath11k_htt_ppdu_stats_msg *)skb->data; 1577 len = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PAYLOAD_SIZE, msg->info); 1578 pdev_id = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PDEV_ID, msg->info); 1579 ppdu_id = msg->ppdu_id; 1580 1581 rcu_read_lock(); 1582 ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id); 1583 if (!ar) { 1584 ret = -EINVAL; 1585 goto out; 1586 } 1587 1588 if (ath11k_debugfs_is_pktlog_lite_mode_enabled(ar)) 1589 trace_ath11k_htt_ppdu_stats(ar, skb->data, len); 1590 1591 spin_lock_bh(&ar->data_lock); 1592 ppdu_info = ath11k_dp_htt_get_ppdu_desc(ar, ppdu_id); 1593 if (!ppdu_info) { 1594 ret = -EINVAL; 1595 goto out_unlock_data; 1596 } 1597 1598 ppdu_info->ppdu_id = ppdu_id; 1599 ret = ath11k_dp_htt_tlv_iter(ab, msg->data, len, 1600 ath11k_htt_tlv_ppdu_stats_parse, 1601 (void *)ppdu_info); 1602 if (ret) { 1603 ath11k_warn(ab, "Failed to parse tlv %d\n", ret); 1604 goto out_unlock_data; 1605 } 1606 1607 out_unlock_data: 1608 spin_unlock_bh(&ar->data_lock); 1609 1610 out: 1611 rcu_read_unlock(); 1612 1613 return ret; 1614 } 1615 1616 static void ath11k_htt_pktlog(struct ath11k_base *ab, struct sk_buff *skb) 1617 { 1618 struct htt_pktlog_msg *data = (struct htt_pktlog_msg *)skb->data; 1619 struct ath_pktlog_hdr *hdr = (struct ath_pktlog_hdr *)data; 1620 struct ath11k *ar; 1621 u8 pdev_id; 1622 1623 
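	/* Recover the radio from the pdev id carried in the message header
	 * (extracted with the PPDU stats field mask, which covers the same
	 * bits) before handing the payload to the pktlog tracepoint.
	 */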
pdev_id = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PDEV_ID, data->hdr); 1624 ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id); 1625 if (!ar) { 1626 ath11k_warn(ab, "invalid pdev id %d on htt pktlog\n", pdev_id); 1627 return; 1628 } 1629 1630 trace_ath11k_htt_pktlog(ar, data->payload, hdr->size, 1631 ar->ab->pktlog_defs_checksum); 1632 } 1633 1634 static void ath11k_htt_backpressure_event_handler(struct ath11k_base *ab, 1635 struct sk_buff *skb) 1636 { 1637 u32 *data = (u32 *)skb->data; 1638 u8 pdev_id, ring_type, ring_id, pdev_idx; 1639 u16 hp, tp; 1640 u32 backpressure_time; 1641 struct ath11k_bp_stats *bp_stats; 1642 1643 pdev_id = FIELD_GET(HTT_BACKPRESSURE_EVENT_PDEV_ID_M, *data); 1644 ring_type = FIELD_GET(HTT_BACKPRESSURE_EVENT_RING_TYPE_M, *data); 1645 ring_id = FIELD_GET(HTT_BACKPRESSURE_EVENT_RING_ID_M, *data); 1646 ++data; 1647 1648 hp = FIELD_GET(HTT_BACKPRESSURE_EVENT_HP_M, *data); 1649 tp = FIELD_GET(HTT_BACKPRESSURE_EVENT_TP_M, *data); 1650 ++data; 1651 1652 backpressure_time = *data; 1653 1654 ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "backpressure event, pdev %d, ring type %d,ring id %d, hp %d tp %d, backpressure time %d\n", 1655 pdev_id, ring_type, ring_id, hp, tp, backpressure_time); 1656 1657 if (ring_type == HTT_BACKPRESSURE_UMAC_RING_TYPE) { 1658 if (ring_id >= HTT_SW_UMAC_RING_IDX_MAX) 1659 return; 1660 1661 bp_stats = &ab->soc_stats.bp_stats.umac_ring_bp_stats[ring_id]; 1662 } else if (ring_type == HTT_BACKPRESSURE_LMAC_RING_TYPE) { 1663 pdev_idx = DP_HW2SW_MACID(pdev_id); 1664 1665 if (ring_id >= HTT_SW_LMAC_RING_IDX_MAX || pdev_idx >= MAX_RADIOS) 1666 return; 1667 1668 bp_stats = &ab->soc_stats.bp_stats.lmac_ring_bp_stats[ring_id][pdev_idx]; 1669 } else { 1670 ath11k_warn(ab, "unknown ring type received in htt bp event %d\n", 1671 ring_type); 1672 return; 1673 } 1674 1675 spin_lock_bh(&ab->base_lock); 1676 bp_stats->hp = hp; 1677 bp_stats->tp = tp; 1678 bp_stats->count++; 1679 bp_stats->jiffies = jiffies; 1680 spin_unlock_bh(&ab->base_lock); 1681 } 1682 1683 void ath11k_dp_htt_htc_t2h_msg_handler(struct ath11k_base *ab, 1684 struct sk_buff *skb) 1685 { 1686 struct ath11k_dp *dp = &ab->dp; 1687 struct htt_resp_msg *resp = (struct htt_resp_msg *)skb->data; 1688 enum htt_t2h_msg_type type = FIELD_GET(HTT_T2H_MSG_TYPE, *(u32 *)resp); 1689 u16 peer_id; 1690 u8 vdev_id; 1691 u8 mac_addr[ETH_ALEN]; 1692 u16 peer_mac_h16; 1693 u16 ast_hash; 1694 u16 hw_peer_id; 1695 1696 ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "dp_htt rx msg type :0x%0x\n", type); 1697 1698 switch (type) { 1699 case HTT_T2H_MSG_TYPE_VERSION_CONF: 1700 dp->htt_tgt_ver_major = FIELD_GET(HTT_T2H_VERSION_CONF_MAJOR, 1701 resp->version_msg.version); 1702 dp->htt_tgt_ver_minor = FIELD_GET(HTT_T2H_VERSION_CONF_MINOR, 1703 resp->version_msg.version); 1704 complete(&dp->htt_tgt_version_received); 1705 break; 1706 case HTT_T2H_MSG_TYPE_PEER_MAP: 1707 vdev_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_VDEV_ID, 1708 resp->peer_map_ev.info); 1709 peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_PEER_ID, 1710 resp->peer_map_ev.info); 1711 peer_mac_h16 = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16, 1712 resp->peer_map_ev.info1); 1713 ath11k_dp_get_mac_addr(resp->peer_map_ev.mac_addr_l32, 1714 peer_mac_h16, mac_addr); 1715 ath11k_peer_map_event(ab, vdev_id, peer_id, mac_addr, 0, 0); 1716 break; 1717 case HTT_T2H_MSG_TYPE_PEER_MAP2: 1718 vdev_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_VDEV_ID, 1719 resp->peer_map_ev.info); 1720 peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_PEER_ID, 1721 resp->peer_map_ev.info); 1722 peer_mac_h16 = 
FIELD_GET(HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16, 1723 resp->peer_map_ev.info1); 1724 ath11k_dp_get_mac_addr(resp->peer_map_ev.mac_addr_l32, 1725 peer_mac_h16, mac_addr); 1726 ast_hash = FIELD_GET(HTT_T2H_PEER_MAP_INFO2_AST_HASH_VAL, 1727 resp->peer_map_ev.info2); 1728 hw_peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_HW_PEER_ID, 1729 resp->peer_map_ev.info1); 1730 ath11k_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash, 1731 hw_peer_id); 1732 break; 1733 case HTT_T2H_MSG_TYPE_PEER_UNMAP: 1734 case HTT_T2H_MSG_TYPE_PEER_UNMAP2: 1735 peer_id = FIELD_GET(HTT_T2H_PEER_UNMAP_INFO_PEER_ID, 1736 resp->peer_unmap_ev.info); 1737 ath11k_peer_unmap_event(ab, peer_id); 1738 break; 1739 case HTT_T2H_MSG_TYPE_PPDU_STATS_IND: 1740 ath11k_htt_pull_ppdu_stats(ab, skb); 1741 break; 1742 case HTT_T2H_MSG_TYPE_EXT_STATS_CONF: 1743 ath11k_debugfs_htt_ext_stats_handler(ab, skb); 1744 break; 1745 case HTT_T2H_MSG_TYPE_PKTLOG: 1746 ath11k_htt_pktlog(ab, skb); 1747 break; 1748 case HTT_T2H_MSG_TYPE_BKPRESSURE_EVENT_IND: 1749 ath11k_htt_backpressure_event_handler(ab, skb); 1750 break; 1751 default: 1752 ath11k_warn(ab, "htt event %d not handled\n", type); 1753 break; 1754 } 1755 1756 dev_kfree_skb_any(skb); 1757 } 1758 1759 static int ath11k_dp_rx_msdu_coalesce(struct ath11k *ar, 1760 struct sk_buff_head *msdu_list, 1761 struct sk_buff *first, struct sk_buff *last, 1762 u8 l3pad_bytes, int msdu_len) 1763 { 1764 struct ath11k_base *ab = ar->ab; 1765 struct sk_buff *skb; 1766 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(first); 1767 int buf_first_hdr_len, buf_first_len; 1768 struct hal_rx_desc *ldesc; 1769 int space_extra, rem_len, buf_len; 1770 u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz; 1771 1772 /* As the msdu is spread across multiple rx buffers, 1773 * find the offset to the start of msdu for computing 1774 * the length of the msdu in the first buffer. 1775 */ 1776 buf_first_hdr_len = hal_rx_desc_sz + l3pad_bytes; 1777 buf_first_len = DP_RX_BUFFER_SIZE - buf_first_hdr_len; 1778 1779 if (WARN_ON_ONCE(msdu_len <= buf_first_len)) { 1780 skb_put(first, buf_first_hdr_len + msdu_len); 1781 skb_pull(first, buf_first_hdr_len); 1782 return 0; 1783 } 1784 1785 ldesc = (struct hal_rx_desc *)last->data; 1786 rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(ab, ldesc); 1787 rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(ab, ldesc); 1788 1789 /* MSDU spans over multiple buffers because the length of the MSDU 1790 * exceeds DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE. So assume the data 1791 * in the first buf is of length DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE. 1792 */ 1793 skb_put(first, DP_RX_BUFFER_SIZE); 1794 skb_pull(first, buf_first_hdr_len); 1795 1796 /* When an MSDU spread over multiple buffers attention, MSDU_END and 1797 * MPDU_END tlvs are valid only in the last buffer. Copy those tlvs. 
1798 */ 1799 ath11k_dp_rx_desc_end_tlv_copy(ab, rxcb->rx_desc, ldesc); 1800 1801 space_extra = msdu_len - (buf_first_len + skb_tailroom(first)); 1802 if (space_extra > 0 && 1803 (pskb_expand_head(first, 0, space_extra, GFP_ATOMIC) < 0)) { 1804 /* Free up all buffers of the MSDU */ 1805 while ((skb = __skb_dequeue(msdu_list)) != NULL) { 1806 rxcb = ATH11K_SKB_RXCB(skb); 1807 if (!rxcb->is_continuation) { 1808 dev_kfree_skb_any(skb); 1809 break; 1810 } 1811 dev_kfree_skb_any(skb); 1812 } 1813 return -ENOMEM; 1814 } 1815 1816 rem_len = msdu_len - buf_first_len; 1817 while ((skb = __skb_dequeue(msdu_list)) != NULL && rem_len > 0) { 1818 rxcb = ATH11K_SKB_RXCB(skb); 1819 if (rxcb->is_continuation) 1820 buf_len = DP_RX_BUFFER_SIZE - hal_rx_desc_sz; 1821 else 1822 buf_len = rem_len; 1823 1824 if (buf_len > (DP_RX_BUFFER_SIZE - hal_rx_desc_sz)) { 1825 WARN_ON_ONCE(1); 1826 dev_kfree_skb_any(skb); 1827 return -EINVAL; 1828 } 1829 1830 skb_put(skb, buf_len + hal_rx_desc_sz); 1831 skb_pull(skb, hal_rx_desc_sz); 1832 skb_copy_from_linear_data(skb, skb_put(first, buf_len), 1833 buf_len); 1834 dev_kfree_skb_any(skb); 1835 1836 rem_len -= buf_len; 1837 if (!rxcb->is_continuation) 1838 break; 1839 } 1840 1841 return 0; 1842 } 1843 1844 static struct sk_buff *ath11k_dp_rx_get_msdu_last_buf(struct sk_buff_head *msdu_list, 1845 struct sk_buff *first) 1846 { 1847 struct sk_buff *skb; 1848 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(first); 1849 1850 if (!rxcb->is_continuation) 1851 return first; 1852 1853 skb_queue_walk(msdu_list, skb) { 1854 rxcb = ATH11K_SKB_RXCB(skb); 1855 if (!rxcb->is_continuation) 1856 return skb; 1857 } 1858 1859 return NULL; 1860 } 1861 1862 static void ath11k_dp_rx_h_csum_offload(struct ath11k *ar, struct sk_buff *msdu) 1863 { 1864 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); 1865 struct rx_attention *rx_attention; 1866 bool ip_csum_fail, l4_csum_fail; 1867 1868 rx_attention = ath11k_dp_rx_get_attention(ar->ab, rxcb->rx_desc); 1869 ip_csum_fail = ath11k_dp_rx_h_attn_ip_cksum_fail(rx_attention); 1870 l4_csum_fail = ath11k_dp_rx_h_attn_l4_cksum_fail(rx_attention); 1871 1872 msdu->ip_summed = (ip_csum_fail || l4_csum_fail) ? 
1873 CHECKSUM_NONE : CHECKSUM_UNNECESSARY; 1874 } 1875 1876 static int ath11k_dp_rx_crypto_mic_len(struct ath11k *ar, 1877 enum hal_encrypt_type enctype) 1878 { 1879 switch (enctype) { 1880 case HAL_ENCRYPT_TYPE_OPEN: 1881 case HAL_ENCRYPT_TYPE_TKIP_NO_MIC: 1882 case HAL_ENCRYPT_TYPE_TKIP_MIC: 1883 return 0; 1884 case HAL_ENCRYPT_TYPE_CCMP_128: 1885 return IEEE80211_CCMP_MIC_LEN; 1886 case HAL_ENCRYPT_TYPE_CCMP_256: 1887 return IEEE80211_CCMP_256_MIC_LEN; 1888 case HAL_ENCRYPT_TYPE_GCMP_128: 1889 case HAL_ENCRYPT_TYPE_AES_GCMP_256: 1890 return IEEE80211_GCMP_MIC_LEN; 1891 case HAL_ENCRYPT_TYPE_WEP_40: 1892 case HAL_ENCRYPT_TYPE_WEP_104: 1893 case HAL_ENCRYPT_TYPE_WEP_128: 1894 case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4: 1895 case HAL_ENCRYPT_TYPE_WAPI: 1896 break; 1897 } 1898 1899 ath11k_warn(ar->ab, "unsupported encryption type %d for mic len\n", enctype); 1900 return 0; 1901 } 1902 1903 static int ath11k_dp_rx_crypto_param_len(struct ath11k *ar, 1904 enum hal_encrypt_type enctype) 1905 { 1906 switch (enctype) { 1907 case HAL_ENCRYPT_TYPE_OPEN: 1908 return 0; 1909 case HAL_ENCRYPT_TYPE_TKIP_NO_MIC: 1910 case HAL_ENCRYPT_TYPE_TKIP_MIC: 1911 return IEEE80211_TKIP_IV_LEN; 1912 case HAL_ENCRYPT_TYPE_CCMP_128: 1913 return IEEE80211_CCMP_HDR_LEN; 1914 case HAL_ENCRYPT_TYPE_CCMP_256: 1915 return IEEE80211_CCMP_256_HDR_LEN; 1916 case HAL_ENCRYPT_TYPE_GCMP_128: 1917 case HAL_ENCRYPT_TYPE_AES_GCMP_256: 1918 return IEEE80211_GCMP_HDR_LEN; 1919 case HAL_ENCRYPT_TYPE_WEP_40: 1920 case HAL_ENCRYPT_TYPE_WEP_104: 1921 case HAL_ENCRYPT_TYPE_WEP_128: 1922 case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4: 1923 case HAL_ENCRYPT_TYPE_WAPI: 1924 break; 1925 } 1926 1927 ath11k_warn(ar->ab, "unsupported encryption type %d\n", enctype); 1928 return 0; 1929 } 1930 1931 static int ath11k_dp_rx_crypto_icv_len(struct ath11k *ar, 1932 enum hal_encrypt_type enctype) 1933 { 1934 switch (enctype) { 1935 case HAL_ENCRYPT_TYPE_OPEN: 1936 case HAL_ENCRYPT_TYPE_CCMP_128: 1937 case HAL_ENCRYPT_TYPE_CCMP_256: 1938 case HAL_ENCRYPT_TYPE_GCMP_128: 1939 case HAL_ENCRYPT_TYPE_AES_GCMP_256: 1940 return 0; 1941 case HAL_ENCRYPT_TYPE_TKIP_NO_MIC: 1942 case HAL_ENCRYPT_TYPE_TKIP_MIC: 1943 return IEEE80211_TKIP_ICV_LEN; 1944 case HAL_ENCRYPT_TYPE_WEP_40: 1945 case HAL_ENCRYPT_TYPE_WEP_104: 1946 case HAL_ENCRYPT_TYPE_WEP_128: 1947 case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4: 1948 case HAL_ENCRYPT_TYPE_WAPI: 1949 break; 1950 } 1951 1952 ath11k_warn(ar->ab, "unsupported encryption type %d\n", enctype); 1953 return 0; 1954 } 1955 1956 static void ath11k_dp_rx_h_undecap_nwifi(struct ath11k *ar, 1957 struct sk_buff *msdu, 1958 u8 *first_hdr, 1959 enum hal_encrypt_type enctype, 1960 struct ieee80211_rx_status *status) 1961 { 1962 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); 1963 u8 decap_hdr[DP_MAX_NWIFI_HDR_LEN]; 1964 struct ieee80211_hdr *hdr; 1965 size_t hdr_len; 1966 u8 da[ETH_ALEN]; 1967 u8 sa[ETH_ALEN]; 1968 u16 qos_ctl = 0; 1969 u8 *qos; 1970 1971 /* copy SA & DA and pull decapped header */ 1972 hdr = (struct ieee80211_hdr *)msdu->data; 1973 hdr_len = ieee80211_hdrlen(hdr->frame_control); 1974 ether_addr_copy(da, ieee80211_get_DA(hdr)); 1975 ether_addr_copy(sa, ieee80211_get_SA(hdr)); 1976 skb_pull(msdu, ieee80211_hdrlen(hdr->frame_control)); 1977 1978 if (rxcb->is_first_msdu) { 1979 /* original 802.11 header is valid for the first msdu 1980 * hence we can reuse the same header 1981 */ 1982 hdr = (struct ieee80211_hdr *)first_hdr; 1983 hdr_len = ieee80211_hdrlen(hdr->frame_control); 1984 1985 /* Each A-MSDU subframe will be reported as a separate MSDU, 1986 
* so strip the A-MSDU bit from QoS Ctl. 1987 */ 1988 if (ieee80211_is_data_qos(hdr->frame_control)) { 1989 qos = ieee80211_get_qos_ctl(hdr); 1990 qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT; 1991 } 1992 } else { 1993 /* Rebuild qos header if this is a middle/last msdu */ 1994 hdr->frame_control |= __cpu_to_le16(IEEE80211_STYPE_QOS_DATA); 1995 1996 /* Reset the order bit as the HT_Control header is stripped */ 1997 hdr->frame_control &= ~(__cpu_to_le16(IEEE80211_FCTL_ORDER)); 1998 1999 qos_ctl = rxcb->tid; 2000 2001 if (ath11k_dp_rx_h_msdu_start_mesh_ctl_present(ar->ab, rxcb->rx_desc)) 2002 qos_ctl |= IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT; 2003 2004 /* TODO Add other QoS ctl fields when required */ 2005 2006 /* copy decap header before overwriting for reuse below */ 2007 memcpy(decap_hdr, (uint8_t *)hdr, hdr_len); 2008 } 2009 2010 if (!(status->flag & RX_FLAG_IV_STRIPPED)) { 2011 memcpy(skb_push(msdu, 2012 ath11k_dp_rx_crypto_param_len(ar, enctype)), 2013 (void *)hdr + hdr_len, 2014 ath11k_dp_rx_crypto_param_len(ar, enctype)); 2015 } 2016 2017 if (!rxcb->is_first_msdu) { 2018 memcpy(skb_push(msdu, 2019 IEEE80211_QOS_CTL_LEN), &qos_ctl, 2020 IEEE80211_QOS_CTL_LEN); 2021 memcpy(skb_push(msdu, hdr_len), decap_hdr, hdr_len); 2022 return; 2023 } 2024 2025 memcpy(skb_push(msdu, hdr_len), hdr, hdr_len); 2026 2027 /* original 802.11 header has a different DA and in 2028 * case of 4addr it may also have different SA 2029 */ 2030 hdr = (struct ieee80211_hdr *)msdu->data; 2031 ether_addr_copy(ieee80211_get_DA(hdr), da); 2032 ether_addr_copy(ieee80211_get_SA(hdr), sa); 2033 } 2034 2035 static void ath11k_dp_rx_h_undecap_raw(struct ath11k *ar, struct sk_buff *msdu, 2036 enum hal_encrypt_type enctype, 2037 struct ieee80211_rx_status *status, 2038 bool decrypted) 2039 { 2040 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); 2041 struct ieee80211_hdr *hdr; 2042 size_t hdr_len; 2043 size_t crypto_len; 2044 2045 if (!rxcb->is_first_msdu || 2046 !(rxcb->is_first_msdu && rxcb->is_last_msdu)) { 2047 WARN_ON_ONCE(1); 2048 return; 2049 } 2050 2051 skb_trim(msdu, msdu->len - FCS_LEN); 2052 2053 if (!decrypted) 2054 return; 2055 2056 hdr = (void *)msdu->data; 2057 2058 /* Tail */ 2059 if (status->flag & RX_FLAG_IV_STRIPPED) { 2060 skb_trim(msdu, msdu->len - 2061 ath11k_dp_rx_crypto_mic_len(ar, enctype)); 2062 2063 skb_trim(msdu, msdu->len - 2064 ath11k_dp_rx_crypto_icv_len(ar, enctype)); 2065 } else { 2066 /* MIC */ 2067 if (status->flag & RX_FLAG_MIC_STRIPPED) 2068 skb_trim(msdu, msdu->len - 2069 ath11k_dp_rx_crypto_mic_len(ar, enctype)); 2070 2071 /* ICV */ 2072 if (status->flag & RX_FLAG_ICV_STRIPPED) 2073 skb_trim(msdu, msdu->len - 2074 ath11k_dp_rx_crypto_icv_len(ar, enctype)); 2075 } 2076 2077 /* MMIC */ 2078 if ((status->flag & RX_FLAG_MMIC_STRIPPED) && 2079 !ieee80211_has_morefrags(hdr->frame_control) && 2080 enctype == HAL_ENCRYPT_TYPE_TKIP_MIC) 2081 skb_trim(msdu, msdu->len - IEEE80211_CCMP_MIC_LEN); 2082 2083 /* Head */ 2084 if (status->flag & RX_FLAG_IV_STRIPPED) { 2085 hdr_len = ieee80211_hdrlen(hdr->frame_control); 2086 crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype); 2087 2088 memmove((void *)msdu->data + crypto_len, 2089 (void *)msdu->data, hdr_len); 2090 skb_pull(msdu, crypto_len); 2091 } 2092 } 2093 2094 static void *ath11k_dp_rx_h_find_rfc1042(struct ath11k *ar, 2095 struct sk_buff *msdu, 2096 enum hal_encrypt_type enctype) 2097 { 2098 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); 2099 struct ieee80211_hdr *hdr; 2100 size_t hdr_len, crypto_len; 2101 void *rfc1042; 2102 bool 
is_amsdu; 2103 2104 is_amsdu = !(rxcb->is_first_msdu && rxcb->is_last_msdu); 2105 hdr = (struct ieee80211_hdr *)ath11k_dp_rx_h_80211_hdr(ar->ab, rxcb->rx_desc); 2106 rfc1042 = hdr; 2107 2108 if (rxcb->is_first_msdu) { 2109 hdr_len = ieee80211_hdrlen(hdr->frame_control); 2110 crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype); 2111 2112 rfc1042 += hdr_len + crypto_len; 2113 } 2114 2115 if (is_amsdu) 2116 rfc1042 += sizeof(struct ath11k_dp_amsdu_subframe_hdr); 2117 2118 return rfc1042; 2119 } 2120 2121 static void ath11k_dp_rx_h_undecap_eth(struct ath11k *ar, 2122 struct sk_buff *msdu, 2123 u8 *first_hdr, 2124 enum hal_encrypt_type enctype, 2125 struct ieee80211_rx_status *status) 2126 { 2127 struct ieee80211_hdr *hdr; 2128 struct ethhdr *eth; 2129 size_t hdr_len; 2130 u8 da[ETH_ALEN]; 2131 u8 sa[ETH_ALEN]; 2132 void *rfc1042; 2133 2134 rfc1042 = ath11k_dp_rx_h_find_rfc1042(ar, msdu, enctype); 2135 if (WARN_ON_ONCE(!rfc1042)) 2136 return; 2137 2138 /* pull decapped header and copy SA & DA */ 2139 eth = (struct ethhdr *)msdu->data; 2140 ether_addr_copy(da, eth->h_dest); 2141 ether_addr_copy(sa, eth->h_source); 2142 skb_pull(msdu, sizeof(struct ethhdr)); 2143 2144 /* push rfc1042/llc/snap */ 2145 memcpy(skb_push(msdu, sizeof(struct ath11k_dp_rfc1042_hdr)), rfc1042, 2146 sizeof(struct ath11k_dp_rfc1042_hdr)); 2147 2148 /* push original 802.11 header */ 2149 hdr = (struct ieee80211_hdr *)first_hdr; 2150 hdr_len = ieee80211_hdrlen(hdr->frame_control); 2151 2152 if (!(status->flag & RX_FLAG_IV_STRIPPED)) { 2153 memcpy(skb_push(msdu, 2154 ath11k_dp_rx_crypto_param_len(ar, enctype)), 2155 (void *)hdr + hdr_len, 2156 ath11k_dp_rx_crypto_param_len(ar, enctype)); 2157 } 2158 2159 memcpy(skb_push(msdu, hdr_len), hdr, hdr_len); 2160 2161 /* original 802.11 header has a different DA and in 2162 * case of 4addr it may also have different SA 2163 */ 2164 hdr = (struct ieee80211_hdr *)msdu->data; 2165 ether_addr_copy(ieee80211_get_DA(hdr), da); 2166 ether_addr_copy(ieee80211_get_SA(hdr), sa); 2167 } 2168 2169 static void ath11k_dp_rx_h_undecap(struct ath11k *ar, struct sk_buff *msdu, 2170 struct hal_rx_desc *rx_desc, 2171 enum hal_encrypt_type enctype, 2172 struct ieee80211_rx_status *status, 2173 bool decrypted) 2174 { 2175 u8 *first_hdr; 2176 u8 decap; 2177 struct ethhdr *ehdr; 2178 2179 first_hdr = ath11k_dp_rx_h_80211_hdr(ar->ab, rx_desc); 2180 decap = ath11k_dp_rx_h_msdu_start_decap_type(ar->ab, rx_desc); 2181 2182 switch (decap) { 2183 case DP_RX_DECAP_TYPE_NATIVE_WIFI: 2184 ath11k_dp_rx_h_undecap_nwifi(ar, msdu, first_hdr, 2185 enctype, status); 2186 break; 2187 case DP_RX_DECAP_TYPE_RAW: 2188 ath11k_dp_rx_h_undecap_raw(ar, msdu, enctype, status, 2189 decrypted); 2190 break; 2191 case DP_RX_DECAP_TYPE_ETHERNET2_DIX: 2192 ehdr = (struct ethhdr *)msdu->data; 2193 2194 /* mac80211 allows fast path only for authorized STA */ 2195 if (ehdr->h_proto == cpu_to_be16(ETH_P_PAE)) { 2196 ATH11K_SKB_RXCB(msdu)->is_eapol = true; 2197 ath11k_dp_rx_h_undecap_eth(ar, msdu, first_hdr, 2198 enctype, status); 2199 break; 2200 } 2201 2202 /* PN for mcast packets will be validated in mac80211; 2203 * remove eth header and add 802.11 header. 
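		 * ath11k_dp_rx_h_undecap_eth() below rebuilds the frame by
		 * pushing the rfc1042/LLC/SNAP header and the original 802.11
		 * header (first_hdr from the rx descriptor), and restores
		 * DA/SA from the stripped ethernet header.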
2204 */ 2205 if (ATH11K_SKB_RXCB(msdu)->is_mcbc && decrypted) 2206 ath11k_dp_rx_h_undecap_eth(ar, msdu, first_hdr, 2207 enctype, status); 2208 break; 2209 case DP_RX_DECAP_TYPE_8023: 2210 /* TODO: Handle undecap for these formats */ 2211 break; 2212 } 2213 } 2214 2215 static struct ath11k_peer * 2216 ath11k_dp_rx_h_find_peer(struct ath11k_base *ab, struct sk_buff *msdu) 2217 { 2218 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); 2219 struct hal_rx_desc *rx_desc = rxcb->rx_desc; 2220 struct ath11k_peer *peer = NULL; 2221 2222 lockdep_assert_held(&ab->base_lock); 2223 2224 if (rxcb->peer_id) 2225 peer = ath11k_peer_find_by_id(ab, rxcb->peer_id); 2226 2227 if (peer) 2228 return peer; 2229 2230 if (!rx_desc || !(ath11k_dp_rxdesc_mac_addr2_valid(ab, rx_desc))) 2231 return NULL; 2232 2233 peer = ath11k_peer_find_by_addr(ab, 2234 ath11k_dp_rxdesc_mpdu_start_addr2(ab, rx_desc)); 2235 return peer; 2236 } 2237 2238 static void ath11k_dp_rx_h_mpdu(struct ath11k *ar, 2239 struct sk_buff *msdu, 2240 struct hal_rx_desc *rx_desc, 2241 struct ieee80211_rx_status *rx_status) 2242 { 2243 bool fill_crypto_hdr; 2244 enum hal_encrypt_type enctype; 2245 bool is_decrypted = false; 2246 struct ath11k_skb_rxcb *rxcb; 2247 struct ieee80211_hdr *hdr; 2248 struct ath11k_peer *peer; 2249 struct rx_attention *rx_attention; 2250 u32 err_bitmap; 2251 2252 /* PN for multicast packets will be checked in mac80211 */ 2253 rxcb = ATH11K_SKB_RXCB(msdu); 2254 fill_crypto_hdr = ath11k_dp_rx_h_attn_is_mcbc(ar->ab, rx_desc); 2255 rxcb->is_mcbc = fill_crypto_hdr; 2256 2257 if (rxcb->is_mcbc) { 2258 rxcb->peer_id = ath11k_dp_rx_h_mpdu_start_peer_id(ar->ab, rx_desc); 2259 rxcb->seq_no = ath11k_dp_rx_h_mpdu_start_seq_no(ar->ab, rx_desc); 2260 } 2261 2262 spin_lock_bh(&ar->ab->base_lock); 2263 peer = ath11k_dp_rx_h_find_peer(ar->ab, msdu); 2264 if (peer) { 2265 if (rxcb->is_mcbc) 2266 enctype = peer->sec_type_grp; 2267 else 2268 enctype = peer->sec_type; 2269 } else { 2270 enctype = ath11k_dp_rx_h_mpdu_start_enctype(ar->ab, rx_desc); 2271 } 2272 spin_unlock_bh(&ar->ab->base_lock); 2273 2274 rx_attention = ath11k_dp_rx_get_attention(ar->ab, rx_desc); 2275 err_bitmap = ath11k_dp_rx_h_attn_mpdu_err(rx_attention); 2276 if (enctype != HAL_ENCRYPT_TYPE_OPEN && !err_bitmap) 2277 is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_attention); 2278 2279 /* Clear per-MPDU flags while leaving per-PPDU flags intact */ 2280 rx_status->flag &= ~(RX_FLAG_FAILED_FCS_CRC | 2281 RX_FLAG_MMIC_ERROR | 2282 RX_FLAG_DECRYPTED | 2283 RX_FLAG_IV_STRIPPED | 2284 RX_FLAG_MMIC_STRIPPED); 2285 2286 if (err_bitmap & DP_RX_MPDU_ERR_FCS) 2287 rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; 2288 if (err_bitmap & DP_RX_MPDU_ERR_TKIP_MIC) 2289 rx_status->flag |= RX_FLAG_MMIC_ERROR; 2290 2291 if (is_decrypted) { 2292 rx_status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MMIC_STRIPPED; 2293 2294 if (fill_crypto_hdr) 2295 rx_status->flag |= RX_FLAG_MIC_STRIPPED | 2296 RX_FLAG_ICV_STRIPPED; 2297 else 2298 rx_status->flag |= RX_FLAG_IV_STRIPPED | 2299 RX_FLAG_PN_VALIDATED; 2300 } 2301 2302 ath11k_dp_rx_h_csum_offload(ar, msdu); 2303 ath11k_dp_rx_h_undecap(ar, msdu, rx_desc, 2304 enctype, rx_status, is_decrypted); 2305 2306 if (!is_decrypted || fill_crypto_hdr) 2307 return; 2308 2309 if (ath11k_dp_rx_h_msdu_start_decap_type(ar->ab, rx_desc) != 2310 DP_RX_DECAP_TYPE_ETHERNET2_DIX) { 2311 hdr = (void *)msdu->data; 2312 hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED); 2313 } 2314 } 2315 2316 static void ath11k_dp_rx_h_rate(struct ath11k *ar, struct hal_rx_desc *rx_desc, 
2317 struct ieee80211_rx_status *rx_status) 2318 { 2319 struct ieee80211_supported_band *sband; 2320 enum rx_msdu_start_pkt_type pkt_type; 2321 u8 bw; 2322 u8 rate_mcs, nss; 2323 u8 sgi; 2324 bool is_cck, is_ldpc; 2325 2326 pkt_type = ath11k_dp_rx_h_msdu_start_pkt_type(ar->ab, rx_desc); 2327 bw = ath11k_dp_rx_h_msdu_start_rx_bw(ar->ab, rx_desc); 2328 rate_mcs = ath11k_dp_rx_h_msdu_start_rate_mcs(ar->ab, rx_desc); 2329 nss = ath11k_dp_rx_h_msdu_start_nss(ar->ab, rx_desc); 2330 sgi = ath11k_dp_rx_h_msdu_start_sgi(ar->ab, rx_desc); 2331 2332 switch (pkt_type) { 2333 case RX_MSDU_START_PKT_TYPE_11A: 2334 case RX_MSDU_START_PKT_TYPE_11B: 2335 is_cck = (pkt_type == RX_MSDU_START_PKT_TYPE_11B); 2336 sband = &ar->mac.sbands[rx_status->band]; 2337 rx_status->rate_idx = ath11k_mac_hw_rate_to_idx(sband, rate_mcs, 2338 is_cck); 2339 break; 2340 case RX_MSDU_START_PKT_TYPE_11N: 2341 rx_status->encoding = RX_ENC_HT; 2342 if (rate_mcs > ATH11K_HT_MCS_MAX) { 2343 ath11k_warn(ar->ab, 2344 "Received with invalid mcs in HT mode %d\n", 2345 rate_mcs); 2346 break; 2347 } 2348 rx_status->rate_idx = rate_mcs + (8 * (nss - 1)); 2349 if (sgi) 2350 rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI; 2351 rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw); 2352 break; 2353 case RX_MSDU_START_PKT_TYPE_11AC: 2354 rx_status->encoding = RX_ENC_VHT; 2355 rx_status->rate_idx = rate_mcs; 2356 if (rate_mcs > ATH11K_VHT_MCS_MAX) { 2357 ath11k_warn(ar->ab, 2358 "Received with invalid mcs in VHT mode %d\n", 2359 rate_mcs); 2360 break; 2361 } 2362 rx_status->nss = nss; 2363 if (sgi) 2364 rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI; 2365 rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw); 2366 is_ldpc = ath11k_dp_rx_h_msdu_start_ldpc_support(ar->ab, rx_desc); 2367 if (is_ldpc) 2368 rx_status->enc_flags |= RX_ENC_FLAG_LDPC; 2369 break; 2370 case RX_MSDU_START_PKT_TYPE_11AX: 2371 rx_status->rate_idx = rate_mcs; 2372 if (rate_mcs > ATH11K_HE_MCS_MAX) { 2373 ath11k_warn(ar->ab, 2374 "Received with invalid mcs in HE mode %d\n", 2375 rate_mcs); 2376 break; 2377 } 2378 rx_status->encoding = RX_ENC_HE; 2379 rx_status->nss = nss; 2380 rx_status->he_gi = ath11k_mac_he_gi_to_nl80211_he_gi(sgi); 2381 rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw); 2382 break; 2383 } 2384 } 2385 2386 static void ath11k_dp_rx_h_ppdu(struct ath11k *ar, struct hal_rx_desc *rx_desc, 2387 struct ieee80211_rx_status *rx_status) 2388 { 2389 u8 channel_num; 2390 u32 center_freq, meta_data; 2391 struct ieee80211_channel *channel; 2392 2393 rx_status->freq = 0; 2394 rx_status->rate_idx = 0; 2395 rx_status->nss = 0; 2396 rx_status->encoding = RX_ENC_LEGACY; 2397 rx_status->bw = RATE_INFO_BW_20; 2398 2399 rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL; 2400 2401 meta_data = ath11k_dp_rx_h_msdu_start_freq(ar->ab, rx_desc); 2402 channel_num = meta_data; 2403 center_freq = meta_data >> 16; 2404 2405 if (center_freq >= ATH11K_MIN_6G_FREQ && 2406 center_freq <= ATH11K_MAX_6G_FREQ) { 2407 rx_status->band = NL80211_BAND_6GHZ; 2408 rx_status->freq = center_freq; 2409 } else if (channel_num >= 1 && channel_num <= 14) { 2410 rx_status->band = NL80211_BAND_2GHZ; 2411 } else if (channel_num >= 36 && channel_num <= 173) { 2412 rx_status->band = NL80211_BAND_5GHZ; 2413 } else { 2414 spin_lock_bh(&ar->data_lock); 2415 channel = ar->rx_channel; 2416 if (channel) { 2417 rx_status->band = channel->band; 2418 channel_num = 2419 ieee80211_frequency_to_channel(channel->center_freq); 2420 } 2421 spin_unlock_bh(&ar->data_lock); 2422 ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "rx_desc: ", 2423 rx_desc, 
				sizeof(struct hal_rx_desc));
	}

	if (rx_status->band != NL80211_BAND_6GHZ)
		rx_status->freq = ieee80211_channel_to_frequency(channel_num,
								 rx_status->band);

	ath11k_dp_rx_h_rate(ar, rx_desc, rx_status);
}

static void ath11k_dp_rx_deliver_msdu(struct ath11k *ar, struct napi_struct *napi,
				      struct sk_buff *msdu,
				      struct ieee80211_rx_status *status)
{
	static const struct ieee80211_radiotap_he known = {
		.data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
				     IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN),
		.data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN),
	};
	struct ieee80211_rx_status *rx_status;
	struct ieee80211_radiotap_he *he = NULL;
	struct ieee80211_sta *pubsta = NULL;
	struct ath11k_peer *peer;
	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
	u8 decap = DP_RX_DECAP_TYPE_RAW;
	bool is_mcbc = rxcb->is_mcbc;
	bool is_eapol = rxcb->is_eapol;

	if (status->encoding == RX_ENC_HE &&
	    !(status->flag & RX_FLAG_RADIOTAP_HE) &&
	    !(status->flag & RX_FLAG_SKIP_MONITOR)) {
		he = skb_push(msdu, sizeof(known));
		memcpy(he, &known, sizeof(known));
		status->flag |= RX_FLAG_RADIOTAP_HE;
	}

	if (!(status->flag & RX_FLAG_ONLY_MONITOR))
		decap = ath11k_dp_rx_h_msdu_start_decap_type(ar->ab, rxcb->rx_desc);

	spin_lock_bh(&ar->ab->base_lock);
	peer = ath11k_dp_rx_h_find_peer(ar->ab, msdu);
	if (peer && peer->sta)
		pubsta = peer->sta;
	spin_unlock_bh(&ar->ab->base_lock);

	ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
		   "rx skb %p len %u peer %pM %d %s sn %u %s%s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
		   msdu,
		   msdu->len,
		   peer ? peer->addr : NULL,
		   rxcb->tid,
		   is_mcbc ? "mcast" : "ucast",
		   rxcb->seq_no,
		   (status->encoding == RX_ENC_LEGACY) ? "legacy" : "",
		   (status->encoding == RX_ENC_HT) ? "ht" : "",
		   (status->encoding == RX_ENC_VHT) ? "vht" : "",
		   (status->encoding == RX_ENC_HE) ? "he" : "",
		   (status->bw == RATE_INFO_BW_40) ? "40" : "",
		   (status->bw == RATE_INFO_BW_80) ? "80" : "",
		   (status->bw == RATE_INFO_BW_160) ? "160" : "",
		   status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "",
		   status->rate_idx,
		   status->nss,
		   status->freq,
		   status->band, status->flag,
		   !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
		   !!(status->flag & RX_FLAG_MMIC_ERROR),
		   !!(status->flag & RX_FLAG_AMSDU_MORE));

	ath11k_dbg_dump(ar->ab, ATH11K_DBG_DP_RX, NULL, "dp rx msdu: ",
			msdu->data, msdu->len);

	rx_status = IEEE80211_SKB_RXCB(msdu);
	*rx_status = *status;

	/* TODO: trace rx packet */

	/* PN for multicast packets is not validated in HW,
	 * so skip the 802.3 rx path.
	 * Also, fast_rx expects the STA to be authorized, hence
	 * eapol packets are sent in slow path.
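	 * (EAPOL frames and decrypted multicast frames were already converted
	 * back to 802.11 format by ath11k_dp_rx_h_undecap(), so leaving
	 * RX_FLAG_8023 unset for them keeps the header and the flag
	 * consistent.)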
2504 */ 2505 if (decap == DP_RX_DECAP_TYPE_ETHERNET2_DIX && !is_eapol && 2506 !(is_mcbc && rx_status->flag & RX_FLAG_DECRYPTED)) 2507 rx_status->flag |= RX_FLAG_8023; 2508 2509 ieee80211_rx_napi(ar->hw, pubsta, msdu, napi); 2510 } 2511 2512 static int ath11k_dp_rx_process_msdu(struct ath11k *ar, 2513 struct sk_buff *msdu, 2514 struct sk_buff_head *msdu_list, 2515 struct ieee80211_rx_status *rx_status) 2516 { 2517 struct ath11k_base *ab = ar->ab; 2518 struct hal_rx_desc *rx_desc, *lrx_desc; 2519 struct rx_attention *rx_attention; 2520 struct ath11k_skb_rxcb *rxcb; 2521 struct sk_buff *last_buf; 2522 u8 l3_pad_bytes; 2523 u8 *hdr_status; 2524 u16 msdu_len; 2525 int ret; 2526 u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz; 2527 2528 last_buf = ath11k_dp_rx_get_msdu_last_buf(msdu_list, msdu); 2529 if (!last_buf) { 2530 ath11k_warn(ab, 2531 "No valid Rx buffer to access Atten/MSDU_END/MPDU_END tlvs\n"); 2532 ret = -EIO; 2533 goto free_out; 2534 } 2535 2536 rx_desc = (struct hal_rx_desc *)msdu->data; 2537 if (ath11k_dp_rx_h_attn_msdu_len_err(ab, rx_desc)) { 2538 ath11k_warn(ar->ab, "msdu len not valid\n"); 2539 ret = -EIO; 2540 goto free_out; 2541 } 2542 2543 lrx_desc = (struct hal_rx_desc *)last_buf->data; 2544 rx_attention = ath11k_dp_rx_get_attention(ab, lrx_desc); 2545 if (!ath11k_dp_rx_h_attn_msdu_done(rx_attention)) { 2546 ath11k_warn(ab, "msdu_done bit in attention is not set\n"); 2547 ret = -EIO; 2548 goto free_out; 2549 } 2550 2551 rxcb = ATH11K_SKB_RXCB(msdu); 2552 rxcb->rx_desc = rx_desc; 2553 msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(ab, rx_desc); 2554 l3_pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(ab, lrx_desc); 2555 2556 if (rxcb->is_frag) { 2557 skb_pull(msdu, hal_rx_desc_sz); 2558 } else if (!rxcb->is_continuation) { 2559 if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) { 2560 hdr_status = ath11k_dp_rx_h_80211_hdr(ab, rx_desc); 2561 ret = -EINVAL; 2562 ath11k_warn(ab, "invalid msdu len %u\n", msdu_len); 2563 ath11k_dbg_dump(ab, ATH11K_DBG_DATA, NULL, "", hdr_status, 2564 sizeof(struct ieee80211_hdr)); 2565 ath11k_dbg_dump(ab, ATH11K_DBG_DATA, NULL, "", rx_desc, 2566 sizeof(struct hal_rx_desc)); 2567 goto free_out; 2568 } 2569 skb_put(msdu, hal_rx_desc_sz + l3_pad_bytes + msdu_len); 2570 skb_pull(msdu, hal_rx_desc_sz + l3_pad_bytes); 2571 } else { 2572 ret = ath11k_dp_rx_msdu_coalesce(ar, msdu_list, 2573 msdu, last_buf, 2574 l3_pad_bytes, msdu_len); 2575 if (ret) { 2576 ath11k_warn(ab, 2577 "failed to coalesce msdu rx buffer%d\n", ret); 2578 goto free_out; 2579 } 2580 } 2581 2582 ath11k_dp_rx_h_ppdu(ar, rx_desc, rx_status); 2583 ath11k_dp_rx_h_mpdu(ar, msdu, rx_desc, rx_status); 2584 2585 rx_status->flag |= RX_FLAG_SKIP_MONITOR | RX_FLAG_DUP_VALIDATED; 2586 2587 return 0; 2588 2589 free_out: 2590 return ret; 2591 } 2592 2593 static void ath11k_dp_rx_process_received_packets(struct ath11k_base *ab, 2594 struct napi_struct *napi, 2595 struct sk_buff_head *msdu_list, 2596 int mac_id) 2597 { 2598 struct sk_buff *msdu; 2599 struct ath11k *ar; 2600 struct ieee80211_rx_status rx_status = {0}; 2601 int ret; 2602 2603 if (skb_queue_empty(msdu_list)) 2604 return; 2605 2606 if (unlikely(!rcu_access_pointer(ab->pdevs_active[mac_id]))) { 2607 __skb_queue_purge(msdu_list); 2608 return; 2609 } 2610 2611 ar = ab->pdevs[mac_id].ar; 2612 if (unlikely(test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags))) { 2613 __skb_queue_purge(msdu_list); 2614 return; 2615 } 2616 2617 while ((msdu = __skb_dequeue(msdu_list))) { 2618 ret = ath11k_dp_rx_process_msdu(ar, msdu, msdu_list, &rx_status); 2619 if 
(unlikely(ret)) { 2620 ath11k_dbg(ab, ATH11K_DBG_DATA, 2621 "Unable to process msdu %d", ret); 2622 dev_kfree_skb_any(msdu); 2623 continue; 2624 } 2625 2626 ath11k_dp_rx_deliver_msdu(ar, napi, msdu, &rx_status); 2627 } 2628 } 2629 2630 int ath11k_dp_process_rx(struct ath11k_base *ab, int ring_id, 2631 struct napi_struct *napi, int budget) 2632 { 2633 struct ath11k_dp *dp = &ab->dp; 2634 struct dp_rxdma_ring *rx_ring; 2635 int num_buffs_reaped[MAX_RADIOS] = {0}; 2636 struct sk_buff_head msdu_list[MAX_RADIOS]; 2637 struct ath11k_skb_rxcb *rxcb; 2638 int total_msdu_reaped = 0; 2639 struct hal_srng *srng; 2640 struct sk_buff *msdu; 2641 bool done = false; 2642 int buf_id, mac_id; 2643 struct ath11k *ar; 2644 struct hal_reo_dest_ring *desc; 2645 enum hal_reo_dest_ring_push_reason push_reason; 2646 u32 cookie; 2647 int i; 2648 2649 for (i = 0; i < MAX_RADIOS; i++) 2650 __skb_queue_head_init(&msdu_list[i]); 2651 2652 srng = &ab->hal.srng_list[dp->reo_dst_ring[ring_id].ring_id]; 2653 2654 spin_lock_bh(&srng->lock); 2655 2656 try_again: 2657 ath11k_hal_srng_access_begin(ab, srng); 2658 2659 while (likely(desc = 2660 (struct hal_reo_dest_ring *)ath11k_hal_srng_dst_get_next_entry(ab, 2661 srng))) { 2662 cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE, 2663 desc->buf_addr_info.info1); 2664 buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, 2665 cookie); 2666 mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, cookie); 2667 2668 if (unlikely(buf_id == 0)) 2669 continue; 2670 2671 ar = ab->pdevs[mac_id].ar; 2672 rx_ring = &ar->dp.rx_refill_buf_ring; 2673 spin_lock_bh(&rx_ring->idr_lock); 2674 msdu = idr_find(&rx_ring->bufs_idr, buf_id); 2675 if (unlikely(!msdu)) { 2676 ath11k_warn(ab, "frame rx with invalid buf_id %d\n", 2677 buf_id); 2678 spin_unlock_bh(&rx_ring->idr_lock); 2679 continue; 2680 } 2681 2682 idr_remove(&rx_ring->bufs_idr, buf_id); 2683 spin_unlock_bh(&rx_ring->idr_lock); 2684 2685 rxcb = ATH11K_SKB_RXCB(msdu); 2686 dma_unmap_single(ab->dev, rxcb->paddr, 2687 msdu->len + skb_tailroom(msdu), 2688 DMA_FROM_DEVICE); 2689 2690 num_buffs_reaped[mac_id]++; 2691 2692 push_reason = FIELD_GET(HAL_REO_DEST_RING_INFO0_PUSH_REASON, 2693 desc->info0); 2694 if (unlikely(push_reason != 2695 HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION)) { 2696 dev_kfree_skb_any(msdu); 2697 ab->soc_stats.hal_reo_error[dp->reo_dst_ring[ring_id].ring_id]++; 2698 continue; 2699 } 2700 2701 rxcb->is_first_msdu = !!(desc->rx_msdu_info.info0 & 2702 RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU); 2703 rxcb->is_last_msdu = !!(desc->rx_msdu_info.info0 & 2704 RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU); 2705 rxcb->is_continuation = !!(desc->rx_msdu_info.info0 & 2706 RX_MSDU_DESC_INFO0_MSDU_CONTINUATION); 2707 rxcb->peer_id = FIELD_GET(RX_MPDU_DESC_META_DATA_PEER_ID, 2708 desc->rx_mpdu_info.meta_data); 2709 rxcb->seq_no = FIELD_GET(RX_MPDU_DESC_INFO0_SEQ_NUM, 2710 desc->rx_mpdu_info.info0); 2711 rxcb->tid = FIELD_GET(HAL_REO_DEST_RING_INFO0_RX_QUEUE_NUM, 2712 desc->info0); 2713 2714 rxcb->mac_id = mac_id; 2715 __skb_queue_tail(&msdu_list[mac_id], msdu); 2716 2717 if (rxcb->is_continuation) { 2718 done = false; 2719 } else { 2720 total_msdu_reaped++; 2721 done = true; 2722 } 2723 2724 if (total_msdu_reaped >= budget) 2725 break; 2726 } 2727 2728 /* Hw might have updated the head pointer after we cached it. 2729 * In this case, even though there are entries in the ring we'll 2730 * get rx_desc NULL. Give the read another try with updated cached 2731 * head pointer so that we can reap complete MPDU in the current 2732 * rx processing. 
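	 * The retry below calls ath11k_hal_srng_dst_num_free() with the
	 * sync flag set so that the cached head pointer is refreshed from
	 * hardware before deciding whether to reap again.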
2733 */ 2734 if (unlikely(!done && ath11k_hal_srng_dst_num_free(ab, srng, true))) { 2735 ath11k_hal_srng_access_end(ab, srng); 2736 goto try_again; 2737 } 2738 2739 ath11k_hal_srng_access_end(ab, srng); 2740 2741 spin_unlock_bh(&srng->lock); 2742 2743 if (unlikely(!total_msdu_reaped)) 2744 goto exit; 2745 2746 for (i = 0; i < ab->num_radios; i++) { 2747 if (!num_buffs_reaped[i]) 2748 continue; 2749 2750 ath11k_dp_rx_process_received_packets(ab, napi, &msdu_list[i], i); 2751 2752 ar = ab->pdevs[i].ar; 2753 rx_ring = &ar->dp.rx_refill_buf_ring; 2754 2755 ath11k_dp_rxbufs_replenish(ab, i, rx_ring, num_buffs_reaped[i], 2756 ab->hw_params.hal_params->rx_buf_rbm); 2757 } 2758 exit: 2759 return total_msdu_reaped; 2760 } 2761 2762 static void ath11k_dp_rx_update_peer_stats(struct ath11k_sta *arsta, 2763 struct hal_rx_mon_ppdu_info *ppdu_info) 2764 { 2765 struct ath11k_rx_peer_stats *rx_stats = arsta->rx_stats; 2766 u32 num_msdu; 2767 int i; 2768 2769 if (!rx_stats) 2770 return; 2771 2772 arsta->rssi_comb = ppdu_info->rssi_comb; 2773 ewma_avg_rssi_add(&arsta->avg_rssi, ppdu_info->rssi_comb); 2774 2775 num_msdu = ppdu_info->tcp_msdu_count + ppdu_info->tcp_ack_msdu_count + 2776 ppdu_info->udp_msdu_count + ppdu_info->other_msdu_count; 2777 2778 rx_stats->num_msdu += num_msdu; 2779 rx_stats->tcp_msdu_count += ppdu_info->tcp_msdu_count + 2780 ppdu_info->tcp_ack_msdu_count; 2781 rx_stats->udp_msdu_count += ppdu_info->udp_msdu_count; 2782 rx_stats->other_msdu_count += ppdu_info->other_msdu_count; 2783 2784 if (ppdu_info->preamble_type == HAL_RX_PREAMBLE_11A || 2785 ppdu_info->preamble_type == HAL_RX_PREAMBLE_11B) { 2786 ppdu_info->nss = 1; 2787 ppdu_info->mcs = HAL_RX_MAX_MCS; 2788 ppdu_info->tid = IEEE80211_NUM_TIDS; 2789 } 2790 2791 if (ppdu_info->nss > 0 && ppdu_info->nss <= HAL_RX_MAX_NSS) 2792 rx_stats->nss_count[ppdu_info->nss - 1] += num_msdu; 2793 2794 if (ppdu_info->mcs <= HAL_RX_MAX_MCS) 2795 rx_stats->mcs_count[ppdu_info->mcs] += num_msdu; 2796 2797 if (ppdu_info->gi < HAL_RX_GI_MAX) 2798 rx_stats->gi_count[ppdu_info->gi] += num_msdu; 2799 2800 if (ppdu_info->bw < HAL_RX_BW_MAX) 2801 rx_stats->bw_count[ppdu_info->bw] += num_msdu; 2802 2803 if (ppdu_info->ldpc < HAL_RX_SU_MU_CODING_MAX) 2804 rx_stats->coding_count[ppdu_info->ldpc] += num_msdu; 2805 2806 if (ppdu_info->tid <= IEEE80211_NUM_TIDS) 2807 rx_stats->tid_count[ppdu_info->tid] += num_msdu; 2808 2809 if (ppdu_info->preamble_type < HAL_RX_PREAMBLE_MAX) 2810 rx_stats->pream_cnt[ppdu_info->preamble_type] += num_msdu; 2811 2812 if (ppdu_info->reception_type < HAL_RX_RECEPTION_TYPE_MAX) 2813 rx_stats->reception_type[ppdu_info->reception_type] += num_msdu; 2814 2815 if (ppdu_info->is_stbc) 2816 rx_stats->stbc_count += num_msdu; 2817 2818 if (ppdu_info->beamformed) 2819 rx_stats->beamformed_count += num_msdu; 2820 2821 if (ppdu_info->num_mpdu_fcs_ok > 1) 2822 rx_stats->ampdu_msdu_count += num_msdu; 2823 else 2824 rx_stats->non_ampdu_msdu_count += num_msdu; 2825 2826 rx_stats->num_mpdu_fcs_ok += ppdu_info->num_mpdu_fcs_ok; 2827 rx_stats->num_mpdu_fcs_err += ppdu_info->num_mpdu_fcs_err; 2828 rx_stats->dcm_count += ppdu_info->dcm; 2829 rx_stats->ru_alloc_cnt[ppdu_info->ru_alloc] += num_msdu; 2830 2831 arsta->rssi_comb = ppdu_info->rssi_comb; 2832 2833 BUILD_BUG_ON(ARRAY_SIZE(arsta->chain_signal) > 2834 ARRAY_SIZE(ppdu_info->rssi_chain_pri20)); 2835 2836 for (i = 0; i < ARRAY_SIZE(arsta->chain_signal); i++) 2837 arsta->chain_signal[i] = ppdu_info->rssi_chain_pri20[i]; 2838 2839 rx_stats->rx_duration += ppdu_info->rx_duration; 2840 arsta->rx_duration 
= rx_stats->rx_duration; 2841 } 2842 2843 static struct sk_buff *ath11k_dp_rx_alloc_mon_status_buf(struct ath11k_base *ab, 2844 struct dp_rxdma_ring *rx_ring, 2845 int *buf_id) 2846 { 2847 struct sk_buff *skb; 2848 dma_addr_t paddr; 2849 2850 skb = dev_alloc_skb(DP_RX_BUFFER_SIZE + 2851 DP_RX_BUFFER_ALIGN_SIZE); 2852 2853 if (!skb) 2854 goto fail_alloc_skb; 2855 2856 if (!IS_ALIGNED((unsigned long)skb->data, 2857 DP_RX_BUFFER_ALIGN_SIZE)) { 2858 skb_pull(skb, PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) - 2859 skb->data); 2860 } 2861 2862 paddr = dma_map_single(ab->dev, skb->data, 2863 skb->len + skb_tailroom(skb), 2864 DMA_FROM_DEVICE); 2865 if (unlikely(dma_mapping_error(ab->dev, paddr))) 2866 goto fail_free_skb; 2867 2868 spin_lock_bh(&rx_ring->idr_lock); 2869 *buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0, 2870 rx_ring->bufs_max, GFP_ATOMIC); 2871 spin_unlock_bh(&rx_ring->idr_lock); 2872 if (*buf_id < 0) 2873 goto fail_dma_unmap; 2874 2875 ATH11K_SKB_RXCB(skb)->paddr = paddr; 2876 return skb; 2877 2878 fail_dma_unmap: 2879 dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb), 2880 DMA_FROM_DEVICE); 2881 fail_free_skb: 2882 dev_kfree_skb_any(skb); 2883 fail_alloc_skb: 2884 return NULL; 2885 } 2886 2887 int ath11k_dp_rx_mon_status_bufs_replenish(struct ath11k_base *ab, int mac_id, 2888 struct dp_rxdma_ring *rx_ring, 2889 int req_entries, 2890 enum hal_rx_buf_return_buf_manager mgr) 2891 { 2892 struct hal_srng *srng; 2893 u32 *desc; 2894 struct sk_buff *skb; 2895 int num_free; 2896 int num_remain; 2897 int buf_id; 2898 u32 cookie; 2899 dma_addr_t paddr; 2900 2901 req_entries = min(req_entries, rx_ring->bufs_max); 2902 2903 srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id]; 2904 2905 spin_lock_bh(&srng->lock); 2906 2907 ath11k_hal_srng_access_begin(ab, srng); 2908 2909 num_free = ath11k_hal_srng_src_num_free(ab, srng, true); 2910 2911 req_entries = min(num_free, req_entries); 2912 num_remain = req_entries; 2913 2914 while (num_remain > 0) { 2915 skb = ath11k_dp_rx_alloc_mon_status_buf(ab, rx_ring, 2916 &buf_id); 2917 if (!skb) 2918 break; 2919 paddr = ATH11K_SKB_RXCB(skb)->paddr; 2920 2921 desc = ath11k_hal_srng_src_get_next_entry(ab, srng); 2922 if (!desc) 2923 goto fail_desc_get; 2924 2925 cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) | 2926 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id); 2927 2928 num_remain--; 2929 2930 ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr); 2931 } 2932 2933 ath11k_hal_srng_access_end(ab, srng); 2934 2935 spin_unlock_bh(&srng->lock); 2936 2937 return req_entries - num_remain; 2938 2939 fail_desc_get: 2940 spin_lock_bh(&rx_ring->idr_lock); 2941 idr_remove(&rx_ring->bufs_idr, buf_id); 2942 spin_unlock_bh(&rx_ring->idr_lock); 2943 dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb), 2944 DMA_FROM_DEVICE); 2945 dev_kfree_skb_any(skb); 2946 ath11k_hal_srng_access_end(ab, srng); 2947 spin_unlock_bh(&srng->lock); 2948 2949 return req_entries - num_remain; 2950 } 2951 2952 #define ATH11K_DP_RX_FULL_MON_PPDU_ID_WRAP 32535 2953 2954 static void 2955 ath11k_dp_rx_mon_update_status_buf_state(struct ath11k_mon_data *pmon, 2956 struct hal_tlv_hdr *tlv) 2957 { 2958 struct hal_rx_ppdu_start *ppdu_start; 2959 u16 ppdu_id_diff, ppdu_id, tlv_len; 2960 u8 *ptr; 2961 2962 /* PPDU id is part of second tlv, move ptr to second tlv */ 2963 tlv_len = FIELD_GET(HAL_TLV_HDR_LEN, tlv->tl); 2964 ptr = (u8 *)tlv; 2965 ptr += sizeof(*tlv) + tlv_len; 2966 tlv = (struct hal_tlv_hdr *)ptr; 2967 2968 if (FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl) != 
HAL_RX_PPDU_START) 2969 return; 2970 2971 ptr += sizeof(*tlv); 2972 ppdu_start = (struct hal_rx_ppdu_start *)ptr; 2973 ppdu_id = FIELD_GET(HAL_RX_PPDU_START_INFO0_PPDU_ID, 2974 __le32_to_cpu(ppdu_start->info0)); 2975 2976 if (pmon->sw_mon_entries.ppdu_id < ppdu_id) { 2977 pmon->buf_state = DP_MON_STATUS_LEAD; 2978 ppdu_id_diff = ppdu_id - pmon->sw_mon_entries.ppdu_id; 2979 if (ppdu_id_diff > ATH11K_DP_RX_FULL_MON_PPDU_ID_WRAP) 2980 pmon->buf_state = DP_MON_STATUS_LAG; 2981 } else if (pmon->sw_mon_entries.ppdu_id > ppdu_id) { 2982 pmon->buf_state = DP_MON_STATUS_LAG; 2983 ppdu_id_diff = pmon->sw_mon_entries.ppdu_id - ppdu_id; 2984 if (ppdu_id_diff > ATH11K_DP_RX_FULL_MON_PPDU_ID_WRAP) 2985 pmon->buf_state = DP_MON_STATUS_LEAD; 2986 } 2987 } 2988 2989 static int ath11k_dp_rx_reap_mon_status_ring(struct ath11k_base *ab, int mac_id, 2990 int *budget, struct sk_buff_head *skb_list) 2991 { 2992 struct ath11k *ar; 2993 const struct ath11k_hw_hal_params *hal_params; 2994 struct ath11k_pdev_dp *dp; 2995 struct dp_rxdma_ring *rx_ring; 2996 struct ath11k_mon_data *pmon; 2997 struct hal_srng *srng; 2998 void *rx_mon_status_desc; 2999 struct sk_buff *skb; 3000 struct ath11k_skb_rxcb *rxcb; 3001 struct hal_tlv_hdr *tlv; 3002 u32 cookie; 3003 int buf_id, srng_id; 3004 dma_addr_t paddr; 3005 u8 rbm; 3006 int num_buffs_reaped = 0; 3007 3008 ar = ab->pdevs[ath11k_hw_mac_id_to_pdev_id(&ab->hw_params, mac_id)].ar; 3009 dp = &ar->dp; 3010 pmon = &dp->mon_data; 3011 srng_id = ath11k_hw_mac_id_to_srng_id(&ab->hw_params, mac_id); 3012 rx_ring = &dp->rx_mon_status_refill_ring[srng_id]; 3013 3014 srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id]; 3015 3016 spin_lock_bh(&srng->lock); 3017 3018 ath11k_hal_srng_access_begin(ab, srng); 3019 while (*budget) { 3020 *budget -= 1; 3021 rx_mon_status_desc = 3022 ath11k_hal_srng_src_peek(ab, srng); 3023 if (!rx_mon_status_desc) { 3024 pmon->buf_state = DP_MON_STATUS_REPLINISH; 3025 break; 3026 } 3027 3028 ath11k_hal_rx_buf_addr_info_get(rx_mon_status_desc, &paddr, 3029 &cookie, &rbm); 3030 if (paddr) { 3031 buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, cookie); 3032 3033 spin_lock_bh(&rx_ring->idr_lock); 3034 skb = idr_find(&rx_ring->bufs_idr, buf_id); 3035 spin_unlock_bh(&rx_ring->idr_lock); 3036 3037 if (!skb) { 3038 ath11k_warn(ab, "rx monitor status with invalid buf_id %d\n", 3039 buf_id); 3040 pmon->buf_state = DP_MON_STATUS_REPLINISH; 3041 goto move_next; 3042 } 3043 3044 rxcb = ATH11K_SKB_RXCB(skb); 3045 3046 dma_sync_single_for_cpu(ab->dev, rxcb->paddr, 3047 skb->len + skb_tailroom(skb), 3048 DMA_FROM_DEVICE); 3049 3050 tlv = (struct hal_tlv_hdr *)skb->data; 3051 if (FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl) != 3052 HAL_RX_STATUS_BUFFER_DONE) { 3053 ath11k_warn(ab, "mon status DONE not set %lx, buf_id %d\n", 3054 FIELD_GET(HAL_TLV_HDR_TAG, 3055 tlv->tl), buf_id); 3056 /* If done status is missing, hold onto status 3057 * ring until status is done for this status 3058 * ring buffer. 3059 * Keep HP in mon_status_ring unchanged, 3060 * and break from here. 
3061 * Check status for same buffer for next time 3062 */ 3063 pmon->buf_state = DP_MON_STATUS_NO_DMA; 3064 break; 3065 } 3066 3067 spin_lock_bh(&rx_ring->idr_lock); 3068 idr_remove(&rx_ring->bufs_idr, buf_id); 3069 spin_unlock_bh(&rx_ring->idr_lock); 3070 if (ab->hw_params.full_monitor_mode) { 3071 ath11k_dp_rx_mon_update_status_buf_state(pmon, tlv); 3072 if (paddr == pmon->mon_status_paddr) 3073 pmon->buf_state = DP_MON_STATUS_MATCH; 3074 } 3075 3076 dma_unmap_single(ab->dev, rxcb->paddr, 3077 skb->len + skb_tailroom(skb), 3078 DMA_FROM_DEVICE); 3079 3080 __skb_queue_tail(skb_list, skb); 3081 } else { 3082 pmon->buf_state = DP_MON_STATUS_REPLINISH; 3083 } 3084 move_next: 3085 skb = ath11k_dp_rx_alloc_mon_status_buf(ab, rx_ring, 3086 &buf_id); 3087 3088 if (!skb) { 3089 hal_params = ab->hw_params.hal_params; 3090 ath11k_hal_rx_buf_addr_info_set(rx_mon_status_desc, 0, 0, 3091 hal_params->rx_buf_rbm); 3092 num_buffs_reaped++; 3093 break; 3094 } 3095 rxcb = ATH11K_SKB_RXCB(skb); 3096 3097 cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) | 3098 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id); 3099 3100 ath11k_hal_rx_buf_addr_info_set(rx_mon_status_desc, rxcb->paddr, 3101 cookie, 3102 ab->hw_params.hal_params->rx_buf_rbm); 3103 ath11k_hal_srng_src_get_next_entry(ab, srng); 3104 num_buffs_reaped++; 3105 } 3106 ath11k_hal_srng_access_end(ab, srng); 3107 spin_unlock_bh(&srng->lock); 3108 3109 return num_buffs_reaped; 3110 } 3111 3112 static void ath11k_dp_rx_frag_timer(struct timer_list *timer) 3113 { 3114 struct dp_rx_tid *rx_tid = from_timer(rx_tid, timer, frag_timer); 3115 3116 spin_lock_bh(&rx_tid->ab->base_lock); 3117 if (rx_tid->last_frag_no && 3118 rx_tid->rx_frag_bitmap == GENMASK(rx_tid->last_frag_no, 0)) { 3119 spin_unlock_bh(&rx_tid->ab->base_lock); 3120 return; 3121 } 3122 ath11k_dp_rx_frags_cleanup(rx_tid, true); 3123 spin_unlock_bh(&rx_tid->ab->base_lock); 3124 } 3125 3126 int ath11k_peer_rx_frag_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id) 3127 { 3128 struct ath11k_base *ab = ar->ab; 3129 struct crypto_shash *tfm; 3130 struct ath11k_peer *peer; 3131 struct dp_rx_tid *rx_tid; 3132 int i; 3133 3134 tfm = crypto_alloc_shash("michael_mic", 0, 0); 3135 if (IS_ERR(tfm)) { 3136 ath11k_warn(ab, "failed to allocate michael_mic shash: %ld\n", 3137 PTR_ERR(tfm)); 3138 return PTR_ERR(tfm); 3139 } 3140 3141 spin_lock_bh(&ab->base_lock); 3142 3143 peer = ath11k_peer_find(ab, vdev_id, peer_mac); 3144 if (!peer) { 3145 ath11k_warn(ab, "failed to find the peer to set up fragment info\n"); 3146 spin_unlock_bh(&ab->base_lock); 3147 crypto_free_shash(tfm); 3148 return -ENOENT; 3149 } 3150 3151 for (i = 0; i <= IEEE80211_NUM_TIDS; i++) { 3152 rx_tid = &peer->rx_tid[i]; 3153 rx_tid->ab = ab; 3154 timer_setup(&rx_tid->frag_timer, ath11k_dp_rx_frag_timer, 0); 3155 skb_queue_head_init(&rx_tid->rx_frags); 3156 } 3157 3158 peer->tfm_mmic = tfm; 3159 peer->dp_setup_done = true; 3160 spin_unlock_bh(&ab->base_lock); 3161 3162 return 0; 3163 } 3164 3165 static int ath11k_dp_rx_h_michael_mic(struct crypto_shash *tfm, u8 *key, 3166 struct ieee80211_hdr *hdr, u8 *data, 3167 size_t data_len, u8 *mic) 3168 { 3169 SHASH_DESC_ON_STACK(desc, tfm); 3170 u8 mic_hdr[16] = {0}; 3171 u8 tid = 0; 3172 int ret; 3173 3174 if (!tfm) 3175 return -EINVAL; 3176 3177 desc->tfm = tfm; 3178 3179 ret = crypto_shash_setkey(tfm, key, 8); 3180 if (ret) 3181 goto out; 3182 3183 ret = crypto_shash_init(desc); 3184 if (ret) 3185 goto out; 3186 3187 /* TKIP MIC header */ 3188 memcpy(mic_hdr, ieee80211_get_DA(hdr), 
ETH_ALEN); 3189 memcpy(mic_hdr + ETH_ALEN, ieee80211_get_SA(hdr), ETH_ALEN); 3190 if (ieee80211_is_data_qos(hdr->frame_control)) 3191 tid = ieee80211_get_tid(hdr); 3192 mic_hdr[12] = tid; 3193 3194 ret = crypto_shash_update(desc, mic_hdr, 16); 3195 if (ret) 3196 goto out; 3197 ret = crypto_shash_update(desc, data, data_len); 3198 if (ret) 3199 goto out; 3200 ret = crypto_shash_final(desc, mic); 3201 out: 3202 shash_desc_zero(desc); 3203 return ret; 3204 } 3205 3206 static int ath11k_dp_rx_h_verify_tkip_mic(struct ath11k *ar, struct ath11k_peer *peer, 3207 struct sk_buff *msdu) 3208 { 3209 struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)msdu->data; 3210 struct ieee80211_rx_status *rxs = IEEE80211_SKB_RXCB(msdu); 3211 struct ieee80211_key_conf *key_conf; 3212 struct ieee80211_hdr *hdr; 3213 u8 mic[IEEE80211_CCMP_MIC_LEN]; 3214 int head_len, tail_len, ret; 3215 size_t data_len; 3216 u32 hdr_len, hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz; 3217 u8 *key, *data; 3218 u8 key_idx; 3219 3220 if (ath11k_dp_rx_h_mpdu_start_enctype(ar->ab, rx_desc) != 3221 HAL_ENCRYPT_TYPE_TKIP_MIC) 3222 return 0; 3223 3224 hdr = (struct ieee80211_hdr *)(msdu->data + hal_rx_desc_sz); 3225 hdr_len = ieee80211_hdrlen(hdr->frame_control); 3226 head_len = hdr_len + hal_rx_desc_sz + IEEE80211_TKIP_IV_LEN; 3227 tail_len = IEEE80211_CCMP_MIC_LEN + IEEE80211_TKIP_ICV_LEN + FCS_LEN; 3228 3229 if (!is_multicast_ether_addr(hdr->addr1)) 3230 key_idx = peer->ucast_keyidx; 3231 else 3232 key_idx = peer->mcast_keyidx; 3233 3234 key_conf = peer->keys[key_idx]; 3235 3236 data = msdu->data + head_len; 3237 data_len = msdu->len - head_len - tail_len; 3238 key = &key_conf->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY]; 3239 3240 ret = ath11k_dp_rx_h_michael_mic(peer->tfm_mmic, key, hdr, data, data_len, mic); 3241 if (ret || memcmp(mic, data + data_len, IEEE80211_CCMP_MIC_LEN)) 3242 goto mic_fail; 3243 3244 return 0; 3245 3246 mic_fail: 3247 (ATH11K_SKB_RXCB(msdu))->is_first_msdu = true; 3248 (ATH11K_SKB_RXCB(msdu))->is_last_msdu = true; 3249 3250 rxs->flag |= RX_FLAG_MMIC_ERROR | RX_FLAG_MMIC_STRIPPED | 3251 RX_FLAG_IV_STRIPPED | RX_FLAG_DECRYPTED; 3252 skb_pull(msdu, hal_rx_desc_sz); 3253 3254 ath11k_dp_rx_h_ppdu(ar, rx_desc, rxs); 3255 ath11k_dp_rx_h_undecap(ar, msdu, rx_desc, 3256 HAL_ENCRYPT_TYPE_TKIP_MIC, rxs, true); 3257 ieee80211_rx(ar->hw, msdu); 3258 return -EINVAL; 3259 } 3260 3261 static void ath11k_dp_rx_h_undecap_frag(struct ath11k *ar, struct sk_buff *msdu, 3262 enum hal_encrypt_type enctype, u32 flags) 3263 { 3264 struct ieee80211_hdr *hdr; 3265 size_t hdr_len; 3266 size_t crypto_len; 3267 u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz; 3268 3269 if (!flags) 3270 return; 3271 3272 hdr = (struct ieee80211_hdr *)(msdu->data + hal_rx_desc_sz); 3273 3274 if (flags & RX_FLAG_MIC_STRIPPED) 3275 skb_trim(msdu, msdu->len - 3276 ath11k_dp_rx_crypto_mic_len(ar, enctype)); 3277 3278 if (flags & RX_FLAG_ICV_STRIPPED) 3279 skb_trim(msdu, msdu->len - 3280 ath11k_dp_rx_crypto_icv_len(ar, enctype)); 3281 3282 if (flags & RX_FLAG_IV_STRIPPED) { 3283 hdr_len = ieee80211_hdrlen(hdr->frame_control); 3284 crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype); 3285 3286 memmove((void *)msdu->data + hal_rx_desc_sz + crypto_len, 3287 (void *)msdu->data + hal_rx_desc_sz, hdr_len); 3288 skb_pull(msdu, crypto_len); 3289 } 3290 } 3291 3292 static int ath11k_dp_rx_h_defrag(struct ath11k *ar, 3293 struct ath11k_peer *peer, 3294 struct dp_rx_tid *rx_tid, 3295 struct sk_buff **defrag_skb) 3296 { 3297 struct hal_rx_desc *rx_desc; 3298 struct 
sk_buff *skb, *first_frag, *last_frag; 3299 struct ieee80211_hdr *hdr; 3300 struct rx_attention *rx_attention; 3301 enum hal_encrypt_type enctype; 3302 bool is_decrypted = false; 3303 int msdu_len = 0; 3304 int extra_space; 3305 u32 flags, hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz; 3306 3307 first_frag = skb_peek(&rx_tid->rx_frags); 3308 last_frag = skb_peek_tail(&rx_tid->rx_frags); 3309 3310 skb_queue_walk(&rx_tid->rx_frags, skb) { 3311 flags = 0; 3312 rx_desc = (struct hal_rx_desc *)skb->data; 3313 hdr = (struct ieee80211_hdr *)(skb->data + hal_rx_desc_sz); 3314 3315 enctype = ath11k_dp_rx_h_mpdu_start_enctype(ar->ab, rx_desc); 3316 if (enctype != HAL_ENCRYPT_TYPE_OPEN) { 3317 rx_attention = ath11k_dp_rx_get_attention(ar->ab, rx_desc); 3318 is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_attention); 3319 } 3320 3321 if (is_decrypted) { 3322 if (skb != first_frag) 3323 flags |= RX_FLAG_IV_STRIPPED; 3324 if (skb != last_frag) 3325 flags |= RX_FLAG_ICV_STRIPPED | 3326 RX_FLAG_MIC_STRIPPED; 3327 } 3328 3329 /* RX fragments are always raw packets */ 3330 if (skb != last_frag) 3331 skb_trim(skb, skb->len - FCS_LEN); 3332 ath11k_dp_rx_h_undecap_frag(ar, skb, enctype, flags); 3333 3334 if (skb != first_frag) 3335 skb_pull(skb, hal_rx_desc_sz + 3336 ieee80211_hdrlen(hdr->frame_control)); 3337 msdu_len += skb->len; 3338 } 3339 3340 extra_space = msdu_len - (DP_RX_BUFFER_SIZE + skb_tailroom(first_frag)); 3341 if (extra_space > 0 && 3342 (pskb_expand_head(first_frag, 0, extra_space, GFP_ATOMIC) < 0)) 3343 return -ENOMEM; 3344 3345 __skb_unlink(first_frag, &rx_tid->rx_frags); 3346 while ((skb = __skb_dequeue(&rx_tid->rx_frags))) { 3347 skb_put_data(first_frag, skb->data, skb->len); 3348 dev_kfree_skb_any(skb); 3349 } 3350 3351 hdr = (struct ieee80211_hdr *)(first_frag->data + hal_rx_desc_sz); 3352 hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_MOREFRAGS); 3353 ATH11K_SKB_RXCB(first_frag)->is_frag = 1; 3354 3355 if (ath11k_dp_rx_h_verify_tkip_mic(ar, peer, first_frag)) 3356 first_frag = NULL; 3357 3358 *defrag_skb = first_frag; 3359 return 0; 3360 } 3361 3362 static int ath11k_dp_rx_h_defrag_reo_reinject(struct ath11k *ar, struct dp_rx_tid *rx_tid, 3363 struct sk_buff *defrag_skb) 3364 { 3365 struct ath11k_base *ab = ar->ab; 3366 struct ath11k_pdev_dp *dp = &ar->dp; 3367 struct dp_rxdma_ring *rx_refill_ring = &dp->rx_refill_buf_ring; 3368 struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)defrag_skb->data; 3369 struct hal_reo_entrance_ring *reo_ent_ring; 3370 struct hal_reo_dest_ring *reo_dest_ring; 3371 struct dp_link_desc_bank *link_desc_banks; 3372 struct hal_rx_msdu_link *msdu_link; 3373 struct hal_rx_msdu_details *msdu0; 3374 struct hal_srng *srng; 3375 dma_addr_t paddr; 3376 u32 desc_bank, msdu_info, mpdu_info; 3377 u32 dst_idx, cookie, hal_rx_desc_sz; 3378 int ret, buf_id; 3379 3380 hal_rx_desc_sz = ab->hw_params.hal_desc_sz; 3381 link_desc_banks = ab->dp.link_desc_banks; 3382 reo_dest_ring = rx_tid->dst_ring_desc; 3383 3384 ath11k_hal_rx_reo_ent_paddr_get(ab, reo_dest_ring, &paddr, &desc_bank); 3385 msdu_link = (struct hal_rx_msdu_link *)(link_desc_banks[desc_bank].vaddr + 3386 (paddr - link_desc_banks[desc_bank].paddr)); 3387 msdu0 = &msdu_link->msdu_link[0]; 3388 dst_idx = FIELD_GET(RX_MSDU_DESC_INFO0_REO_DEST_IND, msdu0->rx_msdu_info.info0); 3389 memset(msdu0, 0, sizeof(*msdu0)); 3390 3391 msdu_info = FIELD_PREP(RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU, 1) | 3392 FIELD_PREP(RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU, 1) | 3393 FIELD_PREP(RX_MSDU_DESC_INFO0_MSDU_CONTINUATION, 0) | 3394 
FIELD_PREP(RX_MSDU_DESC_INFO0_MSDU_LENGTH, 3395 defrag_skb->len - hal_rx_desc_sz) | 3396 FIELD_PREP(RX_MSDU_DESC_INFO0_REO_DEST_IND, dst_idx) | 3397 FIELD_PREP(RX_MSDU_DESC_INFO0_VALID_SA, 1) | 3398 FIELD_PREP(RX_MSDU_DESC_INFO0_VALID_DA, 1); 3399 msdu0->rx_msdu_info.info0 = msdu_info; 3400 3401 /* change msdu len in hal rx desc */ 3402 ath11k_dp_rxdesc_set_msdu_len(ab, rx_desc, defrag_skb->len - hal_rx_desc_sz); 3403 3404 paddr = dma_map_single(ab->dev, defrag_skb->data, 3405 defrag_skb->len + skb_tailroom(defrag_skb), 3406 DMA_TO_DEVICE); 3407 if (dma_mapping_error(ab->dev, paddr)) 3408 return -ENOMEM; 3409 3410 spin_lock_bh(&rx_refill_ring->idr_lock); 3411 buf_id = idr_alloc(&rx_refill_ring->bufs_idr, defrag_skb, 0, 3412 rx_refill_ring->bufs_max * 3, GFP_ATOMIC); 3413 spin_unlock_bh(&rx_refill_ring->idr_lock); 3414 if (buf_id < 0) { 3415 ret = -ENOMEM; 3416 goto err_unmap_dma; 3417 } 3418 3419 ATH11K_SKB_RXCB(defrag_skb)->paddr = paddr; 3420 cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, dp->mac_id) | 3421 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id); 3422 3423 ath11k_hal_rx_buf_addr_info_set(msdu0, paddr, cookie, 3424 ab->hw_params.hal_params->rx_buf_rbm); 3425 3426 /* Fill mpdu details into reo entrace ring */ 3427 srng = &ab->hal.srng_list[ab->dp.reo_reinject_ring.ring_id]; 3428 3429 spin_lock_bh(&srng->lock); 3430 ath11k_hal_srng_access_begin(ab, srng); 3431 3432 reo_ent_ring = (struct hal_reo_entrance_ring *) 3433 ath11k_hal_srng_src_get_next_entry(ab, srng); 3434 if (!reo_ent_ring) { 3435 ath11k_hal_srng_access_end(ab, srng); 3436 spin_unlock_bh(&srng->lock); 3437 ret = -ENOSPC; 3438 goto err_free_idr; 3439 } 3440 memset(reo_ent_ring, 0, sizeof(*reo_ent_ring)); 3441 3442 ath11k_hal_rx_reo_ent_paddr_get(ab, reo_dest_ring, &paddr, &desc_bank); 3443 ath11k_hal_rx_buf_addr_info_set(reo_ent_ring, paddr, desc_bank, 3444 HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST); 3445 3446 mpdu_info = FIELD_PREP(RX_MPDU_DESC_INFO0_MSDU_COUNT, 1) | 3447 FIELD_PREP(RX_MPDU_DESC_INFO0_SEQ_NUM, rx_tid->cur_sn) | 3448 FIELD_PREP(RX_MPDU_DESC_INFO0_FRAG_FLAG, 0) | 3449 FIELD_PREP(RX_MPDU_DESC_INFO0_VALID_SA, 1) | 3450 FIELD_PREP(RX_MPDU_DESC_INFO0_VALID_DA, 1) | 3451 FIELD_PREP(RX_MPDU_DESC_INFO0_RAW_MPDU, 1) | 3452 FIELD_PREP(RX_MPDU_DESC_INFO0_VALID_PN, 1); 3453 3454 reo_ent_ring->rx_mpdu_info.info0 = mpdu_info; 3455 reo_ent_ring->rx_mpdu_info.meta_data = reo_dest_ring->rx_mpdu_info.meta_data; 3456 reo_ent_ring->queue_addr_lo = reo_dest_ring->queue_addr_lo; 3457 reo_ent_ring->info0 = FIELD_PREP(HAL_REO_ENTR_RING_INFO0_QUEUE_ADDR_HI, 3458 FIELD_GET(HAL_REO_DEST_RING_INFO0_QUEUE_ADDR_HI, 3459 reo_dest_ring->info0)) | 3460 FIELD_PREP(HAL_REO_ENTR_RING_INFO0_DEST_IND, dst_idx); 3461 ath11k_hal_srng_access_end(ab, srng); 3462 spin_unlock_bh(&srng->lock); 3463 3464 return 0; 3465 3466 err_free_idr: 3467 spin_lock_bh(&rx_refill_ring->idr_lock); 3468 idr_remove(&rx_refill_ring->bufs_idr, buf_id); 3469 spin_unlock_bh(&rx_refill_ring->idr_lock); 3470 err_unmap_dma: 3471 dma_unmap_single(ab->dev, paddr, defrag_skb->len + skb_tailroom(defrag_skb), 3472 DMA_TO_DEVICE); 3473 return ret; 3474 } 3475 3476 static int ath11k_dp_rx_h_cmp_frags(struct ath11k *ar, 3477 struct sk_buff *a, struct sk_buff *b) 3478 { 3479 int frag1, frag2; 3480 3481 frag1 = ath11k_dp_rx_h_mpdu_start_frag_no(ar->ab, a); 3482 frag2 = ath11k_dp_rx_h_mpdu_start_frag_no(ar->ab, b); 3483 3484 return frag1 - frag2; 3485 } 3486 3487 static void ath11k_dp_rx_h_sort_frags(struct ath11k *ar, 3488 struct sk_buff_head *frag_list, 3489 struct sk_buff *cur_frag) 3490 
{ 3491 struct sk_buff *skb; 3492 int cmp; 3493 3494 skb_queue_walk(frag_list, skb) { 3495 cmp = ath11k_dp_rx_h_cmp_frags(ar, skb, cur_frag); 3496 if (cmp < 0) 3497 continue; 3498 __skb_queue_before(frag_list, skb, cur_frag); 3499 return; 3500 } 3501 __skb_queue_tail(frag_list, cur_frag); 3502 } 3503 3504 static u64 ath11k_dp_rx_h_get_pn(struct ath11k *ar, struct sk_buff *skb) 3505 { 3506 struct ieee80211_hdr *hdr; 3507 u64 pn = 0; 3508 u8 *ehdr; 3509 u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz; 3510 3511 hdr = (struct ieee80211_hdr *)(skb->data + hal_rx_desc_sz); 3512 ehdr = skb->data + hal_rx_desc_sz + ieee80211_hdrlen(hdr->frame_control); 3513 3514 pn = ehdr[0]; 3515 pn |= (u64)ehdr[1] << 8; 3516 pn |= (u64)ehdr[4] << 16; 3517 pn |= (u64)ehdr[5] << 24; 3518 pn |= (u64)ehdr[6] << 32; 3519 pn |= (u64)ehdr[7] << 40; 3520 3521 return pn; 3522 } 3523 3524 static bool 3525 ath11k_dp_rx_h_defrag_validate_incr_pn(struct ath11k *ar, struct dp_rx_tid *rx_tid) 3526 { 3527 enum hal_encrypt_type encrypt_type; 3528 struct sk_buff *first_frag, *skb; 3529 struct hal_rx_desc *desc; 3530 u64 last_pn; 3531 u64 cur_pn; 3532 3533 first_frag = skb_peek(&rx_tid->rx_frags); 3534 desc = (struct hal_rx_desc *)first_frag->data; 3535 3536 encrypt_type = ath11k_dp_rx_h_mpdu_start_enctype(ar->ab, desc); 3537 if (encrypt_type != HAL_ENCRYPT_TYPE_CCMP_128 && 3538 encrypt_type != HAL_ENCRYPT_TYPE_CCMP_256 && 3539 encrypt_type != HAL_ENCRYPT_TYPE_GCMP_128 && 3540 encrypt_type != HAL_ENCRYPT_TYPE_AES_GCMP_256) 3541 return true; 3542 3543 last_pn = ath11k_dp_rx_h_get_pn(ar, first_frag); 3544 skb_queue_walk(&rx_tid->rx_frags, skb) { 3545 if (skb == first_frag) 3546 continue; 3547 3548 cur_pn = ath11k_dp_rx_h_get_pn(ar, skb); 3549 if (cur_pn != last_pn + 1) 3550 return false; 3551 last_pn = cur_pn; 3552 } 3553 return true; 3554 } 3555 3556 static int ath11k_dp_rx_frag_h_mpdu(struct ath11k *ar, 3557 struct sk_buff *msdu, 3558 u32 *ring_desc) 3559 { 3560 struct ath11k_base *ab = ar->ab; 3561 struct hal_rx_desc *rx_desc; 3562 struct ath11k_peer *peer; 3563 struct dp_rx_tid *rx_tid; 3564 struct sk_buff *defrag_skb = NULL; 3565 u32 peer_id; 3566 u16 seqno, frag_no; 3567 u8 tid; 3568 int ret = 0; 3569 bool more_frags; 3570 bool is_mcbc; 3571 3572 rx_desc = (struct hal_rx_desc *)msdu->data; 3573 peer_id = ath11k_dp_rx_h_mpdu_start_peer_id(ar->ab, rx_desc); 3574 tid = ath11k_dp_rx_h_mpdu_start_tid(ar->ab, rx_desc); 3575 seqno = ath11k_dp_rx_h_mpdu_start_seq_no(ar->ab, rx_desc); 3576 frag_no = ath11k_dp_rx_h_mpdu_start_frag_no(ar->ab, msdu); 3577 more_frags = ath11k_dp_rx_h_mpdu_start_more_frags(ar->ab, msdu); 3578 is_mcbc = ath11k_dp_rx_h_attn_is_mcbc(ar->ab, rx_desc); 3579 3580 /* Multicast/Broadcast fragments are not expected */ 3581 if (is_mcbc) 3582 return -EINVAL; 3583 3584 if (!ath11k_dp_rx_h_mpdu_start_seq_ctrl_valid(ar->ab, rx_desc) || 3585 !ath11k_dp_rx_h_mpdu_start_fc_valid(ar->ab, rx_desc) || 3586 tid > IEEE80211_NUM_TIDS) 3587 return -EINVAL; 3588 3589 /* received unfragmented packet in reo 3590 * exception ring, this shouldn't happen 3591 * as these packets typically come from 3592 * reo2sw srngs. 
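	 *
	 * Fragments are buffered per TID in rx_tid->rx_frags and tracked in
	 * rx_frag_bitmap; once all fragments up to last_frag_no have arrived
	 * the MPDU is reassembled and reinjected via the REO entrance ring in
	 * ath11k_dp_rx_h_defrag_reo_reinject().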
3593 */ 3594 if (WARN_ON_ONCE(!frag_no && !more_frags)) 3595 return -EINVAL; 3596 3597 spin_lock_bh(&ab->base_lock); 3598 peer = ath11k_peer_find_by_id(ab, peer_id); 3599 if (!peer) { 3600 ath11k_warn(ab, "failed to find the peer to de-fragment received fragment peer_id %d\n", 3601 peer_id); 3602 ret = -ENOENT; 3603 goto out_unlock; 3604 } 3605 if (!peer->dp_setup_done) { 3606 ath11k_warn(ab, "The peer %pM [%d] has uninitialized datapath\n", 3607 peer->addr, peer_id); 3608 ret = -ENOENT; 3609 goto out_unlock; 3610 } 3611 3612 rx_tid = &peer->rx_tid[tid]; 3613 3614 if ((!skb_queue_empty(&rx_tid->rx_frags) && seqno != rx_tid->cur_sn) || 3615 skb_queue_empty(&rx_tid->rx_frags)) { 3616 /* Flush stored fragments and start a new sequence */ 3617 ath11k_dp_rx_frags_cleanup(rx_tid, true); 3618 rx_tid->cur_sn = seqno; 3619 } 3620 3621 if (rx_tid->rx_frag_bitmap & BIT(frag_no)) { 3622 /* Fragment already present */ 3623 ret = -EINVAL; 3624 goto out_unlock; 3625 } 3626 3627 if (!rx_tid->rx_frag_bitmap || (frag_no > __fls(rx_tid->rx_frag_bitmap))) 3628 __skb_queue_tail(&rx_tid->rx_frags, msdu); 3629 else 3630 ath11k_dp_rx_h_sort_frags(ar, &rx_tid->rx_frags, msdu); 3631 3632 rx_tid->rx_frag_bitmap |= BIT(frag_no); 3633 if (!more_frags) 3634 rx_tid->last_frag_no = frag_no; 3635 3636 if (frag_no == 0) { 3637 rx_tid->dst_ring_desc = kmemdup(ring_desc, 3638 sizeof(*rx_tid->dst_ring_desc), 3639 GFP_ATOMIC); 3640 if (!rx_tid->dst_ring_desc) { 3641 ret = -ENOMEM; 3642 goto out_unlock; 3643 } 3644 } else { 3645 ath11k_dp_rx_link_desc_return(ab, ring_desc, 3646 HAL_WBM_REL_BM_ACT_PUT_IN_IDLE); 3647 } 3648 3649 if (!rx_tid->last_frag_no || 3650 rx_tid->rx_frag_bitmap != GENMASK(rx_tid->last_frag_no, 0)) { 3651 mod_timer(&rx_tid->frag_timer, jiffies + 3652 ATH11K_DP_RX_FRAGMENT_TIMEOUT_MS); 3653 goto out_unlock; 3654 } 3655 3656 spin_unlock_bh(&ab->base_lock); 3657 del_timer_sync(&rx_tid->frag_timer); 3658 spin_lock_bh(&ab->base_lock); 3659 3660 peer = ath11k_peer_find_by_id(ab, peer_id); 3661 if (!peer) 3662 goto err_frags_cleanup; 3663 3664 if (!ath11k_dp_rx_h_defrag_validate_incr_pn(ar, rx_tid)) 3665 goto err_frags_cleanup; 3666 3667 if (ath11k_dp_rx_h_defrag(ar, peer, rx_tid, &defrag_skb)) 3668 goto err_frags_cleanup; 3669 3670 if (!defrag_skb) 3671 goto err_frags_cleanup; 3672 3673 if (ath11k_dp_rx_h_defrag_reo_reinject(ar, rx_tid, defrag_skb)) 3674 goto err_frags_cleanup; 3675 3676 ath11k_dp_rx_frags_cleanup(rx_tid, false); 3677 goto out_unlock; 3678 3679 err_frags_cleanup: 3680 dev_kfree_skb_any(defrag_skb); 3681 ath11k_dp_rx_frags_cleanup(rx_tid, true); 3682 out_unlock: 3683 spin_unlock_bh(&ab->base_lock); 3684 return ret; 3685 } 3686 3687 static int 3688 ath11k_dp_process_rx_err_buf(struct ath11k *ar, u32 *ring_desc, int buf_id, bool drop) 3689 { 3690 struct ath11k_pdev_dp *dp = &ar->dp; 3691 struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring; 3692 struct sk_buff *msdu; 3693 struct ath11k_skb_rxcb *rxcb; 3694 struct hal_rx_desc *rx_desc; 3695 u8 *hdr_status; 3696 u16 msdu_len; 3697 u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz; 3698 3699 spin_lock_bh(&rx_ring->idr_lock); 3700 msdu = idr_find(&rx_ring->bufs_idr, buf_id); 3701 if (!msdu) { 3702 ath11k_warn(ar->ab, "rx err buf with invalid buf_id %d\n", 3703 buf_id); 3704 spin_unlock_bh(&rx_ring->idr_lock); 3705 return -EINVAL; 3706 } 3707 3708 idr_remove(&rx_ring->bufs_idr, buf_id); 3709 spin_unlock_bh(&rx_ring->idr_lock); 3710 3711 rxcb = ATH11K_SKB_RXCB(msdu); 3712 dma_unmap_single(ar->ab->dev, rxcb->paddr, 3713 msdu->len + skb_tailroom(msdu), 
			 DMA_FROM_DEVICE);

	if (drop) {
		dev_kfree_skb_any(msdu);
		return 0;
	}

	rcu_read_lock();
	if (!rcu_dereference(ar->ab->pdevs_active[ar->pdev_idx])) {
		dev_kfree_skb_any(msdu);
		goto exit;
	}

	if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) {
		dev_kfree_skb_any(msdu);
		goto exit;
	}

	rx_desc = (struct hal_rx_desc *)msdu->data;
	msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(ar->ab, rx_desc);
	if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) {
		hdr_status = ath11k_dp_rx_h_80211_hdr(ar->ab, rx_desc);
		ath11k_warn(ar->ab, "invalid msdu len %u\n", msdu_len);
		ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "", hdr_status,
				sizeof(struct ieee80211_hdr));
		ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "", rx_desc,
				sizeof(struct hal_rx_desc));
		dev_kfree_skb_any(msdu);
		goto exit;
	}

	skb_put(msdu, hal_rx_desc_sz + msdu_len);

	if (ath11k_dp_rx_frag_h_mpdu(ar, msdu, ring_desc)) {
		dev_kfree_skb_any(msdu);
		ath11k_dp_rx_link_desc_return(ar->ab, ring_desc,
					      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
	}
exit:
	rcu_read_unlock();
	return 0;
}

int ath11k_dp_process_rx_err(struct ath11k_base *ab, struct napi_struct *napi,
			     int budget)
{
	u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC];
	struct dp_link_desc_bank *link_desc_banks;
	enum hal_rx_buf_return_buf_manager rbm;
	int tot_n_bufs_reaped, quota, ret, i;
	int n_bufs_reaped[MAX_RADIOS] = {0};
	struct dp_rxdma_ring *rx_ring;
	struct dp_srng *reo_except;
	u32 desc_bank, num_msdus;
	struct hal_srng *srng;
	struct ath11k_dp *dp;
	void *link_desc_va;
	int buf_id, mac_id;
	struct ath11k *ar;
	dma_addr_t paddr;
	u32 *desc;
	bool is_frag;
	u8 drop = 0;

	tot_n_bufs_reaped = 0;
	quota = budget;

	dp = &ab->dp;
	reo_except = &dp->reo_except_ring;
	link_desc_banks = dp->link_desc_banks;

	srng = &ab->hal.srng_list[reo_except->ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	while (budget &&
	       (desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
		struct hal_reo_dest_ring *reo_desc = (struct hal_reo_dest_ring *)desc;

		ab->soc_stats.err_ring_pkts++;
		ret = ath11k_hal_desc_reo_parse_err(ab, desc, &paddr,
						    &desc_bank);
		if (ret) {
			ath11k_warn(ab, "failed to parse error reo desc %d\n",
				    ret);
			continue;
		}
		link_desc_va = link_desc_banks[desc_bank].vaddr +
			       (paddr - link_desc_banks[desc_bank].paddr);
		ath11k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, msdu_cookies,
						 &rbm);
		if (rbm != HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST &&
		    rbm != HAL_RX_BUF_RBM_SW3_BM) {
			ab->soc_stats.invalid_rbm++;
			ath11k_warn(ab, "invalid return buffer manager %d\n", rbm);
			ath11k_dp_rx_link_desc_return(ab, desc,
						      HAL_WBM_REL_BM_ACT_REL_MSDU);
			continue;
		}

		is_frag = !!(reo_desc->rx_mpdu_info.info0 & RX_MPDU_DESC_INFO0_FRAG_FLAG);

		/* Process only rx fragments with one msdu per link desc below, and drop
		 * msdus indicated due to error reasons.
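		 * Dropped entries are still unmapped and freed by
		 * ath11k_dp_process_rx_err_buf() and counted in n_bufs_reaped,
		 * so their buffers get replenished below.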
3820 */ 3821 if (!is_frag || num_msdus > 1) { 3822 drop = 1; 3823 /* Return the link desc back to wbm idle list */ 3824 ath11k_dp_rx_link_desc_return(ab, desc, 3825 HAL_WBM_REL_BM_ACT_PUT_IN_IDLE); 3826 } 3827 3828 for (i = 0; i < num_msdus; i++) { 3829 buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, 3830 msdu_cookies[i]); 3831 3832 mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, 3833 msdu_cookies[i]); 3834 3835 ar = ab->pdevs[mac_id].ar; 3836 3837 if (!ath11k_dp_process_rx_err_buf(ar, desc, buf_id, drop)) { 3838 n_bufs_reaped[mac_id]++; 3839 tot_n_bufs_reaped++; 3840 } 3841 } 3842 3843 if (tot_n_bufs_reaped >= quota) { 3844 tot_n_bufs_reaped = quota; 3845 goto exit; 3846 } 3847 3848 budget = quota - tot_n_bufs_reaped; 3849 } 3850 3851 exit: 3852 ath11k_hal_srng_access_end(ab, srng); 3853 3854 spin_unlock_bh(&srng->lock); 3855 3856 for (i = 0; i < ab->num_radios; i++) { 3857 if (!n_bufs_reaped[i]) 3858 continue; 3859 3860 ar = ab->pdevs[i].ar; 3861 rx_ring = &ar->dp.rx_refill_buf_ring; 3862 3863 ath11k_dp_rxbufs_replenish(ab, i, rx_ring, n_bufs_reaped[i], 3864 ab->hw_params.hal_params->rx_buf_rbm); 3865 } 3866 3867 return tot_n_bufs_reaped; 3868 } 3869 3870 static void ath11k_dp_rx_null_q_desc_sg_drop(struct ath11k *ar, 3871 int msdu_len, 3872 struct sk_buff_head *msdu_list) 3873 { 3874 struct sk_buff *skb, *tmp; 3875 struct ath11k_skb_rxcb *rxcb; 3876 int n_buffs; 3877 3878 n_buffs = DIV_ROUND_UP(msdu_len, 3879 (DP_RX_BUFFER_SIZE - ar->ab->hw_params.hal_desc_sz)); 3880 3881 skb_queue_walk_safe(msdu_list, skb, tmp) { 3882 rxcb = ATH11K_SKB_RXCB(skb); 3883 if (rxcb->err_rel_src == HAL_WBM_REL_SRC_MODULE_REO && 3884 rxcb->err_code == HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO) { 3885 if (!n_buffs) 3886 break; 3887 __skb_unlink(skb, msdu_list); 3888 dev_kfree_skb_any(skb); 3889 n_buffs--; 3890 } 3891 } 3892 } 3893 3894 static int ath11k_dp_rx_h_null_q_desc(struct ath11k *ar, struct sk_buff *msdu, 3895 struct ieee80211_rx_status *status, 3896 struct sk_buff_head *msdu_list) 3897 { 3898 u16 msdu_len; 3899 struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data; 3900 struct rx_attention *rx_attention; 3901 u8 l3pad_bytes; 3902 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); 3903 u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz; 3904 3905 msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(ar->ab, desc); 3906 3907 if (!rxcb->is_frag && ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE)) { 3908 /* First buffer will be freed by the caller, so deduct its length */ 3909 msdu_len = msdu_len - (DP_RX_BUFFER_SIZE - hal_rx_desc_sz); 3910 ath11k_dp_rx_null_q_desc_sg_drop(ar, msdu_len, msdu_list); 3911 return -EINVAL; 3912 } 3913 3914 rx_attention = ath11k_dp_rx_get_attention(ar->ab, desc); 3915 if (!ath11k_dp_rx_h_attn_msdu_done(rx_attention)) { 3916 ath11k_warn(ar->ab, 3917 "msdu_done bit not set in null_q_desc processing\n"); 3918 __skb_queue_purge(msdu_list); 3919 return -EIO; 3920 } 3921 3922 /* Handle NULL queue descriptor violations arising out of a missing 3923 * REO queue for a given peer or TID. This typically happens when a 3924 * packet is received on a QoS-enabled TID before the ADDBA 3925 * negotiation for that TID has set up the TID queue. It may also 3926 * happen for MC/BC frames if they are not routed to the non-QoS TID 3927 * queue, in the absence of any other default TID queue. 3928 * This error can show up in both the REO destination and WBM release rings.
3929 */ 3930 3931 rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(ar->ab, desc); 3932 rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(ar->ab, desc); 3933 3934 if (rxcb->is_frag) { 3935 skb_pull(msdu, hal_rx_desc_sz); 3936 } else { 3937 l3pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, desc); 3938 3939 if ((hal_rx_desc_sz + l3pad_bytes + msdu_len) > DP_RX_BUFFER_SIZE) 3940 return -EINVAL; 3941 3942 skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len); 3943 skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes); 3944 } 3945 ath11k_dp_rx_h_ppdu(ar, desc, status); 3946 3947 ath11k_dp_rx_h_mpdu(ar, msdu, desc, status); 3948 3949 rxcb->tid = ath11k_dp_rx_h_mpdu_start_tid(ar->ab, desc); 3950 3951 /* Note that the caller will have access to the msdu and will complete 3952 * the rx with mac80211. There is no need to clean up the amsdu_list. 3953 */ 3954 3955 return 0; 3956 } 3957 3958 static bool ath11k_dp_rx_h_reo_err(struct ath11k *ar, struct sk_buff *msdu, 3959 struct ieee80211_rx_status *status, 3960 struct sk_buff_head *msdu_list) 3961 { 3962 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); 3963 bool drop = false; 3964 3965 ar->ab->soc_stats.reo_error[rxcb->err_code]++; 3966 3967 switch (rxcb->err_code) { 3968 case HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO: 3969 if (ath11k_dp_rx_h_null_q_desc(ar, msdu, status, msdu_list)) 3970 drop = true; 3971 break; 3972 case HAL_REO_DEST_RING_ERROR_CODE_PN_CHECK_FAILED: 3973 /* TODO: Do not drop PN failed packets in the driver; 3974 * instead, it is good to drop such packets in mac80211 3975 * after incrementing the replay counters. 3976 */ 3977 fallthrough; 3978 default: 3979 /* TODO: Review other errors and report them to mac80211 3980 * as appropriate. 3981 */ 3982 drop = true; 3983 break; 3984 } 3985 3986 return drop; 3987 } 3988 3989 static void ath11k_dp_rx_h_tkip_mic_err(struct ath11k *ar, struct sk_buff *msdu, 3990 struct ieee80211_rx_status *status) 3991 { 3992 u16 msdu_len; 3993 struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data; 3994 u8 l3pad_bytes; 3995 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); 3996 u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz; 3997 3998 rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(ar->ab, desc); 3999 rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(ar->ab, desc); 4000 4001 l3pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, desc); 4002 msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(ar->ab, desc); 4003 skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len); 4004 skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes); 4005 4006 ath11k_dp_rx_h_ppdu(ar, desc, status); 4007 4008 status->flag |= (RX_FLAG_MMIC_STRIPPED | RX_FLAG_MMIC_ERROR | 4009 RX_FLAG_DECRYPTED); 4010 4011 ath11k_dp_rx_h_undecap(ar, msdu, desc, 4012 HAL_ENCRYPT_TYPE_TKIP_MIC, status, false); 4013 } 4014 4015 static bool ath11k_dp_rx_h_rxdma_err(struct ath11k *ar, struct sk_buff *msdu, 4016 struct ieee80211_rx_status *status) 4017 { 4018 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); 4019 bool drop = false; 4020 4021 ar->ab->soc_stats.rxdma_error[rxcb->err_code]++; 4022 4023 switch (rxcb->err_code) { 4024 case HAL_REO_ENTR_RING_RXDMA_ECODE_TKIP_MIC_ERR: 4025 ath11k_dp_rx_h_tkip_mic_err(ar, msdu, status); 4026 break; 4027 default: 4028 /* TODO: Review other rxdma error codes to check if anything is 4029 * worth reporting to mac80211 4030 */ 4031 drop = true; 4032 break; 4033 } 4034 4035 return drop; 4036 } 4037 4038 static void ath11k_dp_rx_wbm_err(struct ath11k *ar, 4039 struct napi_struct *napi, 4040 struct
sk_buff *msdu, 4041 struct sk_buff_head *msdu_list) 4042 { 4043 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); 4044 struct ieee80211_rx_status rxs = {0}; 4045 bool drop = true; 4046 4047 switch (rxcb->err_rel_src) { 4048 case HAL_WBM_REL_SRC_MODULE_REO: 4049 drop = ath11k_dp_rx_h_reo_err(ar, msdu, &rxs, msdu_list); 4050 break; 4051 case HAL_WBM_REL_SRC_MODULE_RXDMA: 4052 drop = ath11k_dp_rx_h_rxdma_err(ar, msdu, &rxs); 4053 break; 4054 default: 4055 /* msdu will get freed */ 4056 break; 4057 } 4058 4059 if (drop) { 4060 dev_kfree_skb_any(msdu); 4061 return; 4062 } 4063 4064 ath11k_dp_rx_deliver_msdu(ar, napi, msdu, &rxs); 4065 } 4066 4067 int ath11k_dp_rx_process_wbm_err(struct ath11k_base *ab, 4068 struct napi_struct *napi, int budget) 4069 { 4070 struct ath11k *ar; 4071 struct ath11k_dp *dp = &ab->dp; 4072 struct dp_rxdma_ring *rx_ring; 4073 struct hal_rx_wbm_rel_info err_info; 4074 struct hal_srng *srng; 4075 struct sk_buff *msdu; 4076 struct sk_buff_head msdu_list[MAX_RADIOS]; 4077 struct ath11k_skb_rxcb *rxcb; 4078 u32 *rx_desc; 4079 int buf_id, mac_id; 4080 int num_buffs_reaped[MAX_RADIOS] = {0}; 4081 int total_num_buffs_reaped = 0; 4082 int ret, i; 4083 4084 for (i = 0; i < ab->num_radios; i++) 4085 __skb_queue_head_init(&msdu_list[i]); 4086 4087 srng = &ab->hal.srng_list[dp->rx_rel_ring.ring_id]; 4088 4089 spin_lock_bh(&srng->lock); 4090 4091 ath11k_hal_srng_access_begin(ab, srng); 4092 4093 while (budget) { 4094 rx_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng); 4095 if (!rx_desc) 4096 break; 4097 4098 ret = ath11k_hal_wbm_desc_parse_err(ab, rx_desc, &err_info); 4099 if (ret) { 4100 ath11k_warn(ab, 4101 "failed to parse rx error in wbm_rel ring desc %d\n", 4102 ret); 4103 continue; 4104 } 4105 4106 buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, err_info.cookie); 4107 mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, err_info.cookie); 4108 4109 ar = ab->pdevs[mac_id].ar; 4110 rx_ring = &ar->dp.rx_refill_buf_ring; 4111 4112 spin_lock_bh(&rx_ring->idr_lock); 4113 msdu = idr_find(&rx_ring->bufs_idr, buf_id); 4114 if (!msdu) { 4115 ath11k_warn(ab, "frame rx with invalid buf_id %d pdev %d\n", 4116 buf_id, mac_id); 4117 spin_unlock_bh(&rx_ring->idr_lock); 4118 continue; 4119 } 4120 4121 idr_remove(&rx_ring->bufs_idr, buf_id); 4122 spin_unlock_bh(&rx_ring->idr_lock); 4123 4124 rxcb = ATH11K_SKB_RXCB(msdu); 4125 dma_unmap_single(ab->dev, rxcb->paddr, 4126 msdu->len + skb_tailroom(msdu), 4127 DMA_FROM_DEVICE); 4128 4129 num_buffs_reaped[mac_id]++; 4130 total_num_buffs_reaped++; 4131 budget--; 4132 4133 if (err_info.push_reason != 4134 HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) { 4135 dev_kfree_skb_any(msdu); 4136 continue; 4137 } 4138 4139 rxcb->err_rel_src = err_info.err_rel_src; 4140 rxcb->err_code = err_info.err_code; 4141 rxcb->rx_desc = (struct hal_rx_desc *)msdu->data; 4142 __skb_queue_tail(&msdu_list[mac_id], msdu); 4143 } 4144 4145 ath11k_hal_srng_access_end(ab, srng); 4146 4147 spin_unlock_bh(&srng->lock); 4148 4149 if (!total_num_buffs_reaped) 4150 goto done; 4151 4152 for (i = 0; i < ab->num_radios; i++) { 4153 if (!num_buffs_reaped[i]) 4154 continue; 4155 4156 ar = ab->pdevs[i].ar; 4157 rx_ring = &ar->dp.rx_refill_buf_ring; 4158 4159 ath11k_dp_rxbufs_replenish(ab, i, rx_ring, num_buffs_reaped[i], 4160 ab->hw_params.hal_params->rx_buf_rbm); 4161 } 4162 4163 rcu_read_lock(); 4164 for (i = 0; i < ab->num_radios; i++) { 4165 if (!rcu_dereference(ab->pdevs_active[i])) { 4166 __skb_queue_purge(&msdu_list[i]); 4167 continue; 4168 } 4169 4170 ar = ab->pdevs[i].ar; 4171 4172 
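/* Note: while CAC is running on this pdev, buffered error frames are
 * not handed to mac80211; the check below simply purges them.
 */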
if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) { 4173 __skb_queue_purge(&msdu_list[i]); 4174 continue; 4175 } 4176 4177 while ((msdu = __skb_dequeue(&msdu_list[i])) != NULL) 4178 ath11k_dp_rx_wbm_err(ar, napi, msdu, &msdu_list[i]); 4179 } 4180 rcu_read_unlock(); 4181 done: 4182 return total_num_buffs_reaped; 4183 } 4184 4185 int ath11k_dp_process_rxdma_err(struct ath11k_base *ab, int mac_id, int budget) 4186 { 4187 struct ath11k *ar; 4188 struct dp_srng *err_ring; 4189 struct dp_rxdma_ring *rx_ring; 4190 struct dp_link_desc_bank *link_desc_banks = ab->dp.link_desc_banks; 4191 struct hal_srng *srng; 4192 u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC]; 4193 enum hal_rx_buf_return_buf_manager rbm; 4194 enum hal_reo_entr_rxdma_ecode rxdma_err_code; 4195 struct ath11k_skb_rxcb *rxcb; 4196 struct sk_buff *skb; 4197 struct hal_reo_entrance_ring *entr_ring; 4198 void *desc; 4199 int num_buf_freed = 0; 4200 int quota = budget; 4201 dma_addr_t paddr; 4202 u32 desc_bank; 4203 void *link_desc_va; 4204 int num_msdus; 4205 int i; 4206 int buf_id; 4207 4208 ar = ab->pdevs[ath11k_hw_mac_id_to_pdev_id(&ab->hw_params, mac_id)].ar; 4209 err_ring = &ar->dp.rxdma_err_dst_ring[ath11k_hw_mac_id_to_srng_id(&ab->hw_params, 4210 mac_id)]; 4211 rx_ring = &ar->dp.rx_refill_buf_ring; 4212 4213 srng = &ab->hal.srng_list[err_ring->ring_id]; 4214 4215 spin_lock_bh(&srng->lock); 4216 4217 ath11k_hal_srng_access_begin(ab, srng); 4218 4219 while (quota-- && 4220 (desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) { 4221 ath11k_hal_rx_reo_ent_paddr_get(ab, desc, &paddr, &desc_bank); 4222 4223 entr_ring = (struct hal_reo_entrance_ring *)desc; 4224 rxdma_err_code = 4225 FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE, 4226 entr_ring->info1); 4227 ab->soc_stats.rxdma_error[rxdma_err_code]++; 4228 4229 link_desc_va = link_desc_banks[desc_bank].vaddr + 4230 (paddr - link_desc_banks[desc_bank].paddr); 4231 ath11k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, 4232 msdu_cookies, &rbm); 4233 4234 for (i = 0; i < num_msdus; i++) { 4235 buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, 4236 msdu_cookies[i]); 4237 4238 spin_lock_bh(&rx_ring->idr_lock); 4239 skb = idr_find(&rx_ring->bufs_idr, buf_id); 4240 if (!skb) { 4241 ath11k_warn(ab, "rxdma error with invalid buf_id %d\n", 4242 buf_id); 4243 spin_unlock_bh(&rx_ring->idr_lock); 4244 continue; 4245 } 4246 4247 idr_remove(&rx_ring->bufs_idr, buf_id); 4248 spin_unlock_bh(&rx_ring->idr_lock); 4249 4250 rxcb = ATH11K_SKB_RXCB(skb); 4251 dma_unmap_single(ab->dev, rxcb->paddr, 4252 skb->len + skb_tailroom(skb), 4253 DMA_FROM_DEVICE); 4254 dev_kfree_skb_any(skb); 4255 4256 num_buf_freed++; 4257 } 4258 4259 ath11k_dp_rx_link_desc_return(ab, desc, 4260 HAL_WBM_REL_BM_ACT_PUT_IN_IDLE); 4261 } 4262 4263 ath11k_hal_srng_access_end(ab, srng); 4264 4265 spin_unlock_bh(&srng->lock); 4266 4267 if (num_buf_freed) 4268 ath11k_dp_rxbufs_replenish(ab, mac_id, rx_ring, num_buf_freed, 4269 ab->hw_params.hal_params->rx_buf_rbm); 4270 4271 return budget - quota; 4272 } 4273 4274 void ath11k_dp_process_reo_status(struct ath11k_base *ab) 4275 { 4276 struct ath11k_dp *dp = &ab->dp; 4277 struct hal_srng *srng; 4278 struct dp_reo_cmd *cmd, *tmp; 4279 bool found = false; 4280 u32 *reo_desc; 4281 u16 tag; 4282 struct hal_reo_status reo_status; 4283 4284 srng = &ab->hal.srng_list[dp->reo_status_ring.ring_id]; 4285 4286 memset(&reo_status, 0, sizeof(reo_status)); 4287 4288 spin_lock_bh(&srng->lock); 4289 4290 ath11k_hal_srng_access_begin(ab, srng); 4291 4292 while ((reo_desc = 
ath11k_hal_srng_dst_get_next_entry(ab, srng))) { 4293 tag = FIELD_GET(HAL_SRNG_TLV_HDR_TAG, *reo_desc); 4294 4295 switch (tag) { 4296 case HAL_REO_GET_QUEUE_STATS_STATUS: 4297 ath11k_hal_reo_status_queue_stats(ab, reo_desc, 4298 &reo_status); 4299 break; 4300 case HAL_REO_FLUSH_QUEUE_STATUS: 4301 ath11k_hal_reo_flush_queue_status(ab, reo_desc, 4302 &reo_status); 4303 break; 4304 case HAL_REO_FLUSH_CACHE_STATUS: 4305 ath11k_hal_reo_flush_cache_status(ab, reo_desc, 4306 &reo_status); 4307 break; 4308 case HAL_REO_UNBLOCK_CACHE_STATUS: 4309 ath11k_hal_reo_unblk_cache_status(ab, reo_desc, 4310 &reo_status); 4311 break; 4312 case HAL_REO_FLUSH_TIMEOUT_LIST_STATUS: 4313 ath11k_hal_reo_flush_timeout_list_status(ab, reo_desc, 4314 &reo_status); 4315 break; 4316 case HAL_REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS: 4317 ath11k_hal_reo_desc_thresh_reached_status(ab, reo_desc, 4318 &reo_status); 4319 break; 4320 case HAL_REO_UPDATE_RX_REO_QUEUE_STATUS: 4321 ath11k_hal_reo_update_rx_reo_queue_status(ab, reo_desc, 4322 &reo_status); 4323 break; 4324 default: 4325 ath11k_warn(ab, "Unknown reo status type %d\n", tag); 4326 continue; 4327 } 4328 4329 spin_lock_bh(&dp->reo_cmd_lock); 4330 list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) { 4331 if (reo_status.uniform_hdr.cmd_num == cmd->cmd_num) { 4332 found = true; 4333 list_del(&cmd->list); 4334 break; 4335 } 4336 } 4337 spin_unlock_bh(&dp->reo_cmd_lock); 4338 4339 if (found) { 4340 cmd->handler(dp, (void *)&cmd->data, 4341 reo_status.uniform_hdr.cmd_status); 4342 kfree(cmd); 4343 } 4344 4345 found = false; 4346 } 4347 4348 ath11k_hal_srng_access_end(ab, srng); 4349 4350 spin_unlock_bh(&srng->lock); 4351 } 4352 4353 void ath11k_dp_rx_pdev_free(struct ath11k_base *ab, int mac_id) 4354 { 4355 struct ath11k *ar = ab->pdevs[mac_id].ar; 4356 4357 ath11k_dp_rx_pdev_srng_free(ar); 4358 ath11k_dp_rxdma_pdev_buf_free(ar); 4359 } 4360 4361 int ath11k_dp_rx_pdev_alloc(struct ath11k_base *ab, int mac_id) 4362 { 4363 struct ath11k *ar = ab->pdevs[mac_id].ar; 4364 struct ath11k_pdev_dp *dp = &ar->dp; 4365 u32 ring_id; 4366 int i; 4367 int ret; 4368 4369 ret = ath11k_dp_rx_pdev_srng_alloc(ar); 4370 if (ret) { 4371 ath11k_warn(ab, "failed to setup rx srngs\n"); 4372 return ret; 4373 } 4374 4375 ret = ath11k_dp_rxdma_pdev_buf_setup(ar); 4376 if (ret) { 4377 ath11k_warn(ab, "failed to setup rxdma ring\n"); 4378 return ret; 4379 } 4380 4381 ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id; 4382 ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id, HAL_RXDMA_BUF); 4383 if (ret) { 4384 ath11k_warn(ab, "failed to configure rx_refill_buf_ring %d\n", 4385 ret); 4386 return ret; 4387 } 4388 4389 if (ab->hw_params.rx_mac_buf_ring) { 4390 for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) { 4391 ring_id = dp->rx_mac_buf_ring[i].ring_id; 4392 ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, 4393 mac_id + i, HAL_RXDMA_BUF); 4394 if (ret) { 4395 ath11k_warn(ab, "failed to configure rx_mac_buf_ring%d %d\n", 4396 i, ret); 4397 return ret; 4398 } 4399 } 4400 } 4401 4402 for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) { 4403 ring_id = dp->rxdma_err_dst_ring[i].ring_id; 4404 ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, 4405 mac_id + i, HAL_RXDMA_DST); 4406 if (ret) { 4407 ath11k_warn(ab, "failed to configure rxdma_err_dest_ring%d %d\n", 4408 i, ret); 4409 return ret; 4410 } 4411 } 4412 4413 if (!ab->hw_params.rxdma1_enable) 4414 goto config_refill_ring; 4415 4416 ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id; 4417 ret = ath11k_dp_tx_htt_srng_setup(ab, 
ring_id, 4418 mac_id, HAL_RXDMA_MONITOR_BUF); 4419 if (ret) { 4420 ath11k_warn(ab, "failed to configure rxdma_mon_buf_ring %d\n", 4421 ret); 4422 return ret; 4423 } 4424 ret = ath11k_dp_tx_htt_srng_setup(ab, 4425 dp->rxdma_mon_dst_ring.ring_id, 4426 mac_id, HAL_RXDMA_MONITOR_DST); 4427 if (ret) { 4428 ath11k_warn(ab, "failed to configure rxdma_mon_dst_ring %d\n", 4429 ret); 4430 return ret; 4431 } 4432 ret = ath11k_dp_tx_htt_srng_setup(ab, 4433 dp->rxdma_mon_desc_ring.ring_id, 4434 mac_id, HAL_RXDMA_MONITOR_DESC); 4435 if (ret) { 4436 ath11k_warn(ab, "failed to configure rxdma_mon_desc_ring %d\n", 4437 ret); 4438 return ret; 4439 } 4440 4441 config_refill_ring: 4442 for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) { 4443 ring_id = dp->rx_mon_status_refill_ring[i].refill_buf_ring.ring_id; 4444 ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id + i, 4445 HAL_RXDMA_MONITOR_STATUS); 4446 if (ret) { 4447 ath11k_warn(ab, 4448 "failed to configure mon_status_refill_ring%d %d\n", 4449 i, ret); 4450 return ret; 4451 } 4452 } 4453 4454 return 0; 4455 } 4456 4457 static void ath11k_dp_mon_set_frag_len(u32 *total_len, u32 *frag_len) 4458 { 4459 if (*total_len >= (DP_RX_BUFFER_SIZE - sizeof(struct hal_rx_desc))) { 4460 *frag_len = DP_RX_BUFFER_SIZE - sizeof(struct hal_rx_desc); 4461 *total_len -= *frag_len; 4462 } else { 4463 *frag_len = *total_len; 4464 *total_len = 0; 4465 } 4466 } 4467 4468 static 4469 int ath11k_dp_rx_monitor_link_desc_return(struct ath11k *ar, 4470 void *p_last_buf_addr_info, 4471 u8 mac_id) 4472 { 4473 struct ath11k_pdev_dp *dp = &ar->dp; 4474 struct dp_srng *dp_srng; 4475 void *hal_srng; 4476 void *src_srng_desc; 4477 int ret = 0; 4478 4479 if (ar->ab->hw_params.rxdma1_enable) { 4480 dp_srng = &dp->rxdma_mon_desc_ring; 4481 hal_srng = &ar->ab->hal.srng_list[dp_srng->ring_id]; 4482 } else { 4483 dp_srng = &ar->ab->dp.wbm_desc_rel_ring; 4484 hal_srng = &ar->ab->hal.srng_list[dp_srng->ring_id]; 4485 } 4486 4487 ath11k_hal_srng_access_begin(ar->ab, hal_srng); 4488 4489 src_srng_desc = ath11k_hal_srng_src_get_next_entry(ar->ab, hal_srng); 4490 4491 if (src_srng_desc) { 4492 struct ath11k_buffer_addr *src_desc = 4493 (struct ath11k_buffer_addr *)src_srng_desc; 4494 4495 *src_desc = *((struct ath11k_buffer_addr *)p_last_buf_addr_info); 4496 } else { 4497 ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 4498 "Monitor Link Desc Ring %d Full", mac_id); 4499 ret = -ENOMEM; 4500 } 4501 4502 ath11k_hal_srng_access_end(ar->ab, hal_srng); 4503 return ret; 4504 } 4505 4506 static 4507 void ath11k_dp_rx_mon_next_link_desc_get(void *rx_msdu_link_desc, 4508 dma_addr_t *paddr, u32 *sw_cookie, 4509 u8 *rbm, 4510 void **pp_buf_addr_info) 4511 { 4512 struct hal_rx_msdu_link *msdu_link = 4513 (struct hal_rx_msdu_link *)rx_msdu_link_desc; 4514 struct ath11k_buffer_addr *buf_addr_info; 4515 4516 buf_addr_info = (struct ath11k_buffer_addr *)&msdu_link->buf_addr_info; 4517 4518 ath11k_hal_rx_buf_addr_info_get(buf_addr_info, paddr, sw_cookie, rbm); 4519 4520 *pp_buf_addr_info = (void *)buf_addr_info; 4521 } 4522 4523 static int ath11k_dp_pkt_set_pktlen(struct sk_buff *skb, u32 len) 4524 { 4525 if (skb->len > len) { 4526 skb_trim(skb, len); 4527 } else { 4528 if (skb_tailroom(skb) < len - skb->len) { 4529 if ((pskb_expand_head(skb, 0, 4530 len - skb->len - skb_tailroom(skb), 4531 GFP_ATOMIC))) { 4532 dev_kfree_skb_any(skb); 4533 return -ENOMEM; 4534 } 4535 } 4536 skb_put(skb, (len - skb->len)); 4537 } 4538 return 0; 4539 } 4540 4541 static void ath11k_hal_rx_msdu_list_get(struct ath11k *ar, 4542 void
*msdu_link_desc, 4543 struct hal_rx_msdu_list *msdu_list, 4544 u16 *num_msdus) 4545 { 4546 struct hal_rx_msdu_details *msdu_details = NULL; 4547 struct rx_msdu_desc *msdu_desc_info = NULL; 4548 struct hal_rx_msdu_link *msdu_link = NULL; 4549 int i; 4550 u32 last = FIELD_PREP(RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU, 1); 4551 u32 first = FIELD_PREP(RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU, 1); 4552 u8 tmp = 0; 4553 4554 msdu_link = (struct hal_rx_msdu_link *)msdu_link_desc; 4555 msdu_details = &msdu_link->msdu_link[0]; 4556 4557 for (i = 0; i < HAL_RX_NUM_MSDU_DESC; i++) { 4558 if (FIELD_GET(BUFFER_ADDR_INFO0_ADDR, 4559 msdu_details[i].buf_addr_info.info0) == 0) { 4560 msdu_desc_info = &msdu_details[i - 1].rx_msdu_info; 4561 msdu_desc_info->info0 |= last; 4562 4563 break; 4564 } 4565 msdu_desc_info = &msdu_details[i].rx_msdu_info; 4566 4567 if (!i) 4568 msdu_desc_info->info0 |= first; 4569 else if (i == (HAL_RX_NUM_MSDU_DESC - 1)) 4570 msdu_desc_info->info0 |= last; 4571 msdu_list->msdu_info[i].msdu_flags = msdu_desc_info->info0; 4572 msdu_list->msdu_info[i].msdu_len = 4573 HAL_RX_MSDU_PKT_LENGTH_GET(msdu_desc_info->info0); 4574 msdu_list->sw_cookie[i] = 4575 FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE, 4576 msdu_details[i].buf_addr_info.info1); 4577 tmp = FIELD_GET(BUFFER_ADDR_INFO1_RET_BUF_MGR, 4578 msdu_details[i].buf_addr_info.info1); 4579 msdu_list->rbm[i] = tmp; 4580 } 4581 *num_msdus = i; 4582 } 4583 4584 static u32 ath11k_dp_rx_mon_comp_ppduid(u32 msdu_ppdu_id, u32 *ppdu_id, 4585 u32 *rx_bufs_used) 4586 { 4587 u32 ret = 0; 4588 4589 if ((*ppdu_id < msdu_ppdu_id) && 4590 ((msdu_ppdu_id - *ppdu_id) < DP_NOT_PPDU_ID_WRAP_AROUND)) { 4591 *ppdu_id = msdu_ppdu_id; 4592 ret = msdu_ppdu_id; 4593 } else if ((*ppdu_id > msdu_ppdu_id) && 4594 ((*ppdu_id - msdu_ppdu_id) > DP_NOT_PPDU_ID_WRAP_AROUND)) { 4595 /* mon_dst is lagging behind mon_status; 4596 * skip this dst_ring entry and free it 4597 */ 4598 *rx_bufs_used += 1; 4599 *ppdu_id = msdu_ppdu_id; 4600 ret = msdu_ppdu_id; 4601 } 4602 return ret; 4603 } 4604 4605 static void ath11k_dp_mon_get_buf_len(struct hal_rx_msdu_desc_info *info, 4606 bool *is_frag, u32 *total_len, 4607 u32 *frag_len, u32 *msdu_cnt) 4608 { 4609 if (info->msdu_flags & RX_MSDU_DESC_INFO0_MSDU_CONTINUATION) { 4610 if (!*is_frag) { 4611 *total_len = info->msdu_len; 4612 *is_frag = true; 4613 } 4614 ath11k_dp_mon_set_frag_len(total_len, 4615 frag_len); 4616 } else { 4617 if (*is_frag) { 4618 ath11k_dp_mon_set_frag_len(total_len, 4619 frag_len); 4620 } else { 4621 *frag_len = info->msdu_len; 4622 } 4623 *is_frag = false; 4624 *msdu_cnt -= 1; 4625 } 4626 } 4627 4628 static u32 4629 ath11k_dp_rx_mon_mpdu_pop(struct ath11k *ar, int mac_id, 4630 void *ring_entry, struct sk_buff **head_msdu, 4631 struct sk_buff **tail_msdu, u32 *npackets, 4632 u32 *ppdu_id) 4633 { 4634 struct ath11k_pdev_dp *dp = &ar->dp; 4635 struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data; 4636 struct dp_rxdma_ring *rx_ring = &dp->rxdma_mon_buf_ring; 4637 struct sk_buff *msdu = NULL, *last = NULL; 4638 struct hal_rx_msdu_list msdu_list; 4639 void *p_buf_addr_info, *p_last_buf_addr_info; 4640 struct hal_rx_desc *rx_desc; 4641 void *rx_msdu_link_desc; 4642 dma_addr_t paddr; 4643 u16 num_msdus = 0; 4644 u32 rx_buf_size, rx_pkt_offset, sw_cookie; 4645 u32 rx_bufs_used = 0, i = 0; 4646 u32 msdu_ppdu_id = 0, msdu_cnt = 0; 4647 u32 total_len = 0, frag_len = 0; 4648 bool is_frag, is_first_msdu; 4649 bool drop_mpdu = false; 4650 struct ath11k_skb_rxcb *rxcb; 4651 struct hal_reo_entrance_ring *ent_desc = 4652 (struct
hal_reo_entrance_ring *)ring_entry; 4653 int buf_id; 4654 u32 rx_link_buf_info[2]; 4655 u8 rbm; 4656 4657 if (!ar->ab->hw_params.rxdma1_enable) 4658 rx_ring = &dp->rx_refill_buf_ring; 4659 4660 ath11k_hal_rx_reo_ent_buf_paddr_get(ring_entry, &paddr, 4661 &sw_cookie, 4662 &p_last_buf_addr_info, &rbm, 4663 &msdu_cnt); 4664 4665 if (FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_PUSH_REASON, 4666 ent_desc->info1) == 4667 HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) { 4668 u8 rxdma_err = 4669 FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE, 4670 ent_desc->info1); 4671 if (rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_FLUSH_REQUEST_ERR || 4672 rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_MPDU_LEN_ERR || 4673 rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_OVERFLOW_ERR) { 4674 drop_mpdu = true; 4675 pmon->rx_mon_stats.dest_mpdu_drop++; 4676 } 4677 } 4678 4679 is_frag = false; 4680 is_first_msdu = true; 4681 4682 do { 4683 if (pmon->mon_last_linkdesc_paddr == paddr) { 4684 pmon->rx_mon_stats.dup_mon_linkdesc_cnt++; 4685 return rx_bufs_used; 4686 } 4687 4688 if (ar->ab->hw_params.rxdma1_enable) 4689 rx_msdu_link_desc = 4690 (void *)pmon->link_desc_banks[sw_cookie].vaddr + 4691 (paddr - pmon->link_desc_banks[sw_cookie].paddr); 4692 else 4693 rx_msdu_link_desc = 4694 (void *)ar->ab->dp.link_desc_banks[sw_cookie].vaddr + 4695 (paddr - ar->ab->dp.link_desc_banks[sw_cookie].paddr); 4696 4697 ath11k_hal_rx_msdu_list_get(ar, rx_msdu_link_desc, &msdu_list, 4698 &num_msdus); 4699 4700 for (i = 0; i < num_msdus; i++) { 4701 u32 l2_hdr_offset; 4702 4703 if (pmon->mon_last_buf_cookie == msdu_list.sw_cookie[i]) { 4704 ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 4705 "i %d last_cookie %d is same\n", 4706 i, pmon->mon_last_buf_cookie); 4707 drop_mpdu = true; 4708 pmon->rx_mon_stats.dup_mon_buf_cnt++; 4709 continue; 4710 } 4711 buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, 4712 msdu_list.sw_cookie[i]); 4713 4714 spin_lock_bh(&rx_ring->idr_lock); 4715 msdu = idr_find(&rx_ring->bufs_idr, buf_id); 4716 spin_unlock_bh(&rx_ring->idr_lock); 4717 if (!msdu) { 4718 ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 4719 "msdu_pop: invalid buf_id %d\n", buf_id); 4720 break; 4721 } 4722 rxcb = ATH11K_SKB_RXCB(msdu); 4723 if (!rxcb->unmapped) { 4724 dma_unmap_single(ar->ab->dev, rxcb->paddr, 4725 msdu->len + 4726 skb_tailroom(msdu), 4727 DMA_FROM_DEVICE); 4728 rxcb->unmapped = 1; 4729 } 4730 if (drop_mpdu) { 4731 ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 4732 "i %d drop msdu %p *ppdu_id %x\n", 4733 i, msdu, *ppdu_id); 4734 dev_kfree_skb_any(msdu); 4735 msdu = NULL; 4736 goto next_msdu; 4737 } 4738 4739 rx_desc = (struct hal_rx_desc *)msdu->data; 4740 4741 rx_pkt_offset = sizeof(struct hal_rx_desc); 4742 l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, rx_desc); 4743 4744 if (is_first_msdu) { 4745 if (!ath11k_dp_rxdesc_mpdu_valid(ar->ab, rx_desc)) { 4746 drop_mpdu = true; 4747 dev_kfree_skb_any(msdu); 4748 msdu = NULL; 4749 pmon->mon_last_linkdesc_paddr = paddr; 4750 goto next_msdu; 4751 } 4752 4753 msdu_ppdu_id = 4754 ath11k_dp_rxdesc_get_ppduid(ar->ab, rx_desc); 4755 4756 if (ath11k_dp_rx_mon_comp_ppduid(msdu_ppdu_id, 4757 ppdu_id, 4758 &rx_bufs_used)) { 4759 if (rx_bufs_used) { 4760 drop_mpdu = true; 4761 dev_kfree_skb_any(msdu); 4762 msdu = NULL; 4763 goto next_msdu; 4764 } 4765 return rx_bufs_used; 4766 } 4767 pmon->mon_last_linkdesc_paddr = paddr; 4768 is_first_msdu = false; 4769 } 4770 ath11k_dp_mon_get_buf_len(&msdu_list.msdu_info[i], 4771 &is_frag, &total_len, 4772 &frag_len, &msdu_cnt); 4773 rx_buf_size = rx_pkt_offset + l2_hdr_offset + frag_len; 4774 
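/* Resize the skb to rx_buf_size bytes (rx descriptor, l3 padding and
 * this fragment's payload) before chaining it into the monitor MPDU.
 */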
4775 ath11k_dp_pkt_set_pktlen(msdu, rx_buf_size); 4776 4777 if (!(*head_msdu)) 4778 *head_msdu = msdu; 4779 else if (last) 4780 last->next = msdu; 4781 4782 last = msdu; 4783 next_msdu: 4784 pmon->mon_last_buf_cookie = msdu_list.sw_cookie[i]; 4785 rx_bufs_used++; 4786 spin_lock_bh(&rx_ring->idr_lock); 4787 idr_remove(&rx_ring->bufs_idr, buf_id); 4788 spin_unlock_bh(&rx_ring->idr_lock); 4789 } 4790 4791 ath11k_hal_rx_buf_addr_info_set(rx_link_buf_info, paddr, sw_cookie, rbm); 4792 4793 ath11k_dp_rx_mon_next_link_desc_get(rx_msdu_link_desc, &paddr, 4794 &sw_cookie, &rbm, 4795 &p_buf_addr_info); 4796 4797 if (ar->ab->hw_params.rxdma1_enable) { 4798 if (ath11k_dp_rx_monitor_link_desc_return(ar, 4799 p_last_buf_addr_info, 4800 dp->mac_id)) 4801 ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 4802 "dp_rx_monitor_link_desc_return failed"); 4803 } else { 4804 ath11k_dp_rx_link_desc_return(ar->ab, rx_link_buf_info, 4805 HAL_WBM_REL_BM_ACT_PUT_IN_IDLE); 4806 } 4807 4808 p_last_buf_addr_info = p_buf_addr_info; 4809 4810 } while (paddr && msdu_cnt); 4811 4812 if (last) 4813 last->next = NULL; 4814 4815 *tail_msdu = msdu; 4816 4817 if (msdu_cnt == 0) 4818 *npackets = 1; 4819 4820 return rx_bufs_used; 4821 } 4822 4823 static void ath11k_dp_rx_msdus_set_payload(struct ath11k *ar, struct sk_buff *msdu) 4824 { 4825 u32 rx_pkt_offset, l2_hdr_offset; 4826 4827 rx_pkt_offset = ar->ab->hw_params.hal_desc_sz; 4828 l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, 4829 (struct hal_rx_desc *)msdu->data); 4830 skb_pull(msdu, rx_pkt_offset + l2_hdr_offset); 4831 } 4832 4833 static struct sk_buff * 4834 ath11k_dp_rx_mon_merg_msdus(struct ath11k *ar, 4835 u32 mac_id, struct sk_buff *head_msdu, 4836 struct sk_buff *last_msdu, 4837 struct ieee80211_rx_status *rxs, bool *fcs_err) 4838 { 4839 struct ath11k_base *ab = ar->ab; 4840 struct sk_buff *msdu, *prev_buf; 4841 struct hal_rx_desc *rx_desc; 4842 char *hdr_desc; 4843 u8 *dest, decap_format; 4844 struct ieee80211_hdr_3addr *wh; 4845 struct rx_attention *rx_attention; 4846 u32 err_bitmap; 4847 4848 if (!head_msdu) 4849 goto err_merge_fail; 4850 4851 rx_desc = (struct hal_rx_desc *)head_msdu->data; 4852 rx_attention = ath11k_dp_rx_get_attention(ab, rx_desc); 4853 err_bitmap = ath11k_dp_rx_h_attn_mpdu_err(rx_attention); 4854 4855 if (err_bitmap & DP_RX_MPDU_ERR_FCS) 4856 *fcs_err = true; 4857 4858 if (ath11k_dp_rxdesc_get_mpdulen_err(rx_attention)) 4859 return NULL; 4860 4861 decap_format = ath11k_dp_rx_h_msdu_start_decap_type(ab, rx_desc); 4862 4863 ath11k_dp_rx_h_ppdu(ar, rx_desc, rxs); 4864 4865 if (decap_format == DP_RX_DECAP_TYPE_RAW) { 4866 ath11k_dp_rx_msdus_set_payload(ar, head_msdu); 4867 4868 prev_buf = head_msdu; 4869 msdu = head_msdu->next; 4870 4871 while (msdu) { 4872 ath11k_dp_rx_msdus_set_payload(ar, msdu); 4873 4874 prev_buf = msdu; 4875 msdu = msdu->next; 4876 } 4877 4878 prev_buf->next = NULL; 4879 4880 skb_trim(prev_buf, prev_buf->len - HAL_RX_FCS_LEN); 4881 } else if (decap_format == DP_RX_DECAP_TYPE_NATIVE_WIFI) { 4882 u8 qos_pkt = 0; 4883 4884 rx_desc = (struct hal_rx_desc *)head_msdu->data; 4885 hdr_desc = ath11k_dp_rxdesc_get_80211hdr(ab, rx_desc); 4886 4887 /* Base size */ 4888 wh = (struct ieee80211_hdr_3addr *)hdr_desc; 4889 4890 if (ieee80211_is_data_qos(wh->frame_control)) 4891 qos_pkt = 1; 4892 4893 msdu = head_msdu; 4894 4895 while (msdu) { 4896 ath11k_dp_rx_msdus_set_payload(ar, msdu); 4897 if (qos_pkt) { 4898 dest = skb_push(msdu, sizeof(__le16)); 4899 if (!dest) 4900 goto err_merge_fail; 4901 memcpy(dest, hdr_desc, sizeof(struct 
ieee80211_qos_hdr)); 4902 } 4903 prev_buf = msdu; 4904 msdu = msdu->next; 4905 } 4906 dest = skb_put(prev_buf, HAL_RX_FCS_LEN); 4907 if (!dest) 4908 goto err_merge_fail; 4909 4910 ath11k_dbg(ab, ATH11K_DBG_DATA, 4911 "mpdu_buf %p mpdu_buf->len %u", 4912 prev_buf, prev_buf->len); 4913 } else { 4914 ath11k_dbg(ab, ATH11K_DBG_DATA, 4915 "decap format %d is not supported!\n", 4916 decap_format); 4917 goto err_merge_fail; 4918 } 4919 4920 return head_msdu; 4921 4922 err_merge_fail: 4923 return NULL; 4924 } 4925 4926 static void 4927 ath11k_dp_rx_update_radiotap_he(struct hal_rx_mon_ppdu_info *rx_status, 4928 u8 *rtap_buf) 4929 { 4930 u32 rtap_len = 0; 4931 4932 put_unaligned_le16(rx_status->he_data1, &rtap_buf[rtap_len]); 4933 rtap_len += 2; 4934 4935 put_unaligned_le16(rx_status->he_data2, &rtap_buf[rtap_len]); 4936 rtap_len += 2; 4937 4938 put_unaligned_le16(rx_status->he_data3, &rtap_buf[rtap_len]); 4939 rtap_len += 2; 4940 4941 put_unaligned_le16(rx_status->he_data4, &rtap_buf[rtap_len]); 4942 rtap_len += 2; 4943 4944 put_unaligned_le16(rx_status->he_data5, &rtap_buf[rtap_len]); 4945 rtap_len += 2; 4946 4947 put_unaligned_le16(rx_status->he_data6, &rtap_buf[rtap_len]); 4948 } 4949 4950 static void 4951 ath11k_dp_rx_update_radiotap_he_mu(struct hal_rx_mon_ppdu_info *rx_status, 4952 u8 *rtap_buf) 4953 { 4954 u32 rtap_len = 0; 4955 4956 put_unaligned_le16(rx_status->he_flags1, &rtap_buf[rtap_len]); 4957 rtap_len += 2; 4958 4959 put_unaligned_le16(rx_status->he_flags2, &rtap_buf[rtap_len]); 4960 rtap_len += 2; 4961 4962 rtap_buf[rtap_len] = rx_status->he_RU[0]; 4963 rtap_len += 1; 4964 4965 rtap_buf[rtap_len] = rx_status->he_RU[1]; 4966 rtap_len += 1; 4967 4968 rtap_buf[rtap_len] = rx_status->he_RU[2]; 4969 rtap_len += 1; 4970 4971 rtap_buf[rtap_len] = rx_status->he_RU[3]; 4972 } 4973 4974 static void ath11k_update_radiotap(struct ath11k *ar, 4975 struct hal_rx_mon_ppdu_info *ppduinfo, 4976 struct sk_buff *mon_skb, 4977 struct ieee80211_rx_status *rxs) 4978 { 4979 struct ieee80211_supported_band *sband; 4980 u8 *ptr = NULL; 4981 4982 rxs->flag |= RX_FLAG_MACTIME_START; 4983 rxs->signal = ppduinfo->rssi_comb + ATH11K_DEFAULT_NOISE_FLOOR; 4984 4985 if (ppduinfo->nss) 4986 rxs->nss = ppduinfo->nss; 4987 4988 if (ppduinfo->he_mu_flags) { 4989 rxs->flag |= RX_FLAG_RADIOTAP_HE_MU; 4990 rxs->encoding = RX_ENC_HE; 4991 ptr = skb_push(mon_skb, sizeof(struct ieee80211_radiotap_he_mu)); 4992 ath11k_dp_rx_update_radiotap_he_mu(ppduinfo, ptr); 4993 } else if (ppduinfo->he_flags) { 4994 rxs->flag |= RX_FLAG_RADIOTAP_HE; 4995 rxs->encoding = RX_ENC_HE; 4996 ptr = skb_push(mon_skb, sizeof(struct ieee80211_radiotap_he)); 4997 ath11k_dp_rx_update_radiotap_he(ppduinfo, ptr); 4998 rxs->rate_idx = ppduinfo->rate; 4999 } else if (ppduinfo->vht_flags) { 5000 rxs->encoding = RX_ENC_VHT; 5001 rxs->rate_idx = ppduinfo->rate; 5002 } else if (ppduinfo->ht_flags) { 5003 rxs->encoding = RX_ENC_HT; 5004 rxs->rate_idx = ppduinfo->rate; 5005 } else { 5006 rxs->encoding = RX_ENC_LEGACY; 5007 sband = &ar->mac.sbands[rxs->band]; 5008 rxs->rate_idx = ath11k_mac_hw_rate_to_idx(sband, ppduinfo->rate, 5009 ppduinfo->cck_flag); 5010 } 5011 5012 rxs->mactime = ppduinfo->tsft; 5013 } 5014 5015 static int ath11k_dp_rx_mon_deliver(struct ath11k *ar, u32 mac_id, 5016 struct sk_buff *head_msdu, 5017 struct hal_rx_mon_ppdu_info *ppduinfo, 5018 struct sk_buff *tail_msdu, 5019 struct napi_struct *napi) 5020 { 5021 struct ath11k_pdev_dp *dp = &ar->dp; 5022 struct sk_buff *mon_skb, *skb_next, *header; 5023 struct ieee80211_rx_status *rxs = 
&dp->rx_status; 5024 bool fcs_err = false; 5025 5026 mon_skb = ath11k_dp_rx_mon_merg_msdus(ar, mac_id, head_msdu, 5027 tail_msdu, rxs, &fcs_err); 5028 5029 if (!mon_skb) 5030 goto mon_deliver_fail; 5031 5032 header = mon_skb; 5033 5034 rxs->flag = 0; 5035 5036 if (fcs_err) 5037 rxs->flag = RX_FLAG_FAILED_FCS_CRC; 5038 5039 do { 5040 skb_next = mon_skb->next; 5041 if (!skb_next) 5042 rxs->flag &= ~RX_FLAG_AMSDU_MORE; 5043 else 5044 rxs->flag |= RX_FLAG_AMSDU_MORE; 5045 5046 if (mon_skb == header) { 5047 header = NULL; 5048 rxs->flag &= ~RX_FLAG_ALLOW_SAME_PN; 5049 } else { 5050 rxs->flag |= RX_FLAG_ALLOW_SAME_PN; 5051 } 5052 rxs->flag |= RX_FLAG_ONLY_MONITOR; 5053 ath11k_update_radiotap(ar, ppduinfo, mon_skb, rxs); 5054 5055 ath11k_dp_rx_deliver_msdu(ar, napi, mon_skb, rxs); 5056 mon_skb = skb_next; 5057 } while (mon_skb); 5058 rxs->flag = 0; 5059 5060 return 0; 5061 5062 mon_deliver_fail: 5063 mon_skb = head_msdu; 5064 while (mon_skb) { 5065 skb_next = mon_skb->next; 5066 dev_kfree_skb_any(mon_skb); 5067 mon_skb = skb_next; 5068 } 5069 return -EINVAL; 5070 } 5071 5072 /* The destination ring processing is considered stuck if the destination 5073 * ring does not move while the status ring advances by 16 PPDUs. As a 5074 * workaround, skip the current destination ring PPDU. 5075 */ 5076 #define MON_DEST_RING_STUCK_MAX_CNT 16 5077 5078 static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, int mac_id, 5079 u32 quota, struct napi_struct *napi) 5080 { 5081 struct ath11k_pdev_dp *dp = &ar->dp; 5082 struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data; 5083 const struct ath11k_hw_hal_params *hal_params; 5084 void *ring_entry; 5085 void *mon_dst_srng; 5086 u32 ppdu_id; 5087 u32 rx_bufs_used; 5088 u32 ring_id; 5089 struct ath11k_pdev_mon_stats *rx_mon_stats; 5090 u32 npackets = 0; 5091 u32 mpdu_rx_bufs_used; 5092 5093 if (ar->ab->hw_params.rxdma1_enable) 5094 ring_id = dp->rxdma_mon_dst_ring.ring_id; 5095 else 5096 ring_id = dp->rxdma_err_dst_ring[mac_id].ring_id; 5097 5098 mon_dst_srng = &ar->ab->hal.srng_list[ring_id]; 5099 5100 if (!mon_dst_srng) { 5101 ath11k_warn(ar->ab, 5102 "HAL Monitor Destination Ring Init Failed -- %p", 5103 mon_dst_srng); 5104 return; 5105 } 5106 5107 spin_lock_bh(&pmon->mon_lock); 5108 5109 ath11k_hal_srng_access_begin(ar->ab, mon_dst_srng); 5110 5111 ppdu_id = pmon->mon_ppdu_info.ppdu_id; 5112 rx_bufs_used = 0; 5113 rx_mon_stats = &pmon->rx_mon_stats; 5114 5115 while ((ring_entry = ath11k_hal_srng_dst_peek(ar->ab, mon_dst_srng))) { 5116 struct sk_buff *head_msdu, *tail_msdu; 5117 5118 head_msdu = NULL; 5119 tail_msdu = NULL; 5120 5121 mpdu_rx_bufs_used = ath11k_dp_rx_mon_mpdu_pop(ar, mac_id, ring_entry, 5122 &head_msdu, 5123 &tail_msdu, 5124 &npackets, &ppdu_id); 5125 5126 rx_bufs_used += mpdu_rx_bufs_used; 5127 5128 if (mpdu_rx_bufs_used) { 5129 dp->mon_dest_ring_stuck_cnt = 0; 5130 } else { 5131 dp->mon_dest_ring_stuck_cnt++; 5132 rx_mon_stats->dest_mon_not_reaped++; 5133 } 5134 5135 if (dp->mon_dest_ring_stuck_cnt > MON_DEST_RING_STUCK_MAX_CNT) { 5136 rx_mon_stats->dest_mon_stuck++; 5137 ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 5138 "status ring ppdu_id=%d dest ring ppdu_id=%d mon_dest_ring_stuck_cnt=%d dest_mon_not_reaped=%u dest_mon_stuck=%u\n", 5139 pmon->mon_ppdu_info.ppdu_id, ppdu_id, 5140 dp->mon_dest_ring_stuck_cnt, 5141 rx_mon_stats->dest_mon_not_reaped, 5142 rx_mon_stats->dest_mon_stuck); 5143 pmon->mon_ppdu_info.ppdu_id = ppdu_id; 5144 continue; 5145 } 5146 5147 if (ppdu_id != pmon->mon_ppdu_info.ppdu_id) { 5148 pmon->mon_ppdu_status =
DP_PPDU_STATUS_START; 5149 ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 5150 "dest_rx: new ppdu_id %x != status ppdu_id %x dest_mon_not_reaped = %u dest_mon_stuck = %u\n", 5151 ppdu_id, pmon->mon_ppdu_info.ppdu_id, 5152 rx_mon_stats->dest_mon_not_reaped, 5153 rx_mon_stats->dest_mon_stuck); 5154 break; 5155 } 5156 if (head_msdu && tail_msdu) { 5157 ath11k_dp_rx_mon_deliver(ar, dp->mac_id, head_msdu, 5158 &pmon->mon_ppdu_info, 5159 tail_msdu, napi); 5160 rx_mon_stats->dest_mpdu_done++; 5161 } 5162 5163 ring_entry = ath11k_hal_srng_dst_get_next_entry(ar->ab, 5164 mon_dst_srng); 5165 } 5166 ath11k_hal_srng_access_end(ar->ab, mon_dst_srng); 5167 5168 spin_unlock_bh(&pmon->mon_lock); 5169 5170 if (rx_bufs_used) { 5171 rx_mon_stats->dest_ppdu_done++; 5172 hal_params = ar->ab->hw_params.hal_params; 5173 5174 if (ar->ab->hw_params.rxdma1_enable) 5175 ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id, 5176 &dp->rxdma_mon_buf_ring, 5177 rx_bufs_used, 5178 hal_params->rx_buf_rbm); 5179 else 5180 ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id, 5181 &dp->rx_refill_buf_ring, 5182 rx_bufs_used, 5183 hal_params->rx_buf_rbm); 5184 } 5185 } 5186 5187 int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id, 5188 struct napi_struct *napi, int budget) 5189 { 5190 struct ath11k *ar = ath11k_ab_to_ar(ab, mac_id); 5191 enum hal_rx_mon_status hal_status; 5192 struct sk_buff *skb; 5193 struct sk_buff_head skb_list; 5194 struct ath11k_peer *peer; 5195 struct ath11k_sta *arsta; 5196 int num_buffs_reaped = 0; 5197 u32 rx_buf_sz; 5198 u16 log_type; 5199 struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&ar->dp.mon_data; 5200 struct ath11k_pdev_mon_stats *rx_mon_stats = &pmon->rx_mon_stats; 5201 struct hal_rx_mon_ppdu_info *ppdu_info = &pmon->mon_ppdu_info; 5202 5203 __skb_queue_head_init(&skb_list); 5204 5205 num_buffs_reaped = ath11k_dp_rx_reap_mon_status_ring(ab, mac_id, &budget, 5206 &skb_list); 5207 if (!num_buffs_reaped) 5208 goto exit; 5209 5210 memset(ppdu_info, 0, sizeof(*ppdu_info)); 5211 ppdu_info->peer_id = HAL_INVALID_PEERID; 5212 5213 while ((skb = __skb_dequeue(&skb_list))) { 5214 if (ath11k_debugfs_is_pktlog_lite_mode_enabled(ar)) { 5215 log_type = ATH11K_PKTLOG_TYPE_LITE_RX; 5216 rx_buf_sz = DP_RX_BUFFER_SIZE_LITE; 5217 } else if (ath11k_debugfs_is_pktlog_rx_stats_enabled(ar)) { 5218 log_type = ATH11K_PKTLOG_TYPE_RX_STATBUF; 5219 rx_buf_sz = DP_RX_BUFFER_SIZE; 5220 } else { 5221 log_type = ATH11K_PKTLOG_TYPE_INVALID; 5222 rx_buf_sz = 0; 5223 } 5224 5225 if (log_type != ATH11K_PKTLOG_TYPE_INVALID) 5226 trace_ath11k_htt_rxdesc(ar, skb->data, log_type, rx_buf_sz); 5227 5228 memset(ppdu_info, 0, sizeof(*ppdu_info)); 5229 ppdu_info->peer_id = HAL_INVALID_PEERID; 5230 hal_status = ath11k_hal_rx_parse_mon_status(ab, ppdu_info, skb); 5231 5232 if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags) && 5233 pmon->mon_ppdu_status == DP_PPDU_STATUS_START && 5234 hal_status == HAL_TLV_STATUS_PPDU_DONE) { 5235 rx_mon_stats->status_ppdu_done++; 5236 pmon->mon_ppdu_status = DP_PPDU_STATUS_DONE; 5237 ath11k_dp_rx_mon_dest_process(ar, mac_id, budget, napi); 5238 pmon->mon_ppdu_status = DP_PPDU_STATUS_START; 5239 } 5240 5241 if (ppdu_info->peer_id == HAL_INVALID_PEERID || 5242 hal_status != HAL_RX_MON_STATUS_PPDU_DONE) { 5243 dev_kfree_skb_any(skb); 5244 continue; 5245 } 5246 5247 rcu_read_lock(); 5248 spin_lock_bh(&ab->base_lock); 5249 peer = ath11k_peer_find_by_id(ab, ppdu_info->peer_id); 5250 5251 if (!peer || !peer->sta) { 5252 ath11k_dbg(ab, ATH11K_DBG_DATA, 5253 "failed to find the peer with peer_id 
%d\n", 5254 ppdu_info->peer_id); 5255 goto next_skb; 5256 } 5257 5258 arsta = (struct ath11k_sta *)peer->sta->drv_priv; 5259 ath11k_dp_rx_update_peer_stats(arsta, ppdu_info); 5260 5261 if (ath11k_debugfs_is_pktlog_peer_valid(ar, peer->addr)) 5262 trace_ath11k_htt_rxdesc(ar, skb->data, log_type, rx_buf_sz); 5263 5264 next_skb: 5265 spin_unlock_bh(&ab->base_lock); 5266 rcu_read_unlock(); 5267 5268 dev_kfree_skb_any(skb); 5269 memset(ppdu_info, 0, sizeof(*ppdu_info)); 5270 ppdu_info->peer_id = HAL_INVALID_PEERID; 5271 } 5272 exit: 5273 return num_buffs_reaped; 5274 } 5275 5276 static u32 5277 ath11k_dp_rx_full_mon_mpdu_pop(struct ath11k *ar, 5278 void *ring_entry, struct sk_buff **head_msdu, 5279 struct sk_buff **tail_msdu, 5280 struct hal_sw_mon_ring_entries *sw_mon_entries) 5281 { 5282 struct ath11k_pdev_dp *dp = &ar->dp; 5283 struct ath11k_mon_data *pmon = &dp->mon_data; 5284 struct dp_rxdma_ring *rx_ring = &dp->rxdma_mon_buf_ring; 5285 struct sk_buff *msdu = NULL, *last = NULL; 5286 struct hal_sw_monitor_ring *sw_desc = ring_entry; 5287 struct hal_rx_msdu_list msdu_list; 5288 struct hal_rx_desc *rx_desc; 5289 struct ath11k_skb_rxcb *rxcb; 5290 void *rx_msdu_link_desc; 5291 void *p_buf_addr_info, *p_last_buf_addr_info; 5292 int buf_id, i = 0; 5293 u32 rx_buf_size, rx_pkt_offset, l2_hdr_offset; 5294 u32 rx_bufs_used = 0, msdu_cnt = 0; 5295 u32 total_len = 0, frag_len = 0, sw_cookie; 5296 u16 num_msdus = 0; 5297 u8 rxdma_err, rbm; 5298 bool is_frag, is_first_msdu; 5299 bool drop_mpdu = false; 5300 5301 ath11k_hal_rx_sw_mon_ring_buf_paddr_get(ring_entry, sw_mon_entries); 5302 5303 sw_cookie = sw_mon_entries->mon_dst_sw_cookie; 5304 sw_mon_entries->end_of_ppdu = false; 5305 sw_mon_entries->drop_ppdu = false; 5306 p_last_buf_addr_info = sw_mon_entries->dst_buf_addr_info; 5307 msdu_cnt = sw_mon_entries->msdu_cnt; 5308 5309 sw_mon_entries->end_of_ppdu = 5310 FIELD_GET(HAL_SW_MON_RING_INFO0_END_OF_PPDU, sw_desc->info0); 5311 if (sw_mon_entries->end_of_ppdu) 5312 return rx_bufs_used; 5313 5314 if (FIELD_GET(HAL_SW_MON_RING_INFO0_RXDMA_PUSH_REASON, 5315 sw_desc->info0) == 5316 HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) { 5317 rxdma_err = 5318 FIELD_GET(HAL_SW_MON_RING_INFO0_RXDMA_ERROR_CODE, 5319 sw_desc->info0); 5320 if (rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_FLUSH_REQUEST_ERR || 5321 rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_MPDU_LEN_ERR || 5322 rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_OVERFLOW_ERR) { 5323 pmon->rx_mon_stats.dest_mpdu_drop++; 5324 drop_mpdu = true; 5325 } 5326 } 5327 5328 is_frag = false; 5329 is_first_msdu = true; 5330 5331 do { 5332 rx_msdu_link_desc = 5333 (u8 *)pmon->link_desc_banks[sw_cookie].vaddr + 5334 (sw_mon_entries->mon_dst_paddr - 5335 pmon->link_desc_banks[sw_cookie].paddr); 5336 5337 ath11k_hal_rx_msdu_list_get(ar, rx_msdu_link_desc, &msdu_list, 5338 &num_msdus); 5339 5340 for (i = 0; i < num_msdus; i++) { 5341 buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, 5342 msdu_list.sw_cookie[i]); 5343 5344 spin_lock_bh(&rx_ring->idr_lock); 5345 msdu = idr_find(&rx_ring->bufs_idr, buf_id); 5346 if (!msdu) { 5347 ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 5348 "full mon msdu_pop: invalid buf_id %d\n", 5349 buf_id); 5350 spin_unlock_bh(&rx_ring->idr_lock); 5351 break; 5352 } 5353 idr_remove(&rx_ring->bufs_idr, buf_id); 5354 spin_unlock_bh(&rx_ring->idr_lock); 5355 5356 rxcb = ATH11K_SKB_RXCB(msdu); 5357 if (!rxcb->unmapped) { 5358 dma_unmap_single(ar->ab->dev, rxcb->paddr, 5359 msdu->len + 5360 skb_tailroom(msdu), 5361 DMA_FROM_DEVICE); 5362 rxcb->unmapped = 1; 5363 } 5364 if 
(drop_mpdu) { 5365 ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 5366 "full mon: i %d drop msdu %p *ppdu_id %x\n", 5367 i, msdu, sw_mon_entries->ppdu_id); 5368 dev_kfree_skb_any(msdu); 5369 msdu_cnt--; 5370 goto next_msdu; 5371 } 5372 5373 rx_desc = (struct hal_rx_desc *)msdu->data; 5374 5375 rx_pkt_offset = sizeof(struct hal_rx_desc); 5376 l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, rx_desc); 5377 5378 if (is_first_msdu) { 5379 if (!ath11k_dp_rxdesc_mpdu_valid(ar->ab, rx_desc)) { 5380 drop_mpdu = true; 5381 dev_kfree_skb_any(msdu); 5382 msdu = NULL; 5383 goto next_msdu; 5384 } 5385 is_first_msdu = false; 5386 } 5387 5388 ath11k_dp_mon_get_buf_len(&msdu_list.msdu_info[i], 5389 &is_frag, &total_len, 5390 &frag_len, &msdu_cnt); 5391 5392 rx_buf_size = rx_pkt_offset + l2_hdr_offset + frag_len; 5393 5394 ath11k_dp_pkt_set_pktlen(msdu, rx_buf_size); 5395 5396 if (!(*head_msdu)) 5397 *head_msdu = msdu; 5398 else if (last) 5399 last->next = msdu; 5400 5401 last = msdu; 5402 next_msdu: 5403 rx_bufs_used++; 5404 } 5405 5406 ath11k_dp_rx_mon_next_link_desc_get(rx_msdu_link_desc, 5407 &sw_mon_entries->mon_dst_paddr, 5408 &sw_mon_entries->mon_dst_sw_cookie, 5409 &rbm, 5410 &p_buf_addr_info); 5411 5412 if (ath11k_dp_rx_monitor_link_desc_return(ar, 5413 p_last_buf_addr_info, 5414 dp->mac_id)) 5415 ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 5416 "full mon: dp_rx_monitor_link_desc_return failed\n"); 5417 5418 p_last_buf_addr_info = p_buf_addr_info; 5419 5420 } while (sw_mon_entries->mon_dst_paddr && msdu_cnt); 5421 5422 if (last) 5423 last->next = NULL; 5424 5425 *tail_msdu = msdu; 5426 5427 return rx_bufs_used; 5428 } 5429 5430 static int ath11k_dp_rx_full_mon_prepare_mpdu(struct ath11k_dp *dp, 5431 struct dp_full_mon_mpdu *mon_mpdu, 5432 struct sk_buff *head, 5433 struct sk_buff *tail) 5434 { 5435 mon_mpdu = kzalloc(sizeof(*mon_mpdu), GFP_ATOMIC); 5436 if (!mon_mpdu) 5437 return -ENOMEM; 5438 5439 list_add_tail(&mon_mpdu->list, &dp->dp_full_mon_mpdu_list); 5440 mon_mpdu->head = head; 5441 mon_mpdu->tail = tail; 5442 5443 return 0; 5444 } 5445 5446 static void ath11k_dp_rx_full_mon_drop_ppdu(struct ath11k_dp *dp, 5447 struct dp_full_mon_mpdu *mon_mpdu) 5448 { 5449 struct dp_full_mon_mpdu *tmp; 5450 struct sk_buff *tmp_msdu, *skb_next; 5451 5452 if (list_empty(&dp->dp_full_mon_mpdu_list)) 5453 return; 5454 5455 list_for_each_entry_safe(mon_mpdu, tmp, &dp->dp_full_mon_mpdu_list, list) { 5456 list_del(&mon_mpdu->list); 5457 5458 tmp_msdu = mon_mpdu->head; 5459 while (tmp_msdu) { 5460 skb_next = tmp_msdu->next; 5461 dev_kfree_skb_any(tmp_msdu); 5462 tmp_msdu = skb_next; 5463 } 5464 5465 kfree(mon_mpdu); 5466 } 5467 } 5468 5469 static int ath11k_dp_rx_full_mon_deliver_ppdu(struct ath11k *ar, 5470 int mac_id, 5471 struct ath11k_mon_data *pmon, 5472 struct napi_struct *napi) 5473 { 5474 struct ath11k_pdev_mon_stats *rx_mon_stats; 5475 struct dp_full_mon_mpdu *tmp; 5476 struct dp_full_mon_mpdu *mon_mpdu = pmon->mon_mpdu; 5477 struct sk_buff *head_msdu, *tail_msdu; 5478 struct ath11k_base *ab = ar->ab; 5479 struct ath11k_dp *dp = &ab->dp; 5480 int ret = 0; 5481 5482 rx_mon_stats = &pmon->rx_mon_stats; 5483 5484 list_for_each_entry_safe(mon_mpdu, tmp, &dp->dp_full_mon_mpdu_list, list) { 5485 list_del(&mon_mpdu->list); 5486 head_msdu = mon_mpdu->head; 5487 tail_msdu = mon_mpdu->tail; 5488 if (head_msdu && tail_msdu) { 5489 ret = ath11k_dp_rx_mon_deliver(ar, mac_id, head_msdu, 5490 &pmon->mon_ppdu_info, 5491 tail_msdu, napi); 5492 rx_mon_stats->dest_mpdu_done++; 5493 ath11k_dbg(ar->ab, ATH11K_DBG_DATA, "full mon: deliver
ppdu\n"); 5494 } 5495 kfree(mon_mpdu); 5496 } 5497 5498 return ret; 5499 } 5500 5501 static int 5502 ath11k_dp_rx_process_full_mon_status_ring(struct ath11k_base *ab, int mac_id, 5503 struct napi_struct *napi, int budget) 5504 { 5505 struct ath11k *ar = ab->pdevs[mac_id].ar; 5506 struct ath11k_pdev_dp *dp = &ar->dp; 5507 struct ath11k_mon_data *pmon = &dp->mon_data; 5508 struct hal_sw_mon_ring_entries *sw_mon_entries; 5509 int quota = 0, work = 0, count; 5510 5511 sw_mon_entries = &pmon->sw_mon_entries; 5512 5513 while (pmon->hold_mon_dst_ring) { 5514 quota = ath11k_dp_rx_process_mon_status(ab, mac_id, 5515 napi, 1); 5516 if (pmon->buf_state == DP_MON_STATUS_MATCH) { 5517 count = sw_mon_entries->status_buf_count; 5518 if (count > 1) { 5519 quota += ath11k_dp_rx_process_mon_status(ab, mac_id, 5520 napi, count); 5521 } 5522 5523 ath11k_dp_rx_full_mon_deliver_ppdu(ar, dp->mac_id, 5524 pmon, napi); 5525 pmon->hold_mon_dst_ring = false; 5526 } else if (!pmon->mon_status_paddr || 5527 pmon->buf_state == DP_MON_STATUS_LEAD) { 5528 sw_mon_entries->drop_ppdu = true; 5529 pmon->hold_mon_dst_ring = false; 5530 } 5531 5532 if (!quota) 5533 break; 5534 5535 work += quota; 5536 } 5537 5538 if (sw_mon_entries->drop_ppdu) 5539 ath11k_dp_rx_full_mon_drop_ppdu(&ab->dp, pmon->mon_mpdu); 5540 5541 return work; 5542 } 5543 5544 static int ath11k_dp_full_mon_process_rx(struct ath11k_base *ab, int mac_id, 5545 struct napi_struct *napi, int budget) 5546 { 5547 struct ath11k *ar = ab->pdevs[mac_id].ar; 5548 struct ath11k_pdev_dp *dp = &ar->dp; 5549 struct ath11k_mon_data *pmon = &dp->mon_data; 5550 struct hal_sw_mon_ring_entries *sw_mon_entries; 5551 struct ath11k_pdev_mon_stats *rx_mon_stats; 5552 struct sk_buff *head_msdu, *tail_msdu; 5553 void *mon_dst_srng = &ar->ab->hal.srng_list[dp->rxdma_mon_dst_ring.ring_id]; 5554 void *ring_entry; 5555 u32 rx_bufs_used = 0, mpdu_rx_bufs_used; 5556 int quota = 0, ret; 5557 bool break_dst_ring = false; 5558 5559 spin_lock_bh(&pmon->mon_lock); 5560 5561 sw_mon_entries = &pmon->sw_mon_entries; 5562 rx_mon_stats = &pmon->rx_mon_stats; 5563 5564 if (pmon->hold_mon_dst_ring) { 5565 spin_unlock_bh(&pmon->mon_lock); 5566 goto reap_status_ring; 5567 } 5568 5569 ath11k_hal_srng_access_begin(ar->ab, mon_dst_srng); 5570 while ((ring_entry = ath11k_hal_srng_dst_peek(ar->ab, mon_dst_srng))) { 5571 head_msdu = NULL; 5572 tail_msdu = NULL; 5573 5574 mpdu_rx_bufs_used = ath11k_dp_rx_full_mon_mpdu_pop(ar, ring_entry, 5575 &head_msdu, 5576 &tail_msdu, 5577 sw_mon_entries); 5578 rx_bufs_used += mpdu_rx_bufs_used; 5579 5580 if (!sw_mon_entries->end_of_ppdu) { 5581 if (head_msdu) { 5582 ret = ath11k_dp_rx_full_mon_prepare_mpdu(&ab->dp, 5583 pmon->mon_mpdu, 5584 head_msdu, 5585 tail_msdu); 5586 if (ret) 5587 break_dst_ring = true; 5588 } 5589 5590 goto next_entry; 5591 } else { 5592 if (!sw_mon_entries->ppdu_id && 5593 !sw_mon_entries->mon_status_paddr) { 5594 break_dst_ring = true; 5595 goto next_entry; 5596 } 5597 } 5598 5599 rx_mon_stats->dest_ppdu_done++; 5600 pmon->mon_ppdu_status = DP_PPDU_STATUS_START; 5601 pmon->buf_state = DP_MON_STATUS_LAG; 5602 pmon->mon_status_paddr = sw_mon_entries->mon_status_paddr; 5603 pmon->hold_mon_dst_ring = true; 5604 next_entry: 5605 ring_entry = ath11k_hal_srng_dst_get_next_entry(ar->ab, 5606 mon_dst_srng); 5607 if (break_dst_ring) 5608 break; 5609 } 5610 5611 ath11k_hal_srng_access_end(ar->ab, mon_dst_srng); 5612 spin_unlock_bh(&pmon->mon_lock); 5613 5614 if (rx_bufs_used) { 5615 ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id, 5616 &dp->rxdma_mon_buf_ring, 
5617 rx_bufs_used, 5618 HAL_RX_BUF_RBM_SW3_BM); 5619 } 5620 5621 reap_status_ring: 5622 quota = ath11k_dp_rx_process_full_mon_status_ring(ab, mac_id, 5623 napi, budget); 5624 5625 return quota; 5626 } 5627 5628 int ath11k_dp_rx_process_mon_rings(struct ath11k_base *ab, int mac_id, 5629 struct napi_struct *napi, int budget) 5630 { 5631 struct ath11k *ar = ath11k_ab_to_ar(ab, mac_id); 5632 int ret = 0; 5633 5634 if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags) && 5635 ab->hw_params.full_monitor_mode) 5636 ret = ath11k_dp_full_mon_process_rx(ab, mac_id, napi, budget); 5637 else 5638 ret = ath11k_dp_rx_process_mon_status(ab, mac_id, napi, budget); 5639 5640 return ret; 5641 } 5642 5643 static int ath11k_dp_rx_pdev_mon_status_attach(struct ath11k *ar) 5644 { 5645 struct ath11k_pdev_dp *dp = &ar->dp; 5646 struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data; 5647 5648 skb_queue_head_init(&pmon->rx_status_q); 5649 5650 pmon->mon_ppdu_status = DP_PPDU_STATUS_START; 5651 5652 memset(&pmon->rx_mon_stats, 0, 5653 sizeof(pmon->rx_mon_stats)); 5654 return 0; 5655 } 5656 5657 int ath11k_dp_rx_pdev_mon_attach(struct ath11k *ar) 5658 { 5659 struct ath11k_pdev_dp *dp = &ar->dp; 5660 struct ath11k_mon_data *pmon = &dp->mon_data; 5661 struct hal_srng *mon_desc_srng = NULL; 5662 struct dp_srng *dp_srng; 5663 int ret = 0; 5664 u32 n_link_desc = 0; 5665 5666 ret = ath11k_dp_rx_pdev_mon_status_attach(ar); 5667 if (ret) { 5668 ath11k_warn(ar->ab, "pdev_mon_status_attach() failed"); 5669 return ret; 5670 } 5671 5672 /* if rxdma1_enable is false, no need to setup 5673 * rxdma_mon_desc_ring. 5674 */ 5675 if (!ar->ab->hw_params.rxdma1_enable) 5676 return 0; 5677 5678 dp_srng = &dp->rxdma_mon_desc_ring; 5679 n_link_desc = dp_srng->size / 5680 ath11k_hal_srng_get_entrysize(ar->ab, HAL_RXDMA_MONITOR_DESC); 5681 mon_desc_srng = 5682 &ar->ab->hal.srng_list[dp->rxdma_mon_desc_ring.ring_id]; 5683 5684 ret = ath11k_dp_link_desc_setup(ar->ab, pmon->link_desc_banks, 5685 HAL_RXDMA_MONITOR_DESC, mon_desc_srng, 5686 n_link_desc); 5687 if (ret) { 5688 ath11k_warn(ar->ab, "mon_link_desc_pool_setup() failed"); 5689 return ret; 5690 } 5691 pmon->mon_last_linkdesc_paddr = 0; 5692 pmon->mon_last_buf_cookie = DP_RX_DESC_COOKIE_MAX + 1; 5693 spin_lock_init(&pmon->mon_lock); 5694 5695 return 0; 5696 } 5697 5698 static int ath11k_dp_mon_link_free(struct ath11k *ar) 5699 { 5700 struct ath11k_pdev_dp *dp = &ar->dp; 5701 struct ath11k_mon_data *pmon = &dp->mon_data; 5702 5703 ath11k_dp_link_desc_cleanup(ar->ab, pmon->link_desc_banks, 5704 HAL_RXDMA_MONITOR_DESC, 5705 &dp->rxdma_mon_desc_ring); 5706 return 0; 5707 } 5708 5709 int ath11k_dp_rx_pdev_mon_detach(struct ath11k *ar) 5710 { 5711 ath11k_dp_mon_link_free(ar); 5712 return 0; 5713 } 5714 5715 int ath11k_dp_rx_pktlog_start(struct ath11k_base *ab) 5716 { 5717 /* start reap timer */ 5718 mod_timer(&ab->mon_reap_timer, 5719 jiffies + msecs_to_jiffies(ATH11K_MON_TIMER_INTERVAL)); 5720 5721 return 0; 5722 } 5723 5724 int ath11k_dp_rx_pktlog_stop(struct ath11k_base *ab, bool stop_timer) 5725 { 5726 int ret; 5727 5728 if (stop_timer) 5729 del_timer_sync(&ab->mon_reap_timer); 5730 5731 /* reap all the monitor related rings */ 5732 ret = ath11k_dp_purge_mon_ring(ab); 5733 if (ret) { 5734 ath11k_warn(ab, "failed to purge dp mon ring: %d\n", ret); 5735 return ret; 5736 } 5737 5738 return 0; 5739 } 5740