1d5c65159SKalle Valo // SPDX-License-Identifier: BSD-3-Clause-Clear 2d5c65159SKalle Valo /* 3d5c65159SKalle Valo * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. 4d5c65159SKalle Valo */ 5d5c65159SKalle Valo 6d5c65159SKalle Valo #include <linux/ieee80211.h> 7d5c65159SKalle Valo #include "core.h" 8d5c65159SKalle Valo #include "debug.h" 9d5c65159SKalle Valo #include "hal_desc.h" 10d5c65159SKalle Valo #include "hw.h" 11d5c65159SKalle Valo #include "dp_rx.h" 12d5c65159SKalle Valo #include "hal_rx.h" 13d5c65159SKalle Valo #include "dp_tx.h" 14d5c65159SKalle Valo #include "peer.h" 15d5c65159SKalle Valo 16d5c65159SKalle Valo static u8 *ath11k_dp_rx_h_80211_hdr(struct hal_rx_desc *desc) 17d5c65159SKalle Valo { 18d5c65159SKalle Valo return desc->hdr_status; 19d5c65159SKalle Valo } 20d5c65159SKalle Valo 21d5c65159SKalle Valo static enum hal_encrypt_type ath11k_dp_rx_h_mpdu_start_enctype(struct hal_rx_desc *desc) 22d5c65159SKalle Valo { 23d5c65159SKalle Valo if (!(__le32_to_cpu(desc->mpdu_start.info1) & 24d5c65159SKalle Valo RX_MPDU_START_INFO1_ENCRYPT_INFO_VALID)) 25d5c65159SKalle Valo return HAL_ENCRYPT_TYPE_OPEN; 26d5c65159SKalle Valo 27d5c65159SKalle Valo return FIELD_GET(RX_MPDU_START_INFO2_ENC_TYPE, 28d5c65159SKalle Valo __le32_to_cpu(desc->mpdu_start.info2)); 29d5c65159SKalle Valo } 30d5c65159SKalle Valo 31d5c65159SKalle Valo static u8 ath11k_dp_rx_h_mpdu_start_decap_type(struct hal_rx_desc *desc) 32d5c65159SKalle Valo { 33d5c65159SKalle Valo return FIELD_GET(RX_MPDU_START_INFO5_DECAP_TYPE, 34d5c65159SKalle Valo __le32_to_cpu(desc->mpdu_start.info5)); 35d5c65159SKalle Valo } 36d5c65159SKalle Valo 37d5c65159SKalle Valo static bool ath11k_dp_rx_h_attn_msdu_done(struct hal_rx_desc *desc) 38d5c65159SKalle Valo { 39d5c65159SKalle Valo return !!FIELD_GET(RX_ATTENTION_INFO2_MSDU_DONE, 40d5c65159SKalle Valo __le32_to_cpu(desc->attention.info2)); 41d5c65159SKalle Valo } 42d5c65159SKalle Valo 43d5c65159SKalle Valo static bool 
ath11k_dp_rx_h_attn_first_mpdu(struct hal_rx_desc *desc) 44d5c65159SKalle Valo { 45d5c65159SKalle Valo return !!FIELD_GET(RX_ATTENTION_INFO1_FIRST_MPDU, 46d5c65159SKalle Valo __le32_to_cpu(desc->attention.info1)); 47d5c65159SKalle Valo } 48d5c65159SKalle Valo 49d5c65159SKalle Valo static bool ath11k_dp_rx_h_attn_l4_cksum_fail(struct hal_rx_desc *desc) 50d5c65159SKalle Valo { 51d5c65159SKalle Valo return !!FIELD_GET(RX_ATTENTION_INFO1_TCP_UDP_CKSUM_FAIL, 52d5c65159SKalle Valo __le32_to_cpu(desc->attention.info1)); 53d5c65159SKalle Valo } 54d5c65159SKalle Valo 55d5c65159SKalle Valo static bool ath11k_dp_rx_h_attn_ip_cksum_fail(struct hal_rx_desc *desc) 56d5c65159SKalle Valo { 57d5c65159SKalle Valo return !!FIELD_GET(RX_ATTENTION_INFO1_IP_CKSUM_FAIL, 58d5c65159SKalle Valo __le32_to_cpu(desc->attention.info1)); 59d5c65159SKalle Valo } 60d5c65159SKalle Valo 61d5c65159SKalle Valo static bool ath11k_dp_rx_h_attn_is_decrypted(struct hal_rx_desc *desc) 62d5c65159SKalle Valo { 63d5c65159SKalle Valo return (FIELD_GET(RX_ATTENTION_INFO2_DCRYPT_STATUS_CODE, 64d5c65159SKalle Valo __le32_to_cpu(desc->attention.info2)) == 65d5c65159SKalle Valo RX_DESC_DECRYPT_STATUS_CODE_OK); 66d5c65159SKalle Valo } 67d5c65159SKalle Valo 68d5c65159SKalle Valo static u32 ath11k_dp_rx_h_attn_mpdu_err(struct hal_rx_desc *desc) 69d5c65159SKalle Valo { 70d5c65159SKalle Valo u32 info = __le32_to_cpu(desc->attention.info1); 71d5c65159SKalle Valo u32 errmap = 0; 72d5c65159SKalle Valo 73d5c65159SKalle Valo if (info & RX_ATTENTION_INFO1_FCS_ERR) 74d5c65159SKalle Valo errmap |= DP_RX_MPDU_ERR_FCS; 75d5c65159SKalle Valo 76d5c65159SKalle Valo if (info & RX_ATTENTION_INFO1_DECRYPT_ERR) 77d5c65159SKalle Valo errmap |= DP_RX_MPDU_ERR_DECRYPT; 78d5c65159SKalle Valo 79d5c65159SKalle Valo if (info & RX_ATTENTION_INFO1_TKIP_MIC_ERR) 80d5c65159SKalle Valo errmap |= DP_RX_MPDU_ERR_TKIP_MIC; 81d5c65159SKalle Valo 82d5c65159SKalle Valo if (info & RX_ATTENTION_INFO1_A_MSDU_ERROR) 83d5c65159SKalle Valo errmap |= 
DP_RX_MPDU_ERR_AMSDU_ERR; 84d5c65159SKalle Valo 85d5c65159SKalle Valo if (info & RX_ATTENTION_INFO1_OVERFLOW_ERR) 86d5c65159SKalle Valo errmap |= DP_RX_MPDU_ERR_OVERFLOW; 87d5c65159SKalle Valo 88d5c65159SKalle Valo if (info & RX_ATTENTION_INFO1_MSDU_LEN_ERR) 89d5c65159SKalle Valo errmap |= DP_RX_MPDU_ERR_MSDU_LEN; 90d5c65159SKalle Valo 91d5c65159SKalle Valo if (info & RX_ATTENTION_INFO1_MPDU_LEN_ERR) 92d5c65159SKalle Valo errmap |= DP_RX_MPDU_ERR_MPDU_LEN; 93d5c65159SKalle Valo 94d5c65159SKalle Valo return errmap; 95d5c65159SKalle Valo } 96d5c65159SKalle Valo 97d5c65159SKalle Valo static u16 ath11k_dp_rx_h_msdu_start_msdu_len(struct hal_rx_desc *desc) 98d5c65159SKalle Valo { 99d5c65159SKalle Valo return FIELD_GET(RX_MSDU_START_INFO1_MSDU_LENGTH, 100d5c65159SKalle Valo __le32_to_cpu(desc->msdu_start.info1)); 101d5c65159SKalle Valo } 102d5c65159SKalle Valo 103d5c65159SKalle Valo static u8 ath11k_dp_rx_h_msdu_start_sgi(struct hal_rx_desc *desc) 104d5c65159SKalle Valo { 105d5c65159SKalle Valo return FIELD_GET(RX_MSDU_START_INFO3_SGI, 106d5c65159SKalle Valo __le32_to_cpu(desc->msdu_start.info3)); 107d5c65159SKalle Valo } 108d5c65159SKalle Valo 109d5c65159SKalle Valo static u8 ath11k_dp_rx_h_msdu_start_rate_mcs(struct hal_rx_desc *desc) 110d5c65159SKalle Valo { 111d5c65159SKalle Valo return FIELD_GET(RX_MSDU_START_INFO3_RATE_MCS, 112d5c65159SKalle Valo __le32_to_cpu(desc->msdu_start.info3)); 113d5c65159SKalle Valo } 114d5c65159SKalle Valo 115d5c65159SKalle Valo static u8 ath11k_dp_rx_h_msdu_start_rx_bw(struct hal_rx_desc *desc) 116d5c65159SKalle Valo { 117d5c65159SKalle Valo return FIELD_GET(RX_MSDU_START_INFO3_RECV_BW, 118d5c65159SKalle Valo __le32_to_cpu(desc->msdu_start.info3)); 119d5c65159SKalle Valo } 120d5c65159SKalle Valo 121d5c65159SKalle Valo static u32 ath11k_dp_rx_h_msdu_start_freq(struct hal_rx_desc *desc) 122d5c65159SKalle Valo { 123d5c65159SKalle Valo return __le32_to_cpu(desc->msdu_start.phy_meta_data); 124d5c65159SKalle Valo } 125d5c65159SKalle Valo 
126d5c65159SKalle Valo static u8 ath11k_dp_rx_h_msdu_start_pkt_type(struct hal_rx_desc *desc) 127d5c65159SKalle Valo { 128d5c65159SKalle Valo return FIELD_GET(RX_MSDU_START_INFO3_PKT_TYPE, 129d5c65159SKalle Valo __le32_to_cpu(desc->msdu_start.info3)); 130d5c65159SKalle Valo } 131d5c65159SKalle Valo 132d5c65159SKalle Valo static u8 ath11k_dp_rx_h_msdu_start_nss(struct hal_rx_desc *desc) 133d5c65159SKalle Valo { 134d5c65159SKalle Valo u8 mimo_ss_bitmap = FIELD_GET(RX_MSDU_START_INFO3_MIMO_SS_BITMAP, 135d5c65159SKalle Valo __le32_to_cpu(desc->msdu_start.info3)); 136d5c65159SKalle Valo 137d5c65159SKalle Valo return hweight8(mimo_ss_bitmap); 138d5c65159SKalle Valo } 139d5c65159SKalle Valo 140d5c65159SKalle Valo static u8 ath11k_dp_rx_h_msdu_end_l3pad(struct hal_rx_desc *desc) 141d5c65159SKalle Valo { 142d5c65159SKalle Valo return FIELD_GET(RX_MSDU_END_INFO2_L3_HDR_PADDING, 143d5c65159SKalle Valo __le32_to_cpu(desc->msdu_end.info2)); 144d5c65159SKalle Valo } 145d5c65159SKalle Valo 146d5c65159SKalle Valo static bool ath11k_dp_rx_h_msdu_end_first_msdu(struct hal_rx_desc *desc) 147d5c65159SKalle Valo { 148d5c65159SKalle Valo return !!FIELD_GET(RX_MSDU_END_INFO2_FIRST_MSDU, 149d5c65159SKalle Valo __le32_to_cpu(desc->msdu_end.info2)); 150d5c65159SKalle Valo } 151d5c65159SKalle Valo 152d5c65159SKalle Valo static bool ath11k_dp_rx_h_msdu_end_last_msdu(struct hal_rx_desc *desc) 153d5c65159SKalle Valo { 154d5c65159SKalle Valo return !!FIELD_GET(RX_MSDU_END_INFO2_LAST_MSDU, 155d5c65159SKalle Valo __le32_to_cpu(desc->msdu_end.info2)); 156d5c65159SKalle Valo } 157d5c65159SKalle Valo 158d5c65159SKalle Valo static void ath11k_dp_rx_desc_end_tlv_copy(struct hal_rx_desc *fdesc, 159d5c65159SKalle Valo struct hal_rx_desc *ldesc) 160d5c65159SKalle Valo { 161d5c65159SKalle Valo memcpy((u8 *)&fdesc->msdu_end, (u8 *)&ldesc->msdu_end, 162d5c65159SKalle Valo sizeof(struct rx_msdu_end)); 163d5c65159SKalle Valo memcpy((u8 *)&fdesc->attention, (u8 *)&ldesc->attention, 164d5c65159SKalle Valo 
sizeof(struct rx_attention)); 165d5c65159SKalle Valo memcpy((u8 *)&fdesc->mpdu_end, (u8 *)&ldesc->mpdu_end, 166d5c65159SKalle Valo sizeof(struct rx_mpdu_end)); 167d5c65159SKalle Valo } 168d5c65159SKalle Valo 169d5c65159SKalle Valo static u32 ath11k_dp_rxdesc_get_mpdulen_err(struct hal_rx_desc *rx_desc) 170d5c65159SKalle Valo { 171d5c65159SKalle Valo struct rx_attention *rx_attn; 172d5c65159SKalle Valo 173d5c65159SKalle Valo rx_attn = &rx_desc->attention; 174d5c65159SKalle Valo 175d5c65159SKalle Valo return FIELD_GET(RX_ATTENTION_INFO1_MPDU_LEN_ERR, 176d5c65159SKalle Valo __le32_to_cpu(rx_attn->info1)); 177d5c65159SKalle Valo } 178d5c65159SKalle Valo 179d5c65159SKalle Valo static u32 ath11k_dp_rxdesc_get_decap_format(struct hal_rx_desc *rx_desc) 180d5c65159SKalle Valo { 181d5c65159SKalle Valo struct rx_msdu_start *rx_msdu_start; 182d5c65159SKalle Valo 183d5c65159SKalle Valo rx_msdu_start = &rx_desc->msdu_start; 184d5c65159SKalle Valo 185d5c65159SKalle Valo return FIELD_GET(RX_MSDU_START_INFO2_DECAP_FORMAT, 186d5c65159SKalle Valo __le32_to_cpu(rx_msdu_start->info2)); 187d5c65159SKalle Valo } 188d5c65159SKalle Valo 189d5c65159SKalle Valo static u8 *ath11k_dp_rxdesc_get_80211hdr(struct hal_rx_desc *rx_desc) 190d5c65159SKalle Valo { 191d5c65159SKalle Valo u8 *rx_pkt_hdr; 192d5c65159SKalle Valo 193d5c65159SKalle Valo rx_pkt_hdr = &rx_desc->msdu_payload[0]; 194d5c65159SKalle Valo 195d5c65159SKalle Valo return rx_pkt_hdr; 196d5c65159SKalle Valo } 197d5c65159SKalle Valo 198d5c65159SKalle Valo static bool ath11k_dp_rxdesc_mpdu_valid(struct hal_rx_desc *rx_desc) 199d5c65159SKalle Valo { 200d5c65159SKalle Valo u32 tlv_tag; 201d5c65159SKalle Valo 202d5c65159SKalle Valo tlv_tag = FIELD_GET(HAL_TLV_HDR_TAG, 203d5c65159SKalle Valo __le32_to_cpu(rx_desc->mpdu_start_tag)); 204d5c65159SKalle Valo 205d5c65159SKalle Valo return tlv_tag == HAL_RX_MPDU_START ? 
true : false; 206d5c65159SKalle Valo } 207d5c65159SKalle Valo 208d5c65159SKalle Valo static u32 ath11k_dp_rxdesc_get_ppduid(struct hal_rx_desc *rx_desc) 209d5c65159SKalle Valo { 210d5c65159SKalle Valo return __le16_to_cpu(rx_desc->mpdu_start.phy_ppdu_id); 211d5c65159SKalle Valo } 212d5c65159SKalle Valo 213d5c65159SKalle Valo /* Returns number of Rx buffers replenished */ 214d5c65159SKalle Valo int ath11k_dp_rxbufs_replenish(struct ath11k_base *ab, int mac_id, 215d5c65159SKalle Valo struct dp_rxdma_ring *rx_ring, 216d5c65159SKalle Valo int req_entries, 217d5c65159SKalle Valo enum hal_rx_buf_return_buf_manager mgr, 218d5c65159SKalle Valo gfp_t gfp) 219d5c65159SKalle Valo { 220d5c65159SKalle Valo struct hal_srng *srng; 221d5c65159SKalle Valo u32 *desc; 222d5c65159SKalle Valo struct sk_buff *skb; 223d5c65159SKalle Valo int num_free; 224d5c65159SKalle Valo int num_remain; 225d5c65159SKalle Valo int buf_id; 226d5c65159SKalle Valo u32 cookie; 227d5c65159SKalle Valo dma_addr_t paddr; 228d5c65159SKalle Valo 229d5c65159SKalle Valo req_entries = min(req_entries, rx_ring->bufs_max); 230d5c65159SKalle Valo 231d5c65159SKalle Valo srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id]; 232d5c65159SKalle Valo 233d5c65159SKalle Valo spin_lock_bh(&srng->lock); 234d5c65159SKalle Valo 235d5c65159SKalle Valo ath11k_hal_srng_access_begin(ab, srng); 236d5c65159SKalle Valo 237d5c65159SKalle Valo num_free = ath11k_hal_srng_src_num_free(ab, srng, true); 238d5c65159SKalle Valo if (!req_entries && (num_free > (rx_ring->bufs_max * 3) / 4)) 239d5c65159SKalle Valo req_entries = num_free; 240d5c65159SKalle Valo 241d5c65159SKalle Valo req_entries = min(num_free, req_entries); 242d5c65159SKalle Valo num_remain = req_entries; 243d5c65159SKalle Valo 244d5c65159SKalle Valo while (num_remain > 0) { 245d5c65159SKalle Valo skb = dev_alloc_skb(DP_RX_BUFFER_SIZE + 246d5c65159SKalle Valo DP_RX_BUFFER_ALIGN_SIZE); 247d5c65159SKalle Valo if (!skb) 248d5c65159SKalle Valo break; 249d5c65159SKalle Valo 
250d5c65159SKalle Valo if (!IS_ALIGNED((unsigned long)skb->data, 251d5c65159SKalle Valo DP_RX_BUFFER_ALIGN_SIZE)) { 252d5c65159SKalle Valo skb_pull(skb, 253d5c65159SKalle Valo PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) - 254d5c65159SKalle Valo skb->data); 255d5c65159SKalle Valo } 256d5c65159SKalle Valo 257d5c65159SKalle Valo paddr = dma_map_single(ab->dev, skb->data, 258d5c65159SKalle Valo skb->len + skb_tailroom(skb), 259d5c65159SKalle Valo DMA_FROM_DEVICE); 260d5c65159SKalle Valo if (dma_mapping_error(ab->dev, paddr)) 261d5c65159SKalle Valo goto fail_free_skb; 262d5c65159SKalle Valo 263d5c65159SKalle Valo spin_lock_bh(&rx_ring->idr_lock); 264d5c65159SKalle Valo buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0, 265d5c65159SKalle Valo rx_ring->bufs_max * 3, gfp); 266d5c65159SKalle Valo spin_unlock_bh(&rx_ring->idr_lock); 267d5c65159SKalle Valo if (buf_id < 0) 268d5c65159SKalle Valo goto fail_dma_unmap; 269d5c65159SKalle Valo 270d5c65159SKalle Valo desc = ath11k_hal_srng_src_get_next_entry(ab, srng); 271d5c65159SKalle Valo if (!desc) 272d5c65159SKalle Valo goto fail_idr_remove; 273d5c65159SKalle Valo 274d5c65159SKalle Valo ATH11K_SKB_RXCB(skb)->paddr = paddr; 275d5c65159SKalle Valo 276d5c65159SKalle Valo cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) | 277d5c65159SKalle Valo FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id); 278d5c65159SKalle Valo 279d5c65159SKalle Valo num_remain--; 280d5c65159SKalle Valo 281d5c65159SKalle Valo ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr); 282d5c65159SKalle Valo } 283d5c65159SKalle Valo 284d5c65159SKalle Valo ath11k_hal_srng_access_end(ab, srng); 285d5c65159SKalle Valo 286d5c65159SKalle Valo spin_unlock_bh(&srng->lock); 287d5c65159SKalle Valo 288d5c65159SKalle Valo return req_entries - num_remain; 289d5c65159SKalle Valo 290d5c65159SKalle Valo fail_idr_remove: 291d5c65159SKalle Valo spin_lock_bh(&rx_ring->idr_lock); 292d5c65159SKalle Valo idr_remove(&rx_ring->bufs_idr, buf_id); 293d5c65159SKalle Valo 
spin_unlock_bh(&rx_ring->idr_lock); 294d5c65159SKalle Valo fail_dma_unmap: 295d5c65159SKalle Valo dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb), 296d5c65159SKalle Valo DMA_FROM_DEVICE); 297d5c65159SKalle Valo fail_free_skb: 298d5c65159SKalle Valo dev_kfree_skb_any(skb); 299d5c65159SKalle Valo 300d5c65159SKalle Valo ath11k_hal_srng_access_end(ab, srng); 301d5c65159SKalle Valo 302d5c65159SKalle Valo spin_unlock_bh(&srng->lock); 303d5c65159SKalle Valo 304d5c65159SKalle Valo return req_entries - num_remain; 305d5c65159SKalle Valo } 306d5c65159SKalle Valo 307d5c65159SKalle Valo static int ath11k_dp_rxdma_buf_ring_free(struct ath11k *ar, 308d5c65159SKalle Valo struct dp_rxdma_ring *rx_ring) 309d5c65159SKalle Valo { 310d5c65159SKalle Valo struct ath11k_pdev_dp *dp = &ar->dp; 311d5c65159SKalle Valo struct sk_buff *skb; 312d5c65159SKalle Valo int buf_id; 313d5c65159SKalle Valo 314d5c65159SKalle Valo spin_lock_bh(&rx_ring->idr_lock); 315d5c65159SKalle Valo idr_for_each_entry(&rx_ring->bufs_idr, skb, buf_id) { 316d5c65159SKalle Valo idr_remove(&rx_ring->bufs_idr, buf_id); 317d5c65159SKalle Valo /* TODO: Understand where internal driver does this dma_unmap of 318d5c65159SKalle Valo * of rxdma_buffer. 
319d5c65159SKalle Valo */ 320d5c65159SKalle Valo dma_unmap_single(ar->ab->dev, ATH11K_SKB_RXCB(skb)->paddr, 321d5c65159SKalle Valo skb->len + skb_tailroom(skb), DMA_FROM_DEVICE); 322d5c65159SKalle Valo dev_kfree_skb_any(skb); 323d5c65159SKalle Valo } 324d5c65159SKalle Valo 325d5c65159SKalle Valo idr_destroy(&rx_ring->bufs_idr); 326d5c65159SKalle Valo spin_unlock_bh(&rx_ring->idr_lock); 327d5c65159SKalle Valo 328d5c65159SKalle Valo rx_ring = &dp->rx_mon_status_refill_ring; 329d5c65159SKalle Valo 330d5c65159SKalle Valo spin_lock_bh(&rx_ring->idr_lock); 331d5c65159SKalle Valo idr_for_each_entry(&rx_ring->bufs_idr, skb, buf_id) { 332d5c65159SKalle Valo idr_remove(&rx_ring->bufs_idr, buf_id); 333d5c65159SKalle Valo /* XXX: Understand where internal driver does this dma_unmap of 334d5c65159SKalle Valo * of rxdma_buffer. 335d5c65159SKalle Valo */ 336d5c65159SKalle Valo dma_unmap_single(ar->ab->dev, ATH11K_SKB_RXCB(skb)->paddr, 337d5c65159SKalle Valo skb->len + skb_tailroom(skb), DMA_BIDIRECTIONAL); 338d5c65159SKalle Valo dev_kfree_skb_any(skb); 339d5c65159SKalle Valo } 340d5c65159SKalle Valo 341d5c65159SKalle Valo idr_destroy(&rx_ring->bufs_idr); 342d5c65159SKalle Valo spin_unlock_bh(&rx_ring->idr_lock); 343d5c65159SKalle Valo return 0; 344d5c65159SKalle Valo } 345d5c65159SKalle Valo 346d5c65159SKalle Valo static int ath11k_dp_rxdma_pdev_buf_free(struct ath11k *ar) 347d5c65159SKalle Valo { 348d5c65159SKalle Valo struct ath11k_pdev_dp *dp = &ar->dp; 349d5c65159SKalle Valo struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring; 350d5c65159SKalle Valo 351d5c65159SKalle Valo ath11k_dp_rxdma_buf_ring_free(ar, rx_ring); 352d5c65159SKalle Valo 353d5c65159SKalle Valo rx_ring = &dp->rxdma_mon_buf_ring; 354d5c65159SKalle Valo ath11k_dp_rxdma_buf_ring_free(ar, rx_ring); 355d5c65159SKalle Valo 356d5c65159SKalle Valo rx_ring = &dp->rx_mon_status_refill_ring; 357d5c65159SKalle Valo ath11k_dp_rxdma_buf_ring_free(ar, rx_ring); 358d5c65159SKalle Valo return 0; 359d5c65159SKalle Valo } 
360d5c65159SKalle Valo 361d5c65159SKalle Valo static int ath11k_dp_rxdma_ring_buf_setup(struct ath11k *ar, 362d5c65159SKalle Valo struct dp_rxdma_ring *rx_ring, 363d5c65159SKalle Valo u32 ringtype) 364d5c65159SKalle Valo { 365d5c65159SKalle Valo struct ath11k_pdev_dp *dp = &ar->dp; 366d5c65159SKalle Valo int num_entries; 367d5c65159SKalle Valo 368d5c65159SKalle Valo num_entries = rx_ring->refill_buf_ring.size / 369d5c65159SKalle Valo ath11k_hal_srng_get_entrysize(ringtype); 370d5c65159SKalle Valo 371d5c65159SKalle Valo rx_ring->bufs_max = num_entries; 372d5c65159SKalle Valo ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id, rx_ring, num_entries, 373d5c65159SKalle Valo HAL_RX_BUF_RBM_SW3_BM, GFP_KERNEL); 374d5c65159SKalle Valo return 0; 375d5c65159SKalle Valo } 376d5c65159SKalle Valo 377d5c65159SKalle Valo static int ath11k_dp_rxdma_pdev_buf_setup(struct ath11k *ar) 378d5c65159SKalle Valo { 379d5c65159SKalle Valo struct ath11k_pdev_dp *dp = &ar->dp; 380d5c65159SKalle Valo struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring; 381d5c65159SKalle Valo 382d5c65159SKalle Valo ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_BUF); 383d5c65159SKalle Valo 384d5c65159SKalle Valo rx_ring = &dp->rxdma_mon_buf_ring; 385d5c65159SKalle Valo ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_MONITOR_BUF); 386d5c65159SKalle Valo 387d5c65159SKalle Valo rx_ring = &dp->rx_mon_status_refill_ring; 388d5c65159SKalle Valo ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_MONITOR_STATUS); 389d5c65159SKalle Valo 390d5c65159SKalle Valo return 0; 391d5c65159SKalle Valo } 392d5c65159SKalle Valo 393d5c65159SKalle Valo static void ath11k_dp_rx_pdev_srng_free(struct ath11k *ar) 394d5c65159SKalle Valo { 395d5c65159SKalle Valo struct ath11k_pdev_dp *dp = &ar->dp; 396d5c65159SKalle Valo 397d5c65159SKalle Valo ath11k_dp_srng_cleanup(ar->ab, &dp->rx_refill_buf_ring.refill_buf_ring); 398d5c65159SKalle Valo ath11k_dp_srng_cleanup(ar->ab, &dp->reo_dst_ring); 399d5c65159SKalle Valo 
ath11k_dp_srng_cleanup(ar->ab, &dp->rxdma_err_dst_ring); 400d5c65159SKalle Valo ath11k_dp_srng_cleanup(ar->ab, &dp->rx_mon_status_refill_ring.refill_buf_ring); 401d5c65159SKalle Valo ath11k_dp_srng_cleanup(ar->ab, &dp->rxdma_mon_buf_ring.refill_buf_ring); 402d5c65159SKalle Valo } 403d5c65159SKalle Valo 404d5c65159SKalle Valo static int ath11k_dp_rx_pdev_srng_alloc(struct ath11k *ar) 405d5c65159SKalle Valo { 406d5c65159SKalle Valo struct ath11k_pdev_dp *dp = &ar->dp; 407d5c65159SKalle Valo struct dp_srng *srng = NULL; 408d5c65159SKalle Valo int ret; 409d5c65159SKalle Valo 410d5c65159SKalle Valo ret = ath11k_dp_srng_setup(ar->ab, 411d5c65159SKalle Valo &dp->rx_refill_buf_ring.refill_buf_ring, 412d5c65159SKalle Valo HAL_RXDMA_BUF, 0, 413d5c65159SKalle Valo dp->mac_id, DP_RXDMA_BUF_RING_SIZE); 414d5c65159SKalle Valo if (ret) { 415d5c65159SKalle Valo ath11k_warn(ar->ab, "failed to setup rx_refill_buf_ring\n"); 416d5c65159SKalle Valo return ret; 417d5c65159SKalle Valo } 418d5c65159SKalle Valo 419d5c65159SKalle Valo ret = ath11k_dp_srng_setup(ar->ab, &dp->reo_dst_ring, HAL_REO_DST, 420d5c65159SKalle Valo dp->mac_id, dp->mac_id, 421d5c65159SKalle Valo DP_REO_DST_RING_SIZE); 422d5c65159SKalle Valo if (ret) { 423d5c65159SKalle Valo ath11k_warn(ar->ab, "failed to setup reo_dst_ring\n"); 424d5c65159SKalle Valo return ret; 425d5c65159SKalle Valo } 426d5c65159SKalle Valo 427d5c65159SKalle Valo ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_err_dst_ring, 428d5c65159SKalle Valo HAL_RXDMA_DST, 0, dp->mac_id, 429d5c65159SKalle Valo DP_RXDMA_ERR_DST_RING_SIZE); 430d5c65159SKalle Valo if (ret) { 431d5c65159SKalle Valo ath11k_warn(ar->ab, "failed to setup rxdma_err_dst_ring\n"); 432d5c65159SKalle Valo return ret; 433d5c65159SKalle Valo } 434d5c65159SKalle Valo 435d5c65159SKalle Valo srng = &dp->rx_mon_status_refill_ring.refill_buf_ring; 436d5c65159SKalle Valo ret = ath11k_dp_srng_setup(ar->ab, 437d5c65159SKalle Valo srng, 438d5c65159SKalle Valo HAL_RXDMA_MONITOR_STATUS, 0, dp->mac_id, 
439d5c65159SKalle Valo DP_RXDMA_MON_STATUS_RING_SIZE); 440d5c65159SKalle Valo if (ret) { 441d5c65159SKalle Valo ath11k_warn(ar->ab, 442d5c65159SKalle Valo "failed to setup rx_mon_status_refill_ring\n"); 443d5c65159SKalle Valo return ret; 444d5c65159SKalle Valo } 445d5c65159SKalle Valo ret = ath11k_dp_srng_setup(ar->ab, 446d5c65159SKalle Valo &dp->rxdma_mon_buf_ring.refill_buf_ring, 447d5c65159SKalle Valo HAL_RXDMA_MONITOR_BUF, 0, dp->mac_id, 448d5c65159SKalle Valo DP_RXDMA_MONITOR_BUF_RING_SIZE); 449d5c65159SKalle Valo if (ret) { 450d5c65159SKalle Valo ath11k_warn(ar->ab, 451d5c65159SKalle Valo "failed to setup HAL_RXDMA_MONITOR_BUF\n"); 452d5c65159SKalle Valo return ret; 453d5c65159SKalle Valo } 454d5c65159SKalle Valo 455d5c65159SKalle Valo ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_mon_dst_ring, 456d5c65159SKalle Valo HAL_RXDMA_MONITOR_DST, 0, dp->mac_id, 457d5c65159SKalle Valo DP_RXDMA_MONITOR_DST_RING_SIZE); 458d5c65159SKalle Valo if (ret) { 459d5c65159SKalle Valo ath11k_warn(ar->ab, 460d5c65159SKalle Valo "failed to setup HAL_RXDMA_MONITOR_DST\n"); 461d5c65159SKalle Valo return ret; 462d5c65159SKalle Valo } 463d5c65159SKalle Valo 464d5c65159SKalle Valo ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_mon_desc_ring, 465d5c65159SKalle Valo HAL_RXDMA_MONITOR_DESC, 0, dp->mac_id, 466d5c65159SKalle Valo DP_RXDMA_MONITOR_DESC_RING_SIZE); 467d5c65159SKalle Valo if (ret) { 468d5c65159SKalle Valo ath11k_warn(ar->ab, 469d5c65159SKalle Valo "failed to setup HAL_RXDMA_MONITOR_DESC\n"); 470d5c65159SKalle Valo return ret; 471d5c65159SKalle Valo } 472d5c65159SKalle Valo 473d5c65159SKalle Valo return 0; 474d5c65159SKalle Valo } 475d5c65159SKalle Valo 476d5c65159SKalle Valo void ath11k_dp_reo_cmd_list_cleanup(struct ath11k_base *ab) 477d5c65159SKalle Valo { 478d5c65159SKalle Valo struct ath11k_dp *dp = &ab->dp; 479d5c65159SKalle Valo struct dp_reo_cmd *cmd, *tmp; 480d5c65159SKalle Valo struct dp_reo_cache_flush_elem *cmd_cache, *tmp_cache; 481d5c65159SKalle Valo 
482d5c65159SKalle Valo spin_lock_bh(&dp->reo_cmd_lock); 483d5c65159SKalle Valo list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) { 484d5c65159SKalle Valo list_del(&cmd->list); 485d5c65159SKalle Valo dma_unmap_single(ab->dev, cmd->data.paddr, 486d5c65159SKalle Valo cmd->data.size, DMA_BIDIRECTIONAL); 487d5c65159SKalle Valo kfree(cmd->data.vaddr); 488d5c65159SKalle Valo kfree(cmd); 489d5c65159SKalle Valo } 490d5c65159SKalle Valo 491d5c65159SKalle Valo list_for_each_entry_safe(cmd_cache, tmp_cache, 492d5c65159SKalle Valo &dp->reo_cmd_cache_flush_list, list) { 493d5c65159SKalle Valo list_del(&cmd_cache->list); 494d5c65159SKalle Valo dma_unmap_single(ab->dev, cmd_cache->data.paddr, 495d5c65159SKalle Valo cmd_cache->data.size, DMA_BIDIRECTIONAL); 496d5c65159SKalle Valo kfree(cmd_cache->data.vaddr); 497d5c65159SKalle Valo kfree(cmd_cache); 498d5c65159SKalle Valo } 499d5c65159SKalle Valo spin_unlock_bh(&dp->reo_cmd_lock); 500d5c65159SKalle Valo } 501d5c65159SKalle Valo 502d5c65159SKalle Valo static void ath11k_dp_reo_cmd_free(struct ath11k_dp *dp, void *ctx, 503d5c65159SKalle Valo enum hal_reo_cmd_status status) 504d5c65159SKalle Valo { 505d5c65159SKalle Valo struct dp_rx_tid *rx_tid = ctx; 506d5c65159SKalle Valo 507d5c65159SKalle Valo if (status != HAL_REO_CMD_SUCCESS) 508d5c65159SKalle Valo ath11k_warn(dp->ab, "failed to flush rx tid hw desc, tid %d status %d\n", 509d5c65159SKalle Valo rx_tid->tid, status); 510d5c65159SKalle Valo 511d5c65159SKalle Valo dma_unmap_single(dp->ab->dev, rx_tid->paddr, rx_tid->size, 512d5c65159SKalle Valo DMA_BIDIRECTIONAL); 513d5c65159SKalle Valo kfree(rx_tid->vaddr); 514d5c65159SKalle Valo } 515d5c65159SKalle Valo 516d5c65159SKalle Valo static void ath11k_dp_reo_cache_flush(struct ath11k_base *ab, 517d5c65159SKalle Valo struct dp_rx_tid *rx_tid) 518d5c65159SKalle Valo { 519d5c65159SKalle Valo struct ath11k_hal_reo_cmd cmd = {0}; 520d5c65159SKalle Valo unsigned long tot_desc_sz, desc_sz; 521d5c65159SKalle Valo int ret; 
522d5c65159SKalle Valo 523d5c65159SKalle Valo tot_desc_sz = rx_tid->size; 524d5c65159SKalle Valo desc_sz = ath11k_hal_reo_qdesc_size(0, HAL_DESC_REO_NON_QOS_TID); 525d5c65159SKalle Valo 526d5c65159SKalle Valo while (tot_desc_sz > desc_sz) { 527d5c65159SKalle Valo tot_desc_sz -= desc_sz; 528d5c65159SKalle Valo cmd.addr_lo = lower_32_bits(rx_tid->paddr + tot_desc_sz); 529d5c65159SKalle Valo cmd.addr_hi = upper_32_bits(rx_tid->paddr); 530d5c65159SKalle Valo ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid, 531d5c65159SKalle Valo HAL_REO_CMD_FLUSH_CACHE, &cmd, 532d5c65159SKalle Valo NULL); 533d5c65159SKalle Valo if (ret) 534d5c65159SKalle Valo ath11k_warn(ab, 535d5c65159SKalle Valo "failed to send HAL_REO_CMD_FLUSH_CACHE, tid %d (%d)\n", 536d5c65159SKalle Valo rx_tid->tid, ret); 537d5c65159SKalle Valo } 538d5c65159SKalle Valo 539d5c65159SKalle Valo memset(&cmd, 0, sizeof(cmd)); 540d5c65159SKalle Valo cmd.addr_lo = lower_32_bits(rx_tid->paddr); 541d5c65159SKalle Valo cmd.addr_hi = upper_32_bits(rx_tid->paddr); 542d5c65159SKalle Valo cmd.flag |= HAL_REO_CMD_FLG_NEED_STATUS; 543d5c65159SKalle Valo ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid, 544d5c65159SKalle Valo HAL_REO_CMD_FLUSH_CACHE, 545d5c65159SKalle Valo &cmd, ath11k_dp_reo_cmd_free); 546d5c65159SKalle Valo if (ret) { 547d5c65159SKalle Valo ath11k_err(ab, "failed to send HAL_REO_CMD_FLUSH_CACHE cmd, tid %d (%d)\n", 548d5c65159SKalle Valo rx_tid->tid, ret); 549d5c65159SKalle Valo dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size, 550d5c65159SKalle Valo DMA_BIDIRECTIONAL); 551d5c65159SKalle Valo kfree(rx_tid->vaddr); 552d5c65159SKalle Valo } 553d5c65159SKalle Valo } 554d5c65159SKalle Valo 555d5c65159SKalle Valo static void ath11k_dp_rx_tid_del_func(struct ath11k_dp *dp, void *ctx, 556d5c65159SKalle Valo enum hal_reo_cmd_status status) 557d5c65159SKalle Valo { 558d5c65159SKalle Valo struct ath11k_base *ab = dp->ab; 559d5c65159SKalle Valo struct dp_rx_tid *rx_tid = ctx; 560d5c65159SKalle Valo struct 
dp_reo_cache_flush_elem *elem, *tmp; 561d5c65159SKalle Valo 562d5c65159SKalle Valo if (status == HAL_REO_CMD_DRAIN) { 563d5c65159SKalle Valo goto free_desc; 564d5c65159SKalle Valo } else if (status != HAL_REO_CMD_SUCCESS) { 565d5c65159SKalle Valo /* Shouldn't happen! Cleanup in case of other failure? */ 566d5c65159SKalle Valo ath11k_warn(ab, "failed to delete rx tid %d hw descriptor %d\n", 567d5c65159SKalle Valo rx_tid->tid, status); 568d5c65159SKalle Valo return; 569d5c65159SKalle Valo } 570d5c65159SKalle Valo 571d5c65159SKalle Valo elem = kzalloc(sizeof(*elem), GFP_ATOMIC); 572d5c65159SKalle Valo if (!elem) 573d5c65159SKalle Valo goto free_desc; 574d5c65159SKalle Valo 575d5c65159SKalle Valo elem->ts = jiffies; 576d5c65159SKalle Valo memcpy(&elem->data, rx_tid, sizeof(*rx_tid)); 577d5c65159SKalle Valo 578d5c65159SKalle Valo spin_lock_bh(&dp->reo_cmd_lock); 579d5c65159SKalle Valo list_add_tail(&elem->list, &dp->reo_cmd_cache_flush_list); 580d5c65159SKalle Valo spin_unlock_bh(&dp->reo_cmd_lock); 581d5c65159SKalle Valo 582d5c65159SKalle Valo /* Flush and invalidate aged REO desc from HW cache */ 583d5c65159SKalle Valo spin_lock_bh(&dp->reo_cmd_lock); 584d5c65159SKalle Valo list_for_each_entry_safe(elem, tmp, &dp->reo_cmd_cache_flush_list, 585d5c65159SKalle Valo list) { 586d5c65159SKalle Valo if (time_after(jiffies, elem->ts + 587d5c65159SKalle Valo msecs_to_jiffies(DP_REO_DESC_FREE_TIMEOUT_MS))) { 588d5c65159SKalle Valo list_del(&elem->list); 589d5c65159SKalle Valo spin_unlock_bh(&dp->reo_cmd_lock); 590d5c65159SKalle Valo 591d5c65159SKalle Valo ath11k_dp_reo_cache_flush(ab, &elem->data); 592d5c65159SKalle Valo kfree(elem); 593d5c65159SKalle Valo spin_lock_bh(&dp->reo_cmd_lock); 594d5c65159SKalle Valo } 595d5c65159SKalle Valo } 596d5c65159SKalle Valo spin_unlock_bh(&dp->reo_cmd_lock); 597d5c65159SKalle Valo 598d5c65159SKalle Valo return; 599d5c65159SKalle Valo free_desc: 600d5c65159SKalle Valo dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size, 601d5c65159SKalle 
			 DMA_BIDIRECTIONAL);
	kfree(rx_tid->vaddr);
}

/* Tear down the REO RX queue for one TID of a peer.  Sends an
 * UPDATE_RX_QUEUE command that clears the VLD bit; the completion callback
 * (ath11k_dp_rx_tid_del_func) frees the queue descriptor.  If the command
 * cannot even be queued, the descriptor is unmapped and freed here instead.
 * Caller context: callers below invoke this under ab->base_lock.
 */
static void ath11k_peer_rx_tid_delete(struct ath11k *ar,
				      struct ath11k_peer *peer, u8 tid)
{
	struct ath11k_hal_reo_cmd cmd = {0};
	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
	int ret;

	if (!rx_tid->active)
		return;

	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
	cmd.addr_lo = lower_32_bits(rx_tid->paddr);
	cmd.addr_hi = upper_32_bits(rx_tid->paddr);
	/* Clearing only VLD invalidates the hardware queue descriptor */
	cmd.upd0 |= HAL_REO_CMD_UPD0_VLD;
	ret = ath11k_dp_tx_send_reo_cmd(ar->ab, rx_tid,
					HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
					ath11k_dp_rx_tid_del_func);
	if (ret) {
		ath11k_err(ar->ab, "failed to send HAL_REO_CMD_UPDATE_RX_QUEUE cmd, tid %d (%d)\n",
			   tid, ret);
		/* Command never reached firmware: reclaim the descriptor now */
		dma_unmap_single(ar->ab->dev, rx_tid->paddr, rx_tid->size,
				 DMA_BIDIRECTIONAL);
		kfree(rx_tid->vaddr);
	}

	rx_tid->active = false;
}

/* Delete the RX reorder queues of every TID (0..IEEE80211_NUM_TIDS,
 * inclusive — the extra slot is the non-QoS TID) for a departing peer.
 */
void ath11k_peer_rx_tid_cleanup(struct ath11k *ar, struct ath11k_peer *peer)
{
	int i;

	for (i = 0; i <= IEEE80211_NUM_TIDS; i++)
		ath11k_peer_rx_tid_delete(ar, peer, i);
}

/* Update an already-active RX TID queue with a new block-ack window size
 * and starting sequence number via a REO UPDATE_RX_QUEUE command.
 * Returns 0 on success or a negative errno if the command send fails.
 * NOTE(review): @peer is currently unused here — presumably kept for API
 * symmetry with the other tid helpers; confirm before removing.
 */
static int ath11k_peer_rx_tid_reo_update(struct ath11k *ar,
					 struct ath11k_peer *peer,
					 struct dp_rx_tid *rx_tid,
					 u32 ba_win_sz, u16 ssn)
{
	struct ath11k_hal_reo_cmd cmd = {0};
	int ret;

	cmd.addr_lo = lower_32_bits(rx_tid->paddr);
	cmd.addr_hi = upper_32_bits(rx_tid->paddr);
	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
	cmd.upd0 = HAL_REO_CMD_UPD0_BA_WINDOW_SIZE |
		   HAL_REO_CMD_UPD0_SSN;
	cmd.ba_window_size = ba_win_sz;
	cmd.upd2 = FIELD_PREP(HAL_REO_CMD_UPD2_SSN, ssn);

	/* No completion callback: nothing to free on completion here */
	ret = ath11k_dp_tx_send_reo_cmd(ar->ab, rx_tid,
					HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
					NULL);
	if (ret) {
		ath11k_warn(ar->ab, "failed to update rx tid queue, tid %d (%d)\n",
			    rx_tid->tid, ret);
		return ret;
	}

	rx_tid->ba_win_sz = ba_win_sz;

	return 0;
}

/* Free the host-side RX TID queue memory for a peer/TID after a failed
 * WMI reorder-queue setup.  Looks the peer up again (by MAC/vdev) because
 * the caller dropped ab->base_lock between setup and this rollback.
 */
static void ath11k_dp_rx_tid_mem_free(struct ath11k_base *ab,
				      const u8 *peer_mac, int vdev_id, u8 tid)
{
	struct ath11k_peer *peer;
	struct dp_rx_tid *rx_tid;

	spin_lock_bh(&ab->base_lock);

	peer = ath11k_peer_find(ab, vdev_id, peer_mac);
	if (!peer) {
		ath11k_warn(ab, "failed to find the peer to free up rx tid mem\n");
		goto unlock_exit;
	}

	rx_tid = &peer->rx_tid[tid];
	if (!rx_tid->active)
		goto unlock_exit;

	dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
			 DMA_BIDIRECTIONAL);
	kfree(rx_tid->vaddr);

	rx_tid->active = false;

unlock_exit:
	spin_unlock_bh(&ab->base_lock);
}

/* Set up (or update) the RX reorder queue for one peer/TID.
 *
 * If the TID is already active only the BA window/SSN are updated in REO
 * and firmware is notified over WMI.  Otherwise a hardware queue
 * descriptor is allocated, aligned, initialized, DMA-mapped and handed to
 * firmware; on WMI failure the memory is rolled back via
 * ath11k_dp_rx_tid_mem_free().
 *
 * Returns 0 on success or a negative errno.
 *
 * NOTE(review): kzalloc(GFP_KERNEL) and dma_map_single() are called while
 * ab->base_lock (a spinlock, taken with _bh) is held — a GFP_KERNEL
 * allocation may sleep in atomic context.  Looks like the allocation
 * should happen before taking the lock (or use GFP_ATOMIC); confirm
 * against the locking rules for base_lock.
 */
int ath11k_peer_rx_tid_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id,
			     u8 tid, u32 ba_win_sz, u16 ssn)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_peer *peer;
	struct dp_rx_tid *rx_tid;
	u32 hw_desc_sz;
	u32 *addr_aligned;
	void *vaddr;
	dma_addr_t paddr;
	int ret;

	spin_lock_bh(&ab->base_lock);

	peer = ath11k_peer_find(ab, vdev_id, peer_mac);
	if (!peer) {
		ath11k_warn(ab, "failed to find the peer to set up rx tid\n");
		spin_unlock_bh(&ab->base_lock);
		return -ENOENT;
	}

	rx_tid = &peer->rx_tid[tid];
	/* Update the tid queue if it is already setup */
	if (rx_tid->active) {
		paddr = rx_tid->paddr;
		ret = ath11k_peer_rx_tid_reo_update(ar, peer, rx_tid,
						    ba_win_sz, ssn);
		spin_unlock_bh(&ab->base_lock);
		if (ret) {
			ath11k_warn(ab, "failed to update reo for rx tid %d\n", tid);
			return ret;
		}

		ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id,
							     peer_mac, paddr,
							     tid, 1, ba_win_sz);
		if (ret)
			ath11k_warn(ab, "failed to send wmi command to update rx reorder queue, tid :%d (%d)\n",
				    tid, ret);
		return ret;
	}

	rx_tid->tid = tid;

	rx_tid->ba_win_sz = ba_win_sz;

	/* TODO: Optimize the memory allocation for qos tid based on the
	 * the actual BA window size in REO tid update path.
	 */
	if (tid == HAL_DESC_REO_NON_QOS_TID)
		hw_desc_sz = ath11k_hal_reo_qdesc_size(ba_win_sz, tid);
	else
		hw_desc_sz = ath11k_hal_reo_qdesc_size(DP_BA_WIN_SZ_MAX, tid);

	/* Over-allocate so the descriptor can be aligned to HAL_LINK_DESC_ALIGN */
	vaddr = kzalloc(hw_desc_sz + HAL_LINK_DESC_ALIGN - 1, GFP_KERNEL);
	if (!vaddr) {
		spin_unlock_bh(&ab->base_lock);
		return -ENOMEM;
	}

	addr_aligned = PTR_ALIGN(vaddr, HAL_LINK_DESC_ALIGN);

	ath11k_hal_reo_qdesc_setup(addr_aligned, tid, ba_win_sz, ssn);

	paddr = dma_map_single(ab->dev, addr_aligned, hw_desc_sz,
			       DMA_BIDIRECTIONAL);

	ret = dma_mapping_error(ab->dev, paddr);
	if (ret) {
		spin_unlock_bh(&ab->base_lock);
		goto err_mem_free;
	}

	/* vaddr (unaligned base) is kept for kfree(); paddr maps the aligned desc */
	rx_tid->vaddr = vaddr;
	rx_tid->paddr = paddr;
	rx_tid->size = hw_desc_sz;
	rx_tid->active = true;

	spin_unlock_bh(&ab->base_lock);

	ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, peer_mac,
						     paddr, tid, 1, ba_win_sz);
	if (ret) {
		ath11k_warn(ar->ab, "failed to setup rx reorder queue, tid :%d (%d)\n",
			    tid, ret);
		/* Roll back the mapping/allocation done above */
		ath11k_dp_rx_tid_mem_free(ab, peer_mac, vdev_id, tid);
	}

	return ret;

err_mem_free:
	kfree(vaddr);

	return ret;
}

/* mac80211 ampdu_action(RX_START) hook: create the reorder queue for the
 * station/TID using the negotiated buffer size and starting SSN.
 */
int ath11k_dp_rx_ampdu_start(struct ath11k *ar,
			     struct ieee80211_ampdu_params *params)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_sta *arsta = (void *)params->sta->drv_priv;
	int vdev_id = arsta->arvif->vdev_id;
	int ret;

	ret = ath11k_peer_rx_tid_setup(ar, params->sta->addr, vdev_id,
				       params->tid, params->buf_size,
				       params->ssn);
	if (ret)
		ath11k_warn(ab, "failed to setup rx tid %d\n", ret);

	return ret;
}

/* mac80211 ampdu_action(RX_STOP) hook: delete the reorder queue for the
 * station/TID and, if it was active, tell firmware to fall back to a
 * BA window of 1 via WMI.
 */
int ath11k_dp_rx_ampdu_stop(struct ath11k *ar,
			    struct ieee80211_ampdu_params *params)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_peer *peer;
	struct ath11k_sta *arsta = (void *)params->sta->drv_priv;
	int vdev_id = arsta->arvif->vdev_id;
	dma_addr_t paddr;
	bool active;
	int ret;

	spin_lock_bh(&ab->base_lock);

	peer = ath11k_peer_find(ab, vdev_id, params->sta->addr);
	if (!peer) {
		ath11k_warn(ab, "failed to find the peer to stop rx aggregation\n");
		spin_unlock_bh(&ab->base_lock);
		return -ENOENT;
	}

	/* Snapshot before delete: tid_delete clears rx_tid state */
	paddr = peer->rx_tid[params->tid].paddr;
	active = peer->rx_tid[params->tid].active;

	ath11k_peer_rx_tid_delete(ar, peer, params->tid);

	spin_unlock_bh(&ab->base_lock);

	if (!active)
		return 0;

	ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id,
						     params->sta->addr, paddr,
						     params->tid, 1, 1);
	if (ret)
		ath11k_warn(ab, "failed to send wmi to delete rx tid %d\n",
			    ret);

	return ret;
}

/* Find (or claim) a per-PPDU user-stats slot for @peer_id.  Slots are
 * filled in order: the first slot with is_valid_peer_id == false is free
 * and is returned for the caller to claim.  Returns -EINVAL when all
 * HTT_PPDU_STATS_MAX_USERS - 1 slots are taken by other peers.
 */
static int ath11k_get_ppdu_user_index(struct htt_ppdu_stats *ppdu_stats,
				      u16 peer_id)
{
	int i;

	for (i = 0; i < HTT_PPDU_STATS_MAX_USERS - 1; i++) {
		if (ppdu_stats->user_stats[i].is_valid_peer_id) {
			if (peer_id == ppdu_stats->user_stats[i].peer_id)
				return i;
		} else {
			return i;
		}
	}

	return -EINVAL;
}

/* TLV iterator callback: copy one HTT PPDU-stats TLV into the
 * htt_ppdu_stats_info pointed to by @data.  Per-user TLVs are routed to
 * the slot matching their sw_peer_id and recorded in tlv_flags.
 * Unknown tags are silently ignored (no default case needed — returns 0).
 */
static int ath11k_htt_tlv_ppdu_stats_parse(struct ath11k_base *ab,
					   u16 tag, u16 len, const void *ptr,
					   void *data)
{
	struct htt_ppdu_stats_info *ppdu_info;
	struct htt_ppdu_user_stats *user_stats;
	int cur_user;
	u16 peer_id;

	ppdu_info = (struct htt_ppdu_stats_info *)data;

	switch (tag) {
	case HTT_PPDU_STATS_TAG_COMMON:
		if (len < sizeof(struct htt_ppdu_stats_common)) {
			ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}
		memcpy((void *)&ppdu_info->ppdu_stats.common, ptr,
		       sizeof(struct htt_ppdu_stats_common));
		break;
	case HTT_PPDU_STATS_TAG_USR_RATE:
		if (len < sizeof(struct htt_ppdu_stats_user_rate)) {
			ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}

		peer_id = ((struct htt_ppdu_stats_user_rate *)ptr)->sw_peer_id;
		cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
						      peer_id);
		if (cur_user < 0)
			return -EINVAL;
		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
		user_stats->peer_id = peer_id;
		user_stats->is_valid_peer_id = true;
		memcpy((void *)&user_stats->rate, ptr,
		       sizeof(struct htt_ppdu_stats_user_rate));
		user_stats->tlv_flags |= BIT(tag);
		break;
	case HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON:
		if (len < sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn)) {
			ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}

		peer_id = ((struct htt_ppdu_stats_usr_cmpltn_cmn *)ptr)->sw_peer_id;
		cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
						      peer_id);
		if (cur_user < 0)
			return -EINVAL;
		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
		user_stats->peer_id = peer_id;
		user_stats->is_valid_peer_id = true;
		memcpy((void *)&user_stats->cmpltn_cmn, ptr,
		       sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn));
		user_stats->tlv_flags |= BIT(tag);
		break;
	case HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS:
		if (len <
		    sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status)) {
			ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}

		peer_id =
		((struct htt_ppdu_stats_usr_cmpltn_ack_ba_status *)ptr)->sw_peer_id;
		cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
						      peer_id);
		if (cur_user < 0)
			return -EINVAL;
		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
		user_stats->peer_id = peer_id;
		user_stats->is_valid_peer_id = true;
		memcpy((void *)&user_stats->ack_ba, ptr,
		       sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status));
		user_stats->tlv_flags |= BIT(tag);
		break;
	}
	return 0;
}

/* Walk an HTT TLV stream of @len bytes at @ptr, calling @iter for each
 * TLV payload.  Returns -EINVAL on a truncated header or payload; only an
 * -ENOMEM from @iter aborts the walk (other iter errors are ignored and
 * parsing continues).  Returns 0 after consuming the whole buffer.
 * NOTE(review): the "%hhu" specifiers print u16 tlv_tag/tlv_len as if
 * they were unsigned char — should be "%u"; confirm against the upstream
 * format-specifier cleanup.
 */
int ath11k_dp_htt_tlv_iter(struct ath11k_base *ab, const void *ptr, size_t len,
			   int (*iter)(struct ath11k_base *ar, u16 tag, u16 len,
				       const void *ptr, void *data),
			   void *data)
{
	const struct htt_tlv *tlv;
	const void *begin = ptr;
	u16 tlv_tag, tlv_len;
	int ret = -EINVAL;

	while (len > 0) {
		if (len < sizeof(*tlv)) {
			ath11k_err(ab, "htt tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
				   ptr - begin, len, sizeof(*tlv));
			return -EINVAL;
		}
		tlv = (struct htt_tlv *)ptr;
		tlv_tag = FIELD_GET(HTT_TLV_TAG, tlv->header);
		tlv_len = FIELD_GET(HTT_TLV_LEN, tlv->header);
		ptr += sizeof(*tlv);
		len -= sizeof(*tlv);

		if (tlv_len > len) {
			ath11k_err(ab, "htt tlv parse failure of tag %hhu at byte %zd (%zu bytes left, %hhu expected)\n",
				   tlv_tag, ptr - begin, len, tlv_len);
			return -EINVAL;
		}
		ret = iter(ab, tlv_tag, tlv_len, ptr, data);
		if (ret == -ENOMEM)
			return ret;

		ptr += tlv_len;
		len -= tlv_len;
	}
	return 0;
}

/* Map an ath11k bandwidth enum to mac80211 TX rate-control bandwidth
 * flags.  20 MHz (and unknown values) map to 0 — no flag set.
 */
static u32 ath11k_bw_to_mac80211_bwflags(u8 bw)
{
	u32 bwflags = 0;

	switch (bw) {
	case ATH11K_BW_40:
		bwflags = IEEE80211_TX_RC_40_MHZ_WIDTH;
		break;
	case ATH11K_BW_80:
		bwflags = IEEE80211_TX_RC_80_MHZ_WIDTH;
		break;
	case ATH11K_BW_160:
		bwflags = IEEE80211_TX_RC_160_MHZ_WIDTH;
		break;
	}

	return bwflags;
}

/* Decode one user's rate/completion TLVs from a parsed PPDU-stats record
 * and push the result into mac80211 (ieee80211_tx_rate_update) and the
 * per-peer TX stats.  Bails out early on missing USR_RATE TLV or on
 * rate values outside the valid HT/VHT ranges.
 * NOTE(review): "%hhd" prints u8 mcs/nss as signed char — should be
 * "%u"/"%hhu"; and the "nss < 1" HT check can never be true since nss is
 * computed as field + 1 on a u8 — confirm intent.
 */
static void
ath11k_update_per_peer_tx_stats(struct ath11k *ar,
				struct htt_ppdu_stats *ppdu_stats, u8 user)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_peer *peer;
	struct ieee80211_sta *sta;
	struct ath11k_sta *arsta;
	struct htt_ppdu_stats_user_rate *user_rate;
	struct ieee80211_chanctx_conf *conf = NULL;
	struct ath11k_per_peer_tx_stats *peer_stats = &ar->peer_tx_stats;
	struct htt_ppdu_user_stats *usr_stats = &ppdu_stats->user_stats[user];
	struct htt_ppdu_stats_common *common = &ppdu_stats->common;
	int ret;
	u8 flags, mcs, nss, bw, sgi, rate_idx = 0;
	u32 succ_bytes = 0;
	u16 rate = 0, succ_pkts = 0;
	u32 tx_duration = 0;
	bool is_ampdu = false;

	if (!usr_stats)
		return;

	/* Without the rate TLV there is nothing to report */
	if (!(usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_RATE)))
		return;

	if (usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON))
		is_ampdu =
			HTT_USR_CMPLTN_IS_AMPDU(usr_stats->cmpltn_cmn.flags);

	if (usr_stats->tlv_flags &
	    BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS)) {
		succ_bytes = usr_stats->ack_ba.success_bytes;
		succ_pkts = FIELD_GET(HTT_PPDU_STATS_ACK_BA_INFO_NUM_MSDU_M,
				      usr_stats->ack_ba.info);
	}

	if (common->fes_duration_us)
		tx_duration = common->fes_duration_us;

	user_rate = &usr_stats->rate;
	flags = HTT_USR_RATE_PREAMBLE(user_rate->rate_flags);
	/* HTT BW encoding is offset by 2 from the ATH11K_BW_* enum */
	bw = HTT_USR_RATE_BW(user_rate->rate_flags) - 2;
	nss = HTT_USR_RATE_NSS(user_rate->rate_flags) + 1;
	mcs = HTT_USR_RATE_MCS(user_rate->rate_flags);
	sgi = HTT_USR_RATE_GI(user_rate->rate_flags);

	/* Note: If host configured fixed rates and in some other special
	 * cases, the broadcast/management frames are sent in different rates.
	 * Firmware rate's control to be skipped for this?
	 */

	if (flags == WMI_RATE_PREAMBLE_VHT && mcs > 9) {
		ath11k_warn(ab, "Invalid VHT mcs %hhd peer stats", mcs);
		return;
	}

	if (flags == WMI_RATE_PREAMBLE_HT && (mcs > 7 || nss < 1)) {
		ath11k_warn(ab, "Invalid HT mcs %hhd nss %hhd peer stats",
			    mcs, nss);
		return;
	}

	if (flags == WMI_RATE_PREAMBLE_CCK || flags == WMI_RATE_PREAMBLE_OFDM) {
		ret = ath11k_mac_hw_ratecode_to_legacy_rate(mcs,
							    flags,
							    &rate_idx,
							    &rate);
		if (ret < 0)
			return;
	}

	rcu_read_lock();
	spin_lock_bh(&ab->base_lock);
	peer = ath11k_peer_find_by_id(ab, usr_stats->peer_id);

	if (!peer || !peer->sta) {
		spin_unlock_bh(&ab->base_lock);
		rcu_read_unlock();
		return;
	}

	sta = peer->sta;
	arsta = (struct ath11k_sta *)sta->drv_priv;

	memset(&arsta->txrate, 0, sizeof(arsta->txrate));
	memset(&arsta->tx_info.status, 0, sizeof(arsta->tx_info.status));

	switch (flags) {
	case WMI_RATE_PREAMBLE_OFDM:
		arsta->txrate.legacy = rate;
		if (arsta->arvif && arsta->arvif->vif)
			conf = rcu_dereference(arsta->arvif->vif->chanctx_conf);
		/* 5 GHz rate tables start after the four CCK-only entries */
		if (conf && conf->def.chan->band == NL80211_BAND_5GHZ)
			arsta->tx_info.status.rates[0].idx = rate_idx - 4;
		break;
	case WMI_RATE_PREAMBLE_CCK:
		arsta->txrate.legacy = rate;
		arsta->tx_info.status.rates[0].idx = rate_idx;
		if (mcs > ATH11K_HW_RATE_CCK_LP_1M &&
		    mcs <= ATH11K_HW_RATE_CCK_SP_2M)
			arsta->tx_info.status.rates[0].flags |=
					IEEE80211_TX_RC_USE_SHORT_PREAMBLE;
		break;
	case WMI_RATE_PREAMBLE_HT:
		/* HT MCS index encodes NSS: 8 MCS values per spatial stream */
		arsta->txrate.mcs = mcs + 8 * (nss - 1);
		arsta->tx_info.status.rates[0].idx = arsta->txrate.mcs;
		arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
		arsta->tx_info.status.rates[0].flags |= IEEE80211_TX_RC_MCS;
		if (sgi) {
			arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
			arsta->tx_info.status.rates[0].flags |=
					IEEE80211_TX_RC_SHORT_GI;
		}
		break;
	case WMI_RATE_PREAMBLE_VHT:
		arsta->txrate.mcs = mcs;
		ieee80211_rate_set_vht(&arsta->tx_info.status.rates[0], mcs, nss);
		arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
		arsta->tx_info.status.rates[0].flags |= IEEE80211_TX_RC_VHT_MCS;
		if (sgi) {
			arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
			arsta->tx_info.status.rates[0].flags |=
					IEEE80211_TX_RC_SHORT_GI;
		}
		break;
	}

	arsta->txrate.nss = nss;
	arsta->txrate.bw = ath11k_mac_bw_to_mac80211_bw(bw);
	arsta->tx_info.status.rates[0].flags |= ath11k_bw_to_mac80211_bwflags(bw);

	memcpy(&arsta->last_txrate, &arsta->txrate, sizeof(struct rate_info));

	if (succ_pkts) {
		arsta->tx_info.flags = IEEE80211_TX_STAT_ACK;
		arsta->tx_info.status.rates[0].count = 1;
		ieee80211_tx_rate_update(ar->hw, sta, &arsta->tx_info);
	}

	memset(peer_stats, 0, sizeof(*peer_stats));

	peer_stats->succ_pkts = succ_pkts;
	peer_stats->succ_bytes = succ_bytes;
	peer_stats->is_ampdu = is_ampdu;
	peer_stats->duration = tx_duration;
	peer_stats->ba_fails =
		HTT_USR_CMPLTN_LONG_RETRY(usr_stats->cmpltn_cmn.flags) +
		HTT_USR_CMPLTN_SHORT_RETRY(usr_stats->cmpltn_cmn.flags);

	if (ath11k_debug_is_extd_tx_stats_enabled(ar))
		ath11k_accumulate_per_peer_tx_stats(arsta,
						    peer_stats, rate_idx);

	spin_unlock_bh(&ab->base_lock);
	rcu_read_unlock();
}

/* Flush the stats of every user slot of a completed PPDU into the
 * per-peer TX stats machinery.
 */
static void ath11k_htt_update_ppdu_stats(struct ath11k *ar,
					 struct htt_ppdu_stats *ppdu_stats)
{
	u8 user;

	for (user = 0; user < HTT_PPDU_STATS_MAX_USERS - 1; user++)
		ath11k_update_per_peer_tx_stats(ar, ppdu_stats, user);
}

/* Look up the cached descriptor for @ppdu_id, or allocate a fresh one and
 * queue it on ar->ppdu_stats_info.  When the cache exceeds
 * HTT_PPDU_DESC_MAX_DEPTH, the oldest entry is flushed to the stats code
 * and freed first.  Returns NULL on allocation failure.
 * NOTE(review): the returned pointer is used by the caller after
 * data_lock is dropped — presumably safe because only this path frees
 * entries; confirm the lifetime rules.
 */
static
struct htt_ppdu_stats_info *ath11k_dp_htt_get_ppdu_desc(struct ath11k *ar,
							u32 ppdu_id)
{
	struct htt_ppdu_stats_info *ppdu_info = NULL;

	spin_lock_bh(&ar->data_lock);
	if (!list_empty(&ar->ppdu_stats_info)) {
		list_for_each_entry(ppdu_info, &ar->ppdu_stats_info, list) {
			if (ppdu_info && ppdu_info->ppdu_id == ppdu_id) {
				spin_unlock_bh(&ar->data_lock);
				return ppdu_info;
			}
		}

		if (ar->ppdu_stat_list_depth > HTT_PPDU_DESC_MAX_DEPTH) {
			ppdu_info = list_first_entry(&ar->ppdu_stats_info,
						     typeof(*ppdu_info), list);
			list_del(&ppdu_info->list);
			ar->ppdu_stat_list_depth--;
			ath11k_htt_update_ppdu_stats(ar, &ppdu_info->ppdu_stats);
			kfree(ppdu_info);
		}
	}
	spin_unlock_bh(&ar->data_lock);

	ppdu_info = kzalloc(sizeof(*ppdu_info), GFP_KERNEL);
	if (!ppdu_info)
		return NULL;

	spin_lock_bh(&ar->data_lock);
	list_add_tail(&ppdu_info->list, &ar->ppdu_stats_info);
	ar->ppdu_stat_list_depth++;
	spin_unlock_bh(&ar->data_lock);

	return ppdu_info;
}

/* Handle an HTT_T2H PPDU_STATS_IND message: locate the target pdev/radio,
 * fetch (or create) the cached descriptor for this ppdu_id and parse the
 * TLV payload into it.  Returns 0 on success or a negative errno.
 */
static int ath11k_htt_pull_ppdu_stats(struct ath11k_base *ab,
				      struct sk_buff *skb)
{
	struct ath11k_htt_ppdu_stats_msg *msg;
	struct htt_ppdu_stats_info *ppdu_info;
	struct ath11k *ar;
	int ret;
	u8 pdev_id;
	u32 ppdu_id, len;

	msg = (struct ath11k_htt_ppdu_stats_msg *)skb->data;
	len = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PAYLOAD_SIZE, msg->info);
	pdev_id = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PDEV_ID, msg->info);
	ppdu_id = msg->ppdu_id;

	rcu_read_lock();
	ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id);
	if (!ar) {
		ret = -EINVAL;
		goto exit;
	}

	if (ath11k_debug_is_pktlog_lite_mode_enabled(ar))
		trace_ath11k_htt_ppdu_stats(ar, skb->data, len);

	ppdu_info = ath11k_dp_htt_get_ppdu_desc(ar, ppdu_id);
	if (!ppdu_info) {
		ret = -EINVAL;
		goto exit;
	}

	ppdu_info->ppdu_id = ppdu_id;
	ret = ath11k_dp_htt_tlv_iter(ab, msg->data, len,
				     ath11k_htt_tlv_ppdu_stats_parse,
				     (void *)ppdu_info);
	if (ret) {
		ath11k_warn(ab, "Failed to parse tlv %d\n", ret);
		goto exit;
	}

exit:
	rcu_read_unlock();

	return ret;
}

static void ath11k_htt_pktlog(struct ath11k_base *ab, struct sk_buff *skb) 1261d5c65159SKalle Valo { 1262d5c65159SKalle Valo struct htt_pktlog_msg *data = (struct htt_pktlog_msg *)skb->data; 1263d5c65159SKalle Valo struct ath11k *ar; 1264d5c65159SKalle Valo u32 len; 1265d5c65159SKalle Valo u8 pdev_id; 1266d5c65159SKalle Valo 1267d5c65159SKalle Valo len = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PAYLOAD_SIZE, data->hdr); 1268d5c65159SKalle Valo 1269d5c65159SKalle Valo if (len > ATH11K_HTT_PKTLOG_MAX_SIZE) { 1270d5c65159SKalle Valo ath11k_warn(ab, "htt pktlog buffer size %d, expected < %d\n", 1271d5c65159SKalle Valo len, 1272d5c65159SKalle Valo ATH11K_HTT_PKTLOG_MAX_SIZE); 1273d5c65159SKalle Valo return; 1274d5c65159SKalle Valo } 1275d5c65159SKalle Valo 1276d5c65159SKalle Valo pdev_id = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PDEV_ID, data->hdr); 1277d5c65159SKalle Valo pdev_id = DP_HW2SW_MACID(pdev_id); 1278d5c65159SKalle Valo ar = ab->pdevs[pdev_id].ar; 1279d5c65159SKalle Valo 1280d5c65159SKalle Valo trace_ath11k_htt_pktlog(ar, data->payload, len); 1281d5c65159SKalle Valo } 1282d5c65159SKalle Valo 1283d5c65159SKalle Valo void ath11k_dp_htt_htc_t2h_msg_handler(struct ath11k_base *ab, 1284d5c65159SKalle Valo struct sk_buff *skb) 1285d5c65159SKalle Valo { 1286d5c65159SKalle Valo struct ath11k_dp *dp = &ab->dp; 1287d5c65159SKalle Valo struct htt_resp_msg *resp = (struct htt_resp_msg *)skb->data; 1288d5c65159SKalle Valo enum htt_t2h_msg_type type = FIELD_GET(HTT_T2H_MSG_TYPE, *(u32 *)resp); 1289d5c65159SKalle Valo u16 peer_id; 1290d5c65159SKalle Valo u8 vdev_id; 1291d5c65159SKalle Valo u8 mac_addr[ETH_ALEN]; 1292d5c65159SKalle Valo u16 peer_mac_h16; 1293d5c65159SKalle Valo u16 ast_hash; 1294d5c65159SKalle Valo 1295d5c65159SKalle Valo ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "dp_htt rx msg type :0x%0x\n", type); 1296d5c65159SKalle Valo 1297d5c65159SKalle Valo switch (type) { 1298d5c65159SKalle Valo case HTT_T2H_MSG_TYPE_VERSION_CONF: 1299d5c65159SKalle Valo dp->htt_tgt_ver_major = 
FIELD_GET(HTT_T2H_VERSION_CONF_MAJOR, 1300d5c65159SKalle Valo resp->version_msg.version); 1301d5c65159SKalle Valo dp->htt_tgt_ver_minor = FIELD_GET(HTT_T2H_VERSION_CONF_MINOR, 1302d5c65159SKalle Valo resp->version_msg.version); 1303d5c65159SKalle Valo complete(&dp->htt_tgt_version_received); 1304d5c65159SKalle Valo break; 1305d5c65159SKalle Valo case HTT_T2H_MSG_TYPE_PEER_MAP: 1306d5c65159SKalle Valo vdev_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_VDEV_ID, 1307d5c65159SKalle Valo resp->peer_map_ev.info); 1308d5c65159SKalle Valo peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_PEER_ID, 1309d5c65159SKalle Valo resp->peer_map_ev.info); 1310d5c65159SKalle Valo peer_mac_h16 = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16, 1311d5c65159SKalle Valo resp->peer_map_ev.info1); 1312d5c65159SKalle Valo ath11k_dp_get_mac_addr(resp->peer_map_ev.mac_addr_l32, 1313d5c65159SKalle Valo peer_mac_h16, mac_addr); 1314d5c65159SKalle Valo ast_hash = FIELD_GET(HTT_T2H_PEER_MAP_INFO2_AST_HASH_VAL, 1315d5c65159SKalle Valo resp->peer_map_ev.info1); 1316d5c65159SKalle Valo ath11k_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash); 1317d5c65159SKalle Valo break; 1318d5c65159SKalle Valo case HTT_T2H_MSG_TYPE_PEER_UNMAP: 1319d5c65159SKalle Valo peer_id = FIELD_GET(HTT_T2H_PEER_UNMAP_INFO_PEER_ID, 1320d5c65159SKalle Valo resp->peer_unmap_ev.info); 1321d5c65159SKalle Valo ath11k_peer_unmap_event(ab, peer_id); 1322d5c65159SKalle Valo break; 1323d5c65159SKalle Valo case HTT_T2H_MSG_TYPE_PPDU_STATS_IND: 1324d5c65159SKalle Valo ath11k_htt_pull_ppdu_stats(ab, skb); 1325d5c65159SKalle Valo break; 1326d5c65159SKalle Valo case HTT_T2H_MSG_TYPE_EXT_STATS_CONF: 1327d5c65159SKalle Valo ath11k_dbg_htt_ext_stats_handler(ab, skb); 1328d5c65159SKalle Valo break; 1329d5c65159SKalle Valo case HTT_T2H_MSG_TYPE_PKTLOG: 1330d5c65159SKalle Valo ath11k_htt_pktlog(ab, skb); 1331d5c65159SKalle Valo break; 1332d5c65159SKalle Valo default: 1333d5c65159SKalle Valo ath11k_warn(ab, "htt event %d not handled\n", type); 
1334d5c65159SKalle Valo break; 1335d5c65159SKalle Valo } 1336d5c65159SKalle Valo 1337d5c65159SKalle Valo dev_kfree_skb_any(skb); 1338d5c65159SKalle Valo } 1339d5c65159SKalle Valo 1340d5c65159SKalle Valo static int ath11k_dp_rx_msdu_coalesce(struct ath11k *ar, 1341d5c65159SKalle Valo struct sk_buff_head *msdu_list, 1342d5c65159SKalle Valo struct sk_buff *first, struct sk_buff *last, 1343d5c65159SKalle Valo u8 l3pad_bytes, int msdu_len) 1344d5c65159SKalle Valo { 1345d5c65159SKalle Valo struct sk_buff *skb; 1346d5c65159SKalle Valo struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(first); 1347d5c65159SKalle Valo struct hal_rx_desc *ldesc; 1348d5c65159SKalle Valo int space_extra; 1349d5c65159SKalle Valo int rem_len; 1350d5c65159SKalle Valo int buf_len; 1351d5c65159SKalle Valo 1352d5c65159SKalle Valo if (!rxcb->is_continuation) { 1353d5c65159SKalle Valo skb_put(first, HAL_RX_DESC_SIZE + l3pad_bytes + msdu_len); 1354d5c65159SKalle Valo skb_pull(first, HAL_RX_DESC_SIZE + l3pad_bytes); 1355d5c65159SKalle Valo return 0; 1356d5c65159SKalle Valo } 1357d5c65159SKalle Valo 1358d5c65159SKalle Valo if (WARN_ON_ONCE(msdu_len <= (DP_RX_BUFFER_SIZE - 1359d5c65159SKalle Valo (HAL_RX_DESC_SIZE + l3pad_bytes)))) { 1360d5c65159SKalle Valo skb_put(first, HAL_RX_DESC_SIZE + l3pad_bytes + msdu_len); 1361d5c65159SKalle Valo skb_pull(first, HAL_RX_DESC_SIZE + l3pad_bytes); 1362d5c65159SKalle Valo return 0; 1363d5c65159SKalle Valo } 1364d5c65159SKalle Valo 1365d5c65159SKalle Valo ldesc = (struct hal_rx_desc *)last->data; 1366d5c65159SKalle Valo rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(ldesc); 1367d5c65159SKalle Valo rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(ldesc); 1368d5c65159SKalle Valo 1369d5c65159SKalle Valo /* MSDU spans over multiple buffers because the length of the MSDU 1370d5c65159SKalle Valo * exceeds DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE. So assume the data 1371d5c65159SKalle Valo * in the first buf is of length DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE. 
1372d5c65159SKalle Valo */ 1373d5c65159SKalle Valo skb_put(first, DP_RX_BUFFER_SIZE); 1374d5c65159SKalle Valo skb_pull(first, HAL_RX_DESC_SIZE + l3pad_bytes); 1375d5c65159SKalle Valo 1376d5c65159SKalle Valo space_extra = msdu_len - (DP_RX_BUFFER_SIZE + skb_tailroom(first)); 1377d5c65159SKalle Valo if (space_extra > 0 && 1378d5c65159SKalle Valo (pskb_expand_head(first, 0, space_extra, GFP_ATOMIC) < 0)) { 1379d5c65159SKalle Valo /* Free up all buffers of the MSDU */ 1380d5c65159SKalle Valo while ((skb = __skb_dequeue(msdu_list)) != NULL) { 1381d5c65159SKalle Valo rxcb = ATH11K_SKB_RXCB(skb); 1382d5c65159SKalle Valo if (!rxcb->is_continuation) { 1383d5c65159SKalle Valo dev_kfree_skb_any(skb); 1384d5c65159SKalle Valo break; 1385d5c65159SKalle Valo } 1386d5c65159SKalle Valo dev_kfree_skb_any(skb); 1387d5c65159SKalle Valo } 1388d5c65159SKalle Valo return -ENOMEM; 1389d5c65159SKalle Valo } 1390d5c65159SKalle Valo 1391d5c65159SKalle Valo /* When an MSDU spread over multiple buffers attention, MSDU_END and 1392d5c65159SKalle Valo * MPDU_END tlvs are valid only in the last buffer. Copy those tlvs. 
1393d5c65159SKalle Valo */ 1394d5c65159SKalle Valo ath11k_dp_rx_desc_end_tlv_copy(rxcb->rx_desc, ldesc); 1395d5c65159SKalle Valo 1396d5c65159SKalle Valo rem_len = msdu_len - 1397d5c65159SKalle Valo (DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE - l3pad_bytes); 1398d5c65159SKalle Valo while ((skb = __skb_dequeue(msdu_list)) != NULL && rem_len > 0) { 1399d5c65159SKalle Valo rxcb = ATH11K_SKB_RXCB(skb); 1400d5c65159SKalle Valo if (rxcb->is_continuation) 1401d5c65159SKalle Valo buf_len = DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE; 1402d5c65159SKalle Valo else 1403d5c65159SKalle Valo buf_len = rem_len; 1404d5c65159SKalle Valo 1405d5c65159SKalle Valo if (buf_len > (DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE)) { 1406d5c65159SKalle Valo WARN_ON_ONCE(1); 1407d5c65159SKalle Valo dev_kfree_skb_any(skb); 1408d5c65159SKalle Valo return -EINVAL; 1409d5c65159SKalle Valo } 1410d5c65159SKalle Valo 1411d5c65159SKalle Valo skb_put(skb, buf_len + HAL_RX_DESC_SIZE); 1412d5c65159SKalle Valo skb_pull(skb, HAL_RX_DESC_SIZE); 1413d5c65159SKalle Valo skb_copy_from_linear_data(skb, skb_put(first, buf_len), 1414d5c65159SKalle Valo buf_len); 1415d5c65159SKalle Valo dev_kfree_skb_any(skb); 1416d5c65159SKalle Valo 1417d5c65159SKalle Valo rem_len -= buf_len; 1418d5c65159SKalle Valo if (!rxcb->is_continuation) 1419d5c65159SKalle Valo break; 1420d5c65159SKalle Valo } 1421d5c65159SKalle Valo 1422d5c65159SKalle Valo return 0; 1423d5c65159SKalle Valo } 1424d5c65159SKalle Valo 1425d5c65159SKalle Valo static struct sk_buff *ath11k_dp_rx_get_msdu_last_buf(struct sk_buff_head *msdu_list, 1426d5c65159SKalle Valo struct sk_buff *first) 1427d5c65159SKalle Valo { 1428d5c65159SKalle Valo struct sk_buff *skb; 1429d5c65159SKalle Valo struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(first); 1430d5c65159SKalle Valo 1431d5c65159SKalle Valo if (!rxcb->is_continuation) 1432d5c65159SKalle Valo return first; 1433d5c65159SKalle Valo 1434d5c65159SKalle Valo skb_queue_walk(msdu_list, skb) { 1435d5c65159SKalle Valo rxcb = ATH11K_SKB_RXCB(skb); 
1436d5c65159SKalle Valo if (!rxcb->is_continuation) 1437d5c65159SKalle Valo return skb; 1438d5c65159SKalle Valo } 1439d5c65159SKalle Valo 1440d5c65159SKalle Valo return NULL; 1441d5c65159SKalle Valo } 1442d5c65159SKalle Valo 1443d5c65159SKalle Valo static int ath11k_dp_rx_retrieve_amsdu(struct ath11k *ar, 1444d5c65159SKalle Valo struct sk_buff_head *msdu_list, 1445d5c65159SKalle Valo struct sk_buff_head *amsdu_list) 1446d5c65159SKalle Valo { 1447d5c65159SKalle Valo struct sk_buff *msdu = skb_peek(msdu_list); 1448d5c65159SKalle Valo struct sk_buff *last_buf; 1449d5c65159SKalle Valo struct ath11k_skb_rxcb *rxcb; 1450d5c65159SKalle Valo struct ieee80211_hdr *hdr; 1451d5c65159SKalle Valo struct hal_rx_desc *rx_desc, *lrx_desc; 1452d5c65159SKalle Valo u16 msdu_len; 1453d5c65159SKalle Valo u8 l3_pad_bytes; 1454d5c65159SKalle Valo u8 *hdr_status; 1455d5c65159SKalle Valo int ret; 1456d5c65159SKalle Valo 1457d5c65159SKalle Valo if (!msdu) 1458d5c65159SKalle Valo return -ENOENT; 1459d5c65159SKalle Valo 1460d5c65159SKalle Valo rx_desc = (struct hal_rx_desc *)msdu->data; 1461d5c65159SKalle Valo hdr_status = ath11k_dp_rx_h_80211_hdr(rx_desc); 1462d5c65159SKalle Valo hdr = (struct ieee80211_hdr *)hdr_status; 1463d5c65159SKalle Valo /* Process only data frames */ 1464d5c65159SKalle Valo if (!ieee80211_is_data(hdr->frame_control)) { 1465d5c65159SKalle Valo __skb_unlink(msdu, msdu_list); 1466d5c65159SKalle Valo dev_kfree_skb_any(msdu); 1467d5c65159SKalle Valo return -EINVAL; 1468d5c65159SKalle Valo } 1469d5c65159SKalle Valo 1470d5c65159SKalle Valo do { 1471d5c65159SKalle Valo __skb_unlink(msdu, msdu_list); 1472d5c65159SKalle Valo last_buf = ath11k_dp_rx_get_msdu_last_buf(msdu_list, msdu); 1473d5c65159SKalle Valo if (!last_buf) { 1474d5c65159SKalle Valo ath11k_warn(ar->ab, 1475d5c65159SKalle Valo "No valid Rx buffer to access Atten/MSDU_END/MPDU_END tlvs\n"); 1476d5c65159SKalle Valo ret = -EIO; 1477d5c65159SKalle Valo goto free_out; 1478d5c65159SKalle Valo } 1479d5c65159SKalle Valo 
1480d5c65159SKalle Valo rx_desc = (struct hal_rx_desc *)msdu->data; 1481d5c65159SKalle Valo lrx_desc = (struct hal_rx_desc *)last_buf->data; 1482d5c65159SKalle Valo 1483d5c65159SKalle Valo if (!ath11k_dp_rx_h_attn_msdu_done(lrx_desc)) { 1484d5c65159SKalle Valo ath11k_warn(ar->ab, "msdu_done bit in attention is not set\n"); 1485d5c65159SKalle Valo ret = -EIO; 1486d5c65159SKalle Valo goto free_out; 1487d5c65159SKalle Valo } 1488d5c65159SKalle Valo 1489d5c65159SKalle Valo rxcb = ATH11K_SKB_RXCB(msdu); 1490d5c65159SKalle Valo rxcb->rx_desc = rx_desc; 1491d5c65159SKalle Valo msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(rx_desc); 1492d5c65159SKalle Valo l3_pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(lrx_desc); 1493d5c65159SKalle Valo 1494d5c65159SKalle Valo if (!rxcb->is_continuation) { 1495d5c65159SKalle Valo skb_put(msdu, HAL_RX_DESC_SIZE + l3_pad_bytes + msdu_len); 1496d5c65159SKalle Valo skb_pull(msdu, HAL_RX_DESC_SIZE + l3_pad_bytes); 1497d5c65159SKalle Valo } else { 1498d5c65159SKalle Valo ret = ath11k_dp_rx_msdu_coalesce(ar, msdu_list, 1499d5c65159SKalle Valo msdu, last_buf, 1500d5c65159SKalle Valo l3_pad_bytes, msdu_len); 1501d5c65159SKalle Valo if (ret) { 1502d5c65159SKalle Valo ath11k_warn(ar->ab, 1503d5c65159SKalle Valo "failed to coalesce msdu rx buffer%d\n", ret); 1504d5c65159SKalle Valo goto free_out; 1505d5c65159SKalle Valo } 1506d5c65159SKalle Valo } 1507d5c65159SKalle Valo __skb_queue_tail(amsdu_list, msdu); 1508d5c65159SKalle Valo 1509d5c65159SKalle Valo /* Should we also consider msdu_cnt from mpdu_meta while 1510d5c65159SKalle Valo * preparing amsdu list? 
1511d5c65159SKalle Valo */ 1512d5c65159SKalle Valo if (rxcb->is_last_msdu) 1513d5c65159SKalle Valo break; 1514d5c65159SKalle Valo } while ((msdu = skb_peek(msdu_list)) != NULL); 1515d5c65159SKalle Valo 1516d5c65159SKalle Valo return 0; 1517d5c65159SKalle Valo 1518d5c65159SKalle Valo free_out: 1519d5c65159SKalle Valo dev_kfree_skb_any(msdu); 1520d5c65159SKalle Valo __skb_queue_purge(amsdu_list); 1521d5c65159SKalle Valo 1522d5c65159SKalle Valo return ret; 1523d5c65159SKalle Valo } 1524d5c65159SKalle Valo 1525d5c65159SKalle Valo static void ath11k_dp_rx_h_csum_offload(struct sk_buff *msdu) 1526d5c65159SKalle Valo { 1527d5c65159SKalle Valo struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); 1528d5c65159SKalle Valo bool ip_csum_fail, l4_csum_fail; 1529d5c65159SKalle Valo 1530d5c65159SKalle Valo ip_csum_fail = ath11k_dp_rx_h_attn_ip_cksum_fail(rxcb->rx_desc); 1531d5c65159SKalle Valo l4_csum_fail = ath11k_dp_rx_h_attn_l4_cksum_fail(rxcb->rx_desc); 1532d5c65159SKalle Valo 1533d5c65159SKalle Valo msdu->ip_summed = (ip_csum_fail || l4_csum_fail) ? 
1534d5c65159SKalle Valo CHECKSUM_NONE : CHECKSUM_UNNECESSARY; 1535d5c65159SKalle Valo } 1536d5c65159SKalle Valo 1537d5c65159SKalle Valo static int ath11k_dp_rx_crypto_mic_len(struct ath11k *ar, 1538d5c65159SKalle Valo enum hal_encrypt_type enctype) 1539d5c65159SKalle Valo { 1540d5c65159SKalle Valo switch (enctype) { 1541d5c65159SKalle Valo case HAL_ENCRYPT_TYPE_OPEN: 1542d5c65159SKalle Valo case HAL_ENCRYPT_TYPE_TKIP_NO_MIC: 1543d5c65159SKalle Valo case HAL_ENCRYPT_TYPE_TKIP_MIC: 1544d5c65159SKalle Valo return 0; 1545d5c65159SKalle Valo case HAL_ENCRYPT_TYPE_CCMP_128: 1546d5c65159SKalle Valo return IEEE80211_CCMP_MIC_LEN; 1547d5c65159SKalle Valo case HAL_ENCRYPT_TYPE_CCMP_256: 1548d5c65159SKalle Valo return IEEE80211_CCMP_256_MIC_LEN; 1549d5c65159SKalle Valo case HAL_ENCRYPT_TYPE_GCMP_128: 1550d5c65159SKalle Valo case HAL_ENCRYPT_TYPE_AES_GCMP_256: 1551d5c65159SKalle Valo return IEEE80211_GCMP_MIC_LEN; 1552d5c65159SKalle Valo case HAL_ENCRYPT_TYPE_WEP_40: 1553d5c65159SKalle Valo case HAL_ENCRYPT_TYPE_WEP_104: 1554d5c65159SKalle Valo case HAL_ENCRYPT_TYPE_WEP_128: 1555d5c65159SKalle Valo case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4: 1556d5c65159SKalle Valo case HAL_ENCRYPT_TYPE_WAPI: 1557d5c65159SKalle Valo break; 1558d5c65159SKalle Valo } 1559d5c65159SKalle Valo 1560d5c65159SKalle Valo ath11k_warn(ar->ab, "unsupported encryption type %d for mic len\n", enctype); 1561d5c65159SKalle Valo return 0; 1562d5c65159SKalle Valo } 1563d5c65159SKalle Valo 1564d5c65159SKalle Valo static int ath11k_dp_rx_crypto_param_len(struct ath11k *ar, 1565d5c65159SKalle Valo enum hal_encrypt_type enctype) 1566d5c65159SKalle Valo { 1567d5c65159SKalle Valo switch (enctype) { 1568d5c65159SKalle Valo case HAL_ENCRYPT_TYPE_OPEN: 1569d5c65159SKalle Valo return 0; 1570d5c65159SKalle Valo case HAL_ENCRYPT_TYPE_TKIP_NO_MIC: 1571d5c65159SKalle Valo case HAL_ENCRYPT_TYPE_TKIP_MIC: 1572d5c65159SKalle Valo return IEEE80211_TKIP_IV_LEN; 1573d5c65159SKalle Valo case HAL_ENCRYPT_TYPE_CCMP_128: 1574d5c65159SKalle 
Valo return IEEE80211_CCMP_HDR_LEN; 1575d5c65159SKalle Valo case HAL_ENCRYPT_TYPE_CCMP_256: 1576d5c65159SKalle Valo return IEEE80211_CCMP_256_HDR_LEN; 1577d5c65159SKalle Valo case HAL_ENCRYPT_TYPE_GCMP_128: 1578d5c65159SKalle Valo case HAL_ENCRYPT_TYPE_AES_GCMP_256: 1579d5c65159SKalle Valo return IEEE80211_GCMP_HDR_LEN; 1580d5c65159SKalle Valo case HAL_ENCRYPT_TYPE_WEP_40: 1581d5c65159SKalle Valo case HAL_ENCRYPT_TYPE_WEP_104: 1582d5c65159SKalle Valo case HAL_ENCRYPT_TYPE_WEP_128: 1583d5c65159SKalle Valo case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4: 1584d5c65159SKalle Valo case HAL_ENCRYPT_TYPE_WAPI: 1585d5c65159SKalle Valo break; 1586d5c65159SKalle Valo } 1587d5c65159SKalle Valo 1588d5c65159SKalle Valo ath11k_warn(ar->ab, "unsupported encryption type %d\n", enctype); 1589d5c65159SKalle Valo return 0; 1590d5c65159SKalle Valo } 1591d5c65159SKalle Valo 1592d5c65159SKalle Valo static int ath11k_dp_rx_crypto_icv_len(struct ath11k *ar, 1593d5c65159SKalle Valo enum hal_encrypt_type enctype) 1594d5c65159SKalle Valo { 1595d5c65159SKalle Valo switch (enctype) { 1596d5c65159SKalle Valo case HAL_ENCRYPT_TYPE_OPEN: 1597d5c65159SKalle Valo case HAL_ENCRYPT_TYPE_CCMP_128: 1598d5c65159SKalle Valo case HAL_ENCRYPT_TYPE_CCMP_256: 1599d5c65159SKalle Valo case HAL_ENCRYPT_TYPE_GCMP_128: 1600d5c65159SKalle Valo case HAL_ENCRYPT_TYPE_AES_GCMP_256: 1601d5c65159SKalle Valo return 0; 1602d5c65159SKalle Valo case HAL_ENCRYPT_TYPE_TKIP_NO_MIC: 1603d5c65159SKalle Valo case HAL_ENCRYPT_TYPE_TKIP_MIC: 1604d5c65159SKalle Valo return IEEE80211_TKIP_ICV_LEN; 1605d5c65159SKalle Valo case HAL_ENCRYPT_TYPE_WEP_40: 1606d5c65159SKalle Valo case HAL_ENCRYPT_TYPE_WEP_104: 1607d5c65159SKalle Valo case HAL_ENCRYPT_TYPE_WEP_128: 1608d5c65159SKalle Valo case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4: 1609d5c65159SKalle Valo case HAL_ENCRYPT_TYPE_WAPI: 1610d5c65159SKalle Valo break; 1611d5c65159SKalle Valo } 1612d5c65159SKalle Valo 1613d5c65159SKalle Valo ath11k_warn(ar->ab, "unsupported encryption type %d\n", enctype); 
1614d5c65159SKalle Valo return 0; 1615d5c65159SKalle Valo } 1616d5c65159SKalle Valo 1617d5c65159SKalle Valo static void ath11k_dp_rx_h_undecap_nwifi(struct ath11k *ar, 1618d5c65159SKalle Valo struct sk_buff *msdu, 1619d5c65159SKalle Valo u8 *first_hdr, 1620d5c65159SKalle Valo enum hal_encrypt_type enctype, 1621d5c65159SKalle Valo struct ieee80211_rx_status *status) 1622d5c65159SKalle Valo { 1623d5c65159SKalle Valo struct ieee80211_hdr *hdr; 1624d5c65159SKalle Valo size_t hdr_len; 1625d5c65159SKalle Valo u8 da[ETH_ALEN]; 1626d5c65159SKalle Valo u8 sa[ETH_ALEN]; 1627d5c65159SKalle Valo 1628d5c65159SKalle Valo /* pull decapped header and copy SA & DA */ 1629d5c65159SKalle Valo hdr = (struct ieee80211_hdr *)msdu->data; 1630d5c65159SKalle Valo ether_addr_copy(da, ieee80211_get_DA(hdr)); 1631d5c65159SKalle Valo ether_addr_copy(sa, ieee80211_get_SA(hdr)); 1632d5c65159SKalle Valo skb_pull(msdu, ieee80211_hdrlen(hdr->frame_control)); 1633d5c65159SKalle Valo 1634d5c65159SKalle Valo /* push original 802.11 header */ 1635d5c65159SKalle Valo hdr = (struct ieee80211_hdr *)first_hdr; 1636d5c65159SKalle Valo hdr_len = ieee80211_hdrlen(hdr->frame_control); 1637d5c65159SKalle Valo 1638d5c65159SKalle Valo if (!(status->flag & RX_FLAG_IV_STRIPPED)) { 1639d5c65159SKalle Valo memcpy(skb_push(msdu, 1640d5c65159SKalle Valo ath11k_dp_rx_crypto_param_len(ar, enctype)), 1641d5c65159SKalle Valo (void *)hdr + hdr_len, 1642d5c65159SKalle Valo ath11k_dp_rx_crypto_param_len(ar, enctype)); 1643d5c65159SKalle Valo } 1644d5c65159SKalle Valo 1645d5c65159SKalle Valo memcpy(skb_push(msdu, hdr_len), hdr, hdr_len); 1646d5c65159SKalle Valo 1647d5c65159SKalle Valo /* original 802.11 header has a different DA and in 1648d5c65159SKalle Valo * case of 4addr it may also have different SA 1649d5c65159SKalle Valo */ 1650d5c65159SKalle Valo hdr = (struct ieee80211_hdr *)msdu->data; 1651d5c65159SKalle Valo ether_addr_copy(ieee80211_get_DA(hdr), da); 1652d5c65159SKalle Valo ether_addr_copy(ieee80211_get_SA(hdr), 
sa); 1653d5c65159SKalle Valo } 1654d5c65159SKalle Valo 1655d5c65159SKalle Valo static void ath11k_dp_rx_h_undecap_raw(struct ath11k *ar, struct sk_buff *msdu, 1656d5c65159SKalle Valo enum hal_encrypt_type enctype, 1657d5c65159SKalle Valo struct ieee80211_rx_status *status, 1658d5c65159SKalle Valo bool decrypted) 1659d5c65159SKalle Valo { 1660d5c65159SKalle Valo struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); 1661d5c65159SKalle Valo struct ieee80211_hdr *hdr; 1662d5c65159SKalle Valo size_t hdr_len; 1663d5c65159SKalle Valo size_t crypto_len; 1664d5c65159SKalle Valo 1665d5c65159SKalle Valo if (!rxcb->is_first_msdu || 1666d5c65159SKalle Valo !(rxcb->is_first_msdu && rxcb->is_last_msdu)) { 1667d5c65159SKalle Valo WARN_ON_ONCE(1); 1668d5c65159SKalle Valo return; 1669d5c65159SKalle Valo } 1670d5c65159SKalle Valo 1671d5c65159SKalle Valo skb_trim(msdu, msdu->len - FCS_LEN); 1672d5c65159SKalle Valo 1673d5c65159SKalle Valo if (!decrypted) 1674d5c65159SKalle Valo return; 1675d5c65159SKalle Valo 1676d5c65159SKalle Valo hdr = (void *)msdu->data; 1677d5c65159SKalle Valo 1678d5c65159SKalle Valo /* Tail */ 1679d5c65159SKalle Valo if (status->flag & RX_FLAG_IV_STRIPPED) { 1680d5c65159SKalle Valo skb_trim(msdu, msdu->len - 1681d5c65159SKalle Valo ath11k_dp_rx_crypto_mic_len(ar, enctype)); 1682d5c65159SKalle Valo 1683d5c65159SKalle Valo skb_trim(msdu, msdu->len - 1684d5c65159SKalle Valo ath11k_dp_rx_crypto_icv_len(ar, enctype)); 1685d5c65159SKalle Valo } else { 1686d5c65159SKalle Valo /* MIC */ 1687d5c65159SKalle Valo if (status->flag & RX_FLAG_MIC_STRIPPED) 1688d5c65159SKalle Valo skb_trim(msdu, msdu->len - 1689d5c65159SKalle Valo ath11k_dp_rx_crypto_mic_len(ar, enctype)); 1690d5c65159SKalle Valo 1691d5c65159SKalle Valo /* ICV */ 1692d5c65159SKalle Valo if (status->flag & RX_FLAG_ICV_STRIPPED) 1693d5c65159SKalle Valo skb_trim(msdu, msdu->len - 1694d5c65159SKalle Valo ath11k_dp_rx_crypto_icv_len(ar, enctype)); 1695d5c65159SKalle Valo } 1696d5c65159SKalle Valo 1697d5c65159SKalle 
Valo /* MMIC */ 1698d5c65159SKalle Valo if ((status->flag & RX_FLAG_MMIC_STRIPPED) && 1699d5c65159SKalle Valo !ieee80211_has_morefrags(hdr->frame_control) && 1700d5c65159SKalle Valo enctype == HAL_ENCRYPT_TYPE_TKIP_MIC) 1701d5c65159SKalle Valo skb_trim(msdu, msdu->len - IEEE80211_CCMP_MIC_LEN); 1702d5c65159SKalle Valo 1703d5c65159SKalle Valo /* Head */ 1704d5c65159SKalle Valo if (status->flag & RX_FLAG_IV_STRIPPED) { 1705d5c65159SKalle Valo hdr_len = ieee80211_hdrlen(hdr->frame_control); 1706d5c65159SKalle Valo crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype); 1707d5c65159SKalle Valo 1708d5c65159SKalle Valo memmove((void *)msdu->data + crypto_len, 1709d5c65159SKalle Valo (void *)msdu->data, hdr_len); 1710d5c65159SKalle Valo skb_pull(msdu, crypto_len); 1711d5c65159SKalle Valo } 1712d5c65159SKalle Valo } 1713d5c65159SKalle Valo 1714d5c65159SKalle Valo static void *ath11k_dp_rx_h_find_rfc1042(struct ath11k *ar, 1715d5c65159SKalle Valo struct sk_buff *msdu, 1716d5c65159SKalle Valo enum hal_encrypt_type enctype) 1717d5c65159SKalle Valo { 1718d5c65159SKalle Valo struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); 1719d5c65159SKalle Valo struct ieee80211_hdr *hdr; 1720d5c65159SKalle Valo size_t hdr_len, crypto_len; 1721d5c65159SKalle Valo void *rfc1042; 1722d5c65159SKalle Valo bool is_amsdu; 1723d5c65159SKalle Valo 1724d5c65159SKalle Valo is_amsdu = !(rxcb->is_first_msdu && rxcb->is_last_msdu); 1725d5c65159SKalle Valo hdr = (struct ieee80211_hdr *)ath11k_dp_rx_h_80211_hdr(rxcb->rx_desc); 1726d5c65159SKalle Valo rfc1042 = hdr; 1727d5c65159SKalle Valo 1728d5c65159SKalle Valo if (rxcb->is_first_msdu) { 1729d5c65159SKalle Valo hdr_len = ieee80211_hdrlen(hdr->frame_control); 1730d5c65159SKalle Valo crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype); 1731d5c65159SKalle Valo 1732d5c65159SKalle Valo rfc1042 += hdr_len + crypto_len; 1733d5c65159SKalle Valo } 1734d5c65159SKalle Valo 1735d5c65159SKalle Valo if (is_amsdu) 1736d5c65159SKalle Valo rfc1042 += sizeof(struct 
ath11k_dp_amsdu_subframe_hdr); 1737d5c65159SKalle Valo 1738d5c65159SKalle Valo return rfc1042; 1739d5c65159SKalle Valo } 1740d5c65159SKalle Valo 1741d5c65159SKalle Valo static void ath11k_dp_rx_h_undecap_eth(struct ath11k *ar, 1742d5c65159SKalle Valo struct sk_buff *msdu, 1743d5c65159SKalle Valo u8 *first_hdr, 1744d5c65159SKalle Valo enum hal_encrypt_type enctype, 1745d5c65159SKalle Valo struct ieee80211_rx_status *status) 1746d5c65159SKalle Valo { 1747d5c65159SKalle Valo struct ieee80211_hdr *hdr; 1748d5c65159SKalle Valo struct ethhdr *eth; 1749d5c65159SKalle Valo size_t hdr_len; 1750d5c65159SKalle Valo u8 da[ETH_ALEN]; 1751d5c65159SKalle Valo u8 sa[ETH_ALEN]; 1752d5c65159SKalle Valo void *rfc1042; 1753d5c65159SKalle Valo 1754d5c65159SKalle Valo rfc1042 = ath11k_dp_rx_h_find_rfc1042(ar, msdu, enctype); 1755d5c65159SKalle Valo if (WARN_ON_ONCE(!rfc1042)) 1756d5c65159SKalle Valo return; 1757d5c65159SKalle Valo 1758d5c65159SKalle Valo /* pull decapped header and copy SA & DA */ 1759d5c65159SKalle Valo eth = (struct ethhdr *)msdu->data; 1760d5c65159SKalle Valo ether_addr_copy(da, eth->h_dest); 1761d5c65159SKalle Valo ether_addr_copy(sa, eth->h_source); 1762d5c65159SKalle Valo skb_pull(msdu, sizeof(struct ethhdr)); 1763d5c65159SKalle Valo 1764d5c65159SKalle Valo /* push rfc1042/llc/snap */ 1765d5c65159SKalle Valo memcpy(skb_push(msdu, sizeof(struct ath11k_dp_rfc1042_hdr)), rfc1042, 1766d5c65159SKalle Valo sizeof(struct ath11k_dp_rfc1042_hdr)); 1767d5c65159SKalle Valo 1768d5c65159SKalle Valo /* push original 802.11 header */ 1769d5c65159SKalle Valo hdr = (struct ieee80211_hdr *)first_hdr; 1770d5c65159SKalle Valo hdr_len = ieee80211_hdrlen(hdr->frame_control); 1771d5c65159SKalle Valo 1772d5c65159SKalle Valo if (!(status->flag & RX_FLAG_IV_STRIPPED)) { 1773d5c65159SKalle Valo memcpy(skb_push(msdu, 1774d5c65159SKalle Valo ath11k_dp_rx_crypto_param_len(ar, enctype)), 1775d5c65159SKalle Valo (void *)hdr + hdr_len, 1776d5c65159SKalle Valo ath11k_dp_rx_crypto_param_len(ar, 
enctype)); 1777d5c65159SKalle Valo } 1778d5c65159SKalle Valo 1779d5c65159SKalle Valo memcpy(skb_push(msdu, hdr_len), hdr, hdr_len); 1780d5c65159SKalle Valo 1781d5c65159SKalle Valo /* original 802.11 header has a different DA and in 1782d5c65159SKalle Valo * case of 4addr it may also have different SA 1783d5c65159SKalle Valo */ 1784d5c65159SKalle Valo hdr = (struct ieee80211_hdr *)msdu->data; 1785d5c65159SKalle Valo ether_addr_copy(ieee80211_get_DA(hdr), da); 1786d5c65159SKalle Valo ether_addr_copy(ieee80211_get_SA(hdr), sa); 1787d5c65159SKalle Valo } 1788d5c65159SKalle Valo 1789d5c65159SKalle Valo static void ath11k_dp_rx_h_undecap(struct ath11k *ar, struct sk_buff *msdu, 1790d5c65159SKalle Valo struct hal_rx_desc *rx_desc, 1791d5c65159SKalle Valo enum hal_encrypt_type enctype, 1792d5c65159SKalle Valo struct ieee80211_rx_status *status, 1793d5c65159SKalle Valo bool decrypted) 1794d5c65159SKalle Valo { 1795d5c65159SKalle Valo u8 *first_hdr; 1796d5c65159SKalle Valo u8 decap; 1797d5c65159SKalle Valo 1798d5c65159SKalle Valo first_hdr = ath11k_dp_rx_h_80211_hdr(rx_desc); 1799d5c65159SKalle Valo decap = ath11k_dp_rx_h_mpdu_start_decap_type(rx_desc); 1800d5c65159SKalle Valo 1801d5c65159SKalle Valo switch (decap) { 1802d5c65159SKalle Valo case DP_RX_DECAP_TYPE_NATIVE_WIFI: 1803d5c65159SKalle Valo ath11k_dp_rx_h_undecap_nwifi(ar, msdu, first_hdr, 1804d5c65159SKalle Valo enctype, status); 1805d5c65159SKalle Valo break; 1806d5c65159SKalle Valo case DP_RX_DECAP_TYPE_RAW: 1807d5c65159SKalle Valo ath11k_dp_rx_h_undecap_raw(ar, msdu, enctype, status, 1808d5c65159SKalle Valo decrypted); 1809d5c65159SKalle Valo break; 1810d5c65159SKalle Valo case DP_RX_DECAP_TYPE_ETHERNET2_DIX: 1811d5c65159SKalle Valo ath11k_dp_rx_h_undecap_eth(ar, msdu, first_hdr, 1812d5c65159SKalle Valo enctype, status); 1813d5c65159SKalle Valo break; 1814d5c65159SKalle Valo case DP_RX_DECAP_TYPE_8023: 1815d5c65159SKalle Valo /* TODO: Handle undecap for these formats */ 1816d5c65159SKalle Valo break; 
1817d5c65159SKalle Valo } 1818d5c65159SKalle Valo } 1819d5c65159SKalle Valo 1820d5c65159SKalle Valo static void ath11k_dp_rx_h_mpdu(struct ath11k *ar, 1821d5c65159SKalle Valo struct sk_buff_head *amsdu_list, 1822d5c65159SKalle Valo struct hal_rx_desc *rx_desc, 1823d5c65159SKalle Valo struct ieee80211_rx_status *rx_status) 1824d5c65159SKalle Valo { 1825d5c65159SKalle Valo struct ieee80211_hdr *hdr; 1826d5c65159SKalle Valo enum hal_encrypt_type enctype; 1827d5c65159SKalle Valo struct sk_buff *last_msdu; 1828d5c65159SKalle Valo struct sk_buff *msdu; 1829d5c65159SKalle Valo struct ath11k_skb_rxcb *last_rxcb; 1830d5c65159SKalle Valo bool is_decrypted; 1831d5c65159SKalle Valo u32 err_bitmap; 1832d5c65159SKalle Valo u8 *qos; 1833d5c65159SKalle Valo 1834d5c65159SKalle Valo if (skb_queue_empty(amsdu_list)) 1835d5c65159SKalle Valo return; 1836d5c65159SKalle Valo 1837d5c65159SKalle Valo hdr = (struct ieee80211_hdr *)ath11k_dp_rx_h_80211_hdr(rx_desc); 1838d5c65159SKalle Valo 1839d5c65159SKalle Valo /* Each A-MSDU subframe will use the original header as the base and be 1840d5c65159SKalle Valo * reported as a separate MSDU so strip the A-MSDU bit from QoS Ctl. 1841d5c65159SKalle Valo */ 1842d5c65159SKalle Valo if (ieee80211_is_data_qos(hdr->frame_control)) { 1843d5c65159SKalle Valo qos = ieee80211_get_qos_ctl(hdr); 1844d5c65159SKalle Valo qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT; 1845d5c65159SKalle Valo } 1846d5c65159SKalle Valo 1847d5c65159SKalle Valo is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_desc); 1848d5c65159SKalle Valo enctype = ath11k_dp_rx_h_mpdu_start_enctype(rx_desc); 1849d5c65159SKalle Valo 1850d5c65159SKalle Valo /* Some attention flags are valid only in the last MSDU. 
*/ 1851d5c65159SKalle Valo last_msdu = skb_peek_tail(amsdu_list); 1852d5c65159SKalle Valo last_rxcb = ATH11K_SKB_RXCB(last_msdu); 1853d5c65159SKalle Valo 1854d5c65159SKalle Valo err_bitmap = ath11k_dp_rx_h_attn_mpdu_err(last_rxcb->rx_desc); 1855d5c65159SKalle Valo 1856d5c65159SKalle Valo /* Clear per-MPDU flags while leaving per-PPDU flags intact. */ 1857d5c65159SKalle Valo rx_status->flag &= ~(RX_FLAG_FAILED_FCS_CRC | 1858d5c65159SKalle Valo RX_FLAG_MMIC_ERROR | 1859d5c65159SKalle Valo RX_FLAG_DECRYPTED | 1860d5c65159SKalle Valo RX_FLAG_IV_STRIPPED | 1861d5c65159SKalle Valo RX_FLAG_MMIC_STRIPPED); 1862d5c65159SKalle Valo 1863d5c65159SKalle Valo if (err_bitmap & DP_RX_MPDU_ERR_FCS) 1864d5c65159SKalle Valo rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; 1865d5c65159SKalle Valo 1866d5c65159SKalle Valo if (err_bitmap & DP_RX_MPDU_ERR_TKIP_MIC) 1867d5c65159SKalle Valo rx_status->flag |= RX_FLAG_MMIC_ERROR; 1868d5c65159SKalle Valo 1869d5c65159SKalle Valo if (is_decrypted) 1870d5c65159SKalle Valo rx_status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MMIC_STRIPPED | 1871d5c65159SKalle Valo RX_FLAG_MIC_STRIPPED | RX_FLAG_ICV_STRIPPED; 1872d5c65159SKalle Valo 1873d5c65159SKalle Valo skb_queue_walk(amsdu_list, msdu) { 1874d5c65159SKalle Valo ath11k_dp_rx_h_csum_offload(msdu); 1875d5c65159SKalle Valo ath11k_dp_rx_h_undecap(ar, msdu, rx_desc, 1876d5c65159SKalle Valo enctype, rx_status, is_decrypted); 1877d5c65159SKalle Valo } 1878d5c65159SKalle Valo } 1879d5c65159SKalle Valo 1880d5c65159SKalle Valo static void ath11k_dp_rx_h_rate(struct ath11k *ar, struct hal_rx_desc *rx_desc, 1881d5c65159SKalle Valo struct ieee80211_rx_status *rx_status) 1882d5c65159SKalle Valo { 1883d5c65159SKalle Valo struct ieee80211_supported_band *sband; 1884d5c65159SKalle Valo enum rx_msdu_start_pkt_type pkt_type; 1885d5c65159SKalle Valo u8 bw; 1886d5c65159SKalle Valo u8 rate_mcs, nss; 1887d5c65159SKalle Valo u8 sgi; 1888d5c65159SKalle Valo bool is_cck; 1889d5c65159SKalle Valo 1890d5c65159SKalle Valo pkt_type = 
ath11k_dp_rx_h_msdu_start_pkt_type(rx_desc); 1891d5c65159SKalle Valo bw = ath11k_dp_rx_h_msdu_start_rx_bw(rx_desc); 1892d5c65159SKalle Valo rate_mcs = ath11k_dp_rx_h_msdu_start_rate_mcs(rx_desc); 1893d5c65159SKalle Valo nss = ath11k_dp_rx_h_msdu_start_nss(rx_desc); 1894d5c65159SKalle Valo sgi = ath11k_dp_rx_h_msdu_start_sgi(rx_desc); 1895d5c65159SKalle Valo 1896d5c65159SKalle Valo switch (pkt_type) { 1897d5c65159SKalle Valo case RX_MSDU_START_PKT_TYPE_11A: 1898d5c65159SKalle Valo case RX_MSDU_START_PKT_TYPE_11B: 1899d5c65159SKalle Valo is_cck = (pkt_type == RX_MSDU_START_PKT_TYPE_11B); 1900d5c65159SKalle Valo sband = &ar->mac.sbands[rx_status->band]; 1901d5c65159SKalle Valo rx_status->rate_idx = ath11k_mac_hw_rate_to_idx(sband, rate_mcs, 1902d5c65159SKalle Valo is_cck); 1903d5c65159SKalle Valo break; 1904d5c65159SKalle Valo case RX_MSDU_START_PKT_TYPE_11N: 1905d5c65159SKalle Valo rx_status->encoding = RX_ENC_HT; 1906d5c65159SKalle Valo if (rate_mcs > ATH11K_HT_MCS_MAX) { 1907d5c65159SKalle Valo ath11k_warn(ar->ab, 1908d5c65159SKalle Valo "Received with invalid mcs in HT mode %d\n", 1909d5c65159SKalle Valo rate_mcs); 1910d5c65159SKalle Valo break; 1911d5c65159SKalle Valo } 1912d5c65159SKalle Valo rx_status->rate_idx = rate_mcs + (8 * (nss - 1)); 1913d5c65159SKalle Valo if (sgi) 1914d5c65159SKalle Valo rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI; 191539e81c6aSTamizh chelvam rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw); 1916d5c65159SKalle Valo break; 1917d5c65159SKalle Valo case RX_MSDU_START_PKT_TYPE_11AC: 1918d5c65159SKalle Valo rx_status->encoding = RX_ENC_VHT; 1919d5c65159SKalle Valo rx_status->rate_idx = rate_mcs; 1920d5c65159SKalle Valo if (rate_mcs > ATH11K_VHT_MCS_MAX) { 1921d5c65159SKalle Valo ath11k_warn(ar->ab, 1922d5c65159SKalle Valo "Received with invalid mcs in VHT mode %d\n", 1923d5c65159SKalle Valo rate_mcs); 1924d5c65159SKalle Valo break; 1925d5c65159SKalle Valo } 1926d5c65159SKalle Valo rx_status->nss = nss; 1927d5c65159SKalle Valo if (sgi) 
1928d5c65159SKalle Valo rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI; 192939e81c6aSTamizh chelvam rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw); 1930d5c65159SKalle Valo break; 1931d5c65159SKalle Valo case RX_MSDU_START_PKT_TYPE_11AX: 1932d5c65159SKalle Valo rx_status->rate_idx = rate_mcs; 1933d5c65159SKalle Valo if (rate_mcs > ATH11K_HE_MCS_MAX) { 1934d5c65159SKalle Valo ath11k_warn(ar->ab, 1935d5c65159SKalle Valo "Received with invalid mcs in HE mode %d\n", 1936d5c65159SKalle Valo rate_mcs); 1937d5c65159SKalle Valo break; 1938d5c65159SKalle Valo } 1939d5c65159SKalle Valo rx_status->encoding = RX_ENC_HE; 1940d5c65159SKalle Valo rx_status->nss = nss; 194139e81c6aSTamizh chelvam rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw); 1942d5c65159SKalle Valo break; 1943d5c65159SKalle Valo } 1944d5c65159SKalle Valo } 1945d5c65159SKalle Valo 1946d5c65159SKalle Valo static void ath11k_dp_rx_h_ppdu(struct ath11k *ar, struct hal_rx_desc *rx_desc, 1947d5c65159SKalle Valo struct ieee80211_rx_status *rx_status) 1948d5c65159SKalle Valo { 1949d5c65159SKalle Valo u8 channel_num; 1950d5c65159SKalle Valo 1951d5c65159SKalle Valo rx_status->freq = 0; 1952d5c65159SKalle Valo rx_status->rate_idx = 0; 1953d5c65159SKalle Valo rx_status->nss = 0; 1954d5c65159SKalle Valo rx_status->encoding = RX_ENC_LEGACY; 1955d5c65159SKalle Valo rx_status->bw = RATE_INFO_BW_20; 1956d5c65159SKalle Valo 1957d5c65159SKalle Valo rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL; 1958d5c65159SKalle Valo 1959d5c65159SKalle Valo channel_num = ath11k_dp_rx_h_msdu_start_freq(rx_desc); 1960d5c65159SKalle Valo 1961d5c65159SKalle Valo if (channel_num >= 1 && channel_num <= 14) { 1962d5c65159SKalle Valo rx_status->band = NL80211_BAND_2GHZ; 1963d5c65159SKalle Valo } else if (channel_num >= 36 && channel_num <= 173) { 1964d5c65159SKalle Valo rx_status->band = NL80211_BAND_5GHZ; 1965d5c65159SKalle Valo } else { 1966d5c65159SKalle Valo ath11k_warn(ar->ab, "Unsupported Channel info received %d\n", 1967d5c65159SKalle Valo 
channel_num); 1968d5c65159SKalle Valo return; 1969d5c65159SKalle Valo } 1970d5c65159SKalle Valo 1971d5c65159SKalle Valo rx_status->freq = ieee80211_channel_to_frequency(channel_num, 1972d5c65159SKalle Valo rx_status->band); 1973d5c65159SKalle Valo 1974d5c65159SKalle Valo ath11k_dp_rx_h_rate(ar, rx_desc, rx_status); 1975d5c65159SKalle Valo } 1976d5c65159SKalle Valo 1977d5c65159SKalle Valo static void ath11k_dp_rx_process_amsdu(struct ath11k *ar, 1978d5c65159SKalle Valo struct sk_buff_head *amsdu_list, 1979d5c65159SKalle Valo struct ieee80211_rx_status *rx_status) 1980d5c65159SKalle Valo { 1981d5c65159SKalle Valo struct sk_buff *first; 1982d5c65159SKalle Valo struct ath11k_skb_rxcb *rxcb; 1983d5c65159SKalle Valo struct hal_rx_desc *rx_desc; 1984d5c65159SKalle Valo bool first_mpdu; 1985d5c65159SKalle Valo 1986d5c65159SKalle Valo if (skb_queue_empty(amsdu_list)) 1987d5c65159SKalle Valo return; 1988d5c65159SKalle Valo 1989d5c65159SKalle Valo first = skb_peek(amsdu_list); 1990d5c65159SKalle Valo rxcb = ATH11K_SKB_RXCB(first); 1991d5c65159SKalle Valo rx_desc = rxcb->rx_desc; 1992d5c65159SKalle Valo 1993d5c65159SKalle Valo first_mpdu = ath11k_dp_rx_h_attn_first_mpdu(rx_desc); 1994d5c65159SKalle Valo if (first_mpdu) 1995d5c65159SKalle Valo ath11k_dp_rx_h_ppdu(ar, rx_desc, rx_status); 1996d5c65159SKalle Valo 1997d5c65159SKalle Valo ath11k_dp_rx_h_mpdu(ar, amsdu_list, rx_desc, rx_status); 1998d5c65159SKalle Valo } 1999d5c65159SKalle Valo 2000d5c65159SKalle Valo static char *ath11k_print_get_tid(struct ieee80211_hdr *hdr, char *out, 2001d5c65159SKalle Valo size_t size) 2002d5c65159SKalle Valo { 2003d5c65159SKalle Valo u8 *qc; 2004d5c65159SKalle Valo int tid; 2005d5c65159SKalle Valo 2006d5c65159SKalle Valo if (!ieee80211_is_data_qos(hdr->frame_control)) 2007d5c65159SKalle Valo return ""; 2008d5c65159SKalle Valo 2009d5c65159SKalle Valo qc = ieee80211_get_qos_ctl(hdr); 2010d5c65159SKalle Valo tid = *qc & IEEE80211_QOS_CTL_TID_MASK; 2011d5c65159SKalle Valo snprintf(out, size, "tid 
/* Log a frame's rx status and hand it to mac80211 via NAPI.
 * For HE frames a minimal radiotap HE header (only MCS and GI marked as
 * known) is pushed in front of the frame and RX_FLAG_RADIOTAP_HE is set so
 * mac80211 consumes it when building the radiotap dump.
 */
static void ath11k_dp_rx_deliver_msdu(struct ath11k *ar, struct napi_struct *napi,
				      struct sk_buff *msdu)
{
	static const struct ieee80211_radiotap_he known = {
		.data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN),
		.data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN),
	};
	struct ieee80211_rx_status *status;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
	struct ieee80211_radiotap_he *he = NULL;
	char tid[32];

	status = IEEE80211_SKB_RXCB(msdu);
	if (status->encoding == RX_ENC_HE) {
		/* skb_push() does not move the existing bytes, so hdr
		 * (computed above) still points at the 802.11 header.
		 */
		he = skb_push(msdu, sizeof(known));
		memcpy(he, &known, sizeof(known));
		status->flag |= RX_FLAG_RADIOTAP_HE;
	}

	ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
		   "rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
		   msdu,
		   msdu->len,
		   ieee80211_get_SA(hdr),
		   ath11k_print_get_tid(hdr, tid, sizeof(tid)),
		   is_multicast_ether_addr(ieee80211_get_DA(hdr)) ?
		   "mcast" : "ucast",
		   (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4,
		   (status->encoding == RX_ENC_LEGACY) ? "legacy" : "",
		   (status->encoding == RX_ENC_HT) ? "ht" : "",
		   (status->encoding == RX_ENC_VHT) ? "vht" : "",
		   (status->encoding == RX_ENC_HE) ? "he" : "",
		   (status->bw == RATE_INFO_BW_40) ? "40" : "",
		   (status->bw == RATE_INFO_BW_80) ? "80" : "",
		   (status->bw == RATE_INFO_BW_160) ? "160" : "",
		   status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "",
		   status->rate_idx,
		   status->nss,
		   status->freq,
		   status->band, status->flag,
		   !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
		   !!(status->flag & RX_FLAG_MMIC_ERROR),
		   !!(status->flag & RX_FLAG_AMSDU_MORE));

	/* TODO: trace rx packet */

	ieee80211_rx_napi(ar->hw, NULL, msdu, napi);
}
~RX_FLAG_ALLOW_SAME_PN; 2085d5c65159SKalle Valo } else { 2086d5c65159SKalle Valo rxs->flag |= RX_FLAG_ALLOW_SAME_PN; 2087d5c65159SKalle Valo } 2088d5c65159SKalle Valo rxs->flag |= RX_FLAG_SKIP_MONITOR; 2089d5c65159SKalle Valo 2090d5c65159SKalle Valo status = IEEE80211_SKB_RXCB(msdu); 2091d5c65159SKalle Valo *status = *rxs; 2092d5c65159SKalle Valo } 2093d5c65159SKalle Valo } 2094d5c65159SKalle Valo 2095d5c65159SKalle Valo static void ath11k_dp_rx_process_pending_packets(struct ath11k_base *ab, 2096d5c65159SKalle Valo struct napi_struct *napi, 2097d5c65159SKalle Valo struct sk_buff_head *pending_q, 2098d5c65159SKalle Valo int *quota, u8 mac_id) 2099d5c65159SKalle Valo { 2100d5c65159SKalle Valo struct ath11k *ar; 2101d5c65159SKalle Valo struct sk_buff *msdu; 2102d5c65159SKalle Valo struct ath11k_pdev *pdev; 2103d5c65159SKalle Valo 2104d5c65159SKalle Valo if (skb_queue_empty(pending_q)) 2105d5c65159SKalle Valo return; 2106d5c65159SKalle Valo 2107d5c65159SKalle Valo ar = ab->pdevs[mac_id].ar; 2108d5c65159SKalle Valo 2109d5c65159SKalle Valo rcu_read_lock(); 2110d5c65159SKalle Valo pdev = rcu_dereference(ab->pdevs_active[mac_id]); 2111d5c65159SKalle Valo 2112d5c65159SKalle Valo while (*quota && (msdu = __skb_dequeue(pending_q))) { 2113d5c65159SKalle Valo if (!pdev) { 2114d5c65159SKalle Valo dev_kfree_skb_any(msdu); 2115d5c65159SKalle Valo continue; 2116d5c65159SKalle Valo } 2117d5c65159SKalle Valo 2118d5c65159SKalle Valo ath11k_dp_rx_deliver_msdu(ar, napi, msdu); 2119d5c65159SKalle Valo (*quota)--; 2120d5c65159SKalle Valo } 2121d5c65159SKalle Valo rcu_read_unlock(); 2122d5c65159SKalle Valo } 2123d5c65159SKalle Valo 2124d5c65159SKalle Valo int ath11k_dp_process_rx(struct ath11k_base *ab, int mac_id, 2125d5c65159SKalle Valo struct napi_struct *napi, struct sk_buff_head *pending_q, 2126d5c65159SKalle Valo int budget) 2127d5c65159SKalle Valo { 2128d5c65159SKalle Valo struct ath11k *ar = ab->pdevs[mac_id].ar; 2129d5c65159SKalle Valo struct ath11k_pdev_dp *dp = &ar->dp; 
/* NAPI poll handler for one REO destination ring: first flush the deferred
 * pending_q, then reap new buffers from the ring, reassemble A-MSDUs and
 * deliver them (excess frames go back onto pending_q for the next poll).
 * Returns the number of budget units consumed.
 */
int ath11k_dp_process_rx(struct ath11k_base *ab, int mac_id,
			 struct napi_struct *napi, struct sk_buff_head *pending_q,
			 int budget)
{
	struct ath11k *ar = ab->pdevs[mac_id].ar;
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ieee80211_rx_status *rx_status = &dp->rx_status;
	struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
	struct hal_srng *srng;
	struct hal_rx_meta_info meta_info;
	struct sk_buff *msdu;
	struct sk_buff_head msdu_list;
	struct sk_buff_head amsdu_list;
	struct ath11k_skb_rxcb *rxcb;
	u32 *rx_desc;
	int buf_id;
	int num_buffs_reaped = 0;
	int quota = budget;
	int ret;
	bool done = false;

	/* Process any pending packets from the previous napi poll.
	 * Note: All msdu's in this pending_q corresponds to the same mac id
	 * due to pdev based reo dest mapping and also since each irq group id
	 * maps to specific reo dest ring.
	 */
	ath11k_dp_rx_process_pending_packets(ab, napi, pending_q, &quota,
					     mac_id);

	/* If all quota is exhausted by processing the pending_q,
	 * Wait for the next napi poll to reap the new info
	 */
	if (!quota)
		goto exit;

	__skb_queue_head_init(&msdu_list);

	srng = &ab->hal.srng_list[dp->reo_dst_ring.ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

try_again:
	while ((rx_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
		memset(&meta_info, 0, sizeof(meta_info));
		ath11k_hal_rx_parse_dst_ring_desc(ab, rx_desc, &meta_info);

		/* The cookie maps the hardware descriptor back to the skb
		 * registered in the refill ring's idr.
		 */
		buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
				   meta_info.msdu_meta.cookie);
		spin_lock_bh(&rx_ring->idr_lock);
		msdu = idr_find(&rx_ring->bufs_idr, buf_id);
		if (!msdu) {
			ath11k_warn(ab, "frame rx with invalid buf_id %d\n",
				    buf_id);
			spin_unlock_bh(&rx_ring->idr_lock);
			continue;
		}

		idr_remove(&rx_ring->bufs_idr, buf_id);
		spin_unlock_bh(&rx_ring->idr_lock);

		rxcb = ATH11K_SKB_RXCB(msdu);
		dma_unmap_single(ab->dev, rxcb->paddr,
				 msdu->len + skb_tailroom(msdu),
				 DMA_FROM_DEVICE);

		num_buffs_reaped++;

		if (meta_info.push_reason !=
		    HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION) {
			/* TODO: Check if the msdu can be sent up for processing */
			dev_kfree_skb_any(msdu);
			ab->soc_stats.hal_reo_error[dp->reo_dst_ring.ring_id]++;
			continue;
		}

		rxcb->is_first_msdu = meta_info.msdu_meta.first;
		rxcb->is_last_msdu = meta_info.msdu_meta.last;
		rxcb->is_continuation = meta_info.msdu_meta.continuation;
		rxcb->mac_id = mac_id;
		__skb_queue_tail(&msdu_list, msdu);

		/* Stop reaping from the ring once quota is exhausted
		 * and we've received all msdu's in the AMSDU. The
		 * additional msdu's reaped in excess of quota here would
		 * be pushed into the pending queue to be processed during
		 * the next napi poll.
		 * Note: More profiling can be done to see the impact on
		 * pending_q and throughput during various traffic & density
		 * and how use of budget instead of remaining quota affects it.
		 */
		if (num_buffs_reaped >= quota && rxcb->is_last_msdu &&
		    !rxcb->is_continuation) {
			done = true;
			break;
		}
	}

	/* Hw might have updated the head pointer after we cached it.
	 * In this case, even though there are entries in the ring we'll
	 * get rx_desc NULL. Give the read another try with updated cached
	 * head pointer so that we can reap complete MPDU in the current
	 * rx processing.
	 */
	if (!done && ath11k_hal_srng_dst_num_free(ab, srng, true)) {
		ath11k_hal_srng_access_end(ab, srng);
		goto try_again;
	}

	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	if (!num_buffs_reaped)
		goto exit;

	/* Should we reschedule it later if we are not able to replenish all
	 * the buffers?
	 */
	ath11k_dp_rxbufs_replenish(ab, mac_id, rx_ring, num_buffs_reaped,
				   HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC);

	rcu_read_lock();
	if (!rcu_dereference(ab->pdevs_active[mac_id])) {
		__skb_queue_purge(&msdu_list);
		goto rcu_unlock;
	}

	/* Drop everything while CAC (channel availability check) runs. */
	if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) {
		__skb_queue_purge(&msdu_list);
		goto rcu_unlock;
	}

	while (!skb_queue_empty(&msdu_list)) {
		__skb_queue_head_init(&amsdu_list);
		ret = ath11k_dp_rx_retrieve_amsdu(ar, &msdu_list, &amsdu_list);
		if (ret) {
			if (ret == -EIO) {
				ath11k_err(ab, "rx ring got corrupted %d\n", ret);
				__skb_queue_purge(&msdu_list);
				/* Should stop processing any more rx in
				 * future from this ring?
				 */
				goto rcu_unlock;
			}

			/* A-MSDU retrieval got failed due to non-fatal condition,
			 * continue processing with the next msdu.
			 */
			continue;
		}

		ath11k_dp_rx_process_amsdu(ar, &amsdu_list, rx_status);

		ath11k_dp_rx_pre_deliver_amsdu(ar, &amsdu_list, rx_status);
		skb_queue_splice_tail(&amsdu_list, pending_q);
	}

	while (quota && (msdu = __skb_dequeue(pending_q))) {
		ath11k_dp_rx_deliver_msdu(ar, napi, msdu);
		quota--;
	}

rcu_unlock:
	rcu_read_unlock();
exit:
	return budget - quota;
}
/* Fold the counters of one monitor-status PPDU into the peer's cumulative
 * rx statistics (arsta->rx_stats). No-op when stats are not allocated.
 * Note: mutates ppdu_info's nss/mcs/tid for 11a/11b preambles so the
 * histogram updates below use well-defined indices.
 */
static void ath11k_dp_rx_update_peer_stats(struct ath11k_sta *arsta,
					   struct hal_rx_mon_ppdu_info *ppdu_info)
{
	struct ath11k_rx_peer_stats *rx_stats = arsta->rx_stats;
	u32 num_msdu;

	if (!rx_stats)
		return;

	num_msdu = ppdu_info->tcp_msdu_count + ppdu_info->tcp_ack_msdu_count +
		   ppdu_info->udp_msdu_count + ppdu_info->other_msdu_count;

	rx_stats->num_msdu += num_msdu;
	rx_stats->tcp_msdu_count += ppdu_info->tcp_msdu_count +
				    ppdu_info->tcp_ack_msdu_count;
	rx_stats->udp_msdu_count += ppdu_info->udp_msdu_count;
	rx_stats->other_msdu_count += ppdu_info->other_msdu_count;

	/* Legacy preambles carry no valid nss/mcs/tid information. */
	if (ppdu_info->preamble_type == HAL_RX_PREAMBLE_11A ||
	    ppdu_info->preamble_type == HAL_RX_PREAMBLE_11B) {
		ppdu_info->nss = 1;
		ppdu_info->mcs = HAL_RX_MAX_MCS;
		ppdu_info->tid = IEEE80211_NUM_TIDS;
	}

	/* Every histogram index is range-checked before use. */
	if (ppdu_info->nss > 0 && ppdu_info->nss <= HAL_RX_MAX_NSS)
		rx_stats->nss_count[ppdu_info->nss - 1] += num_msdu;

	if (ppdu_info->mcs <= HAL_RX_MAX_MCS)
		rx_stats->mcs_count[ppdu_info->mcs] += num_msdu;

	if (ppdu_info->gi < HAL_RX_GI_MAX)
		rx_stats->gi_count[ppdu_info->gi] += num_msdu;

	if (ppdu_info->bw < HAL_RX_BW_MAX)
		rx_stats->bw_count[ppdu_info->bw] += num_msdu;

	if (ppdu_info->ldpc < HAL_RX_SU_MU_CODING_MAX)
		rx_stats->coding_count[ppdu_info->ldpc] += num_msdu;

	if (ppdu_info->tid <= IEEE80211_NUM_TIDS)
		rx_stats->tid_count[ppdu_info->tid] += num_msdu;

	if (ppdu_info->preamble_type < HAL_RX_PREAMBLE_MAX)
		rx_stats->pream_cnt[ppdu_info->preamble_type] += num_msdu;

	if (ppdu_info->reception_type < HAL_RX_RECEPTION_TYPE_MAX)
		rx_stats->reception_type[ppdu_info->reception_type] += num_msdu;

	if (ppdu_info->is_stbc)
		rx_stats->stbc_count += num_msdu;

	if (ppdu_info->beamformed)
		rx_stats->beamformed_count += num_msdu;

	/* More than one FCS-ok MPDU in the PPDU implies an A-MPDU. */
	if (ppdu_info->num_mpdu_fcs_ok > 1)
		rx_stats->ampdu_msdu_count += num_msdu;
	else
		rx_stats->non_ampdu_msdu_count += num_msdu;

	rx_stats->num_mpdu_fcs_ok += ppdu_info->num_mpdu_fcs_ok;
	rx_stats->num_mpdu_fcs_err += ppdu_info->num_mpdu_fcs_err;

	arsta->rssi_comb = ppdu_info->rssi_comb;
	rx_stats->rx_duration += ppdu_info->rx_duration;
	arsta->rx_duration = rx_stats->rx_duration;
}
/* Allocate one monitor-status rx buffer: a DP_RX_BUFFER_SIZE skb whose data
 * start is aligned to DP_RX_BUFFER_ALIGN_SIZE, DMA-mapped bidirectionally
 * and registered in the ring's buffer idr. The DMA address is cached in the
 * skb's rxcb. Returns the skb or NULL on any failure (*buf_id only valid on
 * success).
 */
static struct sk_buff *ath11k_dp_rx_alloc_mon_status_buf(struct ath11k_base *ab,
							 struct dp_rxdma_ring *rx_ring,
							 int *buf_id, gfp_t gfp)
{
	struct sk_buff *skb;
	dma_addr_t paddr;

	/* Over-allocate by the alignment so the start can be adjusted. */
	skb = dev_alloc_skb(DP_RX_BUFFER_SIZE +
			    DP_RX_BUFFER_ALIGN_SIZE);

	if (!skb)
		goto fail_alloc_skb;

	if (!IS_ALIGNED((unsigned long)skb->data,
			DP_RX_BUFFER_ALIGN_SIZE)) {
		skb_pull(skb, PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) -
			 skb->data);
	}

	paddr = dma_map_single(ab->dev, skb->data,
			       skb->len + skb_tailroom(skb),
			       DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(ab->dev, paddr)))
		goto fail_free_skb;

	spin_lock_bh(&rx_ring->idr_lock);
	*buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0,
			    rx_ring->bufs_max, gfp);
	spin_unlock_bh(&rx_ring->idr_lock);
	if (*buf_id < 0)
		goto fail_dma_unmap;

	ATH11K_SKB_RXCB(skb)->paddr = paddr;
	return skb;

fail_dma_unmap:
	dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
			 DMA_BIDIRECTIONAL);
fail_free_skb:
	dev_kfree_skb_any(skb);
fail_alloc_skb:
	return NULL;
}
/* Replenish up to @req_entries buffers into the monitor status refill ring,
 * bounded by the ring's capacity and currently free source entries.
 * Returns the number of buffers actually placed on the ring.
 */
int ath11k_dp_rx_mon_status_bufs_replenish(struct ath11k_base *ab, int mac_id,
					   struct dp_rxdma_ring *rx_ring,
					   int req_entries,
					   enum hal_rx_buf_return_buf_manager mgr,
					   gfp_t gfp)
{
	struct hal_srng *srng;
	u32 *desc;
	struct sk_buff *skb;
	int num_free;
	int num_remain;
	int buf_id;
	u32 cookie;
	dma_addr_t paddr;

	req_entries = min(req_entries, rx_ring->bufs_max);

	srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	num_free = ath11k_hal_srng_src_num_free(ab, srng, true);

	req_entries = min(num_free, req_entries);
	num_remain = req_entries;

	while (num_remain > 0) {
		skb = ath11k_dp_rx_alloc_mon_status_buf(ab, rx_ring,
							&buf_id, gfp);
		if (!skb)
			break;
		paddr = ATH11K_SKB_RXCB(skb)->paddr;

		desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
		if (!desc)
			goto fail_desc_get;

		/* Cookie encodes pdev id + idr buf id for later lookup. */
		cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) |
			 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);

		num_remain--;

		ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr);
	}

	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	return req_entries - num_remain;

fail_desc_get:
	/* Undo the allocation that could not be placed on the ring. */
	spin_lock_bh(&rx_ring->idr_lock);
	idr_remove(&rx_ring->bufs_idr, buf_id);
	spin_unlock_bh(&rx_ring->idr_lock);
	dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
			 DMA_BIDIRECTIONAL);
	dev_kfree_skb_any(skb);
	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	return req_entries - num_remain;
}
/* Reap completed monitor status buffers from the refill ring into @skb_list,
 * replacing each consumed ring slot with a freshly allocated buffer in the
 * same pass. Consumes *budget; returns the number of ring entries processed.
 */
static int ath11k_dp_rx_reap_mon_status_ring(struct ath11k_base *ab, int mac_id,
					     int *budget, struct sk_buff_head *skb_list)
{
	struct ath11k *ar = ab->pdevs[mac_id].ar;
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct dp_rxdma_ring *rx_ring = &dp->rx_mon_status_refill_ring;
	struct hal_srng *srng;
	void *rx_mon_status_desc;
	struct sk_buff *skb;
	struct ath11k_skb_rxcb *rxcb;
	struct hal_tlv_hdr *tlv;
	u32 cookie;
	int buf_id;
	dma_addr_t paddr;
	u8 rbm;
	int num_buffs_reaped = 0;

	srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);
	while (*budget) {
		*budget -= 1;
		/* Peek only; the entry is consumed further below once the
		 * replacement buffer has been installed.
		 */
		rx_mon_status_desc =
			ath11k_hal_srng_src_peek(ab, srng);
		if (!rx_mon_status_desc)
			break;

		ath11k_hal_rx_buf_addr_info_get(rx_mon_status_desc, &paddr,
						&cookie, &rbm);
		if (paddr) {
			buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, cookie);

			spin_lock_bh(&rx_ring->idr_lock);
			skb = idr_find(&rx_ring->bufs_idr, buf_id);
			if (!skb) {
				ath11k_warn(ab, "rx monitor status with invalid buf_id %d\n",
					    buf_id);
				spin_unlock_bh(&rx_ring->idr_lock);
				continue;
			}

			idr_remove(&rx_ring->bufs_idr, buf_id);
			spin_unlock_bh(&rx_ring->idr_lock);

			rxcb = ATH11K_SKB_RXCB(skb);

			dma_sync_single_for_cpu(ab->dev, rxcb->paddr,
						skb->len + skb_tailroom(skb),
						DMA_FROM_DEVICE);

			dma_unmap_single(ab->dev, rxcb->paddr,
					 skb->len + skb_tailroom(skb),
					 DMA_BIDIRECTIONAL);

			/* Only hand up buffers the hardware marked done. */
			tlv = (struct hal_tlv_hdr *)skb->data;
			if (FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl) !=
			    HAL_RX_STATUS_BUFFER_DONE) {
				/* NOTE(review): this skb was removed from the
				 * idr and unmapped but is neither freed nor
				 * requeued here — looks like a leak; confirm
				 * against later upstream fixes.
				 */
				ath11k_hal_srng_src_get_next_entry(ab, srng);
				continue;
			}

			__skb_queue_tail(skb_list, skb);
		}

		skb = ath11k_dp_rx_alloc_mon_status_buf(ab, rx_ring,
							&buf_id, GFP_ATOMIC);

		if (!skb) {
			/* Allocation failed: park an empty addr-info entry
			 * on the ring and stop reaping.
			 */
			ath11k_hal_rx_buf_addr_info_set(rx_mon_status_desc, 0, 0,
							HAL_RX_BUF_RBM_SW3_BM);
			num_buffs_reaped++;
			break;
		}
		rxcb = ATH11K_SKB_RXCB(skb);

		cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) |
			 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);

		ath11k_hal_rx_buf_addr_info_set(rx_mon_status_desc, rxcb->paddr,
						cookie, HAL_RX_BUF_RBM_SW3_BM);
		ath11k_hal_srng_src_get_next_entry(ab, srng);
		num_buffs_reaped++;
	}
	ath11k_hal_srng_access_end(ab, srng);
	spin_unlock_bh(&srng->lock);

	return num_buffs_reaped;
}
/* NAPI handler for the monitor status ring: reap completed status buffers,
 * parse each into a hal_rx_mon_ppdu_info and fold the result into the
 * matching peer's rx statistics. Returns the number of buffers reaped.
 */
int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id,
				    struct napi_struct *napi, int budget)
{
	struct ath11k *ar = ab->pdevs[mac_id].ar;
	enum hal_rx_mon_status hal_status;
	struct sk_buff *skb;
	struct sk_buff_head skb_list;
	struct hal_rx_mon_ppdu_info ppdu_info;
	struct ath11k_peer *peer;
	struct ath11k_sta *arsta;
	int num_buffs_reaped = 0;

	__skb_queue_head_init(&skb_list);

	num_buffs_reaped = ath11k_dp_rx_reap_mon_status_ring(ab, mac_id, &budget,
							     &skb_list);
	if (!num_buffs_reaped)
		goto exit;

	while ((skb = __skb_dequeue(&skb_list))) {
		memset(&ppdu_info, 0, sizeof(ppdu_info));
		ppdu_info.peer_id = HAL_INVALID_PEERID;

		if (ath11k_debug_is_pktlog_rx_stats_enabled(ar))
			trace_ath11k_htt_rxdesc(ar, skb->data, DP_RX_BUFFER_SIZE);

		hal_status = ath11k_hal_rx_parse_mon_status(ab, &ppdu_info, skb);

		/* Only account fully parsed PPDUs with a valid peer id. */
		if (ppdu_info.peer_id == HAL_INVALID_PEERID ||
		    hal_status != HAL_RX_MON_STATUS_PPDU_DONE) {
			dev_kfree_skb_any(skb);
			continue;
		}

		rcu_read_lock();
		spin_lock_bh(&ab->base_lock);
		peer = ath11k_peer_find_by_id(ab, ppdu_info.peer_id);

		if (!peer || !peer->sta) {
			ath11k_dbg(ab, ATH11K_DBG_DATA,
				   "failed to find the peer with peer_id %d\n",
				   ppdu_info.peer_id);
			spin_unlock_bh(&ab->base_lock);
			rcu_read_unlock();
			dev_kfree_skb_any(skb);
			continue;
		}

		arsta = (struct ath11k_sta *)peer->sta->drv_priv;
		ath11k_dp_rx_update_peer_stats(arsta, &ppdu_info);

		if (ath11k_debug_is_pktlog_peer_valid(ar, peer->addr))
			trace_ath11k_htt_rxdesc(ar, skb->data, DP_RX_BUFFER_SIZE);

		spin_unlock_bh(&ab->base_lock);
		rcu_read_unlock();

		dev_kfree_skb_any(skb);
	}
exit:
	return num_buffs_reaped;
}

/* Return a used rx link descriptor to the WBM release ring with @action.
 * (Function continues beyond this chunk.)
 */
static int ath11k_dp_rx_link_desc_return(struct ath11k_base *ab,
					 u32 *link_desc,
					 enum hal_wbm_rel_bm_act action)
{
	struct ath11k_dp *dp = &ab->dp;
	struct hal_srng *srng;
	u32 *desc;
	int ret = 0;

	srng = &ab->hal.srng_list[dp->wbm_desc_rel_ring.ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
	if (!desc) {
		/* WBM desc release ring is full; caller may retry later */
		ret = -ENOBUFS;
		goto exit;
	}

	ath11k_hal_rx_msdu_link_desc_set(ab, (void *)desc, (void *)link_desc,
					 action);

exit:
	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	return ret;
}

/* Fill in rx status for an rx fragment (always delivered in raw/802.11
 * decap mode): error flags, band/frequency from the channel number, rate
 * info, and trim of FCS plus (if HW decrypted) the trailing MIC.
 */
static void ath11k_dp_rx_frag_h_mpdu(struct ath11k *ar,
				     struct sk_buff *msdu,
				     struct hal_rx_desc *rx_desc,
				     struct ieee80211_rx_status *rx_status)
{
	u8 rx_channel;
	enum hal_encrypt_type enctype;
	bool is_decrypted;
	u32 err_bitmap;

	is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_desc);
	enctype = ath11k_dp_rx_h_mpdu_start_enctype(rx_desc);
	err_bitmap = ath11k_dp_rx_h_attn_mpdu_err(rx_desc);

	if (err_bitmap & DP_RX_MPDU_ERR_FCS)
		rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;

	if (err_bitmap & DP_RX_MPDU_ERR_TKIP_MIC)
		rx_status->flag |= RX_FLAG_MMIC_ERROR;

	/* Fragments are reported without HT/VHT/HE info; treat as legacy 20 MHz
	 * and do not report a signal value.
	 */
	rx_status->encoding = RX_ENC_LEGACY;
	rx_status->bw = RATE_INFO_BW_20;

	rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;

	rx_channel = ath11k_dp_rx_h_msdu_start_freq(rx_desc);

	/* Map the HW channel number onto a band; anything outside the known
	 * 2 GHz (1-14) and 5 GHz (36-173) ranges is dropped with a warning.
	 */
	if (rx_channel >= 1 && rx_channel <= 14) {
		rx_status->band = NL80211_BAND_2GHZ;
	} else if (rx_channel >= 36 && rx_channel <= 173) {
		rx_status->band = NL80211_BAND_5GHZ;
	} else {
		ath11k_warn(ar->ab, "Unsupported Channel info received %d\n",
			    rx_channel);
		return;
	}

	rx_status->freq = ieee80211_channel_to_frequency(rx_channel,
							 rx_status->band);
	ath11k_dp_rx_h_rate(ar, rx_desc, rx_status);

	/* Rx fragments are received in raw mode */
	skb_trim(msdu, msdu->len - FCS_LEN);

	if (is_decrypted) {
		/* HW already stripped decryption; drop the trailing MIC too */
		rx_status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MIC_STRIPPED;
		skb_trim(msdu, msdu->len -
			 ath11k_dp_rx_crypto_mic_len(ar, enctype));
	}
}

/* Reclaim one rx buffer referenced from the REO exception path. Non-fragment
 * (plain error) MSDUs are dropped; fragments are completed and delivered to
 * mac80211. Returns 0 if the buffer was reclaimed, -EINVAL on a bad buf_id.
 */
static int
ath11k_dp_process_rx_err_buf(struct ath11k *ar, struct napi_struct *napi,
			     int buf_id, bool frag)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
	struct ieee80211_rx_status rx_status = {0};
	struct sk_buff *msdu;
2717d5c65159SKalle Valo struct ath11k_skb_rxcb *rxcb; 2718d5c65159SKalle Valo struct ieee80211_rx_status *status; 2719d5c65159SKalle Valo struct hal_rx_desc *rx_desc; 2720d5c65159SKalle Valo u16 msdu_len; 2721d5c65159SKalle Valo 2722d5c65159SKalle Valo spin_lock_bh(&rx_ring->idr_lock); 2723d5c65159SKalle Valo msdu = idr_find(&rx_ring->bufs_idr, buf_id); 2724d5c65159SKalle Valo if (!msdu) { 2725d5c65159SKalle Valo ath11k_warn(ar->ab, "rx err buf with invalid buf_id %d\n", 2726d5c65159SKalle Valo buf_id); 2727d5c65159SKalle Valo spin_unlock_bh(&rx_ring->idr_lock); 2728d5c65159SKalle Valo return -EINVAL; 2729d5c65159SKalle Valo } 2730d5c65159SKalle Valo 2731d5c65159SKalle Valo idr_remove(&rx_ring->bufs_idr, buf_id); 2732d5c65159SKalle Valo spin_unlock_bh(&rx_ring->idr_lock); 2733d5c65159SKalle Valo 2734d5c65159SKalle Valo rxcb = ATH11K_SKB_RXCB(msdu); 2735d5c65159SKalle Valo dma_unmap_single(ar->ab->dev, rxcb->paddr, 2736d5c65159SKalle Valo msdu->len + skb_tailroom(msdu), 2737d5c65159SKalle Valo DMA_FROM_DEVICE); 2738d5c65159SKalle Valo 2739d5c65159SKalle Valo if (!frag) { 2740d5c65159SKalle Valo /* Process only rx fragments below, and drop 2741d5c65159SKalle Valo * msdu's indicated due to error reasons. 
2742d5c65159SKalle Valo */ 2743d5c65159SKalle Valo dev_kfree_skb_any(msdu); 2744d5c65159SKalle Valo return 0; 2745d5c65159SKalle Valo } 2746d5c65159SKalle Valo 2747d5c65159SKalle Valo rcu_read_lock(); 2748d5c65159SKalle Valo if (!rcu_dereference(ar->ab->pdevs_active[ar->pdev_idx])) { 2749d5c65159SKalle Valo dev_kfree_skb_any(msdu); 2750d5c65159SKalle Valo goto exit; 2751d5c65159SKalle Valo } 2752d5c65159SKalle Valo 2753d5c65159SKalle Valo if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) { 2754d5c65159SKalle Valo dev_kfree_skb_any(msdu); 2755d5c65159SKalle Valo goto exit; 2756d5c65159SKalle Valo } 2757d5c65159SKalle Valo 2758d5c65159SKalle Valo rx_desc = (struct hal_rx_desc *)msdu->data; 2759d5c65159SKalle Valo msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(rx_desc); 2760d5c65159SKalle Valo skb_put(msdu, HAL_RX_DESC_SIZE + msdu_len); 2761d5c65159SKalle Valo skb_pull(msdu, HAL_RX_DESC_SIZE); 2762d5c65159SKalle Valo 2763d5c65159SKalle Valo ath11k_dp_rx_frag_h_mpdu(ar, msdu, rx_desc, &rx_status); 2764d5c65159SKalle Valo 2765d5c65159SKalle Valo status = IEEE80211_SKB_RXCB(msdu); 2766d5c65159SKalle Valo 2767d5c65159SKalle Valo *status = rx_status; 2768d5c65159SKalle Valo 2769d5c65159SKalle Valo ath11k_dp_rx_deliver_msdu(ar, napi, msdu); 2770d5c65159SKalle Valo 2771d5c65159SKalle Valo exit: 2772d5c65159SKalle Valo rcu_read_unlock(); 2773d5c65159SKalle Valo return 0; 2774d5c65159SKalle Valo } 2775d5c65159SKalle Valo 2776d5c65159SKalle Valo int ath11k_dp_process_rx_err(struct ath11k_base *ab, struct napi_struct *napi, 2777d5c65159SKalle Valo int budget) 2778d5c65159SKalle Valo { 2779d5c65159SKalle Valo struct hal_rx_msdu_meta meta[HAL_NUM_RX_MSDUS_PER_LINK_DESC]; 2780d5c65159SKalle Valo struct dp_link_desc_bank *link_desc_banks; 2781d5c65159SKalle Valo enum hal_rx_buf_return_buf_manager rbm; 2782d5c65159SKalle Valo int tot_n_bufs_reaped, quota, ret, i; 2783d5c65159SKalle Valo int n_bufs_reaped[MAX_RADIOS] = {0}; 2784d5c65159SKalle Valo struct hal_rx_meta_info meta_info; 
	struct dp_rxdma_ring *rx_ring;
	struct dp_srng *reo_except;
	u32 desc_bank, num_msdus;
	struct hal_srng *srng;
	struct ath11k_dp *dp;
	void *link_desc_va;
	int buf_id, mac_id;
	struct ath11k *ar;
	dma_addr_t paddr;
	u32 *desc;
	bool is_frag;

	tot_n_bufs_reaped = 0;
	quota = budget;

	dp = &ab->dp;
	reo_except = &dp->reo_except_ring;
	link_desc_banks = dp->link_desc_banks;

	srng = &ab->hal.srng_list[reo_except->ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	while (budget &&
	       (desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
		ab->soc_stats.err_ring_pkts++;
		ret = ath11k_hal_desc_reo_parse_err(ab, desc, &paddr,
						    &desc_bank);
		if (ret) {
			ath11k_warn(ab, "failed to parse error reo desc %d\n",
				    ret);
			continue;
		}
		/* Translate the link desc DMA address into its CPU address
		 * within the matching link descriptor bank.
		 */
		link_desc_va = link_desc_banks[desc_bank].vaddr +
			       (paddr - link_desc_banks[desc_bank].paddr);
		ath11k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, meta,
						 &rbm);
		if (rbm != HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST &&
		    rbm != HAL_RX_BUF_RBM_SW3_BM) {
			ab->soc_stats.invalid_rbm++;
			ath11k_warn(ab, "invalid return buffer manager %d\n", rbm);
			ath11k_dp_rx_link_desc_return(ab, desc,
						      HAL_WBM_REL_BM_ACT_REL_MSDU);
			continue;
		}

		memset(&meta_info, 0, sizeof(meta_info));
		ath11k_hal_rx_parse_dst_ring_desc(ab, desc, &meta_info);

		is_frag = meta_info.mpdu_meta.frag;

		/* Return the link desc back to wbm idle list */
		ath11k_dp_rx_link_desc_return(ab, desc,
					      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);

		/* The cookie encodes both the owning pdev and the buffer id */
		for (i = 0; i < num_msdus; i++) {
			buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
					   meta[i].cookie);

			mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID,
					   meta[i].cookie);

			ar = ab->pdevs[mac_id].ar;

			if (!ath11k_dp_process_rx_err_buf(ar, napi, buf_id,
							  is_frag)) {
				n_bufs_reaped[mac_id]++;
				tot_n_bufs_reaped++;
			}
		}

		if (tot_n_bufs_reaped >= quota) {
			tot_n_bufs_reaped = quota;
			goto exit;
		}

		/* Recompute remaining budget for the next ring entry */
		budget = quota - tot_n_bufs_reaped;
	}

exit:
	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	/* Refill each pdev's rx ring with as many buffers as were reaped */
	for (i = 0; i < ab->num_radios; i++) {
		if (!n_bufs_reaped[i])
			continue;

		ar = ab->pdevs[i].ar;
		rx_ring = &ar->dp.rx_refill_buf_ring;

		ath11k_dp_rxbufs_replenish(ab, i, rx_ring, n_bufs_reaped[i],
					   HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC);
	}

	return tot_n_bufs_reaped;
}

/* Drop the continuation buffers of an oversized (scatter-gather) MSDU that
 * hit a NULL queue descriptor error: free the first n_buffs matching skbs
 * from msdu_list, where n_buffs is derived from the remaining msdu_len.
 */
static void ath11k_dp_rx_null_q_desc_sg_drop(struct ath11k *ar,
					     int msdu_len,
					     struct sk_buff_head *msdu_list)
{
	struct sk_buff *skb, *tmp;
	struct ath11k_skb_rxcb *rxcb;
	int n_buffs;

	n_buffs = DIV_ROUND_UP(msdu_len,
			       (DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE));

	skb_queue_walk_safe(msdu_list, skb, tmp) {
		rxcb = ATH11K_SKB_RXCB(skb);
		/* Only drop buffers flagged with the same REO error code */
		if (rxcb->err_rel_src == HAL_WBM_REL_SRC_MODULE_REO &&
		    rxcb->err_code == HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO) {
			if (!n_buffs)
				break;
			__skb_unlink(skb, msdu_list);
			dev_kfree_skb_any(skb);
			n_buffs--;
		}
	}
}

/* Recover an MSDU that failed with a NULL queue descriptor REO error and
 * prepare it for delivery. Returns 0 when the caller should deliver msdu,
 * or a negative errno when it must be dropped.
 */
static int ath11k_dp_rx_h_null_q_desc(struct ath11k *ar, struct sk_buff *msdu,
				      struct ieee80211_rx_status *status,
				      struct sk_buff_head *msdu_list)
{
	struct sk_buff_head amsdu_list;
	u16 msdu_len;
	struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
	u8 l3pad_bytes;
	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);

	msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(desc);

	/* MSDU spans multiple rx buffers: drop its continuation buffers too */
	if ((msdu_len + HAL_RX_DESC_SIZE) > DP_RX_BUFFER_SIZE) {
		/* First buffer will be freed by the caller, so deduct its length */
		msdu_len = msdu_len - (DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE);
		ath11k_dp_rx_null_q_desc_sg_drop(ar, msdu_len, msdu_list);
		return -EINVAL;
	}

	if (!ath11k_dp_rx_h_attn_msdu_done(desc)) {
		ath11k_warn(ar->ab,
			    "msdu_done bit not set in null_q_des processing\n");
		__skb_queue_purge(msdu_list);
		return -EIO;
	}

	/* Handle NULL queue descriptor violations arising out of a missing
	 * REO queue for a given peer or a given TID. This typically
	 * may happen if a packet is received on a QOS enabled TID before the
	 * ADDBA negotiation for that TID, when the TID queue is setup. Or
	 * it may also happen for MC/BC frames if they are not routed to the
	 * non-QOS TID queue, in the absence of any other default TID queue.
	 * This error can show up both in a REO destination or WBM release ring.
	 */

	__skb_queue_head_init(&amsdu_list);

	rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(desc);
	rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(desc);

	l3pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(desc);

	/* Bounds check before extending the skb to the reported length */
	if ((HAL_RX_DESC_SIZE + l3pad_bytes + msdu_len) > DP_RX_BUFFER_SIZE)
		return -EINVAL;

	skb_put(msdu, HAL_RX_DESC_SIZE + l3pad_bytes + msdu_len);
	skb_pull(msdu, HAL_RX_DESC_SIZE + l3pad_bytes);

	ath11k_dp_rx_h_ppdu(ar, desc, status);

	__skb_queue_tail(&amsdu_list, msdu);

	ath11k_dp_rx_h_mpdu(ar, &amsdu_list, desc, status);

	/* Please note that caller will have access to msdu and completing
	 * rx with mac80211. Need not worry about cleaning up amsdu_list.
	 */

	return 0;
}

/* Dispatch a WBM-released MSDU carrying a REO error code. Returns true if
 * the caller should drop the msdu, false if it has been prepared for
 * delivery.
 */
static bool ath11k_dp_rx_h_reo_err(struct ath11k *ar, struct sk_buff *msdu,
				   struct ieee80211_rx_status *status,
				   struct sk_buff_head *msdu_list)
{
	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
	bool drop = false;

	ar->ab->soc_stats.reo_error[rxcb->err_code]++;

	switch (rxcb->err_code) {
	case HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO:
		if (ath11k_dp_rx_h_null_q_desc(ar, msdu, status, msdu_list))
			drop = true;
		break;
	default:
		/* TODO: Review other errors and process them to mac80211
		 * as appropriate.
		 */
		drop = true;
		break;
	}

	return drop;
}

/* Recover an MSDU dropped by RXDMA for a TKIP MIC failure: strip the rx
 * descriptor and l3 padding, fill in PPDU status, mark the MIC error and
 * undecap so mac80211 can run countermeasures.
 */
static void ath11k_dp_rx_h_tkip_mic_err(struct ath11k *ar, struct sk_buff *msdu,
					struct ieee80211_rx_status *status)
{
	u16 msdu_len;
	struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
	u8 l3pad_bytes;
	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);

	rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(desc);
	rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(desc);

	l3pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(desc);
	msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(desc);
	/* NOTE(review): msdu_len is HW-reported and not validated against
	 * DP_RX_BUFFER_SIZE before skb_put() here — confirm it cannot exceed
	 * the buffer on this path.
	 */
	skb_put(msdu, HAL_RX_DESC_SIZE + l3pad_bytes + msdu_len);
	skb_pull(msdu, HAL_RX_DESC_SIZE + l3pad_bytes);

	ath11k_dp_rx_h_ppdu(ar, desc, status);

	status->flag |= (RX_FLAG_MMIC_STRIPPED | RX_FLAG_MMIC_ERROR |
			 RX_FLAG_DECRYPTED);

	ath11k_dp_rx_h_undecap(ar, msdu, desc,
			       HAL_ENCRYPT_TYPE_TKIP_MIC, status, false);
}

/* Dispatch a WBM-released MSDU carrying an RXDMA error code. Returns true
 * if the caller should drop the msdu.
 */
static bool ath11k_dp_rx_h_rxdma_err(struct ath11k *ar, struct sk_buff *msdu,
				     struct ieee80211_rx_status *status)
{
	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
	bool drop = false;

	ar->ab->soc_stats.rxdma_error[rxcb->err_code]++;

	switch (rxcb->err_code) {
	case HAL_REO_ENTR_RING_RXDMA_ECODE_TKIP_MIC_ERR:
		ath11k_dp_rx_h_tkip_mic_err(ar, msdu, status);
		break;
	default:
		/* TODO: Review other rxdma error code to check if anything is
		 * worth reporting to mac80211
		 */
		drop = true;
		break;
	}

	return drop;
}

/* Route a WBM error-released MSDU to the REO or RXDMA error handler based
 * on its release source, then either drop it or deliver it to mac80211.
 */
static void ath11k_dp_rx_wbm_err(struct ath11k *ar,
				 struct napi_struct *napi,
				 struct sk_buff *msdu,
				 struct sk_buff_head *msdu_list)
{
	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
	struct ieee80211_rx_status rxs = {0};
	struct ieee80211_rx_status *status;
	bool drop = true;

	switch (rxcb->err_rel_src) {
	case HAL_WBM_REL_SRC_MODULE_REO:
		drop = ath11k_dp_rx_h_reo_err(ar, msdu, &rxs, msdu_list);
		break;
	case HAL_WBM_REL_SRC_MODULE_RXDMA:
		drop = ath11k_dp_rx_h_rxdma_err(ar, msdu, &rxs);
		break;
	default:
		/* msdu will get freed */
		break;
	}

	if (drop) {
		dev_kfree_skb_any(msdu);
		return;
	}

	status = IEEE80211_SKB_RXCB(msdu);
	*status = rxs;

	ath11k_dp_rx_deliver_msdu(ar, napi, msdu);
}

/* NAPI handler for the WBM error release ring: reap error-released rx
 * buffers, sort them into per-pdev lists, replenish the refill rings and
 * run the per-msdu WBM error handler. Returns the number of buffers reaped.
 */
int ath11k_dp_rx_process_wbm_err(struct ath11k_base *ab,
				 struct napi_struct *napi, int budget)
{
	struct ath11k *ar;
	struct ath11k_dp *dp = &ab->dp;
	struct dp_rxdma_ring *rx_ring;
	struct hal_rx_wbm_rel_info err_info;
	struct hal_srng *srng;
	struct sk_buff *msdu;
	struct sk_buff_head msdu_list[MAX_RADIOS];
	struct ath11k_skb_rxcb *rxcb;
	u32 *rx_desc;
	int buf_id, mac_id;
	int num_buffs_reaped[MAX_RADIOS] = {0};
	int total_num_buffs_reaped = 0;
	int ret, i;

	for (i = 0; i < MAX_RADIOS; i++)
		__skb_queue_head_init(&msdu_list[i]);

	srng = &ab->hal.srng_list[dp->rx_rel_ring.ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	while (budget) {
		rx_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng);
		if (!rx_desc)
			break;

		ret = ath11k_hal_wbm_desc_parse_err(ab, rx_desc, &err_info);
		if (ret) {
			ath11k_warn(ab,
				    "failed to parse rx error in wbm_rel ring desc %d\n",
				    ret);
			continue;
		}

		/* The cookie encodes both the owning pdev and the buffer id */
		buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, err_info.cookie);
		mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, err_info.cookie);

		ar = ab->pdevs[mac_id].ar;
		rx_ring = &ar->dp.rx_refill_buf_ring;

		spin_lock_bh(&rx_ring->idr_lock);
		msdu = idr_find(&rx_ring->bufs_idr, buf_id);
		if (!msdu) {
			ath11k_warn(ab, "frame rx with invalid buf_id %d pdev %d\n",
				    buf_id, mac_id);
			spin_unlock_bh(&rx_ring->idr_lock);
			continue;
		}

		idr_remove(&rx_ring->bufs_idr, buf_id);
		spin_unlock_bh(&rx_ring->idr_lock);

		rxcb = ATH11K_SKB_RXCB(msdu);
		dma_unmap_single(ab->dev, rxcb->paddr,
				 msdu->len + skb_tailroom(msdu),
				 DMA_FROM_DEVICE);

		num_buffs_reaped[mac_id]++;
		total_num_buffs_reaped++;
		budget--;

		/* Only buffers released for a detected error are processed */
		if (err_info.push_reason !=
		    HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) {
			dev_kfree_skb_any(msdu);
			continue;
		}

		/* Stash the error info in the skb cb for the error handlers */
		rxcb->err_rel_src = err_info.err_rel_src;
		rxcb->err_code = err_info.err_code;
		rxcb->rx_desc = (struct hal_rx_desc *)msdu->data;
		__skb_queue_tail(&msdu_list[mac_id], msdu);
	}

	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	if (!total_num_buffs_reaped)
		goto done;

	for (i = 0; i < ab->num_radios; i++) {
		if (!num_buffs_reaped[i])
			continue;

		ar = ab->pdevs[i].ar;
		rx_ring = &ar->dp.rx_refill_buf_ring;

		ath11k_dp_rxbufs_replenish(ab, i, rx_ring, num_buffs_reaped[i],
					   HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC);
	}

	rcu_read_lock();
	for (i = 0; i < ab->num_radios; i++) {
		/* Drop everything for pdevs that are down or in CAC */
		if (!rcu_dereference(ab->pdevs_active[i])) {
			__skb_queue_purge(&msdu_list[i]);
			continue;
		}

		ar = ab->pdevs[i].ar;

		if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) {
			__skb_queue_purge(&msdu_list[i]);
			continue;
		}

		while ((msdu = __skb_dequeue(&msdu_list[i])) != NULL)
			ath11k_dp_rx_wbm_err(ar, napi, msdu, &msdu_list[i]);
	}
	rcu_read_unlock();
done:
	return total_num_buffs_reaped;
}

/* NAPI handler for the per-pdev RXDMA error destination ring: account the
 * error, free the rx buffers referenced by each link descriptor, return the
 * link descriptors to the WBM idle list and replenish the refill ring.
 * Returns the number of ring entries processed.
 */
int ath11k_dp_process_rxdma_err(struct ath11k_base *ab, int mac_id, int budget)
{
	struct ath11k *ar = ab->pdevs[mac_id].ar;
	struct dp_srng *err_ring = &ar->dp.rxdma_err_dst_ring;
	struct dp_rxdma_ring *rx_ring =
		&ar->dp.rx_refill_buf_ring;
	struct dp_link_desc_bank *link_desc_banks = ab->dp.link_desc_banks;
	struct hal_srng *srng;
	struct hal_rx_msdu_meta meta[HAL_NUM_RX_MSDUS_PER_LINK_DESC];
	enum hal_rx_buf_return_buf_manager rbm;
	enum hal_reo_entr_rxdma_ecode rxdma_err_code;
	struct ath11k_skb_rxcb *rxcb;
	struct sk_buff *skb;
	struct hal_reo_entrance_ring *entr_ring;
	void *desc;
	int num_buf_freed = 0;
	int quota = budget;
	dma_addr_t paddr;
	u32 desc_bank;
	void *link_desc_va;
	int num_msdus;
	int i;
	int buf_id;

	srng = &ab->hal.srng_list[err_ring->ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	/* NOTE(review): quota-- is evaluated even on the terminating pass, so
	 * the 'budget - quota' return can exceed the entries actually
	 * processed (e.g. budget + 1 when quota is exhausted) — confirm
	 * whether the NAPI caller tolerates this over-report.
	 */
	while (quota-- &&
	       (desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
		ath11k_hal_rx_reo_ent_paddr_get(ab, desc, &paddr, &desc_bank);

		entr_ring = (struct hal_reo_entrance_ring *)desc;
		rxdma_err_code =
			FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE,
				  entr_ring->info1);
		ab->soc_stats.rxdma_error[rxdma_err_code]++;

		/* Translate the link desc DMA address into its CPU address
		 * within the matching link descriptor bank.
		 */
		link_desc_va = link_desc_banks[desc_bank].vaddr +
			       (paddr - link_desc_banks[desc_bank].paddr);
		ath11k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, meta,
						 &rbm);

		/* Free every rx buffer referenced from this link descriptor */
		for (i = 0; i < num_msdus; i++) {
			buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
					   meta[i].cookie);

			spin_lock_bh(&rx_ring->idr_lock);
			skb = idr_find(&rx_ring->bufs_idr, buf_id);
			if (!skb) {
				ath11k_warn(ab, "rxdma error with invalid buf_id %d\n",
					    buf_id);
				spin_unlock_bh(&rx_ring->idr_lock);
				continue;
			}

			idr_remove(&rx_ring->bufs_idr, buf_id);
			spin_unlock_bh(&rx_ring->idr_lock);

			rxcb = ATH11K_SKB_RXCB(skb);
			dma_unmap_single(ab->dev, rxcb->paddr,
					 skb->len + skb_tailroom(skb),
					 DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);

			num_buf_freed++;
		}

		ath11k_dp_rx_link_desc_return(ab, desc,
					      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
	}

	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	if (num_buf_freed)
		ath11k_dp_rxbufs_replenish(ab, mac_id, rx_ring, num_buf_freed,
					   HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC);

	return budget - quota;
}

/* Drain the REO status ring and complete the matching queued REO commands:
 * parse each status TLV, look up the command by cmd_num, invoke its handler
 * and free the command entry.
 */
void ath11k_dp_process_reo_status(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
3281d5c65159SKalle Valo struct hal_srng *srng; 3282d5c65159SKalle Valo struct dp_reo_cmd *cmd, *tmp; 3283d5c65159SKalle Valo bool found = false; 3284d5c65159SKalle Valo u32 *reo_desc; 3285d5c65159SKalle Valo u16 tag; 3286d5c65159SKalle Valo struct hal_reo_status reo_status; 3287d5c65159SKalle Valo 3288d5c65159SKalle Valo srng = &ab->hal.srng_list[dp->reo_status_ring.ring_id]; 3289d5c65159SKalle Valo 3290d5c65159SKalle Valo memset(&reo_status, 0, sizeof(reo_status)); 3291d5c65159SKalle Valo 3292d5c65159SKalle Valo spin_lock_bh(&srng->lock); 3293d5c65159SKalle Valo 3294d5c65159SKalle Valo ath11k_hal_srng_access_begin(ab, srng); 3295d5c65159SKalle Valo 3296d5c65159SKalle Valo while ((reo_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) { 3297d5c65159SKalle Valo tag = FIELD_GET(HAL_SRNG_TLV_HDR_TAG, *reo_desc); 3298d5c65159SKalle Valo 3299d5c65159SKalle Valo switch (tag) { 3300d5c65159SKalle Valo case HAL_REO_GET_QUEUE_STATS_STATUS: 3301d5c65159SKalle Valo ath11k_hal_reo_status_queue_stats(ab, reo_desc, 3302d5c65159SKalle Valo &reo_status); 3303d5c65159SKalle Valo break; 3304d5c65159SKalle Valo case HAL_REO_FLUSH_QUEUE_STATUS: 3305d5c65159SKalle Valo ath11k_hal_reo_flush_queue_status(ab, reo_desc, 3306d5c65159SKalle Valo &reo_status); 3307d5c65159SKalle Valo break; 3308d5c65159SKalle Valo case HAL_REO_FLUSH_CACHE_STATUS: 3309d5c65159SKalle Valo ath11k_hal_reo_flush_cache_status(ab, reo_desc, 3310d5c65159SKalle Valo &reo_status); 3311d5c65159SKalle Valo break; 3312d5c65159SKalle Valo case HAL_REO_UNBLOCK_CACHE_STATUS: 3313d5c65159SKalle Valo ath11k_hal_reo_unblk_cache_status(ab, reo_desc, 3314d5c65159SKalle Valo &reo_status); 3315d5c65159SKalle Valo break; 3316d5c65159SKalle Valo case HAL_REO_FLUSH_TIMEOUT_LIST_STATUS: 3317d5c65159SKalle Valo ath11k_hal_reo_flush_timeout_list_status(ab, reo_desc, 3318d5c65159SKalle Valo &reo_status); 3319d5c65159SKalle Valo break; 3320d5c65159SKalle Valo case HAL_REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS: 3321d5c65159SKalle Valo 
ath11k_hal_reo_desc_thresh_reached_status(ab, reo_desc, 3322d5c65159SKalle Valo &reo_status); 3323d5c65159SKalle Valo break; 3324d5c65159SKalle Valo case HAL_REO_UPDATE_RX_REO_QUEUE_STATUS: 3325d5c65159SKalle Valo ath11k_hal_reo_update_rx_reo_queue_status(ab, reo_desc, 3326d5c65159SKalle Valo &reo_status); 3327d5c65159SKalle Valo break; 3328d5c65159SKalle Valo default: 3329d5c65159SKalle Valo ath11k_warn(ab, "Unknown reo status type %d\n", tag); 3330d5c65159SKalle Valo continue; 3331d5c65159SKalle Valo } 3332d5c65159SKalle Valo 3333d5c65159SKalle Valo spin_lock_bh(&dp->reo_cmd_lock); 3334d5c65159SKalle Valo list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) { 3335d5c65159SKalle Valo if (reo_status.uniform_hdr.cmd_num == cmd->cmd_num) { 3336d5c65159SKalle Valo found = true; 3337d5c65159SKalle Valo list_del(&cmd->list); 3338d5c65159SKalle Valo break; 3339d5c65159SKalle Valo } 3340d5c65159SKalle Valo } 3341d5c65159SKalle Valo spin_unlock_bh(&dp->reo_cmd_lock); 3342d5c65159SKalle Valo 3343d5c65159SKalle Valo if (found) { 3344d5c65159SKalle Valo cmd->handler(dp, (void *)&cmd->data, 3345d5c65159SKalle Valo reo_status.uniform_hdr.cmd_status); 3346d5c65159SKalle Valo kfree(cmd); 3347d5c65159SKalle Valo } 3348d5c65159SKalle Valo 3349d5c65159SKalle Valo found = false; 3350d5c65159SKalle Valo } 3351d5c65159SKalle Valo 3352d5c65159SKalle Valo ath11k_hal_srng_access_end(ab, srng); 3353d5c65159SKalle Valo 3354d5c65159SKalle Valo spin_unlock_bh(&srng->lock); 3355d5c65159SKalle Valo } 3356d5c65159SKalle Valo 3357d5c65159SKalle Valo void ath11k_dp_rx_pdev_free(struct ath11k_base *ab, int mac_id) 3358d5c65159SKalle Valo { 3359d5c65159SKalle Valo struct ath11k *ar = ab->pdevs[mac_id].ar; 3360d5c65159SKalle Valo 3361d5c65159SKalle Valo ath11k_dp_rx_pdev_srng_free(ar); 3362d5c65159SKalle Valo ath11k_dp_rxdma_pdev_buf_free(ar); 3363d5c65159SKalle Valo } 3364d5c65159SKalle Valo 3365d5c65159SKalle Valo int ath11k_dp_rx_pdev_alloc(struct ath11k_base *ab, int mac_id) 
3366d5c65159SKalle Valo { 3367d5c65159SKalle Valo struct ath11k *ar = ab->pdevs[mac_id].ar; 3368d5c65159SKalle Valo struct ath11k_pdev_dp *dp = &ar->dp; 3369d5c65159SKalle Valo u32 ring_id; 3370d5c65159SKalle Valo int ret; 3371d5c65159SKalle Valo 3372d5c65159SKalle Valo ret = ath11k_dp_rx_pdev_srng_alloc(ar); 3373d5c65159SKalle Valo if (ret) { 3374d5c65159SKalle Valo ath11k_warn(ab, "failed to setup rx srngs\n"); 3375d5c65159SKalle Valo return ret; 3376d5c65159SKalle Valo } 3377d5c65159SKalle Valo 3378d5c65159SKalle Valo ret = ath11k_dp_rxdma_pdev_buf_setup(ar); 3379d5c65159SKalle Valo if (ret) { 3380d5c65159SKalle Valo ath11k_warn(ab, "failed to setup rxdma ring\n"); 3381d5c65159SKalle Valo return ret; 3382d5c65159SKalle Valo } 3383d5c65159SKalle Valo 3384d5c65159SKalle Valo ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id; 3385d5c65159SKalle Valo ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id, HAL_RXDMA_BUF); 3386d5c65159SKalle Valo if (ret) { 3387d5c65159SKalle Valo ath11k_warn(ab, "failed to configure rx_refill_buf_ring %d\n", 3388d5c65159SKalle Valo ret); 3389d5c65159SKalle Valo return ret; 3390d5c65159SKalle Valo } 3391d5c65159SKalle Valo 3392d5c65159SKalle Valo ring_id = dp->rxdma_err_dst_ring.ring_id; 3393d5c65159SKalle Valo ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id, HAL_RXDMA_DST); 3394d5c65159SKalle Valo if (ret) { 3395d5c65159SKalle Valo ath11k_warn(ab, "failed to configure rxdma_err_dest_ring %d\n", 3396d5c65159SKalle Valo ret); 3397d5c65159SKalle Valo return ret; 3398d5c65159SKalle Valo } 3399d5c65159SKalle Valo 3400d5c65159SKalle Valo ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id; 3401d5c65159SKalle Valo ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, 3402d5c65159SKalle Valo mac_id, HAL_RXDMA_MONITOR_BUF); 3403d5c65159SKalle Valo if (ret) { 3404d5c65159SKalle Valo ath11k_warn(ab, "failed to configure rxdma_mon_buf_ring %d\n", 3405d5c65159SKalle Valo ret); 3406d5c65159SKalle Valo return ret; 3407d5c65159SKalle Valo 
} 3408d5c65159SKalle Valo ret = ath11k_dp_tx_htt_srng_setup(ab, 3409d5c65159SKalle Valo dp->rxdma_mon_dst_ring.ring_id, 3410d5c65159SKalle Valo mac_id, HAL_RXDMA_MONITOR_DST); 3411d5c65159SKalle Valo if (ret) { 3412d5c65159SKalle Valo ath11k_warn(ab, "failed to configure rxdma_mon_dst_ring %d\n", 3413d5c65159SKalle Valo ret); 3414d5c65159SKalle Valo return ret; 3415d5c65159SKalle Valo } 3416d5c65159SKalle Valo ret = ath11k_dp_tx_htt_srng_setup(ab, 3417d5c65159SKalle Valo dp->rxdma_mon_desc_ring.ring_id, 3418d5c65159SKalle Valo mac_id, HAL_RXDMA_MONITOR_DESC); 3419d5c65159SKalle Valo if (ret) { 3420d5c65159SKalle Valo ath11k_warn(ab, "failed to configure rxdma_mon_dst_ring %d\n", 3421d5c65159SKalle Valo ret); 3422d5c65159SKalle Valo return ret; 3423d5c65159SKalle Valo } 3424d5c65159SKalle Valo ring_id = dp->rx_mon_status_refill_ring.refill_buf_ring.ring_id; 3425d5c65159SKalle Valo ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id, 3426d5c65159SKalle Valo HAL_RXDMA_MONITOR_STATUS); 3427d5c65159SKalle Valo if (ret) { 3428d5c65159SKalle Valo ath11k_warn(ab, 3429d5c65159SKalle Valo "failed to configure mon_status_refill_ring %d\n", 3430d5c65159SKalle Valo ret); 3431d5c65159SKalle Valo return ret; 3432d5c65159SKalle Valo } 3433d5c65159SKalle Valo return 0; 3434d5c65159SKalle Valo } 3435d5c65159SKalle Valo 3436d5c65159SKalle Valo static void ath11k_dp_mon_set_frag_len(u32 *total_len, u32 *frag_len) 3437d5c65159SKalle Valo { 3438d5c65159SKalle Valo if (*total_len >= (DP_RX_BUFFER_SIZE - sizeof(struct hal_rx_desc))) { 3439d5c65159SKalle Valo *frag_len = DP_RX_BUFFER_SIZE - sizeof(struct hal_rx_desc); 3440d5c65159SKalle Valo *total_len -= *frag_len; 3441d5c65159SKalle Valo } else { 3442d5c65159SKalle Valo *frag_len = *total_len; 3443d5c65159SKalle Valo *total_len = 0; 3444d5c65159SKalle Valo } 3445d5c65159SKalle Valo } 3446d5c65159SKalle Valo 3447d5c65159SKalle Valo static 3448d5c65159SKalle Valo int ath11k_dp_rx_monitor_link_desc_return(struct ath11k *ar, 
3449d5c65159SKalle Valo void *p_last_buf_addr_info, 3450d5c65159SKalle Valo u8 mac_id) 3451d5c65159SKalle Valo { 3452d5c65159SKalle Valo struct ath11k_pdev_dp *dp = &ar->dp; 3453d5c65159SKalle Valo struct dp_srng *dp_srng; 3454d5c65159SKalle Valo void *hal_srng; 3455d5c65159SKalle Valo void *src_srng_desc; 3456d5c65159SKalle Valo int ret = 0; 3457d5c65159SKalle Valo 3458d5c65159SKalle Valo dp_srng = &dp->rxdma_mon_desc_ring; 3459d5c65159SKalle Valo hal_srng = &ar->ab->hal.srng_list[dp_srng->ring_id]; 3460d5c65159SKalle Valo 3461d5c65159SKalle Valo ath11k_hal_srng_access_begin(ar->ab, hal_srng); 3462d5c65159SKalle Valo 3463d5c65159SKalle Valo src_srng_desc = ath11k_hal_srng_src_get_next_entry(ar->ab, hal_srng); 3464d5c65159SKalle Valo 3465d5c65159SKalle Valo if (src_srng_desc) { 3466d5c65159SKalle Valo struct ath11k_buffer_addr *src_desc = 3467d5c65159SKalle Valo (struct ath11k_buffer_addr *)src_srng_desc; 3468d5c65159SKalle Valo 3469d5c65159SKalle Valo *src_desc = *((struct ath11k_buffer_addr *)p_last_buf_addr_info); 3470d5c65159SKalle Valo } else { 3471d5c65159SKalle Valo ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 3472d5c65159SKalle Valo "Monitor Link Desc Ring %d Full", mac_id); 3473d5c65159SKalle Valo ret = -ENOMEM; 3474d5c65159SKalle Valo } 3475d5c65159SKalle Valo 3476d5c65159SKalle Valo ath11k_hal_srng_access_end(ar->ab, hal_srng); 3477d5c65159SKalle Valo return ret; 3478d5c65159SKalle Valo } 3479d5c65159SKalle Valo 3480d5c65159SKalle Valo static 3481d5c65159SKalle Valo void ath11k_dp_rx_mon_next_link_desc_get(void *rx_msdu_link_desc, 3482d5c65159SKalle Valo dma_addr_t *paddr, u32 *sw_cookie, 3483d5c65159SKalle Valo void **pp_buf_addr_info) 3484d5c65159SKalle Valo { 3485d5c65159SKalle Valo struct hal_rx_msdu_link *msdu_link = 3486d5c65159SKalle Valo (struct hal_rx_msdu_link *)rx_msdu_link_desc; 3487d5c65159SKalle Valo struct ath11k_buffer_addr *buf_addr_info; 3488d5c65159SKalle Valo u8 rbm = 0; 3489d5c65159SKalle Valo 3490d5c65159SKalle Valo buf_addr_info = (struct 
ath11k_buffer_addr *)&msdu_link->buf_addr_info; 3491d5c65159SKalle Valo 3492d5c65159SKalle Valo ath11k_hal_rx_buf_addr_info_get(buf_addr_info, paddr, sw_cookie, &rbm); 3493d5c65159SKalle Valo 3494d5c65159SKalle Valo *pp_buf_addr_info = (void *)buf_addr_info; 3495d5c65159SKalle Valo } 3496d5c65159SKalle Valo 3497d5c65159SKalle Valo static int ath11k_dp_pkt_set_pktlen(struct sk_buff *skb, u32 len) 3498d5c65159SKalle Valo { 3499d5c65159SKalle Valo if (skb->len > len) { 3500d5c65159SKalle Valo skb_trim(skb, len); 3501d5c65159SKalle Valo } else { 3502d5c65159SKalle Valo if (skb_tailroom(skb) < len - skb->len) { 3503d5c65159SKalle Valo if ((pskb_expand_head(skb, 0, 3504d5c65159SKalle Valo len - skb->len - skb_tailroom(skb), 3505d5c65159SKalle Valo GFP_ATOMIC))) { 3506d5c65159SKalle Valo dev_kfree_skb_any(skb); 3507d5c65159SKalle Valo return -ENOMEM; 3508d5c65159SKalle Valo } 3509d5c65159SKalle Valo } 3510d5c65159SKalle Valo skb_put(skb, (len - skb->len)); 3511d5c65159SKalle Valo } 3512d5c65159SKalle Valo return 0; 3513d5c65159SKalle Valo } 3514d5c65159SKalle Valo 3515d5c65159SKalle Valo static void ath11k_hal_rx_msdu_list_get(struct ath11k *ar, 3516d5c65159SKalle Valo void *msdu_link_desc, 3517d5c65159SKalle Valo struct hal_rx_msdu_list *msdu_list, 3518d5c65159SKalle Valo u16 *num_msdus) 3519d5c65159SKalle Valo { 3520d5c65159SKalle Valo struct hal_rx_msdu_details *msdu_details = NULL; 3521d5c65159SKalle Valo struct rx_msdu_desc *msdu_desc_info = NULL; 3522d5c65159SKalle Valo struct hal_rx_msdu_link *msdu_link = NULL; 3523d5c65159SKalle Valo int i; 3524d5c65159SKalle Valo u32 last = FIELD_PREP(RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU, 1); 3525d5c65159SKalle Valo u32 first = FIELD_PREP(RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU, 1); 3526d5c65159SKalle Valo u8 tmp = 0; 3527d5c65159SKalle Valo 3528d5c65159SKalle Valo msdu_link = (struct hal_rx_msdu_link *)msdu_link_desc; 3529d5c65159SKalle Valo msdu_details = &msdu_link->msdu_link[0]; 3530d5c65159SKalle Valo 3531d5c65159SKalle Valo 
for (i = 0; i < HAL_RX_NUM_MSDU_DESC; i++) { 3532d5c65159SKalle Valo if (FIELD_GET(BUFFER_ADDR_INFO0_ADDR, 3533d5c65159SKalle Valo msdu_details[i].buf_addr_info.info0) == 0) { 3534d5c65159SKalle Valo msdu_desc_info = &msdu_details[i - 1].rx_msdu_info; 3535d5c65159SKalle Valo msdu_desc_info->info0 |= last; 3536d5c65159SKalle Valo ; 3537d5c65159SKalle Valo break; 3538d5c65159SKalle Valo } 3539d5c65159SKalle Valo msdu_desc_info = &msdu_details[i].rx_msdu_info; 3540d5c65159SKalle Valo 3541d5c65159SKalle Valo if (!i) 3542d5c65159SKalle Valo msdu_desc_info->info0 |= first; 3543d5c65159SKalle Valo else if (i == (HAL_RX_NUM_MSDU_DESC - 1)) 3544d5c65159SKalle Valo msdu_desc_info->info0 |= last; 3545d5c65159SKalle Valo msdu_list->msdu_info[i].msdu_flags = msdu_desc_info->info0; 3546d5c65159SKalle Valo msdu_list->msdu_info[i].msdu_len = 3547d5c65159SKalle Valo HAL_RX_MSDU_PKT_LENGTH_GET(msdu_desc_info->info0); 3548d5c65159SKalle Valo msdu_list->sw_cookie[i] = 3549d5c65159SKalle Valo FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE, 3550d5c65159SKalle Valo msdu_details[i].buf_addr_info.info1); 3551d5c65159SKalle Valo tmp = FIELD_GET(BUFFER_ADDR_INFO1_RET_BUF_MGR, 3552d5c65159SKalle Valo msdu_details[i].buf_addr_info.info1); 3553d5c65159SKalle Valo msdu_list->rbm[i] = tmp; 3554d5c65159SKalle Valo } 3555d5c65159SKalle Valo *num_msdus = i; 3556d5c65159SKalle Valo } 3557d5c65159SKalle Valo 3558d5c65159SKalle Valo static u32 ath11k_dp_rx_mon_comp_ppduid(u32 msdu_ppdu_id, u32 *ppdu_id, 3559d5c65159SKalle Valo u32 *rx_bufs_used) 3560d5c65159SKalle Valo { 3561d5c65159SKalle Valo u32 ret = 0; 3562d5c65159SKalle Valo 3563d5c65159SKalle Valo if ((*ppdu_id < msdu_ppdu_id) && 3564d5c65159SKalle Valo ((msdu_ppdu_id - *ppdu_id) < DP_NOT_PPDU_ID_WRAP_AROUND)) { 3565d5c65159SKalle Valo *ppdu_id = msdu_ppdu_id; 3566d5c65159SKalle Valo ret = msdu_ppdu_id; 3567d5c65159SKalle Valo } else if ((*ppdu_id > msdu_ppdu_id) && 3568d5c65159SKalle Valo ((*ppdu_id - msdu_ppdu_id) > DP_NOT_PPDU_ID_WRAP_AROUND)) { 
3569d5c65159SKalle Valo /* mon_dst is behind than mon_status 3570d5c65159SKalle Valo * skip dst_ring and free it 3571d5c65159SKalle Valo */ 3572d5c65159SKalle Valo *rx_bufs_used += 1; 3573d5c65159SKalle Valo *ppdu_id = msdu_ppdu_id; 3574d5c65159SKalle Valo ret = msdu_ppdu_id; 3575d5c65159SKalle Valo } 3576d5c65159SKalle Valo return ret; 3577d5c65159SKalle Valo } 3578d5c65159SKalle Valo 3579d5c65159SKalle Valo static void ath11k_dp_mon_get_buf_len(struct hal_rx_msdu_desc_info *info, 3580d5c65159SKalle Valo bool *is_frag, u32 *total_len, 3581d5c65159SKalle Valo u32 *frag_len, u32 *msdu_cnt) 3582d5c65159SKalle Valo { 3583d5c65159SKalle Valo if (info->msdu_flags & RX_MSDU_DESC_INFO0_MSDU_CONTINUATION) { 3584d5c65159SKalle Valo if (!*is_frag) { 3585d5c65159SKalle Valo *total_len = info->msdu_len; 3586d5c65159SKalle Valo *is_frag = true; 3587d5c65159SKalle Valo } 3588d5c65159SKalle Valo ath11k_dp_mon_set_frag_len(total_len, 3589d5c65159SKalle Valo frag_len); 3590d5c65159SKalle Valo } else { 3591d5c65159SKalle Valo if (*is_frag) { 3592d5c65159SKalle Valo ath11k_dp_mon_set_frag_len(total_len, 3593d5c65159SKalle Valo frag_len); 3594d5c65159SKalle Valo } else { 3595d5c65159SKalle Valo *frag_len = info->msdu_len; 3596d5c65159SKalle Valo } 3597d5c65159SKalle Valo *is_frag = false; 3598d5c65159SKalle Valo *msdu_cnt -= 1; 3599d5c65159SKalle Valo } 3600d5c65159SKalle Valo } 3601d5c65159SKalle Valo 3602d5c65159SKalle Valo static u32 3603d5c65159SKalle Valo ath11k_dp_rx_mon_mpdu_pop(struct ath11k *ar, 3604d5c65159SKalle Valo void *ring_entry, struct sk_buff **head_msdu, 3605d5c65159SKalle Valo struct sk_buff **tail_msdu, u32 *npackets, 3606d5c65159SKalle Valo u32 *ppdu_id) 3607d5c65159SKalle Valo { 3608d5c65159SKalle Valo struct ath11k_pdev_dp *dp = &ar->dp; 3609d5c65159SKalle Valo struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data; 3610d5c65159SKalle Valo struct dp_rxdma_ring *rx_ring = &dp->rxdma_mon_buf_ring; 3611d5c65159SKalle Valo struct sk_buff *msdu = 
NULL, *last = NULL; 3612d5c65159SKalle Valo struct hal_rx_msdu_list msdu_list; 3613d5c65159SKalle Valo void *p_buf_addr_info, *p_last_buf_addr_info; 3614d5c65159SKalle Valo struct hal_rx_desc *rx_desc; 3615d5c65159SKalle Valo void *rx_msdu_link_desc; 3616d5c65159SKalle Valo dma_addr_t paddr; 3617d5c65159SKalle Valo u16 num_msdus = 0; 3618d5c65159SKalle Valo u32 rx_buf_size, rx_pkt_offset, sw_cookie; 3619d5c65159SKalle Valo u32 rx_bufs_used = 0, i = 0; 3620d5c65159SKalle Valo u32 msdu_ppdu_id = 0, msdu_cnt = 0; 3621d5c65159SKalle Valo u32 total_len = 0, frag_len = 0; 3622d5c65159SKalle Valo bool is_frag, is_first_msdu; 3623d5c65159SKalle Valo bool drop_mpdu = false; 3624d5c65159SKalle Valo struct ath11k_skb_rxcb *rxcb; 3625d5c65159SKalle Valo struct hal_reo_entrance_ring *ent_desc = 3626d5c65159SKalle Valo (struct hal_reo_entrance_ring *)ring_entry; 3627d5c65159SKalle Valo int buf_id; 3628d5c65159SKalle Valo 3629d5c65159SKalle Valo ath11k_hal_rx_reo_ent_buf_paddr_get(ring_entry, &paddr, 3630d5c65159SKalle Valo &sw_cookie, &p_last_buf_addr_info, 3631d5c65159SKalle Valo &msdu_cnt); 3632d5c65159SKalle Valo 3633d5c65159SKalle Valo if (FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_PUSH_REASON, 3634d5c65159SKalle Valo ent_desc->info1) == 3635d5c65159SKalle Valo HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) { 3636d5c65159SKalle Valo u8 rxdma_err = 3637d5c65159SKalle Valo FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE, 3638d5c65159SKalle Valo ent_desc->info1); 3639d5c65159SKalle Valo if (rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_FLUSH_REQUEST_ERR || 3640d5c65159SKalle Valo rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_MPDU_LEN_ERR || 3641d5c65159SKalle Valo rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_OVERFLOW_ERR) { 3642d5c65159SKalle Valo drop_mpdu = true; 3643d5c65159SKalle Valo pmon->rx_mon_stats.dest_mpdu_drop++; 3644d5c65159SKalle Valo } 3645d5c65159SKalle Valo } 3646d5c65159SKalle Valo 3647d5c65159SKalle Valo is_frag = false; 3648d5c65159SKalle Valo is_first_msdu = true; 
3649d5c65159SKalle Valo 3650d5c65159SKalle Valo do { 3651d5c65159SKalle Valo if (pmon->mon_last_linkdesc_paddr == paddr) { 3652d5c65159SKalle Valo pmon->rx_mon_stats.dup_mon_linkdesc_cnt++; 3653d5c65159SKalle Valo return rx_bufs_used; 3654d5c65159SKalle Valo } 3655d5c65159SKalle Valo 3656d5c65159SKalle Valo rx_msdu_link_desc = 3657d5c65159SKalle Valo (void *)pmon->link_desc_banks[sw_cookie].vaddr + 3658d5c65159SKalle Valo (paddr - pmon->link_desc_banks[sw_cookie].paddr); 3659d5c65159SKalle Valo 3660d5c65159SKalle Valo ath11k_hal_rx_msdu_list_get(ar, rx_msdu_link_desc, &msdu_list, 3661d5c65159SKalle Valo &num_msdus); 3662d5c65159SKalle Valo 3663d5c65159SKalle Valo for (i = 0; i < num_msdus; i++) { 3664d5c65159SKalle Valo u32 l2_hdr_offset; 3665d5c65159SKalle Valo 3666d5c65159SKalle Valo if (pmon->mon_last_buf_cookie == msdu_list.sw_cookie[i]) { 3667d5c65159SKalle Valo ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 3668d5c65159SKalle Valo "i %d last_cookie %d is same\n", 3669d5c65159SKalle Valo i, pmon->mon_last_buf_cookie); 3670d5c65159SKalle Valo drop_mpdu = true; 3671d5c65159SKalle Valo pmon->rx_mon_stats.dup_mon_buf_cnt++; 3672d5c65159SKalle Valo continue; 3673d5c65159SKalle Valo } 3674d5c65159SKalle Valo buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, 3675d5c65159SKalle Valo msdu_list.sw_cookie[i]); 3676d5c65159SKalle Valo 3677d5c65159SKalle Valo spin_lock_bh(&rx_ring->idr_lock); 3678d5c65159SKalle Valo msdu = idr_find(&rx_ring->bufs_idr, buf_id); 3679d5c65159SKalle Valo spin_unlock_bh(&rx_ring->idr_lock); 3680d5c65159SKalle Valo if (!msdu) { 3681d5c65159SKalle Valo ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 3682d5c65159SKalle Valo "msdu_pop: invalid buf_id %d\n", buf_id); 3683d5c65159SKalle Valo break; 3684d5c65159SKalle Valo } 3685d5c65159SKalle Valo rxcb = ATH11K_SKB_RXCB(msdu); 3686d5c65159SKalle Valo if (!rxcb->unmapped) { 3687d5c65159SKalle Valo dma_unmap_single(ar->ab->dev, rxcb->paddr, 3688d5c65159SKalle Valo msdu->len + 3689d5c65159SKalle Valo skb_tailroom(msdu), 
3690d5c65159SKalle Valo DMA_FROM_DEVICE); 3691d5c65159SKalle Valo rxcb->unmapped = 1; 3692d5c65159SKalle Valo } 3693d5c65159SKalle Valo if (drop_mpdu) { 3694d5c65159SKalle Valo ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 3695d5c65159SKalle Valo "i %d drop msdu %p *ppdu_id %x\n", 3696d5c65159SKalle Valo i, msdu, *ppdu_id); 3697d5c65159SKalle Valo dev_kfree_skb_any(msdu); 3698d5c65159SKalle Valo msdu = NULL; 3699d5c65159SKalle Valo goto next_msdu; 3700d5c65159SKalle Valo } 3701d5c65159SKalle Valo 3702d5c65159SKalle Valo rx_desc = (struct hal_rx_desc *)msdu->data; 3703d5c65159SKalle Valo 3704d5c65159SKalle Valo rx_pkt_offset = sizeof(struct hal_rx_desc); 3705d5c65159SKalle Valo l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad(rx_desc); 3706d5c65159SKalle Valo 3707d5c65159SKalle Valo if (is_first_msdu) { 3708d5c65159SKalle Valo if (!ath11k_dp_rxdesc_mpdu_valid(rx_desc)) { 3709d5c65159SKalle Valo drop_mpdu = true; 3710d5c65159SKalle Valo dev_kfree_skb_any(msdu); 3711d5c65159SKalle Valo msdu = NULL; 3712d5c65159SKalle Valo pmon->mon_last_linkdesc_paddr = paddr; 3713d5c65159SKalle Valo goto next_msdu; 3714d5c65159SKalle Valo } 3715d5c65159SKalle Valo 3716d5c65159SKalle Valo msdu_ppdu_id = 3717d5c65159SKalle Valo ath11k_dp_rxdesc_get_ppduid(rx_desc); 3718d5c65159SKalle Valo 3719d5c65159SKalle Valo if (ath11k_dp_rx_mon_comp_ppduid(msdu_ppdu_id, 3720d5c65159SKalle Valo ppdu_id, 3721d5c65159SKalle Valo &rx_bufs_used)) 3722d5c65159SKalle Valo return rx_bufs_used; 3723d5c65159SKalle Valo pmon->mon_last_linkdesc_paddr = paddr; 3724d5c65159SKalle Valo is_first_msdu = false; 3725d5c65159SKalle Valo } 3726d5c65159SKalle Valo ath11k_dp_mon_get_buf_len(&msdu_list.msdu_info[i], 3727d5c65159SKalle Valo &is_frag, &total_len, 3728d5c65159SKalle Valo &frag_len, &msdu_cnt); 3729d5c65159SKalle Valo rx_buf_size = rx_pkt_offset + l2_hdr_offset + frag_len; 3730d5c65159SKalle Valo 3731d5c65159SKalle Valo ath11k_dp_pkt_set_pktlen(msdu, rx_buf_size); 3732d5c65159SKalle Valo 3733d5c65159SKalle Valo if 
(!(*head_msdu)) 3734d5c65159SKalle Valo *head_msdu = msdu; 3735d5c65159SKalle Valo else if (last) 3736d5c65159SKalle Valo last->next = msdu; 3737d5c65159SKalle Valo 3738d5c65159SKalle Valo last = msdu; 3739d5c65159SKalle Valo next_msdu: 3740d5c65159SKalle Valo pmon->mon_last_buf_cookie = msdu_list.sw_cookie[i]; 3741d5c65159SKalle Valo rx_bufs_used++; 3742d5c65159SKalle Valo spin_lock_bh(&rx_ring->idr_lock); 3743d5c65159SKalle Valo idr_remove(&rx_ring->bufs_idr, buf_id); 3744d5c65159SKalle Valo spin_unlock_bh(&rx_ring->idr_lock); 3745d5c65159SKalle Valo } 3746d5c65159SKalle Valo 3747d5c65159SKalle Valo ath11k_dp_rx_mon_next_link_desc_get(rx_msdu_link_desc, &paddr, 3748d5c65159SKalle Valo &sw_cookie, 3749d5c65159SKalle Valo &p_buf_addr_info); 3750d5c65159SKalle Valo 3751d5c65159SKalle Valo if (ath11k_dp_rx_monitor_link_desc_return(ar, 3752d5c65159SKalle Valo p_last_buf_addr_info, 3753d5c65159SKalle Valo dp->mac_id)) 3754d5c65159SKalle Valo ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 3755d5c65159SKalle Valo "dp_rx_monitor_link_desc_return failed"); 3756d5c65159SKalle Valo 3757d5c65159SKalle Valo p_last_buf_addr_info = p_buf_addr_info; 3758d5c65159SKalle Valo 3759d5c65159SKalle Valo } while (paddr && msdu_cnt); 3760d5c65159SKalle Valo 3761d5c65159SKalle Valo if (last) 3762d5c65159SKalle Valo last->next = NULL; 3763d5c65159SKalle Valo 3764d5c65159SKalle Valo *tail_msdu = msdu; 3765d5c65159SKalle Valo 3766d5c65159SKalle Valo if (msdu_cnt == 0) 3767d5c65159SKalle Valo *npackets = 1; 3768d5c65159SKalle Valo 3769d5c65159SKalle Valo return rx_bufs_used; 3770d5c65159SKalle Valo } 3771d5c65159SKalle Valo 3772d5c65159SKalle Valo static void ath11k_dp_rx_msdus_set_payload(struct sk_buff *msdu) 3773d5c65159SKalle Valo { 3774d5c65159SKalle Valo u32 rx_pkt_offset, l2_hdr_offset; 3775d5c65159SKalle Valo 3776d5c65159SKalle Valo rx_pkt_offset = sizeof(struct hal_rx_desc); 3777d5c65159SKalle Valo l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad((struct hal_rx_desc *)msdu->data); 
3778d5c65159SKalle Valo skb_pull(msdu, rx_pkt_offset + l2_hdr_offset); 3779d5c65159SKalle Valo } 3780d5c65159SKalle Valo 3781d5c65159SKalle Valo static struct sk_buff * 3782d5c65159SKalle Valo ath11k_dp_rx_mon_merg_msdus(struct ath11k *ar, 3783d5c65159SKalle Valo u32 mac_id, struct sk_buff *head_msdu, 3784d5c65159SKalle Valo struct sk_buff *last_msdu, 3785d5c65159SKalle Valo struct ieee80211_rx_status *rxs) 3786d5c65159SKalle Valo { 3787d5c65159SKalle Valo struct sk_buff *msdu, *mpdu_buf, *prev_buf; 3788d5c65159SKalle Valo u32 decap_format, wifi_hdr_len; 3789d5c65159SKalle Valo struct hal_rx_desc *rx_desc; 3790d5c65159SKalle Valo char *hdr_desc; 3791d5c65159SKalle Valo u8 *dest; 3792d5c65159SKalle Valo struct ieee80211_hdr_3addr *wh; 3793d5c65159SKalle Valo 3794d5c65159SKalle Valo mpdu_buf = NULL; 3795d5c65159SKalle Valo 3796d5c65159SKalle Valo if (!head_msdu) 3797d5c65159SKalle Valo goto err_merge_fail; 3798d5c65159SKalle Valo 3799d5c65159SKalle Valo rx_desc = (struct hal_rx_desc *)head_msdu->data; 3800d5c65159SKalle Valo 3801d5c65159SKalle Valo if (ath11k_dp_rxdesc_get_mpdulen_err(rx_desc)) 3802d5c65159SKalle Valo return NULL; 3803d5c65159SKalle Valo 3804d5c65159SKalle Valo decap_format = ath11k_dp_rxdesc_get_decap_format(rx_desc); 3805d5c65159SKalle Valo 3806d5c65159SKalle Valo ath11k_dp_rx_h_ppdu(ar, rx_desc, rxs); 3807d5c65159SKalle Valo 3808d5c65159SKalle Valo if (decap_format == DP_RX_DECAP_TYPE_RAW) { 3809d5c65159SKalle Valo ath11k_dp_rx_msdus_set_payload(head_msdu); 3810d5c65159SKalle Valo 3811d5c65159SKalle Valo prev_buf = head_msdu; 3812d5c65159SKalle Valo msdu = head_msdu->next; 3813d5c65159SKalle Valo 3814d5c65159SKalle Valo while (msdu) { 3815d5c65159SKalle Valo ath11k_dp_rx_msdus_set_payload(msdu); 3816d5c65159SKalle Valo 3817d5c65159SKalle Valo prev_buf = msdu; 3818d5c65159SKalle Valo msdu = msdu->next; 3819d5c65159SKalle Valo } 3820d5c65159SKalle Valo 3821d5c65159SKalle Valo prev_buf->next = NULL; 3822d5c65159SKalle Valo 3823d5c65159SKalle Valo 
skb_trim(prev_buf, prev_buf->len - HAL_RX_FCS_LEN); 3824d5c65159SKalle Valo } else if (decap_format == DP_RX_DECAP_TYPE_NATIVE_WIFI) { 3825d5c65159SKalle Valo __le16 qos_field; 3826d5c65159SKalle Valo u8 qos_pkt = 0; 3827d5c65159SKalle Valo 3828d5c65159SKalle Valo rx_desc = (struct hal_rx_desc *)head_msdu->data; 3829d5c65159SKalle Valo hdr_desc = ath11k_dp_rxdesc_get_80211hdr(rx_desc); 3830d5c65159SKalle Valo 3831d5c65159SKalle Valo /* Base size */ 3832d5c65159SKalle Valo wifi_hdr_len = sizeof(struct ieee80211_hdr_3addr); 3833d5c65159SKalle Valo wh = (struct ieee80211_hdr_3addr *)hdr_desc; 3834d5c65159SKalle Valo 3835d5c65159SKalle Valo if (ieee80211_is_data_qos(wh->frame_control)) { 3836d5c65159SKalle Valo struct ieee80211_qos_hdr *qwh = 3837d5c65159SKalle Valo (struct ieee80211_qos_hdr *)hdr_desc; 3838d5c65159SKalle Valo 3839d5c65159SKalle Valo qos_field = qwh->qos_ctrl; 3840d5c65159SKalle Valo qos_pkt = 1; 3841d5c65159SKalle Valo } 3842d5c65159SKalle Valo msdu = head_msdu; 3843d5c65159SKalle Valo 3844d5c65159SKalle Valo while (msdu) { 3845d5c65159SKalle Valo rx_desc = (struct hal_rx_desc *)msdu->data; 3846d5c65159SKalle Valo hdr_desc = ath11k_dp_rxdesc_get_80211hdr(rx_desc); 3847d5c65159SKalle Valo 3848d5c65159SKalle Valo if (qos_pkt) { 3849d5c65159SKalle Valo dest = skb_push(msdu, sizeof(__le16)); 3850d5c65159SKalle Valo if (!dest) 3851d5c65159SKalle Valo goto err_merge_fail; 3852d5c65159SKalle Valo memcpy(dest, hdr_desc, wifi_hdr_len); 3853d5c65159SKalle Valo memcpy(dest + wifi_hdr_len, 3854d5c65159SKalle Valo (u8 *)&qos_field, sizeof(__le16)); 3855d5c65159SKalle Valo } 3856d5c65159SKalle Valo ath11k_dp_rx_msdus_set_payload(msdu); 3857d5c65159SKalle Valo prev_buf = msdu; 3858d5c65159SKalle Valo msdu = msdu->next; 3859d5c65159SKalle Valo } 3860d5c65159SKalle Valo dest = skb_put(prev_buf, HAL_RX_FCS_LEN); 3861d5c65159SKalle Valo if (!dest) 3862d5c65159SKalle Valo goto err_merge_fail; 3863d5c65159SKalle Valo 3864d5c65159SKalle Valo ath11k_dbg(ar->ab, 
ATH11K_DBG_DATA, 3865d5c65159SKalle Valo "mpdu_buf %pK mpdu_buf->len %u", 3866d5c65159SKalle Valo prev_buf, prev_buf->len); 3867d5c65159SKalle Valo } else { 3868d5c65159SKalle Valo ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 3869d5c65159SKalle Valo "decap format %d is not supported!\n", 3870d5c65159SKalle Valo decap_format); 3871d5c65159SKalle Valo goto err_merge_fail; 3872d5c65159SKalle Valo } 3873d5c65159SKalle Valo 3874d5c65159SKalle Valo return head_msdu; 3875d5c65159SKalle Valo 3876d5c65159SKalle Valo err_merge_fail: 3877d5c65159SKalle Valo if (mpdu_buf && decap_format != DP_RX_DECAP_TYPE_RAW) { 3878d5c65159SKalle Valo ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 3879d5c65159SKalle Valo "err_merge_fail mpdu_buf %pK", mpdu_buf); 3880d5c65159SKalle Valo /* Free the head buffer */ 3881d5c65159SKalle Valo dev_kfree_skb_any(mpdu_buf); 3882d5c65159SKalle Valo } 3883d5c65159SKalle Valo return NULL; 3884d5c65159SKalle Valo } 3885d5c65159SKalle Valo 3886d5c65159SKalle Valo static int ath11k_dp_rx_mon_deliver(struct ath11k *ar, u32 mac_id, 3887d5c65159SKalle Valo struct sk_buff *head_msdu, 3888d5c65159SKalle Valo struct sk_buff *tail_msdu, 3889d5c65159SKalle Valo struct napi_struct *napi) 3890d5c65159SKalle Valo { 3891d5c65159SKalle Valo struct ath11k_pdev_dp *dp = &ar->dp; 3892d5c65159SKalle Valo struct sk_buff *mon_skb, *skb_next, *header; 3893d5c65159SKalle Valo struct ieee80211_rx_status *rxs = &dp->rx_status, *status; 3894d5c65159SKalle Valo 3895d5c65159SKalle Valo mon_skb = ath11k_dp_rx_mon_merg_msdus(ar, mac_id, head_msdu, 3896d5c65159SKalle Valo tail_msdu, rxs); 3897d5c65159SKalle Valo 3898d5c65159SKalle Valo if (!mon_skb) 3899d5c65159SKalle Valo goto mon_deliver_fail; 3900d5c65159SKalle Valo 3901d5c65159SKalle Valo header = mon_skb; 3902d5c65159SKalle Valo 3903d5c65159SKalle Valo rxs->flag = 0; 3904d5c65159SKalle Valo do { 3905d5c65159SKalle Valo skb_next = mon_skb->next; 3906d5c65159SKalle Valo if (!skb_next) 3907d5c65159SKalle Valo rxs->flag &= ~RX_FLAG_AMSDU_MORE; 
/* NOTE(review): this chunk begins inside the MSDU delivery loop of
 * ath11k_dp_rx_mon_deliver(); the function prologue is outside this view.
 */
	else
		rxs->flag |= RX_FLAG_AMSDU_MORE;

		/* Only the first delivered subframe carries normal PN
		 * validation; later subframes of the same chain set
		 * RX_FLAG_ALLOW_SAME_PN (mac80211: A-MSDU subframes share
		 * one PN).
		 */
		if (mon_skb == header) {
			header = NULL;
			rxs->flag &= ~RX_FLAG_ALLOW_SAME_PN;
		} else {
			rxs->flag |= RX_FLAG_ALLOW_SAME_PN;
		}
		/* Frames reaped here go to monitor interfaces only */
		rxs->flag |= RX_FLAG_ONLY_MONITOR;

		/* Copy the accumulated rx status into the skb control block
		 * before handing the frame to mac80211.
		 */
		status = IEEE80211_SKB_RXCB(mon_skb);
		*status = *rxs;

		ath11k_dp_rx_deliver_msdu(ar, napi, mon_skb);
		mon_skb = skb_next;
	} while (mon_skb && (mon_skb != tail_msdu));
	rxs->flag = 0;

	return 0;

mon_deliver_fail:
	/* Delivery failed: free the entire MSDU chain so no skb leaks */
	mon_skb = head_msdu;
	while (mon_skb) {
		skb_next = mon_skb->next;
		dev_kfree_skb_any(mon_skb);
		mon_skb = skb_next;
	}
	return -EINVAL;
}

/* Reap the rxdma monitor destination ring for the PPDU currently tracked
 * in pmon->mon_ppdu_info, deliver each popped MPDU (as an MSDU chain) to
 * mac80211 and replenish the rx buffers that were consumed.
 * Note: @quota is currently unused.
 */
static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, u32 quota,
					  struct napi_struct *napi)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
	void *ring_entry;
	void *mon_dst_srng;
	u32 ppdu_id;
	u32 rx_bufs_used;
	struct ath11k_pdev_mon_stats *rx_mon_stats;
	u32 npackets = 0;

	mon_dst_srng = &ar->ab->hal.srng_list[dp->rxdma_mon_dst_ring.ring_id];

	if (!mon_dst_srng) {
		ath11k_warn(ar->ab,
			    "HAL Monitor Destination Ring Init Failed -- %pK",
			    mon_dst_srng);
		return;
	}

	/* mon_lock serializes destination-ring processing; _bh because this
	 * path runs in NAPI (softirq) context.
	 */
	spin_lock_bh(&pmon->mon_lock);

	ath11k_hal_srng_access_begin(ar->ab, mon_dst_srng);

	ppdu_id = pmon->mon_ppdu_info.ppdu_id;
	rx_bufs_used = 0;
	rx_mon_stats = &pmon->rx_mon_stats;

	while ((ring_entry = ath11k_hal_srng_dst_peek(ar->ab, mon_dst_srng))) {
		struct sk_buff *head_msdu, *tail_msdu;

		head_msdu = NULL;
		tail_msdu = NULL;

		/* Pop one MPDU worth of buffers; returns the number of rx
		 * buffers consumed and may advance ppdu_id.
		 */
		rx_bufs_used += ath11k_dp_rx_mon_mpdu_pop(ar, ring_entry,
							  &head_msdu,
							  &tail_msdu,
							  &npackets, &ppdu_id);

		/* A different ppdu_id means the destination ring has moved
		 * on to a PPDU the status ring has not reported yet: rearm
		 * status parsing and stop here.
		 */
		if (ppdu_id != pmon->mon_ppdu_info.ppdu_id) {
			pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
			ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
				   "dest_rx: new ppdu_id %x != status ppdu_id %x",
				   ppdu_id, pmon->mon_ppdu_info.ppdu_id);
			break;
		}
		if (head_msdu && tail_msdu) {
			ath11k_dp_rx_mon_deliver(ar, dp->mac_id, head_msdu,
						 tail_msdu, napi);
			rx_mon_stats->dest_mpdu_done++;
		}

		ring_entry = ath11k_hal_srng_dst_get_next_entry(ar->ab,
								mon_dst_srng);
	}
	ath11k_hal_srng_access_end(ar->ab, mon_dst_srng);

	spin_unlock_bh(&pmon->mon_lock);

	/* Give back as many buffers as were reaped; GFP_ATOMIC since we are
	 * in softirq context.
	 */
	if (rx_bufs_used) {
		rx_mon_stats->dest_ppdu_done++;
		ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id,
					   &dp->rxdma_mon_buf_ring,
					   rx_bufs_used,
					   HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC);
	}
}

/* Drain the queued monitor status skbs, parsing TLVs into
 * pmon->mon_ppdu_info. Whenever a full PPDU's status has been parsed
 * (HAL_TLV_STATUS_PPDU_DONE), process the matching destination-ring
 * entries, then rearm for the next PPDU.
 */
static void ath11k_dp_rx_mon_status_process_tlv(struct ath11k *ar,
						u32 quota,
						struct napi_struct *napi)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
	struct hal_rx_mon_ppdu_info *ppdu_info;
	struct sk_buff *status_skb;
	u32 tlv_status = HAL_TLV_STATUS_BUF_DONE;
	struct ath11k_pdev_mon_stats *rx_mon_stats;

	ppdu_info = &pmon->mon_ppdu_info;
	rx_mon_stats = &pmon->rx_mon_stats;

	/* Only parse while we are expecting the start of a PPDU */
	if (pmon->mon_ppdu_status != DP_PPDU_STATUS_START)
		return;

	while (!skb_queue_empty(&pmon->rx_status_q)) {
		status_skb = skb_dequeue(&pmon->rx_status_q);

		tlv_status = ath11k_hal_rx_parse_mon_status(ar->ab, ppdu_info,
							    status_skb);
		if (tlv_status == HAL_TLV_STATUS_PPDU_DONE) {
			rx_mon_stats->status_ppdu_done++;
			pmon->mon_ppdu_status = DP_PPDU_STATUS_DONE;
			ath11k_dp_rx_mon_dest_process(ar, quota, napi);
			pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
		}
		/* Status skb is fully consumed by the parser */
		dev_kfree_skb_any(status_skb);
	}
}

/* NAPI poll body for monitor-mode rx: reap the status ring into
 * rx_status_q, then parse whatever was reaped. Returns the number of
 * status buffers reaped (the work done against @budget).
 */
static int ath11k_dp_mon_process_rx(struct ath11k_base *ab, int mac_id,
				    struct napi_struct *napi, int budget)
{
	struct ath11k *ar = ab->pdevs[mac_id].ar;
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
	int num_buffs_reaped = 0;

	num_buffs_reaped = ath11k_dp_rx_reap_mon_status_ring(ar->ab, dp->mac_id, &budget,
							     &pmon->rx_status_q);
	if (num_buffs_reaped)
		ath11k_dp_rx_mon_status_process_tlv(ar, budget, napi);

	return num_buffs_reaped;
}

/* Entry point from the NAPI poll loop: full monitor processing when the
 * monitor interface is enabled, otherwise status-ring processing only.
 */
int ath11k_dp_rx_process_mon_rings(struct ath11k_base *ab, int mac_id,
				   struct napi_struct *napi, int budget)
{
	struct ath11k *ar = ab->pdevs[mac_id].ar;
	int ret = 0;

	if (test_bit(ATH11K_FLAG_MONITOR_ENABLED, &ar->monitor_flags))
		ret = ath11k_dp_mon_process_rx(ab, mac_id, napi, budget);
	else
		ret = ath11k_dp_rx_process_mon_status(ab, mac_id, napi, budget);
	return ret;
}

/* Initialize the per-pdev monitor status state: the status skb queue,
 * the PPDU state machine and the monitor statistics. Always succeeds.
 */
static int ath11k_dp_rx_pdev_mon_status_attach(struct ath11k *ar)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;

	skb_queue_head_init(&pmon->rx_status_q);

	pmon->mon_ppdu_status = DP_PPDU_STATUS_START;

	memset(&pmon->rx_mon_stats, 0,
	       sizeof(pmon->rx_mon_stats));
	return 0;
}

/* Attach per-pdev monitor rx: set up status state, size the link
 * descriptor pool from the monitor desc ring, and initialize bookkeeping.
 * Returns 0 on success or a negative error code from the setup helpers.
 */
int ath11k_dp_rx_pdev_mon_attach(struct ath11k *ar)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_mon_data *pmon = &dp->mon_data;
	struct hal_srng *mon_desc_srng = NULL;
	struct dp_srng *dp_srng;
	int ret = 0;
	u32 n_link_desc = 0;

	ret = ath11k_dp_rx_pdev_mon_status_attach(ar);
	if (ret) {
		ath11k_warn(ar->ab, "pdev_mon_status_attach() failed");
		return ret;
	}

	/* Number of link descriptors = ring size / entry size */
	dp_srng = &dp->rxdma_mon_desc_ring;
	n_link_desc = dp_srng->size /
		ath11k_hal_srng_get_entrysize(HAL_RXDMA_MONITOR_DESC);
	mon_desc_srng =
		&ar->ab->hal.srng_list[dp->rxdma_mon_desc_ring.ring_id];

	ret = ath11k_dp_link_desc_setup(ar->ab, pmon->link_desc_banks,
					HAL_RXDMA_MONITOR_DESC, mon_desc_srng,
					n_link_desc);
	if (ret) {
		ath11k_warn(ar->ab, "mon_link_desc_pool_setup() failed");
		return ret;
	}
	/* Out-of-range cookie marks "no buffer seen yet" */
	pmon->mon_last_linkdesc_paddr = 0;
	pmon->mon_last_buf_cookie = DP_RX_DESC_COOKIE_MAX + 1;
	spin_lock_init(&pmon->mon_lock);
	return 0;
}

/* Release the monitor link descriptor banks set up at attach time */
static int ath11k_dp_mon_link_free(struct ath11k *ar)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_mon_data *pmon = &dp->mon_data;

	ath11k_dp_link_desc_cleanup(ar->ab, pmon->link_desc_banks,
				    HAL_RXDMA_MONITOR_DESC,
				    &dp->rxdma_mon_desc_ring);
	return 0;
}

/* Counterpart of ath11k_dp_rx_pdev_mon_attach() */
int ath11k_dp_rx_pdev_mon_detach(struct ath11k *ar)
{
	ath11k_dp_mon_link_free(ar);
	return 0;
}