1d5c65159SKalle Valo // SPDX-License-Identifier: BSD-3-Clause-Clear 2d5c65159SKalle Valo /* 3d5c65159SKalle Valo * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. 4d5c65159SKalle Valo */ 5d5c65159SKalle Valo 6d5c65159SKalle Valo #include <linux/ieee80211.h> 7243874c6SManikanta Pubbisetty #include <crypto/hash.h> 8d5c65159SKalle Valo #include "core.h" 9d5c65159SKalle Valo #include "debug.h" 10d5c65159SKalle Valo #include "hal_desc.h" 11d5c65159SKalle Valo #include "hw.h" 12d5c65159SKalle Valo #include "dp_rx.h" 13d5c65159SKalle Valo #include "hal_rx.h" 14d5c65159SKalle Valo #include "dp_tx.h" 15d5c65159SKalle Valo #include "peer.h" 16d5c65159SKalle Valo 17243874c6SManikanta Pubbisetty #define ATH11K_DP_RX_FRAGMENT_TIMEOUT_MS (2 * HZ) 18243874c6SManikanta Pubbisetty 19d5c65159SKalle Valo static u8 *ath11k_dp_rx_h_80211_hdr(struct hal_rx_desc *desc) 20d5c65159SKalle Valo { 21d5c65159SKalle Valo return desc->hdr_status; 22d5c65159SKalle Valo } 23d5c65159SKalle Valo 24d5c65159SKalle Valo static enum hal_encrypt_type ath11k_dp_rx_h_mpdu_start_enctype(struct hal_rx_desc *desc) 25d5c65159SKalle Valo { 26d5c65159SKalle Valo if (!(__le32_to_cpu(desc->mpdu_start.info1) & 27d5c65159SKalle Valo RX_MPDU_START_INFO1_ENCRYPT_INFO_VALID)) 28d5c65159SKalle Valo return HAL_ENCRYPT_TYPE_OPEN; 29d5c65159SKalle Valo 30d5c65159SKalle Valo return FIELD_GET(RX_MPDU_START_INFO2_ENC_TYPE, 31d5c65159SKalle Valo __le32_to_cpu(desc->mpdu_start.info2)); 32d5c65159SKalle Valo } 33d5c65159SKalle Valo 34243874c6SManikanta Pubbisetty static u8 ath11k_dp_rx_h_msdu_start_decap_type(struct hal_rx_desc *desc) 35d5c65159SKalle Valo { 36243874c6SManikanta Pubbisetty return FIELD_GET(RX_MSDU_START_INFO2_DECAP_FORMAT, 37243874c6SManikanta Pubbisetty __le32_to_cpu(desc->msdu_start.info2)); 38243874c6SManikanta Pubbisetty } 39243874c6SManikanta Pubbisetty 40243874c6SManikanta Pubbisetty static bool ath11k_dp_rx_h_mpdu_start_seq_ctrl_valid(struct hal_rx_desc *desc) 41243874c6SManikanta 
Pubbisetty { 42243874c6SManikanta Pubbisetty return !!FIELD_GET(RX_MPDU_START_INFO1_MPDU_SEQ_CTRL_VALID, 43243874c6SManikanta Pubbisetty __le32_to_cpu(desc->mpdu_start.info1)); 44243874c6SManikanta Pubbisetty } 45243874c6SManikanta Pubbisetty 46243874c6SManikanta Pubbisetty static bool ath11k_dp_rx_h_mpdu_start_fc_valid(struct hal_rx_desc *desc) 47243874c6SManikanta Pubbisetty { 48243874c6SManikanta Pubbisetty return !!FIELD_GET(RX_MPDU_START_INFO1_MPDU_FCTRL_VALID, 49243874c6SManikanta Pubbisetty __le32_to_cpu(desc->mpdu_start.info1)); 50243874c6SManikanta Pubbisetty } 51243874c6SManikanta Pubbisetty 52243874c6SManikanta Pubbisetty static bool ath11k_dp_rx_h_mpdu_start_more_frags(struct sk_buff *skb) 53243874c6SManikanta Pubbisetty { 54243874c6SManikanta Pubbisetty struct ieee80211_hdr *hdr; 55243874c6SManikanta Pubbisetty 56243874c6SManikanta Pubbisetty hdr = (struct ieee80211_hdr *)(skb->data + HAL_RX_DESC_SIZE); 57243874c6SManikanta Pubbisetty return ieee80211_has_morefrags(hdr->frame_control); 58243874c6SManikanta Pubbisetty } 59243874c6SManikanta Pubbisetty 60243874c6SManikanta Pubbisetty static u16 ath11k_dp_rx_h_mpdu_start_frag_no(struct sk_buff *skb) 61243874c6SManikanta Pubbisetty { 62243874c6SManikanta Pubbisetty struct ieee80211_hdr *hdr; 63243874c6SManikanta Pubbisetty 64243874c6SManikanta Pubbisetty hdr = (struct ieee80211_hdr *)(skb->data + HAL_RX_DESC_SIZE); 65243874c6SManikanta Pubbisetty return le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG; 66243874c6SManikanta Pubbisetty } 67243874c6SManikanta Pubbisetty 68243874c6SManikanta Pubbisetty static u16 ath11k_dp_rx_h_mpdu_start_seq_no(struct hal_rx_desc *desc) 69243874c6SManikanta Pubbisetty { 70243874c6SManikanta Pubbisetty return FIELD_GET(RX_MPDU_START_INFO1_MPDU_SEQ_NUM, 71243874c6SManikanta Pubbisetty __le32_to_cpu(desc->mpdu_start.info1)); 72d5c65159SKalle Valo } 73d5c65159SKalle Valo 74d5c65159SKalle Valo static bool ath11k_dp_rx_h_attn_msdu_done(struct hal_rx_desc *desc) 75d5c65159SKalle 
Valo { 76d5c65159SKalle Valo return !!FIELD_GET(RX_ATTENTION_INFO2_MSDU_DONE, 77d5c65159SKalle Valo __le32_to_cpu(desc->attention.info2)); 78d5c65159SKalle Valo } 79d5c65159SKalle Valo 80d5c65159SKalle Valo static bool ath11k_dp_rx_h_attn_first_mpdu(struct hal_rx_desc *desc) 81d5c65159SKalle Valo { 82d5c65159SKalle Valo return !!FIELD_GET(RX_ATTENTION_INFO1_FIRST_MPDU, 83d5c65159SKalle Valo __le32_to_cpu(desc->attention.info1)); 84d5c65159SKalle Valo } 85d5c65159SKalle Valo 86d5c65159SKalle Valo static bool ath11k_dp_rx_h_attn_l4_cksum_fail(struct hal_rx_desc *desc) 87d5c65159SKalle Valo { 88d5c65159SKalle Valo return !!FIELD_GET(RX_ATTENTION_INFO1_TCP_UDP_CKSUM_FAIL, 89d5c65159SKalle Valo __le32_to_cpu(desc->attention.info1)); 90d5c65159SKalle Valo } 91d5c65159SKalle Valo 92d5c65159SKalle Valo static bool ath11k_dp_rx_h_attn_ip_cksum_fail(struct hal_rx_desc *desc) 93d5c65159SKalle Valo { 94d5c65159SKalle Valo return !!FIELD_GET(RX_ATTENTION_INFO1_IP_CKSUM_FAIL, 95d5c65159SKalle Valo __le32_to_cpu(desc->attention.info1)); 96d5c65159SKalle Valo } 97d5c65159SKalle Valo 98d5c65159SKalle Valo static bool ath11k_dp_rx_h_attn_is_decrypted(struct hal_rx_desc *desc) 99d5c65159SKalle Valo { 100d5c65159SKalle Valo return (FIELD_GET(RX_ATTENTION_INFO2_DCRYPT_STATUS_CODE, 101d5c65159SKalle Valo __le32_to_cpu(desc->attention.info2)) == 102d5c65159SKalle Valo RX_DESC_DECRYPT_STATUS_CODE_OK); 103d5c65159SKalle Valo } 104d5c65159SKalle Valo 105d5c65159SKalle Valo static u32 ath11k_dp_rx_h_attn_mpdu_err(struct hal_rx_desc *desc) 106d5c65159SKalle Valo { 107d5c65159SKalle Valo u32 info = __le32_to_cpu(desc->attention.info1); 108d5c65159SKalle Valo u32 errmap = 0; 109d5c65159SKalle Valo 110d5c65159SKalle Valo if (info & RX_ATTENTION_INFO1_FCS_ERR) 111d5c65159SKalle Valo errmap |= DP_RX_MPDU_ERR_FCS; 112d5c65159SKalle Valo 113d5c65159SKalle Valo if (info & RX_ATTENTION_INFO1_DECRYPT_ERR) 114d5c65159SKalle Valo errmap |= DP_RX_MPDU_ERR_DECRYPT; 115d5c65159SKalle Valo 116d5c65159SKalle 
Valo if (info & RX_ATTENTION_INFO1_TKIP_MIC_ERR) 117d5c65159SKalle Valo errmap |= DP_RX_MPDU_ERR_TKIP_MIC; 118d5c65159SKalle Valo 119d5c65159SKalle Valo if (info & RX_ATTENTION_INFO1_A_MSDU_ERROR) 120d5c65159SKalle Valo errmap |= DP_RX_MPDU_ERR_AMSDU_ERR; 121d5c65159SKalle Valo 122d5c65159SKalle Valo if (info & RX_ATTENTION_INFO1_OVERFLOW_ERR) 123d5c65159SKalle Valo errmap |= DP_RX_MPDU_ERR_OVERFLOW; 124d5c65159SKalle Valo 125d5c65159SKalle Valo if (info & RX_ATTENTION_INFO1_MSDU_LEN_ERR) 126d5c65159SKalle Valo errmap |= DP_RX_MPDU_ERR_MSDU_LEN; 127d5c65159SKalle Valo 128d5c65159SKalle Valo if (info & RX_ATTENTION_INFO1_MPDU_LEN_ERR) 129d5c65159SKalle Valo errmap |= DP_RX_MPDU_ERR_MPDU_LEN; 130d5c65159SKalle Valo 131d5c65159SKalle Valo return errmap; 132d5c65159SKalle Valo } 133d5c65159SKalle Valo 134d5c65159SKalle Valo static u16 ath11k_dp_rx_h_msdu_start_msdu_len(struct hal_rx_desc *desc) 135d5c65159SKalle Valo { 136d5c65159SKalle Valo return FIELD_GET(RX_MSDU_START_INFO1_MSDU_LENGTH, 137d5c65159SKalle Valo __le32_to_cpu(desc->msdu_start.info1)); 138d5c65159SKalle Valo } 139d5c65159SKalle Valo 140d5c65159SKalle Valo static u8 ath11k_dp_rx_h_msdu_start_sgi(struct hal_rx_desc *desc) 141d5c65159SKalle Valo { 142d5c65159SKalle Valo return FIELD_GET(RX_MSDU_START_INFO3_SGI, 143d5c65159SKalle Valo __le32_to_cpu(desc->msdu_start.info3)); 144d5c65159SKalle Valo } 145d5c65159SKalle Valo 146d5c65159SKalle Valo static u8 ath11k_dp_rx_h_msdu_start_rate_mcs(struct hal_rx_desc *desc) 147d5c65159SKalle Valo { 148d5c65159SKalle Valo return FIELD_GET(RX_MSDU_START_INFO3_RATE_MCS, 149d5c65159SKalle Valo __le32_to_cpu(desc->msdu_start.info3)); 150d5c65159SKalle Valo } 151d5c65159SKalle Valo 152d5c65159SKalle Valo static u8 ath11k_dp_rx_h_msdu_start_rx_bw(struct hal_rx_desc *desc) 153d5c65159SKalle Valo { 154d5c65159SKalle Valo return FIELD_GET(RX_MSDU_START_INFO3_RECV_BW, 155d5c65159SKalle Valo __le32_to_cpu(desc->msdu_start.info3)); 156d5c65159SKalle Valo } 157d5c65159SKalle Valo 
158d5c65159SKalle Valo static u32 ath11k_dp_rx_h_msdu_start_freq(struct hal_rx_desc *desc) 159d5c65159SKalle Valo { 160d5c65159SKalle Valo return __le32_to_cpu(desc->msdu_start.phy_meta_data); 161d5c65159SKalle Valo } 162d5c65159SKalle Valo 163d5c65159SKalle Valo static u8 ath11k_dp_rx_h_msdu_start_pkt_type(struct hal_rx_desc *desc) 164d5c65159SKalle Valo { 165d5c65159SKalle Valo return FIELD_GET(RX_MSDU_START_INFO3_PKT_TYPE, 166d5c65159SKalle Valo __le32_to_cpu(desc->msdu_start.info3)); 167d5c65159SKalle Valo } 168d5c65159SKalle Valo 169d5c65159SKalle Valo static u8 ath11k_dp_rx_h_msdu_start_nss(struct hal_rx_desc *desc) 170d5c65159SKalle Valo { 171d5c65159SKalle Valo u8 mimo_ss_bitmap = FIELD_GET(RX_MSDU_START_INFO3_MIMO_SS_BITMAP, 172d5c65159SKalle Valo __le32_to_cpu(desc->msdu_start.info3)); 173d5c65159SKalle Valo 174d5c65159SKalle Valo return hweight8(mimo_ss_bitmap); 175d5c65159SKalle Valo } 176d5c65159SKalle Valo 177243874c6SManikanta Pubbisetty static u8 ath11k_dp_rx_h_mpdu_start_tid(struct hal_rx_desc *desc) 178243874c6SManikanta Pubbisetty { 179243874c6SManikanta Pubbisetty return FIELD_GET(RX_MPDU_START_INFO2_TID, 180243874c6SManikanta Pubbisetty __le32_to_cpu(desc->mpdu_start.info2)); 181243874c6SManikanta Pubbisetty } 182243874c6SManikanta Pubbisetty 183243874c6SManikanta Pubbisetty static u16 ath11k_dp_rx_h_mpdu_start_peer_id(struct hal_rx_desc *desc) 184243874c6SManikanta Pubbisetty { 185243874c6SManikanta Pubbisetty return __le16_to_cpu(desc->mpdu_start.sw_peer_id); 186243874c6SManikanta Pubbisetty } 187243874c6SManikanta Pubbisetty 188d5c65159SKalle Valo static u8 ath11k_dp_rx_h_msdu_end_l3pad(struct hal_rx_desc *desc) 189d5c65159SKalle Valo { 190d5c65159SKalle Valo return FIELD_GET(RX_MSDU_END_INFO2_L3_HDR_PADDING, 191d5c65159SKalle Valo __le32_to_cpu(desc->msdu_end.info2)); 192d5c65159SKalle Valo } 193d5c65159SKalle Valo 194d5c65159SKalle Valo static bool ath11k_dp_rx_h_msdu_end_first_msdu(struct hal_rx_desc *desc) 195d5c65159SKalle Valo { 
196d5c65159SKalle Valo return !!FIELD_GET(RX_MSDU_END_INFO2_FIRST_MSDU, 197d5c65159SKalle Valo __le32_to_cpu(desc->msdu_end.info2)); 198d5c65159SKalle Valo } 199d5c65159SKalle Valo 200d5c65159SKalle Valo static bool ath11k_dp_rx_h_msdu_end_last_msdu(struct hal_rx_desc *desc) 201d5c65159SKalle Valo { 202d5c65159SKalle Valo return !!FIELD_GET(RX_MSDU_END_INFO2_LAST_MSDU, 203d5c65159SKalle Valo __le32_to_cpu(desc->msdu_end.info2)); 204d5c65159SKalle Valo } 205d5c65159SKalle Valo 206d5c65159SKalle Valo static void ath11k_dp_rx_desc_end_tlv_copy(struct hal_rx_desc *fdesc, 207d5c65159SKalle Valo struct hal_rx_desc *ldesc) 208d5c65159SKalle Valo { 209d5c65159SKalle Valo memcpy((u8 *)&fdesc->msdu_end, (u8 *)&ldesc->msdu_end, 210d5c65159SKalle Valo sizeof(struct rx_msdu_end)); 211d5c65159SKalle Valo memcpy((u8 *)&fdesc->attention, (u8 *)&ldesc->attention, 212d5c65159SKalle Valo sizeof(struct rx_attention)); 213d5c65159SKalle Valo memcpy((u8 *)&fdesc->mpdu_end, (u8 *)&ldesc->mpdu_end, 214d5c65159SKalle Valo sizeof(struct rx_mpdu_end)); 215d5c65159SKalle Valo } 216d5c65159SKalle Valo 217d5c65159SKalle Valo static u32 ath11k_dp_rxdesc_get_mpdulen_err(struct hal_rx_desc *rx_desc) 218d5c65159SKalle Valo { 219d5c65159SKalle Valo struct rx_attention *rx_attn; 220d5c65159SKalle Valo 221d5c65159SKalle Valo rx_attn = &rx_desc->attention; 222d5c65159SKalle Valo 223d5c65159SKalle Valo return FIELD_GET(RX_ATTENTION_INFO1_MPDU_LEN_ERR, 224d5c65159SKalle Valo __le32_to_cpu(rx_attn->info1)); 225d5c65159SKalle Valo } 226d5c65159SKalle Valo 227d5c65159SKalle Valo static u32 ath11k_dp_rxdesc_get_decap_format(struct hal_rx_desc *rx_desc) 228d5c65159SKalle Valo { 229d5c65159SKalle Valo struct rx_msdu_start *rx_msdu_start; 230d5c65159SKalle Valo 231d5c65159SKalle Valo rx_msdu_start = &rx_desc->msdu_start; 232d5c65159SKalle Valo 233d5c65159SKalle Valo return FIELD_GET(RX_MSDU_START_INFO2_DECAP_FORMAT, 234d5c65159SKalle Valo __le32_to_cpu(rx_msdu_start->info2)); 235d5c65159SKalle Valo } 
236d5c65159SKalle Valo 237d5c65159SKalle Valo static u8 *ath11k_dp_rxdesc_get_80211hdr(struct hal_rx_desc *rx_desc) 238d5c65159SKalle Valo { 239d5c65159SKalle Valo u8 *rx_pkt_hdr; 240d5c65159SKalle Valo 241d5c65159SKalle Valo rx_pkt_hdr = &rx_desc->msdu_payload[0]; 242d5c65159SKalle Valo 243d5c65159SKalle Valo return rx_pkt_hdr; 244d5c65159SKalle Valo } 245d5c65159SKalle Valo 246d5c65159SKalle Valo static bool ath11k_dp_rxdesc_mpdu_valid(struct hal_rx_desc *rx_desc) 247d5c65159SKalle Valo { 248d5c65159SKalle Valo u32 tlv_tag; 249d5c65159SKalle Valo 250d5c65159SKalle Valo tlv_tag = FIELD_GET(HAL_TLV_HDR_TAG, 251d5c65159SKalle Valo __le32_to_cpu(rx_desc->mpdu_start_tag)); 252d5c65159SKalle Valo 253d5c65159SKalle Valo return tlv_tag == HAL_RX_MPDU_START ? true : false; 254d5c65159SKalle Valo } 255d5c65159SKalle Valo 256d5c65159SKalle Valo static u32 ath11k_dp_rxdesc_get_ppduid(struct hal_rx_desc *rx_desc) 257d5c65159SKalle Valo { 258d5c65159SKalle Valo return __le16_to_cpu(rx_desc->mpdu_start.phy_ppdu_id); 259d5c65159SKalle Valo } 260d5c65159SKalle Valo 261d5c65159SKalle Valo /* Returns number of Rx buffers replenished */ 262d5c65159SKalle Valo int ath11k_dp_rxbufs_replenish(struct ath11k_base *ab, int mac_id, 263d5c65159SKalle Valo struct dp_rxdma_ring *rx_ring, 264d5c65159SKalle Valo int req_entries, 265d5c65159SKalle Valo enum hal_rx_buf_return_buf_manager mgr, 266d5c65159SKalle Valo gfp_t gfp) 267d5c65159SKalle Valo { 268d5c65159SKalle Valo struct hal_srng *srng; 269d5c65159SKalle Valo u32 *desc; 270d5c65159SKalle Valo struct sk_buff *skb; 271d5c65159SKalle Valo int num_free; 272d5c65159SKalle Valo int num_remain; 273d5c65159SKalle Valo int buf_id; 274d5c65159SKalle Valo u32 cookie; 275d5c65159SKalle Valo dma_addr_t paddr; 276d5c65159SKalle Valo 277d5c65159SKalle Valo req_entries = min(req_entries, rx_ring->bufs_max); 278d5c65159SKalle Valo 279d5c65159SKalle Valo srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id]; 280d5c65159SKalle Valo 281d5c65159SKalle 
Valo spin_lock_bh(&srng->lock); 282d5c65159SKalle Valo 283d5c65159SKalle Valo ath11k_hal_srng_access_begin(ab, srng); 284d5c65159SKalle Valo 285d5c65159SKalle Valo num_free = ath11k_hal_srng_src_num_free(ab, srng, true); 286d5c65159SKalle Valo if (!req_entries && (num_free > (rx_ring->bufs_max * 3) / 4)) 287d5c65159SKalle Valo req_entries = num_free; 288d5c65159SKalle Valo 289d5c65159SKalle Valo req_entries = min(num_free, req_entries); 290d5c65159SKalle Valo num_remain = req_entries; 291d5c65159SKalle Valo 292d5c65159SKalle Valo while (num_remain > 0) { 293d5c65159SKalle Valo skb = dev_alloc_skb(DP_RX_BUFFER_SIZE + 294d5c65159SKalle Valo DP_RX_BUFFER_ALIGN_SIZE); 295d5c65159SKalle Valo if (!skb) 296d5c65159SKalle Valo break; 297d5c65159SKalle Valo 298d5c65159SKalle Valo if (!IS_ALIGNED((unsigned long)skb->data, 299d5c65159SKalle Valo DP_RX_BUFFER_ALIGN_SIZE)) { 300d5c65159SKalle Valo skb_pull(skb, 301d5c65159SKalle Valo PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) - 302d5c65159SKalle Valo skb->data); 303d5c65159SKalle Valo } 304d5c65159SKalle Valo 305d5c65159SKalle Valo paddr = dma_map_single(ab->dev, skb->data, 306d5c65159SKalle Valo skb->len + skb_tailroom(skb), 307d5c65159SKalle Valo DMA_FROM_DEVICE); 308d5c65159SKalle Valo if (dma_mapping_error(ab->dev, paddr)) 309d5c65159SKalle Valo goto fail_free_skb; 310d5c65159SKalle Valo 311d5c65159SKalle Valo spin_lock_bh(&rx_ring->idr_lock); 312d5c65159SKalle Valo buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0, 313d5c65159SKalle Valo rx_ring->bufs_max * 3, gfp); 314d5c65159SKalle Valo spin_unlock_bh(&rx_ring->idr_lock); 315d5c65159SKalle Valo if (buf_id < 0) 316d5c65159SKalle Valo goto fail_dma_unmap; 317d5c65159SKalle Valo 318d5c65159SKalle Valo desc = ath11k_hal_srng_src_get_next_entry(ab, srng); 319d5c65159SKalle Valo if (!desc) 320d5c65159SKalle Valo goto fail_idr_remove; 321d5c65159SKalle Valo 322d5c65159SKalle Valo ATH11K_SKB_RXCB(skb)->paddr = paddr; 323d5c65159SKalle Valo 324d5c65159SKalle Valo cookie = 
FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) | 325d5c65159SKalle Valo FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id); 326d5c65159SKalle Valo 327d5c65159SKalle Valo num_remain--; 328d5c65159SKalle Valo 329d5c65159SKalle Valo ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr); 330d5c65159SKalle Valo } 331d5c65159SKalle Valo 332d5c65159SKalle Valo ath11k_hal_srng_access_end(ab, srng); 333d5c65159SKalle Valo 334d5c65159SKalle Valo spin_unlock_bh(&srng->lock); 335d5c65159SKalle Valo 336d5c65159SKalle Valo return req_entries - num_remain; 337d5c65159SKalle Valo 338d5c65159SKalle Valo fail_idr_remove: 339d5c65159SKalle Valo spin_lock_bh(&rx_ring->idr_lock); 340d5c65159SKalle Valo idr_remove(&rx_ring->bufs_idr, buf_id); 341d5c65159SKalle Valo spin_unlock_bh(&rx_ring->idr_lock); 342d5c65159SKalle Valo fail_dma_unmap: 343d5c65159SKalle Valo dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb), 344d5c65159SKalle Valo DMA_FROM_DEVICE); 345d5c65159SKalle Valo fail_free_skb: 346d5c65159SKalle Valo dev_kfree_skb_any(skb); 347d5c65159SKalle Valo 348d5c65159SKalle Valo ath11k_hal_srng_access_end(ab, srng); 349d5c65159SKalle Valo 350d5c65159SKalle Valo spin_unlock_bh(&srng->lock); 351d5c65159SKalle Valo 352d5c65159SKalle Valo return req_entries - num_remain; 353d5c65159SKalle Valo } 354d5c65159SKalle Valo 355d5c65159SKalle Valo static int ath11k_dp_rxdma_buf_ring_free(struct ath11k *ar, 356d5c65159SKalle Valo struct dp_rxdma_ring *rx_ring) 357d5c65159SKalle Valo { 358d5c65159SKalle Valo struct ath11k_pdev_dp *dp = &ar->dp; 359d5c65159SKalle Valo struct sk_buff *skb; 360d5c65159SKalle Valo int buf_id; 361d5c65159SKalle Valo 362d5c65159SKalle Valo spin_lock_bh(&rx_ring->idr_lock); 363d5c65159SKalle Valo idr_for_each_entry(&rx_ring->bufs_idr, skb, buf_id) { 364d5c65159SKalle Valo idr_remove(&rx_ring->bufs_idr, buf_id); 365d5c65159SKalle Valo /* TODO: Understand where internal driver does this dma_unmap of 366d5c65159SKalle Valo * of rxdma_buffer. 
367d5c65159SKalle Valo */ 368d5c65159SKalle Valo dma_unmap_single(ar->ab->dev, ATH11K_SKB_RXCB(skb)->paddr, 369d5c65159SKalle Valo skb->len + skb_tailroom(skb), DMA_FROM_DEVICE); 370d5c65159SKalle Valo dev_kfree_skb_any(skb); 371d5c65159SKalle Valo } 372d5c65159SKalle Valo 373d5c65159SKalle Valo idr_destroy(&rx_ring->bufs_idr); 374d5c65159SKalle Valo spin_unlock_bh(&rx_ring->idr_lock); 375d5c65159SKalle Valo 376d5c65159SKalle Valo rx_ring = &dp->rx_mon_status_refill_ring; 377d5c65159SKalle Valo 378d5c65159SKalle Valo spin_lock_bh(&rx_ring->idr_lock); 379d5c65159SKalle Valo idr_for_each_entry(&rx_ring->bufs_idr, skb, buf_id) { 380d5c65159SKalle Valo idr_remove(&rx_ring->bufs_idr, buf_id); 381d5c65159SKalle Valo /* XXX: Understand where internal driver does this dma_unmap of 382d5c65159SKalle Valo * of rxdma_buffer. 383d5c65159SKalle Valo */ 384d5c65159SKalle Valo dma_unmap_single(ar->ab->dev, ATH11K_SKB_RXCB(skb)->paddr, 385d5c65159SKalle Valo skb->len + skb_tailroom(skb), DMA_BIDIRECTIONAL); 386d5c65159SKalle Valo dev_kfree_skb_any(skb); 387d5c65159SKalle Valo } 388d5c65159SKalle Valo 389d5c65159SKalle Valo idr_destroy(&rx_ring->bufs_idr); 390d5c65159SKalle Valo spin_unlock_bh(&rx_ring->idr_lock); 391d5c65159SKalle Valo return 0; 392d5c65159SKalle Valo } 393d5c65159SKalle Valo 394d5c65159SKalle Valo static int ath11k_dp_rxdma_pdev_buf_free(struct ath11k *ar) 395d5c65159SKalle Valo { 396d5c65159SKalle Valo struct ath11k_pdev_dp *dp = &ar->dp; 397d5c65159SKalle Valo struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring; 398d5c65159SKalle Valo 399d5c65159SKalle Valo ath11k_dp_rxdma_buf_ring_free(ar, rx_ring); 400d5c65159SKalle Valo 401d5c65159SKalle Valo rx_ring = &dp->rxdma_mon_buf_ring; 402d5c65159SKalle Valo ath11k_dp_rxdma_buf_ring_free(ar, rx_ring); 403d5c65159SKalle Valo 404d5c65159SKalle Valo rx_ring = &dp->rx_mon_status_refill_ring; 405d5c65159SKalle Valo ath11k_dp_rxdma_buf_ring_free(ar, rx_ring); 406d5c65159SKalle Valo return 0; 407d5c65159SKalle Valo } 
408d5c65159SKalle Valo 409d5c65159SKalle Valo static int ath11k_dp_rxdma_ring_buf_setup(struct ath11k *ar, 410d5c65159SKalle Valo struct dp_rxdma_ring *rx_ring, 411d5c65159SKalle Valo u32 ringtype) 412d5c65159SKalle Valo { 413d5c65159SKalle Valo struct ath11k_pdev_dp *dp = &ar->dp; 414d5c65159SKalle Valo int num_entries; 415d5c65159SKalle Valo 416d5c65159SKalle Valo num_entries = rx_ring->refill_buf_ring.size / 417d5c65159SKalle Valo ath11k_hal_srng_get_entrysize(ringtype); 418d5c65159SKalle Valo 419d5c65159SKalle Valo rx_ring->bufs_max = num_entries; 420d5c65159SKalle Valo ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id, rx_ring, num_entries, 421d5c65159SKalle Valo HAL_RX_BUF_RBM_SW3_BM, GFP_KERNEL); 422d5c65159SKalle Valo return 0; 423d5c65159SKalle Valo } 424d5c65159SKalle Valo 425d5c65159SKalle Valo static int ath11k_dp_rxdma_pdev_buf_setup(struct ath11k *ar) 426d5c65159SKalle Valo { 427d5c65159SKalle Valo struct ath11k_pdev_dp *dp = &ar->dp; 428d5c65159SKalle Valo struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring; 429d5c65159SKalle Valo 430d5c65159SKalle Valo ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_BUF); 431d5c65159SKalle Valo 432d5c65159SKalle Valo rx_ring = &dp->rxdma_mon_buf_ring; 433d5c65159SKalle Valo ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_MONITOR_BUF); 434d5c65159SKalle Valo 435d5c65159SKalle Valo rx_ring = &dp->rx_mon_status_refill_ring; 436d5c65159SKalle Valo ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_MONITOR_STATUS); 437d5c65159SKalle Valo 438d5c65159SKalle Valo return 0; 439d5c65159SKalle Valo } 440d5c65159SKalle Valo 441d5c65159SKalle Valo static void ath11k_dp_rx_pdev_srng_free(struct ath11k *ar) 442d5c65159SKalle Valo { 443d5c65159SKalle Valo struct ath11k_pdev_dp *dp = &ar->dp; 444d5c65159SKalle Valo 445d5c65159SKalle Valo ath11k_dp_srng_cleanup(ar->ab, &dp->rx_refill_buf_ring.refill_buf_ring); 446d5c65159SKalle Valo ath11k_dp_srng_cleanup(ar->ab, &dp->rxdma_err_dst_ring); 447d5c65159SKalle Valo 
ath11k_dp_srng_cleanup(ar->ab, &dp->rx_mon_status_refill_ring.refill_buf_ring); 448d5c65159SKalle Valo ath11k_dp_srng_cleanup(ar->ab, &dp->rxdma_mon_buf_ring.refill_buf_ring); 449d5c65159SKalle Valo } 450d5c65159SKalle Valo 4519c57d7e3SVasanthakumar Thiagarajan void ath11k_dp_pdev_reo_cleanup(struct ath11k_base *ab) 4529c57d7e3SVasanthakumar Thiagarajan { 4539c57d7e3SVasanthakumar Thiagarajan struct ath11k_pdev_dp *dp; 4549c57d7e3SVasanthakumar Thiagarajan struct ath11k *ar; 4559c57d7e3SVasanthakumar Thiagarajan int i; 4569c57d7e3SVasanthakumar Thiagarajan 4579c57d7e3SVasanthakumar Thiagarajan for (i = 0; i < ab->num_radios; i++) { 4589c57d7e3SVasanthakumar Thiagarajan ar = ab->pdevs[i].ar; 4599c57d7e3SVasanthakumar Thiagarajan dp = &ar->dp; 4609c57d7e3SVasanthakumar Thiagarajan ath11k_dp_srng_cleanup(ab, &dp->reo_dst_ring); 4619c57d7e3SVasanthakumar Thiagarajan } 4629c57d7e3SVasanthakumar Thiagarajan } 4639c57d7e3SVasanthakumar Thiagarajan 4649c57d7e3SVasanthakumar Thiagarajan int ath11k_dp_pdev_reo_setup(struct ath11k_base *ab) 4659c57d7e3SVasanthakumar Thiagarajan { 4669c57d7e3SVasanthakumar Thiagarajan struct ath11k *ar; 4679c57d7e3SVasanthakumar Thiagarajan struct ath11k_pdev_dp *dp; 4689c57d7e3SVasanthakumar Thiagarajan int ret; 4699c57d7e3SVasanthakumar Thiagarajan int i; 4709c57d7e3SVasanthakumar Thiagarajan 4719c57d7e3SVasanthakumar Thiagarajan for (i = 0; i < ab->num_radios; i++) { 4729c57d7e3SVasanthakumar Thiagarajan ar = ab->pdevs[i].ar; 4739c57d7e3SVasanthakumar Thiagarajan dp = &ar->dp; 4749c57d7e3SVasanthakumar Thiagarajan ret = ath11k_dp_srng_setup(ab, &dp->reo_dst_ring, HAL_REO_DST, 4759c57d7e3SVasanthakumar Thiagarajan dp->mac_id, dp->mac_id, 4769c57d7e3SVasanthakumar Thiagarajan DP_REO_DST_RING_SIZE); 4779c57d7e3SVasanthakumar Thiagarajan if (ret) { 4789c57d7e3SVasanthakumar Thiagarajan ath11k_warn(ar->ab, "failed to setup reo_dst_ring\n"); 4799c57d7e3SVasanthakumar Thiagarajan goto err_reo_cleanup; 4809c57d7e3SVasanthakumar Thiagarajan } 
4819c57d7e3SVasanthakumar Thiagarajan } 4829c57d7e3SVasanthakumar Thiagarajan 4839c57d7e3SVasanthakumar Thiagarajan return 0; 4849c57d7e3SVasanthakumar Thiagarajan 4859c57d7e3SVasanthakumar Thiagarajan err_reo_cleanup: 4869c57d7e3SVasanthakumar Thiagarajan ath11k_dp_pdev_reo_cleanup(ab); 4879c57d7e3SVasanthakumar Thiagarajan 4889c57d7e3SVasanthakumar Thiagarajan return ret; 4899c57d7e3SVasanthakumar Thiagarajan } 4909c57d7e3SVasanthakumar Thiagarajan 491d5c65159SKalle Valo static int ath11k_dp_rx_pdev_srng_alloc(struct ath11k *ar) 492d5c65159SKalle Valo { 493d5c65159SKalle Valo struct ath11k_pdev_dp *dp = &ar->dp; 494d5c65159SKalle Valo struct dp_srng *srng = NULL; 495d5c65159SKalle Valo int ret; 496d5c65159SKalle Valo 497d5c65159SKalle Valo ret = ath11k_dp_srng_setup(ar->ab, 498d5c65159SKalle Valo &dp->rx_refill_buf_ring.refill_buf_ring, 499d5c65159SKalle Valo HAL_RXDMA_BUF, 0, 500d5c65159SKalle Valo dp->mac_id, DP_RXDMA_BUF_RING_SIZE); 501d5c65159SKalle Valo if (ret) { 502d5c65159SKalle Valo ath11k_warn(ar->ab, "failed to setup rx_refill_buf_ring\n"); 503d5c65159SKalle Valo return ret; 504d5c65159SKalle Valo } 505d5c65159SKalle Valo 506d5c65159SKalle Valo ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_err_dst_ring, 507d5c65159SKalle Valo HAL_RXDMA_DST, 0, dp->mac_id, 508d5c65159SKalle Valo DP_RXDMA_ERR_DST_RING_SIZE); 509d5c65159SKalle Valo if (ret) { 510d5c65159SKalle Valo ath11k_warn(ar->ab, "failed to setup rxdma_err_dst_ring\n"); 511d5c65159SKalle Valo return ret; 512d5c65159SKalle Valo } 513d5c65159SKalle Valo 514d5c65159SKalle Valo srng = &dp->rx_mon_status_refill_ring.refill_buf_ring; 515d5c65159SKalle Valo ret = ath11k_dp_srng_setup(ar->ab, 516d5c65159SKalle Valo srng, 517d5c65159SKalle Valo HAL_RXDMA_MONITOR_STATUS, 0, dp->mac_id, 518d5c65159SKalle Valo DP_RXDMA_MON_STATUS_RING_SIZE); 519d5c65159SKalle Valo if (ret) { 520d5c65159SKalle Valo ath11k_warn(ar->ab, 521d5c65159SKalle Valo "failed to setup rx_mon_status_refill_ring\n"); 522d5c65159SKalle Valo 
return ret; 523d5c65159SKalle Valo } 524d5c65159SKalle Valo ret = ath11k_dp_srng_setup(ar->ab, 525d5c65159SKalle Valo &dp->rxdma_mon_buf_ring.refill_buf_ring, 526d5c65159SKalle Valo HAL_RXDMA_MONITOR_BUF, 0, dp->mac_id, 527d5c65159SKalle Valo DP_RXDMA_MONITOR_BUF_RING_SIZE); 528d5c65159SKalle Valo if (ret) { 529d5c65159SKalle Valo ath11k_warn(ar->ab, 530d5c65159SKalle Valo "failed to setup HAL_RXDMA_MONITOR_BUF\n"); 531d5c65159SKalle Valo return ret; 532d5c65159SKalle Valo } 533d5c65159SKalle Valo 534d5c65159SKalle Valo ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_mon_dst_ring, 535d5c65159SKalle Valo HAL_RXDMA_MONITOR_DST, 0, dp->mac_id, 536d5c65159SKalle Valo DP_RXDMA_MONITOR_DST_RING_SIZE); 537d5c65159SKalle Valo if (ret) { 538d5c65159SKalle Valo ath11k_warn(ar->ab, 539d5c65159SKalle Valo "failed to setup HAL_RXDMA_MONITOR_DST\n"); 540d5c65159SKalle Valo return ret; 541d5c65159SKalle Valo } 542d5c65159SKalle Valo 543d5c65159SKalle Valo ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_mon_desc_ring, 544d5c65159SKalle Valo HAL_RXDMA_MONITOR_DESC, 0, dp->mac_id, 545d5c65159SKalle Valo DP_RXDMA_MONITOR_DESC_RING_SIZE); 546d5c65159SKalle Valo if (ret) { 547d5c65159SKalle Valo ath11k_warn(ar->ab, 548d5c65159SKalle Valo "failed to setup HAL_RXDMA_MONITOR_DESC\n"); 549d5c65159SKalle Valo return ret; 550d5c65159SKalle Valo } 551d5c65159SKalle Valo 552d5c65159SKalle Valo return 0; 553d5c65159SKalle Valo } 554d5c65159SKalle Valo 555d5c65159SKalle Valo void ath11k_dp_reo_cmd_list_cleanup(struct ath11k_base *ab) 556d5c65159SKalle Valo { 557d5c65159SKalle Valo struct ath11k_dp *dp = &ab->dp; 558d5c65159SKalle Valo struct dp_reo_cmd *cmd, *tmp; 559d5c65159SKalle Valo struct dp_reo_cache_flush_elem *cmd_cache, *tmp_cache; 560d5c65159SKalle Valo 561d5c65159SKalle Valo spin_lock_bh(&dp->reo_cmd_lock); 562d5c65159SKalle Valo list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) { 563d5c65159SKalle Valo list_del(&cmd->list); 564d5c65159SKalle Valo dma_unmap_single(ab->dev, 
cmd->data.paddr, 565d5c65159SKalle Valo cmd->data.size, DMA_BIDIRECTIONAL); 566d5c65159SKalle Valo kfree(cmd->data.vaddr); 567d5c65159SKalle Valo kfree(cmd); 568d5c65159SKalle Valo } 569d5c65159SKalle Valo 570d5c65159SKalle Valo list_for_each_entry_safe(cmd_cache, tmp_cache, 571d5c65159SKalle Valo &dp->reo_cmd_cache_flush_list, list) { 572d5c65159SKalle Valo list_del(&cmd_cache->list); 573d5c65159SKalle Valo dma_unmap_single(ab->dev, cmd_cache->data.paddr, 574d5c65159SKalle Valo cmd_cache->data.size, DMA_BIDIRECTIONAL); 575d5c65159SKalle Valo kfree(cmd_cache->data.vaddr); 576d5c65159SKalle Valo kfree(cmd_cache); 577d5c65159SKalle Valo } 578d5c65159SKalle Valo spin_unlock_bh(&dp->reo_cmd_lock); 579d5c65159SKalle Valo } 580d5c65159SKalle Valo 581d5c65159SKalle Valo static void ath11k_dp_reo_cmd_free(struct ath11k_dp *dp, void *ctx, 582d5c65159SKalle Valo enum hal_reo_cmd_status status) 583d5c65159SKalle Valo { 584d5c65159SKalle Valo struct dp_rx_tid *rx_tid = ctx; 585d5c65159SKalle Valo 586d5c65159SKalle Valo if (status != HAL_REO_CMD_SUCCESS) 587d5c65159SKalle Valo ath11k_warn(dp->ab, "failed to flush rx tid hw desc, tid %d status %d\n", 588d5c65159SKalle Valo rx_tid->tid, status); 589d5c65159SKalle Valo 590d5c65159SKalle Valo dma_unmap_single(dp->ab->dev, rx_tid->paddr, rx_tid->size, 591d5c65159SKalle Valo DMA_BIDIRECTIONAL); 592d5c65159SKalle Valo kfree(rx_tid->vaddr); 593d5c65159SKalle Valo } 594d5c65159SKalle Valo 595d5c65159SKalle Valo static void ath11k_dp_reo_cache_flush(struct ath11k_base *ab, 596d5c65159SKalle Valo struct dp_rx_tid *rx_tid) 597d5c65159SKalle Valo { 598d5c65159SKalle Valo struct ath11k_hal_reo_cmd cmd = {0}; 599d5c65159SKalle Valo unsigned long tot_desc_sz, desc_sz; 600d5c65159SKalle Valo int ret; 601d5c65159SKalle Valo 602d5c65159SKalle Valo tot_desc_sz = rx_tid->size; 603d5c65159SKalle Valo desc_sz = ath11k_hal_reo_qdesc_size(0, HAL_DESC_REO_NON_QOS_TID); 604d5c65159SKalle Valo 605d5c65159SKalle Valo while (tot_desc_sz > desc_sz) { 
606d5c65159SKalle Valo tot_desc_sz -= desc_sz; 607d5c65159SKalle Valo cmd.addr_lo = lower_32_bits(rx_tid->paddr + tot_desc_sz); 608d5c65159SKalle Valo cmd.addr_hi = upper_32_bits(rx_tid->paddr); 609d5c65159SKalle Valo ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid, 610d5c65159SKalle Valo HAL_REO_CMD_FLUSH_CACHE, &cmd, 611d5c65159SKalle Valo NULL); 612d5c65159SKalle Valo if (ret) 613d5c65159SKalle Valo ath11k_warn(ab, 614d5c65159SKalle Valo "failed to send HAL_REO_CMD_FLUSH_CACHE, tid %d (%d)\n", 615d5c65159SKalle Valo rx_tid->tid, ret); 616d5c65159SKalle Valo } 617d5c65159SKalle Valo 618d5c65159SKalle Valo memset(&cmd, 0, sizeof(cmd)); 619d5c65159SKalle Valo cmd.addr_lo = lower_32_bits(rx_tid->paddr); 620d5c65159SKalle Valo cmd.addr_hi = upper_32_bits(rx_tid->paddr); 621d5c65159SKalle Valo cmd.flag |= HAL_REO_CMD_FLG_NEED_STATUS; 622d5c65159SKalle Valo ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid, 623d5c65159SKalle Valo HAL_REO_CMD_FLUSH_CACHE, 624d5c65159SKalle Valo &cmd, ath11k_dp_reo_cmd_free); 625d5c65159SKalle Valo if (ret) { 626d5c65159SKalle Valo ath11k_err(ab, "failed to send HAL_REO_CMD_FLUSH_CACHE cmd, tid %d (%d)\n", 627d5c65159SKalle Valo rx_tid->tid, ret); 628d5c65159SKalle Valo dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size, 629d5c65159SKalle Valo DMA_BIDIRECTIONAL); 630d5c65159SKalle Valo kfree(rx_tid->vaddr); 631d5c65159SKalle Valo } 632d5c65159SKalle Valo } 633d5c65159SKalle Valo 634d5c65159SKalle Valo static void ath11k_dp_rx_tid_del_func(struct ath11k_dp *dp, void *ctx, 635d5c65159SKalle Valo enum hal_reo_cmd_status status) 636d5c65159SKalle Valo { 637d5c65159SKalle Valo struct ath11k_base *ab = dp->ab; 638d5c65159SKalle Valo struct dp_rx_tid *rx_tid = ctx; 639d5c65159SKalle Valo struct dp_reo_cache_flush_elem *elem, *tmp; 640d5c65159SKalle Valo 641d5c65159SKalle Valo if (status == HAL_REO_CMD_DRAIN) { 642d5c65159SKalle Valo goto free_desc; 643d5c65159SKalle Valo } else if (status != HAL_REO_CMD_SUCCESS) { 644d5c65159SKalle Valo /* Shouldn't 
happen! Cleanup in case of other failure? */ 645d5c65159SKalle Valo ath11k_warn(ab, "failed to delete rx tid %d hw descriptor %d\n", 646d5c65159SKalle Valo rx_tid->tid, status); 647d5c65159SKalle Valo return; 648d5c65159SKalle Valo } 649d5c65159SKalle Valo 650d5c65159SKalle Valo elem = kzalloc(sizeof(*elem), GFP_ATOMIC); 651d5c65159SKalle Valo if (!elem) 652d5c65159SKalle Valo goto free_desc; 653d5c65159SKalle Valo 654d5c65159SKalle Valo elem->ts = jiffies; 655d5c65159SKalle Valo memcpy(&elem->data, rx_tid, sizeof(*rx_tid)); 656d5c65159SKalle Valo 657d5c65159SKalle Valo spin_lock_bh(&dp->reo_cmd_lock); 658d5c65159SKalle Valo list_add_tail(&elem->list, &dp->reo_cmd_cache_flush_list); 659d5c65159SKalle Valo spin_unlock_bh(&dp->reo_cmd_lock); 660d5c65159SKalle Valo 661d5c65159SKalle Valo /* Flush and invalidate aged REO desc from HW cache */ 662d5c65159SKalle Valo spin_lock_bh(&dp->reo_cmd_lock); 663d5c65159SKalle Valo list_for_each_entry_safe(elem, tmp, &dp->reo_cmd_cache_flush_list, 664d5c65159SKalle Valo list) { 665d5c65159SKalle Valo if (time_after(jiffies, elem->ts + 666d5c65159SKalle Valo msecs_to_jiffies(DP_REO_DESC_FREE_TIMEOUT_MS))) { 667d5c65159SKalle Valo list_del(&elem->list); 668d5c65159SKalle Valo spin_unlock_bh(&dp->reo_cmd_lock); 669d5c65159SKalle Valo 670d5c65159SKalle Valo ath11k_dp_reo_cache_flush(ab, &elem->data); 671d5c65159SKalle Valo kfree(elem); 672d5c65159SKalle Valo spin_lock_bh(&dp->reo_cmd_lock); 673d5c65159SKalle Valo } 674d5c65159SKalle Valo } 675d5c65159SKalle Valo spin_unlock_bh(&dp->reo_cmd_lock); 676d5c65159SKalle Valo 677d5c65159SKalle Valo return; 678d5c65159SKalle Valo free_desc: 679d5c65159SKalle Valo dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size, 680d5c65159SKalle Valo DMA_BIDIRECTIONAL); 681d5c65159SKalle Valo kfree(rx_tid->vaddr); 682d5c65159SKalle Valo } 683d5c65159SKalle Valo 684a36adf54SGovindaraj Saminathan void ath11k_peer_rx_tid_delete(struct ath11k *ar, 685d5c65159SKalle Valo struct ath11k_peer *peer, u8 tid) 
{
	struct ath11k_hal_reo_cmd cmd = {0};
	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
	int ret;

	/* Nothing to tear down if the tid queue was never set up */
	if (!rx_tid->active)
		return;

	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
	cmd.addr_lo = lower_32_bits(rx_tid->paddr);
	cmd.addr_hi = upper_32_bits(rx_tid->paddr);
	cmd.upd0 |= HAL_REO_CMD_UPD0_VLD;
	ret = ath11k_dp_tx_send_reo_cmd(ar->ab, rx_tid,
					HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
					ath11k_dp_rx_tid_del_func);
	if (ret) {
		ath11k_err(ar->ab, "failed to send HAL_REO_CMD_UPDATE_RX_QUEUE cmd, tid %d (%d)\n",
			   tid, ret);
		/* Completion callback will not run; release the qdesc now */
		dma_unmap_single(ar->ab->dev, rx_tid->paddr, rx_tid->size,
				 DMA_BIDIRECTIONAL);
		kfree(rx_tid->vaddr);
	}

	rx_tid->active = false;
}

/* Return an MSDU link descriptor to hw through the WBM release ring with
 * the given buffer-manager @action. Returns -ENOBUFS when no free entry
 * is available in the ring.
 */
static int ath11k_dp_rx_link_desc_return(struct ath11k_base *ab,
					 u32 *link_desc,
					 enum hal_wbm_rel_bm_act action)
{
	struct ath11k_dp *dp = &ab->dp;
	struct hal_srng *srng;
	u32 *desc;
	int ret = 0;

	srng = &ab->hal.srng_list[dp->wbm_desc_rel_ring.ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
	if (!desc) {
		ret = -ENOBUFS;
		goto exit;
	}

	ath11k_hal_rx_msdu_link_desc_set(ab, (void *)desc, (void *)link_desc,
					 action);

exit:
	/* access_end must run even on failure to finish the srng access */
	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	return ret;
}

/* Reset the per-tid defragmentation state: optionally hand the saved
 * destination-ring link descriptor back to the idle list, then clear the
 * fragment tracking fields and drop any queued fragment skbs.
 * Caller must hold ab->base_lock.
 */
static void ath11k_dp_rx_frags_cleanup(struct dp_rx_tid *rx_tid, bool rel_link_desc)
{
	struct ath11k_base *ab = rx_tid->ab;

	lockdep_assert_held(&ab->base_lock);

	if (rx_tid->dst_ring_desc) {
		if (rel_link_desc)
			ath11k_dp_rx_link_desc_return(ab, (u32 *)rx_tid->dst_ring_desc,
						      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
		kfree(rx_tid->dst_ring_desc);
		rx_tid->dst_ring_desc = NULL;
	}

	rx_tid->cur_sn = 0;
	rx_tid->last_frag_no = 0;
	rx_tid->rx_frag_bitmap = 0;
	__skb_queue_purge(&rx_tid->rx_frags);
}

/* Tear down all rx tids of @peer: delete each hw reorder queue, clean up
 * pending fragments and stop the per-tid frag timers.
 * Caller must hold ab->base_lock; the lock is dropped around
 * del_timer_sync() — presumably because the timer callback takes the same
 * lock and would deadlock otherwise (TODO confirm against the frag timer
 * handler).
 */
void ath11k_peer_rx_tid_cleanup(struct ath11k *ar, struct ath11k_peer *peer)
{
	struct dp_rx_tid *rx_tid;
	int i;

	lockdep_assert_held(&ar->ab->base_lock);

	for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
		rx_tid = &peer->rx_tid[i];

		ath11k_peer_rx_tid_delete(ar, peer, i);
		ath11k_dp_rx_frags_cleanup(rx_tid, true);

		spin_unlock_bh(&ar->ab->base_lock);
		del_timer_sync(&rx_tid->frag_timer);
		spin_lock_bh(&ar->ab->base_lock);
	}
}

/* Update the BA window size — and, when @update_ssn is set, the starting
 * sequence number — of an existing hw rx reorder queue via
 * HAL_REO_CMD_UPDATE_RX_QUEUE. Returns 0 on success, negative errno on
 * command send failure.
 */
static int ath11k_peer_rx_tid_reo_update(struct ath11k *ar,
					 struct ath11k_peer *peer,
					 struct dp_rx_tid *rx_tid,
					 u32 ba_win_sz, u16 ssn,
					 bool update_ssn)
{
	struct ath11k_hal_reo_cmd cmd = {0};
	int ret;

	cmd.addr_lo = lower_32_bits(rx_tid->paddr);
	cmd.addr_hi = upper_32_bits(rx_tid->paddr);
	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
	cmd.upd0 = HAL_REO_CMD_UPD0_BA_WINDOW_SIZE;
	cmd.ba_window_size = ba_win_sz;

	if (update_ssn) {
		cmd.upd0 |= HAL_REO_CMD_UPD0_SSN;
		cmd.upd2 =
FIELD_PREP(HAL_REO_CMD_UPD2_SSN, ssn);
	}

	ret = ath11k_dp_tx_send_reo_cmd(ar->ab, rx_tid,
					HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
					NULL);
	if (ret) {
		ath11k_warn(ar->ab, "failed to update rx tid queue, tid %d (%d)\n",
			    rx_tid->tid, ret);
		return ret;
	}

	/* Mirror the new window size in the sw tid state only on success */
	rx_tid->ba_win_sz = ba_win_sz;

	return 0;
}

/* Undo ath11k_peer_rx_tid_setup() after a WMI reorder-queue-setup failure:
 * unmap and free the hw qdesc of @tid and mark it inactive. Silently does
 * nothing when the peer is gone or the tid was never activated.
 */
static void ath11k_dp_rx_tid_mem_free(struct ath11k_base *ab,
				      const u8 *peer_mac, int vdev_id, u8 tid)
{
	struct ath11k_peer *peer;
	struct dp_rx_tid *rx_tid;

	spin_lock_bh(&ab->base_lock);

	peer = ath11k_peer_find(ab, vdev_id, peer_mac);
	if (!peer) {
		ath11k_warn(ab, "failed to find the peer to free up rx tid mem\n");
		goto unlock_exit;
	}

	rx_tid = &peer->rx_tid[tid];
	if (!rx_tid->active)
		goto unlock_exit;

	dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
			 DMA_BIDIRECTIONAL);
	kfree(rx_tid->vaddr);

	rx_tid->active = false;

unlock_exit:
	spin_unlock_bh(&ab->base_lock);
}

/* Allocate, program and DMA-map a hw rx reorder queue descriptor (qdesc)
 * for @tid of the given peer and announce it to firmware over WMI; if a
 * queue is already active for the tid, only update its BA window size and
 * SSN. Returns 0 on success or a negative errno.
 */
int ath11k_peer_rx_tid_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id,
			     u8 tid, u32 ba_win_sz, u16 ssn)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_peer *peer;
	struct dp_rx_tid *rx_tid;
	u32 hw_desc_sz;
	u32 *addr_aligned;
	void *vaddr;
	dma_addr_t paddr;
	int ret;

	spin_lock_bh(&ab->base_lock);

	peer = ath11k_peer_find(ab, vdev_id, peer_mac);
	if (!peer) {
		ath11k_warn(ab, "failed to find the peer to set up rx tid\n");
		spin_unlock_bh(&ab->base_lock);
		return -ENOENT;
	}

	rx_tid = &peer->rx_tid[tid];
	/* Update the tid queue if it is already setup */
	if (rx_tid->active) {
		paddr = rx_tid->paddr;
		ret = ath11k_peer_rx_tid_reo_update(ar, peer, rx_tid,
						    ba_win_sz, ssn, true);
		spin_unlock_bh(&ab->base_lock);
		if (ret) {
			ath11k_warn(ab, "failed to update reo for rx tid %d\n", tid);
			return ret;
		}

		ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id,
							     peer_mac, paddr,
							     tid, 1, ba_win_sz);
		if (ret)
			ath11k_warn(ab, "failed to send wmi command to update rx reorder queue, tid :%d (%d)\n",
				    tid, ret);
		return ret;
	}

	rx_tid->tid = tid;

	rx_tid->ba_win_sz = ba_win_sz;

	/* TODO: Optimize the memory allocation for qos tid based on the
	 * the actual BA window size in REO tid update path.
	 */
	if (tid == HAL_DESC_REO_NON_QOS_TID)
		hw_desc_sz = ath11k_hal_reo_qdesc_size(ba_win_sz, tid);
	else
		hw_desc_sz = ath11k_hal_reo_qdesc_size(DP_BA_WIN_SZ_MAX, tid);

	/* Over-allocate so the descriptor can be aligned up to
	 * HAL_LINK_DESC_ALIGN below.
	 */
	vaddr = kzalloc(hw_desc_sz + HAL_LINK_DESC_ALIGN - 1, GFP_KERNEL);
	if (!vaddr) {
		spin_unlock_bh(&ab->base_lock);
		return -ENOMEM;
	}

	addr_aligned = PTR_ALIGN(vaddr, HAL_LINK_DESC_ALIGN);

	ath11k_hal_reo_qdesc_setup(addr_aligned, tid, ba_win_sz, ssn);

	paddr = dma_map_single(ab->dev, addr_aligned, hw_desc_sz,
			       DMA_BIDIRECTIONAL);

	ret = dma_mapping_error(ab->dev, paddr);
	if (ret) {
		spin_unlock_bh(&ab->base_lock);
		goto err_mem_free;
	}

	/* rx_tid->vaddr keeps the unaligned pointer so kfree() works later */
	rx_tid->vaddr = vaddr;
	rx_tid->paddr = paddr;
	rx_tid->size = hw_desc_sz;
	rx_tid->active = true;

	spin_unlock_bh(&ab->base_lock);

	ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, peer_mac,
						     paddr, tid, 1, ba_win_sz);
	if (ret) {
		ath11k_warn(ar->ab, "failed to setup rx reorder queue, tid :%d (%d)\n",
			    tid, ret);
		/* Roll back the qdesc allocation done above */
		ath11k_dp_rx_tid_mem_free(ab, peer_mac, vdev_id, tid);
	}

	return ret;

err_mem_free:
	kfree(vaddr);

	return ret;
}

/* mac80211 ampdu-action hook: set up the rx reorder queue when a BA
 * session is started for params->tid of params->sta.
 */
int ath11k_dp_rx_ampdu_start(struct ath11k *ar,
			     struct ieee80211_ampdu_params *params)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_sta *arsta = (void *)params->sta->drv_priv;
	int vdev_id = arsta->arvif->vdev_id;
	int ret;

	ret = ath11k_peer_rx_tid_setup(ar, params->sta->addr, vdev_id,
				       params->tid, params->buf_size,
				       params->ssn);
	if (ret)
		ath11k_warn(ab, "failed to setup rx tid %d\n", ret);

	return ret;
}

/* mac80211 ampdu-action hook: shrink the rx reorder queue back to a BA
 * window of 1 and notify firmware when the BA session for params->tid is
 * torn down. Returns 0 when the tid was never active.
 */
int ath11k_dp_rx_ampdu_stop(struct ath11k *ar,
			    struct ieee80211_ampdu_params *params)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_peer *peer;
	struct ath11k_sta *arsta = (void *)params->sta->drv_priv;
	int vdev_id = arsta->arvif->vdev_id;
	dma_addr_t paddr;
	bool active;
	int ret;

	spin_lock_bh(&ab->base_lock);

	peer = ath11k_peer_find(ab, vdev_id, params->sta->addr);
	if (!peer) {
		ath11k_warn(ab, "failed to find the peer to stop rx aggregation\n");
		spin_unlock_bh(&ab->base_lock);
		return -ENOENT;
	}

	paddr = peer->rx_tid[params->tid].paddr;
	active = peer->rx_tid[params->tid].active;

	if (!active) {
		spin_unlock_bh(&ab->base_lock);
		return 0;
	}

	/* NOTE(review): this passes peer->rx_tid (i.e. &peer->rx_tid[0])
	 * rather than &peer->rx_tid[params->tid]; verify whether updating
	 * tid 0's queue here is intended.
	 */
	ret = ath11k_peer_rx_tid_reo_update(ar, peer, peer->rx_tid, 1, 0, false);
	spin_unlock_bh(&ab->base_lock);
	if (ret) {
		ath11k_warn(ab, "failed to update reo for rx tid %d: %d\n",
			    params->tid, ret);
		return ret;
	}

	ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id,
						     params->sta->addr, paddr,
						     params->tid, 1, 1);
	if (ret)
		ath11k_warn(ab, "failed to send wmi to delete rx tid %d\n",
			    ret);

	return ret;
}

/* Find the user_stats slot of @ppdu_stats that already holds @peer_id, or
 * the first unused slot otherwise. Returns -EINVAL when all slots are
 * occupied by other peers.
 */
static int ath11k_get_ppdu_user_index(struct htt_ppdu_stats *ppdu_stats,
				      u16 peer_id)
{
	int i;

	for (i = 0; i < HTT_PPDU_STATS_MAX_USERS - 1; i++) {
		if (ppdu_stats->user_stats[i].is_valid_peer_id) {
			if (peer_id == ppdu_stats->user_stats[i].peer_id)
				return i;
		} else {
			return i;
		}
	}
1017d5c65159SKalle Valo 1018d5c65159SKalle Valo return -EINVAL; 1019d5c65159SKalle Valo } 1020d5c65159SKalle Valo 1021d5c65159SKalle Valo static int ath11k_htt_tlv_ppdu_stats_parse(struct ath11k_base *ab, 1022d5c65159SKalle Valo u16 tag, u16 len, const void *ptr, 1023d5c65159SKalle Valo void *data) 1024d5c65159SKalle Valo { 1025d5c65159SKalle Valo struct htt_ppdu_stats_info *ppdu_info; 1026d5c65159SKalle Valo struct htt_ppdu_user_stats *user_stats; 1027d5c65159SKalle Valo int cur_user; 1028d5c65159SKalle Valo u16 peer_id; 1029d5c65159SKalle Valo 1030d5c65159SKalle Valo ppdu_info = (struct htt_ppdu_stats_info *)data; 1031d5c65159SKalle Valo 1032d5c65159SKalle Valo switch (tag) { 1033d5c65159SKalle Valo case HTT_PPDU_STATS_TAG_COMMON: 1034d5c65159SKalle Valo if (len < sizeof(struct htt_ppdu_stats_common)) { 1035d5c65159SKalle Valo ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n", 1036d5c65159SKalle Valo len, tag); 1037d5c65159SKalle Valo return -EINVAL; 1038d5c65159SKalle Valo } 1039d5c65159SKalle Valo memcpy((void *)&ppdu_info->ppdu_stats.common, ptr, 1040d5c65159SKalle Valo sizeof(struct htt_ppdu_stats_common)); 1041d5c65159SKalle Valo break; 1042d5c65159SKalle Valo case HTT_PPDU_STATS_TAG_USR_RATE: 1043d5c65159SKalle Valo if (len < sizeof(struct htt_ppdu_stats_user_rate)) { 1044d5c65159SKalle Valo ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n", 1045d5c65159SKalle Valo len, tag); 1046d5c65159SKalle Valo return -EINVAL; 1047d5c65159SKalle Valo } 1048d5c65159SKalle Valo 1049d5c65159SKalle Valo peer_id = ((struct htt_ppdu_stats_user_rate *)ptr)->sw_peer_id; 1050d5c65159SKalle Valo cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats, 1051d5c65159SKalle Valo peer_id); 1052d5c65159SKalle Valo if (cur_user < 0) 1053d5c65159SKalle Valo return -EINVAL; 1054d5c65159SKalle Valo user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user]; 1055d5c65159SKalle Valo user_stats->peer_id = peer_id; 1056d5c65159SKalle Valo user_stats->is_valid_peer_id = true; 
1057d5c65159SKalle Valo memcpy((void *)&user_stats->rate, ptr, 1058d5c65159SKalle Valo sizeof(struct htt_ppdu_stats_user_rate)); 1059d5c65159SKalle Valo user_stats->tlv_flags |= BIT(tag); 1060d5c65159SKalle Valo break; 1061d5c65159SKalle Valo case HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON: 1062d5c65159SKalle Valo if (len < sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn)) { 1063d5c65159SKalle Valo ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n", 1064d5c65159SKalle Valo len, tag); 1065d5c65159SKalle Valo return -EINVAL; 1066d5c65159SKalle Valo } 1067d5c65159SKalle Valo 1068d5c65159SKalle Valo peer_id = ((struct htt_ppdu_stats_usr_cmpltn_cmn *)ptr)->sw_peer_id; 1069d5c65159SKalle Valo cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats, 1070d5c65159SKalle Valo peer_id); 1071d5c65159SKalle Valo if (cur_user < 0) 1072d5c65159SKalle Valo return -EINVAL; 1073d5c65159SKalle Valo user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user]; 1074d5c65159SKalle Valo user_stats->peer_id = peer_id; 1075d5c65159SKalle Valo user_stats->is_valid_peer_id = true; 1076d5c65159SKalle Valo memcpy((void *)&user_stats->cmpltn_cmn, ptr, 1077d5c65159SKalle Valo sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn)); 1078d5c65159SKalle Valo user_stats->tlv_flags |= BIT(tag); 1079d5c65159SKalle Valo break; 1080d5c65159SKalle Valo case HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS: 1081d5c65159SKalle Valo if (len < 1082d5c65159SKalle Valo sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status)) { 1083d5c65159SKalle Valo ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n", 1084d5c65159SKalle Valo len, tag); 1085d5c65159SKalle Valo return -EINVAL; 1086d5c65159SKalle Valo } 1087d5c65159SKalle Valo 1088d5c65159SKalle Valo peer_id = 1089d5c65159SKalle Valo ((struct htt_ppdu_stats_usr_cmpltn_ack_ba_status *)ptr)->sw_peer_id; 1090d5c65159SKalle Valo cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats, 1091d5c65159SKalle Valo peer_id); 1092d5c65159SKalle Valo if (cur_user < 0) 1093d5c65159SKalle Valo 
return -EINVAL; 1094d5c65159SKalle Valo user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user]; 1095d5c65159SKalle Valo user_stats->peer_id = peer_id; 1096d5c65159SKalle Valo user_stats->is_valid_peer_id = true; 1097d5c65159SKalle Valo memcpy((void *)&user_stats->ack_ba, ptr, 1098d5c65159SKalle Valo sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status)); 1099d5c65159SKalle Valo user_stats->tlv_flags |= BIT(tag); 1100d5c65159SKalle Valo break; 1101d5c65159SKalle Valo } 1102d5c65159SKalle Valo return 0; 1103d5c65159SKalle Valo } 1104d5c65159SKalle Valo 1105d5c65159SKalle Valo int ath11k_dp_htt_tlv_iter(struct ath11k_base *ab, const void *ptr, size_t len, 1106d5c65159SKalle Valo int (*iter)(struct ath11k_base *ar, u16 tag, u16 len, 1107d5c65159SKalle Valo const void *ptr, void *data), 1108d5c65159SKalle Valo void *data) 1109d5c65159SKalle Valo { 1110d5c65159SKalle Valo const struct htt_tlv *tlv; 1111d5c65159SKalle Valo const void *begin = ptr; 1112d5c65159SKalle Valo u16 tlv_tag, tlv_len; 1113d5c65159SKalle Valo int ret = -EINVAL; 1114d5c65159SKalle Valo 1115d5c65159SKalle Valo while (len > 0) { 1116d5c65159SKalle Valo if (len < sizeof(*tlv)) { 1117d5c65159SKalle Valo ath11k_err(ab, "htt tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n", 1118d5c65159SKalle Valo ptr - begin, len, sizeof(*tlv)); 1119d5c65159SKalle Valo return -EINVAL; 1120d5c65159SKalle Valo } 1121d5c65159SKalle Valo tlv = (struct htt_tlv *)ptr; 1122d5c65159SKalle Valo tlv_tag = FIELD_GET(HTT_TLV_TAG, tlv->header); 1123d5c65159SKalle Valo tlv_len = FIELD_GET(HTT_TLV_LEN, tlv->header); 1124d5c65159SKalle Valo ptr += sizeof(*tlv); 1125d5c65159SKalle Valo len -= sizeof(*tlv); 1126d5c65159SKalle Valo 1127d5c65159SKalle Valo if (tlv_len > len) { 1128d5c65159SKalle Valo ath11k_err(ab, "htt tlv parse failure of tag %hhu at byte %zd (%zu bytes left, %hhu expected)\n", 1129d5c65159SKalle Valo tlv_tag, ptr - begin, len, tlv_len); 1130d5c65159SKalle Valo return -EINVAL; 1131d5c65159SKalle Valo } 
1132d5c65159SKalle Valo ret = iter(ab, tlv_tag, tlv_len, ptr, data); 1133d5c65159SKalle Valo if (ret == -ENOMEM) 1134d5c65159SKalle Valo return ret; 1135d5c65159SKalle Valo 1136d5c65159SKalle Valo ptr += tlv_len; 1137d5c65159SKalle Valo len -= tlv_len; 1138d5c65159SKalle Valo } 1139d5c65159SKalle Valo return 0; 1140d5c65159SKalle Valo } 1141d5c65159SKalle Valo 11426a0c3702SJohn Crispin static inline u32 ath11k_he_gi_to_nl80211_he_gi(u8 sgi) 11436a0c3702SJohn Crispin { 11446a0c3702SJohn Crispin u32 ret = 0; 11456a0c3702SJohn Crispin 11466a0c3702SJohn Crispin switch (sgi) { 11476a0c3702SJohn Crispin case RX_MSDU_START_SGI_0_8_US: 11486a0c3702SJohn Crispin ret = NL80211_RATE_INFO_HE_GI_0_8; 11496a0c3702SJohn Crispin break; 11506a0c3702SJohn Crispin case RX_MSDU_START_SGI_1_6_US: 11516a0c3702SJohn Crispin ret = NL80211_RATE_INFO_HE_GI_1_6; 11526a0c3702SJohn Crispin break; 11536a0c3702SJohn Crispin case RX_MSDU_START_SGI_3_2_US: 11546a0c3702SJohn Crispin ret = NL80211_RATE_INFO_HE_GI_3_2; 11556a0c3702SJohn Crispin break; 11566a0c3702SJohn Crispin } 11576a0c3702SJohn Crispin 11586a0c3702SJohn Crispin return ret; 11596a0c3702SJohn Crispin } 11606a0c3702SJohn Crispin 1161d5c65159SKalle Valo static void 1162d5c65159SKalle Valo ath11k_update_per_peer_tx_stats(struct ath11k *ar, 1163d5c65159SKalle Valo struct htt_ppdu_stats *ppdu_stats, u8 user) 1164d5c65159SKalle Valo { 1165d5c65159SKalle Valo struct ath11k_base *ab = ar->ab; 1166d5c65159SKalle Valo struct ath11k_peer *peer; 1167d5c65159SKalle Valo struct ieee80211_sta *sta; 1168d5c65159SKalle Valo struct ath11k_sta *arsta; 1169d5c65159SKalle Valo struct htt_ppdu_stats_user_rate *user_rate; 1170d5c65159SKalle Valo struct ath11k_per_peer_tx_stats *peer_stats = &ar->peer_tx_stats; 1171d5c65159SKalle Valo struct htt_ppdu_user_stats *usr_stats = &ppdu_stats->user_stats[user]; 1172d5c65159SKalle Valo struct htt_ppdu_stats_common *common = &ppdu_stats->common; 1173d5c65159SKalle Valo int ret; 11746a0c3702SJohn Crispin u8 flags, 
mcs, nss, bw, sgi, dcm, rate_idx = 0; 1175d5c65159SKalle Valo u32 succ_bytes = 0; 1176d5c65159SKalle Valo u16 rate = 0, succ_pkts = 0; 1177d5c65159SKalle Valo u32 tx_duration = 0; 1178b9269a07SVenkateswara Naralasetty u8 tid = HTT_PPDU_STATS_NON_QOS_TID; 1179d5c65159SKalle Valo bool is_ampdu = false; 1180d5c65159SKalle Valo 1181d5c65159SKalle Valo if (!usr_stats) 1182d5c65159SKalle Valo return; 1183d5c65159SKalle Valo 1184d5c65159SKalle Valo if (!(usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_RATE))) 1185d5c65159SKalle Valo return; 1186d5c65159SKalle Valo 1187d5c65159SKalle Valo if (usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON)) 1188d5c65159SKalle Valo is_ampdu = 1189d5c65159SKalle Valo HTT_USR_CMPLTN_IS_AMPDU(usr_stats->cmpltn_cmn.flags); 1190d5c65159SKalle Valo 1191d5c65159SKalle Valo if (usr_stats->tlv_flags & 1192d5c65159SKalle Valo BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS)) { 1193d5c65159SKalle Valo succ_bytes = usr_stats->ack_ba.success_bytes; 1194d5c65159SKalle Valo succ_pkts = FIELD_GET(HTT_PPDU_STATS_ACK_BA_INFO_NUM_MSDU_M, 1195d5c65159SKalle Valo usr_stats->ack_ba.info); 1196b9269a07SVenkateswara Naralasetty tid = FIELD_GET(HTT_PPDU_STATS_ACK_BA_INFO_TID_NUM, 1197b9269a07SVenkateswara Naralasetty usr_stats->ack_ba.info); 1198d5c65159SKalle Valo } 1199d5c65159SKalle Valo 1200d5c65159SKalle Valo if (common->fes_duration_us) 1201d5c65159SKalle Valo tx_duration = common->fes_duration_us; 1202d5c65159SKalle Valo 1203d5c65159SKalle Valo user_rate = &usr_stats->rate; 1204d5c65159SKalle Valo flags = HTT_USR_RATE_PREAMBLE(user_rate->rate_flags); 1205d5c65159SKalle Valo bw = HTT_USR_RATE_BW(user_rate->rate_flags) - 2; 1206d5c65159SKalle Valo nss = HTT_USR_RATE_NSS(user_rate->rate_flags) + 1; 1207d5c65159SKalle Valo mcs = HTT_USR_RATE_MCS(user_rate->rate_flags); 1208d5c65159SKalle Valo sgi = HTT_USR_RATE_GI(user_rate->rate_flags); 12096a0c3702SJohn Crispin dcm = HTT_USR_RATE_DCM(user_rate->rate_flags); 1210d5c65159SKalle Valo 
1211d5c65159SKalle Valo /* Note: If host configured fixed rates and in some other special 1212d5c65159SKalle Valo * cases, the broadcast/management frames are sent in different rates. 1213d5c65159SKalle Valo * Firmware rate's control to be skipped for this? 1214d5c65159SKalle Valo */ 1215d5c65159SKalle Valo 12166a0c3702SJohn Crispin if (flags == WMI_RATE_PREAMBLE_HE && mcs > 11) { 12176a0c3702SJohn Crispin ath11k_warn(ab, "Invalid HE mcs %hhd peer stats", mcs); 12186a0c3702SJohn Crispin return; 12196a0c3702SJohn Crispin } 12206a0c3702SJohn Crispin 12216a0c3702SJohn Crispin if (flags == WMI_RATE_PREAMBLE_HE && mcs > ATH11K_HE_MCS_MAX) { 12226a0c3702SJohn Crispin ath11k_warn(ab, "Invalid HE mcs %hhd peer stats", mcs); 12236a0c3702SJohn Crispin return; 12246a0c3702SJohn Crispin } 12256a0c3702SJohn Crispin 12266a0c3702SJohn Crispin if (flags == WMI_RATE_PREAMBLE_VHT && mcs > ATH11K_VHT_MCS_MAX) { 1227d5c65159SKalle Valo ath11k_warn(ab, "Invalid VHT mcs %hhd peer stats", mcs); 1228d5c65159SKalle Valo return; 1229d5c65159SKalle Valo } 1230d5c65159SKalle Valo 12316a0c3702SJohn Crispin if (flags == WMI_RATE_PREAMBLE_HT && (mcs > ATH11K_HT_MCS_MAX || nss < 1)) { 1232d5c65159SKalle Valo ath11k_warn(ab, "Invalid HT mcs %hhd nss %hhd peer stats", 1233d5c65159SKalle Valo mcs, nss); 1234d5c65159SKalle Valo return; 1235d5c65159SKalle Valo } 1236d5c65159SKalle Valo 1237d5c65159SKalle Valo if (flags == WMI_RATE_PREAMBLE_CCK || flags == WMI_RATE_PREAMBLE_OFDM) { 1238d5c65159SKalle Valo ret = ath11k_mac_hw_ratecode_to_legacy_rate(mcs, 1239d5c65159SKalle Valo flags, 1240d5c65159SKalle Valo &rate_idx, 1241d5c65159SKalle Valo &rate); 1242d5c65159SKalle Valo if (ret < 0) 1243d5c65159SKalle Valo return; 1244d5c65159SKalle Valo } 1245d5c65159SKalle Valo 1246d5c65159SKalle Valo rcu_read_lock(); 1247d5c65159SKalle Valo spin_lock_bh(&ab->base_lock); 1248d5c65159SKalle Valo peer = ath11k_peer_find_by_id(ab, usr_stats->peer_id); 1249d5c65159SKalle Valo 1250d5c65159SKalle Valo if (!peer || 
!peer->sta) { 1251d5c65159SKalle Valo spin_unlock_bh(&ab->base_lock); 1252d5c65159SKalle Valo rcu_read_unlock(); 1253d5c65159SKalle Valo return; 1254d5c65159SKalle Valo } 1255d5c65159SKalle Valo 1256d5c65159SKalle Valo sta = peer->sta; 1257d5c65159SKalle Valo arsta = (struct ath11k_sta *)sta->drv_priv; 1258d5c65159SKalle Valo 1259d5c65159SKalle Valo memset(&arsta->txrate, 0, sizeof(arsta->txrate)); 1260d5c65159SKalle Valo 1261d5c65159SKalle Valo switch (flags) { 1262d5c65159SKalle Valo case WMI_RATE_PREAMBLE_OFDM: 1263d5c65159SKalle Valo arsta->txrate.legacy = rate; 1264d5c65159SKalle Valo break; 1265d5c65159SKalle Valo case WMI_RATE_PREAMBLE_CCK: 1266d5c65159SKalle Valo arsta->txrate.legacy = rate; 1267d5c65159SKalle Valo break; 1268d5c65159SKalle Valo case WMI_RATE_PREAMBLE_HT: 1269d5c65159SKalle Valo arsta->txrate.mcs = mcs + 8 * (nss - 1); 1270d5c65159SKalle Valo arsta->txrate.flags = RATE_INFO_FLAGS_MCS; 1271be43ce64SJohn Crispin if (sgi) 1272d5c65159SKalle Valo arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI; 1273d5c65159SKalle Valo break; 1274d5c65159SKalle Valo case WMI_RATE_PREAMBLE_VHT: 1275d5c65159SKalle Valo arsta->txrate.mcs = mcs; 1276d5c65159SKalle Valo arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS; 1277be43ce64SJohn Crispin if (sgi) 1278d5c65159SKalle Valo arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI; 1279d5c65159SKalle Valo break; 12806a0c3702SJohn Crispin case WMI_RATE_PREAMBLE_HE: 12816a0c3702SJohn Crispin arsta->txrate.mcs = mcs; 12826a0c3702SJohn Crispin arsta->txrate.flags = RATE_INFO_FLAGS_HE_MCS; 12836a0c3702SJohn Crispin arsta->txrate.he_dcm = dcm; 12846a0c3702SJohn Crispin arsta->txrate.he_gi = ath11k_he_gi_to_nl80211_he_gi(sgi); 12856a0c3702SJohn Crispin arsta->txrate.he_ru_alloc = ath11k_he_ru_tones_to_nl80211_he_ru_alloc( 12866a0c3702SJohn Crispin (user_rate->ru_end - 12876a0c3702SJohn Crispin user_rate->ru_start) + 1); 12886a0c3702SJohn Crispin break; 1289d5c65159SKalle Valo } 1290d5c65159SKalle Valo 1291d5c65159SKalle Valo 
arsta->txrate.nss = nss; 129239e81c6aSTamizh chelvam arsta->txrate.bw = ath11k_mac_bw_to_mac80211_bw(bw); 1293a9e945eaSVenkateswara Naralasetty arsta->tx_duration += tx_duration; 1294d5c65159SKalle Valo memcpy(&arsta->last_txrate, &arsta->txrate, sizeof(struct rate_info)); 1295d5c65159SKalle Valo 1296b9269a07SVenkateswara Naralasetty /* PPDU stats reported for mgmt packet doesn't have valid tx bytes. 1297b9269a07SVenkateswara Naralasetty * So skip peer stats update for mgmt packets. 1298b9269a07SVenkateswara Naralasetty */ 1299b9269a07SVenkateswara Naralasetty if (tid < HTT_PPDU_STATS_NON_QOS_TID) { 1300d5c65159SKalle Valo memset(peer_stats, 0, sizeof(*peer_stats)); 1301d5c65159SKalle Valo peer_stats->succ_pkts = succ_pkts; 1302d5c65159SKalle Valo peer_stats->succ_bytes = succ_bytes; 1303d5c65159SKalle Valo peer_stats->is_ampdu = is_ampdu; 1304d5c65159SKalle Valo peer_stats->duration = tx_duration; 1305d5c65159SKalle Valo peer_stats->ba_fails = 1306d5c65159SKalle Valo HTT_USR_CMPLTN_LONG_RETRY(usr_stats->cmpltn_cmn.flags) + 1307d5c65159SKalle Valo HTT_USR_CMPLTN_SHORT_RETRY(usr_stats->cmpltn_cmn.flags); 1308d5c65159SKalle Valo 1309d5c65159SKalle Valo if (ath11k_debug_is_extd_tx_stats_enabled(ar)) 1310d5c65159SKalle Valo ath11k_accumulate_per_peer_tx_stats(arsta, 1311d5c65159SKalle Valo peer_stats, rate_idx); 1312b9269a07SVenkateswara Naralasetty } 1313d5c65159SKalle Valo 1314d5c65159SKalle Valo spin_unlock_bh(&ab->base_lock); 1315d5c65159SKalle Valo rcu_read_unlock(); 1316d5c65159SKalle Valo } 1317d5c65159SKalle Valo 1318d5c65159SKalle Valo static void ath11k_htt_update_ppdu_stats(struct ath11k *ar, 1319d5c65159SKalle Valo struct htt_ppdu_stats *ppdu_stats) 1320d5c65159SKalle Valo { 1321d5c65159SKalle Valo u8 user; 1322d5c65159SKalle Valo 1323d5c65159SKalle Valo for (user = 0; user < HTT_PPDU_STATS_MAX_USERS - 1; user++) 1324d5c65159SKalle Valo ath11k_update_per_peer_tx_stats(ar, ppdu_stats, user); 1325d5c65159SKalle Valo } 1326d5c65159SKalle Valo 
static
struct htt_ppdu_stats_info *ath11k_dp_htt_get_ppdu_desc(struct ath11k *ar,
							u32 ppdu_id)
{
	struct htt_ppdu_stats_info *ppdu_info;

	/* Reuse the cached entry for @ppdu_id when one exists; otherwise,
	 * once the list is deeper than HTT_PPDU_DESC_MAX_DEPTH, retire the
	 * oldest entry (flushing its stats) before allocating a new one.
	 */
	spin_lock_bh(&ar->data_lock);
	if (!list_empty(&ar->ppdu_stats_info)) {
		list_for_each_entry(ppdu_info, &ar->ppdu_stats_info, list) {
			if (ppdu_info->ppdu_id == ppdu_id) {
				spin_unlock_bh(&ar->data_lock);
				return ppdu_info;
			}
		}

		if (ar->ppdu_stat_list_depth > HTT_PPDU_DESC_MAX_DEPTH) {
			ppdu_info = list_first_entry(&ar->ppdu_stats_info,
						     typeof(*ppdu_info), list);
			list_del(&ppdu_info->list);
			ar->ppdu_stat_list_depth--;
			ath11k_htt_update_ppdu_stats(ar, &ppdu_info->ppdu_stats);
			kfree(ppdu_info);
		}
	}
	spin_unlock_bh(&ar->data_lock);

	ppdu_info = kzalloc(sizeof(*ppdu_info), GFP_KERNEL);
	if (!ppdu_info)
		return NULL;

	spin_lock_bh(&ar->data_lock);
	list_add_tail(&ppdu_info->list, &ar->ppdu_stats_info);
	ar->ppdu_stat_list_depth++;
	spin_unlock_bh(&ar->data_lock);

	return ppdu_info;
}

/* Handle an HTT PPDU stats indication: locate the pdev's ar, fetch (or
 * allocate) the ppdu_info entry for the message's ppdu_id and parse the
 * TLV payload into it. Returns 0 on success or a negative errno.
 */
static int ath11k_htt_pull_ppdu_stats(struct ath11k_base *ab,
				      struct sk_buff *skb)
{
	struct ath11k_htt_ppdu_stats_msg *msg;
	struct htt_ppdu_stats_info *ppdu_info;
	struct ath11k *ar;
	int ret;
	u8 pdev_id;
	u32 ppdu_id, len;

	msg = (struct ath11k_htt_ppdu_stats_msg *)skb->data;
	len = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PAYLOAD_SIZE, msg->info);
	pdev_id = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PDEV_ID, msg->info);
	ppdu_id = msg->ppdu_id;

	rcu_read_lock();
	ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id);
	if (!ar) {
		ret = -EINVAL;
		goto exit;
	}

	if (ath11k_debug_is_pktlog_lite_mode_enabled(ar))
		trace_ath11k_htt_ppdu_stats(ar, skb->data, len);

	ppdu_info = ath11k_dp_htt_get_ppdu_desc(ar, ppdu_id);
	if (!ppdu_info) {
		ret = -EINVAL;
		goto exit;
	}

	ppdu_info->ppdu_id = ppdu_id;
	ret = ath11k_dp_htt_tlv_iter(ab, msg->data, len,
				     ath11k_htt_tlv_ppdu_stats_parse,
				     (void *)ppdu_info);
	if (ret) {
		ath11k_warn(ab, "Failed to parse tlv %d\n", ret);
		goto exit;
	}

exit:
	rcu_read_unlock();

	return ret;
}

static void ath11k_htt_pktlog(struct ath11k_base
*ab, struct sk_buff *skb) 1412d5c65159SKalle Valo { 1413d5c65159SKalle Valo struct htt_pktlog_msg *data = (struct htt_pktlog_msg *)skb->data; 1414443d2ee7SAnilkumar Kolli struct ath_pktlog_hdr *hdr = (struct ath_pktlog_hdr *)data; 1415d5c65159SKalle Valo struct ath11k *ar; 1416d5c65159SKalle Valo u8 pdev_id; 1417d5c65159SKalle Valo 1418d5c65159SKalle Valo pdev_id = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PDEV_ID, data->hdr); 1419d0f390eaSAnilkumar Kolli ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id); 1420d0f390eaSAnilkumar Kolli if (!ar) { 1421d0f390eaSAnilkumar Kolli ath11k_warn(ab, "invalid pdev id %d on htt pktlog\n", pdev_id); 1422d0f390eaSAnilkumar Kolli return; 1423d0f390eaSAnilkumar Kolli } 1424d5c65159SKalle Valo 1425443d2ee7SAnilkumar Kolli trace_ath11k_htt_pktlog(ar, data->payload, hdr->size); 1426d5c65159SKalle Valo } 1427d5c65159SKalle Valo 1428d5c65159SKalle Valo void ath11k_dp_htt_htc_t2h_msg_handler(struct ath11k_base *ab, 1429d5c65159SKalle Valo struct sk_buff *skb) 1430d5c65159SKalle Valo { 1431d5c65159SKalle Valo struct ath11k_dp *dp = &ab->dp; 1432d5c65159SKalle Valo struct htt_resp_msg *resp = (struct htt_resp_msg *)skb->data; 1433d5c65159SKalle Valo enum htt_t2h_msg_type type = FIELD_GET(HTT_T2H_MSG_TYPE, *(u32 *)resp); 1434d5c65159SKalle Valo u16 peer_id; 1435d5c65159SKalle Valo u8 vdev_id; 1436d5c65159SKalle Valo u8 mac_addr[ETH_ALEN]; 1437d5c65159SKalle Valo u16 peer_mac_h16; 1438d5c65159SKalle Valo u16 ast_hash; 1439d5c65159SKalle Valo 1440d5c65159SKalle Valo ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "dp_htt rx msg type :0x%0x\n", type); 1441d5c65159SKalle Valo 1442d5c65159SKalle Valo switch (type) { 1443d5c65159SKalle Valo case HTT_T2H_MSG_TYPE_VERSION_CONF: 1444d5c65159SKalle Valo dp->htt_tgt_ver_major = FIELD_GET(HTT_T2H_VERSION_CONF_MAJOR, 1445d5c65159SKalle Valo resp->version_msg.version); 1446d5c65159SKalle Valo dp->htt_tgt_ver_minor = FIELD_GET(HTT_T2H_VERSION_CONF_MINOR, 1447d5c65159SKalle Valo resp->version_msg.version); 1448d5c65159SKalle Valo 
complete(&dp->htt_tgt_version_received); 1449d5c65159SKalle Valo break; 1450d5c65159SKalle Valo case HTT_T2H_MSG_TYPE_PEER_MAP: 1451d5c65159SKalle Valo vdev_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_VDEV_ID, 1452d5c65159SKalle Valo resp->peer_map_ev.info); 1453d5c65159SKalle Valo peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_PEER_ID, 1454d5c65159SKalle Valo resp->peer_map_ev.info); 1455d5c65159SKalle Valo peer_mac_h16 = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16, 1456d5c65159SKalle Valo resp->peer_map_ev.info1); 1457d5c65159SKalle Valo ath11k_dp_get_mac_addr(resp->peer_map_ev.mac_addr_l32, 1458d5c65159SKalle Valo peer_mac_h16, mac_addr); 1459d5c65159SKalle Valo ast_hash = FIELD_GET(HTT_T2H_PEER_MAP_INFO2_AST_HASH_VAL, 14600f37fbf4SAnilkumar Kolli resp->peer_map_ev.info2); 1461d5c65159SKalle Valo ath11k_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash); 1462d5c65159SKalle Valo break; 1463d5c65159SKalle Valo case HTT_T2H_MSG_TYPE_PEER_UNMAP: 1464d5c65159SKalle Valo peer_id = FIELD_GET(HTT_T2H_PEER_UNMAP_INFO_PEER_ID, 1465d5c65159SKalle Valo resp->peer_unmap_ev.info); 1466d5c65159SKalle Valo ath11k_peer_unmap_event(ab, peer_id); 1467d5c65159SKalle Valo break; 1468d5c65159SKalle Valo case HTT_T2H_MSG_TYPE_PPDU_STATS_IND: 1469d5c65159SKalle Valo ath11k_htt_pull_ppdu_stats(ab, skb); 1470d5c65159SKalle Valo break; 1471d5c65159SKalle Valo case HTT_T2H_MSG_TYPE_EXT_STATS_CONF: 1472d5c65159SKalle Valo ath11k_dbg_htt_ext_stats_handler(ab, skb); 1473d5c65159SKalle Valo break; 1474d5c65159SKalle Valo case HTT_T2H_MSG_TYPE_PKTLOG: 1475d5c65159SKalle Valo ath11k_htt_pktlog(ab, skb); 1476d5c65159SKalle Valo break; 1477d5c65159SKalle Valo default: 1478d5c65159SKalle Valo ath11k_warn(ab, "htt event %d not handled\n", type); 1479d5c65159SKalle Valo break; 1480d5c65159SKalle Valo } 1481d5c65159SKalle Valo 1482d5c65159SKalle Valo dev_kfree_skb_any(skb); 1483d5c65159SKalle Valo } 1484d5c65159SKalle Valo 1485d5c65159SKalle Valo static int ath11k_dp_rx_msdu_coalesce(struct ath11k *ar, 
1486d5c65159SKalle Valo struct sk_buff_head *msdu_list, 1487d5c65159SKalle Valo struct sk_buff *first, struct sk_buff *last, 1488d5c65159SKalle Valo u8 l3pad_bytes, int msdu_len) 1489d5c65159SKalle Valo { 1490d5c65159SKalle Valo struct sk_buff *skb; 1491d5c65159SKalle Valo struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(first); 1492d2f510faSSriram R int buf_first_hdr_len, buf_first_len; 1493d5c65159SKalle Valo struct hal_rx_desc *ldesc; 1494d5c65159SKalle Valo int space_extra; 1495d5c65159SKalle Valo int rem_len; 1496d5c65159SKalle Valo int buf_len; 1497d5c65159SKalle Valo 1498d2f510faSSriram R /* As the msdu is spread across multiple rx buffers, 1499d2f510faSSriram R * find the offset to the start of msdu for computing 1500d2f510faSSriram R * the length of the msdu in the first buffer. 1501d2f510faSSriram R */ 1502d2f510faSSriram R buf_first_hdr_len = HAL_RX_DESC_SIZE + l3pad_bytes; 1503d2f510faSSriram R buf_first_len = DP_RX_BUFFER_SIZE - buf_first_hdr_len; 1504d2f510faSSriram R 1505d2f510faSSriram R if (WARN_ON_ONCE(msdu_len <= buf_first_len)) { 1506d2f510faSSriram R skb_put(first, buf_first_hdr_len + msdu_len); 1507d2f510faSSriram R skb_pull(first, buf_first_hdr_len); 1508d5c65159SKalle Valo return 0; 1509d5c65159SKalle Valo } 1510d5c65159SKalle Valo 1511d5c65159SKalle Valo ldesc = (struct hal_rx_desc *)last->data; 1512d5c65159SKalle Valo rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(ldesc); 1513d5c65159SKalle Valo rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(ldesc); 1514d5c65159SKalle Valo 1515d5c65159SKalle Valo /* MSDU spans over multiple buffers because the length of the MSDU 1516d5c65159SKalle Valo * exceeds DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE. So assume the data 1517d5c65159SKalle Valo * in the first buf is of length DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE. 
1518d5c65159SKalle Valo */ 1519d5c65159SKalle Valo skb_put(first, DP_RX_BUFFER_SIZE); 1520d2f510faSSriram R skb_pull(first, buf_first_hdr_len); 1521d5c65159SKalle Valo 152230679ec4SKarthikeyan Periyasamy /* When an MSDU spread over multiple buffers attention, MSDU_END and 152330679ec4SKarthikeyan Periyasamy * MPDU_END tlvs are valid only in the last buffer. Copy those tlvs. 152430679ec4SKarthikeyan Periyasamy */ 152530679ec4SKarthikeyan Periyasamy ath11k_dp_rx_desc_end_tlv_copy(rxcb->rx_desc, ldesc); 152630679ec4SKarthikeyan Periyasamy 1527d2f510faSSriram R space_extra = msdu_len - (buf_first_len + skb_tailroom(first)); 1528d5c65159SKalle Valo if (space_extra > 0 && 1529d5c65159SKalle Valo (pskb_expand_head(first, 0, space_extra, GFP_ATOMIC) < 0)) { 1530d5c65159SKalle Valo /* Free up all buffers of the MSDU */ 1531d5c65159SKalle Valo while ((skb = __skb_dequeue(msdu_list)) != NULL) { 1532d5c65159SKalle Valo rxcb = ATH11K_SKB_RXCB(skb); 1533d5c65159SKalle Valo if (!rxcb->is_continuation) { 1534d5c65159SKalle Valo dev_kfree_skb_any(skb); 1535d5c65159SKalle Valo break; 1536d5c65159SKalle Valo } 1537d5c65159SKalle Valo dev_kfree_skb_any(skb); 1538d5c65159SKalle Valo } 1539d5c65159SKalle Valo return -ENOMEM; 1540d5c65159SKalle Valo } 1541d5c65159SKalle Valo 1542d2f510faSSriram R rem_len = msdu_len - buf_first_len; 1543d5c65159SKalle Valo while ((skb = __skb_dequeue(msdu_list)) != NULL && rem_len > 0) { 1544d5c65159SKalle Valo rxcb = ATH11K_SKB_RXCB(skb); 1545d5c65159SKalle Valo if (rxcb->is_continuation) 1546d5c65159SKalle Valo buf_len = DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE; 1547d5c65159SKalle Valo else 1548d5c65159SKalle Valo buf_len = rem_len; 1549d5c65159SKalle Valo 1550d5c65159SKalle Valo if (buf_len > (DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE)) { 1551d5c65159SKalle Valo WARN_ON_ONCE(1); 1552d5c65159SKalle Valo dev_kfree_skb_any(skb); 1553d5c65159SKalle Valo return -EINVAL; 1554d5c65159SKalle Valo } 1555d5c65159SKalle Valo 1556d5c65159SKalle Valo skb_put(skb, buf_len + 
HAL_RX_DESC_SIZE); 1557d5c65159SKalle Valo skb_pull(skb, HAL_RX_DESC_SIZE); 1558d5c65159SKalle Valo skb_copy_from_linear_data(skb, skb_put(first, buf_len), 1559d5c65159SKalle Valo buf_len); 1560d5c65159SKalle Valo dev_kfree_skb_any(skb); 1561d5c65159SKalle Valo 1562d5c65159SKalle Valo rem_len -= buf_len; 1563d5c65159SKalle Valo if (!rxcb->is_continuation) 1564d5c65159SKalle Valo break; 1565d5c65159SKalle Valo } 1566d5c65159SKalle Valo 1567d5c65159SKalle Valo return 0; 1568d5c65159SKalle Valo } 1569d5c65159SKalle Valo 1570d5c65159SKalle Valo static struct sk_buff *ath11k_dp_rx_get_msdu_last_buf(struct sk_buff_head *msdu_list, 1571d5c65159SKalle Valo struct sk_buff *first) 1572d5c65159SKalle Valo { 1573d5c65159SKalle Valo struct sk_buff *skb; 1574d5c65159SKalle Valo struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(first); 1575d5c65159SKalle Valo 1576d5c65159SKalle Valo if (!rxcb->is_continuation) 1577d5c65159SKalle Valo return first; 1578d5c65159SKalle Valo 1579d5c65159SKalle Valo skb_queue_walk(msdu_list, skb) { 1580d5c65159SKalle Valo rxcb = ATH11K_SKB_RXCB(skb); 1581d5c65159SKalle Valo if (!rxcb->is_continuation) 1582d5c65159SKalle Valo return skb; 1583d5c65159SKalle Valo } 1584d5c65159SKalle Valo 1585d5c65159SKalle Valo return NULL; 1586d5c65159SKalle Valo } 1587d5c65159SKalle Valo 1588d5c65159SKalle Valo static int ath11k_dp_rx_retrieve_amsdu(struct ath11k *ar, 1589d5c65159SKalle Valo struct sk_buff_head *msdu_list, 1590d5c65159SKalle Valo struct sk_buff_head *amsdu_list) 1591d5c65159SKalle Valo { 1592d5c65159SKalle Valo struct sk_buff *msdu = skb_peek(msdu_list); 1593d5c65159SKalle Valo struct sk_buff *last_buf; 1594d5c65159SKalle Valo struct ath11k_skb_rxcb *rxcb; 1595d5c65159SKalle Valo struct ieee80211_hdr *hdr; 1596d5c65159SKalle Valo struct hal_rx_desc *rx_desc, *lrx_desc; 1597d5c65159SKalle Valo u16 msdu_len; 1598d5c65159SKalle Valo u8 l3_pad_bytes; 1599d5c65159SKalle Valo u8 *hdr_status; 1600d5c65159SKalle Valo int ret; 1601d5c65159SKalle Valo 
1602d5c65159SKalle Valo if (!msdu) 1603d5c65159SKalle Valo return -ENOENT; 1604d5c65159SKalle Valo 1605d5c65159SKalle Valo rx_desc = (struct hal_rx_desc *)msdu->data; 1606d5c65159SKalle Valo hdr_status = ath11k_dp_rx_h_80211_hdr(rx_desc); 1607d5c65159SKalle Valo hdr = (struct ieee80211_hdr *)hdr_status; 1608d5c65159SKalle Valo /* Process only data frames */ 1609d5c65159SKalle Valo if (!ieee80211_is_data(hdr->frame_control)) { 1610d5c65159SKalle Valo __skb_unlink(msdu, msdu_list); 1611d5c65159SKalle Valo dev_kfree_skb_any(msdu); 1612d5c65159SKalle Valo return -EINVAL; 1613d5c65159SKalle Valo } 1614d5c65159SKalle Valo 1615d5c65159SKalle Valo do { 1616d5c65159SKalle Valo __skb_unlink(msdu, msdu_list); 1617d5c65159SKalle Valo last_buf = ath11k_dp_rx_get_msdu_last_buf(msdu_list, msdu); 1618d5c65159SKalle Valo if (!last_buf) { 1619d5c65159SKalle Valo ath11k_warn(ar->ab, 1620d5c65159SKalle Valo "No valid Rx buffer to access Atten/MSDU_END/MPDU_END tlvs\n"); 1621d5c65159SKalle Valo ret = -EIO; 1622d5c65159SKalle Valo goto free_out; 1623d5c65159SKalle Valo } 1624d5c65159SKalle Valo 1625d5c65159SKalle Valo rx_desc = (struct hal_rx_desc *)msdu->data; 1626d5c65159SKalle Valo lrx_desc = (struct hal_rx_desc *)last_buf->data; 1627d5c65159SKalle Valo 1628d5c65159SKalle Valo if (!ath11k_dp_rx_h_attn_msdu_done(lrx_desc)) { 1629d5c65159SKalle Valo ath11k_warn(ar->ab, "msdu_done bit in attention is not set\n"); 1630d5c65159SKalle Valo ret = -EIO; 1631d5c65159SKalle Valo goto free_out; 1632d5c65159SKalle Valo } 1633d5c65159SKalle Valo 1634d5c65159SKalle Valo rxcb = ATH11K_SKB_RXCB(msdu); 1635d5c65159SKalle Valo rxcb->rx_desc = rx_desc; 1636d5c65159SKalle Valo msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(rx_desc); 1637d5c65159SKalle Valo l3_pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(lrx_desc); 1638d5c65159SKalle Valo 1639243874c6SManikanta Pubbisetty if (rxcb->is_frag) { 1640243874c6SManikanta Pubbisetty skb_pull(msdu, HAL_RX_DESC_SIZE); 1641243874c6SManikanta Pubbisetty } else if 
(!rxcb->is_continuation) { 1642d5c65159SKalle Valo skb_put(msdu, HAL_RX_DESC_SIZE + l3_pad_bytes + msdu_len); 1643d5c65159SKalle Valo skb_pull(msdu, HAL_RX_DESC_SIZE + l3_pad_bytes); 1644d5c65159SKalle Valo } else { 1645d5c65159SKalle Valo ret = ath11k_dp_rx_msdu_coalesce(ar, msdu_list, 1646d5c65159SKalle Valo msdu, last_buf, 1647d5c65159SKalle Valo l3_pad_bytes, msdu_len); 1648d5c65159SKalle Valo if (ret) { 1649d5c65159SKalle Valo ath11k_warn(ar->ab, 1650d5c65159SKalle Valo "failed to coalesce msdu rx buffer%d\n", ret); 1651d5c65159SKalle Valo goto free_out; 1652d5c65159SKalle Valo } 1653d5c65159SKalle Valo } 1654d5c65159SKalle Valo __skb_queue_tail(amsdu_list, msdu); 1655d5c65159SKalle Valo 1656d5c65159SKalle Valo /* Should we also consider msdu_cnt from mpdu_meta while 1657d5c65159SKalle Valo * preparing amsdu list? 1658d5c65159SKalle Valo */ 1659d5c65159SKalle Valo if (rxcb->is_last_msdu) 1660d5c65159SKalle Valo break; 1661d5c65159SKalle Valo } while ((msdu = skb_peek(msdu_list)) != NULL); 1662d5c65159SKalle Valo 1663d5c65159SKalle Valo return 0; 1664d5c65159SKalle Valo 1665d5c65159SKalle Valo free_out: 1666d5c65159SKalle Valo dev_kfree_skb_any(msdu); 1667d5c65159SKalle Valo __skb_queue_purge(amsdu_list); 1668d5c65159SKalle Valo 1669d5c65159SKalle Valo return ret; 1670d5c65159SKalle Valo } 1671d5c65159SKalle Valo 1672d5c65159SKalle Valo static void ath11k_dp_rx_h_csum_offload(struct sk_buff *msdu) 1673d5c65159SKalle Valo { 1674d5c65159SKalle Valo struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); 1675d5c65159SKalle Valo bool ip_csum_fail, l4_csum_fail; 1676d5c65159SKalle Valo 1677d5c65159SKalle Valo ip_csum_fail = ath11k_dp_rx_h_attn_ip_cksum_fail(rxcb->rx_desc); 1678d5c65159SKalle Valo l4_csum_fail = ath11k_dp_rx_h_attn_l4_cksum_fail(rxcb->rx_desc); 1679d5c65159SKalle Valo 1680d5c65159SKalle Valo msdu->ip_summed = (ip_csum_fail || l4_csum_fail) ? 
1681d5c65159SKalle Valo CHECKSUM_NONE : CHECKSUM_UNNECESSARY; 1682d5c65159SKalle Valo } 1683d5c65159SKalle Valo 1684d5c65159SKalle Valo static int ath11k_dp_rx_crypto_mic_len(struct ath11k *ar, 1685d5c65159SKalle Valo enum hal_encrypt_type enctype) 1686d5c65159SKalle Valo { 1687d5c65159SKalle Valo switch (enctype) { 1688d5c65159SKalle Valo case HAL_ENCRYPT_TYPE_OPEN: 1689d5c65159SKalle Valo case HAL_ENCRYPT_TYPE_TKIP_NO_MIC: 1690d5c65159SKalle Valo case HAL_ENCRYPT_TYPE_TKIP_MIC: 1691d5c65159SKalle Valo return 0; 1692d5c65159SKalle Valo case HAL_ENCRYPT_TYPE_CCMP_128: 1693d5c65159SKalle Valo return IEEE80211_CCMP_MIC_LEN; 1694d5c65159SKalle Valo case HAL_ENCRYPT_TYPE_CCMP_256: 1695d5c65159SKalle Valo return IEEE80211_CCMP_256_MIC_LEN; 1696d5c65159SKalle Valo case HAL_ENCRYPT_TYPE_GCMP_128: 1697d5c65159SKalle Valo case HAL_ENCRYPT_TYPE_AES_GCMP_256: 1698d5c65159SKalle Valo return IEEE80211_GCMP_MIC_LEN; 1699d5c65159SKalle Valo case HAL_ENCRYPT_TYPE_WEP_40: 1700d5c65159SKalle Valo case HAL_ENCRYPT_TYPE_WEP_104: 1701d5c65159SKalle Valo case HAL_ENCRYPT_TYPE_WEP_128: 1702d5c65159SKalle Valo case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4: 1703d5c65159SKalle Valo case HAL_ENCRYPT_TYPE_WAPI: 1704d5c65159SKalle Valo break; 1705d5c65159SKalle Valo } 1706d5c65159SKalle Valo 1707d5c65159SKalle Valo ath11k_warn(ar->ab, "unsupported encryption type %d for mic len\n", enctype); 1708d5c65159SKalle Valo return 0; 1709d5c65159SKalle Valo } 1710d5c65159SKalle Valo 1711d5c65159SKalle Valo static int ath11k_dp_rx_crypto_param_len(struct ath11k *ar, 1712d5c65159SKalle Valo enum hal_encrypt_type enctype) 1713d5c65159SKalle Valo { 1714d5c65159SKalle Valo switch (enctype) { 1715d5c65159SKalle Valo case HAL_ENCRYPT_TYPE_OPEN: 1716d5c65159SKalle Valo return 0; 1717d5c65159SKalle Valo case HAL_ENCRYPT_TYPE_TKIP_NO_MIC: 1718d5c65159SKalle Valo case HAL_ENCRYPT_TYPE_TKIP_MIC: 1719d5c65159SKalle Valo return IEEE80211_TKIP_IV_LEN; 1720d5c65159SKalle Valo case HAL_ENCRYPT_TYPE_CCMP_128: 1721d5c65159SKalle 
Valo return IEEE80211_CCMP_HDR_LEN; 1722d5c65159SKalle Valo case HAL_ENCRYPT_TYPE_CCMP_256: 1723d5c65159SKalle Valo return IEEE80211_CCMP_256_HDR_LEN; 1724d5c65159SKalle Valo case HAL_ENCRYPT_TYPE_GCMP_128: 1725d5c65159SKalle Valo case HAL_ENCRYPT_TYPE_AES_GCMP_256: 1726d5c65159SKalle Valo return IEEE80211_GCMP_HDR_LEN; 1727d5c65159SKalle Valo case HAL_ENCRYPT_TYPE_WEP_40: 1728d5c65159SKalle Valo case HAL_ENCRYPT_TYPE_WEP_104: 1729d5c65159SKalle Valo case HAL_ENCRYPT_TYPE_WEP_128: 1730d5c65159SKalle Valo case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4: 1731d5c65159SKalle Valo case HAL_ENCRYPT_TYPE_WAPI: 1732d5c65159SKalle Valo break; 1733d5c65159SKalle Valo } 1734d5c65159SKalle Valo 1735d5c65159SKalle Valo ath11k_warn(ar->ab, "unsupported encryption type %d\n", enctype); 1736d5c65159SKalle Valo return 0; 1737d5c65159SKalle Valo } 1738d5c65159SKalle Valo 1739d5c65159SKalle Valo static int ath11k_dp_rx_crypto_icv_len(struct ath11k *ar, 1740d5c65159SKalle Valo enum hal_encrypt_type enctype) 1741d5c65159SKalle Valo { 1742d5c65159SKalle Valo switch (enctype) { 1743d5c65159SKalle Valo case HAL_ENCRYPT_TYPE_OPEN: 1744d5c65159SKalle Valo case HAL_ENCRYPT_TYPE_CCMP_128: 1745d5c65159SKalle Valo case HAL_ENCRYPT_TYPE_CCMP_256: 1746d5c65159SKalle Valo case HAL_ENCRYPT_TYPE_GCMP_128: 1747d5c65159SKalle Valo case HAL_ENCRYPT_TYPE_AES_GCMP_256: 1748d5c65159SKalle Valo return 0; 1749d5c65159SKalle Valo case HAL_ENCRYPT_TYPE_TKIP_NO_MIC: 1750d5c65159SKalle Valo case HAL_ENCRYPT_TYPE_TKIP_MIC: 1751d5c65159SKalle Valo return IEEE80211_TKIP_ICV_LEN; 1752d5c65159SKalle Valo case HAL_ENCRYPT_TYPE_WEP_40: 1753d5c65159SKalle Valo case HAL_ENCRYPT_TYPE_WEP_104: 1754d5c65159SKalle Valo case HAL_ENCRYPT_TYPE_WEP_128: 1755d5c65159SKalle Valo case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4: 1756d5c65159SKalle Valo case HAL_ENCRYPT_TYPE_WAPI: 1757d5c65159SKalle Valo break; 1758d5c65159SKalle Valo } 1759d5c65159SKalle Valo 1760d5c65159SKalle Valo ath11k_warn(ar->ab, "unsupported encryption type %d\n", enctype); 
1761d5c65159SKalle Valo return 0; 1762d5c65159SKalle Valo } 1763d5c65159SKalle Valo 1764d5c65159SKalle Valo static void ath11k_dp_rx_h_undecap_nwifi(struct ath11k *ar, 1765d5c65159SKalle Valo struct sk_buff *msdu, 1766d5c65159SKalle Valo u8 *first_hdr, 1767d5c65159SKalle Valo enum hal_encrypt_type enctype, 1768d5c65159SKalle Valo struct ieee80211_rx_status *status) 1769d5c65159SKalle Valo { 1770d5c65159SKalle Valo struct ieee80211_hdr *hdr; 1771d5c65159SKalle Valo size_t hdr_len; 1772d5c65159SKalle Valo u8 da[ETH_ALEN]; 1773d5c65159SKalle Valo u8 sa[ETH_ALEN]; 1774d5c65159SKalle Valo 1775d5c65159SKalle Valo /* pull decapped header and copy SA & DA */ 1776d5c65159SKalle Valo hdr = (struct ieee80211_hdr *)msdu->data; 1777d5c65159SKalle Valo ether_addr_copy(da, ieee80211_get_DA(hdr)); 1778d5c65159SKalle Valo ether_addr_copy(sa, ieee80211_get_SA(hdr)); 1779d5c65159SKalle Valo skb_pull(msdu, ieee80211_hdrlen(hdr->frame_control)); 1780d5c65159SKalle Valo 1781d5c65159SKalle Valo /* push original 802.11 header */ 1782d5c65159SKalle Valo hdr = (struct ieee80211_hdr *)first_hdr; 1783d5c65159SKalle Valo hdr_len = ieee80211_hdrlen(hdr->frame_control); 1784d5c65159SKalle Valo 1785d5c65159SKalle Valo if (!(status->flag & RX_FLAG_IV_STRIPPED)) { 1786d5c65159SKalle Valo memcpy(skb_push(msdu, 1787d5c65159SKalle Valo ath11k_dp_rx_crypto_param_len(ar, enctype)), 1788d5c65159SKalle Valo (void *)hdr + hdr_len, 1789d5c65159SKalle Valo ath11k_dp_rx_crypto_param_len(ar, enctype)); 1790d5c65159SKalle Valo } 1791d5c65159SKalle Valo 1792d5c65159SKalle Valo memcpy(skb_push(msdu, hdr_len), hdr, hdr_len); 1793d5c65159SKalle Valo 1794d5c65159SKalle Valo /* original 802.11 header has a different DA and in 1795d5c65159SKalle Valo * case of 4addr it may also have different SA 1796d5c65159SKalle Valo */ 1797d5c65159SKalle Valo hdr = (struct ieee80211_hdr *)msdu->data; 1798d5c65159SKalle Valo ether_addr_copy(ieee80211_get_DA(hdr), da); 1799d5c65159SKalle Valo ether_addr_copy(ieee80211_get_SA(hdr), 
sa); 1800d5c65159SKalle Valo } 1801d5c65159SKalle Valo 1802d5c65159SKalle Valo static void ath11k_dp_rx_h_undecap_raw(struct ath11k *ar, struct sk_buff *msdu, 1803d5c65159SKalle Valo enum hal_encrypt_type enctype, 1804d5c65159SKalle Valo struct ieee80211_rx_status *status, 1805d5c65159SKalle Valo bool decrypted) 1806d5c65159SKalle Valo { 1807d5c65159SKalle Valo struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); 1808d5c65159SKalle Valo struct ieee80211_hdr *hdr; 1809d5c65159SKalle Valo size_t hdr_len; 1810d5c65159SKalle Valo size_t crypto_len; 1811d5c65159SKalle Valo 1812d5c65159SKalle Valo if (!rxcb->is_first_msdu || 1813d5c65159SKalle Valo !(rxcb->is_first_msdu && rxcb->is_last_msdu)) { 1814d5c65159SKalle Valo WARN_ON_ONCE(1); 1815d5c65159SKalle Valo return; 1816d5c65159SKalle Valo } 1817d5c65159SKalle Valo 1818d5c65159SKalle Valo skb_trim(msdu, msdu->len - FCS_LEN); 1819d5c65159SKalle Valo 1820d5c65159SKalle Valo if (!decrypted) 1821d5c65159SKalle Valo return; 1822d5c65159SKalle Valo 1823d5c65159SKalle Valo hdr = (void *)msdu->data; 1824d5c65159SKalle Valo 1825d5c65159SKalle Valo /* Tail */ 1826d5c65159SKalle Valo if (status->flag & RX_FLAG_IV_STRIPPED) { 1827d5c65159SKalle Valo skb_trim(msdu, msdu->len - 1828d5c65159SKalle Valo ath11k_dp_rx_crypto_mic_len(ar, enctype)); 1829d5c65159SKalle Valo 1830d5c65159SKalle Valo skb_trim(msdu, msdu->len - 1831d5c65159SKalle Valo ath11k_dp_rx_crypto_icv_len(ar, enctype)); 1832d5c65159SKalle Valo } else { 1833d5c65159SKalle Valo /* MIC */ 1834d5c65159SKalle Valo if (status->flag & RX_FLAG_MIC_STRIPPED) 1835d5c65159SKalle Valo skb_trim(msdu, msdu->len - 1836d5c65159SKalle Valo ath11k_dp_rx_crypto_mic_len(ar, enctype)); 1837d5c65159SKalle Valo 1838d5c65159SKalle Valo /* ICV */ 1839d5c65159SKalle Valo if (status->flag & RX_FLAG_ICV_STRIPPED) 1840d5c65159SKalle Valo skb_trim(msdu, msdu->len - 1841d5c65159SKalle Valo ath11k_dp_rx_crypto_icv_len(ar, enctype)); 1842d5c65159SKalle Valo } 1843d5c65159SKalle Valo 1844d5c65159SKalle 
Valo /* MMIC */ 1845d5c65159SKalle Valo if ((status->flag & RX_FLAG_MMIC_STRIPPED) && 1846d5c65159SKalle Valo !ieee80211_has_morefrags(hdr->frame_control) && 1847d5c65159SKalle Valo enctype == HAL_ENCRYPT_TYPE_TKIP_MIC) 1848d5c65159SKalle Valo skb_trim(msdu, msdu->len - IEEE80211_CCMP_MIC_LEN); 1849d5c65159SKalle Valo 1850d5c65159SKalle Valo /* Head */ 1851d5c65159SKalle Valo if (status->flag & RX_FLAG_IV_STRIPPED) { 1852d5c65159SKalle Valo hdr_len = ieee80211_hdrlen(hdr->frame_control); 1853d5c65159SKalle Valo crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype); 1854d5c65159SKalle Valo 1855d5c65159SKalle Valo memmove((void *)msdu->data + crypto_len, 1856d5c65159SKalle Valo (void *)msdu->data, hdr_len); 1857d5c65159SKalle Valo skb_pull(msdu, crypto_len); 1858d5c65159SKalle Valo } 1859d5c65159SKalle Valo } 1860d5c65159SKalle Valo 1861d5c65159SKalle Valo static void *ath11k_dp_rx_h_find_rfc1042(struct ath11k *ar, 1862d5c65159SKalle Valo struct sk_buff *msdu, 1863d5c65159SKalle Valo enum hal_encrypt_type enctype) 1864d5c65159SKalle Valo { 1865d5c65159SKalle Valo struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); 1866d5c65159SKalle Valo struct ieee80211_hdr *hdr; 1867d5c65159SKalle Valo size_t hdr_len, crypto_len; 1868d5c65159SKalle Valo void *rfc1042; 1869d5c65159SKalle Valo bool is_amsdu; 1870d5c65159SKalle Valo 1871d5c65159SKalle Valo is_amsdu = !(rxcb->is_first_msdu && rxcb->is_last_msdu); 1872d5c65159SKalle Valo hdr = (struct ieee80211_hdr *)ath11k_dp_rx_h_80211_hdr(rxcb->rx_desc); 1873d5c65159SKalle Valo rfc1042 = hdr; 1874d5c65159SKalle Valo 1875d5c65159SKalle Valo if (rxcb->is_first_msdu) { 1876d5c65159SKalle Valo hdr_len = ieee80211_hdrlen(hdr->frame_control); 1877d5c65159SKalle Valo crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype); 1878d5c65159SKalle Valo 1879d5c65159SKalle Valo rfc1042 += hdr_len + crypto_len; 1880d5c65159SKalle Valo } 1881d5c65159SKalle Valo 1882d5c65159SKalle Valo if (is_amsdu) 1883d5c65159SKalle Valo rfc1042 += sizeof(struct 
ath11k_dp_amsdu_subframe_hdr); 1884d5c65159SKalle Valo 1885d5c65159SKalle Valo return rfc1042; 1886d5c65159SKalle Valo } 1887d5c65159SKalle Valo 1888d5c65159SKalle Valo static void ath11k_dp_rx_h_undecap_eth(struct ath11k *ar, 1889d5c65159SKalle Valo struct sk_buff *msdu, 1890d5c65159SKalle Valo u8 *first_hdr, 1891d5c65159SKalle Valo enum hal_encrypt_type enctype, 1892d5c65159SKalle Valo struct ieee80211_rx_status *status) 1893d5c65159SKalle Valo { 1894d5c65159SKalle Valo struct ieee80211_hdr *hdr; 1895d5c65159SKalle Valo struct ethhdr *eth; 1896d5c65159SKalle Valo size_t hdr_len; 1897d5c65159SKalle Valo u8 da[ETH_ALEN]; 1898d5c65159SKalle Valo u8 sa[ETH_ALEN]; 1899d5c65159SKalle Valo void *rfc1042; 1900d5c65159SKalle Valo 1901d5c65159SKalle Valo rfc1042 = ath11k_dp_rx_h_find_rfc1042(ar, msdu, enctype); 1902d5c65159SKalle Valo if (WARN_ON_ONCE(!rfc1042)) 1903d5c65159SKalle Valo return; 1904d5c65159SKalle Valo 1905d5c65159SKalle Valo /* pull decapped header and copy SA & DA */ 1906d5c65159SKalle Valo eth = (struct ethhdr *)msdu->data; 1907d5c65159SKalle Valo ether_addr_copy(da, eth->h_dest); 1908d5c65159SKalle Valo ether_addr_copy(sa, eth->h_source); 1909d5c65159SKalle Valo skb_pull(msdu, sizeof(struct ethhdr)); 1910d5c65159SKalle Valo 1911d5c65159SKalle Valo /* push rfc1042/llc/snap */ 1912d5c65159SKalle Valo memcpy(skb_push(msdu, sizeof(struct ath11k_dp_rfc1042_hdr)), rfc1042, 1913d5c65159SKalle Valo sizeof(struct ath11k_dp_rfc1042_hdr)); 1914d5c65159SKalle Valo 1915d5c65159SKalle Valo /* push original 802.11 header */ 1916d5c65159SKalle Valo hdr = (struct ieee80211_hdr *)first_hdr; 1917d5c65159SKalle Valo hdr_len = ieee80211_hdrlen(hdr->frame_control); 1918d5c65159SKalle Valo 1919d5c65159SKalle Valo if (!(status->flag & RX_FLAG_IV_STRIPPED)) { 1920d5c65159SKalle Valo memcpy(skb_push(msdu, 1921d5c65159SKalle Valo ath11k_dp_rx_crypto_param_len(ar, enctype)), 1922d5c65159SKalle Valo (void *)hdr + hdr_len, 1923d5c65159SKalle Valo ath11k_dp_rx_crypto_param_len(ar, 
enctype)); 1924d5c65159SKalle Valo } 1925d5c65159SKalle Valo 1926d5c65159SKalle Valo memcpy(skb_push(msdu, hdr_len), hdr, hdr_len); 1927d5c65159SKalle Valo 1928d5c65159SKalle Valo /* original 802.11 header has a different DA and in 1929d5c65159SKalle Valo * case of 4addr it may also have different SA 1930d5c65159SKalle Valo */ 1931d5c65159SKalle Valo hdr = (struct ieee80211_hdr *)msdu->data; 1932d5c65159SKalle Valo ether_addr_copy(ieee80211_get_DA(hdr), da); 1933d5c65159SKalle Valo ether_addr_copy(ieee80211_get_SA(hdr), sa); 1934d5c65159SKalle Valo } 1935d5c65159SKalle Valo 1936d5c65159SKalle Valo static void ath11k_dp_rx_h_undecap(struct ath11k *ar, struct sk_buff *msdu, 1937d5c65159SKalle Valo struct hal_rx_desc *rx_desc, 1938d5c65159SKalle Valo enum hal_encrypt_type enctype, 1939d5c65159SKalle Valo struct ieee80211_rx_status *status, 1940d5c65159SKalle Valo bool decrypted) 1941d5c65159SKalle Valo { 1942d5c65159SKalle Valo u8 *first_hdr; 1943d5c65159SKalle Valo u8 decap; 1944d5c65159SKalle Valo 1945d5c65159SKalle Valo first_hdr = ath11k_dp_rx_h_80211_hdr(rx_desc); 1946243874c6SManikanta Pubbisetty decap = ath11k_dp_rx_h_msdu_start_decap_type(rx_desc); 1947d5c65159SKalle Valo 1948d5c65159SKalle Valo switch (decap) { 1949d5c65159SKalle Valo case DP_RX_DECAP_TYPE_NATIVE_WIFI: 1950d5c65159SKalle Valo ath11k_dp_rx_h_undecap_nwifi(ar, msdu, first_hdr, 1951d5c65159SKalle Valo enctype, status); 1952d5c65159SKalle Valo break; 1953d5c65159SKalle Valo case DP_RX_DECAP_TYPE_RAW: 1954d5c65159SKalle Valo ath11k_dp_rx_h_undecap_raw(ar, msdu, enctype, status, 1955d5c65159SKalle Valo decrypted); 1956d5c65159SKalle Valo break; 1957d5c65159SKalle Valo case DP_RX_DECAP_TYPE_ETHERNET2_DIX: 1958d5c65159SKalle Valo ath11k_dp_rx_h_undecap_eth(ar, msdu, first_hdr, 1959d5c65159SKalle Valo enctype, status); 1960d5c65159SKalle Valo break; 1961d5c65159SKalle Valo case DP_RX_DECAP_TYPE_8023: 1962d5c65159SKalle Valo /* TODO: Handle undecap for these formats */ 1963d5c65159SKalle Valo break; 
1964d5c65159SKalle Valo } 1965d5c65159SKalle Valo } 1966d5c65159SKalle Valo 1967d5c65159SKalle Valo static void ath11k_dp_rx_h_mpdu(struct ath11k *ar, 1968d5c65159SKalle Valo struct sk_buff_head *amsdu_list, 1969d5c65159SKalle Valo struct hal_rx_desc *rx_desc, 1970d5c65159SKalle Valo struct ieee80211_rx_status *rx_status) 1971d5c65159SKalle Valo { 1972d5c65159SKalle Valo struct ieee80211_hdr *hdr; 1973d5c65159SKalle Valo enum hal_encrypt_type enctype; 1974d5c65159SKalle Valo struct sk_buff *last_msdu; 1975d5c65159SKalle Valo struct sk_buff *msdu; 1976d5c65159SKalle Valo struct ath11k_skb_rxcb *last_rxcb; 1977d5c65159SKalle Valo bool is_decrypted; 1978d5c65159SKalle Valo u32 err_bitmap; 1979d5c65159SKalle Valo u8 *qos; 1980d5c65159SKalle Valo 1981d5c65159SKalle Valo if (skb_queue_empty(amsdu_list)) 1982d5c65159SKalle Valo return; 1983d5c65159SKalle Valo 1984d5c65159SKalle Valo hdr = (struct ieee80211_hdr *)ath11k_dp_rx_h_80211_hdr(rx_desc); 1985d5c65159SKalle Valo 1986d5c65159SKalle Valo /* Each A-MSDU subframe will use the original header as the base and be 1987d5c65159SKalle Valo * reported as a separate MSDU so strip the A-MSDU bit from QoS Ctl. 1988d5c65159SKalle Valo */ 1989d5c65159SKalle Valo if (ieee80211_is_data_qos(hdr->frame_control)) { 1990d5c65159SKalle Valo qos = ieee80211_get_qos_ctl(hdr); 1991d5c65159SKalle Valo qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT; 1992d5c65159SKalle Valo } 1993d5c65159SKalle Valo 1994d5c65159SKalle Valo is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_desc); 1995d5c65159SKalle Valo enctype = ath11k_dp_rx_h_mpdu_start_enctype(rx_desc); 1996d5c65159SKalle Valo 1997d5c65159SKalle Valo /* Some attention flags are valid only in the last MSDU. 
*/ 1998d5c65159SKalle Valo last_msdu = skb_peek_tail(amsdu_list); 1999d5c65159SKalle Valo last_rxcb = ATH11K_SKB_RXCB(last_msdu); 2000d5c65159SKalle Valo 2001d5c65159SKalle Valo err_bitmap = ath11k_dp_rx_h_attn_mpdu_err(last_rxcb->rx_desc); 2002d5c65159SKalle Valo 2003d5c65159SKalle Valo /* Clear per-MPDU flags while leaving per-PPDU flags intact. */ 2004d5c65159SKalle Valo rx_status->flag &= ~(RX_FLAG_FAILED_FCS_CRC | 2005d5c65159SKalle Valo RX_FLAG_MMIC_ERROR | 2006d5c65159SKalle Valo RX_FLAG_DECRYPTED | 2007d5c65159SKalle Valo RX_FLAG_IV_STRIPPED | 2008d5c65159SKalle Valo RX_FLAG_MMIC_STRIPPED); 2009d5c65159SKalle Valo 2010d5c65159SKalle Valo if (err_bitmap & DP_RX_MPDU_ERR_FCS) 2011d5c65159SKalle Valo rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; 2012d5c65159SKalle Valo 2013d5c65159SKalle Valo if (err_bitmap & DP_RX_MPDU_ERR_TKIP_MIC) 2014d5c65159SKalle Valo rx_status->flag |= RX_FLAG_MMIC_ERROR; 2015d5c65159SKalle Valo 2016d5c65159SKalle Valo if (is_decrypted) 2017d5c65159SKalle Valo rx_status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MMIC_STRIPPED | 2018d5c65159SKalle Valo RX_FLAG_MIC_STRIPPED | RX_FLAG_ICV_STRIPPED; 2019d5c65159SKalle Valo 2020d5c65159SKalle Valo skb_queue_walk(amsdu_list, msdu) { 2021d5c65159SKalle Valo ath11k_dp_rx_h_csum_offload(msdu); 2022d5c65159SKalle Valo ath11k_dp_rx_h_undecap(ar, msdu, rx_desc, 2023d5c65159SKalle Valo enctype, rx_status, is_decrypted); 2024d5c65159SKalle Valo } 2025d5c65159SKalle Valo } 2026d5c65159SKalle Valo 2027d5c65159SKalle Valo static void ath11k_dp_rx_h_rate(struct ath11k *ar, struct hal_rx_desc *rx_desc, 2028d5c65159SKalle Valo struct ieee80211_rx_status *rx_status) 2029d5c65159SKalle Valo { 2030d5c65159SKalle Valo struct ieee80211_supported_band *sband; 2031d5c65159SKalle Valo enum rx_msdu_start_pkt_type pkt_type; 2032d5c65159SKalle Valo u8 bw; 2033d5c65159SKalle Valo u8 rate_mcs, nss; 2034d5c65159SKalle Valo u8 sgi; 2035d5c65159SKalle Valo bool is_cck; 2036d5c65159SKalle Valo 2037d5c65159SKalle Valo pkt_type = 
ath11k_dp_rx_h_msdu_start_pkt_type(rx_desc); 2038d5c65159SKalle Valo bw = ath11k_dp_rx_h_msdu_start_rx_bw(rx_desc); 2039d5c65159SKalle Valo rate_mcs = ath11k_dp_rx_h_msdu_start_rate_mcs(rx_desc); 2040d5c65159SKalle Valo nss = ath11k_dp_rx_h_msdu_start_nss(rx_desc); 2041d5c65159SKalle Valo sgi = ath11k_dp_rx_h_msdu_start_sgi(rx_desc); 2042d5c65159SKalle Valo 2043d5c65159SKalle Valo switch (pkt_type) { 2044d5c65159SKalle Valo case RX_MSDU_START_PKT_TYPE_11A: 2045d5c65159SKalle Valo case RX_MSDU_START_PKT_TYPE_11B: 2046d5c65159SKalle Valo is_cck = (pkt_type == RX_MSDU_START_PKT_TYPE_11B); 2047d5c65159SKalle Valo sband = &ar->mac.sbands[rx_status->band]; 2048d5c65159SKalle Valo rx_status->rate_idx = ath11k_mac_hw_rate_to_idx(sband, rate_mcs, 2049d5c65159SKalle Valo is_cck); 2050d5c65159SKalle Valo break; 2051d5c65159SKalle Valo case RX_MSDU_START_PKT_TYPE_11N: 2052d5c65159SKalle Valo rx_status->encoding = RX_ENC_HT; 2053d5c65159SKalle Valo if (rate_mcs > ATH11K_HT_MCS_MAX) { 2054d5c65159SKalle Valo ath11k_warn(ar->ab, 2055d5c65159SKalle Valo "Received with invalid mcs in HT mode %d\n", 2056d5c65159SKalle Valo rate_mcs); 2057d5c65159SKalle Valo break; 2058d5c65159SKalle Valo } 2059d5c65159SKalle Valo rx_status->rate_idx = rate_mcs + (8 * (nss - 1)); 2060d5c65159SKalle Valo if (sgi) 2061d5c65159SKalle Valo rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI; 206239e81c6aSTamizh chelvam rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw); 2063d5c65159SKalle Valo break; 2064d5c65159SKalle Valo case RX_MSDU_START_PKT_TYPE_11AC: 2065d5c65159SKalle Valo rx_status->encoding = RX_ENC_VHT; 2066d5c65159SKalle Valo rx_status->rate_idx = rate_mcs; 2067d5c65159SKalle Valo if (rate_mcs > ATH11K_VHT_MCS_MAX) { 2068d5c65159SKalle Valo ath11k_warn(ar->ab, 2069d5c65159SKalle Valo "Received with invalid mcs in VHT mode %d\n", 2070d5c65159SKalle Valo rate_mcs); 2071d5c65159SKalle Valo break; 2072d5c65159SKalle Valo } 2073d5c65159SKalle Valo rx_status->nss = nss; 2074d5c65159SKalle Valo if (sgi) 
2075d5c65159SKalle Valo rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI; 207639e81c6aSTamizh chelvam rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw); 2077d5c65159SKalle Valo break; 2078d5c65159SKalle Valo case RX_MSDU_START_PKT_TYPE_11AX: 2079d5c65159SKalle Valo rx_status->rate_idx = rate_mcs; 2080d5c65159SKalle Valo if (rate_mcs > ATH11K_HE_MCS_MAX) { 2081d5c65159SKalle Valo ath11k_warn(ar->ab, 2082d5c65159SKalle Valo "Received with invalid mcs in HE mode %d\n", 2083d5c65159SKalle Valo rate_mcs); 2084d5c65159SKalle Valo break; 2085d5c65159SKalle Valo } 2086d5c65159SKalle Valo rx_status->encoding = RX_ENC_HE; 2087d5c65159SKalle Valo rx_status->nss = nss; 20886a0c3702SJohn Crispin rx_status->he_gi = ath11k_he_gi_to_nl80211_he_gi(sgi); 208939e81c6aSTamizh chelvam rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw); 2090d5c65159SKalle Valo break; 2091d5c65159SKalle Valo } 2092d5c65159SKalle Valo } 2093d5c65159SKalle Valo 2094d5c65159SKalle Valo static void ath11k_dp_rx_h_ppdu(struct ath11k *ar, struct hal_rx_desc *rx_desc, 2095d5c65159SKalle Valo struct ieee80211_rx_status *rx_status) 2096d5c65159SKalle Valo { 2097d5c65159SKalle Valo u8 channel_num; 2098d5c65159SKalle Valo 2099d5c65159SKalle Valo rx_status->freq = 0; 2100d5c65159SKalle Valo rx_status->rate_idx = 0; 2101d5c65159SKalle Valo rx_status->nss = 0; 2102d5c65159SKalle Valo rx_status->encoding = RX_ENC_LEGACY; 2103d5c65159SKalle Valo rx_status->bw = RATE_INFO_BW_20; 2104d5c65159SKalle Valo 2105d5c65159SKalle Valo rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL; 2106d5c65159SKalle Valo 2107d5c65159SKalle Valo channel_num = ath11k_dp_rx_h_msdu_start_freq(rx_desc); 2108d5c65159SKalle Valo 2109d5c65159SKalle Valo if (channel_num >= 1 && channel_num <= 14) { 2110d5c65159SKalle Valo rx_status->band = NL80211_BAND_2GHZ; 2111d5c65159SKalle Valo } else if (channel_num >= 36 && channel_num <= 173) { 2112d5c65159SKalle Valo rx_status->band = NL80211_BAND_5GHZ; 2113d5c65159SKalle Valo } else { 2114d5c65159SKalle Valo 
ath11k_warn(ar->ab, "Unsupported Channel info received %d\n", 2115d5c65159SKalle Valo channel_num); 2116d5c65159SKalle Valo return; 2117d5c65159SKalle Valo } 2118d5c65159SKalle Valo 2119d5c65159SKalle Valo rx_status->freq = ieee80211_channel_to_frequency(channel_num, 2120d5c65159SKalle Valo rx_status->band); 2121d5c65159SKalle Valo 2122d5c65159SKalle Valo ath11k_dp_rx_h_rate(ar, rx_desc, rx_status); 2123d5c65159SKalle Valo } 2124d5c65159SKalle Valo 2125d5c65159SKalle Valo static void ath11k_dp_rx_process_amsdu(struct ath11k *ar, 2126d5c65159SKalle Valo struct sk_buff_head *amsdu_list, 2127d5c65159SKalle Valo struct ieee80211_rx_status *rx_status) 2128d5c65159SKalle Valo { 2129d5c65159SKalle Valo struct sk_buff *first; 2130d5c65159SKalle Valo struct ath11k_skb_rxcb *rxcb; 2131d5c65159SKalle Valo struct hal_rx_desc *rx_desc; 2132d5c65159SKalle Valo bool first_mpdu; 2133d5c65159SKalle Valo 2134d5c65159SKalle Valo if (skb_queue_empty(amsdu_list)) 2135d5c65159SKalle Valo return; 2136d5c65159SKalle Valo 2137d5c65159SKalle Valo first = skb_peek(amsdu_list); 2138d5c65159SKalle Valo rxcb = ATH11K_SKB_RXCB(first); 2139d5c65159SKalle Valo rx_desc = rxcb->rx_desc; 2140d5c65159SKalle Valo 2141d5c65159SKalle Valo first_mpdu = ath11k_dp_rx_h_attn_first_mpdu(rx_desc); 2142d5c65159SKalle Valo if (first_mpdu) 2143d5c65159SKalle Valo ath11k_dp_rx_h_ppdu(ar, rx_desc, rx_status); 2144d5c65159SKalle Valo 2145d5c65159SKalle Valo ath11k_dp_rx_h_mpdu(ar, amsdu_list, rx_desc, rx_status); 2146d5c65159SKalle Valo } 2147d5c65159SKalle Valo 2148d5c65159SKalle Valo static char *ath11k_print_get_tid(struct ieee80211_hdr *hdr, char *out, 2149d5c65159SKalle Valo size_t size) 2150d5c65159SKalle Valo { 2151d5c65159SKalle Valo u8 *qc; 2152d5c65159SKalle Valo int tid; 2153d5c65159SKalle Valo 2154d5c65159SKalle Valo if (!ieee80211_is_data_qos(hdr->frame_control)) 2155d5c65159SKalle Valo return ""; 2156d5c65159SKalle Valo 2157d5c65159SKalle Valo qc = ieee80211_get_qos_ctl(hdr); 2158d5c65159SKalle Valo 
tid = *qc & IEEE80211_QOS_CTL_TID_MASK; 2159d5c65159SKalle Valo snprintf(out, size, "tid %d", tid); 2160d5c65159SKalle Valo 2161d5c65159SKalle Valo return out; 2162d5c65159SKalle Valo } 2163d5c65159SKalle Valo 2164d5c65159SKalle Valo static void ath11k_dp_rx_deliver_msdu(struct ath11k *ar, struct napi_struct *napi, 2165d5c65159SKalle Valo struct sk_buff *msdu) 2166d5c65159SKalle Valo { 2167e4eb7b5cSJohn Crispin static const struct ieee80211_radiotap_he known = { 216893634c61SJohn Crispin .data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN | 216993634c61SJohn Crispin IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN), 2170e4eb7b5cSJohn Crispin .data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN), 2171e4eb7b5cSJohn Crispin }; 2172d5c65159SKalle Valo struct ieee80211_rx_status *status; 2173d5c65159SKalle Valo struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data; 2174e4eb7b5cSJohn Crispin struct ieee80211_radiotap_he *he = NULL; 2175d5c65159SKalle Valo char tid[32]; 2176d5c65159SKalle Valo 2177d5c65159SKalle Valo status = IEEE80211_SKB_RXCB(msdu); 2178e4eb7b5cSJohn Crispin if (status->encoding == RX_ENC_HE) { 2179e4eb7b5cSJohn Crispin he = skb_push(msdu, sizeof(known)); 2180e4eb7b5cSJohn Crispin memcpy(he, &known, sizeof(known)); 2181e4eb7b5cSJohn Crispin status->flag |= RX_FLAG_RADIOTAP_HE; 2182e4eb7b5cSJohn Crispin } 2183d5c65159SKalle Valo 2184d5c65159SKalle Valo ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 2185d5c65159SKalle Valo "rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n", 2186d5c65159SKalle Valo msdu, 2187d5c65159SKalle Valo msdu->len, 2188d5c65159SKalle Valo ieee80211_get_SA(hdr), 2189d5c65159SKalle Valo ath11k_print_get_tid(hdr, tid, sizeof(tid)), 2190d5c65159SKalle Valo is_multicast_ether_addr(ieee80211_get_DA(hdr)) ? 
2191d5c65159SKalle Valo "mcast" : "ucast", 2192d5c65159SKalle Valo (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4, 2193d5c65159SKalle Valo (status->encoding == RX_ENC_LEGACY) ? "legacy" : "", 2194d5c65159SKalle Valo (status->encoding == RX_ENC_HT) ? "ht" : "", 2195d5c65159SKalle Valo (status->encoding == RX_ENC_VHT) ? "vht" : "", 2196d5c65159SKalle Valo (status->encoding == RX_ENC_HE) ? "he" : "", 2197d5c65159SKalle Valo (status->bw == RATE_INFO_BW_40) ? "40" : "", 2198d5c65159SKalle Valo (status->bw == RATE_INFO_BW_80) ? "80" : "", 2199d5c65159SKalle Valo (status->bw == RATE_INFO_BW_160) ? "160" : "", 2200d5c65159SKalle Valo status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "", 2201d5c65159SKalle Valo status->rate_idx, 2202d5c65159SKalle Valo status->nss, 2203d5c65159SKalle Valo status->freq, 2204d5c65159SKalle Valo status->band, status->flag, 2205d5c65159SKalle Valo !!(status->flag & RX_FLAG_FAILED_FCS_CRC), 2206d5c65159SKalle Valo !!(status->flag & RX_FLAG_MMIC_ERROR), 2207d5c65159SKalle Valo !!(status->flag & RX_FLAG_AMSDU_MORE)); 2208d5c65159SKalle Valo 2209d5c65159SKalle Valo /* TODO: trace rx packet */ 2210d5c65159SKalle Valo 2211d5c65159SKalle Valo ieee80211_rx_napi(ar->hw, NULL, msdu, napi); 2212d5c65159SKalle Valo } 2213d5c65159SKalle Valo 2214d5c65159SKalle Valo static void ath11k_dp_rx_pre_deliver_amsdu(struct ath11k *ar, 2215d5c65159SKalle Valo struct sk_buff_head *amsdu_list, 2216d5c65159SKalle Valo struct ieee80211_rx_status *rxs) 2217d5c65159SKalle Valo { 2218d5c65159SKalle Valo struct sk_buff *msdu; 2219d5c65159SKalle Valo struct sk_buff *first_subframe; 2220d5c65159SKalle Valo struct ieee80211_rx_status *status; 2221d5c65159SKalle Valo 2222d5c65159SKalle Valo first_subframe = skb_peek(amsdu_list); 2223d5c65159SKalle Valo 2224d5c65159SKalle Valo skb_queue_walk(amsdu_list, msdu) { 2225d5c65159SKalle Valo /* Setup per-MSDU flags */ 2226d5c65159SKalle Valo if (skb_queue_empty(amsdu_list)) 2227d5c65159SKalle Valo rxs->flag &= 
~RX_FLAG_AMSDU_MORE; 2228d5c65159SKalle Valo else 2229d5c65159SKalle Valo rxs->flag |= RX_FLAG_AMSDU_MORE; 2230d5c65159SKalle Valo 2231d5c65159SKalle Valo if (msdu == first_subframe) { 2232d5c65159SKalle Valo first_subframe = NULL; 2233d5c65159SKalle Valo rxs->flag &= ~RX_FLAG_ALLOW_SAME_PN; 2234d5c65159SKalle Valo } else { 2235d5c65159SKalle Valo rxs->flag |= RX_FLAG_ALLOW_SAME_PN; 2236d5c65159SKalle Valo } 2237d5c65159SKalle Valo rxs->flag |= RX_FLAG_SKIP_MONITOR; 2238d5c65159SKalle Valo 2239d5c65159SKalle Valo status = IEEE80211_SKB_RXCB(msdu); 2240d5c65159SKalle Valo *status = *rxs; 2241d5c65159SKalle Valo } 2242d5c65159SKalle Valo } 2243d5c65159SKalle Valo 2244d5c65159SKalle Valo static void ath11k_dp_rx_process_pending_packets(struct ath11k_base *ab, 2245d5c65159SKalle Valo struct napi_struct *napi, 2246d5c65159SKalle Valo struct sk_buff_head *pending_q, 2247d5c65159SKalle Valo int *quota, u8 mac_id) 2248d5c65159SKalle Valo { 2249d5c65159SKalle Valo struct ath11k *ar; 2250d5c65159SKalle Valo struct sk_buff *msdu; 2251d5c65159SKalle Valo struct ath11k_pdev *pdev; 2252d5c65159SKalle Valo 2253d5c65159SKalle Valo if (skb_queue_empty(pending_q)) 2254d5c65159SKalle Valo return; 2255d5c65159SKalle Valo 2256d5c65159SKalle Valo ar = ab->pdevs[mac_id].ar; 2257d5c65159SKalle Valo 2258d5c65159SKalle Valo rcu_read_lock(); 2259d5c65159SKalle Valo pdev = rcu_dereference(ab->pdevs_active[mac_id]); 2260d5c65159SKalle Valo 2261d5c65159SKalle Valo while (*quota && (msdu = __skb_dequeue(pending_q))) { 2262d5c65159SKalle Valo if (!pdev) { 2263d5c65159SKalle Valo dev_kfree_skb_any(msdu); 2264d5c65159SKalle Valo continue; 2265d5c65159SKalle Valo } 2266d5c65159SKalle Valo 2267d5c65159SKalle Valo ath11k_dp_rx_deliver_msdu(ar, napi, msdu); 2268d5c65159SKalle Valo (*quota)--; 2269d5c65159SKalle Valo } 2270d5c65159SKalle Valo rcu_read_unlock(); 2271d5c65159SKalle Valo } 2272d5c65159SKalle Valo 2273d5c65159SKalle Valo int ath11k_dp_process_rx(struct ath11k_base *ab, int mac_id, 
2274d5c65159SKalle Valo struct napi_struct *napi, struct sk_buff_head *pending_q, 2275d5c65159SKalle Valo int budget) 2276d5c65159SKalle Valo { 2277d5c65159SKalle Valo struct ath11k *ar = ab->pdevs[mac_id].ar; 2278d5c65159SKalle Valo struct ath11k_pdev_dp *dp = &ar->dp; 2279d5c65159SKalle Valo struct ieee80211_rx_status *rx_status = &dp->rx_status; 2280d5c65159SKalle Valo struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring; 2281d5c65159SKalle Valo struct hal_srng *srng; 2282d5c65159SKalle Valo struct sk_buff *msdu; 2283d5c65159SKalle Valo struct sk_buff_head msdu_list; 2284d5c65159SKalle Valo struct sk_buff_head amsdu_list; 2285d5c65159SKalle Valo struct ath11k_skb_rxcb *rxcb; 2286d5c65159SKalle Valo u32 *rx_desc; 2287d5c65159SKalle Valo int buf_id; 2288d5c65159SKalle Valo int num_buffs_reaped = 0; 2289d5c65159SKalle Valo int quota = budget; 2290d5c65159SKalle Valo int ret; 2291d5c65159SKalle Valo bool done = false; 2292d5c65159SKalle Valo 2293d5c65159SKalle Valo /* Process any pending packets from the previous napi poll. 2294d5c65159SKalle Valo * Note: All msdu's in this pending_q corresponds to the same mac id 2295d5c65159SKalle Valo * due to pdev based reo dest mapping and also since each irq group id 2296d5c65159SKalle Valo * maps to specific reo dest ring. 
2297d5c65159SKalle Valo */ 2298d5c65159SKalle Valo ath11k_dp_rx_process_pending_packets(ab, napi, pending_q, "a, 2299d5c65159SKalle Valo mac_id); 2300d5c65159SKalle Valo 2301d5c65159SKalle Valo /* If all quota is exhausted by processing the pending_q, 2302d5c65159SKalle Valo * Wait for the next napi poll to reap the new info 2303d5c65159SKalle Valo */ 2304d5c65159SKalle Valo if (!quota) 2305d5c65159SKalle Valo goto exit; 2306d5c65159SKalle Valo 2307d5c65159SKalle Valo __skb_queue_head_init(&msdu_list); 2308d5c65159SKalle Valo 2309d5c65159SKalle Valo srng = &ab->hal.srng_list[dp->reo_dst_ring.ring_id]; 2310d5c65159SKalle Valo 2311d5c65159SKalle Valo spin_lock_bh(&srng->lock); 2312d5c65159SKalle Valo 2313d5c65159SKalle Valo ath11k_hal_srng_access_begin(ab, srng); 2314d5c65159SKalle Valo 2315d5c65159SKalle Valo try_again: 2316d5c65159SKalle Valo while ((rx_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) { 2317293cb583SJohn Crispin struct hal_reo_dest_ring *desc = (struct hal_reo_dest_ring *)rx_desc; 2318293cb583SJohn Crispin enum hal_reo_dest_ring_push_reason push_reason; 2319293cb583SJohn Crispin u32 cookie; 2320d5c65159SKalle Valo 2321293cb583SJohn Crispin cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE, 2322293cb583SJohn Crispin desc->buf_addr_info.info1); 2323d5c65159SKalle Valo buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, 2324293cb583SJohn Crispin cookie); 2325d5c65159SKalle Valo spin_lock_bh(&rx_ring->idr_lock); 2326d5c65159SKalle Valo msdu = idr_find(&rx_ring->bufs_idr, buf_id); 2327d5c65159SKalle Valo if (!msdu) { 2328d5c65159SKalle Valo ath11k_warn(ab, "frame rx with invalid buf_id %d\n", 2329d5c65159SKalle Valo buf_id); 2330d5c65159SKalle Valo spin_unlock_bh(&rx_ring->idr_lock); 2331d5c65159SKalle Valo continue; 2332d5c65159SKalle Valo } 2333d5c65159SKalle Valo 2334d5c65159SKalle Valo idr_remove(&rx_ring->bufs_idr, buf_id); 2335d5c65159SKalle Valo spin_unlock_bh(&rx_ring->idr_lock); 2336d5c65159SKalle Valo 2337d5c65159SKalle Valo rxcb = 
ATH11K_SKB_RXCB(msdu); 2338d5c65159SKalle Valo dma_unmap_single(ab->dev, rxcb->paddr, 2339d5c65159SKalle Valo msdu->len + skb_tailroom(msdu), 2340d5c65159SKalle Valo DMA_FROM_DEVICE); 2341d5c65159SKalle Valo 2342d5c65159SKalle Valo num_buffs_reaped++; 2343d5c65159SKalle Valo 2344293cb583SJohn Crispin push_reason = FIELD_GET(HAL_REO_DEST_RING_INFO0_PUSH_REASON, 2345293cb583SJohn Crispin desc->info0); 2346293cb583SJohn Crispin if (push_reason != 2347d5c65159SKalle Valo HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION) { 2348d5c65159SKalle Valo /* TODO: Check if the msdu can be sent up for processing */ 2349d5c65159SKalle Valo dev_kfree_skb_any(msdu); 2350d5c65159SKalle Valo ab->soc_stats.hal_reo_error[dp->reo_dst_ring.ring_id]++; 2351d5c65159SKalle Valo continue; 2352d5c65159SKalle Valo } 2353d5c65159SKalle Valo 2354293cb583SJohn Crispin rxcb->is_first_msdu = !!(desc->rx_msdu_info.info0 & 2355293cb583SJohn Crispin RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU); 2356293cb583SJohn Crispin rxcb->is_last_msdu = !!(desc->rx_msdu_info.info0 & 2357293cb583SJohn Crispin RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU); 2358293cb583SJohn Crispin rxcb->is_continuation = !!(desc->rx_msdu_info.info0 & 2359293cb583SJohn Crispin RX_MSDU_DESC_INFO0_MSDU_CONTINUATION); 2360d5c65159SKalle Valo rxcb->mac_id = mac_id; 2361d5c65159SKalle Valo __skb_queue_tail(&msdu_list, msdu); 2362d5c65159SKalle Valo 2363d5c65159SKalle Valo /* Stop reaping from the ring once quota is exhausted 2364d5c65159SKalle Valo * and we've received all msdu's in the the AMSDU. The 2365d5c65159SKalle Valo * additional msdu's reaped in excess of quota here would 2366d5c65159SKalle Valo * be pushed into the pending queue to be processed during 2367d5c65159SKalle Valo * the next napi poll. 
2368d5c65159SKalle Valo * Note: More profiling can be done to see the impact on 2369d5c65159SKalle Valo * pending_q and throughput during various traffic & density 2370d5c65159SKalle Valo * and how use of budget instead of remaining quota affects it. 2371d5c65159SKalle Valo */ 2372d5c65159SKalle Valo if (num_buffs_reaped >= quota && rxcb->is_last_msdu && 2373d5c65159SKalle Valo !rxcb->is_continuation) { 2374d5c65159SKalle Valo done = true; 2375d5c65159SKalle Valo break; 2376d5c65159SKalle Valo } 2377d5c65159SKalle Valo } 2378d5c65159SKalle Valo 2379d5c65159SKalle Valo /* Hw might have updated the head pointer after we cached it. 2380d5c65159SKalle Valo * In this case, even though there are entries in the ring we'll 2381d5c65159SKalle Valo * get rx_desc NULL. Give the read another try with updated cached 2382d5c65159SKalle Valo * head pointer so that we can reap complete MPDU in the current 2383d5c65159SKalle Valo * rx processing. 2384d5c65159SKalle Valo */ 2385d5c65159SKalle Valo if (!done && ath11k_hal_srng_dst_num_free(ab, srng, true)) { 2386d5c65159SKalle Valo ath11k_hal_srng_access_end(ab, srng); 2387d5c65159SKalle Valo goto try_again; 2388d5c65159SKalle Valo } 2389d5c65159SKalle Valo 2390d5c65159SKalle Valo ath11k_hal_srng_access_end(ab, srng); 2391d5c65159SKalle Valo 2392d5c65159SKalle Valo spin_unlock_bh(&srng->lock); 2393d5c65159SKalle Valo 2394d5c65159SKalle Valo if (!num_buffs_reaped) 2395d5c65159SKalle Valo goto exit; 2396d5c65159SKalle Valo 2397d5c65159SKalle Valo /* Should we reschedule it later if we are not able to replenish all 2398d5c65159SKalle Valo * the buffers? 
2399d5c65159SKalle Valo */ 2400d5c65159SKalle Valo ath11k_dp_rxbufs_replenish(ab, mac_id, rx_ring, num_buffs_reaped, 2401d5c65159SKalle Valo HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC); 2402d5c65159SKalle Valo 2403d5c65159SKalle Valo rcu_read_lock(); 2404d5c65159SKalle Valo if (!rcu_dereference(ab->pdevs_active[mac_id])) { 2405d5c65159SKalle Valo __skb_queue_purge(&msdu_list); 2406d5c65159SKalle Valo goto rcu_unlock; 2407d5c65159SKalle Valo } 2408d5c65159SKalle Valo 2409d5c65159SKalle Valo if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) { 2410d5c65159SKalle Valo __skb_queue_purge(&msdu_list); 2411d5c65159SKalle Valo goto rcu_unlock; 2412d5c65159SKalle Valo } 2413d5c65159SKalle Valo 2414d5c65159SKalle Valo while (!skb_queue_empty(&msdu_list)) { 2415d5c65159SKalle Valo __skb_queue_head_init(&amsdu_list); 2416d5c65159SKalle Valo ret = ath11k_dp_rx_retrieve_amsdu(ar, &msdu_list, &amsdu_list); 2417d5c65159SKalle Valo if (ret) { 2418d5c65159SKalle Valo if (ret == -EIO) { 2419d5c65159SKalle Valo ath11k_err(ab, "rx ring got corrupted %d\n", ret); 2420d5c65159SKalle Valo __skb_queue_purge(&msdu_list); 2421d5c65159SKalle Valo /* Should stop processing any more rx in 2422d5c65159SKalle Valo * future from this ring? 2423d5c65159SKalle Valo */ 2424d5c65159SKalle Valo goto rcu_unlock; 2425d5c65159SKalle Valo } 2426d5c65159SKalle Valo 2427d5c65159SKalle Valo /* A-MSDU retrieval got failed due to non-fatal condition, 2428d5c65159SKalle Valo * continue processing with the next msdu. 
2429d5c65159SKalle Valo */ 2430d5c65159SKalle Valo continue; 2431d5c65159SKalle Valo } 2432d5c65159SKalle Valo 2433d5c65159SKalle Valo ath11k_dp_rx_process_amsdu(ar, &amsdu_list, rx_status); 2434d5c65159SKalle Valo 2435d5c65159SKalle Valo ath11k_dp_rx_pre_deliver_amsdu(ar, &amsdu_list, rx_status); 2436d5c65159SKalle Valo skb_queue_splice_tail(&amsdu_list, pending_q); 2437d5c65159SKalle Valo } 2438d5c65159SKalle Valo 2439d5c65159SKalle Valo while (quota && (msdu = __skb_dequeue(pending_q))) { 2440d5c65159SKalle Valo ath11k_dp_rx_deliver_msdu(ar, napi, msdu); 2441d5c65159SKalle Valo quota--; 2442d5c65159SKalle Valo } 2443d5c65159SKalle Valo 2444d5c65159SKalle Valo rcu_unlock: 2445d5c65159SKalle Valo rcu_read_unlock(); 2446d5c65159SKalle Valo exit: 2447d5c65159SKalle Valo return budget - quota; 2448d5c65159SKalle Valo } 2449d5c65159SKalle Valo 2450d5c65159SKalle Valo static void ath11k_dp_rx_update_peer_stats(struct ath11k_sta *arsta, 2451d5c65159SKalle Valo struct hal_rx_mon_ppdu_info *ppdu_info) 2452d5c65159SKalle Valo { 2453d5c65159SKalle Valo struct ath11k_rx_peer_stats *rx_stats = arsta->rx_stats; 2454d5c65159SKalle Valo u32 num_msdu; 2455d5c65159SKalle Valo 2456d5c65159SKalle Valo if (!rx_stats) 2457d5c65159SKalle Valo return; 2458d5c65159SKalle Valo 2459d5c65159SKalle Valo num_msdu = ppdu_info->tcp_msdu_count + ppdu_info->tcp_ack_msdu_count + 2460d5c65159SKalle Valo ppdu_info->udp_msdu_count + ppdu_info->other_msdu_count; 2461d5c65159SKalle Valo 2462d5c65159SKalle Valo rx_stats->num_msdu += num_msdu; 2463d5c65159SKalle Valo rx_stats->tcp_msdu_count += ppdu_info->tcp_msdu_count + 2464d5c65159SKalle Valo ppdu_info->tcp_ack_msdu_count; 2465d5c65159SKalle Valo rx_stats->udp_msdu_count += ppdu_info->udp_msdu_count; 2466d5c65159SKalle Valo rx_stats->other_msdu_count += ppdu_info->other_msdu_count; 2467d5c65159SKalle Valo 2468d5c65159SKalle Valo if (ppdu_info->preamble_type == HAL_RX_PREAMBLE_11A || 2469d5c65159SKalle Valo ppdu_info->preamble_type == 
HAL_RX_PREAMBLE_11B) { 2470d5c65159SKalle Valo ppdu_info->nss = 1; 2471d5c65159SKalle Valo ppdu_info->mcs = HAL_RX_MAX_MCS; 2472d5c65159SKalle Valo ppdu_info->tid = IEEE80211_NUM_TIDS; 2473d5c65159SKalle Valo } 2474d5c65159SKalle Valo 2475d5c65159SKalle Valo if (ppdu_info->nss > 0 && ppdu_info->nss <= HAL_RX_MAX_NSS) 2476d5c65159SKalle Valo rx_stats->nss_count[ppdu_info->nss - 1] += num_msdu; 2477d5c65159SKalle Valo 2478d5c65159SKalle Valo if (ppdu_info->mcs <= HAL_RX_MAX_MCS) 2479d5c65159SKalle Valo rx_stats->mcs_count[ppdu_info->mcs] += num_msdu; 2480d5c65159SKalle Valo 2481d5c65159SKalle Valo if (ppdu_info->gi < HAL_RX_GI_MAX) 2482d5c65159SKalle Valo rx_stats->gi_count[ppdu_info->gi] += num_msdu; 2483d5c65159SKalle Valo 2484d5c65159SKalle Valo if (ppdu_info->bw < HAL_RX_BW_MAX) 2485d5c65159SKalle Valo rx_stats->bw_count[ppdu_info->bw] += num_msdu; 2486d5c65159SKalle Valo 2487d5c65159SKalle Valo if (ppdu_info->ldpc < HAL_RX_SU_MU_CODING_MAX) 2488d5c65159SKalle Valo rx_stats->coding_count[ppdu_info->ldpc] += num_msdu; 2489d5c65159SKalle Valo 2490d5c65159SKalle Valo if (ppdu_info->tid <= IEEE80211_NUM_TIDS) 2491d5c65159SKalle Valo rx_stats->tid_count[ppdu_info->tid] += num_msdu; 2492d5c65159SKalle Valo 2493d5c65159SKalle Valo if (ppdu_info->preamble_type < HAL_RX_PREAMBLE_MAX) 2494d5c65159SKalle Valo rx_stats->pream_cnt[ppdu_info->preamble_type] += num_msdu; 2495d5c65159SKalle Valo 2496d5c65159SKalle Valo if (ppdu_info->reception_type < HAL_RX_RECEPTION_TYPE_MAX) 2497d5c65159SKalle Valo rx_stats->reception_type[ppdu_info->reception_type] += num_msdu; 2498d5c65159SKalle Valo 2499d5c65159SKalle Valo if (ppdu_info->is_stbc) 2500d5c65159SKalle Valo rx_stats->stbc_count += num_msdu; 2501d5c65159SKalle Valo 2502d5c65159SKalle Valo if (ppdu_info->beamformed) 2503d5c65159SKalle Valo rx_stats->beamformed_count += num_msdu; 2504d5c65159SKalle Valo 2505d5c65159SKalle Valo if (ppdu_info->num_mpdu_fcs_ok > 1) 2506d5c65159SKalle Valo rx_stats->ampdu_msdu_count += num_msdu; 
2507d5c65159SKalle Valo else 2508d5c65159SKalle Valo rx_stats->non_ampdu_msdu_count += num_msdu; 2509d5c65159SKalle Valo 2510d5c65159SKalle Valo rx_stats->num_mpdu_fcs_ok += ppdu_info->num_mpdu_fcs_ok; 2511d5c65159SKalle Valo rx_stats->num_mpdu_fcs_err += ppdu_info->num_mpdu_fcs_err; 25126a0c3702SJohn Crispin rx_stats->dcm_count += ppdu_info->dcm; 25136a0c3702SJohn Crispin rx_stats->ru_alloc_cnt[ppdu_info->ru_alloc] += num_msdu; 2514d5c65159SKalle Valo 2515d5c65159SKalle Valo arsta->rssi_comb = ppdu_info->rssi_comb; 2516d5c65159SKalle Valo rx_stats->rx_duration += ppdu_info->rx_duration; 2517d5c65159SKalle Valo arsta->rx_duration = rx_stats->rx_duration; 2518d5c65159SKalle Valo } 2519d5c65159SKalle Valo 2520d5c65159SKalle Valo static struct sk_buff *ath11k_dp_rx_alloc_mon_status_buf(struct ath11k_base *ab, 2521d5c65159SKalle Valo struct dp_rxdma_ring *rx_ring, 2522d5c65159SKalle Valo int *buf_id, gfp_t gfp) 2523d5c65159SKalle Valo { 2524d5c65159SKalle Valo struct sk_buff *skb; 2525d5c65159SKalle Valo dma_addr_t paddr; 2526d5c65159SKalle Valo 2527d5c65159SKalle Valo skb = dev_alloc_skb(DP_RX_BUFFER_SIZE + 2528d5c65159SKalle Valo DP_RX_BUFFER_ALIGN_SIZE); 2529d5c65159SKalle Valo 2530d5c65159SKalle Valo if (!skb) 2531d5c65159SKalle Valo goto fail_alloc_skb; 2532d5c65159SKalle Valo 2533d5c65159SKalle Valo if (!IS_ALIGNED((unsigned long)skb->data, 2534d5c65159SKalle Valo DP_RX_BUFFER_ALIGN_SIZE)) { 2535d5c65159SKalle Valo skb_pull(skb, PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) - 2536d5c65159SKalle Valo skb->data); 2537d5c65159SKalle Valo } 2538d5c65159SKalle Valo 2539d5c65159SKalle Valo paddr = dma_map_single(ab->dev, skb->data, 2540d5c65159SKalle Valo skb->len + skb_tailroom(skb), 2541d5c65159SKalle Valo DMA_BIDIRECTIONAL); 2542d5c65159SKalle Valo if (unlikely(dma_mapping_error(ab->dev, paddr))) 2543d5c65159SKalle Valo goto fail_free_skb; 2544d5c65159SKalle Valo 2545d5c65159SKalle Valo spin_lock_bh(&rx_ring->idr_lock); 2546d5c65159SKalle Valo *buf_id = 
idr_alloc(&rx_ring->bufs_idr, skb, 0, 2547d5c65159SKalle Valo rx_ring->bufs_max, gfp); 2548d5c65159SKalle Valo spin_unlock_bh(&rx_ring->idr_lock); 2549d5c65159SKalle Valo if (*buf_id < 0) 2550d5c65159SKalle Valo goto fail_dma_unmap; 2551d5c65159SKalle Valo 2552d5c65159SKalle Valo ATH11K_SKB_RXCB(skb)->paddr = paddr; 2553d5c65159SKalle Valo return skb; 2554d5c65159SKalle Valo 2555d5c65159SKalle Valo fail_dma_unmap: 2556d5c65159SKalle Valo dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb), 2557d5c65159SKalle Valo DMA_BIDIRECTIONAL); 2558d5c65159SKalle Valo fail_free_skb: 2559d5c65159SKalle Valo dev_kfree_skb_any(skb); 2560d5c65159SKalle Valo fail_alloc_skb: 2561d5c65159SKalle Valo return NULL; 2562d5c65159SKalle Valo } 2563d5c65159SKalle Valo 2564d5c65159SKalle Valo int ath11k_dp_rx_mon_status_bufs_replenish(struct ath11k_base *ab, int mac_id, 2565d5c65159SKalle Valo struct dp_rxdma_ring *rx_ring, 2566d5c65159SKalle Valo int req_entries, 2567d5c65159SKalle Valo enum hal_rx_buf_return_buf_manager mgr, 2568d5c65159SKalle Valo gfp_t gfp) 2569d5c65159SKalle Valo { 2570d5c65159SKalle Valo struct hal_srng *srng; 2571d5c65159SKalle Valo u32 *desc; 2572d5c65159SKalle Valo struct sk_buff *skb; 2573d5c65159SKalle Valo int num_free; 2574d5c65159SKalle Valo int num_remain; 2575d5c65159SKalle Valo int buf_id; 2576d5c65159SKalle Valo u32 cookie; 2577d5c65159SKalle Valo dma_addr_t paddr; 2578d5c65159SKalle Valo 2579d5c65159SKalle Valo req_entries = min(req_entries, rx_ring->bufs_max); 2580d5c65159SKalle Valo 2581d5c65159SKalle Valo srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id]; 2582d5c65159SKalle Valo 2583d5c65159SKalle Valo spin_lock_bh(&srng->lock); 2584d5c65159SKalle Valo 2585d5c65159SKalle Valo ath11k_hal_srng_access_begin(ab, srng); 2586d5c65159SKalle Valo 2587d5c65159SKalle Valo num_free = ath11k_hal_srng_src_num_free(ab, srng, true); 2588d5c65159SKalle Valo 2589d5c65159SKalle Valo req_entries = min(num_free, req_entries); 2590d5c65159SKalle Valo 
num_remain = req_entries; 2591d5c65159SKalle Valo 2592d5c65159SKalle Valo while (num_remain > 0) { 2593d5c65159SKalle Valo skb = ath11k_dp_rx_alloc_mon_status_buf(ab, rx_ring, 2594d5c65159SKalle Valo &buf_id, gfp); 2595d5c65159SKalle Valo if (!skb) 2596d5c65159SKalle Valo break; 2597d5c65159SKalle Valo paddr = ATH11K_SKB_RXCB(skb)->paddr; 2598d5c65159SKalle Valo 2599d5c65159SKalle Valo desc = ath11k_hal_srng_src_get_next_entry(ab, srng); 2600d5c65159SKalle Valo if (!desc) 2601d5c65159SKalle Valo goto fail_desc_get; 2602d5c65159SKalle Valo 2603d5c65159SKalle Valo cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) | 2604d5c65159SKalle Valo FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id); 2605d5c65159SKalle Valo 2606d5c65159SKalle Valo num_remain--; 2607d5c65159SKalle Valo 2608d5c65159SKalle Valo ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr); 2609d5c65159SKalle Valo } 2610d5c65159SKalle Valo 2611d5c65159SKalle Valo ath11k_hal_srng_access_end(ab, srng); 2612d5c65159SKalle Valo 2613d5c65159SKalle Valo spin_unlock_bh(&srng->lock); 2614d5c65159SKalle Valo 2615d5c65159SKalle Valo return req_entries - num_remain; 2616d5c65159SKalle Valo 2617d5c65159SKalle Valo fail_desc_get: 2618d5c65159SKalle Valo spin_lock_bh(&rx_ring->idr_lock); 2619d5c65159SKalle Valo idr_remove(&rx_ring->bufs_idr, buf_id); 2620d5c65159SKalle Valo spin_unlock_bh(&rx_ring->idr_lock); 2621d5c65159SKalle Valo dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb), 2622d5c65159SKalle Valo DMA_BIDIRECTIONAL); 2623d5c65159SKalle Valo dev_kfree_skb_any(skb); 2624d5c65159SKalle Valo ath11k_hal_srng_access_end(ab, srng); 2625d5c65159SKalle Valo spin_unlock_bh(&srng->lock); 2626d5c65159SKalle Valo 2627d5c65159SKalle Valo return req_entries - num_remain; 2628d5c65159SKalle Valo } 2629d5c65159SKalle Valo 2630d5c65159SKalle Valo static int ath11k_dp_rx_reap_mon_status_ring(struct ath11k_base *ab, int mac_id, 2631d5c65159SKalle Valo int *budget, struct sk_buff_head *skb_list) 
2632d5c65159SKalle Valo { 2633d5c65159SKalle Valo struct ath11k *ar = ab->pdevs[mac_id].ar; 2634d5c65159SKalle Valo struct ath11k_pdev_dp *dp = &ar->dp; 2635d5c65159SKalle Valo struct dp_rxdma_ring *rx_ring = &dp->rx_mon_status_refill_ring; 2636d5c65159SKalle Valo struct hal_srng *srng; 2637d5c65159SKalle Valo void *rx_mon_status_desc; 2638d5c65159SKalle Valo struct sk_buff *skb; 2639d5c65159SKalle Valo struct ath11k_skb_rxcb *rxcb; 2640d5c65159SKalle Valo struct hal_tlv_hdr *tlv; 2641d5c65159SKalle Valo u32 cookie; 2642d5c65159SKalle Valo int buf_id; 2643d5c65159SKalle Valo dma_addr_t paddr; 2644d5c65159SKalle Valo u8 rbm; 2645d5c65159SKalle Valo int num_buffs_reaped = 0; 2646d5c65159SKalle Valo 2647d5c65159SKalle Valo srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id]; 2648d5c65159SKalle Valo 2649d5c65159SKalle Valo spin_lock_bh(&srng->lock); 2650d5c65159SKalle Valo 2651d5c65159SKalle Valo ath11k_hal_srng_access_begin(ab, srng); 2652d5c65159SKalle Valo while (*budget) { 2653d5c65159SKalle Valo *budget -= 1; 2654d5c65159SKalle Valo rx_mon_status_desc = 2655d5c65159SKalle Valo ath11k_hal_srng_src_peek(ab, srng); 2656d5c65159SKalle Valo if (!rx_mon_status_desc) 2657d5c65159SKalle Valo break; 2658d5c65159SKalle Valo 2659d5c65159SKalle Valo ath11k_hal_rx_buf_addr_info_get(rx_mon_status_desc, &paddr, 2660d5c65159SKalle Valo &cookie, &rbm); 2661d5c65159SKalle Valo if (paddr) { 2662d5c65159SKalle Valo buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, cookie); 2663d5c65159SKalle Valo 2664d5c65159SKalle Valo spin_lock_bh(&rx_ring->idr_lock); 2665d5c65159SKalle Valo skb = idr_find(&rx_ring->bufs_idr, buf_id); 2666d5c65159SKalle Valo if (!skb) { 2667d5c65159SKalle Valo ath11k_warn(ab, "rx monitor status with invalid buf_id %d\n", 2668d5c65159SKalle Valo buf_id); 2669d5c65159SKalle Valo spin_unlock_bh(&rx_ring->idr_lock); 2670d5c65159SKalle Valo continue; 2671d5c65159SKalle Valo } 2672d5c65159SKalle Valo 2673d5c65159SKalle Valo idr_remove(&rx_ring->bufs_idr, buf_id); 
2674d5c65159SKalle Valo spin_unlock_bh(&rx_ring->idr_lock); 2675d5c65159SKalle Valo 2676d5c65159SKalle Valo rxcb = ATH11K_SKB_RXCB(skb); 2677d5c65159SKalle Valo 2678d5c65159SKalle Valo dma_sync_single_for_cpu(ab->dev, rxcb->paddr, 2679d5c65159SKalle Valo skb->len + skb_tailroom(skb), 2680d5c65159SKalle Valo DMA_FROM_DEVICE); 2681d5c65159SKalle Valo 2682d5c65159SKalle Valo dma_unmap_single(ab->dev, rxcb->paddr, 2683d5c65159SKalle Valo skb->len + skb_tailroom(skb), 2684d5c65159SKalle Valo DMA_BIDIRECTIONAL); 2685d5c65159SKalle Valo 2686d5c65159SKalle Valo tlv = (struct hal_tlv_hdr *)skb->data; 2687d5c65159SKalle Valo if (FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl) != 2688d5c65159SKalle Valo HAL_RX_STATUS_BUFFER_DONE) { 2689d5c65159SKalle Valo ath11k_hal_srng_src_get_next_entry(ab, srng); 2690d5c65159SKalle Valo continue; 2691d5c65159SKalle Valo } 2692d5c65159SKalle Valo 2693d5c65159SKalle Valo __skb_queue_tail(skb_list, skb); 2694d5c65159SKalle Valo } 2695d5c65159SKalle Valo 2696d5c65159SKalle Valo skb = ath11k_dp_rx_alloc_mon_status_buf(ab, rx_ring, 2697d5c65159SKalle Valo &buf_id, GFP_ATOMIC); 2698d5c65159SKalle Valo 2699d5c65159SKalle Valo if (!skb) { 2700d5c65159SKalle Valo ath11k_hal_rx_buf_addr_info_set(rx_mon_status_desc, 0, 0, 2701d5c65159SKalle Valo HAL_RX_BUF_RBM_SW3_BM); 2702d5c65159SKalle Valo num_buffs_reaped++; 2703d5c65159SKalle Valo break; 2704d5c65159SKalle Valo } 2705d5c65159SKalle Valo rxcb = ATH11K_SKB_RXCB(skb); 2706d5c65159SKalle Valo 2707d5c65159SKalle Valo cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) | 2708d5c65159SKalle Valo FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id); 2709d5c65159SKalle Valo 2710d5c65159SKalle Valo ath11k_hal_rx_buf_addr_info_set(rx_mon_status_desc, rxcb->paddr, 2711d5c65159SKalle Valo cookie, HAL_RX_BUF_RBM_SW3_BM); 2712d5c65159SKalle Valo ath11k_hal_srng_src_get_next_entry(ab, srng); 2713d5c65159SKalle Valo num_buffs_reaped++; 2714d5c65159SKalle Valo } 2715d5c65159SKalle Valo ath11k_hal_srng_access_end(ab, srng); 
2716d5c65159SKalle Valo spin_unlock_bh(&srng->lock); 2717d5c65159SKalle Valo 2718d5c65159SKalle Valo return num_buffs_reaped; 2719d5c65159SKalle Valo } 2720d5c65159SKalle Valo 2721d5c65159SKalle Valo int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id, 2722d5c65159SKalle Valo struct napi_struct *napi, int budget) 2723d5c65159SKalle Valo { 2724d5c65159SKalle Valo struct ath11k *ar = ab->pdevs[mac_id].ar; 2725d5c65159SKalle Valo enum hal_rx_mon_status hal_status; 2726d5c65159SKalle Valo struct sk_buff *skb; 2727d5c65159SKalle Valo struct sk_buff_head skb_list; 2728d5c65159SKalle Valo struct hal_rx_mon_ppdu_info ppdu_info; 2729d5c65159SKalle Valo struct ath11k_peer *peer; 2730d5c65159SKalle Valo struct ath11k_sta *arsta; 2731d5c65159SKalle Valo int num_buffs_reaped = 0; 2732d5c65159SKalle Valo 2733d5c65159SKalle Valo __skb_queue_head_init(&skb_list); 2734d5c65159SKalle Valo 2735d5c65159SKalle Valo num_buffs_reaped = ath11k_dp_rx_reap_mon_status_ring(ab, mac_id, &budget, 2736d5c65159SKalle Valo &skb_list); 2737d5c65159SKalle Valo if (!num_buffs_reaped) 2738d5c65159SKalle Valo goto exit; 2739d5c65159SKalle Valo 2740d5c65159SKalle Valo while ((skb = __skb_dequeue(&skb_list))) { 2741d5c65159SKalle Valo memset(&ppdu_info, 0, sizeof(ppdu_info)); 2742d5c65159SKalle Valo ppdu_info.peer_id = HAL_INVALID_PEERID; 2743d5c65159SKalle Valo 2744d5c65159SKalle Valo if (ath11k_debug_is_pktlog_rx_stats_enabled(ar)) 2745d5c65159SKalle Valo trace_ath11k_htt_rxdesc(ar, skb->data, DP_RX_BUFFER_SIZE); 2746d5c65159SKalle Valo 2747d5c65159SKalle Valo hal_status = ath11k_hal_rx_parse_mon_status(ab, &ppdu_info, skb); 2748d5c65159SKalle Valo 2749d5c65159SKalle Valo if (ppdu_info.peer_id == HAL_INVALID_PEERID || 2750d5c65159SKalle Valo hal_status != HAL_RX_MON_STATUS_PPDU_DONE) { 2751d5c65159SKalle Valo dev_kfree_skb_any(skb); 2752d5c65159SKalle Valo continue; 2753d5c65159SKalle Valo } 2754d5c65159SKalle Valo 2755d5c65159SKalle Valo rcu_read_lock(); 2756d5c65159SKalle Valo 
spin_lock_bh(&ab->base_lock); 2757d5c65159SKalle Valo peer = ath11k_peer_find_by_id(ab, ppdu_info.peer_id); 2758d5c65159SKalle Valo 2759d5c65159SKalle Valo if (!peer || !peer->sta) { 27602dab7d22SJohn Crispin ath11k_dbg(ab, ATH11K_DBG_DATA, 27612dab7d22SJohn Crispin "failed to find the peer with peer_id %d\n", 2762d5c65159SKalle Valo ppdu_info.peer_id); 2763d5c65159SKalle Valo spin_unlock_bh(&ab->base_lock); 2764d5c65159SKalle Valo rcu_read_unlock(); 2765d5c65159SKalle Valo dev_kfree_skb_any(skb); 2766d5c65159SKalle Valo continue; 2767d5c65159SKalle Valo } 2768d5c65159SKalle Valo 2769d5c65159SKalle Valo arsta = (struct ath11k_sta *)peer->sta->drv_priv; 2770d5c65159SKalle Valo ath11k_dp_rx_update_peer_stats(arsta, &ppdu_info); 2771d5c65159SKalle Valo 2772d5c65159SKalle Valo if (ath11k_debug_is_pktlog_peer_valid(ar, peer->addr)) 2773d5c65159SKalle Valo trace_ath11k_htt_rxdesc(ar, skb->data, DP_RX_BUFFER_SIZE); 2774d5c65159SKalle Valo 2775d5c65159SKalle Valo spin_unlock_bh(&ab->base_lock); 2776d5c65159SKalle Valo rcu_read_unlock(); 2777d5c65159SKalle Valo 2778d5c65159SKalle Valo dev_kfree_skb_any(skb); 2779d5c65159SKalle Valo } 2780d5c65159SKalle Valo exit: 2781d5c65159SKalle Valo return num_buffs_reaped; 2782d5c65159SKalle Valo } 2783d5c65159SKalle Valo 2784243874c6SManikanta Pubbisetty static void ath11k_dp_rx_frag_timer(struct timer_list *timer) 2785d5c65159SKalle Valo { 2786243874c6SManikanta Pubbisetty struct dp_rx_tid *rx_tid = from_timer(rx_tid, timer, frag_timer); 2787d5c65159SKalle Valo 2788243874c6SManikanta Pubbisetty spin_lock_bh(&rx_tid->ab->base_lock); 2789243874c6SManikanta Pubbisetty if (rx_tid->last_frag_no && 2790243874c6SManikanta Pubbisetty rx_tid->rx_frag_bitmap == GENMASK(rx_tid->last_frag_no, 0)) { 2791243874c6SManikanta Pubbisetty spin_unlock_bh(&rx_tid->ab->base_lock); 2792243874c6SManikanta Pubbisetty return; 2793243874c6SManikanta Pubbisetty } 2794243874c6SManikanta Pubbisetty ath11k_dp_rx_frags_cleanup(rx_tid, true); 2795243874c6SManikanta 
Pubbisetty spin_unlock_bh(&rx_tid->ab->base_lock); 2796d5c65159SKalle Valo } 2797d5c65159SKalle Valo 2798243874c6SManikanta Pubbisetty int ath11k_peer_rx_frag_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id) 2799243874c6SManikanta Pubbisetty { 2800243874c6SManikanta Pubbisetty struct ath11k_base *ab = ar->ab; 2801243874c6SManikanta Pubbisetty struct crypto_shash *tfm; 2802243874c6SManikanta Pubbisetty struct ath11k_peer *peer; 2803243874c6SManikanta Pubbisetty struct dp_rx_tid *rx_tid; 2804243874c6SManikanta Pubbisetty int i; 2805d5c65159SKalle Valo 2806243874c6SManikanta Pubbisetty tfm = crypto_alloc_shash("michael_mic", 0, 0); 2807243874c6SManikanta Pubbisetty if (IS_ERR(tfm)) 2808243874c6SManikanta Pubbisetty return PTR_ERR(tfm); 2809d5c65159SKalle Valo 2810243874c6SManikanta Pubbisetty spin_lock_bh(&ab->base_lock); 2811d5c65159SKalle Valo 2812243874c6SManikanta Pubbisetty peer = ath11k_peer_find(ab, vdev_id, peer_mac); 2813243874c6SManikanta Pubbisetty if (!peer) { 2814243874c6SManikanta Pubbisetty ath11k_warn(ab, "failed to find the peer to set up fragment info\n"); 2815243874c6SManikanta Pubbisetty spin_unlock_bh(&ab->base_lock); 2816243874c6SManikanta Pubbisetty return -ENOENT; 2817243874c6SManikanta Pubbisetty } 2818243874c6SManikanta Pubbisetty 2819243874c6SManikanta Pubbisetty for (i = 0; i <= IEEE80211_NUM_TIDS; i++) { 2820243874c6SManikanta Pubbisetty rx_tid = &peer->rx_tid[i]; 2821243874c6SManikanta Pubbisetty rx_tid->ab = ab; 2822243874c6SManikanta Pubbisetty timer_setup(&rx_tid->frag_timer, ath11k_dp_rx_frag_timer, 0); 2823243874c6SManikanta Pubbisetty skb_queue_head_init(&rx_tid->rx_frags); 2824243874c6SManikanta Pubbisetty } 2825243874c6SManikanta Pubbisetty 2826243874c6SManikanta Pubbisetty peer->tfm_mmic = tfm; 2827243874c6SManikanta Pubbisetty spin_unlock_bh(&ab->base_lock); 2828243874c6SManikanta Pubbisetty 2829243874c6SManikanta Pubbisetty return 0; 2830243874c6SManikanta Pubbisetty } 2831243874c6SManikanta Pubbisetty 
/* Compute the TKIP Michael MIC over one MSDU.
 *
 * Builds the Michael pseudo-header (DA | SA | priority | zero pad) and
 * feeds it followed by the payload through the "michael_mic" shash.
 *
 * @tfm: michael_mic transform allocated in ath11k_peer_rx_frag_setup()
 * @key: 8-byte Michael key (the rx MIC half of the TKIP key)
 * @hdr: 802.11 header of the frame
 * @data: payload covered by the MIC
 * @data_len: length of @data in bytes
 * @mic: output buffer receiving the 8-byte MIC
 *
 * Returns 0 on success or a negative crypto API error code.
 */
static int ath11k_dp_rx_h_michael_mic(struct crypto_shash *tfm, u8 *key,
				      struct ieee80211_hdr *hdr, u8 *data,
				      size_t data_len, u8 *mic)
{
	SHASH_DESC_ON_STACK(desc, tfm);
	u8 mic_hdr[16] = {0};
	u8 tid = 0;
	int ret;

	if (!tfm)
		return -EINVAL;

	desc->tfm = tfm;

	/* Michael takes exactly 8 bytes of key material */
	ret = crypto_shash_setkey(tfm, key, 8);
	if (ret)
		goto out;

	ret = crypto_shash_init(desc);
	if (ret)
		goto out;

	/* TKIP MIC header: DA | SA | priority | 3 zero bytes (from {0} init) */
	memcpy(mic_hdr, ieee80211_get_DA(hdr), ETH_ALEN);
	memcpy(mic_hdr + ETH_ALEN, ieee80211_get_SA(hdr), ETH_ALEN);
	if (ieee80211_is_data_qos(hdr->frame_control))
		tid = ieee80211_get_tid(hdr);
	mic_hdr[12] = tid;

	ret = crypto_shash_update(desc, mic_hdr, 16);
	if (ret)
		goto out;
	ret = crypto_shash_update(desc, data, data_len);
	if (ret)
		goto out;
	ret = crypto_shash_final(desc, mic);
out:
	/* Wipe key/state material left on the stack descriptor */
	shash_desc_zero(desc);
	return ret;
}

/* Verify the Michael MIC of a (reassembled) TKIP frame.
 *
 * Returns 0 when the frame is not TKIP or when the MIC matches.  On a
 * MIC mismatch the frame is handed to mac80211 with RX_FLAG_MMIC_ERROR
 * set (so TKIP countermeasures can be triggered) and -EINVAL is
 * returned.
 */
static int ath11k_dp_rx_h_verify_tkip_mic(struct ath11k *ar, struct ath11k_peer *peer,
					  struct sk_buff *msdu)
{
	struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)msdu->data;
	struct ieee80211_rx_status *rxs = IEEE80211_SKB_RXCB(msdu);
	struct ieee80211_key_conf *key_conf;
	struct ieee80211_hdr *hdr;
	u8 mic[IEEE80211_CCMP_MIC_LEN];
	int head_len, tail_len, ret;
	size_t data_len;
	u32 hdr_len;
	u8 *key, *data;
	u8 key_idx;

	/* Only TKIP frames carry a Michael MIC */
	if (ath11k_dp_rx_h_mpdu_start_enctype(rx_desc) != HAL_ENCRYPT_TYPE_TKIP_MIC)
		return 0;

	hdr = (struct ieee80211_hdr *)(msdu->data + HAL_RX_DESC_SIZE);
	hdr_len = ieee80211_hdrlen(hdr->frame_control);
	/* Payload covered by the MIC sits between hw desc + 802.11 hdr + IV
	 * and the trailing MIC + ICV + FCS.
	 */
	head_len = hdr_len + HAL_RX_DESC_SIZE + IEEE80211_TKIP_IV_LEN;
	tail_len = IEEE80211_CCMP_MIC_LEN + IEEE80211_TKIP_ICV_LEN + FCS_LEN;

	if (!is_multicast_ether_addr(hdr->addr1))
		key_idx = peer->ucast_keyidx;
	else
		key_idx = peer->mcast_keyidx;

	/* NOTE(review): key_conf is dereferenced below without a NULL
	 * check - assumes a key is always installed before TKIP frames
	 * arrive; verify against the key-install path.
	 */
	key_conf = peer->keys[key_idx];

	data = msdu->data + head_len;
	data_len = msdu->len - head_len - tail_len;
	key = &key_conf->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY];

	ret = ath11k_dp_rx_h_michael_mic(peer->tfm_mmic, key, hdr, data, data_len, mic);
	if (ret || memcmp(mic, data + data_len, IEEE80211_CCMP_MIC_LEN))
		goto mic_fail;

	return 0;

mic_fail:
	(ATH11K_SKB_RXCB(msdu))->is_first_msdu = 1;
	(ATH11K_SKB_RXCB(msdu))->is_last_msdu = 1;

	rxs->flag |= RX_FLAG_MMIC_ERROR | RX_FLAG_MMIC_STRIPPED |
		     RX_FLAG_IV_STRIPPED | RX_FLAG_DECRYPTED;

	/* Strip the hw rx descriptor and deliver the frame to mac80211
	 * so it can account the MIC failure.
	 */
	skb_pull(msdu, HAL_RX_DESC_SIZE);

	ath11k_dp_rx_h_ppdu(ar, rx_desc, rxs);
	ath11k_dp_rx_h_undecap(ar, msdu, rx_desc,
			       HAL_ENCRYPT_TYPE_TKIP_MIC, rxs, true);
	ieee80211_rx(ar->hw, msdu);
	return -EINVAL;
}

/* Strip crypto residue (MIC / ICV / IV) from one rx fragment according
 * to the RX_FLAG_* bits in @flags set by the caller.
 */
static void ath11k_dp_rx_h_undecap_frag(struct ath11k *ar, struct sk_buff *msdu,
					enum hal_encrypt_type enctype, u32 flags)
{
	struct ieee80211_hdr *hdr;
	size_t hdr_len;
	size_t crypto_len;

	/* Nothing to strip for this fragment */
	if (!flags)
		return;

	hdr = (struct ieee80211_hdr *)(msdu->data + HAL_RX_DESC_SIZE);

	/* MIC and ICV sit at the tail of the fragment */
	if (flags & RX_FLAG_MIC_STRIPPED)
		skb_trim(msdu, msdu->len -
			 ath11k_dp_rx_crypto_mic_len(ar, enctype));

	if (flags & RX_FLAG_ICV_STRIPPED)
		skb_trim(msdu, msdu->len -
			 ath11k_dp_rx_crypto_icv_len(ar, enctype));

	if (flags & RX_FLAG_IV_STRIPPED) {
		hdr_len = ieee80211_hdrlen(hdr->frame_control);
		crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype);

		/* Slide the 802.11 header over the IV, then drop the IV */
		memmove((void *)msdu->data + HAL_RX_DESC_SIZE + crypto_len,
			(void *)msdu->data + HAL_RX_DESC_SIZE, hdr_len);
		skb_pull(msdu, crypto_len);
	}
}

/* Reassemble the fragments queued on @rx_tid into one MSDU.
 *
 * Strips per-fragment crypto residue and 802.11 headers (all but the
 * first fragment's), concatenates the payloads into the first
 * fragment's skb, clears the more-fragments bit and verifies the TKIP
 * MIC if applicable.
 *
 * On success *@defrag_skb holds the reassembled frame, or NULL when
 * TKIP MIC verification failed (frame already handed to mac80211).
 * Returns 0, or -ENOMEM when the first fragment cannot be expanded.
 */
static int ath11k_dp_rx_h_defrag(struct ath11k *ar,
				 struct ath11k_peer *peer,
				 struct dp_rx_tid *rx_tid,
				 struct sk_buff **defrag_skb)
{
	struct hal_rx_desc *rx_desc;
	struct sk_buff *skb, *first_frag, *last_frag;
	struct ieee80211_hdr *hdr;
	enum hal_encrypt_type enctype;
	bool is_decrypted = false;
	int msdu_len = 0;
	int extra_space;
	u32 flags;

	first_frag = skb_peek(&rx_tid->rx_frags);
	last_frag = skb_peek_tail(&rx_tid->rx_frags);

	skb_queue_walk(&rx_tid->rx_frags, skb) {
		flags = 0;
		rx_desc = (struct hal_rx_desc *)skb->data;
		hdr = (struct ieee80211_hdr *)(skb->data + HAL_RX_DESC_SIZE);

		enctype = ath11k_dp_rx_h_mpdu_start_enctype(rx_desc);
		if (enctype != HAL_ENCRYPT_TYPE_OPEN)
			is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_desc);

		if (is_decrypted) {
			/* Keep the first fragment's IV and the last
			 * fragment's ICV/MIC; strip everything else.
			 */
			if (skb != first_frag)
				flags |= RX_FLAG_IV_STRIPPED;
			if (skb != last_frag)
				flags |= RX_FLAG_ICV_STRIPPED |
					 RX_FLAG_MIC_STRIPPED;
		}

		/* RX fragments are always raw packets */
		if (skb != last_frag)
			skb_trim(skb, skb->len - FCS_LEN);
		ath11k_dp_rx_h_undecap_frag(ar, skb, enctype, flags);

		/* Only the first fragment keeps its 802.11 header */
		if (skb != first_frag)
			skb_pull(skb, HAL_RX_DESC_SIZE +
				 ieee80211_hdrlen(hdr->frame_control));
		msdu_len += skb->len;
	}

	extra_space = msdu_len - (DP_RX_BUFFER_SIZE +
				  skb_tailroom(first_frag));
	/* Grow the first fragment when the merged payload will not fit */
	if (extra_space > 0 &&
	    (pskb_expand_head(first_frag, 0, extra_space, GFP_ATOMIC) < 0))
		return -ENOMEM;

	__skb_unlink(first_frag, &rx_tid->rx_frags);
	while ((skb = __skb_dequeue(&rx_tid->rx_frags))) {
		skb_put_data(first_frag, skb->data, skb->len);
		dev_kfree_skb_any(skb);
	}

	/* The reassembled frame is no longer fragmented */
	hdr = (struct ieee80211_hdr *)(first_frag->data + HAL_RX_DESC_SIZE);
	hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_MOREFRAGS);
	ATH11K_SKB_RXCB(first_frag)->is_frag = 1;

	if (ath11k_dp_rx_h_verify_tkip_mic(ar, peer, first_frag))
		first_frag = NULL;

	*defrag_skb = first_frag;
	return 0;
}

/* Re-inject the reassembled MSDU into the REO entrance ring so the
 * hardware runs its normal PN/duplicate checks and delivers the frame
 * back through the regular rx path.
 *
 * Returns 0 on success, -ENOMEM on DMA-map or idr failure, -ENOSPC
 * when no entrance-ring entry is available.
 */
static int ath11k_dp_rx_h_defrag_reo_reinject(struct ath11k *ar, struct dp_rx_tid *rx_tid,
					      struct sk_buff *defrag_skb)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct dp_rxdma_ring *rx_refill_ring = &dp->rx_refill_buf_ring;
	struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)defrag_skb->data;
	struct hal_reo_entrance_ring *reo_ent_ring;
	struct hal_reo_dest_ring *reo_dest_ring;
	struct dp_link_desc_bank *link_desc_banks;
	struct hal_rx_msdu_link *msdu_link;
	struct hal_rx_msdu_details *msdu0;
	struct hal_srng *srng;
	dma_addr_t paddr;
	u32 desc_bank, msdu_info, mpdu_info;
	u32 dst_idx, cookie;
	u32 *msdu_len_offset;
	int ret, buf_id;

	link_desc_banks = ab->dp.link_desc_banks;
	/* Destination ring descriptor saved with the first fragment */
	reo_dest_ring = rx_tid->dst_ring_desc;

	/* Locate and reset the first MSDU link descriptor entry */
	ath11k_hal_rx_reo_ent_paddr_get(ab, reo_dest_ring, &paddr, &desc_bank);
	msdu_link = (struct hal_rx_msdu_link *)(link_desc_banks[desc_bank].vaddr +
						(paddr - link_desc_banks[desc_bank].paddr));
	msdu0 = &msdu_link->msdu_link[0];
	dst_idx = FIELD_GET(RX_MSDU_DESC_INFO0_REO_DEST_IND, msdu0->rx_msdu_info.info0);
	memset(msdu0, 0, sizeof(*msdu0));

	/* Describe the whole reassembled frame as a single MSDU */
	msdu_info = FIELD_PREP(RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU, 1) |
		    FIELD_PREP(RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU, 1) |
		    FIELD_PREP(RX_MSDU_DESC_INFO0_MSDU_CONTINUATION, 0) |
		    FIELD_PREP(RX_MSDU_DESC_INFO0_MSDU_LENGTH,
			       defrag_skb->len - HAL_RX_DESC_SIZE) |
		    FIELD_PREP(RX_MSDU_DESC_INFO0_REO_DEST_IND, dst_idx) |
		    FIELD_PREP(RX_MSDU_DESC_INFO0_VALID_SA, 1) |
		    FIELD_PREP(RX_MSDU_DESC_INFO0_VALID_DA, 1);
	msdu0->rx_msdu_info.info0 = msdu_info;

	/* change msdu len in hal rx desc */
	msdu_len_offset = (u32 *)&rx_desc->msdu_start;
	*msdu_len_offset &= ~(RX_MSDU_START_INFO1_MSDU_LENGTH);
	*msdu_len_offset |= defrag_skb->len - HAL_RX_DESC_SIZE;

	paddr = dma_map_single(ab->dev, defrag_skb->data,
			       defrag_skb->len + skb_tailroom(defrag_skb),
			       DMA_FROM_DEVICE);
	if (dma_mapping_error(ab->dev, paddr))
		return -ENOMEM;

	spin_lock_bh(&rx_refill_ring->idr_lock);
	buf_id = idr_alloc(&rx_refill_ring->bufs_idr, defrag_skb, 0,
			   rx_refill_ring->bufs_max * 3, GFP_ATOMIC);
	spin_unlock_bh(&rx_refill_ring->idr_lock);
	if (buf_id < 0) {
		ret = -ENOMEM;
		goto err_unmap_dma;
	}

	ATH11K_SKB_RXCB(defrag_skb)->paddr = paddr;
	cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, dp->mac_id) |
		 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);

	ath11k_hal_rx_buf_addr_info_set(msdu0, paddr, cookie, HAL_RX_BUF_RBM_SW3_BM);

	/* Fill mpdu details into reo entrace ring */
	srng = &ab->hal.srng_list[ab->dp.reo_reinject_ring.ring_id];

	spin_lock_bh(&srng->lock);
	ath11k_hal_srng_access_begin(ab, srng);

	reo_ent_ring = (struct hal_reo_entrance_ring *)
			ath11k_hal_srng_src_get_next_entry(ab, srng);
	if (!reo_ent_ring) {
		ath11k_hal_srng_access_end(ab, srng);
		spin_unlock_bh(&srng->lock);
		ret = -ENOSPC;
		goto err_free_idr;
	}
	memset(reo_ent_ring, 0, sizeof(*reo_ent_ring));

	ath11k_hal_rx_reo_ent_paddr_get(ab, reo_dest_ring, &paddr, &desc_bank);
	ath11k_hal_rx_buf_addr_info_set(reo_ent_ring, paddr, desc_bank,
					HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST);

	/* VALID_PN asks REO to re-run the PN check on this MPDU */
	mpdu_info = FIELD_PREP(RX_MPDU_DESC_INFO0_MSDU_COUNT, 1) |
		    FIELD_PREP(RX_MPDU_DESC_INFO0_SEQ_NUM, rx_tid->cur_sn) |
		    FIELD_PREP(RX_MPDU_DESC_INFO0_FRAG_FLAG, 0) |
		    FIELD_PREP(RX_MPDU_DESC_INFO0_VALID_SA, 1) |
		    FIELD_PREP(RX_MPDU_DESC_INFO0_VALID_DA, 1) |
		    FIELD_PREP(RX_MPDU_DESC_INFO0_RAW_MPDU, 1) |
		    FIELD_PREP(RX_MPDU_DESC_INFO0_VALID_PN, 1);

	reo_ent_ring->rx_mpdu_info.info0 = mpdu_info;
	reo_ent_ring->rx_mpdu_info.meta_data = reo_dest_ring->rx_mpdu_info.meta_data;
	reo_ent_ring->queue_addr_lo = reo_dest_ring->queue_addr_lo;
	reo_ent_ring->info0 = FIELD_PREP(HAL_REO_ENTR_RING_INFO0_QUEUE_ADDR_HI,
					 FIELD_GET(HAL_REO_DEST_RING_INFO0_QUEUE_ADDR_HI,
						   reo_dest_ring->info0)) |
			      FIELD_PREP(HAL_REO_ENTR_RING_INFO0_DEST_IND, dst_idx);
	ath11k_hal_srng_access_end(ab, srng);
	spin_unlock_bh(&srng->lock);

	return 0;

err_free_idr:
	spin_lock_bh(&rx_refill_ring->idr_lock);
	idr_remove(&rx_refill_ring->bufs_idr, buf_id);
	spin_unlock_bh(&rx_refill_ring->idr_lock);
err_unmap_dma:
	dma_unmap_single(ab->dev, paddr, defrag_skb->len + skb_tailroom(defrag_skb),
			 DMA_FROM_DEVICE);
	return ret;
}

/* Compare two fragments by fragment number (negative when a < b) */
static int ath11k_dp_rx_h_cmp_frags(struct sk_buff *a, struct sk_buff *b)
{
	int frag1, frag2;

	frag1 = ath11k_dp_rx_h_mpdu_start_frag_no(a);
	frag2 = ath11k_dp_rx_h_mpdu_start_frag_no(b);

	return frag1 - frag2;
}

/* Insert @cur_frag into @frag_list keeping fragment numbers ascending */
static void ath11k_dp_rx_h_sort_frags(struct sk_buff_head *frag_list,
				      struct sk_buff *cur_frag)
{
	struct sk_buff *skb;
	int cmp;

	skb_queue_walk(frag_list, skb) {
		cmp = ath11k_dp_rx_h_cmp_frags(skb, cur_frag);
		if (cmp < 0)
			continue;
		/* First entry with frag_no >= cur_frag: insert before it */
		__skb_queue_before(frag_list, skb, cur_frag);
		return;
	}
	/* Largest fragment number seen so far: append at the tail */
	__skb_queue_tail(frag_list, cur_frag);
}

/* Extract the 48-bit packet number from the CCMP/GCMP IV of @skb.
 * PN bytes sit at IV offsets 0, 1 and 4-7 (offsets 2/3 carry the
 * key id and extended-IV bits and are skipped).
 */
static u64 ath11k_dp_rx_h_get_pn(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	u64 pn = 0;
	u8 *ehdr;

	hdr = (struct ieee80211_hdr *)(skb->data + HAL_RX_DESC_SIZE);
	/* IV starts right after the hw rx descriptor + 802.11 header */
	ehdr = skb->data + HAL_RX_DESC_SIZE + ieee80211_hdrlen(hdr->frame_control);

	pn = ehdr[0];
	pn |= (u64)ehdr[1] << 8;
	pn |= (u64)ehdr[4] << 16;
	pn |= (u64)ehdr[5] << 24;
	pn |= (u64)ehdr[6] << 32;
	pn |= (u64)ehdr[7] << 40;

	return pn;
}

/* Check that the fragments queued on @rx_tid carry strictly
 * incrementing packet numbers, as required for CCMP/GCMP
 * defragmentation.
 */
static bool
ath11k_dp_rx_h_defrag_validate_incr_pn(struct ath11k *ar, struct dp_rx_tid *rx_tid)
{
	enum hal_encrypt_type encrypt_type;
	struct sk_buff *first_frag, *skb;
	struct hal_rx_desc *desc;
	u64 last_pn;
	u64 cur_pn;

	first_frag = skb_peek(&rx_tid->rx_frags);
	desc = (struct hal_rx_desc *)first_frag->data;

	/* PN replay protection only applies to CCMP/GCMP ciphers */
	encrypt_type = ath11k_dp_rx_h_mpdu_start_enctype(desc);
	if (encrypt_type != HAL_ENCRYPT_TYPE_CCMP_128 &&
	    encrypt_type != HAL_ENCRYPT_TYPE_CCMP_256 &&
	    encrypt_type != HAL_ENCRYPT_TYPE_GCMP_128 &&
	    encrypt_type != HAL_ENCRYPT_TYPE_AES_GCMP_256)
		return true;

	last_pn = ath11k_dp_rx_h_get_pn(first_frag);
	skb_queue_walk(&rx_tid->rx_frags, skb) {
		if (skb == first_frag)
			continue;

		/* Every later fragment must carry exactly previous PN + 1 */
		cur_pn = ath11k_dp_rx_h_get_pn(skb);
		if (cur_pn != last_pn + 1)
			return false;
		last_pn = cur_pn;
	}
	return true;
}

static int ath11k_dp_rx_frag_h_mpdu(struct ath11k *ar,
				    struct sk_buff *msdu,
				    u32 *ring_desc)
{
	struct ath11k_base *ab = ar->ab;
3221243874c6SManikanta Pubbisetty struct hal_rx_desc *rx_desc; 3222243874c6SManikanta Pubbisetty struct ath11k_peer *peer; 3223243874c6SManikanta Pubbisetty struct dp_rx_tid *rx_tid; 3224243874c6SManikanta Pubbisetty struct sk_buff *defrag_skb = NULL; 3225243874c6SManikanta Pubbisetty u32 peer_id; 3226243874c6SManikanta Pubbisetty u16 seqno, frag_no; 3227243874c6SManikanta Pubbisetty u8 tid; 3228243874c6SManikanta Pubbisetty int ret = 0; 3229243874c6SManikanta Pubbisetty bool more_frags; 3230243874c6SManikanta Pubbisetty 3231243874c6SManikanta Pubbisetty rx_desc = (struct hal_rx_desc *)msdu->data; 3232243874c6SManikanta Pubbisetty peer_id = ath11k_dp_rx_h_mpdu_start_peer_id(rx_desc); 3233243874c6SManikanta Pubbisetty tid = ath11k_dp_rx_h_mpdu_start_tid(rx_desc); 3234243874c6SManikanta Pubbisetty seqno = ath11k_dp_rx_h_mpdu_start_seq_no(rx_desc); 3235243874c6SManikanta Pubbisetty frag_no = ath11k_dp_rx_h_mpdu_start_frag_no(msdu); 3236243874c6SManikanta Pubbisetty more_frags = ath11k_dp_rx_h_mpdu_start_more_frags(msdu); 3237243874c6SManikanta Pubbisetty 3238243874c6SManikanta Pubbisetty if (!ath11k_dp_rx_h_mpdu_start_seq_ctrl_valid(rx_desc) || 3239243874c6SManikanta Pubbisetty !ath11k_dp_rx_h_mpdu_start_fc_valid(rx_desc) || 3240243874c6SManikanta Pubbisetty tid > IEEE80211_NUM_TIDS) 3241243874c6SManikanta Pubbisetty return -EINVAL; 3242243874c6SManikanta Pubbisetty 3243243874c6SManikanta Pubbisetty /* received unfragmented packet in reo 3244243874c6SManikanta Pubbisetty * exception ring, this shouldn't happen 3245243874c6SManikanta Pubbisetty * as these packets typically come from 3246243874c6SManikanta Pubbisetty * reo2sw srngs. 
3247243874c6SManikanta Pubbisetty */ 3248243874c6SManikanta Pubbisetty if (WARN_ON_ONCE(!frag_no && !more_frags)) 3249243874c6SManikanta Pubbisetty return -EINVAL; 3250243874c6SManikanta Pubbisetty 3251243874c6SManikanta Pubbisetty spin_lock_bh(&ab->base_lock); 3252243874c6SManikanta Pubbisetty peer = ath11k_peer_find_by_id(ab, peer_id); 3253243874c6SManikanta Pubbisetty if (!peer) { 3254243874c6SManikanta Pubbisetty ath11k_warn(ab, "failed to find the peer to de-fragment received fragment peer_id %d\n", 3255243874c6SManikanta Pubbisetty peer_id); 3256243874c6SManikanta Pubbisetty ret = -ENOENT; 3257243874c6SManikanta Pubbisetty goto out_unlock; 3258243874c6SManikanta Pubbisetty } 3259243874c6SManikanta Pubbisetty rx_tid = &peer->rx_tid[tid]; 3260243874c6SManikanta Pubbisetty 3261243874c6SManikanta Pubbisetty if ((!skb_queue_empty(&rx_tid->rx_frags) && seqno != rx_tid->cur_sn) || 3262243874c6SManikanta Pubbisetty skb_queue_empty(&rx_tid->rx_frags)) { 3263243874c6SManikanta Pubbisetty /* Flush stored fragments and start a new sequence */ 3264243874c6SManikanta Pubbisetty ath11k_dp_rx_frags_cleanup(rx_tid, true); 3265243874c6SManikanta Pubbisetty rx_tid->cur_sn = seqno; 3266243874c6SManikanta Pubbisetty } 3267243874c6SManikanta Pubbisetty 3268243874c6SManikanta Pubbisetty if (rx_tid->rx_frag_bitmap & BIT(frag_no)) { 3269243874c6SManikanta Pubbisetty /* Fragment already present */ 3270243874c6SManikanta Pubbisetty ret = -EINVAL; 3271243874c6SManikanta Pubbisetty goto out_unlock; 3272243874c6SManikanta Pubbisetty } 3273243874c6SManikanta Pubbisetty 3274243874c6SManikanta Pubbisetty if (frag_no > __fls(rx_tid->rx_frag_bitmap)) 3275243874c6SManikanta Pubbisetty __skb_queue_tail(&rx_tid->rx_frags, msdu); 3276243874c6SManikanta Pubbisetty else 3277243874c6SManikanta Pubbisetty ath11k_dp_rx_h_sort_frags(&rx_tid->rx_frags, msdu); 3278243874c6SManikanta Pubbisetty 3279243874c6SManikanta Pubbisetty rx_tid->rx_frag_bitmap |= BIT(frag_no); 3280243874c6SManikanta Pubbisetty if 
(!more_frags) 3281243874c6SManikanta Pubbisetty rx_tid->last_frag_no = frag_no; 3282243874c6SManikanta Pubbisetty 3283243874c6SManikanta Pubbisetty if (frag_no == 0) { 3284243874c6SManikanta Pubbisetty rx_tid->dst_ring_desc = kmemdup(ring_desc, 3285243874c6SManikanta Pubbisetty sizeof(*rx_tid->dst_ring_desc), 3286243874c6SManikanta Pubbisetty GFP_ATOMIC); 3287243874c6SManikanta Pubbisetty if (!rx_tid->dst_ring_desc) { 3288243874c6SManikanta Pubbisetty ret = -ENOMEM; 3289243874c6SManikanta Pubbisetty goto out_unlock; 3290243874c6SManikanta Pubbisetty } 3291243874c6SManikanta Pubbisetty } else { 3292243874c6SManikanta Pubbisetty ath11k_dp_rx_link_desc_return(ab, ring_desc, 3293243874c6SManikanta Pubbisetty HAL_WBM_REL_BM_ACT_PUT_IN_IDLE); 3294243874c6SManikanta Pubbisetty } 3295243874c6SManikanta Pubbisetty 3296243874c6SManikanta Pubbisetty if (!rx_tid->last_frag_no || 3297243874c6SManikanta Pubbisetty rx_tid->rx_frag_bitmap != GENMASK(rx_tid->last_frag_no, 0)) { 3298243874c6SManikanta Pubbisetty mod_timer(&rx_tid->frag_timer, jiffies + 3299243874c6SManikanta Pubbisetty ATH11K_DP_RX_FRAGMENT_TIMEOUT_MS); 3300243874c6SManikanta Pubbisetty goto out_unlock; 3301243874c6SManikanta Pubbisetty } 3302243874c6SManikanta Pubbisetty 3303243874c6SManikanta Pubbisetty spin_unlock_bh(&ab->base_lock); 3304243874c6SManikanta Pubbisetty del_timer_sync(&rx_tid->frag_timer); 3305243874c6SManikanta Pubbisetty spin_lock_bh(&ab->base_lock); 3306243874c6SManikanta Pubbisetty 3307243874c6SManikanta Pubbisetty peer = ath11k_peer_find_by_id(ab, peer_id); 3308243874c6SManikanta Pubbisetty if (!peer) 3309243874c6SManikanta Pubbisetty goto err_frags_cleanup; 3310243874c6SManikanta Pubbisetty 3311243874c6SManikanta Pubbisetty if (!ath11k_dp_rx_h_defrag_validate_incr_pn(ar, rx_tid)) 3312243874c6SManikanta Pubbisetty goto err_frags_cleanup; 3313243874c6SManikanta Pubbisetty 3314243874c6SManikanta Pubbisetty if (ath11k_dp_rx_h_defrag(ar, peer, rx_tid, &defrag_skb)) 3315243874c6SManikanta Pubbisetty 
goto err_frags_cleanup; 3316243874c6SManikanta Pubbisetty 3317243874c6SManikanta Pubbisetty if (!defrag_skb) 3318243874c6SManikanta Pubbisetty goto err_frags_cleanup; 3319243874c6SManikanta Pubbisetty 3320243874c6SManikanta Pubbisetty if (ath11k_dp_rx_h_defrag_reo_reinject(ar, rx_tid, defrag_skb)) 3321243874c6SManikanta Pubbisetty goto err_frags_cleanup; 3322243874c6SManikanta Pubbisetty 3323243874c6SManikanta Pubbisetty ath11k_dp_rx_frags_cleanup(rx_tid, false); 3324243874c6SManikanta Pubbisetty goto out_unlock; 3325243874c6SManikanta Pubbisetty 3326243874c6SManikanta Pubbisetty err_frags_cleanup: 3327243874c6SManikanta Pubbisetty dev_kfree_skb_any(defrag_skb); 3328243874c6SManikanta Pubbisetty ath11k_dp_rx_frags_cleanup(rx_tid, true); 3329243874c6SManikanta Pubbisetty out_unlock: 3330243874c6SManikanta Pubbisetty spin_unlock_bh(&ab->base_lock); 3331243874c6SManikanta Pubbisetty return ret; 3332243874c6SManikanta Pubbisetty } 3333243874c6SManikanta Pubbisetty 3334d5c65159SKalle Valo static int 3335243874c6SManikanta Pubbisetty ath11k_dp_process_rx_err_buf(struct ath11k *ar, u32 *ring_desc, int buf_id, bool drop) 3336d5c65159SKalle Valo { 3337d5c65159SKalle Valo struct ath11k_pdev_dp *dp = &ar->dp; 3338d5c65159SKalle Valo struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring; 3339d5c65159SKalle Valo struct sk_buff *msdu; 3340d5c65159SKalle Valo struct ath11k_skb_rxcb *rxcb; 3341d5c65159SKalle Valo struct hal_rx_desc *rx_desc; 3342d5c65159SKalle Valo u16 msdu_len; 3343d5c65159SKalle Valo 3344d5c65159SKalle Valo spin_lock_bh(&rx_ring->idr_lock); 3345d5c65159SKalle Valo msdu = idr_find(&rx_ring->bufs_idr, buf_id); 3346d5c65159SKalle Valo if (!msdu) { 3347d5c65159SKalle Valo ath11k_warn(ar->ab, "rx err buf with invalid buf_id %d\n", 3348d5c65159SKalle Valo buf_id); 3349d5c65159SKalle Valo spin_unlock_bh(&rx_ring->idr_lock); 3350d5c65159SKalle Valo return -EINVAL; 3351d5c65159SKalle Valo } 3352d5c65159SKalle Valo 3353d5c65159SKalle Valo idr_remove(&rx_ring->bufs_idr, 
buf_id); 3354d5c65159SKalle Valo spin_unlock_bh(&rx_ring->idr_lock); 3355d5c65159SKalle Valo 3356d5c65159SKalle Valo rxcb = ATH11K_SKB_RXCB(msdu); 3357d5c65159SKalle Valo dma_unmap_single(ar->ab->dev, rxcb->paddr, 3358d5c65159SKalle Valo msdu->len + skb_tailroom(msdu), 3359d5c65159SKalle Valo DMA_FROM_DEVICE); 3360d5c65159SKalle Valo 3361243874c6SManikanta Pubbisetty if (drop) { 3362d5c65159SKalle Valo dev_kfree_skb_any(msdu); 3363d5c65159SKalle Valo return 0; 3364d5c65159SKalle Valo } 3365d5c65159SKalle Valo 3366d5c65159SKalle Valo rcu_read_lock(); 3367d5c65159SKalle Valo if (!rcu_dereference(ar->ab->pdevs_active[ar->pdev_idx])) { 3368d5c65159SKalle Valo dev_kfree_skb_any(msdu); 3369d5c65159SKalle Valo goto exit; 3370d5c65159SKalle Valo } 3371d5c65159SKalle Valo 3372d5c65159SKalle Valo if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) { 3373d5c65159SKalle Valo dev_kfree_skb_any(msdu); 3374d5c65159SKalle Valo goto exit; 3375d5c65159SKalle Valo } 3376d5c65159SKalle Valo 3377d5c65159SKalle Valo rx_desc = (struct hal_rx_desc *)msdu->data; 3378d5c65159SKalle Valo msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(rx_desc); 3379d5c65159SKalle Valo skb_put(msdu, HAL_RX_DESC_SIZE + msdu_len); 3380d5c65159SKalle Valo 3381243874c6SManikanta Pubbisetty if (ath11k_dp_rx_frag_h_mpdu(ar, msdu, ring_desc)) { 3382243874c6SManikanta Pubbisetty dev_kfree_skb_any(msdu); 3383243874c6SManikanta Pubbisetty ath11k_dp_rx_link_desc_return(ar->ab, ring_desc, 3384243874c6SManikanta Pubbisetty HAL_WBM_REL_BM_ACT_PUT_IN_IDLE); 3385243874c6SManikanta Pubbisetty } 3386d5c65159SKalle Valo exit: 3387d5c65159SKalle Valo rcu_read_unlock(); 3388d5c65159SKalle Valo return 0; 3389d5c65159SKalle Valo } 3390d5c65159SKalle Valo 3391d5c65159SKalle Valo int ath11k_dp_process_rx_err(struct ath11k_base *ab, struct napi_struct *napi, 3392d5c65159SKalle Valo int budget) 3393d5c65159SKalle Valo { 3394293cb583SJohn Crispin u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC]; 3395d5c65159SKalle Valo struct 
dp_link_desc_bank *link_desc_banks; 3396d5c65159SKalle Valo enum hal_rx_buf_return_buf_manager rbm; 3397d5c65159SKalle Valo int tot_n_bufs_reaped, quota, ret, i; 3398d5c65159SKalle Valo int n_bufs_reaped[MAX_RADIOS] = {0}; 3399d5c65159SKalle Valo struct dp_rxdma_ring *rx_ring; 3400d5c65159SKalle Valo struct dp_srng *reo_except; 3401d5c65159SKalle Valo u32 desc_bank, num_msdus; 3402d5c65159SKalle Valo struct hal_srng *srng; 3403d5c65159SKalle Valo struct ath11k_dp *dp; 3404d5c65159SKalle Valo void *link_desc_va; 3405d5c65159SKalle Valo int buf_id, mac_id; 3406d5c65159SKalle Valo struct ath11k *ar; 3407d5c65159SKalle Valo dma_addr_t paddr; 3408d5c65159SKalle Valo u32 *desc; 3409d5c65159SKalle Valo bool is_frag; 3410243874c6SManikanta Pubbisetty u8 drop = 0; 3411d5c65159SKalle Valo 3412d5c65159SKalle Valo tot_n_bufs_reaped = 0; 3413d5c65159SKalle Valo quota = budget; 3414d5c65159SKalle Valo 3415d5c65159SKalle Valo dp = &ab->dp; 3416d5c65159SKalle Valo reo_except = &dp->reo_except_ring; 3417d5c65159SKalle Valo link_desc_banks = dp->link_desc_banks; 3418d5c65159SKalle Valo 3419d5c65159SKalle Valo srng = &ab->hal.srng_list[reo_except->ring_id]; 3420d5c65159SKalle Valo 3421d5c65159SKalle Valo spin_lock_bh(&srng->lock); 3422d5c65159SKalle Valo 3423d5c65159SKalle Valo ath11k_hal_srng_access_begin(ab, srng); 3424d5c65159SKalle Valo 3425d5c65159SKalle Valo while (budget && 3426d5c65159SKalle Valo (desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) { 3427293cb583SJohn Crispin struct hal_reo_dest_ring *reo_desc = (struct hal_reo_dest_ring *)desc; 3428293cb583SJohn Crispin 3429d5c65159SKalle Valo ab->soc_stats.err_ring_pkts++; 3430d5c65159SKalle Valo ret = ath11k_hal_desc_reo_parse_err(ab, desc, &paddr, 3431d5c65159SKalle Valo &desc_bank); 3432d5c65159SKalle Valo if (ret) { 3433d5c65159SKalle Valo ath11k_warn(ab, "failed to parse error reo desc %d\n", 3434d5c65159SKalle Valo ret); 3435d5c65159SKalle Valo continue; 3436d5c65159SKalle Valo } 3437d5c65159SKalle Valo link_desc_va 
= link_desc_banks[desc_bank].vaddr + 3438d5c65159SKalle Valo (paddr - link_desc_banks[desc_bank].paddr); 3439293cb583SJohn Crispin ath11k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, msdu_cookies, 3440d5c65159SKalle Valo &rbm); 3441d5c65159SKalle Valo if (rbm != HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST && 3442d5c65159SKalle Valo rbm != HAL_RX_BUF_RBM_SW3_BM) { 3443d5c65159SKalle Valo ab->soc_stats.invalid_rbm++; 3444d5c65159SKalle Valo ath11k_warn(ab, "invalid return buffer manager %d\n", rbm); 3445d5c65159SKalle Valo ath11k_dp_rx_link_desc_return(ab, desc, 3446d5c65159SKalle Valo HAL_WBM_REL_BM_ACT_REL_MSDU); 3447d5c65159SKalle Valo continue; 3448d5c65159SKalle Valo } 3449d5c65159SKalle Valo 3450293cb583SJohn Crispin is_frag = !!(reo_desc->rx_mpdu_info.info0 & RX_MPDU_DESC_INFO0_FRAG_FLAG); 3451d5c65159SKalle Valo 3452243874c6SManikanta Pubbisetty /* Process only rx fragments with one msdu per link desc below, and drop 3453243874c6SManikanta Pubbisetty * msdu's indicated due to error reasons. 
3454243874c6SManikanta Pubbisetty */ 3455243874c6SManikanta Pubbisetty if (!is_frag || num_msdus > 1) { 3456243874c6SManikanta Pubbisetty drop = 1; 3457d5c65159SKalle Valo /* Return the link desc back to wbm idle list */ 3458d5c65159SKalle Valo ath11k_dp_rx_link_desc_return(ab, desc, 3459d5c65159SKalle Valo HAL_WBM_REL_BM_ACT_PUT_IN_IDLE); 3460243874c6SManikanta Pubbisetty } 3461d5c65159SKalle Valo 3462d5c65159SKalle Valo for (i = 0; i < num_msdus; i++) { 3463d5c65159SKalle Valo buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, 3464293cb583SJohn Crispin msdu_cookies[i]); 3465d5c65159SKalle Valo 3466d5c65159SKalle Valo mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, 3467293cb583SJohn Crispin msdu_cookies[i]); 3468d5c65159SKalle Valo 3469d5c65159SKalle Valo ar = ab->pdevs[mac_id].ar; 3470d5c65159SKalle Valo 3471243874c6SManikanta Pubbisetty if (!ath11k_dp_process_rx_err_buf(ar, desc, buf_id, drop)) { 3472d5c65159SKalle Valo n_bufs_reaped[mac_id]++; 3473d5c65159SKalle Valo tot_n_bufs_reaped++; 3474d5c65159SKalle Valo } 3475d5c65159SKalle Valo } 3476d5c65159SKalle Valo 3477d5c65159SKalle Valo if (tot_n_bufs_reaped >= quota) { 3478d5c65159SKalle Valo tot_n_bufs_reaped = quota; 3479d5c65159SKalle Valo goto exit; 3480d5c65159SKalle Valo } 3481d5c65159SKalle Valo 3482d5c65159SKalle Valo budget = quota - tot_n_bufs_reaped; 3483d5c65159SKalle Valo } 3484d5c65159SKalle Valo 3485d5c65159SKalle Valo exit: 3486d5c65159SKalle Valo ath11k_hal_srng_access_end(ab, srng); 3487d5c65159SKalle Valo 3488d5c65159SKalle Valo spin_unlock_bh(&srng->lock); 3489d5c65159SKalle Valo 3490d5c65159SKalle Valo for (i = 0; i < ab->num_radios; i++) { 3491d5c65159SKalle Valo if (!n_bufs_reaped[i]) 3492d5c65159SKalle Valo continue; 3493d5c65159SKalle Valo 3494d5c65159SKalle Valo ar = ab->pdevs[i].ar; 3495d5c65159SKalle Valo rx_ring = &ar->dp.rx_refill_buf_ring; 3496d5c65159SKalle Valo 3497d5c65159SKalle Valo ath11k_dp_rxbufs_replenish(ab, i, rx_ring, n_bufs_reaped[i], 3498d5c65159SKalle Valo 
HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC); 3499d5c65159SKalle Valo } 3500d5c65159SKalle Valo 3501d5c65159SKalle Valo return tot_n_bufs_reaped; 3502d5c65159SKalle Valo } 3503d5c65159SKalle Valo 3504d5c65159SKalle Valo static void ath11k_dp_rx_null_q_desc_sg_drop(struct ath11k *ar, 3505d5c65159SKalle Valo int msdu_len, 3506d5c65159SKalle Valo struct sk_buff_head *msdu_list) 3507d5c65159SKalle Valo { 3508d5c65159SKalle Valo struct sk_buff *skb, *tmp; 3509d5c65159SKalle Valo struct ath11k_skb_rxcb *rxcb; 3510d5c65159SKalle Valo int n_buffs; 3511d5c65159SKalle Valo 3512d5c65159SKalle Valo n_buffs = DIV_ROUND_UP(msdu_len, 3513d5c65159SKalle Valo (DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE)); 3514d5c65159SKalle Valo 3515d5c65159SKalle Valo skb_queue_walk_safe(msdu_list, skb, tmp) { 3516d5c65159SKalle Valo rxcb = ATH11K_SKB_RXCB(skb); 3517d5c65159SKalle Valo if (rxcb->err_rel_src == HAL_WBM_REL_SRC_MODULE_REO && 3518d5c65159SKalle Valo rxcb->err_code == HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO) { 3519d5c65159SKalle Valo if (!n_buffs) 3520d5c65159SKalle Valo break; 3521d5c65159SKalle Valo __skb_unlink(skb, msdu_list); 3522d5c65159SKalle Valo dev_kfree_skb_any(skb); 3523d5c65159SKalle Valo n_buffs--; 3524d5c65159SKalle Valo } 3525d5c65159SKalle Valo } 3526d5c65159SKalle Valo } 3527d5c65159SKalle Valo 3528d5c65159SKalle Valo static int ath11k_dp_rx_h_null_q_desc(struct ath11k *ar, struct sk_buff *msdu, 3529d5c65159SKalle Valo struct ieee80211_rx_status *status, 3530d5c65159SKalle Valo struct sk_buff_head *msdu_list) 3531d5c65159SKalle Valo { 3532d5c65159SKalle Valo struct sk_buff_head amsdu_list; 3533d5c65159SKalle Valo u16 msdu_len; 3534d5c65159SKalle Valo struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data; 3535d5c65159SKalle Valo u8 l3pad_bytes; 3536d5c65159SKalle Valo struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); 3537d5c65159SKalle Valo 3538d5c65159SKalle Valo msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(desc); 3539d5c65159SKalle Valo 3540243874c6SManikanta 
	/* Frame spans multiple rx buffers: the caller frees the head buffer,
	 * so drop only the continuation buffers queued behind it.
	 */
	if (!rxcb->is_frag && ((msdu_len + HAL_RX_DESC_SIZE) > DP_RX_BUFFER_SIZE)) {
		/* First buffer will be freed by the caller, so deduct it's length */
		msdu_len = msdu_len - (DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE);
		ath11k_dp_rx_null_q_desc_sg_drop(ar, msdu_len, msdu_list);
		return -EINVAL;
	}

	if (!ath11k_dp_rx_h_attn_msdu_done(desc)) {
		ath11k_warn(ar->ab,
			    "msdu_done bit not set in null_q_des processing\n");
		__skb_queue_purge(msdu_list);
		return -EIO;
	}

	/* Handle NULL queue descriptor violations arising out a missing
	 * REO queue for a given peer or a given TID. This typically
	 * may happen if a packet is received on a QOS enabled TID before the
	 * ADDBA negotiation for that TID, when the TID queue is setup. Or
	 * it may also happen for MC/BC frames if they are not routed to the
	 * non-QOS TID queue, in the absence of any other default TID queue.
	 * This error can show up both in a REO destination or WBM release ring.
	 */

	__skb_queue_head_init(&amsdu_list);

	rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(desc);
	rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(desc);

	/* Fragments carry no l3 pad; for a normal msdu trim the skb to the
	 * hw-reported length and strip the rx descriptor plus padding.
	 */
	if (rxcb->is_frag) {
		skb_pull(msdu, HAL_RX_DESC_SIZE);
	} else {
		l3pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(desc);

		if ((HAL_RX_DESC_SIZE + l3pad_bytes + msdu_len) > DP_RX_BUFFER_SIZE)
			return -EINVAL;

		skb_put(msdu, HAL_RX_DESC_SIZE + l3pad_bytes + msdu_len);
		skb_pull(msdu, HAL_RX_DESC_SIZE + l3pad_bytes);
	}
	ath11k_dp_rx_h_ppdu(ar, desc, status);

	__skb_queue_tail(&amsdu_list, msdu);

	ath11k_dp_rx_h_mpdu(ar, &amsdu_list, desc, status);

	/* Please note that caller will having the access to msdu and completing
	 * rx with mac80211. Need not worry about cleaning up amsdu_list.
	 */

	return 0;
}

/* Dispatch an msdu released by the REO module with an error code.
 * Returns true when the caller must drop @msdu, false when it has been
 * recovered and may be delivered.
 */
static bool ath11k_dp_rx_h_reo_err(struct ath11k *ar, struct sk_buff *msdu,
				   struct ieee80211_rx_status *status,
				   struct sk_buff_head *msdu_list)
{
	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
	bool drop = false;

	ar->ab->soc_stats.reo_error[rxcb->err_code]++;

	switch (rxcb->err_code) {
	case HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO:
		if (ath11k_dp_rx_h_null_q_desc(ar, msdu, status, msdu_list))
			drop = true;
		break;
	default:
		/* TODO: Review other errors and process them to mac80211
		 * as appropriate.
		 */
		drop = true;
		break;
	}

	return drop;
}

/* Prepare an msdu that failed the TKIP Michael MIC check for delivery:
 * trim/strip the rx descriptor, mark the MIC failure so mac80211 can run
 * its countermeasures, and undecap the frame.
 */
static void ath11k_dp_rx_h_tkip_mic_err(struct ath11k *ar, struct sk_buff *msdu,
					struct ieee80211_rx_status *status)
{
	u16 msdu_len;
	struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
	u8 l3pad_bytes;
	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);

	rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(desc);
	rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(desc);

	/* NOTE(review): unlike the null-q path, the hw-reported msdu_len is
	 * not validated against DP_RX_BUFFER_SIZE before skb_put() here —
	 * confirm the length is trustworthy on this path.
	 */
	l3pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(desc);
	msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(desc);
	skb_put(msdu, HAL_RX_DESC_SIZE + l3pad_bytes + msdu_len);
	skb_pull(msdu, HAL_RX_DESC_SIZE + l3pad_bytes);

	ath11k_dp_rx_h_ppdu(ar, desc, status);

	status->flag |= (RX_FLAG_MMIC_STRIPPED | RX_FLAG_MMIC_ERROR |
			 RX_FLAG_DECRYPTED);

	ath11k_dp_rx_h_undecap(ar, msdu, desc,
			       HAL_ENCRYPT_TYPE_TKIP_MIC, status, false);
}

/* Dispatch an msdu released by the RXDMA module with an error code.
 * Returns true when the caller must drop @msdu.
 */
static bool ath11k_dp_rx_h_rxdma_err(struct ath11k *ar, struct sk_buff *msdu,
				     struct ieee80211_rx_status *status)
{
	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
	bool drop = false;

	ar->ab->soc_stats.rxdma_error[rxcb->err_code]++;

	switch (rxcb->err_code) {
	case HAL_REO_ENTR_RING_RXDMA_ECODE_TKIP_MIC_ERR:
		/* MIC failure is still delivered so mac80211 can trigger
		 * TKIP countermeasures.
		 */
		ath11k_dp_rx_h_tkip_mic_err(ar, msdu, status);
		break;
	default:
		/* TODO: Review other rxdma error code to check if anything is
		 * worth reporting to mac80211
		 */
		drop = true;
		break;
	}

	return drop;
}

/* Handle one msdu from the WBM error release ring: route it to the REO or
 * RXDMA error handler based on the release source, then either free it or
 * deliver it to mac80211.  Consumes @msdu in all cases.
 */
static void ath11k_dp_rx_wbm_err(struct ath11k *ar,
				 struct napi_struct *napi,
				 struct sk_buff *msdu,
				 struct sk_buff_head *msdu_list)
{
	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
	struct ieee80211_rx_status rxs = {0};
	struct ieee80211_rx_status *status;
	bool drop = true;

	switch (rxcb->err_rel_src) {
	case HAL_WBM_REL_SRC_MODULE_REO:
		drop = ath11k_dp_rx_h_reo_err(ar, msdu, &rxs, msdu_list);
		break;
	case HAL_WBM_REL_SRC_MODULE_RXDMA:
		drop = ath11k_dp_rx_h_rxdma_err(ar, msdu, &rxs);
		break;
	default:
		/* msdu will get freed */
		break;
	}

	if (drop) {
		dev_kfree_skb_any(msdu);
		return;
	}

	/* Copy the locally built rx status into the skb control block
	 * before handing the frame to mac80211.
	 */
	status = IEEE80211_SKB_RXCB(msdu);
	*status = rxs;

	ath11k_dp_rx_deliver_msdu(ar, napi, msdu);
}

/* NAPI handler for the WBM error release ring.
 *
 * Reaps up to @budget buffers, unmaps them and sorts them into per-pdev
 * lists; buffers not pushed for a detected error are freed immediately.
 * After replenishing the refill rings, surviving msdus are run through
 * ath11k_dp_rx_wbm_err() unless the pdev is inactive or CAC is running.
 * Returns the number of buffers reaped.
 */
int ath11k_dp_rx_process_wbm_err(struct ath11k_base *ab,
				 struct napi_struct *napi, int budget)
{
	struct ath11k *ar;
	struct ath11k_dp *dp = &ab->dp;
	struct dp_rxdma_ring *rx_ring;
	struct hal_rx_wbm_rel_info err_info;
	struct hal_srng *srng;
	struct sk_buff *msdu;
	struct sk_buff_head msdu_list[MAX_RADIOS];
	struct ath11k_skb_rxcb *rxcb;
	u32 *rx_desc;
	int buf_id, mac_id;
	int num_buffs_reaped[MAX_RADIOS] = {0};
	int total_num_buffs_reaped = 0;
	int ret, i;

	for (i = 0; i < MAX_RADIOS; i++)
		__skb_queue_head_init(&msdu_list[i]);

	srng = &ab->hal.srng_list[dp->rx_rel_ring.ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	while (budget) {
		rx_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng);
		if (!rx_desc)
			break;

		ret = ath11k_hal_wbm_desc_parse_err(ab, rx_desc, &err_info);
		if (ret) {
			ath11k_warn(ab,
				    "failed to parse rx error in wbm_rel ring desc %d\n",
				    ret);
			continue;
		}

		/* The cookie encodes both the owning pdev and the buffer id
		 * in that pdev's refill ring idr.
		 */
		buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, err_info.cookie);
		mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, err_info.cookie);

		ar = ab->pdevs[mac_id].ar;
		rx_ring = &ar->dp.rx_refill_buf_ring;

		spin_lock_bh(&rx_ring->idr_lock);
		msdu = idr_find(&rx_ring->bufs_idr, buf_id);
		if (!msdu) {
			ath11k_warn(ab, "frame rx with invalid buf_id %d pdev %d\n",
				    buf_id, mac_id);
			spin_unlock_bh(&rx_ring->idr_lock);
			continue;
		}

		idr_remove(&rx_ring->bufs_idr, buf_id);
		spin_unlock_bh(&rx_ring->idr_lock);

		rxcb = ATH11K_SKB_RXCB(msdu);
		dma_unmap_single(ab->dev, rxcb->paddr,
				 msdu->len + skb_tailroom(msdu),
				 DMA_FROM_DEVICE);

		num_buffs_reaped[mac_id]++;
		total_num_buffs_reaped++;
		budget--;

		/* Only buffers released because an error was detected are
		 * interesting; anything else is simply freed.
		 */
		if (err_info.push_reason !=
		    HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) {
			dev_kfree_skb_any(msdu);
			continue;
		}

		/* Stash the error source/code for the per-msdu handler run
		 * after the srng lock is dropped.
		 */
		rxcb->err_rel_src = err_info.err_rel_src;
		rxcb->err_code = err_info.err_code;
		rxcb->rx_desc = (struct hal_rx_desc *)msdu->data;
		__skb_queue_tail(&msdu_list[mac_id], msdu);
	}

	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	if (!total_num_buffs_reaped)
		goto done;

	/* Replenish the refill rings before processing the queued msdus */
	for (i = 0; i < ab->num_radios; i++) {
		if (!num_buffs_reaped[i])
			continue;

		ar = ab->pdevs[i].ar;
		rx_ring = &ar->dp.rx_refill_buf_ring;

		ath11k_dp_rxbufs_replenish(ab, i, rx_ring, num_buffs_reaped[i],
					   HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC);
	}

	rcu_read_lock();
	for (i = 0; i < ab->num_radios; i++) {
		/* Drop everything queued for a pdev that went away or is in
		 * the CAC (radar detection) phase.
		 */
		if (!rcu_dereference(ab->pdevs_active[i])) {
			__skb_queue_purge(&msdu_list[i]);
			continue;
		}

		ar = ab->pdevs[i].ar;

		if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) {
			__skb_queue_purge(&msdu_list[i]);
			continue;
		}

		while ((msdu = __skb_dequeue(&msdu_list[i])) != NULL)
			ath11k_dp_rx_wbm_err(ar, napi, msdu, &msdu_list[i]);
	}
	rcu_read_unlock();
done:
	return total_num_buffs_reaped;
}

/* NAPI handler for a pdev's RXDMA error destination ring.
 *
 * Every msdu referenced by an errored entry is unmapped and freed (these
 * frames are never delivered), the link descriptor is returned to the WBM
 * idle list, and the freed buffers are replenished.  Returns the number
 * of ring entries processed.
 */
int ath11k_dp_process_rxdma_err(struct ath11k_base *ab, int mac_id, int budget)
{
	struct ath11k *ar = ab->pdevs[mac_id].ar;
	struct dp_srng *err_ring = &ar->dp.rxdma_err_dst_ring;
	struct dp_rxdma_ring *rx_ring = &ar->dp.rx_refill_buf_ring;
	struct dp_link_desc_bank *link_desc_banks = ab->dp.link_desc_banks;
	struct hal_srng *srng;
	u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC];
	enum hal_rx_buf_return_buf_manager rbm;
	enum hal_reo_entr_rxdma_ecode rxdma_err_code;
	struct ath11k_skb_rxcb *rxcb;
	struct sk_buff *skb;
	struct hal_reo_entrance_ring *entr_ring;
	void *desc;
	int num_buf_freed = 0;
	int quota = budget;
	dma_addr_t paddr;
	u32 desc_bank;
	void *link_desc_va;
	int num_msdus;
	int i;
	int buf_id;

	srng = &ab->hal.srng_list[err_ring->ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	/* NOTE(review): quota is decremented on the test that terminates the
	 * loop as well, so "budget - quota" can over-report by one when the
	 * full budget is consumed — confirm whether callers care.
	 */
	while (quota-- &&
	       (desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
		ath11k_hal_rx_reo_ent_paddr_get(ab, desc, &paddr, &desc_bank);

		entr_ring = (struct hal_reo_entrance_ring *)desc;
		rxdma_err_code =
			FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE,
				  entr_ring->info1);
		ab->soc_stats.rxdma_error[rxdma_err_code]++;

		link_desc_va = link_desc_banks[desc_bank].vaddr +
			       (paddr - link_desc_banks[desc_bank].paddr);
		ath11k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus,
						 msdu_cookies, &rbm);

		for (i = 0; i < num_msdus; i++) {
			buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
					   msdu_cookies[i]);

			spin_lock_bh(&rx_ring->idr_lock);
			skb = idr_find(&rx_ring->bufs_idr, buf_id);
			if (!skb) {
				ath11k_warn(ab, "rxdma error with invalid buf_id %d\n",
					    buf_id);
				spin_unlock_bh(&rx_ring->idr_lock);
				continue;
			}

			idr_remove(&rx_ring->bufs_idr, buf_id);
			spin_unlock_bh(&rx_ring->idr_lock);

			rxcb = ATH11K_SKB_RXCB(skb);
			dma_unmap_single(ab->dev, rxcb->paddr,
					 skb->len + skb_tailroom(skb),
					 DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);

			num_buf_freed++;
		}

		ath11k_dp_rx_link_desc_return(ab, desc,
					      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
	}

	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	if (num_buf_freed)
		ath11k_dp_rxbufs_replenish(ab, mac_id, rx_ring, num_buf_freed,
					   HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC);

	return budget - quota;
}

/* Drain the REO status ring and complete pending REO commands.
 *
 * Each status TLV is parsed into reo_status; the matching queued command
 * (by cmd_num) is removed from the list under reo_cmd_lock, its handler
 * invoked, and the command freed.
 */
void ath11k_dp_process_reo_status(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	struct hal_srng *srng;
	struct dp_reo_cmd *cmd, *tmp;
	bool found = false;
	u32 *reo_desc;
	u16 tag;
	struct hal_reo_status reo_status;

	srng = &ab->hal.srng_list[dp->reo_status_ring.ring_id];

	memset(&reo_status, 0, sizeof(reo_status));

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	while ((reo_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
		tag = FIELD_GET(HAL_SRNG_TLV_HDR_TAG, *reo_desc);

		/* Parse the status TLV into reo_status; unknown tags are
		 * skipped without touching the command list.
		 */
		switch (tag) {
		case HAL_REO_GET_QUEUE_STATS_STATUS:
			ath11k_hal_reo_status_queue_stats(ab, reo_desc,
							  &reo_status);
			break;
		case HAL_REO_FLUSH_QUEUE_STATUS:
			ath11k_hal_reo_flush_queue_status(ab, reo_desc,
							  &reo_status);
			break;
		case HAL_REO_FLUSH_CACHE_STATUS:
			ath11k_hal_reo_flush_cache_status(ab, reo_desc,
							  &reo_status);
			break;
		case HAL_REO_UNBLOCK_CACHE_STATUS:
			ath11k_hal_reo_unblk_cache_status(ab, reo_desc,
							  &reo_status);
			break;
		case HAL_REO_FLUSH_TIMEOUT_LIST_STATUS:
			ath11k_hal_reo_flush_timeout_list_status(ab, reo_desc,
								 &reo_status);
			break;
		case HAL_REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS:
			ath11k_hal_reo_desc_thresh_reached_status(ab, reo_desc,
								  &reo_status);
			break;
		case HAL_REO_UPDATE_RX_REO_QUEUE_STATUS:
			ath11k_hal_reo_update_rx_reo_queue_status(ab, reo_desc,
								  &reo_status);
			break;
		default:
			ath11k_warn(ab, "Unknown reo status type %d\n", tag);
			continue;
		}

		/* Unlink the matching command under the lock; the handler is
		 * invoked after the lock is dropped.
		 */
		spin_lock_bh(&dp->reo_cmd_lock);
		list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
			if (reo_status.uniform_hdr.cmd_num == cmd->cmd_num) {
				found = true;
				list_del(&cmd->list);
				break;
			}
		}
		spin_unlock_bh(&dp->reo_cmd_lock);

		if (found) {
			cmd->handler(dp, (void *)&cmd->data,
				     reo_status.uniform_hdr.cmd_status);
			kfree(cmd);
		}

		/* reset for the next status entry */
		found = false;
	}

	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);
}

/* Tear down per-pdev rx state: srngs and rxdma buffers. */
void ath11k_dp_rx_pdev_free(struct ath11k_base *ab, int mac_id)
{
	struct ath11k *ar = ab->pdevs[mac_id].ar;

	ath11k_dp_rx_pdev_srng_free(ar);
	ath11k_dp_rxdma_pdev_buf_free(ar);
}

int ath11k_dp_rx_pdev_alloc(struct ath11k_base *ab, int mac_id)
3988d5c65159SKalle Valo { 3989d5c65159SKalle Valo struct ath11k *ar = ab->pdevs[mac_id].ar; 3990d5c65159SKalle Valo struct ath11k_pdev_dp *dp = &ar->dp; 3991d5c65159SKalle Valo u32 ring_id; 3992d5c65159SKalle Valo int ret; 3993d5c65159SKalle Valo 3994d5c65159SKalle Valo ret = ath11k_dp_rx_pdev_srng_alloc(ar); 3995d5c65159SKalle Valo if (ret) { 3996d5c65159SKalle Valo ath11k_warn(ab, "failed to setup rx srngs\n"); 3997d5c65159SKalle Valo return ret; 3998d5c65159SKalle Valo } 3999d5c65159SKalle Valo 4000d5c65159SKalle Valo ret = ath11k_dp_rxdma_pdev_buf_setup(ar); 4001d5c65159SKalle Valo if (ret) { 4002d5c65159SKalle Valo ath11k_warn(ab, "failed to setup rxdma ring\n"); 4003d5c65159SKalle Valo return ret; 4004d5c65159SKalle Valo } 4005d5c65159SKalle Valo 4006d5c65159SKalle Valo ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id; 4007d5c65159SKalle Valo ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id, HAL_RXDMA_BUF); 4008d5c65159SKalle Valo if (ret) { 4009d5c65159SKalle Valo ath11k_warn(ab, "failed to configure rx_refill_buf_ring %d\n", 4010d5c65159SKalle Valo ret); 4011d5c65159SKalle Valo return ret; 4012d5c65159SKalle Valo } 4013d5c65159SKalle Valo 4014d5c65159SKalle Valo ring_id = dp->rxdma_err_dst_ring.ring_id; 4015d5c65159SKalle Valo ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id, HAL_RXDMA_DST); 4016d5c65159SKalle Valo if (ret) { 4017d5c65159SKalle Valo ath11k_warn(ab, "failed to configure rxdma_err_dest_ring %d\n", 4018d5c65159SKalle Valo ret); 4019d5c65159SKalle Valo return ret; 4020d5c65159SKalle Valo } 4021d5c65159SKalle Valo 4022d5c65159SKalle Valo ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id; 4023d5c65159SKalle Valo ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, 4024d5c65159SKalle Valo mac_id, HAL_RXDMA_MONITOR_BUF); 4025d5c65159SKalle Valo if (ret) { 4026d5c65159SKalle Valo ath11k_warn(ab, "failed to configure rxdma_mon_buf_ring %d\n", 4027d5c65159SKalle Valo ret); 4028d5c65159SKalle Valo return ret; 4029d5c65159SKalle Valo 
} 4030d5c65159SKalle Valo ret = ath11k_dp_tx_htt_srng_setup(ab, 4031d5c65159SKalle Valo dp->rxdma_mon_dst_ring.ring_id, 4032d5c65159SKalle Valo mac_id, HAL_RXDMA_MONITOR_DST); 4033d5c65159SKalle Valo if (ret) { 4034d5c65159SKalle Valo ath11k_warn(ab, "failed to configure rxdma_mon_dst_ring %d\n", 4035d5c65159SKalle Valo ret); 4036d5c65159SKalle Valo return ret; 4037d5c65159SKalle Valo } 4038d5c65159SKalle Valo ret = ath11k_dp_tx_htt_srng_setup(ab, 4039d5c65159SKalle Valo dp->rxdma_mon_desc_ring.ring_id, 4040d5c65159SKalle Valo mac_id, HAL_RXDMA_MONITOR_DESC); 4041d5c65159SKalle Valo if (ret) { 4042d5c65159SKalle Valo ath11k_warn(ab, "failed to configure rxdma_mon_dst_ring %d\n", 4043d5c65159SKalle Valo ret); 4044d5c65159SKalle Valo return ret; 4045d5c65159SKalle Valo } 4046d5c65159SKalle Valo ring_id = dp->rx_mon_status_refill_ring.refill_buf_ring.ring_id; 4047d5c65159SKalle Valo ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id, 4048d5c65159SKalle Valo HAL_RXDMA_MONITOR_STATUS); 4049d5c65159SKalle Valo if (ret) { 4050d5c65159SKalle Valo ath11k_warn(ab, 4051d5c65159SKalle Valo "failed to configure mon_status_refill_ring %d\n", 4052d5c65159SKalle Valo ret); 4053d5c65159SKalle Valo return ret; 4054d5c65159SKalle Valo } 4055d5c65159SKalle Valo return 0; 4056d5c65159SKalle Valo } 4057d5c65159SKalle Valo 4058d5c65159SKalle Valo static void ath11k_dp_mon_set_frag_len(u32 *total_len, u32 *frag_len) 4059d5c65159SKalle Valo { 4060d5c65159SKalle Valo if (*total_len >= (DP_RX_BUFFER_SIZE - sizeof(struct hal_rx_desc))) { 4061d5c65159SKalle Valo *frag_len = DP_RX_BUFFER_SIZE - sizeof(struct hal_rx_desc); 4062d5c65159SKalle Valo *total_len -= *frag_len; 4063d5c65159SKalle Valo } else { 4064d5c65159SKalle Valo *frag_len = *total_len; 4065d5c65159SKalle Valo *total_len = 0; 4066d5c65159SKalle Valo } 4067d5c65159SKalle Valo } 4068d5c65159SKalle Valo 4069d5c65159SKalle Valo static 4070d5c65159SKalle Valo int ath11k_dp_rx_monitor_link_desc_return(struct ath11k *ar, 
4071d5c65159SKalle Valo void *p_last_buf_addr_info, 4072d5c65159SKalle Valo u8 mac_id) 4073d5c65159SKalle Valo { 4074d5c65159SKalle Valo struct ath11k_pdev_dp *dp = &ar->dp; 4075d5c65159SKalle Valo struct dp_srng *dp_srng; 4076d5c65159SKalle Valo void *hal_srng; 4077d5c65159SKalle Valo void *src_srng_desc; 4078d5c65159SKalle Valo int ret = 0; 4079d5c65159SKalle Valo 4080d5c65159SKalle Valo dp_srng = &dp->rxdma_mon_desc_ring; 4081d5c65159SKalle Valo hal_srng = &ar->ab->hal.srng_list[dp_srng->ring_id]; 4082d5c65159SKalle Valo 4083d5c65159SKalle Valo ath11k_hal_srng_access_begin(ar->ab, hal_srng); 4084d5c65159SKalle Valo 4085d5c65159SKalle Valo src_srng_desc = ath11k_hal_srng_src_get_next_entry(ar->ab, hal_srng); 4086d5c65159SKalle Valo 4087d5c65159SKalle Valo if (src_srng_desc) { 4088d5c65159SKalle Valo struct ath11k_buffer_addr *src_desc = 4089d5c65159SKalle Valo (struct ath11k_buffer_addr *)src_srng_desc; 4090d5c65159SKalle Valo 4091d5c65159SKalle Valo *src_desc = *((struct ath11k_buffer_addr *)p_last_buf_addr_info); 4092d5c65159SKalle Valo } else { 4093d5c65159SKalle Valo ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 4094d5c65159SKalle Valo "Monitor Link Desc Ring %d Full", mac_id); 4095d5c65159SKalle Valo ret = -ENOMEM; 4096d5c65159SKalle Valo } 4097d5c65159SKalle Valo 4098d5c65159SKalle Valo ath11k_hal_srng_access_end(ar->ab, hal_srng); 4099d5c65159SKalle Valo return ret; 4100d5c65159SKalle Valo } 4101d5c65159SKalle Valo 4102d5c65159SKalle Valo static 4103d5c65159SKalle Valo void ath11k_dp_rx_mon_next_link_desc_get(void *rx_msdu_link_desc, 4104d5c65159SKalle Valo dma_addr_t *paddr, u32 *sw_cookie, 4105d5c65159SKalle Valo void **pp_buf_addr_info) 4106d5c65159SKalle Valo { 4107d5c65159SKalle Valo struct hal_rx_msdu_link *msdu_link = 4108d5c65159SKalle Valo (struct hal_rx_msdu_link *)rx_msdu_link_desc; 4109d5c65159SKalle Valo struct ath11k_buffer_addr *buf_addr_info; 4110d5c65159SKalle Valo u8 rbm = 0; 4111d5c65159SKalle Valo 4112d5c65159SKalle Valo buf_addr_info = (struct 
ath11k_buffer_addr *)&msdu_link->buf_addr_info; 4113d5c65159SKalle Valo 4114d5c65159SKalle Valo ath11k_hal_rx_buf_addr_info_get(buf_addr_info, paddr, sw_cookie, &rbm); 4115d5c65159SKalle Valo 4116d5c65159SKalle Valo *pp_buf_addr_info = (void *)buf_addr_info; 4117d5c65159SKalle Valo } 4118d5c65159SKalle Valo 4119d5c65159SKalle Valo static int ath11k_dp_pkt_set_pktlen(struct sk_buff *skb, u32 len) 4120d5c65159SKalle Valo { 4121d5c65159SKalle Valo if (skb->len > len) { 4122d5c65159SKalle Valo skb_trim(skb, len); 4123d5c65159SKalle Valo } else { 4124d5c65159SKalle Valo if (skb_tailroom(skb) < len - skb->len) { 4125d5c65159SKalle Valo if ((pskb_expand_head(skb, 0, 4126d5c65159SKalle Valo len - skb->len - skb_tailroom(skb), 4127d5c65159SKalle Valo GFP_ATOMIC))) { 4128d5c65159SKalle Valo dev_kfree_skb_any(skb); 4129d5c65159SKalle Valo return -ENOMEM; 4130d5c65159SKalle Valo } 4131d5c65159SKalle Valo } 4132d5c65159SKalle Valo skb_put(skb, (len - skb->len)); 4133d5c65159SKalle Valo } 4134d5c65159SKalle Valo return 0; 4135d5c65159SKalle Valo } 4136d5c65159SKalle Valo 4137d5c65159SKalle Valo static void ath11k_hal_rx_msdu_list_get(struct ath11k *ar, 4138d5c65159SKalle Valo void *msdu_link_desc, 4139d5c65159SKalle Valo struct hal_rx_msdu_list *msdu_list, 4140d5c65159SKalle Valo u16 *num_msdus) 4141d5c65159SKalle Valo { 4142d5c65159SKalle Valo struct hal_rx_msdu_details *msdu_details = NULL; 4143d5c65159SKalle Valo struct rx_msdu_desc *msdu_desc_info = NULL; 4144d5c65159SKalle Valo struct hal_rx_msdu_link *msdu_link = NULL; 4145d5c65159SKalle Valo int i; 4146d5c65159SKalle Valo u32 last = FIELD_PREP(RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU, 1); 4147d5c65159SKalle Valo u32 first = FIELD_PREP(RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU, 1); 4148d5c65159SKalle Valo u8 tmp = 0; 4149d5c65159SKalle Valo 4150d5c65159SKalle Valo msdu_link = (struct hal_rx_msdu_link *)msdu_link_desc; 4151d5c65159SKalle Valo msdu_details = &msdu_link->msdu_link[0]; 4152d5c65159SKalle Valo 4153d5c65159SKalle Valo 
for (i = 0; i < HAL_RX_NUM_MSDU_DESC; i++) { 4154d5c65159SKalle Valo if (FIELD_GET(BUFFER_ADDR_INFO0_ADDR, 4155d5c65159SKalle Valo msdu_details[i].buf_addr_info.info0) == 0) { 4156d5c65159SKalle Valo msdu_desc_info = &msdu_details[i - 1].rx_msdu_info; 4157d5c65159SKalle Valo msdu_desc_info->info0 |= last; 4158d5c65159SKalle Valo ; 4159d5c65159SKalle Valo break; 4160d5c65159SKalle Valo } 4161d5c65159SKalle Valo msdu_desc_info = &msdu_details[i].rx_msdu_info; 4162d5c65159SKalle Valo 4163d5c65159SKalle Valo if (!i) 4164d5c65159SKalle Valo msdu_desc_info->info0 |= first; 4165d5c65159SKalle Valo else if (i == (HAL_RX_NUM_MSDU_DESC - 1)) 4166d5c65159SKalle Valo msdu_desc_info->info0 |= last; 4167d5c65159SKalle Valo msdu_list->msdu_info[i].msdu_flags = msdu_desc_info->info0; 4168d5c65159SKalle Valo msdu_list->msdu_info[i].msdu_len = 4169d5c65159SKalle Valo HAL_RX_MSDU_PKT_LENGTH_GET(msdu_desc_info->info0); 4170d5c65159SKalle Valo msdu_list->sw_cookie[i] = 4171d5c65159SKalle Valo FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE, 4172d5c65159SKalle Valo msdu_details[i].buf_addr_info.info1); 4173d5c65159SKalle Valo tmp = FIELD_GET(BUFFER_ADDR_INFO1_RET_BUF_MGR, 4174d5c65159SKalle Valo msdu_details[i].buf_addr_info.info1); 4175d5c65159SKalle Valo msdu_list->rbm[i] = tmp; 4176d5c65159SKalle Valo } 4177d5c65159SKalle Valo *num_msdus = i; 4178d5c65159SKalle Valo } 4179d5c65159SKalle Valo 4180d5c65159SKalle Valo static u32 ath11k_dp_rx_mon_comp_ppduid(u32 msdu_ppdu_id, u32 *ppdu_id, 4181d5c65159SKalle Valo u32 *rx_bufs_used) 4182d5c65159SKalle Valo { 4183d5c65159SKalle Valo u32 ret = 0; 4184d5c65159SKalle Valo 4185d5c65159SKalle Valo if ((*ppdu_id < msdu_ppdu_id) && 4186d5c65159SKalle Valo ((msdu_ppdu_id - *ppdu_id) < DP_NOT_PPDU_ID_WRAP_AROUND)) { 4187d5c65159SKalle Valo *ppdu_id = msdu_ppdu_id; 4188d5c65159SKalle Valo ret = msdu_ppdu_id; 4189d5c65159SKalle Valo } else if ((*ppdu_id > msdu_ppdu_id) && 4190d5c65159SKalle Valo ((*ppdu_id - msdu_ppdu_id) > DP_NOT_PPDU_ID_WRAP_AROUND)) { 
4191d5c65159SKalle Valo /* mon_dst is behind than mon_status 4192d5c65159SKalle Valo * skip dst_ring and free it 4193d5c65159SKalle Valo */ 4194d5c65159SKalle Valo *rx_bufs_used += 1; 4195d5c65159SKalle Valo *ppdu_id = msdu_ppdu_id; 4196d5c65159SKalle Valo ret = msdu_ppdu_id; 4197d5c65159SKalle Valo } 4198d5c65159SKalle Valo return ret; 4199d5c65159SKalle Valo } 4200d5c65159SKalle Valo 4201d5c65159SKalle Valo static void ath11k_dp_mon_get_buf_len(struct hal_rx_msdu_desc_info *info, 4202d5c65159SKalle Valo bool *is_frag, u32 *total_len, 4203d5c65159SKalle Valo u32 *frag_len, u32 *msdu_cnt) 4204d5c65159SKalle Valo { 4205d5c65159SKalle Valo if (info->msdu_flags & RX_MSDU_DESC_INFO0_MSDU_CONTINUATION) { 4206d5c65159SKalle Valo if (!*is_frag) { 4207d5c65159SKalle Valo *total_len = info->msdu_len; 4208d5c65159SKalle Valo *is_frag = true; 4209d5c65159SKalle Valo } 4210d5c65159SKalle Valo ath11k_dp_mon_set_frag_len(total_len, 4211d5c65159SKalle Valo frag_len); 4212d5c65159SKalle Valo } else { 4213d5c65159SKalle Valo if (*is_frag) { 4214d5c65159SKalle Valo ath11k_dp_mon_set_frag_len(total_len, 4215d5c65159SKalle Valo frag_len); 4216d5c65159SKalle Valo } else { 4217d5c65159SKalle Valo *frag_len = info->msdu_len; 4218d5c65159SKalle Valo } 4219d5c65159SKalle Valo *is_frag = false; 4220d5c65159SKalle Valo *msdu_cnt -= 1; 4221d5c65159SKalle Valo } 4222d5c65159SKalle Valo } 4223d5c65159SKalle Valo 4224d5c65159SKalle Valo static u32 4225d5c65159SKalle Valo ath11k_dp_rx_mon_mpdu_pop(struct ath11k *ar, 4226d5c65159SKalle Valo void *ring_entry, struct sk_buff **head_msdu, 4227d5c65159SKalle Valo struct sk_buff **tail_msdu, u32 *npackets, 4228d5c65159SKalle Valo u32 *ppdu_id) 4229d5c65159SKalle Valo { 4230d5c65159SKalle Valo struct ath11k_pdev_dp *dp = &ar->dp; 4231d5c65159SKalle Valo struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data; 4232d5c65159SKalle Valo struct dp_rxdma_ring *rx_ring = &dp->rxdma_mon_buf_ring; 4233d5c65159SKalle Valo struct sk_buff *msdu = 
NULL, *last = NULL; 4234d5c65159SKalle Valo struct hal_rx_msdu_list msdu_list; 4235d5c65159SKalle Valo void *p_buf_addr_info, *p_last_buf_addr_info; 4236d5c65159SKalle Valo struct hal_rx_desc *rx_desc; 4237d5c65159SKalle Valo void *rx_msdu_link_desc; 4238d5c65159SKalle Valo dma_addr_t paddr; 4239d5c65159SKalle Valo u16 num_msdus = 0; 4240d5c65159SKalle Valo u32 rx_buf_size, rx_pkt_offset, sw_cookie; 4241d5c65159SKalle Valo u32 rx_bufs_used = 0, i = 0; 4242d5c65159SKalle Valo u32 msdu_ppdu_id = 0, msdu_cnt = 0; 4243d5c65159SKalle Valo u32 total_len = 0, frag_len = 0; 4244d5c65159SKalle Valo bool is_frag, is_first_msdu; 4245d5c65159SKalle Valo bool drop_mpdu = false; 4246d5c65159SKalle Valo struct ath11k_skb_rxcb *rxcb; 4247d5c65159SKalle Valo struct hal_reo_entrance_ring *ent_desc = 4248d5c65159SKalle Valo (struct hal_reo_entrance_ring *)ring_entry; 4249d5c65159SKalle Valo int buf_id; 4250d5c65159SKalle Valo 4251d5c65159SKalle Valo ath11k_hal_rx_reo_ent_buf_paddr_get(ring_entry, &paddr, 4252d5c65159SKalle Valo &sw_cookie, &p_last_buf_addr_info, 4253d5c65159SKalle Valo &msdu_cnt); 4254d5c65159SKalle Valo 4255d5c65159SKalle Valo if (FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_PUSH_REASON, 4256d5c65159SKalle Valo ent_desc->info1) == 4257d5c65159SKalle Valo HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) { 4258d5c65159SKalle Valo u8 rxdma_err = 4259d5c65159SKalle Valo FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE, 4260d5c65159SKalle Valo ent_desc->info1); 4261d5c65159SKalle Valo if (rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_FLUSH_REQUEST_ERR || 4262d5c65159SKalle Valo rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_MPDU_LEN_ERR || 4263d5c65159SKalle Valo rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_OVERFLOW_ERR) { 4264d5c65159SKalle Valo drop_mpdu = true; 4265d5c65159SKalle Valo pmon->rx_mon_stats.dest_mpdu_drop++; 4266d5c65159SKalle Valo } 4267d5c65159SKalle Valo } 4268d5c65159SKalle Valo 4269d5c65159SKalle Valo is_frag = false; 4270d5c65159SKalle Valo is_first_msdu = true; 
4271d5c65159SKalle Valo 4272d5c65159SKalle Valo do { 4273d5c65159SKalle Valo if (pmon->mon_last_linkdesc_paddr == paddr) { 4274d5c65159SKalle Valo pmon->rx_mon_stats.dup_mon_linkdesc_cnt++; 4275d5c65159SKalle Valo return rx_bufs_used; 4276d5c65159SKalle Valo } 4277d5c65159SKalle Valo 4278d5c65159SKalle Valo rx_msdu_link_desc = 4279d5c65159SKalle Valo (void *)pmon->link_desc_banks[sw_cookie].vaddr + 4280d5c65159SKalle Valo (paddr - pmon->link_desc_banks[sw_cookie].paddr); 4281d5c65159SKalle Valo 4282d5c65159SKalle Valo ath11k_hal_rx_msdu_list_get(ar, rx_msdu_link_desc, &msdu_list, 4283d5c65159SKalle Valo &num_msdus); 4284d5c65159SKalle Valo 4285d5c65159SKalle Valo for (i = 0; i < num_msdus; i++) { 4286d5c65159SKalle Valo u32 l2_hdr_offset; 4287d5c65159SKalle Valo 4288d5c65159SKalle Valo if (pmon->mon_last_buf_cookie == msdu_list.sw_cookie[i]) { 4289d5c65159SKalle Valo ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 4290d5c65159SKalle Valo "i %d last_cookie %d is same\n", 4291d5c65159SKalle Valo i, pmon->mon_last_buf_cookie); 4292d5c65159SKalle Valo drop_mpdu = true; 4293d5c65159SKalle Valo pmon->rx_mon_stats.dup_mon_buf_cnt++; 4294d5c65159SKalle Valo continue; 4295d5c65159SKalle Valo } 4296d5c65159SKalle Valo buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, 4297d5c65159SKalle Valo msdu_list.sw_cookie[i]); 4298d5c65159SKalle Valo 4299d5c65159SKalle Valo spin_lock_bh(&rx_ring->idr_lock); 4300d5c65159SKalle Valo msdu = idr_find(&rx_ring->bufs_idr, buf_id); 4301d5c65159SKalle Valo spin_unlock_bh(&rx_ring->idr_lock); 4302d5c65159SKalle Valo if (!msdu) { 4303d5c65159SKalle Valo ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 4304d5c65159SKalle Valo "msdu_pop: invalid buf_id %d\n", buf_id); 4305d5c65159SKalle Valo break; 4306d5c65159SKalle Valo } 4307d5c65159SKalle Valo rxcb = ATH11K_SKB_RXCB(msdu); 4308d5c65159SKalle Valo if (!rxcb->unmapped) { 4309d5c65159SKalle Valo dma_unmap_single(ar->ab->dev, rxcb->paddr, 4310d5c65159SKalle Valo msdu->len + 4311d5c65159SKalle Valo skb_tailroom(msdu), 
4312d5c65159SKalle Valo DMA_FROM_DEVICE); 4313d5c65159SKalle Valo rxcb->unmapped = 1; 4314d5c65159SKalle Valo } 4315d5c65159SKalle Valo if (drop_mpdu) { 4316d5c65159SKalle Valo ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 4317d5c65159SKalle Valo "i %d drop msdu %p *ppdu_id %x\n", 4318d5c65159SKalle Valo i, msdu, *ppdu_id); 4319d5c65159SKalle Valo dev_kfree_skb_any(msdu); 4320d5c65159SKalle Valo msdu = NULL; 4321d5c65159SKalle Valo goto next_msdu; 4322d5c65159SKalle Valo } 4323d5c65159SKalle Valo 4324d5c65159SKalle Valo rx_desc = (struct hal_rx_desc *)msdu->data; 4325d5c65159SKalle Valo 4326d5c65159SKalle Valo rx_pkt_offset = sizeof(struct hal_rx_desc); 4327d5c65159SKalle Valo l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad(rx_desc); 4328d5c65159SKalle Valo 4329d5c65159SKalle Valo if (is_first_msdu) { 4330d5c65159SKalle Valo if (!ath11k_dp_rxdesc_mpdu_valid(rx_desc)) { 4331d5c65159SKalle Valo drop_mpdu = true; 4332d5c65159SKalle Valo dev_kfree_skb_any(msdu); 4333d5c65159SKalle Valo msdu = NULL; 4334d5c65159SKalle Valo pmon->mon_last_linkdesc_paddr = paddr; 4335d5c65159SKalle Valo goto next_msdu; 4336d5c65159SKalle Valo } 4337d5c65159SKalle Valo 4338d5c65159SKalle Valo msdu_ppdu_id = 4339d5c65159SKalle Valo ath11k_dp_rxdesc_get_ppduid(rx_desc); 4340d5c65159SKalle Valo 4341d5c65159SKalle Valo if (ath11k_dp_rx_mon_comp_ppduid(msdu_ppdu_id, 4342d5c65159SKalle Valo ppdu_id, 43435e02bc73SMiles Hu &rx_bufs_used)) { 43445e02bc73SMiles Hu if (rx_bufs_used) { 43455e02bc73SMiles Hu drop_mpdu = true; 43465e02bc73SMiles Hu dev_kfree_skb_any(msdu); 43475e02bc73SMiles Hu msdu = NULL; 43485e02bc73SMiles Hu goto next_msdu; 43495e02bc73SMiles Hu } 4350d5c65159SKalle Valo return rx_bufs_used; 43515e02bc73SMiles Hu } 4352d5c65159SKalle Valo pmon->mon_last_linkdesc_paddr = paddr; 4353d5c65159SKalle Valo is_first_msdu = false; 4354d5c65159SKalle Valo } 4355d5c65159SKalle Valo ath11k_dp_mon_get_buf_len(&msdu_list.msdu_info[i], 4356d5c65159SKalle Valo &is_frag, &total_len, 4357d5c65159SKalle Valo 
&frag_len, &msdu_cnt); 4358d5c65159SKalle Valo rx_buf_size = rx_pkt_offset + l2_hdr_offset + frag_len; 4359d5c65159SKalle Valo 4360d5c65159SKalle Valo ath11k_dp_pkt_set_pktlen(msdu, rx_buf_size); 4361d5c65159SKalle Valo 4362d5c65159SKalle Valo if (!(*head_msdu)) 4363d5c65159SKalle Valo *head_msdu = msdu; 4364d5c65159SKalle Valo else if (last) 4365d5c65159SKalle Valo last->next = msdu; 4366d5c65159SKalle Valo 4367d5c65159SKalle Valo last = msdu; 4368d5c65159SKalle Valo next_msdu: 4369d5c65159SKalle Valo pmon->mon_last_buf_cookie = msdu_list.sw_cookie[i]; 4370d5c65159SKalle Valo rx_bufs_used++; 4371d5c65159SKalle Valo spin_lock_bh(&rx_ring->idr_lock); 4372d5c65159SKalle Valo idr_remove(&rx_ring->bufs_idr, buf_id); 4373d5c65159SKalle Valo spin_unlock_bh(&rx_ring->idr_lock); 4374d5c65159SKalle Valo } 4375d5c65159SKalle Valo 4376d5c65159SKalle Valo ath11k_dp_rx_mon_next_link_desc_get(rx_msdu_link_desc, &paddr, 4377d5c65159SKalle Valo &sw_cookie, 4378d5c65159SKalle Valo &p_buf_addr_info); 4379d5c65159SKalle Valo 4380d5c65159SKalle Valo if (ath11k_dp_rx_monitor_link_desc_return(ar, 4381d5c65159SKalle Valo p_last_buf_addr_info, 4382d5c65159SKalle Valo dp->mac_id)) 4383d5c65159SKalle Valo ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 4384d5c65159SKalle Valo "dp_rx_monitor_link_desc_return failed"); 4385d5c65159SKalle Valo 4386d5c65159SKalle Valo p_last_buf_addr_info = p_buf_addr_info; 4387d5c65159SKalle Valo 4388d5c65159SKalle Valo } while (paddr && msdu_cnt); 4389d5c65159SKalle Valo 4390d5c65159SKalle Valo if (last) 4391d5c65159SKalle Valo last->next = NULL; 4392d5c65159SKalle Valo 4393d5c65159SKalle Valo *tail_msdu = msdu; 4394d5c65159SKalle Valo 4395d5c65159SKalle Valo if (msdu_cnt == 0) 4396d5c65159SKalle Valo *npackets = 1; 4397d5c65159SKalle Valo 4398d5c65159SKalle Valo return rx_bufs_used; 4399d5c65159SKalle Valo } 4400d5c65159SKalle Valo 4401d5c65159SKalle Valo static void ath11k_dp_rx_msdus_set_payload(struct sk_buff *msdu) 4402d5c65159SKalle Valo { 4403d5c65159SKalle Valo 
u32 rx_pkt_offset, l2_hdr_offset; 4404d5c65159SKalle Valo 4405d5c65159SKalle Valo rx_pkt_offset = sizeof(struct hal_rx_desc); 4406d5c65159SKalle Valo l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad((struct hal_rx_desc *)msdu->data); 4407d5c65159SKalle Valo skb_pull(msdu, rx_pkt_offset + l2_hdr_offset); 4408d5c65159SKalle Valo } 4409d5c65159SKalle Valo 4410d5c65159SKalle Valo static struct sk_buff * 4411d5c65159SKalle Valo ath11k_dp_rx_mon_merg_msdus(struct ath11k *ar, 4412d5c65159SKalle Valo u32 mac_id, struct sk_buff *head_msdu, 4413d5c65159SKalle Valo struct sk_buff *last_msdu, 4414d5c65159SKalle Valo struct ieee80211_rx_status *rxs) 4415d5c65159SKalle Valo { 4416d5c65159SKalle Valo struct sk_buff *msdu, *mpdu_buf, *prev_buf; 4417d5c65159SKalle Valo u32 decap_format, wifi_hdr_len; 4418d5c65159SKalle Valo struct hal_rx_desc *rx_desc; 4419d5c65159SKalle Valo char *hdr_desc; 4420d5c65159SKalle Valo u8 *dest; 4421d5c65159SKalle Valo struct ieee80211_hdr_3addr *wh; 4422d5c65159SKalle Valo 4423d5c65159SKalle Valo mpdu_buf = NULL; 4424d5c65159SKalle Valo 4425d5c65159SKalle Valo if (!head_msdu) 4426d5c65159SKalle Valo goto err_merge_fail; 4427d5c65159SKalle Valo 4428d5c65159SKalle Valo rx_desc = (struct hal_rx_desc *)head_msdu->data; 4429d5c65159SKalle Valo 4430d5c65159SKalle Valo if (ath11k_dp_rxdesc_get_mpdulen_err(rx_desc)) 4431d5c65159SKalle Valo return NULL; 4432d5c65159SKalle Valo 4433d5c65159SKalle Valo decap_format = ath11k_dp_rxdesc_get_decap_format(rx_desc); 4434d5c65159SKalle Valo 4435d5c65159SKalle Valo ath11k_dp_rx_h_ppdu(ar, rx_desc, rxs); 4436d5c65159SKalle Valo 4437d5c65159SKalle Valo if (decap_format == DP_RX_DECAP_TYPE_RAW) { 4438d5c65159SKalle Valo ath11k_dp_rx_msdus_set_payload(head_msdu); 4439d5c65159SKalle Valo 4440d5c65159SKalle Valo prev_buf = head_msdu; 4441d5c65159SKalle Valo msdu = head_msdu->next; 4442d5c65159SKalle Valo 4443d5c65159SKalle Valo while (msdu) { 4444d5c65159SKalle Valo ath11k_dp_rx_msdus_set_payload(msdu); 4445d5c65159SKalle Valo 
4446d5c65159SKalle Valo prev_buf = msdu; 4447d5c65159SKalle Valo msdu = msdu->next; 4448d5c65159SKalle Valo } 4449d5c65159SKalle Valo 4450d5c65159SKalle Valo prev_buf->next = NULL; 4451d5c65159SKalle Valo 4452d5c65159SKalle Valo skb_trim(prev_buf, prev_buf->len - HAL_RX_FCS_LEN); 4453d5c65159SKalle Valo } else if (decap_format == DP_RX_DECAP_TYPE_NATIVE_WIFI) { 4454d5c65159SKalle Valo __le16 qos_field; 4455d5c65159SKalle Valo u8 qos_pkt = 0; 4456d5c65159SKalle Valo 4457d5c65159SKalle Valo rx_desc = (struct hal_rx_desc *)head_msdu->data; 4458d5c65159SKalle Valo hdr_desc = ath11k_dp_rxdesc_get_80211hdr(rx_desc); 4459d5c65159SKalle Valo 4460d5c65159SKalle Valo /* Base size */ 4461d5c65159SKalle Valo wifi_hdr_len = sizeof(struct ieee80211_hdr_3addr); 4462d5c65159SKalle Valo wh = (struct ieee80211_hdr_3addr *)hdr_desc; 4463d5c65159SKalle Valo 4464d5c65159SKalle Valo if (ieee80211_is_data_qos(wh->frame_control)) { 4465d5c65159SKalle Valo struct ieee80211_qos_hdr *qwh = 4466d5c65159SKalle Valo (struct ieee80211_qos_hdr *)hdr_desc; 4467d5c65159SKalle Valo 4468d5c65159SKalle Valo qos_field = qwh->qos_ctrl; 4469d5c65159SKalle Valo qos_pkt = 1; 4470d5c65159SKalle Valo } 4471d5c65159SKalle Valo msdu = head_msdu; 4472d5c65159SKalle Valo 4473d5c65159SKalle Valo while (msdu) { 4474d5c65159SKalle Valo rx_desc = (struct hal_rx_desc *)msdu->data; 4475d5c65159SKalle Valo hdr_desc = ath11k_dp_rxdesc_get_80211hdr(rx_desc); 4476d5c65159SKalle Valo 4477d5c65159SKalle Valo if (qos_pkt) { 4478d5c65159SKalle Valo dest = skb_push(msdu, sizeof(__le16)); 4479d5c65159SKalle Valo if (!dest) 4480d5c65159SKalle Valo goto err_merge_fail; 4481d5c65159SKalle Valo memcpy(dest, hdr_desc, wifi_hdr_len); 4482d5c65159SKalle Valo memcpy(dest + wifi_hdr_len, 4483d5c65159SKalle Valo (u8 *)&qos_field, sizeof(__le16)); 4484d5c65159SKalle Valo } 4485d5c65159SKalle Valo ath11k_dp_rx_msdus_set_payload(msdu); 4486d5c65159SKalle Valo prev_buf = msdu; 4487d5c65159SKalle Valo msdu = msdu->next; 4488d5c65159SKalle 
Valo } 4489d5c65159SKalle Valo dest = skb_put(prev_buf, HAL_RX_FCS_LEN); 4490d5c65159SKalle Valo if (!dest) 4491d5c65159SKalle Valo goto err_merge_fail; 4492d5c65159SKalle Valo 4493d5c65159SKalle Valo ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 4494d5c65159SKalle Valo "mpdu_buf %pK mpdu_buf->len %u", 4495d5c65159SKalle Valo prev_buf, prev_buf->len); 4496d5c65159SKalle Valo } else { 4497d5c65159SKalle Valo ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 4498d5c65159SKalle Valo "decap format %d is not supported!\n", 4499d5c65159SKalle Valo decap_format); 4500d5c65159SKalle Valo goto err_merge_fail; 4501d5c65159SKalle Valo } 4502d5c65159SKalle Valo 4503d5c65159SKalle Valo return head_msdu; 4504d5c65159SKalle Valo 4505d5c65159SKalle Valo err_merge_fail: 4506d5c65159SKalle Valo if (mpdu_buf && decap_format != DP_RX_DECAP_TYPE_RAW) { 4507d5c65159SKalle Valo ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 4508d5c65159SKalle Valo "err_merge_fail mpdu_buf %pK", mpdu_buf); 4509d5c65159SKalle Valo /* Free the head buffer */ 4510d5c65159SKalle Valo dev_kfree_skb_any(mpdu_buf); 4511d5c65159SKalle Valo } 4512d5c65159SKalle Valo return NULL; 4513d5c65159SKalle Valo } 4514d5c65159SKalle Valo 4515d5c65159SKalle Valo static int ath11k_dp_rx_mon_deliver(struct ath11k *ar, u32 mac_id, 4516d5c65159SKalle Valo struct sk_buff *head_msdu, 4517d5c65159SKalle Valo struct sk_buff *tail_msdu, 4518d5c65159SKalle Valo struct napi_struct *napi) 4519d5c65159SKalle Valo { 4520d5c65159SKalle Valo struct ath11k_pdev_dp *dp = &ar->dp; 4521d5c65159SKalle Valo struct sk_buff *mon_skb, *skb_next, *header; 4522d5c65159SKalle Valo struct ieee80211_rx_status *rxs = &dp->rx_status, *status; 4523d5c65159SKalle Valo 4524d5c65159SKalle Valo mon_skb = ath11k_dp_rx_mon_merg_msdus(ar, mac_id, head_msdu, 4525d5c65159SKalle Valo tail_msdu, rxs); 4526d5c65159SKalle Valo 4527d5c65159SKalle Valo if (!mon_skb) 4528d5c65159SKalle Valo goto mon_deliver_fail; 4529d5c65159SKalle Valo 4530d5c65159SKalle Valo header = mon_skb; 4531d5c65159SKalle Valo 
4532d5c65159SKalle Valo rxs->flag = 0; 4533d5c65159SKalle Valo do { 4534d5c65159SKalle Valo skb_next = mon_skb->next; 4535d5c65159SKalle Valo if (!skb_next) 4536d5c65159SKalle Valo rxs->flag &= ~RX_FLAG_AMSDU_MORE; 4537d5c65159SKalle Valo else 4538d5c65159SKalle Valo rxs->flag |= RX_FLAG_AMSDU_MORE; 4539d5c65159SKalle Valo 4540d5c65159SKalle Valo if (mon_skb == header) { 4541d5c65159SKalle Valo header = NULL; 4542d5c65159SKalle Valo rxs->flag &= ~RX_FLAG_ALLOW_SAME_PN; 4543d5c65159SKalle Valo } else { 4544d5c65159SKalle Valo rxs->flag |= RX_FLAG_ALLOW_SAME_PN; 4545d5c65159SKalle Valo } 4546d5c65159SKalle Valo rxs->flag |= RX_FLAG_ONLY_MONITOR; 4547d5c65159SKalle Valo 4548d5c65159SKalle Valo status = IEEE80211_SKB_RXCB(mon_skb); 4549d5c65159SKalle Valo *status = *rxs; 4550d5c65159SKalle Valo 4551d5c65159SKalle Valo ath11k_dp_rx_deliver_msdu(ar, napi, mon_skb); 4552d5c65159SKalle Valo mon_skb = skb_next; 45535e02bc73SMiles Hu } while (mon_skb); 4554d5c65159SKalle Valo rxs->flag = 0; 4555d5c65159SKalle Valo 4556d5c65159SKalle Valo return 0; 4557d5c65159SKalle Valo 4558d5c65159SKalle Valo mon_deliver_fail: 4559d5c65159SKalle Valo mon_skb = head_msdu; 4560d5c65159SKalle Valo while (mon_skb) { 4561d5c65159SKalle Valo skb_next = mon_skb->next; 4562d5c65159SKalle Valo dev_kfree_skb_any(mon_skb); 4563d5c65159SKalle Valo mon_skb = skb_next; 4564d5c65159SKalle Valo } 4565d5c65159SKalle Valo return -EINVAL; 4566d5c65159SKalle Valo } 4567d5c65159SKalle Valo 4568d5c65159SKalle Valo static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, u32 quota, 4569d5c65159SKalle Valo struct napi_struct *napi) 4570d5c65159SKalle Valo { 4571d5c65159SKalle Valo struct ath11k_pdev_dp *dp = &ar->dp; 4572d5c65159SKalle Valo struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data; 4573d5c65159SKalle Valo void *ring_entry; 4574d5c65159SKalle Valo void *mon_dst_srng; 4575d5c65159SKalle Valo u32 ppdu_id; 4576d5c65159SKalle Valo u32 rx_bufs_used; 4577d5c65159SKalle Valo struct 
ath11k_pdev_mon_stats *rx_mon_stats;
	u32 npackets = 0;

	mon_dst_srng = &ar->ab->hal.srng_list[dp->rxdma_mon_dst_ring.ring_id];

	if (!mon_dst_srng) {
		ath11k_warn(ar->ab,
			    "HAL Monitor Destination Ring Init Failed -- %pK",
			    mon_dst_srng);
		return;
	}

	/* mon_lock serializes destination-ring reaping for this pdev. */
	spin_lock_bh(&pmon->mon_lock);

	ath11k_hal_srng_access_begin(ar->ab, mon_dst_srng);

	/* PPDU id last parsed from the status ring; destination entries are
	 * consumed only while they belong to this PPDU.
	 */
	ppdu_id = pmon->mon_ppdu_info.ppdu_id;
	rx_bufs_used = 0;
	rx_mon_stats = &pmon->rx_mon_stats;

	/* Reap MPDUs from the monitor destination ring until it is empty or
	 * an entry for a different PPDU is encountered.
	 */
	while ((ring_entry = ath11k_hal_srng_dst_peek(ar->ab, mon_dst_srng))) {
		struct sk_buff *head_msdu, *tail_msdu;

		head_msdu = NULL;
		tail_msdu = NULL;

		/* Pops one MPDU (chain of MSDU skbs) and updates ppdu_id from
		 * the ring entry; returns the number of rx buffers consumed.
		 */
		rx_bufs_used += ath11k_dp_rx_mon_mpdu_pop(ar, ring_entry,
							  &head_msdu,
							  &tail_msdu,
							  &npackets, &ppdu_id);

		if (ppdu_id != pmon->mon_ppdu_info.ppdu_id) {
			/* Entry belongs to a new PPDU: reset the status state
			 * machine to START and stop; the status ring must be
			 * parsed again before more entries are reaped.
			 */
			pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
			ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
				   "dest_rx: new ppdu_id %x != status ppdu_id %x",
				   ppdu_id, pmon->mon_ppdu_info.ppdu_id);
			break;
		}
		if (head_msdu && tail_msdu) {
			/* Deliver the completed MPDU up through the monitor
			 * interface (mac80211 via the napi context).
			 */
			ath11k_dp_rx_mon_deliver(ar, dp->mac_id, head_msdu,
						 tail_msdu, napi);
			rx_mon_stats->dest_mpdu_done++;
		}

		ring_entry = ath11k_hal_srng_dst_get_next_entry(ar->ab,
								mon_dst_srng);
	}
	ath11k_hal_srng_access_end(ar->ab, mon_dst_srng);

	spin_unlock_bh(&pmon->mon_lock);

	if (rx_bufs_used) {
		rx_mon_stats->dest_ppdu_done++;
		/* Return the reaped buffers to the RXDMA monitor buffer
		 * ring; GFP_ATOMIC since this runs in napi/softirq context.
		 */
		ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id,
					   &dp->rxdma_mon_buf_ring,
					   rx_bufs_used,
					   HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC);
	}
}

/* Drain the queued monitor status skbs, parsing TLVs into the PPDU info
 * state. Each time a full PPDU's status has been parsed, process the
 * corresponding entries on the monitor destination ring.
 */
static void ath11k_dp_rx_mon_status_process_tlv(struct ath11k *ar,
						u32 quota,
						struct napi_struct *napi)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
	struct hal_rx_mon_ppdu_info *ppdu_info;
	struct sk_buff *status_skb;
	u32 tlv_status = HAL_TLV_STATUS_BUF_DONE;
	struct ath11k_pdev_mon_stats *rx_mon_stats;

	ppdu_info = &pmon->mon_ppdu_info;
	rx_mon_stats = &pmon->rx_mon_stats;

	/* Only run while the PPDU state machine is waiting for status. */
	if (pmon->mon_ppdu_status != DP_PPDU_STATUS_START)
		return;

	while (!skb_queue_empty(&pmon->rx_status_q)) {
		status_skb = skb_dequeue(&pmon->rx_status_q);

		tlv_status = ath11k_hal_rx_parse_mon_status(ar->ab, ppdu_info,
							    status_skb);
		if (tlv_status == HAL_TLV_STATUS_PPDU_DONE) {
			rx_mon_stats->status_ppdu_done++;
			/* Status for one PPDU is complete: reap its data from
			 * the destination ring, then re-arm for the next PPDU.
			 */
			pmon->mon_ppdu_status = DP_PPDU_STATUS_DONE;
			ath11k_dp_rx_mon_dest_process(ar, quota, napi);
			pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
		}
		/* Status skb contents have been parsed into ppdu_info. */
		dev_kfree_skb_any(status_skb);
	}
}

/* Monitor-mode RX processing: reap status buffers into the per-pdev
 * status queue, then parse them (which in turn drives destination-ring
 * processing). Returns the number of status buffers reaped.
 */
static int ath11k_dp_mon_process_rx(struct ath11k_base *ab, int mac_id,
				    struct napi_struct *napi, int budget)
{
	struct ath11k *ar = ab->pdevs[mac_id].ar;
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
	int num_buffs_reaped = 0;

	/* budget is decremented by the reap; the remainder is the quota
	 * passed on to TLV processing below.
	 */
	num_buffs_reaped = ath11k_dp_rx_reap_mon_status_ring(ar->ab, dp->mac_id, &budget,
							     &pmon->rx_status_q);
	if (num_buffs_reaped)
		ath11k_dp_rx_mon_status_process_tlv(ar, budget, napi);

	return num_buffs_reaped;
}

/* NAPI entry point for the monitor rings of one pdev: full monitor
 * processing when monitor mode is enabled, otherwise status-ring-only
 * processing.
 */
int ath11k_dp_rx_process_mon_rings(struct ath11k_base *ab, int mac_id,
				   struct napi_struct *napi, int budget)
{
	struct ath11k *ar = ab->pdevs[mac_id].ar;
	int ret = 0;

	if (test_bit(ATH11K_FLAG_MONITOR_ENABLED, &ar->monitor_flags))
		ret = ath11k_dp_mon_process_rx(ab, mac_id, napi, budget);
	else
		ret = ath11k_dp_rx_process_mon_status(ab, mac_id, napi, budget);
return ret; 4696d5c65159SKalle Valo } 4697d5c65159SKalle Valo 4698d5c65159SKalle Valo static int ath11k_dp_rx_pdev_mon_status_attach(struct ath11k *ar) 4699d5c65159SKalle Valo { 4700d5c65159SKalle Valo struct ath11k_pdev_dp *dp = &ar->dp; 4701d5c65159SKalle Valo struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data; 4702d5c65159SKalle Valo 4703d5c65159SKalle Valo skb_queue_head_init(&pmon->rx_status_q); 4704d5c65159SKalle Valo 4705d5c65159SKalle Valo pmon->mon_ppdu_status = DP_PPDU_STATUS_START; 4706d5c65159SKalle Valo 4707d5c65159SKalle Valo memset(&pmon->rx_mon_stats, 0, 4708d5c65159SKalle Valo sizeof(pmon->rx_mon_stats)); 4709d5c65159SKalle Valo return 0; 4710d5c65159SKalle Valo } 4711d5c65159SKalle Valo 4712d5c65159SKalle Valo int ath11k_dp_rx_pdev_mon_attach(struct ath11k *ar) 4713d5c65159SKalle Valo { 4714d5c65159SKalle Valo struct ath11k_pdev_dp *dp = &ar->dp; 4715d5c65159SKalle Valo struct ath11k_mon_data *pmon = &dp->mon_data; 4716d5c65159SKalle Valo struct hal_srng *mon_desc_srng = NULL; 4717d5c65159SKalle Valo struct dp_srng *dp_srng; 4718d5c65159SKalle Valo int ret = 0; 4719d5c65159SKalle Valo u32 n_link_desc = 0; 4720d5c65159SKalle Valo 4721d5c65159SKalle Valo ret = ath11k_dp_rx_pdev_mon_status_attach(ar); 4722d5c65159SKalle Valo if (ret) { 4723d5c65159SKalle Valo ath11k_warn(ar->ab, "pdev_mon_status_attach() failed"); 4724d5c65159SKalle Valo return ret; 4725d5c65159SKalle Valo } 4726d5c65159SKalle Valo 4727d5c65159SKalle Valo dp_srng = &dp->rxdma_mon_desc_ring; 4728d5c65159SKalle Valo n_link_desc = dp_srng->size / 4729d5c65159SKalle Valo ath11k_hal_srng_get_entrysize(HAL_RXDMA_MONITOR_DESC); 4730d5c65159SKalle Valo mon_desc_srng = 4731d5c65159SKalle Valo &ar->ab->hal.srng_list[dp->rxdma_mon_desc_ring.ring_id]; 4732d5c65159SKalle Valo 4733d5c65159SKalle Valo ret = ath11k_dp_link_desc_setup(ar->ab, pmon->link_desc_banks, 4734d5c65159SKalle Valo HAL_RXDMA_MONITOR_DESC, mon_desc_srng, 4735d5c65159SKalle Valo n_link_desc); 
4736d5c65159SKalle Valo if (ret) { 4737d5c65159SKalle Valo ath11k_warn(ar->ab, "mon_link_desc_pool_setup() failed"); 4738d5c65159SKalle Valo return ret; 4739d5c65159SKalle Valo } 4740d5c65159SKalle Valo pmon->mon_last_linkdesc_paddr = 0; 4741d5c65159SKalle Valo pmon->mon_last_buf_cookie = DP_RX_DESC_COOKIE_MAX + 1; 4742d5c65159SKalle Valo spin_lock_init(&pmon->mon_lock); 4743d5c65159SKalle Valo return 0; 4744d5c65159SKalle Valo } 4745d5c65159SKalle Valo 4746d5c65159SKalle Valo static int ath11k_dp_mon_link_free(struct ath11k *ar) 4747d5c65159SKalle Valo { 4748d5c65159SKalle Valo struct ath11k_pdev_dp *dp = &ar->dp; 4749d5c65159SKalle Valo struct ath11k_mon_data *pmon = &dp->mon_data; 4750d5c65159SKalle Valo 4751d5c65159SKalle Valo ath11k_dp_link_desc_cleanup(ar->ab, pmon->link_desc_banks, 4752d5c65159SKalle Valo HAL_RXDMA_MONITOR_DESC, 4753d5c65159SKalle Valo &dp->rxdma_mon_desc_ring); 4754d5c65159SKalle Valo return 0; 4755d5c65159SKalle Valo } 4756d5c65159SKalle Valo 4757d5c65159SKalle Valo int ath11k_dp_rx_pdev_mon_detach(struct ath11k *ar) 4758d5c65159SKalle Valo { 4759d5c65159SKalle Valo ath11k_dp_mon_link_free(ar); 4760d5c65159SKalle Valo return 0; 4761d5c65159SKalle Valo } 4762