/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
 * Copyright (c) 2018, The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
175e3dd157SKalle Valo */ 185e3dd157SKalle Valo 19edb8236dSMichal Kazior #include "core.h" 205e3dd157SKalle Valo #include "htc.h" 215e3dd157SKalle Valo #include "htt.h" 225e3dd157SKalle Valo #include "txrx.h" 235e3dd157SKalle Valo #include "debug.h" 24a9bf0506SKalle Valo #include "trace.h" 25aa5b4fbcSMichal Kazior #include "mac.h" 265e3dd157SKalle Valo 275e3dd157SKalle Valo #include <linux/log2.h> 28235b9c42SVenkateswara Naralasetty #include <linux/bitfield.h> 295e3dd157SKalle Valo 305e3dd157SKalle Valo /* when under memory pressure rx ring refill may fail and needs a retry */ 315e3dd157SKalle Valo #define HTT_RX_RING_REFILL_RETRY_MS 50 325e3dd157SKalle Valo 335c86d97bSRajkumar Manoharan #define HTT_RX_RING_REFILL_RESCHED_MS 5 345c86d97bSRajkumar Manoharan 35f6dc2095SMichal Kazior static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb); 36f6dc2095SMichal Kazior 37c545070eSMichal Kazior static struct sk_buff * 38a91a626bSGovind Singh ath10k_htt_rx_find_skb_paddr(struct ath10k *ar, u64 paddr) 39c545070eSMichal Kazior { 40c545070eSMichal Kazior struct ath10k_skb_rxcb *rxcb; 41c545070eSMichal Kazior 42c545070eSMichal Kazior hash_for_each_possible(ar->htt.rx_ring.skb_table, rxcb, hlist, paddr) 43c545070eSMichal Kazior if (rxcb->paddr == paddr) 44c545070eSMichal Kazior return ATH10K_RXCB_SKB(rxcb); 45c545070eSMichal Kazior 46c545070eSMichal Kazior WARN_ON_ONCE(1); 47c545070eSMichal Kazior return NULL; 48c545070eSMichal Kazior } 49c545070eSMichal Kazior 505e3dd157SKalle Valo static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt) 515e3dd157SKalle Valo { 525e3dd157SKalle Valo struct sk_buff *skb; 53c545070eSMichal Kazior struct ath10k_skb_rxcb *rxcb; 54c545070eSMichal Kazior struct hlist_node *n; 555e3dd157SKalle Valo int i; 565e3dd157SKalle Valo 57c545070eSMichal Kazior if (htt->rx_ring.in_ord_rx) { 58c545070eSMichal Kazior hash_for_each_safe(htt->rx_ring.skb_table, i, n, rxcb, hlist) { 59c545070eSMichal Kazior skb = ATH10K_RXCB_SKB(rxcb); 60c545070eSMichal Kazior 
dma_unmap_single(htt->ar->dev, rxcb->paddr, 61c545070eSMichal Kazior skb->len + skb_tailroom(skb), 62c545070eSMichal Kazior DMA_FROM_DEVICE); 63c545070eSMichal Kazior hash_del(&rxcb->hlist); 64c545070eSMichal Kazior dev_kfree_skb_any(skb); 65c545070eSMichal Kazior } 66c545070eSMichal Kazior } else { 67c545070eSMichal Kazior for (i = 0; i < htt->rx_ring.size; i++) { 685e3dd157SKalle Valo skb = htt->rx_ring.netbufs_ring[i]; 69c545070eSMichal Kazior if (!skb) 70c545070eSMichal Kazior continue; 71c545070eSMichal Kazior 72c545070eSMichal Kazior rxcb = ATH10K_SKB_RXCB(skb); 73c545070eSMichal Kazior dma_unmap_single(htt->ar->dev, rxcb->paddr, 745e3dd157SKalle Valo skb->len + skb_tailroom(skb), 755e3dd157SKalle Valo DMA_FROM_DEVICE); 765e3dd157SKalle Valo dev_kfree_skb_any(skb); 775e3dd157SKalle Valo } 78c545070eSMichal Kazior } 795e3dd157SKalle Valo 805e3dd157SKalle Valo htt->rx_ring.fill_cnt = 0; 81c545070eSMichal Kazior hash_init(htt->rx_ring.skb_table); 82c545070eSMichal Kazior memset(htt->rx_ring.netbufs_ring, 0, 83c545070eSMichal Kazior htt->rx_ring.size * sizeof(htt->rx_ring.netbufs_ring[0])); 845e3dd157SKalle Valo } 855e3dd157SKalle Valo 86a91a626bSGovind Singh static size_t ath10k_htt_get_rx_ring_size_32(struct ath10k_htt *htt) 87a91a626bSGovind Singh { 88a91a626bSGovind Singh return htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring_32); 89a91a626bSGovind Singh } 90a91a626bSGovind Singh 91a91a626bSGovind Singh static size_t ath10k_htt_get_rx_ring_size_64(struct ath10k_htt *htt) 92a91a626bSGovind Singh { 93a91a626bSGovind Singh return htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring_64); 94a91a626bSGovind Singh } 95a91a626bSGovind Singh 96a91a626bSGovind Singh static void ath10k_htt_config_paddrs_ring_32(struct ath10k_htt *htt, 97a91a626bSGovind Singh void *vaddr) 98a91a626bSGovind Singh { 99a91a626bSGovind Singh htt->rx_ring.paddrs_ring_32 = vaddr; 100a91a626bSGovind Singh } 101a91a626bSGovind Singh 102a91a626bSGovind Singh static void 
ath10k_htt_config_paddrs_ring_64(struct ath10k_htt *htt, 103a91a626bSGovind Singh void *vaddr) 104a91a626bSGovind Singh { 105a91a626bSGovind Singh htt->rx_ring.paddrs_ring_64 = vaddr; 106a91a626bSGovind Singh } 107a91a626bSGovind Singh 108a91a626bSGovind Singh static void ath10k_htt_set_paddrs_ring_32(struct ath10k_htt *htt, 109a91a626bSGovind Singh dma_addr_t paddr, int idx) 110a91a626bSGovind Singh { 111a91a626bSGovind Singh htt->rx_ring.paddrs_ring_32[idx] = __cpu_to_le32(paddr); 112a91a626bSGovind Singh } 113a91a626bSGovind Singh 114a91a626bSGovind Singh static void ath10k_htt_set_paddrs_ring_64(struct ath10k_htt *htt, 115a91a626bSGovind Singh dma_addr_t paddr, int idx) 116a91a626bSGovind Singh { 117a91a626bSGovind Singh htt->rx_ring.paddrs_ring_64[idx] = __cpu_to_le64(paddr); 118a91a626bSGovind Singh } 119a91a626bSGovind Singh 120a91a626bSGovind Singh static void ath10k_htt_reset_paddrs_ring_32(struct ath10k_htt *htt, int idx) 121a91a626bSGovind Singh { 122a91a626bSGovind Singh htt->rx_ring.paddrs_ring_32[idx] = 0; 123a91a626bSGovind Singh } 124a91a626bSGovind Singh 125a91a626bSGovind Singh static void ath10k_htt_reset_paddrs_ring_64(struct ath10k_htt *htt, int idx) 126a91a626bSGovind Singh { 127a91a626bSGovind Singh htt->rx_ring.paddrs_ring_64[idx] = 0; 128a91a626bSGovind Singh } 129a91a626bSGovind Singh 130a91a626bSGovind Singh static void *ath10k_htt_get_vaddr_ring_32(struct ath10k_htt *htt) 131a91a626bSGovind Singh { 132a91a626bSGovind Singh return (void *)htt->rx_ring.paddrs_ring_32; 133a91a626bSGovind Singh } 134a91a626bSGovind Singh 135a91a626bSGovind Singh static void *ath10k_htt_get_vaddr_ring_64(struct ath10k_htt *htt) 136a91a626bSGovind Singh { 137a91a626bSGovind Singh return (void *)htt->rx_ring.paddrs_ring_64; 138a91a626bSGovind Singh } 139a91a626bSGovind Singh 1405e3dd157SKalle Valo static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num) 1415e3dd157SKalle Valo { 1425e3dd157SKalle Valo struct htt_rx_desc *rx_desc; 
143c545070eSMichal Kazior struct ath10k_skb_rxcb *rxcb; 1445e3dd157SKalle Valo struct sk_buff *skb; 1455e3dd157SKalle Valo dma_addr_t paddr; 1465e3dd157SKalle Valo int ret = 0, idx; 1475e3dd157SKalle Valo 148c545070eSMichal Kazior /* The Full Rx Reorder firmware has no way of telling the host 149c545070eSMichal Kazior * implicitly when it copied HTT Rx Ring buffers to MAC Rx Ring. 150c545070eSMichal Kazior * To keep things simple make sure ring is always half empty. This 151c545070eSMichal Kazior * guarantees there'll be no replenishment overruns possible. 152c545070eSMichal Kazior */ 153c545070eSMichal Kazior BUILD_BUG_ON(HTT_RX_RING_FILL_LEVEL >= HTT_RX_RING_SIZE / 2); 154c545070eSMichal Kazior 1558cc7f26cSKalle Valo idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr); 1565e3dd157SKalle Valo while (num > 0) { 1575e3dd157SKalle Valo skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN); 1585e3dd157SKalle Valo if (!skb) { 1595e3dd157SKalle Valo ret = -ENOMEM; 1605e3dd157SKalle Valo goto fail; 1615e3dd157SKalle Valo } 1625e3dd157SKalle Valo 1635e3dd157SKalle Valo if (!IS_ALIGNED((unsigned long)skb->data, HTT_RX_DESC_ALIGN)) 1645e3dd157SKalle Valo skb_pull(skb, 1655e3dd157SKalle Valo PTR_ALIGN(skb->data, HTT_RX_DESC_ALIGN) - 1665e3dd157SKalle Valo skb->data); 1675e3dd157SKalle Valo 1685e3dd157SKalle Valo /* Clear rx_desc attention word before posting to Rx ring */ 1695e3dd157SKalle Valo rx_desc = (struct htt_rx_desc *)skb->data; 1705e3dd157SKalle Valo rx_desc->attention.flags = __cpu_to_le32(0); 1715e3dd157SKalle Valo 1725e3dd157SKalle Valo paddr = dma_map_single(htt->ar->dev, skb->data, 1735e3dd157SKalle Valo skb->len + skb_tailroom(skb), 1745e3dd157SKalle Valo DMA_FROM_DEVICE); 1755e3dd157SKalle Valo 1765e3dd157SKalle Valo if (unlikely(dma_mapping_error(htt->ar->dev, paddr))) { 1775e3dd157SKalle Valo dev_kfree_skb_any(skb); 1785e3dd157SKalle Valo ret = -ENOMEM; 1795e3dd157SKalle Valo goto fail; 1805e3dd157SKalle Valo } 1815e3dd157SKalle Valo 182c545070eSMichal 
Kazior rxcb = ATH10K_SKB_RXCB(skb); 183c545070eSMichal Kazior rxcb->paddr = paddr; 1845e3dd157SKalle Valo htt->rx_ring.netbufs_ring[idx] = skb; 1859a5511d5SErik Stromdahl ath10k_htt_set_paddrs_ring(htt, paddr, idx); 1865e3dd157SKalle Valo htt->rx_ring.fill_cnt++; 1875e3dd157SKalle Valo 188c545070eSMichal Kazior if (htt->rx_ring.in_ord_rx) { 189c545070eSMichal Kazior hash_add(htt->rx_ring.skb_table, 190c545070eSMichal Kazior &ATH10K_SKB_RXCB(skb)->hlist, 191a91a626bSGovind Singh paddr); 192c545070eSMichal Kazior } 193c545070eSMichal Kazior 1945e3dd157SKalle Valo num--; 1955e3dd157SKalle Valo idx++; 1965e3dd157SKalle Valo idx &= htt->rx_ring.size_mask; 1975e3dd157SKalle Valo } 1985e3dd157SKalle Valo 1995e3dd157SKalle Valo fail: 2005de6dfc8SVasanthakumar Thiagarajan /* 2015de6dfc8SVasanthakumar Thiagarajan * Make sure the rx buffer is updated before available buffer 2025de6dfc8SVasanthakumar Thiagarajan * index to avoid any potential rx ring corruption. 2035de6dfc8SVasanthakumar Thiagarajan */ 2045de6dfc8SVasanthakumar Thiagarajan mb(); 2058cc7f26cSKalle Valo *htt->rx_ring.alloc_idx.vaddr = __cpu_to_le32(idx); 2065e3dd157SKalle Valo return ret; 2075e3dd157SKalle Valo } 2085e3dd157SKalle Valo 2095e3dd157SKalle Valo static int ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num) 2105e3dd157SKalle Valo { 2115e3dd157SKalle Valo lockdep_assert_held(&htt->rx_ring.lock); 2125e3dd157SKalle Valo return __ath10k_htt_rx_ring_fill_n(htt, num); 2135e3dd157SKalle Valo } 2145e3dd157SKalle Valo 2155e3dd157SKalle Valo static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt) 2165e3dd157SKalle Valo { 2176e712d42SMichal Kazior int ret, num_deficit, num_to_fill; 2185e3dd157SKalle Valo 2196e712d42SMichal Kazior /* Refilling the whole RX ring buffer proves to be a bad idea. The 2206e712d42SMichal Kazior * reason is RX may take up significant amount of CPU cycles and starve 2216e712d42SMichal Kazior * other tasks, e.g. 
TX on an ethernet device while acting as a bridge 2226e712d42SMichal Kazior * with ath10k wlan interface. This ended up with very poor performance 2236e712d42SMichal Kazior * once CPU the host system was overwhelmed with RX on ath10k. 2246e712d42SMichal Kazior * 2256e712d42SMichal Kazior * By limiting the number of refills the replenishing occurs 2266e712d42SMichal Kazior * progressively. This in turns makes use of the fact tasklets are 2276e712d42SMichal Kazior * processed in FIFO order. This means actual RX processing can starve 2286e712d42SMichal Kazior * out refilling. If there's not enough buffers on RX ring FW will not 2296e712d42SMichal Kazior * report RX until it is refilled with enough buffers. This 2306e712d42SMichal Kazior * automatically balances load wrt to CPU power. 2316e712d42SMichal Kazior * 2326e712d42SMichal Kazior * This probably comes at a cost of lower maximum throughput but 233d6dfe25cSMarcin Rokicki * improves the average and stability. 234d6dfe25cSMarcin Rokicki */ 2355e3dd157SKalle Valo spin_lock_bh(&htt->rx_ring.lock); 2366e712d42SMichal Kazior num_deficit = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt; 2376e712d42SMichal Kazior num_to_fill = min(ATH10K_HTT_MAX_NUM_REFILL, num_deficit); 2386e712d42SMichal Kazior num_deficit -= num_to_fill; 2395e3dd157SKalle Valo ret = ath10k_htt_rx_ring_fill_n(htt, num_to_fill); 2405e3dd157SKalle Valo if (ret == -ENOMEM) { 2415e3dd157SKalle Valo /* 2425e3dd157SKalle Valo * Failed to fill it to the desired level - 2435e3dd157SKalle Valo * we'll start a timer and try again next time. 2445e3dd157SKalle Valo * As long as enough buffers are left in the ring for 2455e3dd157SKalle Valo * another A-MPDU rx, no special recovery is needed. 
2465e3dd157SKalle Valo */ 2475e3dd157SKalle Valo mod_timer(&htt->rx_ring.refill_retry_timer, jiffies + 2485e3dd157SKalle Valo msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS)); 2496e712d42SMichal Kazior } else if (num_deficit > 0) { 2505c86d97bSRajkumar Manoharan mod_timer(&htt->rx_ring.refill_retry_timer, jiffies + 2515c86d97bSRajkumar Manoharan msecs_to_jiffies(HTT_RX_RING_REFILL_RESCHED_MS)); 2525e3dd157SKalle Valo } 2535e3dd157SKalle Valo spin_unlock_bh(&htt->rx_ring.lock); 2545e3dd157SKalle Valo } 2555e3dd157SKalle Valo 2567ac76764SKees Cook static void ath10k_htt_rx_ring_refill_retry(struct timer_list *t) 2575e3dd157SKalle Valo { 2587ac76764SKees Cook struct ath10k_htt *htt = from_timer(htt, t, rx_ring.refill_retry_timer); 259af762c0bSKalle Valo 2605e3dd157SKalle Valo ath10k_htt_rx_msdu_buff_replenish(htt); 2615e3dd157SKalle Valo } 2625e3dd157SKalle Valo 263c545070eSMichal Kazior int ath10k_htt_rx_ring_refill(struct ath10k *ar) 2643e841fd0SMichal Kazior { 265c545070eSMichal Kazior struct ath10k_htt *htt = &ar->htt; 266c545070eSMichal Kazior int ret; 2673e841fd0SMichal Kazior 268f88d4934SErik Stromdahl if (ar->dev_type == ATH10K_DEV_TYPE_HL) 269f88d4934SErik Stromdahl return 0; 270f88d4934SErik Stromdahl 271c545070eSMichal Kazior spin_lock_bh(&htt->rx_ring.lock); 272c545070eSMichal Kazior ret = ath10k_htt_rx_ring_fill_n(htt, (htt->rx_ring.fill_level - 273c545070eSMichal Kazior htt->rx_ring.fill_cnt)); 2743e841fd0SMichal Kazior 275c545070eSMichal Kazior if (ret) 276c545070eSMichal Kazior ath10k_htt_rx_ring_free(htt); 277c545070eSMichal Kazior 278168f75f1SBen Greear spin_unlock_bh(&htt->rx_ring.lock); 279168f75f1SBen Greear 280c545070eSMichal Kazior return ret; 2813e841fd0SMichal Kazior } 2823e841fd0SMichal Kazior 28395bf21f9SMichal Kazior void ath10k_htt_rx_free(struct ath10k_htt *htt) 2845e3dd157SKalle Valo { 285f88d4934SErik Stromdahl if (htt->ar->dev_type == ATH10K_DEV_TYPE_HL) 286f88d4934SErik Stromdahl return; 287f88d4934SErik Stromdahl 2885e3dd157SKalle Valo 
del_timer_sync(&htt->rx_ring.refill_retry_timer); 2896c5151a9SMichal Kazior 290deba1b9eSRajkumar Manoharan skb_queue_purge(&htt->rx_msdus_q); 291c545070eSMichal Kazior skb_queue_purge(&htt->rx_in_ord_compl_q); 292426e10eaSMichal Kazior skb_queue_purge(&htt->tx_fetch_ind_q); 2935e3dd157SKalle Valo 294168f75f1SBen Greear spin_lock_bh(&htt->rx_ring.lock); 295c545070eSMichal Kazior ath10k_htt_rx_ring_free(htt); 296168f75f1SBen Greear spin_unlock_bh(&htt->rx_ring.lock); 2975e3dd157SKalle Valo 2985e3dd157SKalle Valo dma_free_coherent(htt->ar->dev, 2999a5511d5SErik Stromdahl ath10k_htt_get_rx_ring_size(htt), 3009a5511d5SErik Stromdahl ath10k_htt_get_vaddr_ring(htt), 3015e3dd157SKalle Valo htt->rx_ring.base_paddr); 3025e3dd157SKalle Valo 3035e3dd157SKalle Valo dma_free_coherent(htt->ar->dev, 3045e3dd157SKalle Valo sizeof(*htt->rx_ring.alloc_idx.vaddr), 3055e3dd157SKalle Valo htt->rx_ring.alloc_idx.vaddr, 3065e3dd157SKalle Valo htt->rx_ring.alloc_idx.paddr); 3075e3dd157SKalle Valo 3085e3dd157SKalle Valo kfree(htt->rx_ring.netbufs_ring); 3095e3dd157SKalle Valo } 3105e3dd157SKalle Valo 3115e3dd157SKalle Valo static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt) 3125e3dd157SKalle Valo { 3137aa7a72aSMichal Kazior struct ath10k *ar = htt->ar; 3145e3dd157SKalle Valo int idx; 3155e3dd157SKalle Valo struct sk_buff *msdu; 3165e3dd157SKalle Valo 31745967089SMichal Kazior lockdep_assert_held(&htt->rx_ring.lock); 3185e3dd157SKalle Valo 3198d60ee87SMichal Kazior if (htt->rx_ring.fill_cnt == 0) { 3207aa7a72aSMichal Kazior ath10k_warn(ar, "tried to pop sk_buff from an empty rx ring\n"); 3218d60ee87SMichal Kazior return NULL; 3228d60ee87SMichal Kazior } 3235e3dd157SKalle Valo 3245e3dd157SKalle Valo idx = htt->rx_ring.sw_rd_idx.msdu_payld; 3255e3dd157SKalle Valo msdu = htt->rx_ring.netbufs_ring[idx]; 3263e841fd0SMichal Kazior htt->rx_ring.netbufs_ring[idx] = NULL; 3279a5511d5SErik Stromdahl ath10k_htt_reset_paddrs_ring(htt, idx); 3285e3dd157SKalle Valo 
3295e3dd157SKalle Valo idx++; 3305e3dd157SKalle Valo idx &= htt->rx_ring.size_mask; 3315e3dd157SKalle Valo htt->rx_ring.sw_rd_idx.msdu_payld = idx; 3325e3dd157SKalle Valo htt->rx_ring.fill_cnt--; 3335e3dd157SKalle Valo 3344de02806SMichal Kazior dma_unmap_single(htt->ar->dev, 3358582bf3bSMichal Kazior ATH10K_SKB_RXCB(msdu)->paddr, 3364de02806SMichal Kazior msdu->len + skb_tailroom(msdu), 3374de02806SMichal Kazior DMA_FROM_DEVICE); 3384de02806SMichal Kazior ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ", 3394de02806SMichal Kazior msdu->data, msdu->len + skb_tailroom(msdu)); 3404de02806SMichal Kazior 3415e3dd157SKalle Valo return msdu; 3425e3dd157SKalle Valo } 3435e3dd157SKalle Valo 344d84dd60fSJanusz Dziedzic /* return: < 0 fatal error, 0 - non chained msdu, 1 chained msdu */ 3455e3dd157SKalle Valo static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt, 346f0e2770fSMichal Kazior struct sk_buff_head *amsdu) 3475e3dd157SKalle Valo { 3487aa7a72aSMichal Kazior struct ath10k *ar = htt->ar; 3495e3dd157SKalle Valo int msdu_len, msdu_chaining = 0; 3509aa505d2SMichal Kazior struct sk_buff *msdu; 3515e3dd157SKalle Valo struct htt_rx_desc *rx_desc; 3525e3dd157SKalle Valo 35345967089SMichal Kazior lockdep_assert_held(&htt->rx_ring.lock); 35445967089SMichal Kazior 3559aa505d2SMichal Kazior for (;;) { 3565e3dd157SKalle Valo int last_msdu, msdu_len_invalid, msdu_chained; 3575e3dd157SKalle Valo 3589aa505d2SMichal Kazior msdu = ath10k_htt_rx_netbuf_pop(htt); 3599aa505d2SMichal Kazior if (!msdu) { 3609aa505d2SMichal Kazior __skb_queue_purge(amsdu); 361e0bd7513SMichal Kazior return -ENOENT; 3629aa505d2SMichal Kazior } 3639aa505d2SMichal Kazior 3649aa505d2SMichal Kazior __skb_queue_tail(amsdu, msdu); 3659aa505d2SMichal Kazior 3665e3dd157SKalle Valo rx_desc = (struct htt_rx_desc *)msdu->data; 3675e3dd157SKalle Valo 3685e3dd157SKalle Valo /* FIXME: we must report msdu payload since this is what caller 369d6dfe25cSMarcin Rokicki * expects now 370d6dfe25cSMarcin 
Rokicki */ 3715e3dd157SKalle Valo skb_put(msdu, offsetof(struct htt_rx_desc, msdu_payload)); 3725e3dd157SKalle Valo skb_pull(msdu, offsetof(struct htt_rx_desc, msdu_payload)); 3735e3dd157SKalle Valo 3745e3dd157SKalle Valo /* 3755e3dd157SKalle Valo * Sanity check - confirm the HW is finished filling in the 3765e3dd157SKalle Valo * rx data. 3775e3dd157SKalle Valo * If the HW and SW are working correctly, then it's guaranteed 3785e3dd157SKalle Valo * that the HW's MAC DMA is done before this point in the SW. 3795e3dd157SKalle Valo * To prevent the case that we handle a stale Rx descriptor, 3805e3dd157SKalle Valo * just assert for now until we have a way to recover. 3815e3dd157SKalle Valo */ 3825e3dd157SKalle Valo if (!(__le32_to_cpu(rx_desc->attention.flags) 3835e3dd157SKalle Valo & RX_ATTENTION_FLAGS_MSDU_DONE)) { 3849aa505d2SMichal Kazior __skb_queue_purge(amsdu); 385e0bd7513SMichal Kazior return -EIO; 3865e3dd157SKalle Valo } 3875e3dd157SKalle Valo 3885e3dd157SKalle Valo msdu_len_invalid = !!(__le32_to_cpu(rx_desc->attention.flags) 3895e3dd157SKalle Valo & (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR | 3905e3dd157SKalle Valo RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR)); 3911f5dbfbbSPeter Oh msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.common.info0), 3925e3dd157SKalle Valo RX_MSDU_START_INFO0_MSDU_LENGTH); 3935e3dd157SKalle Valo msdu_chained = rx_desc->frag_info.ring2_more_count; 3945e3dd157SKalle Valo 3955e3dd157SKalle Valo if (msdu_len_invalid) 3965e3dd157SKalle Valo msdu_len = 0; 3975e3dd157SKalle Valo 3985e3dd157SKalle Valo skb_trim(msdu, 0); 3995e3dd157SKalle Valo skb_put(msdu, min(msdu_len, HTT_RX_MSDU_SIZE)); 4005e3dd157SKalle Valo msdu_len -= msdu->len; 4015e3dd157SKalle Valo 4029aa505d2SMichal Kazior /* Note: Chained buffers do not contain rx descriptor */ 4035e3dd157SKalle Valo while (msdu_chained--) { 4049aa505d2SMichal Kazior msdu = ath10k_htt_rx_netbuf_pop(htt); 4059aa505d2SMichal Kazior if (!msdu) { 4069aa505d2SMichal Kazior __skb_queue_purge(amsdu); 
407e0bd7513SMichal Kazior return -ENOENT; 408b30595aeSMichal Kazior } 409b30595aeSMichal Kazior 4109aa505d2SMichal Kazior __skb_queue_tail(amsdu, msdu); 4119aa505d2SMichal Kazior skb_trim(msdu, 0); 4129aa505d2SMichal Kazior skb_put(msdu, min(msdu_len, HTT_RX_BUF_SIZE)); 4139aa505d2SMichal Kazior msdu_len -= msdu->len; 414ede9c8e0SMichal Kazior msdu_chaining = 1; 4155e3dd157SKalle Valo } 4165e3dd157SKalle Valo 4171f5dbfbbSPeter Oh last_msdu = __le32_to_cpu(rx_desc->msdu_end.common.info0) & 4185e3dd157SKalle Valo RX_MSDU_END_INFO0_LAST_MSDU; 4195e3dd157SKalle Valo 420b04e204fSMichal Kazior trace_ath10k_htt_rx_desc(ar, &rx_desc->attention, 421a0883cf7SRajkumar Manoharan sizeof(*rx_desc) - sizeof(u32)); 4229aa505d2SMichal Kazior 4239aa505d2SMichal Kazior if (last_msdu) 4245e3dd157SKalle Valo break; 425d8bb26b9SKalle Valo } 426d8bb26b9SKalle Valo 4279aa505d2SMichal Kazior if (skb_queue_empty(amsdu)) 428d84dd60fSJanusz Dziedzic msdu_chaining = -1; 429d84dd60fSJanusz Dziedzic 4305e3dd157SKalle Valo /* 4315e3dd157SKalle Valo * Don't refill the ring yet. 4325e3dd157SKalle Valo * 4335e3dd157SKalle Valo * First, the elements popped here are still in use - it is not 4345e3dd157SKalle Valo * safe to overwrite them until the matching call to 4355e3dd157SKalle Valo * mpdu_desc_list_next. Second, for efficiency it is preferable to 4365e3dd157SKalle Valo * refill the rx ring with 1 PPDU's worth of rx buffers (something 4375e3dd157SKalle Valo * like 32 x 3 buffers), rather than one MPDU's worth of rx buffers 4385e3dd157SKalle Valo * (something like 3 buffers). Consequently, we'll rely on the txrx 4395e3dd157SKalle Valo * SW to tell us when it is done pulling all the PPDU's rx buffers 4405e3dd157SKalle Valo * out of the rx ring, and then refill it just once. 
4415e3dd157SKalle Valo */ 4425e3dd157SKalle Valo 4435e3dd157SKalle Valo return msdu_chaining; 4445e3dd157SKalle Valo } 4455e3dd157SKalle Valo 446c545070eSMichal Kazior static struct sk_buff *ath10k_htt_rx_pop_paddr(struct ath10k_htt *htt, 447a91a626bSGovind Singh u64 paddr) 448c545070eSMichal Kazior { 449c545070eSMichal Kazior struct ath10k *ar = htt->ar; 450c545070eSMichal Kazior struct ath10k_skb_rxcb *rxcb; 451c545070eSMichal Kazior struct sk_buff *msdu; 452c545070eSMichal Kazior 453c545070eSMichal Kazior lockdep_assert_held(&htt->rx_ring.lock); 454c545070eSMichal Kazior 455c545070eSMichal Kazior msdu = ath10k_htt_rx_find_skb_paddr(ar, paddr); 456c545070eSMichal Kazior if (!msdu) 457c545070eSMichal Kazior return NULL; 458c545070eSMichal Kazior 459c545070eSMichal Kazior rxcb = ATH10K_SKB_RXCB(msdu); 460c545070eSMichal Kazior hash_del(&rxcb->hlist); 461c545070eSMichal Kazior htt->rx_ring.fill_cnt--; 462c545070eSMichal Kazior 463c545070eSMichal Kazior dma_unmap_single(htt->ar->dev, rxcb->paddr, 464c545070eSMichal Kazior msdu->len + skb_tailroom(msdu), 465c545070eSMichal Kazior DMA_FROM_DEVICE); 466c545070eSMichal Kazior ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ", 467c545070eSMichal Kazior msdu->data, msdu->len + skb_tailroom(msdu)); 468c545070eSMichal Kazior 469c545070eSMichal Kazior return msdu; 470c545070eSMichal Kazior } 471c545070eSMichal Kazior 4723b0b55b1SGovind Singh static int ath10k_htt_rx_pop_paddr32_list(struct ath10k_htt *htt, 473c545070eSMichal Kazior struct htt_rx_in_ord_ind *ev, 474c545070eSMichal Kazior struct sk_buff_head *list) 475c545070eSMichal Kazior { 476c545070eSMichal Kazior struct ath10k *ar = htt->ar; 4773b0b55b1SGovind Singh struct htt_rx_in_ord_msdu_desc *msdu_desc = ev->msdu_descs32; 478c545070eSMichal Kazior struct htt_rx_desc *rxd; 479c545070eSMichal Kazior struct sk_buff *msdu; 480c545070eSMichal Kazior int msdu_count; 481c545070eSMichal Kazior bool is_offload; 482c545070eSMichal Kazior u32 paddr; 
483c545070eSMichal Kazior 484c545070eSMichal Kazior lockdep_assert_held(&htt->rx_ring.lock); 485c545070eSMichal Kazior 486c545070eSMichal Kazior msdu_count = __le16_to_cpu(ev->msdu_count); 487c545070eSMichal Kazior is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK); 488c545070eSMichal Kazior 489c545070eSMichal Kazior while (msdu_count--) { 490c545070eSMichal Kazior paddr = __le32_to_cpu(msdu_desc->msdu_paddr); 491c545070eSMichal Kazior 492c545070eSMichal Kazior msdu = ath10k_htt_rx_pop_paddr(htt, paddr); 493c545070eSMichal Kazior if (!msdu) { 494c545070eSMichal Kazior __skb_queue_purge(list); 495c545070eSMichal Kazior return -ENOENT; 496c545070eSMichal Kazior } 497c545070eSMichal Kazior 498c545070eSMichal Kazior __skb_queue_tail(list, msdu); 499c545070eSMichal Kazior 500c545070eSMichal Kazior if (!is_offload) { 501c545070eSMichal Kazior rxd = (void *)msdu->data; 502c545070eSMichal Kazior 503c545070eSMichal Kazior trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd)); 504c545070eSMichal Kazior 505c545070eSMichal Kazior skb_put(msdu, sizeof(*rxd)); 506c545070eSMichal Kazior skb_pull(msdu, sizeof(*rxd)); 507c545070eSMichal Kazior skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len)); 508c545070eSMichal Kazior 509c545070eSMichal Kazior if (!(__le32_to_cpu(rxd->attention.flags) & 510c545070eSMichal Kazior RX_ATTENTION_FLAGS_MSDU_DONE)) { 511c545070eSMichal Kazior ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n"); 512c545070eSMichal Kazior return -EIO; 513c545070eSMichal Kazior } 514c545070eSMichal Kazior } 515c545070eSMichal Kazior 516c545070eSMichal Kazior msdu_desc++; 517c545070eSMichal Kazior } 518c545070eSMichal Kazior 519c545070eSMichal Kazior return 0; 520c545070eSMichal Kazior } 521c545070eSMichal Kazior 5223b0b55b1SGovind Singh static int ath10k_htt_rx_pop_paddr64_list(struct ath10k_htt *htt, 5233b0b55b1SGovind Singh struct htt_rx_in_ord_ind *ev, 5243b0b55b1SGovind Singh struct sk_buff_head *list) 5253b0b55b1SGovind Singh { 5263b0b55b1SGovind 
Singh struct ath10k *ar = htt->ar; 5273b0b55b1SGovind Singh struct htt_rx_in_ord_msdu_desc_ext *msdu_desc = ev->msdu_descs64; 5283b0b55b1SGovind Singh struct htt_rx_desc *rxd; 5293b0b55b1SGovind Singh struct sk_buff *msdu; 5303b0b55b1SGovind Singh int msdu_count; 5313b0b55b1SGovind Singh bool is_offload; 5323b0b55b1SGovind Singh u64 paddr; 5333b0b55b1SGovind Singh 5343b0b55b1SGovind Singh lockdep_assert_held(&htt->rx_ring.lock); 5353b0b55b1SGovind Singh 5363b0b55b1SGovind Singh msdu_count = __le16_to_cpu(ev->msdu_count); 5373b0b55b1SGovind Singh is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK); 5383b0b55b1SGovind Singh 5393b0b55b1SGovind Singh while (msdu_count--) { 5403b0b55b1SGovind Singh paddr = __le64_to_cpu(msdu_desc->msdu_paddr); 5413b0b55b1SGovind Singh msdu = ath10k_htt_rx_pop_paddr(htt, paddr); 5423b0b55b1SGovind Singh if (!msdu) { 5433b0b55b1SGovind Singh __skb_queue_purge(list); 5443b0b55b1SGovind Singh return -ENOENT; 5453b0b55b1SGovind Singh } 5463b0b55b1SGovind Singh 5473b0b55b1SGovind Singh __skb_queue_tail(list, msdu); 5483b0b55b1SGovind Singh 5493b0b55b1SGovind Singh if (!is_offload) { 5503b0b55b1SGovind Singh rxd = (void *)msdu->data; 5513b0b55b1SGovind Singh 5523b0b55b1SGovind Singh trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd)); 5533b0b55b1SGovind Singh 5543b0b55b1SGovind Singh skb_put(msdu, sizeof(*rxd)); 5553b0b55b1SGovind Singh skb_pull(msdu, sizeof(*rxd)); 5563b0b55b1SGovind Singh skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len)); 5573b0b55b1SGovind Singh 5583b0b55b1SGovind Singh if (!(__le32_to_cpu(rxd->attention.flags) & 5593b0b55b1SGovind Singh RX_ATTENTION_FLAGS_MSDU_DONE)) { 5603b0b55b1SGovind Singh ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n"); 5613b0b55b1SGovind Singh return -EIO; 5623b0b55b1SGovind Singh } 5633b0b55b1SGovind Singh } 5643b0b55b1SGovind Singh 5653b0b55b1SGovind Singh msdu_desc++; 5663b0b55b1SGovind Singh } 5673b0b55b1SGovind Singh 5683b0b55b1SGovind Singh return 0; 5693b0b55b1SGovind 
Singh } 5703b0b55b1SGovind Singh 57195bf21f9SMichal Kazior int ath10k_htt_rx_alloc(struct ath10k_htt *htt) 5725e3dd157SKalle Valo { 5737aa7a72aSMichal Kazior struct ath10k *ar = htt->ar; 5745e3dd157SKalle Valo dma_addr_t paddr; 575a91a626bSGovind Singh void *vaddr, *vaddr_ring; 576bd8bdbb6SKalle Valo size_t size; 5775e3dd157SKalle Valo struct timer_list *timer = &htt->rx_ring.refill_retry_timer; 5785e3dd157SKalle Valo 579f88d4934SErik Stromdahl if (ar->dev_type == ATH10K_DEV_TYPE_HL) 580f88d4934SErik Stromdahl return 0; 581f88d4934SErik Stromdahl 58251fc7d74SMichal Kazior htt->rx_confused = false; 58351fc7d74SMichal Kazior 584fe2407a8SMichal Kazior /* XXX: The fill level could be changed during runtime in response to 585fe2407a8SMichal Kazior * the host processing latency. Is this really worth it? 586fe2407a8SMichal Kazior */ 587fe2407a8SMichal Kazior htt->rx_ring.size = HTT_RX_RING_SIZE; 588fe2407a8SMichal Kazior htt->rx_ring.size_mask = htt->rx_ring.size - 1; 589bb8d0d15SGovind Singh htt->rx_ring.fill_level = ar->hw_params.rx_ring_fill_level; 590fe2407a8SMichal Kazior 5915e3dd157SKalle Valo if (!is_power_of_2(htt->rx_ring.size)) { 5927aa7a72aSMichal Kazior ath10k_warn(ar, "htt rx ring size is not power of 2\n"); 5935e3dd157SKalle Valo return -EINVAL; 5945e3dd157SKalle Valo } 5955e3dd157SKalle Valo 5965e3dd157SKalle Valo htt->rx_ring.netbufs_ring = 5976396bb22SKees Cook kcalloc(htt->rx_ring.size, sizeof(struct sk_buff *), 5985e3dd157SKalle Valo GFP_KERNEL); 5995e3dd157SKalle Valo if (!htt->rx_ring.netbufs_ring) 6005e3dd157SKalle Valo goto err_netbuf; 6015e3dd157SKalle Valo 6029a5511d5SErik Stromdahl size = ath10k_htt_get_rx_ring_size(htt); 603bd8bdbb6SKalle Valo 604a91a626bSGovind Singh vaddr_ring = dma_alloc_coherent(htt->ar->dev, size, &paddr, GFP_KERNEL); 605a91a626bSGovind Singh if (!vaddr_ring) 6065e3dd157SKalle Valo goto err_dma_ring; 6075e3dd157SKalle Valo 6089a5511d5SErik Stromdahl ath10k_htt_config_paddrs_ring(htt, vaddr_ring); 6095e3dd157SKalle Valo 
htt->rx_ring.base_paddr = paddr; 6105e3dd157SKalle Valo 6115e3dd157SKalle Valo vaddr = dma_alloc_coherent(htt->ar->dev, 6125e3dd157SKalle Valo sizeof(*htt->rx_ring.alloc_idx.vaddr), 613d6cb23b5SFelix Fietkau &paddr, GFP_KERNEL); 6145e3dd157SKalle Valo if (!vaddr) 6155e3dd157SKalle Valo goto err_dma_idx; 6165e3dd157SKalle Valo 6175e3dd157SKalle Valo htt->rx_ring.alloc_idx.vaddr = vaddr; 6185e3dd157SKalle Valo htt->rx_ring.alloc_idx.paddr = paddr; 619c545070eSMichal Kazior htt->rx_ring.sw_rd_idx.msdu_payld = htt->rx_ring.size_mask; 6205e3dd157SKalle Valo *htt->rx_ring.alloc_idx.vaddr = 0; 6215e3dd157SKalle Valo 6225e3dd157SKalle Valo /* Initialize the Rx refill retry timer */ 6237ac76764SKees Cook timer_setup(timer, ath10k_htt_rx_ring_refill_retry, 0); 6245e3dd157SKalle Valo 6255e3dd157SKalle Valo spin_lock_init(&htt->rx_ring.lock); 6265e3dd157SKalle Valo 6275e3dd157SKalle Valo htt->rx_ring.fill_cnt = 0; 628c545070eSMichal Kazior htt->rx_ring.sw_rd_idx.msdu_payld = 0; 629c545070eSMichal Kazior hash_init(htt->rx_ring.skb_table); 6305e3dd157SKalle Valo 631deba1b9eSRajkumar Manoharan skb_queue_head_init(&htt->rx_msdus_q); 632c545070eSMichal Kazior skb_queue_head_init(&htt->rx_in_ord_compl_q); 633426e10eaSMichal Kazior skb_queue_head_init(&htt->tx_fetch_ind_q); 6343128b3d8SRajkumar Manoharan atomic_set(&htt->num_mpdus_ready, 0); 6356c5151a9SMichal Kazior 6367aa7a72aSMichal Kazior ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n", 6375e3dd157SKalle Valo htt->rx_ring.size, htt->rx_ring.fill_level); 6385e3dd157SKalle Valo return 0; 6395e3dd157SKalle Valo 6405e3dd157SKalle Valo err_dma_idx: 6415e3dd157SKalle Valo dma_free_coherent(htt->ar->dev, 6429a5511d5SErik Stromdahl ath10k_htt_get_rx_ring_size(htt), 643a91a626bSGovind Singh vaddr_ring, 6445e3dd157SKalle Valo htt->rx_ring.base_paddr); 6455e3dd157SKalle Valo err_dma_ring: 6465e3dd157SKalle Valo kfree(htt->rx_ring.netbufs_ring); 6475e3dd157SKalle Valo err_netbuf: 6485e3dd157SKalle Valo return -ENOMEM; 
6495e3dd157SKalle Valo } 6505e3dd157SKalle Valo 6517aa7a72aSMichal Kazior static int ath10k_htt_rx_crypto_param_len(struct ath10k *ar, 6527aa7a72aSMichal Kazior enum htt_rx_mpdu_encrypt_type type) 6535e3dd157SKalle Valo { 6545e3dd157SKalle Valo switch (type) { 655890d3b2aSMichal Kazior case HTT_RX_MPDU_ENCRYPT_NONE: 656890d3b2aSMichal Kazior return 0; 6575e3dd157SKalle Valo case HTT_RX_MPDU_ENCRYPT_WEP40: 6585e3dd157SKalle Valo case HTT_RX_MPDU_ENCRYPT_WEP104: 659890d3b2aSMichal Kazior return IEEE80211_WEP_IV_LEN; 6605e3dd157SKalle Valo case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC: 6615e3dd157SKalle Valo case HTT_RX_MPDU_ENCRYPT_TKIP_WPA: 662890d3b2aSMichal Kazior return IEEE80211_TKIP_IV_LEN; 6635e3dd157SKalle Valo case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2: 664890d3b2aSMichal Kazior return IEEE80211_CCMP_HDR_LEN; 6657eccb738SVasanthakumar Thiagarajan case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2: 6667eccb738SVasanthakumar Thiagarajan return IEEE80211_CCMP_256_HDR_LEN; 6677eccb738SVasanthakumar Thiagarajan case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2: 6687eccb738SVasanthakumar Thiagarajan case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2: 6697eccb738SVasanthakumar Thiagarajan return IEEE80211_GCMP_HDR_LEN; 670890d3b2aSMichal Kazior case HTT_RX_MPDU_ENCRYPT_WEP128: 671890d3b2aSMichal Kazior case HTT_RX_MPDU_ENCRYPT_WAPI: 672890d3b2aSMichal Kazior break; 673890d3b2aSMichal Kazior } 674890d3b2aSMichal Kazior 675890d3b2aSMichal Kazior ath10k_warn(ar, "unsupported encryption type %d\n", type); 6765e3dd157SKalle Valo return 0; 6775e3dd157SKalle Valo } 6785e3dd157SKalle Valo 679890d3b2aSMichal Kazior #define MICHAEL_MIC_LEN 8 6805e3dd157SKalle Valo 681307aeb31SVasanthakumar Thiagarajan static int ath10k_htt_rx_crypto_mic_len(struct ath10k *ar, 6827aa7a72aSMichal Kazior enum htt_rx_mpdu_encrypt_type type) 6835e3dd157SKalle Valo { 6845e3dd157SKalle Valo switch (type) { 6855e3dd157SKalle Valo case HTT_RX_MPDU_ENCRYPT_NONE: 6865e3dd157SKalle Valo case HTT_RX_MPDU_ENCRYPT_WEP40: 6875e3dd157SKalle Valo 
case HTT_RX_MPDU_ENCRYPT_WEP104: 6885e3dd157SKalle Valo case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC: 6895e3dd157SKalle Valo case HTT_RX_MPDU_ENCRYPT_TKIP_WPA: 690307aeb31SVasanthakumar Thiagarajan return 0; 6915e3dd157SKalle Valo case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2: 692890d3b2aSMichal Kazior return IEEE80211_CCMP_MIC_LEN; 6937eccb738SVasanthakumar Thiagarajan case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2: 6947eccb738SVasanthakumar Thiagarajan return IEEE80211_CCMP_256_MIC_LEN; 6957eccb738SVasanthakumar Thiagarajan case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2: 6967eccb738SVasanthakumar Thiagarajan case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2: 6977eccb738SVasanthakumar Thiagarajan return IEEE80211_GCMP_MIC_LEN; 698890d3b2aSMichal Kazior case HTT_RX_MPDU_ENCRYPT_WEP128: 699890d3b2aSMichal Kazior case HTT_RX_MPDU_ENCRYPT_WAPI: 700890d3b2aSMichal Kazior break; 7015e3dd157SKalle Valo } 7025e3dd157SKalle Valo 703890d3b2aSMichal Kazior ath10k_warn(ar, "unsupported encryption type %d\n", type); 7045e3dd157SKalle Valo return 0; 7055e3dd157SKalle Valo } 7065e3dd157SKalle Valo 707307aeb31SVasanthakumar Thiagarajan static int ath10k_htt_rx_crypto_icv_len(struct ath10k *ar, 708307aeb31SVasanthakumar Thiagarajan enum htt_rx_mpdu_encrypt_type type) 709307aeb31SVasanthakumar Thiagarajan { 710307aeb31SVasanthakumar Thiagarajan switch (type) { 711307aeb31SVasanthakumar Thiagarajan case HTT_RX_MPDU_ENCRYPT_NONE: 712307aeb31SVasanthakumar Thiagarajan case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2: 713307aeb31SVasanthakumar Thiagarajan case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2: 714307aeb31SVasanthakumar Thiagarajan case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2: 715307aeb31SVasanthakumar Thiagarajan case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2: 716307aeb31SVasanthakumar Thiagarajan return 0; 717307aeb31SVasanthakumar Thiagarajan case HTT_RX_MPDU_ENCRYPT_WEP40: 718307aeb31SVasanthakumar Thiagarajan case HTT_RX_MPDU_ENCRYPT_WEP104: 719307aeb31SVasanthakumar Thiagarajan return IEEE80211_WEP_ICV_LEN; 
720307aeb31SVasanthakumar Thiagarajan case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC: 721307aeb31SVasanthakumar Thiagarajan case HTT_RX_MPDU_ENCRYPT_TKIP_WPA: 722307aeb31SVasanthakumar Thiagarajan return IEEE80211_TKIP_ICV_LEN; 723307aeb31SVasanthakumar Thiagarajan case HTT_RX_MPDU_ENCRYPT_WEP128: 724307aeb31SVasanthakumar Thiagarajan case HTT_RX_MPDU_ENCRYPT_WAPI: 725307aeb31SVasanthakumar Thiagarajan break; 726307aeb31SVasanthakumar Thiagarajan } 727307aeb31SVasanthakumar Thiagarajan 728307aeb31SVasanthakumar Thiagarajan ath10k_warn(ar, "unsupported encryption type %d\n", type); 729307aeb31SVasanthakumar Thiagarajan return 0; 730307aeb31SVasanthakumar Thiagarajan } 731307aeb31SVasanthakumar Thiagarajan 732f6dc2095SMichal Kazior struct amsdu_subframe_hdr { 733f6dc2095SMichal Kazior u8 dst[ETH_ALEN]; 734f6dc2095SMichal Kazior u8 src[ETH_ALEN]; 735f6dc2095SMichal Kazior __be16 len; 736f6dc2095SMichal Kazior } __packed; 737f6dc2095SMichal Kazior 7386986fdd6SMichal Kazior #define GROUP_ID_IS_SU_MIMO(x) ((x) == 0 || (x) == 63) 7396986fdd6SMichal Kazior 74091493e8eSChristian Lamparter static inline u8 ath10k_bw_to_mac80211_bw(u8 bw) 74191493e8eSChristian Lamparter { 74291493e8eSChristian Lamparter u8 ret = 0; 74391493e8eSChristian Lamparter 74491493e8eSChristian Lamparter switch (bw) { 74591493e8eSChristian Lamparter case 0: 74691493e8eSChristian Lamparter ret = RATE_INFO_BW_20; 74791493e8eSChristian Lamparter break; 74891493e8eSChristian Lamparter case 1: 74991493e8eSChristian Lamparter ret = RATE_INFO_BW_40; 75091493e8eSChristian Lamparter break; 75191493e8eSChristian Lamparter case 2: 75291493e8eSChristian Lamparter ret = RATE_INFO_BW_80; 75391493e8eSChristian Lamparter break; 75491493e8eSChristian Lamparter case 3: 75591493e8eSChristian Lamparter ret = RATE_INFO_BW_160; 75691493e8eSChristian Lamparter break; 75791493e8eSChristian Lamparter } 75891493e8eSChristian Lamparter 75991493e8eSChristian Lamparter return ret; 76091493e8eSChristian Lamparter } 76191493e8eSChristian 
Lamparter 76287326c97SJanusz Dziedzic static void ath10k_htt_rx_h_rates(struct ath10k *ar, 763b9fd8a84SMichal Kazior struct ieee80211_rx_status *status, 764b9fd8a84SMichal Kazior struct htt_rx_desc *rxd) 76573539b40SJanusz Dziedzic { 7665528e032SMichal Kazior struct ieee80211_supported_band *sband; 7675528e032SMichal Kazior u8 cck, rate, bw, sgi, mcs, nss; 76873539b40SJanusz Dziedzic u8 preamble = 0; 7696986fdd6SMichal Kazior u8 group_id; 770b9fd8a84SMichal Kazior u32 info1, info2, info3; 77173539b40SJanusz Dziedzic 772b9fd8a84SMichal Kazior info1 = __le32_to_cpu(rxd->ppdu_start.info1); 773b9fd8a84SMichal Kazior info2 = __le32_to_cpu(rxd->ppdu_start.info2); 774b9fd8a84SMichal Kazior info3 = __le32_to_cpu(rxd->ppdu_start.info3); 775b9fd8a84SMichal Kazior 776b9fd8a84SMichal Kazior preamble = MS(info1, RX_PPDU_START_INFO1_PREAMBLE_TYPE); 77773539b40SJanusz Dziedzic 77873539b40SJanusz Dziedzic switch (preamble) { 77973539b40SJanusz Dziedzic case HTT_RX_LEGACY: 7805528e032SMichal Kazior /* To get legacy rate index band is required. Since band can't 7815528e032SMichal Kazior * be undefined check if freq is non-zero. 
7825528e032SMichal Kazior */ 7835528e032SMichal Kazior if (!status->freq) 7845528e032SMichal Kazior return; 7855528e032SMichal Kazior 786b9fd8a84SMichal Kazior cck = info1 & RX_PPDU_START_INFO1_L_SIG_RATE_SELECT; 787b9fd8a84SMichal Kazior rate = MS(info1, RX_PPDU_START_INFO1_L_SIG_RATE); 7885528e032SMichal Kazior rate &= ~RX_PPDU_START_RATE_FLAG; 78973539b40SJanusz Dziedzic 7905528e032SMichal Kazior sband = &ar->mac.sbands[status->band]; 7914b7f353bSYanbo Li status->rate_idx = ath10k_mac_hw_rate_to_idx(sband, rate, cck); 79273539b40SJanusz Dziedzic break; 79373539b40SJanusz Dziedzic case HTT_RX_HT: 79473539b40SJanusz Dziedzic case HTT_RX_HT_WITH_TXBF: 795b9fd8a84SMichal Kazior /* HT-SIG - Table 20-11 in info2 and info3 */ 796b9fd8a84SMichal Kazior mcs = info2 & 0x1F; 79773539b40SJanusz Dziedzic nss = mcs >> 3; 798b9fd8a84SMichal Kazior bw = (info2 >> 7) & 1; 799b9fd8a84SMichal Kazior sgi = (info3 >> 7) & 1; 80073539b40SJanusz Dziedzic 80173539b40SJanusz Dziedzic status->rate_idx = mcs; 802da6a4352SJohannes Berg status->encoding = RX_ENC_HT; 80373539b40SJanusz Dziedzic if (sgi) 8047fdd69c5SJohannes Berg status->enc_flags |= RX_ENC_FLAG_SHORT_GI; 80573539b40SJanusz Dziedzic if (bw) 806da6a4352SJohannes Berg status->bw = RATE_INFO_BW_40; 80773539b40SJanusz Dziedzic break; 80873539b40SJanusz Dziedzic case HTT_RX_VHT: 80973539b40SJanusz Dziedzic case HTT_RX_VHT_WITH_TXBF: 810b9fd8a84SMichal Kazior /* VHT-SIG-A1 in info2, VHT-SIG-A2 in info3 811d6dfe25cSMarcin Rokicki * TODO check this 812d6dfe25cSMarcin Rokicki */ 813b9fd8a84SMichal Kazior bw = info2 & 3; 814b9fd8a84SMichal Kazior sgi = info3 & 1; 8156986fdd6SMichal Kazior group_id = (info2 >> 4) & 0x3F; 8166986fdd6SMichal Kazior 8176986fdd6SMichal Kazior if (GROUP_ID_IS_SU_MIMO(group_id)) { 8186986fdd6SMichal Kazior mcs = (info3 >> 4) & 0x0F; 8196986fdd6SMichal Kazior nss = ((info2 >> 10) & 0x07) + 1; 8206986fdd6SMichal Kazior } else { 8216986fdd6SMichal Kazior /* Hardware doesn't decode VHT-SIG-B into Rx descriptor 
8226986fdd6SMichal Kazior * so it's impossible to decode MCS. Also since 8236986fdd6SMichal Kazior * firmware consumes Group Id Management frames host 8246986fdd6SMichal Kazior * has no knowledge regarding group/user position 8256986fdd6SMichal Kazior * mapping so it's impossible to pick the correct Nsts 8266986fdd6SMichal Kazior * from VHT-SIG-A1. 8276986fdd6SMichal Kazior * 8286986fdd6SMichal Kazior * Bandwidth and SGI are valid so report the rateinfo 8296986fdd6SMichal Kazior * on best-effort basis. 8306986fdd6SMichal Kazior */ 8316986fdd6SMichal Kazior mcs = 0; 8326986fdd6SMichal Kazior nss = 1; 8336986fdd6SMichal Kazior } 83473539b40SJanusz Dziedzic 8356ccea107SManikanta Pubbisetty if (mcs > 0x09) { 8366ccea107SManikanta Pubbisetty ath10k_warn(ar, "invalid MCS received %u\n", mcs); 8376ccea107SManikanta Pubbisetty ath10k_warn(ar, "rxd %08x mpdu start %08x %08x msdu start %08x %08x ppdu start %08x %08x %08x %08x %08x\n", 8386ccea107SManikanta Pubbisetty __le32_to_cpu(rxd->attention.flags), 8396ccea107SManikanta Pubbisetty __le32_to_cpu(rxd->mpdu_start.info0), 8406ccea107SManikanta Pubbisetty __le32_to_cpu(rxd->mpdu_start.info1), 8416ccea107SManikanta Pubbisetty __le32_to_cpu(rxd->msdu_start.common.info0), 8426ccea107SManikanta Pubbisetty __le32_to_cpu(rxd->msdu_start.common.info1), 8436ccea107SManikanta Pubbisetty rxd->ppdu_start.info0, 8446ccea107SManikanta Pubbisetty __le32_to_cpu(rxd->ppdu_start.info1), 8456ccea107SManikanta Pubbisetty __le32_to_cpu(rxd->ppdu_start.info2), 8466ccea107SManikanta Pubbisetty __le32_to_cpu(rxd->ppdu_start.info3), 8476ccea107SManikanta Pubbisetty __le32_to_cpu(rxd->ppdu_start.info4)); 8486ccea107SManikanta Pubbisetty 8496ccea107SManikanta Pubbisetty ath10k_warn(ar, "msdu end %08x mpdu end %08x\n", 8506ccea107SManikanta Pubbisetty __le32_to_cpu(rxd->msdu_end.common.info0), 8516ccea107SManikanta Pubbisetty __le32_to_cpu(rxd->mpdu_end.info0)); 8526ccea107SManikanta Pubbisetty 8536ccea107SManikanta Pubbisetty ath10k_dbg_dump(ar, 
ATH10K_DBG_HTT_DUMP, NULL, 8546ccea107SManikanta Pubbisetty "rx desc msdu payload: ", 8556ccea107SManikanta Pubbisetty rxd->msdu_payload, 50); 8566ccea107SManikanta Pubbisetty } 8576ccea107SManikanta Pubbisetty 85873539b40SJanusz Dziedzic status->rate_idx = mcs; 8598613c948SJohannes Berg status->nss = nss; 86073539b40SJanusz Dziedzic 86173539b40SJanusz Dziedzic if (sgi) 8627fdd69c5SJohannes Berg status->enc_flags |= RX_ENC_FLAG_SHORT_GI; 86373539b40SJanusz Dziedzic 86491493e8eSChristian Lamparter status->bw = ath10k_bw_to_mac80211_bw(bw); 865da6a4352SJohannes Berg status->encoding = RX_ENC_VHT; 86673539b40SJanusz Dziedzic break; 86773539b40SJanusz Dziedzic default: 86873539b40SJanusz Dziedzic break; 86973539b40SJanusz Dziedzic } 87073539b40SJanusz Dziedzic } 87173539b40SJanusz Dziedzic 872500ff9f9SMichal Kazior static struct ieee80211_channel * 873500ff9f9SMichal Kazior ath10k_htt_rx_h_peer_channel(struct ath10k *ar, struct htt_rx_desc *rxd) 874500ff9f9SMichal Kazior { 875500ff9f9SMichal Kazior struct ath10k_peer *peer; 876500ff9f9SMichal Kazior struct ath10k_vif *arvif; 877500ff9f9SMichal Kazior struct cfg80211_chan_def def; 878500ff9f9SMichal Kazior u16 peer_id; 879500ff9f9SMichal Kazior 880500ff9f9SMichal Kazior lockdep_assert_held(&ar->data_lock); 881500ff9f9SMichal Kazior 882500ff9f9SMichal Kazior if (!rxd) 883500ff9f9SMichal Kazior return NULL; 884500ff9f9SMichal Kazior 885500ff9f9SMichal Kazior if (rxd->attention.flags & 886500ff9f9SMichal Kazior __cpu_to_le32(RX_ATTENTION_FLAGS_PEER_IDX_INVALID)) 887500ff9f9SMichal Kazior return NULL; 888500ff9f9SMichal Kazior 8891f5dbfbbSPeter Oh if (!(rxd->msdu_end.common.info0 & 890500ff9f9SMichal Kazior __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU))) 891500ff9f9SMichal Kazior return NULL; 892500ff9f9SMichal Kazior 893500ff9f9SMichal Kazior peer_id = MS(__le32_to_cpu(rxd->mpdu_start.info0), 894500ff9f9SMichal Kazior RX_MPDU_START_INFO0_PEER_IDX); 895500ff9f9SMichal Kazior 896500ff9f9SMichal Kazior peer = 
ath10k_peer_find_by_id(ar, peer_id); 897500ff9f9SMichal Kazior if (!peer) 898500ff9f9SMichal Kazior return NULL; 899500ff9f9SMichal Kazior 900500ff9f9SMichal Kazior arvif = ath10k_get_arvif(ar, peer->vdev_id); 901500ff9f9SMichal Kazior if (WARN_ON_ONCE(!arvif)) 902500ff9f9SMichal Kazior return NULL; 903500ff9f9SMichal Kazior 904569fba2cSMohammed Shafi Shajakhan if (ath10k_mac_vif_chan(arvif->vif, &def)) 905500ff9f9SMichal Kazior return NULL; 906500ff9f9SMichal Kazior 907500ff9f9SMichal Kazior return def.chan; 908500ff9f9SMichal Kazior } 909500ff9f9SMichal Kazior 910500ff9f9SMichal Kazior static struct ieee80211_channel * 911500ff9f9SMichal Kazior ath10k_htt_rx_h_vdev_channel(struct ath10k *ar, u32 vdev_id) 912500ff9f9SMichal Kazior { 913500ff9f9SMichal Kazior struct ath10k_vif *arvif; 914500ff9f9SMichal Kazior struct cfg80211_chan_def def; 915500ff9f9SMichal Kazior 916500ff9f9SMichal Kazior lockdep_assert_held(&ar->data_lock); 917500ff9f9SMichal Kazior 918500ff9f9SMichal Kazior list_for_each_entry(arvif, &ar->arvifs, list) { 919500ff9f9SMichal Kazior if (arvif->vdev_id == vdev_id && 920500ff9f9SMichal Kazior ath10k_mac_vif_chan(arvif->vif, &def) == 0) 921500ff9f9SMichal Kazior return def.chan; 922500ff9f9SMichal Kazior } 923500ff9f9SMichal Kazior 924500ff9f9SMichal Kazior return NULL; 925500ff9f9SMichal Kazior } 926500ff9f9SMichal Kazior 927500ff9f9SMichal Kazior static void 928500ff9f9SMichal Kazior ath10k_htt_rx_h_any_chan_iter(struct ieee80211_hw *hw, 929500ff9f9SMichal Kazior struct ieee80211_chanctx_conf *conf, 930500ff9f9SMichal Kazior void *data) 931500ff9f9SMichal Kazior { 932500ff9f9SMichal Kazior struct cfg80211_chan_def *def = data; 933500ff9f9SMichal Kazior 934500ff9f9SMichal Kazior *def = conf->def; 935500ff9f9SMichal Kazior } 936500ff9f9SMichal Kazior 937500ff9f9SMichal Kazior static struct ieee80211_channel * 938500ff9f9SMichal Kazior ath10k_htt_rx_h_any_channel(struct ath10k *ar) 939500ff9f9SMichal Kazior { 940500ff9f9SMichal Kazior struct 
cfg80211_chan_def def = {}; 941500ff9f9SMichal Kazior 942500ff9f9SMichal Kazior ieee80211_iter_chan_contexts_atomic(ar->hw, 943500ff9f9SMichal Kazior ath10k_htt_rx_h_any_chan_iter, 944500ff9f9SMichal Kazior &def); 945500ff9f9SMichal Kazior 946500ff9f9SMichal Kazior return def.chan; 947500ff9f9SMichal Kazior } 948500ff9f9SMichal Kazior 94936653f05SJanusz Dziedzic static bool ath10k_htt_rx_h_channel(struct ath10k *ar, 950500ff9f9SMichal Kazior struct ieee80211_rx_status *status, 951500ff9f9SMichal Kazior struct htt_rx_desc *rxd, 952500ff9f9SMichal Kazior u32 vdev_id) 95336653f05SJanusz Dziedzic { 95436653f05SJanusz Dziedzic struct ieee80211_channel *ch; 95536653f05SJanusz Dziedzic 95636653f05SJanusz Dziedzic spin_lock_bh(&ar->data_lock); 95736653f05SJanusz Dziedzic ch = ar->scan_channel; 95836653f05SJanusz Dziedzic if (!ch) 95936653f05SJanusz Dziedzic ch = ar->rx_channel; 960500ff9f9SMichal Kazior if (!ch) 961500ff9f9SMichal Kazior ch = ath10k_htt_rx_h_peer_channel(ar, rxd); 962500ff9f9SMichal Kazior if (!ch) 963500ff9f9SMichal Kazior ch = ath10k_htt_rx_h_vdev_channel(ar, vdev_id); 964500ff9f9SMichal Kazior if (!ch) 965500ff9f9SMichal Kazior ch = ath10k_htt_rx_h_any_channel(ar); 9662ce9b25cSRajkumar Manoharan if (!ch) 9672ce9b25cSRajkumar Manoharan ch = ar->tgt_oper_chan; 96836653f05SJanusz Dziedzic spin_unlock_bh(&ar->data_lock); 96936653f05SJanusz Dziedzic 97036653f05SJanusz Dziedzic if (!ch) 97136653f05SJanusz Dziedzic return false; 97236653f05SJanusz Dziedzic 97336653f05SJanusz Dziedzic status->band = ch->band; 97436653f05SJanusz Dziedzic status->freq = ch->center_freq; 97536653f05SJanusz Dziedzic 97636653f05SJanusz Dziedzic return true; 97736653f05SJanusz Dziedzic } 97836653f05SJanusz Dziedzic 979b9fd8a84SMichal Kazior static void ath10k_htt_rx_h_signal(struct ath10k *ar, 980b9fd8a84SMichal Kazior struct ieee80211_rx_status *status, 981b9fd8a84SMichal Kazior struct htt_rx_desc *rxd) 982b9fd8a84SMichal Kazior { 9838241253dSNorik Dzhandzhapanyan int i; 
9848241253dSNorik Dzhandzhapanyan 9858241253dSNorik Dzhandzhapanyan for (i = 0; i < IEEE80211_MAX_CHAINS ; i++) { 9868241253dSNorik Dzhandzhapanyan status->chains &= ~BIT(i); 9878241253dSNorik Dzhandzhapanyan 9888241253dSNorik Dzhandzhapanyan if (rxd->ppdu_start.rssi_chains[i].pri20_mhz != 0x80) { 9898241253dSNorik Dzhandzhapanyan status->chain_signal[i] = ATH10K_DEFAULT_NOISE_FLOOR + 9908241253dSNorik Dzhandzhapanyan rxd->ppdu_start.rssi_chains[i].pri20_mhz; 9918241253dSNorik Dzhandzhapanyan 9928241253dSNorik Dzhandzhapanyan status->chains |= BIT(i); 9938241253dSNorik Dzhandzhapanyan } 9948241253dSNorik Dzhandzhapanyan } 9958241253dSNorik Dzhandzhapanyan 996b9fd8a84SMichal Kazior /* FIXME: Get real NF */ 997b9fd8a84SMichal Kazior status->signal = ATH10K_DEFAULT_NOISE_FLOOR + 998b9fd8a84SMichal Kazior rxd->ppdu_start.rssi_comb; 999b9fd8a84SMichal Kazior status->flag &= ~RX_FLAG_NO_SIGNAL_VAL; 1000b9fd8a84SMichal Kazior } 1001b9fd8a84SMichal Kazior 1002b9fd8a84SMichal Kazior static void ath10k_htt_rx_h_mactime(struct ath10k *ar, 1003b9fd8a84SMichal Kazior struct ieee80211_rx_status *status, 1004b9fd8a84SMichal Kazior struct htt_rx_desc *rxd) 1005b9fd8a84SMichal Kazior { 1006b9fd8a84SMichal Kazior /* FIXME: TSF is known only at the end of PPDU, in the last MPDU. This 1007b9fd8a84SMichal Kazior * means all prior MSDUs in a PPDU are reported to mac80211 without the 1008b9fd8a84SMichal Kazior * TSF. Is it worth holding frames until end of PPDU is known? 1009b9fd8a84SMichal Kazior * 1010b9fd8a84SMichal Kazior * FIXME: Can we get/compute 64bit TSF? 
1011b9fd8a84SMichal Kazior */ 10123ec79e3aSMichal Kazior status->mactime = __le32_to_cpu(rxd->ppdu_end.common.tsf_timestamp); 1013b9fd8a84SMichal Kazior status->flag |= RX_FLAG_MACTIME_END; 1014b9fd8a84SMichal Kazior } 1015b9fd8a84SMichal Kazior 1016b9fd8a84SMichal Kazior static void ath10k_htt_rx_h_ppdu(struct ath10k *ar, 1017b9fd8a84SMichal Kazior struct sk_buff_head *amsdu, 1018500ff9f9SMichal Kazior struct ieee80211_rx_status *status, 1019500ff9f9SMichal Kazior u32 vdev_id) 1020b9fd8a84SMichal Kazior { 1021b9fd8a84SMichal Kazior struct sk_buff *first; 1022b9fd8a84SMichal Kazior struct htt_rx_desc *rxd; 1023b9fd8a84SMichal Kazior bool is_first_ppdu; 1024b9fd8a84SMichal Kazior bool is_last_ppdu; 1025b9fd8a84SMichal Kazior 1026b9fd8a84SMichal Kazior if (skb_queue_empty(amsdu)) 1027b9fd8a84SMichal Kazior return; 1028b9fd8a84SMichal Kazior 1029b9fd8a84SMichal Kazior first = skb_peek(amsdu); 1030b9fd8a84SMichal Kazior rxd = (void *)first->data - sizeof(*rxd); 1031b9fd8a84SMichal Kazior 1032b9fd8a84SMichal Kazior is_first_ppdu = !!(rxd->attention.flags & 1033b9fd8a84SMichal Kazior __cpu_to_le32(RX_ATTENTION_FLAGS_FIRST_MPDU)); 1034b9fd8a84SMichal Kazior is_last_ppdu = !!(rxd->attention.flags & 1035b9fd8a84SMichal Kazior __cpu_to_le32(RX_ATTENTION_FLAGS_LAST_MPDU)); 1036b9fd8a84SMichal Kazior 1037b9fd8a84SMichal Kazior if (is_first_ppdu) { 1038b9fd8a84SMichal Kazior /* New PPDU starts so clear out the old per-PPDU status. 
*/ 1039b9fd8a84SMichal Kazior status->freq = 0; 1040b9fd8a84SMichal Kazior status->rate_idx = 0; 10418613c948SJohannes Berg status->nss = 0; 1042da6a4352SJohannes Berg status->encoding = RX_ENC_LEGACY; 1043da6a4352SJohannes Berg status->bw = RATE_INFO_BW_20; 104447cc0ca9SMatthias Frei 10457fdd69c5SJohannes Berg status->flag &= ~RX_FLAG_MACTIME_END; 1046b9fd8a84SMichal Kazior status->flag |= RX_FLAG_NO_SIGNAL_VAL; 1047b9fd8a84SMichal Kazior 104847cc0ca9SMatthias Frei status->flag &= ~(RX_FLAG_AMPDU_IS_LAST); 104947cc0ca9SMatthias Frei status->flag |= RX_FLAG_AMPDU_DETAILS | RX_FLAG_AMPDU_LAST_KNOWN; 105047cc0ca9SMatthias Frei status->ampdu_reference = ar->ampdu_reference; 105147cc0ca9SMatthias Frei 1052b9fd8a84SMichal Kazior ath10k_htt_rx_h_signal(ar, status, rxd); 1053500ff9f9SMichal Kazior ath10k_htt_rx_h_channel(ar, status, rxd, vdev_id); 1054b9fd8a84SMichal Kazior ath10k_htt_rx_h_rates(ar, status, rxd); 1055b9fd8a84SMichal Kazior } 1056b9fd8a84SMichal Kazior 105747cc0ca9SMatthias Frei if (is_last_ppdu) { 1058b9fd8a84SMichal Kazior ath10k_htt_rx_h_mactime(ar, status, rxd); 105947cc0ca9SMatthias Frei 106047cc0ca9SMatthias Frei /* set ampdu last segment flag */ 106147cc0ca9SMatthias Frei status->flag |= RX_FLAG_AMPDU_IS_LAST; 106247cc0ca9SMatthias Frei ar->ampdu_reference++; 106347cc0ca9SMatthias Frei } 1064b9fd8a84SMichal Kazior } 1065b9fd8a84SMichal Kazior 106676f5329aSJanusz Dziedzic static const char * const tid_to_ac[] = { 106776f5329aSJanusz Dziedzic "BE", 106876f5329aSJanusz Dziedzic "BK", 106976f5329aSJanusz Dziedzic "BK", 107076f5329aSJanusz Dziedzic "BE", 107176f5329aSJanusz Dziedzic "VI", 107276f5329aSJanusz Dziedzic "VI", 107376f5329aSJanusz Dziedzic "VO", 107476f5329aSJanusz Dziedzic "VO", 107576f5329aSJanusz Dziedzic }; 107676f5329aSJanusz Dziedzic 107776f5329aSJanusz Dziedzic static char *ath10k_get_tid(struct ieee80211_hdr *hdr, char *out, size_t size) 107876f5329aSJanusz Dziedzic { 107976f5329aSJanusz Dziedzic u8 *qc; 108076f5329aSJanusz Dziedzic 
int tid; 108176f5329aSJanusz Dziedzic 108276f5329aSJanusz Dziedzic if (!ieee80211_is_data_qos(hdr->frame_control)) 108376f5329aSJanusz Dziedzic return ""; 108476f5329aSJanusz Dziedzic 108576f5329aSJanusz Dziedzic qc = ieee80211_get_qos_ctl(hdr); 108676f5329aSJanusz Dziedzic tid = *qc & IEEE80211_QOS_CTL_TID_MASK; 108776f5329aSJanusz Dziedzic if (tid < 8) 108876f5329aSJanusz Dziedzic snprintf(out, size, "tid %d (%s)", tid, tid_to_ac[tid]); 108976f5329aSJanusz Dziedzic else 109076f5329aSJanusz Dziedzic snprintf(out, size, "tid %d", tid); 109176f5329aSJanusz Dziedzic 109276f5329aSJanusz Dziedzic return out; 109376f5329aSJanusz Dziedzic } 109476f5329aSJanusz Dziedzic 1095deba1b9eSRajkumar Manoharan static void ath10k_htt_rx_h_queue_msdu(struct ath10k *ar, 109685f6d7cfSJanusz Dziedzic struct ieee80211_rx_status *rx_status, 109785f6d7cfSJanusz Dziedzic struct sk_buff *skb) 109873539b40SJanusz Dziedzic { 109973539b40SJanusz Dziedzic struct ieee80211_rx_status *status; 1100deba1b9eSRajkumar Manoharan 1101deba1b9eSRajkumar Manoharan status = IEEE80211_SKB_RXCB(skb); 1102deba1b9eSRajkumar Manoharan *status = *rx_status; 1103deba1b9eSRajkumar Manoharan 110462652555SBob Copeland skb_queue_tail(&ar->htt.rx_msdus_q, skb); 1105deba1b9eSRajkumar Manoharan } 1106deba1b9eSRajkumar Manoharan 1107deba1b9eSRajkumar Manoharan static void ath10k_process_rx(struct ath10k *ar, struct sk_buff *skb) 1108deba1b9eSRajkumar Manoharan { 1109deba1b9eSRajkumar Manoharan struct ieee80211_rx_status *status; 111076f5329aSJanusz Dziedzic struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 111176f5329aSJanusz Dziedzic char tid[32]; 111273539b40SJanusz Dziedzic 111385f6d7cfSJanusz Dziedzic status = IEEE80211_SKB_RXCB(skb); 111473539b40SJanusz Dziedzic 11157aa7a72aSMichal Kazior ath10k_dbg(ar, ATH10K_DBG_DATA, 11167fdd69c5SJohannes Berg "rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n", 
111785f6d7cfSJanusz Dziedzic skb, 111885f6d7cfSJanusz Dziedzic skb->len, 111976f5329aSJanusz Dziedzic ieee80211_get_SA(hdr), 112076f5329aSJanusz Dziedzic ath10k_get_tid(hdr, tid, sizeof(tid)), 112176f5329aSJanusz Dziedzic is_multicast_ether_addr(ieee80211_get_DA(hdr)) ? 112276f5329aSJanusz Dziedzic "mcast" : "ucast", 112376f5329aSJanusz Dziedzic (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4, 1124da6a4352SJohannes Berg (status->encoding == RX_ENC_LEGACY) ? "legacy" : "", 1125da6a4352SJohannes Berg (status->encoding == RX_ENC_HT) ? "ht" : "", 1126da6a4352SJohannes Berg (status->encoding == RX_ENC_VHT) ? "vht" : "", 1127da6a4352SJohannes Berg (status->bw == RATE_INFO_BW_40) ? "40" : "", 1128da6a4352SJohannes Berg (status->bw == RATE_INFO_BW_80) ? "80" : "", 1129da6a4352SJohannes Berg (status->bw == RATE_INFO_BW_160) ? "160" : "", 11307fdd69c5SJohannes Berg status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "", 113173539b40SJanusz Dziedzic status->rate_idx, 11328613c948SJohannes Berg status->nss, 113373539b40SJanusz Dziedzic status->freq, 113487326c97SJanusz Dziedzic status->band, status->flag, 113578433f96SJanusz Dziedzic !!(status->flag & RX_FLAG_FAILED_FCS_CRC), 113676f5329aSJanusz Dziedzic !!(status->flag & RX_FLAG_MMIC_ERROR), 113776f5329aSJanusz Dziedzic !!(status->flag & RX_FLAG_AMSDU_MORE)); 11387aa7a72aSMichal Kazior ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ", 113985f6d7cfSJanusz Dziedzic skb->data, skb->len); 11405ce8e7fdSRajkumar Manoharan trace_ath10k_rx_hdr(ar, skb->data, skb->len); 11415ce8e7fdSRajkumar Manoharan trace_ath10k_rx_payload(ar, skb->data, skb->len); 114273539b40SJanusz Dziedzic 11433c97f5deSRajkumar Manoharan ieee80211_rx_napi(ar->hw, NULL, skb, &ar->napi); 114473539b40SJanusz Dziedzic } 114573539b40SJanusz Dziedzic 114648f4ca34SMichal Kazior static int ath10k_htt_rx_nwifi_hdrlen(struct ath10k *ar, 114748f4ca34SMichal Kazior struct ieee80211_hdr *hdr) 1148d960c369SMichal Kazior { 114948f4ca34SMichal Kazior int len = 
ieee80211_hdrlen(hdr->frame_control); 115048f4ca34SMichal Kazior 115148f4ca34SMichal Kazior if (!test_bit(ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING, 1152c4cdf753SKalle Valo ar->running_fw->fw_file.fw_features)) 115348f4ca34SMichal Kazior len = round_up(len, 4); 115448f4ca34SMichal Kazior 115548f4ca34SMichal Kazior return len; 1156d960c369SMichal Kazior } 1157d960c369SMichal Kazior 1158581c25f8SMichal Kazior static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar, 1159581c25f8SMichal Kazior struct sk_buff *msdu, 1160581c25f8SMichal Kazior struct ieee80211_rx_status *status, 1161581c25f8SMichal Kazior enum htt_rx_mpdu_encrypt_type enctype, 1162581c25f8SMichal Kazior bool is_decrypted) 11635e3dd157SKalle Valo { 1164f6dc2095SMichal Kazior struct ieee80211_hdr *hdr; 1165581c25f8SMichal Kazior struct htt_rx_desc *rxd; 1166581c25f8SMichal Kazior size_t hdr_len; 1167581c25f8SMichal Kazior size_t crypto_len; 1168581c25f8SMichal Kazior bool is_first; 1169581c25f8SMichal Kazior bool is_last; 11705e3dd157SKalle Valo 1171581c25f8SMichal Kazior rxd = (void *)msdu->data - sizeof(*rxd); 11721f5dbfbbSPeter Oh is_first = !!(rxd->msdu_end.common.info0 & 1173581c25f8SMichal Kazior __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU)); 11741f5dbfbbSPeter Oh is_last = !!(rxd->msdu_end.common.info0 & 1175581c25f8SMichal Kazior __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU)); 11769aa505d2SMichal Kazior 1177581c25f8SMichal Kazior /* Delivered decapped frame: 1178581c25f8SMichal Kazior * [802.11 header] 1179581c25f8SMichal Kazior * [crypto param] <-- can be trimmed if !fcs_err && 1180581c25f8SMichal Kazior * !decrypt_err && !peer_idx_invalid 1181581c25f8SMichal Kazior * [amsdu header] <-- only if A-MSDU 1182581c25f8SMichal Kazior * [rfc1042/llc] 1183581c25f8SMichal Kazior * [payload] 1184581c25f8SMichal Kazior * [FCS] <-- at end, needs to be trimmed 1185581c25f8SMichal Kazior */ 11865e3dd157SKalle Valo 1187581c25f8SMichal Kazior /* This probably shouldn't happen but warn just in case */ 
1188581c25f8SMichal Kazior if (unlikely(WARN_ON_ONCE(!is_first))) 1189581c25f8SMichal Kazior return; 1190581c25f8SMichal Kazior 1191581c25f8SMichal Kazior /* This probably shouldn't happen but warn just in case */ 1192581c25f8SMichal Kazior if (unlikely(WARN_ON_ONCE(!(is_first && is_last)))) 1193581c25f8SMichal Kazior return; 1194581c25f8SMichal Kazior 1195581c25f8SMichal Kazior skb_trim(msdu, msdu->len - FCS_LEN); 1196581c25f8SMichal Kazior 1197581c25f8SMichal Kazior /* In most cases this will be true for sniffed frames. It makes sense 1198ccec9038SDavid Liu * to deliver them as-is without stripping the crypto param. This is 1199ccec9038SDavid Liu * necessary for software based decryption. 1200581c25f8SMichal Kazior * 1201581c25f8SMichal Kazior * If there's no error then the frame is decrypted. At least that is 1202581c25f8SMichal Kazior * the case for frames that come in via fragmented rx indication. 1203581c25f8SMichal Kazior */ 1204581c25f8SMichal Kazior if (!is_decrypted) 1205581c25f8SMichal Kazior return; 1206581c25f8SMichal Kazior 1207581c25f8SMichal Kazior /* The payload is decrypted so strip crypto params. Start from tail 1208581c25f8SMichal Kazior * since hdr is used to compute some stuff. 
1209581c25f8SMichal Kazior */ 1210581c25f8SMichal Kazior 1211581c25f8SMichal Kazior hdr = (void *)msdu->data; 1212581c25f8SMichal Kazior 1213581c25f8SMichal Kazior /* Tail */ 12147eccb738SVasanthakumar Thiagarajan if (status->flag & RX_FLAG_IV_STRIPPED) { 121560549cabSGrzegorz Bajorski skb_trim(msdu, msdu->len - 1216307aeb31SVasanthakumar Thiagarajan ath10k_htt_rx_crypto_mic_len(ar, enctype)); 1217307aeb31SVasanthakumar Thiagarajan 1218307aeb31SVasanthakumar Thiagarajan skb_trim(msdu, msdu->len - 1219307aeb31SVasanthakumar Thiagarajan ath10k_htt_rx_crypto_icv_len(ar, enctype)); 12207eccb738SVasanthakumar Thiagarajan } else { 12217eccb738SVasanthakumar Thiagarajan /* MIC */ 1222307aeb31SVasanthakumar Thiagarajan if (status->flag & RX_FLAG_MIC_STRIPPED) 1223307aeb31SVasanthakumar Thiagarajan skb_trim(msdu, msdu->len - 1224307aeb31SVasanthakumar Thiagarajan ath10k_htt_rx_crypto_mic_len(ar, enctype)); 12257eccb738SVasanthakumar Thiagarajan 12267eccb738SVasanthakumar Thiagarajan /* ICV */ 1227307aeb31SVasanthakumar Thiagarajan if (status->flag & RX_FLAG_ICV_STRIPPED) 12287eccb738SVasanthakumar Thiagarajan skb_trim(msdu, msdu->len - 1229307aeb31SVasanthakumar Thiagarajan ath10k_htt_rx_crypto_icv_len(ar, enctype)); 12307eccb738SVasanthakumar Thiagarajan } 1231581c25f8SMichal Kazior 1232581c25f8SMichal Kazior /* MMIC */ 123360549cabSGrzegorz Bajorski if ((status->flag & RX_FLAG_MMIC_STRIPPED) && 123460549cabSGrzegorz Bajorski !ieee80211_has_morefrags(hdr->frame_control) && 1235581c25f8SMichal Kazior enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA) 1236307aeb31SVasanthakumar Thiagarajan skb_trim(msdu, msdu->len - MICHAEL_MIC_LEN); 1237581c25f8SMichal Kazior 1238581c25f8SMichal Kazior /* Head */ 123960549cabSGrzegorz Bajorski if (status->flag & RX_FLAG_IV_STRIPPED) { 1240f6dc2095SMichal Kazior hdr_len = ieee80211_hdrlen(hdr->frame_control); 1241581c25f8SMichal Kazior crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype); 12425e3dd157SKalle Valo 1243581c25f8SMichal Kazior 
memmove((void *)msdu->data + crypto_len, 1244581c25f8SMichal Kazior (void *)msdu->data, hdr_len); 1245581c25f8SMichal Kazior skb_pull(msdu, crypto_len); 12465e3dd157SKalle Valo } 124760549cabSGrzegorz Bajorski } 12485e3dd157SKalle Valo 1249581c25f8SMichal Kazior static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar, 1250581c25f8SMichal Kazior struct sk_buff *msdu, 1251581c25f8SMichal Kazior struct ieee80211_rx_status *status, 12527eccb738SVasanthakumar Thiagarajan const u8 first_hdr[64], 12537eccb738SVasanthakumar Thiagarajan enum htt_rx_mpdu_encrypt_type enctype) 1254581c25f8SMichal Kazior { 1255581c25f8SMichal Kazior struct ieee80211_hdr *hdr; 12569e19e132SVasanthakumar Thiagarajan struct htt_rx_desc *rxd; 1257581c25f8SMichal Kazior size_t hdr_len; 1258581c25f8SMichal Kazior u8 da[ETH_ALEN]; 1259581c25f8SMichal Kazior u8 sa[ETH_ALEN]; 12609e19e132SVasanthakumar Thiagarajan int l3_pad_bytes; 12617eccb738SVasanthakumar Thiagarajan int bytes_aligned = ar->hw_params.decap_align_bytes; 1262581c25f8SMichal Kazior 1263581c25f8SMichal Kazior /* Delivered decapped frame: 1264581c25f8SMichal Kazior * [nwifi 802.11 header] <-- replaced with 802.11 hdr 1265581c25f8SMichal Kazior * [rfc1042/llc] 1266581c25f8SMichal Kazior * 1267581c25f8SMichal Kazior * Note: The nwifi header doesn't have QoS Control and is 1268581c25f8SMichal Kazior * (always?) a 3addr frame. 1269581c25f8SMichal Kazior * 1270581c25f8SMichal Kazior * Note2: There's no A-MSDU subframe header. Even if it's part 1271581c25f8SMichal Kazior * of an A-MSDU. 
1272581c25f8SMichal Kazior */ 1273581c25f8SMichal Kazior 127472bdeb86SMichal Kazior /* pull decapped header and copy SA & DA */ 12759e19e132SVasanthakumar Thiagarajan rxd = (void *)msdu->data - sizeof(*rxd); 12769e19e132SVasanthakumar Thiagarajan 12779e19e132SVasanthakumar Thiagarajan l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd); 12789e19e132SVasanthakumar Thiagarajan skb_put(msdu, l3_pad_bytes); 12799e19e132SVasanthakumar Thiagarajan 12809e19e132SVasanthakumar Thiagarajan hdr = (struct ieee80211_hdr *)(msdu->data + l3_pad_bytes); 1281b8d55fcaSYanbo Li 128248f4ca34SMichal Kazior hdr_len = ath10k_htt_rx_nwifi_hdrlen(ar, hdr); 1283b25f32cbSKalle Valo ether_addr_copy(da, ieee80211_get_DA(hdr)); 1284b25f32cbSKalle Valo ether_addr_copy(sa, ieee80211_get_SA(hdr)); 1285581c25f8SMichal Kazior skb_pull(msdu, hdr_len); 1286784f69d3SMichal Kazior 1287784f69d3SMichal Kazior /* push original 802.11 header */ 1288581c25f8SMichal Kazior hdr = (struct ieee80211_hdr *)first_hdr; 1289784f69d3SMichal Kazior hdr_len = ieee80211_hdrlen(hdr->frame_control); 12907eccb738SVasanthakumar Thiagarajan 12917eccb738SVasanthakumar Thiagarajan if (!(status->flag & RX_FLAG_IV_STRIPPED)) { 12927eccb738SVasanthakumar Thiagarajan memcpy(skb_push(msdu, 12937eccb738SVasanthakumar Thiagarajan ath10k_htt_rx_crypto_param_len(ar, enctype)), 12947eccb738SVasanthakumar Thiagarajan (void *)hdr + round_up(hdr_len, bytes_aligned), 12957eccb738SVasanthakumar Thiagarajan ath10k_htt_rx_crypto_param_len(ar, enctype)); 12967eccb738SVasanthakumar Thiagarajan } 12977eccb738SVasanthakumar Thiagarajan 1298581c25f8SMichal Kazior memcpy(skb_push(msdu, hdr_len), hdr, hdr_len); 1299784f69d3SMichal Kazior 130072bdeb86SMichal Kazior /* original 802.11 header has a different DA and in 130172bdeb86SMichal Kazior * case of 4addr it may also have different SA 130272bdeb86SMichal Kazior */ 1303581c25f8SMichal Kazior hdr = (struct ieee80211_hdr *)msdu->data; 1304b25f32cbSKalle Valo 
ether_addr_copy(ieee80211_get_DA(hdr), da); 1305b25f32cbSKalle Valo ether_addr_copy(ieee80211_get_SA(hdr), sa); 13065e3dd157SKalle Valo } 13075e3dd157SKalle Valo 1308581c25f8SMichal Kazior static void *ath10k_htt_rx_h_find_rfc1042(struct ath10k *ar, 1309581c25f8SMichal Kazior struct sk_buff *msdu, 1310581c25f8SMichal Kazior enum htt_rx_mpdu_encrypt_type enctype) 13115e3dd157SKalle Valo { 13125e3dd157SKalle Valo struct ieee80211_hdr *hdr; 1313581c25f8SMichal Kazior struct htt_rx_desc *rxd; 1314581c25f8SMichal Kazior size_t hdr_len, crypto_len; 1315e3fbf8d2SMichal Kazior void *rfc1042; 1316581c25f8SMichal Kazior bool is_first, is_last, is_amsdu; 13172f38c3c0SVasanthakumar Thiagarajan int bytes_aligned = ar->hw_params.decap_align_bytes; 13185e3dd157SKalle Valo 1319581c25f8SMichal Kazior rxd = (void *)msdu->data - sizeof(*rxd); 1320581c25f8SMichal Kazior hdr = (void *)rxd->rx_hdr_status; 13215e3dd157SKalle Valo 13221f5dbfbbSPeter Oh is_first = !!(rxd->msdu_end.common.info0 & 1323581c25f8SMichal Kazior __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU)); 13241f5dbfbbSPeter Oh is_last = !!(rxd->msdu_end.common.info0 & 1325581c25f8SMichal Kazior __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU)); 1326581c25f8SMichal Kazior is_amsdu = !(is_first && is_last); 1327e3fbf8d2SMichal Kazior 1328e3fbf8d2SMichal Kazior rfc1042 = hdr; 1329e3fbf8d2SMichal Kazior 1330581c25f8SMichal Kazior if (is_first) { 1331581c25f8SMichal Kazior hdr_len = ieee80211_hdrlen(hdr->frame_control); 1332581c25f8SMichal Kazior crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype); 1333e3fbf8d2SMichal Kazior 13342f38c3c0SVasanthakumar Thiagarajan rfc1042 += round_up(hdr_len, bytes_aligned) + 13352f38c3c0SVasanthakumar Thiagarajan round_up(crypto_len, bytes_aligned); 13365e3dd157SKalle Valo } 13375e3dd157SKalle Valo 1338581c25f8SMichal Kazior if (is_amsdu) 1339581c25f8SMichal Kazior rfc1042 += sizeof(struct amsdu_subframe_hdr); 1340f6dc2095SMichal Kazior 1341581c25f8SMichal Kazior return rfc1042; 1342581c25f8SMichal 
Kazior } 1343581c25f8SMichal Kazior 1344581c25f8SMichal Kazior static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar, 1345581c25f8SMichal Kazior struct sk_buff *msdu, 1346581c25f8SMichal Kazior struct ieee80211_rx_status *status, 1347581c25f8SMichal Kazior const u8 first_hdr[64], 1348581c25f8SMichal Kazior enum htt_rx_mpdu_encrypt_type enctype) 1349581c25f8SMichal Kazior { 1350581c25f8SMichal Kazior struct ieee80211_hdr *hdr; 1351581c25f8SMichal Kazior struct ethhdr *eth; 1352581c25f8SMichal Kazior size_t hdr_len; 1353581c25f8SMichal Kazior void *rfc1042; 1354581c25f8SMichal Kazior u8 da[ETH_ALEN]; 1355581c25f8SMichal Kazior u8 sa[ETH_ALEN]; 13569e19e132SVasanthakumar Thiagarajan int l3_pad_bytes; 13579e19e132SVasanthakumar Thiagarajan struct htt_rx_desc *rxd; 13587eccb738SVasanthakumar Thiagarajan int bytes_aligned = ar->hw_params.decap_align_bytes; 1359581c25f8SMichal Kazior 1360581c25f8SMichal Kazior /* Delivered decapped frame: 1361581c25f8SMichal Kazior * [eth header] <-- replaced with 802.11 hdr & rfc1042/llc 1362581c25f8SMichal Kazior * [payload] 1363581c25f8SMichal Kazior */ 1364581c25f8SMichal Kazior 1365581c25f8SMichal Kazior rfc1042 = ath10k_htt_rx_h_find_rfc1042(ar, msdu, enctype); 1366581c25f8SMichal Kazior if (WARN_ON_ONCE(!rfc1042)) 1367581c25f8SMichal Kazior return; 1368581c25f8SMichal Kazior 13699e19e132SVasanthakumar Thiagarajan rxd = (void *)msdu->data - sizeof(*rxd); 13709e19e132SVasanthakumar Thiagarajan l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd); 13719e19e132SVasanthakumar Thiagarajan skb_put(msdu, l3_pad_bytes); 13729e19e132SVasanthakumar Thiagarajan skb_pull(msdu, l3_pad_bytes); 13739e19e132SVasanthakumar Thiagarajan 1374581c25f8SMichal Kazior /* pull decapped header and copy SA & DA */ 1375581c25f8SMichal Kazior eth = (struct ethhdr *)msdu->data; 1376581c25f8SMichal Kazior ether_addr_copy(da, eth->h_dest); 1377581c25f8SMichal Kazior ether_addr_copy(sa, eth->h_source); 1378581c25f8SMichal Kazior skb_pull(msdu, 
sizeof(struct ethhdr)); 1379581c25f8SMichal Kazior 1380581c25f8SMichal Kazior /* push rfc1042/llc/snap */ 1381581c25f8SMichal Kazior memcpy(skb_push(msdu, sizeof(struct rfc1042_hdr)), rfc1042, 1382581c25f8SMichal Kazior sizeof(struct rfc1042_hdr)); 1383581c25f8SMichal Kazior 1384581c25f8SMichal Kazior /* push original 802.11 header */ 1385581c25f8SMichal Kazior hdr = (struct ieee80211_hdr *)first_hdr; 1386581c25f8SMichal Kazior hdr_len = ieee80211_hdrlen(hdr->frame_control); 13877eccb738SVasanthakumar Thiagarajan 13887eccb738SVasanthakumar Thiagarajan if (!(status->flag & RX_FLAG_IV_STRIPPED)) { 13897eccb738SVasanthakumar Thiagarajan memcpy(skb_push(msdu, 13907eccb738SVasanthakumar Thiagarajan ath10k_htt_rx_crypto_param_len(ar, enctype)), 13917eccb738SVasanthakumar Thiagarajan (void *)hdr + round_up(hdr_len, bytes_aligned), 13927eccb738SVasanthakumar Thiagarajan ath10k_htt_rx_crypto_param_len(ar, enctype)); 13937eccb738SVasanthakumar Thiagarajan } 13947eccb738SVasanthakumar Thiagarajan 1395581c25f8SMichal Kazior memcpy(skb_push(msdu, hdr_len), hdr, hdr_len); 1396581c25f8SMichal Kazior 1397581c25f8SMichal Kazior /* original 802.11 header has a different DA and in 1398581c25f8SMichal Kazior * case of 4addr it may also have different SA 1399581c25f8SMichal Kazior */ 1400581c25f8SMichal Kazior hdr = (struct ieee80211_hdr *)msdu->data; 1401581c25f8SMichal Kazior ether_addr_copy(ieee80211_get_DA(hdr), da); 1402581c25f8SMichal Kazior ether_addr_copy(ieee80211_get_SA(hdr), sa); 1403581c25f8SMichal Kazior } 1404581c25f8SMichal Kazior 1405581c25f8SMichal Kazior static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar, 1406581c25f8SMichal Kazior struct sk_buff *msdu, 1407581c25f8SMichal Kazior struct ieee80211_rx_status *status, 14087eccb738SVasanthakumar Thiagarajan const u8 first_hdr[64], 14097eccb738SVasanthakumar Thiagarajan enum htt_rx_mpdu_encrypt_type enctype) 1410581c25f8SMichal Kazior { 1411581c25f8SMichal Kazior struct ieee80211_hdr *hdr; 1412581c25f8SMichal 
Kazior size_t hdr_len; 14139e19e132SVasanthakumar Thiagarajan int l3_pad_bytes; 14149e19e132SVasanthakumar Thiagarajan struct htt_rx_desc *rxd; 14157eccb738SVasanthakumar Thiagarajan int bytes_aligned = ar->hw_params.decap_align_bytes; 1416581c25f8SMichal Kazior 1417581c25f8SMichal Kazior /* Delivered decapped frame: 1418581c25f8SMichal Kazior * [amsdu header] <-- replaced with 802.11 hdr 1419581c25f8SMichal Kazior * [rfc1042/llc] 1420581c25f8SMichal Kazior * [payload] 1421581c25f8SMichal Kazior */ 1422581c25f8SMichal Kazior 14239e19e132SVasanthakumar Thiagarajan rxd = (void *)msdu->data - sizeof(*rxd); 14249e19e132SVasanthakumar Thiagarajan l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd); 14259e19e132SVasanthakumar Thiagarajan 14269e19e132SVasanthakumar Thiagarajan skb_put(msdu, l3_pad_bytes); 14279e19e132SVasanthakumar Thiagarajan skb_pull(msdu, sizeof(struct amsdu_subframe_hdr) + l3_pad_bytes); 1428581c25f8SMichal Kazior 1429581c25f8SMichal Kazior hdr = (struct ieee80211_hdr *)first_hdr; 1430581c25f8SMichal Kazior hdr_len = ieee80211_hdrlen(hdr->frame_control); 14317eccb738SVasanthakumar Thiagarajan 14327eccb738SVasanthakumar Thiagarajan if (!(status->flag & RX_FLAG_IV_STRIPPED)) { 14337eccb738SVasanthakumar Thiagarajan memcpy(skb_push(msdu, 14347eccb738SVasanthakumar Thiagarajan ath10k_htt_rx_crypto_param_len(ar, enctype)), 14357eccb738SVasanthakumar Thiagarajan (void *)hdr + round_up(hdr_len, bytes_aligned), 14367eccb738SVasanthakumar Thiagarajan ath10k_htt_rx_crypto_param_len(ar, enctype)); 14377eccb738SVasanthakumar Thiagarajan } 14387eccb738SVasanthakumar Thiagarajan 1439581c25f8SMichal Kazior memcpy(skb_push(msdu, hdr_len), hdr, hdr_len); 1440581c25f8SMichal Kazior } 1441581c25f8SMichal Kazior 1442581c25f8SMichal Kazior static void ath10k_htt_rx_h_undecap(struct ath10k *ar, 1443581c25f8SMichal Kazior struct sk_buff *msdu, 1444581c25f8SMichal Kazior struct ieee80211_rx_status *status, 1445581c25f8SMichal Kazior u8 first_hdr[64], 
1446581c25f8SMichal Kazior enum htt_rx_mpdu_encrypt_type enctype, 1447581c25f8SMichal Kazior bool is_decrypted) 1448581c25f8SMichal Kazior { 1449581c25f8SMichal Kazior struct htt_rx_desc *rxd; 1450581c25f8SMichal Kazior enum rx_msdu_decap_format decap; 1451581c25f8SMichal Kazior 1452581c25f8SMichal Kazior /* First msdu's decapped header: 1453581c25f8SMichal Kazior * [802.11 header] <-- padded to 4 bytes long 1454581c25f8SMichal Kazior * [crypto param] <-- padded to 4 bytes long 1455581c25f8SMichal Kazior * [amsdu header] <-- only if A-MSDU 1456581c25f8SMichal Kazior * [rfc1042/llc] 1457581c25f8SMichal Kazior * 1458581c25f8SMichal Kazior * Other (2nd, 3rd, ..) msdu's decapped header: 1459581c25f8SMichal Kazior * [amsdu header] <-- only if A-MSDU 1460581c25f8SMichal Kazior * [rfc1042/llc] 1461581c25f8SMichal Kazior */ 1462581c25f8SMichal Kazior 1463581c25f8SMichal Kazior rxd = (void *)msdu->data - sizeof(*rxd); 14641f5dbfbbSPeter Oh decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1), 1465581c25f8SMichal Kazior RX_MSDU_START_INFO1_DECAP_FORMAT); 1466581c25f8SMichal Kazior 1467581c25f8SMichal Kazior switch (decap) { 1468581c25f8SMichal Kazior case RX_MSDU_DECAP_RAW: 1469581c25f8SMichal Kazior ath10k_htt_rx_h_undecap_raw(ar, msdu, status, enctype, 1470581c25f8SMichal Kazior is_decrypted); 1471581c25f8SMichal Kazior break; 1472581c25f8SMichal Kazior case RX_MSDU_DECAP_NATIVE_WIFI: 14737eccb738SVasanthakumar Thiagarajan ath10k_htt_rx_h_undecap_nwifi(ar, msdu, status, first_hdr, 14747eccb738SVasanthakumar Thiagarajan enctype); 1475581c25f8SMichal Kazior break; 1476581c25f8SMichal Kazior case RX_MSDU_DECAP_ETHERNET2_DIX: 1477581c25f8SMichal Kazior ath10k_htt_rx_h_undecap_eth(ar, msdu, status, first_hdr, enctype); 1478581c25f8SMichal Kazior break; 1479581c25f8SMichal Kazior case RX_MSDU_DECAP_8023_SNAP_LLC: 14807eccb738SVasanthakumar Thiagarajan ath10k_htt_rx_h_undecap_snap(ar, msdu, status, first_hdr, 14817eccb738SVasanthakumar Thiagarajan enctype); 1482581c25f8SMichal 
Kazior break; 1483581c25f8SMichal Kazior } 14845e3dd157SKalle Valo } 14855e3dd157SKalle Valo 1486605f81aaSMichal Kazior static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb) 1487605f81aaSMichal Kazior { 1488605f81aaSMichal Kazior struct htt_rx_desc *rxd; 1489605f81aaSMichal Kazior u32 flags, info; 1490605f81aaSMichal Kazior bool is_ip4, is_ip6; 1491605f81aaSMichal Kazior bool is_tcp, is_udp; 1492605f81aaSMichal Kazior bool ip_csum_ok, tcpudp_csum_ok; 1493605f81aaSMichal Kazior 1494605f81aaSMichal Kazior rxd = (void *)skb->data - sizeof(*rxd); 1495605f81aaSMichal Kazior flags = __le32_to_cpu(rxd->attention.flags); 14961f5dbfbbSPeter Oh info = __le32_to_cpu(rxd->msdu_start.common.info1); 1497605f81aaSMichal Kazior 1498605f81aaSMichal Kazior is_ip4 = !!(info & RX_MSDU_START_INFO1_IPV4_PROTO); 1499605f81aaSMichal Kazior is_ip6 = !!(info & RX_MSDU_START_INFO1_IPV6_PROTO); 1500605f81aaSMichal Kazior is_tcp = !!(info & RX_MSDU_START_INFO1_TCP_PROTO); 1501605f81aaSMichal Kazior is_udp = !!(info & RX_MSDU_START_INFO1_UDP_PROTO); 1502605f81aaSMichal Kazior ip_csum_ok = !(flags & RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL); 1503605f81aaSMichal Kazior tcpudp_csum_ok = !(flags & RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL); 1504605f81aaSMichal Kazior 1505605f81aaSMichal Kazior if (!is_ip4 && !is_ip6) 1506605f81aaSMichal Kazior return CHECKSUM_NONE; 1507605f81aaSMichal Kazior if (!is_tcp && !is_udp) 1508605f81aaSMichal Kazior return CHECKSUM_NONE; 1509605f81aaSMichal Kazior if (!ip_csum_ok) 1510605f81aaSMichal Kazior return CHECKSUM_NONE; 1511605f81aaSMichal Kazior if (!tcpudp_csum_ok) 1512605f81aaSMichal Kazior return CHECKSUM_NONE; 1513605f81aaSMichal Kazior 1514605f81aaSMichal Kazior return CHECKSUM_UNNECESSARY; 1515605f81aaSMichal Kazior } 1516605f81aaSMichal Kazior 1517581c25f8SMichal Kazior static void ath10k_htt_rx_h_csum_offload(struct sk_buff *msdu) 1518581c25f8SMichal Kazior { 1519581c25f8SMichal Kazior msdu->ip_summed = ath10k_htt_rx_get_csum_state(msdu); 1520581c25f8SMichal 
Kazior } 1521581c25f8SMichal Kazior 1522581c25f8SMichal Kazior static void ath10k_htt_rx_h_mpdu(struct ath10k *ar, 1523581c25f8SMichal Kazior struct sk_buff_head *amsdu, 15247eccb738SVasanthakumar Thiagarajan struct ieee80211_rx_status *status, 1525caee728aSVasanthakumar Thiagarajan bool fill_crypt_header, 1526caee728aSVasanthakumar Thiagarajan u8 *rx_hdr, 1527caee728aSVasanthakumar Thiagarajan enum ath10k_pkt_rx_err *err) 1528581c25f8SMichal Kazior { 1529581c25f8SMichal Kazior struct sk_buff *first; 1530581c25f8SMichal Kazior struct sk_buff *last; 1531581c25f8SMichal Kazior struct sk_buff *msdu; 1532581c25f8SMichal Kazior struct htt_rx_desc *rxd; 1533581c25f8SMichal Kazior struct ieee80211_hdr *hdr; 1534581c25f8SMichal Kazior enum htt_rx_mpdu_encrypt_type enctype; 1535581c25f8SMichal Kazior u8 first_hdr[64]; 1536581c25f8SMichal Kazior u8 *qos; 1537581c25f8SMichal Kazior bool has_fcs_err; 1538581c25f8SMichal Kazior bool has_crypto_err; 1539581c25f8SMichal Kazior bool has_tkip_err; 1540581c25f8SMichal Kazior bool has_peer_idx_invalid; 1541581c25f8SMichal Kazior bool is_decrypted; 154260549cabSGrzegorz Bajorski bool is_mgmt; 1543581c25f8SMichal Kazior u32 attention; 1544581c25f8SMichal Kazior 1545581c25f8SMichal Kazior if (skb_queue_empty(amsdu)) 1546581c25f8SMichal Kazior return; 1547581c25f8SMichal Kazior 1548581c25f8SMichal Kazior first = skb_peek(amsdu); 1549581c25f8SMichal Kazior rxd = (void *)first->data - sizeof(*rxd); 1550581c25f8SMichal Kazior 155160549cabSGrzegorz Bajorski is_mgmt = !!(rxd->attention.flags & 155260549cabSGrzegorz Bajorski __cpu_to_le32(RX_ATTENTION_FLAGS_MGMT_TYPE)); 155360549cabSGrzegorz Bajorski 1554581c25f8SMichal Kazior enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0), 1555581c25f8SMichal Kazior RX_MPDU_START_INFO0_ENCRYPT_TYPE); 1556581c25f8SMichal Kazior 1557581c25f8SMichal Kazior /* First MSDU's Rx descriptor in an A-MSDU contains full 802.11 1558581c25f8SMichal Kazior * decapped header. It'll be used for undecapping of each MSDU. 
1559581c25f8SMichal Kazior */ 1560581c25f8SMichal Kazior hdr = (void *)rxd->rx_hdr_status; 15617eccb738SVasanthakumar Thiagarajan memcpy(first_hdr, hdr, RX_HTT_HDR_STATUS_LEN); 1562581c25f8SMichal Kazior 1563caee728aSVasanthakumar Thiagarajan if (rx_hdr) 1564caee728aSVasanthakumar Thiagarajan memcpy(rx_hdr, hdr, RX_HTT_HDR_STATUS_LEN); 1565caee728aSVasanthakumar Thiagarajan 1566581c25f8SMichal Kazior /* Each A-MSDU subframe will use the original header as the base and be 1567581c25f8SMichal Kazior * reported as a separate MSDU so strip the A-MSDU bit from QoS Ctl. 1568581c25f8SMichal Kazior */ 1569581c25f8SMichal Kazior hdr = (void *)first_hdr; 15707eccb738SVasanthakumar Thiagarajan 15717eccb738SVasanthakumar Thiagarajan if (ieee80211_is_data_qos(hdr->frame_control)) { 1572581c25f8SMichal Kazior qos = ieee80211_get_qos_ctl(hdr); 1573581c25f8SMichal Kazior qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT; 15747eccb738SVasanthakumar Thiagarajan } 1575581c25f8SMichal Kazior 1576581c25f8SMichal Kazior /* Some attention flags are valid only in the last MSDU. */ 1577581c25f8SMichal Kazior last = skb_peek_tail(amsdu); 1578581c25f8SMichal Kazior rxd = (void *)last->data - sizeof(*rxd); 1579581c25f8SMichal Kazior attention = __le32_to_cpu(rxd->attention.flags); 1580581c25f8SMichal Kazior 1581581c25f8SMichal Kazior has_fcs_err = !!(attention & RX_ATTENTION_FLAGS_FCS_ERR); 1582581c25f8SMichal Kazior has_crypto_err = !!(attention & RX_ATTENTION_FLAGS_DECRYPT_ERR); 1583581c25f8SMichal Kazior has_tkip_err = !!(attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR); 1584581c25f8SMichal Kazior has_peer_idx_invalid = !!(attention & RX_ATTENTION_FLAGS_PEER_IDX_INVALID); 1585581c25f8SMichal Kazior 1586581c25f8SMichal Kazior /* Note: If hardware captures an encrypted frame that it can't decrypt, 1587581c25f8SMichal Kazior * e.g. due to fcs error, missing peer or invalid key data it will 1588581c25f8SMichal Kazior * report the frame as raw. 
1589581c25f8SMichal Kazior */ 1590581c25f8SMichal Kazior is_decrypted = (enctype != HTT_RX_MPDU_ENCRYPT_NONE && 1591581c25f8SMichal Kazior !has_fcs_err && 1592581c25f8SMichal Kazior !has_crypto_err && 1593581c25f8SMichal Kazior !has_peer_idx_invalid); 1594581c25f8SMichal Kazior 1595581c25f8SMichal Kazior /* Clear per-MPDU flags while leaving per-PPDU flags intact. */ 1596581c25f8SMichal Kazior status->flag &= ~(RX_FLAG_FAILED_FCS_CRC | 1597581c25f8SMichal Kazior RX_FLAG_MMIC_ERROR | 1598581c25f8SMichal Kazior RX_FLAG_DECRYPTED | 1599581c25f8SMichal Kazior RX_FLAG_IV_STRIPPED | 160060549cabSGrzegorz Bajorski RX_FLAG_ONLY_MONITOR | 1601581c25f8SMichal Kazior RX_FLAG_MMIC_STRIPPED); 1602581c25f8SMichal Kazior 1603581c25f8SMichal Kazior if (has_fcs_err) 1604581c25f8SMichal Kazior status->flag |= RX_FLAG_FAILED_FCS_CRC; 1605581c25f8SMichal Kazior 1606581c25f8SMichal Kazior if (has_tkip_err) 1607581c25f8SMichal Kazior status->flag |= RX_FLAG_MMIC_ERROR; 1608581c25f8SMichal Kazior 1609caee728aSVasanthakumar Thiagarajan if (err) { 1610caee728aSVasanthakumar Thiagarajan if (has_fcs_err) 1611caee728aSVasanthakumar Thiagarajan *err = ATH10K_PKT_RX_ERR_FCS; 1612caee728aSVasanthakumar Thiagarajan else if (has_tkip_err) 1613caee728aSVasanthakumar Thiagarajan *err = ATH10K_PKT_RX_ERR_TKIP; 1614caee728aSVasanthakumar Thiagarajan else if (has_crypto_err) 1615caee728aSVasanthakumar Thiagarajan *err = ATH10K_PKT_RX_ERR_CRYPT; 1616caee728aSVasanthakumar Thiagarajan else if (has_peer_idx_invalid) 1617caee728aSVasanthakumar Thiagarajan *err = ATH10K_PKT_RX_ERR_PEER_IDX_INVAL; 1618caee728aSVasanthakumar Thiagarajan } 1619caee728aSVasanthakumar Thiagarajan 162060549cabSGrzegorz Bajorski /* Firmware reports all necessary management frames via WMI already. 162160549cabSGrzegorz Bajorski * They are not reported to monitor interfaces at all so pass the ones 162260549cabSGrzegorz Bajorski * coming via HTT to monitor interfaces instead. 
This simplifies 162360549cabSGrzegorz Bajorski * matters a lot. 162460549cabSGrzegorz Bajorski */ 162560549cabSGrzegorz Bajorski if (is_mgmt) 162660549cabSGrzegorz Bajorski status->flag |= RX_FLAG_ONLY_MONITOR; 162760549cabSGrzegorz Bajorski 162860549cabSGrzegorz Bajorski if (is_decrypted) { 162960549cabSGrzegorz Bajorski status->flag |= RX_FLAG_DECRYPTED; 163060549cabSGrzegorz Bajorski 163160549cabSGrzegorz Bajorski if (likely(!is_mgmt)) 16327eccb738SVasanthakumar Thiagarajan status->flag |= RX_FLAG_MMIC_STRIPPED; 16337eccb738SVasanthakumar Thiagarajan 16347eccb738SVasanthakumar Thiagarajan if (fill_crypt_header) 16357eccb738SVasanthakumar Thiagarajan status->flag |= RX_FLAG_MIC_STRIPPED | 16367eccb738SVasanthakumar Thiagarajan RX_FLAG_ICV_STRIPPED; 16377eccb738SVasanthakumar Thiagarajan else 16387eccb738SVasanthakumar Thiagarajan status->flag |= RX_FLAG_IV_STRIPPED; 163960549cabSGrzegorz Bajorski } 1640581c25f8SMichal Kazior 1641581c25f8SMichal Kazior skb_queue_walk(amsdu, msdu) { 1642581c25f8SMichal Kazior ath10k_htt_rx_h_csum_offload(msdu); 1643581c25f8SMichal Kazior ath10k_htt_rx_h_undecap(ar, msdu, status, first_hdr, enctype, 1644581c25f8SMichal Kazior is_decrypted); 1645581c25f8SMichal Kazior 1646581c25f8SMichal Kazior /* Undecapping involves copying the original 802.11 header back 1647581c25f8SMichal Kazior * to sk_buff. If frame is protected and hardware has decrypted 1648581c25f8SMichal Kazior * it then remove the protected bit. 
1649581c25f8SMichal Kazior */ 1650581c25f8SMichal Kazior if (!is_decrypted) 1651581c25f8SMichal Kazior continue; 165260549cabSGrzegorz Bajorski if (is_mgmt) 165360549cabSGrzegorz Bajorski continue; 1654581c25f8SMichal Kazior 16557eccb738SVasanthakumar Thiagarajan if (fill_crypt_header) 16567eccb738SVasanthakumar Thiagarajan continue; 16577eccb738SVasanthakumar Thiagarajan 1658581c25f8SMichal Kazior hdr = (void *)msdu->data; 1659581c25f8SMichal Kazior hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED); 1660581c25f8SMichal Kazior } 1661581c25f8SMichal Kazior } 1662581c25f8SMichal Kazior 1663deba1b9eSRajkumar Manoharan static void ath10k_htt_rx_h_enqueue(struct ath10k *ar, 1664581c25f8SMichal Kazior struct sk_buff_head *amsdu, 1665581c25f8SMichal Kazior struct ieee80211_rx_status *status) 1666581c25f8SMichal Kazior { 1667581c25f8SMichal Kazior struct sk_buff *msdu; 16687eccb738SVasanthakumar Thiagarajan struct sk_buff *first_subframe; 16697eccb738SVasanthakumar Thiagarajan 16707eccb738SVasanthakumar Thiagarajan first_subframe = skb_peek(amsdu); 1671581c25f8SMichal Kazior 1672581c25f8SMichal Kazior while ((msdu = __skb_dequeue(amsdu))) { 1673581c25f8SMichal Kazior /* Setup per-MSDU flags */ 1674581c25f8SMichal Kazior if (skb_queue_empty(amsdu)) 1675581c25f8SMichal Kazior status->flag &= ~RX_FLAG_AMSDU_MORE; 1676581c25f8SMichal Kazior else 1677581c25f8SMichal Kazior status->flag |= RX_FLAG_AMSDU_MORE; 1678581c25f8SMichal Kazior 16797eccb738SVasanthakumar Thiagarajan if (msdu == first_subframe) { 16807eccb738SVasanthakumar Thiagarajan first_subframe = NULL; 16817eccb738SVasanthakumar Thiagarajan status->flag &= ~RX_FLAG_ALLOW_SAME_PN; 16827eccb738SVasanthakumar Thiagarajan } else { 16837eccb738SVasanthakumar Thiagarajan status->flag |= RX_FLAG_ALLOW_SAME_PN; 16847eccb738SVasanthakumar Thiagarajan } 16857eccb738SVasanthakumar Thiagarajan 1686deba1b9eSRajkumar Manoharan ath10k_htt_rx_h_queue_msdu(ar, status, msdu); 1687581c25f8SMichal Kazior } 
1688581c25f8SMichal Kazior } 1689581c25f8SMichal Kazior 1690caee728aSVasanthakumar Thiagarajan static int ath10k_unchain_msdu(struct sk_buff_head *amsdu, 1691caee728aSVasanthakumar Thiagarajan unsigned long int *unchain_cnt) 1692bfa35368SBen Greear { 16939aa505d2SMichal Kazior struct sk_buff *skb, *first; 1694bfa35368SBen Greear int space; 1695bfa35368SBen Greear int total_len = 0; 1696caee728aSVasanthakumar Thiagarajan int amsdu_len = skb_queue_len(amsdu); 1697bfa35368SBen Greear 1698bfa35368SBen Greear /* TODO: Might could optimize this by using 1699bfa35368SBen Greear * skb_try_coalesce or similar method to 1700bfa35368SBen Greear * decrease copying, or maybe get mac80211 to 1701bfa35368SBen Greear * provide a way to just receive a list of 1702bfa35368SBen Greear * skb? 1703bfa35368SBen Greear */ 1704bfa35368SBen Greear 17059aa505d2SMichal Kazior first = __skb_dequeue(amsdu); 1706bfa35368SBen Greear 1707bfa35368SBen Greear /* Allocate total length all at once. */ 17089aa505d2SMichal Kazior skb_queue_walk(amsdu, skb) 17099aa505d2SMichal Kazior total_len += skb->len; 1710bfa35368SBen Greear 17119aa505d2SMichal Kazior space = total_len - skb_tailroom(first); 1712bfa35368SBen Greear if ((space > 0) && 17139aa505d2SMichal Kazior (pskb_expand_head(first, 0, space, GFP_ATOMIC) < 0)) { 1714bfa35368SBen Greear /* TODO: bump some rx-oom error stat */ 1715bfa35368SBen Greear /* put it back together so we can free the 1716bfa35368SBen Greear * whole list at once. 
1717bfa35368SBen Greear */ 17189aa505d2SMichal Kazior __skb_queue_head(amsdu, first); 1719bfa35368SBen Greear return -1; 1720bfa35368SBen Greear } 1721bfa35368SBen Greear 1722bfa35368SBen Greear /* Walk list again, copying contents into 1723bfa35368SBen Greear * msdu_head 1724bfa35368SBen Greear */ 17259aa505d2SMichal Kazior while ((skb = __skb_dequeue(amsdu))) { 17269aa505d2SMichal Kazior skb_copy_from_linear_data(skb, skb_put(first, skb->len), 17279aa505d2SMichal Kazior skb->len); 17289aa505d2SMichal Kazior dev_kfree_skb_any(skb); 1729bfa35368SBen Greear } 1730bfa35368SBen Greear 17319aa505d2SMichal Kazior __skb_queue_head(amsdu, first); 1732caee728aSVasanthakumar Thiagarajan 1733caee728aSVasanthakumar Thiagarajan *unchain_cnt += amsdu_len - 1; 1734caee728aSVasanthakumar Thiagarajan 1735bfa35368SBen Greear return 0; 1736bfa35368SBen Greear } 1737bfa35368SBen Greear 1738581c25f8SMichal Kazior static void ath10k_htt_rx_h_unchain(struct ath10k *ar, 1739caee728aSVasanthakumar Thiagarajan struct sk_buff_head *amsdu, 1740caee728aSVasanthakumar Thiagarajan unsigned long int *drop_cnt, 1741caee728aSVasanthakumar Thiagarajan unsigned long int *unchain_cnt) 17422acc4eb2SJanusz Dziedzic { 1743581c25f8SMichal Kazior struct sk_buff *first; 1744581c25f8SMichal Kazior struct htt_rx_desc *rxd; 1745581c25f8SMichal Kazior enum rx_msdu_decap_format decap; 17467aa7a72aSMichal Kazior 1747581c25f8SMichal Kazior first = skb_peek(amsdu); 1748581c25f8SMichal Kazior rxd = (void *)first->data - sizeof(*rxd); 17491f5dbfbbSPeter Oh decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1), 1750581c25f8SMichal Kazior RX_MSDU_START_INFO1_DECAP_FORMAT); 1751581c25f8SMichal Kazior 1752581c25f8SMichal Kazior /* FIXME: Current unchaining logic can only handle simple case of raw 1753581c25f8SMichal Kazior * msdu chaining. If decapping is other than raw the chaining may be 1754581c25f8SMichal Kazior * more complex and this isn't handled by the current code. 
Don't even 1755581c25f8SMichal Kazior * try re-constructing such frames - it'll be pretty much garbage. 1756581c25f8SMichal Kazior */ 1757581c25f8SMichal Kazior if (decap != RX_MSDU_DECAP_RAW || 1758581c25f8SMichal Kazior skb_queue_len(amsdu) != 1 + rxd->frag_info.ring2_more_count) { 1759caee728aSVasanthakumar Thiagarajan *drop_cnt += skb_queue_len(amsdu); 1760581c25f8SMichal Kazior __skb_queue_purge(amsdu); 1761581c25f8SMichal Kazior return; 1762581c25f8SMichal Kazior } 1763581c25f8SMichal Kazior 1764caee728aSVasanthakumar Thiagarajan ath10k_unchain_msdu(amsdu, unchain_cnt); 1765581c25f8SMichal Kazior } 1766581c25f8SMichal Kazior 1767581c25f8SMichal Kazior static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar, 1768581c25f8SMichal Kazior struct sk_buff_head *amsdu, 1769581c25f8SMichal Kazior struct ieee80211_rx_status *rx_status) 1770581c25f8SMichal Kazior { 1771581c25f8SMichal Kazior /* FIXME: It might be a good idea to do some fuzzy-testing to drop 1772581c25f8SMichal Kazior * invalid/dangerous frames. 
1773581c25f8SMichal Kazior */ 1774581c25f8SMichal Kazior 1775581c25f8SMichal Kazior if (!rx_status->freq) { 1776984eb905SGabriel Craciunescu ath10k_dbg(ar, ATH10K_DBG_HTT, "no channel configured; ignoring frame(s)!\n"); 17772acc4eb2SJanusz Dziedzic return false; 17782acc4eb2SJanusz Dziedzic } 17792acc4eb2SJanusz Dziedzic 1780581c25f8SMichal Kazior if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) { 1781581c25f8SMichal Kazior ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx cac running\n"); 17822acc4eb2SJanusz Dziedzic return false; 17832acc4eb2SJanusz Dziedzic } 17842acc4eb2SJanusz Dziedzic 17852acc4eb2SJanusz Dziedzic return true; 17862acc4eb2SJanusz Dziedzic } 17872acc4eb2SJanusz Dziedzic 1788581c25f8SMichal Kazior static void ath10k_htt_rx_h_filter(struct ath10k *ar, 1789581c25f8SMichal Kazior struct sk_buff_head *amsdu, 1790caee728aSVasanthakumar Thiagarajan struct ieee80211_rx_status *rx_status, 1791caee728aSVasanthakumar Thiagarajan unsigned long int *drop_cnt) 1792581c25f8SMichal Kazior { 1793581c25f8SMichal Kazior if (skb_queue_empty(amsdu)) 1794581c25f8SMichal Kazior return; 1795581c25f8SMichal Kazior 1796581c25f8SMichal Kazior if (ath10k_htt_rx_amsdu_allowed(ar, amsdu, rx_status)) 1797581c25f8SMichal Kazior return; 1798581c25f8SMichal Kazior 1799caee728aSVasanthakumar Thiagarajan if (drop_cnt) 1800caee728aSVasanthakumar Thiagarajan *drop_cnt += skb_queue_len(amsdu); 1801caee728aSVasanthakumar Thiagarajan 1802581c25f8SMichal Kazior __skb_queue_purge(amsdu); 1803581c25f8SMichal Kazior } 1804581c25f8SMichal Kazior 180518235664SRajkumar Manoharan static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt) 180618235664SRajkumar Manoharan { 180718235664SRajkumar Manoharan struct ath10k *ar = htt->ar; 1808237e15dfSAshok Raj Nagarajan struct ieee80211_rx_status *rx_status = &htt->rx_status; 180918235664SRajkumar Manoharan struct sk_buff_head amsdu; 1810deba1b9eSRajkumar Manoharan int ret; 1811caee728aSVasanthakumar Thiagarajan unsigned long int drop_cnt = 0; 
1812caee728aSVasanthakumar Thiagarajan unsigned long int unchain_cnt = 0; 1813caee728aSVasanthakumar Thiagarajan unsigned long int drop_cnt_filter = 0; 1814caee728aSVasanthakumar Thiagarajan unsigned long int msdus_to_queue, num_msdus; 1815caee728aSVasanthakumar Thiagarajan enum ath10k_pkt_rx_err err = ATH10K_PKT_RX_ERR_MAX; 1816caee728aSVasanthakumar Thiagarajan u8 first_hdr[RX_HTT_HDR_STATUS_LEN]; 181718235664SRajkumar Manoharan 181818235664SRajkumar Manoharan __skb_queue_head_init(&amsdu); 181918235664SRajkumar Manoharan 182018235664SRajkumar Manoharan spin_lock_bh(&htt->rx_ring.lock); 182118235664SRajkumar Manoharan if (htt->rx_confused) { 182218235664SRajkumar Manoharan spin_unlock_bh(&htt->rx_ring.lock); 182318235664SRajkumar Manoharan return -EIO; 182418235664SRajkumar Manoharan } 182518235664SRajkumar Manoharan ret = ath10k_htt_rx_amsdu_pop(htt, &amsdu); 182618235664SRajkumar Manoharan spin_unlock_bh(&htt->rx_ring.lock); 182718235664SRajkumar Manoharan 182818235664SRajkumar Manoharan if (ret < 0) { 182918235664SRajkumar Manoharan ath10k_warn(ar, "rx ring became corrupted: %d\n", ret); 183018235664SRajkumar Manoharan __skb_queue_purge(&amsdu); 183118235664SRajkumar Manoharan /* FIXME: It's probably a good idea to reboot the 183218235664SRajkumar Manoharan * device instead of leaving it inoperable. 
183318235664SRajkumar Manoharan */ 183418235664SRajkumar Manoharan htt->rx_confused = true; 183518235664SRajkumar Manoharan return ret; 183618235664SRajkumar Manoharan } 183718235664SRajkumar Manoharan 1838caee728aSVasanthakumar Thiagarajan num_msdus = skb_queue_len(&amsdu); 1839caee728aSVasanthakumar Thiagarajan 1840237e15dfSAshok Raj Nagarajan ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff); 18417543d116SMohammed Shafi Shajakhan 18427543d116SMohammed Shafi Shajakhan /* only for ret = 1 indicates chained msdus */ 18437543d116SMohammed Shafi Shajakhan if (ret > 0) 1844caee728aSVasanthakumar Thiagarajan ath10k_htt_rx_h_unchain(ar, &amsdu, &drop_cnt, &unchain_cnt); 18457543d116SMohammed Shafi Shajakhan 1846caee728aSVasanthakumar Thiagarajan ath10k_htt_rx_h_filter(ar, &amsdu, rx_status, &drop_cnt_filter); 1847caee728aSVasanthakumar Thiagarajan ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status, true, first_hdr, &err); 1848caee728aSVasanthakumar Thiagarajan msdus_to_queue = skb_queue_len(&amsdu); 1849deba1b9eSRajkumar Manoharan ath10k_htt_rx_h_enqueue(ar, &amsdu, rx_status); 185018235664SRajkumar Manoharan 1851caee728aSVasanthakumar Thiagarajan ath10k_sta_update_rx_tid_stats(ar, first_hdr, num_msdus, err, 1852caee728aSVasanthakumar Thiagarajan unchain_cnt, drop_cnt, drop_cnt_filter, 1853caee728aSVasanthakumar Thiagarajan msdus_to_queue); 1854caee728aSVasanthakumar Thiagarajan 1855deba1b9eSRajkumar Manoharan return 0; 185618235664SRajkumar Manoharan } 185718235664SRajkumar Manoharan 1858f88d4934SErik Stromdahl static bool ath10k_htt_rx_proc_rx_ind_hl(struct ath10k_htt *htt, 1859f88d4934SErik Stromdahl struct htt_rx_indication_hl *rx, 1860f88d4934SErik Stromdahl struct sk_buff *skb) 1861f88d4934SErik Stromdahl { 1862f88d4934SErik Stromdahl struct ath10k *ar = htt->ar; 1863f88d4934SErik Stromdahl struct ath10k_peer *peer; 1864f88d4934SErik Stromdahl struct htt_rx_indication_mpdu_range *mpdu_ranges; 1865f88d4934SErik Stromdahl struct fw_rx_desc_hl *fw_desc; 1866f88d4934SErik 
Stromdahl struct ieee80211_hdr *hdr; 1867f88d4934SErik Stromdahl struct ieee80211_rx_status *rx_status; 1868f88d4934SErik Stromdahl u16 peer_id; 1869f88d4934SErik Stromdahl u8 rx_desc_len; 1870f88d4934SErik Stromdahl int num_mpdu_ranges; 1871f88d4934SErik Stromdahl size_t tot_hdr_len; 1872f88d4934SErik Stromdahl struct ieee80211_channel *ch; 1873f88d4934SErik Stromdahl 1874f88d4934SErik Stromdahl peer_id = __le16_to_cpu(rx->hdr.peer_id); 1875f88d4934SErik Stromdahl 1876f88d4934SErik Stromdahl spin_lock_bh(&ar->data_lock); 1877f88d4934SErik Stromdahl peer = ath10k_peer_find_by_id(ar, peer_id); 1878f88d4934SErik Stromdahl spin_unlock_bh(&ar->data_lock); 1879f88d4934SErik Stromdahl if (!peer) 1880f88d4934SErik Stromdahl ath10k_warn(ar, "Got RX ind from invalid peer: %u\n", peer_id); 1881f88d4934SErik Stromdahl 1882f88d4934SErik Stromdahl num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1), 1883f88d4934SErik Stromdahl HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES); 1884f88d4934SErik Stromdahl mpdu_ranges = htt_rx_ind_get_mpdu_ranges_hl(rx); 1885f88d4934SErik Stromdahl fw_desc = &rx->fw_desc; 1886f88d4934SErik Stromdahl rx_desc_len = fw_desc->len; 1887f88d4934SErik Stromdahl 1888f88d4934SErik Stromdahl /* I have not yet seen any case where num_mpdu_ranges > 1. 1889f88d4934SErik Stromdahl * qcacld does not seem handle that case either, so we introduce the 1890f88d4934SErik Stromdahl * same limitiation here as well. 
1891f88d4934SErik Stromdahl */ 1892f88d4934SErik Stromdahl if (num_mpdu_ranges > 1) 1893f88d4934SErik Stromdahl ath10k_warn(ar, 1894f88d4934SErik Stromdahl "Unsupported number of MPDU ranges: %d, ignoring all but the first\n", 1895f88d4934SErik Stromdahl num_mpdu_ranges); 1896f88d4934SErik Stromdahl 1897f88d4934SErik Stromdahl if (mpdu_ranges->mpdu_range_status != 1898f88d4934SErik Stromdahl HTT_RX_IND_MPDU_STATUS_OK) { 1899f88d4934SErik Stromdahl ath10k_warn(ar, "MPDU range status: %d\n", 1900f88d4934SErik Stromdahl mpdu_ranges->mpdu_range_status); 1901f88d4934SErik Stromdahl goto err; 1902f88d4934SErik Stromdahl } 1903f88d4934SErik Stromdahl 1904f88d4934SErik Stromdahl /* Strip off all headers before the MAC header before delivery to 1905f88d4934SErik Stromdahl * mac80211 1906f88d4934SErik Stromdahl */ 1907f88d4934SErik Stromdahl tot_hdr_len = sizeof(struct htt_resp_hdr) + sizeof(rx->hdr) + 1908f88d4934SErik Stromdahl sizeof(rx->ppdu) + sizeof(rx->prefix) + 1909f88d4934SErik Stromdahl sizeof(rx->fw_desc) + 1910f88d4934SErik Stromdahl sizeof(*mpdu_ranges) * num_mpdu_ranges + rx_desc_len; 1911f88d4934SErik Stromdahl skb_pull(skb, tot_hdr_len); 1912f88d4934SErik Stromdahl 1913f88d4934SErik Stromdahl hdr = (struct ieee80211_hdr *)skb->data; 1914f88d4934SErik Stromdahl rx_status = IEEE80211_SKB_RXCB(skb); 1915f88d4934SErik Stromdahl rx_status->chains |= BIT(0); 1916f88d4934SErik Stromdahl rx_status->signal = ATH10K_DEFAULT_NOISE_FLOOR + 1917f88d4934SErik Stromdahl rx->ppdu.combined_rssi; 1918f88d4934SErik Stromdahl rx_status->flag &= ~RX_FLAG_NO_SIGNAL_VAL; 1919f88d4934SErik Stromdahl 1920f88d4934SErik Stromdahl spin_lock_bh(&ar->data_lock); 1921f88d4934SErik Stromdahl ch = ar->scan_channel; 1922f88d4934SErik Stromdahl if (!ch) 1923f88d4934SErik Stromdahl ch = ar->rx_channel; 1924f88d4934SErik Stromdahl if (!ch) 1925f88d4934SErik Stromdahl ch = ath10k_htt_rx_h_any_channel(ar); 1926f88d4934SErik Stromdahl if (!ch) 1927f88d4934SErik Stromdahl ch = ar->tgt_oper_chan; 
1928f88d4934SErik Stromdahl spin_unlock_bh(&ar->data_lock); 1929f88d4934SErik Stromdahl 1930f88d4934SErik Stromdahl if (ch) { 1931f88d4934SErik Stromdahl rx_status->band = ch->band; 1932f88d4934SErik Stromdahl rx_status->freq = ch->center_freq; 1933f88d4934SErik Stromdahl } 1934f88d4934SErik Stromdahl if (rx->fw_desc.flags & FW_RX_DESC_FLAGS_LAST_MSDU) 1935f88d4934SErik Stromdahl rx_status->flag &= ~RX_FLAG_AMSDU_MORE; 1936f88d4934SErik Stromdahl else 1937f88d4934SErik Stromdahl rx_status->flag |= RX_FLAG_AMSDU_MORE; 1938f88d4934SErik Stromdahl 1939f88d4934SErik Stromdahl /* Not entirely sure about this, but all frames from the chipset has 1940f88d4934SErik Stromdahl * the protected flag set even though they have already been decrypted. 1941f88d4934SErik Stromdahl * Unmasking this flag is necessary in order for mac80211 not to drop 1942f88d4934SErik Stromdahl * the frame. 1943f88d4934SErik Stromdahl * TODO: Verify this is always the case or find out a way to check 1944f88d4934SErik Stromdahl * if there has been hw decryption. 1945f88d4934SErik Stromdahl */ 1946f88d4934SErik Stromdahl if (ieee80211_has_protected(hdr->frame_control)) { 1947f88d4934SErik Stromdahl hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED); 1948f88d4934SErik Stromdahl rx_status->flag |= RX_FLAG_DECRYPTED | 1949f88d4934SErik Stromdahl RX_FLAG_IV_STRIPPED | 1950f88d4934SErik Stromdahl RX_FLAG_MMIC_STRIPPED; 1951f88d4934SErik Stromdahl } 1952f88d4934SErik Stromdahl 1953f88d4934SErik Stromdahl ieee80211_rx_ni(ar->hw, skb); 1954f88d4934SErik Stromdahl 1955f88d4934SErik Stromdahl /* We have delivered the skb to the upper layers (mac80211) so we 1956f88d4934SErik Stromdahl * must not free it. 
1957f88d4934SErik Stromdahl */ 1958f88d4934SErik Stromdahl return false; 1959f88d4934SErik Stromdahl err: 1960f88d4934SErik Stromdahl /* Tell the caller that it must free the skb since we have not 1961f88d4934SErik Stromdahl * consumed it 1962f88d4934SErik Stromdahl */ 1963f88d4934SErik Stromdahl return true; 1964f88d4934SErik Stromdahl } 1965f88d4934SErik Stromdahl 1966f88d4934SErik Stromdahl static void ath10k_htt_rx_proc_rx_ind_ll(struct ath10k_htt *htt, 19675e3dd157SKalle Valo struct htt_rx_indication *rx) 19685e3dd157SKalle Valo { 19697aa7a72aSMichal Kazior struct ath10k *ar = htt->ar; 19705e3dd157SKalle Valo struct htt_rx_indication_mpdu_range *mpdu_ranges; 19715e3dd157SKalle Valo int num_mpdu_ranges; 197218235664SRajkumar Manoharan int i, mpdu_count = 0; 1973caee728aSVasanthakumar Thiagarajan u16 peer_id; 1974caee728aSVasanthakumar Thiagarajan u8 tid; 19755e3dd157SKalle Valo 19765e3dd157SKalle Valo num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1), 19775e3dd157SKalle Valo HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES); 1978caee728aSVasanthakumar Thiagarajan peer_id = __le16_to_cpu(rx->hdr.peer_id); 1979caee728aSVasanthakumar Thiagarajan tid = MS(rx->hdr.info0, HTT_RX_INDICATION_INFO0_EXT_TID); 1980caee728aSVasanthakumar Thiagarajan 19815e3dd157SKalle Valo mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx); 19825e3dd157SKalle Valo 19837aa7a72aSMichal Kazior ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ", 19845e3dd157SKalle Valo rx, sizeof(*rx) + 19855e3dd157SKalle Valo (sizeof(struct htt_rx_indication_mpdu_range) * 19865e3dd157SKalle Valo num_mpdu_ranges)); 19875e3dd157SKalle Valo 1988d540690dSMichal Kazior for (i = 0; i < num_mpdu_ranges; i++) 1989d540690dSMichal Kazior mpdu_count += mpdu_ranges[i].mpdu_count; 1990d540690dSMichal Kazior 19913128b3d8SRajkumar Manoharan atomic_add(mpdu_count, &htt->num_mpdus_ready); 1992caee728aSVasanthakumar Thiagarajan 1993caee728aSVasanthakumar Thiagarajan ath10k_sta_update_rx_tid_stats_ampdu(ar, peer_id, tid, 
mpdu_ranges, 1994caee728aSVasanthakumar Thiagarajan num_mpdu_ranges); 19955e3dd157SKalle Valo } 19965e3dd157SKalle Valo 199759465fe4SRajkumar Manoharan static void ath10k_htt_rx_tx_compl_ind(struct ath10k *ar, 19986c5151a9SMichal Kazior struct sk_buff *skb) 19996c5151a9SMichal Kazior { 20006c5151a9SMichal Kazior struct ath10k_htt *htt = &ar->htt; 20016c5151a9SMichal Kazior struct htt_resp *resp = (struct htt_resp *)skb->data; 20026c5151a9SMichal Kazior struct htt_tx_done tx_done = {}; 20036c5151a9SMichal Kazior int status = MS(resp->data_tx_completion.flags, HTT_DATA_TX_STATUS); 2004c7fd8d23SBalaji Pothunoori __le16 msdu_id, *msdus; 2005c7fd8d23SBalaji Pothunoori bool rssi_enabled = false; 2006c7fd8d23SBalaji Pothunoori u8 msdu_count = 0; 20076c5151a9SMichal Kazior int i; 20086c5151a9SMichal Kazior 20096c5151a9SMichal Kazior switch (status) { 20106c5151a9SMichal Kazior case HTT_DATA_TX_STATUS_NO_ACK: 201159465fe4SRajkumar Manoharan tx_done.status = HTT_TX_COMPL_STATE_NOACK; 20126c5151a9SMichal Kazior break; 20136c5151a9SMichal Kazior case HTT_DATA_TX_STATUS_OK: 201459465fe4SRajkumar Manoharan tx_done.status = HTT_TX_COMPL_STATE_ACK; 20156c5151a9SMichal Kazior break; 20166c5151a9SMichal Kazior case HTT_DATA_TX_STATUS_DISCARD: 20176c5151a9SMichal Kazior case HTT_DATA_TX_STATUS_POSTPONE: 20186c5151a9SMichal Kazior case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL: 201959465fe4SRajkumar Manoharan tx_done.status = HTT_TX_COMPL_STATE_DISCARD; 20206c5151a9SMichal Kazior break; 20216c5151a9SMichal Kazior default: 20227aa7a72aSMichal Kazior ath10k_warn(ar, "unhandled tx completion status %d\n", status); 202359465fe4SRajkumar Manoharan tx_done.status = HTT_TX_COMPL_STATE_DISCARD; 20246c5151a9SMichal Kazior break; 20256c5151a9SMichal Kazior } 20266c5151a9SMichal Kazior 20277aa7a72aSMichal Kazior ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n", 20286c5151a9SMichal Kazior resp->data_tx_completion.num_msdus); 20296c5151a9SMichal Kazior 2030c7fd8d23SBalaji Pothunoori 
msdu_count = resp->data_tx_completion.num_msdus; 2031c7fd8d23SBalaji Pothunoori 2032c7fd8d23SBalaji Pothunoori if (resp->data_tx_completion.flags2 & HTT_TX_CMPL_FLAG_DATA_RSSI) 2033c7fd8d23SBalaji Pothunoori rssi_enabled = true; 2034c7fd8d23SBalaji Pothunoori 2035c7fd8d23SBalaji Pothunoori for (i = 0; i < msdu_count; i++) { 2036c7fd8d23SBalaji Pothunoori msdus = resp->data_tx_completion.msdus; 2037c7fd8d23SBalaji Pothunoori msdu_id = msdus[i]; 20386c5151a9SMichal Kazior tx_done.msdu_id = __le16_to_cpu(msdu_id); 203959465fe4SRajkumar Manoharan 2040c7fd8d23SBalaji Pothunoori if (rssi_enabled) { 2041c7fd8d23SBalaji Pothunoori /* Total no of MSDUs should be even, 2042c7fd8d23SBalaji Pothunoori * if odd MSDUs are sent firmware fills 2043c7fd8d23SBalaji Pothunoori * last msdu id with 0xffff 2044c7fd8d23SBalaji Pothunoori */ 2045c7fd8d23SBalaji Pothunoori if (msdu_count & 0x01) { 2046c7fd8d23SBalaji Pothunoori msdu_id = msdus[msdu_count + i + 1]; 2047c7fd8d23SBalaji Pothunoori tx_done.ack_rssi = __le16_to_cpu(msdu_id); 2048c7fd8d23SBalaji Pothunoori } else { 2049c7fd8d23SBalaji Pothunoori msdu_id = msdus[msdu_count + i]; 2050c7fd8d23SBalaji Pothunoori tx_done.ack_rssi = __le16_to_cpu(msdu_id); 2051c7fd8d23SBalaji Pothunoori } 2052c7fd8d23SBalaji Pothunoori } 2053c7fd8d23SBalaji Pothunoori 205459465fe4SRajkumar Manoharan /* kfifo_put: In practice firmware shouldn't fire off per-CE 205559465fe4SRajkumar Manoharan * interrupt and main interrupt (MSI/-X range case) for the same 205659465fe4SRajkumar Manoharan * HTC service so it should be safe to use kfifo_put w/o lock. 205759465fe4SRajkumar Manoharan * 205859465fe4SRajkumar Manoharan * From kfifo_put() documentation: 205959465fe4SRajkumar Manoharan * Note that with only one concurrent reader and one concurrent 206059465fe4SRajkumar Manoharan * writer, you don't need extra locking to use these macro. 
206159465fe4SRajkumar Manoharan */ 206259465fe4SRajkumar Manoharan if (!kfifo_put(&htt->txdone_fifo, tx_done)) { 206359465fe4SRajkumar Manoharan ath10k_warn(ar, "txdone fifo overrun, msdu_id %d status %d\n", 206459465fe4SRajkumar Manoharan tx_done.msdu_id, tx_done.status); 20656c5151a9SMichal Kazior ath10k_txrx_tx_unref(htt, &tx_done); 20666c5151a9SMichal Kazior } 20676c5151a9SMichal Kazior } 206859465fe4SRajkumar Manoharan } 20696c5151a9SMichal Kazior 2070aa5b4fbcSMichal Kazior static void ath10k_htt_rx_addba(struct ath10k *ar, struct htt_resp *resp) 2071aa5b4fbcSMichal Kazior { 2072aa5b4fbcSMichal Kazior struct htt_rx_addba *ev = &resp->rx_addba; 2073aa5b4fbcSMichal Kazior struct ath10k_peer *peer; 2074aa5b4fbcSMichal Kazior struct ath10k_vif *arvif; 2075aa5b4fbcSMichal Kazior u16 info0, tid, peer_id; 2076aa5b4fbcSMichal Kazior 2077aa5b4fbcSMichal Kazior info0 = __le16_to_cpu(ev->info0); 2078aa5b4fbcSMichal Kazior tid = MS(info0, HTT_RX_BA_INFO0_TID); 2079aa5b4fbcSMichal Kazior peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID); 2080aa5b4fbcSMichal Kazior 20817aa7a72aSMichal Kazior ath10k_dbg(ar, ATH10K_DBG_HTT, 2082aa5b4fbcSMichal Kazior "htt rx addba tid %hu peer_id %hu size %hhu\n", 2083aa5b4fbcSMichal Kazior tid, peer_id, ev->window_size); 2084aa5b4fbcSMichal Kazior 2085aa5b4fbcSMichal Kazior spin_lock_bh(&ar->data_lock); 2086aa5b4fbcSMichal Kazior peer = ath10k_peer_find_by_id(ar, peer_id); 2087aa5b4fbcSMichal Kazior if (!peer) { 20887aa7a72aSMichal Kazior ath10k_warn(ar, "received addba event for invalid peer_id: %hu\n", 2089aa5b4fbcSMichal Kazior peer_id); 2090aa5b4fbcSMichal Kazior spin_unlock_bh(&ar->data_lock); 2091aa5b4fbcSMichal Kazior return; 2092aa5b4fbcSMichal Kazior } 2093aa5b4fbcSMichal Kazior 2094aa5b4fbcSMichal Kazior arvif = ath10k_get_arvif(ar, peer->vdev_id); 2095aa5b4fbcSMichal Kazior if (!arvif) { 20967aa7a72aSMichal Kazior ath10k_warn(ar, "received addba event for invalid vdev_id: %u\n", 2097aa5b4fbcSMichal Kazior peer->vdev_id); 
2098aa5b4fbcSMichal Kazior spin_unlock_bh(&ar->data_lock); 2099aa5b4fbcSMichal Kazior return; 2100aa5b4fbcSMichal Kazior } 2101aa5b4fbcSMichal Kazior 21027aa7a72aSMichal Kazior ath10k_dbg(ar, ATH10K_DBG_HTT, 2103aa5b4fbcSMichal Kazior "htt rx start rx ba session sta %pM tid %hu size %hhu\n", 2104aa5b4fbcSMichal Kazior peer->addr, tid, ev->window_size); 2105aa5b4fbcSMichal Kazior 2106aa5b4fbcSMichal Kazior ieee80211_start_rx_ba_session_offl(arvif->vif, peer->addr, tid); 2107aa5b4fbcSMichal Kazior spin_unlock_bh(&ar->data_lock); 2108aa5b4fbcSMichal Kazior } 2109aa5b4fbcSMichal Kazior 2110aa5b4fbcSMichal Kazior static void ath10k_htt_rx_delba(struct ath10k *ar, struct htt_resp *resp) 2111aa5b4fbcSMichal Kazior { 2112aa5b4fbcSMichal Kazior struct htt_rx_delba *ev = &resp->rx_delba; 2113aa5b4fbcSMichal Kazior struct ath10k_peer *peer; 2114aa5b4fbcSMichal Kazior struct ath10k_vif *arvif; 2115aa5b4fbcSMichal Kazior u16 info0, tid, peer_id; 2116aa5b4fbcSMichal Kazior 2117aa5b4fbcSMichal Kazior info0 = __le16_to_cpu(ev->info0); 2118aa5b4fbcSMichal Kazior tid = MS(info0, HTT_RX_BA_INFO0_TID); 2119aa5b4fbcSMichal Kazior peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID); 2120aa5b4fbcSMichal Kazior 21217aa7a72aSMichal Kazior ath10k_dbg(ar, ATH10K_DBG_HTT, 2122aa5b4fbcSMichal Kazior "htt rx delba tid %hu peer_id %hu\n", 2123aa5b4fbcSMichal Kazior tid, peer_id); 2124aa5b4fbcSMichal Kazior 2125aa5b4fbcSMichal Kazior spin_lock_bh(&ar->data_lock); 2126aa5b4fbcSMichal Kazior peer = ath10k_peer_find_by_id(ar, peer_id); 2127aa5b4fbcSMichal Kazior if (!peer) { 21287aa7a72aSMichal Kazior ath10k_warn(ar, "received addba event for invalid peer_id: %hu\n", 2129aa5b4fbcSMichal Kazior peer_id); 2130aa5b4fbcSMichal Kazior spin_unlock_bh(&ar->data_lock); 2131aa5b4fbcSMichal Kazior return; 2132aa5b4fbcSMichal Kazior } 2133aa5b4fbcSMichal Kazior 2134aa5b4fbcSMichal Kazior arvif = ath10k_get_arvif(ar, peer->vdev_id); 2135aa5b4fbcSMichal Kazior if (!arvif) { 21367aa7a72aSMichal Kazior ath10k_warn(ar, 
"received addba event for invalid vdev_id: %u\n", 2137aa5b4fbcSMichal Kazior peer->vdev_id); 2138aa5b4fbcSMichal Kazior spin_unlock_bh(&ar->data_lock); 2139aa5b4fbcSMichal Kazior return; 2140aa5b4fbcSMichal Kazior } 2141aa5b4fbcSMichal Kazior 21427aa7a72aSMichal Kazior ath10k_dbg(ar, ATH10K_DBG_HTT, 2143aa5b4fbcSMichal Kazior "htt rx stop rx ba session sta %pM tid %hu\n", 2144aa5b4fbcSMichal Kazior peer->addr, tid); 2145aa5b4fbcSMichal Kazior 2146aa5b4fbcSMichal Kazior ieee80211_stop_rx_ba_session_offl(arvif->vif, peer->addr, tid); 2147aa5b4fbcSMichal Kazior spin_unlock_bh(&ar->data_lock); 2148aa5b4fbcSMichal Kazior } 2149aa5b4fbcSMichal Kazior 2150c545070eSMichal Kazior static int ath10k_htt_rx_extract_amsdu(struct sk_buff_head *list, 2151e48e9c42SKalle Valo struct sk_buff_head *amsdu) 2152c545070eSMichal Kazior { 2153c545070eSMichal Kazior struct sk_buff *msdu; 2154c545070eSMichal Kazior struct htt_rx_desc *rxd; 2155c545070eSMichal Kazior 2156c545070eSMichal Kazior if (skb_queue_empty(list)) 2157c545070eSMichal Kazior return -ENOBUFS; 2158c545070eSMichal Kazior 2159c545070eSMichal Kazior if (WARN_ON(!skb_queue_empty(amsdu))) 2160c545070eSMichal Kazior return -EINVAL; 2161c545070eSMichal Kazior 2162e48e9c42SKalle Valo while ((msdu = __skb_dequeue(list))) { 2163c545070eSMichal Kazior __skb_queue_tail(amsdu, msdu); 2164c545070eSMichal Kazior 2165c545070eSMichal Kazior rxd = (void *)msdu->data - sizeof(*rxd); 21661f5dbfbbSPeter Oh if (rxd->msdu_end.common.info0 & 2167c545070eSMichal Kazior __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU)) 2168c545070eSMichal Kazior break; 2169c545070eSMichal Kazior } 2170c545070eSMichal Kazior 2171c545070eSMichal Kazior msdu = skb_peek_tail(amsdu); 2172c545070eSMichal Kazior rxd = (void *)msdu->data - sizeof(*rxd); 21731f5dbfbbSPeter Oh if (!(rxd->msdu_end.common.info0 & 2174c545070eSMichal Kazior __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))) { 2175c545070eSMichal Kazior skb_queue_splice_init(amsdu, list); 2176c545070eSMichal Kazior return 
-EAGAIN; 2177c545070eSMichal Kazior } 2178c545070eSMichal Kazior 2179c545070eSMichal Kazior return 0; 2180c545070eSMichal Kazior } 2181c545070eSMichal Kazior 2182c545070eSMichal Kazior static void ath10k_htt_rx_h_rx_offload_prot(struct ieee80211_rx_status *status, 2183c545070eSMichal Kazior struct sk_buff *skb) 2184c545070eSMichal Kazior { 2185c545070eSMichal Kazior struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 2186c545070eSMichal Kazior 2187c545070eSMichal Kazior if (!ieee80211_has_protected(hdr->frame_control)) 2188c545070eSMichal Kazior return; 2189c545070eSMichal Kazior 2190c545070eSMichal Kazior /* Offloaded frames are already decrypted but firmware insists they are 2191c545070eSMichal Kazior * protected in the 802.11 header. Strip the flag. Otherwise mac80211 2192c545070eSMichal Kazior * will drop the frame. 2193c545070eSMichal Kazior */ 2194c545070eSMichal Kazior 2195c545070eSMichal Kazior hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED); 2196c545070eSMichal Kazior status->flag |= RX_FLAG_DECRYPTED | 2197c545070eSMichal Kazior RX_FLAG_IV_STRIPPED | 2198c545070eSMichal Kazior RX_FLAG_MMIC_STRIPPED; 2199c545070eSMichal Kazior } 2200c545070eSMichal Kazior 2201deba1b9eSRajkumar Manoharan static void ath10k_htt_rx_h_rx_offload(struct ath10k *ar, 2202c545070eSMichal Kazior struct sk_buff_head *list) 2203c545070eSMichal Kazior { 2204c545070eSMichal Kazior struct ath10k_htt *htt = &ar->htt; 2205c545070eSMichal Kazior struct ieee80211_rx_status *status = &htt->rx_status; 2206c545070eSMichal Kazior struct htt_rx_offload_msdu *rx; 2207c545070eSMichal Kazior struct sk_buff *msdu; 2208c545070eSMichal Kazior size_t offset; 2209c545070eSMichal Kazior 2210c545070eSMichal Kazior while ((msdu = __skb_dequeue(list))) { 2211c545070eSMichal Kazior /* Offloaded frames don't have Rx descriptor. Instead they have 2212c545070eSMichal Kazior * a short meta information header. 
2213c545070eSMichal Kazior */ 2214c545070eSMichal Kazior 2215c545070eSMichal Kazior rx = (void *)msdu->data; 2216c545070eSMichal Kazior 2217c545070eSMichal Kazior skb_put(msdu, sizeof(*rx)); 2218c545070eSMichal Kazior skb_pull(msdu, sizeof(*rx)); 2219c545070eSMichal Kazior 2220c545070eSMichal Kazior if (skb_tailroom(msdu) < __le16_to_cpu(rx->msdu_len)) { 2221c545070eSMichal Kazior ath10k_warn(ar, "dropping frame: offloaded rx msdu is too long!\n"); 2222c545070eSMichal Kazior dev_kfree_skb_any(msdu); 2223c545070eSMichal Kazior continue; 2224c545070eSMichal Kazior } 2225c545070eSMichal Kazior 2226c545070eSMichal Kazior skb_put(msdu, __le16_to_cpu(rx->msdu_len)); 2227c545070eSMichal Kazior 2228c545070eSMichal Kazior /* Offloaded rx header length isn't multiple of 2 nor 4 so the 2229c545070eSMichal Kazior * actual payload is unaligned. Align the frame. Otherwise 2230c545070eSMichal Kazior * mac80211 complains. This shouldn't reduce performance much 2231c545070eSMichal Kazior * because these offloaded frames are rare. 2232c545070eSMichal Kazior */ 2233c545070eSMichal Kazior offset = 4 - ((unsigned long)msdu->data & 3); 2234c545070eSMichal Kazior skb_put(msdu, offset); 2235c545070eSMichal Kazior memmove(msdu->data + offset, msdu->data, msdu->len); 2236c545070eSMichal Kazior skb_pull(msdu, offset); 2237c545070eSMichal Kazior 2238c545070eSMichal Kazior /* FIXME: The frame is NWifi. Re-construct QoS Control 2239c545070eSMichal Kazior * if possible later. 
2240c545070eSMichal Kazior */ 2241c545070eSMichal Kazior 2242c545070eSMichal Kazior memset(status, 0, sizeof(*status)); 2243c545070eSMichal Kazior status->flag |= RX_FLAG_NO_SIGNAL_VAL; 2244c545070eSMichal Kazior 2245c545070eSMichal Kazior ath10k_htt_rx_h_rx_offload_prot(status, msdu); 2246500ff9f9SMichal Kazior ath10k_htt_rx_h_channel(ar, status, NULL, rx->vdev_id); 2247deba1b9eSRajkumar Manoharan ath10k_htt_rx_h_queue_msdu(ar, status, msdu); 2248c545070eSMichal Kazior } 2249c545070eSMichal Kazior } 2250c545070eSMichal Kazior 2251e48e9c42SKalle Valo static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb) 2252c545070eSMichal Kazior { 2253c545070eSMichal Kazior struct ath10k_htt *htt = &ar->htt; 2254c545070eSMichal Kazior struct htt_resp *resp = (void *)skb->data; 2255c545070eSMichal Kazior struct ieee80211_rx_status *status = &htt->rx_status; 2256c545070eSMichal Kazior struct sk_buff_head list; 2257c545070eSMichal Kazior struct sk_buff_head amsdu; 2258c545070eSMichal Kazior u16 peer_id; 2259c545070eSMichal Kazior u16 msdu_count; 2260c545070eSMichal Kazior u8 vdev_id; 2261c545070eSMichal Kazior u8 tid; 2262c545070eSMichal Kazior bool offload; 2263c545070eSMichal Kazior bool frag; 2264deba1b9eSRajkumar Manoharan int ret; 2265c545070eSMichal Kazior 2266c545070eSMichal Kazior lockdep_assert_held(&htt->rx_ring.lock); 2267c545070eSMichal Kazior 2268c545070eSMichal Kazior if (htt->rx_confused) 22693c97f5deSRajkumar Manoharan return -EIO; 2270c545070eSMichal Kazior 2271c545070eSMichal Kazior skb_pull(skb, sizeof(resp->hdr)); 2272c545070eSMichal Kazior skb_pull(skb, sizeof(resp->rx_in_ord_ind)); 2273c545070eSMichal Kazior 2274c545070eSMichal Kazior peer_id = __le16_to_cpu(resp->rx_in_ord_ind.peer_id); 2275c545070eSMichal Kazior msdu_count = __le16_to_cpu(resp->rx_in_ord_ind.msdu_count); 2276c545070eSMichal Kazior vdev_id = resp->rx_in_ord_ind.vdev_id; 2277c545070eSMichal Kazior tid = SM(resp->rx_in_ord_ind.info, HTT_RX_IN_ORD_IND_INFO_TID); 
2278c545070eSMichal Kazior offload = !!(resp->rx_in_ord_ind.info & 2279c545070eSMichal Kazior HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK); 2280c545070eSMichal Kazior frag = !!(resp->rx_in_ord_ind.info & HTT_RX_IN_ORD_IND_INFO_FRAG_MASK); 2281c545070eSMichal Kazior 2282c545070eSMichal Kazior ath10k_dbg(ar, ATH10K_DBG_HTT, 2283c545070eSMichal Kazior "htt rx in ord vdev %i peer %i tid %i offload %i frag %i msdu count %i\n", 2284c545070eSMichal Kazior vdev_id, peer_id, tid, offload, frag, msdu_count); 2285c545070eSMichal Kazior 22863b0b55b1SGovind Singh if (skb->len < msdu_count * sizeof(*resp->rx_in_ord_ind.msdu_descs32)) { 2287c545070eSMichal Kazior ath10k_warn(ar, "dropping invalid in order rx indication\n"); 22883c97f5deSRajkumar Manoharan return -EINVAL; 2289c545070eSMichal Kazior } 2290c545070eSMichal Kazior 2291c545070eSMichal Kazior /* The event can deliver more than 1 A-MSDU. Each A-MSDU is later 2292c545070eSMichal Kazior * extracted and processed. 2293c545070eSMichal Kazior */ 2294c545070eSMichal Kazior __skb_queue_head_init(&list); 22953b0b55b1SGovind Singh if (ar->hw_params.target_64bit) 22963b0b55b1SGovind Singh ret = ath10k_htt_rx_pop_paddr64_list(htt, &resp->rx_in_ord_ind, 22973b0b55b1SGovind Singh &list); 22983b0b55b1SGovind Singh else 22993b0b55b1SGovind Singh ret = ath10k_htt_rx_pop_paddr32_list(htt, &resp->rx_in_ord_ind, 23003b0b55b1SGovind Singh &list); 23013b0b55b1SGovind Singh 2302c545070eSMichal Kazior if (ret < 0) { 2303c545070eSMichal Kazior ath10k_warn(ar, "failed to pop paddr list: %d\n", ret); 2304c545070eSMichal Kazior htt->rx_confused = true; 23053c97f5deSRajkumar Manoharan return -EIO; 2306c545070eSMichal Kazior } 2307c545070eSMichal Kazior 2308c545070eSMichal Kazior /* Offloaded frames are very different and need to be handled 2309c545070eSMichal Kazior * separately. 
2310c545070eSMichal Kazior */ 2311c545070eSMichal Kazior if (offload) 2312deba1b9eSRajkumar Manoharan ath10k_htt_rx_h_rx_offload(ar, &list); 2313c545070eSMichal Kazior 2314e48e9c42SKalle Valo while (!skb_queue_empty(&list)) { 2315c545070eSMichal Kazior __skb_queue_head_init(&amsdu); 2316e48e9c42SKalle Valo ret = ath10k_htt_rx_extract_amsdu(&list, &amsdu); 2317c545070eSMichal Kazior switch (ret) { 2318c545070eSMichal Kazior case 0: 2319c545070eSMichal Kazior /* Note: The in-order indication may report interleaved 2320c545070eSMichal Kazior * frames from different PPDUs meaning reported rx rate 2321c545070eSMichal Kazior * to mac80211 isn't accurate/reliable. It's still 2322c545070eSMichal Kazior * better to report something than nothing though. This 2323c545070eSMichal Kazior * should still give an idea about rx rate to the user. 2324c545070eSMichal Kazior */ 2325500ff9f9SMichal Kazior ath10k_htt_rx_h_ppdu(ar, &amsdu, status, vdev_id); 2326caee728aSVasanthakumar Thiagarajan ath10k_htt_rx_h_filter(ar, &amsdu, status, NULL); 2327caee728aSVasanthakumar Thiagarajan ath10k_htt_rx_h_mpdu(ar, &amsdu, status, false, NULL, 2328caee728aSVasanthakumar Thiagarajan NULL); 2329deba1b9eSRajkumar Manoharan ath10k_htt_rx_h_enqueue(ar, &amsdu, status); 2330c545070eSMichal Kazior break; 2331c545070eSMichal Kazior case -EAGAIN: 2332c545070eSMichal Kazior /* fall through */ 2333c545070eSMichal Kazior default: 2334c545070eSMichal Kazior /* Should not happen. 
*/ 2335c545070eSMichal Kazior ath10k_warn(ar, "failed to extract amsdu: %d\n", ret); 2336c545070eSMichal Kazior htt->rx_confused = true; 2337c545070eSMichal Kazior __skb_queue_purge(&list); 23383c97f5deSRajkumar Manoharan return -EIO; 2339c545070eSMichal Kazior } 2340c545070eSMichal Kazior } 2341deba1b9eSRajkumar Manoharan return ret; 2342c545070eSMichal Kazior } 2343c545070eSMichal Kazior 2344839ae637SMichal Kazior static void ath10k_htt_rx_tx_fetch_resp_id_confirm(struct ath10k *ar, 2345839ae637SMichal Kazior const __le32 *resp_ids, 2346839ae637SMichal Kazior int num_resp_ids) 2347839ae637SMichal Kazior { 2348839ae637SMichal Kazior int i; 2349839ae637SMichal Kazior u32 resp_id; 2350839ae637SMichal Kazior 2351839ae637SMichal Kazior ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm num_resp_ids %d\n", 2352839ae637SMichal Kazior num_resp_ids); 2353839ae637SMichal Kazior 2354839ae637SMichal Kazior for (i = 0; i < num_resp_ids; i++) { 2355839ae637SMichal Kazior resp_id = le32_to_cpu(resp_ids[i]); 2356839ae637SMichal Kazior 2357839ae637SMichal Kazior ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm resp_id %u\n", 2358839ae637SMichal Kazior resp_id); 2359839ae637SMichal Kazior 2360839ae637SMichal Kazior /* TODO: free resp_id */ 2361839ae637SMichal Kazior } 2362839ae637SMichal Kazior } 2363839ae637SMichal Kazior 2364839ae637SMichal Kazior static void ath10k_htt_rx_tx_fetch_ind(struct ath10k *ar, struct sk_buff *skb) 2365839ae637SMichal Kazior { 2366426e10eaSMichal Kazior struct ieee80211_hw *hw = ar->hw; 2367426e10eaSMichal Kazior struct ieee80211_txq *txq; 2368839ae637SMichal Kazior struct htt_resp *resp = (struct htt_resp *)skb->data; 2369839ae637SMichal Kazior struct htt_tx_fetch_record *record; 2370839ae637SMichal Kazior size_t len; 2371839ae637SMichal Kazior size_t max_num_bytes; 2372839ae637SMichal Kazior size_t max_num_msdus; 2373426e10eaSMichal Kazior size_t num_bytes; 2374426e10eaSMichal Kazior size_t num_msdus; 2375839ae637SMichal Kazior const 
__le32 *resp_ids; 2376839ae637SMichal Kazior u16 num_records; 2377839ae637SMichal Kazior u16 num_resp_ids; 2378839ae637SMichal Kazior u16 peer_id; 2379839ae637SMichal Kazior u8 tid; 2380426e10eaSMichal Kazior int ret; 2381839ae637SMichal Kazior int i; 2382839ae637SMichal Kazior 2383839ae637SMichal Kazior ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind\n"); 2384839ae637SMichal Kazior 2385839ae637SMichal Kazior len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_ind); 2386839ae637SMichal Kazior if (unlikely(skb->len < len)) { 2387839ae637SMichal Kazior ath10k_warn(ar, "received corrupted tx_fetch_ind event: buffer too short\n"); 2388839ae637SMichal Kazior return; 2389839ae637SMichal Kazior } 2390839ae637SMichal Kazior 2391839ae637SMichal Kazior num_records = le16_to_cpu(resp->tx_fetch_ind.num_records); 2392839ae637SMichal Kazior num_resp_ids = le16_to_cpu(resp->tx_fetch_ind.num_resp_ids); 2393839ae637SMichal Kazior 2394839ae637SMichal Kazior len += sizeof(resp->tx_fetch_ind.records[0]) * num_records; 2395839ae637SMichal Kazior len += sizeof(resp->tx_fetch_ind.resp_ids[0]) * num_resp_ids; 2396839ae637SMichal Kazior 2397839ae637SMichal Kazior if (unlikely(skb->len < len)) { 2398839ae637SMichal Kazior ath10k_warn(ar, "received corrupted tx_fetch_ind event: too many records/resp_ids\n"); 2399839ae637SMichal Kazior return; 2400839ae637SMichal Kazior } 2401839ae637SMichal Kazior 2402839ae637SMichal Kazior ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind num records %hu num resps %hu seq %hu\n", 2403839ae637SMichal Kazior num_records, num_resp_ids, 2404839ae637SMichal Kazior le16_to_cpu(resp->tx_fetch_ind.fetch_seq_num)); 2405839ae637SMichal Kazior 2406426e10eaSMichal Kazior if (!ar->htt.tx_q_state.enabled) { 2407426e10eaSMichal Kazior ath10k_warn(ar, "received unexpected tx_fetch_ind event: not enabled\n"); 2408426e10eaSMichal Kazior return; 2409426e10eaSMichal Kazior } 2410426e10eaSMichal Kazior 2411426e10eaSMichal Kazior if (ar->htt.tx_q_state.mode == 
HTT_TX_MODE_SWITCH_PUSH) { 2412426e10eaSMichal Kazior ath10k_warn(ar, "received unexpected tx_fetch_ind event: in push mode\n"); 2413426e10eaSMichal Kazior return; 2414426e10eaSMichal Kazior } 2415426e10eaSMichal Kazior 2416426e10eaSMichal Kazior rcu_read_lock(); 2417839ae637SMichal Kazior 2418839ae637SMichal Kazior for (i = 0; i < num_records; i++) { 2419839ae637SMichal Kazior record = &resp->tx_fetch_ind.records[i]; 2420839ae637SMichal Kazior peer_id = MS(le16_to_cpu(record->info), 2421839ae637SMichal Kazior HTT_TX_FETCH_RECORD_INFO_PEER_ID); 2422839ae637SMichal Kazior tid = MS(le16_to_cpu(record->info), 2423839ae637SMichal Kazior HTT_TX_FETCH_RECORD_INFO_TID); 2424839ae637SMichal Kazior max_num_msdus = le16_to_cpu(record->num_msdus); 2425839ae637SMichal Kazior max_num_bytes = le32_to_cpu(record->num_bytes); 2426839ae637SMichal Kazior 2427839ae637SMichal Kazior ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch record %i peer_id %hu tid %hhu msdus %zu bytes %zu\n", 2428839ae637SMichal Kazior i, peer_id, tid, max_num_msdus, max_num_bytes); 2429839ae637SMichal Kazior 2430839ae637SMichal Kazior if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) || 2431839ae637SMichal Kazior unlikely(tid >= ar->htt.tx_q_state.num_tids)) { 2432839ae637SMichal Kazior ath10k_warn(ar, "received out of range peer_id %hu tid %hhu\n", 2433839ae637SMichal Kazior peer_id, tid); 2434839ae637SMichal Kazior continue; 2435839ae637SMichal Kazior } 2436839ae637SMichal Kazior 2437426e10eaSMichal Kazior spin_lock_bh(&ar->data_lock); 2438426e10eaSMichal Kazior txq = ath10k_mac_txq_lookup(ar, peer_id, tid); 2439426e10eaSMichal Kazior spin_unlock_bh(&ar->data_lock); 2440426e10eaSMichal Kazior 2441426e10eaSMichal Kazior /* It is okay to release the lock and use txq because RCU read 2442426e10eaSMichal Kazior * lock is held. 
		 */

		if (unlikely(!txq)) {
			ath10k_warn(ar, "failed to lookup txq for peer_id %hu tid %hhu\n",
				    peer_id, tid);
			continue;
		}

		/* Push frames from this txq until the per-record msdu/byte
		 * quota is exhausted or the queue runs dry.
		 */
		num_msdus = 0;
		num_bytes = 0;

		while (num_msdus < max_num_msdus &&
		       num_bytes < max_num_bytes) {
			ret = ath10k_mac_tx_push_txq(hw, txq);
			if (ret < 0)
				break;

			num_msdus++;
			num_bytes += ret;
		}

		/* Report back how much was actually pushed for this record */
		record->num_msdus = cpu_to_le16(num_msdus);
		record->num_bytes = cpu_to_le32(num_bytes);

		ath10k_htt_tx_txq_recalc(hw, txq);
	}

	rcu_read_unlock();

	resp_ids = ath10k_htt_get_tx_fetch_ind_resp_ids(&resp->tx_fetch_ind);
	ath10k_htt_rx_tx_fetch_resp_id_confirm(ar, resp_ids, num_resp_ids);

	ret = ath10k_htt_tx_fetch_resp(ar,
				       resp->tx_fetch_ind.token,
				       resp->tx_fetch_ind.fetch_seq_num,
				       resp->tx_fetch_ind.records,
				       num_records);
	if (unlikely(ret)) {
		ath10k_warn(ar, "failed to submit tx fetch resp for token 0x%08x: %d\n",
			    le32_to_cpu(resp->tx_fetch_ind.token), ret);
		/* FIXME: request fw restart
		 */
	}

	ath10k_htt_tx_txq_sync(ar);
}

/* Handle an HTT TX_FETCH_CONFIRM event: validate the buffer bounds and
 * hand the confirmed response ids to the tx fetch machinery.
 */
static void ath10k_htt_rx_tx_fetch_confirm(struct ath10k *ar,
					   struct sk_buff *skb)
{
	const struct htt_resp *resp = (void *)skb->data;
	size_t len;
	int num_resp_ids;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm\n");

	/* The fixed-size part must be present before the resp_ids count can
	 * be read.
	 */
	len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_confirm);
	if (unlikely(skb->len < len)) {
		ath10k_warn(ar, "received corrupted tx_fetch_confirm event: buffer too short\n");
		return;
	}

	/* Re-check the length once the variable-size resp_ids array is known */
	num_resp_ids = le16_to_cpu(resp->tx_fetch_confirm.num_resp_ids);
	len += sizeof(resp->tx_fetch_confirm.resp_ids[0]) * num_resp_ids;

	if (unlikely(skb->len < len)) {
		ath10k_warn(ar, "received corrupted tx_fetch_confirm event: resp_ids buffer overflow\n");
		return;
	}

	ath10k_htt_rx_tx_fetch_resp_id_confirm(ar,
					       resp->tx_fetch_confirm.resp_ids,
					       num_resp_ids);
}

/* Handle an HTT TX_MODE_SWITCH_IND event (firmware switching between push
 * and push/pull tx queueing modes).
 */
static void ath10k_htt_rx_tx_mode_switch_ind(struct ath10k *ar,
					     struct sk_buff *skb)
{
	const struct htt_resp *resp = (void *)skb->data;
	const struct htt_tx_mode_switch_record
*record; 2522426e10eaSMichal Kazior struct ieee80211_txq *txq; 2523426e10eaSMichal Kazior struct ath10k_txq *artxq; 2524839ae637SMichal Kazior size_t len; 2525839ae637SMichal Kazior size_t num_records; 2526839ae637SMichal Kazior enum htt_tx_mode_switch_mode mode; 2527839ae637SMichal Kazior bool enable; 2528839ae637SMichal Kazior u16 info0; 2529839ae637SMichal Kazior u16 info1; 2530839ae637SMichal Kazior u16 threshold; 2531839ae637SMichal Kazior u16 peer_id; 2532839ae637SMichal Kazior u8 tid; 2533839ae637SMichal Kazior int i; 2534839ae637SMichal Kazior 2535839ae637SMichal Kazior ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx mode switch ind\n"); 2536839ae637SMichal Kazior 2537839ae637SMichal Kazior len = sizeof(resp->hdr) + sizeof(resp->tx_mode_switch_ind); 2538839ae637SMichal Kazior if (unlikely(skb->len < len)) { 2539839ae637SMichal Kazior ath10k_warn(ar, "received corrupted tx_mode_switch_ind event: buffer too short\n"); 2540839ae637SMichal Kazior return; 2541839ae637SMichal Kazior } 2542839ae637SMichal Kazior 2543839ae637SMichal Kazior info0 = le16_to_cpu(resp->tx_mode_switch_ind.info0); 2544839ae637SMichal Kazior info1 = le16_to_cpu(resp->tx_mode_switch_ind.info1); 2545839ae637SMichal Kazior 2546839ae637SMichal Kazior enable = !!(info0 & HTT_TX_MODE_SWITCH_IND_INFO0_ENABLE); 2547839ae637SMichal Kazior num_records = MS(info0, HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD); 2548839ae637SMichal Kazior mode = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_MODE); 2549839ae637SMichal Kazior threshold = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD); 2550839ae637SMichal Kazior 2551839ae637SMichal Kazior ath10k_dbg(ar, ATH10K_DBG_HTT, 2552839ae637SMichal Kazior "htt rx tx mode switch ind info0 0x%04hx info1 0x%04hx enable %d num records %zd mode %d threshold %hu\n", 2553839ae637SMichal Kazior info0, info1, enable, num_records, mode, threshold); 2554839ae637SMichal Kazior 2555839ae637SMichal Kazior len += sizeof(resp->tx_mode_switch_ind.records[0]) * num_records; 2556839ae637SMichal 
Kazior 2557839ae637SMichal Kazior if (unlikely(skb->len < len)) { 2558839ae637SMichal Kazior ath10k_warn(ar, "received corrupted tx_mode_switch_mode_ind event: too many records\n"); 2559839ae637SMichal Kazior return; 2560839ae637SMichal Kazior } 2561839ae637SMichal Kazior 2562839ae637SMichal Kazior switch (mode) { 2563839ae637SMichal Kazior case HTT_TX_MODE_SWITCH_PUSH: 2564839ae637SMichal Kazior case HTT_TX_MODE_SWITCH_PUSH_PULL: 2565839ae637SMichal Kazior break; 2566839ae637SMichal Kazior default: 2567839ae637SMichal Kazior ath10k_warn(ar, "received invalid tx_mode_switch_mode_ind mode %d, ignoring\n", 2568839ae637SMichal Kazior mode); 2569839ae637SMichal Kazior return; 2570839ae637SMichal Kazior } 2571839ae637SMichal Kazior 2572839ae637SMichal Kazior if (!enable) 2573839ae637SMichal Kazior return; 2574839ae637SMichal Kazior 2575426e10eaSMichal Kazior ar->htt.tx_q_state.enabled = enable; 2576426e10eaSMichal Kazior ar->htt.tx_q_state.mode = mode; 2577426e10eaSMichal Kazior ar->htt.tx_q_state.num_push_allowed = threshold; 2578426e10eaSMichal Kazior 2579426e10eaSMichal Kazior rcu_read_lock(); 2580839ae637SMichal Kazior 2581839ae637SMichal Kazior for (i = 0; i < num_records; i++) { 2582839ae637SMichal Kazior record = &resp->tx_mode_switch_ind.records[i]; 2583839ae637SMichal Kazior info0 = le16_to_cpu(record->info0); 2584839ae637SMichal Kazior peer_id = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_PEER_ID); 2585839ae637SMichal Kazior tid = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_TID); 2586839ae637SMichal Kazior 2587839ae637SMichal Kazior if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) || 2588839ae637SMichal Kazior unlikely(tid >= ar->htt.tx_q_state.num_tids)) { 2589839ae637SMichal Kazior ath10k_warn(ar, "received out of range peer_id %hu tid %hhu\n", 2590839ae637SMichal Kazior peer_id, tid); 2591839ae637SMichal Kazior continue; 2592839ae637SMichal Kazior } 2593839ae637SMichal Kazior 2594426e10eaSMichal Kazior spin_lock_bh(&ar->data_lock); 2595426e10eaSMichal 
Kazior txq = ath10k_mac_txq_lookup(ar, peer_id, tid); 2596426e10eaSMichal Kazior spin_unlock_bh(&ar->data_lock); 2597426e10eaSMichal Kazior 2598426e10eaSMichal Kazior /* It is okay to release the lock and use txq because RCU read 2599426e10eaSMichal Kazior * lock is held. 2600426e10eaSMichal Kazior */ 2601426e10eaSMichal Kazior 2602426e10eaSMichal Kazior if (unlikely(!txq)) { 2603426e10eaSMichal Kazior ath10k_warn(ar, "failed to lookup txq for peer_id %hu tid %hhu\n", 2604426e10eaSMichal Kazior peer_id, tid); 2605426e10eaSMichal Kazior continue; 2606839ae637SMichal Kazior } 2607839ae637SMichal Kazior 2608426e10eaSMichal Kazior spin_lock_bh(&ar->htt.tx_lock); 2609426e10eaSMichal Kazior artxq = (void *)txq->drv_priv; 2610426e10eaSMichal Kazior artxq->num_push_allowed = le16_to_cpu(record->num_max_msdus); 2611426e10eaSMichal Kazior spin_unlock_bh(&ar->htt.tx_lock); 2612426e10eaSMichal Kazior } 2613426e10eaSMichal Kazior 2614426e10eaSMichal Kazior rcu_read_unlock(); 2615426e10eaSMichal Kazior 2616426e10eaSMichal Kazior ath10k_mac_tx_push_pending(ar); 2617839ae637SMichal Kazior } 2618839ae637SMichal Kazior 2619e3a91f87SRajkumar Manoharan void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) 2620e3a91f87SRajkumar Manoharan { 2621e3a91f87SRajkumar Manoharan bool release; 2622e3a91f87SRajkumar Manoharan 2623e3a91f87SRajkumar Manoharan release = ath10k_htt_t2h_msg_handler(ar, skb); 2624e3a91f87SRajkumar Manoharan 2625e3a91f87SRajkumar Manoharan /* Free the indication buffer */ 2626e3a91f87SRajkumar Manoharan if (release) 2627e3a91f87SRajkumar Manoharan dev_kfree_skb_any(skb); 2628e3a91f87SRajkumar Manoharan } 2629e3a91f87SRajkumar Manoharan 26300189dbd7SAnilkumar Kolli static inline int ath10k_get_legacy_rate_idx(struct ath10k *ar, u8 rate) 2631cec17c38SAnilkumar Kolli { 2632cec17c38SAnilkumar Kolli static const u8 legacy_rates[] = {1, 2, 5, 11, 6, 9, 12, 2633cec17c38SAnilkumar Kolli 18, 24, 36, 48, 54}; 2634cec17c38SAnilkumar Kolli int i; 
for (i = 0; i < ARRAY_SIZE(legacy_rates); i++) {
		if (rate == legacy_rates[i])
			return i;
	}

	ath10k_warn(ar, "Invalid legacy rate %hhd peer stats", rate);
	return -EINVAL;
}

/* Fold one report's worth of per-peer tx stats into the cumulative
 * per-station tables kept for extended tx stats. legacy_rate_idx indexes
 * the legacy[] tables when the current rate is CCK/OFDM.
 */
static void
ath10k_accumulate_per_peer_tx_stats(struct ath10k *ar,
				    struct ath10k_sta *arsta,
				    struct ath10k_per_peer_tx_stats *pstats,
				    u8 legacy_rate_idx)
{
	struct rate_info *txrate = &arsta->txrate;
	struct ath10k_htt_tx_stats *tx_stats;
	int ht_idx, gi, mcs, bw, nss;

	/* Extended tx stats may be disabled; nothing to accumulate then */
	if (!arsta->tx_stats)
		return;

	tx_stats = arsta->tx_stats;
	/* NOTE(review): gi is the raw RATE_INFO_FLAGS_SHORT_GI bit, not a
	 * 0/1 value - confirm the gi[][] tables are sized for this index.
	 */
	gi = (arsta->txrate.flags & RATE_INFO_FLAGS_SHORT_GI);
	/* HT tables fold nss into the mcs dimension, 8 mcs values per nss */
	ht_idx = txrate->mcs + txrate->nss * 8;
	mcs = txrate->mcs;
	bw = txrate->bw;
	nss = txrate->nss;

/* Shorthand for the per-counter-type stats tables */
#define STATS_OP_FMT(name) tx_stats->stats[ATH10K_STATS_TYPE_##name]

	/* Index 0 of each table accumulates bytes, index 1 packets */
	if (txrate->flags == RATE_INFO_FLAGS_VHT_MCS) {
		STATS_OP_FMT(SUCC).vht[0][mcs] += pstats->succ_bytes;
		STATS_OP_FMT(SUCC).vht[1][mcs] += pstats->succ_pkts;
		STATS_OP_FMT(FAIL).vht[0][mcs] += pstats->failed_bytes;
		STATS_OP_FMT(FAIL).vht[1][mcs] += pstats->failed_pkts;
		STATS_OP_FMT(RETRY).vht[0][mcs] += pstats->retry_bytes;
		STATS_OP_FMT(RETRY).vht[1][mcs] += pstats->retry_pkts;
	} else if (txrate->flags == RATE_INFO_FLAGS_MCS) {
		STATS_OP_FMT(SUCC).ht[0][ht_idx] += pstats->succ_bytes;
		STATS_OP_FMT(SUCC).ht[1][ht_idx] += pstats->succ_pkts;
		STATS_OP_FMT(FAIL).ht[0][ht_idx] += pstats->failed_bytes;
		STATS_OP_FMT(FAIL).ht[1][ht_idx] += pstats->failed_pkts;
		STATS_OP_FMT(RETRY).ht[0][ht_idx] += pstats->retry_bytes;
		STATS_OP_FMT(RETRY).ht[1][ht_idx] += pstats->retry_pkts;
	} else {
		mcs = legacy_rate_idx;
		if (mcs < 0)
			return;

		STATS_OP_FMT(SUCC).legacy[0][mcs] += pstats->succ_bytes;
		STATS_OP_FMT(SUCC).legacy[1][mcs] += pstats->succ_pkts;
		STATS_OP_FMT(FAIL).legacy[0][mcs] += pstats->failed_bytes;
		STATS_OP_FMT(FAIL).legacy[1][mcs] += pstats->failed_pkts;
		STATS_OP_FMT(RETRY).legacy[0][mcs] += pstats->retry_bytes;
		STATS_OP_FMT(RETRY).legacy[1][mcs] += pstats->retry_pkts;
	}

	if (ATH10K_HW_AMPDU(pstats->flags)) {
		tx_stats->ba_fails += ATH10K_HW_BA_FAIL(pstats->flags);

		/* A-MPDU tables count successful plus retried traffic */
		if (txrate->flags == RATE_INFO_FLAGS_MCS) {
			STATS_OP_FMT(AMPDU).ht[0][ht_idx] +=
				pstats->succ_bytes + pstats->retry_bytes;
			STATS_OP_FMT(AMPDU).ht[1][ht_idx] +=
				pstats->succ_pkts + pstats->retry_pkts;
		} else {
			STATS_OP_FMT(AMPDU).vht[0][mcs] +=
				pstats->succ_bytes + pstats->retry_bytes;
			STATS_OP_FMT(AMPDU).vht[1][mcs] +=
				pstats->succ_pkts + pstats->retry_pkts;
		}
		STATS_OP_FMT(AMPDU).bw[0][bw] +=
			pstats->succ_bytes + pstats->retry_bytes;
		STATS_OP_FMT(AMPDU).nss[0][nss] +=
			pstats->succ_bytes + pstats->retry_bytes;
		STATS_OP_FMT(AMPDU).gi[0][gi] +=
			pstats->succ_bytes + pstats->retry_bytes;
		STATS_OP_FMT(AMPDU).bw[1][bw] +=
			pstats->succ_pkts + pstats->retry_pkts;
		STATS_OP_FMT(AMPDU).nss[1][nss] +=
			pstats->succ_pkts + pstats->retry_pkts;
		STATS_OP_FMT(AMPDU).gi[1][gi] +=
			pstats->succ_pkts + pstats->retry_pkts;
	} else {
		tx_stats->ack_fails +=
			ATH10K_HW_BA_FAIL(pstats->flags);
	}

	STATS_OP_FMT(SUCC).bw[0][bw] += pstats->succ_bytes;
	STATS_OP_FMT(SUCC).nss[0][nss] += pstats->succ_bytes;
	STATS_OP_FMT(SUCC).gi[0][gi] += pstats->succ_bytes;

	STATS_OP_FMT(SUCC).bw[1][bw] += pstats->succ_pkts;
	STATS_OP_FMT(SUCC).nss[1][nss] += pstats->succ_pkts;
	STATS_OP_FMT(SUCC).gi[1][gi] += pstats->succ_pkts;
2732a904417fSAnilkumar Kolli 2733a904417fSAnilkumar Kolli STATS_OP_FMT(FAIL).bw[0][bw] += pstats->failed_bytes; 2734a904417fSAnilkumar Kolli STATS_OP_FMT(FAIL).nss[0][nss] += pstats->failed_bytes; 2735a904417fSAnilkumar Kolli STATS_OP_FMT(FAIL).gi[0][gi] += pstats->failed_bytes; 2736a904417fSAnilkumar Kolli 2737a904417fSAnilkumar Kolli STATS_OP_FMT(FAIL).bw[1][bw] += pstats->failed_pkts; 2738a904417fSAnilkumar Kolli STATS_OP_FMT(FAIL).nss[1][nss] += pstats->failed_pkts; 2739a904417fSAnilkumar Kolli STATS_OP_FMT(FAIL).gi[1][gi] += pstats->failed_pkts; 2740a904417fSAnilkumar Kolli 2741a904417fSAnilkumar Kolli STATS_OP_FMT(RETRY).bw[0][bw] += pstats->retry_bytes; 2742a904417fSAnilkumar Kolli STATS_OP_FMT(RETRY).nss[0][nss] += pstats->retry_bytes; 2743a904417fSAnilkumar Kolli STATS_OP_FMT(RETRY).gi[0][gi] += pstats->retry_bytes; 2744a904417fSAnilkumar Kolli 2745a904417fSAnilkumar Kolli STATS_OP_FMT(RETRY).bw[1][bw] += pstats->retry_pkts; 2746a904417fSAnilkumar Kolli STATS_OP_FMT(RETRY).nss[1][nss] += pstats->retry_pkts; 2747a904417fSAnilkumar Kolli STATS_OP_FMT(RETRY).gi[1][gi] += pstats->retry_pkts; 2748a904417fSAnilkumar Kolli } 2749a904417fSAnilkumar Kolli 2750a904417fSAnilkumar Kolli static void 2751cec17c38SAnilkumar Kolli ath10k_update_per_peer_tx_stats(struct ath10k *ar, 2752cec17c38SAnilkumar Kolli struct ieee80211_sta *sta, 2753cec17c38SAnilkumar Kolli struct ath10k_per_peer_tx_stats *peer_stats) 2754cec17c38SAnilkumar Kolli { 2755cec17c38SAnilkumar Kolli struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; 27560189dbd7SAnilkumar Kolli u8 rate = 0, rate_idx = 0, sgi; 2757cec17c38SAnilkumar Kolli struct rate_info txrate; 2758cec17c38SAnilkumar Kolli 2759cec17c38SAnilkumar Kolli lockdep_assert_held(&ar->data_lock); 2760cec17c38SAnilkumar Kolli 2761cec17c38SAnilkumar Kolli txrate.flags = ATH10K_HW_PREAMBLE(peer_stats->ratecode); 2762cec17c38SAnilkumar Kolli txrate.bw = ATH10K_HW_BW(peer_stats->flags); 2763cec17c38SAnilkumar Kolli txrate.nss = 
ATH10K_HW_NSS(peer_stats->ratecode); 2764cec17c38SAnilkumar Kolli txrate.mcs = ATH10K_HW_MCS_RATE(peer_stats->ratecode); 2765cec17c38SAnilkumar Kolli sgi = ATH10K_HW_GI(peer_stats->flags); 2766cec17c38SAnilkumar Kolli 2767c1dd8016SSven Eckelmann if (txrate.flags == WMI_RATE_PREAMBLE_VHT && txrate.mcs > 9) { 2768c1dd8016SSven Eckelmann ath10k_warn(ar, "Invalid VHT mcs %hhd peer stats", txrate.mcs); 2769c1dd8016SSven Eckelmann return; 2770c1dd8016SSven Eckelmann } 2771c1dd8016SSven Eckelmann 2772c1dd8016SSven Eckelmann if (txrate.flags == WMI_RATE_PREAMBLE_HT && 2773c1dd8016SSven Eckelmann (txrate.mcs > 7 || txrate.nss < 1)) { 2774c1dd8016SSven Eckelmann ath10k_warn(ar, "Invalid HT mcs %hhd nss %hhd peer stats", 2775c1dd8016SSven Eckelmann txrate.mcs, txrate.nss); 2776cec17c38SAnilkumar Kolli return; 2777cec17c38SAnilkumar Kolli } 2778cec17c38SAnilkumar Kolli 27790f8a2b77SMohammed Shafi Shajakhan memset(&arsta->txrate, 0, sizeof(arsta->txrate)); 27800f8a2b77SMohammed Shafi Shajakhan 2781cec17c38SAnilkumar Kolli if (txrate.flags == WMI_RATE_PREAMBLE_CCK || 2782cec17c38SAnilkumar Kolli txrate.flags == WMI_RATE_PREAMBLE_OFDM) { 2783cec17c38SAnilkumar Kolli rate = ATH10K_HW_LEGACY_RATE(peer_stats->ratecode); 2784cec17c38SAnilkumar Kolli /* This is hacky, FW sends CCK rate 5.5Mbps as 6 */ 27850189dbd7SAnilkumar Kolli if (rate == 6 && txrate.flags == WMI_RATE_PREAMBLE_CCK) 27860189dbd7SAnilkumar Kolli rate = 5; 27870189dbd7SAnilkumar Kolli rate_idx = ath10k_get_legacy_rate_idx(ar, rate); 27880189dbd7SAnilkumar Kolli if (rate_idx < 0) 27890189dbd7SAnilkumar Kolli return; 2790cd591027SMohammed Shafi Shajakhan arsta->txrate.legacy = rate; 2791cec17c38SAnilkumar Kolli } else if (txrate.flags == WMI_RATE_PREAMBLE_HT) { 2792cec17c38SAnilkumar Kolli arsta->txrate.flags = RATE_INFO_FLAGS_MCS; 2793c1dd8016SSven Eckelmann arsta->txrate.mcs = txrate.mcs + 8 * (txrate.nss - 1); 2794cec17c38SAnilkumar Kolli } else { 2795cec17c38SAnilkumar Kolli arsta->txrate.flags = 
RATE_INFO_FLAGS_VHT_MCS;
		arsta->txrate.mcs = txrate.mcs;
	}

	if (sgi)
		arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;

	arsta->txrate.nss = txrate.nss;
	arsta->txrate.bw = ath10k_bw_to_mac80211_bw(txrate.bw);

	if (ath10k_debug_is_extd_tx_stats_enabled(ar))
		ath10k_accumulate_per_peer_tx_stats(ar, arsta, peer_stats,
						    rate_idx);
}

/* Handle an HTT PEER_STATS event: parse each PPDU record in the message
 * and feed it through ath10k_update_per_peer_tx_stats() for the peer
 * named in the first record.
 */
static void ath10k_htt_fetch_peer_stats(struct ath10k *ar,
					struct sk_buff *skb)
{
	struct htt_resp *resp = (struct htt_resp *)skb->data;
	struct ath10k_per_peer_tx_stats *p_tx_stats = &ar->peer_tx_stats;
	struct htt_per_peer_tx_stats_ind *tx_stats;
	struct ieee80211_sta *sta;
	struct ath10k_peer *peer;
	int peer_id, i;
	u8 ppdu_len, num_ppdu;

	num_ppdu = resp->peer_tx_stats.num_ppdu;
	/* ppdu_len is reported in 32-bit words */
	ppdu_len = resp->peer_tx_stats.ppdu_len * sizeof(__le32);

	/* Make sure all advertised PPDU records fit inside the buffer */
	if (skb->len < sizeof(struct htt_resp_hdr) + num_ppdu * ppdu_len) {
		ath10k_warn(ar, "Invalid peer stats buf length %d\n", skb->len);
		return;
	}

	tx_stats = (struct htt_per_peer_tx_stats_ind *)
		   (resp->peer_tx_stats.payload);
	peer_id = __le16_to_cpu(tx_stats->peer_id);

	rcu_read_lock();
	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer) {
		ath10k_warn(ar, "Invalid peer id %d peer stats buffer\n",
			    peer_id);
		goto out;
	}

	sta = peer->sta;
	for (i = 0; i < num_ppdu; i++) {
		tx_stats = (struct htt_per_peer_tx_stats_ind *)
			   (resp->peer_tx_stats.payload + i * ppdu_len);

		p_tx_stats->succ_bytes = __le32_to_cpu(tx_stats->succ_bytes);
		p_tx_stats->retry_bytes = __le32_to_cpu(tx_stats->retry_bytes);
		p_tx_stats->failed_bytes =
			__le32_to_cpu(tx_stats->failed_bytes);
		p_tx_stats->ratecode = tx_stats->ratecode;
		p_tx_stats->flags = tx_stats->flags;
		p_tx_stats->succ_pkts = __le16_to_cpu(tx_stats->succ_pkts);
		p_tx_stats->retry_pkts = __le16_to_cpu(tx_stats->retry_pkts);
		p_tx_stats->failed_pkts = __le16_to_cpu(tx_stats->failed_pkts);

		ath10k_update_per_peer_tx_stats(ar, sta, p_tx_stats);
	}

out:
	spin_unlock_bh(&ar->data_lock);
	rcu_read_unlock();
}

static
void ath10k_fetch_10_2_tx_stats(struct ath10k *ar, u8 *data)
{
	struct ath10k_pktlog_hdr *hdr = (struct ath10k_pktlog_hdr *)data;
	struct ath10k_per_peer_tx_stats *p_tx_stats = &ar->peer_tx_stats;
	struct ath10k_10_2_peer_tx_stats *tx_stats;
	struct ieee80211_sta *sta;
	struct ath10k_peer *peer;
	u16 log_type = __le16_to_cpu(hdr->log_type);
	u32 peer_id = 0, i;

	/* 10.2 firmware delivers tx stats piggybacked on pktlog; only
	 * TX_STAT records are of interest here.
	 */
	if (log_type != ATH_PKTLOG_TYPE_TX_STAT)
		return;

	tx_stats = (struct ath10k_10_2_peer_tx_stats *)((hdr->payload) +
		    ATH10K_10_2_TX_STATS_OFFSET);

	if (!tx_stats->tx_ppdu_cnt)
		return;

	peer_id = tx_stats->peer_id;

	rcu_read_lock();
	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer) {
		ath10k_warn(ar, "Invalid peer id %d in peer stats buffer\n",
			    peer_id);
		goto out;
	}

	sta = peer->sta;
	/* Convert each PPDU's counters into the common per-peer format */
	for (i = 0; i < tx_stats->tx_ppdu_cnt; i++) {
		p_tx_stats->succ_bytes =
			__le16_to_cpu(tx_stats->success_bytes[i]);
		p_tx_stats->retry_bytes =
			__le16_to_cpu(tx_stats->retry_bytes[i]);
		p_tx_stats->failed_bytes =
			__le16_to_cpu(tx_stats->failed_bytes[i]);
		p_tx_stats->ratecode = tx_stats->ratecode[i];
		p_tx_stats->flags = tx_stats->flags[i];
		p_tx_stats->succ_pkts = tx_stats->success_pkts[i];
		p_tx_stats->retry_pkts = tx_stats->retry_pkts[i];
		p_tx_stats->failed_pkts = tx_stats->failed_pkts[i];

		ath10k_update_per_peer_tx_stats(ar, sta, p_tx_stats);
	}
	spin_unlock_bh(&ar->data_lock);
	rcu_read_unlock();

	return;

out:
	spin_unlock_bh(&ar->data_lock);
	rcu_read_unlock();
}

/* Top-level HTT target-to-host message dispatcher. Returns true when the
 * caller should free the skb, false when the handler kept it.
 */
bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_resp *resp = (struct htt_resp *)skb->data;
	enum htt_t2h_msg_type type;

	/* confirm alignment */
	if (!IS_ALIGNED((unsigned long)skb->data, 4))
		ath10k_warn(ar, "unaligned htt message, expect trouble\n");

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, msg_type: 0x%0X\n",
		   resp->hdr.msg_type);

	/* Drop message ids the firmware ABI table does not know about */
	if (resp->hdr.msg_type >= ar->htt.t2h_msg_types_max) {
		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, unsupported msg_type: 0x%0X\n max: 0x%0X",
resp->hdr.msg_type, ar->htt.t2h_msg_types_max);
		return true;
	}
	/* Translate the firmware-specific id to the generic enum */
	type = ar->htt.t2h_msg_types[resp->hdr.msg_type];

	switch (type) {
	case HTT_T2H_MSG_TYPE_VERSION_CONF: {
		htt->target_version_major = resp->ver_resp.major;
		htt->target_version_minor = resp->ver_resp.minor;
		complete(&htt->target_version_received);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_IND:
		/* High-latency devices process rx inline (and decide skb
		 * ownership); low-latency devices defer to the txrx task.
		 */
		if (ar->dev_type == ATH10K_DEV_TYPE_HL)
			return ath10k_htt_rx_proc_rx_ind_hl(htt,
							    &resp->rx_ind_hl,
							    skb);
		else
			ath10k_htt_rx_proc_rx_ind_ll(htt, &resp->rx_ind);
		break;
	case HTT_T2H_MSG_TYPE_PEER_MAP: {
		struct htt_peer_map_event ev = {
			.vdev_id = resp->peer_map.vdev_id,
			.peer_id = __le16_to_cpu(resp->peer_map.peer_id),
		};
		memcpy(ev.addr, resp->peer_map.addr, sizeof(ev.addr));
		ath10k_peer_map_event(htt, &ev);
		break;
	}
	case HTT_T2H_MSG_TYPE_PEER_UNMAP: {
		struct htt_peer_unmap_event ev = {
			.peer_id = __le16_to_cpu(resp->peer_unmap.peer_id),
		};
		ath10k_peer_unmap_event(htt, &ev);
		break;
	}
	case HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION: {
		struct htt_tx_done tx_done = {};
		int status = __le32_to_cpu(resp->mgmt_tx_completion.status);
		int info = __le32_to_cpu(resp->mgmt_tx_completion.info);

		tx_done.msdu_id = __le32_to_cpu(resp->mgmt_tx_completion.desc_id);

		switch (status) {
		case HTT_MGMT_TX_STATUS_OK:
			tx_done.status = HTT_TX_COMPL_STATE_ACK;
			/* ack rssi is only valid when both the wmi service
			 * and the per-frame flag advertise it
			 */
			if (test_bit(WMI_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS,
				     ar->wmi.svc_map) &&
			    (resp->mgmt_tx_completion.flags &
			     HTT_MGMT_TX_CMPL_FLAG_ACK_RSSI)) {
				tx_done.ack_rssi =
					FIELD_GET(HTT_MGMT_TX_CMPL_INFO_ACK_RSSI_MASK,
						  info);
			}
			break;
		case HTT_MGMT_TX_STATUS_RETRY:
			tx_done.status = HTT_TX_COMPL_STATE_NOACK;
			break;
		case HTT_MGMT_TX_STATUS_DROP:
			tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
			break;
		}

		status = ath10k_txrx_tx_unref(htt, &tx_done);
		if (!status) {
			spin_lock_bh(&htt->tx_lock);
			ath10k_htt_tx_mgmt_dec_pending(htt);
			spin_unlock_bh(&htt->tx_lock);
		}
		break;
	}
	case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
		ath10k_htt_rx_tx_compl_ind(htt->ar, skb);
		break;
	case HTT_T2H_MSG_TYPE_SEC_IND: {
		struct ath10k *ar = htt->ar;
		struct htt_security_indication *ev = &resp->security_indication;

		ath10k_dbg(ar, ATH10K_DBG_HTT,
			   "sec ind peer_id %d unicast %d type %d\n",
			   __le16_to_cpu(ev->peer_id),
			   !!(ev->flags & HTT_SECURITY_IS_UNICAST),
			   MS(ev->flags, HTT_SECURITY_TYPE));
		complete(&ar->install_key_done);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_FRAG_IND: {
		ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
				skb->data, skb->len);
		atomic_inc(&htt->num_mpdus_ready);
		break;
	}
	case HTT_T2H_MSG_TYPE_TEST:
		break;
	case HTT_T2H_MSG_TYPE_STATS_CONF:
		trace_ath10k_htt_stats(ar, skb->data, skb->len);
		break;
	case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
		/* Firmware can return tx frames if it's unable to fully
		 * process them and suspects host may be able to fix it. ath10k
		 * sends all tx frames as already inspected so this shouldn't
		 * happen unless fw has a bug.
		 */
		ath10k_warn(ar, "received an unexpected htt tx inspect event\n");
		break;
	case HTT_T2H_MSG_TYPE_RX_ADDBA:
		ath10k_htt_rx_addba(ar, resp);
		break;
	case HTT_T2H_MSG_TYPE_RX_DELBA:
		ath10k_htt_rx_delba(ar, resp);
		break;
	case HTT_T2H_MSG_TYPE_PKTLOG: {
		trace_ath10k_htt_pktlog(ar, resp->pktlog_msg.payload,
					skb->len -
					offsetof(struct htt_resp,
						 pktlog_msg.payload));

		/* 10.2 firmware embeds per-peer tx stats in pktlog records */
		if (ath10k_peer_stats_enabled(ar))
			ath10k_fetch_10_2_tx_stats(ar,
						   resp->pktlog_msg.payload);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_FLUSH: {
		/* Ignore this event because mac80211 takes care of Rx
		 * aggregation reordering.
		 */
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND: {
		/* Queued for deferred processing; returning false keeps the
		 * skb alive (the caller must not free it).
		 */
		skb_queue_tail(&htt->rx_in_ord_compl_q, skb);
		return false;
	}
	case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND:
		break;
	case HTT_T2H_MSG_TYPE_CHAN_CHANGE: {
		u32 phymode = __le32_to_cpu(resp->chan_change.phymode);
		u32 freq = __le32_to_cpu(resp->chan_change.freq);

		ar->tgt_oper_chan = ieee80211_get_channel(ar->hw->wiphy, freq);
		ath10k_dbg(ar, ATH10K_DBG_HTT,
			   "htt chan change freq %u phymode %s\n",
			   freq, ath10k_wmi_phymode_str(phymode));
		break;
	}
	case HTT_T2H_MSG_TYPE_AGGR_CONF:
		break;
	case HTT_T2H_MSG_TYPE_TX_FETCH_IND: {
		/* Copy because the original indication buffer is returned to
		 * the caller while the fetch is processed asynchronously.
		 */
		struct sk_buff *tx_fetch_ind = skb_copy(skb, GFP_ATOMIC);

		if (!tx_fetch_ind) {
			ath10k_warn(ar, "failed to copy htt tx fetch ind\n");
			break;
		}
		skb_queue_tail(&htt->tx_fetch_ind_q, tx_fetch_ind);
		break;
	}
	case HTT_T2H_MSG_TYPE_TX_FETCH_CONFIRM:
		ath10k_htt_rx_tx_fetch_confirm(ar, skb);
		break;
	case HTT_T2H_MSG_TYPE_TX_MODE_SWITCH_IND:
ath10k_htt_rx_tx_mode_switch_ind(ar, skb); 30979b158736SMichal Kazior break; 3098cec17c38SAnilkumar Kolli case HTT_T2H_MSG_TYPE_PEER_STATS: 3099cec17c38SAnilkumar Kolli ath10k_htt_fetch_peer_stats(ar, skb); 3100cec17c38SAnilkumar Kolli break; 31019b158736SMichal Kazior case HTT_T2H_MSG_TYPE_EN_STATS: 31025e3dd157SKalle Valo default: 31032358a544SMichal Kazior ath10k_warn(ar, "htt event (%d) not handled\n", 31045e3dd157SKalle Valo resp->hdr.msg_type); 31057aa7a72aSMichal Kazior ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ", 31065e3dd157SKalle Valo skb->data, skb->len); 31075e3dd157SKalle Valo break; 3108dab55d10SWaldemar Rymarkiewicz } 3109e3a91f87SRajkumar Manoharan return true; 31105e3dd157SKalle Valo } 31113f0f7ed4SRajkumar Manoharan EXPORT_SYMBOL(ath10k_htt_t2h_msg_handler); 31126c5151a9SMichal Kazior 3113afb0bf7fSVivek Natarajan void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar, 3114afb0bf7fSVivek Natarajan struct sk_buff *skb) 3115afb0bf7fSVivek Natarajan { 311653a5c9bcSAshok Raj Nagarajan trace_ath10k_htt_pktlog(ar, skb->data, skb->len); 3117afb0bf7fSVivek Natarajan dev_kfree_skb_any(skb); 3118afb0bf7fSVivek Natarajan } 3119afb0bf7fSVivek Natarajan EXPORT_SYMBOL(ath10k_htt_rx_pktlog_completion_handler); 3120afb0bf7fSVivek Natarajan 3121deba1b9eSRajkumar Manoharan static int ath10k_htt_rx_deliver_msdu(struct ath10k *ar, int quota, int budget) 3122deba1b9eSRajkumar Manoharan { 3123deba1b9eSRajkumar Manoharan struct sk_buff *skb; 3124deba1b9eSRajkumar Manoharan 3125deba1b9eSRajkumar Manoharan while (quota < budget) { 3126deba1b9eSRajkumar Manoharan if (skb_queue_empty(&ar->htt.rx_msdus_q)) 3127deba1b9eSRajkumar Manoharan break; 3128deba1b9eSRajkumar Manoharan 312962652555SBob Copeland skb = skb_dequeue(&ar->htt.rx_msdus_q); 3130deba1b9eSRajkumar Manoharan if (!skb) 3131deba1b9eSRajkumar Manoharan break; 3132deba1b9eSRajkumar Manoharan ath10k_process_rx(ar, skb); 3133deba1b9eSRajkumar Manoharan quota++; 3134deba1b9eSRajkumar Manoharan 
} 3135deba1b9eSRajkumar Manoharan 3136deba1b9eSRajkumar Manoharan return quota; 3137deba1b9eSRajkumar Manoharan } 3138deba1b9eSRajkumar Manoharan 31393c97f5deSRajkumar Manoharan int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget) 31406c5151a9SMichal Kazior { 31413c97f5deSRajkumar Manoharan struct ath10k_htt *htt = &ar->htt; 314259465fe4SRajkumar Manoharan struct htt_tx_done tx_done = {}; 3143426e10eaSMichal Kazior struct sk_buff_head tx_ind_q; 31446c5151a9SMichal Kazior struct sk_buff *skb; 3145d742c969SMichal Kazior unsigned long flags; 3146deba1b9eSRajkumar Manoharan int quota = 0, done, ret; 31473c97f5deSRajkumar Manoharan bool resched_napi = false; 31486c5151a9SMichal Kazior 3149426e10eaSMichal Kazior __skb_queue_head_init(&tx_ind_q); 3150da6416caSRajkumar Manoharan 3151deba1b9eSRajkumar Manoharan /* Process pending frames before dequeuing more data 3152deba1b9eSRajkumar Manoharan * from hardware. 31533c97f5deSRajkumar Manoharan */ 3154deba1b9eSRajkumar Manoharan quota = ath10k_htt_rx_deliver_msdu(ar, quota, budget); 3155deba1b9eSRajkumar Manoharan if (quota == budget) { 31563c97f5deSRajkumar Manoharan resched_napi = true; 31573c97f5deSRajkumar Manoharan goto exit; 31583c97f5deSRajkumar Manoharan } 31593c97f5deSRajkumar Manoharan 316062652555SBob Copeland while ((skb = skb_dequeue(&htt->rx_in_ord_compl_q))) { 31613c97f5deSRajkumar Manoharan spin_lock_bh(&htt->rx_ring.lock); 3162deba1b9eSRajkumar Manoharan ret = ath10k_htt_rx_in_ord_ind(ar, skb); 31633c97f5deSRajkumar Manoharan spin_unlock_bh(&htt->rx_ring.lock); 31643c97f5deSRajkumar Manoharan 31653c97f5deSRajkumar Manoharan dev_kfree_skb_any(skb); 3166deba1b9eSRajkumar Manoharan if (ret == -EIO) { 31673c97f5deSRajkumar Manoharan resched_napi = true; 31683c97f5deSRajkumar Manoharan goto exit; 31693c97f5deSRajkumar Manoharan } 31703c97f5deSRajkumar Manoharan } 31713c97f5deSRajkumar Manoharan 3172deba1b9eSRajkumar Manoharan while (atomic_read(&htt->num_mpdus_ready)) { 3173deba1b9eSRajkumar Manoharan ret 
= ath10k_htt_rx_handle_amsdu(htt); 3174deba1b9eSRajkumar Manoharan if (ret == -EIO) { 31753c97f5deSRajkumar Manoharan resched_napi = true; 31763c97f5deSRajkumar Manoharan goto exit; 31773c97f5deSRajkumar Manoharan } 31783c97f5deSRajkumar Manoharan atomic_dec(&htt->num_mpdus_ready); 31793c97f5deSRajkumar Manoharan } 3180deba1b9eSRajkumar Manoharan 3181deba1b9eSRajkumar Manoharan /* Deliver received data after processing data from hardware */ 3182deba1b9eSRajkumar Manoharan quota = ath10k_htt_rx_deliver_msdu(ar, quota, budget); 31833c97f5deSRajkumar Manoharan 31843c97f5deSRajkumar Manoharan /* From NAPI documentation: 31853c97f5deSRajkumar Manoharan * The napi poll() function may also process TX completions, in which 31863c97f5deSRajkumar Manoharan * case if it processes the entire TX ring then it should count that 31873c97f5deSRajkumar Manoharan * work as the rest of the budget. 31883c97f5deSRajkumar Manoharan */ 31893c97f5deSRajkumar Manoharan if ((quota < budget) && !kfifo_is_empty(&htt->txdone_fifo)) 31903c97f5deSRajkumar Manoharan quota = budget; 3191426e10eaSMichal Kazior 319259465fe4SRajkumar Manoharan /* kfifo_get: called only within txrx_tasklet so it's neatly serialized. 319359465fe4SRajkumar Manoharan * From kfifo_get() documentation: 319459465fe4SRajkumar Manoharan * Note that with only one concurrent reader and one concurrent writer, 319559465fe4SRajkumar Manoharan * you don't need extra locking to use these macro. 
319659465fe4SRajkumar Manoharan */ 319759465fe4SRajkumar Manoharan while (kfifo_get(&htt->txdone_fifo, &tx_done)) 319859465fe4SRajkumar Manoharan ath10k_txrx_tx_unref(htt, &tx_done); 31996c5151a9SMichal Kazior 320018f53fe0SRajkumar Manoharan ath10k_mac_tx_push_pending(ar); 320118f53fe0SRajkumar Manoharan 32023c97f5deSRajkumar Manoharan spin_lock_irqsave(&htt->tx_fetch_ind_q.lock, flags); 32033c97f5deSRajkumar Manoharan skb_queue_splice_init(&htt->tx_fetch_ind_q, &tx_ind_q); 32043c97f5deSRajkumar Manoharan spin_unlock_irqrestore(&htt->tx_fetch_ind_q.lock, flags); 32053c97f5deSRajkumar Manoharan 3206426e10eaSMichal Kazior while ((skb = __skb_dequeue(&tx_ind_q))) { 3207426e10eaSMichal Kazior ath10k_htt_rx_tx_fetch_ind(ar, skb); 32086c5151a9SMichal Kazior dev_kfree_skb_any(skb); 32096c5151a9SMichal Kazior } 32106c5151a9SMichal Kazior 32113c97f5deSRajkumar Manoharan exit: 32125c86d97bSRajkumar Manoharan ath10k_htt_rx_msdu_buff_replenish(htt); 32133c97f5deSRajkumar Manoharan /* In case of rx failure or more data to read, report budget 32143c97f5deSRajkumar Manoharan * to reschedule NAPI poll 32153c97f5deSRajkumar Manoharan */ 32163c97f5deSRajkumar Manoharan done = resched_napi ? 
budget : quota; 32173c97f5deSRajkumar Manoharan 32183c97f5deSRajkumar Manoharan return done; 32196c5151a9SMichal Kazior } 32203c97f5deSRajkumar Manoharan EXPORT_SYMBOL(ath10k_htt_txrx_compl_task); 3221a91a626bSGovind Singh 3222a91a626bSGovind Singh static const struct ath10k_htt_rx_ops htt_rx_ops_32 = { 3223a91a626bSGovind Singh .htt_get_rx_ring_size = ath10k_htt_get_rx_ring_size_32, 3224a91a626bSGovind Singh .htt_config_paddrs_ring = ath10k_htt_config_paddrs_ring_32, 3225a91a626bSGovind Singh .htt_set_paddrs_ring = ath10k_htt_set_paddrs_ring_32, 3226a91a626bSGovind Singh .htt_get_vaddr_ring = ath10k_htt_get_vaddr_ring_32, 3227a91a626bSGovind Singh .htt_reset_paddrs_ring = ath10k_htt_reset_paddrs_ring_32, 3228a91a626bSGovind Singh }; 3229a91a626bSGovind Singh 3230a91a626bSGovind Singh static const struct ath10k_htt_rx_ops htt_rx_ops_64 = { 3231a91a626bSGovind Singh .htt_get_rx_ring_size = ath10k_htt_get_rx_ring_size_64, 3232a91a626bSGovind Singh .htt_config_paddrs_ring = ath10k_htt_config_paddrs_ring_64, 3233a91a626bSGovind Singh .htt_set_paddrs_ring = ath10k_htt_set_paddrs_ring_64, 3234a91a626bSGovind Singh .htt_get_vaddr_ring = ath10k_htt_get_vaddr_ring_64, 3235a91a626bSGovind Singh .htt_reset_paddrs_ring = ath10k_htt_reset_paddrs_ring_64, 3236a91a626bSGovind Singh }; 3237a91a626bSGovind Singh 3238d4e7f553SErik Stromdahl static const struct ath10k_htt_rx_ops htt_rx_ops_hl = { 3239d4e7f553SErik Stromdahl }; 3240d4e7f553SErik Stromdahl 3241a91a626bSGovind Singh void ath10k_htt_set_rx_ops(struct ath10k_htt *htt) 3242a91a626bSGovind Singh { 3243a91a626bSGovind Singh struct ath10k *ar = htt->ar; 3244a91a626bSGovind Singh 3245d4e7f553SErik Stromdahl if (ar->dev_type == ATH10K_DEV_TYPE_HL) 3246d4e7f553SErik Stromdahl htt->rx_ops = &htt_rx_ops_hl; 3247d4e7f553SErik Stromdahl else if (ar->hw_params.target_64bit) 3248a91a626bSGovind Singh htt->rx_ops = &htt_rx_ops_64; 3249a91a626bSGovind Singh else 3250a91a626bSGovind Singh htt->rx_ops = &htt_rx_ops_32; 3251a91a626bSGovind 
Singh } 3252