15e3dd157SKalle Valo /* 25e3dd157SKalle Valo * Copyright (c) 2005-2011 Atheros Communications Inc. 38b1083d6SKalle Valo * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. 4caee728aSVasanthakumar Thiagarajan * Copyright (c) 2018, The Linux Foundation. All rights reserved. 55e3dd157SKalle Valo * 65e3dd157SKalle Valo * Permission to use, copy, modify, and/or distribute this software for any 75e3dd157SKalle Valo * purpose with or without fee is hereby granted, provided that the above 85e3dd157SKalle Valo * copyright notice and this permission notice appear in all copies. 95e3dd157SKalle Valo * 105e3dd157SKalle Valo * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 115e3dd157SKalle Valo * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 125e3dd157SKalle Valo * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 135e3dd157SKalle Valo * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 145e3dd157SKalle Valo * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 155e3dd157SKalle Valo * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 165e3dd157SKalle Valo * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
175e3dd157SKalle Valo */ 185e3dd157SKalle Valo 19edb8236dSMichal Kazior #include "core.h" 205e3dd157SKalle Valo #include "htc.h" 215e3dd157SKalle Valo #include "htt.h" 225e3dd157SKalle Valo #include "txrx.h" 235e3dd157SKalle Valo #include "debug.h" 24a9bf0506SKalle Valo #include "trace.h" 25aa5b4fbcSMichal Kazior #include "mac.h" 265e3dd157SKalle Valo 275e3dd157SKalle Valo #include <linux/log2.h> 28235b9c42SVenkateswara Naralasetty #include <linux/bitfield.h> 295e3dd157SKalle Valo 305e3dd157SKalle Valo /* when under memory pressure rx ring refill may fail and needs a retry */ 315e3dd157SKalle Valo #define HTT_RX_RING_REFILL_RETRY_MS 50 325e3dd157SKalle Valo 335c86d97bSRajkumar Manoharan #define HTT_RX_RING_REFILL_RESCHED_MS 5 345c86d97bSRajkumar Manoharan 35f6dc2095SMichal Kazior static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb); 36f6dc2095SMichal Kazior 37c545070eSMichal Kazior static struct sk_buff * 38a91a626bSGovind Singh ath10k_htt_rx_find_skb_paddr(struct ath10k *ar, u64 paddr) 39c545070eSMichal Kazior { 40c545070eSMichal Kazior struct ath10k_skb_rxcb *rxcb; 41c545070eSMichal Kazior 42c545070eSMichal Kazior hash_for_each_possible(ar->htt.rx_ring.skb_table, rxcb, hlist, paddr) 43c545070eSMichal Kazior if (rxcb->paddr == paddr) 44c545070eSMichal Kazior return ATH10K_RXCB_SKB(rxcb); 45c545070eSMichal Kazior 46c545070eSMichal Kazior WARN_ON_ONCE(1); 47c545070eSMichal Kazior return NULL; 48c545070eSMichal Kazior } 49c545070eSMichal Kazior 505e3dd157SKalle Valo static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt) 515e3dd157SKalle Valo { 525e3dd157SKalle Valo struct sk_buff *skb; 53c545070eSMichal Kazior struct ath10k_skb_rxcb *rxcb; 54c545070eSMichal Kazior struct hlist_node *n; 555e3dd157SKalle Valo int i; 565e3dd157SKalle Valo 57c545070eSMichal Kazior if (htt->rx_ring.in_ord_rx) { 58c545070eSMichal Kazior hash_for_each_safe(htt->rx_ring.skb_table, i, n, rxcb, hlist) { 59c545070eSMichal Kazior skb = ATH10K_RXCB_SKB(rxcb); 60c545070eSMichal Kazior 
dma_unmap_single(htt->ar->dev, rxcb->paddr, 61c545070eSMichal Kazior skb->len + skb_tailroom(skb), 62c545070eSMichal Kazior DMA_FROM_DEVICE); 63c545070eSMichal Kazior hash_del(&rxcb->hlist); 64c545070eSMichal Kazior dev_kfree_skb_any(skb); 65c545070eSMichal Kazior } 66c545070eSMichal Kazior } else { 67c545070eSMichal Kazior for (i = 0; i < htt->rx_ring.size; i++) { 685e3dd157SKalle Valo skb = htt->rx_ring.netbufs_ring[i]; 69c545070eSMichal Kazior if (!skb) 70c545070eSMichal Kazior continue; 71c545070eSMichal Kazior 72c545070eSMichal Kazior rxcb = ATH10K_SKB_RXCB(skb); 73c545070eSMichal Kazior dma_unmap_single(htt->ar->dev, rxcb->paddr, 745e3dd157SKalle Valo skb->len + skb_tailroom(skb), 755e3dd157SKalle Valo DMA_FROM_DEVICE); 765e3dd157SKalle Valo dev_kfree_skb_any(skb); 775e3dd157SKalle Valo } 78c545070eSMichal Kazior } 795e3dd157SKalle Valo 805e3dd157SKalle Valo htt->rx_ring.fill_cnt = 0; 81c545070eSMichal Kazior hash_init(htt->rx_ring.skb_table); 82c545070eSMichal Kazior memset(htt->rx_ring.netbufs_ring, 0, 83c545070eSMichal Kazior htt->rx_ring.size * sizeof(htt->rx_ring.netbufs_ring[0])); 845e3dd157SKalle Valo } 855e3dd157SKalle Valo 86a91a626bSGovind Singh static size_t ath10k_htt_get_rx_ring_size_32(struct ath10k_htt *htt) 87a91a626bSGovind Singh { 88a91a626bSGovind Singh return htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring_32); 89a91a626bSGovind Singh } 90a91a626bSGovind Singh 91a91a626bSGovind Singh static size_t ath10k_htt_get_rx_ring_size_64(struct ath10k_htt *htt) 92a91a626bSGovind Singh { 93a91a626bSGovind Singh return htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring_64); 94a91a626bSGovind Singh } 95a91a626bSGovind Singh 96a91a626bSGovind Singh static void ath10k_htt_config_paddrs_ring_32(struct ath10k_htt *htt, 97a91a626bSGovind Singh void *vaddr) 98a91a626bSGovind Singh { 99a91a626bSGovind Singh htt->rx_ring.paddrs_ring_32 = vaddr; 100a91a626bSGovind Singh } 101a91a626bSGovind Singh 102a91a626bSGovind Singh static void 
ath10k_htt_config_paddrs_ring_64(struct ath10k_htt *htt, 103a91a626bSGovind Singh void *vaddr) 104a91a626bSGovind Singh { 105a91a626bSGovind Singh htt->rx_ring.paddrs_ring_64 = vaddr; 106a91a626bSGovind Singh } 107a91a626bSGovind Singh 108a91a626bSGovind Singh static void ath10k_htt_set_paddrs_ring_32(struct ath10k_htt *htt, 109a91a626bSGovind Singh dma_addr_t paddr, int idx) 110a91a626bSGovind Singh { 111a91a626bSGovind Singh htt->rx_ring.paddrs_ring_32[idx] = __cpu_to_le32(paddr); 112a91a626bSGovind Singh } 113a91a626bSGovind Singh 114a91a626bSGovind Singh static void ath10k_htt_set_paddrs_ring_64(struct ath10k_htt *htt, 115a91a626bSGovind Singh dma_addr_t paddr, int idx) 116a91a626bSGovind Singh { 117a91a626bSGovind Singh htt->rx_ring.paddrs_ring_64[idx] = __cpu_to_le64(paddr); 118a91a626bSGovind Singh } 119a91a626bSGovind Singh 120a91a626bSGovind Singh static void ath10k_htt_reset_paddrs_ring_32(struct ath10k_htt *htt, int idx) 121a91a626bSGovind Singh { 122a91a626bSGovind Singh htt->rx_ring.paddrs_ring_32[idx] = 0; 123a91a626bSGovind Singh } 124a91a626bSGovind Singh 125a91a626bSGovind Singh static void ath10k_htt_reset_paddrs_ring_64(struct ath10k_htt *htt, int idx) 126a91a626bSGovind Singh { 127a91a626bSGovind Singh htt->rx_ring.paddrs_ring_64[idx] = 0; 128a91a626bSGovind Singh } 129a91a626bSGovind Singh 130a91a626bSGovind Singh static void *ath10k_htt_get_vaddr_ring_32(struct ath10k_htt *htt) 131a91a626bSGovind Singh { 132a91a626bSGovind Singh return (void *)htt->rx_ring.paddrs_ring_32; 133a91a626bSGovind Singh } 134a91a626bSGovind Singh 135a91a626bSGovind Singh static void *ath10k_htt_get_vaddr_ring_64(struct ath10k_htt *htt) 136a91a626bSGovind Singh { 137a91a626bSGovind Singh return (void *)htt->rx_ring.paddrs_ring_64; 138a91a626bSGovind Singh } 139a91a626bSGovind Singh 1405e3dd157SKalle Valo static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num) 1415e3dd157SKalle Valo { 1425e3dd157SKalle Valo struct htt_rx_desc *rx_desc; 
143c545070eSMichal Kazior struct ath10k_skb_rxcb *rxcb; 1445e3dd157SKalle Valo struct sk_buff *skb; 1455e3dd157SKalle Valo dma_addr_t paddr; 1465e3dd157SKalle Valo int ret = 0, idx; 1475e3dd157SKalle Valo 148c545070eSMichal Kazior /* The Full Rx Reorder firmware has no way of telling the host 149c545070eSMichal Kazior * implicitly when it copied HTT Rx Ring buffers to MAC Rx Ring. 150c545070eSMichal Kazior * To keep things simple make sure ring is always half empty. This 151c545070eSMichal Kazior * guarantees there'll be no replenishment overruns possible. 152c545070eSMichal Kazior */ 153c545070eSMichal Kazior BUILD_BUG_ON(HTT_RX_RING_FILL_LEVEL >= HTT_RX_RING_SIZE / 2); 154c545070eSMichal Kazior 1558cc7f26cSKalle Valo idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr); 1565e3dd157SKalle Valo while (num > 0) { 1575e3dd157SKalle Valo skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN); 1585e3dd157SKalle Valo if (!skb) { 1595e3dd157SKalle Valo ret = -ENOMEM; 1605e3dd157SKalle Valo goto fail; 1615e3dd157SKalle Valo } 1625e3dd157SKalle Valo 1635e3dd157SKalle Valo if (!IS_ALIGNED((unsigned long)skb->data, HTT_RX_DESC_ALIGN)) 1645e3dd157SKalle Valo skb_pull(skb, 1655e3dd157SKalle Valo PTR_ALIGN(skb->data, HTT_RX_DESC_ALIGN) - 1665e3dd157SKalle Valo skb->data); 1675e3dd157SKalle Valo 1685e3dd157SKalle Valo /* Clear rx_desc attention word before posting to Rx ring */ 1695e3dd157SKalle Valo rx_desc = (struct htt_rx_desc *)skb->data; 1705e3dd157SKalle Valo rx_desc->attention.flags = __cpu_to_le32(0); 1715e3dd157SKalle Valo 1725e3dd157SKalle Valo paddr = dma_map_single(htt->ar->dev, skb->data, 1735e3dd157SKalle Valo skb->len + skb_tailroom(skb), 1745e3dd157SKalle Valo DMA_FROM_DEVICE); 1755e3dd157SKalle Valo 1765e3dd157SKalle Valo if (unlikely(dma_mapping_error(htt->ar->dev, paddr))) { 1775e3dd157SKalle Valo dev_kfree_skb_any(skb); 1785e3dd157SKalle Valo ret = -ENOMEM; 1795e3dd157SKalle Valo goto fail; 1805e3dd157SKalle Valo } 1815e3dd157SKalle Valo 182c545070eSMichal 
Kazior rxcb = ATH10K_SKB_RXCB(skb); 183c545070eSMichal Kazior rxcb->paddr = paddr; 1845e3dd157SKalle Valo htt->rx_ring.netbufs_ring[idx] = skb; 1859a5511d5SErik Stromdahl ath10k_htt_set_paddrs_ring(htt, paddr, idx); 1865e3dd157SKalle Valo htt->rx_ring.fill_cnt++; 1875e3dd157SKalle Valo 188c545070eSMichal Kazior if (htt->rx_ring.in_ord_rx) { 189c545070eSMichal Kazior hash_add(htt->rx_ring.skb_table, 190c545070eSMichal Kazior &ATH10K_SKB_RXCB(skb)->hlist, 191a91a626bSGovind Singh paddr); 192c545070eSMichal Kazior } 193c545070eSMichal Kazior 1945e3dd157SKalle Valo num--; 1955e3dd157SKalle Valo idx++; 1965e3dd157SKalle Valo idx &= htt->rx_ring.size_mask; 1975e3dd157SKalle Valo } 1985e3dd157SKalle Valo 1995e3dd157SKalle Valo fail: 2005de6dfc8SVasanthakumar Thiagarajan /* 2015de6dfc8SVasanthakumar Thiagarajan * Make sure the rx buffer is updated before available buffer 2025de6dfc8SVasanthakumar Thiagarajan * index to avoid any potential rx ring corruption. 2035de6dfc8SVasanthakumar Thiagarajan */ 2045de6dfc8SVasanthakumar Thiagarajan mb(); 2058cc7f26cSKalle Valo *htt->rx_ring.alloc_idx.vaddr = __cpu_to_le32(idx); 2065e3dd157SKalle Valo return ret; 2075e3dd157SKalle Valo } 2085e3dd157SKalle Valo 2095e3dd157SKalle Valo static int ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num) 2105e3dd157SKalle Valo { 2115e3dd157SKalle Valo lockdep_assert_held(&htt->rx_ring.lock); 2125e3dd157SKalle Valo return __ath10k_htt_rx_ring_fill_n(htt, num); 2135e3dd157SKalle Valo } 2145e3dd157SKalle Valo 2155e3dd157SKalle Valo static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt) 2165e3dd157SKalle Valo { 2176e712d42SMichal Kazior int ret, num_deficit, num_to_fill; 2185e3dd157SKalle Valo 2196e712d42SMichal Kazior /* Refilling the whole RX ring buffer proves to be a bad idea. The 2206e712d42SMichal Kazior * reason is RX may take up significant amount of CPU cycles and starve 2216e712d42SMichal Kazior * other tasks, e.g. 
TX on an ethernet device while acting as a bridge 2226e712d42SMichal Kazior * with ath10k wlan interface. This ended up with very poor performance 2236e712d42SMichal Kazior * once CPU the host system was overwhelmed with RX on ath10k. 2246e712d42SMichal Kazior * 2256e712d42SMichal Kazior * By limiting the number of refills the replenishing occurs 2266e712d42SMichal Kazior * progressively. This in turns makes use of the fact tasklets are 2276e712d42SMichal Kazior * processed in FIFO order. This means actual RX processing can starve 2286e712d42SMichal Kazior * out refilling. If there's not enough buffers on RX ring FW will not 2296e712d42SMichal Kazior * report RX until it is refilled with enough buffers. This 2306e712d42SMichal Kazior * automatically balances load wrt to CPU power. 2316e712d42SMichal Kazior * 2326e712d42SMichal Kazior * This probably comes at a cost of lower maximum throughput but 233d6dfe25cSMarcin Rokicki * improves the average and stability. 234d6dfe25cSMarcin Rokicki */ 2355e3dd157SKalle Valo spin_lock_bh(&htt->rx_ring.lock); 2366e712d42SMichal Kazior num_deficit = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt; 2376e712d42SMichal Kazior num_to_fill = min(ATH10K_HTT_MAX_NUM_REFILL, num_deficit); 2386e712d42SMichal Kazior num_deficit -= num_to_fill; 2395e3dd157SKalle Valo ret = ath10k_htt_rx_ring_fill_n(htt, num_to_fill); 2405e3dd157SKalle Valo if (ret == -ENOMEM) { 2415e3dd157SKalle Valo /* 2425e3dd157SKalle Valo * Failed to fill it to the desired level - 2435e3dd157SKalle Valo * we'll start a timer and try again next time. 2445e3dd157SKalle Valo * As long as enough buffers are left in the ring for 2455e3dd157SKalle Valo * another A-MPDU rx, no special recovery is needed. 
2465e3dd157SKalle Valo */ 2475e3dd157SKalle Valo mod_timer(&htt->rx_ring.refill_retry_timer, jiffies + 2485e3dd157SKalle Valo msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS)); 2496e712d42SMichal Kazior } else if (num_deficit > 0) { 2505c86d97bSRajkumar Manoharan mod_timer(&htt->rx_ring.refill_retry_timer, jiffies + 2515c86d97bSRajkumar Manoharan msecs_to_jiffies(HTT_RX_RING_REFILL_RESCHED_MS)); 2525e3dd157SKalle Valo } 2535e3dd157SKalle Valo spin_unlock_bh(&htt->rx_ring.lock); 2545e3dd157SKalle Valo } 2555e3dd157SKalle Valo 2567ac76764SKees Cook static void ath10k_htt_rx_ring_refill_retry(struct timer_list *t) 2575e3dd157SKalle Valo { 2587ac76764SKees Cook struct ath10k_htt *htt = from_timer(htt, t, rx_ring.refill_retry_timer); 259af762c0bSKalle Valo 2605e3dd157SKalle Valo ath10k_htt_rx_msdu_buff_replenish(htt); 2615e3dd157SKalle Valo } 2625e3dd157SKalle Valo 263c545070eSMichal Kazior int ath10k_htt_rx_ring_refill(struct ath10k *ar) 2643e841fd0SMichal Kazior { 265c545070eSMichal Kazior struct ath10k_htt *htt = &ar->htt; 266c545070eSMichal Kazior int ret; 2673e841fd0SMichal Kazior 268f88d4934SErik Stromdahl if (ar->dev_type == ATH10K_DEV_TYPE_HL) 269f88d4934SErik Stromdahl return 0; 270f88d4934SErik Stromdahl 271c545070eSMichal Kazior spin_lock_bh(&htt->rx_ring.lock); 272c545070eSMichal Kazior ret = ath10k_htt_rx_ring_fill_n(htt, (htt->rx_ring.fill_level - 273c545070eSMichal Kazior htt->rx_ring.fill_cnt)); 2743e841fd0SMichal Kazior 275c545070eSMichal Kazior if (ret) 276c545070eSMichal Kazior ath10k_htt_rx_ring_free(htt); 277c545070eSMichal Kazior 278168f75f1SBen Greear spin_unlock_bh(&htt->rx_ring.lock); 279168f75f1SBen Greear 280c545070eSMichal Kazior return ret; 2813e841fd0SMichal Kazior } 2823e841fd0SMichal Kazior 28395bf21f9SMichal Kazior void ath10k_htt_rx_free(struct ath10k_htt *htt) 2845e3dd157SKalle Valo { 285f88d4934SErik Stromdahl if (htt->ar->dev_type == ATH10K_DEV_TYPE_HL) 286f88d4934SErik Stromdahl return; 287f88d4934SErik Stromdahl 2885e3dd157SKalle Valo 
del_timer_sync(&htt->rx_ring.refill_retry_timer); 2896c5151a9SMichal Kazior 290deba1b9eSRajkumar Manoharan skb_queue_purge(&htt->rx_msdus_q); 291c545070eSMichal Kazior skb_queue_purge(&htt->rx_in_ord_compl_q); 292426e10eaSMichal Kazior skb_queue_purge(&htt->tx_fetch_ind_q); 2935e3dd157SKalle Valo 294168f75f1SBen Greear spin_lock_bh(&htt->rx_ring.lock); 295c545070eSMichal Kazior ath10k_htt_rx_ring_free(htt); 296168f75f1SBen Greear spin_unlock_bh(&htt->rx_ring.lock); 2975e3dd157SKalle Valo 2985e3dd157SKalle Valo dma_free_coherent(htt->ar->dev, 2999a5511d5SErik Stromdahl ath10k_htt_get_rx_ring_size(htt), 3009a5511d5SErik Stromdahl ath10k_htt_get_vaddr_ring(htt), 3015e3dd157SKalle Valo htt->rx_ring.base_paddr); 3025e3dd157SKalle Valo 3035e3dd157SKalle Valo dma_free_coherent(htt->ar->dev, 3045e3dd157SKalle Valo sizeof(*htt->rx_ring.alloc_idx.vaddr), 3055e3dd157SKalle Valo htt->rx_ring.alloc_idx.vaddr, 3065e3dd157SKalle Valo htt->rx_ring.alloc_idx.paddr); 3075e3dd157SKalle Valo 3085e3dd157SKalle Valo kfree(htt->rx_ring.netbufs_ring); 3095e3dd157SKalle Valo } 3105e3dd157SKalle Valo 3115e3dd157SKalle Valo static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt) 3125e3dd157SKalle Valo { 3137aa7a72aSMichal Kazior struct ath10k *ar = htt->ar; 3145e3dd157SKalle Valo int idx; 3155e3dd157SKalle Valo struct sk_buff *msdu; 3165e3dd157SKalle Valo 31745967089SMichal Kazior lockdep_assert_held(&htt->rx_ring.lock); 3185e3dd157SKalle Valo 3198d60ee87SMichal Kazior if (htt->rx_ring.fill_cnt == 0) { 3207aa7a72aSMichal Kazior ath10k_warn(ar, "tried to pop sk_buff from an empty rx ring\n"); 3218d60ee87SMichal Kazior return NULL; 3228d60ee87SMichal Kazior } 3235e3dd157SKalle Valo 3245e3dd157SKalle Valo idx = htt->rx_ring.sw_rd_idx.msdu_payld; 3255e3dd157SKalle Valo msdu = htt->rx_ring.netbufs_ring[idx]; 3263e841fd0SMichal Kazior htt->rx_ring.netbufs_ring[idx] = NULL; 3279a5511d5SErik Stromdahl ath10k_htt_reset_paddrs_ring(htt, idx); 3285e3dd157SKalle Valo 
3295e3dd157SKalle Valo idx++; 3305e3dd157SKalle Valo idx &= htt->rx_ring.size_mask; 3315e3dd157SKalle Valo htt->rx_ring.sw_rd_idx.msdu_payld = idx; 3325e3dd157SKalle Valo htt->rx_ring.fill_cnt--; 3335e3dd157SKalle Valo 3344de02806SMichal Kazior dma_unmap_single(htt->ar->dev, 3358582bf3bSMichal Kazior ATH10K_SKB_RXCB(msdu)->paddr, 3364de02806SMichal Kazior msdu->len + skb_tailroom(msdu), 3374de02806SMichal Kazior DMA_FROM_DEVICE); 3384de02806SMichal Kazior ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ", 3394de02806SMichal Kazior msdu->data, msdu->len + skb_tailroom(msdu)); 3404de02806SMichal Kazior 3415e3dd157SKalle Valo return msdu; 3425e3dd157SKalle Valo } 3435e3dd157SKalle Valo 344d84dd60fSJanusz Dziedzic /* return: < 0 fatal error, 0 - non chained msdu, 1 chained msdu */ 3455e3dd157SKalle Valo static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt, 346f0e2770fSMichal Kazior struct sk_buff_head *amsdu) 3475e3dd157SKalle Valo { 3487aa7a72aSMichal Kazior struct ath10k *ar = htt->ar; 3495e3dd157SKalle Valo int msdu_len, msdu_chaining = 0; 3509aa505d2SMichal Kazior struct sk_buff *msdu; 3515e3dd157SKalle Valo struct htt_rx_desc *rx_desc; 3525e3dd157SKalle Valo 35345967089SMichal Kazior lockdep_assert_held(&htt->rx_ring.lock); 35445967089SMichal Kazior 3559aa505d2SMichal Kazior for (;;) { 3565e3dd157SKalle Valo int last_msdu, msdu_len_invalid, msdu_chained; 3575e3dd157SKalle Valo 3589aa505d2SMichal Kazior msdu = ath10k_htt_rx_netbuf_pop(htt); 3599aa505d2SMichal Kazior if (!msdu) { 3609aa505d2SMichal Kazior __skb_queue_purge(amsdu); 361e0bd7513SMichal Kazior return -ENOENT; 3629aa505d2SMichal Kazior } 3639aa505d2SMichal Kazior 3649aa505d2SMichal Kazior __skb_queue_tail(amsdu, msdu); 3659aa505d2SMichal Kazior 3665e3dd157SKalle Valo rx_desc = (struct htt_rx_desc *)msdu->data; 3675e3dd157SKalle Valo 3685e3dd157SKalle Valo /* FIXME: we must report msdu payload since this is what caller 369d6dfe25cSMarcin Rokicki * expects now 370d6dfe25cSMarcin 
Rokicki */ 3715e3dd157SKalle Valo skb_put(msdu, offsetof(struct htt_rx_desc, msdu_payload)); 3725e3dd157SKalle Valo skb_pull(msdu, offsetof(struct htt_rx_desc, msdu_payload)); 3735e3dd157SKalle Valo 3745e3dd157SKalle Valo /* 3755e3dd157SKalle Valo * Sanity check - confirm the HW is finished filling in the 3765e3dd157SKalle Valo * rx data. 3775e3dd157SKalle Valo * If the HW and SW are working correctly, then it's guaranteed 3785e3dd157SKalle Valo * that the HW's MAC DMA is done before this point in the SW. 3795e3dd157SKalle Valo * To prevent the case that we handle a stale Rx descriptor, 3805e3dd157SKalle Valo * just assert for now until we have a way to recover. 3815e3dd157SKalle Valo */ 3825e3dd157SKalle Valo if (!(__le32_to_cpu(rx_desc->attention.flags) 3835e3dd157SKalle Valo & RX_ATTENTION_FLAGS_MSDU_DONE)) { 3849aa505d2SMichal Kazior __skb_queue_purge(amsdu); 385e0bd7513SMichal Kazior return -EIO; 3865e3dd157SKalle Valo } 3875e3dd157SKalle Valo 3885e3dd157SKalle Valo msdu_len_invalid = !!(__le32_to_cpu(rx_desc->attention.flags) 3895e3dd157SKalle Valo & (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR | 3905e3dd157SKalle Valo RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR)); 3911f5dbfbbSPeter Oh msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.common.info0), 3925e3dd157SKalle Valo RX_MSDU_START_INFO0_MSDU_LENGTH); 3935e3dd157SKalle Valo msdu_chained = rx_desc->frag_info.ring2_more_count; 3945e3dd157SKalle Valo 3955e3dd157SKalle Valo if (msdu_len_invalid) 3965e3dd157SKalle Valo msdu_len = 0; 3975e3dd157SKalle Valo 3985e3dd157SKalle Valo skb_trim(msdu, 0); 3995e3dd157SKalle Valo skb_put(msdu, min(msdu_len, HTT_RX_MSDU_SIZE)); 4005e3dd157SKalle Valo msdu_len -= msdu->len; 4015e3dd157SKalle Valo 4029aa505d2SMichal Kazior /* Note: Chained buffers do not contain rx descriptor */ 4035e3dd157SKalle Valo while (msdu_chained--) { 4049aa505d2SMichal Kazior msdu = ath10k_htt_rx_netbuf_pop(htt); 4059aa505d2SMichal Kazior if (!msdu) { 4069aa505d2SMichal Kazior __skb_queue_purge(amsdu); 
407e0bd7513SMichal Kazior return -ENOENT; 408b30595aeSMichal Kazior } 409b30595aeSMichal Kazior 4109aa505d2SMichal Kazior __skb_queue_tail(amsdu, msdu); 4119aa505d2SMichal Kazior skb_trim(msdu, 0); 4129aa505d2SMichal Kazior skb_put(msdu, min(msdu_len, HTT_RX_BUF_SIZE)); 4139aa505d2SMichal Kazior msdu_len -= msdu->len; 414ede9c8e0SMichal Kazior msdu_chaining = 1; 4155e3dd157SKalle Valo } 4165e3dd157SKalle Valo 4171f5dbfbbSPeter Oh last_msdu = __le32_to_cpu(rx_desc->msdu_end.common.info0) & 4185e3dd157SKalle Valo RX_MSDU_END_INFO0_LAST_MSDU; 4195e3dd157SKalle Valo 420b04e204fSMichal Kazior trace_ath10k_htt_rx_desc(ar, &rx_desc->attention, 421a0883cf7SRajkumar Manoharan sizeof(*rx_desc) - sizeof(u32)); 4229aa505d2SMichal Kazior 4239aa505d2SMichal Kazior if (last_msdu) 4245e3dd157SKalle Valo break; 425d8bb26b9SKalle Valo } 426d8bb26b9SKalle Valo 4279aa505d2SMichal Kazior if (skb_queue_empty(amsdu)) 428d84dd60fSJanusz Dziedzic msdu_chaining = -1; 429d84dd60fSJanusz Dziedzic 4305e3dd157SKalle Valo /* 4315e3dd157SKalle Valo * Don't refill the ring yet. 4325e3dd157SKalle Valo * 4335e3dd157SKalle Valo * First, the elements popped here are still in use - it is not 4345e3dd157SKalle Valo * safe to overwrite them until the matching call to 4355e3dd157SKalle Valo * mpdu_desc_list_next. Second, for efficiency it is preferable to 4365e3dd157SKalle Valo * refill the rx ring with 1 PPDU's worth of rx buffers (something 4375e3dd157SKalle Valo * like 32 x 3 buffers), rather than one MPDU's worth of rx buffers 4385e3dd157SKalle Valo * (something like 3 buffers). Consequently, we'll rely on the txrx 4395e3dd157SKalle Valo * SW to tell us when it is done pulling all the PPDU's rx buffers 4405e3dd157SKalle Valo * out of the rx ring, and then refill it just once. 
4415e3dd157SKalle Valo */ 4425e3dd157SKalle Valo 4435e3dd157SKalle Valo return msdu_chaining; 4445e3dd157SKalle Valo } 4455e3dd157SKalle Valo 446c545070eSMichal Kazior static struct sk_buff *ath10k_htt_rx_pop_paddr(struct ath10k_htt *htt, 447a91a626bSGovind Singh u64 paddr) 448c545070eSMichal Kazior { 449c545070eSMichal Kazior struct ath10k *ar = htt->ar; 450c545070eSMichal Kazior struct ath10k_skb_rxcb *rxcb; 451c545070eSMichal Kazior struct sk_buff *msdu; 452c545070eSMichal Kazior 453c545070eSMichal Kazior lockdep_assert_held(&htt->rx_ring.lock); 454c545070eSMichal Kazior 455c545070eSMichal Kazior msdu = ath10k_htt_rx_find_skb_paddr(ar, paddr); 456c545070eSMichal Kazior if (!msdu) 457c545070eSMichal Kazior return NULL; 458c545070eSMichal Kazior 459c545070eSMichal Kazior rxcb = ATH10K_SKB_RXCB(msdu); 460c545070eSMichal Kazior hash_del(&rxcb->hlist); 461c545070eSMichal Kazior htt->rx_ring.fill_cnt--; 462c545070eSMichal Kazior 463c545070eSMichal Kazior dma_unmap_single(htt->ar->dev, rxcb->paddr, 464c545070eSMichal Kazior msdu->len + skb_tailroom(msdu), 465c545070eSMichal Kazior DMA_FROM_DEVICE); 466c545070eSMichal Kazior ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ", 467c545070eSMichal Kazior msdu->data, msdu->len + skb_tailroom(msdu)); 468c545070eSMichal Kazior 469c545070eSMichal Kazior return msdu; 470c545070eSMichal Kazior } 471c545070eSMichal Kazior 47285bd0107SYu Wang static inline void ath10k_htt_append_frag_list(struct sk_buff *skb_head, 47385bd0107SYu Wang struct sk_buff *frag_list, 47485bd0107SYu Wang unsigned int frag_len) 47585bd0107SYu Wang { 47685bd0107SYu Wang skb_shinfo(skb_head)->frag_list = frag_list; 47785bd0107SYu Wang skb_head->data_len = frag_len; 47885bd0107SYu Wang skb_head->len += skb_head->data_len; 47985bd0107SYu Wang } 48085bd0107SYu Wang 48185bd0107SYu Wang static int ath10k_htt_rx_handle_amsdu_mon_32(struct ath10k_htt *htt, 48285bd0107SYu Wang struct sk_buff *msdu, 48385bd0107SYu Wang struct htt_rx_in_ord_msdu_desc 
**msdu_desc) 48485bd0107SYu Wang { 48585bd0107SYu Wang struct ath10k *ar = htt->ar; 48685bd0107SYu Wang u32 paddr; 48785bd0107SYu Wang struct sk_buff *frag_buf; 48885bd0107SYu Wang struct sk_buff *prev_frag_buf; 48985bd0107SYu Wang u8 last_frag; 49085bd0107SYu Wang struct htt_rx_in_ord_msdu_desc *ind_desc = *msdu_desc; 49185bd0107SYu Wang struct htt_rx_desc *rxd; 49285bd0107SYu Wang int amsdu_len = __le16_to_cpu(ind_desc->msdu_len); 49385bd0107SYu Wang 49485bd0107SYu Wang rxd = (void *)msdu->data; 49585bd0107SYu Wang trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd)); 49685bd0107SYu Wang 49785bd0107SYu Wang skb_put(msdu, sizeof(struct htt_rx_desc)); 49885bd0107SYu Wang skb_pull(msdu, sizeof(struct htt_rx_desc)); 49985bd0107SYu Wang skb_put(msdu, min(amsdu_len, HTT_RX_MSDU_SIZE)); 50085bd0107SYu Wang amsdu_len -= msdu->len; 50185bd0107SYu Wang 50285bd0107SYu Wang last_frag = ind_desc->reserved; 50385bd0107SYu Wang if (last_frag) { 50485bd0107SYu Wang if (amsdu_len) { 50585bd0107SYu Wang ath10k_warn(ar, "invalid amsdu len %u, left %d", 50685bd0107SYu Wang __le16_to_cpu(ind_desc->msdu_len), 50785bd0107SYu Wang amsdu_len); 50885bd0107SYu Wang } 50985bd0107SYu Wang return 0; 51085bd0107SYu Wang } 51185bd0107SYu Wang 51285bd0107SYu Wang ind_desc++; 51385bd0107SYu Wang paddr = __le32_to_cpu(ind_desc->msdu_paddr); 51485bd0107SYu Wang frag_buf = ath10k_htt_rx_pop_paddr(htt, paddr); 51585bd0107SYu Wang if (!frag_buf) { 51685bd0107SYu Wang ath10k_warn(ar, "failed to pop frag-1 paddr: 0x%x", paddr); 51785bd0107SYu Wang return -ENOENT; 51885bd0107SYu Wang } 51985bd0107SYu Wang 52085bd0107SYu Wang skb_put(frag_buf, min(amsdu_len, HTT_RX_BUF_SIZE)); 52185bd0107SYu Wang ath10k_htt_append_frag_list(msdu, frag_buf, amsdu_len); 52285bd0107SYu Wang 52385bd0107SYu Wang amsdu_len -= frag_buf->len; 52485bd0107SYu Wang prev_frag_buf = frag_buf; 52585bd0107SYu Wang last_frag = ind_desc->reserved; 52685bd0107SYu Wang while (!last_frag) { 52785bd0107SYu Wang ind_desc++; 52885bd0107SYu Wang 
paddr = __le32_to_cpu(ind_desc->msdu_paddr); 52985bd0107SYu Wang frag_buf = ath10k_htt_rx_pop_paddr(htt, paddr); 53085bd0107SYu Wang if (!frag_buf) { 53185bd0107SYu Wang ath10k_warn(ar, "failed to pop frag-n paddr: 0x%x", 53285bd0107SYu Wang paddr); 53385bd0107SYu Wang prev_frag_buf->next = NULL; 53485bd0107SYu Wang return -ENOENT; 53585bd0107SYu Wang } 53685bd0107SYu Wang 53785bd0107SYu Wang skb_put(frag_buf, min(amsdu_len, HTT_RX_BUF_SIZE)); 53885bd0107SYu Wang last_frag = ind_desc->reserved; 53985bd0107SYu Wang amsdu_len -= frag_buf->len; 54085bd0107SYu Wang 54185bd0107SYu Wang prev_frag_buf->next = frag_buf; 54285bd0107SYu Wang prev_frag_buf = frag_buf; 54385bd0107SYu Wang } 54485bd0107SYu Wang 54585bd0107SYu Wang if (amsdu_len) { 54685bd0107SYu Wang ath10k_warn(ar, "invalid amsdu len %u, left %d", 54785bd0107SYu Wang __le16_to_cpu(ind_desc->msdu_len), amsdu_len); 54885bd0107SYu Wang } 54985bd0107SYu Wang 55085bd0107SYu Wang *msdu_desc = ind_desc; 55185bd0107SYu Wang 55285bd0107SYu Wang prev_frag_buf->next = NULL; 55385bd0107SYu Wang return 0; 55485bd0107SYu Wang } 55585bd0107SYu Wang 55685bd0107SYu Wang static int 55785bd0107SYu Wang ath10k_htt_rx_handle_amsdu_mon_64(struct ath10k_htt *htt, 55885bd0107SYu Wang struct sk_buff *msdu, 55985bd0107SYu Wang struct htt_rx_in_ord_msdu_desc_ext **msdu_desc) 56085bd0107SYu Wang { 56185bd0107SYu Wang struct ath10k *ar = htt->ar; 56285bd0107SYu Wang u64 paddr; 56385bd0107SYu Wang struct sk_buff *frag_buf; 56485bd0107SYu Wang struct sk_buff *prev_frag_buf; 56585bd0107SYu Wang u8 last_frag; 56685bd0107SYu Wang struct htt_rx_in_ord_msdu_desc_ext *ind_desc = *msdu_desc; 56785bd0107SYu Wang struct htt_rx_desc *rxd; 56885bd0107SYu Wang int amsdu_len = __le16_to_cpu(ind_desc->msdu_len); 56985bd0107SYu Wang 57085bd0107SYu Wang rxd = (void *)msdu->data; 57185bd0107SYu Wang trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd)); 57285bd0107SYu Wang 57385bd0107SYu Wang skb_put(msdu, sizeof(struct htt_rx_desc)); 57485bd0107SYu Wang 
skb_pull(msdu, sizeof(struct htt_rx_desc)); 57585bd0107SYu Wang skb_put(msdu, min(amsdu_len, HTT_RX_MSDU_SIZE)); 57685bd0107SYu Wang amsdu_len -= msdu->len; 57785bd0107SYu Wang 57885bd0107SYu Wang last_frag = ind_desc->reserved; 57985bd0107SYu Wang if (last_frag) { 58085bd0107SYu Wang if (amsdu_len) { 58185bd0107SYu Wang ath10k_warn(ar, "invalid amsdu len %u, left %d", 58285bd0107SYu Wang __le16_to_cpu(ind_desc->msdu_len), 58385bd0107SYu Wang amsdu_len); 58485bd0107SYu Wang } 58585bd0107SYu Wang return 0; 58685bd0107SYu Wang } 58785bd0107SYu Wang 58885bd0107SYu Wang ind_desc++; 58985bd0107SYu Wang paddr = __le64_to_cpu(ind_desc->msdu_paddr); 59085bd0107SYu Wang frag_buf = ath10k_htt_rx_pop_paddr(htt, paddr); 59185bd0107SYu Wang if (!frag_buf) { 59285bd0107SYu Wang ath10k_warn(ar, "failed to pop frag-1 paddr: 0x%llx", paddr); 59385bd0107SYu Wang return -ENOENT; 59485bd0107SYu Wang } 59585bd0107SYu Wang 59685bd0107SYu Wang skb_put(frag_buf, min(amsdu_len, HTT_RX_BUF_SIZE)); 59785bd0107SYu Wang ath10k_htt_append_frag_list(msdu, frag_buf, amsdu_len); 59885bd0107SYu Wang 59985bd0107SYu Wang amsdu_len -= frag_buf->len; 60085bd0107SYu Wang prev_frag_buf = frag_buf; 60185bd0107SYu Wang last_frag = ind_desc->reserved; 60285bd0107SYu Wang while (!last_frag) { 60385bd0107SYu Wang ind_desc++; 60485bd0107SYu Wang paddr = __le64_to_cpu(ind_desc->msdu_paddr); 60585bd0107SYu Wang frag_buf = ath10k_htt_rx_pop_paddr(htt, paddr); 60685bd0107SYu Wang if (!frag_buf) { 60785bd0107SYu Wang ath10k_warn(ar, "failed to pop frag-n paddr: 0x%llx", 60885bd0107SYu Wang paddr); 60985bd0107SYu Wang prev_frag_buf->next = NULL; 61085bd0107SYu Wang return -ENOENT; 61185bd0107SYu Wang } 61285bd0107SYu Wang 61385bd0107SYu Wang skb_put(frag_buf, min(amsdu_len, HTT_RX_BUF_SIZE)); 61485bd0107SYu Wang last_frag = ind_desc->reserved; 61585bd0107SYu Wang amsdu_len -= frag_buf->len; 61685bd0107SYu Wang 61785bd0107SYu Wang prev_frag_buf->next = frag_buf; 61885bd0107SYu Wang prev_frag_buf = frag_buf; 
	}

	if (amsdu_len) {
		ath10k_warn(ar, "invalid amsdu len %u, left %d",
			    __le16_to_cpu(ind_desc->msdu_len), amsdu_len);
	}

	*msdu_desc = ind_desc;

	prev_frag_buf->next = NULL;
	return 0;
}

/* Collect the skbs described by an in-order indication event (32-bit
 * address variant).
 *
 * For each MSDU descriptor in @ev the matching skb is popped from the rx
 * ring by its DMA address and appended to @list.  For non-offloaded
 * frames the HTT rx descriptor at the head of the buffer is traced, then
 * trimmed off, and the skb is grown to the MSDU length reported by the
 * target.  When a monitor vif is active, fragmented A-MSDUs are instead
 * reassembled by ath10k_htt_rx_handle_amsdu_mon_32(), which may advance
 * the descriptor pointer past consumed fragments.
 *
 * Locking: caller must hold htt->rx_ring.lock (asserted below).
 *
 * Return: 0 on success, -ENOENT if a descriptor has no matching rx ring
 * entry, -EIO if the target handed over a buffer without the MSDU_DONE
 * attention bit set, or a negative error from the monitor handler.  On
 * -ENOENT and monitor errors @list is purged.
 */
static int ath10k_htt_rx_pop_paddr32_list(struct ath10k_htt *htt,
					  struct htt_rx_in_ord_ind *ev,
					  struct sk_buff_head *list)
{
	struct ath10k *ar = htt->ar;
	struct htt_rx_in_ord_msdu_desc *msdu_desc = ev->msdu_descs32;
	struct htt_rx_desc *rxd;
	struct sk_buff *msdu;
	int msdu_count, ret;
	bool is_offload;
	u32 paddr;

	lockdep_assert_held(&htt->rx_ring.lock);

	msdu_count = __le16_to_cpu(ev->msdu_count);
	is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);

	while (msdu_count--) {
		paddr = __le32_to_cpu(msdu_desc->msdu_paddr);

		msdu = ath10k_htt_rx_pop_paddr(htt, paddr);
		if (!msdu) {
			__skb_queue_purge(list);
			return -ENOENT;
		}

		/* Monitor mode needs full (possibly fragmented) A-MSDUs;
		 * the helper chains the fragments and may consume several
		 * descriptors, hence it takes &msdu_desc by reference.
		 */
		if (!is_offload && ar->monitor_arvif) {
			ret = ath10k_htt_rx_handle_amsdu_mon_32(htt, msdu,
								&msdu_desc);
			if (ret) {
				__skb_queue_purge(list);
				return ret;
			}
			__skb_queue_tail(list, msdu);
			msdu_desc++;
			continue;
		}

		__skb_queue_tail(list, msdu);

		if (!is_offload) {
			rxd = (void *)msdu->data;

			trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));

			/* Expose the rx descriptor, then trim it off so the
			 * skb data starts at the payload before growing to
			 * the reported MSDU length.
			 */
			skb_put(msdu, sizeof(*rxd));
			skb_pull(msdu, sizeof(*rxd));
			skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len));

			/* NOTE(review): unlike the -ENOENT path above, @list
			 * is not purged here and the offending msdu is
			 * already queued - caller is presumably expected to
			 * free it; confirm against callers.
			 */
			if (!(__le32_to_cpu(rxd->attention.flags) &
			      RX_ATTENTION_FLAGS_MSDU_DONE)) {
				ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n");
				return -EIO;
			}
		}

		msdu_desc++;
	}

	return 0;
}

/* 64-bit address variant of ath10k_htt_rx_pop_paddr32_list(); identical
 * logic but reads the extended descriptor layout (ev->msdu_descs64) and
 * 64-bit DMA addresses.  See the 32-bit variant for the full contract.
 */
static int ath10k_htt_rx_pop_paddr64_list(struct ath10k_htt *htt,
					  struct htt_rx_in_ord_ind *ev,
					  struct sk_buff_head *list)
{
	struct ath10k *ar = htt->ar;
	struct htt_rx_in_ord_msdu_desc_ext *msdu_desc = ev->msdu_descs64;
	struct htt_rx_desc *rxd;
	struct sk_buff *msdu;
	int msdu_count, ret;
	bool is_offload;
	u64 paddr;

	lockdep_assert_held(&htt->rx_ring.lock);

	msdu_count = __le16_to_cpu(ev->msdu_count);
	is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);

	while (msdu_count--) {
		paddr = __le64_to_cpu(msdu_desc->msdu_paddr);
		msdu = ath10k_htt_rx_pop_paddr(htt, paddr);
		if (!msdu) {
			__skb_queue_purge(list);
			return -ENOENT;
		}

		/* Monitor path: reassemble fragmented A-MSDUs, advancing
		 * msdu_desc past consumed fragments.
		 */
		if (!is_offload && ar->monitor_arvif) {
			ret = ath10k_htt_rx_handle_amsdu_mon_64(htt, msdu,
								&msdu_desc);
			if (ret) {
				__skb_queue_purge(list);
				return ret;
			}
			__skb_queue_tail(list, msdu);
			msdu_desc++;
			continue;
		}

		__skb_queue_tail(list, msdu);

		if (!is_offload) {
			rxd = (void *)msdu->data;

			trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));

			/* Strip the rx descriptor, then size the skb to the
			 * MSDU length reported by the target.
			 */
			skb_put(msdu, sizeof(*rxd));
			skb_pull(msdu, sizeof(*rxd));
			skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len));

			if (!(__le32_to_cpu(rxd->attention.flags) &
			      RX_ATTENTION_FLAGS_MSDU_DONE)) {
				ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n");
				return -EIO;
			}
		}

		msdu_desc++;
	}

	return 0;
}

75595bf21f9SMichal Kazior int ath10k_htt_rx_alloc(struct ath10k_htt *htt) 7565e3dd157SKalle Valo { 7577aa7a72aSMichal Kazior struct ath10k *ar = htt->ar; 7585e3dd157SKalle Valo dma_addr_t paddr; 759a91a626bSGovind Singh void *vaddr, *vaddr_ring; 760bd8bdbb6SKalle Valo size_t size; 7615e3dd157SKalle Valo struct timer_list *timer = &htt->rx_ring.refill_retry_timer; 7625e3dd157SKalle Valo 763f88d4934SErik Stromdahl if (ar->dev_type == ATH10K_DEV_TYPE_HL) 764f88d4934SErik Stromdahl return 0; 765f88d4934SErik Stromdahl 76651fc7d74SMichal Kazior htt->rx_confused = false; 76751fc7d74SMichal Kazior 768fe2407a8SMichal Kazior /* XXX: The fill level could be changed during runtime in response to 769fe2407a8SMichal Kazior * the host processing latency. Is this really worth it? 770fe2407a8SMichal Kazior */ 771fe2407a8SMichal Kazior htt->rx_ring.size = HTT_RX_RING_SIZE; 772fe2407a8SMichal Kazior htt->rx_ring.size_mask = htt->rx_ring.size - 1; 773bb8d0d15SGovind Singh htt->rx_ring.fill_level = ar->hw_params.rx_ring_fill_level; 774fe2407a8SMichal Kazior 7755e3dd157SKalle Valo if (!is_power_of_2(htt->rx_ring.size)) { 7767aa7a72aSMichal Kazior ath10k_warn(ar, "htt rx ring size is not power of 2\n"); 7775e3dd157SKalle Valo return -EINVAL; 7785e3dd157SKalle Valo } 7795e3dd157SKalle Valo 7805e3dd157SKalle Valo htt->rx_ring.netbufs_ring = 7816396bb22SKees Cook kcalloc(htt->rx_ring.size, sizeof(struct sk_buff *), 7825e3dd157SKalle Valo GFP_KERNEL); 7835e3dd157SKalle Valo if (!htt->rx_ring.netbufs_ring) 7845e3dd157SKalle Valo goto err_netbuf; 7855e3dd157SKalle Valo 7869a5511d5SErik Stromdahl size = ath10k_htt_get_rx_ring_size(htt); 787bd8bdbb6SKalle Valo 788a91a626bSGovind Singh vaddr_ring = dma_alloc_coherent(htt->ar->dev, size, &paddr, GFP_KERNEL); 789a91a626bSGovind Singh if (!vaddr_ring) 7905e3dd157SKalle Valo goto err_dma_ring; 7915e3dd157SKalle Valo 7929a5511d5SErik Stromdahl ath10k_htt_config_paddrs_ring(htt, vaddr_ring); 7935e3dd157SKalle Valo htt->rx_ring.base_paddr = paddr; 
7945e3dd157SKalle Valo 7955e3dd157SKalle Valo vaddr = dma_alloc_coherent(htt->ar->dev, 7965e3dd157SKalle Valo sizeof(*htt->rx_ring.alloc_idx.vaddr), 797d6cb23b5SFelix Fietkau &paddr, GFP_KERNEL); 7985e3dd157SKalle Valo if (!vaddr) 7995e3dd157SKalle Valo goto err_dma_idx; 8005e3dd157SKalle Valo 8015e3dd157SKalle Valo htt->rx_ring.alloc_idx.vaddr = vaddr; 8025e3dd157SKalle Valo htt->rx_ring.alloc_idx.paddr = paddr; 803c545070eSMichal Kazior htt->rx_ring.sw_rd_idx.msdu_payld = htt->rx_ring.size_mask; 8045e3dd157SKalle Valo *htt->rx_ring.alloc_idx.vaddr = 0; 8055e3dd157SKalle Valo 8065e3dd157SKalle Valo /* Initialize the Rx refill retry timer */ 8077ac76764SKees Cook timer_setup(timer, ath10k_htt_rx_ring_refill_retry, 0); 8085e3dd157SKalle Valo 8095e3dd157SKalle Valo spin_lock_init(&htt->rx_ring.lock); 8105e3dd157SKalle Valo 8115e3dd157SKalle Valo htt->rx_ring.fill_cnt = 0; 812c545070eSMichal Kazior htt->rx_ring.sw_rd_idx.msdu_payld = 0; 813c545070eSMichal Kazior hash_init(htt->rx_ring.skb_table); 8145e3dd157SKalle Valo 815deba1b9eSRajkumar Manoharan skb_queue_head_init(&htt->rx_msdus_q); 816c545070eSMichal Kazior skb_queue_head_init(&htt->rx_in_ord_compl_q); 817426e10eaSMichal Kazior skb_queue_head_init(&htt->tx_fetch_ind_q); 8183128b3d8SRajkumar Manoharan atomic_set(&htt->num_mpdus_ready, 0); 8196c5151a9SMichal Kazior 8207aa7a72aSMichal Kazior ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n", 8215e3dd157SKalle Valo htt->rx_ring.size, htt->rx_ring.fill_level); 8225e3dd157SKalle Valo return 0; 8235e3dd157SKalle Valo 8245e3dd157SKalle Valo err_dma_idx: 8255e3dd157SKalle Valo dma_free_coherent(htt->ar->dev, 8269a5511d5SErik Stromdahl ath10k_htt_get_rx_ring_size(htt), 827a91a626bSGovind Singh vaddr_ring, 8285e3dd157SKalle Valo htt->rx_ring.base_paddr); 8295e3dd157SKalle Valo err_dma_ring: 8305e3dd157SKalle Valo kfree(htt->rx_ring.netbufs_ring); 8315e3dd157SKalle Valo err_netbuf: 8325e3dd157SKalle Valo return -ENOMEM; 8335e3dd157SKalle Valo } 

/* Length in bytes of the per-MPDU security header (IV/PN) inserted
 * between the 802.11 header and the payload for the given cipher.
 * Unsupported ciphers (WEP128, WAPI) produce a warning and 0.
 */
static int ath10k_htt_rx_crypto_param_len(struct ath10k *ar,
					  enum htt_rx_mpdu_encrypt_type type)
{
	switch (type) {
	case HTT_RX_MPDU_ENCRYPT_NONE:
		return 0;
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
		return IEEE80211_WEP_IV_LEN;
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
		return IEEE80211_TKIP_IV_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
		return IEEE80211_CCMP_HDR_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
		return IEEE80211_CCMP_256_HDR_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
		return IEEE80211_GCMP_HDR_LEN;
	case HTT_RX_MPDU_ENCRYPT_WEP128:
	case HTT_RX_MPDU_ENCRYPT_WAPI:
		break;
	}

	ath10k_warn(ar, "unsupported encryption type %d\n", type);
	return 0;
}

#define MICHAEL_MIC_LEN 8

/* Length in bytes of the trailing MIC for the given cipher.  WEP and
 * TKIP report 0 here; TKIP's Michael MIC (MICHAEL_MIC_LEN) is presumably
 * accounted for separately by callers - TODO confirm.  Unsupported
 * ciphers produce a warning and 0.
 */
static int ath10k_htt_rx_crypto_mic_len(struct ath10k *ar,
					enum htt_rx_mpdu_encrypt_type type)
{
	switch (type) {
	case HTT_RX_MPDU_ENCRYPT_NONE:
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
		return 0;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
		return IEEE80211_CCMP_MIC_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
		return IEEE80211_CCMP_256_MIC_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
		return IEEE80211_GCMP_MIC_LEN;
	case HTT_RX_MPDU_ENCRYPT_WEP128:
	case HTT_RX_MPDU_ENCRYPT_WAPI:
		break;
	}

	ath10k_warn(ar, "unsupported encryption type %d\n", type);
	return 0;
}

/* Length in bytes of the trailing ICV for the given cipher (only WEP and
 * TKIP carry a separate ICV).  Unsupported ciphers produce a warning
 * and 0.
 */
static int ath10k_htt_rx_crypto_icv_len(struct ath10k *ar,
					enum htt_rx_mpdu_encrypt_type type)
{
	switch (type) {
	case HTT_RX_MPDU_ENCRYPT_NONE:
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
	case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
		return 0;
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
		return IEEE80211_WEP_ICV_LEN;
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
		return IEEE80211_TKIP_ICV_LEN;
	case HTT_RX_MPDU_ENCRYPT_WEP128:
	case HTT_RX_MPDU_ENCRYPT_WAPI:
		break;
	}

	ath10k_warn(ar, "unsupported encryption type %d\n", type);
	return 0;
}

/* Per-subframe A-MSDU header: DA, SA and a big-endian length field
 * (802.3-style) preceding each subframe payload.
 */
struct amsdu_subframe_hdr {
	u8 dst[ETH_ALEN];
	u8 src[ETH_ALEN];
	__be16 len;
} __packed;

/* VHT group IDs 0 and 63 denote single-user transmissions. */
#define GROUP_ID_IS_SU_MIMO(x) ((x) == 0 || (x) == 63)

/* Map the 2-bit hardware bandwidth field to mac80211's RATE_INFO_BW_*.
 * Out-of-range values leave ret at its initial 0 (== RATE_INFO_BW_20).
 */
static inline u8 ath10k_bw_to_mac80211_bw(u8 bw)
{
	u8 ret = 0;

	switch (bw) {
	case 0:
		ret = RATE_INFO_BW_20;
		break;
	case 1:
		ret = RATE_INFO_BW_40;
		break;
	case 2:
		ret = RATE_INFO_BW_80;
		break;
	case 3:
		ret = RATE_INFO_BW_160;
		break;
	}

	return ret;
}

/* Decode the PPDU start fields of the rx descriptor into mac80211 rate
 * info on @status: rate index, encoding (legacy/HT/VHT), NSS, bandwidth
 * and short-GI flag.  Legacy frames are skipped when status->freq (and
 * hence the band) has not been resolved yet.
 */
static void ath10k_htt_rx_h_rates(struct ath10k *ar,
				  struct ieee80211_rx_status *status,
				  struct htt_rx_desc *rxd)
{
	struct ieee80211_supported_band *sband;
	u8 cck, rate, bw, sgi, mcs, nss;
	u8 preamble = 0;
	u8 group_id;
	u32 info1, info2, info3;

	info1 = __le32_to_cpu(rxd->ppdu_start.info1);
	info2 = __le32_to_cpu(rxd->ppdu_start.info2);
	info3 = __le32_to_cpu(rxd->ppdu_start.info3);

	preamble = MS(info1, RX_PPDU_START_INFO1_PREAMBLE_TYPE);

	switch (preamble) {
	case HTT_RX_LEGACY:
		/* To get legacy rate index band is required. Since band can't
		 * be undefined check if freq is non-zero.
		 */
		if (!status->freq)
			return;

		cck = info1 & RX_PPDU_START_INFO1_L_SIG_RATE_SELECT;
		rate = MS(info1, RX_PPDU_START_INFO1_L_SIG_RATE);
		rate &= ~RX_PPDU_START_RATE_FLAG;

		sband = &ar->mac.sbands[status->band];
		status->rate_idx = ath10k_mac_hw_rate_to_idx(sband, rate, cck);
		break;
	case HTT_RX_HT:
	case HTT_RX_HT_WITH_TXBF:
		/* HT-SIG - Table 20-11 in info2 and info3 */
		mcs = info2 & 0x1F;
		nss = mcs >> 3;
		bw = (info2 >> 7) & 1;
		sgi = (info3 >> 7) & 1;

		status->rate_idx = mcs;
		status->encoding = RX_ENC_HT;
		if (sgi)
			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
		if (bw)
			status->bw = RATE_INFO_BW_40;
		break;
	case HTT_RX_VHT:
	case HTT_RX_VHT_WITH_TXBF:
		/* VHT-SIG-A1 in info2, VHT-SIG-A2 in info3
		 * TODO check this
		 */
		bw = info2 & 3;
		sgi = info3 & 1;
		group_id = (info2 >> 4) & 0x3F;

		if (GROUP_ID_IS_SU_MIMO(group_id)) {
			mcs = (info3 >> 4) & 0x0F;
			nss = ((info2 >> 10) & 0x07) + 1;
		} else {
			/* Hardware doesn't decode VHT-SIG-B into Rx descriptor
			 * so it's impossible to decode MCS. Also since
			 * firmware consumes Group Id Management frames host
			 * has no knowledge regarding group/user position
			 * mapping so it's impossible to pick the correct Nsts
			 * from VHT-SIG-A1.
			 *
			 * Bandwidth and SGI are valid so report the rateinfo
			 * on best-effort basis.
			 */
			mcs = 0;
			nss = 1;
		}

		/* VHT MCS > 9 is invalid; dump the raw descriptor words to
		 * aid debugging.
		 */
		if (mcs > 0x09) {
			ath10k_warn(ar, "invalid MCS received %u\n", mcs);
			/* NOTE(review): ppdu_start.info0 is printed without
			 * __le32_to_cpu unlike all its neighbours - on
			 * big-endian hosts this logs the raw LE word; confirm
			 * whether that is intentional.
			 */
			ath10k_warn(ar, "rxd %08x mpdu start %08x %08x msdu start %08x %08x ppdu start %08x %08x %08x %08x %08x\n",
				    __le32_to_cpu(rxd->attention.flags),
				    __le32_to_cpu(rxd->mpdu_start.info0),
				    __le32_to_cpu(rxd->mpdu_start.info1),
				    __le32_to_cpu(rxd->msdu_start.common.info0),
				    __le32_to_cpu(rxd->msdu_start.common.info1),
				    rxd->ppdu_start.info0,
				    __le32_to_cpu(rxd->ppdu_start.info1),
				    __le32_to_cpu(rxd->ppdu_start.info2),
				    __le32_to_cpu(rxd->ppdu_start.info3),
				    __le32_to_cpu(rxd->ppdu_start.info4));

			ath10k_warn(ar, "msdu end %08x mpdu end %08x\n",
				    __le32_to_cpu(rxd->msdu_end.common.info0),
				    __le32_to_cpu(rxd->mpdu_end.info0));

			ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL,
					"rx desc msdu payload: ",
					rxd->msdu_payload, 50);
		}

		status->rate_idx = mcs;
		status->nss = nss;

		if (sgi)
			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;

		status->bw = ath10k_bw_to_mac80211_bw(bw);
		status->encoding = RX_ENC_VHT;
		break;
	default:
		break;
	}
}

/* Resolve the rx channel via the peer index recorded in the rx
 * descriptor: peer -> vif -> channel definition.  Returns NULL when the
 * descriptor carries no valid peer index, is not the first MSDU, or any
 * lookup step fails.  Caller must hold ar->data_lock (asserted below).
 */
static struct ieee80211_channel *
ath10k_htt_rx_h_peer_channel(struct ath10k *ar, struct htt_rx_desc *rxd)
{
	struct ath10k_peer *peer;
	struct ath10k_vif *arvif;
	struct cfg80211_chan_def def;
	u16 peer_id;

	lockdep_assert_held(&ar->data_lock);

	if (!rxd)
		return NULL;

	if (rxd->attention.flags &
	    __cpu_to_le32(RX_ATTENTION_FLAGS_PEER_IDX_INVALID))
		return NULL;

	if (!(rxd->msdu_end.common.info0 &
	      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU)))
		return NULL;

	peer_id = MS(__le32_to_cpu(rxd->mpdu_start.info0),
		     RX_MPDU_START_INFO0_PEER_IDX);

	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer)
		return NULL;

	arvif = ath10k_get_arvif(ar, peer->vdev_id);
	if (WARN_ON_ONCE(!arvif))
		return NULL;

	if (ath10k_mac_vif_chan(arvif->vif, &def))
		return NULL;

	return def.chan;
}

/* Resolve the rx channel from the vif matching @vdev_id, or NULL if no
 * such vif has a channel definition.  Caller must hold ar->data_lock.
 */
static struct ieee80211_channel *
ath10k_htt_rx_h_vdev_channel(struct ath10k *ar, u32 vdev_id)
{
	struct ath10k_vif *arvif;
	struct cfg80211_chan_def def;

	lockdep_assert_held(&ar->data_lock);

	list_for_each_entry(arvif, &ar->arvifs, list) {
		if (arvif->vdev_id == vdev_id &&
		    ath10k_mac_vif_chan(arvif->vif, &def) == 0)
			return def.chan;
	}

	return NULL;
}

/* Channel context iterator callback: copies each visited context's
 * channel definition into @data, so the last visited one wins.
 */
static void
ath10k_htt_rx_h_any_chan_iter(struct ieee80211_hw *hw,
			      struct ieee80211_chanctx_conf *conf,
			      void *data)
{
	struct cfg80211_chan_def *def = data;

	*def = conf->def;
}

/* Last-resort channel guess: pick the channel of any active channel
 * context, or NULL if there are none.
 */
static struct ieee80211_channel *
ath10k_htt_rx_h_any_channel(struct ath10k *ar)
{
	struct cfg80211_chan_def def = {};

	ieee80211_iter_chan_contexts_atomic(ar->hw,
					    ath10k_htt_rx_h_any_chan_iter,
					    &def);

	return def.chan;
}

/* Fill status->band/freq from the best available channel source, tried
 * in decreasing order of reliability: scan channel, rx channel, peer
 * lookup, vdev lookup, any channel context, target operating channel.
 * Returns false if no channel could be determined at all.
 */
static bool ath10k_htt_rx_h_channel(struct ath10k *ar,
				    struct ieee80211_rx_status *status,
				    struct htt_rx_desc *rxd,
				    u32 vdev_id)
{
	struct ieee80211_channel *ch;

	spin_lock_bh(&ar->data_lock);
	ch = ar->scan_channel;
	if (!ch)
		ch = ar->rx_channel;
	if (!ch)
		ch = ath10k_htt_rx_h_peer_channel(ar, rxd);
	if (!ch)
		ch = ath10k_htt_rx_h_vdev_channel(ar, vdev_id);
	if (!ch)
		ch = ath10k_htt_rx_h_any_channel(ar);
	if (!ch)
		ch = ar->tgt_oper_chan;
	spin_unlock_bh(&ar->data_lock);

	if (!ch)
		return false;

	status->band = ch->band;
	status->freq = ch->center_freq;

	return true;
}

/* Fill per-chain and combined signal strength on @status from the PPDU
 * start RSSI fields, relative to the (fixed) default noise floor.
 */
static void ath10k_htt_rx_h_signal(struct ath10k *ar,
				   struct ieee80211_rx_status *status,
				   struct htt_rx_desc *rxd)
{
	int i;

	for (i = 0; i < IEEE80211_MAX_CHAINS; i++) {
		status->chains &= ~BIT(i);

		/* 0x80 appears to be the "no sample" sentinel for a chain's
		 * primary-20MHz RSSI - TODO confirm against HW docs.
		 */
		if (rxd->ppdu_start.rssi_chains[i].pri20_mhz != 0x80) {
			status->chain_signal[i] = ATH10K_DEFAULT_NOISE_FLOOR +
				rxd->ppdu_start.rssi_chains[i].pri20_mhz;

			status->chains |= BIT(i);
		}
	}

	/* FIXME: Get real NF */
	status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
			 rxd->ppdu_start.rssi_comb;
	status->flag &= ~RX_FLAG_NO_SIGNAL_VAL;
}

/* Copy the end-of-PPDU TSF timestamp into status->mactime. */
static void ath10k_htt_rx_h_mactime(struct ath10k *ar,
				    struct ieee80211_rx_status *status,
				    struct htt_rx_desc *rxd)
{
	/* FIXME: TSF is known only at the end of PPDU, in the last MPDU. This
	 * means all prior MSDUs in a PPDU are reported to mac80211 without the
	 * TSF. Is it worth holding frames until end of PPDU is known?
	 *
	 * FIXME: Can we get/compute 64bit TSF?
1195b9fd8a84SMichal Kazior */ 11963ec79e3aSMichal Kazior status->mactime = __le32_to_cpu(rxd->ppdu_end.common.tsf_timestamp); 1197b9fd8a84SMichal Kazior status->flag |= RX_FLAG_MACTIME_END; 1198b9fd8a84SMichal Kazior } 1199b9fd8a84SMichal Kazior 1200b9fd8a84SMichal Kazior static void ath10k_htt_rx_h_ppdu(struct ath10k *ar, 1201b9fd8a84SMichal Kazior struct sk_buff_head *amsdu, 1202500ff9f9SMichal Kazior struct ieee80211_rx_status *status, 1203500ff9f9SMichal Kazior u32 vdev_id) 1204b9fd8a84SMichal Kazior { 1205b9fd8a84SMichal Kazior struct sk_buff *first; 1206b9fd8a84SMichal Kazior struct htt_rx_desc *rxd; 1207b9fd8a84SMichal Kazior bool is_first_ppdu; 1208b9fd8a84SMichal Kazior bool is_last_ppdu; 1209b9fd8a84SMichal Kazior 1210b9fd8a84SMichal Kazior if (skb_queue_empty(amsdu)) 1211b9fd8a84SMichal Kazior return; 1212b9fd8a84SMichal Kazior 1213b9fd8a84SMichal Kazior first = skb_peek(amsdu); 1214b9fd8a84SMichal Kazior rxd = (void *)first->data - sizeof(*rxd); 1215b9fd8a84SMichal Kazior 1216b9fd8a84SMichal Kazior is_first_ppdu = !!(rxd->attention.flags & 1217b9fd8a84SMichal Kazior __cpu_to_le32(RX_ATTENTION_FLAGS_FIRST_MPDU)); 1218b9fd8a84SMichal Kazior is_last_ppdu = !!(rxd->attention.flags & 1219b9fd8a84SMichal Kazior __cpu_to_le32(RX_ATTENTION_FLAGS_LAST_MPDU)); 1220b9fd8a84SMichal Kazior 1221b9fd8a84SMichal Kazior if (is_first_ppdu) { 1222b9fd8a84SMichal Kazior /* New PPDU starts so clear out the old per-PPDU status. 
*/ 1223b9fd8a84SMichal Kazior status->freq = 0; 1224b9fd8a84SMichal Kazior status->rate_idx = 0; 12258613c948SJohannes Berg status->nss = 0; 1226da6a4352SJohannes Berg status->encoding = RX_ENC_LEGACY; 1227da6a4352SJohannes Berg status->bw = RATE_INFO_BW_20; 122847cc0ca9SMatthias Frei 12297fdd69c5SJohannes Berg status->flag &= ~RX_FLAG_MACTIME_END; 1230b9fd8a84SMichal Kazior status->flag |= RX_FLAG_NO_SIGNAL_VAL; 1231b9fd8a84SMichal Kazior 123247cc0ca9SMatthias Frei status->flag &= ~(RX_FLAG_AMPDU_IS_LAST); 123347cc0ca9SMatthias Frei status->flag |= RX_FLAG_AMPDU_DETAILS | RX_FLAG_AMPDU_LAST_KNOWN; 123447cc0ca9SMatthias Frei status->ampdu_reference = ar->ampdu_reference; 123547cc0ca9SMatthias Frei 1236b9fd8a84SMichal Kazior ath10k_htt_rx_h_signal(ar, status, rxd); 1237500ff9f9SMichal Kazior ath10k_htt_rx_h_channel(ar, status, rxd, vdev_id); 1238b9fd8a84SMichal Kazior ath10k_htt_rx_h_rates(ar, status, rxd); 1239b9fd8a84SMichal Kazior } 1240b9fd8a84SMichal Kazior 124147cc0ca9SMatthias Frei if (is_last_ppdu) { 1242b9fd8a84SMichal Kazior ath10k_htt_rx_h_mactime(ar, status, rxd); 124347cc0ca9SMatthias Frei 124447cc0ca9SMatthias Frei /* set ampdu last segment flag */ 124547cc0ca9SMatthias Frei status->flag |= RX_FLAG_AMPDU_IS_LAST; 124647cc0ca9SMatthias Frei ar->ampdu_reference++; 124747cc0ca9SMatthias Frei } 1248b9fd8a84SMichal Kazior } 1249b9fd8a84SMichal Kazior 125076f5329aSJanusz Dziedzic static const char * const tid_to_ac[] = { 125176f5329aSJanusz Dziedzic "BE", 125276f5329aSJanusz Dziedzic "BK", 125376f5329aSJanusz Dziedzic "BK", 125476f5329aSJanusz Dziedzic "BE", 125576f5329aSJanusz Dziedzic "VI", 125676f5329aSJanusz Dziedzic "VI", 125776f5329aSJanusz Dziedzic "VO", 125876f5329aSJanusz Dziedzic "VO", 125976f5329aSJanusz Dziedzic }; 126076f5329aSJanusz Dziedzic 126176f5329aSJanusz Dziedzic static char *ath10k_get_tid(struct ieee80211_hdr *hdr, char *out, size_t size) 126276f5329aSJanusz Dziedzic { 126376f5329aSJanusz Dziedzic u8 *qc; 126476f5329aSJanusz Dziedzic 
int tid; 126576f5329aSJanusz Dziedzic 126676f5329aSJanusz Dziedzic if (!ieee80211_is_data_qos(hdr->frame_control)) 126776f5329aSJanusz Dziedzic return ""; 126876f5329aSJanusz Dziedzic 126976f5329aSJanusz Dziedzic qc = ieee80211_get_qos_ctl(hdr); 127076f5329aSJanusz Dziedzic tid = *qc & IEEE80211_QOS_CTL_TID_MASK; 127176f5329aSJanusz Dziedzic if (tid < 8) 127276f5329aSJanusz Dziedzic snprintf(out, size, "tid %d (%s)", tid, tid_to_ac[tid]); 127376f5329aSJanusz Dziedzic else 127476f5329aSJanusz Dziedzic snprintf(out, size, "tid %d", tid); 127576f5329aSJanusz Dziedzic 127676f5329aSJanusz Dziedzic return out; 127776f5329aSJanusz Dziedzic } 127876f5329aSJanusz Dziedzic 1279deba1b9eSRajkumar Manoharan static void ath10k_htt_rx_h_queue_msdu(struct ath10k *ar, 128085f6d7cfSJanusz Dziedzic struct ieee80211_rx_status *rx_status, 128185f6d7cfSJanusz Dziedzic struct sk_buff *skb) 128273539b40SJanusz Dziedzic { 128373539b40SJanusz Dziedzic struct ieee80211_rx_status *status; 1284deba1b9eSRajkumar Manoharan 1285deba1b9eSRajkumar Manoharan status = IEEE80211_SKB_RXCB(skb); 1286deba1b9eSRajkumar Manoharan *status = *rx_status; 1287deba1b9eSRajkumar Manoharan 128862652555SBob Copeland skb_queue_tail(&ar->htt.rx_msdus_q, skb); 1289deba1b9eSRajkumar Manoharan } 1290deba1b9eSRajkumar Manoharan 1291deba1b9eSRajkumar Manoharan static void ath10k_process_rx(struct ath10k *ar, struct sk_buff *skb) 1292deba1b9eSRajkumar Manoharan { 1293deba1b9eSRajkumar Manoharan struct ieee80211_rx_status *status; 129476f5329aSJanusz Dziedzic struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 129576f5329aSJanusz Dziedzic char tid[32]; 129673539b40SJanusz Dziedzic 129785f6d7cfSJanusz Dziedzic status = IEEE80211_SKB_RXCB(skb); 129873539b40SJanusz Dziedzic 12997aa7a72aSMichal Kazior ath10k_dbg(ar, ATH10K_DBG_DATA, 13007fdd69c5SJohannes Berg "rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n", 
130185f6d7cfSJanusz Dziedzic skb, 130285f6d7cfSJanusz Dziedzic skb->len, 130376f5329aSJanusz Dziedzic ieee80211_get_SA(hdr), 130476f5329aSJanusz Dziedzic ath10k_get_tid(hdr, tid, sizeof(tid)), 130576f5329aSJanusz Dziedzic is_multicast_ether_addr(ieee80211_get_DA(hdr)) ? 130676f5329aSJanusz Dziedzic "mcast" : "ucast", 130776f5329aSJanusz Dziedzic (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4, 1308da6a4352SJohannes Berg (status->encoding == RX_ENC_LEGACY) ? "legacy" : "", 1309da6a4352SJohannes Berg (status->encoding == RX_ENC_HT) ? "ht" : "", 1310da6a4352SJohannes Berg (status->encoding == RX_ENC_VHT) ? "vht" : "", 1311da6a4352SJohannes Berg (status->bw == RATE_INFO_BW_40) ? "40" : "", 1312da6a4352SJohannes Berg (status->bw == RATE_INFO_BW_80) ? "80" : "", 1313da6a4352SJohannes Berg (status->bw == RATE_INFO_BW_160) ? "160" : "", 13147fdd69c5SJohannes Berg status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "", 131573539b40SJanusz Dziedzic status->rate_idx, 13168613c948SJohannes Berg status->nss, 131773539b40SJanusz Dziedzic status->freq, 131887326c97SJanusz Dziedzic status->band, status->flag, 131978433f96SJanusz Dziedzic !!(status->flag & RX_FLAG_FAILED_FCS_CRC), 132076f5329aSJanusz Dziedzic !!(status->flag & RX_FLAG_MMIC_ERROR), 132176f5329aSJanusz Dziedzic !!(status->flag & RX_FLAG_AMSDU_MORE)); 13227aa7a72aSMichal Kazior ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ", 132385f6d7cfSJanusz Dziedzic skb->data, skb->len); 13245ce8e7fdSRajkumar Manoharan trace_ath10k_rx_hdr(ar, skb->data, skb->len); 13255ce8e7fdSRajkumar Manoharan trace_ath10k_rx_payload(ar, skb->data, skb->len); 132673539b40SJanusz Dziedzic 13273c97f5deSRajkumar Manoharan ieee80211_rx_napi(ar->hw, NULL, skb, &ar->napi); 132873539b40SJanusz Dziedzic } 132973539b40SJanusz Dziedzic 133048f4ca34SMichal Kazior static int ath10k_htt_rx_nwifi_hdrlen(struct ath10k *ar, 133148f4ca34SMichal Kazior struct ieee80211_hdr *hdr) 1332d960c369SMichal Kazior { 133348f4ca34SMichal Kazior int len = 
		ieee80211_hdrlen(hdr->frame_control);

	if (!test_bit(ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING,
		      ar->running_fw->fw_file.fw_features))
		len = round_up(len, 4);

	return len;
}

/* Undecap a "raw" (fully 802.11-framed) MSDU in place: trim the FCS, and,
 * if the frame was decrypted by HW, strip the crypto parameters (IV/MIC/ICV
 * per the RX_FLAG_*_STRIPPED flags in @status). For MSDU-limit-error frames
 * the saved original 802.11 header in @first_hdr is pushed back first.
 */
static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
					struct sk_buff *msdu,
					struct ieee80211_rx_status *status,
					enum htt_rx_mpdu_encrypt_type enctype,
					bool is_decrypted,
					const u8 first_hdr[64])
{
	struct ieee80211_hdr *hdr;
	struct htt_rx_desc *rxd;
	size_t hdr_len;
	size_t crypto_len;
	bool is_first;
	bool is_last;
	bool msdu_limit_err;
	int bytes_aligned = ar->hw_params.decap_align_bytes;
	u8 *qos;

	/* The HW rx descriptor sits immediately before the skb payload */
	rxd = (void *)msdu->data - sizeof(*rxd);
	is_first = !!(rxd->msdu_end.common.info0 &
		      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
	is_last = !!(rxd->msdu_end.common.info0 &
		     __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));

	/* Delivered decapped frame:
	 * [802.11 header]
	 * [crypto param] <-- can be trimmed if !fcs_err &&
	 *                    !decrypt_err && !peer_idx_invalid
	 * [amsdu header] <-- only if A-MSDU
	 * [rfc1042/llc]
	 * [payload]
	 * [FCS] <-- at end, needs to be trimmed
	 */

	/* Some hardwares(QCA99x0 variants) limit number of msdus in a-msdu when
	 * deaggregate, so that unwanted MSDU-deaggregation is avoided for
	 * error packets. If limit exceeds, hw sends all remaining MSDUs as
	 * a single last MSDU with this msdu limit error set.
	 */
	msdu_limit_err = ath10k_rx_desc_msdu_limit_error(&ar->hw_params, rxd);

	/* If MSDU limit error happens, then don't warn on, the partial raw MSDU
	 * without first MSDU is expected in that case, and handled later here.
	 */
	/* This probably shouldn't happen but warn just in case */
	if (WARN_ON_ONCE(!is_first && !msdu_limit_err))
		return;

	/* This probably shouldn't happen but warn just in case */
	if (WARN_ON_ONCE(!(is_first && is_last) && !msdu_limit_err))
		return;

	skb_trim(msdu, msdu->len - FCS_LEN);

	/* Push original 80211 header */
	if (unlikely(msdu_limit_err)) {
		hdr = (struct ieee80211_hdr *)first_hdr;
		hdr_len = ieee80211_hdrlen(hdr->frame_control);
		crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);

		if (ieee80211_is_data_qos(hdr->frame_control)) {
			qos = ieee80211_get_qos_ctl(hdr);
			qos[0] |= IEEE80211_QOS_CTL_A_MSDU_PRESENT;
		}

		if (crypto_len)
			memcpy(skb_push(msdu, crypto_len),
			       (void *)hdr + round_up(hdr_len, bytes_aligned),
			       crypto_len);

		memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
	}

	/* In most cases this will be true for sniffed frames. It makes sense
	 * to deliver them as-is without stripping the crypto param. This is
	 * necessary for software based decryption.
	 *
	 * If there's no error then the frame is decrypted. At least that is
	 * the case for frames that come in via fragmented rx indication.
	 */
	if (!is_decrypted)
		return;

	/* The payload is decrypted so strip crypto params. Start from tail
	 * since hdr is used to compute some stuff.
	 */

	hdr = (void *)msdu->data;

	/* Tail */
	if (status->flag & RX_FLAG_IV_STRIPPED) {
		skb_trim(msdu, msdu->len -
			 ath10k_htt_rx_crypto_mic_len(ar, enctype));

		skb_trim(msdu, msdu->len -
			 ath10k_htt_rx_crypto_icv_len(ar, enctype));
	} else {
		/* MIC */
		if (status->flag & RX_FLAG_MIC_STRIPPED)
			skb_trim(msdu, msdu->len -
				 ath10k_htt_rx_crypto_mic_len(ar, enctype));

		/* ICV */
		if (status->flag & RX_FLAG_ICV_STRIPPED)
			skb_trim(msdu, msdu->len -
				 ath10k_htt_rx_crypto_icv_len(ar, enctype));
	}

	/* MMIC */
	if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
	    !ieee80211_has_morefrags(hdr->frame_control) &&
	    enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
		skb_trim(msdu, msdu->len - MICHAEL_MIC_LEN);

	/* Head */
	if (status->flag & RX_FLAG_IV_STRIPPED) {
		hdr_len = ieee80211_hdrlen(hdr->frame_control);
		crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);
		/* Slide the 802.11 header forward over the IV and drop the
		 * now-unused gap from the front of the skb.
		 */
		memmove((void *)msdu->data + crypto_len,
			(void *)msdu->data, hdr_len);
		skb_pull(msdu, crypto_len);
	}
}

/* Undecap a native-wifi MSDU: replace the firmware's decapped nwifi header
 * with the original 802.11 header saved in @first_hdr, re-inserting the
 * crypto parameters when the IV was not stripped by HW. DA/SA from the
 * decapped header are preserved since the original header may differ
 * (e.g. 4-address frames).
 */
static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
					  struct sk_buff *msdu,
					  struct ieee80211_rx_status *status,
					  const u8 first_hdr[64],
					  enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ieee80211_hdr *hdr;
	struct htt_rx_desc *rxd;
	size_t hdr_len;
	u8 da[ETH_ALEN];
	u8 sa[ETH_ALEN];
	int l3_pad_bytes;
	int bytes_aligned = ar->hw_params.decap_align_bytes;

	/* Delivered decapped frame:
	 * [nwifi 802.11 header] <-- replaced with 802.11 hdr
	 * [rfc1042/llc]
	 *
	 * Note: The nwifi header doesn't have QoS Control and is
	 * (always?) a 3addr frame.
	 *
	 * Note2: There's no A-MSDU subframe header. Even if it's part
	 * of an A-MSDU.
	 */

	/* pull decapped header and copy SA & DA */
	rxd = (void *)msdu->data - sizeof(*rxd);

	l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
	skb_put(msdu, l3_pad_bytes);

	hdr = (struct ieee80211_hdr *)(msdu->data + l3_pad_bytes);

	hdr_len = ath10k_htt_rx_nwifi_hdrlen(ar, hdr);
	ether_addr_copy(da, ieee80211_get_DA(hdr));
	ether_addr_copy(sa, ieee80211_get_SA(hdr));
	skb_pull(msdu, hdr_len);

	/* push original 802.11 header */
	hdr = (struct ieee80211_hdr *)first_hdr;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);

	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		memcpy(skb_push(msdu,
				ath10k_htt_rx_crypto_param_len(ar, enctype)),
		       (void *)hdr + round_up(hdr_len, bytes_aligned),
		       ath10k_htt_rx_crypto_param_len(ar, enctype));
	}

	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);

	/* original 802.11 header has a different DA and in
	 * case of 4addr it may also have different SA
	 */
	hdr = (struct ieee80211_hdr *)msdu->data;
	ether_addr_copy(ieee80211_get_DA(hdr), da);
	ether_addr_copy(ieee80211_get_SA(hdr), sa);
}

/* Locate the rfc1042/LLC header inside the raw header snapshot kept in the
 * rx descriptor (rxd->rx_hdr_status): skip the (aligned) 802.11 header and
 * crypto parameters on the first MSDU, plus the A-MSDU subframe header when
 * the MSDU is part of an A-MSDU. Returns a pointer into the descriptor's
 * header snapshot, never into the skb payload.
 */
static void *ath10k_htt_rx_h_find_rfc1042(struct ath10k *ar,
					  struct sk_buff *msdu,
					  enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ieee80211_hdr *hdr;
	struct htt_rx_desc *rxd;
	size_t hdr_len, crypto_len;
	void *rfc1042;
	bool is_first, is_last, is_amsdu;
	int bytes_aligned = ar->hw_params.decap_align_bytes;

	rxd = (void *)msdu->data - sizeof(*rxd);
	hdr = (void *)rxd->rx_hdr_status;

	is_first = !!(rxd->msdu_end.common.info0 &
		      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
	is_last = !!(rxd->msdu_end.common.info0 &
		     __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
	is_amsdu = !(is_first && is_last);

	rfc1042 = hdr;

	if (is_first) {
		hdr_len = ieee80211_hdrlen(hdr->frame_control);
		crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);

		rfc1042 += round_up(hdr_len, bytes_aligned) +
			   round_up(crypto_len, bytes_aligned);
	}

	if (is_amsdu)
		rfc1042 += sizeof(struct amsdu_subframe_hdr);

	return rfc1042;
}

/* Undecap an Ethernet-II decapped MSDU: drop the ethhdr, then rebuild
 * [802.11 hdr][crypto param (if not IV-stripped)][rfc1042/llc] in front of
 * the payload, restoring the DA/SA taken from the decapped frame.
 */
static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar,
					struct sk_buff *msdu,
					struct ieee80211_rx_status *status,
					const u8 first_hdr[64],
					enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ieee80211_hdr *hdr;
	struct ethhdr *eth;
	size_t hdr_len;
	void *rfc1042;
	u8 da[ETH_ALEN];
	u8 sa[ETH_ALEN];
	int l3_pad_bytes;
	struct htt_rx_desc *rxd;
	int bytes_aligned = ar->hw_params.decap_align_bytes;

	/* Delivered decapped frame:
	 * [eth header] <-- replaced with 802.11 hdr & rfc1042/llc
	 * [payload]
	 */

	rfc1042 = ath10k_htt_rx_h_find_rfc1042(ar, msdu, enctype);
	if (WARN_ON_ONCE(!rfc1042))
		return;

	rxd = (void *)msdu->data - sizeof(*rxd);
	l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
	skb_put(msdu, l3_pad_bytes);
	skb_pull(msdu, l3_pad_bytes);

	/* pull decapped header and copy SA & DA */
	eth = (struct ethhdr *)msdu->data;
	ether_addr_copy(da, eth->h_dest);
	ether_addr_copy(sa, eth->h_source);
	skb_pull(msdu,
		 sizeof(struct ethhdr));

	/* push rfc1042/llc/snap */
	memcpy(skb_push(msdu, sizeof(struct rfc1042_hdr)), rfc1042,
	       sizeof(struct rfc1042_hdr));

	/* push original 802.11 header */
	hdr = (struct ieee80211_hdr *)first_hdr;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);

	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		memcpy(skb_push(msdu,
				ath10k_htt_rx_crypto_param_len(ar, enctype)),
		       (void *)hdr + round_up(hdr_len, bytes_aligned),
		       ath10k_htt_rx_crypto_param_len(ar, enctype));
	}

	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);

	/* original 802.11 header has a different DA and in
	 * case of 4addr it may also have different SA
	 */
	hdr = (struct ieee80211_hdr *)msdu->data;
	ether_addr_copy(ieee80211_get_DA(hdr), da);
	ether_addr_copy(ieee80211_get_SA(hdr), sa);
}

/* Undecap an 802.3/SNAP decapped A-MSDU subframe: drop the A-MSDU subframe
 * header (keeping the rfc1042/llc that follows it) and push back the
 * original 802.11 header, plus crypto params when the IV was not stripped.
 */
static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar,
					 struct sk_buff *msdu,
					 struct ieee80211_rx_status *status,
					 const u8 first_hdr[64],
					 enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ieee80211_hdr *hdr;
	size_t hdr_len;
	int l3_pad_bytes;
	struct htt_rx_desc *rxd;
	int bytes_aligned = ar->hw_params.decap_align_bytes;

	/* Delivered decapped frame:
	 * [amsdu header] <-- replaced with 802.11 hdr
	 * [rfc1042/llc]
	 * [payload]
	 */

	rxd = (void *)msdu->data - sizeof(*rxd);
	l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);

	skb_put(msdu, l3_pad_bytes);
	skb_pull(msdu, sizeof(struct amsdu_subframe_hdr) + l3_pad_bytes);

	hdr = (struct ieee80211_hdr *)first_hdr;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);

	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		memcpy(skb_push(msdu,
				ath10k_htt_rx_crypto_param_len(ar, enctype)),
		       (void *)hdr + round_up(hdr_len, bytes_aligned),
		       ath10k_htt_rx_crypto_param_len(ar, enctype));
	}

	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
}

/* Dispatch an MSDU to the right undecap routine based on the decap format
 * the HW recorded in the rx descriptor.
 */
static void ath10k_htt_rx_h_undecap(struct ath10k *ar,
				    struct sk_buff *msdu,
				    struct ieee80211_rx_status *status,
				    u8 first_hdr[64],
				    enum htt_rx_mpdu_encrypt_type enctype,
				    bool is_decrypted)
{
	struct htt_rx_desc *rxd;
	enum rx_msdu_decap_format decap;

	/* First msdu's decapped header:
	 * [802.11 header] <-- padded to 4 bytes long
	 * [crypto param] <-- padded to 4 bytes long
	 * [amsdu header] <-- only if A-MSDU
	 * [rfc1042/llc]
	 *
	 * Other (2nd, 3rd, ..) msdu's decapped header:
	 * [amsdu header] <-- only if A-MSDU
	 * [rfc1042/llc]
	 */

	rxd = (void *)msdu->data - sizeof(*rxd);
	decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1),
		   RX_MSDU_START_INFO1_DECAP_FORMAT);

	switch (decap) {
	case RX_MSDU_DECAP_RAW:
		ath10k_htt_rx_h_undecap_raw(ar, msdu, status, enctype,
					    is_decrypted, first_hdr);
		break;
	case RX_MSDU_DECAP_NATIVE_WIFI:
		ath10k_htt_rx_h_undecap_nwifi(ar, msdu, status, first_hdr,
					      enctype);
		break;
	case RX_MSDU_DECAP_ETHERNET2_DIX:
		ath10k_htt_rx_h_undecap_eth(ar, msdu, status, first_hdr, enctype);
		break;
	case RX_MSDU_DECAP_8023_SNAP_LLC:
		ath10k_htt_rx_h_undecap_snap(ar, msdu, status, first_hdr,
					     enctype);
		break;
	}
}

/* Translate the rx descriptor's checksum attention bits into a
 * sk_buff ip_summed value: CHECKSUM_UNNECESSARY only when the frame is
 * IPv4/IPv6 TCP or UDP and both the IP and TCP/UDP checksums verified,
 * CHECKSUM_NONE otherwise.
 */
static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
{
	struct htt_rx_desc *rxd;
	u32 flags, info;
	bool is_ip4, is_ip6;
	bool is_tcp, is_udp;
	bool ip_csum_ok, tcpudp_csum_ok;

	rxd = (void *)skb->data - sizeof(*rxd);
	flags = __le32_to_cpu(rxd->attention.flags);
	info = __le32_to_cpu(rxd->msdu_start.common.info1);

	is_ip4 = !!(info & RX_MSDU_START_INFO1_IPV4_PROTO);
	is_ip6 = !!(info & RX_MSDU_START_INFO1_IPV6_PROTO);
	is_tcp = !!(info & RX_MSDU_START_INFO1_TCP_PROTO);
	is_udp = !!(info & RX_MSDU_START_INFO1_UDP_PROTO);
	ip_csum_ok = !(flags & RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL);
	tcpudp_csum_ok = !(flags & RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL);

	if (!is_ip4 && !is_ip6)
		return CHECKSUM_NONE;
	if (!is_tcp && !is_udp)
		return CHECKSUM_NONE;
	if (!ip_csum_ok)
		return CHECKSUM_NONE;
	if (!tcpudp_csum_ok)
		return CHECKSUM_NONE;

	return CHECKSUM_UNNECESSARY;
}

/* Record the HW-verified checksum state on the skb */
static void ath10k_htt_rx_h_csum_offload(struct sk_buff *msdu)
{
	msdu->ip_summed = ath10k_htt_rx_get_csum_state(msdu);
}

/* Process one MPDU (a queue of MSDUs forming an A-MSDU): derive per-MPDU
 * rx status flags from the attention bits, undecap each MSDU back to an
 * 802.11 frame and set its checksum state.
 *
 * @fill_crypt_header: when true the crypto params are kept in the frame
 *	(MIC/ICV stripped instead of the IV) so mac80211 sees the PN.
 * @rx_hdr: optional; receives a copy of the raw 802.11 header snapshot.
 * @err: optional; receives a classification of any rx error found.
 */
static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
				 struct sk_buff_head *amsdu,
				 struct ieee80211_rx_status *status,
				 bool fill_crypt_header,
				 u8 *rx_hdr,
				 enum ath10k_pkt_rx_err *err)
{
	struct sk_buff *first;
	struct sk_buff *last;
	struct sk_buff *msdu;
	struct htt_rx_desc *rxd;
	struct ieee80211_hdr *hdr;
	enum htt_rx_mpdu_encrypt_type enctype;
	u8 first_hdr[64];
	u8 *qos;
	bool has_fcs_err;
	bool has_crypto_err;
	bool has_tkip_err;
	bool has_peer_idx_invalid;
	bool is_decrypted;
	bool is_mgmt;
	u32 attention;

	if (skb_queue_empty(amsdu))
		return;

	first = skb_peek(amsdu);
	rxd = (void *)first->data - sizeof(*rxd);

	is_mgmt = !!(rxd->attention.flags &
		     __cpu_to_le32(RX_ATTENTION_FLAGS_MGMT_TYPE));

	enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
		     RX_MPDU_START_INFO0_ENCRYPT_TYPE);

	/* First MSDU's Rx descriptor in an A-MSDU contains full 802.11
	 * decapped header. It'll be used for undecapping of each MSDU.
	 */
	hdr = (void *)rxd->rx_hdr_status;
	memcpy(first_hdr, hdr, RX_HTT_HDR_STATUS_LEN);

	if (rx_hdr)
		memcpy(rx_hdr, hdr, RX_HTT_HDR_STATUS_LEN);

	/* Each A-MSDU subframe will use the original header as the base and be
	 * reported as a separate MSDU so strip the A-MSDU bit from QoS Ctl.
	 */
	hdr = (void *)first_hdr;

	if (ieee80211_is_data_qos(hdr->frame_control)) {
		qos = ieee80211_get_qos_ctl(hdr);
		qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
	}

	/* Some attention flags are valid only in the last MSDU. */
	last = skb_peek_tail(amsdu);
	rxd = (void *)last->data - sizeof(*rxd);
	attention = __le32_to_cpu(rxd->attention.flags);

	has_fcs_err = !!(attention & RX_ATTENTION_FLAGS_FCS_ERR);
	has_crypto_err = !!(attention & RX_ATTENTION_FLAGS_DECRYPT_ERR);
	has_tkip_err = !!(attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR);
	has_peer_idx_invalid = !!(attention & RX_ATTENTION_FLAGS_PEER_IDX_INVALID);

	/* Note: If hardware captures an encrypted frame that it can't decrypt,
	 * e.g. due to fcs error, missing peer or invalid key data it will
	 * report the frame as raw.
	 */
	is_decrypted = (enctype != HTT_RX_MPDU_ENCRYPT_NONE &&
			!has_fcs_err &&
			!has_crypto_err &&
			!has_peer_idx_invalid);

	/* Clear per-MPDU flags while leaving per-PPDU flags intact. */
	status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
			  RX_FLAG_MMIC_ERROR |
			  RX_FLAG_DECRYPTED |
			  RX_FLAG_IV_STRIPPED |
			  RX_FLAG_ONLY_MONITOR |
			  RX_FLAG_MMIC_STRIPPED);

	if (has_fcs_err)
		status->flag |= RX_FLAG_FAILED_FCS_CRC;

	if (has_tkip_err)
		status->flag |= RX_FLAG_MMIC_ERROR;

	/* Report at most one error class, in priority order */
	if (err) {
		if (has_fcs_err)
			*err = ATH10K_PKT_RX_ERR_FCS;
		else if (has_tkip_err)
			*err = ATH10K_PKT_RX_ERR_TKIP;
		else if (has_crypto_err)
			*err = ATH10K_PKT_RX_ERR_CRYPT;
		else if (has_peer_idx_invalid)
			*err = ATH10K_PKT_RX_ERR_PEER_IDX_INVAL;
	}

	/* Firmware reports all necessary management frames via WMI already.
	 * They are not reported to monitor interfaces at all so pass the ones
	 * coming via HTT to monitor interfaces instead. This simplifies
	 * matters a lot.
	 */
	if (is_mgmt)
		status->flag |= RX_FLAG_ONLY_MONITOR;

	if (is_decrypted) {
		status->flag |= RX_FLAG_DECRYPTED;

		if (likely(!is_mgmt))
			status->flag |= RX_FLAG_MMIC_STRIPPED;

		if (fill_crypt_header)
			status->flag |= RX_FLAG_MIC_STRIPPED |
					RX_FLAG_ICV_STRIPPED;
		else
			status->flag |= RX_FLAG_IV_STRIPPED;
	}

	skb_queue_walk(amsdu, msdu) {
		ath10k_htt_rx_h_csum_offload(msdu);
		ath10k_htt_rx_h_undecap(ar, msdu, status, first_hdr, enctype,
					is_decrypted);

		/* Undecapping involves copying the original 802.11 header back
		 * to sk_buff. If frame is protected and hardware has decrypted
		 * it then remove the protected bit.
		 */
		if (!is_decrypted)
			continue;
		if (is_mgmt)
			continue;

		if (fill_crypt_header)
			continue;

		hdr = (void *)msdu->data;
		hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
	}
}

/* Drain @amsdu and queue each MSDU for delivery, maintaining the
 * AMSDU_MORE flag and the allow-same-PN handling for A-MSDU subframes.
 */
static void ath10k_htt_rx_h_enqueue(struct ath10k *ar,
				    struct sk_buff_head *amsdu,
				    struct ieee80211_rx_status *status)
{
	struct sk_buff *msdu;
	struct sk_buff *first_subframe;

	first_subframe = skb_peek(amsdu);

	while ((msdu = __skb_dequeue(amsdu))) {
		/* Setup per-MSDU flags */
		if (skb_queue_empty(amsdu))
			status->flag &= ~RX_FLAG_AMSDU_MORE;
		else
			status->flag |= RX_FLAG_AMSDU_MORE;

		if (msdu == first_subframe) {
			first_subframe = NULL;
			status->flag &= ~RX_FLAG_ALLOW_SAME_PN;
		} else {
			status->flag |= RX_FLAG_ALLOW_SAME_PN;
		}

		ath10k_htt_rx_h_queue_msdu(ar, status, msdu);
1905581c25f8SMichal Kazior } 1906581c25f8SMichal Kazior 1907caee728aSVasanthakumar Thiagarajan static int ath10k_unchain_msdu(struct sk_buff_head *amsdu, 1908caee728aSVasanthakumar Thiagarajan unsigned long int *unchain_cnt) 1909bfa35368SBen Greear { 19109aa505d2SMichal Kazior struct sk_buff *skb, *first; 1911bfa35368SBen Greear int space; 1912bfa35368SBen Greear int total_len = 0; 1913caee728aSVasanthakumar Thiagarajan int amsdu_len = skb_queue_len(amsdu); 1914bfa35368SBen Greear 1915bfa35368SBen Greear /* TODO: Might could optimize this by using 1916bfa35368SBen Greear * skb_try_coalesce or similar method to 1917bfa35368SBen Greear * decrease copying, or maybe get mac80211 to 1918bfa35368SBen Greear * provide a way to just receive a list of 1919bfa35368SBen Greear * skb? 1920bfa35368SBen Greear */ 1921bfa35368SBen Greear 19229aa505d2SMichal Kazior first = __skb_dequeue(amsdu); 1923bfa35368SBen Greear 1924bfa35368SBen Greear /* Allocate total length all at once. */ 19259aa505d2SMichal Kazior skb_queue_walk(amsdu, skb) 19269aa505d2SMichal Kazior total_len += skb->len; 1927bfa35368SBen Greear 19289aa505d2SMichal Kazior space = total_len - skb_tailroom(first); 1929bfa35368SBen Greear if ((space > 0) && 19309aa505d2SMichal Kazior (pskb_expand_head(first, 0, space, GFP_ATOMIC) < 0)) { 1931bfa35368SBen Greear /* TODO: bump some rx-oom error stat */ 1932bfa35368SBen Greear /* put it back together so we can free the 1933bfa35368SBen Greear * whole list at once. 
1934bfa35368SBen Greear */ 19359aa505d2SMichal Kazior __skb_queue_head(amsdu, first); 1936bfa35368SBen Greear return -1; 1937bfa35368SBen Greear } 1938bfa35368SBen Greear 1939bfa35368SBen Greear /* Walk list again, copying contents into 1940bfa35368SBen Greear * msdu_head 1941bfa35368SBen Greear */ 19429aa505d2SMichal Kazior while ((skb = __skb_dequeue(amsdu))) { 19439aa505d2SMichal Kazior skb_copy_from_linear_data(skb, skb_put(first, skb->len), 19449aa505d2SMichal Kazior skb->len); 19459aa505d2SMichal Kazior dev_kfree_skb_any(skb); 1946bfa35368SBen Greear } 1947bfa35368SBen Greear 19489aa505d2SMichal Kazior __skb_queue_head(amsdu, first); 1949caee728aSVasanthakumar Thiagarajan 1950caee728aSVasanthakumar Thiagarajan *unchain_cnt += amsdu_len - 1; 1951caee728aSVasanthakumar Thiagarajan 1952bfa35368SBen Greear return 0; 1953bfa35368SBen Greear } 1954bfa35368SBen Greear 1955581c25f8SMichal Kazior static void ath10k_htt_rx_h_unchain(struct ath10k *ar, 1956caee728aSVasanthakumar Thiagarajan struct sk_buff_head *amsdu, 1957caee728aSVasanthakumar Thiagarajan unsigned long int *drop_cnt, 1958caee728aSVasanthakumar Thiagarajan unsigned long int *unchain_cnt) 19592acc4eb2SJanusz Dziedzic { 1960581c25f8SMichal Kazior struct sk_buff *first; 1961581c25f8SMichal Kazior struct htt_rx_desc *rxd; 1962581c25f8SMichal Kazior enum rx_msdu_decap_format decap; 19637aa7a72aSMichal Kazior 1964581c25f8SMichal Kazior first = skb_peek(amsdu); 1965581c25f8SMichal Kazior rxd = (void *)first->data - sizeof(*rxd); 19661f5dbfbbSPeter Oh decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1), 1967581c25f8SMichal Kazior RX_MSDU_START_INFO1_DECAP_FORMAT); 1968581c25f8SMichal Kazior 1969581c25f8SMichal Kazior /* FIXME: Current unchaining logic can only handle simple case of raw 1970581c25f8SMichal Kazior * msdu chaining. If decapping is other than raw the chaining may be 1971581c25f8SMichal Kazior * more complex and this isn't handled by the current code. 
Don't even 1972581c25f8SMichal Kazior * try re-constructing such frames - it'll be pretty much garbage. 1973581c25f8SMichal Kazior */ 1974581c25f8SMichal Kazior if (decap != RX_MSDU_DECAP_RAW || 1975581c25f8SMichal Kazior skb_queue_len(amsdu) != 1 + rxd->frag_info.ring2_more_count) { 1976caee728aSVasanthakumar Thiagarajan *drop_cnt += skb_queue_len(amsdu); 1977581c25f8SMichal Kazior __skb_queue_purge(amsdu); 1978581c25f8SMichal Kazior return; 1979581c25f8SMichal Kazior } 1980581c25f8SMichal Kazior 1981caee728aSVasanthakumar Thiagarajan ath10k_unchain_msdu(amsdu, unchain_cnt); 1982581c25f8SMichal Kazior } 1983581c25f8SMichal Kazior 1984581c25f8SMichal Kazior static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar, 1985581c25f8SMichal Kazior struct sk_buff_head *amsdu, 1986581c25f8SMichal Kazior struct ieee80211_rx_status *rx_status) 1987581c25f8SMichal Kazior { 1988581c25f8SMichal Kazior /* FIXME: It might be a good idea to do some fuzzy-testing to drop 1989581c25f8SMichal Kazior * invalid/dangerous frames. 
1990581c25f8SMichal Kazior */ 1991581c25f8SMichal Kazior 1992581c25f8SMichal Kazior if (!rx_status->freq) { 1993984eb905SGabriel Craciunescu ath10k_dbg(ar, ATH10K_DBG_HTT, "no channel configured; ignoring frame(s)!\n"); 19942acc4eb2SJanusz Dziedzic return false; 19952acc4eb2SJanusz Dziedzic } 19962acc4eb2SJanusz Dziedzic 1997581c25f8SMichal Kazior if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) { 1998581c25f8SMichal Kazior ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx cac running\n"); 19992acc4eb2SJanusz Dziedzic return false; 20002acc4eb2SJanusz Dziedzic } 20012acc4eb2SJanusz Dziedzic 20022acc4eb2SJanusz Dziedzic return true; 20032acc4eb2SJanusz Dziedzic } 20042acc4eb2SJanusz Dziedzic 2005581c25f8SMichal Kazior static void ath10k_htt_rx_h_filter(struct ath10k *ar, 2006581c25f8SMichal Kazior struct sk_buff_head *amsdu, 2007caee728aSVasanthakumar Thiagarajan struct ieee80211_rx_status *rx_status, 2008caee728aSVasanthakumar Thiagarajan unsigned long int *drop_cnt) 2009581c25f8SMichal Kazior { 2010581c25f8SMichal Kazior if (skb_queue_empty(amsdu)) 2011581c25f8SMichal Kazior return; 2012581c25f8SMichal Kazior 2013581c25f8SMichal Kazior if (ath10k_htt_rx_amsdu_allowed(ar, amsdu, rx_status)) 2014581c25f8SMichal Kazior return; 2015581c25f8SMichal Kazior 2016caee728aSVasanthakumar Thiagarajan if (drop_cnt) 2017caee728aSVasanthakumar Thiagarajan *drop_cnt += skb_queue_len(amsdu); 2018caee728aSVasanthakumar Thiagarajan 2019581c25f8SMichal Kazior __skb_queue_purge(amsdu); 2020581c25f8SMichal Kazior } 2021581c25f8SMichal Kazior 202218235664SRajkumar Manoharan static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt) 202318235664SRajkumar Manoharan { 202418235664SRajkumar Manoharan struct ath10k *ar = htt->ar; 2025237e15dfSAshok Raj Nagarajan struct ieee80211_rx_status *rx_status = &htt->rx_status; 202618235664SRajkumar Manoharan struct sk_buff_head amsdu; 2027deba1b9eSRajkumar Manoharan int ret; 2028caee728aSVasanthakumar Thiagarajan unsigned long int drop_cnt = 0; 
2029caee728aSVasanthakumar Thiagarajan unsigned long int unchain_cnt = 0; 2030caee728aSVasanthakumar Thiagarajan unsigned long int drop_cnt_filter = 0; 2031caee728aSVasanthakumar Thiagarajan unsigned long int msdus_to_queue, num_msdus; 2032caee728aSVasanthakumar Thiagarajan enum ath10k_pkt_rx_err err = ATH10K_PKT_RX_ERR_MAX; 2033caee728aSVasanthakumar Thiagarajan u8 first_hdr[RX_HTT_HDR_STATUS_LEN]; 203418235664SRajkumar Manoharan 203518235664SRajkumar Manoharan __skb_queue_head_init(&amsdu); 203618235664SRajkumar Manoharan 203718235664SRajkumar Manoharan spin_lock_bh(&htt->rx_ring.lock); 203818235664SRajkumar Manoharan if (htt->rx_confused) { 203918235664SRajkumar Manoharan spin_unlock_bh(&htt->rx_ring.lock); 204018235664SRajkumar Manoharan return -EIO; 204118235664SRajkumar Manoharan } 204218235664SRajkumar Manoharan ret = ath10k_htt_rx_amsdu_pop(htt, &amsdu); 204318235664SRajkumar Manoharan spin_unlock_bh(&htt->rx_ring.lock); 204418235664SRajkumar Manoharan 204518235664SRajkumar Manoharan if (ret < 0) { 204618235664SRajkumar Manoharan ath10k_warn(ar, "rx ring became corrupted: %d\n", ret); 204718235664SRajkumar Manoharan __skb_queue_purge(&amsdu); 204818235664SRajkumar Manoharan /* FIXME: It's probably a good idea to reboot the 204918235664SRajkumar Manoharan * device instead of leaving it inoperable. 
205018235664SRajkumar Manoharan */ 205118235664SRajkumar Manoharan htt->rx_confused = true; 205218235664SRajkumar Manoharan return ret; 205318235664SRajkumar Manoharan } 205418235664SRajkumar Manoharan 2055caee728aSVasanthakumar Thiagarajan num_msdus = skb_queue_len(&amsdu); 2056caee728aSVasanthakumar Thiagarajan 2057237e15dfSAshok Raj Nagarajan ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff); 20587543d116SMohammed Shafi Shajakhan 20597543d116SMohammed Shafi Shajakhan /* only for ret = 1 indicates chained msdus */ 20607543d116SMohammed Shafi Shajakhan if (ret > 0) 2061caee728aSVasanthakumar Thiagarajan ath10k_htt_rx_h_unchain(ar, &amsdu, &drop_cnt, &unchain_cnt); 20627543d116SMohammed Shafi Shajakhan 2063caee728aSVasanthakumar Thiagarajan ath10k_htt_rx_h_filter(ar, &amsdu, rx_status, &drop_cnt_filter); 2064caee728aSVasanthakumar Thiagarajan ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status, true, first_hdr, &err); 2065caee728aSVasanthakumar Thiagarajan msdus_to_queue = skb_queue_len(&amsdu); 2066deba1b9eSRajkumar Manoharan ath10k_htt_rx_h_enqueue(ar, &amsdu, rx_status); 206718235664SRajkumar Manoharan 2068caee728aSVasanthakumar Thiagarajan ath10k_sta_update_rx_tid_stats(ar, first_hdr, num_msdus, err, 2069caee728aSVasanthakumar Thiagarajan unchain_cnt, drop_cnt, drop_cnt_filter, 2070caee728aSVasanthakumar Thiagarajan msdus_to_queue); 2071caee728aSVasanthakumar Thiagarajan 2072deba1b9eSRajkumar Manoharan return 0; 207318235664SRajkumar Manoharan } 207418235664SRajkumar Manoharan 2075f88d4934SErik Stromdahl static bool ath10k_htt_rx_proc_rx_ind_hl(struct ath10k_htt *htt, 2076f88d4934SErik Stromdahl struct htt_rx_indication_hl *rx, 2077f88d4934SErik Stromdahl struct sk_buff *skb) 2078f88d4934SErik Stromdahl { 2079f88d4934SErik Stromdahl struct ath10k *ar = htt->ar; 2080f88d4934SErik Stromdahl struct ath10k_peer *peer; 2081f88d4934SErik Stromdahl struct htt_rx_indication_mpdu_range *mpdu_ranges; 2082f88d4934SErik Stromdahl struct fw_rx_desc_hl *fw_desc; 2083f88d4934SErik 
Stromdahl struct ieee80211_hdr *hdr; 2084f88d4934SErik Stromdahl struct ieee80211_rx_status *rx_status; 2085f88d4934SErik Stromdahl u16 peer_id; 2086f88d4934SErik Stromdahl u8 rx_desc_len; 2087f88d4934SErik Stromdahl int num_mpdu_ranges; 2088f88d4934SErik Stromdahl size_t tot_hdr_len; 2089f88d4934SErik Stromdahl struct ieee80211_channel *ch; 2090f88d4934SErik Stromdahl 2091f88d4934SErik Stromdahl peer_id = __le16_to_cpu(rx->hdr.peer_id); 2092f88d4934SErik Stromdahl 2093f88d4934SErik Stromdahl spin_lock_bh(&ar->data_lock); 2094f88d4934SErik Stromdahl peer = ath10k_peer_find_by_id(ar, peer_id); 2095f88d4934SErik Stromdahl spin_unlock_bh(&ar->data_lock); 2096f88d4934SErik Stromdahl if (!peer) 2097f88d4934SErik Stromdahl ath10k_warn(ar, "Got RX ind from invalid peer: %u\n", peer_id); 2098f88d4934SErik Stromdahl 2099f88d4934SErik Stromdahl num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1), 2100f88d4934SErik Stromdahl HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES); 2101f88d4934SErik Stromdahl mpdu_ranges = htt_rx_ind_get_mpdu_ranges_hl(rx); 2102f88d4934SErik Stromdahl fw_desc = &rx->fw_desc; 2103f88d4934SErik Stromdahl rx_desc_len = fw_desc->len; 2104f88d4934SErik Stromdahl 2105f88d4934SErik Stromdahl /* I have not yet seen any case where num_mpdu_ranges > 1. 2106f88d4934SErik Stromdahl * qcacld does not seem handle that case either, so we introduce the 2107f88d4934SErik Stromdahl * same limitiation here as well. 
2108f88d4934SErik Stromdahl */ 2109f88d4934SErik Stromdahl if (num_mpdu_ranges > 1) 2110f88d4934SErik Stromdahl ath10k_warn(ar, 2111f88d4934SErik Stromdahl "Unsupported number of MPDU ranges: %d, ignoring all but the first\n", 2112f88d4934SErik Stromdahl num_mpdu_ranges); 2113f88d4934SErik Stromdahl 2114f88d4934SErik Stromdahl if (mpdu_ranges->mpdu_range_status != 2115f88d4934SErik Stromdahl HTT_RX_IND_MPDU_STATUS_OK) { 2116f88d4934SErik Stromdahl ath10k_warn(ar, "MPDU range status: %d\n", 2117f88d4934SErik Stromdahl mpdu_ranges->mpdu_range_status); 2118f88d4934SErik Stromdahl goto err; 2119f88d4934SErik Stromdahl } 2120f88d4934SErik Stromdahl 2121f88d4934SErik Stromdahl /* Strip off all headers before the MAC header before delivery to 2122f88d4934SErik Stromdahl * mac80211 2123f88d4934SErik Stromdahl */ 2124f88d4934SErik Stromdahl tot_hdr_len = sizeof(struct htt_resp_hdr) + sizeof(rx->hdr) + 2125f88d4934SErik Stromdahl sizeof(rx->ppdu) + sizeof(rx->prefix) + 2126f88d4934SErik Stromdahl sizeof(rx->fw_desc) + 2127f88d4934SErik Stromdahl sizeof(*mpdu_ranges) * num_mpdu_ranges + rx_desc_len; 2128f88d4934SErik Stromdahl skb_pull(skb, tot_hdr_len); 2129f88d4934SErik Stromdahl 2130f88d4934SErik Stromdahl hdr = (struct ieee80211_hdr *)skb->data; 2131f88d4934SErik Stromdahl rx_status = IEEE80211_SKB_RXCB(skb); 2132f88d4934SErik Stromdahl rx_status->chains |= BIT(0); 2133f88d4934SErik Stromdahl rx_status->signal = ATH10K_DEFAULT_NOISE_FLOOR + 2134f88d4934SErik Stromdahl rx->ppdu.combined_rssi; 2135f88d4934SErik Stromdahl rx_status->flag &= ~RX_FLAG_NO_SIGNAL_VAL; 2136f88d4934SErik Stromdahl 2137f88d4934SErik Stromdahl spin_lock_bh(&ar->data_lock); 2138f88d4934SErik Stromdahl ch = ar->scan_channel; 2139f88d4934SErik Stromdahl if (!ch) 2140f88d4934SErik Stromdahl ch = ar->rx_channel; 2141f88d4934SErik Stromdahl if (!ch) 2142f88d4934SErik Stromdahl ch = ath10k_htt_rx_h_any_channel(ar); 2143f88d4934SErik Stromdahl if (!ch) 2144f88d4934SErik Stromdahl ch = ar->tgt_oper_chan; 
2145f88d4934SErik Stromdahl spin_unlock_bh(&ar->data_lock); 2146f88d4934SErik Stromdahl 2147f88d4934SErik Stromdahl if (ch) { 2148f88d4934SErik Stromdahl rx_status->band = ch->band; 2149f88d4934SErik Stromdahl rx_status->freq = ch->center_freq; 2150f88d4934SErik Stromdahl } 2151f88d4934SErik Stromdahl if (rx->fw_desc.flags & FW_RX_DESC_FLAGS_LAST_MSDU) 2152f88d4934SErik Stromdahl rx_status->flag &= ~RX_FLAG_AMSDU_MORE; 2153f88d4934SErik Stromdahl else 2154f88d4934SErik Stromdahl rx_status->flag |= RX_FLAG_AMSDU_MORE; 2155f88d4934SErik Stromdahl 2156f88d4934SErik Stromdahl /* Not entirely sure about this, but all frames from the chipset has 2157f88d4934SErik Stromdahl * the protected flag set even though they have already been decrypted. 2158f88d4934SErik Stromdahl * Unmasking this flag is necessary in order for mac80211 not to drop 2159f88d4934SErik Stromdahl * the frame. 2160f88d4934SErik Stromdahl * TODO: Verify this is always the case or find out a way to check 2161f88d4934SErik Stromdahl * if there has been hw decryption. 2162f88d4934SErik Stromdahl */ 2163f88d4934SErik Stromdahl if (ieee80211_has_protected(hdr->frame_control)) { 2164f88d4934SErik Stromdahl hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED); 2165f88d4934SErik Stromdahl rx_status->flag |= RX_FLAG_DECRYPTED | 2166f88d4934SErik Stromdahl RX_FLAG_IV_STRIPPED | 2167f88d4934SErik Stromdahl RX_FLAG_MMIC_STRIPPED; 2168f88d4934SErik Stromdahl } 2169f88d4934SErik Stromdahl 2170f88d4934SErik Stromdahl ieee80211_rx_ni(ar->hw, skb); 2171f88d4934SErik Stromdahl 2172f88d4934SErik Stromdahl /* We have delivered the skb to the upper layers (mac80211) so we 2173f88d4934SErik Stromdahl * must not free it. 
2174f88d4934SErik Stromdahl */ 2175f88d4934SErik Stromdahl return false; 2176f88d4934SErik Stromdahl err: 2177f88d4934SErik Stromdahl /* Tell the caller that it must free the skb since we have not 2178f88d4934SErik Stromdahl * consumed it 2179f88d4934SErik Stromdahl */ 2180f88d4934SErik Stromdahl return true; 2181f88d4934SErik Stromdahl } 2182f88d4934SErik Stromdahl 2183f88d4934SErik Stromdahl static void ath10k_htt_rx_proc_rx_ind_ll(struct ath10k_htt *htt, 21845e3dd157SKalle Valo struct htt_rx_indication *rx) 21855e3dd157SKalle Valo { 21867aa7a72aSMichal Kazior struct ath10k *ar = htt->ar; 21875e3dd157SKalle Valo struct htt_rx_indication_mpdu_range *mpdu_ranges; 21885e3dd157SKalle Valo int num_mpdu_ranges; 218918235664SRajkumar Manoharan int i, mpdu_count = 0; 2190caee728aSVasanthakumar Thiagarajan u16 peer_id; 2191caee728aSVasanthakumar Thiagarajan u8 tid; 21925e3dd157SKalle Valo 21935e3dd157SKalle Valo num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1), 21945e3dd157SKalle Valo HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES); 2195caee728aSVasanthakumar Thiagarajan peer_id = __le16_to_cpu(rx->hdr.peer_id); 2196caee728aSVasanthakumar Thiagarajan tid = MS(rx->hdr.info0, HTT_RX_INDICATION_INFO0_EXT_TID); 2197caee728aSVasanthakumar Thiagarajan 21985e3dd157SKalle Valo mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx); 21995e3dd157SKalle Valo 22007aa7a72aSMichal Kazior ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ", 22015e3dd157SKalle Valo rx, sizeof(*rx) + 22025e3dd157SKalle Valo (sizeof(struct htt_rx_indication_mpdu_range) * 22035e3dd157SKalle Valo num_mpdu_ranges)); 22045e3dd157SKalle Valo 2205d540690dSMichal Kazior for (i = 0; i < num_mpdu_ranges; i++) 2206d540690dSMichal Kazior mpdu_count += mpdu_ranges[i].mpdu_count; 2207d540690dSMichal Kazior 22083128b3d8SRajkumar Manoharan atomic_add(mpdu_count, &htt->num_mpdus_ready); 2209caee728aSVasanthakumar Thiagarajan 2210caee728aSVasanthakumar Thiagarajan ath10k_sta_update_rx_tid_stats_ampdu(ar, peer_id, tid, 
mpdu_ranges, 2211caee728aSVasanthakumar Thiagarajan num_mpdu_ranges); 22125e3dd157SKalle Valo } 22135e3dd157SKalle Valo 221459465fe4SRajkumar Manoharan static void ath10k_htt_rx_tx_compl_ind(struct ath10k *ar, 22156c5151a9SMichal Kazior struct sk_buff *skb) 22166c5151a9SMichal Kazior { 22176c5151a9SMichal Kazior struct ath10k_htt *htt = &ar->htt; 22186c5151a9SMichal Kazior struct htt_resp *resp = (struct htt_resp *)skb->data; 22196c5151a9SMichal Kazior struct htt_tx_done tx_done = {}; 22206c5151a9SMichal Kazior int status = MS(resp->data_tx_completion.flags, HTT_DATA_TX_STATUS); 2221c7fd8d23SBalaji Pothunoori __le16 msdu_id, *msdus; 2222c7fd8d23SBalaji Pothunoori bool rssi_enabled = false; 2223c7fd8d23SBalaji Pothunoori u8 msdu_count = 0; 22246c5151a9SMichal Kazior int i; 22256c5151a9SMichal Kazior 22266c5151a9SMichal Kazior switch (status) { 22276c5151a9SMichal Kazior case HTT_DATA_TX_STATUS_NO_ACK: 222859465fe4SRajkumar Manoharan tx_done.status = HTT_TX_COMPL_STATE_NOACK; 22296c5151a9SMichal Kazior break; 22306c5151a9SMichal Kazior case HTT_DATA_TX_STATUS_OK: 223159465fe4SRajkumar Manoharan tx_done.status = HTT_TX_COMPL_STATE_ACK; 22326c5151a9SMichal Kazior break; 22336c5151a9SMichal Kazior case HTT_DATA_TX_STATUS_DISCARD: 22346c5151a9SMichal Kazior case HTT_DATA_TX_STATUS_POSTPONE: 22356c5151a9SMichal Kazior case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL: 223659465fe4SRajkumar Manoharan tx_done.status = HTT_TX_COMPL_STATE_DISCARD; 22376c5151a9SMichal Kazior break; 22386c5151a9SMichal Kazior default: 22397aa7a72aSMichal Kazior ath10k_warn(ar, "unhandled tx completion status %d\n", status); 224059465fe4SRajkumar Manoharan tx_done.status = HTT_TX_COMPL_STATE_DISCARD; 22416c5151a9SMichal Kazior break; 22426c5151a9SMichal Kazior } 22436c5151a9SMichal Kazior 22447aa7a72aSMichal Kazior ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n", 22456c5151a9SMichal Kazior resp->data_tx_completion.num_msdus); 22466c5151a9SMichal Kazior 2247c7fd8d23SBalaji Pothunoori 
msdu_count = resp->data_tx_completion.num_msdus; 2248c7fd8d23SBalaji Pothunoori 2249c7fd8d23SBalaji Pothunoori if (resp->data_tx_completion.flags2 & HTT_TX_CMPL_FLAG_DATA_RSSI) 2250c7fd8d23SBalaji Pothunoori rssi_enabled = true; 2251c7fd8d23SBalaji Pothunoori 2252c7fd8d23SBalaji Pothunoori for (i = 0; i < msdu_count; i++) { 2253c7fd8d23SBalaji Pothunoori msdus = resp->data_tx_completion.msdus; 2254c7fd8d23SBalaji Pothunoori msdu_id = msdus[i]; 22556c5151a9SMichal Kazior tx_done.msdu_id = __le16_to_cpu(msdu_id); 225659465fe4SRajkumar Manoharan 2257c7fd8d23SBalaji Pothunoori if (rssi_enabled) { 2258c7fd8d23SBalaji Pothunoori /* Total no of MSDUs should be even, 2259c7fd8d23SBalaji Pothunoori * if odd MSDUs are sent firmware fills 2260c7fd8d23SBalaji Pothunoori * last msdu id with 0xffff 2261c7fd8d23SBalaji Pothunoori */ 2262c7fd8d23SBalaji Pothunoori if (msdu_count & 0x01) { 2263c7fd8d23SBalaji Pothunoori msdu_id = msdus[msdu_count + i + 1]; 2264c7fd8d23SBalaji Pothunoori tx_done.ack_rssi = __le16_to_cpu(msdu_id); 2265c7fd8d23SBalaji Pothunoori } else { 2266c7fd8d23SBalaji Pothunoori msdu_id = msdus[msdu_count + i]; 2267c7fd8d23SBalaji Pothunoori tx_done.ack_rssi = __le16_to_cpu(msdu_id); 2268c7fd8d23SBalaji Pothunoori } 2269c7fd8d23SBalaji Pothunoori } 2270c7fd8d23SBalaji Pothunoori 227159465fe4SRajkumar Manoharan /* kfifo_put: In practice firmware shouldn't fire off per-CE 227259465fe4SRajkumar Manoharan * interrupt and main interrupt (MSI/-X range case) for the same 227359465fe4SRajkumar Manoharan * HTC service so it should be safe to use kfifo_put w/o lock. 227459465fe4SRajkumar Manoharan * 227559465fe4SRajkumar Manoharan * From kfifo_put() documentation: 227659465fe4SRajkumar Manoharan * Note that with only one concurrent reader and one concurrent 227759465fe4SRajkumar Manoharan * writer, you don't need extra locking to use these macro. 
227859465fe4SRajkumar Manoharan */ 227959465fe4SRajkumar Manoharan if (!kfifo_put(&htt->txdone_fifo, tx_done)) { 228059465fe4SRajkumar Manoharan ath10k_warn(ar, "txdone fifo overrun, msdu_id %d status %d\n", 228159465fe4SRajkumar Manoharan tx_done.msdu_id, tx_done.status); 22826c5151a9SMichal Kazior ath10k_txrx_tx_unref(htt, &tx_done); 22836c5151a9SMichal Kazior } 22846c5151a9SMichal Kazior } 228559465fe4SRajkumar Manoharan } 22866c5151a9SMichal Kazior 2287aa5b4fbcSMichal Kazior static void ath10k_htt_rx_addba(struct ath10k *ar, struct htt_resp *resp) 2288aa5b4fbcSMichal Kazior { 2289aa5b4fbcSMichal Kazior struct htt_rx_addba *ev = &resp->rx_addba; 2290aa5b4fbcSMichal Kazior struct ath10k_peer *peer; 2291aa5b4fbcSMichal Kazior struct ath10k_vif *arvif; 2292aa5b4fbcSMichal Kazior u16 info0, tid, peer_id; 2293aa5b4fbcSMichal Kazior 2294aa5b4fbcSMichal Kazior info0 = __le16_to_cpu(ev->info0); 2295aa5b4fbcSMichal Kazior tid = MS(info0, HTT_RX_BA_INFO0_TID); 2296aa5b4fbcSMichal Kazior peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID); 2297aa5b4fbcSMichal Kazior 22987aa7a72aSMichal Kazior ath10k_dbg(ar, ATH10K_DBG_HTT, 2299aa5b4fbcSMichal Kazior "htt rx addba tid %hu peer_id %hu size %hhu\n", 2300aa5b4fbcSMichal Kazior tid, peer_id, ev->window_size); 2301aa5b4fbcSMichal Kazior 2302aa5b4fbcSMichal Kazior spin_lock_bh(&ar->data_lock); 2303aa5b4fbcSMichal Kazior peer = ath10k_peer_find_by_id(ar, peer_id); 2304aa5b4fbcSMichal Kazior if (!peer) { 23057aa7a72aSMichal Kazior ath10k_warn(ar, "received addba event for invalid peer_id: %hu\n", 2306aa5b4fbcSMichal Kazior peer_id); 2307aa5b4fbcSMichal Kazior spin_unlock_bh(&ar->data_lock); 2308aa5b4fbcSMichal Kazior return; 2309aa5b4fbcSMichal Kazior } 2310aa5b4fbcSMichal Kazior 2311aa5b4fbcSMichal Kazior arvif = ath10k_get_arvif(ar, peer->vdev_id); 2312aa5b4fbcSMichal Kazior if (!arvif) { 23137aa7a72aSMichal Kazior ath10k_warn(ar, "received addba event for invalid vdev_id: %u\n", 2314aa5b4fbcSMichal Kazior peer->vdev_id); 
2315aa5b4fbcSMichal Kazior spin_unlock_bh(&ar->data_lock); 2316aa5b4fbcSMichal Kazior return; 2317aa5b4fbcSMichal Kazior } 2318aa5b4fbcSMichal Kazior 23197aa7a72aSMichal Kazior ath10k_dbg(ar, ATH10K_DBG_HTT, 2320aa5b4fbcSMichal Kazior "htt rx start rx ba session sta %pM tid %hu size %hhu\n", 2321aa5b4fbcSMichal Kazior peer->addr, tid, ev->window_size); 2322aa5b4fbcSMichal Kazior 2323aa5b4fbcSMichal Kazior ieee80211_start_rx_ba_session_offl(arvif->vif, peer->addr, tid); 2324aa5b4fbcSMichal Kazior spin_unlock_bh(&ar->data_lock); 2325aa5b4fbcSMichal Kazior } 2326aa5b4fbcSMichal Kazior 2327aa5b4fbcSMichal Kazior static void ath10k_htt_rx_delba(struct ath10k *ar, struct htt_resp *resp) 2328aa5b4fbcSMichal Kazior { 2329aa5b4fbcSMichal Kazior struct htt_rx_delba *ev = &resp->rx_delba; 2330aa5b4fbcSMichal Kazior struct ath10k_peer *peer; 2331aa5b4fbcSMichal Kazior struct ath10k_vif *arvif; 2332aa5b4fbcSMichal Kazior u16 info0, tid, peer_id; 2333aa5b4fbcSMichal Kazior 2334aa5b4fbcSMichal Kazior info0 = __le16_to_cpu(ev->info0); 2335aa5b4fbcSMichal Kazior tid = MS(info0, HTT_RX_BA_INFO0_TID); 2336aa5b4fbcSMichal Kazior peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID); 2337aa5b4fbcSMichal Kazior 23387aa7a72aSMichal Kazior ath10k_dbg(ar, ATH10K_DBG_HTT, 2339aa5b4fbcSMichal Kazior "htt rx delba tid %hu peer_id %hu\n", 2340aa5b4fbcSMichal Kazior tid, peer_id); 2341aa5b4fbcSMichal Kazior 2342aa5b4fbcSMichal Kazior spin_lock_bh(&ar->data_lock); 2343aa5b4fbcSMichal Kazior peer = ath10k_peer_find_by_id(ar, peer_id); 2344aa5b4fbcSMichal Kazior if (!peer) { 23457aa7a72aSMichal Kazior ath10k_warn(ar, "received addba event for invalid peer_id: %hu\n", 2346aa5b4fbcSMichal Kazior peer_id); 2347aa5b4fbcSMichal Kazior spin_unlock_bh(&ar->data_lock); 2348aa5b4fbcSMichal Kazior return; 2349aa5b4fbcSMichal Kazior } 2350aa5b4fbcSMichal Kazior 2351aa5b4fbcSMichal Kazior arvif = ath10k_get_arvif(ar, peer->vdev_id); 2352aa5b4fbcSMichal Kazior if (!arvif) { 23537aa7a72aSMichal Kazior ath10k_warn(ar, 
"received addba event for invalid vdev_id: %u\n", 2354aa5b4fbcSMichal Kazior peer->vdev_id); 2355aa5b4fbcSMichal Kazior spin_unlock_bh(&ar->data_lock); 2356aa5b4fbcSMichal Kazior return; 2357aa5b4fbcSMichal Kazior } 2358aa5b4fbcSMichal Kazior 23597aa7a72aSMichal Kazior ath10k_dbg(ar, ATH10K_DBG_HTT, 2360aa5b4fbcSMichal Kazior "htt rx stop rx ba session sta %pM tid %hu\n", 2361aa5b4fbcSMichal Kazior peer->addr, tid); 2362aa5b4fbcSMichal Kazior 2363aa5b4fbcSMichal Kazior ieee80211_stop_rx_ba_session_offl(arvif->vif, peer->addr, tid); 2364aa5b4fbcSMichal Kazior spin_unlock_bh(&ar->data_lock); 2365aa5b4fbcSMichal Kazior } 2366aa5b4fbcSMichal Kazior 2367c545070eSMichal Kazior static int ath10k_htt_rx_extract_amsdu(struct sk_buff_head *list, 2368e48e9c42SKalle Valo struct sk_buff_head *amsdu) 2369c545070eSMichal Kazior { 2370c545070eSMichal Kazior struct sk_buff *msdu; 2371c545070eSMichal Kazior struct htt_rx_desc *rxd; 2372c545070eSMichal Kazior 2373c545070eSMichal Kazior if (skb_queue_empty(list)) 2374c545070eSMichal Kazior return -ENOBUFS; 2375c545070eSMichal Kazior 2376c545070eSMichal Kazior if (WARN_ON(!skb_queue_empty(amsdu))) 2377c545070eSMichal Kazior return -EINVAL; 2378c545070eSMichal Kazior 2379e48e9c42SKalle Valo while ((msdu = __skb_dequeue(list))) { 2380c545070eSMichal Kazior __skb_queue_tail(amsdu, msdu); 2381c545070eSMichal Kazior 2382c545070eSMichal Kazior rxd = (void *)msdu->data - sizeof(*rxd); 23831f5dbfbbSPeter Oh if (rxd->msdu_end.common.info0 & 2384c545070eSMichal Kazior __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU)) 2385c545070eSMichal Kazior break; 2386c545070eSMichal Kazior } 2387c545070eSMichal Kazior 2388c545070eSMichal Kazior msdu = skb_peek_tail(amsdu); 2389c545070eSMichal Kazior rxd = (void *)msdu->data - sizeof(*rxd); 23901f5dbfbbSPeter Oh if (!(rxd->msdu_end.common.info0 & 2391c545070eSMichal Kazior __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))) { 2392c545070eSMichal Kazior skb_queue_splice_init(amsdu, list); 2393c545070eSMichal Kazior return 
-EAGAIN; 2394c545070eSMichal Kazior } 2395c545070eSMichal Kazior 2396c545070eSMichal Kazior return 0; 2397c545070eSMichal Kazior } 2398c545070eSMichal Kazior 2399c545070eSMichal Kazior static void ath10k_htt_rx_h_rx_offload_prot(struct ieee80211_rx_status *status, 2400c545070eSMichal Kazior struct sk_buff *skb) 2401c545070eSMichal Kazior { 2402c545070eSMichal Kazior struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 2403c545070eSMichal Kazior 2404c545070eSMichal Kazior if (!ieee80211_has_protected(hdr->frame_control)) 2405c545070eSMichal Kazior return; 2406c545070eSMichal Kazior 2407c545070eSMichal Kazior /* Offloaded frames are already decrypted but firmware insists they are 2408c545070eSMichal Kazior * protected in the 802.11 header. Strip the flag. Otherwise mac80211 2409c545070eSMichal Kazior * will drop the frame. 2410c545070eSMichal Kazior */ 2411c545070eSMichal Kazior 2412c545070eSMichal Kazior hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED); 2413c545070eSMichal Kazior status->flag |= RX_FLAG_DECRYPTED | 2414c545070eSMichal Kazior RX_FLAG_IV_STRIPPED | 2415c545070eSMichal Kazior RX_FLAG_MMIC_STRIPPED; 2416c545070eSMichal Kazior } 2417c545070eSMichal Kazior 2418deba1b9eSRajkumar Manoharan static void ath10k_htt_rx_h_rx_offload(struct ath10k *ar, 2419c545070eSMichal Kazior struct sk_buff_head *list) 2420c545070eSMichal Kazior { 2421c545070eSMichal Kazior struct ath10k_htt *htt = &ar->htt; 2422c545070eSMichal Kazior struct ieee80211_rx_status *status = &htt->rx_status; 2423c545070eSMichal Kazior struct htt_rx_offload_msdu *rx; 2424c545070eSMichal Kazior struct sk_buff *msdu; 2425c545070eSMichal Kazior size_t offset; 2426c545070eSMichal Kazior 2427c545070eSMichal Kazior while ((msdu = __skb_dequeue(list))) { 2428c545070eSMichal Kazior /* Offloaded frames don't have Rx descriptor. Instead they have 2429c545070eSMichal Kazior * a short meta information header. 
2430c545070eSMichal Kazior */ 2431c545070eSMichal Kazior 2432c545070eSMichal Kazior rx = (void *)msdu->data; 2433c545070eSMichal Kazior 2434c545070eSMichal Kazior skb_put(msdu, sizeof(*rx)); 2435c545070eSMichal Kazior skb_pull(msdu, sizeof(*rx)); 2436c545070eSMichal Kazior 2437c545070eSMichal Kazior if (skb_tailroom(msdu) < __le16_to_cpu(rx->msdu_len)) { 2438c545070eSMichal Kazior ath10k_warn(ar, "dropping frame: offloaded rx msdu is too long!\n"); 2439c545070eSMichal Kazior dev_kfree_skb_any(msdu); 2440c545070eSMichal Kazior continue; 2441c545070eSMichal Kazior } 2442c545070eSMichal Kazior 2443c545070eSMichal Kazior skb_put(msdu, __le16_to_cpu(rx->msdu_len)); 2444c545070eSMichal Kazior 2445c545070eSMichal Kazior /* Offloaded rx header length isn't multiple of 2 nor 4 so the 2446c545070eSMichal Kazior * actual payload is unaligned. Align the frame. Otherwise 2447c545070eSMichal Kazior * mac80211 complains. This shouldn't reduce performance much 2448c545070eSMichal Kazior * because these offloaded frames are rare. 2449c545070eSMichal Kazior */ 2450c545070eSMichal Kazior offset = 4 - ((unsigned long)msdu->data & 3); 2451c545070eSMichal Kazior skb_put(msdu, offset); 2452c545070eSMichal Kazior memmove(msdu->data + offset, msdu->data, msdu->len); 2453c545070eSMichal Kazior skb_pull(msdu, offset); 2454c545070eSMichal Kazior 2455c545070eSMichal Kazior /* FIXME: The frame is NWifi. Re-construct QoS Control 2456c545070eSMichal Kazior * if possible later. 
2457c545070eSMichal Kazior */ 2458c545070eSMichal Kazior 2459c545070eSMichal Kazior memset(status, 0, sizeof(*status)); 2460c545070eSMichal Kazior status->flag |= RX_FLAG_NO_SIGNAL_VAL; 2461c545070eSMichal Kazior 2462c545070eSMichal Kazior ath10k_htt_rx_h_rx_offload_prot(status, msdu); 2463500ff9f9SMichal Kazior ath10k_htt_rx_h_channel(ar, status, NULL, rx->vdev_id); 2464deba1b9eSRajkumar Manoharan ath10k_htt_rx_h_queue_msdu(ar, status, msdu); 2465c545070eSMichal Kazior } 2466c545070eSMichal Kazior } 2467c545070eSMichal Kazior 2468e48e9c42SKalle Valo static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb) 2469c545070eSMichal Kazior { 2470c545070eSMichal Kazior struct ath10k_htt *htt = &ar->htt; 2471c545070eSMichal Kazior struct htt_resp *resp = (void *)skb->data; 2472c545070eSMichal Kazior struct ieee80211_rx_status *status = &htt->rx_status; 2473c545070eSMichal Kazior struct sk_buff_head list; 2474c545070eSMichal Kazior struct sk_buff_head amsdu; 2475c545070eSMichal Kazior u16 peer_id; 2476c545070eSMichal Kazior u16 msdu_count; 2477c545070eSMichal Kazior u8 vdev_id; 2478c545070eSMichal Kazior u8 tid; 2479c545070eSMichal Kazior bool offload; 2480c545070eSMichal Kazior bool frag; 2481deba1b9eSRajkumar Manoharan int ret; 2482c545070eSMichal Kazior 2483c545070eSMichal Kazior lockdep_assert_held(&htt->rx_ring.lock); 2484c545070eSMichal Kazior 2485c545070eSMichal Kazior if (htt->rx_confused) 24863c97f5deSRajkumar Manoharan return -EIO; 2487c545070eSMichal Kazior 2488c545070eSMichal Kazior skb_pull(skb, sizeof(resp->hdr)); 2489c545070eSMichal Kazior skb_pull(skb, sizeof(resp->rx_in_ord_ind)); 2490c545070eSMichal Kazior 2491c545070eSMichal Kazior peer_id = __le16_to_cpu(resp->rx_in_ord_ind.peer_id); 2492c545070eSMichal Kazior msdu_count = __le16_to_cpu(resp->rx_in_ord_ind.msdu_count); 2493c545070eSMichal Kazior vdev_id = resp->rx_in_ord_ind.vdev_id; 2494c545070eSMichal Kazior tid = SM(resp->rx_in_ord_ind.info, HTT_RX_IN_ORD_IND_INFO_TID); 
2495c545070eSMichal Kazior offload = !!(resp->rx_in_ord_ind.info & 2496c545070eSMichal Kazior HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK); 2497c545070eSMichal Kazior frag = !!(resp->rx_in_ord_ind.info & HTT_RX_IN_ORD_IND_INFO_FRAG_MASK); 2498c545070eSMichal Kazior 2499c545070eSMichal Kazior ath10k_dbg(ar, ATH10K_DBG_HTT, 2500c545070eSMichal Kazior "htt rx in ord vdev %i peer %i tid %i offload %i frag %i msdu count %i\n", 2501c545070eSMichal Kazior vdev_id, peer_id, tid, offload, frag, msdu_count); 2502c545070eSMichal Kazior 25033b0b55b1SGovind Singh if (skb->len < msdu_count * sizeof(*resp->rx_in_ord_ind.msdu_descs32)) { 2504c545070eSMichal Kazior ath10k_warn(ar, "dropping invalid in order rx indication\n"); 25053c97f5deSRajkumar Manoharan return -EINVAL; 2506c545070eSMichal Kazior } 2507c545070eSMichal Kazior 2508c545070eSMichal Kazior /* The event can deliver more than 1 A-MSDU. Each A-MSDU is later 2509c545070eSMichal Kazior * extracted and processed. 2510c545070eSMichal Kazior */ 2511c545070eSMichal Kazior __skb_queue_head_init(&list); 25123b0b55b1SGovind Singh if (ar->hw_params.target_64bit) 25133b0b55b1SGovind Singh ret = ath10k_htt_rx_pop_paddr64_list(htt, &resp->rx_in_ord_ind, 25143b0b55b1SGovind Singh &list); 25153b0b55b1SGovind Singh else 25163b0b55b1SGovind Singh ret = ath10k_htt_rx_pop_paddr32_list(htt, &resp->rx_in_ord_ind, 25173b0b55b1SGovind Singh &list); 25183b0b55b1SGovind Singh 2519c545070eSMichal Kazior if (ret < 0) { 2520c545070eSMichal Kazior ath10k_warn(ar, "failed to pop paddr list: %d\n", ret); 2521c545070eSMichal Kazior htt->rx_confused = true; 25223c97f5deSRajkumar Manoharan return -EIO; 2523c545070eSMichal Kazior } 2524c545070eSMichal Kazior 2525c545070eSMichal Kazior /* Offloaded frames are very different and need to be handled 2526c545070eSMichal Kazior * separately. 
2527c545070eSMichal Kazior */ 2528c545070eSMichal Kazior if (offload) 2529deba1b9eSRajkumar Manoharan ath10k_htt_rx_h_rx_offload(ar, &list); 2530c545070eSMichal Kazior 2531e48e9c42SKalle Valo while (!skb_queue_empty(&list)) { 2532c545070eSMichal Kazior __skb_queue_head_init(&amsdu); 2533e48e9c42SKalle Valo ret = ath10k_htt_rx_extract_amsdu(&list, &amsdu); 2534c545070eSMichal Kazior switch (ret) { 2535c545070eSMichal Kazior case 0: 2536c545070eSMichal Kazior /* Note: The in-order indication may report interleaved 2537c545070eSMichal Kazior * frames from different PPDUs meaning reported rx rate 2538c545070eSMichal Kazior * to mac80211 isn't accurate/reliable. It's still 2539c545070eSMichal Kazior * better to report something than nothing though. This 2540c545070eSMichal Kazior * should still give an idea about rx rate to the user. 2541c545070eSMichal Kazior */ 2542500ff9f9SMichal Kazior ath10k_htt_rx_h_ppdu(ar, &amsdu, status, vdev_id); 2543caee728aSVasanthakumar Thiagarajan ath10k_htt_rx_h_filter(ar, &amsdu, status, NULL); 2544caee728aSVasanthakumar Thiagarajan ath10k_htt_rx_h_mpdu(ar, &amsdu, status, false, NULL, 2545caee728aSVasanthakumar Thiagarajan NULL); 2546deba1b9eSRajkumar Manoharan ath10k_htt_rx_h_enqueue(ar, &amsdu, status); 2547c545070eSMichal Kazior break; 2548c545070eSMichal Kazior case -EAGAIN: 2549c545070eSMichal Kazior /* fall through */ 2550c545070eSMichal Kazior default: 2551c545070eSMichal Kazior /* Should not happen. 
*/ 2552c545070eSMichal Kazior ath10k_warn(ar, "failed to extract amsdu: %d\n", ret); 2553c545070eSMichal Kazior htt->rx_confused = true; 2554c545070eSMichal Kazior __skb_queue_purge(&list); 25553c97f5deSRajkumar Manoharan return -EIO; 2556c545070eSMichal Kazior } 2557c545070eSMichal Kazior } 2558deba1b9eSRajkumar Manoharan return ret; 2559c545070eSMichal Kazior } 2560c545070eSMichal Kazior 2561839ae637SMichal Kazior static void ath10k_htt_rx_tx_fetch_resp_id_confirm(struct ath10k *ar, 2562839ae637SMichal Kazior const __le32 *resp_ids, 2563839ae637SMichal Kazior int num_resp_ids) 2564839ae637SMichal Kazior { 2565839ae637SMichal Kazior int i; 2566839ae637SMichal Kazior u32 resp_id; 2567839ae637SMichal Kazior 2568839ae637SMichal Kazior ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm num_resp_ids %d\n", 2569839ae637SMichal Kazior num_resp_ids); 2570839ae637SMichal Kazior 2571839ae637SMichal Kazior for (i = 0; i < num_resp_ids; i++) { 2572839ae637SMichal Kazior resp_id = le32_to_cpu(resp_ids[i]); 2573839ae637SMichal Kazior 2574839ae637SMichal Kazior ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm resp_id %u\n", 2575839ae637SMichal Kazior resp_id); 2576839ae637SMichal Kazior 2577839ae637SMichal Kazior /* TODO: free resp_id */ 2578839ae637SMichal Kazior } 2579839ae637SMichal Kazior } 2580839ae637SMichal Kazior 2581839ae637SMichal Kazior static void ath10k_htt_rx_tx_fetch_ind(struct ath10k *ar, struct sk_buff *skb) 2582839ae637SMichal Kazior { 2583426e10eaSMichal Kazior struct ieee80211_hw *hw = ar->hw; 2584426e10eaSMichal Kazior struct ieee80211_txq *txq; 2585839ae637SMichal Kazior struct htt_resp *resp = (struct htt_resp *)skb->data; 2586839ae637SMichal Kazior struct htt_tx_fetch_record *record; 2587839ae637SMichal Kazior size_t len; 2588839ae637SMichal Kazior size_t max_num_bytes; 2589839ae637SMichal Kazior size_t max_num_msdus; 2590426e10eaSMichal Kazior size_t num_bytes; 2591426e10eaSMichal Kazior size_t num_msdus; 2592839ae637SMichal Kazior const 
__le32 *resp_ids; 2593839ae637SMichal Kazior u16 num_records; 2594839ae637SMichal Kazior u16 num_resp_ids; 2595839ae637SMichal Kazior u16 peer_id; 2596839ae637SMichal Kazior u8 tid; 2597426e10eaSMichal Kazior int ret; 2598839ae637SMichal Kazior int i; 2599839ae637SMichal Kazior 2600839ae637SMichal Kazior ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind\n"); 2601839ae637SMichal Kazior 2602839ae637SMichal Kazior len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_ind); 2603839ae637SMichal Kazior if (unlikely(skb->len < len)) { 2604839ae637SMichal Kazior ath10k_warn(ar, "received corrupted tx_fetch_ind event: buffer too short\n"); 2605839ae637SMichal Kazior return; 2606839ae637SMichal Kazior } 2607839ae637SMichal Kazior 2608839ae637SMichal Kazior num_records = le16_to_cpu(resp->tx_fetch_ind.num_records); 2609839ae637SMichal Kazior num_resp_ids = le16_to_cpu(resp->tx_fetch_ind.num_resp_ids); 2610839ae637SMichal Kazior 2611839ae637SMichal Kazior len += sizeof(resp->tx_fetch_ind.records[0]) * num_records; 2612839ae637SMichal Kazior len += sizeof(resp->tx_fetch_ind.resp_ids[0]) * num_resp_ids; 2613839ae637SMichal Kazior 2614839ae637SMichal Kazior if (unlikely(skb->len < len)) { 2615839ae637SMichal Kazior ath10k_warn(ar, "received corrupted tx_fetch_ind event: too many records/resp_ids\n"); 2616839ae637SMichal Kazior return; 2617839ae637SMichal Kazior } 2618839ae637SMichal Kazior 2619839ae637SMichal Kazior ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind num records %hu num resps %hu seq %hu\n", 2620839ae637SMichal Kazior num_records, num_resp_ids, 2621839ae637SMichal Kazior le16_to_cpu(resp->tx_fetch_ind.fetch_seq_num)); 2622839ae637SMichal Kazior 2623426e10eaSMichal Kazior if (!ar->htt.tx_q_state.enabled) { 2624426e10eaSMichal Kazior ath10k_warn(ar, "received unexpected tx_fetch_ind event: not enabled\n"); 2625426e10eaSMichal Kazior return; 2626426e10eaSMichal Kazior } 2627426e10eaSMichal Kazior 2628426e10eaSMichal Kazior if (ar->htt.tx_q_state.mode == 
HTT_TX_MODE_SWITCH_PUSH) { 2629426e10eaSMichal Kazior ath10k_warn(ar, "received unexpected tx_fetch_ind event: in push mode\n"); 2630426e10eaSMichal Kazior return; 2631426e10eaSMichal Kazior } 2632426e10eaSMichal Kazior 2633426e10eaSMichal Kazior rcu_read_lock(); 2634839ae637SMichal Kazior 2635839ae637SMichal Kazior for (i = 0; i < num_records; i++) { 2636839ae637SMichal Kazior record = &resp->tx_fetch_ind.records[i]; 2637839ae637SMichal Kazior peer_id = MS(le16_to_cpu(record->info), 2638839ae637SMichal Kazior HTT_TX_FETCH_RECORD_INFO_PEER_ID); 2639839ae637SMichal Kazior tid = MS(le16_to_cpu(record->info), 2640839ae637SMichal Kazior HTT_TX_FETCH_RECORD_INFO_TID); 2641839ae637SMichal Kazior max_num_msdus = le16_to_cpu(record->num_msdus); 2642839ae637SMichal Kazior max_num_bytes = le32_to_cpu(record->num_bytes); 2643839ae637SMichal Kazior 2644839ae637SMichal Kazior ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch record %i peer_id %hu tid %hhu msdus %zu bytes %zu\n", 2645839ae637SMichal Kazior i, peer_id, tid, max_num_msdus, max_num_bytes); 2646839ae637SMichal Kazior 2647839ae637SMichal Kazior if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) || 2648839ae637SMichal Kazior unlikely(tid >= ar->htt.tx_q_state.num_tids)) { 2649839ae637SMichal Kazior ath10k_warn(ar, "received out of range peer_id %hu tid %hhu\n", 2650839ae637SMichal Kazior peer_id, tid); 2651839ae637SMichal Kazior continue; 2652839ae637SMichal Kazior } 2653839ae637SMichal Kazior 2654426e10eaSMichal Kazior spin_lock_bh(&ar->data_lock); 2655426e10eaSMichal Kazior txq = ath10k_mac_txq_lookup(ar, peer_id, tid); 2656426e10eaSMichal Kazior spin_unlock_bh(&ar->data_lock); 2657426e10eaSMichal Kazior 2658426e10eaSMichal Kazior /* It is okay to release the lock and use txq because RCU read 2659426e10eaSMichal Kazior * lock is held. 
2660426e10eaSMichal Kazior */ 2661426e10eaSMichal Kazior 2662426e10eaSMichal Kazior if (unlikely(!txq)) { 2663426e10eaSMichal Kazior ath10k_warn(ar, "failed to lookup txq for peer_id %hu tid %hhu\n", 2664426e10eaSMichal Kazior peer_id, tid); 2665426e10eaSMichal Kazior continue; 2666839ae637SMichal Kazior } 2667839ae637SMichal Kazior 2668426e10eaSMichal Kazior num_msdus = 0; 2669426e10eaSMichal Kazior num_bytes = 0; 2670426e10eaSMichal Kazior 2671426e10eaSMichal Kazior while (num_msdus < max_num_msdus && 2672426e10eaSMichal Kazior num_bytes < max_num_bytes) { 2673426e10eaSMichal Kazior ret = ath10k_mac_tx_push_txq(hw, txq); 2674426e10eaSMichal Kazior if (ret < 0) 2675426e10eaSMichal Kazior break; 2676426e10eaSMichal Kazior 2677426e10eaSMichal Kazior num_msdus++; 2678426e10eaSMichal Kazior num_bytes += ret; 2679426e10eaSMichal Kazior } 2680426e10eaSMichal Kazior 2681426e10eaSMichal Kazior record->num_msdus = cpu_to_le16(num_msdus); 2682426e10eaSMichal Kazior record->num_bytes = cpu_to_le32(num_bytes); 2683426e10eaSMichal Kazior 2684426e10eaSMichal Kazior ath10k_htt_tx_txq_recalc(hw, txq); 2685426e10eaSMichal Kazior } 2686426e10eaSMichal Kazior 2687426e10eaSMichal Kazior rcu_read_unlock(); 2688426e10eaSMichal Kazior 2689839ae637SMichal Kazior resp_ids = ath10k_htt_get_tx_fetch_ind_resp_ids(&resp->tx_fetch_ind); 2690839ae637SMichal Kazior ath10k_htt_rx_tx_fetch_resp_id_confirm(ar, resp_ids, num_resp_ids); 2691839ae637SMichal Kazior 2692426e10eaSMichal Kazior ret = ath10k_htt_tx_fetch_resp(ar, 2693426e10eaSMichal Kazior resp->tx_fetch_ind.token, 2694426e10eaSMichal Kazior resp->tx_fetch_ind.fetch_seq_num, 2695426e10eaSMichal Kazior resp->tx_fetch_ind.records, 2696426e10eaSMichal Kazior num_records); 2697426e10eaSMichal Kazior if (unlikely(ret)) { 2698426e10eaSMichal Kazior ath10k_warn(ar, "failed to submit tx fetch resp for token 0x%08x: %d\n", 2699426e10eaSMichal Kazior le32_to_cpu(resp->tx_fetch_ind.token), ret); 2700426e10eaSMichal Kazior /* FIXME: request fw restart 
*/ 2701426e10eaSMichal Kazior } 2702426e10eaSMichal Kazior 2703426e10eaSMichal Kazior ath10k_htt_tx_txq_sync(ar); 2704839ae637SMichal Kazior } 2705839ae637SMichal Kazior 2706839ae637SMichal Kazior static void ath10k_htt_rx_tx_fetch_confirm(struct ath10k *ar, 2707839ae637SMichal Kazior struct sk_buff *skb) 2708839ae637SMichal Kazior { 2709839ae637SMichal Kazior const struct htt_resp *resp = (void *)skb->data; 2710839ae637SMichal Kazior size_t len; 2711839ae637SMichal Kazior int num_resp_ids; 2712839ae637SMichal Kazior 2713839ae637SMichal Kazior ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm\n"); 2714839ae637SMichal Kazior 2715839ae637SMichal Kazior len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_confirm); 2716839ae637SMichal Kazior if (unlikely(skb->len < len)) { 2717839ae637SMichal Kazior ath10k_warn(ar, "received corrupted tx_fetch_confirm event: buffer too short\n"); 2718839ae637SMichal Kazior return; 2719839ae637SMichal Kazior } 2720839ae637SMichal Kazior 2721839ae637SMichal Kazior num_resp_ids = le16_to_cpu(resp->tx_fetch_confirm.num_resp_ids); 2722839ae637SMichal Kazior len += sizeof(resp->tx_fetch_confirm.resp_ids[0]) * num_resp_ids; 2723839ae637SMichal Kazior 2724839ae637SMichal Kazior if (unlikely(skb->len < len)) { 2725839ae637SMichal Kazior ath10k_warn(ar, "received corrupted tx_fetch_confirm event: resp_ids buffer overflow\n"); 2726839ae637SMichal Kazior return; 2727839ae637SMichal Kazior } 2728839ae637SMichal Kazior 2729839ae637SMichal Kazior ath10k_htt_rx_tx_fetch_resp_id_confirm(ar, 2730839ae637SMichal Kazior resp->tx_fetch_confirm.resp_ids, 2731839ae637SMichal Kazior num_resp_ids); 2732839ae637SMichal Kazior } 2733839ae637SMichal Kazior 2734839ae637SMichal Kazior static void ath10k_htt_rx_tx_mode_switch_ind(struct ath10k *ar, 2735839ae637SMichal Kazior struct sk_buff *skb) 2736839ae637SMichal Kazior { 2737839ae637SMichal Kazior const struct htt_resp *resp = (void *)skb->data; 2738839ae637SMichal Kazior const struct htt_tx_mode_switch_record 
*record; 2739426e10eaSMichal Kazior struct ieee80211_txq *txq; 2740426e10eaSMichal Kazior struct ath10k_txq *artxq; 2741839ae637SMichal Kazior size_t len; 2742839ae637SMichal Kazior size_t num_records; 2743839ae637SMichal Kazior enum htt_tx_mode_switch_mode mode; 2744839ae637SMichal Kazior bool enable; 2745839ae637SMichal Kazior u16 info0; 2746839ae637SMichal Kazior u16 info1; 2747839ae637SMichal Kazior u16 threshold; 2748839ae637SMichal Kazior u16 peer_id; 2749839ae637SMichal Kazior u8 tid; 2750839ae637SMichal Kazior int i; 2751839ae637SMichal Kazior 2752839ae637SMichal Kazior ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx mode switch ind\n"); 2753839ae637SMichal Kazior 2754839ae637SMichal Kazior len = sizeof(resp->hdr) + sizeof(resp->tx_mode_switch_ind); 2755839ae637SMichal Kazior if (unlikely(skb->len < len)) { 2756839ae637SMichal Kazior ath10k_warn(ar, "received corrupted tx_mode_switch_ind event: buffer too short\n"); 2757839ae637SMichal Kazior return; 2758839ae637SMichal Kazior } 2759839ae637SMichal Kazior 2760839ae637SMichal Kazior info0 = le16_to_cpu(resp->tx_mode_switch_ind.info0); 2761839ae637SMichal Kazior info1 = le16_to_cpu(resp->tx_mode_switch_ind.info1); 2762839ae637SMichal Kazior 2763839ae637SMichal Kazior enable = !!(info0 & HTT_TX_MODE_SWITCH_IND_INFO0_ENABLE); 2764839ae637SMichal Kazior num_records = MS(info0, HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD); 2765839ae637SMichal Kazior mode = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_MODE); 2766839ae637SMichal Kazior threshold = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD); 2767839ae637SMichal Kazior 2768839ae637SMichal Kazior ath10k_dbg(ar, ATH10K_DBG_HTT, 2769839ae637SMichal Kazior "htt rx tx mode switch ind info0 0x%04hx info1 0x%04hx enable %d num records %zd mode %d threshold %hu\n", 2770839ae637SMichal Kazior info0, info1, enable, num_records, mode, threshold); 2771839ae637SMichal Kazior 2772839ae637SMichal Kazior len += sizeof(resp->tx_mode_switch_ind.records[0]) * num_records; 2773839ae637SMichal 
Kazior 2774839ae637SMichal Kazior if (unlikely(skb->len < len)) { 2775839ae637SMichal Kazior ath10k_warn(ar, "received corrupted tx_mode_switch_mode_ind event: too many records\n"); 2776839ae637SMichal Kazior return; 2777839ae637SMichal Kazior } 2778839ae637SMichal Kazior 2779839ae637SMichal Kazior switch (mode) { 2780839ae637SMichal Kazior case HTT_TX_MODE_SWITCH_PUSH: 2781839ae637SMichal Kazior case HTT_TX_MODE_SWITCH_PUSH_PULL: 2782839ae637SMichal Kazior break; 2783839ae637SMichal Kazior default: 2784839ae637SMichal Kazior ath10k_warn(ar, "received invalid tx_mode_switch_mode_ind mode %d, ignoring\n", 2785839ae637SMichal Kazior mode); 2786839ae637SMichal Kazior return; 2787839ae637SMichal Kazior } 2788839ae637SMichal Kazior 2789839ae637SMichal Kazior if (!enable) 2790839ae637SMichal Kazior return; 2791839ae637SMichal Kazior 2792426e10eaSMichal Kazior ar->htt.tx_q_state.enabled = enable; 2793426e10eaSMichal Kazior ar->htt.tx_q_state.mode = mode; 2794426e10eaSMichal Kazior ar->htt.tx_q_state.num_push_allowed = threshold; 2795426e10eaSMichal Kazior 2796426e10eaSMichal Kazior rcu_read_lock(); 2797839ae637SMichal Kazior 2798839ae637SMichal Kazior for (i = 0; i < num_records; i++) { 2799839ae637SMichal Kazior record = &resp->tx_mode_switch_ind.records[i]; 2800839ae637SMichal Kazior info0 = le16_to_cpu(record->info0); 2801839ae637SMichal Kazior peer_id = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_PEER_ID); 2802839ae637SMichal Kazior tid = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_TID); 2803839ae637SMichal Kazior 2804839ae637SMichal Kazior if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) || 2805839ae637SMichal Kazior unlikely(tid >= ar->htt.tx_q_state.num_tids)) { 2806839ae637SMichal Kazior ath10k_warn(ar, "received out of range peer_id %hu tid %hhu\n", 2807839ae637SMichal Kazior peer_id, tid); 2808839ae637SMichal Kazior continue; 2809839ae637SMichal Kazior } 2810839ae637SMichal Kazior 2811426e10eaSMichal Kazior spin_lock_bh(&ar->data_lock); 2812426e10eaSMichal 
Kazior txq = ath10k_mac_txq_lookup(ar, peer_id, tid); 2813426e10eaSMichal Kazior spin_unlock_bh(&ar->data_lock); 2814426e10eaSMichal Kazior 2815426e10eaSMichal Kazior /* It is okay to release the lock and use txq because RCU read 2816426e10eaSMichal Kazior * lock is held. 2817426e10eaSMichal Kazior */ 2818426e10eaSMichal Kazior 2819426e10eaSMichal Kazior if (unlikely(!txq)) { 2820426e10eaSMichal Kazior ath10k_warn(ar, "failed to lookup txq for peer_id %hu tid %hhu\n", 2821426e10eaSMichal Kazior peer_id, tid); 2822426e10eaSMichal Kazior continue; 2823839ae637SMichal Kazior } 2824839ae637SMichal Kazior 2825426e10eaSMichal Kazior spin_lock_bh(&ar->htt.tx_lock); 2826426e10eaSMichal Kazior artxq = (void *)txq->drv_priv; 2827426e10eaSMichal Kazior artxq->num_push_allowed = le16_to_cpu(record->num_max_msdus); 2828426e10eaSMichal Kazior spin_unlock_bh(&ar->htt.tx_lock); 2829426e10eaSMichal Kazior } 2830426e10eaSMichal Kazior 2831426e10eaSMichal Kazior rcu_read_unlock(); 2832426e10eaSMichal Kazior 2833426e10eaSMichal Kazior ath10k_mac_tx_push_pending(ar); 2834839ae637SMichal Kazior } 2835839ae637SMichal Kazior 2836e3a91f87SRajkumar Manoharan void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) 2837e3a91f87SRajkumar Manoharan { 2838e3a91f87SRajkumar Manoharan bool release; 2839e3a91f87SRajkumar Manoharan 2840e3a91f87SRajkumar Manoharan release = ath10k_htt_t2h_msg_handler(ar, skb); 2841e3a91f87SRajkumar Manoharan 2842e3a91f87SRajkumar Manoharan /* Free the indication buffer */ 2843e3a91f87SRajkumar Manoharan if (release) 2844e3a91f87SRajkumar Manoharan dev_kfree_skb_any(skb); 2845e3a91f87SRajkumar Manoharan } 2846e3a91f87SRajkumar Manoharan 28479a9cf0e6SAnilkumar Kolli static inline s8 ath10k_get_legacy_rate_idx(struct ath10k *ar, u8 rate) 2848cec17c38SAnilkumar Kolli { 2849cec17c38SAnilkumar Kolli static const u8 legacy_rates[] = {1, 2, 5, 11, 6, 9, 12, 2850cec17c38SAnilkumar Kolli 18, 24, 36, 48, 54}; 2851cec17c38SAnilkumar Kolli int i; 
2852cec17c38SAnilkumar Kolli 2853cec17c38SAnilkumar Kolli for (i = 0; i < ARRAY_SIZE(legacy_rates); i++) { 2854cec17c38SAnilkumar Kolli if (rate == legacy_rates[i]) 28550189dbd7SAnilkumar Kolli return i; 2856cec17c38SAnilkumar Kolli } 2857cec17c38SAnilkumar Kolli 28580189dbd7SAnilkumar Kolli ath10k_warn(ar, "Invalid legacy rate %hhd peer stats", rate); 28590189dbd7SAnilkumar Kolli return -EINVAL; 2860cec17c38SAnilkumar Kolli } 2861cec17c38SAnilkumar Kolli 2862cec17c38SAnilkumar Kolli static void 2863a904417fSAnilkumar Kolli ath10k_accumulate_per_peer_tx_stats(struct ath10k *ar, 2864a904417fSAnilkumar Kolli struct ath10k_sta *arsta, 2865a904417fSAnilkumar Kolli struct ath10k_per_peer_tx_stats *pstats, 28669a9cf0e6SAnilkumar Kolli s8 legacy_rate_idx) 2867a904417fSAnilkumar Kolli { 2868a904417fSAnilkumar Kolli struct rate_info *txrate = &arsta->txrate; 2869a904417fSAnilkumar Kolli struct ath10k_htt_tx_stats *tx_stats; 2870e88975caSAnilkumar Kolli int idx, ht_idx, gi, mcs, bw, nss; 2871a904417fSAnilkumar Kolli 2872a904417fSAnilkumar Kolli if (!arsta->tx_stats) 2873a904417fSAnilkumar Kolli return; 2874a904417fSAnilkumar Kolli 2875a904417fSAnilkumar Kolli tx_stats = arsta->tx_stats; 2876a904417fSAnilkumar Kolli gi = (arsta->txrate.flags & RATE_INFO_FLAGS_SHORT_GI); 2877a904417fSAnilkumar Kolli ht_idx = txrate->mcs + txrate->nss * 8; 2878a904417fSAnilkumar Kolli mcs = txrate->mcs; 2879a904417fSAnilkumar Kolli bw = txrate->bw; 2880a904417fSAnilkumar Kolli nss = txrate->nss; 2881e88975caSAnilkumar Kolli idx = mcs * 8 + 8 * 10 * nss; 2882e88975caSAnilkumar Kolli idx += bw * 2 + gi; 2883a904417fSAnilkumar Kolli 2884a904417fSAnilkumar Kolli #define STATS_OP_FMT(name) tx_stats->stats[ATH10K_STATS_TYPE_##name] 2885a904417fSAnilkumar Kolli 2886a904417fSAnilkumar Kolli if (txrate->flags == RATE_INFO_FLAGS_VHT_MCS) { 2887a904417fSAnilkumar Kolli STATS_OP_FMT(SUCC).vht[0][mcs] += pstats->succ_bytes; 2888a904417fSAnilkumar Kolli STATS_OP_FMT(SUCC).vht[1][mcs] += pstats->succ_pkts; 
2889a904417fSAnilkumar Kolli STATS_OP_FMT(FAIL).vht[0][mcs] += pstats->failed_bytes; 2890a904417fSAnilkumar Kolli STATS_OP_FMT(FAIL).vht[1][mcs] += pstats->failed_pkts; 2891a904417fSAnilkumar Kolli STATS_OP_FMT(RETRY).vht[0][mcs] += pstats->retry_bytes; 2892a904417fSAnilkumar Kolli STATS_OP_FMT(RETRY).vht[1][mcs] += pstats->retry_pkts; 2893a904417fSAnilkumar Kolli } else if (txrate->flags == RATE_INFO_FLAGS_MCS) { 2894a904417fSAnilkumar Kolli STATS_OP_FMT(SUCC).ht[0][ht_idx] += pstats->succ_bytes; 2895a904417fSAnilkumar Kolli STATS_OP_FMT(SUCC).ht[1][ht_idx] += pstats->succ_pkts; 2896a904417fSAnilkumar Kolli STATS_OP_FMT(FAIL).ht[0][ht_idx] += pstats->failed_bytes; 2897a904417fSAnilkumar Kolli STATS_OP_FMT(FAIL).ht[1][ht_idx] += pstats->failed_pkts; 2898a904417fSAnilkumar Kolli STATS_OP_FMT(RETRY).ht[0][ht_idx] += pstats->retry_bytes; 2899a904417fSAnilkumar Kolli STATS_OP_FMT(RETRY).ht[1][ht_idx] += pstats->retry_pkts; 2900a904417fSAnilkumar Kolli } else { 2901a904417fSAnilkumar Kolli mcs = legacy_rate_idx; 2902a904417fSAnilkumar Kolli 2903a904417fSAnilkumar Kolli STATS_OP_FMT(SUCC).legacy[0][mcs] += pstats->succ_bytes; 2904a904417fSAnilkumar Kolli STATS_OP_FMT(SUCC).legacy[1][mcs] += pstats->succ_pkts; 2905a904417fSAnilkumar Kolli STATS_OP_FMT(FAIL).legacy[0][mcs] += pstats->failed_bytes; 2906a904417fSAnilkumar Kolli STATS_OP_FMT(FAIL).legacy[1][mcs] += pstats->failed_pkts; 2907a904417fSAnilkumar Kolli STATS_OP_FMT(RETRY).legacy[0][mcs] += pstats->retry_bytes; 2908a904417fSAnilkumar Kolli STATS_OP_FMT(RETRY).legacy[1][mcs] += pstats->retry_pkts; 2909a904417fSAnilkumar Kolli } 2910a904417fSAnilkumar Kolli 2911a904417fSAnilkumar Kolli if (ATH10K_HW_AMPDU(pstats->flags)) { 2912a904417fSAnilkumar Kolli tx_stats->ba_fails += ATH10K_HW_BA_FAIL(pstats->flags); 2913a904417fSAnilkumar Kolli 2914a904417fSAnilkumar Kolli if (txrate->flags == RATE_INFO_FLAGS_MCS) { 2915a904417fSAnilkumar Kolli STATS_OP_FMT(AMPDU).ht[0][ht_idx] += 2916a904417fSAnilkumar Kolli 
pstats->succ_bytes + pstats->retry_bytes; 2917a904417fSAnilkumar Kolli STATS_OP_FMT(AMPDU).ht[1][ht_idx] += 2918a904417fSAnilkumar Kolli pstats->succ_pkts + pstats->retry_pkts; 2919a904417fSAnilkumar Kolli } else { 2920a904417fSAnilkumar Kolli STATS_OP_FMT(AMPDU).vht[0][mcs] += 2921a904417fSAnilkumar Kolli pstats->succ_bytes + pstats->retry_bytes; 2922a904417fSAnilkumar Kolli STATS_OP_FMT(AMPDU).vht[1][mcs] += 2923a904417fSAnilkumar Kolli pstats->succ_pkts + pstats->retry_pkts; 2924a904417fSAnilkumar Kolli } 2925a904417fSAnilkumar Kolli STATS_OP_FMT(AMPDU).bw[0][bw] += 2926a904417fSAnilkumar Kolli pstats->succ_bytes + pstats->retry_bytes; 2927a904417fSAnilkumar Kolli STATS_OP_FMT(AMPDU).nss[0][nss] += 2928a904417fSAnilkumar Kolli pstats->succ_bytes + pstats->retry_bytes; 2929a904417fSAnilkumar Kolli STATS_OP_FMT(AMPDU).gi[0][gi] += 2930a904417fSAnilkumar Kolli pstats->succ_bytes + pstats->retry_bytes; 2931e88975caSAnilkumar Kolli STATS_OP_FMT(AMPDU).rate_table[0][idx] += 2932e88975caSAnilkumar Kolli pstats->succ_bytes + pstats->retry_bytes; 2933a904417fSAnilkumar Kolli STATS_OP_FMT(AMPDU).bw[1][bw] += 2934a904417fSAnilkumar Kolli pstats->succ_pkts + pstats->retry_pkts; 2935a904417fSAnilkumar Kolli STATS_OP_FMT(AMPDU).nss[1][nss] += 2936a904417fSAnilkumar Kolli pstats->succ_pkts + pstats->retry_pkts; 2937a904417fSAnilkumar Kolli STATS_OP_FMT(AMPDU).gi[1][gi] += 2938a904417fSAnilkumar Kolli pstats->succ_pkts + pstats->retry_pkts; 2939e88975caSAnilkumar Kolli STATS_OP_FMT(AMPDU).rate_table[1][idx] += 2940e88975caSAnilkumar Kolli pstats->succ_pkts + pstats->retry_pkts; 2941a904417fSAnilkumar Kolli } else { 2942a904417fSAnilkumar Kolli tx_stats->ack_fails += 2943a904417fSAnilkumar Kolli ATH10K_HW_BA_FAIL(pstats->flags); 2944a904417fSAnilkumar Kolli } 2945a904417fSAnilkumar Kolli 2946a904417fSAnilkumar Kolli STATS_OP_FMT(SUCC).bw[0][bw] += pstats->succ_bytes; 2947a904417fSAnilkumar Kolli STATS_OP_FMT(SUCC).nss[0][nss] += pstats->succ_bytes; 2948a904417fSAnilkumar Kolli 
STATS_OP_FMT(SUCC).gi[0][gi] += pstats->succ_bytes; 2949a904417fSAnilkumar Kolli 2950a904417fSAnilkumar Kolli STATS_OP_FMT(SUCC).bw[1][bw] += pstats->succ_pkts; 2951a904417fSAnilkumar Kolli STATS_OP_FMT(SUCC).nss[1][nss] += pstats->succ_pkts; 2952a904417fSAnilkumar Kolli STATS_OP_FMT(SUCC).gi[1][gi] += pstats->succ_pkts; 2953a904417fSAnilkumar Kolli 2954a904417fSAnilkumar Kolli STATS_OP_FMT(FAIL).bw[0][bw] += pstats->failed_bytes; 2955a904417fSAnilkumar Kolli STATS_OP_FMT(FAIL).nss[0][nss] += pstats->failed_bytes; 2956a904417fSAnilkumar Kolli STATS_OP_FMT(FAIL).gi[0][gi] += pstats->failed_bytes; 2957a904417fSAnilkumar Kolli 2958a904417fSAnilkumar Kolli STATS_OP_FMT(FAIL).bw[1][bw] += pstats->failed_pkts; 2959a904417fSAnilkumar Kolli STATS_OP_FMT(FAIL).nss[1][nss] += pstats->failed_pkts; 2960a904417fSAnilkumar Kolli STATS_OP_FMT(FAIL).gi[1][gi] += pstats->failed_pkts; 2961a904417fSAnilkumar Kolli 2962a904417fSAnilkumar Kolli STATS_OP_FMT(RETRY).bw[0][bw] += pstats->retry_bytes; 2963a904417fSAnilkumar Kolli STATS_OP_FMT(RETRY).nss[0][nss] += pstats->retry_bytes; 2964a904417fSAnilkumar Kolli STATS_OP_FMT(RETRY).gi[0][gi] += pstats->retry_bytes; 2965a904417fSAnilkumar Kolli 2966a904417fSAnilkumar Kolli STATS_OP_FMT(RETRY).bw[1][bw] += pstats->retry_pkts; 2967a904417fSAnilkumar Kolli STATS_OP_FMT(RETRY).nss[1][nss] += pstats->retry_pkts; 2968a904417fSAnilkumar Kolli STATS_OP_FMT(RETRY).gi[1][gi] += pstats->retry_pkts; 2969e88975caSAnilkumar Kolli 2970e88975caSAnilkumar Kolli if (txrate->flags >= RATE_INFO_FLAGS_MCS) { 2971e88975caSAnilkumar Kolli STATS_OP_FMT(SUCC).rate_table[0][idx] += pstats->succ_bytes; 2972e88975caSAnilkumar Kolli STATS_OP_FMT(SUCC).rate_table[1][idx] += pstats->succ_pkts; 2973e88975caSAnilkumar Kolli STATS_OP_FMT(FAIL).rate_table[0][idx] += pstats->failed_bytes; 2974e88975caSAnilkumar Kolli STATS_OP_FMT(FAIL).rate_table[1][idx] += pstats->failed_pkts; 2975e88975caSAnilkumar Kolli STATS_OP_FMT(RETRY).rate_table[0][idx] += pstats->retry_bytes; 
2976e88975caSAnilkumar Kolli STATS_OP_FMT(RETRY).rate_table[1][idx] += pstats->retry_pkts; 2977e88975caSAnilkumar Kolli } 2978cec17c38SAnilkumar Kolli } 2979cec17c38SAnilkumar Kolli 2980cec17c38SAnilkumar Kolli static void 2981cec17c38SAnilkumar Kolli ath10k_update_per_peer_tx_stats(struct ath10k *ar, 2982cec17c38SAnilkumar Kolli struct ieee80211_sta *sta, 2983cec17c38SAnilkumar Kolli struct ath10k_per_peer_tx_stats *peer_stats) 2984cec17c38SAnilkumar Kolli { 2985cec17c38SAnilkumar Kolli struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; 29869a9cf0e6SAnilkumar Kolli struct ieee80211_chanctx_conf *conf = NULL; 29879d9cdbf3SGustavo A. R. Silva u8 rate = 0, sgi; 29889d9cdbf3SGustavo A. R. Silva s8 rate_idx = 0; 29899a9cf0e6SAnilkumar Kolli bool skip_auto_rate; 2990cec17c38SAnilkumar Kolli struct rate_info txrate; 2991cec17c38SAnilkumar Kolli 2992cec17c38SAnilkumar Kolli lockdep_assert_held(&ar->data_lock); 2993cec17c38SAnilkumar Kolli 2994cec17c38SAnilkumar Kolli txrate.flags = ATH10K_HW_PREAMBLE(peer_stats->ratecode); 2995cec17c38SAnilkumar Kolli txrate.bw = ATH10K_HW_BW(peer_stats->flags); 2996cec17c38SAnilkumar Kolli txrate.nss = ATH10K_HW_NSS(peer_stats->ratecode); 2997cec17c38SAnilkumar Kolli txrate.mcs = ATH10K_HW_MCS_RATE(peer_stats->ratecode); 2998cec17c38SAnilkumar Kolli sgi = ATH10K_HW_GI(peer_stats->flags); 29999a9cf0e6SAnilkumar Kolli skip_auto_rate = ATH10K_FW_SKIPPED_RATE_CTRL(peer_stats->flags); 30009a9cf0e6SAnilkumar Kolli 30019a9cf0e6SAnilkumar Kolli /* Firmware's rate control skips broadcast/management frames, 30029a9cf0e6SAnilkumar Kolli * if host has configure fixed rates and in some other special cases. 
30039a9cf0e6SAnilkumar Kolli */ 30049a9cf0e6SAnilkumar Kolli if (skip_auto_rate) 30059a9cf0e6SAnilkumar Kolli return; 3006cec17c38SAnilkumar Kolli 3007c1dd8016SSven Eckelmann if (txrate.flags == WMI_RATE_PREAMBLE_VHT && txrate.mcs > 9) { 3008c1dd8016SSven Eckelmann ath10k_warn(ar, "Invalid VHT mcs %hhd peer stats", txrate.mcs); 3009c1dd8016SSven Eckelmann return; 3010c1dd8016SSven Eckelmann } 3011c1dd8016SSven Eckelmann 3012c1dd8016SSven Eckelmann if (txrate.flags == WMI_RATE_PREAMBLE_HT && 3013c1dd8016SSven Eckelmann (txrate.mcs > 7 || txrate.nss < 1)) { 3014c1dd8016SSven Eckelmann ath10k_warn(ar, "Invalid HT mcs %hhd nss %hhd peer stats", 3015c1dd8016SSven Eckelmann txrate.mcs, txrate.nss); 3016cec17c38SAnilkumar Kolli return; 3017cec17c38SAnilkumar Kolli } 3018cec17c38SAnilkumar Kolli 30190f8a2b77SMohammed Shafi Shajakhan memset(&arsta->txrate, 0, sizeof(arsta->txrate)); 30209a9cf0e6SAnilkumar Kolli memset(&arsta->tx_info.status, 0, sizeof(arsta->tx_info.status)); 3021cec17c38SAnilkumar Kolli if (txrate.flags == WMI_RATE_PREAMBLE_CCK || 3022cec17c38SAnilkumar Kolli txrate.flags == WMI_RATE_PREAMBLE_OFDM) { 3023cec17c38SAnilkumar Kolli rate = ATH10K_HW_LEGACY_RATE(peer_stats->ratecode); 3024cec17c38SAnilkumar Kolli /* This is hacky, FW sends CCK rate 5.5Mbps as 6 */ 30250189dbd7SAnilkumar Kolli if (rate == 6 && txrate.flags == WMI_RATE_PREAMBLE_CCK) 30260189dbd7SAnilkumar Kolli rate = 5; 30270189dbd7SAnilkumar Kolli rate_idx = ath10k_get_legacy_rate_idx(ar, rate); 30280189dbd7SAnilkumar Kolli if (rate_idx < 0) 30290189dbd7SAnilkumar Kolli return; 3030cd591027SMohammed Shafi Shajakhan arsta->txrate.legacy = rate; 3031cec17c38SAnilkumar Kolli } else if (txrate.flags == WMI_RATE_PREAMBLE_HT) { 3032cec17c38SAnilkumar Kolli arsta->txrate.flags = RATE_INFO_FLAGS_MCS; 3033c1dd8016SSven Eckelmann arsta->txrate.mcs = txrate.mcs + 8 * (txrate.nss - 1); 3034cec17c38SAnilkumar Kolli } else { 3035cec17c38SAnilkumar Kolli arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS; 
3036cec17c38SAnilkumar Kolli arsta->txrate.mcs = txrate.mcs; 3037cec17c38SAnilkumar Kolli } 3038cec17c38SAnilkumar Kolli 30399a9cf0e6SAnilkumar Kolli switch (txrate.flags) { 30409a9cf0e6SAnilkumar Kolli case WMI_RATE_PREAMBLE_OFDM: 30419a9cf0e6SAnilkumar Kolli if (arsta->arvif && arsta->arvif->vif) 30429a9cf0e6SAnilkumar Kolli conf = rcu_dereference(arsta->arvif->vif->chanctx_conf); 30439a9cf0e6SAnilkumar Kolli if (conf && conf->def.chan->band == NL80211_BAND_5GHZ) 30449a9cf0e6SAnilkumar Kolli arsta->tx_info.status.rates[0].idx = rate_idx - 4; 30459a9cf0e6SAnilkumar Kolli break; 30469a9cf0e6SAnilkumar Kolli case WMI_RATE_PREAMBLE_CCK: 30479a9cf0e6SAnilkumar Kolli arsta->tx_info.status.rates[0].idx = rate_idx; 3048cec17c38SAnilkumar Kolli if (sgi) 30499a9cf0e6SAnilkumar Kolli arsta->tx_info.status.rates[0].flags |= 30509a9cf0e6SAnilkumar Kolli (IEEE80211_TX_RC_USE_SHORT_PREAMBLE | 30519a9cf0e6SAnilkumar Kolli IEEE80211_TX_RC_SHORT_GI); 30529a9cf0e6SAnilkumar Kolli break; 30539a9cf0e6SAnilkumar Kolli case WMI_RATE_PREAMBLE_HT: 30549a9cf0e6SAnilkumar Kolli arsta->tx_info.status.rates[0].idx = 30559a9cf0e6SAnilkumar Kolli txrate.mcs + ((txrate.nss - 1) * 8); 30569a9cf0e6SAnilkumar Kolli if (sgi) 30579a9cf0e6SAnilkumar Kolli arsta->tx_info.status.rates[0].flags |= 30589a9cf0e6SAnilkumar Kolli IEEE80211_TX_RC_SHORT_GI; 30599a9cf0e6SAnilkumar Kolli arsta->tx_info.status.rates[0].flags |= IEEE80211_TX_RC_MCS; 30609a9cf0e6SAnilkumar Kolli break; 30619a9cf0e6SAnilkumar Kolli case WMI_RATE_PREAMBLE_VHT: 30629a9cf0e6SAnilkumar Kolli ieee80211_rate_set_vht(&arsta->tx_info.status.rates[0], 30639a9cf0e6SAnilkumar Kolli txrate.mcs, txrate.nss); 30649a9cf0e6SAnilkumar Kolli if (sgi) 30659a9cf0e6SAnilkumar Kolli arsta->tx_info.status.rates[0].flags |= 30669a9cf0e6SAnilkumar Kolli IEEE80211_TX_RC_SHORT_GI; 30679a9cf0e6SAnilkumar Kolli arsta->tx_info.status.rates[0].flags |= IEEE80211_TX_RC_VHT_MCS; 30689a9cf0e6SAnilkumar Kolli break; 30699a9cf0e6SAnilkumar Kolli } 
3070cec17c38SAnilkumar Kolli 3071cec17c38SAnilkumar Kolli arsta->txrate.nss = txrate.nss; 307291493e8eSChristian Lamparter arsta->txrate.bw = ath10k_bw_to_mac80211_bw(txrate.bw); 30739a9cf0e6SAnilkumar Kolli if (sgi) 30749a9cf0e6SAnilkumar Kolli arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI; 30759a9cf0e6SAnilkumar Kolli 30769a9cf0e6SAnilkumar Kolli switch (arsta->txrate.bw) { 30779a9cf0e6SAnilkumar Kolli case RATE_INFO_BW_40: 30789a9cf0e6SAnilkumar Kolli arsta->tx_info.status.rates[0].flags |= 30799a9cf0e6SAnilkumar Kolli IEEE80211_TX_RC_40_MHZ_WIDTH; 30809a9cf0e6SAnilkumar Kolli break; 30819a9cf0e6SAnilkumar Kolli case RATE_INFO_BW_80: 30829a9cf0e6SAnilkumar Kolli arsta->tx_info.status.rates[0].flags |= 30839a9cf0e6SAnilkumar Kolli IEEE80211_TX_RC_80_MHZ_WIDTH; 30849a9cf0e6SAnilkumar Kolli break; 30859a9cf0e6SAnilkumar Kolli } 30869a9cf0e6SAnilkumar Kolli 30879a9cf0e6SAnilkumar Kolli if (peer_stats->succ_pkts) { 30889a9cf0e6SAnilkumar Kolli arsta->tx_info.flags = IEEE80211_TX_STAT_ACK; 30899a9cf0e6SAnilkumar Kolli arsta->tx_info.status.rates[0].count = 1; 30909a9cf0e6SAnilkumar Kolli ieee80211_tx_rate_update(ar->hw, sta, &arsta->tx_info); 30919a9cf0e6SAnilkumar Kolli } 3092a904417fSAnilkumar Kolli 3093a904417fSAnilkumar Kolli if (ath10k_debug_is_extd_tx_stats_enabled(ar)) 3094a904417fSAnilkumar Kolli ath10k_accumulate_per_peer_tx_stats(ar, arsta, peer_stats, 3095a904417fSAnilkumar Kolli rate_idx); 3096cec17c38SAnilkumar Kolli } 3097cec17c38SAnilkumar Kolli 3098cec17c38SAnilkumar Kolli static void ath10k_htt_fetch_peer_stats(struct ath10k *ar, 3099cec17c38SAnilkumar Kolli struct sk_buff *skb) 3100cec17c38SAnilkumar Kolli { 3101cec17c38SAnilkumar Kolli struct htt_resp *resp = (struct htt_resp *)skb->data; 3102cec17c38SAnilkumar Kolli struct ath10k_per_peer_tx_stats *p_tx_stats = &ar->peer_tx_stats; 3103cec17c38SAnilkumar Kolli struct htt_per_peer_tx_stats_ind *tx_stats; 3104cec17c38SAnilkumar Kolli struct ieee80211_sta *sta; 3105cec17c38SAnilkumar Kolli struct 
ath10k_peer *peer; 3106cec17c38SAnilkumar Kolli int peer_id, i; 3107cec17c38SAnilkumar Kolli u8 ppdu_len, num_ppdu; 3108cec17c38SAnilkumar Kolli 3109cec17c38SAnilkumar Kolli num_ppdu = resp->peer_tx_stats.num_ppdu; 3110cec17c38SAnilkumar Kolli ppdu_len = resp->peer_tx_stats.ppdu_len * sizeof(__le32); 3111cec17c38SAnilkumar Kolli 3112cec17c38SAnilkumar Kolli if (skb->len < sizeof(struct htt_resp_hdr) + num_ppdu * ppdu_len) { 3113cec17c38SAnilkumar Kolli ath10k_warn(ar, "Invalid peer stats buf length %d\n", skb->len); 3114cec17c38SAnilkumar Kolli return; 3115cec17c38SAnilkumar Kolli } 3116cec17c38SAnilkumar Kolli 3117cec17c38SAnilkumar Kolli tx_stats = (struct htt_per_peer_tx_stats_ind *) 3118cec17c38SAnilkumar Kolli (resp->peer_tx_stats.payload); 3119cec17c38SAnilkumar Kolli peer_id = __le16_to_cpu(tx_stats->peer_id); 3120cec17c38SAnilkumar Kolli 3121cec17c38SAnilkumar Kolli rcu_read_lock(); 3122cec17c38SAnilkumar Kolli spin_lock_bh(&ar->data_lock); 3123cec17c38SAnilkumar Kolli peer = ath10k_peer_find_by_id(ar, peer_id); 31242d3b5585SZhi Chen if (!peer || !peer->sta) { 3125cec17c38SAnilkumar Kolli ath10k_warn(ar, "Invalid peer id %d peer stats buffer\n", 3126cec17c38SAnilkumar Kolli peer_id); 3127cec17c38SAnilkumar Kolli goto out; 3128cec17c38SAnilkumar Kolli } 3129cec17c38SAnilkumar Kolli 3130cec17c38SAnilkumar Kolli sta = peer->sta; 3131cec17c38SAnilkumar Kolli for (i = 0; i < num_ppdu; i++) { 3132cec17c38SAnilkumar Kolli tx_stats = (struct htt_per_peer_tx_stats_ind *) 3133cec17c38SAnilkumar Kolli (resp->peer_tx_stats.payload + i * ppdu_len); 3134cec17c38SAnilkumar Kolli 3135cec17c38SAnilkumar Kolli p_tx_stats->succ_bytes = __le32_to_cpu(tx_stats->succ_bytes); 3136cec17c38SAnilkumar Kolli p_tx_stats->retry_bytes = __le32_to_cpu(tx_stats->retry_bytes); 3137cec17c38SAnilkumar Kolli p_tx_stats->failed_bytes = 3138cec17c38SAnilkumar Kolli __le32_to_cpu(tx_stats->failed_bytes); 3139cec17c38SAnilkumar Kolli p_tx_stats->ratecode = tx_stats->ratecode; 
3140cec17c38SAnilkumar Kolli p_tx_stats->flags = tx_stats->flags; 3141cec17c38SAnilkumar Kolli p_tx_stats->succ_pkts = __le16_to_cpu(tx_stats->succ_pkts); 3142cec17c38SAnilkumar Kolli p_tx_stats->retry_pkts = __le16_to_cpu(tx_stats->retry_pkts); 3143cec17c38SAnilkumar Kolli p_tx_stats->failed_pkts = __le16_to_cpu(tx_stats->failed_pkts); 3144cec17c38SAnilkumar Kolli 3145cec17c38SAnilkumar Kolli ath10k_update_per_peer_tx_stats(ar, sta, p_tx_stats); 3146cec17c38SAnilkumar Kolli } 3147cec17c38SAnilkumar Kolli 3148cec17c38SAnilkumar Kolli out: 3149cec17c38SAnilkumar Kolli spin_unlock_bh(&ar->data_lock); 3150cec17c38SAnilkumar Kolli rcu_read_unlock(); 3151cec17c38SAnilkumar Kolli } 3152cec17c38SAnilkumar Kolli 3153e8123bb7SAnilkumar Kolli static void ath10k_fetch_10_2_tx_stats(struct ath10k *ar, u8 *data) 3154e8123bb7SAnilkumar Kolli { 3155e8123bb7SAnilkumar Kolli struct ath10k_pktlog_hdr *hdr = (struct ath10k_pktlog_hdr *)data; 3156e8123bb7SAnilkumar Kolli struct ath10k_per_peer_tx_stats *p_tx_stats = &ar->peer_tx_stats; 3157e8123bb7SAnilkumar Kolli struct ath10k_10_2_peer_tx_stats *tx_stats; 3158e8123bb7SAnilkumar Kolli struct ieee80211_sta *sta; 3159e8123bb7SAnilkumar Kolli struct ath10k_peer *peer; 3160e8123bb7SAnilkumar Kolli u16 log_type = __le16_to_cpu(hdr->log_type); 3161e8123bb7SAnilkumar Kolli u32 peer_id = 0, i; 3162e8123bb7SAnilkumar Kolli 3163e8123bb7SAnilkumar Kolli if (log_type != ATH_PKTLOG_TYPE_TX_STAT) 3164e8123bb7SAnilkumar Kolli return; 3165e8123bb7SAnilkumar Kolli 3166e8123bb7SAnilkumar Kolli tx_stats = (struct ath10k_10_2_peer_tx_stats *)((hdr->payload) + 3167e8123bb7SAnilkumar Kolli ATH10K_10_2_TX_STATS_OFFSET); 3168e8123bb7SAnilkumar Kolli 3169e8123bb7SAnilkumar Kolli if (!tx_stats->tx_ppdu_cnt) 3170e8123bb7SAnilkumar Kolli return; 3171e8123bb7SAnilkumar Kolli 3172e8123bb7SAnilkumar Kolli peer_id = tx_stats->peer_id; 3173e8123bb7SAnilkumar Kolli 3174e8123bb7SAnilkumar Kolli rcu_read_lock(); 3175e8123bb7SAnilkumar Kolli 
	/* data_lock protects ar->peer_tx_stats (shared scratch buffer) and
	 * the peer list; RCU section was entered just above.
	 */
	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer || !peer->sta) {
		ath10k_warn(ar, "Invalid peer id %d in peer stats buffer\n",
			    peer_id);
		goto out;
	}

	sta = peer->sta;
	for (i = 0; i < tx_stats->tx_ppdu_cnt; i++) {
		p_tx_stats->succ_bytes =
			__le16_to_cpu(tx_stats->success_bytes[i]);
		p_tx_stats->retry_bytes =
			__le16_to_cpu(tx_stats->retry_bytes[i]);
		p_tx_stats->failed_bytes =
			__le16_to_cpu(tx_stats->failed_bytes[i]);
		/* ratecode/flags/pkt counters are single-byte fields in the
		 * 10.2 layout, so no endian conversion is needed for them.
		 */
		p_tx_stats->ratecode = tx_stats->ratecode[i];
		p_tx_stats->flags = tx_stats->flags[i];
		p_tx_stats->succ_pkts = tx_stats->success_pkts[i];
		p_tx_stats->retry_pkts = tx_stats->retry_pkts[i];
		p_tx_stats->failed_pkts = tx_stats->failed_pkts[i];

		ath10k_update_per_peer_tx_stats(ar, sta, p_tx_stats);
	}
	spin_unlock_bh(&ar->data_lock);
	rcu_read_unlock();

	return;

out:
	spin_unlock_bh(&ar->data_lock);
	rcu_read_unlock();
}

/* Top-level HTT target-to-host message dispatcher.
 *
 * Returns true when the caller still owns @skb and must free it; returns
 * false when the skb has been queued elsewhere (e.g. rx_in_ord_compl_q)
 * and ownership has been transferred.
 */
bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_resp *resp = (struct htt_resp *)skb->data;
	enum htt_t2h_msg_type type;

	/* confirm alignment */
	if (!IS_ALIGNED((unsigned long)skb->data, 4))
		ath10k_warn(ar, "unaligned htt message, expect trouble\n");

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, msg_type: 0x%0X\n",
		   resp->hdr.msg_type);

	/* The raw firmware msg_type must be translated through the
	 * per-firmware-ABI table before it can be switched on.
	 */
	if (resp->hdr.msg_type >= ar->htt.t2h_msg_types_max) {
		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, unsupported msg_type: 0x%0X\n max: 0x%0X",
			   resp->hdr.msg_type, ar->htt.t2h_msg_types_max);
		return true;
	}
	type = ar->htt.t2h_msg_types[resp->hdr.msg_type];

	switch (type) {
	case HTT_T2H_MSG_TYPE_VERSION_CONF: {
		htt->target_version_major = resp->ver_resp.major;
		htt->target_version_minor = resp->ver_resp.minor;
		complete(&htt->target_version_received);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_IND:
		/* High-latency targets deliver frames inline and may keep the
		 * skb, hence the direct return of the handler's verdict.
		 */
		if (ar->dev_type == ATH10K_DEV_TYPE_HL)
			return ath10k_htt_rx_proc_rx_ind_hl(htt,
							    &resp->rx_ind_hl,
							    skb);
		else
			ath10k_htt_rx_proc_rx_ind_ll(htt, &resp->rx_ind);
		break;
	case HTT_T2H_MSG_TYPE_PEER_MAP: {
		struct htt_peer_map_event ev = {
			.vdev_id = resp->peer_map.vdev_id,
			.peer_id = __le16_to_cpu(resp->peer_map.peer_id),
		};
		memcpy(ev.addr, resp->peer_map.addr, sizeof(ev.addr));
		ath10k_peer_map_event(htt, &ev);
		break;
	}
	case HTT_T2H_MSG_TYPE_PEER_UNMAP: {
		struct htt_peer_unmap_event ev = {
			.peer_id = __le16_to_cpu(resp->peer_unmap.peer_id),
		};
		ath10k_peer_unmap_event(htt, &ev);
		break;
	}
	case HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION: {
		struct htt_tx_done tx_done = {};
		int status = __le32_to_cpu(resp->mgmt_tx_completion.status);
		int info = __le32_to_cpu(resp->mgmt_tx_completion.info);

		tx_done.msdu_id = __le32_to_cpu(resp->mgmt_tx_completion.desc_id);

		switch (status) {
		case HTT_MGMT_TX_STATUS_OK:
			tx_done.status = HTT_TX_COMPL_STATE_ACK;
			/* ACK RSSI is only valid when the firmware advertises
			 * the service and sets the per-frame flag.
			 */
			if (test_bit(WMI_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS,
				     ar->wmi.svc_map) &&
			    (resp->mgmt_tx_completion.flags &
			     HTT_MGMT_TX_CMPL_FLAG_ACK_RSSI)) {
				tx_done.ack_rssi =
				FIELD_GET(HTT_MGMT_TX_CMPL_INFO_ACK_RSSI_MASK,
					  info);
			}
			break;
		case HTT_MGMT_TX_STATUS_RETRY:
			tx_done.status = HTT_TX_COMPL_STATE_NOACK;
			break;
		case HTT_MGMT_TX_STATUS_DROP:
			tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
			break;
		}

		/* Non-zero means the msdu_id was not found/unreferenced, in
		 * which case the pending-mgmt counter must not be decremented.
		 */
		status = ath10k_txrx_tx_unref(htt, &tx_done);
		if (!status) {
			spin_lock_bh(&htt->tx_lock);
			ath10k_htt_tx_mgmt_dec_pending(htt);
			spin_unlock_bh(&htt->tx_lock);
		}
		break;
	}
	case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
		ath10k_htt_rx_tx_compl_ind(htt->ar, skb);
		break;
	case HTT_T2H_MSG_TYPE_SEC_IND: {
		struct ath10k *ar = htt->ar;
		struct htt_security_indication *ev = &resp->security_indication;

		ath10k_dbg(ar, ATH10K_DBG_HTT,
			   "sec ind peer_id %d unicast %d type %d\n",
			   __le16_to_cpu(ev->peer_id),
			   !!(ev->flags & HTT_SECURITY_IS_UNICAST),
			   MS(ev->flags, HTT_SECURITY_TYPE));
		complete(&ar->install_key_done);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_FRAG_IND: {
		ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
				skb->data, skb->len);
		/* actual processing happens later from the NAPI poll loop */
		atomic_inc(&htt->num_mpdus_ready);
		break;
	}
	case HTT_T2H_MSG_TYPE_TEST:
		break;
	case HTT_T2H_MSG_TYPE_STATS_CONF:
		trace_ath10k_htt_stats(ar, skb->data, skb->len);
		break;
	case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
		/* Firmware can return tx frames if it's unable to fully
		 * process them and suspects host may be able to fix it. ath10k
		 * sends all tx frames as already inspected so this shouldn't
		 * happen unless fw has a bug.
		 */
		ath10k_warn(ar, "received an unexpected htt tx inspect event\n");
		break;
	case HTT_T2H_MSG_TYPE_RX_ADDBA:
		ath10k_htt_rx_addba(ar, resp);
		break;
	case HTT_T2H_MSG_TYPE_RX_DELBA:
		ath10k_htt_rx_delba(ar, resp);
		break;
	case HTT_T2H_MSG_TYPE_PKTLOG: {
		trace_ath10k_htt_pktlog(ar, resp->pktlog_msg.payload,
					skb->len -
					offsetof(struct htt_resp,
						 pktlog_msg.payload));

		/* 10.2 firmware piggybacks per-peer tx stats on pktlog */
		if (ath10k_peer_stats_enabled(ar))
			ath10k_fetch_10_2_tx_stats(ar,
						   resp->pktlog_msg.payload);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_FLUSH: {
		/* Ignore this event because mac80211 takes care of Rx
		 * aggregation reordering.
		 */
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND: {
		/* skb ownership transfers to the queue; returning false tells
		 * the caller not to free it.
		 */
		skb_queue_tail(&htt->rx_in_ord_compl_q, skb);
		return false;
	}
	case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND:
		break;
	case HTT_T2H_MSG_TYPE_CHAN_CHANGE: {
		u32 phymode = __le32_to_cpu(resp->chan_change.phymode);
		u32 freq = __le32_to_cpu(resp->chan_change.freq);

		ar->tgt_oper_chan = ieee80211_get_channel(ar->hw->wiphy, freq);
		ath10k_dbg(ar, ATH10K_DBG_HTT,
			   "htt chan change freq %u phymode %s\n",
			   freq, ath10k_wmi_phymode_str(phymode));
		break;
	}
	case HTT_T2H_MSG_TYPE_AGGR_CONF:
		break;
	case HTT_T2H_MSG_TYPE_TX_FETCH_IND: {
		/* Copy so the original skb can be freed by the caller while
		 * the copy is consumed asynchronously from tx_fetch_ind_q.
		 */
		struct sk_buff *tx_fetch_ind = skb_copy(skb, GFP_ATOMIC);

		if (!tx_fetch_ind) {
			ath10k_warn(ar, "failed to copy htt tx fetch ind\n");
			break;
		}
		skb_queue_tail(&htt->tx_fetch_ind_q, tx_fetch_ind);
		break;
	}
	case HTT_T2H_MSG_TYPE_TX_FETCH_CONFIRM:
		ath10k_htt_rx_tx_fetch_confirm(ar, skb);
		break;
	case HTT_T2H_MSG_TYPE_TX_MODE_SWITCH_IND:
ath10k_htt_rx_tx_mode_switch_ind(ar, skb); 33859b158736SMichal Kazior break; 3386cec17c38SAnilkumar Kolli case HTT_T2H_MSG_TYPE_PEER_STATS: 3387cec17c38SAnilkumar Kolli ath10k_htt_fetch_peer_stats(ar, skb); 3388cec17c38SAnilkumar Kolli break; 33899b158736SMichal Kazior case HTT_T2H_MSG_TYPE_EN_STATS: 33905e3dd157SKalle Valo default: 33912358a544SMichal Kazior ath10k_warn(ar, "htt event (%d) not handled\n", 33925e3dd157SKalle Valo resp->hdr.msg_type); 33937aa7a72aSMichal Kazior ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ", 33945e3dd157SKalle Valo skb->data, skb->len); 33955e3dd157SKalle Valo break; 3396dab55d10SWaldemar Rymarkiewicz } 3397e3a91f87SRajkumar Manoharan return true; 33985e3dd157SKalle Valo } 33993f0f7ed4SRajkumar Manoharan EXPORT_SYMBOL(ath10k_htt_t2h_msg_handler); 34006c5151a9SMichal Kazior 3401afb0bf7fSVivek Natarajan void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar, 3402afb0bf7fSVivek Natarajan struct sk_buff *skb) 3403afb0bf7fSVivek Natarajan { 340453a5c9bcSAshok Raj Nagarajan trace_ath10k_htt_pktlog(ar, skb->data, skb->len); 3405afb0bf7fSVivek Natarajan dev_kfree_skb_any(skb); 3406afb0bf7fSVivek Natarajan } 3407afb0bf7fSVivek Natarajan EXPORT_SYMBOL(ath10k_htt_rx_pktlog_completion_handler); 3408afb0bf7fSVivek Natarajan 3409deba1b9eSRajkumar Manoharan static int ath10k_htt_rx_deliver_msdu(struct ath10k *ar, int quota, int budget) 3410deba1b9eSRajkumar Manoharan { 3411deba1b9eSRajkumar Manoharan struct sk_buff *skb; 3412deba1b9eSRajkumar Manoharan 3413deba1b9eSRajkumar Manoharan while (quota < budget) { 3414deba1b9eSRajkumar Manoharan if (skb_queue_empty(&ar->htt.rx_msdus_q)) 3415deba1b9eSRajkumar Manoharan break; 3416deba1b9eSRajkumar Manoharan 341762652555SBob Copeland skb = skb_dequeue(&ar->htt.rx_msdus_q); 3418deba1b9eSRajkumar Manoharan if (!skb) 3419deba1b9eSRajkumar Manoharan break; 3420deba1b9eSRajkumar Manoharan ath10k_process_rx(ar, skb); 3421deba1b9eSRajkumar Manoharan quota++; 3422deba1b9eSRajkumar Manoharan 
	}

	return quota;
}

/* NAPI poll handler for HTT tx/rx completions.
 *
 * Returns the amount of budget consumed; returning the full budget signals
 * the NAPI core to reschedule the poll (used on rx errors or when more
 * data is pending).
 */
int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_tx_done tx_done = {};
	struct sk_buff_head tx_ind_q;
	struct sk_buff *skb;
	unsigned long flags;
	int quota = 0, done, ret;
	bool resched_napi = false;

	__skb_queue_head_init(&tx_ind_q);

	/* Process pending frames before dequeuing more data
	 * from hardware.
	 */
	quota = ath10k_htt_rx_deliver_msdu(ar, quota, budget);
	if (quota == budget) {
		resched_napi = true;
		goto exit;
	}

	/* Drain in-order rx indications queued by the t2h handler; the ring
	 * lock serializes against the refill/replenish path.
	 */
	while ((skb = skb_dequeue(&htt->rx_in_ord_compl_q))) {
		spin_lock_bh(&htt->rx_ring.lock);
		ret = ath10k_htt_rx_in_ord_ind(ar, skb);
		spin_unlock_bh(&htt->rx_ring.lock);

		dev_kfree_skb_any(skb);
		if (ret == -EIO) {
			resched_napi = true;
			goto exit;
		}
	}

	while (atomic_read(&htt->num_mpdus_ready)) {
		ret = ath10k_htt_rx_handle_amsdu(htt);
		if (ret == -EIO) {
			resched_napi = true;
			goto exit;
		}
		atomic_dec(&htt->num_mpdus_ready);
	}

	/* Deliver received data after processing data from hardware */
	quota = ath10k_htt_rx_deliver_msdu(ar, quota, budget);

	/* From NAPI documentation:
	 *  The napi poll() function may also process TX completions, in which
	 *  case if it processes the entire TX ring then it should count that
	 *  work as the rest of the budget.
	 */
	if ((quota < budget) && !kfifo_is_empty(&htt->txdone_fifo))
		quota = budget;

	/* kfifo_get: called only within txrx_tasklet so it's neatly serialized.
	 * From kfifo_get() documentation:
	 *  Note that with only one concurrent reader and one concurrent writer,
	 *  you don't need extra locking to use these macro.
	 */
	while (kfifo_get(&htt->txdone_fifo, &tx_done))
		ath10k_txrx_tx_unref(htt, &tx_done);

	ath10k_mac_tx_push_pending(ar);

	/* Splice the fetch-ind queue under its lock, then process the local
	 * list without holding it.
	 */
	spin_lock_irqsave(&htt->tx_fetch_ind_q.lock, flags);
	skb_queue_splice_init(&htt->tx_fetch_ind_q, &tx_ind_q);
	spin_unlock_irqrestore(&htt->tx_fetch_ind_q.lock, flags);

	while ((skb = __skb_dequeue(&tx_ind_q))) {
		ath10k_htt_rx_tx_fetch_ind(ar, skb);
		dev_kfree_skb_any(skb);
	}

exit:
	ath10k_htt_rx_msdu_buff_replenish(htt);
	/* In case of rx failure or more data to read, report budget
	 * to reschedule NAPI poll
	 */
	done = resched_napi ? budget : quota;

	return done;
}
EXPORT_SYMBOL(ath10k_htt_txrx_compl_task);

/* rx ring ops for targets with 32-bit rx descriptors/addresses */
static const struct ath10k_htt_rx_ops htt_rx_ops_32 = {
	.htt_get_rx_ring_size = ath10k_htt_get_rx_ring_size_32,
	.htt_config_paddrs_ring = ath10k_htt_config_paddrs_ring_32,
	.htt_set_paddrs_ring = ath10k_htt_set_paddrs_ring_32,
	.htt_get_vaddr_ring = ath10k_htt_get_vaddr_ring_32,
	.htt_reset_paddrs_ring = ath10k_htt_reset_paddrs_ring_32,
};

/* rx ring ops for targets with 64-bit rx descriptors/addresses */
static const struct ath10k_htt_rx_ops htt_rx_ops_64 = {
	.htt_get_rx_ring_size = ath10k_htt_get_rx_ring_size_64,
	.htt_config_paddrs_ring = ath10k_htt_config_paddrs_ring_64,
	.htt_set_paddrs_ring = ath10k_htt_set_paddrs_ring_64,
	.htt_get_vaddr_ring = ath10k_htt_get_vaddr_ring_64,
	.htt_reset_paddrs_ring = ath10k_htt_reset_paddrs_ring_64,
};

/* High-latency targets have no host-managed rx ring, so all ops are NULL */
static const struct ath10k_htt_rx_ops htt_rx_ops_hl = {
};

/* Select the rx ops table matching the target's bus/address width. */
void ath10k_htt_set_rx_ops(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;

	if (ar->dev_type == ATH10K_DEV_TYPE_HL)
		htt->rx_ops = &htt_rx_ops_hl;
	else if (ar->hw_params.target_64bit)
		htt->rx_ops = &htt_rx_ops_64;
	else
		htt->rx_ops = &htt_rx_ops_32;
}