15e3dd157SKalle Valo /*
25e3dd157SKalle Valo  * Copyright (c) 2005-2011 Atheros Communications Inc.
35e3dd157SKalle Valo  * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
45e3dd157SKalle Valo  *
55e3dd157SKalle Valo  * Permission to use, copy, modify, and/or distribute this software for any
65e3dd157SKalle Valo  * purpose with or without fee is hereby granted, provided that the above
75e3dd157SKalle Valo  * copyright notice and this permission notice appear in all copies.
85e3dd157SKalle Valo  *
95e3dd157SKalle Valo  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
105e3dd157SKalle Valo  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
115e3dd157SKalle Valo  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
125e3dd157SKalle Valo  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
135e3dd157SKalle Valo  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
145e3dd157SKalle Valo  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
155e3dd157SKalle Valo  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
165e3dd157SKalle Valo  */
175e3dd157SKalle Valo 
18edb8236dSMichal Kazior #include "core.h"
195e3dd157SKalle Valo #include "htc.h"
205e3dd157SKalle Valo #include "htt.h"
215e3dd157SKalle Valo #include "txrx.h"
225e3dd157SKalle Valo #include "debug.h"
23a9bf0506SKalle Valo #include "trace.h"
24aa5b4fbcSMichal Kazior #include "mac.h"
255e3dd157SKalle Valo 
265e3dd157SKalle Valo #include <linux/log2.h>
275e3dd157SKalle Valo 
28c545070eSMichal Kazior #define HTT_RX_RING_SIZE HTT_RX_RING_SIZE_MAX
29c545070eSMichal Kazior #define HTT_RX_RING_FILL_LEVEL (((HTT_RX_RING_SIZE) / 2) - 1)
305e3dd157SKalle Valo 
315e3dd157SKalle Valo /* when under memory pressure rx ring refill may fail and needs a retry */
325e3dd157SKalle Valo #define HTT_RX_RING_REFILL_RETRY_MS 50
335e3dd157SKalle Valo 
34f6dc2095SMichal Kazior static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);
356c5151a9SMichal Kazior static void ath10k_htt_txrx_compl_task(unsigned long ptr);
36f6dc2095SMichal Kazior 
/* Map a DMA address back to the rx skb that was posted at it.
 *
 * Used by the in-order (full rx reorder) rx path, where firmware reports
 * completed buffers by physical address rather than by ring index.  Walks
 * the rx_ring.skb_table hash bucket keyed by @paddr and returns the
 * matching skb.  A miss indicates host/firmware accounting disagreement;
 * it is reported once via WARN_ON_ONCE and NULL is returned.
 */
37c545070eSMichal Kazior static struct sk_buff *
38c545070eSMichal Kazior ath10k_htt_rx_find_skb_paddr(struct ath10k *ar, u32 paddr)
39c545070eSMichal Kazior {
40c545070eSMichal Kazior 	struct ath10k_skb_rxcb *rxcb;
41c545070eSMichal Kazior 
42c545070eSMichal Kazior 	hash_for_each_possible(ar->htt.rx_ring.skb_table, rxcb, hlist, paddr)
43c545070eSMichal Kazior 		if (rxcb->paddr == paddr)
44c545070eSMichal Kazior 			return ATH10K_RXCB_SKB(rxcb);
45c545070eSMichal Kazior 
46c545070eSMichal Kazior 	WARN_ON_ONCE(1);
47c545070eSMichal Kazior 	return NULL;
48c545070eSMichal Kazior }
49c545070eSMichal Kazior 
/* Release every rx buffer currently posted to the rx ring.
 *
 * Covers both rx modes: in-order mode tracks buffers in the skb_table
 * hash, ring-index mode tracks them in netbufs_ring.  Each skb is DMA
 * unmapped before being freed, then the bookkeeping (fill_cnt, hash
 * table, netbufs_ring) is reset so the ring can be refilled from scratch.
 *
 * NOTE(review): no lockdep assert here - callers presumably hold
 * rx_ring.lock or have exclusive access (teardown); confirm.
 */
505e3dd157SKalle Valo static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt)
515e3dd157SKalle Valo {
525e3dd157SKalle Valo 	struct sk_buff *skb;
53c545070eSMichal Kazior 	struct ath10k_skb_rxcb *rxcb;
54c545070eSMichal Kazior 	struct hlist_node *n;
555e3dd157SKalle Valo 	int i;
565e3dd157SKalle Valo 
57c545070eSMichal Kazior 	if (htt->rx_ring.in_ord_rx) {
58c545070eSMichal Kazior 		hash_for_each_safe(htt->rx_ring.skb_table, i, n, rxcb, hlist) {
59c545070eSMichal Kazior 			skb = ATH10K_RXCB_SKB(rxcb);
60c545070eSMichal Kazior 			dma_unmap_single(htt->ar->dev, rxcb->paddr,
61c545070eSMichal Kazior 					 skb->len + skb_tailroom(skb),
62c545070eSMichal Kazior 					 DMA_FROM_DEVICE);
63c545070eSMichal Kazior 			hash_del(&rxcb->hlist);
64c545070eSMichal Kazior 			dev_kfree_skb_any(skb);
65c545070eSMichal Kazior 		}
66c545070eSMichal Kazior 	} else {
67c545070eSMichal Kazior 		for (i = 0; i < htt->rx_ring.size; i++) {
685e3dd157SKalle Valo 			skb = htt->rx_ring.netbufs_ring[i];
69c545070eSMichal Kazior 			if (!skb)
70c545070eSMichal Kazior 				continue;
71c545070eSMichal Kazior 
72c545070eSMichal Kazior 			rxcb = ATH10K_SKB_RXCB(skb);
73c545070eSMichal Kazior 			dma_unmap_single(htt->ar->dev, rxcb->paddr,
745e3dd157SKalle Valo 					 skb->len + skb_tailroom(skb),
755e3dd157SKalle Valo 					 DMA_FROM_DEVICE);
765e3dd157SKalle Valo 			dev_kfree_skb_any(skb);
775e3dd157SKalle Valo 		}
78c545070eSMichal Kazior 	}
795e3dd157SKalle Valo 
805e3dd157SKalle Valo 	htt->rx_ring.fill_cnt = 0;
81c545070eSMichal Kazior 	hash_init(htt->rx_ring.skb_table);
82c545070eSMichal Kazior 	memset(htt->rx_ring.netbufs_ring, 0,
83c545070eSMichal Kazior 	       htt->rx_ring.size * sizeof(htt->rx_ring.netbufs_ring[0]));
845e3dd157SKalle Valo }
855e3dd157SKalle Valo 
/* Post up to @num fresh rx buffers to the ring, starting at the current
 * firmware-visible alloc index.
 *
 * For each buffer: allocate an skb, align its data to HTT_RX_DESC_ALIGN,
 * clear the rx descriptor attention word (so a stale MSDU_DONE bit can
 * never be mistaken for completed DMA), map the buffer for DMA and
 * publish its physical address in paddrs_ring.  In in-order mode the skb
 * is additionally inserted into the paddr hash table.
 *
 * Returns 0, or -ENOMEM if allocation/mapping failed part-way; buffers
 * posted before the failure remain posted and the alloc index is still
 * advanced to cover them.  Caller must hold rx_ring.lock (see
 * ath10k_htt_rx_ring_fill_n).
 */
865e3dd157SKalle Valo static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
875e3dd157SKalle Valo {
885e3dd157SKalle Valo 	struct htt_rx_desc *rx_desc;
89c545070eSMichal Kazior 	struct ath10k_skb_rxcb *rxcb;
905e3dd157SKalle Valo 	struct sk_buff *skb;
915e3dd157SKalle Valo 	dma_addr_t paddr;
925e3dd157SKalle Valo 	int ret = 0, idx;
935e3dd157SKalle Valo 
94c545070eSMichal Kazior 	/* The Full Rx Reorder firmware has no way of telling the host
95c545070eSMichal Kazior 	 * implicitly when it copied HTT Rx Ring buffers to MAC Rx Ring.
96c545070eSMichal Kazior 	 * To keep things simple make sure ring is always half empty. This
97c545070eSMichal Kazior 	 * guarantees there'll be no replenishment overruns possible.
98c545070eSMichal Kazior 	 */
99c545070eSMichal Kazior 	BUILD_BUG_ON(HTT_RX_RING_FILL_LEVEL >= HTT_RX_RING_SIZE / 2);
100c545070eSMichal Kazior 
1018cc7f26cSKalle Valo 	idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);
1025e3dd157SKalle Valo 	while (num > 0) {
1035e3dd157SKalle Valo 		skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN);
1045e3dd157SKalle Valo 		if (!skb) {
1055e3dd157SKalle Valo 			ret = -ENOMEM;
1065e3dd157SKalle Valo 			goto fail;
1075e3dd157SKalle Valo 		}
1085e3dd157SKalle Valo 
1095e3dd157SKalle Valo 		if (!IS_ALIGNED((unsigned long)skb->data, HTT_RX_DESC_ALIGN))
1105e3dd157SKalle Valo 			skb_pull(skb,
1115e3dd157SKalle Valo 				 PTR_ALIGN(skb->data, HTT_RX_DESC_ALIGN) -
1125e3dd157SKalle Valo 				 skb->data);
1135e3dd157SKalle Valo 
1145e3dd157SKalle Valo 		/* Clear rx_desc attention word before posting to Rx ring */
1155e3dd157SKalle Valo 		rx_desc = (struct htt_rx_desc *)skb->data;
1165e3dd157SKalle Valo 		rx_desc->attention.flags = __cpu_to_le32(0);
1175e3dd157SKalle Valo 
1185e3dd157SKalle Valo 		paddr = dma_map_single(htt->ar->dev, skb->data,
1195e3dd157SKalle Valo 				       skb->len + skb_tailroom(skb),
1205e3dd157SKalle Valo 				       DMA_FROM_DEVICE);
1215e3dd157SKalle Valo 
1225e3dd157SKalle Valo 		if (unlikely(dma_mapping_error(htt->ar->dev, paddr))) {
1235e3dd157SKalle Valo 			dev_kfree_skb_any(skb);
1245e3dd157SKalle Valo 			ret = -ENOMEM;
1255e3dd157SKalle Valo 			goto fail;
1265e3dd157SKalle Valo 		}
1275e3dd157SKalle Valo 
128c545070eSMichal Kazior 		rxcb = ATH10K_SKB_RXCB(skb);
129c545070eSMichal Kazior 		rxcb->paddr = paddr;
1305e3dd157SKalle Valo 		htt->rx_ring.netbufs_ring[idx] = skb;
1315e3dd157SKalle Valo 		htt->rx_ring.paddrs_ring[idx] = __cpu_to_le32(paddr);
1325e3dd157SKalle Valo 		htt->rx_ring.fill_cnt++;
1335e3dd157SKalle Valo 
134c545070eSMichal Kazior 		if (htt->rx_ring.in_ord_rx) {
135c545070eSMichal Kazior 			hash_add(htt->rx_ring.skb_table,
136c545070eSMichal Kazior 				 &ATH10K_SKB_RXCB(skb)->hlist,
137c545070eSMichal Kazior 				 (u32)paddr);
138c545070eSMichal Kazior 		}
139c545070eSMichal Kazior 
1405e3dd157SKalle Valo 		num--;
1415e3dd157SKalle Valo 		idx++;
1425e3dd157SKalle Valo 		idx &= htt->rx_ring.size_mask;
1435e3dd157SKalle Valo 	}
1445e3dd157SKalle Valo 
1455e3dd157SKalle Valo fail:
1465de6dfc8SVasanthakumar Thiagarajan 	/*
1475de6dfc8SVasanthakumar Thiagarajan 	 * Make sure the rx buffer is updated before available buffer
1485de6dfc8SVasanthakumar Thiagarajan 	 * index to avoid any potential rx ring corruption.
1495de6dfc8SVasanthakumar Thiagarajan 	 */
1505de6dfc8SVasanthakumar Thiagarajan 	mb();
1518cc7f26cSKalle Valo 	*htt->rx_ring.alloc_idx.vaddr = __cpu_to_le32(idx);
1525e3dd157SKalle Valo 	return ret;
1535e3dd157SKalle Valo }
1545e3dd157SKalle Valo 
/* Locking wrapper around __ath10k_htt_rx_ring_fill_n(): documents and
 * (with lockdep) enforces that rx_ring.lock is held by the caller.
 */
1555e3dd157SKalle Valo static int ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
1565e3dd157SKalle Valo {
1575e3dd157SKalle Valo 	lockdep_assert_held(&htt->rx_ring.lock);
1585e3dd157SKalle Valo 	return __ath10k_htt_rx_ring_fill_n(htt, num);
1595e3dd157SKalle Valo }
1605e3dd157SKalle Valo 
/* Top up the rx ring toward fill_level, but at most
 * ATH10K_HTT_MAX_NUM_REFILL buffers per invocation (rationale in the
 * comment below).  On -ENOMEM a retry timer is armed; if a deficit
 * remains after a successful partial refill the replenish tasklet is
 * rescheduled to continue progressively.  Takes rx_ring.lock itself.
 */
1615e3dd157SKalle Valo static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
1625e3dd157SKalle Valo {
1636e712d42SMichal Kazior 	int ret, num_deficit, num_to_fill;
1645e3dd157SKalle Valo 
1656e712d42SMichal Kazior 	/* Refilling the whole RX ring buffer proves to be a bad idea. The
1666e712d42SMichal Kazior 	 * reason is RX may take up significant amount of CPU cycles and starve
1676e712d42SMichal Kazior 	 * other tasks, e.g. TX on an ethernet device while acting as a bridge
1686e712d42SMichal Kazior 	 * with ath10k wlan interface. This ended up with very poor performance
1696e712d42SMichal Kazior 	 * once CPU the host system was overwhelmed with RX on ath10k.
1706e712d42SMichal Kazior 	 *
1716e712d42SMichal Kazior 	 * By limiting the number of refills the replenishing occurs
1726e712d42SMichal Kazior 	 * progressively. This in turns makes use of the fact tasklets are
1736e712d42SMichal Kazior 	 * processed in FIFO order. This means actual RX processing can starve
1746e712d42SMichal Kazior 	 * out refilling. If there's not enough buffers on RX ring FW will not
1756e712d42SMichal Kazior 	 * report RX until it is refilled with enough buffers. This
1766e712d42SMichal Kazior 	 * automatically balances load wrt to CPU power.
1776e712d42SMichal Kazior 	 *
1786e712d42SMichal Kazior 	 * This probably comes at a cost of lower maximum throughput but
1796e712d42SMichal Kazior 	 * improves the avarage and stability. */
1805e3dd157SKalle Valo 	spin_lock_bh(&htt->rx_ring.lock);
1816e712d42SMichal Kazior 	num_deficit = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
1826e712d42SMichal Kazior 	num_to_fill = min(ATH10K_HTT_MAX_NUM_REFILL, num_deficit);
1836e712d42SMichal Kazior 	num_deficit -= num_to_fill;
1845e3dd157SKalle Valo 	ret = ath10k_htt_rx_ring_fill_n(htt, num_to_fill);
1855e3dd157SKalle Valo 	if (ret == -ENOMEM) {
1865e3dd157SKalle Valo 		/*
1875e3dd157SKalle Valo 		 * Failed to fill it to the desired level -
1885e3dd157SKalle Valo 		 * we'll start a timer and try again next time.
1895e3dd157SKalle Valo 		 * As long as enough buffers are left in the ring for
1905e3dd157SKalle Valo 		 * another A-MPDU rx, no special recovery is needed.
1915e3dd157SKalle Valo 		 */
1925e3dd157SKalle Valo 		mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
1935e3dd157SKalle Valo 			  msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS));
1946e712d42SMichal Kazior 	} else if (num_deficit > 0) {
1956e712d42SMichal Kazior 		tasklet_schedule(&htt->rx_replenish_task);
1965e3dd157SKalle Valo 	}
1975e3dd157SKalle Valo 	spin_unlock_bh(&htt->rx_ring.lock);
1985e3dd157SKalle Valo }
1995e3dd157SKalle Valo 
/* Timer callback for rx_ring.refill_retry_timer: retries a replenish
 * that previously failed with -ENOMEM.  @arg is the struct ath10k_htt
 * pointer passed to setup_timer() in ath10k_htt_rx_alloc().
 */
2005e3dd157SKalle Valo static void ath10k_htt_rx_ring_refill_retry(unsigned long arg)
2015e3dd157SKalle Valo {
2025e3dd157SKalle Valo 	struct ath10k_htt *htt = (struct ath10k_htt *)arg;
203af762c0bSKalle Valo 
2045e3dd157SKalle Valo 	ath10k_htt_rx_msdu_buff_replenish(htt);
2055e3dd157SKalle Valo }
2065e3dd157SKalle Valo 
/* Fill the rx ring all the way up to fill_level in one go (unlike the
 * progressive replenish path).  If the fill fails, every buffer that was
 * posted is torn down again via ath10k_htt_rx_ring_free() so the ring is
 * left empty rather than partially populated.  Returns 0 or a negative
 * errno from the fill.
 */
207c545070eSMichal Kazior int ath10k_htt_rx_ring_refill(struct ath10k *ar)
2083e841fd0SMichal Kazior {
209c545070eSMichal Kazior 	struct ath10k_htt *htt = &ar->htt;
210c545070eSMichal Kazior 	int ret;
2113e841fd0SMichal Kazior 
212c545070eSMichal Kazior 	spin_lock_bh(&htt->rx_ring.lock);
213c545070eSMichal Kazior 	ret = ath10k_htt_rx_ring_fill_n(htt, (htt->rx_ring.fill_level -
214c545070eSMichal Kazior 					      htt->rx_ring.fill_cnt));
215c545070eSMichal Kazior 	spin_unlock_bh(&htt->rx_ring.lock);
2163e841fd0SMichal Kazior 
217c545070eSMichal Kazior 	if (ret)
218c545070eSMichal Kazior 		ath10k_htt_rx_ring_free(htt);
219c545070eSMichal Kazior 
220c545070eSMichal Kazior 	return ret;
2213e841fd0SMichal Kazior }
2223e841fd0SMichal Kazior 
/* Tear down the HTT rx side: stop the refill retry timer and the
 * replenish/completion tasklets, drop any queued completion skbs, free
 * all posted rx buffers, then release the coherent DMA areas for the
 * paddr ring and the alloc index, and finally the netbufs_ring array.
 * Order matters: deferred work is quiesced before the ring memory it
 * touches is freed.
 */
22395bf21f9SMichal Kazior void ath10k_htt_rx_free(struct ath10k_htt *htt)
2245e3dd157SKalle Valo {
2255e3dd157SKalle Valo 	del_timer_sync(&htt->rx_ring.refill_retry_timer);
2266e712d42SMichal Kazior 	tasklet_kill(&htt->rx_replenish_task);
2276c5151a9SMichal Kazior 	tasklet_kill(&htt->txrx_compl_task);
2286c5151a9SMichal Kazior 
2296c5151a9SMichal Kazior 	skb_queue_purge(&htt->tx_compl_q);
2306c5151a9SMichal Kazior 	skb_queue_purge(&htt->rx_compl_q);
231c545070eSMichal Kazior 	skb_queue_purge(&htt->rx_in_ord_compl_q);
2325e3dd157SKalle Valo 
233c545070eSMichal Kazior 	ath10k_htt_rx_ring_free(htt);
2345e3dd157SKalle Valo 
2355e3dd157SKalle Valo 	dma_free_coherent(htt->ar->dev,
2365e3dd157SKalle Valo 			  (htt->rx_ring.size *
2375e3dd157SKalle Valo 			   sizeof(htt->rx_ring.paddrs_ring)),
2385e3dd157SKalle Valo 			  htt->rx_ring.paddrs_ring,
2395e3dd157SKalle Valo 			  htt->rx_ring.base_paddr);
2405e3dd157SKalle Valo 
2415e3dd157SKalle Valo 	dma_free_coherent(htt->ar->dev,
2425e3dd157SKalle Valo 			  sizeof(*htt->rx_ring.alloc_idx.vaddr),
2435e3dd157SKalle Valo 			  htt->rx_ring.alloc_idx.vaddr,
2445e3dd157SKalle Valo 			  htt->rx_ring.alloc_idx.paddr);
2455e3dd157SKalle Valo 
2465e3dd157SKalle Valo 	kfree(htt->rx_ring.netbufs_ring);
2475e3dd157SKalle Valo }
2485e3dd157SKalle Valo 
/* Pop the next rx buffer at the software read index (ring-index rx mode).
 *
 * Clears the ring slot, advances sw_rd_idx (masked to ring size),
 * decrements fill_cnt and DMA-unmaps the skb so the CPU may read it.
 * Returns NULL with a warning if the ring is empty.  Caller must hold
 * rx_ring.lock.
 */
2495e3dd157SKalle Valo static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
2505e3dd157SKalle Valo {
2517aa7a72aSMichal Kazior 	struct ath10k *ar = htt->ar;
2525e3dd157SKalle Valo 	int idx;
2535e3dd157SKalle Valo 	struct sk_buff *msdu;
2545e3dd157SKalle Valo 
25545967089SMichal Kazior 	lockdep_assert_held(&htt->rx_ring.lock);
2565e3dd157SKalle Valo 
2578d60ee87SMichal Kazior 	if (htt->rx_ring.fill_cnt == 0) {
2587aa7a72aSMichal Kazior 		ath10k_warn(ar, "tried to pop sk_buff from an empty rx ring\n");
2598d60ee87SMichal Kazior 		return NULL;
2608d60ee87SMichal Kazior 	}
2615e3dd157SKalle Valo 
2625e3dd157SKalle Valo 	idx = htt->rx_ring.sw_rd_idx.msdu_payld;
2635e3dd157SKalle Valo 	msdu = htt->rx_ring.netbufs_ring[idx];
2643e841fd0SMichal Kazior 	htt->rx_ring.netbufs_ring[idx] = NULL;
265c545070eSMichal Kazior 	htt->rx_ring.paddrs_ring[idx] = 0;
2665e3dd157SKalle Valo 
2675e3dd157SKalle Valo 	idx++;
2685e3dd157SKalle Valo 	idx &= htt->rx_ring.size_mask;
2695e3dd157SKalle Valo 	htt->rx_ring.sw_rd_idx.msdu_payld = idx;
2705e3dd157SKalle Valo 	htt->rx_ring.fill_cnt--;
2715e3dd157SKalle Valo 
2724de02806SMichal Kazior 	dma_unmap_single(htt->ar->dev,
2738582bf3bSMichal Kazior 			 ATH10K_SKB_RXCB(msdu)->paddr,
2744de02806SMichal Kazior 			 msdu->len + skb_tailroom(msdu),
2754de02806SMichal Kazior 			 DMA_FROM_DEVICE);
2764de02806SMichal Kazior 	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
2774de02806SMichal Kazior 			msdu->data, msdu->len + skb_tailroom(msdu));
2784de02806SMichal Kazior 
2795e3dd157SKalle Valo 	return msdu;
2805e3dd157SKalle Valo }
2815e3dd157SKalle Valo 
/* Pop one A-MSDU's worth of rx buffers off the ring into @amsdu.
 *
 * MSDUs are collected until the descriptor's LAST_MSDU flag is seen.
 * Each leading buffer carries an htt_rx_desc; one byte of the per-MSDU
 * firmware rx descriptor stream (@fw_desc/@fw_desc_len) is consumed per
 * MSDU (or info0 is zeroed when the stream is exhausted - oversized
 * A-MSDU case, see comment below).  MSDUs longer than one rx buffer pull
 * additional chained buffers, which carry payload only, no rx desc.
 *
 * On fatal error (-ENOENT empty ring, -EIO incomplete DMA) @amsdu is
 * purged.  Caller must hold rx_ring.lock and is responsible for
 * replenishing the ring afterwards (rationale at the end of the body).
 */
282d84dd60fSJanusz Dziedzic /* return: < 0 fatal error, 0 - non chained msdu, 1 chained msdu */
2835e3dd157SKalle Valo static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
2845e3dd157SKalle Valo 				   u8 **fw_desc, int *fw_desc_len,
285f0e2770fSMichal Kazior 				   struct sk_buff_head *amsdu)
2865e3dd157SKalle Valo {
2877aa7a72aSMichal Kazior 	struct ath10k *ar = htt->ar;
2885e3dd157SKalle Valo 	int msdu_len, msdu_chaining = 0;
2899aa505d2SMichal Kazior 	struct sk_buff *msdu;
2905e3dd157SKalle Valo 	struct htt_rx_desc *rx_desc;
2915e3dd157SKalle Valo 
29245967089SMichal Kazior 	lockdep_assert_held(&htt->rx_ring.lock);
29345967089SMichal Kazior 
2949aa505d2SMichal Kazior 	for (;;) {
2955e3dd157SKalle Valo 		int last_msdu, msdu_len_invalid, msdu_chained;
2965e3dd157SKalle Valo 
2979aa505d2SMichal Kazior 		msdu = ath10k_htt_rx_netbuf_pop(htt);
2989aa505d2SMichal Kazior 		if (!msdu) {
2999aa505d2SMichal Kazior 			__skb_queue_purge(amsdu);
300e0bd7513SMichal Kazior 			return -ENOENT;
3019aa505d2SMichal Kazior 		}
3029aa505d2SMichal Kazior 
3039aa505d2SMichal Kazior 		__skb_queue_tail(amsdu, msdu);
3049aa505d2SMichal Kazior 
3055e3dd157SKalle Valo 		rx_desc = (struct htt_rx_desc *)msdu->data;
3065e3dd157SKalle Valo 
3075e3dd157SKalle Valo 		/* FIXME: we must report msdu payload since this is what caller
3085e3dd157SKalle Valo 		 *        expects now */
3095e3dd157SKalle Valo 		skb_put(msdu, offsetof(struct htt_rx_desc, msdu_payload));
3105e3dd157SKalle Valo 		skb_pull(msdu, offsetof(struct htt_rx_desc, msdu_payload));
3115e3dd157SKalle Valo 
3125e3dd157SKalle Valo 		/*
3135e3dd157SKalle Valo 		 * Sanity check - confirm the HW is finished filling in the
3145e3dd157SKalle Valo 		 * rx data.
3155e3dd157SKalle Valo 		 * If the HW and SW are working correctly, then it's guaranteed
3165e3dd157SKalle Valo 		 * that the HW's MAC DMA is done before this point in the SW.
3175e3dd157SKalle Valo 		 * To prevent the case that we handle a stale Rx descriptor,
3185e3dd157SKalle Valo 		 * just assert for now until we have a way to recover.
3195e3dd157SKalle Valo 		 */
3205e3dd157SKalle Valo 		if (!(__le32_to_cpu(rx_desc->attention.flags)
3215e3dd157SKalle Valo 				& RX_ATTENTION_FLAGS_MSDU_DONE)) {
3229aa505d2SMichal Kazior 			__skb_queue_purge(amsdu);
323e0bd7513SMichal Kazior 			return -EIO;
3245e3dd157SKalle Valo 		}
3255e3dd157SKalle Valo 
3265e3dd157SKalle Valo 		/*
3275e3dd157SKalle Valo 		 * Copy the FW rx descriptor for this MSDU from the rx
3285e3dd157SKalle Valo 		 * indication message into the MSDU's netbuf. HL uses the
3295e3dd157SKalle Valo 		 * same rx indication message definition as LL, and simply
3305e3dd157SKalle Valo 		 * appends new info (fields from the HW rx desc, and the
3315e3dd157SKalle Valo 		 * MSDU payload itself). So, the offset into the rx
3325e3dd157SKalle Valo 		 * indication message only has to account for the standard
3335e3dd157SKalle Valo 		 * offset of the per-MSDU FW rx desc info within the
3345e3dd157SKalle Valo 		 * message, and how many bytes of the per-MSDU FW rx desc
3355e3dd157SKalle Valo 		 * info have already been consumed. (And the endianness of
3365e3dd157SKalle Valo 		 * the host, since for a big-endian host, the rx ind
3375e3dd157SKalle Valo 		 * message contents, including the per-MSDU rx desc bytes,
3385e3dd157SKalle Valo 		 * were byteswapped during upload.)
3395e3dd157SKalle Valo 		 */
3405e3dd157SKalle Valo 		if (*fw_desc_len > 0) {
3415e3dd157SKalle Valo 			rx_desc->fw_desc.info0 = **fw_desc;
3425e3dd157SKalle Valo 			/*
3435e3dd157SKalle Valo 			 * The target is expected to only provide the basic
3445e3dd157SKalle Valo 			 * per-MSDU rx descriptors. Just to be sure, verify
3455e3dd157SKalle Valo 			 * that the target has not attached extension data
3465e3dd157SKalle Valo 			 * (e.g. LRO flow ID).
3475e3dd157SKalle Valo 			 */
3485e3dd157SKalle Valo 
3495e3dd157SKalle Valo 			/* or more, if there's extension data */
3505e3dd157SKalle Valo 			(*fw_desc)++;
3515e3dd157SKalle Valo 			(*fw_desc_len)--;
3525e3dd157SKalle Valo 		} else {
3535e3dd157SKalle Valo 			/*
3545e3dd157SKalle Valo 			 * When an oversized AMSDU happened, FW will lost
3555e3dd157SKalle Valo 			 * some of MSDU status - in this case, the FW
3565e3dd157SKalle Valo 			 * descriptors provided will be less than the
3575e3dd157SKalle Valo 			 * actual MSDUs inside this MPDU. Mark the FW
3585e3dd157SKalle Valo 			 * descriptors so that it will still deliver to
3595e3dd157SKalle Valo 			 * upper stack, if no CRC error for this MPDU.
3605e3dd157SKalle Valo 			 *
3615e3dd157SKalle Valo 			 * FIX THIS - the FW descriptors are actually for
3625e3dd157SKalle Valo 			 * MSDUs in the end of this A-MSDU instead of the
3635e3dd157SKalle Valo 			 * beginning.
3645e3dd157SKalle Valo 			 */
3655e3dd157SKalle Valo 			rx_desc->fw_desc.info0 = 0;
3665e3dd157SKalle Valo 		}
3675e3dd157SKalle Valo 
3685e3dd157SKalle Valo 		msdu_len_invalid = !!(__le32_to_cpu(rx_desc->attention.flags)
3695e3dd157SKalle Valo 					& (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR |
3705e3dd157SKalle Valo 					   RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR));
3715e3dd157SKalle Valo 		msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.info0),
3725e3dd157SKalle Valo 			      RX_MSDU_START_INFO0_MSDU_LENGTH);
3735e3dd157SKalle Valo 		msdu_chained = rx_desc->frag_info.ring2_more_count;
3745e3dd157SKalle Valo 
3755e3dd157SKalle Valo 		if (msdu_len_invalid)
3765e3dd157SKalle Valo 			msdu_len = 0;
3775e3dd157SKalle Valo 
3785e3dd157SKalle Valo 		skb_trim(msdu, 0);
3795e3dd157SKalle Valo 		skb_put(msdu, min(msdu_len, HTT_RX_MSDU_SIZE));
3805e3dd157SKalle Valo 		msdu_len -= msdu->len;
3815e3dd157SKalle Valo 
3829aa505d2SMichal Kazior 		/* Note: Chained buffers do not contain rx descriptor */
3835e3dd157SKalle Valo 		while (msdu_chained--) {
3849aa505d2SMichal Kazior 			msdu = ath10k_htt_rx_netbuf_pop(htt);
3859aa505d2SMichal Kazior 			if (!msdu) {
3869aa505d2SMichal Kazior 				__skb_queue_purge(amsdu);
387e0bd7513SMichal Kazior 				return -ENOENT;
388b30595aeSMichal Kazior 			}
389b30595aeSMichal Kazior 
3909aa505d2SMichal Kazior 			__skb_queue_tail(amsdu, msdu);
3919aa505d2SMichal Kazior 			skb_trim(msdu, 0);
3929aa505d2SMichal Kazior 			skb_put(msdu, min(msdu_len, HTT_RX_BUF_SIZE));
3939aa505d2SMichal Kazior 			msdu_len -= msdu->len;
394ede9c8e0SMichal Kazior 			msdu_chaining = 1;
3955e3dd157SKalle Valo 		}
3965e3dd157SKalle Valo 
3975e3dd157SKalle Valo 		last_msdu = __le32_to_cpu(rx_desc->msdu_end.info0) &
3985e3dd157SKalle Valo 				RX_MSDU_END_INFO0_LAST_MSDU;
3995e3dd157SKalle Valo 
400b04e204fSMichal Kazior 		trace_ath10k_htt_rx_desc(ar, &rx_desc->attention,
401a0883cf7SRajkumar Manoharan 					 sizeof(*rx_desc) - sizeof(u32));
4029aa505d2SMichal Kazior 
4039aa505d2SMichal Kazior 		if (last_msdu)
4045e3dd157SKalle Valo 			break;
405d8bb26b9SKalle Valo 	}
406d8bb26b9SKalle Valo 
4079aa505d2SMichal Kazior 	if (skb_queue_empty(amsdu))
408d84dd60fSJanusz Dziedzic 		msdu_chaining = -1;
409d84dd60fSJanusz Dziedzic 
4105e3dd157SKalle Valo 	/*
4115e3dd157SKalle Valo 	 * Don't refill the ring yet.
4125e3dd157SKalle Valo 	 *
4135e3dd157SKalle Valo 	 * First, the elements popped here are still in use - it is not
4145e3dd157SKalle Valo 	 * safe to overwrite them until the matching call to
4155e3dd157SKalle Valo 	 * mpdu_desc_list_next. Second, for efficiency it is preferable to
4165e3dd157SKalle Valo 	 * refill the rx ring with 1 PPDU's worth of rx buffers (something
4175e3dd157SKalle Valo 	 * like 32 x 3 buffers), rather than one MPDU's worth of rx buffers
4185e3dd157SKalle Valo 	 * (something like 3 buffers). Consequently, we'll rely on the txrx
4195e3dd157SKalle Valo 	 * SW to tell us when it is done pulling all the PPDU's rx buffers
4205e3dd157SKalle Valo 	 * out of the rx ring, and then refill it just once.
4215e3dd157SKalle Valo 	 */
4225e3dd157SKalle Valo 
4235e3dd157SKalle Valo 	return msdu_chaining;
4245e3dd157SKalle Valo }
4255e3dd157SKalle Valo 
/* Tasklet handler for htt->rx_replenish_task: continues a progressive
 * ring refill in softirq context.  @ptr is the struct ath10k_htt pointer
 * registered with tasklet_init() in ath10k_htt_rx_alloc().
 */
4266e712d42SMichal Kazior static void ath10k_htt_rx_replenish_task(unsigned long ptr)
4276e712d42SMichal Kazior {
4286e712d42SMichal Kazior 	struct ath10k_htt *htt = (struct ath10k_htt *)ptr;
429af762c0bSKalle Valo 
4306e712d42SMichal Kazior 	ath10k_htt_rx_msdu_buff_replenish(htt);
4316e712d42SMichal Kazior }
4326e712d42SMichal Kazior 
/* Pop a specific rx buffer, identified by its DMA address (in-order rx
 * mode counterpart of ath10k_htt_rx_netbuf_pop).
 *
 * Looks the skb up in the paddr hash, removes it from the table,
 * decrements fill_cnt and DMA-unmaps the buffer for CPU access.
 * Returns NULL if the address is unknown.  Caller must hold
 * rx_ring.lock.
 */
433c545070eSMichal Kazior static struct sk_buff *ath10k_htt_rx_pop_paddr(struct ath10k_htt *htt,
434c545070eSMichal Kazior 					       u32 paddr)
435c545070eSMichal Kazior {
436c545070eSMichal Kazior 	struct ath10k *ar = htt->ar;
437c545070eSMichal Kazior 	struct ath10k_skb_rxcb *rxcb;
438c545070eSMichal Kazior 	struct sk_buff *msdu;
439c545070eSMichal Kazior 
440c545070eSMichal Kazior 	lockdep_assert_held(&htt->rx_ring.lock);
441c545070eSMichal Kazior 
442c545070eSMichal Kazior 	msdu = ath10k_htt_rx_find_skb_paddr(ar, paddr);
443c545070eSMichal Kazior 	if (!msdu)
444c545070eSMichal Kazior 		return NULL;
445c545070eSMichal Kazior 
446c545070eSMichal Kazior 	rxcb = ATH10K_SKB_RXCB(msdu);
447c545070eSMichal Kazior 	hash_del(&rxcb->hlist);
448c545070eSMichal Kazior 	htt->rx_ring.fill_cnt--;
449c545070eSMichal Kazior 
450c545070eSMichal Kazior 	dma_unmap_single(htt->ar->dev, rxcb->paddr,
451c545070eSMichal Kazior 			 msdu->len + skb_tailroom(msdu),
452c545070eSMichal Kazior 			 DMA_FROM_DEVICE);
453c545070eSMichal Kazior 	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
454c545070eSMichal Kazior 			msdu->data, msdu->len + skb_tailroom(msdu));
455c545070eSMichal Kazior 
456c545070eSMichal Kazior 	return msdu;
457c545070eSMichal Kazior }
458c545070eSMichal Kazior 
/* Pop all MSDUs named by an in-order rx indication (@ev) into @list.
 *
 * For each per-MSDU descriptor in the event: pop the skb by its physical
 * address and, unless the frame is an "offload" frame (which carries no
 * htt_rx_desc), strip the rx descriptor, set the payload length from the
 * event and verify the MSDU_DONE attention bit.  Returns 0 on success,
 * -ENOENT if an address cannot be resolved (list purged), -EIO if a
 * buffer's DMA was incomplete.  Caller must hold rx_ring.lock.
 *
 * NOTE(review): unlike the -ENOENT path, the -EIO path returns without
 * purging @list - presumably the caller cleans up; confirm.
 */
459c545070eSMichal Kazior static int ath10k_htt_rx_pop_paddr_list(struct ath10k_htt *htt,
460c545070eSMichal Kazior 					struct htt_rx_in_ord_ind *ev,
461c545070eSMichal Kazior 					struct sk_buff_head *list)
462c545070eSMichal Kazior {
463c545070eSMichal Kazior 	struct ath10k *ar = htt->ar;
464c545070eSMichal Kazior 	struct htt_rx_in_ord_msdu_desc *msdu_desc = ev->msdu_descs;
465c545070eSMichal Kazior 	struct htt_rx_desc *rxd;
466c545070eSMichal Kazior 	struct sk_buff *msdu;
467c545070eSMichal Kazior 	int msdu_count;
468c545070eSMichal Kazior 	bool is_offload;
469c545070eSMichal Kazior 	u32 paddr;
470c545070eSMichal Kazior 
471c545070eSMichal Kazior 	lockdep_assert_held(&htt->rx_ring.lock);
472c545070eSMichal Kazior 
473c545070eSMichal Kazior 	msdu_count = __le16_to_cpu(ev->msdu_count);
474c545070eSMichal Kazior 	is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);
475c545070eSMichal Kazior 
476c545070eSMichal Kazior 	while (msdu_count--) {
477c545070eSMichal Kazior 		paddr = __le32_to_cpu(msdu_desc->msdu_paddr);
478c545070eSMichal Kazior 
479c545070eSMichal Kazior 		msdu = ath10k_htt_rx_pop_paddr(htt, paddr);
480c545070eSMichal Kazior 		if (!msdu) {
481c545070eSMichal Kazior 			__skb_queue_purge(list);
482c545070eSMichal Kazior 			return -ENOENT;
483c545070eSMichal Kazior 		}
484c545070eSMichal Kazior 
485c545070eSMichal Kazior 		__skb_queue_tail(list, msdu);
486c545070eSMichal Kazior 
487c545070eSMichal Kazior 		if (!is_offload) {
488c545070eSMichal Kazior 			rxd = (void *)msdu->data;
489c545070eSMichal Kazior 
490c545070eSMichal Kazior 			trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));
491c545070eSMichal Kazior 
492c545070eSMichal Kazior 			skb_put(msdu, sizeof(*rxd));
493c545070eSMichal Kazior 			skb_pull(msdu, sizeof(*rxd));
494c545070eSMichal Kazior 			skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len));
495c545070eSMichal Kazior 
496c545070eSMichal Kazior 			if (!(__le32_to_cpu(rxd->attention.flags) &
497c545070eSMichal Kazior 			      RX_ATTENTION_FLAGS_MSDU_DONE)) {
498c545070eSMichal Kazior 				ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n");
499c545070eSMichal Kazior 				return -EIO;
500c545070eSMichal Kazior 			}
501c545070eSMichal Kazior 		}
502c545070eSMichal Kazior 
503c545070eSMichal Kazior 		msdu_desc++;
504c545070eSMichal Kazior 	}
505c545070eSMichal Kazior 
506c545070eSMichal Kazior 	return 0;
507c545070eSMichal Kazior }
508c545070eSMichal Kazior 
/* Allocate and initialize the HTT rx machinery.
 *
 * Sets ring size/fill level, allocates the netbufs_ring shadow array,
 * the coherent DMA paddr ring and the shared alloc index word, arms the
 * refill retry timer, initializes the ring lock, completion queues and
 * the replenish/txrx-completion tasklets.  Returns 0, -EINVAL for a
 * non-power-of-2 ring size, or -ENOMEM with all partial allocations
 * unwound via the goto ladder.
 */
50995bf21f9SMichal Kazior int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
5105e3dd157SKalle Valo {
5117aa7a72aSMichal Kazior 	struct ath10k *ar = htt->ar;
5125e3dd157SKalle Valo 	dma_addr_t paddr;
5135e3dd157SKalle Valo 	void *vaddr;
514bd8bdbb6SKalle Valo 	size_t size;
5155e3dd157SKalle Valo 	struct timer_list *timer = &htt->rx_ring.refill_retry_timer;
5165e3dd157SKalle Valo 
51751fc7d74SMichal Kazior 	htt->rx_confused = false;
51851fc7d74SMichal Kazior 
519fe2407a8SMichal Kazior 	/* XXX: The fill level could be changed during runtime in response to
520fe2407a8SMichal Kazior 	 * the host processing latency. Is this really worth it?
521fe2407a8SMichal Kazior 	 */
522fe2407a8SMichal Kazior 	htt->rx_ring.size = HTT_RX_RING_SIZE;
523fe2407a8SMichal Kazior 	htt->rx_ring.size_mask = htt->rx_ring.size - 1;
524fe2407a8SMichal Kazior 	htt->rx_ring.fill_level = HTT_RX_RING_FILL_LEVEL;
525fe2407a8SMichal Kazior 
5265e3dd157SKalle Valo 	if (!is_power_of_2(htt->rx_ring.size)) {
5277aa7a72aSMichal Kazior 		ath10k_warn(ar, "htt rx ring size is not power of 2\n");
5285e3dd157SKalle Valo 		return -EINVAL;
5295e3dd157SKalle Valo 	}
5305e3dd157SKalle Valo 
5315e3dd157SKalle Valo 	htt->rx_ring.netbufs_ring =
5323e841fd0SMichal Kazior 		kzalloc(htt->rx_ring.size * sizeof(struct sk_buff *),
5335e3dd157SKalle Valo 			GFP_KERNEL);
5345e3dd157SKalle Valo 	if (!htt->rx_ring.netbufs_ring)
5355e3dd157SKalle Valo 		goto err_netbuf;
5365e3dd157SKalle Valo 
537bd8bdbb6SKalle Valo 	size = htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring);
538bd8bdbb6SKalle Valo 
539bd8bdbb6SKalle Valo 	vaddr = dma_alloc_coherent(htt->ar->dev, size, &paddr, GFP_DMA);
5405e3dd157SKalle Valo 	if (!vaddr)
5415e3dd157SKalle Valo 		goto err_dma_ring;
5425e3dd157SKalle Valo 
5435e3dd157SKalle Valo 	htt->rx_ring.paddrs_ring = vaddr;
5445e3dd157SKalle Valo 	htt->rx_ring.base_paddr = paddr;
5455e3dd157SKalle Valo 
5465e3dd157SKalle Valo 	vaddr = dma_alloc_coherent(htt->ar->dev,
5475e3dd157SKalle Valo 				   sizeof(*htt->rx_ring.alloc_idx.vaddr),
5485e3dd157SKalle Valo 				   &paddr, GFP_DMA);
5495e3dd157SKalle Valo 	if (!vaddr)
5505e3dd157SKalle Valo 		goto err_dma_idx;
5515e3dd157SKalle Valo 
5525e3dd157SKalle Valo 	htt->rx_ring.alloc_idx.vaddr = vaddr;
5535e3dd157SKalle Valo 	htt->rx_ring.alloc_idx.paddr = paddr;
	/* NOTE(review): sw_rd_idx.msdu_payld is set to size_mask here and
	 * then to 0 further below; the later assignment wins - confirm
	 * which value is intended.
	 */
554c545070eSMichal Kazior 	htt->rx_ring.sw_rd_idx.msdu_payld = htt->rx_ring.size_mask;
5555e3dd157SKalle Valo 	*htt->rx_ring.alloc_idx.vaddr = 0;
5565e3dd157SKalle Valo 
5575e3dd157SKalle Valo 	/* Initialize the Rx refill retry timer */
5585e3dd157SKalle Valo 	setup_timer(timer, ath10k_htt_rx_ring_refill_retry, (unsigned long)htt);
5595e3dd157SKalle Valo 
5605e3dd157SKalle Valo 	spin_lock_init(&htt->rx_ring.lock);
5615e3dd157SKalle Valo 
5625e3dd157SKalle Valo 	htt->rx_ring.fill_cnt = 0;
563c545070eSMichal Kazior 	htt->rx_ring.sw_rd_idx.msdu_payld = 0;
564c545070eSMichal Kazior 	hash_init(htt->rx_ring.skb_table);
5655e3dd157SKalle Valo 
5666e712d42SMichal Kazior 	tasklet_init(&htt->rx_replenish_task, ath10k_htt_rx_replenish_task,
5676e712d42SMichal Kazior 		     (unsigned long)htt);
5686e712d42SMichal Kazior 
5696c5151a9SMichal Kazior 	skb_queue_head_init(&htt->tx_compl_q);
5706c5151a9SMichal Kazior 	skb_queue_head_init(&htt->rx_compl_q);
571c545070eSMichal Kazior 	skb_queue_head_init(&htt->rx_in_ord_compl_q);
5726c5151a9SMichal Kazior 
5736c5151a9SMichal Kazior 	tasklet_init(&htt->txrx_compl_task, ath10k_htt_txrx_compl_task,
5746c5151a9SMichal Kazior 		     (unsigned long)htt);
5756c5151a9SMichal Kazior 
5767aa7a72aSMichal Kazior 	ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
5775e3dd157SKalle Valo 		   htt->rx_ring.size, htt->rx_ring.fill_level);
5785e3dd157SKalle Valo 	return 0;
5795e3dd157SKalle Valo 
5805e3dd157SKalle Valo err_dma_idx:
5815e3dd157SKalle Valo 	dma_free_coherent(htt->ar->dev,
5825e3dd157SKalle Valo 			  (htt->rx_ring.size *
5835e3dd157SKalle Valo 			   sizeof(htt->rx_ring.paddrs_ring)),
5845e3dd157SKalle Valo 			  htt->rx_ring.paddrs_ring,
5855e3dd157SKalle Valo 			  htt->rx_ring.base_paddr);
5865e3dd157SKalle Valo err_dma_ring:
5875e3dd157SKalle Valo 	kfree(htt->rx_ring.netbufs_ring);
5885e3dd157SKalle Valo err_netbuf:
5895e3dd157SKalle Valo 	return -ENOMEM;
5905e3dd157SKalle Valo }
5915e3dd157SKalle Valo 
/* Return the length in bytes of the cipher's per-frame header (IV/PN)
 * that follows the 802.11 header for the given encryption @type.
 * Unsupported ciphers (WEP128, WAPI) yield a warning and 0.
 */
5927aa7a72aSMichal Kazior static int ath10k_htt_rx_crypto_param_len(struct ath10k *ar,
5937aa7a72aSMichal Kazior 					  enum htt_rx_mpdu_encrypt_type type)
5945e3dd157SKalle Valo {
5955e3dd157SKalle Valo 	switch (type) {
596890d3b2aSMichal Kazior 	case HTT_RX_MPDU_ENCRYPT_NONE:
597890d3b2aSMichal Kazior 		return 0;
5985e3dd157SKalle Valo 	case HTT_RX_MPDU_ENCRYPT_WEP40:
5995e3dd157SKalle Valo 	case HTT_RX_MPDU_ENCRYPT_WEP104:
600890d3b2aSMichal Kazior 		return IEEE80211_WEP_IV_LEN;
6015e3dd157SKalle Valo 	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
6025e3dd157SKalle Valo 	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
603890d3b2aSMichal Kazior 		return IEEE80211_TKIP_IV_LEN;
6045e3dd157SKalle Valo 	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
605890d3b2aSMichal Kazior 		return IEEE80211_CCMP_HDR_LEN;
606890d3b2aSMichal Kazior 	case HTT_RX_MPDU_ENCRYPT_WEP128:
607890d3b2aSMichal Kazior 	case HTT_RX_MPDU_ENCRYPT_WAPI:
608890d3b2aSMichal Kazior 		break;
609890d3b2aSMichal Kazior 	}
610890d3b2aSMichal Kazior 
611890d3b2aSMichal Kazior 	ath10k_warn(ar, "unsupported encryption type %d\n", type);
6125e3dd157SKalle Valo 	return 0;
6135e3dd157SKalle Valo }
6145e3dd157SKalle Valo 
615890d3b2aSMichal Kazior #define MICHAEL_MIC_LEN 8
6165e3dd157SKalle Valo 
6177aa7a72aSMichal Kazior static int ath10k_htt_rx_crypto_tail_len(struct ath10k *ar,
6187aa7a72aSMichal Kazior 					 enum htt_rx_mpdu_encrypt_type type)
6195e3dd157SKalle Valo {
6205e3dd157SKalle Valo 	switch (type) {
6215e3dd157SKalle Valo 	case HTT_RX_MPDU_ENCRYPT_NONE:
622890d3b2aSMichal Kazior 		return 0;
6235e3dd157SKalle Valo 	case HTT_RX_MPDU_ENCRYPT_WEP40:
6245e3dd157SKalle Valo 	case HTT_RX_MPDU_ENCRYPT_WEP104:
625890d3b2aSMichal Kazior 		return IEEE80211_WEP_ICV_LEN;
6265e3dd157SKalle Valo 	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
6275e3dd157SKalle Valo 	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
628890d3b2aSMichal Kazior 		return IEEE80211_TKIP_ICV_LEN;
6295e3dd157SKalle Valo 	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
630890d3b2aSMichal Kazior 		return IEEE80211_CCMP_MIC_LEN;
631890d3b2aSMichal Kazior 	case HTT_RX_MPDU_ENCRYPT_WEP128:
632890d3b2aSMichal Kazior 	case HTT_RX_MPDU_ENCRYPT_WAPI:
633890d3b2aSMichal Kazior 		break;
6345e3dd157SKalle Valo 	}
6355e3dd157SKalle Valo 
636890d3b2aSMichal Kazior 	ath10k_warn(ar, "unsupported encryption type %d\n", type);
6375e3dd157SKalle Valo 	return 0;
6385e3dd157SKalle Valo }
6395e3dd157SKalle Valo 
640f6dc2095SMichal Kazior struct rfc1042_hdr {
641f6dc2095SMichal Kazior 	u8 llc_dsap;
642f6dc2095SMichal Kazior 	u8 llc_ssap;
643f6dc2095SMichal Kazior 	u8 llc_ctrl;
644f6dc2095SMichal Kazior 	u8 snap_oui[3];
645f6dc2095SMichal Kazior 	__be16 snap_type;
646f6dc2095SMichal Kazior } __packed;
647f6dc2095SMichal Kazior 
648f6dc2095SMichal Kazior struct amsdu_subframe_hdr {
649f6dc2095SMichal Kazior 	u8 dst[ETH_ALEN];
650f6dc2095SMichal Kazior 	u8 src[ETH_ALEN];
651f6dc2095SMichal Kazior 	__be16 len;
652f6dc2095SMichal Kazior } __packed;
653f6dc2095SMichal Kazior 
65473539b40SJanusz Dziedzic static const u8 rx_legacy_rate_idx[] = {
65573539b40SJanusz Dziedzic 	3,	/* 0x00  - 11Mbps  */
65673539b40SJanusz Dziedzic 	2,	/* 0x01  - 5.5Mbps */
65773539b40SJanusz Dziedzic 	1,	/* 0x02  - 2Mbps   */
65873539b40SJanusz Dziedzic 	0,	/* 0x03  - 1Mbps   */
65973539b40SJanusz Dziedzic 	3,	/* 0x04  - 11Mbps  */
66073539b40SJanusz Dziedzic 	2,	/* 0x05  - 5.5Mbps */
66173539b40SJanusz Dziedzic 	1,	/* 0x06  - 2Mbps   */
66273539b40SJanusz Dziedzic 	0,	/* 0x07  - 1Mbps   */
66373539b40SJanusz Dziedzic 	10,	/* 0x08  - 48Mbps  */
66473539b40SJanusz Dziedzic 	8,	/* 0x09  - 24Mbps  */
66573539b40SJanusz Dziedzic 	6,	/* 0x0A  - 12Mbps  */
66673539b40SJanusz Dziedzic 	4,	/* 0x0B  - 6Mbps   */
66773539b40SJanusz Dziedzic 	11,	/* 0x0C  - 54Mbps  */
66873539b40SJanusz Dziedzic 	9,	/* 0x0D  - 36Mbps  */
66973539b40SJanusz Dziedzic 	7,	/* 0x0E  - 18Mbps  */
67073539b40SJanusz Dziedzic 	5,	/* 0x0F  - 9Mbps   */
67173539b40SJanusz Dziedzic };
67273539b40SJanusz Dziedzic 
67387326c97SJanusz Dziedzic static void ath10k_htt_rx_h_rates(struct ath10k *ar,
674b9fd8a84SMichal Kazior 				  struct ieee80211_rx_status *status,
675b9fd8a84SMichal Kazior 				  struct htt_rx_desc *rxd)
67673539b40SJanusz Dziedzic {
677b9fd8a84SMichal Kazior 	enum ieee80211_band band;
67873539b40SJanusz Dziedzic 	u8 cck, rate, rate_idx, bw, sgi, mcs, nss;
67973539b40SJanusz Dziedzic 	u8 preamble = 0;
680b9fd8a84SMichal Kazior 	u32 info1, info2, info3;
68173539b40SJanusz Dziedzic 
682b9fd8a84SMichal Kazior 	/* Band value can't be set as undefined but freq can be 0 - use that to
683b9fd8a84SMichal Kazior 	 * determine whether band is provided.
684b9fd8a84SMichal Kazior 	 *
685b9fd8a84SMichal Kazior 	 * FIXME: Perhaps this can go away if CCK rate reporting is a little
686b9fd8a84SMichal Kazior 	 * reworked?
687b9fd8a84SMichal Kazior 	 */
688b9fd8a84SMichal Kazior 	if (!status->freq)
68973539b40SJanusz Dziedzic 		return;
69073539b40SJanusz Dziedzic 
691b9fd8a84SMichal Kazior 	band = status->band;
692b9fd8a84SMichal Kazior 	info1 = __le32_to_cpu(rxd->ppdu_start.info1);
693b9fd8a84SMichal Kazior 	info2 = __le32_to_cpu(rxd->ppdu_start.info2);
694b9fd8a84SMichal Kazior 	info3 = __le32_to_cpu(rxd->ppdu_start.info3);
695b9fd8a84SMichal Kazior 
696b9fd8a84SMichal Kazior 	preamble = MS(info1, RX_PPDU_START_INFO1_PREAMBLE_TYPE);
69773539b40SJanusz Dziedzic 
69873539b40SJanusz Dziedzic 	switch (preamble) {
69973539b40SJanusz Dziedzic 	case HTT_RX_LEGACY:
700b9fd8a84SMichal Kazior 		cck = info1 & RX_PPDU_START_INFO1_L_SIG_RATE_SELECT;
701b9fd8a84SMichal Kazior 		rate = MS(info1, RX_PPDU_START_INFO1_L_SIG_RATE);
70273539b40SJanusz Dziedzic 		rate_idx = 0;
70373539b40SJanusz Dziedzic 
70473539b40SJanusz Dziedzic 		if (rate < 0x08 || rate > 0x0F)
70573539b40SJanusz Dziedzic 			break;
70673539b40SJanusz Dziedzic 
70773539b40SJanusz Dziedzic 		switch (band) {
70873539b40SJanusz Dziedzic 		case IEEE80211_BAND_2GHZ:
70973539b40SJanusz Dziedzic 			if (cck)
71073539b40SJanusz Dziedzic 				rate &= ~BIT(3);
71173539b40SJanusz Dziedzic 			rate_idx = rx_legacy_rate_idx[rate];
71273539b40SJanusz Dziedzic 			break;
71373539b40SJanusz Dziedzic 		case IEEE80211_BAND_5GHZ:
71473539b40SJanusz Dziedzic 			rate_idx = rx_legacy_rate_idx[rate];
71573539b40SJanusz Dziedzic 			/* We are using same rate table registering
71673539b40SJanusz Dziedzic 			   HW - ath10k_rates[]. In case of 5GHz skip
71773539b40SJanusz Dziedzic 			   CCK rates, so -4 here */
71873539b40SJanusz Dziedzic 			rate_idx -= 4;
71973539b40SJanusz Dziedzic 			break;
72073539b40SJanusz Dziedzic 		default:
72173539b40SJanusz Dziedzic 			break;
72273539b40SJanusz Dziedzic 		}
72373539b40SJanusz Dziedzic 
72473539b40SJanusz Dziedzic 		status->rate_idx = rate_idx;
72573539b40SJanusz Dziedzic 		break;
72673539b40SJanusz Dziedzic 	case HTT_RX_HT:
72773539b40SJanusz Dziedzic 	case HTT_RX_HT_WITH_TXBF:
728b9fd8a84SMichal Kazior 		/* HT-SIG - Table 20-11 in info2 and info3 */
729b9fd8a84SMichal Kazior 		mcs = info2 & 0x1F;
73073539b40SJanusz Dziedzic 		nss = mcs >> 3;
731b9fd8a84SMichal Kazior 		bw = (info2 >> 7) & 1;
732b9fd8a84SMichal Kazior 		sgi = (info3 >> 7) & 1;
73373539b40SJanusz Dziedzic 
73473539b40SJanusz Dziedzic 		status->rate_idx = mcs;
73573539b40SJanusz Dziedzic 		status->flag |= RX_FLAG_HT;
73673539b40SJanusz Dziedzic 		if (sgi)
73773539b40SJanusz Dziedzic 			status->flag |= RX_FLAG_SHORT_GI;
73873539b40SJanusz Dziedzic 		if (bw)
73973539b40SJanusz Dziedzic 			status->flag |= RX_FLAG_40MHZ;
74073539b40SJanusz Dziedzic 		break;
74173539b40SJanusz Dziedzic 	case HTT_RX_VHT:
74273539b40SJanusz Dziedzic 	case HTT_RX_VHT_WITH_TXBF:
743b9fd8a84SMichal Kazior 		/* VHT-SIG-A1 in info2, VHT-SIG-A2 in info3
74473539b40SJanusz Dziedzic 		   TODO check this */
745b9fd8a84SMichal Kazior 		mcs = (info3 >> 4) & 0x0F;
746b9fd8a84SMichal Kazior 		nss = ((info2 >> 10) & 0x07) + 1;
747b9fd8a84SMichal Kazior 		bw = info2 & 3;
748b9fd8a84SMichal Kazior 		sgi = info3 & 1;
74973539b40SJanusz Dziedzic 
75073539b40SJanusz Dziedzic 		status->rate_idx = mcs;
75173539b40SJanusz Dziedzic 		status->vht_nss = nss;
75273539b40SJanusz Dziedzic 
75373539b40SJanusz Dziedzic 		if (sgi)
75473539b40SJanusz Dziedzic 			status->flag |= RX_FLAG_SHORT_GI;
75573539b40SJanusz Dziedzic 
75673539b40SJanusz Dziedzic 		switch (bw) {
75773539b40SJanusz Dziedzic 		/* 20MHZ */
75873539b40SJanusz Dziedzic 		case 0:
75973539b40SJanusz Dziedzic 			break;
76073539b40SJanusz Dziedzic 		/* 40MHZ */
76173539b40SJanusz Dziedzic 		case 1:
76273539b40SJanusz Dziedzic 			status->flag |= RX_FLAG_40MHZ;
76373539b40SJanusz Dziedzic 			break;
76473539b40SJanusz Dziedzic 		/* 80MHZ */
76573539b40SJanusz Dziedzic 		case 2:
76673539b40SJanusz Dziedzic 			status->vht_flag |= RX_VHT_FLAG_80MHZ;
76773539b40SJanusz Dziedzic 		}
76873539b40SJanusz Dziedzic 
76973539b40SJanusz Dziedzic 		status->flag |= RX_FLAG_VHT;
77073539b40SJanusz Dziedzic 		break;
77173539b40SJanusz Dziedzic 	default:
77273539b40SJanusz Dziedzic 		break;
77373539b40SJanusz Dziedzic 	}
77473539b40SJanusz Dziedzic }
77573539b40SJanusz Dziedzic 
77636653f05SJanusz Dziedzic static bool ath10k_htt_rx_h_channel(struct ath10k *ar,
77736653f05SJanusz Dziedzic 				    struct ieee80211_rx_status *status)
77836653f05SJanusz Dziedzic {
77936653f05SJanusz Dziedzic 	struct ieee80211_channel *ch;
78036653f05SJanusz Dziedzic 
78136653f05SJanusz Dziedzic 	spin_lock_bh(&ar->data_lock);
78236653f05SJanusz Dziedzic 	ch = ar->scan_channel;
78336653f05SJanusz Dziedzic 	if (!ch)
78436653f05SJanusz Dziedzic 		ch = ar->rx_channel;
78536653f05SJanusz Dziedzic 	spin_unlock_bh(&ar->data_lock);
78636653f05SJanusz Dziedzic 
78736653f05SJanusz Dziedzic 	if (!ch)
78836653f05SJanusz Dziedzic 		return false;
78936653f05SJanusz Dziedzic 
79036653f05SJanusz Dziedzic 	status->band = ch->band;
79136653f05SJanusz Dziedzic 	status->freq = ch->center_freq;
79236653f05SJanusz Dziedzic 
79336653f05SJanusz Dziedzic 	return true;
79436653f05SJanusz Dziedzic }
79536653f05SJanusz Dziedzic 
796b9fd8a84SMichal Kazior static void ath10k_htt_rx_h_signal(struct ath10k *ar,
797b9fd8a84SMichal Kazior 				   struct ieee80211_rx_status *status,
798b9fd8a84SMichal Kazior 				   struct htt_rx_desc *rxd)
799b9fd8a84SMichal Kazior {
800b9fd8a84SMichal Kazior 	/* FIXME: Get real NF */
801b9fd8a84SMichal Kazior 	status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
802b9fd8a84SMichal Kazior 			 rxd->ppdu_start.rssi_comb;
803b9fd8a84SMichal Kazior 	status->flag &= ~RX_FLAG_NO_SIGNAL_VAL;
804b9fd8a84SMichal Kazior }
805b9fd8a84SMichal Kazior 
806b9fd8a84SMichal Kazior static void ath10k_htt_rx_h_mactime(struct ath10k *ar,
807b9fd8a84SMichal Kazior 				    struct ieee80211_rx_status *status,
808b9fd8a84SMichal Kazior 				    struct htt_rx_desc *rxd)
809b9fd8a84SMichal Kazior {
810b9fd8a84SMichal Kazior 	/* FIXME: TSF is known only at the end of PPDU, in the last MPDU. This
811b9fd8a84SMichal Kazior 	 * means all prior MSDUs in a PPDU are reported to mac80211 without the
812b9fd8a84SMichal Kazior 	 * TSF. Is it worth holding frames until end of PPDU is known?
813b9fd8a84SMichal Kazior 	 *
814b9fd8a84SMichal Kazior 	 * FIXME: Can we get/compute 64bit TSF?
815b9fd8a84SMichal Kazior 	 */
8163ec79e3aSMichal Kazior 	status->mactime = __le32_to_cpu(rxd->ppdu_end.common.tsf_timestamp);
817b9fd8a84SMichal Kazior 	status->flag |= RX_FLAG_MACTIME_END;
818b9fd8a84SMichal Kazior }
819b9fd8a84SMichal Kazior 
820b9fd8a84SMichal Kazior static void ath10k_htt_rx_h_ppdu(struct ath10k *ar,
821b9fd8a84SMichal Kazior 				 struct sk_buff_head *amsdu,
822b9fd8a84SMichal Kazior 				 struct ieee80211_rx_status *status)
823b9fd8a84SMichal Kazior {
824b9fd8a84SMichal Kazior 	struct sk_buff *first;
825b9fd8a84SMichal Kazior 	struct htt_rx_desc *rxd;
826b9fd8a84SMichal Kazior 	bool is_first_ppdu;
827b9fd8a84SMichal Kazior 	bool is_last_ppdu;
828b9fd8a84SMichal Kazior 
829b9fd8a84SMichal Kazior 	if (skb_queue_empty(amsdu))
830b9fd8a84SMichal Kazior 		return;
831b9fd8a84SMichal Kazior 
832b9fd8a84SMichal Kazior 	first = skb_peek(amsdu);
833b9fd8a84SMichal Kazior 	rxd = (void *)first->data - sizeof(*rxd);
834b9fd8a84SMichal Kazior 
835b9fd8a84SMichal Kazior 	is_first_ppdu = !!(rxd->attention.flags &
836b9fd8a84SMichal Kazior 			   __cpu_to_le32(RX_ATTENTION_FLAGS_FIRST_MPDU));
837b9fd8a84SMichal Kazior 	is_last_ppdu = !!(rxd->attention.flags &
838b9fd8a84SMichal Kazior 			  __cpu_to_le32(RX_ATTENTION_FLAGS_LAST_MPDU));
839b9fd8a84SMichal Kazior 
840b9fd8a84SMichal Kazior 	if (is_first_ppdu) {
841b9fd8a84SMichal Kazior 		/* New PPDU starts so clear out the old per-PPDU status. */
842b9fd8a84SMichal Kazior 		status->freq = 0;
843b9fd8a84SMichal Kazior 		status->rate_idx = 0;
844b9fd8a84SMichal Kazior 		status->vht_nss = 0;
845b9fd8a84SMichal Kazior 		status->vht_flag &= ~RX_VHT_FLAG_80MHZ;
846b9fd8a84SMichal Kazior 		status->flag &= ~(RX_FLAG_HT |
847b9fd8a84SMichal Kazior 				  RX_FLAG_VHT |
848b9fd8a84SMichal Kazior 				  RX_FLAG_SHORT_GI |
849b9fd8a84SMichal Kazior 				  RX_FLAG_40MHZ |
850b9fd8a84SMichal Kazior 				  RX_FLAG_MACTIME_END);
851b9fd8a84SMichal Kazior 		status->flag |= RX_FLAG_NO_SIGNAL_VAL;
852b9fd8a84SMichal Kazior 
853b9fd8a84SMichal Kazior 		ath10k_htt_rx_h_signal(ar, status, rxd);
854b9fd8a84SMichal Kazior 		ath10k_htt_rx_h_channel(ar, status);
855b9fd8a84SMichal Kazior 		ath10k_htt_rx_h_rates(ar, status, rxd);
856b9fd8a84SMichal Kazior 	}
857b9fd8a84SMichal Kazior 
858b9fd8a84SMichal Kazior 	if (is_last_ppdu)
859b9fd8a84SMichal Kazior 		ath10k_htt_rx_h_mactime(ar, status, rxd);
860b9fd8a84SMichal Kazior }
861b9fd8a84SMichal Kazior 
86276f5329aSJanusz Dziedzic static const char * const tid_to_ac[] = {
86376f5329aSJanusz Dziedzic 	"BE",
86476f5329aSJanusz Dziedzic 	"BK",
86576f5329aSJanusz Dziedzic 	"BK",
86676f5329aSJanusz Dziedzic 	"BE",
86776f5329aSJanusz Dziedzic 	"VI",
86876f5329aSJanusz Dziedzic 	"VI",
86976f5329aSJanusz Dziedzic 	"VO",
87076f5329aSJanusz Dziedzic 	"VO",
87176f5329aSJanusz Dziedzic };
87276f5329aSJanusz Dziedzic 
87376f5329aSJanusz Dziedzic static char *ath10k_get_tid(struct ieee80211_hdr *hdr, char *out, size_t size)
87476f5329aSJanusz Dziedzic {
87576f5329aSJanusz Dziedzic 	u8 *qc;
87676f5329aSJanusz Dziedzic 	int tid;
87776f5329aSJanusz Dziedzic 
87876f5329aSJanusz Dziedzic 	if (!ieee80211_is_data_qos(hdr->frame_control))
87976f5329aSJanusz Dziedzic 		return "";
88076f5329aSJanusz Dziedzic 
88176f5329aSJanusz Dziedzic 	qc = ieee80211_get_qos_ctl(hdr);
88276f5329aSJanusz Dziedzic 	tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
88376f5329aSJanusz Dziedzic 	if (tid < 8)
88476f5329aSJanusz Dziedzic 		snprintf(out, size, "tid %d (%s)", tid, tid_to_ac[tid]);
88576f5329aSJanusz Dziedzic 	else
88676f5329aSJanusz Dziedzic 		snprintf(out, size, "tid %d", tid);
88776f5329aSJanusz Dziedzic 
88876f5329aSJanusz Dziedzic 	return out;
88976f5329aSJanusz Dziedzic }
89076f5329aSJanusz Dziedzic 
89185f6d7cfSJanusz Dziedzic static void ath10k_process_rx(struct ath10k *ar,
89285f6d7cfSJanusz Dziedzic 			      struct ieee80211_rx_status *rx_status,
89385f6d7cfSJanusz Dziedzic 			      struct sk_buff *skb)
89473539b40SJanusz Dziedzic {
89573539b40SJanusz Dziedzic 	struct ieee80211_rx_status *status;
89676f5329aSJanusz Dziedzic 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
89776f5329aSJanusz Dziedzic 	char tid[32];
89873539b40SJanusz Dziedzic 
89985f6d7cfSJanusz Dziedzic 	status = IEEE80211_SKB_RXCB(skb);
90085f6d7cfSJanusz Dziedzic 	*status = *rx_status;
90173539b40SJanusz Dziedzic 
9027aa7a72aSMichal Kazior 	ath10k_dbg(ar, ATH10K_DBG_DATA,
90376f5329aSJanusz Dziedzic 		   "rx skb %p len %u peer %pM %s %s sn %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
90485f6d7cfSJanusz Dziedzic 		   skb,
90585f6d7cfSJanusz Dziedzic 		   skb->len,
90676f5329aSJanusz Dziedzic 		   ieee80211_get_SA(hdr),
90776f5329aSJanusz Dziedzic 		   ath10k_get_tid(hdr, tid, sizeof(tid)),
90876f5329aSJanusz Dziedzic 		   is_multicast_ether_addr(ieee80211_get_DA(hdr)) ?
90976f5329aSJanusz Dziedzic 							"mcast" : "ucast",
91076f5329aSJanusz Dziedzic 		   (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4,
91173539b40SJanusz Dziedzic 		   status->flag == 0 ? "legacy" : "",
91273539b40SJanusz Dziedzic 		   status->flag & RX_FLAG_HT ? "ht" : "",
91373539b40SJanusz Dziedzic 		   status->flag & RX_FLAG_VHT ? "vht" : "",
91473539b40SJanusz Dziedzic 		   status->flag & RX_FLAG_40MHZ ? "40" : "",
91573539b40SJanusz Dziedzic 		   status->vht_flag & RX_VHT_FLAG_80MHZ ? "80" : "",
91673539b40SJanusz Dziedzic 		   status->flag & RX_FLAG_SHORT_GI ? "sgi " : "",
91773539b40SJanusz Dziedzic 		   status->rate_idx,
91873539b40SJanusz Dziedzic 		   status->vht_nss,
91973539b40SJanusz Dziedzic 		   status->freq,
92087326c97SJanusz Dziedzic 		   status->band, status->flag,
92178433f96SJanusz Dziedzic 		   !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
92276f5329aSJanusz Dziedzic 		   !!(status->flag & RX_FLAG_MMIC_ERROR),
92376f5329aSJanusz Dziedzic 		   !!(status->flag & RX_FLAG_AMSDU_MORE));
9247aa7a72aSMichal Kazior 	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
92585f6d7cfSJanusz Dziedzic 			skb->data, skb->len);
9265ce8e7fdSRajkumar Manoharan 	trace_ath10k_rx_hdr(ar, skb->data, skb->len);
9275ce8e7fdSRajkumar Manoharan 	trace_ath10k_rx_payload(ar, skb->data, skb->len);
92873539b40SJanusz Dziedzic 
92985f6d7cfSJanusz Dziedzic 	ieee80211_rx(ar->hw, skb);
93073539b40SJanusz Dziedzic }
93173539b40SJanusz Dziedzic 
932d960c369SMichal Kazior static int ath10k_htt_rx_nwifi_hdrlen(struct ieee80211_hdr *hdr)
933d960c369SMichal Kazior {
934d960c369SMichal Kazior 	/* nwifi header is padded to 4 bytes. this fixes 4addr rx */
935d960c369SMichal Kazior 	return round_up(ieee80211_hdrlen(hdr->frame_control), 4);
936d960c369SMichal Kazior }
937d960c369SMichal Kazior 
938581c25f8SMichal Kazior static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
939581c25f8SMichal Kazior 					struct sk_buff *msdu,
940581c25f8SMichal Kazior 					struct ieee80211_rx_status *status,
941581c25f8SMichal Kazior 					enum htt_rx_mpdu_encrypt_type enctype,
942581c25f8SMichal Kazior 					bool is_decrypted)
9435e3dd157SKalle Valo {
944f6dc2095SMichal Kazior 	struct ieee80211_hdr *hdr;
945581c25f8SMichal Kazior 	struct htt_rx_desc *rxd;
946581c25f8SMichal Kazior 	size_t hdr_len;
947581c25f8SMichal Kazior 	size_t crypto_len;
948581c25f8SMichal Kazior 	bool is_first;
949581c25f8SMichal Kazior 	bool is_last;
9505e3dd157SKalle Valo 
951581c25f8SMichal Kazior 	rxd = (void *)msdu->data - sizeof(*rxd);
952581c25f8SMichal Kazior 	is_first = !!(rxd->msdu_end.info0 &
953581c25f8SMichal Kazior 		      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
954581c25f8SMichal Kazior 	is_last = !!(rxd->msdu_end.info0 &
955581c25f8SMichal Kazior 		     __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
9569aa505d2SMichal Kazior 
957581c25f8SMichal Kazior 	/* Delivered decapped frame:
958581c25f8SMichal Kazior 	 * [802.11 header]
959581c25f8SMichal Kazior 	 * [crypto param] <-- can be trimmed if !fcs_err &&
960581c25f8SMichal Kazior 	 *                    !decrypt_err && !peer_idx_invalid
961581c25f8SMichal Kazior 	 * [amsdu header] <-- only if A-MSDU
962581c25f8SMichal Kazior 	 * [rfc1042/llc]
963581c25f8SMichal Kazior 	 * [payload]
964581c25f8SMichal Kazior 	 * [FCS] <-- at end, needs to be trimmed
965581c25f8SMichal Kazior 	 */
9665e3dd157SKalle Valo 
967581c25f8SMichal Kazior 	/* This probably shouldn't happen but warn just in case */
968581c25f8SMichal Kazior 	if (unlikely(WARN_ON_ONCE(!is_first)))
969581c25f8SMichal Kazior 		return;
970581c25f8SMichal Kazior 
971581c25f8SMichal Kazior 	/* This probably shouldn't happen but warn just in case */
972581c25f8SMichal Kazior 	if (unlikely(WARN_ON_ONCE(!(is_first && is_last))))
973581c25f8SMichal Kazior 		return;
974581c25f8SMichal Kazior 
975581c25f8SMichal Kazior 	skb_trim(msdu, msdu->len - FCS_LEN);
976581c25f8SMichal Kazior 
977581c25f8SMichal Kazior 	/* In most cases this will be true for sniffed frames. It makes sense
978581c25f8SMichal Kazior 	 * to deliver them as-is without stripping the crypto param. This would
979581c25f8SMichal Kazior 	 * also make sense for software based decryption (which is not
980581c25f8SMichal Kazior 	 * implemented in ath10k).
981581c25f8SMichal Kazior 	 *
982581c25f8SMichal Kazior 	 * If there's no error then the frame is decrypted. At least that is
983581c25f8SMichal Kazior 	 * the case for frames that come in via fragmented rx indication.
984581c25f8SMichal Kazior 	 */
985581c25f8SMichal Kazior 	if (!is_decrypted)
986581c25f8SMichal Kazior 		return;
987581c25f8SMichal Kazior 
988581c25f8SMichal Kazior 	/* The payload is decrypted so strip crypto params. Start from tail
989581c25f8SMichal Kazior 	 * since hdr is used to compute some stuff.
990581c25f8SMichal Kazior 	 */
991581c25f8SMichal Kazior 
992581c25f8SMichal Kazior 	hdr = (void *)msdu->data;
993581c25f8SMichal Kazior 
994581c25f8SMichal Kazior 	/* Tail */
995581c25f8SMichal Kazior 	skb_trim(msdu, msdu->len - ath10k_htt_rx_crypto_tail_len(ar, enctype));
996581c25f8SMichal Kazior 
997581c25f8SMichal Kazior 	/* MMIC */
998581c25f8SMichal Kazior 	if (!ieee80211_has_morefrags(hdr->frame_control) &&
999581c25f8SMichal Kazior 	    enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
1000581c25f8SMichal Kazior 		skb_trim(msdu, msdu->len - 8);
1001581c25f8SMichal Kazior 
1002581c25f8SMichal Kazior 	/* Head */
1003f6dc2095SMichal Kazior 	hdr_len = ieee80211_hdrlen(hdr->frame_control);
1004581c25f8SMichal Kazior 	crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);
10055e3dd157SKalle Valo 
1006581c25f8SMichal Kazior 	memmove((void *)msdu->data + crypto_len,
1007581c25f8SMichal Kazior 		(void *)msdu->data, hdr_len);
1008581c25f8SMichal Kazior 	skb_pull(msdu, crypto_len);
10095e3dd157SKalle Valo }
10105e3dd157SKalle Valo 
1011581c25f8SMichal Kazior static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
1012581c25f8SMichal Kazior 					  struct sk_buff *msdu,
1013581c25f8SMichal Kazior 					  struct ieee80211_rx_status *status,
1014581c25f8SMichal Kazior 					  const u8 first_hdr[64])
1015581c25f8SMichal Kazior {
1016581c25f8SMichal Kazior 	struct ieee80211_hdr *hdr;
1017581c25f8SMichal Kazior 	size_t hdr_len;
1018581c25f8SMichal Kazior 	u8 da[ETH_ALEN];
1019581c25f8SMichal Kazior 	u8 sa[ETH_ALEN];
1020581c25f8SMichal Kazior 
1021581c25f8SMichal Kazior 	/* Delivered decapped frame:
1022581c25f8SMichal Kazior 	 * [nwifi 802.11 header] <-- replaced with 802.11 hdr
1023581c25f8SMichal Kazior 	 * [rfc1042/llc]
1024581c25f8SMichal Kazior 	 *
1025581c25f8SMichal Kazior 	 * Note: The nwifi header doesn't have QoS Control and is
1026581c25f8SMichal Kazior 	 * (always?) a 3addr frame.
1027581c25f8SMichal Kazior 	 *
1028581c25f8SMichal Kazior 	 * Note2: There's no A-MSDU subframe header. Even if it's part
1029581c25f8SMichal Kazior 	 * of an A-MSDU.
1030581c25f8SMichal Kazior 	 */
1031581c25f8SMichal Kazior 
103272bdeb86SMichal Kazior 	/* pull decapped header and copy SA & DA */
1033581c25f8SMichal Kazior 	hdr = (struct ieee80211_hdr *)msdu->data;
1034d960c369SMichal Kazior 	hdr_len = ath10k_htt_rx_nwifi_hdrlen(hdr);
1035b25f32cbSKalle Valo 	ether_addr_copy(da, ieee80211_get_DA(hdr));
1036b25f32cbSKalle Valo 	ether_addr_copy(sa, ieee80211_get_SA(hdr));
1037581c25f8SMichal Kazior 	skb_pull(msdu, hdr_len);
1038784f69d3SMichal Kazior 
1039784f69d3SMichal Kazior 	/* push original 802.11 header */
1040581c25f8SMichal Kazior 	hdr = (struct ieee80211_hdr *)first_hdr;
1041784f69d3SMichal Kazior 	hdr_len = ieee80211_hdrlen(hdr->frame_control);
1042581c25f8SMichal Kazior 	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
1043784f69d3SMichal Kazior 
104472bdeb86SMichal Kazior 	/* original 802.11 header has a different DA and in
104572bdeb86SMichal Kazior 	 * case of 4addr it may also have different SA
104672bdeb86SMichal Kazior 	 */
1047581c25f8SMichal Kazior 	hdr = (struct ieee80211_hdr *)msdu->data;
1048b25f32cbSKalle Valo 	ether_addr_copy(ieee80211_get_DA(hdr), da);
1049b25f32cbSKalle Valo 	ether_addr_copy(ieee80211_get_SA(hdr), sa);
10505e3dd157SKalle Valo }
10515e3dd157SKalle Valo 
1052581c25f8SMichal Kazior static void *ath10k_htt_rx_h_find_rfc1042(struct ath10k *ar,
1053581c25f8SMichal Kazior 					  struct sk_buff *msdu,
1054581c25f8SMichal Kazior 					  enum htt_rx_mpdu_encrypt_type enctype)
10555e3dd157SKalle Valo {
10565e3dd157SKalle Valo 	struct ieee80211_hdr *hdr;
1057581c25f8SMichal Kazior 	struct htt_rx_desc *rxd;
1058581c25f8SMichal Kazior 	size_t hdr_len, crypto_len;
1059e3fbf8d2SMichal Kazior 	void *rfc1042;
1060581c25f8SMichal Kazior 	bool is_first, is_last, is_amsdu;
10615e3dd157SKalle Valo 
1062581c25f8SMichal Kazior 	rxd = (void *)msdu->data - sizeof(*rxd);
1063581c25f8SMichal Kazior 	hdr = (void *)rxd->rx_hdr_status;
10645e3dd157SKalle Valo 
1065581c25f8SMichal Kazior 	is_first = !!(rxd->msdu_end.info0 &
1066581c25f8SMichal Kazior 		      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
1067581c25f8SMichal Kazior 	is_last = !!(rxd->msdu_end.info0 &
1068581c25f8SMichal Kazior 		     __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
1069581c25f8SMichal Kazior 	is_amsdu = !(is_first && is_last);
1070e3fbf8d2SMichal Kazior 
1071e3fbf8d2SMichal Kazior 	rfc1042 = hdr;
1072e3fbf8d2SMichal Kazior 
1073581c25f8SMichal Kazior 	if (is_first) {
1074581c25f8SMichal Kazior 		hdr_len = ieee80211_hdrlen(hdr->frame_control);
1075581c25f8SMichal Kazior 		crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);
1076e3fbf8d2SMichal Kazior 
1077581c25f8SMichal Kazior 		rfc1042 += round_up(hdr_len, 4) +
1078581c25f8SMichal Kazior 			   round_up(crypto_len, 4);
10795e3dd157SKalle Valo 	}
10805e3dd157SKalle Valo 
1081581c25f8SMichal Kazior 	if (is_amsdu)
1082581c25f8SMichal Kazior 		rfc1042 += sizeof(struct amsdu_subframe_hdr);
1083f6dc2095SMichal Kazior 
1084581c25f8SMichal Kazior 	return rfc1042;
1085581c25f8SMichal Kazior }
1086581c25f8SMichal Kazior 
1087581c25f8SMichal Kazior static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar,
1088581c25f8SMichal Kazior 					struct sk_buff *msdu,
1089581c25f8SMichal Kazior 					struct ieee80211_rx_status *status,
1090581c25f8SMichal Kazior 					const u8 first_hdr[64],
1091581c25f8SMichal Kazior 					enum htt_rx_mpdu_encrypt_type enctype)
1092581c25f8SMichal Kazior {
1093581c25f8SMichal Kazior 	struct ieee80211_hdr *hdr;
1094581c25f8SMichal Kazior 	struct ethhdr *eth;
1095581c25f8SMichal Kazior 	size_t hdr_len;
1096581c25f8SMichal Kazior 	void *rfc1042;
1097581c25f8SMichal Kazior 	u8 da[ETH_ALEN];
1098581c25f8SMichal Kazior 	u8 sa[ETH_ALEN];
1099581c25f8SMichal Kazior 
1100581c25f8SMichal Kazior 	/* Delivered decapped frame:
1101581c25f8SMichal Kazior 	 * [eth header] <-- replaced with 802.11 hdr & rfc1042/llc
1102581c25f8SMichal Kazior 	 * [payload]
1103581c25f8SMichal Kazior 	 */
1104581c25f8SMichal Kazior 
1105581c25f8SMichal Kazior 	rfc1042 = ath10k_htt_rx_h_find_rfc1042(ar, msdu, enctype);
1106581c25f8SMichal Kazior 	if (WARN_ON_ONCE(!rfc1042))
1107581c25f8SMichal Kazior 		return;
1108581c25f8SMichal Kazior 
1109581c25f8SMichal Kazior 	/* pull decapped header and copy SA & DA */
1110581c25f8SMichal Kazior 	eth = (struct ethhdr *)msdu->data;
1111581c25f8SMichal Kazior 	ether_addr_copy(da, eth->h_dest);
1112581c25f8SMichal Kazior 	ether_addr_copy(sa, eth->h_source);
1113581c25f8SMichal Kazior 	skb_pull(msdu, sizeof(struct ethhdr));
1114581c25f8SMichal Kazior 
1115581c25f8SMichal Kazior 	/* push rfc1042/llc/snap */
1116581c25f8SMichal Kazior 	memcpy(skb_push(msdu, sizeof(struct rfc1042_hdr)), rfc1042,
1117581c25f8SMichal Kazior 	       sizeof(struct rfc1042_hdr));
1118581c25f8SMichal Kazior 
1119581c25f8SMichal Kazior 	/* push original 802.11 header */
1120581c25f8SMichal Kazior 	hdr = (struct ieee80211_hdr *)first_hdr;
1121581c25f8SMichal Kazior 	hdr_len = ieee80211_hdrlen(hdr->frame_control);
1122581c25f8SMichal Kazior 	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
1123581c25f8SMichal Kazior 
1124581c25f8SMichal Kazior 	/* original 802.11 header has a different DA and in
1125581c25f8SMichal Kazior 	 * case of 4addr it may also have different SA
1126581c25f8SMichal Kazior 	 */
1127581c25f8SMichal Kazior 	hdr = (struct ieee80211_hdr *)msdu->data;
1128581c25f8SMichal Kazior 	ether_addr_copy(ieee80211_get_DA(hdr), da);
1129581c25f8SMichal Kazior 	ether_addr_copy(ieee80211_get_SA(hdr), sa);
1130581c25f8SMichal Kazior }
1131581c25f8SMichal Kazior 
1132581c25f8SMichal Kazior static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar,
1133581c25f8SMichal Kazior 					 struct sk_buff *msdu,
1134581c25f8SMichal Kazior 					 struct ieee80211_rx_status *status,
1135581c25f8SMichal Kazior 					 const u8 first_hdr[64])
1136581c25f8SMichal Kazior {
1137581c25f8SMichal Kazior 	struct ieee80211_hdr *hdr;
1138581c25f8SMichal Kazior 	size_t hdr_len;
1139581c25f8SMichal Kazior 
1140581c25f8SMichal Kazior 	/* Delivered decapped frame:
1141581c25f8SMichal Kazior 	 * [amsdu header] <-- replaced with 802.11 hdr
1142581c25f8SMichal Kazior 	 * [rfc1042/llc]
1143581c25f8SMichal Kazior 	 * [payload]
1144581c25f8SMichal Kazior 	 */
1145581c25f8SMichal Kazior 
1146581c25f8SMichal Kazior 	skb_pull(msdu, sizeof(struct amsdu_subframe_hdr));
1147581c25f8SMichal Kazior 
1148581c25f8SMichal Kazior 	hdr = (struct ieee80211_hdr *)first_hdr;
1149581c25f8SMichal Kazior 	hdr_len = ieee80211_hdrlen(hdr->frame_control);
1150581c25f8SMichal Kazior 	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
1151581c25f8SMichal Kazior }
1152581c25f8SMichal Kazior 
1153581c25f8SMichal Kazior static void ath10k_htt_rx_h_undecap(struct ath10k *ar,
1154581c25f8SMichal Kazior 				    struct sk_buff *msdu,
1155581c25f8SMichal Kazior 				    struct ieee80211_rx_status *status,
1156581c25f8SMichal Kazior 				    u8 first_hdr[64],
1157581c25f8SMichal Kazior 				    enum htt_rx_mpdu_encrypt_type enctype,
1158581c25f8SMichal Kazior 				    bool is_decrypted)
1159581c25f8SMichal Kazior {
1160581c25f8SMichal Kazior 	struct htt_rx_desc *rxd;
1161581c25f8SMichal Kazior 	enum rx_msdu_decap_format decap;
1162581c25f8SMichal Kazior 	struct ieee80211_hdr *hdr;
1163581c25f8SMichal Kazior 
1164581c25f8SMichal Kazior 	/* First msdu's decapped header:
1165581c25f8SMichal Kazior 	 * [802.11 header] <-- padded to 4 bytes long
1166581c25f8SMichal Kazior 	 * [crypto param] <-- padded to 4 bytes long
1167581c25f8SMichal Kazior 	 * [amsdu header] <-- only if A-MSDU
1168581c25f8SMichal Kazior 	 * [rfc1042/llc]
1169581c25f8SMichal Kazior 	 *
1170581c25f8SMichal Kazior 	 * Other (2nd, 3rd, ..) msdu's decapped header:
1171581c25f8SMichal Kazior 	 * [amsdu header] <-- only if A-MSDU
1172581c25f8SMichal Kazior 	 * [rfc1042/llc]
1173581c25f8SMichal Kazior 	 */
1174581c25f8SMichal Kazior 
1175581c25f8SMichal Kazior 	rxd = (void *)msdu->data - sizeof(*rxd);
1176581c25f8SMichal Kazior 	hdr = (void *)rxd->rx_hdr_status;
1177581c25f8SMichal Kazior 	decap = MS(__le32_to_cpu(rxd->msdu_start.info1),
1178581c25f8SMichal Kazior 		   RX_MSDU_START_INFO1_DECAP_FORMAT);
1179581c25f8SMichal Kazior 
1180581c25f8SMichal Kazior 	switch (decap) {
1181581c25f8SMichal Kazior 	case RX_MSDU_DECAP_RAW:
1182581c25f8SMichal Kazior 		ath10k_htt_rx_h_undecap_raw(ar, msdu, status, enctype,
1183581c25f8SMichal Kazior 					    is_decrypted);
1184581c25f8SMichal Kazior 		break;
1185581c25f8SMichal Kazior 	case RX_MSDU_DECAP_NATIVE_WIFI:
1186581c25f8SMichal Kazior 		ath10k_htt_rx_h_undecap_nwifi(ar, msdu, status, first_hdr);
1187581c25f8SMichal Kazior 		break;
1188581c25f8SMichal Kazior 	case RX_MSDU_DECAP_ETHERNET2_DIX:
1189581c25f8SMichal Kazior 		ath10k_htt_rx_h_undecap_eth(ar, msdu, status, first_hdr, enctype);
1190581c25f8SMichal Kazior 		break;
1191581c25f8SMichal Kazior 	case RX_MSDU_DECAP_8023_SNAP_LLC:
1192581c25f8SMichal Kazior 		ath10k_htt_rx_h_undecap_snap(ar, msdu, status, first_hdr);
1193581c25f8SMichal Kazior 		break;
1194581c25f8SMichal Kazior 	}
11955e3dd157SKalle Valo }
11965e3dd157SKalle Valo 
1197605f81aaSMichal Kazior static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
1198605f81aaSMichal Kazior {
1199605f81aaSMichal Kazior 	struct htt_rx_desc *rxd;
1200605f81aaSMichal Kazior 	u32 flags, info;
1201605f81aaSMichal Kazior 	bool is_ip4, is_ip6;
1202605f81aaSMichal Kazior 	bool is_tcp, is_udp;
1203605f81aaSMichal Kazior 	bool ip_csum_ok, tcpudp_csum_ok;
1204605f81aaSMichal Kazior 
1205605f81aaSMichal Kazior 	rxd = (void *)skb->data - sizeof(*rxd);
1206605f81aaSMichal Kazior 	flags = __le32_to_cpu(rxd->attention.flags);
1207605f81aaSMichal Kazior 	info = __le32_to_cpu(rxd->msdu_start.info1);
1208605f81aaSMichal Kazior 
1209605f81aaSMichal Kazior 	is_ip4 = !!(info & RX_MSDU_START_INFO1_IPV4_PROTO);
1210605f81aaSMichal Kazior 	is_ip6 = !!(info & RX_MSDU_START_INFO1_IPV6_PROTO);
1211605f81aaSMichal Kazior 	is_tcp = !!(info & RX_MSDU_START_INFO1_TCP_PROTO);
1212605f81aaSMichal Kazior 	is_udp = !!(info & RX_MSDU_START_INFO1_UDP_PROTO);
1213605f81aaSMichal Kazior 	ip_csum_ok = !(flags & RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL);
1214605f81aaSMichal Kazior 	tcpudp_csum_ok = !(flags & RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL);
1215605f81aaSMichal Kazior 
1216605f81aaSMichal Kazior 	if (!is_ip4 && !is_ip6)
1217605f81aaSMichal Kazior 		return CHECKSUM_NONE;
1218605f81aaSMichal Kazior 	if (!is_tcp && !is_udp)
1219605f81aaSMichal Kazior 		return CHECKSUM_NONE;
1220605f81aaSMichal Kazior 	if (!ip_csum_ok)
1221605f81aaSMichal Kazior 		return CHECKSUM_NONE;
1222605f81aaSMichal Kazior 	if (!tcpudp_csum_ok)
1223605f81aaSMichal Kazior 		return CHECKSUM_NONE;
1224605f81aaSMichal Kazior 
1225605f81aaSMichal Kazior 	return CHECKSUM_UNNECESSARY;
1226605f81aaSMichal Kazior }
1227605f81aaSMichal Kazior 
/* Stamp the msdu's checksum state from the hardware rx descriptor verdict so
 * the network stack can skip software checksum validation when possible.
 */
static void ath10k_htt_rx_h_csum_offload(struct sk_buff *msdu)
{
	msdu->ip_summed = ath10k_htt_rx_get_csum_state(msdu);
}
1232581c25f8SMichal Kazior 
/* Post-process all MSDUs of a single (A-)MPDU: derive the encryption state
 * from the first MSDU's rx descriptor and error bits from the last one,
 * update the per-MPDU rx_status flags, set checksum offload state and
 * undecap each MSDU back into an 802.11 frame based on the saved header.
 *
 * @amsdu may be empty (no-op). @status is shared across MSDUs; per-MPDU
 * flags are cleared and re-derived here while per-PPDU flags are kept.
 */
static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
				 struct sk_buff_head *amsdu,
				 struct ieee80211_rx_status *status)
{
	struct sk_buff *first;
	struct sk_buff *last;
	struct sk_buff *msdu;
	struct htt_rx_desc *rxd;
	struct ieee80211_hdr *hdr;
	enum htt_rx_mpdu_encrypt_type enctype;
	u8 first_hdr[64];
	u8 *qos;
	size_t hdr_len;
	bool has_fcs_err;
	bool has_crypto_err;
	bool has_tkip_err;
	bool has_peer_idx_invalid;
	bool is_decrypted;
	u32 attention;

	if (skb_queue_empty(amsdu))
		return;

	/* The rx descriptor sits immediately before the frame payload. */
	first = skb_peek(amsdu);
	rxd = (void *)first->data - sizeof(*rxd);

	enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
		     RX_MPDU_START_INFO0_ENCRYPT_TYPE);

	/* First MSDU's Rx descriptor in an A-MSDU contains full 802.11
	 * decapped header. It'll be used for undecapping of each MSDU.
	 */
	hdr = (void *)rxd->rx_hdr_status;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);
	memcpy(first_hdr, hdr, hdr_len);

	/* Each A-MSDU subframe will use the original header as the base and be
	 * reported as a separate MSDU so strip the A-MSDU bit from QoS Ctl.
	 */
	hdr = (void *)first_hdr;
	qos = ieee80211_get_qos_ctl(hdr);
	qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;

	/* Some attention flags are valid only in the last MSDU. */
	last = skb_peek_tail(amsdu);
	rxd = (void *)last->data - sizeof(*rxd);
	attention = __le32_to_cpu(rxd->attention.flags);

	has_fcs_err = !!(attention & RX_ATTENTION_FLAGS_FCS_ERR);
	has_crypto_err = !!(attention & RX_ATTENTION_FLAGS_DECRYPT_ERR);
	has_tkip_err = !!(attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR);
	has_peer_idx_invalid = !!(attention & RX_ATTENTION_FLAGS_PEER_IDX_INVALID);

	/* Note: If hardware captures an encrypted frame that it can't decrypt,
	 * e.g. due to fcs error, missing peer or invalid key data it will
	 * report the frame as raw.
	 */
	is_decrypted = (enctype != HTT_RX_MPDU_ENCRYPT_NONE &&
			!has_fcs_err &&
			!has_crypto_err &&
			!has_peer_idx_invalid);

	/* Clear per-MPDU flags while leaving per-PPDU flags intact. */
	status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
			  RX_FLAG_MMIC_ERROR |
			  RX_FLAG_DECRYPTED |
			  RX_FLAG_IV_STRIPPED |
			  RX_FLAG_MMIC_STRIPPED);

	if (has_fcs_err)
		status->flag |= RX_FLAG_FAILED_FCS_CRC;

	if (has_tkip_err)
		status->flag |= RX_FLAG_MMIC_ERROR;

	/* Hardware already stripped IV/MIC; tell mac80211 so it does not
	 * attempt software decryption.
	 */
	if (is_decrypted)
		status->flag |= RX_FLAG_DECRYPTED |
				RX_FLAG_IV_STRIPPED |
				RX_FLAG_MMIC_STRIPPED;

	skb_queue_walk(amsdu, msdu) {
		ath10k_htt_rx_h_csum_offload(msdu);
		ath10k_htt_rx_h_undecap(ar, msdu, status, first_hdr, enctype,
					is_decrypted);

		/* Undecapping involves copying the original 802.11 header back
		 * to sk_buff. If frame is protected and hardware has decrypted
		 * it then remove the protected bit.
		 */
		if (!is_decrypted)
			continue;

		hdr = (void *)msdu->data;
		hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
	}
}
1329581c25f8SMichal Kazior 
1330581c25f8SMichal Kazior static void ath10k_htt_rx_h_deliver(struct ath10k *ar,
1331581c25f8SMichal Kazior 				    struct sk_buff_head *amsdu,
1332581c25f8SMichal Kazior 				    struct ieee80211_rx_status *status)
1333581c25f8SMichal Kazior {
1334581c25f8SMichal Kazior 	struct sk_buff *msdu;
1335581c25f8SMichal Kazior 
1336581c25f8SMichal Kazior 	while ((msdu = __skb_dequeue(amsdu))) {
1337581c25f8SMichal Kazior 		/* Setup per-MSDU flags */
1338581c25f8SMichal Kazior 		if (skb_queue_empty(amsdu))
1339581c25f8SMichal Kazior 			status->flag &= ~RX_FLAG_AMSDU_MORE;
1340581c25f8SMichal Kazior 		else
1341581c25f8SMichal Kazior 			status->flag |= RX_FLAG_AMSDU_MORE;
1342581c25f8SMichal Kazior 
1343581c25f8SMichal Kazior 		ath10k_process_rx(ar, status, msdu);
1344581c25f8SMichal Kazior 	}
1345581c25f8SMichal Kazior }
1346581c25f8SMichal Kazior 
/* Coalesce a chained msdu (one frame scattered across several rx ring
 * buffers) into the first skb by expanding its tailroom and copying the
 * remaining buffers in. On success the queue holds exactly the merged skb
 * and 0 is returned. On allocation failure the queue is restored intact
 * (so the caller can free it as a whole) and -1 is returned.
 */
static int ath10k_unchain_msdu(struct sk_buff_head *amsdu)
{
	struct sk_buff *skb, *first;
	int space;
	int total_len = 0;

	/* TODO:  Might could optimize this by using
	 * skb_try_coalesce or similar method to
	 * decrease copying, or maybe get mac80211 to
	 * provide a way to just receive a list of
	 * skb?
	 */

	first = __skb_dequeue(amsdu);

	/* Allocate total length all at once. */
	skb_queue_walk(amsdu, skb)
		total_len += skb->len;

	/* Extra tailroom needed beyond what 'first' already has. */
	space = total_len - skb_tailroom(first);
	if ((space > 0) &&
	    (pskb_expand_head(first, 0, space, GFP_ATOMIC) < 0)) {
		/* TODO:  bump some rx-oom error stat */
		/* put it back together so we can free the
		 * whole list at once.
		 */
		__skb_queue_head(amsdu, first);
		return -1;
	}

	/* Walk list again, copying contents into
	 * msdu_head
	 */
	while ((skb = __skb_dequeue(amsdu))) {
		skb_copy_from_linear_data(skb, skb_put(first, skb->len),
					  skb->len);
		dev_kfree_skb_any(skb);
	}

	__skb_queue_head(amsdu, first);
	return 0;
}
1389bfa35368SBen Greear 
1390581c25f8SMichal Kazior static void ath10k_htt_rx_h_unchain(struct ath10k *ar,
1391581c25f8SMichal Kazior 				    struct sk_buff_head *amsdu,
1392581c25f8SMichal Kazior 				    bool chained)
13932acc4eb2SJanusz Dziedzic {
1394581c25f8SMichal Kazior 	struct sk_buff *first;
1395581c25f8SMichal Kazior 	struct htt_rx_desc *rxd;
1396581c25f8SMichal Kazior 	enum rx_msdu_decap_format decap;
13977aa7a72aSMichal Kazior 
1398581c25f8SMichal Kazior 	first = skb_peek(amsdu);
1399581c25f8SMichal Kazior 	rxd = (void *)first->data - sizeof(*rxd);
1400581c25f8SMichal Kazior 	decap = MS(__le32_to_cpu(rxd->msdu_start.info1),
1401581c25f8SMichal Kazior 		   RX_MSDU_START_INFO1_DECAP_FORMAT);
1402581c25f8SMichal Kazior 
1403581c25f8SMichal Kazior 	if (!chained)
1404581c25f8SMichal Kazior 		return;
1405581c25f8SMichal Kazior 
1406581c25f8SMichal Kazior 	/* FIXME: Current unchaining logic can only handle simple case of raw
1407581c25f8SMichal Kazior 	 * msdu chaining. If decapping is other than raw the chaining may be
1408581c25f8SMichal Kazior 	 * more complex and this isn't handled by the current code. Don't even
1409581c25f8SMichal Kazior 	 * try re-constructing such frames - it'll be pretty much garbage.
1410581c25f8SMichal Kazior 	 */
1411581c25f8SMichal Kazior 	if (decap != RX_MSDU_DECAP_RAW ||
1412581c25f8SMichal Kazior 	    skb_queue_len(amsdu) != 1 + rxd->frag_info.ring2_more_count) {
1413581c25f8SMichal Kazior 		__skb_queue_purge(amsdu);
1414581c25f8SMichal Kazior 		return;
1415581c25f8SMichal Kazior 	}
1416581c25f8SMichal Kazior 
1417581c25f8SMichal Kazior 	ath10k_unchain_msdu(amsdu);
1418581c25f8SMichal Kazior }
1419581c25f8SMichal Kazior 
/* Decide whether an A-MSDU should be processed further or dropped.
 *
 * Returns false (drop) when: no channel is known for the frame, it is a
 * non-corrupted management frame (those arrive via WMI instead), or a CAC
 * (channel availability check) is in progress. Assumes @amsdu is non-empty;
 * the caller (ath10k_htt_rx_h_filter) guarantees this.
 */
static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar,
					struct sk_buff_head *amsdu,
					struct ieee80211_rx_status *rx_status)
{
	struct sk_buff *msdu;
	struct htt_rx_desc *rxd;
	bool is_mgmt;
	bool has_fcs_err;

	msdu = skb_peek(amsdu);
	rxd = (void *)msdu->data - sizeof(*rxd);

	/* FIXME: It might be a good idea to do some fuzzy-testing to drop
	 * invalid/dangerous frames.
	 */

	if (!rx_status->freq) {
		ath10k_warn(ar, "no channel configured; ignoring frame(s)!\n");
		return false;
	}

	is_mgmt = !!(rxd->attention.flags &
		     __cpu_to_le32(RX_ATTENTION_FLAGS_MGMT_TYPE));
	has_fcs_err = !!(rxd->attention.flags &
			 __cpu_to_le32(RX_ATTENTION_FLAGS_FCS_ERR));

	/* Management frames are handled via WMI events. The pros of such
	 * approach is that channel is explicitly provided in WMI events
	 * whereas HTT doesn't provide channel information for Rxed frames.
	 *
	 * However some firmware revisions don't report corrupted frames via
	 * WMI so don't drop them.
	 */
	if (is_mgmt && !has_fcs_err) {
		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx mgmt ctrl\n");
		return false;
	}

	if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) {
		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx cac running\n");
		return false;
	}

	return true;
}
14652acc4eb2SJanusz Dziedzic 
/* Purge (free) the whole A-MSDU unless it passes the acceptance checks.
 * Empty queues need no filtering and the allowed-check is skipped for them.
 */
static void ath10k_htt_rx_h_filter(struct ath10k *ar,
				   struct sk_buff_head *amsdu,
				   struct ieee80211_rx_status *rx_status)
{
	if (!skb_queue_empty(amsdu) &&
	    !ath10k_htt_rx_amsdu_allowed(ar, amsdu, rx_status))
		__skb_queue_purge(amsdu);
}
1478581c25f8SMichal Kazior 
/* Handle an HTT rx indication: pop each advertised MPDU off the rx ring and
 * run it through the rx pipeline (ppdu status, unchaining, filtering, mpdu
 * processing, delivery to mac80211).
 *
 * Must be called with htt->rx_ring.lock held. If popping fails the ring
 * state is considered unrecoverable and further rx processing is disabled
 * via htt->rx_confused.
 */
static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
				  struct htt_rx_indication *rx)
{
	struct ath10k *ar = htt->ar;
	struct ieee80211_rx_status *rx_status = &htt->rx_status;
	struct htt_rx_indication_mpdu_range *mpdu_ranges;
	struct sk_buff_head amsdu;
	int num_mpdu_ranges;
	int fw_desc_len;
	u8 *fw_desc;
	int i, ret, mpdu_count = 0;

	lockdep_assert_held(&htt->rx_ring.lock);

	if (htt->rx_confused)
		return;

	fw_desc_len = __le16_to_cpu(rx->prefix.fw_rx_desc_bytes);
	fw_desc = (u8 *)&rx->fw_desc;

	num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
			     HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
	mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx);

	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
			rx, sizeof(*rx) +
			(sizeof(struct htt_rx_indication_mpdu_range) *
				num_mpdu_ranges));

	/* Total number of MPDUs across all ranges in this indication. */
	for (i = 0; i < num_mpdu_ranges; i++)
		mpdu_count += mpdu_ranges[i].mpdu_count;

	while (mpdu_count--) {
		__skb_queue_head_init(&amsdu);
		ret = ath10k_htt_rx_amsdu_pop(htt, &fw_desc,
					      &fw_desc_len, &amsdu);
		if (ret < 0) {
			ath10k_warn(ar, "rx ring became corrupted: %d\n", ret);
			__skb_queue_purge(&amsdu);
			/* FIXME: It's probably a good idea to reboot the
			 * device instead of leaving it inoperable.
			 */
			htt->rx_confused = true;
			break;
		}

		/* ret > 0 from amsdu_pop signals a chained msdu. */
		ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status);
		ath10k_htt_rx_h_unchain(ar, &amsdu, ret > 0);
		ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
		ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status);
		ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status);
	}

	/* Refill the rx ring outside of this (locked) context. */
	tasklet_schedule(&htt->rx_replenish_task);
}
15345e3dd157SKalle Valo 
/* Handle an HTT fragment indication: a fragmented frame arrives as exactly
 * one msdu which is popped off the rx ring and pushed through the rx
 * pipeline. Takes htt->rx_ring.lock internally (unlike the rx-ind handler).
 */
static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
				       struct htt_rx_fragment_indication *frag)
{
	struct ath10k *ar = htt->ar;
	struct ieee80211_rx_status *rx_status = &htt->rx_status;
	struct sk_buff_head amsdu;
	int ret;
	u8 *fw_desc;
	int fw_desc_len;

	fw_desc_len = __le16_to_cpu(frag->fw_rx_desc_bytes);
	fw_desc = (u8 *)frag->fw_msdu_rx_desc;

	__skb_queue_head_init(&amsdu);

	spin_lock_bh(&htt->rx_ring.lock);
	ret = ath10k_htt_rx_amsdu_pop(htt, &fw_desc, &fw_desc_len,
				      &amsdu);
	spin_unlock_bh(&htt->rx_ring.lock);

	/* Refill the ring regardless of whether the pop succeeded. */
	tasklet_schedule(&htt->rx_replenish_task);

	ath10k_dbg(ar, ATH10K_DBG_HTT_DUMP, "htt rx frag ahead\n");

	if (ret) {
		ath10k_warn(ar, "failed to pop amsdu from httr rx ring for fragmented rx %d\n",
			    ret);
		__skb_queue_purge(&amsdu);
		return;
	}

	/* A fragment indication should carry exactly one msdu. */
	if (skb_queue_len(&amsdu) != 1) {
		ath10k_warn(ar, "failed to pop frag amsdu: too many msdus\n");
		__skb_queue_purge(&amsdu);
		return;
	}

	ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status);
	ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
	ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status);
	ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status);

	if (fw_desc_len > 0) {
		ath10k_dbg(ar, ATH10K_DBG_HTT,
			   "expecting more fragmented rx in one indication %d\n",
			   fw_desc_len);
	}
}
15835e3dd157SKalle Valo 
15846c5151a9SMichal Kazior static void ath10k_htt_rx_frm_tx_compl(struct ath10k *ar,
15856c5151a9SMichal Kazior 				       struct sk_buff *skb)
15866c5151a9SMichal Kazior {
15876c5151a9SMichal Kazior 	struct ath10k_htt *htt = &ar->htt;
15886c5151a9SMichal Kazior 	struct htt_resp *resp = (struct htt_resp *)skb->data;
15896c5151a9SMichal Kazior 	struct htt_tx_done tx_done = {};
15906c5151a9SMichal Kazior 	int status = MS(resp->data_tx_completion.flags, HTT_DATA_TX_STATUS);
15916c5151a9SMichal Kazior 	__le16 msdu_id;
15926c5151a9SMichal Kazior 	int i;
15936c5151a9SMichal Kazior 
159445967089SMichal Kazior 	lockdep_assert_held(&htt->tx_lock);
159545967089SMichal Kazior 
15966c5151a9SMichal Kazior 	switch (status) {
15976c5151a9SMichal Kazior 	case HTT_DATA_TX_STATUS_NO_ACK:
15986c5151a9SMichal Kazior 		tx_done.no_ack = true;
15996c5151a9SMichal Kazior 		break;
16006c5151a9SMichal Kazior 	case HTT_DATA_TX_STATUS_OK:
16016c5151a9SMichal Kazior 		break;
16026c5151a9SMichal Kazior 	case HTT_DATA_TX_STATUS_DISCARD:
16036c5151a9SMichal Kazior 	case HTT_DATA_TX_STATUS_POSTPONE:
16046c5151a9SMichal Kazior 	case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL:
16056c5151a9SMichal Kazior 		tx_done.discard = true;
16066c5151a9SMichal Kazior 		break;
16076c5151a9SMichal Kazior 	default:
16087aa7a72aSMichal Kazior 		ath10k_warn(ar, "unhandled tx completion status %d\n", status);
16096c5151a9SMichal Kazior 		tx_done.discard = true;
16106c5151a9SMichal Kazior 		break;
16116c5151a9SMichal Kazior 	}
16126c5151a9SMichal Kazior 
16137aa7a72aSMichal Kazior 	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
16146c5151a9SMichal Kazior 		   resp->data_tx_completion.num_msdus);
16156c5151a9SMichal Kazior 
16166c5151a9SMichal Kazior 	for (i = 0; i < resp->data_tx_completion.num_msdus; i++) {
16176c5151a9SMichal Kazior 		msdu_id = resp->data_tx_completion.msdus[i];
16186c5151a9SMichal Kazior 		tx_done.msdu_id = __le16_to_cpu(msdu_id);
16196c5151a9SMichal Kazior 		ath10k_txrx_tx_unref(htt, &tx_done);
16206c5151a9SMichal Kazior 	}
16216c5151a9SMichal Kazior }
16226c5151a9SMichal Kazior 
1623aa5b4fbcSMichal Kazior static void ath10k_htt_rx_addba(struct ath10k *ar, struct htt_resp *resp)
1624aa5b4fbcSMichal Kazior {
1625aa5b4fbcSMichal Kazior 	struct htt_rx_addba *ev = &resp->rx_addba;
1626aa5b4fbcSMichal Kazior 	struct ath10k_peer *peer;
1627aa5b4fbcSMichal Kazior 	struct ath10k_vif *arvif;
1628aa5b4fbcSMichal Kazior 	u16 info0, tid, peer_id;
1629aa5b4fbcSMichal Kazior 
1630aa5b4fbcSMichal Kazior 	info0 = __le16_to_cpu(ev->info0);
1631aa5b4fbcSMichal Kazior 	tid = MS(info0, HTT_RX_BA_INFO0_TID);
1632aa5b4fbcSMichal Kazior 	peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);
1633aa5b4fbcSMichal Kazior 
16347aa7a72aSMichal Kazior 	ath10k_dbg(ar, ATH10K_DBG_HTT,
1635aa5b4fbcSMichal Kazior 		   "htt rx addba tid %hu peer_id %hu size %hhu\n",
1636aa5b4fbcSMichal Kazior 		   tid, peer_id, ev->window_size);
1637aa5b4fbcSMichal Kazior 
1638aa5b4fbcSMichal Kazior 	spin_lock_bh(&ar->data_lock);
1639aa5b4fbcSMichal Kazior 	peer = ath10k_peer_find_by_id(ar, peer_id);
1640aa5b4fbcSMichal Kazior 	if (!peer) {
16417aa7a72aSMichal Kazior 		ath10k_warn(ar, "received addba event for invalid peer_id: %hu\n",
1642aa5b4fbcSMichal Kazior 			    peer_id);
1643aa5b4fbcSMichal Kazior 		spin_unlock_bh(&ar->data_lock);
1644aa5b4fbcSMichal Kazior 		return;
1645aa5b4fbcSMichal Kazior 	}
1646aa5b4fbcSMichal Kazior 
1647aa5b4fbcSMichal Kazior 	arvif = ath10k_get_arvif(ar, peer->vdev_id);
1648aa5b4fbcSMichal Kazior 	if (!arvif) {
16497aa7a72aSMichal Kazior 		ath10k_warn(ar, "received addba event for invalid vdev_id: %u\n",
1650aa5b4fbcSMichal Kazior 			    peer->vdev_id);
1651aa5b4fbcSMichal Kazior 		spin_unlock_bh(&ar->data_lock);
1652aa5b4fbcSMichal Kazior 		return;
1653aa5b4fbcSMichal Kazior 	}
1654aa5b4fbcSMichal Kazior 
16557aa7a72aSMichal Kazior 	ath10k_dbg(ar, ATH10K_DBG_HTT,
1656aa5b4fbcSMichal Kazior 		   "htt rx start rx ba session sta %pM tid %hu size %hhu\n",
1657aa5b4fbcSMichal Kazior 		   peer->addr, tid, ev->window_size);
1658aa5b4fbcSMichal Kazior 
1659aa5b4fbcSMichal Kazior 	ieee80211_start_rx_ba_session_offl(arvif->vif, peer->addr, tid);
1660aa5b4fbcSMichal Kazior 	spin_unlock_bh(&ar->data_lock);
1661aa5b4fbcSMichal Kazior }
1662aa5b4fbcSMichal Kazior 
1663aa5b4fbcSMichal Kazior static void ath10k_htt_rx_delba(struct ath10k *ar, struct htt_resp *resp)
1664aa5b4fbcSMichal Kazior {
1665aa5b4fbcSMichal Kazior 	struct htt_rx_delba *ev = &resp->rx_delba;
1666aa5b4fbcSMichal Kazior 	struct ath10k_peer *peer;
1667aa5b4fbcSMichal Kazior 	struct ath10k_vif *arvif;
1668aa5b4fbcSMichal Kazior 	u16 info0, tid, peer_id;
1669aa5b4fbcSMichal Kazior 
1670aa5b4fbcSMichal Kazior 	info0 = __le16_to_cpu(ev->info0);
1671aa5b4fbcSMichal Kazior 	tid = MS(info0, HTT_RX_BA_INFO0_TID);
1672aa5b4fbcSMichal Kazior 	peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);
1673aa5b4fbcSMichal Kazior 
16747aa7a72aSMichal Kazior 	ath10k_dbg(ar, ATH10K_DBG_HTT,
1675aa5b4fbcSMichal Kazior 		   "htt rx delba tid %hu peer_id %hu\n",
1676aa5b4fbcSMichal Kazior 		   tid, peer_id);
1677aa5b4fbcSMichal Kazior 
1678aa5b4fbcSMichal Kazior 	spin_lock_bh(&ar->data_lock);
1679aa5b4fbcSMichal Kazior 	peer = ath10k_peer_find_by_id(ar, peer_id);
1680aa5b4fbcSMichal Kazior 	if (!peer) {
16817aa7a72aSMichal Kazior 		ath10k_warn(ar, "received addba event for invalid peer_id: %hu\n",
1682aa5b4fbcSMichal Kazior 			    peer_id);
1683aa5b4fbcSMichal Kazior 		spin_unlock_bh(&ar->data_lock);
1684aa5b4fbcSMichal Kazior 		return;
1685aa5b4fbcSMichal Kazior 	}
1686aa5b4fbcSMichal Kazior 
1687aa5b4fbcSMichal Kazior 	arvif = ath10k_get_arvif(ar, peer->vdev_id);
1688aa5b4fbcSMichal Kazior 	if (!arvif) {
16897aa7a72aSMichal Kazior 		ath10k_warn(ar, "received addba event for invalid vdev_id: %u\n",
1690aa5b4fbcSMichal Kazior 			    peer->vdev_id);
1691aa5b4fbcSMichal Kazior 		spin_unlock_bh(&ar->data_lock);
1692aa5b4fbcSMichal Kazior 		return;
1693aa5b4fbcSMichal Kazior 	}
1694aa5b4fbcSMichal Kazior 
16957aa7a72aSMichal Kazior 	ath10k_dbg(ar, ATH10K_DBG_HTT,
1696aa5b4fbcSMichal Kazior 		   "htt rx stop rx ba session sta %pM tid %hu\n",
1697aa5b4fbcSMichal Kazior 		   peer->addr, tid);
1698aa5b4fbcSMichal Kazior 
1699aa5b4fbcSMichal Kazior 	ieee80211_stop_rx_ba_session_offl(arvif->vif, peer->addr, tid);
1700aa5b4fbcSMichal Kazior 	spin_unlock_bh(&ar->data_lock);
1701aa5b4fbcSMichal Kazior }
1702aa5b4fbcSMichal Kazior 
1703c545070eSMichal Kazior static int ath10k_htt_rx_extract_amsdu(struct sk_buff_head *list,
1704c545070eSMichal Kazior 				       struct sk_buff_head *amsdu)
1705c545070eSMichal Kazior {
1706c545070eSMichal Kazior 	struct sk_buff *msdu;
1707c545070eSMichal Kazior 	struct htt_rx_desc *rxd;
1708c545070eSMichal Kazior 
1709c545070eSMichal Kazior 	if (skb_queue_empty(list))
1710c545070eSMichal Kazior 		return -ENOBUFS;
1711c545070eSMichal Kazior 
1712c545070eSMichal Kazior 	if (WARN_ON(!skb_queue_empty(amsdu)))
1713c545070eSMichal Kazior 		return -EINVAL;
1714c545070eSMichal Kazior 
1715c545070eSMichal Kazior 	while ((msdu = __skb_dequeue(list))) {
1716c545070eSMichal Kazior 		__skb_queue_tail(amsdu, msdu);
1717c545070eSMichal Kazior 
1718c545070eSMichal Kazior 		rxd = (void *)msdu->data - sizeof(*rxd);
1719c545070eSMichal Kazior 		if (rxd->msdu_end.info0 &
1720c545070eSMichal Kazior 		    __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))
1721c545070eSMichal Kazior 			break;
1722c545070eSMichal Kazior 	}
1723c545070eSMichal Kazior 
1724c545070eSMichal Kazior 	msdu = skb_peek_tail(amsdu);
1725c545070eSMichal Kazior 	rxd = (void *)msdu->data - sizeof(*rxd);
1726c545070eSMichal Kazior 	if (!(rxd->msdu_end.info0 &
1727c545070eSMichal Kazior 	      __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))) {
1728c545070eSMichal Kazior 		skb_queue_splice_init(amsdu, list);
1729c545070eSMichal Kazior 		return -EAGAIN;
1730c545070eSMichal Kazior 	}
1731c545070eSMichal Kazior 
1732c545070eSMichal Kazior 	return 0;
1733c545070eSMichal Kazior }
1734c545070eSMichal Kazior 
1735c545070eSMichal Kazior static void ath10k_htt_rx_h_rx_offload_prot(struct ieee80211_rx_status *status,
1736c545070eSMichal Kazior 					    struct sk_buff *skb)
1737c545070eSMichal Kazior {
1738c545070eSMichal Kazior 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1739c545070eSMichal Kazior 
1740c545070eSMichal Kazior 	if (!ieee80211_has_protected(hdr->frame_control))
1741c545070eSMichal Kazior 		return;
1742c545070eSMichal Kazior 
1743c545070eSMichal Kazior 	/* Offloaded frames are already decrypted but firmware insists they are
1744c545070eSMichal Kazior 	 * protected in the 802.11 header. Strip the flag.  Otherwise mac80211
1745c545070eSMichal Kazior 	 * will drop the frame.
1746c545070eSMichal Kazior 	 */
1747c545070eSMichal Kazior 
1748c545070eSMichal Kazior 	hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
1749c545070eSMichal Kazior 	status->flag |= RX_FLAG_DECRYPTED |
1750c545070eSMichal Kazior 			RX_FLAG_IV_STRIPPED |
1751c545070eSMichal Kazior 			RX_FLAG_MMIC_STRIPPED;
1752c545070eSMichal Kazior }
1753c545070eSMichal Kazior 
/* Process firmware-offloaded rx msdus: each skb carries a short meta header
 * (struct htt_rx_offload_msdu) instead of a full rx descriptor. Each frame
 * is trimmed, aligned, given a minimal rx status and delivered to mac80211.
 * The queue is consumed; over-long frames are dropped with a warning.
 */
static void ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
				       struct sk_buff_head *list)
{
	struct ath10k_htt *htt = &ar->htt;
	struct ieee80211_rx_status *status = &htt->rx_status;
	struct htt_rx_offload_msdu *rx;
	struct sk_buff *msdu;
	size_t offset;

	while ((msdu = __skb_dequeue(list))) {
		/* Offloaded frames don't have Rx descriptor. Instead they have
		 * a short meta information header.
		 */

		rx = (void *)msdu->data;

		/* Advance past the meta header; the put/pull pair nets out to
		 * moving msdu->data forward by sizeof(*rx) while keeping the
		 * skb length unchanged. rx still points at the header.
		 */
		skb_put(msdu, sizeof(*rx));
		skb_pull(msdu, sizeof(*rx));

		if (skb_tailroom(msdu) < __le16_to_cpu(rx->msdu_len)) {
			ath10k_warn(ar, "dropping frame: offloaded rx msdu is too long!\n");
			dev_kfree_skb_any(msdu);
			continue;
		}

		skb_put(msdu, __le16_to_cpu(rx->msdu_len));

		/* Offloaded rx header length isn't multiple of 2 nor 4 so the
		 * actual payload is unaligned. Align the frame.  Otherwise
		 * mac80211 complains.  This shouldn't reduce performance much
		 * because these offloaded frames are rare.
		 */
		offset = 4 - ((unsigned long)msdu->data & 3);
		skb_put(msdu, offset);
		memmove(msdu->data + offset, msdu->data, msdu->len);
		skb_pull(msdu, offset);

		/* FIXME: The frame is NWifi. Re-construct QoS Control
		 * if possible later.
		 */

		/* No signal information is available for offloaded frames. */
		memset(status, 0, sizeof(*status));
		status->flag |= RX_FLAG_NO_SIGNAL_VAL;

		ath10k_htt_rx_h_rx_offload_prot(status, msdu);
		ath10k_htt_rx_h_channel(ar, status);
		ath10k_process_rx(ar, status, msdu);
	}
}
1803c545070eSMichal Kazior 
/* Process an HTT in-order rx indication event.
 *
 * The event reports a list of rx buffer paddrs that must be popped from the
 * rx ring in the indicated order. Each entry may hold one MSDU of an A-MSDU;
 * A-MSDUs are extracted one at a time and delivered to mac80211. Offloaded
 * frames (fw-decapped) are handled separately first.
 *
 * Must be called with htt->rx_ring.lock held (asserted below). Consumes the
 * popped ring buffers; the caller still owns @skb itself.
 */
static void ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_resp *resp = (void *)skb->data;
	struct ieee80211_rx_status *status = &htt->rx_status;
	struct sk_buff_head list;
	struct sk_buff_head amsdu;
	u16 peer_id;
	u16 msdu_count;
	u8 vdev_id;
	u8 tid;
	bool offload;
	bool frag;
	int ret;

	lockdep_assert_held(&htt->rx_ring.lock);

	/* Once the rx ring state is suspect, stop processing to avoid
	 * feeding mac80211 corrupt frames.
	 */
	if (htt->rx_confused)
		return;

	skb_pull(skb, sizeof(resp->hdr));
	skb_pull(skb, sizeof(resp->rx_in_ord_ind));

	peer_id = __le16_to_cpu(resp->rx_in_ord_ind.peer_id);
	msdu_count = __le16_to_cpu(resp->rx_in_ord_ind.msdu_count);
	vdev_id = resp->rx_in_ord_ind.vdev_id;
	tid = SM(resp->rx_in_ord_ind.info, HTT_RX_IN_ORD_IND_INFO_TID);
	offload = !!(resp->rx_in_ord_ind.info &
			HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);
	frag = !!(resp->rx_in_ord_ind.info & HTT_RX_IN_ORD_IND_INFO_FRAG_MASK);

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx in ord vdev %i peer %i tid %i offload %i frag %i msdu count %i\n",
		   vdev_id, peer_id, tid, offload, frag, msdu_count);

	/* Reject events whose trailing msdu descriptor array is shorter than
	 * the advertised count before dereferencing it.
	 */
	if (skb->len < msdu_count * sizeof(*resp->rx_in_ord_ind.msdu_descs)) {
		ath10k_warn(ar, "dropping invalid in order rx indication\n");
		return;
	}

	/* The event can deliver more than 1 A-MSDU. Each A-MSDU is later
	 * extracted and processed.
	 */
	__skb_queue_head_init(&list);
	ret = ath10k_htt_rx_pop_paddr_list(htt, &resp->rx_in_ord_ind, &list);
	if (ret < 0) {
		ath10k_warn(ar, "failed to pop paddr list: %d\n", ret);
		/* Host and fw views of the ring have diverged; mark state
		 * unrecoverable rather than risk delivering bogus frames.
		 */
		htt->rx_confused = true;
		return;
	}

	/* Offloaded frames are very different and need to be handled
	 * separately.
	 */
	if (offload)
		ath10k_htt_rx_h_rx_offload(ar, &list);

	while (!skb_queue_empty(&list)) {
		__skb_queue_head_init(&amsdu);
		ret = ath10k_htt_rx_extract_amsdu(&list, &amsdu);
		switch (ret) {
		case 0:
			/* Note: The in-order indication may report interleaved
			 * frames from different PPDUs meaning reported rx rate
			 * to mac80211 isn't accurate/reliable. It's still
			 * better to report something than nothing though. This
			 * should still give an idea about rx rate to the user.
			 */
			ath10k_htt_rx_h_ppdu(ar, &amsdu, status);
			ath10k_htt_rx_h_filter(ar, &amsdu, status);
			ath10k_htt_rx_h_mpdu(ar, &amsdu, status);
			ath10k_htt_rx_h_deliver(ar, &amsdu, status);
			break;
		case -EAGAIN:
			/* fall through */
		default:
			/* Should not happen. */
			ath10k_warn(ar, "failed to extract amsdu: %d\n", ret);
			htt->rx_confused = true;
			__skb_queue_purge(&list);
			return;
		}
	}

	/* Refill the rx ring from softirq context to replace the buffers
	 * consumed above.
	 */
	tasklet_schedule(&htt->rx_replenish_task);
}
1890c545070eSMichal Kazior 
18915e3dd157SKalle Valo void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
18925e3dd157SKalle Valo {
1893edb8236dSMichal Kazior 	struct ath10k_htt *htt = &ar->htt;
18945e3dd157SKalle Valo 	struct htt_resp *resp = (struct htt_resp *)skb->data;
18955e3dd157SKalle Valo 
18965e3dd157SKalle Valo 	/* confirm alignment */
18975e3dd157SKalle Valo 	if (!IS_ALIGNED((unsigned long)skb->data, 4))
18987aa7a72aSMichal Kazior 		ath10k_warn(ar, "unaligned htt message, expect trouble\n");
18995e3dd157SKalle Valo 
19007aa7a72aSMichal Kazior 	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, msg_type: 0x%0X\n",
19015e3dd157SKalle Valo 		   resp->hdr.msg_type);
19025e3dd157SKalle Valo 	switch (resp->hdr.msg_type) {
19035e3dd157SKalle Valo 	case HTT_T2H_MSG_TYPE_VERSION_CONF: {
19045e3dd157SKalle Valo 		htt->target_version_major = resp->ver_resp.major;
19055e3dd157SKalle Valo 		htt->target_version_minor = resp->ver_resp.minor;
19065e3dd157SKalle Valo 		complete(&htt->target_version_received);
19075e3dd157SKalle Valo 		break;
19085e3dd157SKalle Valo 	}
19096c5151a9SMichal Kazior 	case HTT_T2H_MSG_TYPE_RX_IND:
191045967089SMichal Kazior 		spin_lock_bh(&htt->rx_ring.lock);
191145967089SMichal Kazior 		__skb_queue_tail(&htt->rx_compl_q, skb);
191245967089SMichal Kazior 		spin_unlock_bh(&htt->rx_ring.lock);
19136c5151a9SMichal Kazior 		tasklet_schedule(&htt->txrx_compl_task);
19146c5151a9SMichal Kazior 		return;
19155e3dd157SKalle Valo 	case HTT_T2H_MSG_TYPE_PEER_MAP: {
19165e3dd157SKalle Valo 		struct htt_peer_map_event ev = {
19175e3dd157SKalle Valo 			.vdev_id = resp->peer_map.vdev_id,
19185e3dd157SKalle Valo 			.peer_id = __le16_to_cpu(resp->peer_map.peer_id),
19195e3dd157SKalle Valo 		};
19205e3dd157SKalle Valo 		memcpy(ev.addr, resp->peer_map.addr, sizeof(ev.addr));
19215e3dd157SKalle Valo 		ath10k_peer_map_event(htt, &ev);
19225e3dd157SKalle Valo 		break;
19235e3dd157SKalle Valo 	}
19245e3dd157SKalle Valo 	case HTT_T2H_MSG_TYPE_PEER_UNMAP: {
19255e3dd157SKalle Valo 		struct htt_peer_unmap_event ev = {
19265e3dd157SKalle Valo 			.peer_id = __le16_to_cpu(resp->peer_unmap.peer_id),
19275e3dd157SKalle Valo 		};
19285e3dd157SKalle Valo 		ath10k_peer_unmap_event(htt, &ev);
19295e3dd157SKalle Valo 		break;
19305e3dd157SKalle Valo 	}
19315e3dd157SKalle Valo 	case HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION: {
19325e3dd157SKalle Valo 		struct htt_tx_done tx_done = {};
19335e3dd157SKalle Valo 		int status = __le32_to_cpu(resp->mgmt_tx_completion.status);
19345e3dd157SKalle Valo 
19355e3dd157SKalle Valo 		tx_done.msdu_id =
19365e3dd157SKalle Valo 			__le32_to_cpu(resp->mgmt_tx_completion.desc_id);
19375e3dd157SKalle Valo 
19385e3dd157SKalle Valo 		switch (status) {
19395e3dd157SKalle Valo 		case HTT_MGMT_TX_STATUS_OK:
19405e3dd157SKalle Valo 			break;
19415e3dd157SKalle Valo 		case HTT_MGMT_TX_STATUS_RETRY:
19425e3dd157SKalle Valo 			tx_done.no_ack = true;
19435e3dd157SKalle Valo 			break;
19445e3dd157SKalle Valo 		case HTT_MGMT_TX_STATUS_DROP:
19455e3dd157SKalle Valo 			tx_done.discard = true;
19465e3dd157SKalle Valo 			break;
19475e3dd157SKalle Valo 		}
19485e3dd157SKalle Valo 
19496c5151a9SMichal Kazior 		spin_lock_bh(&htt->tx_lock);
19500a89f8a0SMichal Kazior 		ath10k_txrx_tx_unref(htt, &tx_done);
19516c5151a9SMichal Kazior 		spin_unlock_bh(&htt->tx_lock);
19525e3dd157SKalle Valo 		break;
19535e3dd157SKalle Valo 	}
19546c5151a9SMichal Kazior 	case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
19556c5151a9SMichal Kazior 		spin_lock_bh(&htt->tx_lock);
19566c5151a9SMichal Kazior 		__skb_queue_tail(&htt->tx_compl_q, skb);
19576c5151a9SMichal Kazior 		spin_unlock_bh(&htt->tx_lock);
19586c5151a9SMichal Kazior 		tasklet_schedule(&htt->txrx_compl_task);
19596c5151a9SMichal Kazior 		return;
19605e3dd157SKalle Valo 	case HTT_T2H_MSG_TYPE_SEC_IND: {
19615e3dd157SKalle Valo 		struct ath10k *ar = htt->ar;
19625e3dd157SKalle Valo 		struct htt_security_indication *ev = &resp->security_indication;
19635e3dd157SKalle Valo 
19647aa7a72aSMichal Kazior 		ath10k_dbg(ar, ATH10K_DBG_HTT,
19655e3dd157SKalle Valo 			   "sec ind peer_id %d unicast %d type %d\n",
19665e3dd157SKalle Valo 			  __le16_to_cpu(ev->peer_id),
19675e3dd157SKalle Valo 			  !!(ev->flags & HTT_SECURITY_IS_UNICAST),
19685e3dd157SKalle Valo 			  MS(ev->flags, HTT_SECURITY_TYPE));
19695e3dd157SKalle Valo 		complete(&ar->install_key_done);
19705e3dd157SKalle Valo 		break;
19715e3dd157SKalle Valo 	}
19725e3dd157SKalle Valo 	case HTT_T2H_MSG_TYPE_RX_FRAG_IND: {
19737aa7a72aSMichal Kazior 		ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
19745e3dd157SKalle Valo 				skb->data, skb->len);
19755e3dd157SKalle Valo 		ath10k_htt_rx_frag_handler(htt, &resp->rx_frag_ind);
19765e3dd157SKalle Valo 		break;
19775e3dd157SKalle Valo 	}
19785e3dd157SKalle Valo 	case HTT_T2H_MSG_TYPE_TEST:
19795e3dd157SKalle Valo 		/* FIX THIS */
19805e3dd157SKalle Valo 		break;
19815e3dd157SKalle Valo 	case HTT_T2H_MSG_TYPE_STATS_CONF:
1982d35a6c18SMichal Kazior 		trace_ath10k_htt_stats(ar, skb->data, skb->len);
1983a9bf0506SKalle Valo 		break;
1984a9bf0506SKalle Valo 	case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
1985708b9bdeSMichal Kazior 		/* Firmware can return tx frames if it's unable to fully
1986708b9bdeSMichal Kazior 		 * process them and suspects host may be able to fix it. ath10k
1987708b9bdeSMichal Kazior 		 * sends all tx frames as already inspected so this shouldn't
1988708b9bdeSMichal Kazior 		 * happen unless fw has a bug.
1989708b9bdeSMichal Kazior 		 */
19907aa7a72aSMichal Kazior 		ath10k_warn(ar, "received an unexpected htt tx inspect event\n");
1991708b9bdeSMichal Kazior 		break;
19925e3dd157SKalle Valo 	case HTT_T2H_MSG_TYPE_RX_ADDBA:
1993aa5b4fbcSMichal Kazior 		ath10k_htt_rx_addba(ar, resp);
1994aa5b4fbcSMichal Kazior 		break;
19955e3dd157SKalle Valo 	case HTT_T2H_MSG_TYPE_RX_DELBA:
1996aa5b4fbcSMichal Kazior 		ath10k_htt_rx_delba(ar, resp);
1997aa5b4fbcSMichal Kazior 		break;
1998bfdd7937SRajkumar Manoharan 	case HTT_T2H_MSG_TYPE_PKTLOG: {
1999bfdd7937SRajkumar Manoharan 		struct ath10k_pktlog_hdr *hdr =
2000bfdd7937SRajkumar Manoharan 			(struct ath10k_pktlog_hdr *)resp->pktlog_msg.payload;
2001bfdd7937SRajkumar Manoharan 
2002bfdd7937SRajkumar Manoharan 		trace_ath10k_htt_pktlog(ar, resp->pktlog_msg.payload,
2003bfdd7937SRajkumar Manoharan 					sizeof(*hdr) +
2004bfdd7937SRajkumar Manoharan 					__le16_to_cpu(hdr->size));
2005bfdd7937SRajkumar Manoharan 		break;
2006bfdd7937SRajkumar Manoharan 	}
2007aa5b4fbcSMichal Kazior 	case HTT_T2H_MSG_TYPE_RX_FLUSH: {
2008aa5b4fbcSMichal Kazior 		/* Ignore this event because mac80211 takes care of Rx
2009aa5b4fbcSMichal Kazior 		 * aggregation reordering.
2010aa5b4fbcSMichal Kazior 		 */
2011aa5b4fbcSMichal Kazior 		break;
2012aa5b4fbcSMichal Kazior 	}
2013c545070eSMichal Kazior 	case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND: {
2014c545070eSMichal Kazior 		spin_lock_bh(&htt->rx_ring.lock);
2015c545070eSMichal Kazior 		__skb_queue_tail(&htt->rx_in_ord_compl_q, skb);
2016c545070eSMichal Kazior 		spin_unlock_bh(&htt->rx_ring.lock);
2017c545070eSMichal Kazior 		tasklet_schedule(&htt->txrx_compl_task);
2018c545070eSMichal Kazior 		return;
2019c545070eSMichal Kazior 	}
2020c545070eSMichal Kazior 	case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND:
2021c545070eSMichal Kazior 		/* FIXME: This WMI-TLV event is overlapping with 10.2
2022c545070eSMichal Kazior 		 * CHAN_CHANGE - both being 0xF. Neither is being used in
2023c545070eSMichal Kazior 		 * practice so no immediate action is necessary. Nevertheless
2024c545070eSMichal Kazior 		 * HTT may need an abstraction layer like WMI has one day.
2025c545070eSMichal Kazior 		 */
2026c545070eSMichal Kazior 		break;
20275e3dd157SKalle Valo 	default:
20282358a544SMichal Kazior 		ath10k_warn(ar, "htt event (%d) not handled\n",
20295e3dd157SKalle Valo 			    resp->hdr.msg_type);
20307aa7a72aSMichal Kazior 		ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
20315e3dd157SKalle Valo 				skb->data, skb->len);
20325e3dd157SKalle Valo 		break;
20335e3dd157SKalle Valo 	};
20345e3dd157SKalle Valo 
20355e3dd157SKalle Valo 	/* Free the indication buffer */
20365e3dd157SKalle Valo 	dev_kfree_skb_any(skb);
20375e3dd157SKalle Valo }
20386c5151a9SMichal Kazior 
/* Tasklet draining the tx/rx completion queues populated by
 * ath10k_htt_t2h_msg_handler().
 *
 * @ptr: the struct ath10k_htt, cast through the tasklet's unsigned long
 *       data argument.
 *
 * Each queue is drained under the same lock that guards its producer:
 * tx_compl_q under tx_lock, rx_compl_q and rx_in_ord_compl_q under
 * rx_ring.lock (ath10k_htt_rx_in_ord_ind asserts the latter is held).
 * Dequeued skbs are owned here and freed after processing.
 */
static void ath10k_htt_txrx_compl_task(unsigned long ptr)
{
	struct ath10k_htt *htt = (struct ath10k_htt *)ptr;
	struct ath10k *ar = htt->ar;
	struct htt_resp *resp;
	struct sk_buff *skb;

	spin_lock_bh(&htt->tx_lock);
	while ((skb = __skb_dequeue(&htt->tx_compl_q))) {
		ath10k_htt_rx_frm_tx_compl(htt->ar, skb);
		dev_kfree_skb_any(skb);
	}
	spin_unlock_bh(&htt->tx_lock);

	spin_lock_bh(&htt->rx_ring.lock);
	while ((skb = __skb_dequeue(&htt->rx_compl_q))) {
		resp = (struct htt_resp *)skb->data;
		ath10k_htt_rx_handler(htt, &resp->rx_ind);
		dev_kfree_skb_any(skb);
	}

	/* Still under rx_ring.lock, as required by
	 * ath10k_htt_rx_in_ord_ind().
	 */
	while ((skb = __skb_dequeue(&htt->rx_in_ord_compl_q))) {
		ath10k_htt_rx_in_ord_ind(ar, skb);
		dev_kfree_skb_any(skb);
	}
	spin_unlock_bh(&htt->rx_ring.lock);
}
2066