15e3dd157SKalle Valo /*
25e3dd157SKalle Valo  * Copyright (c) 2005-2011 Atheros Communications Inc.
35e3dd157SKalle Valo  * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
45e3dd157SKalle Valo  *
55e3dd157SKalle Valo  * Permission to use, copy, modify, and/or distribute this software for any
65e3dd157SKalle Valo  * purpose with or without fee is hereby granted, provided that the above
75e3dd157SKalle Valo  * copyright notice and this permission notice appear in all copies.
85e3dd157SKalle Valo  *
95e3dd157SKalle Valo  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
105e3dd157SKalle Valo  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
115e3dd157SKalle Valo  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
125e3dd157SKalle Valo  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
135e3dd157SKalle Valo  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
145e3dd157SKalle Valo  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
155e3dd157SKalle Valo  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
165e3dd157SKalle Valo  */
175e3dd157SKalle Valo 
18edb8236dSMichal Kazior #include "core.h"
195e3dd157SKalle Valo #include "htc.h"
205e3dd157SKalle Valo #include "htt.h"
215e3dd157SKalle Valo #include "txrx.h"
225e3dd157SKalle Valo #include "debug.h"
23a9bf0506SKalle Valo #include "trace.h"
245e3dd157SKalle Valo 
255e3dd157SKalle Valo #include <linux/log2.h>
265e3dd157SKalle Valo 
275e3dd157SKalle Valo /* slightly larger than one large A-MPDU */
285e3dd157SKalle Valo #define HTT_RX_RING_SIZE_MIN 128
295e3dd157SKalle Valo 
305e3dd157SKalle Valo /* roughly 20 ms @ 1 Gbps of 1500B MSDUs */
315e3dd157SKalle Valo #define HTT_RX_RING_SIZE_MAX 2048
325e3dd157SKalle Valo 
335e3dd157SKalle Valo #define HTT_RX_AVG_FRM_BYTES 1000
345e3dd157SKalle Valo 
355e3dd157SKalle Valo /* ms, very conservative */
365e3dd157SKalle Valo #define HTT_RX_HOST_LATENCY_MAX_MS 20
375e3dd157SKalle Valo 
385e3dd157SKalle Valo /* ms, conservative */
395e3dd157SKalle Valo #define HTT_RX_HOST_LATENCY_WORST_LIKELY_MS 10
405e3dd157SKalle Valo 
415e3dd157SKalle Valo /* when under memory pressure rx ring refill may fail and needs a retry */
425e3dd157SKalle Valo #define HTT_RX_RING_REFILL_RETRY_MS 50
435e3dd157SKalle Valo 
45f6dc2095SMichal Kazior static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);
466c5151a9SMichal Kazior static void ath10k_htt_txrx_compl_task(unsigned long ptr);
47f6dc2095SMichal Kazior 
485e3dd157SKalle Valo static int ath10k_htt_rx_ring_size(struct ath10k_htt *htt)
495e3dd157SKalle Valo {
505e3dd157SKalle Valo 	int size;
515e3dd157SKalle Valo 
525e3dd157SKalle Valo 	/*
535e3dd157SKalle Valo 	 * It is expected that the host CPU will typically be able to
545e3dd157SKalle Valo 	 * service the rx indication from one A-MPDU before the rx
555e3dd157SKalle Valo 	 * indication from the subsequent A-MPDU happens, roughly 1-2 ms
565e3dd157SKalle Valo 	 * later. However, the rx ring should be sized very conservatively,
575e3dd157SKalle Valo 	 * to accommodate the worst reasonable delay before the host CPU
585e3dd157SKalle Valo 	 * services a rx indication interrupt.
595e3dd157SKalle Valo 	 *
605e3dd157SKalle Valo 	 * The rx ring need not be kept full of empty buffers. In theory,
615e3dd157SKalle Valo 	 * the htt host SW can dynamically track the low-water mark in the
625e3dd157SKalle Valo 	 * rx ring, and dynamically adjust the level to which the rx ring
635e3dd157SKalle Valo 	 * is filled with empty buffers, to dynamically meet the desired
645e3dd157SKalle Valo 	 * low-water mark.
655e3dd157SKalle Valo 	 *
665e3dd157SKalle Valo 	 * In contrast, it's difficult to resize the rx ring itself, once
675e3dd157SKalle Valo 	 * it's in use. Thus, the ring itself should be sized very
685e3dd157SKalle Valo 	 * conservatively, while the degree to which the ring is filled
695e3dd157SKalle Valo 	 * with empty buffers should be sized moderately conservatively.
705e3dd157SKalle Valo 	 */
715e3dd157SKalle Valo 
725e3dd157SKalle Valo 	/* 1e6 bps/mbps / 1e3 ms per sec = 1000 */
735e3dd157SKalle Valo 	size =
745e3dd157SKalle Valo 	    htt->max_throughput_mbps *
755e3dd157SKalle Valo 	    1000  /
765e3dd157SKalle Valo 	    (8 * HTT_RX_AVG_FRM_BYTES) * HTT_RX_HOST_LATENCY_MAX_MS;
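	/* Worked example, assuming a hypothetical max_throughput_mbps of 800:
	 * 800 * 1000 / (8 * 1000) * 20 = 2000 entries, which the clamps below
	 * keep within [128, 2048] and roundup_pow_of_two() turns into 2048.
	 */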
775e3dd157SKalle Valo 
785e3dd157SKalle Valo 	if (size < HTT_RX_RING_SIZE_MIN)
795e3dd157SKalle Valo 		size = HTT_RX_RING_SIZE_MIN;
805e3dd157SKalle Valo 
815e3dd157SKalle Valo 	if (size > HTT_RX_RING_SIZE_MAX)
825e3dd157SKalle Valo 		size = HTT_RX_RING_SIZE_MAX;
835e3dd157SKalle Valo 
845e3dd157SKalle Valo 	size = roundup_pow_of_two(size);
855e3dd157SKalle Valo 
865e3dd157SKalle Valo 	return size;
875e3dd157SKalle Valo }
885e3dd157SKalle Valo 
895e3dd157SKalle Valo static int ath10k_htt_rx_ring_fill_level(struct ath10k_htt *htt)
905e3dd157SKalle Valo {
915e3dd157SKalle Valo 	int size;
925e3dd157SKalle Valo 
935e3dd157SKalle Valo 	/* 1e6 bps/mbps / 1e3 ms per sec = 1000 */
945e3dd157SKalle Valo 	size =
955e3dd157SKalle Valo 	    htt->max_throughput_mbps *
965e3dd157SKalle Valo 	    1000  /
975e3dd157SKalle Valo 	    (8 * HTT_RX_AVG_FRM_BYTES) * HTT_RX_HOST_LATENCY_WORST_LIKELY_MS;
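	/* Worked example, assuming the same hypothetical 800 mbps as above:
	 * 800 * 1000 / (8 * 1000) * 10 = 1000 buffers, i.e. roughly half of
	 * the ring computed in ath10k_htt_rx_ring_size().
	 */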
985e3dd157SKalle Valo 
995e3dd157SKalle Valo 	/*
1005e3dd157SKalle Valo 	 * Make sure the fill level is at least 1 less than the ring size.
1015e3dd157SKalle Valo 	 * Leaving 1 element empty allows the SW to easily distinguish
1025e3dd157SKalle Valo 	 * between a full ring and an empty ring.
1035e3dd157SKalle Valo 	 */
1045e3dd157SKalle Valo 	if (size >= htt->rx_ring.size)
1055e3dd157SKalle Valo 		size = htt->rx_ring.size - 1;
1065e3dd157SKalle Valo 
1075e3dd157SKalle Valo 	return size;
1085e3dd157SKalle Valo }
1095e3dd157SKalle Valo 
1105e3dd157SKalle Valo static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt)
1115e3dd157SKalle Valo {
1125e3dd157SKalle Valo 	struct sk_buff *skb;
1135e3dd157SKalle Valo 	struct ath10k_skb_cb *cb;
1145e3dd157SKalle Valo 	int i;
1155e3dd157SKalle Valo 
1165e3dd157SKalle Valo 	for (i = 0; i < htt->rx_ring.fill_cnt; i++) {
1175e3dd157SKalle Valo 		skb = htt->rx_ring.netbufs_ring[i];
1185e3dd157SKalle Valo 		cb = ATH10K_SKB_CB(skb);
1195e3dd157SKalle Valo 		dma_unmap_single(htt->ar->dev, cb->paddr,
1205e3dd157SKalle Valo 				 skb->len + skb_tailroom(skb),
1215e3dd157SKalle Valo 				 DMA_FROM_DEVICE);
1225e3dd157SKalle Valo 		dev_kfree_skb_any(skb);
1235e3dd157SKalle Valo 	}
1245e3dd157SKalle Valo 
1255e3dd157SKalle Valo 	htt->rx_ring.fill_cnt = 0;
1265e3dd157SKalle Valo }
1275e3dd157SKalle Valo 
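/* Post up to @num fresh rx buffers, starting at the shared alloc index kept
 * in coherent DMA memory (rx_ring.alloc_idx.vaddr) so that the firmware can
 * observe how far the host has filled the ring. Callers are expected to
 * serialize ring access; the ath10k_htt_rx_ring_fill_n() wrapper below
 * asserts rx_ring.lock for exactly that reason.
 */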
1285e3dd157SKalle Valo static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
1295e3dd157SKalle Valo {
1305e3dd157SKalle Valo 	struct htt_rx_desc *rx_desc;
1315e3dd157SKalle Valo 	struct sk_buff *skb;
1325e3dd157SKalle Valo 	dma_addr_t paddr;
1335e3dd157SKalle Valo 	int ret = 0, idx;
1345e3dd157SKalle Valo 
1355e3dd157SKalle Valo 	idx = __le32_to_cpu(*(htt->rx_ring.alloc_idx.vaddr));
1365e3dd157SKalle Valo 	while (num > 0) {
1375e3dd157SKalle Valo 		skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN);
1385e3dd157SKalle Valo 		if (!skb) {
1395e3dd157SKalle Valo 			ret = -ENOMEM;
1405e3dd157SKalle Valo 			goto fail;
1415e3dd157SKalle Valo 		}
1425e3dd157SKalle Valo 
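		/* dev_alloc_skb() gives no alignment guarantee beyond the
		 * platform default, so advance skb->data to the next
		 * HTT_RX_DESC_ALIGN boundary before the htt_rx_desc is laid
		 * out at the start of the buffer. */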
1435e3dd157SKalle Valo 		if (!IS_ALIGNED((unsigned long)skb->data, HTT_RX_DESC_ALIGN))
1445e3dd157SKalle Valo 			skb_pull(skb,
1455e3dd157SKalle Valo 				 PTR_ALIGN(skb->data, HTT_RX_DESC_ALIGN) -
1465e3dd157SKalle Valo 				 skb->data);
1475e3dd157SKalle Valo 
1485e3dd157SKalle Valo 		/* Clear rx_desc attention word before posting to Rx ring */
1495e3dd157SKalle Valo 		rx_desc = (struct htt_rx_desc *)skb->data;
1505e3dd157SKalle Valo 		rx_desc->attention.flags = __cpu_to_le32(0);
1515e3dd157SKalle Valo 
1525e3dd157SKalle Valo 		paddr = dma_map_single(htt->ar->dev, skb->data,
1535e3dd157SKalle Valo 				       skb->len + skb_tailroom(skb),
1545e3dd157SKalle Valo 				       DMA_FROM_DEVICE);
1555e3dd157SKalle Valo 
1565e3dd157SKalle Valo 		if (unlikely(dma_mapping_error(htt->ar->dev, paddr))) {
1575e3dd157SKalle Valo 			dev_kfree_skb_any(skb);
1585e3dd157SKalle Valo 			ret = -ENOMEM;
1595e3dd157SKalle Valo 			goto fail;
1605e3dd157SKalle Valo 		}
1615e3dd157SKalle Valo 
1625e3dd157SKalle Valo 		ATH10K_SKB_CB(skb)->paddr = paddr;
1635e3dd157SKalle Valo 		htt->rx_ring.netbufs_ring[idx] = skb;
1645e3dd157SKalle Valo 		htt->rx_ring.paddrs_ring[idx] = __cpu_to_le32(paddr);
1655e3dd157SKalle Valo 		htt->rx_ring.fill_cnt++;
1665e3dd157SKalle Valo 
1675e3dd157SKalle Valo 		num--;
1685e3dd157SKalle Valo 		idx++;
1695e3dd157SKalle Valo 		idx &= htt->rx_ring.size_mask;
1705e3dd157SKalle Valo 	}
1715e3dd157SKalle Valo 
1725e3dd157SKalle Valo fail:
1735e3dd157SKalle Valo 	*(htt->rx_ring.alloc_idx.vaddr) = __cpu_to_le32(idx);
1745e3dd157SKalle Valo 	return ret;
1755e3dd157SKalle Valo }
1765e3dd157SKalle Valo 
1775e3dd157SKalle Valo static int ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
1785e3dd157SKalle Valo {
1795e3dd157SKalle Valo 	lockdep_assert_held(&htt->rx_ring.lock);
1805e3dd157SKalle Valo 	return __ath10k_htt_rx_ring_fill_n(htt, num);
1815e3dd157SKalle Valo }
1825e3dd157SKalle Valo 
1835e3dd157SKalle Valo static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
1845e3dd157SKalle Valo {
1856e712d42SMichal Kazior 	int ret, num_deficit, num_to_fill;
1865e3dd157SKalle Valo 
1876e712d42SMichal Kazior 	/* Refilling the whole RX ring buffer proves to be a bad idea. The
1886e712d42SMichal Kazior 	 * reason is that RX may take up a significant amount of CPU cycles
1896e712d42SMichal Kazior 	 * and starve other tasks, e.g. TX on an ethernet device while acting
1906e712d42SMichal Kazior 	 * as a bridge with an ath10k wlan interface. This ended up with very
1916e712d42SMichal Kazior 	 * poor performance once the host CPU was overwhelmed with RX on ath10k.
1926e712d42SMichal Kazior 	 *
1936e712d42SMichal Kazior 	 * By limiting the number of refills the replenishing occurs
1946e712d42SMichal Kazior 	 * progressively. This in turn makes use of the fact that tasklets are
1956e712d42SMichal Kazior 	 * processed in FIFO order. This means actual RX processing can starve
1966e712d42SMichal Kazior 	 * out refilling. If there are not enough buffers on the RX ring the
1976e712d42SMichal Kazior 	 * FW will not report RX until it is refilled with enough buffers.
1986e712d42SMichal Kazior 	 * This automatically balances load with respect to CPU power.
1996e712d42SMichal Kazior 	 *
2006e712d42SMichal Kazior 	 * This probably comes at a cost of lower maximum throughput but
2016e712d42SMichal Kazior 	 * improves the average and stability. */
2025e3dd157SKalle Valo 	spin_lock_bh(&htt->rx_ring.lock);
2036e712d42SMichal Kazior 	num_deficit = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
2046e712d42SMichal Kazior 	num_to_fill = min(ATH10K_HTT_MAX_NUM_REFILL, num_deficit);
2056e712d42SMichal Kazior 	num_deficit -= num_to_fill;
2065e3dd157SKalle Valo 	ret = ath10k_htt_rx_ring_fill_n(htt, num_to_fill);
2075e3dd157SKalle Valo 	if (ret == -ENOMEM) {
2085e3dd157SKalle Valo 		/*
2095e3dd157SKalle Valo 		 * Failed to fill it to the desired level -
2105e3dd157SKalle Valo 		 * we'll start a timer and try again next time.
2115e3dd157SKalle Valo 		 * As long as enough buffers are left in the ring for
2125e3dd157SKalle Valo 		 * another A-MPDU rx, no special recovery is needed.
2135e3dd157SKalle Valo 		 */
2145e3dd157SKalle Valo 		mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
2155e3dd157SKalle Valo 			  msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS));
2166e712d42SMichal Kazior 	} else if (num_deficit > 0) {
2176e712d42SMichal Kazior 		tasklet_schedule(&htt->rx_replenish_task);
2185e3dd157SKalle Valo 	}
2195e3dd157SKalle Valo 	spin_unlock_bh(&htt->rx_ring.lock);
2205e3dd157SKalle Valo }
2215e3dd157SKalle Valo 
2225e3dd157SKalle Valo static void ath10k_htt_rx_ring_refill_retry(unsigned long arg)
2235e3dd157SKalle Valo {
2245e3dd157SKalle Valo 	struct ath10k_htt *htt = (struct ath10k_htt *)arg;
2255e3dd157SKalle Valo 	ath10k_htt_rx_msdu_buff_replenish(htt);
2265e3dd157SKalle Valo }
2275e3dd157SKalle Valo 
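/* Number of filled ring entries that have not been popped yet: the distance
 * between the alloc index and the software read index, modulo the
 * power-of-two ring size. */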
2285e3dd157SKalle Valo static unsigned ath10k_htt_rx_ring_elems(struct ath10k_htt *htt)
2295e3dd157SKalle Valo {
2305e3dd157SKalle Valo 	return (__le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr) -
2315e3dd157SKalle Valo 		htt->rx_ring.sw_rd_idx.msdu_payld) & htt->rx_ring.size_mask;
2325e3dd157SKalle Valo }
2335e3dd157SKalle Valo 
2345e3dd157SKalle Valo void ath10k_htt_rx_detach(struct ath10k_htt *htt)
2355e3dd157SKalle Valo {
2365e3dd157SKalle Valo 	int sw_rd_idx = htt->rx_ring.sw_rd_idx.msdu_payld;
2375e3dd157SKalle Valo 
2385e3dd157SKalle Valo 	del_timer_sync(&htt->rx_ring.refill_retry_timer);
2396e712d42SMichal Kazior 	tasklet_kill(&htt->rx_replenish_task);
2406c5151a9SMichal Kazior 	tasklet_kill(&htt->txrx_compl_task);
2416c5151a9SMichal Kazior 
2426c5151a9SMichal Kazior 	skb_queue_purge(&htt->tx_compl_q);
2436c5151a9SMichal Kazior 	skb_queue_purge(&htt->rx_compl_q);
2445e3dd157SKalle Valo 
2455e3dd157SKalle Valo 	while (sw_rd_idx != __le32_to_cpu(*(htt->rx_ring.alloc_idx.vaddr))) {
2465e3dd157SKalle Valo 		struct sk_buff *skb =
2475e3dd157SKalle Valo 				htt->rx_ring.netbufs_ring[sw_rd_idx];
2485e3dd157SKalle Valo 		struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
2495e3dd157SKalle Valo 
2505e3dd157SKalle Valo 		dma_unmap_single(htt->ar->dev, cb->paddr,
2515e3dd157SKalle Valo 				 skb->len + skb_tailroom(skb),
2525e3dd157SKalle Valo 				 DMA_FROM_DEVICE);
2535e3dd157SKalle Valo 		dev_kfree_skb_any(htt->rx_ring.netbufs_ring[sw_rd_idx]);
2545e3dd157SKalle Valo 		sw_rd_idx++;
2555e3dd157SKalle Valo 		sw_rd_idx &= htt->rx_ring.size_mask;
2565e3dd157SKalle Valo 	}
2575e3dd157SKalle Valo 
2585e3dd157SKalle Valo 	dma_free_coherent(htt->ar->dev,
2595e3dd157SKalle Valo 			  (htt->rx_ring.size *
2605e3dd157SKalle Valo 			   sizeof(htt->rx_ring.paddrs_ring)),
2615e3dd157SKalle Valo 			  htt->rx_ring.paddrs_ring,
2625e3dd157SKalle Valo 			  htt->rx_ring.base_paddr);
2635e3dd157SKalle Valo 
2645e3dd157SKalle Valo 	dma_free_coherent(htt->ar->dev,
2655e3dd157SKalle Valo 			  sizeof(*htt->rx_ring.alloc_idx.vaddr),
2665e3dd157SKalle Valo 			  htt->rx_ring.alloc_idx.vaddr,
2675e3dd157SKalle Valo 			  htt->rx_ring.alloc_idx.paddr);
2685e3dd157SKalle Valo 
2695e3dd157SKalle Valo 	kfree(htt->rx_ring.netbufs_ring);
2705e3dd157SKalle Valo }
2715e3dd157SKalle Valo 
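/* Pop the next filled buffer off the rx ring. Takes rx_ring.lock itself,
 * advances the software read index and decrements fill_cnt so that a later
 * replenish knows how many buffers need to be re-posted. */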
2725e3dd157SKalle Valo static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
2735e3dd157SKalle Valo {
2745e3dd157SKalle Valo 	int idx;
2755e3dd157SKalle Valo 	struct sk_buff *msdu;
2765e3dd157SKalle Valo 
2775e3dd157SKalle Valo 	spin_lock_bh(&htt->rx_ring.lock);
2785e3dd157SKalle Valo 
2795e3dd157SKalle Valo 	if (ath10k_htt_rx_ring_elems(htt) == 0)
2805e3dd157SKalle Valo 		ath10k_warn("htt rx ring is empty!\n");
2815e3dd157SKalle Valo 
2825e3dd157SKalle Valo 	idx = htt->rx_ring.sw_rd_idx.msdu_payld;
2835e3dd157SKalle Valo 	msdu = htt->rx_ring.netbufs_ring[idx];
2845e3dd157SKalle Valo 
2855e3dd157SKalle Valo 	idx++;
2865e3dd157SKalle Valo 	idx &= htt->rx_ring.size_mask;
2875e3dd157SKalle Valo 	htt->rx_ring.sw_rd_idx.msdu_payld = idx;
2885e3dd157SKalle Valo 	htt->rx_ring.fill_cnt--;
2895e3dd157SKalle Valo 
2905e3dd157SKalle Valo 	spin_unlock_bh(&htt->rx_ring.lock);
2915e3dd157SKalle Valo 	return msdu;
2925e3dd157SKalle Valo }
2935e3dd157SKalle Valo 
2945e3dd157SKalle Valo static void ath10k_htt_rx_free_msdu_chain(struct sk_buff *skb)
2955e3dd157SKalle Valo {
2965e3dd157SKalle Valo 	struct sk_buff *next;
2975e3dd157SKalle Valo 
2985e3dd157SKalle Valo 	while (skb) {
2995e3dd157SKalle Valo 		next = skb->next;
3005e3dd157SKalle Valo 		dev_kfree_skb_any(skb);
3015e3dd157SKalle Valo 		skb = next;
3025e3dd157SKalle Valo 	}
3035e3dd157SKalle Valo }
3045e3dd157SKalle Valo 
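/* Pop one MPDU's worth of MSDUs off the rx ring and link them via skb->next
 * into a list spanning *head_msdu..*tail_msdu. Returns non-zero if any MSDU
 * spilled over into chained rx buffers, which the callers currently reject. */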
3055e3dd157SKalle Valo static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
3065e3dd157SKalle Valo 				   u8 **fw_desc, int *fw_desc_len,
3075e3dd157SKalle Valo 				   struct sk_buff **head_msdu,
3085e3dd157SKalle Valo 				   struct sk_buff **tail_msdu)
3095e3dd157SKalle Valo {
3105e3dd157SKalle Valo 	int msdu_len, msdu_chaining = 0;
3115e3dd157SKalle Valo 	struct sk_buff *msdu;
3125e3dd157SKalle Valo 	struct htt_rx_desc *rx_desc;
3135e3dd157SKalle Valo 
3145e3dd157SKalle Valo 	if (ath10k_htt_rx_ring_elems(htt) == 0)
3155e3dd157SKalle Valo 		ath10k_warn("htt rx ring is empty!\n");
3165e3dd157SKalle Valo 
3175e3dd157SKalle Valo 	if (htt->rx_confused) {
3185e3dd157SKalle Valo 		ath10k_warn("htt is confused. refusing rx\n");
3195e3dd157SKalle Valo 		return 0;
3205e3dd157SKalle Valo 	}
3215e3dd157SKalle Valo 
3225e3dd157SKalle Valo 	msdu = *head_msdu = ath10k_htt_rx_netbuf_pop(htt);
3235e3dd157SKalle Valo 	while (msdu) {
3245e3dd157SKalle Valo 		int last_msdu, msdu_len_invalid, msdu_chained;
3255e3dd157SKalle Valo 
3265e3dd157SKalle Valo 		dma_unmap_single(htt->ar->dev,
3275e3dd157SKalle Valo 				 ATH10K_SKB_CB(msdu)->paddr,
3285e3dd157SKalle Valo 				 msdu->len + skb_tailroom(msdu),
3295e3dd157SKalle Valo 				 DMA_FROM_DEVICE);
3305e3dd157SKalle Valo 
33175fb2f94SBen Greear 		ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx pop: ",
3325e3dd157SKalle Valo 				msdu->data, msdu->len + skb_tailroom(msdu));
3335e3dd157SKalle Valo 
3345e3dd157SKalle Valo 		rx_desc = (struct htt_rx_desc *)msdu->data;
3355e3dd157SKalle Valo 
3365e3dd157SKalle Valo 		/* FIXME: we must report the msdu payload since this is what
3375e3dd157SKalle Valo 		 *        the caller expects now */
3385e3dd157SKalle Valo 		skb_put(msdu, offsetof(struct htt_rx_desc, msdu_payload));
3395e3dd157SKalle Valo 		skb_pull(msdu, offsetof(struct htt_rx_desc, msdu_payload));
3405e3dd157SKalle Valo 
3415e3dd157SKalle Valo 		/*
3425e3dd157SKalle Valo 		 * Sanity check - confirm the HW is finished filling in the
3435e3dd157SKalle Valo 		 * rx data.
3445e3dd157SKalle Valo 		 * If the HW and SW are working correctly, then it's guaranteed
3455e3dd157SKalle Valo 		 * that the HW's MAC DMA is done before this point in the SW.
3465e3dd157SKalle Valo 		 * To prevent the case that we handle a stale Rx descriptor,
3475e3dd157SKalle Valo 		 * just assert for now until we have a way to recover.
3485e3dd157SKalle Valo 		 */
3495e3dd157SKalle Valo 		if (!(__le32_to_cpu(rx_desc->attention.flags)
3505e3dd157SKalle Valo 				& RX_ATTENTION_FLAGS_MSDU_DONE)) {
3515e3dd157SKalle Valo 			ath10k_htt_rx_free_msdu_chain(*head_msdu);
3525e3dd157SKalle Valo 			*head_msdu = NULL;
3535e3dd157SKalle Valo 			msdu = NULL;
3545e3dd157SKalle Valo 			ath10k_err("htt rx stopped. cannot recover\n");
3555e3dd157SKalle Valo 			htt->rx_confused = true;
3565e3dd157SKalle Valo 			break;
3575e3dd157SKalle Valo 		}
3585e3dd157SKalle Valo 
3595e3dd157SKalle Valo 		/*
3605e3dd157SKalle Valo 		 * Copy the FW rx descriptor for this MSDU from the rx
3615e3dd157SKalle Valo 		 * indication message into the MSDU's netbuf. HL uses the
3625e3dd157SKalle Valo 		 * same rx indication message definition as LL, and simply
3635e3dd157SKalle Valo 		 * appends new info (fields from the HW rx desc, and the
3645e3dd157SKalle Valo 		 * MSDU payload itself). So, the offset into the rx
3655e3dd157SKalle Valo 		 * indication message only has to account for the standard
3665e3dd157SKalle Valo 		 * offset of the per-MSDU FW rx desc info within the
3675e3dd157SKalle Valo 		 * message, and how many bytes of the per-MSDU FW rx desc
3685e3dd157SKalle Valo 		 * info have already been consumed. (And the endianness of
3695e3dd157SKalle Valo 		 * the host, since for a big-endian host, the rx ind
3705e3dd157SKalle Valo 		 * message contents, including the per-MSDU rx desc bytes,
3715e3dd157SKalle Valo 		 * were byteswapped during upload.)
3725e3dd157SKalle Valo 		 */
3735e3dd157SKalle Valo 		if (*fw_desc_len > 0) {
3745e3dd157SKalle Valo 			rx_desc->fw_desc.info0 = **fw_desc;
3755e3dd157SKalle Valo 			/*
3765e3dd157SKalle Valo 			 * The target is expected to only provide the basic
3775e3dd157SKalle Valo 			 * per-MSDU rx descriptors. Just to be sure, verify
3785e3dd157SKalle Valo 			 * that the target has not attached extension data
3795e3dd157SKalle Valo 			 * (e.g. LRO flow ID).
3805e3dd157SKalle Valo 			 */
3815e3dd157SKalle Valo 
3825e3dd157SKalle Valo 			/* or more, if there's extension data */
3835e3dd157SKalle Valo 			(*fw_desc)++;
3845e3dd157SKalle Valo 			(*fw_desc_len)--;
3855e3dd157SKalle Valo 		} else {
3865e3dd157SKalle Valo 			/*
3875e3dd157SKalle Valo 			 * When an oversized A-MSDU happens, the FW will lose
3885e3dd157SKalle Valo 			 * some of the MSDU status - in this case, the FW
3895e3dd157SKalle Valo 			 * descriptors provided will be fewer than the
3905e3dd157SKalle Valo 			 * actual MSDUs inside this MPDU. Mark the FW
3915e3dd157SKalle Valo 			 * descriptors so that they are still delivered to
3925e3dd157SKalle Valo 			 * the upper stack if there is no CRC error for this
3935e3dd157SKalle Valo 			 * MPDU.
3945e3dd157SKalle Valo 			 *
3955e3dd157SKalle Valo 			 * FIX THIS - the FW descriptors are actually for
3965e3dd157SKalle Valo 			 * MSDUs at the end of this A-MSDU instead of at the
3975e3dd157SKalle Valo 			 * beginning.
3985e3dd157SKalle Valo 			 */
3985e3dd157SKalle Valo 			rx_desc->fw_desc.info0 = 0;
3995e3dd157SKalle Valo 		}
4005e3dd157SKalle Valo 
4015e3dd157SKalle Valo 		msdu_len_invalid = !!(__le32_to_cpu(rx_desc->attention.flags)
4025e3dd157SKalle Valo 					& (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR |
4035e3dd157SKalle Valo 					   RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR));
4045e3dd157SKalle Valo 		msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.info0),
4055e3dd157SKalle Valo 			      RX_MSDU_START_INFO0_MSDU_LENGTH);
4065e3dd157SKalle Valo 		msdu_chained = rx_desc->frag_info.ring2_more_count;
4075e3dd157SKalle Valo 
4085e3dd157SKalle Valo 		if (msdu_len_invalid)
4095e3dd157SKalle Valo 			msdu_len = 0;
4105e3dd157SKalle Valo 
4115e3dd157SKalle Valo 		skb_trim(msdu, 0);
4125e3dd157SKalle Valo 		skb_put(msdu, min(msdu_len, HTT_RX_MSDU_SIZE));
4135e3dd157SKalle Valo 		msdu_len -= msdu->len;
4145e3dd157SKalle Valo 
4155e3dd157SKalle Valo 		/* FIXME: Do chained buffers include htt_rx_desc or not? */
4165e3dd157SKalle Valo 		while (msdu_chained--) {
4175e3dd157SKalle Valo 			struct sk_buff *next = ath10k_htt_rx_netbuf_pop(htt);
4185e3dd157SKalle Valo 
4195e3dd157SKalle Valo 			dma_unmap_single(htt->ar->dev,
4205e3dd157SKalle Valo 					 ATH10K_SKB_CB(next)->paddr,
4215e3dd157SKalle Valo 					 next->len + skb_tailroom(next),
4225e3dd157SKalle Valo 					 DMA_FROM_DEVICE);
4235e3dd157SKalle Valo 
42475fb2f94SBen Greear 			ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL,
42575fb2f94SBen Greear 					"htt rx chained: ", next->data,
4265e3dd157SKalle Valo 					next->len + skb_tailroom(next));
4275e3dd157SKalle Valo 
4285e3dd157SKalle Valo 			skb_trim(next, 0);
4295e3dd157SKalle Valo 			skb_put(next, min(msdu_len, HTT_RX_BUF_SIZE));
4305e3dd157SKalle Valo 			msdu_len -= next->len;
4315e3dd157SKalle Valo 
4325e3dd157SKalle Valo 			msdu->next = next;
4335e3dd157SKalle Valo 			msdu = next;
4345e3dd157SKalle Valo 			msdu_chaining = 1;
4355e3dd157SKalle Valo 		}
4365e3dd157SKalle Valo 
4375e3dd157SKalle Valo 		last_msdu = __le32_to_cpu(rx_desc->msdu_end.info0) &
4385e3dd157SKalle Valo 				RX_MSDU_END_INFO0_LAST_MSDU;
4395e3dd157SKalle Valo 
4405e3dd157SKalle Valo 		if (last_msdu) {
4415e3dd157SKalle Valo 			msdu->next = NULL;
4425e3dd157SKalle Valo 			break;
4435e3dd157SKalle Valo 		} else {
4445e3dd157SKalle Valo 			struct sk_buff *next = ath10k_htt_rx_netbuf_pop(htt);
4455e3dd157SKalle Valo 			msdu->next = next;
4465e3dd157SKalle Valo 			msdu = next;
4475e3dd157SKalle Valo 		}
4485e3dd157SKalle Valo 	}
4495e3dd157SKalle Valo 	*tail_msdu = msdu;
4505e3dd157SKalle Valo 
4515e3dd157SKalle Valo 	/*
4525e3dd157SKalle Valo 	 * Don't refill the ring yet.
4535e3dd157SKalle Valo 	 *
4545e3dd157SKalle Valo 	 * First, the elements popped here are still in use - it is not
4555e3dd157SKalle Valo 	 * safe to overwrite them until the matching call to
4565e3dd157SKalle Valo 	 * mpdu_desc_list_next. Second, for efficiency it is preferable to
4575e3dd157SKalle Valo 	 * refill the rx ring with 1 PPDU's worth of rx buffers (something
4585e3dd157SKalle Valo 	 * like 32 x 3 buffers), rather than one MPDU's worth of rx buffers
4595e3dd157SKalle Valo 	 * (something like 3 buffers). Consequently, we'll rely on the txrx
4605e3dd157SKalle Valo 	 * SW to tell us when it is done pulling all the PPDU's rx buffers
4615e3dd157SKalle Valo 	 * out of the rx ring, and then refill it just once.
4625e3dd157SKalle Valo 	 */
4635e3dd157SKalle Valo 
4645e3dd157SKalle Valo 	return msdu_chaining;
4655e3dd157SKalle Valo }
4665e3dd157SKalle Valo 
4676e712d42SMichal Kazior static void ath10k_htt_rx_replenish_task(unsigned long ptr)
4686e712d42SMichal Kazior {
4696e712d42SMichal Kazior 	struct ath10k_htt *htt = (struct ath10k_htt *)ptr;
4706e712d42SMichal Kazior 	ath10k_htt_rx_msdu_buff_replenish(htt);
4716e712d42SMichal Kazior }
4726e712d42SMichal Kazior 
4735e3dd157SKalle Valo int ath10k_htt_rx_attach(struct ath10k_htt *htt)
4745e3dd157SKalle Valo {
4755e3dd157SKalle Valo 	dma_addr_t paddr;
4765e3dd157SKalle Valo 	void *vaddr;
4775e3dd157SKalle Valo 	struct timer_list *timer = &htt->rx_ring.refill_retry_timer;
4785e3dd157SKalle Valo 
4795e3dd157SKalle Valo 	htt->rx_ring.size = ath10k_htt_rx_ring_size(htt);
4805e3dd157SKalle Valo 	if (!is_power_of_2(htt->rx_ring.size)) {
4815e3dd157SKalle Valo 		ath10k_warn("htt rx ring size is not power of 2\n");
4825e3dd157SKalle Valo 		return -EINVAL;
4835e3dd157SKalle Valo 	}
4845e3dd157SKalle Valo 
4855e3dd157SKalle Valo 	htt->rx_ring.size_mask = htt->rx_ring.size - 1;
4865e3dd157SKalle Valo 
4875e3dd157SKalle Valo 	/*
4885e3dd157SKalle Valo 	 * Set the initial value for the level to which the rx ring
4895e3dd157SKalle Valo 	 * should be filled, based on the max throughput and the
4905e3dd157SKalle Valo 	 * worst likely latency for the host to fill the rx ring
4915e3dd157SKalle Valo 	 * with new buffers. In theory, this fill level can be
4925e3dd157SKalle Valo 	 * dynamically adjusted from the initial value set here, to
4935e3dd157SKalle Valo 	 * reflect the actual host latency rather than a
4945e3dd157SKalle Valo 	 * conservative assumption about the host latency.
4955e3dd157SKalle Valo 	 */
4965e3dd157SKalle Valo 	htt->rx_ring.fill_level = ath10k_htt_rx_ring_fill_level(htt);
4975e3dd157SKalle Valo 
4985e3dd157SKalle Valo 	htt->rx_ring.netbufs_ring =
4995e3dd157SKalle Valo 		kmalloc(htt->rx_ring.size * sizeof(struct sk_buff *),
5005e3dd157SKalle Valo 			GFP_KERNEL);
5015e3dd157SKalle Valo 	if (!htt->rx_ring.netbufs_ring)
5025e3dd157SKalle Valo 		goto err_netbuf;
5035e3dd157SKalle Valo 
5045e3dd157SKalle Valo 	vaddr = dma_alloc_coherent(htt->ar->dev,
5055e3dd157SKalle Valo 		   (htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring)),
5065e3dd157SKalle Valo 		   &paddr, GFP_DMA);
5075e3dd157SKalle Valo 	if (!vaddr)
5085e3dd157SKalle Valo 		goto err_dma_ring;
5095e3dd157SKalle Valo 
5105e3dd157SKalle Valo 	htt->rx_ring.paddrs_ring = vaddr;
5115e3dd157SKalle Valo 	htt->rx_ring.base_paddr = paddr;
5125e3dd157SKalle Valo 
5135e3dd157SKalle Valo 	vaddr = dma_alloc_coherent(htt->ar->dev,
5145e3dd157SKalle Valo 				   sizeof(*htt->rx_ring.alloc_idx.vaddr),
5155e3dd157SKalle Valo 				   &paddr, GFP_DMA);
5165e3dd157SKalle Valo 	if (!vaddr)
5175e3dd157SKalle Valo 		goto err_dma_idx;
5185e3dd157SKalle Valo 
5195e3dd157SKalle Valo 	htt->rx_ring.alloc_idx.vaddr = vaddr;
5205e3dd157SKalle Valo 	htt->rx_ring.alloc_idx.paddr = paddr;
5215e3dd157SKalle Valo 	htt->rx_ring.sw_rd_idx.msdu_payld = 0;
5225e3dd157SKalle Valo 	*htt->rx_ring.alloc_idx.vaddr = 0;
5235e3dd157SKalle Valo 
5245e3dd157SKalle Valo 	/* Initialize the Rx refill retry timer */
5255e3dd157SKalle Valo 	setup_timer(timer, ath10k_htt_rx_ring_refill_retry, (unsigned long)htt);
5265e3dd157SKalle Valo 
5275e3dd157SKalle Valo 	spin_lock_init(&htt->rx_ring.lock);
5285e3dd157SKalle Valo 
5295e3dd157SKalle Valo 	htt->rx_ring.fill_cnt = 0;
5305e3dd157SKalle Valo 	if (__ath10k_htt_rx_ring_fill_n(htt, htt->rx_ring.fill_level))
5315e3dd157SKalle Valo 		goto err_fill_ring;
5325e3dd157SKalle Valo 
5336e712d42SMichal Kazior 	tasklet_init(&htt->rx_replenish_task, ath10k_htt_rx_replenish_task,
5346e712d42SMichal Kazior 		     (unsigned long)htt);
5356e712d42SMichal Kazior 
5366c5151a9SMichal Kazior 	skb_queue_head_init(&htt->tx_compl_q);
5376c5151a9SMichal Kazior 	skb_queue_head_init(&htt->rx_compl_q);
5386c5151a9SMichal Kazior 
5396c5151a9SMichal Kazior 	tasklet_init(&htt->txrx_compl_task, ath10k_htt_txrx_compl_task,
5406c5151a9SMichal Kazior 		     (unsigned long)htt);
5416c5151a9SMichal Kazior 
542aad0b65fSKalle Valo 	ath10k_dbg(ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
5435e3dd157SKalle Valo 		   htt->rx_ring.size, htt->rx_ring.fill_level);
5445e3dd157SKalle Valo 	return 0;
5455e3dd157SKalle Valo 
5465e3dd157SKalle Valo err_fill_ring:
5475e3dd157SKalle Valo 	ath10k_htt_rx_ring_free(htt);
5485e3dd157SKalle Valo 	dma_free_coherent(htt->ar->dev,
5495e3dd157SKalle Valo 			  sizeof(*htt->rx_ring.alloc_idx.vaddr),
5505e3dd157SKalle Valo 			  htt->rx_ring.alloc_idx.vaddr,
5515e3dd157SKalle Valo 			  htt->rx_ring.alloc_idx.paddr);
5525e3dd157SKalle Valo err_dma_idx:
5535e3dd157SKalle Valo 	dma_free_coherent(htt->ar->dev,
5545e3dd157SKalle Valo 			  (htt->rx_ring.size *
5555e3dd157SKalle Valo 			   sizeof(htt->rx_ring.paddrs_ring)),
5565e3dd157SKalle Valo 			  htt->rx_ring.paddrs_ring,
5575e3dd157SKalle Valo 			  htt->rx_ring.base_paddr);
5585e3dd157SKalle Valo err_dma_ring:
5595e3dd157SKalle Valo 	kfree(htt->rx_ring.netbufs_ring);
5605e3dd157SKalle Valo err_netbuf:
5615e3dd157SKalle Valo 	return -ENOMEM;
5625e3dd157SKalle Valo }
5635e3dd157SKalle Valo 
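/* Per-cipher length of the security header (IV) that precedes the payload,
 * and of the trailer (ICV/MIC) that follows it. These are used while
 * stripping or skipping crypto material during decap below. */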
5645e3dd157SKalle Valo static int ath10k_htt_rx_crypto_param_len(enum htt_rx_mpdu_encrypt_type type)
5655e3dd157SKalle Valo {
5665e3dd157SKalle Valo 	switch (type) {
5675e3dd157SKalle Valo 	case HTT_RX_MPDU_ENCRYPT_WEP40:
5685e3dd157SKalle Valo 	case HTT_RX_MPDU_ENCRYPT_WEP104:
5695e3dd157SKalle Valo 		return 4;
5705e3dd157SKalle Valo 	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
5715e3dd157SKalle Valo 	case HTT_RX_MPDU_ENCRYPT_WEP128: /* not tested */
5725e3dd157SKalle Valo 	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
5735e3dd157SKalle Valo 	case HTT_RX_MPDU_ENCRYPT_WAPI: /* not tested */
5745e3dd157SKalle Valo 	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
5755e3dd157SKalle Valo 		return 8;
5765e3dd157SKalle Valo 	case HTT_RX_MPDU_ENCRYPT_NONE:
5775e3dd157SKalle Valo 		return 0;
5785e3dd157SKalle Valo 	}
5795e3dd157SKalle Valo 
5805e3dd157SKalle Valo 	ath10k_warn("unknown encryption type %d\n", type);
5815e3dd157SKalle Valo 	return 0;
5825e3dd157SKalle Valo }
5835e3dd157SKalle Valo 
5845e3dd157SKalle Valo static int ath10k_htt_rx_crypto_tail_len(enum htt_rx_mpdu_encrypt_type type)
5855e3dd157SKalle Valo {
5865e3dd157SKalle Valo 	switch (type) {
5875e3dd157SKalle Valo 	case HTT_RX_MPDU_ENCRYPT_NONE:
5885e3dd157SKalle Valo 	case HTT_RX_MPDU_ENCRYPT_WEP40:
5895e3dd157SKalle Valo 	case HTT_RX_MPDU_ENCRYPT_WEP104:
5905e3dd157SKalle Valo 	case HTT_RX_MPDU_ENCRYPT_WEP128:
5915e3dd157SKalle Valo 	case HTT_RX_MPDU_ENCRYPT_WAPI:
5925e3dd157SKalle Valo 		return 0;
5935e3dd157SKalle Valo 	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
5945e3dd157SKalle Valo 	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
5955e3dd157SKalle Valo 		return 4;
5965e3dd157SKalle Valo 	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
5975e3dd157SKalle Valo 		return 8;
5985e3dd157SKalle Valo 	}
5995e3dd157SKalle Valo 
6005e3dd157SKalle Valo 	ath10k_warn("unknown encryption type %d\n", type);
6015e3dd157SKalle Valo 	return 0;
6025e3dd157SKalle Valo }
6035e3dd157SKalle Valo 
6045e3dd157SKalle Valo /* Applies to the first msdu in a chain, before altering it. */
6055e3dd157SKalle Valo static struct ieee80211_hdr *ath10k_htt_rx_skb_get_hdr(struct sk_buff *skb)
6065e3dd157SKalle Valo {
6075e3dd157SKalle Valo 	struct htt_rx_desc *rxd;
6085e3dd157SKalle Valo 	enum rx_msdu_decap_format fmt;
6095e3dd157SKalle Valo 
6105e3dd157SKalle Valo 	rxd = (void *)skb->data - sizeof(*rxd);
6115e3dd157SKalle Valo 	fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
6125e3dd157SKalle Valo 			RX_MSDU_START_INFO1_DECAP_FORMAT);
6135e3dd157SKalle Valo 
6145e3dd157SKalle Valo 	if (fmt == RX_MSDU_DECAP_RAW)
6155e3dd157SKalle Valo 		return (void *)skb->data;
6165e3dd157SKalle Valo 	else
6175e3dd157SKalle Valo 		return (void *)skb->data - RX_HTT_HDR_STATUS_LEN;
6185e3dd157SKalle Valo }
6195e3dd157SKalle Valo 
6205e3dd157SKalle Valo /* This function only applies to the first msdu in an msdu chain */
6215e3dd157SKalle Valo static bool ath10k_htt_rx_hdr_is_amsdu(struct ieee80211_hdr *hdr)
6225e3dd157SKalle Valo {
6235e3dd157SKalle Valo 	if (ieee80211_is_data_qos(hdr->frame_control)) {
6245e3dd157SKalle Valo 		u8 *qc = ieee80211_get_qos_ctl(hdr);
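		/* Bit 7 of the first QoS Control byte is the A-MSDU Present
		 * flag (IEEE80211_QOS_CTL_A_MSDU_PRESENT). */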
6255e3dd157SKalle Valo 		if (qc[0] & 0x80)
6265e3dd157SKalle Valo 			return true;
6275e3dd157SKalle Valo 	}
6285e3dd157SKalle Valo 	return false;
6295e3dd157SKalle Valo }
6305e3dd157SKalle Valo 
631f6dc2095SMichal Kazior struct rfc1042_hdr {
632f6dc2095SMichal Kazior 	u8 llc_dsap;
633f6dc2095SMichal Kazior 	u8 llc_ssap;
634f6dc2095SMichal Kazior 	u8 llc_ctrl;
635f6dc2095SMichal Kazior 	u8 snap_oui[3];
636f6dc2095SMichal Kazior 	__be16 snap_type;
637f6dc2095SMichal Kazior } __packed;
638f6dc2095SMichal Kazior 
639f6dc2095SMichal Kazior struct amsdu_subframe_hdr {
640f6dc2095SMichal Kazior 	u8 dst[ETH_ALEN];
641f6dc2095SMichal Kazior 	u8 src[ETH_ALEN];
642f6dc2095SMichal Kazior 	__be16 len;
643f6dc2095SMichal Kazior } __packed;
644f6dc2095SMichal Kazior 
645d960c369SMichal Kazior static int ath10k_htt_rx_nwifi_hdrlen(struct ieee80211_hdr *hdr)
646d960c369SMichal Kazior {
647d960c369SMichal Kazior 	/* the nwifi header is padded to a 4-byte boundary; this fixes 4addr rx */
648d960c369SMichal Kazior 	return round_up(ieee80211_hdrlen(hdr->frame_control), 4);
649d960c369SMichal Kazior }
650d960c369SMichal Kazior 
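/* Deliver each MSDU of a decapped A-MSDU chain to mac80211. Every decap
 * format is normalised back into an 802.11 frame: RAW only has its FCS
 * stripped, Native WiFi gets the original 802.11 header re-attached (with
 * the A-MSDU bit cleared), Ethernet II is rebuilt from the saved rfc1042
 * and A-MSDU subframe headers, and 802.3/SNAP only needs the 802.11 header
 * pushed back on. */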
651f6dc2095SMichal Kazior static void ath10k_htt_rx_amsdu(struct ath10k_htt *htt,
6525e3dd157SKalle Valo 				struct htt_rx_info *info)
6535e3dd157SKalle Valo {
6545e3dd157SKalle Valo 	struct htt_rx_desc *rxd;
6555e3dd157SKalle Valo 	struct sk_buff *first;
6565e3dd157SKalle Valo 	struct sk_buff *skb = info->skb;
6575e3dd157SKalle Valo 	enum rx_msdu_decap_format fmt;
6585e3dd157SKalle Valo 	enum htt_rx_mpdu_encrypt_type enctype;
659f6dc2095SMichal Kazior 	struct ieee80211_hdr *hdr;
660784f69d3SMichal Kazior 	u8 hdr_buf[64], addr[ETH_ALEN], *qos;
6615e3dd157SKalle Valo 	unsigned int hdr_len;
6625e3dd157SKalle Valo 
6635e3dd157SKalle Valo 	rxd = (void *)skb->data - sizeof(*rxd);
6645e3dd157SKalle Valo 	enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
6655e3dd157SKalle Valo 			RX_MPDU_START_INFO0_ENCRYPT_TYPE);
6665e3dd157SKalle Valo 
667f6dc2095SMichal Kazior 	hdr = (struct ieee80211_hdr *)rxd->rx_hdr_status;
668f6dc2095SMichal Kazior 	hdr_len = ieee80211_hdrlen(hdr->frame_control);
669f6dc2095SMichal Kazior 	memcpy(hdr_buf, hdr, hdr_len);
670f6dc2095SMichal Kazior 	hdr = (struct ieee80211_hdr *)hdr_buf;
6715e3dd157SKalle Valo 
6725e3dd157SKalle Valo 	first = skb;
6735e3dd157SKalle Valo 	while (skb) {
6745e3dd157SKalle Valo 		void *decap_hdr;
675f6dc2095SMichal Kazior 		int len;
6765e3dd157SKalle Valo 
6775e3dd157SKalle Valo 		rxd = (void *)skb->data - sizeof(*rxd);
6785e3dd157SKalle Valo 		fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
6795e3dd157SKalle Valo 			 RX_MSDU_START_INFO1_DECAP_FORMAT);
6805e3dd157SKalle Valo 		decap_hdr = (void *)rxd->rx_hdr_status;
6815e3dd157SKalle Valo 
682f6dc2095SMichal Kazior 		skb->ip_summed = ath10k_htt_rx_get_csum_state(skb);
683f6dc2095SMichal Kazior 
684f6dc2095SMichal Kazior 		/* First frame in an A-MSDU chain has more decapped data. */
6855e3dd157SKalle Valo 		if (skb == first) {
686f6dc2095SMichal Kazior 			len = round_up(ieee80211_hdrlen(hdr->frame_control), 4);
687f6dc2095SMichal Kazior 			len += round_up(ath10k_htt_rx_crypto_param_len(enctype),
688f6dc2095SMichal Kazior 					4);
689f6dc2095SMichal Kazior 			decap_hdr += len;
6905e3dd157SKalle Valo 		}
6915e3dd157SKalle Valo 
692f6dc2095SMichal Kazior 		switch (fmt) {
693f6dc2095SMichal Kazior 		case RX_MSDU_DECAP_RAW:
694e3fbf8d2SMichal Kazior 			/* remove trailing FCS */
695f6dc2095SMichal Kazior 			skb_trim(skb, skb->len - FCS_LEN);
696f6dc2095SMichal Kazior 			break;
697f6dc2095SMichal Kazior 		case RX_MSDU_DECAP_NATIVE_WIFI:
698784f69d3SMichal Kazior 			/* pull decapped header and copy DA */
699784f69d3SMichal Kazior 			hdr = (struct ieee80211_hdr *)skb->data;
700d960c369SMichal Kazior 			hdr_len = ath10k_htt_rx_nwifi_hdrlen(hdr);
701784f69d3SMichal Kazior 			memcpy(addr, ieee80211_get_DA(hdr), ETH_ALEN);
702784f69d3SMichal Kazior 			skb_pull(skb, hdr_len);
703784f69d3SMichal Kazior 
704784f69d3SMichal Kazior 			/* push original 802.11 header */
705784f69d3SMichal Kazior 			hdr = (struct ieee80211_hdr *)hdr_buf;
706784f69d3SMichal Kazior 			hdr_len = ieee80211_hdrlen(hdr->frame_control);
707784f69d3SMichal Kazior 			memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
708784f69d3SMichal Kazior 
709784f69d3SMichal Kazior 			/* original A-MSDU header has the bit set but we're
710784f69d3SMichal Kazior 			 * not including A-MSDU subframe header */
711784f69d3SMichal Kazior 			hdr = (struct ieee80211_hdr *)skb->data;
712784f69d3SMichal Kazior 			qos = ieee80211_get_qos_ctl(hdr);
713784f69d3SMichal Kazior 			qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
714784f69d3SMichal Kazior 
715784f69d3SMichal Kazior 			/* original 802.11 header has a different DA */
716784f69d3SMichal Kazior 			memcpy(ieee80211_get_DA(hdr), addr, ETH_ALEN);
717f6dc2095SMichal Kazior 			break;
718f6dc2095SMichal Kazior 		case RX_MSDU_DECAP_ETHERNET2_DIX:
719e3fbf8d2SMichal Kazior 			/* strip ethernet header and insert decapped 802.11
720e3fbf8d2SMichal Kazior 			 * header, amsdu subframe header and rfc1042 header */
721e3fbf8d2SMichal Kazior 
722f6dc2095SMichal Kazior 			len = 0;
723f6dc2095SMichal Kazior 			len += sizeof(struct rfc1042_hdr);
724f6dc2095SMichal Kazior 			len += sizeof(struct amsdu_subframe_hdr);
725dfa95b50SMichal Kazior 
726f6dc2095SMichal Kazior 			skb_pull(skb, sizeof(struct ethhdr));
727f6dc2095SMichal Kazior 			memcpy(skb_push(skb, len), decap_hdr, len);
728f6dc2095SMichal Kazior 			memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
729f6dc2095SMichal Kazior 			break;
730f6dc2095SMichal Kazior 		case RX_MSDU_DECAP_8023_SNAP_LLC:
731e3fbf8d2SMichal Kazior 			/* insert decapped 802.11 header, making a
732e3fbf8d2SMichal Kazior 			 * single-subframe A-MSDU */
733f6dc2095SMichal Kazior 			memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
734f6dc2095SMichal Kazior 			break;
7355e3dd157SKalle Valo 		}
7365e3dd157SKalle Valo 
737f6dc2095SMichal Kazior 		info->skb = skb;
7385e3dd157SKalle Valo 		info->encrypt_type = enctype;
739f6dc2095SMichal Kazior 		skb = skb->next;
740f6dc2095SMichal Kazior 		info->skb->next = NULL;
7415e3dd157SKalle Valo 
742652de35eSKalle Valo 		if (skb)
743652de35eSKalle Valo 			info->amsdu_more = true;
744652de35eSKalle Valo 
745f6dc2095SMichal Kazior 		ath10k_process_rx(htt->ar, info);
7465e3dd157SKalle Valo 	}
7475e3dd157SKalle Valo 
748f6dc2095SMichal Kazior 	/* FIXME: It might be nice to re-assemble the A-MSDU when there's a
749f6dc2095SMichal Kazior 	 * monitor interface active for sniffing purposes. */
750f6dc2095SMichal Kazior }
751f6dc2095SMichal Kazior 
752f6dc2095SMichal Kazior static void ath10k_htt_rx_msdu(struct ath10k_htt *htt, struct htt_rx_info *info)
7535e3dd157SKalle Valo {
7545e3dd157SKalle Valo 	struct sk_buff *skb = info->skb;
7555e3dd157SKalle Valo 	struct htt_rx_desc *rxd;
7565e3dd157SKalle Valo 	struct ieee80211_hdr *hdr;
7575e3dd157SKalle Valo 	enum rx_msdu_decap_format fmt;
7585e3dd157SKalle Valo 	enum htt_rx_mpdu_encrypt_type enctype;
759e3fbf8d2SMichal Kazior 	int hdr_len;
760e3fbf8d2SMichal Kazior 	void *rfc1042;
7615e3dd157SKalle Valo 
7625e3dd157SKalle Valo 	/* This shouldn't happen. If it does then it may be a FW bug. */
7635e3dd157SKalle Valo 	if (skb->next) {
76475fb2f94SBen Greear 		ath10k_warn("htt rx received chained non A-MSDU frame\n");
7655e3dd157SKalle Valo 		ath10k_htt_rx_free_msdu_chain(skb->next);
7665e3dd157SKalle Valo 		skb->next = NULL;
7675e3dd157SKalle Valo 	}
7685e3dd157SKalle Valo 
7695e3dd157SKalle Valo 	rxd = (void *)skb->data - sizeof(*rxd);
7705e3dd157SKalle Valo 	fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
7715e3dd157SKalle Valo 			RX_MSDU_START_INFO1_DECAP_FORMAT);
7725e3dd157SKalle Valo 	enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
7735e3dd157SKalle Valo 			RX_MPDU_START_INFO0_ENCRYPT_TYPE);
774e3fbf8d2SMichal Kazior 	hdr = (struct ieee80211_hdr *)rxd->rx_hdr_status;
775e3fbf8d2SMichal Kazior 	hdr_len = ieee80211_hdrlen(hdr->frame_control);
7765e3dd157SKalle Valo 
777f6dc2095SMichal Kazior 	skb->ip_summed = ath10k_htt_rx_get_csum_state(skb);
778f6dc2095SMichal Kazior 
7795e3dd157SKalle Valo 	switch (fmt) {
7805e3dd157SKalle Valo 	case RX_MSDU_DECAP_RAW:
7815e3dd157SKalle Valo 		/* remove trailing FCS */
782e3fbf8d2SMichal Kazior 		skb_trim(skb, skb->len - FCS_LEN);
7835e3dd157SKalle Valo 		break;
7845e3dd157SKalle Valo 	case RX_MSDU_DECAP_NATIVE_WIFI:
785784f69d3SMichal Kazior 		/* Pull decapped header */
786784f69d3SMichal Kazior 		hdr = (struct ieee80211_hdr *)skb->data;
787d960c369SMichal Kazior 		hdr_len = ath10k_htt_rx_nwifi_hdrlen(hdr);
788784f69d3SMichal Kazior 		skb_pull(skb, hdr_len);
789784f69d3SMichal Kazior 
790784f69d3SMichal Kazior 		/* Push original header */
791784f69d3SMichal Kazior 		hdr = (struct ieee80211_hdr *)rxd->rx_hdr_status;
792784f69d3SMichal Kazior 		hdr_len = ieee80211_hdrlen(hdr->frame_control);
793784f69d3SMichal Kazior 		memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
7945e3dd157SKalle Valo 		break;
7955e3dd157SKalle Valo 	case RX_MSDU_DECAP_ETHERNET2_DIX:
796e3fbf8d2SMichal Kazior 		/* strip ethernet header and insert decapped 802.11 header and
797e3fbf8d2SMichal Kazior 		 * rfc1042 header */
798e3fbf8d2SMichal Kazior 
799e3fbf8d2SMichal Kazior 		rfc1042 = hdr;
800e3fbf8d2SMichal Kazior 		rfc1042 += roundup(hdr_len, 4);
801e3fbf8d2SMichal Kazior 		rfc1042 += roundup(ath10k_htt_rx_crypto_param_len(enctype), 4);
802e3fbf8d2SMichal Kazior 
803e3fbf8d2SMichal Kazior 		skb_pull(skb, sizeof(struct ethhdr));
804e3fbf8d2SMichal Kazior 		memcpy(skb_push(skb, sizeof(struct rfc1042_hdr)),
805e3fbf8d2SMichal Kazior 		       rfc1042, sizeof(struct rfc1042_hdr));
806e3fbf8d2SMichal Kazior 		memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
8075e3dd157SKalle Valo 		break;
8085e3dd157SKalle Valo 	case RX_MSDU_DECAP_8023_SNAP_LLC:
809e3fbf8d2SMichal Kazior 		/* remove A-MSDU subframe header and insert
810e3fbf8d2SMichal Kazior 		 * decapped 802.11 header. rfc1042 header is already there */
811e3fbf8d2SMichal Kazior 
812e3fbf8d2SMichal Kazior 		skb_pull(skb, sizeof(struct amsdu_subframe_hdr));
813e3fbf8d2SMichal Kazior 		memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
8145e3dd157SKalle Valo 		break;
8155e3dd157SKalle Valo 	}
8165e3dd157SKalle Valo 
8175e3dd157SKalle Valo 	info->skb = skb;
8185e3dd157SKalle Valo 	info->encrypt_type = enctype;
819f6dc2095SMichal Kazior 
820f6dc2095SMichal Kazior 	ath10k_process_rx(htt->ar, info);
8215e3dd157SKalle Valo }
8225e3dd157SKalle Valo 
8235e3dd157SKalle Valo static bool ath10k_htt_rx_has_decrypt_err(struct sk_buff *skb)
8245e3dd157SKalle Valo {
8255e3dd157SKalle Valo 	struct htt_rx_desc *rxd;
8265e3dd157SKalle Valo 	u32 flags;
8275e3dd157SKalle Valo 
8285e3dd157SKalle Valo 	rxd = (void *)skb->data - sizeof(*rxd);
8295e3dd157SKalle Valo 	flags = __le32_to_cpu(rxd->attention.flags);
8305e3dd157SKalle Valo 
8315e3dd157SKalle Valo 	if (flags & RX_ATTENTION_FLAGS_DECRYPT_ERR)
8325e3dd157SKalle Valo 		return true;
8335e3dd157SKalle Valo 
8345e3dd157SKalle Valo 	return false;
8355e3dd157SKalle Valo }
8365e3dd157SKalle Valo 
8375e3dd157SKalle Valo static bool ath10k_htt_rx_has_fcs_err(struct sk_buff *skb)
8385e3dd157SKalle Valo {
8395e3dd157SKalle Valo 	struct htt_rx_desc *rxd;
8405e3dd157SKalle Valo 	u32 flags;
8415e3dd157SKalle Valo 
8425e3dd157SKalle Valo 	rxd = (void *)skb->data - sizeof(*rxd);
8435e3dd157SKalle Valo 	flags = __le32_to_cpu(rxd->attention.flags);
8445e3dd157SKalle Valo 
8455e3dd157SKalle Valo 	if (flags & RX_ATTENTION_FLAGS_FCS_ERR)
8465e3dd157SKalle Valo 		return true;
8475e3dd157SKalle Valo 
8485e3dd157SKalle Valo 	return false;
8495e3dd157SKalle Valo }
8505e3dd157SKalle Valo 
85122569400SJanusz Dziedzic static bool ath10k_htt_rx_has_mic_err(struct sk_buff *skb)
85222569400SJanusz Dziedzic {
85322569400SJanusz Dziedzic 	struct htt_rx_desc *rxd;
85422569400SJanusz Dziedzic 	u32 flags;
85522569400SJanusz Dziedzic 
85622569400SJanusz Dziedzic 	rxd = (void *)skb->data - sizeof(*rxd);
85722569400SJanusz Dziedzic 	flags = __le32_to_cpu(rxd->attention.flags);
85822569400SJanusz Dziedzic 
85922569400SJanusz Dziedzic 	if (flags & RX_ATTENTION_FLAGS_TKIP_MIC_ERR)
86022569400SJanusz Dziedzic 		return true;
86122569400SJanusz Dziedzic 
86222569400SJanusz Dziedzic 	return false;
86322569400SJanusz Dziedzic }
86422569400SJanusz Dziedzic 
865a80ddb00SJanusz Dziedzic static bool ath10k_htt_rx_is_mgmt(struct sk_buff *skb)
866a80ddb00SJanusz Dziedzic {
867a80ddb00SJanusz Dziedzic 	struct htt_rx_desc *rxd;
868a80ddb00SJanusz Dziedzic 	u32 flags;
869a80ddb00SJanusz Dziedzic 
870a80ddb00SJanusz Dziedzic 	rxd = (void *)skb->data - sizeof(*rxd);
871a80ddb00SJanusz Dziedzic 	flags = __le32_to_cpu(rxd->attention.flags);
872a80ddb00SJanusz Dziedzic 
873a80ddb00SJanusz Dziedzic 	if (flags & RX_ATTENTION_FLAGS_MGMT_TYPE)
874a80ddb00SJanusz Dziedzic 		return true;
875a80ddb00SJanusz Dziedzic 
876a80ddb00SJanusz Dziedzic 	return false;
877a80ddb00SJanusz Dziedzic }
878a80ddb00SJanusz Dziedzic 
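/* Translate the HW checksum attention bits into an skb ip_summed value.
 * CHECKSUM_UNNECESSARY is reported only for IPv4/IPv6 TCP or UDP frames for
 * which neither the IP nor the TCP/UDP checksum failed in hardware;
 * everything else falls back to CHECKSUM_NONE. */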
879605f81aaSMichal Kazior static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
880605f81aaSMichal Kazior {
881605f81aaSMichal Kazior 	struct htt_rx_desc *rxd;
882605f81aaSMichal Kazior 	u32 flags, info;
883605f81aaSMichal Kazior 	bool is_ip4, is_ip6;
884605f81aaSMichal Kazior 	bool is_tcp, is_udp;
885605f81aaSMichal Kazior 	bool ip_csum_ok, tcpudp_csum_ok;
886605f81aaSMichal Kazior 
887605f81aaSMichal Kazior 	rxd = (void *)skb->data - sizeof(*rxd);
888605f81aaSMichal Kazior 	flags = __le32_to_cpu(rxd->attention.flags);
889605f81aaSMichal Kazior 	info = __le32_to_cpu(rxd->msdu_start.info1);
890605f81aaSMichal Kazior 
891605f81aaSMichal Kazior 	is_ip4 = !!(info & RX_MSDU_START_INFO1_IPV4_PROTO);
892605f81aaSMichal Kazior 	is_ip6 = !!(info & RX_MSDU_START_INFO1_IPV6_PROTO);
893605f81aaSMichal Kazior 	is_tcp = !!(info & RX_MSDU_START_INFO1_TCP_PROTO);
894605f81aaSMichal Kazior 	is_udp = !!(info & RX_MSDU_START_INFO1_UDP_PROTO);
895605f81aaSMichal Kazior 	ip_csum_ok = !(flags & RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL);
896605f81aaSMichal Kazior 	tcpudp_csum_ok = !(flags & RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL);
897605f81aaSMichal Kazior 
898605f81aaSMichal Kazior 	if (!is_ip4 && !is_ip6)
899605f81aaSMichal Kazior 		return CHECKSUM_NONE;
900605f81aaSMichal Kazior 	if (!is_tcp && !is_udp)
901605f81aaSMichal Kazior 		return CHECKSUM_NONE;
902605f81aaSMichal Kazior 	if (!ip_csum_ok)
903605f81aaSMichal Kazior 		return CHECKSUM_NONE;
904605f81aaSMichal Kazior 	if (!tcpudp_csum_ok)
905605f81aaSMichal Kazior 		return CHECKSUM_NONE;
906605f81aaSMichal Kazior 
907605f81aaSMichal Kazior 	return CHECKSUM_UNNECESSARY;
908605f81aaSMichal Kazior }
909605f81aaSMichal Kazior 
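/* Process an HTT rx indication: walk each MPDU range, pop the matching MSDU
 * chains off the rx ring, drop frames that are zero-length, failed
 * decryption, are management frames (handled via WMI), arrived while CAC is
 * running or span chained buffers, and hand everything else to the
 * A-MSDU/MSDU delivery helpers above. */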
9105e3dd157SKalle Valo static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
9115e3dd157SKalle Valo 				  struct htt_rx_indication *rx)
9125e3dd157SKalle Valo {
9135e3dd157SKalle Valo 	struct htt_rx_info info;
9145e3dd157SKalle Valo 	struct htt_rx_indication_mpdu_range *mpdu_ranges;
9155e3dd157SKalle Valo 	struct ieee80211_hdr *hdr;
9165e3dd157SKalle Valo 	int num_mpdu_ranges;
9175e3dd157SKalle Valo 	int fw_desc_len;
9185e3dd157SKalle Valo 	u8 *fw_desc;
9195e3dd157SKalle Valo 	int i, j;
9205e3dd157SKalle Valo 
9215e3dd157SKalle Valo 	memset(&info, 0, sizeof(info));
9225e3dd157SKalle Valo 
9235e3dd157SKalle Valo 	fw_desc_len = __le16_to_cpu(rx->prefix.fw_rx_desc_bytes);
9245e3dd157SKalle Valo 	fw_desc = (u8 *)&rx->fw_desc;
9255e3dd157SKalle Valo 
9265e3dd157SKalle Valo 	num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
9275e3dd157SKalle Valo 			     HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
9285e3dd157SKalle Valo 	mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx);
9295e3dd157SKalle Valo 
9305e3dd157SKalle Valo 	ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
9315e3dd157SKalle Valo 			rx, sizeof(*rx) +
9325e3dd157SKalle Valo 			(sizeof(struct htt_rx_indication_mpdu_range) *
9335e3dd157SKalle Valo 				num_mpdu_ranges));
9345e3dd157SKalle Valo 
9355e3dd157SKalle Valo 	for (i = 0; i < num_mpdu_ranges; i++) {
9365e3dd157SKalle Valo 		info.status = mpdu_ranges[i].mpdu_range_status;
9375e3dd157SKalle Valo 
9385e3dd157SKalle Valo 		for (j = 0; j < mpdu_ranges[i].mpdu_count; j++) {
9395e3dd157SKalle Valo 			struct sk_buff *msdu_head, *msdu_tail;
9405e3dd157SKalle Valo 			enum htt_rx_mpdu_status status;
9415e3dd157SKalle Valo 			int msdu_chaining;
9425e3dd157SKalle Valo 
9435e3dd157SKalle Valo 			msdu_head = NULL;
9445e3dd157SKalle Valo 			msdu_tail = NULL;
9455e3dd157SKalle Valo 			msdu_chaining = ath10k_htt_rx_amsdu_pop(htt,
9465e3dd157SKalle Valo 							 &fw_desc,
9475e3dd157SKalle Valo 							 &fw_desc_len,
9485e3dd157SKalle Valo 							 &msdu_head,
9495e3dd157SKalle Valo 							 &msdu_tail);
9505e3dd157SKalle Valo 
9515e3dd157SKalle Valo 			if (!msdu_head) {
9525e3dd157SKalle Valo 				ath10k_warn("htt rx no data!\n");
9535e3dd157SKalle Valo 				continue;
9545e3dd157SKalle Valo 			}
9555e3dd157SKalle Valo 
9565e3dd157SKalle Valo 			if (msdu_head->len == 0) {
9575e3dd157SKalle Valo 				ath10k_dbg(ATH10K_DBG_HTT,
9585e3dd157SKalle Valo 					   "htt rx dropping due to zero-len\n");
9595e3dd157SKalle Valo 				ath10k_htt_rx_free_msdu_chain(msdu_head);
9605e3dd157SKalle Valo 				continue;
9615e3dd157SKalle Valo 			}
9625e3dd157SKalle Valo 
9635e3dd157SKalle Valo 			if (ath10k_htt_rx_has_decrypt_err(msdu_head)) {
964c6b56b03SBen Greear 				ath10k_dbg(ATH10K_DBG_HTT,
965c6b56b03SBen Greear 					   "htt rx dropping due to decrypt-err\n");
9665e3dd157SKalle Valo 				ath10k_htt_rx_free_msdu_chain(msdu_head);
9675e3dd157SKalle Valo 				continue;
9685e3dd157SKalle Valo 			}
9695e3dd157SKalle Valo 
9705e3dd157SKalle Valo 			status = info.status;
9715e3dd157SKalle Valo 
9725e3dd157SKalle Valo 			/* Skip mgmt frames while we handle them in WMI */
973a80ddb00SJanusz Dziedzic 			if (status == HTT_RX_IND_MPDU_STATUS_MGMT_CTRL ||
974a80ddb00SJanusz Dziedzic 			    ath10k_htt_rx_is_mgmt(msdu_head)) {
97575fb2f94SBen Greear 				ath10k_dbg(ATH10K_DBG_HTT, "htt rx mgmt ctrl\n");
9765e3dd157SKalle Valo 				ath10k_htt_rx_free_msdu_chain(msdu_head);
9775e3dd157SKalle Valo 				continue;
9785e3dd157SKalle Valo 			}
9795e3dd157SKalle Valo 
9805e3dd157SKalle Valo 			if (status != HTT_RX_IND_MPDU_STATUS_OK &&
9815e3dd157SKalle Valo 			    status != HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR &&
982716ae53cSJanusz Dziedzic 			    status != HTT_RX_IND_MPDU_STATUS_ERR_INV_PEER &&
9835e3dd157SKalle Valo 			    !htt->ar->monitor_enabled) {
9845e3dd157SKalle Valo 				ath10k_dbg(ATH10K_DBG_HTT,
9855e3dd157SKalle Valo 					   "htt rx ignoring frame w/ status %d\n",
9865e3dd157SKalle Valo 					   status);
9875e3dd157SKalle Valo 				ath10k_htt_rx_free_msdu_chain(msdu_head);
9885e3dd157SKalle Valo 				continue;
9895e3dd157SKalle Valo 			}
9905e3dd157SKalle Valo 
991e8a50f8bSMarek Puzyniak 			if (test_bit(ATH10K_CAC_RUNNING, &htt->ar->dev_flags)) {
99275fb2f94SBen Greear 				ath10k_dbg(ATH10K_DBG_HTT,
99375fb2f94SBen Greear 					   "htt rx CAC running\n");
994e8a50f8bSMarek Puzyniak 				ath10k_htt_rx_free_msdu_chain(msdu_head);
995e8a50f8bSMarek Puzyniak 				continue;
996e8a50f8bSMarek Puzyniak 			}
997e8a50f8bSMarek Puzyniak 
9985e3dd157SKalle Valo 			/* FIXME: we do not support chaining yet.
9995e3dd157SKalle Valo 			 * this needs investigation */
10005e3dd157SKalle Valo 			if (msdu_chaining) {
100175fb2f94SBen Greear 				ath10k_warn("htt rx msdu_chaining is true\n");
10025e3dd157SKalle Valo 				ath10k_htt_rx_free_msdu_chain(msdu_head);
10035e3dd157SKalle Valo 				continue;
10045e3dd157SKalle Valo 			}
10055e3dd157SKalle Valo 
10065e3dd157SKalle Valo 			info.skb     = msdu_head;
10075e3dd157SKalle Valo 			info.fcs_err = ath10k_htt_rx_has_fcs_err(msdu_head);
100822569400SJanusz Dziedzic 			info.mic_err = ath10k_htt_rx_has_mic_err(msdu_head);
1009c6b56b03SBen Greear 
1010c6b56b03SBen Greear 			if (info.fcs_err)
1011c6b56b03SBen Greear 				ath10k_dbg(ATH10K_DBG_HTT,
1012c6b56b03SBen Greear 					   "htt rx has FCS err\n");
1013c6b56b03SBen Greear 
1014c6b56b03SBen Greear 			if (info.mic_err)
1015c6b56b03SBen Greear 				ath10k_dbg(ATH10K_DBG_HTT,
1016c6b56b03SBen Greear 					   "htt rx has MIC err\n");
1017c6b56b03SBen Greear 
10185e3dd157SKalle Valo 			info.signal  = ATH10K_DEFAULT_NOISE_FLOOR;
10195e3dd157SKalle Valo 			info.signal += rx->ppdu.combined_rssi;
10205e3dd157SKalle Valo 
10215e3dd157SKalle Valo 			info.rate.info0 = rx->ppdu.info0;
10225e3dd157SKalle Valo 			info.rate.info1 = __le32_to_cpu(rx->ppdu.info1);
10235e3dd157SKalle Valo 			info.rate.info2 = __le32_to_cpu(rx->ppdu.info2);
1024e72698f8SChun-Yeow Yeoh 			info.tsf = __le32_to_cpu(rx->ppdu.tsf);
10255e3dd157SKalle Valo 
10265e3dd157SKalle Valo 			hdr = ath10k_htt_rx_skb_get_hdr(msdu_head);
10275e3dd157SKalle Valo 
10285e3dd157SKalle Valo 			if (ath10k_htt_rx_hdr_is_amsdu(hdr))
1029f6dc2095SMichal Kazior 				ath10k_htt_rx_amsdu(htt, &info);
10305e3dd157SKalle Valo 			else
1031f6dc2095SMichal Kazior 				ath10k_htt_rx_msdu(htt, &info);
10325e3dd157SKalle Valo 		}
10335e3dd157SKalle Valo 	}
10345e3dd157SKalle Valo 
10356e712d42SMichal Kazior 	tasklet_schedule(&htt->rx_replenish_task);
10365e3dd157SKalle Valo }
10375e3dd157SKalle Valo 
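/* Process an HTT rx fragment indication carrying a single 802.11 fragment.
 * Only RAW decap is handled here: the crypto IV is stripped by sliding the
 * 802.11 header forward, and the FCS, crypto trailer and (for the last TKIP
 * fragment) the MIC are trimmed off the tail before delivery. */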
10385e3dd157SKalle Valo static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
10395e3dd157SKalle Valo 				struct htt_rx_fragment_indication *frag)
10405e3dd157SKalle Valo {
10415e3dd157SKalle Valo 	struct sk_buff *msdu_head, *msdu_tail;
10425e3dd157SKalle Valo 	struct htt_rx_desc *rxd;
10435e3dd157SKalle Valo 	enum rx_msdu_decap_format fmt;
10445e3dd157SKalle Valo 	struct htt_rx_info info = {};
10455e3dd157SKalle Valo 	struct ieee80211_hdr *hdr;
10465e3dd157SKalle Valo 	int msdu_chaining;
10475e3dd157SKalle Valo 	bool tkip_mic_err;
10485e3dd157SKalle Valo 	bool decrypt_err;
10495e3dd157SKalle Valo 	u8 *fw_desc;
10505e3dd157SKalle Valo 	int fw_desc_len, hdrlen, paramlen;
10515e3dd157SKalle Valo 	int trim;
10525e3dd157SKalle Valo 
10535e3dd157SKalle Valo 	fw_desc_len = __le16_to_cpu(frag->fw_rx_desc_bytes);
10545e3dd157SKalle Valo 	fw_desc = (u8 *)frag->fw_msdu_rx_desc;
10555e3dd157SKalle Valo 
10565e3dd157SKalle Valo 	msdu_head = NULL;
10575e3dd157SKalle Valo 	msdu_tail = NULL;
10585e3dd157SKalle Valo 	msdu_chaining = ath10k_htt_rx_amsdu_pop(htt, &fw_desc, &fw_desc_len,
10595e3dd157SKalle Valo 						&msdu_head, &msdu_tail);
10605e3dd157SKalle Valo 
10615e3dd157SKalle Valo 	ath10k_dbg(ATH10K_DBG_HTT_DUMP, "htt rx frag ahead\n");
10625e3dd157SKalle Valo 
10635e3dd157SKalle Valo 	if (!msdu_head) {
10645e3dd157SKalle Valo 		ath10k_warn("htt rx frag no data\n");
10655e3dd157SKalle Valo 		return;
10665e3dd157SKalle Valo 	}
10675e3dd157SKalle Valo 
10685e3dd157SKalle Valo 	if (msdu_chaining || msdu_head != msdu_tail) {
10695e3dd157SKalle Valo 		ath10k_warn("aggregation with fragmentation?!\n");
10705e3dd157SKalle Valo 		ath10k_htt_rx_free_msdu_chain(msdu_head);
10715e3dd157SKalle Valo 		return;
10725e3dd157SKalle Valo 	}
10735e3dd157SKalle Valo 
10745e3dd157SKalle Valo 	/* FIXME: implement signal strength */
10755e3dd157SKalle Valo 
10765e3dd157SKalle Valo 	hdr = (struct ieee80211_hdr *)msdu_head->data;
10775e3dd157SKalle Valo 	rxd = (void *)msdu_head->data - sizeof(*rxd);
10785e3dd157SKalle Valo 	tkip_mic_err = !!(__le32_to_cpu(rxd->attention.flags) &
10795e3dd157SKalle Valo 				RX_ATTENTION_FLAGS_TKIP_MIC_ERR);
10805e3dd157SKalle Valo 	decrypt_err = !!(__le32_to_cpu(rxd->attention.flags) &
10815e3dd157SKalle Valo 				RX_ATTENTION_FLAGS_DECRYPT_ERR);
10825e3dd157SKalle Valo 	fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
10835e3dd157SKalle Valo 			RX_MSDU_START_INFO1_DECAP_FORMAT);
10845e3dd157SKalle Valo 
10855e3dd157SKalle Valo 	if (fmt != RX_MSDU_DECAP_RAW) {
10865e3dd157SKalle Valo 		ath10k_warn("we don't support non-raw fragmented rx yet\n");
10875e3dd157SKalle Valo 		dev_kfree_skb_any(msdu_head);
10885e3dd157SKalle Valo 		goto end;
10895e3dd157SKalle Valo 	}
10905e3dd157SKalle Valo 
10915e3dd157SKalle Valo 	info.skb = msdu_head;
10925e3dd157SKalle Valo 	info.status = HTT_RX_IND_MPDU_STATUS_OK;
10935e3dd157SKalle Valo 	info.encrypt_type = MS(__le32_to_cpu(rxd->mpdu_start.info0),
10945e3dd157SKalle Valo 				RX_MPDU_START_INFO0_ENCRYPT_TYPE);
1095605f81aaSMichal Kazior 	info.skb->ip_summed = ath10k_htt_rx_get_csum_state(info.skb);
10965e3dd157SKalle Valo 
10975e3dd157SKalle Valo 	if (tkip_mic_err) {
10985e3dd157SKalle Valo 		ath10k_warn("tkip mic error\n");
10995e3dd157SKalle Valo 		info.status = HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR;
11005e3dd157SKalle Valo 	}
11015e3dd157SKalle Valo 
11025e3dd157SKalle Valo 	if (decrypt_err) {
11035e3dd157SKalle Valo 		ath10k_warn("decryption err in fragmented rx\n");
11045e3dd157SKalle Valo 		dev_kfree_skb_any(info.skb);
11055e3dd157SKalle Valo 		goto end;
11065e3dd157SKalle Valo 	}
11075e3dd157SKalle Valo 
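	/* For protected frames strip the crypto parameters (e.g. the IV)
	 * that sit between the 802.11 header and the payload by sliding the
	 * header forward over them.
	 */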
11085e3dd157SKalle Valo 	if (info.encrypt_type != HTT_RX_MPDU_ENCRYPT_NONE) {
11095e3dd157SKalle Valo 		hdrlen = ieee80211_hdrlen(hdr->frame_control);
11105e3dd157SKalle Valo 		paramlen = ath10k_htt_rx_crypto_param_len(info.encrypt_type);
11115e3dd157SKalle Valo 
11125e3dd157SKalle Valo 		/* It is more efficient to move the header than the payload */
11135e3dd157SKalle Valo 		memmove((void *)info.skb->data + paramlen,
11145e3dd157SKalle Valo 			(void *)info.skb->data,
11155e3dd157SKalle Valo 			hdrlen);
11165e3dd157SKalle Valo 		skb_pull(info.skb, paramlen);
11175e3dd157SKalle Valo 		hdr = (struct ieee80211_hdr *)info.skb->data;
11185e3dd157SKalle Valo 	}
11195e3dd157SKalle Valo 
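	/* Raw decap leaves the over-the-air trailer on the frame: the FCS,
	 * the cipher's ICV/MIC trailer and, on the last TKIP fragment, the
	 * Michael MIC. Work out how much to trim.
	 */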
11205e3dd157SKalle Valo 	/* remove trailing FCS */
11215e3dd157SKalle Valo 	trim = 4;
11225e3dd157SKalle Valo 
11235e3dd157SKalle Valo 	/* remove crypto trailer */
11245e3dd157SKalle Valo 	trim += ath10k_htt_rx_crypto_tail_len(info.encrypt_type);
11255e3dd157SKalle Valo 
11265e3dd157SKalle Valo 	/* last fragment of TKIP frags has MIC */
11275e3dd157SKalle Valo 	if (!ieee80211_has_morefrags(hdr->frame_control) &&
11285e3dd157SKalle Valo 	    info.encrypt_type == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
11295e3dd157SKalle Valo 		trim += 8;
11305e3dd157SKalle Valo 
11315e3dd157SKalle Valo 	if (trim > info.skb->len) {
11325e3dd157SKalle Valo 		ath10k_warn("htt rx fragment: trailer longer than the frame itself? drop\n");
11335e3dd157SKalle Valo 		dev_kfree_skb_any(info.skb);
11345e3dd157SKalle Valo 		goto end;
11355e3dd157SKalle Valo 	}
11365e3dd157SKalle Valo 
11375e3dd157SKalle Valo 	skb_trim(info.skb, info.skb->len - trim);
11385e3dd157SKalle Valo 
113975fb2f94SBen Greear 	ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx frag mpdu: ",
11405e3dd157SKalle Valo 			info.skb->data, info.skb->len);
11415e3dd157SKalle Valo 	ath10k_process_rx(htt->ar, &info);
11425e3dd157SKalle Valo 
11435e3dd157SKalle Valo end:
11445e3dd157SKalle Valo 	if (fw_desc_len > 0) {
11455e3dd157SKalle Valo 		ath10k_dbg(ATH10K_DBG_HTT,
11465e3dd157SKalle Valo 			   "expecting more fragmented rx in one indication %d\n",
11475e3dd157SKalle Valo 			   fw_desc_len);
11485e3dd157SKalle Valo 	}
11495e3dd157SKalle Valo }
11505e3dd157SKalle Valo 
11516c5151a9SMichal Kazior static void ath10k_htt_rx_frm_tx_compl(struct ath10k *ar,
11526c5151a9SMichal Kazior 				       struct sk_buff *skb)
11536c5151a9SMichal Kazior {
11546c5151a9SMichal Kazior 	struct ath10k_htt *htt = &ar->htt;
11556c5151a9SMichal Kazior 	struct htt_resp *resp = (struct htt_resp *)skb->data;
11566c5151a9SMichal Kazior 	struct htt_tx_done tx_done = {};
11576c5151a9SMichal Kazior 	int status = MS(resp->data_tx_completion.flags, HTT_DATA_TX_STATUS);
11586c5151a9SMichal Kazior 	__le16 msdu_id;
11596c5151a9SMichal Kazior 	int i;
11606c5151a9SMichal Kazior 
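	/* Map the firmware completion status onto the driver's tx_done
	 * flags: NO_ACK means the frame went out but was never acknowledged,
	 * while discard/postpone/download-fail all count as discarded.
	 */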
11616c5151a9SMichal Kazior 	switch (status) {
11626c5151a9SMichal Kazior 	case HTT_DATA_TX_STATUS_NO_ACK:
11636c5151a9SMichal Kazior 		tx_done.no_ack = true;
11646c5151a9SMichal Kazior 		break;
11656c5151a9SMichal Kazior 	case HTT_DATA_TX_STATUS_OK:
11666c5151a9SMichal Kazior 		break;
11676c5151a9SMichal Kazior 	case HTT_DATA_TX_STATUS_DISCARD:
11686c5151a9SMichal Kazior 	case HTT_DATA_TX_STATUS_POSTPONE:
11696c5151a9SMichal Kazior 	case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL:
11706c5151a9SMichal Kazior 		tx_done.discard = true;
11716c5151a9SMichal Kazior 		break;
11726c5151a9SMichal Kazior 	default:
11736c5151a9SMichal Kazior 		ath10k_warn("unhandled tx completion status %d\n", status);
11746c5151a9SMichal Kazior 		tx_done.discard = true;
11756c5151a9SMichal Kazior 		break;
11766c5151a9SMichal Kazior 	}
11776c5151a9SMichal Kazior 
11786c5151a9SMichal Kazior 	ath10k_dbg(ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
11796c5151a9SMichal Kazior 		   resp->data_tx_completion.num_msdus);
11806c5151a9SMichal Kazior 
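	/* Each completion carries a list of MSDU ids; hand every one of them
	 * to ath10k_txrx_tx_unref() to complete the frame and free its tx
	 * slot.
	 */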
11816c5151a9SMichal Kazior 	for (i = 0; i < resp->data_tx_completion.num_msdus; i++) {
11826c5151a9SMichal Kazior 		msdu_id = resp->data_tx_completion.msdus[i];
11836c5151a9SMichal Kazior 		tx_done.msdu_id = __le16_to_cpu(msdu_id);
11846c5151a9SMichal Kazior 		ath10k_txrx_tx_unref(htt, &tx_done);
11856c5151a9SMichal Kazior 	}
11866c5151a9SMichal Kazior }
11876c5151a9SMichal Kazior 
11885e3dd157SKalle Valo void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
11895e3dd157SKalle Valo {
1190edb8236dSMichal Kazior 	struct ath10k_htt *htt = &ar->htt;
11915e3dd157SKalle Valo 	struct htt_resp *resp = (struct htt_resp *)skb->data;
11925e3dd157SKalle Valo 
11935e3dd157SKalle Valo 	/* confirm alignment */
11945e3dd157SKalle Valo 	if (!IS_ALIGNED((unsigned long)skb->data, 4))
11955e3dd157SKalle Valo 		ath10k_warn("unaligned htt message, expect trouble\n");
11965e3dd157SKalle Valo 
119775fb2f94SBen Greear 	ath10k_dbg(ATH10K_DBG_HTT, "htt rx, msg_type: 0x%02x\n",
11985e3dd157SKalle Valo 		   resp->hdr.msg_type);
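	/* Dispatch on the T2H message type. RX and data tx completion
	 * indications are queued for the txrx tasklet and must not be freed
	 * here; everything else is handled inline and the skb is freed at
	 * the end of this function.
	 */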
11995e3dd157SKalle Valo 	switch (resp->hdr.msg_type) {
12005e3dd157SKalle Valo 	case HTT_T2H_MSG_TYPE_VERSION_CONF: {
12015e3dd157SKalle Valo 		htt->target_version_major = resp->ver_resp.major;
12025e3dd157SKalle Valo 		htt->target_version_minor = resp->ver_resp.minor;
12035e3dd157SKalle Valo 		complete(&htt->target_version_received);
12045e3dd157SKalle Valo 		break;
12055e3dd157SKalle Valo 	}
12066c5151a9SMichal Kazior 	case HTT_T2H_MSG_TYPE_RX_IND:
12076c5151a9SMichal Kazior 		skb_queue_tail(&htt->rx_compl_q, skb);
12086c5151a9SMichal Kazior 		tasklet_schedule(&htt->txrx_compl_task);
12096c5151a9SMichal Kazior 		return;
12105e3dd157SKalle Valo 	case HTT_T2H_MSG_TYPE_PEER_MAP: {
12115e3dd157SKalle Valo 		struct htt_peer_map_event ev = {
12125e3dd157SKalle Valo 			.vdev_id = resp->peer_map.vdev_id,
12135e3dd157SKalle Valo 			.peer_id = __le16_to_cpu(resp->peer_map.peer_id),
12145e3dd157SKalle Valo 		};
12155e3dd157SKalle Valo 		memcpy(ev.addr, resp->peer_map.addr, sizeof(ev.addr));
12165e3dd157SKalle Valo 		ath10k_peer_map_event(htt, &ev);
12175e3dd157SKalle Valo 		break;
12185e3dd157SKalle Valo 	}
12195e3dd157SKalle Valo 	case HTT_T2H_MSG_TYPE_PEER_UNMAP: {
12205e3dd157SKalle Valo 		struct htt_peer_unmap_event ev = {
12215e3dd157SKalle Valo 			.peer_id = __le16_to_cpu(resp->peer_unmap.peer_id),
12225e3dd157SKalle Valo 		};
12235e3dd157SKalle Valo 		ath10k_peer_unmap_event(htt, &ev);
12245e3dd157SKalle Valo 		break;
12255e3dd157SKalle Valo 	}
12265e3dd157SKalle Valo 	case HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION: {
12275e3dd157SKalle Valo 		struct htt_tx_done tx_done = {};
12285e3dd157SKalle Valo 		int status = __le32_to_cpu(resp->mgmt_tx_completion.status);
12295e3dd157SKalle Valo 
12305e3dd157SKalle Valo 		tx_done.msdu_id =
12315e3dd157SKalle Valo 			__le32_to_cpu(resp->mgmt_tx_completion.desc_id);
12325e3dd157SKalle Valo 
12335e3dd157SKalle Valo 		switch (status) {
12345e3dd157SKalle Valo 		case HTT_MGMT_TX_STATUS_OK:
12355e3dd157SKalle Valo 			break;
12365e3dd157SKalle Valo 		case HTT_MGMT_TX_STATUS_RETRY:
12375e3dd157SKalle Valo 			tx_done.no_ack = true;
12385e3dd157SKalle Valo 			break;
12395e3dd157SKalle Valo 		case HTT_MGMT_TX_STATUS_DROP:
12405e3dd157SKalle Valo 			tx_done.discard = true;
12415e3dd157SKalle Valo 			break;
12425e3dd157SKalle Valo 		}
12435e3dd157SKalle Valo 
12446c5151a9SMichal Kazior 		spin_lock_bh(&htt->tx_lock);
12450a89f8a0SMichal Kazior 		ath10k_txrx_tx_unref(htt, &tx_done);
12466c5151a9SMichal Kazior 		spin_unlock_bh(&htt->tx_lock);
12475e3dd157SKalle Valo 		break;
12485e3dd157SKalle Valo 	}
12496c5151a9SMichal Kazior 	case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
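		/* Ownership of the skb passes to tx_compl_q; the tasklet
		 * frees it, so return instead of falling through to the
		 * kfree below.
		 */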
12506c5151a9SMichal Kazior 		spin_lock_bh(&htt->tx_lock);
12516c5151a9SMichal Kazior 		__skb_queue_tail(&htt->tx_compl_q, skb);
12526c5151a9SMichal Kazior 		spin_unlock_bh(&htt->tx_lock);
12536c5151a9SMichal Kazior 		tasklet_schedule(&htt->txrx_compl_task);
12546c5151a9SMichal Kazior 		return;
12555e3dd157SKalle Valo 	case HTT_T2H_MSG_TYPE_SEC_IND: {
12565e3dd157SKalle Valo 		struct ath10k *ar = htt->ar;
12575e3dd157SKalle Valo 		struct htt_security_indication *ev = &resp->security_indication;
12585e3dd157SKalle Valo 
12595e3dd157SKalle Valo 		ath10k_dbg(ATH10K_DBG_HTT,
12605e3dd157SKalle Valo 			   "sec ind peer_id %d unicast %d type %d\n",
12615e3dd157SKalle Valo 			  __le16_to_cpu(ev->peer_id),
12625e3dd157SKalle Valo 			  !!(ev->flags & HTT_SECURITY_IS_UNICAST),
12635e3dd157SKalle Valo 			  MS(ev->flags, HTT_SECURITY_TYPE));
12645e3dd157SKalle Valo 		complete(&ar->install_key_done);
12655e3dd157SKalle Valo 		break;
12665e3dd157SKalle Valo 	}
12675e3dd157SKalle Valo 	case HTT_T2H_MSG_TYPE_RX_FRAG_IND: {
12685e3dd157SKalle Valo 		ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
12695e3dd157SKalle Valo 				skb->data, skb->len);
12705e3dd157SKalle Valo 		ath10k_htt_rx_frag_handler(htt, &resp->rx_frag_ind);
12715e3dd157SKalle Valo 		break;
12725e3dd157SKalle Valo 	}
12735e3dd157SKalle Valo 	case HTT_T2H_MSG_TYPE_TEST:
12745e3dd157SKalle Valo 		/* FIX THIS */
12755e3dd157SKalle Valo 		break;
12765e3dd157SKalle Valo 	case HTT_T2H_MSG_TYPE_STATS_CONF:
1277a9bf0506SKalle Valo 		trace_ath10k_htt_stats(skb->data, skb->len);
1278a9bf0506SKalle Valo 		break;
1279a9bf0506SKalle Valo 	case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
12805e3dd157SKalle Valo 	case HTT_T2H_MSG_TYPE_RX_ADDBA:
12815e3dd157SKalle Valo 	case HTT_T2H_MSG_TYPE_RX_DELBA:
12825e3dd157SKalle Valo 	case HTT_T2H_MSG_TYPE_RX_FLUSH:
12835e3dd157SKalle Valo 	default:
12845e3dd157SKalle Valo 		ath10k_dbg(ATH10K_DBG_HTT, "htt event (%d) not handled\n",
12855e3dd157SKalle Valo 			   resp->hdr.msg_type);
12865e3dd157SKalle Valo 		ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
12875e3dd157SKalle Valo 				skb->data, skb->len);
12885e3dd157SKalle Valo 		break;
12895e3dd157SKalle Valo 	}
12905e3dd157SKalle Valo 
12915e3dd157SKalle Valo 	/* Free the indication buffer */
12925e3dd157SKalle Valo 	dev_kfree_skb_any(skb);
12935e3dd157SKalle Valo }
12946c5151a9SMichal Kazior 
12956c5151a9SMichal Kazior static void ath10k_htt_txrx_compl_task(unsigned long ptr)
12966c5151a9SMichal Kazior {
12976c5151a9SMichal Kazior 	struct ath10k_htt *htt = (struct ath10k_htt *)ptr;
12986c5151a9SMichal Kazior 	struct htt_resp *resp;
12996c5151a9SMichal Kazior 	struct sk_buff *skb;
13006c5151a9SMichal Kazior 
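	/* Drain deferred data tx completions first, then the queued rx
	 * indications, freeing each message once it has been processed.
	 */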
13016c5151a9SMichal Kazior 	while ((skb = skb_dequeue(&htt->tx_compl_q))) {
13026c5151a9SMichal Kazior 		ath10k_htt_rx_frm_tx_compl(htt->ar, skb);
13036c5151a9SMichal Kazior 		dev_kfree_skb_any(skb);
13046c5151a9SMichal Kazior 	}
13056c5151a9SMichal Kazior 
13066c5151a9SMichal Kazior 	while ((skb = skb_dequeue(&htt->rx_compl_q))) {
13076c5151a9SMichal Kazior 		resp = (struct htt_resp *)skb->data;
13086c5151a9SMichal Kazior 		ath10k_htt_rx_handler(htt, &resp->rx_ind);
13096c5151a9SMichal Kazior 		dev_kfree_skb_any(skb);
13106c5151a9SMichal Kazior 	}
13116c5151a9SMichal Kazior }
1312