15e3dd157SKalle Valo /*
25e3dd157SKalle Valo  * Copyright (c) 2005-2011 Atheros Communications Inc.
35e3dd157SKalle Valo  * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
45e3dd157SKalle Valo  *
55e3dd157SKalle Valo  * Permission to use, copy, modify, and/or distribute this software for any
65e3dd157SKalle Valo  * purpose with or without fee is hereby granted, provided that the above
75e3dd157SKalle Valo  * copyright notice and this permission notice appear in all copies.
85e3dd157SKalle Valo  *
95e3dd157SKalle Valo  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
105e3dd157SKalle Valo  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
115e3dd157SKalle Valo  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
125e3dd157SKalle Valo  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
135e3dd157SKalle Valo  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
145e3dd157SKalle Valo  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
155e3dd157SKalle Valo  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
165e3dd157SKalle Valo  */
175e3dd157SKalle Valo 
18edb8236dSMichal Kazior #include "core.h"
195e3dd157SKalle Valo #include "htc.h"
205e3dd157SKalle Valo #include "htt.h"
215e3dd157SKalle Valo #include "txrx.h"
225e3dd157SKalle Valo #include "debug.h"
23a9bf0506SKalle Valo #include "trace.h"
24aa5b4fbcSMichal Kazior #include "mac.h"
255e3dd157SKalle Valo 
265e3dd157SKalle Valo #include <linux/log2.h>
275e3dd157SKalle Valo 
285e3dd157SKalle Valo /* slightly larger than one large A-MPDU */
295e3dd157SKalle Valo #define HTT_RX_RING_SIZE_MIN 128
305e3dd157SKalle Valo 
315e3dd157SKalle Valo /* roughly 20 ms @ 1 Gbps of 1500B MSDUs */
325e3dd157SKalle Valo #define HTT_RX_RING_SIZE_MAX 2048
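/* (Derivation, for illustration only: 1 Gbps / 8 / 1500 B is ~83k frames/s;
 * 20 ms of that is ~1667 frames, rounded up to the next power of two.)
 */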
335e3dd157SKalle Valo 
345e3dd157SKalle Valo #define HTT_RX_AVG_FRM_BYTES 1000
355e3dd157SKalle Valo 
365e3dd157SKalle Valo /* ms, very conservative */
375e3dd157SKalle Valo #define HTT_RX_HOST_LATENCY_MAX_MS 20
385e3dd157SKalle Valo 
395e3dd157SKalle Valo /* ms, conservative */
405e3dd157SKalle Valo #define HTT_RX_HOST_LATENCY_WORST_LIKELY_MS 10
415e3dd157SKalle Valo 
425e3dd157SKalle Valo /* when under memory pressure rx ring refill may fail and needs a retry */
435e3dd157SKalle Valo #define HTT_RX_RING_REFILL_RETRY_MS 50
445e3dd157SKalle Valo 
45f6dc2095SMichal Kazior 
46f6dc2095SMichal Kazior static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);
476c5151a9SMichal Kazior static void ath10k_htt_txrx_compl_task(unsigned long ptr);
48f6dc2095SMichal Kazior 
495e3dd157SKalle Valo static int ath10k_htt_rx_ring_size(struct ath10k_htt *htt)
505e3dd157SKalle Valo {
515e3dd157SKalle Valo 	int size;
525e3dd157SKalle Valo 
535e3dd157SKalle Valo 	/*
545e3dd157SKalle Valo 	 * It is expected that the host CPU will typically be able to
555e3dd157SKalle Valo 	 * service the rx indication from one A-MPDU before the rx
565e3dd157SKalle Valo 	 * indication from the subsequent A-MPDU happens, roughly 1-2 ms
575e3dd157SKalle Valo 	 * later. However, the rx ring should be sized very conservatively,
585e3dd157SKalle Valo 	 * to accommodate the worst reasonable delay before the host CPU
595e3dd157SKalle Valo 	 * services a rx indication interrupt.
605e3dd157SKalle Valo 	 *
615e3dd157SKalle Valo 	 * The rx ring need not be kept full of empty buffers. In theory,
625e3dd157SKalle Valo 	 * the htt host SW can dynamically track the low-water mark in the
635e3dd157SKalle Valo 	 * rx ring, and dynamically adjust the level to which the rx ring
645e3dd157SKalle Valo 	 * is filled with empty buffers, to dynamically meet the desired
655e3dd157SKalle Valo 	 * low-water mark.
665e3dd157SKalle Valo 	 *
675e3dd157SKalle Valo 	 * In contrast, it's difficult to resize the rx ring itself, once
685e3dd157SKalle Valo 	 * it's in use. Thus, the ring itself should be sized very
695e3dd157SKalle Valo 	 * conservatively, while the degree to which the ring is filled
705e3dd157SKalle Valo 	 * with empty buffers should be sized moderately conservatively.
715e3dd157SKalle Valo 	 */
725e3dd157SKalle Valo 
735e3dd157SKalle Valo 	/* 1e6 bps/mbps / 1e3 ms per sec = 1000 */
745e3dd157SKalle Valo 	size =
755e3dd157SKalle Valo 	    htt->max_throughput_mbps *
765e3dd157SKalle Valo 	    1000  /
775e3dd157SKalle Valo 	    (8 * HTT_RX_AVG_FRM_BYTES) * HTT_RX_HOST_LATENCY_MAX_MS;
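	/* Worked example (throughput value assumed purely for illustration):
	 * with max_throughput_mbps = 800 this evaluates left to right as
	 * 800 * 1000 / (8 * 1000) * 20 = 2000 buffers, which is within
	 * [HTT_RX_RING_SIZE_MIN, HTT_RX_RING_SIZE_MAX] and is then rounded
	 * up to the next power of two, i.e. 2048.
	 */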
785e3dd157SKalle Valo 
795e3dd157SKalle Valo 	if (size < HTT_RX_RING_SIZE_MIN)
805e3dd157SKalle Valo 		size = HTT_RX_RING_SIZE_MIN;
815e3dd157SKalle Valo 
825e3dd157SKalle Valo 	if (size > HTT_RX_RING_SIZE_MAX)
835e3dd157SKalle Valo 		size = HTT_RX_RING_SIZE_MAX;
845e3dd157SKalle Valo 
855e3dd157SKalle Valo 	size = roundup_pow_of_two(size);
865e3dd157SKalle Valo 
875e3dd157SKalle Valo 	return size;
885e3dd157SKalle Valo }
895e3dd157SKalle Valo 
905e3dd157SKalle Valo static int ath10k_htt_rx_ring_fill_level(struct ath10k_htt *htt)
915e3dd157SKalle Valo {
925e3dd157SKalle Valo 	int size;
935e3dd157SKalle Valo 
945e3dd157SKalle Valo 	/* 1e6 bps/mbps / 1e3 ms per sec = 1000 */
955e3dd157SKalle Valo 	size =
965e3dd157SKalle Valo 	    htt->max_throughput_mbps *
975e3dd157SKalle Valo 	    1000  /
985e3dd157SKalle Valo 	    (8 * HTT_RX_AVG_FRM_BYTES) * HTT_RX_HOST_LATENCY_WORST_LIKELY_MS;
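	/* Worked example (same assumed 800 Mbps as in ath10k_htt_rx_ring_size):
	 * 800 * 1000 / (8 * 1000) * 10 = 1000 buffers, i.e. roughly half of
	 * the 2048-entry ring is kept filled.
	 */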
995e3dd157SKalle Valo 
1005e3dd157SKalle Valo 	/*
1015e3dd157SKalle Valo 	 * Make sure the fill level is at least 1 less than the ring size.
1025e3dd157SKalle Valo 	 * Leaving 1 element empty allows the SW to easily distinguish
1035e3dd157SKalle Valo 	 * between a full ring and an empty ring.
1045e3dd157SKalle Valo 	 */
1055e3dd157SKalle Valo 	if (size >= htt->rx_ring.size)
1065e3dd157SKalle Valo 		size = htt->rx_ring.size - 1;
1075e3dd157SKalle Valo 
1085e3dd157SKalle Valo 	return size;
1095e3dd157SKalle Valo }
1105e3dd157SKalle Valo 
1115e3dd157SKalle Valo static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt)
1125e3dd157SKalle Valo {
1135e3dd157SKalle Valo 	struct sk_buff *skb;
1145e3dd157SKalle Valo 	struct ath10k_skb_cb *cb;
1155e3dd157SKalle Valo 	int i;
1165e3dd157SKalle Valo 
1175e3dd157SKalle Valo 	for (i = 0; i < htt->rx_ring.fill_cnt; i++) {
1185e3dd157SKalle Valo 		skb = htt->rx_ring.netbufs_ring[i];
1195e3dd157SKalle Valo 		cb = ATH10K_SKB_CB(skb);
1205e3dd157SKalle Valo 		dma_unmap_single(htt->ar->dev, cb->paddr,
1215e3dd157SKalle Valo 				 skb->len + skb_tailroom(skb),
1225e3dd157SKalle Valo 				 DMA_FROM_DEVICE);
1235e3dd157SKalle Valo 		dev_kfree_skb_any(skb);
1245e3dd157SKalle Valo 	}
1255e3dd157SKalle Valo 
1265e3dd157SKalle Valo 	htt->rx_ring.fill_cnt = 0;
1275e3dd157SKalle Valo }
1285e3dd157SKalle Valo 
1295e3dd157SKalle Valo static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
1305e3dd157SKalle Valo {
1315e3dd157SKalle Valo 	struct htt_rx_desc *rx_desc;
1325e3dd157SKalle Valo 	struct sk_buff *skb;
1335e3dd157SKalle Valo 	dma_addr_t paddr;
1345e3dd157SKalle Valo 	int ret = 0, idx;
1355e3dd157SKalle Valo 
1365e3dd157SKalle Valo 	idx = __le32_to_cpu(*(htt->rx_ring.alloc_idx.vaddr));
1375e3dd157SKalle Valo 	while (num > 0) {
1385e3dd157SKalle Valo 		skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN);
1395e3dd157SKalle Valo 		if (!skb) {
1405e3dd157SKalle Valo 			ret = -ENOMEM;
1415e3dd157SKalle Valo 			goto fail;
1425e3dd157SKalle Valo 		}
1435e3dd157SKalle Valo 
1445e3dd157SKalle Valo 		if (!IS_ALIGNED((unsigned long)skb->data, HTT_RX_DESC_ALIGN))
1455e3dd157SKalle Valo 			skb_pull(skb,
1465e3dd157SKalle Valo 				 PTR_ALIGN(skb->data, HTT_RX_DESC_ALIGN) -
1475e3dd157SKalle Valo 				 skb->data);
1485e3dd157SKalle Valo 
1495e3dd157SKalle Valo 		/* Clear rx_desc attention word before posting to Rx ring */
1505e3dd157SKalle Valo 		rx_desc = (struct htt_rx_desc *)skb->data;
1515e3dd157SKalle Valo 		rx_desc->attention.flags = __cpu_to_le32(0);
1525e3dd157SKalle Valo 
1535e3dd157SKalle Valo 		paddr = dma_map_single(htt->ar->dev, skb->data,
1545e3dd157SKalle Valo 				       skb->len + skb_tailroom(skb),
1555e3dd157SKalle Valo 				       DMA_FROM_DEVICE);
1565e3dd157SKalle Valo 
1575e3dd157SKalle Valo 		if (unlikely(dma_mapping_error(htt->ar->dev, paddr))) {
1585e3dd157SKalle Valo 			dev_kfree_skb_any(skb);
1595e3dd157SKalle Valo 			ret = -ENOMEM;
1605e3dd157SKalle Valo 			goto fail;
1615e3dd157SKalle Valo 		}
1625e3dd157SKalle Valo 
1635e3dd157SKalle Valo 		ATH10K_SKB_CB(skb)->paddr = paddr;
1645e3dd157SKalle Valo 		htt->rx_ring.netbufs_ring[idx] = skb;
1655e3dd157SKalle Valo 		htt->rx_ring.paddrs_ring[idx] = __cpu_to_le32(paddr);
1665e3dd157SKalle Valo 		htt->rx_ring.fill_cnt++;
1675e3dd157SKalle Valo 
1685e3dd157SKalle Valo 		num--;
1695e3dd157SKalle Valo 		idx++;
1705e3dd157SKalle Valo 		idx &= htt->rx_ring.size_mask;
1715e3dd157SKalle Valo 	}
1725e3dd157SKalle Valo 
1735e3dd157SKalle Valo fail:
1745e3dd157SKalle Valo 	*(htt->rx_ring.alloc_idx.vaddr) = __cpu_to_le32(idx);
1755e3dd157SKalle Valo 	return ret;
1765e3dd157SKalle Valo }
1775e3dd157SKalle Valo 
1785e3dd157SKalle Valo static int ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
1795e3dd157SKalle Valo {
1805e3dd157SKalle Valo 	lockdep_assert_held(&htt->rx_ring.lock);
1815e3dd157SKalle Valo 	return __ath10k_htt_rx_ring_fill_n(htt, num);
1825e3dd157SKalle Valo }
1835e3dd157SKalle Valo 
1845e3dd157SKalle Valo static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
1855e3dd157SKalle Valo {
1866e712d42SMichal Kazior 	int ret, num_deficit, num_to_fill;
1875e3dd157SKalle Valo 
1886e712d42SMichal Kazior 	/* Refilling the whole RX ring buffer proves to be a bad idea. The
1896e712d42SMichal Kazior 	 * reason is RX may take up a significant amount of CPU cycles and
1906e712d42SMichal Kazior 	 * starve other tasks, e.g. TX on an ethernet device while acting as
1916e712d42SMichal Kazior 	 * a bridge with an ath10k wlan interface. This ended up with very
1926e712d42SMichal Kazior 	 * poor performance once the host CPU was overwhelmed with RX on ath10k.
1936e712d42SMichal Kazior 	 *
1946e712d42SMichal Kazior 	 * By limiting the number of refills the replenishing occurs
1956e712d42SMichal Kazior 	 * progressively. This in turn makes use of the fact that tasklets are
1966e712d42SMichal Kazior 	 * processed in FIFO order. This means actual RX processing can starve
1976e712d42SMichal Kazior 	 * out refilling. If there are not enough buffers on the RX ring the FW
1986e712d42SMichal Kazior 	 * will not report RX until it is refilled with enough buffers. This
1996e712d42SMichal Kazior 	 * automatically balances load with respect to CPU power.
2006e712d42SMichal Kazior 	 *
2016e712d42SMichal Kazior 	 * This probably comes at a cost of lower maximum throughput but
2026e712d42SMichal Kazior 	 * improves the average and stability. */
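	/* Illustration (numbers assumed, not taken from this file): with
	 * fill_level = 1000, fill_cnt = 872 and ATH10K_HTT_MAX_NUM_REFILL of
	 * e.g. 16, num_deficit is 128 but only 16 buffers are posted per pass;
	 * the remaining deficit (> 0) re-schedules rx_replenish_task, so the
	 * ring is topped up gradually between RX tasklet runs.
	 */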
2035e3dd157SKalle Valo 	spin_lock_bh(&htt->rx_ring.lock);
2046e712d42SMichal Kazior 	num_deficit = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
2056e712d42SMichal Kazior 	num_to_fill = min(ATH10K_HTT_MAX_NUM_REFILL, num_deficit);
2066e712d42SMichal Kazior 	num_deficit -= num_to_fill;
2075e3dd157SKalle Valo 	ret = ath10k_htt_rx_ring_fill_n(htt, num_to_fill);
2085e3dd157SKalle Valo 	if (ret == -ENOMEM) {
2095e3dd157SKalle Valo 		/*
2105e3dd157SKalle Valo 		 * Failed to fill it to the desired level -
2115e3dd157SKalle Valo 		 * we'll start a timer and try again next time.
2125e3dd157SKalle Valo 		 * As long as enough buffers are left in the ring for
2135e3dd157SKalle Valo 		 * another A-MPDU rx, no special recovery is needed.
2145e3dd157SKalle Valo 		 */
2155e3dd157SKalle Valo 		mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
2165e3dd157SKalle Valo 			  msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS));
2176e712d42SMichal Kazior 	} else if (num_deficit > 0) {
2186e712d42SMichal Kazior 		tasklet_schedule(&htt->rx_replenish_task);
2195e3dd157SKalle Valo 	}
2205e3dd157SKalle Valo 	spin_unlock_bh(&htt->rx_ring.lock);
2215e3dd157SKalle Valo }
2225e3dd157SKalle Valo 
2235e3dd157SKalle Valo static void ath10k_htt_rx_ring_refill_retry(unsigned long arg)
2245e3dd157SKalle Valo {
2255e3dd157SKalle Valo 	struct ath10k_htt *htt = (struct ath10k_htt *)arg;
2265e3dd157SKalle Valo 	ath10k_htt_rx_msdu_buff_replenish(htt);
2275e3dd157SKalle Valo }
2285e3dd157SKalle Valo 
2293e841fd0SMichal Kazior static void ath10k_htt_rx_ring_clean_up(struct ath10k_htt *htt)
2303e841fd0SMichal Kazior {
2313e841fd0SMichal Kazior 	struct sk_buff *skb;
2323e841fd0SMichal Kazior 	int i;
2333e841fd0SMichal Kazior 
2343e841fd0SMichal Kazior 	for (i = 0; i < htt->rx_ring.size; i++) {
2353e841fd0SMichal Kazior 		skb = htt->rx_ring.netbufs_ring[i];
2363e841fd0SMichal Kazior 		if (!skb)
2373e841fd0SMichal Kazior 			continue;
2383e841fd0SMichal Kazior 
2393e841fd0SMichal Kazior 		dma_unmap_single(htt->ar->dev, ATH10K_SKB_CB(skb)->paddr,
2403e841fd0SMichal Kazior 				 skb->len + skb_tailroom(skb),
2413e841fd0SMichal Kazior 				 DMA_FROM_DEVICE);
2423e841fd0SMichal Kazior 		dev_kfree_skb_any(skb);
2433e841fd0SMichal Kazior 		htt->rx_ring.netbufs_ring[i] = NULL;
2443e841fd0SMichal Kazior 	}
2453e841fd0SMichal Kazior }
2463e841fd0SMichal Kazior 
24795bf21f9SMichal Kazior void ath10k_htt_rx_free(struct ath10k_htt *htt)
2485e3dd157SKalle Valo {
2495e3dd157SKalle Valo 	del_timer_sync(&htt->rx_ring.refill_retry_timer);
2506e712d42SMichal Kazior 	tasklet_kill(&htt->rx_replenish_task);
2516c5151a9SMichal Kazior 	tasklet_kill(&htt->txrx_compl_task);
2526c5151a9SMichal Kazior 
2536c5151a9SMichal Kazior 	skb_queue_purge(&htt->tx_compl_q);
2546c5151a9SMichal Kazior 	skb_queue_purge(&htt->rx_compl_q);
2555e3dd157SKalle Valo 
2563e841fd0SMichal Kazior 	ath10k_htt_rx_ring_clean_up(htt);
2575e3dd157SKalle Valo 
2585e3dd157SKalle Valo 	dma_free_coherent(htt->ar->dev,
2595e3dd157SKalle Valo 			  (htt->rx_ring.size *
2605e3dd157SKalle Valo 			   sizeof(htt->rx_ring.paddrs_ring)),
2615e3dd157SKalle Valo 			  htt->rx_ring.paddrs_ring,
2625e3dd157SKalle Valo 			  htt->rx_ring.base_paddr);
2635e3dd157SKalle Valo 
2645e3dd157SKalle Valo 	dma_free_coherent(htt->ar->dev,
2655e3dd157SKalle Valo 			  sizeof(*htt->rx_ring.alloc_idx.vaddr),
2665e3dd157SKalle Valo 			  htt->rx_ring.alloc_idx.vaddr,
2675e3dd157SKalle Valo 			  htt->rx_ring.alloc_idx.paddr);
2685e3dd157SKalle Valo 
2695e3dd157SKalle Valo 	kfree(htt->rx_ring.netbufs_ring);
2705e3dd157SKalle Valo }
2715e3dd157SKalle Valo 
2725e3dd157SKalle Valo static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
2735e3dd157SKalle Valo {
2745e3dd157SKalle Valo 	int idx;
2755e3dd157SKalle Valo 	struct sk_buff *msdu;
2765e3dd157SKalle Valo 
27745967089SMichal Kazior 	lockdep_assert_held(&htt->rx_ring.lock);
2785e3dd157SKalle Valo 
2798d60ee87SMichal Kazior 	if (htt->rx_ring.fill_cnt == 0) {
2808d60ee87SMichal Kazior 		ath10k_warn("tried to pop sk_buff from an empty rx ring\n");
2818d60ee87SMichal Kazior 		return NULL;
2828d60ee87SMichal Kazior 	}
2835e3dd157SKalle Valo 
2845e3dd157SKalle Valo 	idx = htt->rx_ring.sw_rd_idx.msdu_payld;
2855e3dd157SKalle Valo 	msdu = htt->rx_ring.netbufs_ring[idx];
2863e841fd0SMichal Kazior 	htt->rx_ring.netbufs_ring[idx] = NULL;
2875e3dd157SKalle Valo 
2885e3dd157SKalle Valo 	idx++;
2895e3dd157SKalle Valo 	idx &= htt->rx_ring.size_mask;
2905e3dd157SKalle Valo 	htt->rx_ring.sw_rd_idx.msdu_payld = idx;
2915e3dd157SKalle Valo 	htt->rx_ring.fill_cnt--;
2925e3dd157SKalle Valo 
2935e3dd157SKalle Valo 	return msdu;
2945e3dd157SKalle Valo }
2955e3dd157SKalle Valo 
2965e3dd157SKalle Valo static void ath10k_htt_rx_free_msdu_chain(struct sk_buff *skb)
2975e3dd157SKalle Valo {
2985e3dd157SKalle Valo 	struct sk_buff *next;
2995e3dd157SKalle Valo 
3005e3dd157SKalle Valo 	while (skb) {
3015e3dd157SKalle Valo 		next = skb->next;
3025e3dd157SKalle Valo 		dev_kfree_skb_any(skb);
3035e3dd157SKalle Valo 		skb = next;
3045e3dd157SKalle Valo 	}
3055e3dd157SKalle Valo }
3065e3dd157SKalle Valo 
307d84dd60fSJanusz Dziedzic /* return: < 0 fatal error, 0 - non-chained msdu, 1 - chained msdu */
3085e3dd157SKalle Valo static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
3095e3dd157SKalle Valo 				   u8 **fw_desc, int *fw_desc_len,
3105e3dd157SKalle Valo 				   struct sk_buff **head_msdu,
3110ccb7a34SJanusz Dziedzic 				   struct sk_buff **tail_msdu,
3120ccb7a34SJanusz Dziedzic 				   u32 *attention)
3135e3dd157SKalle Valo {
3145e3dd157SKalle Valo 	int msdu_len, msdu_chaining = 0;
3155e3dd157SKalle Valo 	struct sk_buff *msdu;
3165e3dd157SKalle Valo 	struct htt_rx_desc *rx_desc;
3175e3dd157SKalle Valo 
31845967089SMichal Kazior 	lockdep_assert_held(&htt->rx_ring.lock);
31945967089SMichal Kazior 
3205e3dd157SKalle Valo 	if (htt->rx_confused) {
3215e3dd157SKalle Valo 		ath10k_warn("htt is confused. refusing rx\n");
322d84dd60fSJanusz Dziedzic 		return -1;
3235e3dd157SKalle Valo 	}
3245e3dd157SKalle Valo 
3255e3dd157SKalle Valo 	msdu = *head_msdu = ath10k_htt_rx_netbuf_pop(htt);
3265e3dd157SKalle Valo 	while (msdu) {
3275e3dd157SKalle Valo 		int last_msdu, msdu_len_invalid, msdu_chained;
3285e3dd157SKalle Valo 
3295e3dd157SKalle Valo 		dma_unmap_single(htt->ar->dev,
3305e3dd157SKalle Valo 				 ATH10K_SKB_CB(msdu)->paddr,
3315e3dd157SKalle Valo 				 msdu->len + skb_tailroom(msdu),
3325e3dd157SKalle Valo 				 DMA_FROM_DEVICE);
3335e3dd157SKalle Valo 
33475fb2f94SBen Greear 		ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx pop: ",
3355e3dd157SKalle Valo 				msdu->data, msdu->len + skb_tailroom(msdu));
3365e3dd157SKalle Valo 
3375e3dd157SKalle Valo 		rx_desc = (struct htt_rx_desc *)msdu->data;
3385e3dd157SKalle Valo 
3395e3dd157SKalle Valo 		/* FIXME: we must report msdu payload since this is what caller
3405e3dd157SKalle Valo 		 *        expects now */
3415e3dd157SKalle Valo 		skb_put(msdu, offsetof(struct htt_rx_desc, msdu_payload));
3425e3dd157SKalle Valo 		skb_pull(msdu, offsetof(struct htt_rx_desc, msdu_payload));
3435e3dd157SKalle Valo 
3445e3dd157SKalle Valo 		/*
3455e3dd157SKalle Valo 		 * Sanity check - confirm the HW is finished filling in the
3465e3dd157SKalle Valo 		 * rx data.
3475e3dd157SKalle Valo 		 * If the HW and SW are working correctly, then it's guaranteed
3485e3dd157SKalle Valo 		 * that the HW's MAC DMA is done before this point in the SW.
3495e3dd157SKalle Valo 		 * To prevent the case that we handle a stale Rx descriptor,
3505e3dd157SKalle Valo 		 * just assert for now until we have a way to recover.
3515e3dd157SKalle Valo 		 */
3525e3dd157SKalle Valo 		if (!(__le32_to_cpu(rx_desc->attention.flags)
3535e3dd157SKalle Valo 				& RX_ATTENTION_FLAGS_MSDU_DONE)) {
3545e3dd157SKalle Valo 			ath10k_htt_rx_free_msdu_chain(*head_msdu);
3555e3dd157SKalle Valo 			*head_msdu = NULL;
3565e3dd157SKalle Valo 			msdu = NULL;
3575e3dd157SKalle Valo 			ath10k_err("htt rx stopped. cannot recover\n");
3585e3dd157SKalle Valo 			htt->rx_confused = true;
3595e3dd157SKalle Valo 			break;
3605e3dd157SKalle Valo 		}
3615e3dd157SKalle Valo 
3620ccb7a34SJanusz Dziedzic 		*attention |= __le32_to_cpu(rx_desc->attention.flags) &
3630ccb7a34SJanusz Dziedzic 					    (RX_ATTENTION_FLAGS_TKIP_MIC_ERR |
3640ccb7a34SJanusz Dziedzic 					     RX_ATTENTION_FLAGS_DECRYPT_ERR |
3650ccb7a34SJanusz Dziedzic 					     RX_ATTENTION_FLAGS_FCS_ERR |
3660ccb7a34SJanusz Dziedzic 					     RX_ATTENTION_FLAGS_MGMT_TYPE);
3675e3dd157SKalle Valo 		/*
3685e3dd157SKalle Valo 		 * Copy the FW rx descriptor for this MSDU from the rx
3695e3dd157SKalle Valo 		 * indication message into the MSDU's netbuf. HL uses the
3705e3dd157SKalle Valo 		 * same rx indication message definition as LL, and simply
3715e3dd157SKalle Valo 		 * appends new info (fields from the HW rx desc, and the
3725e3dd157SKalle Valo 		 * MSDU payload itself). So, the offset into the rx
3735e3dd157SKalle Valo 		 * indication message only has to account for the standard
3745e3dd157SKalle Valo 		 * offset of the per-MSDU FW rx desc info within the
3755e3dd157SKalle Valo 		 * message, and how many bytes of the per-MSDU FW rx desc
3765e3dd157SKalle Valo 		 * info have already been consumed. (And the endianness of
3775e3dd157SKalle Valo 		 * the host, since for a big-endian host, the rx ind
3785e3dd157SKalle Valo 		 * message contents, including the per-MSDU rx desc bytes,
3795e3dd157SKalle Valo 		 * were byteswapped during upload.)
3805e3dd157SKalle Valo 		 */
3815e3dd157SKalle Valo 		if (*fw_desc_len > 0) {
3825e3dd157SKalle Valo 			rx_desc->fw_desc.info0 = **fw_desc;
3835e3dd157SKalle Valo 			/*
3845e3dd157SKalle Valo 			 * The target is expected to only provide the basic
3855e3dd157SKalle Valo 			 * per-MSDU rx descriptors. Just to be sure, verify
3865e3dd157SKalle Valo 			 * that the target has not attached extension data
3875e3dd157SKalle Valo 			 * (e.g. LRO flow ID).
3885e3dd157SKalle Valo 			 */
3895e3dd157SKalle Valo 
3905e3dd157SKalle Valo 			/* or more, if there's extension data */
3915e3dd157SKalle Valo 			(*fw_desc)++;
3925e3dd157SKalle Valo 			(*fw_desc_len)--;
3935e3dd157SKalle Valo 		} else {
3945e3dd157SKalle Valo 			/*
3955e3dd157SKalle Valo 			 * When an oversized A-MSDU happens, the FW will lose
3965e3dd157SKalle Valo 			 * some of the MSDU status - in this case, the FW
3975e3dd157SKalle Valo 			 * descriptors provided will be fewer than the
3985e3dd157SKalle Valo 			 * actual MSDUs inside this MPDU. Mark the FW
3995e3dd157SKalle Valo 			 * descriptors so that they will still be delivered
4005e3dd157SKalle Valo 			 * to the upper stack, if there is no CRC error for this MPDU.
4015e3dd157SKalle Valo 			 *
4025e3dd157SKalle Valo 			 * FIX THIS - the FW descriptors are actually for
4035e3dd157SKalle Valo 			 * MSDUs at the end of this A-MSDU instead of the
4045e3dd157SKalle Valo 			 * beginning.
4055e3dd157SKalle Valo 			 */
4065e3dd157SKalle Valo 			rx_desc->fw_desc.info0 = 0;
4075e3dd157SKalle Valo 		}
4085e3dd157SKalle Valo 
4095e3dd157SKalle Valo 		msdu_len_invalid = !!(__le32_to_cpu(rx_desc->attention.flags)
4105e3dd157SKalle Valo 					& (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR |
4115e3dd157SKalle Valo 					   RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR));
4125e3dd157SKalle Valo 		msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.info0),
4135e3dd157SKalle Valo 			      RX_MSDU_START_INFO0_MSDU_LENGTH);
4145e3dd157SKalle Valo 		msdu_chained = rx_desc->frag_info.ring2_more_count;
4155e3dd157SKalle Valo 
4165e3dd157SKalle Valo 		if (msdu_len_invalid)
4175e3dd157SKalle Valo 			msdu_len = 0;
4185e3dd157SKalle Valo 
4195e3dd157SKalle Valo 		skb_trim(msdu, 0);
4205e3dd157SKalle Valo 		skb_put(msdu, min(msdu_len, HTT_RX_MSDU_SIZE));
4215e3dd157SKalle Valo 		msdu_len -= msdu->len;
4225e3dd157SKalle Valo 
4235e3dd157SKalle Valo 		/* FIXME: Do chained buffers include htt_rx_desc or not? */
4245e3dd157SKalle Valo 		while (msdu_chained--) {
4255e3dd157SKalle Valo 			struct sk_buff *next = ath10k_htt_rx_netbuf_pop(htt);
4265e3dd157SKalle Valo 
4275e3dd157SKalle Valo 			dma_unmap_single(htt->ar->dev,
4285e3dd157SKalle Valo 					 ATH10K_SKB_CB(next)->paddr,
4295e3dd157SKalle Valo 					 next->len + skb_tailroom(next),
4305e3dd157SKalle Valo 					 DMA_FROM_DEVICE);
4315e3dd157SKalle Valo 
43275fb2f94SBen Greear 			ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL,
43375fb2f94SBen Greear 					"htt rx chained: ", next->data,
4345e3dd157SKalle Valo 					next->len + skb_tailroom(next));
4355e3dd157SKalle Valo 
4365e3dd157SKalle Valo 			skb_trim(next, 0);
4375e3dd157SKalle Valo 			skb_put(next, min(msdu_len, HTT_RX_BUF_SIZE));
4385e3dd157SKalle Valo 			msdu_len -= next->len;
4395e3dd157SKalle Valo 
4405e3dd157SKalle Valo 			msdu->next = next;
4415e3dd157SKalle Valo 			msdu = next;
442ede9c8e0SMichal Kazior 			msdu_chaining = 1;
4435e3dd157SKalle Valo 		}
4445e3dd157SKalle Valo 
4455e3dd157SKalle Valo 		last_msdu = __le32_to_cpu(rx_desc->msdu_end.info0) &
4465e3dd157SKalle Valo 				RX_MSDU_END_INFO0_LAST_MSDU;
4475e3dd157SKalle Valo 
4485e3dd157SKalle Valo 		if (last_msdu) {
4495e3dd157SKalle Valo 			msdu->next = NULL;
4505e3dd157SKalle Valo 			break;
4515e3dd157SKalle Valo 		} else {
4525e3dd157SKalle Valo 			struct sk_buff *next = ath10k_htt_rx_netbuf_pop(htt);
4535e3dd157SKalle Valo 			msdu->next = next;
4545e3dd157SKalle Valo 			msdu = next;
4555e3dd157SKalle Valo 		}
4565e3dd157SKalle Valo 	}
4575e3dd157SKalle Valo 	*tail_msdu = msdu;
4585e3dd157SKalle Valo 
459d84dd60fSJanusz Dziedzic 	if (*head_msdu == NULL)
460d84dd60fSJanusz Dziedzic 		msdu_chaining = -1;
461d84dd60fSJanusz Dziedzic 
4625e3dd157SKalle Valo 	/*
4635e3dd157SKalle Valo 	 * Don't refill the ring yet.
4645e3dd157SKalle Valo 	 *
4655e3dd157SKalle Valo 	 * First, the elements popped here are still in use - it is not
4665e3dd157SKalle Valo 	 * safe to overwrite them until the matching call to
4675e3dd157SKalle Valo 	 * mpdu_desc_list_next. Second, for efficiency it is preferable to
4685e3dd157SKalle Valo 	 * refill the rx ring with 1 PPDU's worth of rx buffers (something
4695e3dd157SKalle Valo 	 * like 32 x 3 buffers), rather than one MPDU's worth of rx buffers
4705e3dd157SKalle Valo 	 * (something like 3 buffers). Consequently, we'll rely on the txrx
4715e3dd157SKalle Valo 	 * SW to tell us when it is done pulling all the PPDU's rx buffers
4725e3dd157SKalle Valo 	 * out of the rx ring, and then refill it just once.
4735e3dd157SKalle Valo 	 */
4745e3dd157SKalle Valo 
4755e3dd157SKalle Valo 	return msdu_chaining;
4765e3dd157SKalle Valo }
4775e3dd157SKalle Valo 
4786e712d42SMichal Kazior static void ath10k_htt_rx_replenish_task(unsigned long ptr)
4796e712d42SMichal Kazior {
4806e712d42SMichal Kazior 	struct ath10k_htt *htt = (struct ath10k_htt *)ptr;
4816e712d42SMichal Kazior 	ath10k_htt_rx_msdu_buff_replenish(htt);
4826e712d42SMichal Kazior }
4836e712d42SMichal Kazior 
48495bf21f9SMichal Kazior int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
4855e3dd157SKalle Valo {
4865e3dd157SKalle Valo 	dma_addr_t paddr;
4875e3dd157SKalle Valo 	void *vaddr;
4885e3dd157SKalle Valo 	struct timer_list *timer = &htt->rx_ring.refill_retry_timer;
4895e3dd157SKalle Valo 
4905e3dd157SKalle Valo 	htt->rx_ring.size = ath10k_htt_rx_ring_size(htt);
4915e3dd157SKalle Valo 	if (!is_power_of_2(htt->rx_ring.size)) {
4925e3dd157SKalle Valo 		ath10k_warn("htt rx ring size is not power of 2\n");
4935e3dd157SKalle Valo 		return -EINVAL;
4945e3dd157SKalle Valo 	}
4955e3dd157SKalle Valo 
4965e3dd157SKalle Valo 	htt->rx_ring.size_mask = htt->rx_ring.size - 1;
4975e3dd157SKalle Valo 
4985e3dd157SKalle Valo 	/*
4995e3dd157SKalle Valo 	 * Set the initial value for the level to which the rx ring
5005e3dd157SKalle Valo 	 * should be filled, based on the max throughput and the
5015e3dd157SKalle Valo 	 * worst likely latency for the host to fill the rx ring
5025e3dd157SKalle Valo 	 * with new buffers. In theory, this fill level can be
5035e3dd157SKalle Valo 	 * dynamically adjusted from the initial value set here, to
5045e3dd157SKalle Valo 	 * reflect the actual host latency rather than a
5055e3dd157SKalle Valo 	 * conservative assumption about the host latency.
5065e3dd157SKalle Valo 	 */
5075e3dd157SKalle Valo 	htt->rx_ring.fill_level = ath10k_htt_rx_ring_fill_level(htt);
5085e3dd157SKalle Valo 
5095e3dd157SKalle Valo 	htt->rx_ring.netbufs_ring =
5103e841fd0SMichal Kazior 		kzalloc(htt->rx_ring.size * sizeof(struct sk_buff *),
5115e3dd157SKalle Valo 			GFP_KERNEL);
5125e3dd157SKalle Valo 	if (!htt->rx_ring.netbufs_ring)
5135e3dd157SKalle Valo 		goto err_netbuf;
5145e3dd157SKalle Valo 
5155e3dd157SKalle Valo 	vaddr = dma_alloc_coherent(htt->ar->dev,
5165e3dd157SKalle Valo 		   (htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring)),
5175e3dd157SKalle Valo 		   &paddr, GFP_DMA);
5185e3dd157SKalle Valo 	if (!vaddr)
5195e3dd157SKalle Valo 		goto err_dma_ring;
5205e3dd157SKalle Valo 
5215e3dd157SKalle Valo 	htt->rx_ring.paddrs_ring = vaddr;
5225e3dd157SKalle Valo 	htt->rx_ring.base_paddr = paddr;
5235e3dd157SKalle Valo 
5245e3dd157SKalle Valo 	vaddr = dma_alloc_coherent(htt->ar->dev,
5255e3dd157SKalle Valo 				   sizeof(*htt->rx_ring.alloc_idx.vaddr),
5265e3dd157SKalle Valo 				   &paddr, GFP_DMA);
5275e3dd157SKalle Valo 	if (!vaddr)
5285e3dd157SKalle Valo 		goto err_dma_idx;
5295e3dd157SKalle Valo 
5305e3dd157SKalle Valo 	htt->rx_ring.alloc_idx.vaddr = vaddr;
5315e3dd157SKalle Valo 	htt->rx_ring.alloc_idx.paddr = paddr;
5325e3dd157SKalle Valo 	htt->rx_ring.sw_rd_idx.msdu_payld = 0;
5335e3dd157SKalle Valo 	*htt->rx_ring.alloc_idx.vaddr = 0;
5345e3dd157SKalle Valo 
5355e3dd157SKalle Valo 	/* Initialize the Rx refill retry timer */
5365e3dd157SKalle Valo 	setup_timer(timer, ath10k_htt_rx_ring_refill_retry, (unsigned long)htt);
5375e3dd157SKalle Valo 
5385e3dd157SKalle Valo 	spin_lock_init(&htt->rx_ring.lock);
5395e3dd157SKalle Valo 
5405e3dd157SKalle Valo 	htt->rx_ring.fill_cnt = 0;
5415e3dd157SKalle Valo 	if (__ath10k_htt_rx_ring_fill_n(htt, htt->rx_ring.fill_level))
5425e3dd157SKalle Valo 		goto err_fill_ring;
5435e3dd157SKalle Valo 
5446e712d42SMichal Kazior 	tasklet_init(&htt->rx_replenish_task, ath10k_htt_rx_replenish_task,
5456e712d42SMichal Kazior 		     (unsigned long)htt);
5466e712d42SMichal Kazior 
5476c5151a9SMichal Kazior 	skb_queue_head_init(&htt->tx_compl_q);
5486c5151a9SMichal Kazior 	skb_queue_head_init(&htt->rx_compl_q);
5496c5151a9SMichal Kazior 
5506c5151a9SMichal Kazior 	tasklet_init(&htt->txrx_compl_task, ath10k_htt_txrx_compl_task,
5516c5151a9SMichal Kazior 		     (unsigned long)htt);
5526c5151a9SMichal Kazior 
553aad0b65fSKalle Valo 	ath10k_dbg(ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
5545e3dd157SKalle Valo 		   htt->rx_ring.size, htt->rx_ring.fill_level);
5555e3dd157SKalle Valo 	return 0;
5565e3dd157SKalle Valo 
5575e3dd157SKalle Valo err_fill_ring:
5585e3dd157SKalle Valo 	ath10k_htt_rx_ring_free(htt);
5595e3dd157SKalle Valo 	dma_free_coherent(htt->ar->dev,
5605e3dd157SKalle Valo 			  sizeof(*htt->rx_ring.alloc_idx.vaddr),
5615e3dd157SKalle Valo 			  htt->rx_ring.alloc_idx.vaddr,
5625e3dd157SKalle Valo 			  htt->rx_ring.alloc_idx.paddr);
5635e3dd157SKalle Valo err_dma_idx:
5645e3dd157SKalle Valo 	dma_free_coherent(htt->ar->dev,
5655e3dd157SKalle Valo 			  (htt->rx_ring.size *
5665e3dd157SKalle Valo 			   sizeof(htt->rx_ring.paddrs_ring)),
5675e3dd157SKalle Valo 			  htt->rx_ring.paddrs_ring,
5685e3dd157SKalle Valo 			  htt->rx_ring.base_paddr);
5695e3dd157SKalle Valo err_dma_ring:
5705e3dd157SKalle Valo 	kfree(htt->rx_ring.netbufs_ring);
5715e3dd157SKalle Valo err_netbuf:
5725e3dd157SKalle Valo 	return -ENOMEM;
5735e3dd157SKalle Valo }
5745e3dd157SKalle Valo 
5755e3dd157SKalle Valo static int ath10k_htt_rx_crypto_param_len(enum htt_rx_mpdu_encrypt_type type)
5765e3dd157SKalle Valo {
5775e3dd157SKalle Valo 	switch (type) {
5785e3dd157SKalle Valo 	case HTT_RX_MPDU_ENCRYPT_WEP40:
5795e3dd157SKalle Valo 	case HTT_RX_MPDU_ENCRYPT_WEP104:
5805e3dd157SKalle Valo 		return 4;
5815e3dd157SKalle Valo 	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
5825e3dd157SKalle Valo 	case HTT_RX_MPDU_ENCRYPT_WEP128: /* not tested */
5835e3dd157SKalle Valo 	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
5845e3dd157SKalle Valo 	case HTT_RX_MPDU_ENCRYPT_WAPI: /* not tested */
5855e3dd157SKalle Valo 	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
5865e3dd157SKalle Valo 		return 8;
5875e3dd157SKalle Valo 	case HTT_RX_MPDU_ENCRYPT_NONE:
5885e3dd157SKalle Valo 		return 0;
5895e3dd157SKalle Valo 	}
5905e3dd157SKalle Valo 
5915e3dd157SKalle Valo 	ath10k_warn("unknown encryption type %d\n", type);
5925e3dd157SKalle Valo 	return 0;
5935e3dd157SKalle Valo }
5945e3dd157SKalle Valo 
5955e3dd157SKalle Valo static int ath10k_htt_rx_crypto_tail_len(enum htt_rx_mpdu_encrypt_type type)
5965e3dd157SKalle Valo {
5975e3dd157SKalle Valo 	switch (type) {
5985e3dd157SKalle Valo 	case HTT_RX_MPDU_ENCRYPT_NONE:
5995e3dd157SKalle Valo 	case HTT_RX_MPDU_ENCRYPT_WEP40:
6005e3dd157SKalle Valo 	case HTT_RX_MPDU_ENCRYPT_WEP104:
6015e3dd157SKalle Valo 	case HTT_RX_MPDU_ENCRYPT_WEP128:
6025e3dd157SKalle Valo 	case HTT_RX_MPDU_ENCRYPT_WAPI:
6035e3dd157SKalle Valo 		return 0;
6045e3dd157SKalle Valo 	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
6055e3dd157SKalle Valo 	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
6065e3dd157SKalle Valo 		return 4;
6075e3dd157SKalle Valo 	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
6085e3dd157SKalle Valo 		return 8;
6095e3dd157SKalle Valo 	}
6105e3dd157SKalle Valo 
6115e3dd157SKalle Valo 	ath10k_warn("unknown encryption type %d\n", type);
6125e3dd157SKalle Valo 	return 0;
6135e3dd157SKalle Valo }
6145e3dd157SKalle Valo 
6155e3dd157SKalle Valo /* Applies for first msdu in chain, before altering it. */
6165e3dd157SKalle Valo static struct ieee80211_hdr *ath10k_htt_rx_skb_get_hdr(struct sk_buff *skb)
6175e3dd157SKalle Valo {
6185e3dd157SKalle Valo 	struct htt_rx_desc *rxd;
6195e3dd157SKalle Valo 	enum rx_msdu_decap_format fmt;
6205e3dd157SKalle Valo 
6215e3dd157SKalle Valo 	rxd = (void *)skb->data - sizeof(*rxd);
6225e3dd157SKalle Valo 	fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
6235e3dd157SKalle Valo 			RX_MSDU_START_INFO1_DECAP_FORMAT);
6245e3dd157SKalle Valo 
6255e3dd157SKalle Valo 	if (fmt == RX_MSDU_DECAP_RAW)
6265e3dd157SKalle Valo 		return (void *)skb->data;
6275e3dd157SKalle Valo 	else
6285e3dd157SKalle Valo 		return (void *)skb->data - RX_HTT_HDR_STATUS_LEN;
6295e3dd157SKalle Valo }
6305e3dd157SKalle Valo 
6315e3dd157SKalle Valo /* This function only applies for first msdu in an msdu chain */
6325e3dd157SKalle Valo static bool ath10k_htt_rx_hdr_is_amsdu(struct ieee80211_hdr *hdr)
6335e3dd157SKalle Valo {
6345e3dd157SKalle Valo 	if (ieee80211_is_data_qos(hdr->frame_control)) {
6355e3dd157SKalle Valo 		u8 *qc = ieee80211_get_qos_ctl(hdr);
6365e3dd157SKalle Valo 		if (qc[0] & 0x80)
6375e3dd157SKalle Valo 			return true;
6385e3dd157SKalle Valo 	}
6395e3dd157SKalle Valo 	return false;
6405e3dd157SKalle Valo }
6415e3dd157SKalle Valo 
642f6dc2095SMichal Kazior struct rfc1042_hdr {
643f6dc2095SMichal Kazior 	u8 llc_dsap;
644f6dc2095SMichal Kazior 	u8 llc_ssap;
645f6dc2095SMichal Kazior 	u8 llc_ctrl;
646f6dc2095SMichal Kazior 	u8 snap_oui[3];
647f6dc2095SMichal Kazior 	__be16 snap_type;
648f6dc2095SMichal Kazior } __packed;
649f6dc2095SMichal Kazior 
650f6dc2095SMichal Kazior struct amsdu_subframe_hdr {
651f6dc2095SMichal Kazior 	u8 dst[ETH_ALEN];
652f6dc2095SMichal Kazior 	u8 src[ETH_ALEN];
653f6dc2095SMichal Kazior 	__be16 len;
654f6dc2095SMichal Kazior } __packed;
655f6dc2095SMichal Kazior 
65673539b40SJanusz Dziedzic static const u8 rx_legacy_rate_idx[] = {
65773539b40SJanusz Dziedzic 	3,	/* 0x00  - 11Mbps  */
65873539b40SJanusz Dziedzic 	2,	/* 0x01  - 5.5Mbps */
65973539b40SJanusz Dziedzic 	1,	/* 0x02  - 2Mbps   */
66073539b40SJanusz Dziedzic 	0,	/* 0x03  - 1Mbps   */
66173539b40SJanusz Dziedzic 	3,	/* 0x04  - 11Mbps  */
66273539b40SJanusz Dziedzic 	2,	/* 0x05  - 5.5Mbps */
66373539b40SJanusz Dziedzic 	1,	/* 0x06  - 2Mbps   */
66473539b40SJanusz Dziedzic 	0,	/* 0x07  - 1Mbps   */
66573539b40SJanusz Dziedzic 	10,	/* 0x08  - 48Mbps  */
66673539b40SJanusz Dziedzic 	8,	/* 0x09  - 24Mbps  */
66773539b40SJanusz Dziedzic 	6,	/* 0x0A  - 12Mbps  */
66873539b40SJanusz Dziedzic 	4,	/* 0x0B  - 6Mbps   */
66973539b40SJanusz Dziedzic 	11,	/* 0x0C  - 54Mbps  */
67073539b40SJanusz Dziedzic 	9,	/* 0x0D  - 36Mbps  */
67173539b40SJanusz Dziedzic 	7,	/* 0x0E  - 18Mbps  */
67273539b40SJanusz Dziedzic 	5,	/* 0x0F  - 9Mbps   */
67373539b40SJanusz Dziedzic };
67473539b40SJanusz Dziedzic 
67587326c97SJanusz Dziedzic static void ath10k_htt_rx_h_rates(struct ath10k *ar,
67673539b40SJanusz Dziedzic 				  enum ieee80211_band band,
67787326c97SJanusz Dziedzic 				  u8 info0, u32 info1, u32 info2,
67873539b40SJanusz Dziedzic 				  struct ieee80211_rx_status *status)
67973539b40SJanusz Dziedzic {
68073539b40SJanusz Dziedzic 	u8 cck, rate, rate_idx, bw, sgi, mcs, nss;
68173539b40SJanusz Dziedzic 	u8 preamble = 0;
68273539b40SJanusz Dziedzic 
68373539b40SJanusz Dziedzic 	/* Check if the fields are valid */
68473539b40SJanusz Dziedzic 	if (!(info0 & HTT_RX_INDICATION_INFO0_START_VALID))
68573539b40SJanusz Dziedzic 		return;
68673539b40SJanusz Dziedzic 
68773539b40SJanusz Dziedzic 	preamble = MS(info1, HTT_RX_INDICATION_INFO1_PREAMBLE_TYPE);
68873539b40SJanusz Dziedzic 
68973539b40SJanusz Dziedzic 	switch (preamble) {
69073539b40SJanusz Dziedzic 	case HTT_RX_LEGACY:
69173539b40SJanusz Dziedzic 		cck = info0 & HTT_RX_INDICATION_INFO0_LEGACY_RATE_CCK;
69273539b40SJanusz Dziedzic 		rate = MS(info0, HTT_RX_INDICATION_INFO0_LEGACY_RATE);
69373539b40SJanusz Dziedzic 		rate_idx = 0;
69473539b40SJanusz Dziedzic 
69573539b40SJanusz Dziedzic 		if (rate < 0x08 || rate > 0x0F)
69673539b40SJanusz Dziedzic 			break;
69773539b40SJanusz Dziedzic 
69873539b40SJanusz Dziedzic 		switch (band) {
69973539b40SJanusz Dziedzic 		case IEEE80211_BAND_2GHZ:
70073539b40SJanusz Dziedzic 			if (cck)
70173539b40SJanusz Dziedzic 				rate &= ~BIT(3);
70273539b40SJanusz Dziedzic 			rate_idx = rx_legacy_rate_idx[rate];
70373539b40SJanusz Dziedzic 			break;
70473539b40SJanusz Dziedzic 		case IEEE80211_BAND_5GHZ:
70573539b40SJanusz Dziedzic 			rate_idx = rx_legacy_rate_idx[rate];
70673539b40SJanusz Dziedzic 			/* We are using the same rate table that is
70773539b40SJanusz Dziedzic 			   registered with the HW - ath10k_rates[]. For
70873539b40SJanusz Dziedzic 			   5GHz, skip the CCK rates, so subtract 4 here */
70973539b40SJanusz Dziedzic 			rate_idx -= 4;
71073539b40SJanusz Dziedzic 			break;
71173539b40SJanusz Dziedzic 		default:
71273539b40SJanusz Dziedzic 			break;
71373539b40SJanusz Dziedzic 		}
71473539b40SJanusz Dziedzic 
71573539b40SJanusz Dziedzic 		status->rate_idx = rate_idx;
71673539b40SJanusz Dziedzic 		break;
71773539b40SJanusz Dziedzic 	case HTT_RX_HT:
71873539b40SJanusz Dziedzic 	case HTT_RX_HT_WITH_TXBF:
71973539b40SJanusz Dziedzic 		/* HT-SIG - Table 20-11 in info1 and info2 */
72073539b40SJanusz Dziedzic 		mcs = info1 & 0x1F;
72173539b40SJanusz Dziedzic 		nss = mcs >> 3;
72273539b40SJanusz Dziedzic 		bw = (info1 >> 7) & 1;
72373539b40SJanusz Dziedzic 		sgi = (info2 >> 7) & 1;
72473539b40SJanusz Dziedzic 
72573539b40SJanusz Dziedzic 		status->rate_idx = mcs;
72673539b40SJanusz Dziedzic 		status->flag |= RX_FLAG_HT;
72773539b40SJanusz Dziedzic 		if (sgi)
72873539b40SJanusz Dziedzic 			status->flag |= RX_FLAG_SHORT_GI;
72973539b40SJanusz Dziedzic 		if (bw)
73073539b40SJanusz Dziedzic 			status->flag |= RX_FLAG_40MHZ;
73173539b40SJanusz Dziedzic 		break;
73273539b40SJanusz Dziedzic 	case HTT_RX_VHT:
73373539b40SJanusz Dziedzic 	case HTT_RX_VHT_WITH_TXBF:
73473539b40SJanusz Dziedzic 		/* VHT-SIG-A1 in info1, VHT-SIG-A2 in info2.
73573539b40SJanusz Dziedzic 		   TODO: check this */
73673539b40SJanusz Dziedzic 		mcs = (info2 >> 4) & 0x0F;
73773539b40SJanusz Dziedzic 		nss = ((info1 >> 10) & 0x07) + 1;
73873539b40SJanusz Dziedzic 		bw = info1 & 3;
73973539b40SJanusz Dziedzic 		sgi = info2 & 1;
74073539b40SJanusz Dziedzic 
74173539b40SJanusz Dziedzic 		status->rate_idx = mcs;
74273539b40SJanusz Dziedzic 		status->vht_nss = nss;
74373539b40SJanusz Dziedzic 
74473539b40SJanusz Dziedzic 		if (sgi)
74573539b40SJanusz Dziedzic 			status->flag |= RX_FLAG_SHORT_GI;
74673539b40SJanusz Dziedzic 
74773539b40SJanusz Dziedzic 		switch (bw) {
74873539b40SJanusz Dziedzic 		/* 20MHZ */
74973539b40SJanusz Dziedzic 		case 0:
75073539b40SJanusz Dziedzic 			break;
75173539b40SJanusz Dziedzic 		/* 40MHZ */
75273539b40SJanusz Dziedzic 		case 1:
75373539b40SJanusz Dziedzic 			status->flag |= RX_FLAG_40MHZ;
75473539b40SJanusz Dziedzic 			break;
75573539b40SJanusz Dziedzic 		/* 80MHZ */
75673539b40SJanusz Dziedzic 		case 2:
75773539b40SJanusz Dziedzic 			status->vht_flag |= RX_VHT_FLAG_80MHZ;
75873539b40SJanusz Dziedzic 		}
75973539b40SJanusz Dziedzic 
76073539b40SJanusz Dziedzic 		status->flag |= RX_FLAG_VHT;
76173539b40SJanusz Dziedzic 		break;
76273539b40SJanusz Dziedzic 	default:
76373539b40SJanusz Dziedzic 		break;
76473539b40SJanusz Dziedzic 	}
76573539b40SJanusz Dziedzic }
76673539b40SJanusz Dziedzic 
76787326c97SJanusz Dziedzic static void ath10k_htt_rx_h_protected(struct ath10k_htt *htt,
76885f6d7cfSJanusz Dziedzic 				      struct ieee80211_rx_status *rx_status,
76985f6d7cfSJanusz Dziedzic 				      struct sk_buff *skb,
770c071dcb2SMichal Kazior 				      enum htt_rx_mpdu_encrypt_type enctype,
771c071dcb2SMichal Kazior 				      enum rx_msdu_decap_format fmt,
772c071dcb2SMichal Kazior 				      bool dot11frag)
77387326c97SJanusz Dziedzic {
77485f6d7cfSJanusz Dziedzic 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
77587326c97SJanusz Dziedzic 
77685f6d7cfSJanusz Dziedzic 	rx_status->flag &= ~(RX_FLAG_DECRYPTED |
77787326c97SJanusz Dziedzic 			     RX_FLAG_IV_STRIPPED |
77887326c97SJanusz Dziedzic 			     RX_FLAG_MMIC_STRIPPED);
779c071dcb2SMichal Kazior 
780c071dcb2SMichal Kazior 	if (enctype == HTT_RX_MPDU_ENCRYPT_NONE)
78187326c97SJanusz Dziedzic 		return;
782c071dcb2SMichal Kazior 
783c071dcb2SMichal Kazior 	/*
784c071dcb2SMichal Kazior 	 * There's no explicit rx descriptor flag to indicate whether a given
785c071dcb2SMichal Kazior 	 * frame has been decrypted or not. We're forced to use the decap
786c071dcb2SMichal Kazior 	 * format as an implicit indication. However fragmentation rx is always
787c071dcb2SMichal Kazior 	 * raw and it probably never reports undecrypted raws.
788c071dcb2SMichal Kazior 	 *
789c071dcb2SMichal Kazior 	 * This makes sure sniffed frames are reported as-is without stripping
790c071dcb2SMichal Kazior 	 * the protected flag.
791c071dcb2SMichal Kazior 	 */
792c071dcb2SMichal Kazior 	if (fmt == RX_MSDU_DECAP_RAW && !dot11frag)
793c071dcb2SMichal Kazior 		return;
79487326c97SJanusz Dziedzic 
79585f6d7cfSJanusz Dziedzic 	rx_status->flag |= RX_FLAG_DECRYPTED |
79687326c97SJanusz Dziedzic 			   RX_FLAG_IV_STRIPPED |
79787326c97SJanusz Dziedzic 			   RX_FLAG_MMIC_STRIPPED;
79887326c97SJanusz Dziedzic 	hdr->frame_control = __cpu_to_le16(__le16_to_cpu(hdr->frame_control) &
79987326c97SJanusz Dziedzic 					   ~IEEE80211_FCTL_PROTECTED);
80087326c97SJanusz Dziedzic }
80187326c97SJanusz Dziedzic 
80236653f05SJanusz Dziedzic static bool ath10k_htt_rx_h_channel(struct ath10k *ar,
80336653f05SJanusz Dziedzic 				    struct ieee80211_rx_status *status)
80436653f05SJanusz Dziedzic {
80536653f05SJanusz Dziedzic 	struct ieee80211_channel *ch;
80636653f05SJanusz Dziedzic 
80736653f05SJanusz Dziedzic 	spin_lock_bh(&ar->data_lock);
80836653f05SJanusz Dziedzic 	ch = ar->scan_channel;
80936653f05SJanusz Dziedzic 	if (!ch)
81036653f05SJanusz Dziedzic 		ch = ar->rx_channel;
81136653f05SJanusz Dziedzic 	spin_unlock_bh(&ar->data_lock);
81236653f05SJanusz Dziedzic 
81336653f05SJanusz Dziedzic 	if (!ch)
81436653f05SJanusz Dziedzic 		return false;
81536653f05SJanusz Dziedzic 
81636653f05SJanusz Dziedzic 	status->band = ch->band;
81736653f05SJanusz Dziedzic 	status->freq = ch->center_freq;
81836653f05SJanusz Dziedzic 
81936653f05SJanusz Dziedzic 	return true;
82036653f05SJanusz Dziedzic }
82136653f05SJanusz Dziedzic 
82285f6d7cfSJanusz Dziedzic static void ath10k_process_rx(struct ath10k *ar,
82385f6d7cfSJanusz Dziedzic 			      struct ieee80211_rx_status *rx_status,
82485f6d7cfSJanusz Dziedzic 			      struct sk_buff *skb)
82573539b40SJanusz Dziedzic {
82673539b40SJanusz Dziedzic 	struct ieee80211_rx_status *status;
82773539b40SJanusz Dziedzic 
82885f6d7cfSJanusz Dziedzic 	status = IEEE80211_SKB_RXCB(skb);
82985f6d7cfSJanusz Dziedzic 	*status = *rx_status;
83073539b40SJanusz Dziedzic 
83173539b40SJanusz Dziedzic 	ath10k_dbg(ATH10K_DBG_DATA,
83278433f96SJanusz Dziedzic 		   "rx skb %p len %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i\n",
83385f6d7cfSJanusz Dziedzic 		   skb,
83485f6d7cfSJanusz Dziedzic 		   skb->len,
83573539b40SJanusz Dziedzic 		   status->flag == 0 ? "legacy" : "",
83673539b40SJanusz Dziedzic 		   status->flag & RX_FLAG_HT ? "ht" : "",
83773539b40SJanusz Dziedzic 		   status->flag & RX_FLAG_VHT ? "vht" : "",
83873539b40SJanusz Dziedzic 		   status->flag & RX_FLAG_40MHZ ? "40" : "",
83973539b40SJanusz Dziedzic 		   status->vht_flag & RX_VHT_FLAG_80MHZ ? "80" : "",
84073539b40SJanusz Dziedzic 		   status->flag & RX_FLAG_SHORT_GI ? "sgi " : "",
84173539b40SJanusz Dziedzic 		   status->rate_idx,
84273539b40SJanusz Dziedzic 		   status->vht_nss,
84373539b40SJanusz Dziedzic 		   status->freq,
84487326c97SJanusz Dziedzic 		   status->band, status->flag,
84578433f96SJanusz Dziedzic 		   !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
84678433f96SJanusz Dziedzic 		   !!(status->flag & RX_FLAG_MMIC_ERROR));
84773539b40SJanusz Dziedzic 	ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
84885f6d7cfSJanusz Dziedzic 			skb->data, skb->len);
84973539b40SJanusz Dziedzic 
85085f6d7cfSJanusz Dziedzic 	ieee80211_rx(ar->hw, skb);
85173539b40SJanusz Dziedzic }
85273539b40SJanusz Dziedzic 
853d960c369SMichal Kazior static int ath10k_htt_rx_nwifi_hdrlen(struct ieee80211_hdr *hdr)
854d960c369SMichal Kazior {
855d960c369SMichal Kazior 	/* nwifi header length is padded to a multiple of 4 bytes. This fixes 4-addr rx. */
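	/* For example (illustration only): a 3-address QoS data header is
	 * 26 bytes and is padded to 28, while a 4-address QoS header is
	 * already 32 bytes and needs no padding.
	 */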
856d960c369SMichal Kazior 	return round_up(ieee80211_hdrlen(hdr->frame_control), 4);
857d960c369SMichal Kazior }
858d960c369SMichal Kazior 
859f6dc2095SMichal Kazior static void ath10k_htt_rx_amsdu(struct ath10k_htt *htt,
86085f6d7cfSJanusz Dziedzic 				struct ieee80211_rx_status *rx_status,
86185f6d7cfSJanusz Dziedzic 				struct sk_buff *skb_in)
8625e3dd157SKalle Valo {
8635e3dd157SKalle Valo 	struct htt_rx_desc *rxd;
86485f6d7cfSJanusz Dziedzic 	struct sk_buff *skb = skb_in;
8655e3dd157SKalle Valo 	struct sk_buff *first;
8665e3dd157SKalle Valo 	enum rx_msdu_decap_format fmt;
8675e3dd157SKalle Valo 	enum htt_rx_mpdu_encrypt_type enctype;
868f6dc2095SMichal Kazior 	struct ieee80211_hdr *hdr;
869784f69d3SMichal Kazior 	u8 hdr_buf[64], addr[ETH_ALEN], *qos;
8705e3dd157SKalle Valo 	unsigned int hdr_len;
8715e3dd157SKalle Valo 
8725e3dd157SKalle Valo 	rxd = (void *)skb->data - sizeof(*rxd);
8735e3dd157SKalle Valo 	enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
8745e3dd157SKalle Valo 			RX_MPDU_START_INFO0_ENCRYPT_TYPE);
8755e3dd157SKalle Valo 
876f6dc2095SMichal Kazior 	hdr = (struct ieee80211_hdr *)rxd->rx_hdr_status;
877f6dc2095SMichal Kazior 	hdr_len = ieee80211_hdrlen(hdr->frame_control);
878f6dc2095SMichal Kazior 	memcpy(hdr_buf, hdr, hdr_len);
879f6dc2095SMichal Kazior 	hdr = (struct ieee80211_hdr *)hdr_buf;
8805e3dd157SKalle Valo 
8815e3dd157SKalle Valo 	first = skb;
8825e3dd157SKalle Valo 	while (skb) {
8835e3dd157SKalle Valo 		void *decap_hdr;
884f6dc2095SMichal Kazior 		int len;
8855e3dd157SKalle Valo 
8865e3dd157SKalle Valo 		rxd = (void *)skb->data - sizeof(*rxd);
8875e3dd157SKalle Valo 		fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
8885e3dd157SKalle Valo 			 RX_MSDU_START_INFO1_DECAP_FORMAT);
8895e3dd157SKalle Valo 		decap_hdr = (void *)rxd->rx_hdr_status;
8905e3dd157SKalle Valo 
891f6dc2095SMichal Kazior 		skb->ip_summed = ath10k_htt_rx_get_csum_state(skb);
892f6dc2095SMichal Kazior 
893f6dc2095SMichal Kazior 		/* First frame in an A-MSDU chain has more decapped data. */
8945e3dd157SKalle Valo 		if (skb == first) {
895f6dc2095SMichal Kazior 			len = round_up(ieee80211_hdrlen(hdr->frame_control), 4);
896f6dc2095SMichal Kazior 			len += round_up(ath10k_htt_rx_crypto_param_len(enctype),
897f6dc2095SMichal Kazior 					4);
898f6dc2095SMichal Kazior 			decap_hdr += len;
8995e3dd157SKalle Valo 		}
9005e3dd157SKalle Valo 
901f6dc2095SMichal Kazior 		switch (fmt) {
902f6dc2095SMichal Kazior 		case RX_MSDU_DECAP_RAW:
903e3fbf8d2SMichal Kazior 			/* remove trailing FCS */
904f6dc2095SMichal Kazior 			skb_trim(skb, skb->len - FCS_LEN);
905f6dc2095SMichal Kazior 			break;
906f6dc2095SMichal Kazior 		case RX_MSDU_DECAP_NATIVE_WIFI:
907784f69d3SMichal Kazior 			/* pull decapped header and copy DA */
908784f69d3SMichal Kazior 			hdr = (struct ieee80211_hdr *)skb->data;
909d960c369SMichal Kazior 			hdr_len = ath10k_htt_rx_nwifi_hdrlen(hdr);
910784f69d3SMichal Kazior 			memcpy(addr, ieee80211_get_DA(hdr), ETH_ALEN);
911784f69d3SMichal Kazior 			skb_pull(skb, hdr_len);
912784f69d3SMichal Kazior 
913784f69d3SMichal Kazior 			/* push original 802.11 header */
914784f69d3SMichal Kazior 			hdr = (struct ieee80211_hdr *)hdr_buf;
915784f69d3SMichal Kazior 			hdr_len = ieee80211_hdrlen(hdr->frame_control);
916784f69d3SMichal Kazior 			memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
917784f69d3SMichal Kazior 
918784f69d3SMichal Kazior 			/* original A-MSDU header has the bit set but we're
919784f69d3SMichal Kazior 			 * not including A-MSDU subframe header */
920784f69d3SMichal Kazior 			hdr = (struct ieee80211_hdr *)skb->data;
921784f69d3SMichal Kazior 			qos = ieee80211_get_qos_ctl(hdr);
922784f69d3SMichal Kazior 			qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
923784f69d3SMichal Kazior 
924784f69d3SMichal Kazior 			/* original 802.11 header has a different DA */
925784f69d3SMichal Kazior 			memcpy(ieee80211_get_DA(hdr), addr, ETH_ALEN);
926f6dc2095SMichal Kazior 			break;
927f6dc2095SMichal Kazior 		case RX_MSDU_DECAP_ETHERNET2_DIX:
928e3fbf8d2SMichal Kazior 			/* strip ethernet header and insert decapped 802.11
929e3fbf8d2SMichal Kazior 			 * header, amsdu subframe header and rfc1042 header */
930e3fbf8d2SMichal Kazior 
931f6dc2095SMichal Kazior 			len = 0;
932f6dc2095SMichal Kazior 			len += sizeof(struct rfc1042_hdr);
933f6dc2095SMichal Kazior 			len += sizeof(struct amsdu_subframe_hdr);
934dfa95b50SMichal Kazior 
935f6dc2095SMichal Kazior 			skb_pull(skb, sizeof(struct ethhdr));
936f6dc2095SMichal Kazior 			memcpy(skb_push(skb, len), decap_hdr, len);
937f6dc2095SMichal Kazior 			memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
938f6dc2095SMichal Kazior 			break;
939f6dc2095SMichal Kazior 		case RX_MSDU_DECAP_8023_SNAP_LLC:
940e3fbf8d2SMichal Kazior 			/* insert decapped 802.11 header making a
941e3fbf8d2SMichal Kazior 			 * single-subframe A-MSDU */
942f6dc2095SMichal Kazior 			memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
943f6dc2095SMichal Kazior 			break;
9445e3dd157SKalle Valo 		}
9455e3dd157SKalle Valo 
94685f6d7cfSJanusz Dziedzic 		skb_in = skb;
947c071dcb2SMichal Kazior 		ath10k_htt_rx_h_protected(htt, rx_status, skb_in, enctype, fmt,
948c071dcb2SMichal Kazior 					  false);
949f6dc2095SMichal Kazior 		skb = skb->next;
95085f6d7cfSJanusz Dziedzic 		skb_in->next = NULL;
9515e3dd157SKalle Valo 
952652de35eSKalle Valo 		if (skb)
95385f6d7cfSJanusz Dziedzic 			rx_status->flag |= RX_FLAG_AMSDU_MORE;
95487326c97SJanusz Dziedzic 		else
95585f6d7cfSJanusz Dziedzic 			rx_status->flag &= ~RX_FLAG_AMSDU_MORE;
956652de35eSKalle Valo 
95785f6d7cfSJanusz Dziedzic 		ath10k_process_rx(htt->ar, rx_status, skb_in);
9585e3dd157SKalle Valo 	}
9595e3dd157SKalle Valo 
960f6dc2095SMichal Kazior 	/* FIXME: It might be nice to re-assemble the A-MSDU when there's a
961f6dc2095SMichal Kazior 	 * monitor interface active for sniffing purposes. */
962f6dc2095SMichal Kazior }
963f6dc2095SMichal Kazior 
96485f6d7cfSJanusz Dziedzic static void ath10k_htt_rx_msdu(struct ath10k_htt *htt,
96585f6d7cfSJanusz Dziedzic 			       struct ieee80211_rx_status *rx_status,
96685f6d7cfSJanusz Dziedzic 			       struct sk_buff *skb)
9675e3dd157SKalle Valo {
9685e3dd157SKalle Valo 	struct htt_rx_desc *rxd;
9695e3dd157SKalle Valo 	struct ieee80211_hdr *hdr;
9705e3dd157SKalle Valo 	enum rx_msdu_decap_format fmt;
9715e3dd157SKalle Valo 	enum htt_rx_mpdu_encrypt_type enctype;
972e3fbf8d2SMichal Kazior 	int hdr_len;
973e3fbf8d2SMichal Kazior 	void *rfc1042;
9745e3dd157SKalle Valo 
9755e3dd157SKalle Valo 	/* This shouldn't happen. If it does then it may be a FW bug. */
9765e3dd157SKalle Valo 	if (skb->next) {
97775fb2f94SBen Greear 		ath10k_warn("htt rx received chained non A-MSDU frame\n");
9785e3dd157SKalle Valo 		ath10k_htt_rx_free_msdu_chain(skb->next);
9795e3dd157SKalle Valo 		skb->next = NULL;
9805e3dd157SKalle Valo 	}
9815e3dd157SKalle Valo 
9825e3dd157SKalle Valo 	rxd = (void *)skb->data - sizeof(*rxd);
9835e3dd157SKalle Valo 	fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
9845e3dd157SKalle Valo 			RX_MSDU_START_INFO1_DECAP_FORMAT);
9855e3dd157SKalle Valo 	enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
9865e3dd157SKalle Valo 			RX_MPDU_START_INFO0_ENCRYPT_TYPE);
987e3fbf8d2SMichal Kazior 	hdr = (struct ieee80211_hdr *)rxd->rx_hdr_status;
988e3fbf8d2SMichal Kazior 	hdr_len = ieee80211_hdrlen(hdr->frame_control);
9895e3dd157SKalle Valo 
990f6dc2095SMichal Kazior 	skb->ip_summed = ath10k_htt_rx_get_csum_state(skb);
991f6dc2095SMichal Kazior 
9925e3dd157SKalle Valo 	switch (fmt) {
9935e3dd157SKalle Valo 	case RX_MSDU_DECAP_RAW:
9945e3dd157SKalle Valo 		/* remove trailing FCS */
995e3fbf8d2SMichal Kazior 		skb_trim(skb, skb->len - FCS_LEN);
9965e3dd157SKalle Valo 		break;
9975e3dd157SKalle Valo 	case RX_MSDU_DECAP_NATIVE_WIFI:
998784f69d3SMichal Kazior 		/* Pull decapped header */
999784f69d3SMichal Kazior 		hdr = (struct ieee80211_hdr *)skb->data;
1000d960c369SMichal Kazior 		hdr_len = ath10k_htt_rx_nwifi_hdrlen(hdr);
1001784f69d3SMichal Kazior 		skb_pull(skb, hdr_len);
1002784f69d3SMichal Kazior 
1003784f69d3SMichal Kazior 		/* Push original header */
1004784f69d3SMichal Kazior 		hdr = (struct ieee80211_hdr *)rxd->rx_hdr_status;
1005784f69d3SMichal Kazior 		hdr_len = ieee80211_hdrlen(hdr->frame_control);
1006784f69d3SMichal Kazior 		memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
10075e3dd157SKalle Valo 		break;
10085e3dd157SKalle Valo 	case RX_MSDU_DECAP_ETHERNET2_DIX:
1009e3fbf8d2SMichal Kazior 		/* strip ethernet header and insert decapped 802.11 header and
1010e3fbf8d2SMichal Kazior 		 * rfc1042 header */
1011e3fbf8d2SMichal Kazior 
1012e3fbf8d2SMichal Kazior 		rfc1042 = hdr;
1013e3fbf8d2SMichal Kazior 		rfc1042 += roundup(hdr_len, 4);
1014e3fbf8d2SMichal Kazior 		rfc1042 += roundup(ath10k_htt_rx_crypto_param_len(enctype), 4);
1015e3fbf8d2SMichal Kazior 
1016e3fbf8d2SMichal Kazior 		skb_pull(skb, sizeof(struct ethhdr));
1017e3fbf8d2SMichal Kazior 		memcpy(skb_push(skb, sizeof(struct rfc1042_hdr)),
1018e3fbf8d2SMichal Kazior 		       rfc1042, sizeof(struct rfc1042_hdr));
1019e3fbf8d2SMichal Kazior 		memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
10205e3dd157SKalle Valo 		break;
10215e3dd157SKalle Valo 	case RX_MSDU_DECAP_8023_SNAP_LLC:
1022e3fbf8d2SMichal Kazior 		/* remove A-MSDU subframe header and insert
1023e3fbf8d2SMichal Kazior 		 * decapped 802.11 header. rfc1042 header is already there */
1024e3fbf8d2SMichal Kazior 
1025e3fbf8d2SMichal Kazior 		skb_pull(skb, sizeof(struct amsdu_subframe_hdr));
1026e3fbf8d2SMichal Kazior 		memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
10275e3dd157SKalle Valo 		break;
10285e3dd157SKalle Valo 	}
10295e3dd157SKalle Valo 
1030c071dcb2SMichal Kazior 	ath10k_htt_rx_h_protected(htt, rx_status, skb, enctype, fmt, false);
1031f6dc2095SMichal Kazior 
103285f6d7cfSJanusz Dziedzic 	ath10k_process_rx(htt->ar, rx_status, skb);
10335e3dd157SKalle Valo }
10345e3dd157SKalle Valo 
1035605f81aaSMichal Kazior static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
1036605f81aaSMichal Kazior {
1037605f81aaSMichal Kazior 	struct htt_rx_desc *rxd;
1038605f81aaSMichal Kazior 	u32 flags, info;
1039605f81aaSMichal Kazior 	bool is_ip4, is_ip6;
1040605f81aaSMichal Kazior 	bool is_tcp, is_udp;
1041605f81aaSMichal Kazior 	bool ip_csum_ok, tcpudp_csum_ok;
1042605f81aaSMichal Kazior 
1043605f81aaSMichal Kazior 	rxd = (void *)skb->data - sizeof(*rxd);
1044605f81aaSMichal Kazior 	flags = __le32_to_cpu(rxd->attention.flags);
1045605f81aaSMichal Kazior 	info = __le32_to_cpu(rxd->msdu_start.info1);
1046605f81aaSMichal Kazior 
1047605f81aaSMichal Kazior 	is_ip4 = !!(info & RX_MSDU_START_INFO1_IPV4_PROTO);
1048605f81aaSMichal Kazior 	is_ip6 = !!(info & RX_MSDU_START_INFO1_IPV6_PROTO);
1049605f81aaSMichal Kazior 	is_tcp = !!(info & RX_MSDU_START_INFO1_TCP_PROTO);
1050605f81aaSMichal Kazior 	is_udp = !!(info & RX_MSDU_START_INFO1_UDP_PROTO);
1051605f81aaSMichal Kazior 	ip_csum_ok = !(flags & RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL);
1052605f81aaSMichal Kazior 	tcpudp_csum_ok = !(flags & RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL);
1053605f81aaSMichal Kazior 
1054605f81aaSMichal Kazior 	if (!is_ip4 && !is_ip6)
1055605f81aaSMichal Kazior 		return CHECKSUM_NONE;
1056605f81aaSMichal Kazior 	if (!is_tcp && !is_udp)
1057605f81aaSMichal Kazior 		return CHECKSUM_NONE;
1058605f81aaSMichal Kazior 	if (!ip_csum_ok)
1059605f81aaSMichal Kazior 		return CHECKSUM_NONE;
1060605f81aaSMichal Kazior 	if (!tcpudp_csum_ok)
1061605f81aaSMichal Kazior 		return CHECKSUM_NONE;
1062605f81aaSMichal Kazior 
1063605f81aaSMichal Kazior 	return CHECKSUM_UNNECESSARY;
1064605f81aaSMichal Kazior }
1065605f81aaSMichal Kazior 
1066bfa35368SBen Greear static int ath10k_unchain_msdu(struct sk_buff *msdu_head)
1067bfa35368SBen Greear {
1068bfa35368SBen Greear 	struct sk_buff *next = msdu_head->next;
1069bfa35368SBen Greear 	struct sk_buff *to_free = next;
1070bfa35368SBen Greear 	int space;
1071bfa35368SBen Greear 	int total_len = 0;
1072bfa35368SBen Greear 
1073bfa35368SBen Greear 	/* TODO:  We might be able to optimize this by using
1074bfa35368SBen Greear 	 * skb_try_coalesce or a similar method to
1075bfa35368SBen Greear 	 * decrease copying, or maybe get mac80211 to
1076bfa35368SBen Greear 	 * provide a way to just receive a list of
1077bfa35368SBen Greear 	 * skbs?
1078bfa35368SBen Greear 	 */
1079bfa35368SBen Greear 
1080bfa35368SBen Greear 	msdu_head->next = NULL;
1081bfa35368SBen Greear 
1082bfa35368SBen Greear 	/* Compute the total length so the head skb can be expanded once. */
1083bfa35368SBen Greear 	while (next) {
1084bfa35368SBen Greear 		total_len += next->len;
1085bfa35368SBen Greear 		next = next->next;
1086bfa35368SBen Greear 	}
1087bfa35368SBen Greear 
1088bfa35368SBen Greear 	space = total_len - skb_tailroom(msdu_head);
1089bfa35368SBen Greear 	if ((space > 0) &&
1090bfa35368SBen Greear 	    (pskb_expand_head(msdu_head, 0, space, GFP_ATOMIC) < 0)) {
1091bfa35368SBen Greear 		/* TODO:  bump some rx-oom error stat */
1092bfa35368SBen Greear 		/* put it back together so we can free the
1093bfa35368SBen Greear 		 * whole list at once.
1094bfa35368SBen Greear 		 */
1095bfa35368SBen Greear 		msdu_head->next = to_free;
1096bfa35368SBen Greear 		return -1;
1097bfa35368SBen Greear 	}
1098bfa35368SBen Greear 
1099bfa35368SBen Greear 	/* Walk list again, copying contents into
1100bfa35368SBen Greear 	 * msdu_head
1101bfa35368SBen Greear 	 */
1102bfa35368SBen Greear 	next = to_free;
1103bfa35368SBen Greear 	while (next) {
1104bfa35368SBen Greear 		skb_copy_from_linear_data(next, skb_put(msdu_head, next->len),
1105bfa35368SBen Greear 					  next->len);
1106bfa35368SBen Greear 		next = next->next;
1107bfa35368SBen Greear 	}
1108bfa35368SBen Greear 
1109bfa35368SBen Greear 	/* The skb is now consolidated. Free the
1110bfa35368SBen Greear 	 * fragments and pass the head skb on up the
1111bfa35368SBen Greear 	 * stack.
1112bfa35368SBen Greear 	 */
1113bfa35368SBen Greear 	ath10k_htt_rx_free_msdu_chain(to_free);
1114bfa35368SBen Greear 	return 0;
1115bfa35368SBen Greear }
1116bfa35368SBen Greear 
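/* Decide whether an rx'ed (a-)msdu may be passed up to mac80211.
 * Zero-length frames, decrypt errors, frames without a configured
 * channel, mgmt/ctrl frames (those arrive via WMI), error statuses
 * (unless monitoring) and frames rx'ed during CAC are all dropped.
 */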
11172acc4eb2SJanusz Dziedzic static bool ath10k_htt_rx_amsdu_allowed(struct ath10k_htt *htt,
11182acc4eb2SJanusz Dziedzic 					struct sk_buff *head,
111987326c97SJanusz Dziedzic 					enum htt_rx_mpdu_status status,
112078433f96SJanusz Dziedzic 					bool channel_set,
112178433f96SJanusz Dziedzic 					u32 attention)
11222acc4eb2SJanusz Dziedzic {
11232acc4eb2SJanusz Dziedzic 	if (head->len == 0) {
11242acc4eb2SJanusz Dziedzic 		ath10k_dbg(ATH10K_DBG_HTT,
11252acc4eb2SJanusz Dziedzic 			   "htt rx dropping due to zero-len\n");
11262acc4eb2SJanusz Dziedzic 		return false;
11272acc4eb2SJanusz Dziedzic 	}
11282acc4eb2SJanusz Dziedzic 
112978433f96SJanusz Dziedzic 	if (attention & RX_ATTENTION_FLAGS_DECRYPT_ERR) {
11302acc4eb2SJanusz Dziedzic 		ath10k_dbg(ATH10K_DBG_HTT,
11312acc4eb2SJanusz Dziedzic 			   "htt rx dropping due to decrypt-err\n");
11322acc4eb2SJanusz Dziedzic 		return false;
11332acc4eb2SJanusz Dziedzic 	}
11342acc4eb2SJanusz Dziedzic 
113536653f05SJanusz Dziedzic 	if (!channel_set) {
113636653f05SJanusz Dziedzic 		ath10k_warn("no channel configured; ignoring frame!\n");
113736653f05SJanusz Dziedzic 		return false;
113836653f05SJanusz Dziedzic 	}
113936653f05SJanusz Dziedzic 
11402acc4eb2SJanusz Dziedzic 	/* Skip mgmt frames since these are delivered via WMI instead */
11412acc4eb2SJanusz Dziedzic 	if (status == HTT_RX_IND_MPDU_STATUS_MGMT_CTRL ||
114278433f96SJanusz Dziedzic 	    attention & RX_ATTENTION_FLAGS_MGMT_TYPE) {
11432acc4eb2SJanusz Dziedzic 		ath10k_dbg(ATH10K_DBG_HTT, "htt rx mgmt ctrl\n");
11442acc4eb2SJanusz Dziedzic 		return false;
11452acc4eb2SJanusz Dziedzic 	}
11462acc4eb2SJanusz Dziedzic 
11472acc4eb2SJanusz Dziedzic 	if (status != HTT_RX_IND_MPDU_STATUS_OK &&
11482acc4eb2SJanusz Dziedzic 	    status != HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR &&
11492acc4eb2SJanusz Dziedzic 	    status != HTT_RX_IND_MPDU_STATUS_ERR_INV_PEER &&
11501bbc0975SMichal Kazior 	    !htt->ar->monitor_started) {
11512acc4eb2SJanusz Dziedzic 		ath10k_dbg(ATH10K_DBG_HTT,
11522acc4eb2SJanusz Dziedzic 			   "htt rx ignoring frame w/ status %d\n",
11532acc4eb2SJanusz Dziedzic 			   status);
11542acc4eb2SJanusz Dziedzic 		return false;
11552acc4eb2SJanusz Dziedzic 	}
11562acc4eb2SJanusz Dziedzic 
11572acc4eb2SJanusz Dziedzic 	if (test_bit(ATH10K_CAC_RUNNING, &htt->ar->dev_flags)) {
11582acc4eb2SJanusz Dziedzic 		ath10k_dbg(ATH10K_DBG_HTT,
11592acc4eb2SJanusz Dziedzic 			   "htt rx CAC running\n");
11602acc4eb2SJanusz Dziedzic 		return false;
11612acc4eb2SJanusz Dziedzic 	}
11622acc4eb2SJanusz Dziedzic 
11632acc4eb2SJanusz Dziedzic 	return true;
11642acc4eb2SJanusz Dziedzic }
11652acc4eb2SJanusz Dziedzic 
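/* Process one HTT rx indication: fill in the per-ppdu rx status
 * (signal, TSF, channel and rates), pop every mpdu range off the rx
 * ring, filter and unchain the msdus and deliver them as either
 * a-msdu or plain msdu. Runs in the txrx completion tasklet with
 * rx_ring.lock held.
 */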
11665e3dd157SKalle Valo static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
11675e3dd157SKalle Valo 				  struct htt_rx_indication *rx)
11685e3dd157SKalle Valo {
11696df92a3dSJanusz Dziedzic 	struct ieee80211_rx_status *rx_status = &htt->rx_status;
11705e3dd157SKalle Valo 	struct htt_rx_indication_mpdu_range *mpdu_ranges;
117178433f96SJanusz Dziedzic 	struct htt_rx_desc *rxd;
117287326c97SJanusz Dziedzic 	enum htt_rx_mpdu_status status;
11735e3dd157SKalle Valo 	struct ieee80211_hdr *hdr;
11745e3dd157SKalle Valo 	int num_mpdu_ranges;
117578433f96SJanusz Dziedzic 	u32 attention;
11765e3dd157SKalle Valo 	int fw_desc_len;
11775e3dd157SKalle Valo 	u8 *fw_desc;
117878433f96SJanusz Dziedzic 	bool channel_set;
11795e3dd157SKalle Valo 	int i, j;
1180d84dd60fSJanusz Dziedzic 	int ret;
11815e3dd157SKalle Valo 
118245967089SMichal Kazior 	lockdep_assert_held(&htt->rx_ring.lock);
118345967089SMichal Kazior 
11845e3dd157SKalle Valo 	fw_desc_len = __le16_to_cpu(rx->prefix.fw_rx_desc_bytes);
11855e3dd157SKalle Valo 	fw_desc = (u8 *)&rx->fw_desc;
11865e3dd157SKalle Valo 
11875e3dd157SKalle Valo 	num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
11885e3dd157SKalle Valo 			     HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
11895e3dd157SKalle Valo 	mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx);
11905e3dd157SKalle Valo 
1191e8dc1a96SJanusz Dziedzic 	/* Fill this once, since this is per-ppdu information */
11922289188cSJanusz Dziedzic 	if (rx->ppdu.info0 & HTT_RX_INDICATION_INFO0_START_VALID) {
11932289188cSJanusz Dziedzic 		memset(rx_status, 0, sizeof(*rx_status));
11942289188cSJanusz Dziedzic 		rx_status->signal  = ATH10K_DEFAULT_NOISE_FLOOR +
11952289188cSJanusz Dziedzic 				     rx->ppdu.combined_rssi;
11962289188cSJanusz Dziedzic 	}
119787326c97SJanusz Dziedzic 
119887326c97SJanusz Dziedzic 	if (rx->ppdu.info0 & HTT_RX_INDICATION_INFO0_END_VALID) {
119987326c97SJanusz Dziedzic 		/* Only the lower 32 bits of the TSF are available */
12006df92a3dSJanusz Dziedzic 		rx_status->mactime = __le32_to_cpu(rx->ppdu.tsf) & 0xffffffff;
12016df92a3dSJanusz Dziedzic 		rx_status->flag |= RX_FLAG_MACTIME_END;
120287326c97SJanusz Dziedzic 	}
1203e8dc1a96SJanusz Dziedzic 
12046df92a3dSJanusz Dziedzic 	channel_set = ath10k_htt_rx_h_channel(htt->ar, rx_status);
120536653f05SJanusz Dziedzic 
120687326c97SJanusz Dziedzic 	if (channel_set) {
12076df92a3dSJanusz Dziedzic 		ath10k_htt_rx_h_rates(htt->ar, rx_status->band,
120887326c97SJanusz Dziedzic 				      rx->ppdu.info0,
120987326c97SJanusz Dziedzic 				      __le32_to_cpu(rx->ppdu.info1),
121087326c97SJanusz Dziedzic 				      __le32_to_cpu(rx->ppdu.info2),
12116df92a3dSJanusz Dziedzic 				      rx_status);
121287326c97SJanusz Dziedzic 	}
1213e8dc1a96SJanusz Dziedzic 
12145e3dd157SKalle Valo 	ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
12155e3dd157SKalle Valo 			rx, sizeof(*rx) +
12165e3dd157SKalle Valo 			(sizeof(struct htt_rx_indication_mpdu_range) *
12175e3dd157SKalle Valo 				num_mpdu_ranges));
12185e3dd157SKalle Valo 
12195e3dd157SKalle Valo 	for (i = 0; i < num_mpdu_ranges; i++) {
122087326c97SJanusz Dziedzic 		status = mpdu_ranges[i].mpdu_range_status;
12215e3dd157SKalle Valo 
12225e3dd157SKalle Valo 		for (j = 0; j < mpdu_ranges[i].mpdu_count; j++) {
12235e3dd157SKalle Valo 			struct sk_buff *msdu_head, *msdu_tail;
12245e3dd157SKalle Valo 
12250ccb7a34SJanusz Dziedzic 			attention = 0;
12265e3dd157SKalle Valo 			msdu_head = NULL;
12275e3dd157SKalle Valo 			msdu_tail = NULL;
1228d84dd60fSJanusz Dziedzic 			ret = ath10k_htt_rx_amsdu_pop(htt,
12295e3dd157SKalle Valo 						      &fw_desc,
12305e3dd157SKalle Valo 						      &fw_desc_len,
12315e3dd157SKalle Valo 						      &msdu_head,
12320ccb7a34SJanusz Dziedzic 						      &msdu_tail,
12330ccb7a34SJanusz Dziedzic 						      &attention);
12345e3dd157SKalle Valo 
1235d84dd60fSJanusz Dziedzic 			if (ret < 0) {
1236d84dd60fSJanusz Dziedzic 				ath10k_warn("failed to pop amsdu from htt rx ring %d\n",
1237d84dd60fSJanusz Dziedzic 					    ret);
1238d84dd60fSJanusz Dziedzic 				ath10k_htt_rx_free_msdu_chain(msdu_head);
1239d84dd60fSJanusz Dziedzic 				continue;
1240d84dd60fSJanusz Dziedzic 			}
1241d84dd60fSJanusz Dziedzic 
124278433f96SJanusz Dziedzic 			rxd = container_of((void *)msdu_head->data,
124378433f96SJanusz Dziedzic 					   struct htt_rx_desc,
124478433f96SJanusz Dziedzic 					   msdu_payload);
124578433f96SJanusz Dziedzic 
12462acc4eb2SJanusz Dziedzic 			if (!ath10k_htt_rx_amsdu_allowed(htt, msdu_head,
124787326c97SJanusz Dziedzic 							 status,
124878433f96SJanusz Dziedzic 							 channel_set,
124978433f96SJanusz Dziedzic 							 attention)) {
1250e8a50f8bSMarek Puzyniak 				ath10k_htt_rx_free_msdu_chain(msdu_head);
1251e8a50f8bSMarek Puzyniak 				continue;
1252e8a50f8bSMarek Puzyniak 			}
1253e8a50f8bSMarek Puzyniak 
1254d84dd60fSJanusz Dziedzic 			if (ret > 0 &&
1255d84dd60fSJanusz Dziedzic 			    ath10k_unchain_msdu(msdu_head) < 0) {
12565e3dd157SKalle Valo 				ath10k_htt_rx_free_msdu_chain(msdu_head);
12575e3dd157SKalle Valo 				continue;
12585e3dd157SKalle Valo 			}
12595e3dd157SKalle Valo 
126078433f96SJanusz Dziedzic 			if (attention & RX_ATTENTION_FLAGS_FCS_ERR)
12616df92a3dSJanusz Dziedzic 				rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
126287326c97SJanusz Dziedzic 			else
12636df92a3dSJanusz Dziedzic 				rx_status->flag &= ~RX_FLAG_FAILED_FCS_CRC;
126487326c97SJanusz Dziedzic 
126578433f96SJanusz Dziedzic 			if (attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR)
12666df92a3dSJanusz Dziedzic 				rx_status->flag |= RX_FLAG_MMIC_ERROR;
126787326c97SJanusz Dziedzic 			else
12686df92a3dSJanusz Dziedzic 				rx_status->flag &= ~RX_FLAG_MMIC_ERROR;
126987326c97SJanusz Dziedzic 
12705e3dd157SKalle Valo 			hdr = ath10k_htt_rx_skb_get_hdr(msdu_head);
12715e3dd157SKalle Valo 
12725e3dd157SKalle Valo 			if (ath10k_htt_rx_hdr_is_amsdu(hdr))
12736df92a3dSJanusz Dziedzic 				ath10k_htt_rx_amsdu(htt, rx_status, msdu_head);
12745e3dd157SKalle Valo 			else
12756df92a3dSJanusz Dziedzic 				ath10k_htt_rx_msdu(htt, rx_status, msdu_head);
12765e3dd157SKalle Valo 		}
12775e3dd157SKalle Valo 	}
12785e3dd157SKalle Valo 
12796e712d42SMichal Kazior 	tasklet_schedule(&htt->rx_replenish_task);
12805e3dd157SKalle Valo }
12815e3dd157SKalle Valo 
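/* Handle a fragmented rx indication. Only raw decap is supported:
 * the 802.11 header is moved past the crypto parameters and the FCS,
 * crypto trailer and (for the last TKIP fragment) MIC are trimmed
 * before the frame is passed up to mac80211.
 */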
12825e3dd157SKalle Valo static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
12835e3dd157SKalle Valo 				struct htt_rx_fragment_indication *frag)
12845e3dd157SKalle Valo {
12855e3dd157SKalle Valo 	struct sk_buff *msdu_head, *msdu_tail;
128687326c97SJanusz Dziedzic 	enum htt_rx_mpdu_encrypt_type enctype;
12875e3dd157SKalle Valo 	struct htt_rx_desc *rxd;
12885e3dd157SKalle Valo 	enum rx_msdu_decap_format fmt;
12896df92a3dSJanusz Dziedzic 	struct ieee80211_rx_status *rx_status = &htt->rx_status;
12905e3dd157SKalle Valo 	struct ieee80211_hdr *hdr;
1291d84dd60fSJanusz Dziedzic 	int ret;
12925e3dd157SKalle Valo 	bool tkip_mic_err;
12935e3dd157SKalle Valo 	bool decrypt_err;
12945e3dd157SKalle Valo 	u8 *fw_desc;
12955e3dd157SKalle Valo 	int fw_desc_len, hdrlen, paramlen;
12965e3dd157SKalle Valo 	int trim;
12970ccb7a34SJanusz Dziedzic 	u32 attention = 0;
12985e3dd157SKalle Valo 
12995e3dd157SKalle Valo 	fw_desc_len = __le16_to_cpu(frag->fw_rx_desc_bytes);
13005e3dd157SKalle Valo 	fw_desc = (u8 *)frag->fw_msdu_rx_desc;
13015e3dd157SKalle Valo 
13025e3dd157SKalle Valo 	msdu_head = NULL;
13035e3dd157SKalle Valo 	msdu_tail = NULL;
130445967089SMichal Kazior 
130545967089SMichal Kazior 	spin_lock_bh(&htt->rx_ring.lock);
1306d84dd60fSJanusz Dziedzic 	ret = ath10k_htt_rx_amsdu_pop(htt, &fw_desc, &fw_desc_len,
13070ccb7a34SJanusz Dziedzic 				      &msdu_head, &msdu_tail,
13080ccb7a34SJanusz Dziedzic 				      &attention);
130945967089SMichal Kazior 	spin_unlock_bh(&htt->rx_ring.lock);
13105e3dd157SKalle Valo 
13115e3dd157SKalle Valo 	ath10k_dbg(ATH10K_DBG_HTT_DUMP, "htt rx frag ahead\n");
13125e3dd157SKalle Valo 
1313d84dd60fSJanusz Dziedzic 	if (ret) {
1314d84dd60fSJanusz Dziedzic 		ath10k_warn("failed to pop amsdu from htt rx ring for fragmented rx %d\n",
1315d84dd60fSJanusz Dziedzic 			    ret);
13165e3dd157SKalle Valo 		ath10k_htt_rx_free_msdu_chain(msdu_head);
13175e3dd157SKalle Valo 		return;
13185e3dd157SKalle Valo 	}
13195e3dd157SKalle Valo 
13205e3dd157SKalle Valo 	/* FIXME: implement signal strength */
13214b81d177SBen Greear 	rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
13225e3dd157SKalle Valo 
13235e3dd157SKalle Valo 	hdr = (struct ieee80211_hdr *)msdu_head->data;
13245e3dd157SKalle Valo 	rxd = (void *)msdu_head->data - sizeof(*rxd);
13250ccb7a34SJanusz Dziedzic 	tkip_mic_err = !!(attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR);
13260ccb7a34SJanusz Dziedzic 	decrypt_err = !!(attention & RX_ATTENTION_FLAGS_DECRYPT_ERR);
13275e3dd157SKalle Valo 	fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
13285e3dd157SKalle Valo 			RX_MSDU_START_INFO1_DECAP_FORMAT);
13295e3dd157SKalle Valo 
13305e3dd157SKalle Valo 	if (fmt != RX_MSDU_DECAP_RAW) {
13315e3dd157SKalle Valo 		ath10k_warn("we don't support non-raw fragmented rx yet\n");
13325e3dd157SKalle Valo 		dev_kfree_skb_any(msdu_head);
13335e3dd157SKalle Valo 		goto end;
13345e3dd157SKalle Valo 	}
13355e3dd157SKalle Valo 
133687326c97SJanusz Dziedzic 	enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
13375e3dd157SKalle Valo 		     RX_MPDU_START_INFO0_ENCRYPT_TYPE);
1338c071dcb2SMichal Kazior 	ath10k_htt_rx_h_protected(htt, rx_status, msdu_head, enctype, fmt,
1339c071dcb2SMichal Kazior 				  true);
134085f6d7cfSJanusz Dziedzic 	msdu_head->ip_summed = ath10k_htt_rx_get_csum_state(msdu_head);
13415e3dd157SKalle Valo 
134287326c97SJanusz Dziedzic 	if (tkip_mic_err)
13435e3dd157SKalle Valo 		ath10k_warn("tkip mic error\n");
13445e3dd157SKalle Valo 
13455e3dd157SKalle Valo 	if (decrypt_err) {
13465e3dd157SKalle Valo 		ath10k_warn("decryption err in fragmented rx\n");
134785f6d7cfSJanusz Dziedzic 		dev_kfree_skb_any(msdu_head);
13485e3dd157SKalle Valo 		goto end;
13495e3dd157SKalle Valo 	}
13505e3dd157SKalle Valo 
135187326c97SJanusz Dziedzic 	if (enctype != HTT_RX_MPDU_ENCRYPT_NONE) {
13525e3dd157SKalle Valo 		hdrlen = ieee80211_hdrlen(hdr->frame_control);
135387326c97SJanusz Dziedzic 		paramlen = ath10k_htt_rx_crypto_param_len(enctype);
13545e3dd157SKalle Valo 
13555e3dd157SKalle Valo 		/* It is more efficient to move the header than the payload */
135685f6d7cfSJanusz Dziedzic 		memmove((void *)msdu_head->data + paramlen,
135785f6d7cfSJanusz Dziedzic 			(void *)msdu_head->data,
13585e3dd157SKalle Valo 			hdrlen);
135985f6d7cfSJanusz Dziedzic 		skb_pull(msdu_head, paramlen);
136085f6d7cfSJanusz Dziedzic 		hdr = (struct ieee80211_hdr *)msdu_head->data;
13615e3dd157SKalle Valo 	}
13625e3dd157SKalle Valo 
13635e3dd157SKalle Valo 	/* remove trailing FCS */
13645e3dd157SKalle Valo 	trim  = 4;
13655e3dd157SKalle Valo 
13665e3dd157SKalle Valo 	/* remove crypto trailer */
136787326c97SJanusz Dziedzic 	trim += ath10k_htt_rx_crypto_tail_len(enctype);
13685e3dd157SKalle Valo 
13695e3dd157SKalle Valo 	/* last fragment of TKIP frags has MIC */
13705e3dd157SKalle Valo 	if (!ieee80211_has_morefrags(hdr->frame_control) &&
137187326c97SJanusz Dziedzic 	    enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
13725e3dd157SKalle Valo 		trim += 8;
13735e3dd157SKalle Valo 
137485f6d7cfSJanusz Dziedzic 	if (trim > msdu_head->len) {
13755e3dd157SKalle Valo 		ath10k_warn("htt rx fragment: trailer longer than the frame itself? drop\n");
137685f6d7cfSJanusz Dziedzic 		dev_kfree_skb_any(msdu_head);
13775e3dd157SKalle Valo 		goto end;
13785e3dd157SKalle Valo 	}
13795e3dd157SKalle Valo 
138085f6d7cfSJanusz Dziedzic 	skb_trim(msdu_head, msdu_head->len - trim);
13815e3dd157SKalle Valo 
138275fb2f94SBen Greear 	ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx frag mpdu: ",
138385f6d7cfSJanusz Dziedzic 			msdu_head->data, msdu_head->len);
13846df92a3dSJanusz Dziedzic 	ath10k_process_rx(htt->ar, rx_status, msdu_head);
13855e3dd157SKalle Valo 
13865e3dd157SKalle Valo end:
13875e3dd157SKalle Valo 	if (fw_desc_len > 0) {
13885e3dd157SKalle Valo 		ath10k_dbg(ATH10K_DBG_HTT,
13895e3dd157SKalle Valo 			   "expecting more fragmented rx in one indication %d\n",
13905e3dd157SKalle Valo 			   fw_desc_len);
13915e3dd157SKalle Valo 	}
13925e3dd157SKalle Valo }
13935e3dd157SKalle Valo 
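/* Complete the data tx frames reported in a tx completion indication:
 * translate the firmware status into a htt_tx_done record and release
 * each reported msdu id. Called with htt->tx_lock held.
 */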
13946c5151a9SMichal Kazior static void ath10k_htt_rx_frm_tx_compl(struct ath10k *ar,
13956c5151a9SMichal Kazior 				       struct sk_buff *skb)
13966c5151a9SMichal Kazior {
13976c5151a9SMichal Kazior 	struct ath10k_htt *htt = &ar->htt;
13986c5151a9SMichal Kazior 	struct htt_resp *resp = (struct htt_resp *)skb->data;
13996c5151a9SMichal Kazior 	struct htt_tx_done tx_done = {};
14006c5151a9SMichal Kazior 	int status = MS(resp->data_tx_completion.flags, HTT_DATA_TX_STATUS);
14016c5151a9SMichal Kazior 	__le16 msdu_id;
14026c5151a9SMichal Kazior 	int i;
14036c5151a9SMichal Kazior 
140445967089SMichal Kazior 	lockdep_assert_held(&htt->tx_lock);
140545967089SMichal Kazior 
14066c5151a9SMichal Kazior 	switch (status) {
14076c5151a9SMichal Kazior 	case HTT_DATA_TX_STATUS_NO_ACK:
14086c5151a9SMichal Kazior 		tx_done.no_ack = true;
14096c5151a9SMichal Kazior 		break;
14106c5151a9SMichal Kazior 	case HTT_DATA_TX_STATUS_OK:
14116c5151a9SMichal Kazior 		break;
14126c5151a9SMichal Kazior 	case HTT_DATA_TX_STATUS_DISCARD:
14136c5151a9SMichal Kazior 	case HTT_DATA_TX_STATUS_POSTPONE:
14146c5151a9SMichal Kazior 	case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL:
14156c5151a9SMichal Kazior 		tx_done.discard = true;
14166c5151a9SMichal Kazior 		break;
14176c5151a9SMichal Kazior 	default:
14186c5151a9SMichal Kazior 		ath10k_warn("unhandled tx completion status %d\n", status);
14196c5151a9SMichal Kazior 		tx_done.discard = true;
14206c5151a9SMichal Kazior 		break;
14216c5151a9SMichal Kazior 	}
14226c5151a9SMichal Kazior 
14236c5151a9SMichal Kazior 	ath10k_dbg(ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
14246c5151a9SMichal Kazior 		   resp->data_tx_completion.num_msdus);
14256c5151a9SMichal Kazior 
14266c5151a9SMichal Kazior 	for (i = 0; i < resp->data_tx_completion.num_msdus; i++) {
14276c5151a9SMichal Kazior 		msdu_id = resp->data_tx_completion.msdus[i];
14286c5151a9SMichal Kazior 		tx_done.msdu_id = __le16_to_cpu(msdu_id);
14296c5151a9SMichal Kazior 		ath10k_txrx_tx_unref(htt, &tx_done);
14306c5151a9SMichal Kazior 	}
14316c5151a9SMichal Kazior }
14326c5151a9SMichal Kazior 
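/* The firmware reports that a block ack session was established for a
 * peer/tid; tell mac80211 to start the offloaded rx BA session so
 * aggregated frames are reordered correctly.
 */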
1433aa5b4fbcSMichal Kazior static void ath10k_htt_rx_addba(struct ath10k *ar, struct htt_resp *resp)
1434aa5b4fbcSMichal Kazior {
1435aa5b4fbcSMichal Kazior 	struct htt_rx_addba *ev = &resp->rx_addba;
1436aa5b4fbcSMichal Kazior 	struct ath10k_peer *peer;
1437aa5b4fbcSMichal Kazior 	struct ath10k_vif *arvif;
1438aa5b4fbcSMichal Kazior 	u16 info0, tid, peer_id;
1439aa5b4fbcSMichal Kazior 
1440aa5b4fbcSMichal Kazior 	info0 = __le16_to_cpu(ev->info0);
1441aa5b4fbcSMichal Kazior 	tid = MS(info0, HTT_RX_BA_INFO0_TID);
1442aa5b4fbcSMichal Kazior 	peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);
1443aa5b4fbcSMichal Kazior 
1444aa5b4fbcSMichal Kazior 	ath10k_dbg(ATH10K_DBG_HTT,
1445aa5b4fbcSMichal Kazior 		   "htt rx addba tid %hu peer_id %hu size %hhu\n",
1446aa5b4fbcSMichal Kazior 		   tid, peer_id, ev->window_size);
1447aa5b4fbcSMichal Kazior 
1448aa5b4fbcSMichal Kazior 	spin_lock_bh(&ar->data_lock);
1449aa5b4fbcSMichal Kazior 	peer = ath10k_peer_find_by_id(ar, peer_id);
1450aa5b4fbcSMichal Kazior 	if (!peer) {
1451aa5b4fbcSMichal Kazior 		ath10k_warn("received addba event for invalid peer_id: %hu\n",
1452aa5b4fbcSMichal Kazior 			    peer_id);
1453aa5b4fbcSMichal Kazior 		spin_unlock_bh(&ar->data_lock);
1454aa5b4fbcSMichal Kazior 		return;
1455aa5b4fbcSMichal Kazior 	}
1456aa5b4fbcSMichal Kazior 
1457aa5b4fbcSMichal Kazior 	arvif = ath10k_get_arvif(ar, peer->vdev_id);
1458aa5b4fbcSMichal Kazior 	if (!arvif) {
1459aa5b4fbcSMichal Kazior 		ath10k_warn("received addba event for invalid vdev_id: %u\n",
1460aa5b4fbcSMichal Kazior 			    peer->vdev_id);
1461aa5b4fbcSMichal Kazior 		spin_unlock_bh(&ar->data_lock);
1462aa5b4fbcSMichal Kazior 		return;
1463aa5b4fbcSMichal Kazior 	}
1464aa5b4fbcSMichal Kazior 
1465aa5b4fbcSMichal Kazior 	ath10k_dbg(ATH10K_DBG_HTT,
1466aa5b4fbcSMichal Kazior 		   "htt rx start rx ba session sta %pM tid %hu size %hhu\n",
1467aa5b4fbcSMichal Kazior 		   peer->addr, tid, ev->window_size);
1468aa5b4fbcSMichal Kazior 
1469aa5b4fbcSMichal Kazior 	ieee80211_start_rx_ba_session_offl(arvif->vif, peer->addr, tid);
1470aa5b4fbcSMichal Kazior 	spin_unlock_bh(&ar->data_lock);
1471aa5b4fbcSMichal Kazior }
1472aa5b4fbcSMichal Kazior 
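/* Counterpart of the addba handler: the firmware tore down a block
 * ack session, so tell mac80211 to stop the offloaded rx BA session
 * for this peer/tid.
 */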
1473aa5b4fbcSMichal Kazior static void ath10k_htt_rx_delba(struct ath10k *ar, struct htt_resp *resp)
1474aa5b4fbcSMichal Kazior {
1475aa5b4fbcSMichal Kazior 	struct htt_rx_delba *ev = &resp->rx_delba;
1476aa5b4fbcSMichal Kazior 	struct ath10k_peer *peer;
1477aa5b4fbcSMichal Kazior 	struct ath10k_vif *arvif;
1478aa5b4fbcSMichal Kazior 	u16 info0, tid, peer_id;
1479aa5b4fbcSMichal Kazior 
1480aa5b4fbcSMichal Kazior 	info0 = __le16_to_cpu(ev->info0);
1481aa5b4fbcSMichal Kazior 	tid = MS(info0, HTT_RX_BA_INFO0_TID);
1482aa5b4fbcSMichal Kazior 	peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);
1483aa5b4fbcSMichal Kazior 
1484aa5b4fbcSMichal Kazior 	ath10k_dbg(ATH10K_DBG_HTT,
1485aa5b4fbcSMichal Kazior 		   "htt rx delba tid %hu peer_id %hu\n",
1486aa5b4fbcSMichal Kazior 		   tid, peer_id);
1487aa5b4fbcSMichal Kazior 
1488aa5b4fbcSMichal Kazior 	spin_lock_bh(&ar->data_lock);
1489aa5b4fbcSMichal Kazior 	peer = ath10k_peer_find_by_id(ar, peer_id);
1490aa5b4fbcSMichal Kazior 	if (!peer) {
1491aa5b4fbcSMichal Kazior 		ath10k_warn("received delba event for invalid peer_id: %hu\n",
1492aa5b4fbcSMichal Kazior 			    peer_id);
1493aa5b4fbcSMichal Kazior 		spin_unlock_bh(&ar->data_lock);
1494aa5b4fbcSMichal Kazior 		return;
1495aa5b4fbcSMichal Kazior 	}
1496aa5b4fbcSMichal Kazior 
1497aa5b4fbcSMichal Kazior 	arvif = ath10k_get_arvif(ar, peer->vdev_id);
1498aa5b4fbcSMichal Kazior 	if (!arvif) {
1499aa5b4fbcSMichal Kazior 		ath10k_warn("received delba event for invalid vdev_id: %u\n",
1500aa5b4fbcSMichal Kazior 			    peer->vdev_id);
1501aa5b4fbcSMichal Kazior 		spin_unlock_bh(&ar->data_lock);
1502aa5b4fbcSMichal Kazior 		return;
1503aa5b4fbcSMichal Kazior 	}
1504aa5b4fbcSMichal Kazior 
1505aa5b4fbcSMichal Kazior 	ath10k_dbg(ATH10K_DBG_HTT,
1506aa5b4fbcSMichal Kazior 		   "htt rx stop rx ba session sta %pM tid %hu\n",
1507aa5b4fbcSMichal Kazior 		   peer->addr, tid);
1508aa5b4fbcSMichal Kazior 
1509aa5b4fbcSMichal Kazior 	ieee80211_stop_rx_ba_session_offl(arvif->vif, peer->addr, tid);
1510aa5b4fbcSMichal Kazior 	spin_unlock_bh(&ar->data_lock);
1511aa5b4fbcSMichal Kazior }
1512aa5b4fbcSMichal Kazior 
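/* Entry point for HTT target-to-host messages. Rx and data tx
 * completion indications are queued and deferred to the txrx
 * completion tasklet; all other messages are handled inline and the
 * indication buffer is freed before returning.
 */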
15135e3dd157SKalle Valo void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
15145e3dd157SKalle Valo {
1515edb8236dSMichal Kazior 	struct ath10k_htt *htt = &ar->htt;
15165e3dd157SKalle Valo 	struct htt_resp *resp = (struct htt_resp *)skb->data;
15175e3dd157SKalle Valo 
15185e3dd157SKalle Valo 	/* confirm alignment */
15195e3dd157SKalle Valo 	if (!IS_ALIGNED((unsigned long)skb->data, 4))
15205e3dd157SKalle Valo 		ath10k_warn("unaligned htt message, expect trouble\n");
15215e3dd157SKalle Valo 
152275fb2f94SBen Greear 	ath10k_dbg(ATH10K_DBG_HTT, "htt rx, msg_type: 0x%0X\n",
15235e3dd157SKalle Valo 		   resp->hdr.msg_type);
15245e3dd157SKalle Valo 	switch (resp->hdr.msg_type) {
15255e3dd157SKalle Valo 	case HTT_T2H_MSG_TYPE_VERSION_CONF: {
15265e3dd157SKalle Valo 		htt->target_version_major = resp->ver_resp.major;
15275e3dd157SKalle Valo 		htt->target_version_minor = resp->ver_resp.minor;
15285e3dd157SKalle Valo 		complete(&htt->target_version_received);
15295e3dd157SKalle Valo 		break;
15305e3dd157SKalle Valo 	}
15316c5151a9SMichal Kazior 	case HTT_T2H_MSG_TYPE_RX_IND:
153245967089SMichal Kazior 		spin_lock_bh(&htt->rx_ring.lock);
153345967089SMichal Kazior 		__skb_queue_tail(&htt->rx_compl_q, skb);
153445967089SMichal Kazior 		spin_unlock_bh(&htt->rx_ring.lock);
15356c5151a9SMichal Kazior 		tasklet_schedule(&htt->txrx_compl_task);
15366c5151a9SMichal Kazior 		return;
15375e3dd157SKalle Valo 	case HTT_T2H_MSG_TYPE_PEER_MAP: {
15385e3dd157SKalle Valo 		struct htt_peer_map_event ev = {
15395e3dd157SKalle Valo 			.vdev_id = resp->peer_map.vdev_id,
15405e3dd157SKalle Valo 			.peer_id = __le16_to_cpu(resp->peer_map.peer_id),
15415e3dd157SKalle Valo 		};
15425e3dd157SKalle Valo 		memcpy(ev.addr, resp->peer_map.addr, sizeof(ev.addr));
15435e3dd157SKalle Valo 		ath10k_peer_map_event(htt, &ev);
15445e3dd157SKalle Valo 		break;
15455e3dd157SKalle Valo 	}
15465e3dd157SKalle Valo 	case HTT_T2H_MSG_TYPE_PEER_UNMAP: {
15475e3dd157SKalle Valo 		struct htt_peer_unmap_event ev = {
15485e3dd157SKalle Valo 			.peer_id = __le16_to_cpu(resp->peer_unmap.peer_id),
15495e3dd157SKalle Valo 		};
15505e3dd157SKalle Valo 		ath10k_peer_unmap_event(htt, &ev);
15515e3dd157SKalle Valo 		break;
15525e3dd157SKalle Valo 	}
15535e3dd157SKalle Valo 	case HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION: {
15545e3dd157SKalle Valo 		struct htt_tx_done tx_done = {};
15555e3dd157SKalle Valo 		int status = __le32_to_cpu(resp->mgmt_tx_completion.status);
15565e3dd157SKalle Valo 
15575e3dd157SKalle Valo 		tx_done.msdu_id =
15585e3dd157SKalle Valo 			__le32_to_cpu(resp->mgmt_tx_completion.desc_id);
15595e3dd157SKalle Valo 
15605e3dd157SKalle Valo 		switch (status) {
15615e3dd157SKalle Valo 		case HTT_MGMT_TX_STATUS_OK:
15625e3dd157SKalle Valo 			break;
15635e3dd157SKalle Valo 		case HTT_MGMT_TX_STATUS_RETRY:
15645e3dd157SKalle Valo 			tx_done.no_ack = true;
15655e3dd157SKalle Valo 			break;
15665e3dd157SKalle Valo 		case HTT_MGMT_TX_STATUS_DROP:
15675e3dd157SKalle Valo 			tx_done.discard = true;
15685e3dd157SKalle Valo 			break;
15695e3dd157SKalle Valo 		}
15705e3dd157SKalle Valo 
15716c5151a9SMichal Kazior 		spin_lock_bh(&htt->tx_lock);
15720a89f8a0SMichal Kazior 		ath10k_txrx_tx_unref(htt, &tx_done);
15736c5151a9SMichal Kazior 		spin_unlock_bh(&htt->tx_lock);
15745e3dd157SKalle Valo 		break;
15755e3dd157SKalle Valo 	}
15766c5151a9SMichal Kazior 	case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
15776c5151a9SMichal Kazior 		spin_lock_bh(&htt->tx_lock);
15786c5151a9SMichal Kazior 		__skb_queue_tail(&htt->tx_compl_q, skb);
15796c5151a9SMichal Kazior 		spin_unlock_bh(&htt->tx_lock);
15806c5151a9SMichal Kazior 		tasklet_schedule(&htt->txrx_compl_task);
15816c5151a9SMichal Kazior 		return;
15825e3dd157SKalle Valo 	case HTT_T2H_MSG_TYPE_SEC_IND: {
15835e3dd157SKalle Valo 		struct ath10k *ar = htt->ar;
15845e3dd157SKalle Valo 		struct htt_security_indication *ev = &resp->security_indication;
15855e3dd157SKalle Valo 
15865e3dd157SKalle Valo 		ath10k_dbg(ATH10K_DBG_HTT,
15875e3dd157SKalle Valo 			   "sec ind peer_id %d unicast %d type %d\n",
15885e3dd157SKalle Valo 			  __le16_to_cpu(ev->peer_id),
15895e3dd157SKalle Valo 			  !!(ev->flags & HTT_SECURITY_IS_UNICAST),
15905e3dd157SKalle Valo 			  MS(ev->flags, HTT_SECURITY_TYPE));
15915e3dd157SKalle Valo 		complete(&ar->install_key_done);
15925e3dd157SKalle Valo 		break;
15935e3dd157SKalle Valo 	}
15945e3dd157SKalle Valo 	case HTT_T2H_MSG_TYPE_RX_FRAG_IND: {
15955e3dd157SKalle Valo 		ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
15965e3dd157SKalle Valo 				skb->data, skb->len);
15975e3dd157SKalle Valo 		ath10k_htt_rx_frag_handler(htt, &resp->rx_frag_ind);
15985e3dd157SKalle Valo 		break;
15995e3dd157SKalle Valo 	}
16005e3dd157SKalle Valo 	case HTT_T2H_MSG_TYPE_TEST:
16015e3dd157SKalle Valo 		/* FIX THIS */
16025e3dd157SKalle Valo 		break;
16035e3dd157SKalle Valo 	case HTT_T2H_MSG_TYPE_STATS_CONF:
1604a9bf0506SKalle Valo 		trace_ath10k_htt_stats(skb->data, skb->len);
1605a9bf0506SKalle Valo 		break;
1606a9bf0506SKalle Valo 	case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
1607708b9bdeSMichal Kazior 		/* Firmware can return tx frames if it's unable to fully
1608708b9bdeSMichal Kazior 		 * process them and suspects host may be able to fix it. ath10k
1609708b9bdeSMichal Kazior 		 * sends all tx frames as already inspected so this shouldn't
1610708b9bdeSMichal Kazior 		 * happen unless fw has a bug.
1611708b9bdeSMichal Kazior 		 */
1612708b9bdeSMichal Kazior 		ath10k_warn("received an unexpected htt tx inspect event\n");
1613708b9bdeSMichal Kazior 		break;
16145e3dd157SKalle Valo 	case HTT_T2H_MSG_TYPE_RX_ADDBA:
1615aa5b4fbcSMichal Kazior 		ath10k_htt_rx_addba(ar, resp);
1616aa5b4fbcSMichal Kazior 		break;
16175e3dd157SKalle Valo 	case HTT_T2H_MSG_TYPE_RX_DELBA:
1618aa5b4fbcSMichal Kazior 		ath10k_htt_rx_delba(ar, resp);
1619aa5b4fbcSMichal Kazior 		break;
1620aa5b4fbcSMichal Kazior 	case HTT_T2H_MSG_TYPE_RX_FLUSH: {
1621aa5b4fbcSMichal Kazior 		/* Ignore this event because mac80211 takes care of Rx
1622aa5b4fbcSMichal Kazior 		 * aggregation reordering.
1623aa5b4fbcSMichal Kazior 		 */
1624aa5b4fbcSMichal Kazior 		break;
1625aa5b4fbcSMichal Kazior 	}
16265e3dd157SKalle Valo 	default:
16275e3dd157SKalle Valo 		ath10k_dbg(ATH10K_DBG_HTT, "htt event (%d) not handled\n",
16285e3dd157SKalle Valo 			   resp->hdr.msg_type);
16295e3dd157SKalle Valo 		ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
16305e3dd157SKalle Valo 				skb->data, skb->len);
16315e3dd157SKalle Valo 		break;
16325e3dd157SKalle Valo 	}
16335e3dd157SKalle Valo 
16345e3dd157SKalle Valo 	/* Free the indication buffer */
16355e3dd157SKalle Valo 	dev_kfree_skb_any(skb);
16365e3dd157SKalle Valo }
16376c5151a9SMichal Kazior 
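/* Tasklet draining the tx and rx completion queues filled by
 * ath10k_htt_t2h_msg_handler(), holding tx_lock and rx_ring.lock
 * respectively while processing them.
 */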
16386c5151a9SMichal Kazior static void ath10k_htt_txrx_compl_task(unsigned long ptr)
16396c5151a9SMichal Kazior {
16406c5151a9SMichal Kazior 	struct ath10k_htt *htt = (struct ath10k_htt *)ptr;
16416c5151a9SMichal Kazior 	struct htt_resp *resp;
16426c5151a9SMichal Kazior 	struct sk_buff *skb;
16436c5151a9SMichal Kazior 
164445967089SMichal Kazior 	spin_lock_bh(&htt->tx_lock);
164545967089SMichal Kazior 	while ((skb = __skb_dequeue(&htt->tx_compl_q))) {
16466c5151a9SMichal Kazior 		ath10k_htt_rx_frm_tx_compl(htt->ar, skb);
16476c5151a9SMichal Kazior 		dev_kfree_skb_any(skb);
16486c5151a9SMichal Kazior 	}
164945967089SMichal Kazior 	spin_unlock_bh(&htt->tx_lock);
16506c5151a9SMichal Kazior 
165145967089SMichal Kazior 	spin_lock_bh(&htt->rx_ring.lock);
165245967089SMichal Kazior 	while ((skb = __skb_dequeue(&htt->rx_compl_q))) {
16536c5151a9SMichal Kazior 		resp = (struct htt_resp *)skb->data;
16546c5151a9SMichal Kazior 		ath10k_htt_rx_handler(htt, &resp->rx_ind);
16556c5151a9SMichal Kazior 		dev_kfree_skb_any(skb);
16566c5151a9SMichal Kazior 	}
165745967089SMichal Kazior 	spin_unlock_bh(&htt->rx_ring.lock);
16586c5151a9SMichal Kazior }
1659