15e3dd157SKalle Valo /*
25e3dd157SKalle Valo  * Copyright (c) 2005-2011 Atheros Communications Inc.
35e3dd157SKalle Valo  * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
45e3dd157SKalle Valo  *
55e3dd157SKalle Valo  * Permission to use, copy, modify, and/or distribute this software for any
65e3dd157SKalle Valo  * purpose with or without fee is hereby granted, provided that the above
75e3dd157SKalle Valo  * copyright notice and this permission notice appear in all copies.
85e3dd157SKalle Valo  *
95e3dd157SKalle Valo  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
105e3dd157SKalle Valo  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
115e3dd157SKalle Valo  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
125e3dd157SKalle Valo  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
135e3dd157SKalle Valo  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
145e3dd157SKalle Valo  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
155e3dd157SKalle Valo  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
165e3dd157SKalle Valo  */
175e3dd157SKalle Valo 
18edb8236dSMichal Kazior #include "core.h"
195e3dd157SKalle Valo #include "htc.h"
205e3dd157SKalle Valo #include "htt.h"
215e3dd157SKalle Valo #include "txrx.h"
225e3dd157SKalle Valo #include "debug.h"
23a9bf0506SKalle Valo #include "trace.h"
24aa5b4fbcSMichal Kazior #include "mac.h"
255e3dd157SKalle Valo 
265e3dd157SKalle Valo #include <linux/log2.h>
275e3dd157SKalle Valo 
285e3dd157SKalle Valo /* slightly larger than one large A-MPDU */
295e3dd157SKalle Valo #define HTT_RX_RING_SIZE_MIN 128
305e3dd157SKalle Valo 
315e3dd157SKalle Valo /* roughly 20 ms @ 1 Gbps of 1500B MSDUs */
325e3dd157SKalle Valo #define HTT_RX_RING_SIZE_MAX 2048
335e3dd157SKalle Valo 
345e3dd157SKalle Valo #define HTT_RX_AVG_FRM_BYTES 1000
355e3dd157SKalle Valo 
365e3dd157SKalle Valo /* ms, very conservative */
375e3dd157SKalle Valo #define HTT_RX_HOST_LATENCY_MAX_MS 20
385e3dd157SKalle Valo 
395e3dd157SKalle Valo /* ms, conservative */
405e3dd157SKalle Valo #define HTT_RX_HOST_LATENCY_WORST_LIKELY_MS 10
415e3dd157SKalle Valo 
425e3dd157SKalle Valo /* when under memory pressure rx ring refill may fail and needs a retry */
435e3dd157SKalle Valo #define HTT_RX_RING_REFILL_RETRY_MS 50
445e3dd157SKalle Valo 
45f6dc2095SMichal Kazior static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);
466c5151a9SMichal Kazior static void ath10k_htt_txrx_compl_task(unsigned long ptr);
47f6dc2095SMichal Kazior 
485e3dd157SKalle Valo static int ath10k_htt_rx_ring_size(struct ath10k_htt *htt)
495e3dd157SKalle Valo {
505e3dd157SKalle Valo 	int size;
515e3dd157SKalle Valo 
525e3dd157SKalle Valo 	/*
535e3dd157SKalle Valo 	 * It is expected that the host CPU will typically be able to
545e3dd157SKalle Valo 	 * service the rx indication from one A-MPDU before the rx
555e3dd157SKalle Valo 	 * indication from the subsequent A-MPDU happens, roughly 1-2 ms
565e3dd157SKalle Valo 	 * later. However, the rx ring should be sized very conservatively,
575e3dd157SKalle Valo 	 * to accomodate the worst reasonable delay before the host CPU
585e3dd157SKalle Valo 	 * services a rx indication interrupt.
595e3dd157SKalle Valo 	 *
605e3dd157SKalle Valo 	 * The rx ring need not be kept full of empty buffers. In theory,
615e3dd157SKalle Valo 	 * the htt host SW can dynamically track the low-water mark in the
625e3dd157SKalle Valo 	 * rx ring, and dynamically adjust the level to which the rx ring
635e3dd157SKalle Valo 	 * is filled with empty buffers, to dynamically meet the desired
645e3dd157SKalle Valo 	 * low-water mark.
655e3dd157SKalle Valo 	 *
665e3dd157SKalle Valo 	 * In contrast, it's difficult to resize the rx ring itself, once
675e3dd157SKalle Valo 	 * it's in use. Thus, the ring itself should be sized very
685e3dd157SKalle Valo 	 * conservatively, while the degree to which the ring is filled
695e3dd157SKalle Valo 	 * with empty buffers should be sized moderately conservatively.
705e3dd157SKalle Valo 	 */
715e3dd157SKalle Valo 
725e3dd157SKalle Valo 	/* 1e6 bps/mbps / 1e3 ms per sec = 1000 */
735e3dd157SKalle Valo 	size =
745e3dd157SKalle Valo 	    htt->max_throughput_mbps +
755e3dd157SKalle Valo 	    1000  /
765e3dd157SKalle Valo 	    (8 * HTT_RX_AVG_FRM_BYTES) * HTT_RX_HOST_LATENCY_MAX_MS;
775e3dd157SKalle Valo 
785e3dd157SKalle Valo 	if (size < HTT_RX_RING_SIZE_MIN)
795e3dd157SKalle Valo 		size = HTT_RX_RING_SIZE_MIN;
805e3dd157SKalle Valo 
815e3dd157SKalle Valo 	if (size > HTT_RX_RING_SIZE_MAX)
825e3dd157SKalle Valo 		size = HTT_RX_RING_SIZE_MAX;
835e3dd157SKalle Valo 
845e3dd157SKalle Valo 	size = roundup_pow_of_two(size);
855e3dd157SKalle Valo 
865e3dd157SKalle Valo 	return size;
875e3dd157SKalle Valo }
885e3dd157SKalle Valo 
895e3dd157SKalle Valo static int ath10k_htt_rx_ring_fill_level(struct ath10k_htt *htt)
905e3dd157SKalle Valo {
915e3dd157SKalle Valo 	int size;
925e3dd157SKalle Valo 
935e3dd157SKalle Valo 	/* 1e6 bps/mbps / 1e3 ms per sec = 1000 */
945e3dd157SKalle Valo 	size =
955e3dd157SKalle Valo 	    htt->max_throughput_mbps *
965e3dd157SKalle Valo 	    1000  /
975e3dd157SKalle Valo 	    (8 * HTT_RX_AVG_FRM_BYTES) * HTT_RX_HOST_LATENCY_WORST_LIKELY_MS;
985e3dd157SKalle Valo 
995e3dd157SKalle Valo 	/*
1005e3dd157SKalle Valo 	 * Make sure the fill level is at least 1 less than the ring size.
1015e3dd157SKalle Valo 	 * Leaving 1 element empty allows the SW to easily distinguish
1025e3dd157SKalle Valo 	 * between a full ring vs. an empty ring.
1035e3dd157SKalle Valo 	 */
1045e3dd157SKalle Valo 	if (size >= htt->rx_ring.size)
1055e3dd157SKalle Valo 		size = htt->rx_ring.size - 1;
1065e3dd157SKalle Valo 
1075e3dd157SKalle Valo 	return size;
1085e3dd157SKalle Valo }
1095e3dd157SKalle Valo 
1105e3dd157SKalle Valo static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt)
1115e3dd157SKalle Valo {
1125e3dd157SKalle Valo 	struct sk_buff *skb;
1135e3dd157SKalle Valo 	struct ath10k_skb_cb *cb;
1145e3dd157SKalle Valo 	int i;
1155e3dd157SKalle Valo 
1165e3dd157SKalle Valo 	for (i = 0; i < htt->rx_ring.fill_cnt; i++) {
1175e3dd157SKalle Valo 		skb = htt->rx_ring.netbufs_ring[i];
1185e3dd157SKalle Valo 		cb = ATH10K_SKB_CB(skb);
1195e3dd157SKalle Valo 		dma_unmap_single(htt->ar->dev, cb->paddr,
1205e3dd157SKalle Valo 				 skb->len + skb_tailroom(skb),
1215e3dd157SKalle Valo 				 DMA_FROM_DEVICE);
1225e3dd157SKalle Valo 		dev_kfree_skb_any(skb);
1235e3dd157SKalle Valo 	}
1245e3dd157SKalle Valo 
1255e3dd157SKalle Valo 	htt->rx_ring.fill_cnt = 0;
1265e3dd157SKalle Valo }
1275e3dd157SKalle Valo 
/* Allocate, DMA-map and post up to @num rx buffers to the ring,
 * starting at the shared (firmware-visible) allocation index. The new
 * index is published even on a partial fill so the FW can use whatever
 * buffers were posted. Caller must hold rx_ring.lock (see
 * ath10k_htt_rx_ring_fill_n()).
 *
 * Returns 0 on success or -ENOMEM if allocation/mapping failed before
 * all @num buffers were posted.
 */
static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
	struct htt_rx_desc *rx_desc;
	struct sk_buff *skb;
	dma_addr_t paddr;
	int ret = 0, idx;

	idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);
	while (num > 0) {
		/* Extra HTT_RX_DESC_ALIGN bytes allow aligning skb->data
		 * below without losing usable buffer space. */
		skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN);
		if (!skb) {
			ret = -ENOMEM;
			goto fail;
		}

		/* The HW rx descriptor is written at skb->data, which must
		 * be HTT_RX_DESC_ALIGN-aligned. */
		if (!IS_ALIGNED((unsigned long)skb->data, HTT_RX_DESC_ALIGN))
			skb_pull(skb,
				 PTR_ALIGN(skb->data, HTT_RX_DESC_ALIGN) -
				 skb->data);

		/* Clear rx_desc attention word before posting to Rx ring */
		rx_desc = (struct htt_rx_desc *)skb->data;
		rx_desc->attention.flags = __cpu_to_le32(0);

		paddr = dma_map_single(htt->ar->dev, skb->data,
				       skb->len + skb_tailroom(skb),
				       DMA_FROM_DEVICE);

		if (unlikely(dma_mapping_error(htt->ar->dev, paddr))) {
			dev_kfree_skb_any(skb);
			ret = -ENOMEM;
			goto fail;
		}

		ATH10K_SKB_CB(skb)->paddr = paddr;
		htt->rx_ring.netbufs_ring[idx] = skb;
		htt->rx_ring.paddrs_ring[idx] = __cpu_to_le32(paddr);
		htt->rx_ring.fill_cnt++;

		num--;
		idx++;
		idx &= htt->rx_ring.size_mask;
	}

fail:
	/* Publish the updated alloc index to the firmware, even if only
	 * part of the requested buffers could be posted. */
	*htt->rx_ring.alloc_idx.vaddr = __cpu_to_le32(idx);
	return ret;
}
1765e3dd157SKalle Valo 
/* Lock-checked front-end for __ath10k_htt_rx_ring_fill_n(); callers
 * must already hold rx_ring.lock.
 */
static int ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
	lockdep_assert_held(&htt->rx_ring.lock);
	return __ath10k_htt_rx_ring_fill_n(htt, num);
}
1825e3dd157SKalle Valo 
/* Top up the rx ring towards its fill level, at most
 * ATH10K_HTT_MAX_NUM_REFILL buffers per invocation. On -ENOMEM the
 * refill retry timer is armed; when more buffers are still needed the
 * replenish tasklet is rescheduled to continue progressively.
 */
static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
{
	int ret, num_deficit, num_to_fill;

	/* Refilling the whole RX ring buffer proves to be a bad idea. The
	 * reason is RX may take up significant amount of CPU cycles and starve
	 * other tasks, e.g. TX on an ethernet device while acting as a bridge
	 * with ath10k wlan interface. This ended up with very poor performance
	 * once the host CPU was overwhelmed with RX on ath10k.
	 *
	 * By limiting the number of refills the replenishing occurs
	 * progressively. This in turns makes use of the fact tasklets are
	 * processed in FIFO order. This means actual RX processing can starve
	 * out refilling. If there's not enough buffers on RX ring FW will not
	 * report RX until it is refilled with enough buffers. This
	 * automatically balances load wrt to CPU power.
	 *
	 * This probably comes at a cost of lower maximum throughput but
	 * improves the average and stability. */
	spin_lock_bh(&htt->rx_ring.lock);
	num_deficit = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
	num_to_fill = min(ATH10K_HTT_MAX_NUM_REFILL, num_deficit);
	num_deficit -= num_to_fill;
	ret = ath10k_htt_rx_ring_fill_n(htt, num_to_fill);
	if (ret == -ENOMEM) {
		/*
		 * Failed to fill it to the desired level -
		 * we'll start a timer and try again next time.
		 * As long as enough buffers are left in the ring for
		 * another A-MPDU rx, no special recovery is needed.
		 */
		mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
			  msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS));
	} else if (num_deficit > 0) {
		/* Still short of the fill level - continue in the tasklet
		 * so RX processing gets a chance to run in between. */
		tasklet_schedule(&htt->rx_replenish_task);
	}
	spin_unlock_bh(&htt->rx_ring.lock);
}
2215e3dd157SKalle Valo 
/* Timer callback: retry a ring refill that previously failed with
 * -ENOMEM (armed by ath10k_htt_rx_msdu_buff_replenish()).
 */
static void ath10k_htt_rx_ring_refill_retry(unsigned long arg)
{
	struct ath10k_htt *htt = (struct ath10k_htt *)arg;

	ath10k_htt_rx_msdu_buff_replenish(htt);
}
2285e3dd157SKalle Valo 
2293e841fd0SMichal Kazior static void ath10k_htt_rx_ring_clean_up(struct ath10k_htt *htt)
2303e841fd0SMichal Kazior {
2313e841fd0SMichal Kazior 	struct sk_buff *skb;
2323e841fd0SMichal Kazior 	int i;
2333e841fd0SMichal Kazior 
2343e841fd0SMichal Kazior 	for (i = 0; i < htt->rx_ring.size; i++) {
2353e841fd0SMichal Kazior 		skb = htt->rx_ring.netbufs_ring[i];
2363e841fd0SMichal Kazior 		if (!skb)
2373e841fd0SMichal Kazior 			continue;
2383e841fd0SMichal Kazior 
2393e841fd0SMichal Kazior 		dma_unmap_single(htt->ar->dev, ATH10K_SKB_CB(skb)->paddr,
2403e841fd0SMichal Kazior 				 skb->len + skb_tailroom(skb),
2413e841fd0SMichal Kazior 				 DMA_FROM_DEVICE);
2423e841fd0SMichal Kazior 		dev_kfree_skb_any(skb);
2433e841fd0SMichal Kazior 		htt->rx_ring.netbufs_ring[i] = NULL;
2443e841fd0SMichal Kazior 	}
2453e841fd0SMichal Kazior }
2463e841fd0SMichal Kazior 
/* Tear down the HTT rx path. Deferred work (timer, tasklets) is
 * stopped first so nothing touches the ring while it is being freed;
 * then queued completions are purged, remaining rx buffers unmapped
 * and freed, and finally the ring's DMA memory is released.
 */
void ath10k_htt_rx_free(struct ath10k_htt *htt)
{
	del_timer_sync(&htt->rx_ring.refill_retry_timer);
	tasklet_kill(&htt->rx_replenish_task);
	tasklet_kill(&htt->txrx_compl_task);

	skb_queue_purge(&htt->tx_compl_q);
	skb_queue_purge(&htt->rx_compl_q);

	ath10k_htt_rx_ring_clean_up(htt);

	/* NOTE(review): sizeof(htt->rx_ring.paddrs_ring) is the size of
	 * the pointer, not of a __le32 element; this matches the
	 * allocation in ath10k_htt_rx_alloc() (over-allocates on 64-bit
	 * but is consistent) - confirm before changing either side. */
	dma_free_coherent(htt->ar->dev,
			  (htt->rx_ring.size *
			   sizeof(htt->rx_ring.paddrs_ring)),
			  htt->rx_ring.paddrs_ring,
			  htt->rx_ring.base_paddr);

	dma_free_coherent(htt->ar->dev,
			  sizeof(*htt->rx_ring.alloc_idx.vaddr),
			  htt->rx_ring.alloc_idx.vaddr,
			  htt->rx_ring.alloc_idx.paddr);

	kfree(htt->rx_ring.netbufs_ring);
}
2715e3dd157SKalle Valo 
/* Pop the next filled buffer from the rx ring at the SW read index,
 * unmapping it from DMA so the CPU can access its contents. Caller
 * must hold rx_ring.lock. Returns NULL (with a warning) if the ring
 * is empty.
 */
static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	int idx;
	struct sk_buff *msdu;

	lockdep_assert_held(&htt->rx_ring.lock);

	if (htt->rx_ring.fill_cnt == 0) {
		ath10k_warn(ar, "tried to pop sk_buff from an empty rx ring\n");
		return NULL;
	}

	idx = htt->rx_ring.sw_rd_idx.msdu_payld;
	msdu = htt->rx_ring.netbufs_ring[idx];
	/* Clear the slot so ring clean-up can tell used slots apart */
	htt->rx_ring.netbufs_ring[idx] = NULL;

	idx++;
	idx &= htt->rx_ring.size_mask;
	htt->rx_ring.sw_rd_idx.msdu_payld = idx;
	htt->rx_ring.fill_cnt--;

	dma_unmap_single(htt->ar->dev,
			 ATH10K_SKB_CB(msdu)->paddr,
			 msdu->len + skb_tailroom(msdu),
			 DMA_FROM_DEVICE);
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
			msdu->data, msdu->len + skb_tailroom(msdu));

	return msdu;
}
3035e3dd157SKalle Valo 
/* Pop one MPDU's worth of MSDU buffers (an A-MSDU) off the rx ring
 * into @amsdu, attaching per-MSDU FW descriptor info from
 * @fw_desc/@fw_desc_len as it goes. Chained (ring2) buffers that
 * carry continuation data for an oversized MSDU are pulled in as well.
 * Caller must hold rx_ring.lock.
 *
 * return: < 0 fatal error, 0 - non chained msdu, 1 chained msdu */
static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
				   u8 **fw_desc, int *fw_desc_len,
				   struct sk_buff_head *amsdu)
{
	struct ath10k *ar = htt->ar;
	int msdu_len, msdu_chaining = 0;
	struct sk_buff *msdu;
	struct htt_rx_desc *rx_desc;

	lockdep_assert_held(&htt->rx_ring.lock);

	for (;;) {
		int last_msdu, msdu_len_invalid, msdu_chained;

		msdu = ath10k_htt_rx_netbuf_pop(htt);
		if (!msdu) {
			/* Ring underrun - drop everything gathered so far */
			__skb_queue_purge(amsdu);
			return -ENOENT;
		}

		__skb_queue_tail(amsdu, msdu);

		rx_desc = (struct htt_rx_desc *)msdu->data;

		/* FIXME: we must report msdu payload since this is what caller
		 *        expects now */
		skb_put(msdu, offsetof(struct htt_rx_desc, msdu_payload));
		skb_pull(msdu, offsetof(struct htt_rx_desc, msdu_payload));

		/*
		 * Sanity check - confirm the HW is finished filling in the
		 * rx data.
		 * If the HW and SW are working correctly, then it's guaranteed
		 * that the HW's MAC DMA is done before this point in the SW.
		 * To prevent the case that we handle a stale Rx descriptor,
		 * just assert for now until we have a way to recover.
		 */
		if (!(__le32_to_cpu(rx_desc->attention.flags)
				& RX_ATTENTION_FLAGS_MSDU_DONE)) {
			__skb_queue_purge(amsdu);
			return -EIO;
		}

		/*
		 * Copy the FW rx descriptor for this MSDU from the rx
		 * indication message into the MSDU's netbuf. HL uses the
		 * same rx indication message definition as LL, and simply
		 * appends new info (fields from the HW rx desc, and the
		 * MSDU payload itself). So, the offset into the rx
		 * indication message only has to account for the standard
		 * offset of the per-MSDU FW rx desc info within the
		 * message, and how many bytes of the per-MSDU FW rx desc
		 * info have already been consumed. (And the endianness of
		 * the host, since for a big-endian host, the rx ind
		 * message contents, including the per-MSDU rx desc bytes,
		 * were byteswapped during upload.)
		 */
		if (*fw_desc_len > 0) {
			rx_desc->fw_desc.info0 = **fw_desc;
			/*
			 * The target is expected to only provide the basic
			 * per-MSDU rx descriptors. Just to be sure, verify
			 * that the target has not attached extension data
			 * (e.g. LRO flow ID).
			 */

			/* or more, if there's extension data */
			(*fw_desc)++;
			(*fw_desc_len)--;
		} else {
			/*
			 * When an oversized AMSDU happened, FW will lose
			 * some of MSDU status - in this case, the FW
			 * descriptors provided will be less than the
			 * actual MSDUs inside this MPDU. Mark the FW
			 * descriptors so that it will still deliver to
			 * upper stack, if no CRC error for this MPDU.
			 *
			 * FIX THIS - the FW descriptors are actually for
			 * MSDUs in the end of this A-MSDU instead of the
			 * beginning.
			 */
			rx_desc->fw_desc.info0 = 0;
		}

		msdu_len_invalid = !!(__le32_to_cpu(rx_desc->attention.flags)
					& (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR |
					   RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR));
		msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.info0),
			      RX_MSDU_START_INFO0_MSDU_LENGTH);
		msdu_chained = rx_desc->frag_info.ring2_more_count;

		if (msdu_len_invalid)
			msdu_len = 0;

		skb_trim(msdu, 0);
		skb_put(msdu, min(msdu_len, HTT_RX_MSDU_SIZE));
		msdu_len -= msdu->len;

		/* Note: Chained buffers do not contain rx descriptor */
		while (msdu_chained--) {
			msdu = ath10k_htt_rx_netbuf_pop(htt);
			if (!msdu) {
				__skb_queue_purge(amsdu);
				return -ENOENT;
			}

			__skb_queue_tail(amsdu, msdu);
			skb_trim(msdu, 0);
			skb_put(msdu, min(msdu_len, HTT_RX_BUF_SIZE));
			msdu_len -= msdu->len;
			msdu_chaining = 1;
		}

		last_msdu = __le32_to_cpu(rx_desc->msdu_end.info0) &
				RX_MSDU_END_INFO0_LAST_MSDU;

		trace_ath10k_htt_rx_desc(ar, &rx_desc->attention,
					 sizeof(*rx_desc) - sizeof(u32));

		if (last_msdu)
			break;
	}

	if (skb_queue_empty(amsdu))
		msdu_chaining = -1;

	/*
	 * Don't refill the ring yet.
	 *
	 * First, the elements popped here are still in use - it is not
	 * safe to overwrite them until the matching call to
	 * mpdu_desc_list_next. Second, for efficiency it is preferable to
	 * refill the rx ring with 1 PPDU's worth of rx buffers (something
	 * like 32 x 3 buffers), rather than one MPDU's worth of rx buffers
	 * (something like 3 buffers). Consequently, we'll rely on the txrx
	 * SW to tell us when it is done pulling all the PPDU's rx buffers
	 * out of the rx ring, and then refill it just once.
	 */

	return msdu_chaining;
}
4475e3dd157SKalle Valo 
/* Tasklet callback: continue a rate-limited ring replenish (see the
 * progressive-refill rationale in ath10k_htt_rx_msdu_buff_replenish()).
 */
static void ath10k_htt_rx_replenish_task(unsigned long ptr)
{
	struct ath10k_htt *htt = (struct ath10k_htt *)ptr;

	ath10k_htt_rx_msdu_buff_replenish(htt);
}
4546e712d42SMichal Kazior 
45595bf21f9SMichal Kazior int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
4565e3dd157SKalle Valo {
4577aa7a72aSMichal Kazior 	struct ath10k *ar = htt->ar;
4585e3dd157SKalle Valo 	dma_addr_t paddr;
4595e3dd157SKalle Valo 	void *vaddr;
460bd8bdbb6SKalle Valo 	size_t size;
4615e3dd157SKalle Valo 	struct timer_list *timer = &htt->rx_ring.refill_retry_timer;
4625e3dd157SKalle Valo 
46351fc7d74SMichal Kazior 	htt->rx_confused = false;
46451fc7d74SMichal Kazior 
4655e3dd157SKalle Valo 	htt->rx_ring.size = ath10k_htt_rx_ring_size(htt);
4665e3dd157SKalle Valo 	if (!is_power_of_2(htt->rx_ring.size)) {
4677aa7a72aSMichal Kazior 		ath10k_warn(ar, "htt rx ring size is not power of 2\n");
4685e3dd157SKalle Valo 		return -EINVAL;
4695e3dd157SKalle Valo 	}
4705e3dd157SKalle Valo 
4715e3dd157SKalle Valo 	htt->rx_ring.size_mask = htt->rx_ring.size - 1;
4725e3dd157SKalle Valo 
4735e3dd157SKalle Valo 	/*
4745e3dd157SKalle Valo 	 * Set the initial value for the level to which the rx ring
4755e3dd157SKalle Valo 	 * should be filled, based on the max throughput and the
4765e3dd157SKalle Valo 	 * worst likely latency for the host to fill the rx ring
4775e3dd157SKalle Valo 	 * with new buffers. In theory, this fill level can be
4785e3dd157SKalle Valo 	 * dynamically adjusted from the initial value set here, to
4795e3dd157SKalle Valo 	 * reflect the actual host latency rather than a
4805e3dd157SKalle Valo 	 * conservative assumption about the host latency.
4815e3dd157SKalle Valo 	 */
4825e3dd157SKalle Valo 	htt->rx_ring.fill_level = ath10k_htt_rx_ring_fill_level(htt);
4835e3dd157SKalle Valo 
4845e3dd157SKalle Valo 	htt->rx_ring.netbufs_ring =
4853e841fd0SMichal Kazior 		kzalloc(htt->rx_ring.size * sizeof(struct sk_buff *),
4865e3dd157SKalle Valo 			GFP_KERNEL);
4875e3dd157SKalle Valo 	if (!htt->rx_ring.netbufs_ring)
4885e3dd157SKalle Valo 		goto err_netbuf;
4895e3dd157SKalle Valo 
490bd8bdbb6SKalle Valo 	size = htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring);
491bd8bdbb6SKalle Valo 
492bd8bdbb6SKalle Valo 	vaddr = dma_alloc_coherent(htt->ar->dev, size, &paddr, GFP_DMA);
4935e3dd157SKalle Valo 	if (!vaddr)
4945e3dd157SKalle Valo 		goto err_dma_ring;
4955e3dd157SKalle Valo 
4965e3dd157SKalle Valo 	htt->rx_ring.paddrs_ring = vaddr;
4975e3dd157SKalle Valo 	htt->rx_ring.base_paddr = paddr;
4985e3dd157SKalle Valo 
4995e3dd157SKalle Valo 	vaddr = dma_alloc_coherent(htt->ar->dev,
5005e3dd157SKalle Valo 				   sizeof(*htt->rx_ring.alloc_idx.vaddr),
5015e3dd157SKalle Valo 				   &paddr, GFP_DMA);
5025e3dd157SKalle Valo 	if (!vaddr)
5035e3dd157SKalle Valo 		goto err_dma_idx;
5045e3dd157SKalle Valo 
5055e3dd157SKalle Valo 	htt->rx_ring.alloc_idx.vaddr = vaddr;
5065e3dd157SKalle Valo 	htt->rx_ring.alloc_idx.paddr = paddr;
5075e3dd157SKalle Valo 	htt->rx_ring.sw_rd_idx.msdu_payld = 0;
5085e3dd157SKalle Valo 	*htt->rx_ring.alloc_idx.vaddr = 0;
5095e3dd157SKalle Valo 
5105e3dd157SKalle Valo 	/* Initialize the Rx refill retry timer */
5115e3dd157SKalle Valo 	setup_timer(timer, ath10k_htt_rx_ring_refill_retry, (unsigned long)htt);
5125e3dd157SKalle Valo 
5135e3dd157SKalle Valo 	spin_lock_init(&htt->rx_ring.lock);
5145e3dd157SKalle Valo 
5155e3dd157SKalle Valo 	htt->rx_ring.fill_cnt = 0;
5165e3dd157SKalle Valo 	if (__ath10k_htt_rx_ring_fill_n(htt, htt->rx_ring.fill_level))
5175e3dd157SKalle Valo 		goto err_fill_ring;
5185e3dd157SKalle Valo 
5196e712d42SMichal Kazior 	tasklet_init(&htt->rx_replenish_task, ath10k_htt_rx_replenish_task,
5206e712d42SMichal Kazior 		     (unsigned long)htt);
5216e712d42SMichal Kazior 
5226c5151a9SMichal Kazior 	skb_queue_head_init(&htt->tx_compl_q);
5236c5151a9SMichal Kazior 	skb_queue_head_init(&htt->rx_compl_q);
5246c5151a9SMichal Kazior 
5256c5151a9SMichal Kazior 	tasklet_init(&htt->txrx_compl_task, ath10k_htt_txrx_compl_task,
5266c5151a9SMichal Kazior 		     (unsigned long)htt);
5276c5151a9SMichal Kazior 
5287aa7a72aSMichal Kazior 	ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
5295e3dd157SKalle Valo 		   htt->rx_ring.size, htt->rx_ring.fill_level);
5305e3dd157SKalle Valo 	return 0;
5315e3dd157SKalle Valo 
5325e3dd157SKalle Valo err_fill_ring:
5335e3dd157SKalle Valo 	ath10k_htt_rx_ring_free(htt);
5345e3dd157SKalle Valo 	dma_free_coherent(htt->ar->dev,
5355e3dd157SKalle Valo 			  sizeof(*htt->rx_ring.alloc_idx.vaddr),
5365e3dd157SKalle Valo 			  htt->rx_ring.alloc_idx.vaddr,
5375e3dd157SKalle Valo 			  htt->rx_ring.alloc_idx.paddr);
5385e3dd157SKalle Valo err_dma_idx:
5395e3dd157SKalle Valo 	dma_free_coherent(htt->ar->dev,
5405e3dd157SKalle Valo 			  (htt->rx_ring.size *
5415e3dd157SKalle Valo 			   sizeof(htt->rx_ring.paddrs_ring)),
5425e3dd157SKalle Valo 			  htt->rx_ring.paddrs_ring,
5435e3dd157SKalle Valo 			  htt->rx_ring.base_paddr);
5445e3dd157SKalle Valo err_dma_ring:
5455e3dd157SKalle Valo 	kfree(htt->rx_ring.netbufs_ring);
5465e3dd157SKalle Valo err_netbuf:
5475e3dd157SKalle Valo 	return -ENOMEM;
5485e3dd157SKalle Valo }
5495e3dd157SKalle Valo 
5507aa7a72aSMichal Kazior static int ath10k_htt_rx_crypto_param_len(struct ath10k *ar,
5517aa7a72aSMichal Kazior 					  enum htt_rx_mpdu_encrypt_type type)
5525e3dd157SKalle Valo {
5535e3dd157SKalle Valo 	switch (type) {
554890d3b2aSMichal Kazior 	case HTT_RX_MPDU_ENCRYPT_NONE:
555890d3b2aSMichal Kazior 		return 0;
5565e3dd157SKalle Valo 	case HTT_RX_MPDU_ENCRYPT_WEP40:
5575e3dd157SKalle Valo 	case HTT_RX_MPDU_ENCRYPT_WEP104:
558890d3b2aSMichal Kazior 		return IEEE80211_WEP_IV_LEN;
5595e3dd157SKalle Valo 	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
5605e3dd157SKalle Valo 	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
561890d3b2aSMichal Kazior 		return IEEE80211_TKIP_IV_LEN;
5625e3dd157SKalle Valo 	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
563890d3b2aSMichal Kazior 		return IEEE80211_CCMP_HDR_LEN;
564890d3b2aSMichal Kazior 	case HTT_RX_MPDU_ENCRYPT_WEP128:
565890d3b2aSMichal Kazior 	case HTT_RX_MPDU_ENCRYPT_WAPI:
566890d3b2aSMichal Kazior 		break;
567890d3b2aSMichal Kazior 	}
568890d3b2aSMichal Kazior 
569890d3b2aSMichal Kazior 	ath10k_warn(ar, "unsupported encryption type %d\n", type);
5705e3dd157SKalle Valo 	return 0;
5715e3dd157SKalle Valo }
5725e3dd157SKalle Valo 
573890d3b2aSMichal Kazior #define MICHAEL_MIC_LEN 8
5745e3dd157SKalle Valo 
5757aa7a72aSMichal Kazior static int ath10k_htt_rx_crypto_tail_len(struct ath10k *ar,
5767aa7a72aSMichal Kazior 					 enum htt_rx_mpdu_encrypt_type type)
5775e3dd157SKalle Valo {
5785e3dd157SKalle Valo 	switch (type) {
5795e3dd157SKalle Valo 	case HTT_RX_MPDU_ENCRYPT_NONE:
580890d3b2aSMichal Kazior 		return 0;
5815e3dd157SKalle Valo 	case HTT_RX_MPDU_ENCRYPT_WEP40:
5825e3dd157SKalle Valo 	case HTT_RX_MPDU_ENCRYPT_WEP104:
583890d3b2aSMichal Kazior 		return IEEE80211_WEP_ICV_LEN;
5845e3dd157SKalle Valo 	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
5855e3dd157SKalle Valo 	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
586890d3b2aSMichal Kazior 		return IEEE80211_TKIP_ICV_LEN;
5875e3dd157SKalle Valo 	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
588890d3b2aSMichal Kazior 		return IEEE80211_CCMP_MIC_LEN;
589890d3b2aSMichal Kazior 	case HTT_RX_MPDU_ENCRYPT_WEP128:
590890d3b2aSMichal Kazior 	case HTT_RX_MPDU_ENCRYPT_WAPI:
591890d3b2aSMichal Kazior 		break;
5925e3dd157SKalle Valo 	}
5935e3dd157SKalle Valo 
594890d3b2aSMichal Kazior 	ath10k_warn(ar, "unsupported encryption type %d\n", type);
5955e3dd157SKalle Valo 	return 0;
5965e3dd157SKalle Valo }
5975e3dd157SKalle Valo 
/* LLC/SNAP header prefixed to 802.11 data frame payloads (RFC 1042
 * encapsulation). */
struct rfc1042_hdr {
	u8 llc_dsap;
	u8 llc_ssap;
	u8 llc_ctrl;
	u8 snap_oui[3];
	__be16 snap_type;
} __packed;
605f6dc2095SMichal Kazior 
/* Per-subframe header inside an A-MSDU aggregate: destination address,
 * source address and subframe length. */
struct amsdu_subframe_hdr {
	u8 dst[ETH_ALEN];
	u8 src[ETH_ALEN];
	__be16 len;
} __packed;
611f6dc2095SMichal Kazior 
/* Maps the 4-bit hw legacy (L-SIG) rate code to an index into the rate
 * table registered with mac80211 (ath10k_rates[] — see the comment in
 * ath10k_htt_rx_h_rates()). Codes 0x00-0x07 are CCK, 0x08-0x0F OFDM.
 */
static const u8 rx_legacy_rate_idx[] = {
	3,	/* 0x00  - 11Mbps  */
	2,	/* 0x01  - 5.5Mbps */
	1,	/* 0x02  - 2Mbps   */
	0,	/* 0x03  - 1Mbps   */
	3,	/* 0x04  - 11Mbps  */
	2,	/* 0x05  - 5.5Mbps */
	1,	/* 0x06  - 2Mbps   */
	0,	/* 0x07  - 1Mbps   */
	10,	/* 0x08  - 48Mbps  */
	8,	/* 0x09  - 24Mbps  */
	6,	/* 0x0A  - 12Mbps  */
	4,	/* 0x0B  - 6Mbps   */
	11,	/* 0x0C  - 54Mbps  */
	9,	/* 0x0D  - 36Mbps  */
	7,	/* 0x0E  - 18Mbps  */
	5,	/* 0x0F  - 9Mbps   */
};
63073539b40SJanusz Dziedzic 
/* Decode PHY rate information from the rx descriptor's ppdu_start words
 * into mac80211 rx status: rate_idx, HT/VHT flags, bandwidth, short GI
 * and (for VHT) nss.
 *
 * Legacy rate decoding is band dependent, so this must run after the
 * channel (status->freq/band) has been filled in; see
 * ath10k_htt_rx_h_ppdu() which calls ath10k_htt_rx_h_channel() first.
 */
static void ath10k_htt_rx_h_rates(struct ath10k *ar,
				  struct ieee80211_rx_status *status,
				  struct htt_rx_desc *rxd)
{
	enum ieee80211_band band;
	u8 cck, rate, rate_idx, bw, sgi, mcs, nss;
	u8 preamble = 0;
	u32 info1, info2, info3;

	/* Band value can't be set as undefined but freq can be 0 - use that to
	 * determine whether band is provided.
	 *
	 * FIXME: Perhaps this can go away if CCK rate reporting is a little
	 * reworked?
	 */
	if (!status->freq)
		return;

	band = status->band;
	info1 = __le32_to_cpu(rxd->ppdu_start.info1);
	info2 = __le32_to_cpu(rxd->ppdu_start.info2);
	info3 = __le32_to_cpu(rxd->ppdu_start.info3);

	preamble = MS(info1, RX_PPDU_START_INFO1_PREAMBLE_TYPE);

	switch (preamble) {
	case HTT_RX_LEGACY:
		cck = info1 & RX_PPDU_START_INFO1_L_SIG_RATE_SELECT;
		rate = MS(info1, RX_PPDU_START_INFO1_L_SIG_RATE);
		rate_idx = 0;

		/* Reject out-of-range rate codes; leaves rate_idx at 0. */
		if (rate < 0x08 || rate > 0x0F)
			break;

		switch (band) {
		case IEEE80211_BAND_2GHZ:
			/* CCK rate codes carry BIT(3); clear it so the code
			 * lands in the CCK half of the lookup table.
			 */
			if (cck)
				rate &= ~BIT(3);
			rate_idx = rx_legacy_rate_idx[rate];
			break;
		case IEEE80211_BAND_5GHZ:
			rate_idx = rx_legacy_rate_idx[rate];
			/* We are using same rate table registering
			   HW - ath10k_rates[]. In case of 5GHz skip
			   CCK rates, so -4 here */
			rate_idx -= 4;
			break;
		default:
			break;
		}

		status->rate_idx = rate_idx;
		break;
	case HTT_RX_HT:
	case HTT_RX_HT_WITH_TXBF:
		/* HT-SIG - Table 20-11 in info2 and info3 */
		mcs = info2 & 0x1F;
		nss = mcs >> 3;
		bw = (info2 >> 7) & 1;
		sgi = (info3 >> 7) & 1;

		status->rate_idx = mcs;
		status->flag |= RX_FLAG_HT;
		if (sgi)
			status->flag |= RX_FLAG_SHORT_GI;
		if (bw)
			status->flag |= RX_FLAG_40MHZ;
		break;
	case HTT_RX_VHT:
	case HTT_RX_VHT_WITH_TXBF:
		/* VHT-SIG-A1 in info2, VHT-SIG-A2 in info3
		   TODO check this */
		mcs = (info3 >> 4) & 0x0F;
		nss = ((info2 >> 10) & 0x07) + 1;
		bw = info2 & 3;
		sgi = info3 & 1;

		status->rate_idx = mcs;
		status->vht_nss = nss;

		if (sgi)
			status->flag |= RX_FLAG_SHORT_GI;

		switch (bw) {
		/* 20MHZ */
		case 0:
			break;
		/* 40MHZ */
		case 1:
			status->flag |= RX_FLAG_40MHZ;
			break;
		/* 80MHZ */
		case 2:
			status->vht_flag |= RX_VHT_FLAG_80MHZ;
		}

		status->flag |= RX_FLAG_VHT;
		break;
	default:
		break;
	}
}
73373539b40SJanusz Dziedzic 
73436653f05SJanusz Dziedzic static bool ath10k_htt_rx_h_channel(struct ath10k *ar,
73536653f05SJanusz Dziedzic 				    struct ieee80211_rx_status *status)
73636653f05SJanusz Dziedzic {
73736653f05SJanusz Dziedzic 	struct ieee80211_channel *ch;
73836653f05SJanusz Dziedzic 
73936653f05SJanusz Dziedzic 	spin_lock_bh(&ar->data_lock);
74036653f05SJanusz Dziedzic 	ch = ar->scan_channel;
74136653f05SJanusz Dziedzic 	if (!ch)
74236653f05SJanusz Dziedzic 		ch = ar->rx_channel;
74336653f05SJanusz Dziedzic 	spin_unlock_bh(&ar->data_lock);
74436653f05SJanusz Dziedzic 
74536653f05SJanusz Dziedzic 	if (!ch)
74636653f05SJanusz Dziedzic 		return false;
74736653f05SJanusz Dziedzic 
74836653f05SJanusz Dziedzic 	status->band = ch->band;
74936653f05SJanusz Dziedzic 	status->freq = ch->center_freq;
75036653f05SJanusz Dziedzic 
75136653f05SJanusz Dziedzic 	return true;
75236653f05SJanusz Dziedzic }
75336653f05SJanusz Dziedzic 
/* Derive a signal strength estimate from the combined RSSI reported in
 * the rx descriptor, offset from a hardcoded noise floor, and mark the
 * status as carrying a valid signal value.
 */
static void ath10k_htt_rx_h_signal(struct ath10k *ar,
				   struct ieee80211_rx_status *status,
				   struct htt_rx_desc *rxd)
{
	/* FIXME: Get real NF */
	status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
			 rxd->ppdu_start.rssi_comb;
	status->flag &= ~RX_FLAG_NO_SIGNAL_VAL;
}
763b9fd8a84SMichal Kazior 
/* Set status->mactime from the 32-bit TSF snapshot in the rx descriptor
 * and flag it as captured at the end of the frame.
 */
static void ath10k_htt_rx_h_mactime(struct ath10k *ar,
				    struct ieee80211_rx_status *status,
				    struct htt_rx_desc *rxd)
{
	/* FIXME: TSF is known only at the end of PPDU, in the last MPDU. This
	 * means all prior MSDUs in a PPDU are reported to mac80211 without the
	 * TSF. Is it worth holding frames until end of PPDU is known?
	 *
	 * FIXME: Can we get/compute 64bit TSF?
	 */
	status->mactime = __le32_to_cpu(rxd->ppdu_end.tsf_timestamp);
	status->flag |= RX_FLAG_MACTIME_END;
}
777b9fd8a84SMichal Kazior 
/* Update PPDU-level rx status (signal, channel, rates, mactime) for an
 * A-MSDU chain, based on the first MSDU's rx descriptor.
 *
 * The per-PPDU fields in @status are cleared and re-populated only when
 * the descriptor marks the first MPDU of a new PPDU; the TSF is taken
 * only from the last MPDU, where it is valid (see the FIXME in
 * ath10k_htt_rx_h_mactime()). For intermediate MPDUs @status carries
 * over the values set by the PPDU's first MPDU.
 */
static void ath10k_htt_rx_h_ppdu(struct ath10k *ar,
				 struct sk_buff_head *amsdu,
				 struct ieee80211_rx_status *status)
{
	struct sk_buff *first;
	struct htt_rx_desc *rxd;
	bool is_first_ppdu;
	bool is_last_ppdu;

	if (skb_queue_empty(amsdu))
		return;

	/* The rx descriptor lives immediately before the frame data. */
	first = skb_peek(amsdu);
	rxd = (void *)first->data - sizeof(*rxd);

	is_first_ppdu = !!(rxd->attention.flags &
			   __cpu_to_le32(RX_ATTENTION_FLAGS_FIRST_MPDU));
	is_last_ppdu = !!(rxd->attention.flags &
			  __cpu_to_le32(RX_ATTENTION_FLAGS_LAST_MPDU));

	if (is_first_ppdu) {
		/* New PPDU starts so clear out the old per-PPDU status. */
		status->freq = 0;
		status->rate_idx = 0;
		status->vht_nss = 0;
		status->vht_flag &= ~RX_VHT_FLAG_80MHZ;
		status->flag &= ~(RX_FLAG_HT |
				  RX_FLAG_VHT |
				  RX_FLAG_SHORT_GI |
				  RX_FLAG_40MHZ |
				  RX_FLAG_MACTIME_END);
		status->flag |= RX_FLAG_NO_SIGNAL_VAL;

		/* Channel must be set before rates: legacy rate decoding
		 * is band dependent.
		 */
		ath10k_htt_rx_h_signal(ar, status, rxd);
		ath10k_htt_rx_h_channel(ar, status);
		ath10k_htt_rx_h_rates(ar, status, rxd);
	}

	if (is_last_ppdu)
		ath10k_htt_rx_h_mactime(ar, status, rxd);
}
819b9fd8a84SMichal Kazior 
/* Access category label for each QoS TID 0..7, indexed by TID. Used
 * only for debug log formatting in ath10k_get_tid().
 */
static const char * const tid_to_ac[] = {
	"BE",
	"BK",
	"BK",
	"BE",
	"VI",
	"VI",
	"VO",
	"VO",
};
83076f5329aSJanusz Dziedzic 
83176f5329aSJanusz Dziedzic static char *ath10k_get_tid(struct ieee80211_hdr *hdr, char *out, size_t size)
83276f5329aSJanusz Dziedzic {
83376f5329aSJanusz Dziedzic 	u8 *qc;
83476f5329aSJanusz Dziedzic 	int tid;
83576f5329aSJanusz Dziedzic 
83676f5329aSJanusz Dziedzic 	if (!ieee80211_is_data_qos(hdr->frame_control))
83776f5329aSJanusz Dziedzic 		return "";
83876f5329aSJanusz Dziedzic 
83976f5329aSJanusz Dziedzic 	qc = ieee80211_get_qos_ctl(hdr);
84076f5329aSJanusz Dziedzic 	tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
84176f5329aSJanusz Dziedzic 	if (tid < 8)
84276f5329aSJanusz Dziedzic 		snprintf(out, size, "tid %d (%s)", tid, tid_to_ac[tid]);
84376f5329aSJanusz Dziedzic 	else
84476f5329aSJanusz Dziedzic 		snprintf(out, size, "tid %d", tid);
84576f5329aSJanusz Dziedzic 
84676f5329aSJanusz Dziedzic 	return out;
84776f5329aSJanusz Dziedzic }
84876f5329aSJanusz Dziedzic 
/* Finalize the rx status for @skb and hand the frame to mac80211.
 *
 * Copies @rx_status into the skb's control buffer, emits verbose debug
 * logging and tracepoints, then delivers the frame via ieee80211_rx().
 * Ownership of @skb passes to mac80211.
 */
static void ath10k_process_rx(struct ath10k *ar,
			      struct ieee80211_rx_status *rx_status,
			      struct sk_buff *skb)
{
	struct ieee80211_rx_status *status;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	char tid[32];

	status = IEEE80211_SKB_RXCB(skb);
	*status = *rx_status;

	ath10k_dbg(ar, ATH10K_DBG_DATA,
		   "rx skb %p len %u peer %pM %s %s sn %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
		   skb,
		   skb->len,
		   ieee80211_get_SA(hdr),
		   ath10k_get_tid(hdr, tid, sizeof(tid)),
		   is_multicast_ether_addr(ieee80211_get_DA(hdr)) ?
							"mcast" : "ucast",
		   (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4,
		   status->flag == 0 ? "legacy" : "",
		   status->flag & RX_FLAG_HT ? "ht" : "",
		   status->flag & RX_FLAG_VHT ? "vht" : "",
		   status->flag & RX_FLAG_40MHZ ? "40" : "",
		   status->vht_flag & RX_VHT_FLAG_80MHZ ? "80" : "",
		   status->flag & RX_FLAG_SHORT_GI ? "sgi " : "",
		   status->rate_idx,
		   status->vht_nss,
		   status->freq,
		   status->band, status->flag,
		   !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
		   !!(status->flag & RX_FLAG_MMIC_ERROR),
		   !!(status->flag & RX_FLAG_AMSDU_MORE));
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
			skb->data, skb->len);
	trace_ath10k_rx_hdr(ar, skb->data, skb->len);
	trace_ath10k_rx_payload(ar, skb->data, skb->len);

	ieee80211_rx(ar->hw, skb);
}
88973539b40SJanusz Dziedzic 
890d960c369SMichal Kazior static int ath10k_htt_rx_nwifi_hdrlen(struct ieee80211_hdr *hdr)
891d960c369SMichal Kazior {
892d960c369SMichal Kazior 	/* nwifi header is padded to 4 bytes. this fixes 4addr rx */
893d960c369SMichal Kazior 	return round_up(ieee80211_hdrlen(hdr->frame_control), 4);
894d960c369SMichal Kazior }
895d960c369SMichal Kazior 
896581c25f8SMichal Kazior static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
897581c25f8SMichal Kazior 					struct sk_buff *msdu,
898581c25f8SMichal Kazior 					struct ieee80211_rx_status *status,
899581c25f8SMichal Kazior 					enum htt_rx_mpdu_encrypt_type enctype,
900581c25f8SMichal Kazior 					bool is_decrypted)
9015e3dd157SKalle Valo {
902f6dc2095SMichal Kazior 	struct ieee80211_hdr *hdr;
903581c25f8SMichal Kazior 	struct htt_rx_desc *rxd;
904581c25f8SMichal Kazior 	size_t hdr_len;
905581c25f8SMichal Kazior 	size_t crypto_len;
906581c25f8SMichal Kazior 	bool is_first;
907581c25f8SMichal Kazior 	bool is_last;
9085e3dd157SKalle Valo 
909581c25f8SMichal Kazior 	rxd = (void *)msdu->data - sizeof(*rxd);
910581c25f8SMichal Kazior 	is_first = !!(rxd->msdu_end.info0 &
911581c25f8SMichal Kazior 		      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
912581c25f8SMichal Kazior 	is_last = !!(rxd->msdu_end.info0 &
913581c25f8SMichal Kazior 		     __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
9149aa505d2SMichal Kazior 
915581c25f8SMichal Kazior 	/* Delivered decapped frame:
916581c25f8SMichal Kazior 	 * [802.11 header]
917581c25f8SMichal Kazior 	 * [crypto param] <-- can be trimmed if !fcs_err &&
918581c25f8SMichal Kazior 	 *                    !decrypt_err && !peer_idx_invalid
919581c25f8SMichal Kazior 	 * [amsdu header] <-- only if A-MSDU
920581c25f8SMichal Kazior 	 * [rfc1042/llc]
921581c25f8SMichal Kazior 	 * [payload]
922581c25f8SMichal Kazior 	 * [FCS] <-- at end, needs to be trimmed
923581c25f8SMichal Kazior 	 */
9245e3dd157SKalle Valo 
925581c25f8SMichal Kazior 	/* This probably shouldn't happen but warn just in case */
926581c25f8SMichal Kazior 	if (unlikely(WARN_ON_ONCE(!is_first)))
927581c25f8SMichal Kazior 		return;
928581c25f8SMichal Kazior 
929581c25f8SMichal Kazior 	/* This probably shouldn't happen but warn just in case */
930581c25f8SMichal Kazior 	if (unlikely(WARN_ON_ONCE(!(is_first && is_last))))
931581c25f8SMichal Kazior 		return;
932581c25f8SMichal Kazior 
933581c25f8SMichal Kazior 	skb_trim(msdu, msdu->len - FCS_LEN);
934581c25f8SMichal Kazior 
935581c25f8SMichal Kazior 	/* In most cases this will be true for sniffed frames. It makes sense
936581c25f8SMichal Kazior 	 * to deliver them as-is without stripping the crypto param. This would
937581c25f8SMichal Kazior 	 * also make sense for software based decryption (which is not
938581c25f8SMichal Kazior 	 * implemented in ath10k).
939581c25f8SMichal Kazior 	 *
940581c25f8SMichal Kazior 	 * If there's no error then the frame is decrypted. At least that is
941581c25f8SMichal Kazior 	 * the case for frames that come in via fragmented rx indication.
942581c25f8SMichal Kazior 	 */
943581c25f8SMichal Kazior 	if (!is_decrypted)
944581c25f8SMichal Kazior 		return;
945581c25f8SMichal Kazior 
946581c25f8SMichal Kazior 	/* The payload is decrypted so strip crypto params. Start from tail
947581c25f8SMichal Kazior 	 * since hdr is used to compute some stuff.
948581c25f8SMichal Kazior 	 */
949581c25f8SMichal Kazior 
950581c25f8SMichal Kazior 	hdr = (void *)msdu->data;
951581c25f8SMichal Kazior 
952581c25f8SMichal Kazior 	/* Tail */
953581c25f8SMichal Kazior 	skb_trim(msdu, msdu->len - ath10k_htt_rx_crypto_tail_len(ar, enctype));
954581c25f8SMichal Kazior 
955581c25f8SMichal Kazior 	/* MMIC */
956581c25f8SMichal Kazior 	if (!ieee80211_has_morefrags(hdr->frame_control) &&
957581c25f8SMichal Kazior 	    enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
958581c25f8SMichal Kazior 		skb_trim(msdu, msdu->len - 8);
959581c25f8SMichal Kazior 
960581c25f8SMichal Kazior 	/* Head */
961f6dc2095SMichal Kazior 	hdr_len = ieee80211_hdrlen(hdr->frame_control);
962581c25f8SMichal Kazior 	crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);
9635e3dd157SKalle Valo 
964581c25f8SMichal Kazior 	memmove((void *)msdu->data + crypto_len,
965581c25f8SMichal Kazior 		(void *)msdu->data, hdr_len);
966581c25f8SMichal Kazior 	skb_pull(msdu, crypto_len);
9675e3dd157SKalle Valo }
9685e3dd157SKalle Valo 
/* Undecap an MSDU delivered in native-wifi format.
 *
 * Strips the firmware-generated nwifi header and prepends the cached
 * original 802.11 header from @first_hdr, then restores the DA/SA that
 * were taken from the nwifi header.
 */
static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
					  struct sk_buff *msdu,
					  struct ieee80211_rx_status *status,
					  const u8 first_hdr[64])
{
	struct ieee80211_hdr *hdr;
	size_t hdr_len;
	u8 da[ETH_ALEN];
	u8 sa[ETH_ALEN];

	/* Delivered decapped frame:
	 * [nwifi 802.11 header] <-- replaced with 802.11 hdr
	 * [rfc1042/llc]
	 *
	 * Note: The nwifi header doesn't have QoS Control and is
	 * (always?) a 3addr frame.
	 *
	 * Note2: There's no A-MSDU subframe header. Even if it's part
	 * of an A-MSDU.
	 */

	/* pull decapped header and copy SA & DA */
	hdr = (struct ieee80211_hdr *)msdu->data;
	hdr_len = ath10k_htt_rx_nwifi_hdrlen(hdr);
	ether_addr_copy(da, ieee80211_get_DA(hdr));
	ether_addr_copy(sa, ieee80211_get_SA(hdr));
	skb_pull(msdu, hdr_len);

	/* push original 802.11 header */
	hdr = (struct ieee80211_hdr *)first_hdr;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);
	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);

	/* original 802.11 header has a different DA and in
	 * case of 4addr it may also have different SA
	 */
	hdr = (struct ieee80211_hdr *)msdu->data;
	ether_addr_copy(ieee80211_get_DA(hdr), da);
	ether_addr_copy(ieee80211_get_SA(hdr), sa);
}
10095e3dd157SKalle Valo 
/* Locate the rfc1042/llc header inside the original (undecapped) frame
 * snapshot kept in rxd->rx_hdr_status.
 *
 * For the first MSDU of an MPDU the 802.11 header and crypto parameter
 * (each padded to 4 bytes) precede it; A-MSDU subframes additionally
 * carry an amsdu_subframe_hdr. Returns a pointer into the snapshot.
 */
static void *ath10k_htt_rx_h_find_rfc1042(struct ath10k *ar,
					  struct sk_buff *msdu,
					  enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ieee80211_hdr *hdr;
	struct htt_rx_desc *rxd;
	size_t hdr_len, crypto_len;
	void *rfc1042;
	bool is_first, is_last, is_amsdu;

	rxd = (void *)msdu->data - sizeof(*rxd);
	hdr = (void *)rxd->rx_hdr_status;

	is_first = !!(rxd->msdu_end.info0 &
		      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
	is_last = !!(rxd->msdu_end.info0 &
		     __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
	is_amsdu = !(is_first && is_last);

	rfc1042 = hdr;

	if (is_first) {
		hdr_len = ieee80211_hdrlen(hdr->frame_control);
		crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);

		/* both header and crypto param are padded to 4 bytes */
		rfc1042 += round_up(hdr_len, 4) +
			   round_up(crypto_len, 4);
	}

	if (is_amsdu)
		rfc1042 += sizeof(struct amsdu_subframe_hdr);

	return rfc1042;
}
1044581c25f8SMichal Kazior 
/* Undecap an MSDU delivered in ethernet (DIX) format.
 *
 * Replaces the ethernet header with the cached original 802.11 header
 * plus an rfc1042/llc header recovered from the raw frame snapshot,
 * preserving the DA/SA carried in the ethernet header.
 */
static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar,
					struct sk_buff *msdu,
					struct ieee80211_rx_status *status,
					const u8 first_hdr[64],
					enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ieee80211_hdr *hdr;
	struct ethhdr *eth;
	size_t hdr_len;
	void *rfc1042;
	u8 da[ETH_ALEN];
	u8 sa[ETH_ALEN];

	/* Delivered decapped frame:
	 * [eth header] <-- replaced with 802.11 hdr & rfc1042/llc
	 * [payload]
	 */

	rfc1042 = ath10k_htt_rx_h_find_rfc1042(ar, msdu, enctype);
	if (WARN_ON_ONCE(!rfc1042))
		return;

	/* pull decapped header and copy SA & DA */
	eth = (struct ethhdr *)msdu->data;
	ether_addr_copy(da, eth->h_dest);
	ether_addr_copy(sa, eth->h_source);
	skb_pull(msdu, sizeof(struct ethhdr));

	/* push rfc1042/llc/snap */
	memcpy(skb_push(msdu, sizeof(struct rfc1042_hdr)), rfc1042,
	       sizeof(struct rfc1042_hdr));

	/* push original 802.11 header */
	hdr = (struct ieee80211_hdr *)first_hdr;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);
	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);

	/* original 802.11 header has a different DA and in
	 * case of 4addr it may also have different SA
	 */
	hdr = (struct ieee80211_hdr *)msdu->data;
	ether_addr_copy(ieee80211_get_DA(hdr), da);
	ether_addr_copy(ieee80211_get_SA(hdr), sa);
}
1089581c25f8SMichal Kazior 
1090581c25f8SMichal Kazior static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar,
1091581c25f8SMichal Kazior 					 struct sk_buff *msdu,
1092581c25f8SMichal Kazior 					 struct ieee80211_rx_status *status,
1093581c25f8SMichal Kazior 					 const u8 first_hdr[64])
1094581c25f8SMichal Kazior {
1095581c25f8SMichal Kazior 	struct ieee80211_hdr *hdr;
1096581c25f8SMichal Kazior 	size_t hdr_len;
1097581c25f8SMichal Kazior 
1098581c25f8SMichal Kazior 	/* Delivered decapped frame:
1099581c25f8SMichal Kazior 	 * [amsdu header] <-- replaced with 802.11 hdr
1100581c25f8SMichal Kazior 	 * [rfc1042/llc]
1101581c25f8SMichal Kazior 	 * [payload]
1102581c25f8SMichal Kazior 	 */
1103581c25f8SMichal Kazior 
1104581c25f8SMichal Kazior 	skb_pull(msdu, sizeof(struct amsdu_subframe_hdr));
1105581c25f8SMichal Kazior 
1106581c25f8SMichal Kazior 	hdr = (struct ieee80211_hdr *)first_hdr;
1107581c25f8SMichal Kazior 	hdr_len = ieee80211_hdrlen(hdr->frame_control);
1108581c25f8SMichal Kazior 	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
1109581c25f8SMichal Kazior }
1110581c25f8SMichal Kazior 
1111581c25f8SMichal Kazior static void ath10k_htt_rx_h_undecap(struct ath10k *ar,
1112581c25f8SMichal Kazior 				    struct sk_buff *msdu,
1113581c25f8SMichal Kazior 				    struct ieee80211_rx_status *status,
1114581c25f8SMichal Kazior 				    u8 first_hdr[64],
1115581c25f8SMichal Kazior 				    enum htt_rx_mpdu_encrypt_type enctype,
1116581c25f8SMichal Kazior 				    bool is_decrypted)
1117581c25f8SMichal Kazior {
1118581c25f8SMichal Kazior 	struct htt_rx_desc *rxd;
1119581c25f8SMichal Kazior 	enum rx_msdu_decap_format decap;
1120581c25f8SMichal Kazior 	struct ieee80211_hdr *hdr;
1121581c25f8SMichal Kazior 
1122581c25f8SMichal Kazior 	/* First msdu's decapped header:
1123581c25f8SMichal Kazior 	 * [802.11 header] <-- padded to 4 bytes long
1124581c25f8SMichal Kazior 	 * [crypto param] <-- padded to 4 bytes long
1125581c25f8SMichal Kazior 	 * [amsdu header] <-- only if A-MSDU
1126581c25f8SMichal Kazior 	 * [rfc1042/llc]
1127581c25f8SMichal Kazior 	 *
1128581c25f8SMichal Kazior 	 * Other (2nd, 3rd, ..) msdu's decapped header:
1129581c25f8SMichal Kazior 	 * [amsdu header] <-- only if A-MSDU
1130581c25f8SMichal Kazior 	 * [rfc1042/llc]
1131581c25f8SMichal Kazior 	 */
1132581c25f8SMichal Kazior 
1133581c25f8SMichal Kazior 	rxd = (void *)msdu->data - sizeof(*rxd);
1134581c25f8SMichal Kazior 	hdr = (void *)rxd->rx_hdr_status;
1135581c25f8SMichal Kazior 	decap = MS(__le32_to_cpu(rxd->msdu_start.info1),
1136581c25f8SMichal Kazior 		   RX_MSDU_START_INFO1_DECAP_FORMAT);
1137581c25f8SMichal Kazior 
1138581c25f8SMichal Kazior 	switch (decap) {
1139581c25f8SMichal Kazior 	case RX_MSDU_DECAP_RAW:
1140581c25f8SMichal Kazior 		ath10k_htt_rx_h_undecap_raw(ar, msdu, status, enctype,
1141581c25f8SMichal Kazior 					    is_decrypted);
1142581c25f8SMichal Kazior 		break;
1143581c25f8SMichal Kazior 	case RX_MSDU_DECAP_NATIVE_WIFI:
1144581c25f8SMichal Kazior 		ath10k_htt_rx_h_undecap_nwifi(ar, msdu, status, first_hdr);
1145581c25f8SMichal Kazior 		break;
1146581c25f8SMichal Kazior 	case RX_MSDU_DECAP_ETHERNET2_DIX:
1147581c25f8SMichal Kazior 		ath10k_htt_rx_h_undecap_eth(ar, msdu, status, first_hdr, enctype);
1148581c25f8SMichal Kazior 		break;
1149581c25f8SMichal Kazior 	case RX_MSDU_DECAP_8023_SNAP_LLC:
1150581c25f8SMichal Kazior 		ath10k_htt_rx_h_undecap_snap(ar, msdu, status, first_hdr);
1151581c25f8SMichal Kazior 		break;
1152581c25f8SMichal Kazior 	}
11535e3dd157SKalle Valo }
11545e3dd157SKalle Valo 
1155605f81aaSMichal Kazior static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
1156605f81aaSMichal Kazior {
1157605f81aaSMichal Kazior 	struct htt_rx_desc *rxd;
1158605f81aaSMichal Kazior 	u32 flags, info;
1159605f81aaSMichal Kazior 	bool is_ip4, is_ip6;
1160605f81aaSMichal Kazior 	bool is_tcp, is_udp;
1161605f81aaSMichal Kazior 	bool ip_csum_ok, tcpudp_csum_ok;
1162605f81aaSMichal Kazior 
1163605f81aaSMichal Kazior 	rxd = (void *)skb->data - sizeof(*rxd);
1164605f81aaSMichal Kazior 	flags = __le32_to_cpu(rxd->attention.flags);
1165605f81aaSMichal Kazior 	info = __le32_to_cpu(rxd->msdu_start.info1);
1166605f81aaSMichal Kazior 
1167605f81aaSMichal Kazior 	is_ip4 = !!(info & RX_MSDU_START_INFO1_IPV4_PROTO);
1168605f81aaSMichal Kazior 	is_ip6 = !!(info & RX_MSDU_START_INFO1_IPV6_PROTO);
1169605f81aaSMichal Kazior 	is_tcp = !!(info & RX_MSDU_START_INFO1_TCP_PROTO);
1170605f81aaSMichal Kazior 	is_udp = !!(info & RX_MSDU_START_INFO1_UDP_PROTO);
1171605f81aaSMichal Kazior 	ip_csum_ok = !(flags & RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL);
1172605f81aaSMichal Kazior 	tcpudp_csum_ok = !(flags & RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL);
1173605f81aaSMichal Kazior 
1174605f81aaSMichal Kazior 	if (!is_ip4 && !is_ip6)
1175605f81aaSMichal Kazior 		return CHECKSUM_NONE;
1176605f81aaSMichal Kazior 	if (!is_tcp && !is_udp)
1177605f81aaSMichal Kazior 		return CHECKSUM_NONE;
1178605f81aaSMichal Kazior 	if (!ip_csum_ok)
1179605f81aaSMichal Kazior 		return CHECKSUM_NONE;
1180605f81aaSMichal Kazior 	if (!tcpudp_csum_ok)
1181605f81aaSMichal Kazior 		return CHECKSUM_NONE;
1182605f81aaSMichal Kazior 
1183605f81aaSMichal Kazior 	return CHECKSUM_UNNECESSARY;
1184605f81aaSMichal Kazior }
1185605f81aaSMichal Kazior 
/* Propagate the hw rx checksum verdict to the skb so the stack can skip
 * software checksum verification when hardware already did it.
 */
static void ath10k_htt_rx_h_csum_offload(struct sk_buff *msdu)
{
	msdu->ip_summed = ath10k_htt_rx_get_csum_state(msdu);
}
1190581c25f8SMichal Kazior 
/* Post-process a single MPDU (delivered as a chain of one or more
 * MSDUs): derive crypto/FCS status from the rx descriptors, recompute
 * the per-MPDU flags in @status, offload hardware checksum results and
 * undecap every subframe back into an 802.11 frame.
 *
 * Per-PPDU flags set up earlier in @status are left intact.
 */
static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
				 struct sk_buff_head *amsdu,
				 struct ieee80211_rx_status *status)
{
	struct sk_buff *first;
	struct sk_buff *last;
	struct sk_buff *msdu;
	struct htt_rx_desc *rxd;
	struct ieee80211_hdr *hdr;
	enum htt_rx_mpdu_encrypt_type enctype;
	u8 first_hdr[64];
	u8 *qos;
	size_t hdr_len;
	bool has_fcs_err;
	bool has_crypto_err;
	bool has_tkip_err;
	bool has_peer_idx_invalid;
	bool is_decrypted;
	u32 attention;

	if (skb_queue_empty(amsdu))
		return;

	/* The rx descriptor sits immediately before the frame payload. */
	first = skb_peek(amsdu);
	rxd = (void *)first->data - sizeof(*rxd);

	enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
		     RX_MPDU_START_INFO0_ENCRYPT_TYPE);

	/* First MSDU's Rx descriptor in an A-MSDU contains full 802.11
	 * decapped header. It'll be used for undecapping of each MSDU.
	 */
	/* NOTE(review): assumes ieee80211_hdrlen() can never exceed the 64
	 * byte first_hdr buffer - the largest 802.11 header should be
	 * smaller, but verify.
	 */
	hdr = (void *)rxd->rx_hdr_status;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);
	memcpy(first_hdr, hdr, hdr_len);

	/* Each A-MSDU subframe will use the original header as the base and be
	 * reported as a separate MSDU so strip the A-MSDU bit from QoS Ctl.
	 */
	hdr = (void *)first_hdr;
	qos = ieee80211_get_qos_ctl(hdr);
	qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;

	/* Some attention flags are valid only in the last MSDU. */
	last = skb_peek_tail(amsdu);
	rxd = (void *)last->data - sizeof(*rxd);
	attention = __le32_to_cpu(rxd->attention.flags);

	has_fcs_err = !!(attention & RX_ATTENTION_FLAGS_FCS_ERR);
	has_crypto_err = !!(attention & RX_ATTENTION_FLAGS_DECRYPT_ERR);
	has_tkip_err = !!(attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR);
	has_peer_idx_invalid = !!(attention & RX_ATTENTION_FLAGS_PEER_IDX_INVALID);

	/* Note: If hardware captures an encrypted frame that it can't decrypt,
	 * e.g. due to fcs error, missing peer or invalid key data it will
	 * report the frame as raw.
	 */
	is_decrypted = (enctype != HTT_RX_MPDU_ENCRYPT_NONE &&
			!has_fcs_err &&
			!has_crypto_err &&
			!has_peer_idx_invalid);

	/* Clear per-MPDU flags while leaving per-PPDU flags intact. */
	status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
			  RX_FLAG_MMIC_ERROR |
			  RX_FLAG_DECRYPTED |
			  RX_FLAG_IV_STRIPPED |
			  RX_FLAG_MMIC_STRIPPED);

	if (has_fcs_err)
		status->flag |= RX_FLAG_FAILED_FCS_CRC;

	if (has_tkip_err)
		status->flag |= RX_FLAG_MMIC_ERROR;

	/* Hardware is presumed to have stripped IV/MIC when it reports a
	 * successfully decrypted frame.
	 */
	if (is_decrypted)
		status->flag |= RX_FLAG_DECRYPTED |
				RX_FLAG_IV_STRIPPED |
				RX_FLAG_MMIC_STRIPPED;

	skb_queue_walk(amsdu, msdu) {
		ath10k_htt_rx_h_csum_offload(msdu);
		ath10k_htt_rx_h_undecap(ar, msdu, status, first_hdr, enctype,
					is_decrypted);

		/* Undecapping involves copying the original 802.11 header back
		 * to sk_buff. If frame is protected and hardware has decrypted
		 * it then remove the protected bit.
		 */
		if (!is_decrypted)
			continue;

		hdr = (void *)msdu->data;
		hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
	}
}
1287581c25f8SMichal Kazior 
1288581c25f8SMichal Kazior static void ath10k_htt_rx_h_deliver(struct ath10k *ar,
1289581c25f8SMichal Kazior 				    struct sk_buff_head *amsdu,
1290581c25f8SMichal Kazior 				    struct ieee80211_rx_status *status)
1291581c25f8SMichal Kazior {
1292581c25f8SMichal Kazior 	struct sk_buff *msdu;
1293581c25f8SMichal Kazior 
1294581c25f8SMichal Kazior 	while ((msdu = __skb_dequeue(amsdu))) {
1295581c25f8SMichal Kazior 		/* Setup per-MSDU flags */
1296581c25f8SMichal Kazior 		if (skb_queue_empty(amsdu))
1297581c25f8SMichal Kazior 			status->flag &= ~RX_FLAG_AMSDU_MORE;
1298581c25f8SMichal Kazior 		else
1299581c25f8SMichal Kazior 			status->flag |= RX_FLAG_AMSDU_MORE;
1300581c25f8SMichal Kazior 
1301581c25f8SMichal Kazior 		ath10k_process_rx(ar, status, msdu);
1302581c25f8SMichal Kazior 	}
1303581c25f8SMichal Kazior }
1304581c25f8SMichal Kazior 
13059aa505d2SMichal Kazior static int ath10k_unchain_msdu(struct sk_buff_head *amsdu)
1306bfa35368SBen Greear {
13079aa505d2SMichal Kazior 	struct sk_buff *skb, *first;
1308bfa35368SBen Greear 	int space;
1309bfa35368SBen Greear 	int total_len = 0;
1310bfa35368SBen Greear 
1311bfa35368SBen Greear 	/* TODO:  Might could optimize this by using
1312bfa35368SBen Greear 	 * skb_try_coalesce or similar method to
1313bfa35368SBen Greear 	 * decrease copying, or maybe get mac80211 to
1314bfa35368SBen Greear 	 * provide a way to just receive a list of
1315bfa35368SBen Greear 	 * skb?
1316bfa35368SBen Greear 	 */
1317bfa35368SBen Greear 
13189aa505d2SMichal Kazior 	first = __skb_dequeue(amsdu);
1319bfa35368SBen Greear 
1320bfa35368SBen Greear 	/* Allocate total length all at once. */
13219aa505d2SMichal Kazior 	skb_queue_walk(amsdu, skb)
13229aa505d2SMichal Kazior 		total_len += skb->len;
1323bfa35368SBen Greear 
13249aa505d2SMichal Kazior 	space = total_len - skb_tailroom(first);
1325bfa35368SBen Greear 	if ((space > 0) &&
13269aa505d2SMichal Kazior 	    (pskb_expand_head(first, 0, space, GFP_ATOMIC) < 0)) {
1327bfa35368SBen Greear 		/* TODO:  bump some rx-oom error stat */
1328bfa35368SBen Greear 		/* put it back together so we can free the
1329bfa35368SBen Greear 		 * whole list at once.
1330bfa35368SBen Greear 		 */
13319aa505d2SMichal Kazior 		__skb_queue_head(amsdu, first);
1332bfa35368SBen Greear 		return -1;
1333bfa35368SBen Greear 	}
1334bfa35368SBen Greear 
1335bfa35368SBen Greear 	/* Walk list again, copying contents into
1336bfa35368SBen Greear 	 * msdu_head
1337bfa35368SBen Greear 	 */
13389aa505d2SMichal Kazior 	while ((skb = __skb_dequeue(amsdu))) {
13399aa505d2SMichal Kazior 		skb_copy_from_linear_data(skb, skb_put(first, skb->len),
13409aa505d2SMichal Kazior 					  skb->len);
13419aa505d2SMichal Kazior 		dev_kfree_skb_any(skb);
1342bfa35368SBen Greear 	}
1343bfa35368SBen Greear 
13449aa505d2SMichal Kazior 	__skb_queue_head(amsdu, first);
1345bfa35368SBen Greear 	return 0;
1346bfa35368SBen Greear }
1347bfa35368SBen Greear 
1348581c25f8SMichal Kazior static void ath10k_htt_rx_h_unchain(struct ath10k *ar,
1349581c25f8SMichal Kazior 				    struct sk_buff_head *amsdu,
1350581c25f8SMichal Kazior 				    bool chained)
13512acc4eb2SJanusz Dziedzic {
1352581c25f8SMichal Kazior 	struct sk_buff *first;
1353581c25f8SMichal Kazior 	struct htt_rx_desc *rxd;
1354581c25f8SMichal Kazior 	enum rx_msdu_decap_format decap;
13557aa7a72aSMichal Kazior 
1356581c25f8SMichal Kazior 	first = skb_peek(amsdu);
1357581c25f8SMichal Kazior 	rxd = (void *)first->data - sizeof(*rxd);
1358581c25f8SMichal Kazior 	decap = MS(__le32_to_cpu(rxd->msdu_start.info1),
1359581c25f8SMichal Kazior 		   RX_MSDU_START_INFO1_DECAP_FORMAT);
1360581c25f8SMichal Kazior 
1361581c25f8SMichal Kazior 	if (!chained)
1362581c25f8SMichal Kazior 		return;
1363581c25f8SMichal Kazior 
1364581c25f8SMichal Kazior 	/* FIXME: Current unchaining logic can only handle simple case of raw
1365581c25f8SMichal Kazior 	 * msdu chaining. If decapping is other than raw the chaining may be
1366581c25f8SMichal Kazior 	 * more complex and this isn't handled by the current code. Don't even
1367581c25f8SMichal Kazior 	 * try re-constructing such frames - it'll be pretty much garbage.
1368581c25f8SMichal Kazior 	 */
1369581c25f8SMichal Kazior 	if (decap != RX_MSDU_DECAP_RAW ||
1370581c25f8SMichal Kazior 	    skb_queue_len(amsdu) != 1 + rxd->frag_info.ring2_more_count) {
1371581c25f8SMichal Kazior 		__skb_queue_purge(amsdu);
1372581c25f8SMichal Kazior 		return;
1373581c25f8SMichal Kazior 	}
1374581c25f8SMichal Kazior 
1375581c25f8SMichal Kazior 	ath10k_unchain_msdu(amsdu);
1376581c25f8SMichal Kazior }
1377581c25f8SMichal Kazior 
1378581c25f8SMichal Kazior static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar,
1379581c25f8SMichal Kazior 					struct sk_buff_head *amsdu,
1380581c25f8SMichal Kazior 					struct ieee80211_rx_status *rx_status)
1381581c25f8SMichal Kazior {
1382581c25f8SMichal Kazior 	struct sk_buff *msdu;
1383581c25f8SMichal Kazior 	struct htt_rx_desc *rxd;
1384581c25f8SMichal Kazior 
1385581c25f8SMichal Kazior 	msdu = skb_peek(amsdu);
1386581c25f8SMichal Kazior 	rxd = (void *)msdu->data - sizeof(*rxd);
1387581c25f8SMichal Kazior 
1388581c25f8SMichal Kazior 	/* FIXME: It might be a good idea to do some fuzzy-testing to drop
1389581c25f8SMichal Kazior 	 * invalid/dangerous frames.
1390581c25f8SMichal Kazior 	 */
1391581c25f8SMichal Kazior 
1392581c25f8SMichal Kazior 	if (!rx_status->freq) {
1393581c25f8SMichal Kazior 		ath10k_warn(ar, "no channel configured; ignoring frame(s)!\n");
13942acc4eb2SJanusz Dziedzic 		return false;
13952acc4eb2SJanusz Dziedzic 	}
13962acc4eb2SJanusz Dziedzic 
1397581c25f8SMichal Kazior 	/* Management frames are handled via WMI events. The pros of such
1398581c25f8SMichal Kazior 	 * approach is that channel is explicitly provided in WMI events
1399581c25f8SMichal Kazior 	 * whereas HTT doesn't provide channel information for Rxed frames.
1400581c25f8SMichal Kazior 	 */
1401581c25f8SMichal Kazior 	if (rxd->attention.flags &
1402581c25f8SMichal Kazior 	    __cpu_to_le32(RX_ATTENTION_FLAGS_MGMT_TYPE)) {
14037aa7a72aSMichal Kazior 		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx mgmt ctrl\n");
14042acc4eb2SJanusz Dziedzic 		return false;
14052acc4eb2SJanusz Dziedzic 	}
14062acc4eb2SJanusz Dziedzic 
1407581c25f8SMichal Kazior 	if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) {
1408581c25f8SMichal Kazior 		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx cac running\n");
14092acc4eb2SJanusz Dziedzic 		return false;
14102acc4eb2SJanusz Dziedzic 	}
14112acc4eb2SJanusz Dziedzic 
14122acc4eb2SJanusz Dziedzic 	return true;
14132acc4eb2SJanusz Dziedzic }
14142acc4eb2SJanusz Dziedzic 
/* Drop (purge) the A-MSDU unless it passes the rx sanity checks. */
static void ath10k_htt_rx_h_filter(struct ath10k *ar,
				   struct sk_buff_head *amsdu,
				   struct ieee80211_rx_status *rx_status)
{
	if (skb_queue_empty(amsdu))
		return;

	if (!ath10k_htt_rx_amsdu_allowed(ar, amsdu, rx_status))
		__skb_queue_purge(amsdu);
}
1427581c25f8SMichal Kazior 
/* Handle a deferred HTT_T2H_MSG_TYPE_RX_IND message: pop every
 * indicated MPDU off the rx ring and push it through the rx pipeline
 * (PPDU status, unchaining, filtering, MPDU handling, delivery to
 * mac80211).
 *
 * Runs in the txrx completion tasklet; htt->rx_ring.lock must be held
 * by the caller.
 */
static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
				  struct htt_rx_indication *rx)
{
	struct ath10k *ar = htt->ar;
	struct ieee80211_rx_status *rx_status = &htt->rx_status;
	struct htt_rx_indication_mpdu_range *mpdu_ranges;
	struct sk_buff_head amsdu;
	int num_mpdu_ranges;
	int fw_desc_len;
	u8 *fw_desc;
	int i, ret, mpdu_count = 0;

	lockdep_assert_held(&htt->rx_ring.lock);

	/* Once the ring has been deemed corrupted no further rx is
	 * attempted until the device is reset.
	 */
	if (htt->rx_confused)
		return;

	fw_desc_len = __le16_to_cpu(rx->prefix.fw_rx_desc_bytes);
	fw_desc = (u8 *)&rx->fw_desc;

	num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
			     HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
	mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx);

	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
			rx, sizeof(*rx) +
			(sizeof(struct htt_rx_indication_mpdu_range) *
				num_mpdu_ranges));

	/* Total number of MPDUs across all ranges in this indication. */
	for (i = 0; i < num_mpdu_ranges; i++)
		mpdu_count += mpdu_ranges[i].mpdu_count;

	while (mpdu_count--) {
		__skb_queue_head_init(&amsdu);
		ret = ath10k_htt_rx_amsdu_pop(htt, &fw_desc,
					      &fw_desc_len, &amsdu);
		if (ret < 0) {
			ath10k_warn(ar, "rx ring became corrupted: %d\n", ret);
			__skb_queue_purge(&amsdu);
			/* FIXME: It's probably a good idea to reboot the
			 * device instead of leaving it inoperable.
			 */
			htt->rx_confused = true;
			break;
		}

		/* ret > 0 indicates the MSDU was chained across rx
		 * buffers; ath10k_htt_rx_h_unchain() re-assembles it.
		 */
		ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status);
		ath10k_htt_rx_h_unchain(ar, &amsdu, ret > 0);
		ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
		ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status);
		ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status);
	}

	/* Refill the rx ring from tasklet context. */
	tasklet_schedule(&htt->rx_replenish_task);
}
14835e3dd157SKalle Valo 
/* Handle an HTT rx fragment indication: pop the (single) fragmented
 * MSDU off the rx ring and run it through filtering and MPDU
 * processing before delivering it to mac80211.
 */
static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
				       struct htt_rx_fragment_indication *frag)
{
	struct ath10k *ar = htt->ar;
	struct ieee80211_rx_status *rx_status = &htt->rx_status;
	struct sk_buff_head amsdu;
	int ret;
	u8 *fw_desc;
	int fw_desc_len;

	fw_desc_len = __le16_to_cpu(frag->fw_rx_desc_bytes);
	fw_desc = (u8 *)frag->fw_msdu_rx_desc;

	__skb_queue_head_init(&amsdu);

	/* Ring state is shared; only pop while holding the ring lock. */
	spin_lock_bh(&htt->rx_ring.lock);
	ret = ath10k_htt_rx_amsdu_pop(htt, &fw_desc, &fw_desc_len,
				      &amsdu);
	spin_unlock_bh(&htt->rx_ring.lock);

	tasklet_schedule(&htt->rx_replenish_task);

	ath10k_dbg(ar, ATH10K_DBG_HTT_DUMP, "htt rx frag ahead\n");

	if (ret) {
		ath10k_warn(ar, "failed to pop amsdu from httr rx ring for fragmented rx %d\n",
			    ret);
		__skb_queue_purge(&amsdu);
		return;
	}

	/* A fragment indication is expected to carry exactly one MSDU. */
	if (skb_queue_len(&amsdu) != 1) {
		ath10k_warn(ar, "failed to pop frag amsdu: too many msdus\n");
		__skb_queue_purge(&amsdu);
		return;
	}

	/* FIXME: implement signal strength */
	rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;

	ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
	ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status);
	ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status);

	if (fw_desc_len > 0) {
		ath10k_dbg(ar, ATH10K_DBG_HTT,
			   "expecting more fragmented rx in one indication %d\n",
			   fw_desc_len);
	}
}
15345e3dd157SKalle Valo 
15356c5151a9SMichal Kazior static void ath10k_htt_rx_frm_tx_compl(struct ath10k *ar,
15366c5151a9SMichal Kazior 				       struct sk_buff *skb)
15376c5151a9SMichal Kazior {
15386c5151a9SMichal Kazior 	struct ath10k_htt *htt = &ar->htt;
15396c5151a9SMichal Kazior 	struct htt_resp *resp = (struct htt_resp *)skb->data;
15406c5151a9SMichal Kazior 	struct htt_tx_done tx_done = {};
15416c5151a9SMichal Kazior 	int status = MS(resp->data_tx_completion.flags, HTT_DATA_TX_STATUS);
15426c5151a9SMichal Kazior 	__le16 msdu_id;
15436c5151a9SMichal Kazior 	int i;
15446c5151a9SMichal Kazior 
154545967089SMichal Kazior 	lockdep_assert_held(&htt->tx_lock);
154645967089SMichal Kazior 
15476c5151a9SMichal Kazior 	switch (status) {
15486c5151a9SMichal Kazior 	case HTT_DATA_TX_STATUS_NO_ACK:
15496c5151a9SMichal Kazior 		tx_done.no_ack = true;
15506c5151a9SMichal Kazior 		break;
15516c5151a9SMichal Kazior 	case HTT_DATA_TX_STATUS_OK:
15526c5151a9SMichal Kazior 		break;
15536c5151a9SMichal Kazior 	case HTT_DATA_TX_STATUS_DISCARD:
15546c5151a9SMichal Kazior 	case HTT_DATA_TX_STATUS_POSTPONE:
15556c5151a9SMichal Kazior 	case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL:
15566c5151a9SMichal Kazior 		tx_done.discard = true;
15576c5151a9SMichal Kazior 		break;
15586c5151a9SMichal Kazior 	default:
15597aa7a72aSMichal Kazior 		ath10k_warn(ar, "unhandled tx completion status %d\n", status);
15606c5151a9SMichal Kazior 		tx_done.discard = true;
15616c5151a9SMichal Kazior 		break;
15626c5151a9SMichal Kazior 	}
15636c5151a9SMichal Kazior 
15647aa7a72aSMichal Kazior 	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
15656c5151a9SMichal Kazior 		   resp->data_tx_completion.num_msdus);
15666c5151a9SMichal Kazior 
15676c5151a9SMichal Kazior 	for (i = 0; i < resp->data_tx_completion.num_msdus; i++) {
15686c5151a9SMichal Kazior 		msdu_id = resp->data_tx_completion.msdus[i];
15696c5151a9SMichal Kazior 		tx_done.msdu_id = __le16_to_cpu(msdu_id);
15706c5151a9SMichal Kazior 		ath10k_txrx_tx_unref(htt, &tx_done);
15716c5151a9SMichal Kazior 	}
15726c5151a9SMichal Kazior }
15736c5151a9SMichal Kazior 
1574aa5b4fbcSMichal Kazior static void ath10k_htt_rx_addba(struct ath10k *ar, struct htt_resp *resp)
1575aa5b4fbcSMichal Kazior {
1576aa5b4fbcSMichal Kazior 	struct htt_rx_addba *ev = &resp->rx_addba;
1577aa5b4fbcSMichal Kazior 	struct ath10k_peer *peer;
1578aa5b4fbcSMichal Kazior 	struct ath10k_vif *arvif;
1579aa5b4fbcSMichal Kazior 	u16 info0, tid, peer_id;
1580aa5b4fbcSMichal Kazior 
1581aa5b4fbcSMichal Kazior 	info0 = __le16_to_cpu(ev->info0);
1582aa5b4fbcSMichal Kazior 	tid = MS(info0, HTT_RX_BA_INFO0_TID);
1583aa5b4fbcSMichal Kazior 	peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);
1584aa5b4fbcSMichal Kazior 
15857aa7a72aSMichal Kazior 	ath10k_dbg(ar, ATH10K_DBG_HTT,
1586aa5b4fbcSMichal Kazior 		   "htt rx addba tid %hu peer_id %hu size %hhu\n",
1587aa5b4fbcSMichal Kazior 		   tid, peer_id, ev->window_size);
1588aa5b4fbcSMichal Kazior 
1589aa5b4fbcSMichal Kazior 	spin_lock_bh(&ar->data_lock);
1590aa5b4fbcSMichal Kazior 	peer = ath10k_peer_find_by_id(ar, peer_id);
1591aa5b4fbcSMichal Kazior 	if (!peer) {
15927aa7a72aSMichal Kazior 		ath10k_warn(ar, "received addba event for invalid peer_id: %hu\n",
1593aa5b4fbcSMichal Kazior 			    peer_id);
1594aa5b4fbcSMichal Kazior 		spin_unlock_bh(&ar->data_lock);
1595aa5b4fbcSMichal Kazior 		return;
1596aa5b4fbcSMichal Kazior 	}
1597aa5b4fbcSMichal Kazior 
1598aa5b4fbcSMichal Kazior 	arvif = ath10k_get_arvif(ar, peer->vdev_id);
1599aa5b4fbcSMichal Kazior 	if (!arvif) {
16007aa7a72aSMichal Kazior 		ath10k_warn(ar, "received addba event for invalid vdev_id: %u\n",
1601aa5b4fbcSMichal Kazior 			    peer->vdev_id);
1602aa5b4fbcSMichal Kazior 		spin_unlock_bh(&ar->data_lock);
1603aa5b4fbcSMichal Kazior 		return;
1604aa5b4fbcSMichal Kazior 	}
1605aa5b4fbcSMichal Kazior 
16067aa7a72aSMichal Kazior 	ath10k_dbg(ar, ATH10K_DBG_HTT,
1607aa5b4fbcSMichal Kazior 		   "htt rx start rx ba session sta %pM tid %hu size %hhu\n",
1608aa5b4fbcSMichal Kazior 		   peer->addr, tid, ev->window_size);
1609aa5b4fbcSMichal Kazior 
1610aa5b4fbcSMichal Kazior 	ieee80211_start_rx_ba_session_offl(arvif->vif, peer->addr, tid);
1611aa5b4fbcSMichal Kazior 	spin_unlock_bh(&ar->data_lock);
1612aa5b4fbcSMichal Kazior }
1613aa5b4fbcSMichal Kazior 
1614aa5b4fbcSMichal Kazior static void ath10k_htt_rx_delba(struct ath10k *ar, struct htt_resp *resp)
1615aa5b4fbcSMichal Kazior {
1616aa5b4fbcSMichal Kazior 	struct htt_rx_delba *ev = &resp->rx_delba;
1617aa5b4fbcSMichal Kazior 	struct ath10k_peer *peer;
1618aa5b4fbcSMichal Kazior 	struct ath10k_vif *arvif;
1619aa5b4fbcSMichal Kazior 	u16 info0, tid, peer_id;
1620aa5b4fbcSMichal Kazior 
1621aa5b4fbcSMichal Kazior 	info0 = __le16_to_cpu(ev->info0);
1622aa5b4fbcSMichal Kazior 	tid = MS(info0, HTT_RX_BA_INFO0_TID);
1623aa5b4fbcSMichal Kazior 	peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);
1624aa5b4fbcSMichal Kazior 
16257aa7a72aSMichal Kazior 	ath10k_dbg(ar, ATH10K_DBG_HTT,
1626aa5b4fbcSMichal Kazior 		   "htt rx delba tid %hu peer_id %hu\n",
1627aa5b4fbcSMichal Kazior 		   tid, peer_id);
1628aa5b4fbcSMichal Kazior 
1629aa5b4fbcSMichal Kazior 	spin_lock_bh(&ar->data_lock);
1630aa5b4fbcSMichal Kazior 	peer = ath10k_peer_find_by_id(ar, peer_id);
1631aa5b4fbcSMichal Kazior 	if (!peer) {
16327aa7a72aSMichal Kazior 		ath10k_warn(ar, "received addba event for invalid peer_id: %hu\n",
1633aa5b4fbcSMichal Kazior 			    peer_id);
1634aa5b4fbcSMichal Kazior 		spin_unlock_bh(&ar->data_lock);
1635aa5b4fbcSMichal Kazior 		return;
1636aa5b4fbcSMichal Kazior 	}
1637aa5b4fbcSMichal Kazior 
1638aa5b4fbcSMichal Kazior 	arvif = ath10k_get_arvif(ar, peer->vdev_id);
1639aa5b4fbcSMichal Kazior 	if (!arvif) {
16407aa7a72aSMichal Kazior 		ath10k_warn(ar, "received addba event for invalid vdev_id: %u\n",
1641aa5b4fbcSMichal Kazior 			    peer->vdev_id);
1642aa5b4fbcSMichal Kazior 		spin_unlock_bh(&ar->data_lock);
1643aa5b4fbcSMichal Kazior 		return;
1644aa5b4fbcSMichal Kazior 	}
1645aa5b4fbcSMichal Kazior 
16467aa7a72aSMichal Kazior 	ath10k_dbg(ar, ATH10K_DBG_HTT,
1647aa5b4fbcSMichal Kazior 		   "htt rx stop rx ba session sta %pM tid %hu\n",
1648aa5b4fbcSMichal Kazior 		   peer->addr, tid);
1649aa5b4fbcSMichal Kazior 
1650aa5b4fbcSMichal Kazior 	ieee80211_stop_rx_ba_session_offl(arvif->vif, peer->addr, tid);
1651aa5b4fbcSMichal Kazior 	spin_unlock_bh(&ar->data_lock);
1652aa5b4fbcSMichal Kazior }
1653aa5b4fbcSMichal Kazior 
/* Top-level dispatcher for HTT target-to-host messages.
 *
 * Short messages are handled inline and the skb is freed at the end.
 * Rx indications and data tx completions are instead queued for the
 * txrx completion tasklet - note the early returns in those cases,
 * which transfer skb ownership to the queue (the skb must NOT be
 * freed here).
 */
void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_resp *resp = (struct htt_resp *)skb->data;

	/* confirm alignment */
	if (!IS_ALIGNED((unsigned long)skb->data, 4))
		ath10k_warn(ar, "unaligned htt message, expect trouble\n");

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, msg_type: 0x%0X\n",
		   resp->hdr.msg_type);
	switch (resp->hdr.msg_type) {
	case HTT_T2H_MSG_TYPE_VERSION_CONF: {
		htt->target_version_major = resp->ver_resp.major;
		htt->target_version_minor = resp->ver_resp.minor;
		complete(&htt->target_version_received);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_IND:
		/* Defer to the tasklet; skb ownership moves to rx_compl_q. */
		spin_lock_bh(&htt->rx_ring.lock);
		__skb_queue_tail(&htt->rx_compl_q, skb);
		spin_unlock_bh(&htt->rx_ring.lock);
		tasklet_schedule(&htt->txrx_compl_task);
		return;
	case HTT_T2H_MSG_TYPE_PEER_MAP: {
		struct htt_peer_map_event ev = {
			.vdev_id = resp->peer_map.vdev_id,
			.peer_id = __le16_to_cpu(resp->peer_map.peer_id),
		};
		memcpy(ev.addr, resp->peer_map.addr, sizeof(ev.addr));
		ath10k_peer_map_event(htt, &ev);
		break;
	}
	case HTT_T2H_MSG_TYPE_PEER_UNMAP: {
		struct htt_peer_unmap_event ev = {
			.peer_id = __le16_to_cpu(resp->peer_unmap.peer_id),
		};
		ath10k_peer_unmap_event(htt, &ev);
		break;
	}
	case HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION: {
		struct htt_tx_done tx_done = {};
		int status = __le32_to_cpu(resp->mgmt_tx_completion.status);

		tx_done.msdu_id =
			__le32_to_cpu(resp->mgmt_tx_completion.desc_id);

		switch (status) {
		case HTT_MGMT_TX_STATUS_OK:
			break;
		case HTT_MGMT_TX_STATUS_RETRY:
			tx_done.no_ack = true;
			break;
		case HTT_MGMT_TX_STATUS_DROP:
			tx_done.discard = true;
			break;
		}

		spin_lock_bh(&htt->tx_lock);
		ath10k_txrx_tx_unref(htt, &tx_done);
		spin_unlock_bh(&htt->tx_lock);
		break;
	}
	case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
		/* Defer to the tasklet; skb ownership moves to tx_compl_q. */
		spin_lock_bh(&htt->tx_lock);
		__skb_queue_tail(&htt->tx_compl_q, skb);
		spin_unlock_bh(&htt->tx_lock);
		tasklet_schedule(&htt->txrx_compl_task);
		return;
	case HTT_T2H_MSG_TYPE_SEC_IND: {
		struct ath10k *ar = htt->ar;
		struct htt_security_indication *ev = &resp->security_indication;

		ath10k_dbg(ar, ATH10K_DBG_HTT,
			   "sec ind peer_id %d unicast %d type %d\n",
			  __le16_to_cpu(ev->peer_id),
			  !!(ev->flags & HTT_SECURITY_IS_UNICAST),
			  MS(ev->flags, HTT_SECURITY_TYPE));
		complete(&ar->install_key_done);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_FRAG_IND: {
		ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
				skb->data, skb->len);
		ath10k_htt_rx_frag_handler(htt, &resp->rx_frag_ind);
		break;
	}
	case HTT_T2H_MSG_TYPE_TEST:
		/* FIX THIS */
		break;
	case HTT_T2H_MSG_TYPE_STATS_CONF:
		trace_ath10k_htt_stats(ar, skb->data, skb->len);
		break;
	case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
		/* Firmware can return tx frames if it's unable to fully
		 * process them and suspects host may be able to fix it. ath10k
		 * sends all tx frames as already inspected so this shouldn't
		 * happen unless fw has a bug.
		 */
		ath10k_warn(ar, "received an unexpected htt tx inspect event\n");
		break;
	case HTT_T2H_MSG_TYPE_RX_ADDBA:
		ath10k_htt_rx_addba(ar, resp);
		break;
	case HTT_T2H_MSG_TYPE_RX_DELBA:
		ath10k_htt_rx_delba(ar, resp);
		break;
	case HTT_T2H_MSG_TYPE_PKTLOG: {
		struct ath10k_pktlog_hdr *hdr =
			(struct ath10k_pktlog_hdr *)resp->pktlog_msg.payload;

		trace_ath10k_htt_pktlog(ar, resp->pktlog_msg.payload,
					sizeof(*hdr) +
					__le16_to_cpu(hdr->size));
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_FLUSH: {
		/* Ignore this event because mac80211 takes care of Rx
		 * aggregation reordering.
		 */
		break;
	}
	default:
		ath10k_warn(ar, "htt event (%d) not handled\n",
			    resp->hdr.msg_type);
		ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
				skb->data, skb->len);
		break;
	};

	/* Free the indication buffer */
	dev_kfree_skb_any(skb);
}
17876c5151a9SMichal Kazior 
17886c5151a9SMichal Kazior static void ath10k_htt_txrx_compl_task(unsigned long ptr)
17896c5151a9SMichal Kazior {
17906c5151a9SMichal Kazior 	struct ath10k_htt *htt = (struct ath10k_htt *)ptr;
17916c5151a9SMichal Kazior 	struct htt_resp *resp;
17926c5151a9SMichal Kazior 	struct sk_buff *skb;
17936c5151a9SMichal Kazior 
179445967089SMichal Kazior 	spin_lock_bh(&htt->tx_lock);
179545967089SMichal Kazior 	while ((skb = __skb_dequeue(&htt->tx_compl_q))) {
17966c5151a9SMichal Kazior 		ath10k_htt_rx_frm_tx_compl(htt->ar, skb);
17976c5151a9SMichal Kazior 		dev_kfree_skb_any(skb);
17986c5151a9SMichal Kazior 	}
179945967089SMichal Kazior 	spin_unlock_bh(&htt->tx_lock);
18006c5151a9SMichal Kazior 
180145967089SMichal Kazior 	spin_lock_bh(&htt->rx_ring.lock);
180245967089SMichal Kazior 	while ((skb = __skb_dequeue(&htt->rx_compl_q))) {
18036c5151a9SMichal Kazior 		resp = (struct htt_resp *)skb->data;
18046c5151a9SMichal Kazior 		ath10k_htt_rx_handler(htt, &resp->rx_ind);
18056c5151a9SMichal Kazior 		dev_kfree_skb_any(skb);
18066c5151a9SMichal Kazior 	}
180745967089SMichal Kazior 	spin_unlock_bh(&htt->rx_ring.lock);
18086c5151a9SMichal Kazior }
1809