// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
 */

#include <linux/ieee80211.h>
#include "core.h"
#include "debug.h"
#include "hal_desc.h"
#include "hw.h"
#include "dp_rx.h"
#include "hal_rx.h"
#include "dp_tx.h"
#include "peer.h"

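/* Accessors for fields of the hardware rx descriptor (struct hal_rx_desc).
 * Each helper pulls a single field out of the attention/mpdu/msdu TLVs.
 */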
static u8 *ath11k_dp_rx_h_80211_hdr(struct hal_rx_desc *desc)
{
	return desc->hdr_status;
}

static enum hal_encrypt_type ath11k_dp_rx_h_mpdu_start_enctype(struct hal_rx_desc *desc)
{
	if (!(__le32_to_cpu(desc->mpdu_start.info1) &
	    RX_MPDU_START_INFO1_ENCRYPT_INFO_VALID))
		return HAL_ENCRYPT_TYPE_OPEN;

	return FIELD_GET(RX_MPDU_START_INFO2_ENC_TYPE,
			 __le32_to_cpu(desc->mpdu_start.info2));
}

static u8 ath11k_dp_rx_h_mpdu_start_decap_type(struct hal_rx_desc *desc)
{
	return FIELD_GET(RX_MPDU_START_INFO5_DECAP_TYPE,
			 __le32_to_cpu(desc->mpdu_start.info5));
}

static bool ath11k_dp_rx_h_attn_msdu_done(struct hal_rx_desc *desc)
{
	return !!FIELD_GET(RX_ATTENTION_INFO2_MSDU_DONE,
			   __le32_to_cpu(desc->attention.info2));
}

static bool ath11k_dp_rx_h_attn_first_mpdu(struct hal_rx_desc *desc)
{
	return !!FIELD_GET(RX_ATTENTION_INFO1_FIRST_MPDU,
			   __le32_to_cpu(desc->attention.info1));
}

static bool ath11k_dp_rx_h_attn_l4_cksum_fail(struct hal_rx_desc *desc)
{
	return !!FIELD_GET(RX_ATTENTION_INFO1_TCP_UDP_CKSUM_FAIL,
			   __le32_to_cpu(desc->attention.info1));
}

static bool ath11k_dp_rx_h_attn_ip_cksum_fail(struct hal_rx_desc *desc)
{
	return !!FIELD_GET(RX_ATTENTION_INFO1_IP_CKSUM_FAIL,
			   __le32_to_cpu(desc->attention.info1));
}

static bool ath11k_dp_rx_h_attn_is_decrypted(struct hal_rx_desc *desc)
{
	return (FIELD_GET(RX_ATTENTION_INFO2_DCRYPT_STATUS_CODE,
			  __le32_to_cpu(desc->attention.info2)) ==
		RX_DESC_DECRYPT_STATUS_CODE_OK);
}

static u32 ath11k_dp_rx_h_attn_mpdu_err(struct hal_rx_desc *desc)
{
	u32 info = __le32_to_cpu(desc->attention.info1);
	u32 errmap = 0;

	if (info & RX_ATTENTION_INFO1_FCS_ERR)
		errmap |= DP_RX_MPDU_ERR_FCS;

	if (info & RX_ATTENTION_INFO1_DECRYPT_ERR)
		errmap |= DP_RX_MPDU_ERR_DECRYPT;

	if (info & RX_ATTENTION_INFO1_TKIP_MIC_ERR)
		errmap |= DP_RX_MPDU_ERR_TKIP_MIC;

	if (info & RX_ATTENTION_INFO1_A_MSDU_ERROR)
		errmap |= DP_RX_MPDU_ERR_AMSDU_ERR;

	if (info & RX_ATTENTION_INFO1_OVERFLOW_ERR)
		errmap |= DP_RX_MPDU_ERR_OVERFLOW;

	if (info & RX_ATTENTION_INFO1_MSDU_LEN_ERR)
		errmap |= DP_RX_MPDU_ERR_MSDU_LEN;

	if (info & RX_ATTENTION_INFO1_MPDU_LEN_ERR)
		errmap |= DP_RX_MPDU_ERR_MPDU_LEN;

	return errmap;
}

static u16 ath11k_dp_rx_h_msdu_start_msdu_len(struct hal_rx_desc *desc)
{
	return FIELD_GET(RX_MSDU_START_INFO1_MSDU_LENGTH,
			 __le32_to_cpu(desc->msdu_start.info1));
}

static u8 ath11k_dp_rx_h_msdu_start_sgi(struct hal_rx_desc *desc)
{
	return FIELD_GET(RX_MSDU_START_INFO3_SGI,
			 __le32_to_cpu(desc->msdu_start.info3));
}

static u8 ath11k_dp_rx_h_msdu_start_rate_mcs(struct hal_rx_desc *desc)
{
	return FIELD_GET(RX_MSDU_START_INFO3_RATE_MCS,
			 __le32_to_cpu(desc->msdu_start.info3));
}

static u8 ath11k_dp_rx_h_msdu_start_rx_bw(struct hal_rx_desc *desc)
{
	return FIELD_GET(RX_MSDU_START_INFO3_RECV_BW,
			 __le32_to_cpu(desc->msdu_start.info3));
}

static u32 ath11k_dp_rx_h_msdu_start_freq(struct hal_rx_desc *desc)
{
	return __le32_to_cpu(desc->msdu_start.phy_meta_data);
}

static u8 ath11k_dp_rx_h_msdu_start_pkt_type(struct hal_rx_desc *desc)
{
	return FIELD_GET(RX_MSDU_START_INFO3_PKT_TYPE,
			 __le32_to_cpu(desc->msdu_start.info3));
}

static u8 ath11k_dp_rx_h_msdu_start_nss(struct hal_rx_desc *desc)
{
	u8 mimo_ss_bitmap = FIELD_GET(RX_MSDU_START_INFO3_MIMO_SS_BITMAP,
				      __le32_to_cpu(desc->msdu_start.info3));

	return hweight8(mimo_ss_bitmap);
}

static u8 ath11k_dp_rx_h_msdu_end_l3pad(struct hal_rx_desc *desc)
{
	return FIELD_GET(RX_MSDU_END_INFO2_L3_HDR_PADDING,
			 __le32_to_cpu(desc->msdu_end.info2));
}

static bool ath11k_dp_rx_h_msdu_end_first_msdu(struct hal_rx_desc *desc)
{
	return !!FIELD_GET(RX_MSDU_END_INFO2_FIRST_MSDU,
			   __le32_to_cpu(desc->msdu_end.info2));
}

static bool ath11k_dp_rx_h_msdu_end_last_msdu(struct hal_rx_desc *desc)
{
	return !!FIELD_GET(RX_MSDU_END_INFO2_LAST_MSDU,
			   __le32_to_cpu(desc->msdu_end.info2));
}

static void ath11k_dp_rx_desc_end_tlv_copy(struct hal_rx_desc *fdesc,
					   struct hal_rx_desc *ldesc)
{
	memcpy((u8 *)&fdesc->msdu_end, (u8 *)&ldesc->msdu_end,
	       sizeof(struct rx_msdu_end));
	memcpy((u8 *)&fdesc->attention, (u8 *)&ldesc->attention,
	       sizeof(struct rx_attention));
	memcpy((u8 *)&fdesc->mpdu_end, (u8 *)&ldesc->mpdu_end,
	       sizeof(struct rx_mpdu_end));
}

static u32 ath11k_dp_rxdesc_get_mpdulen_err(struct hal_rx_desc *rx_desc)
{
	struct rx_attention *rx_attn;

	rx_attn = &rx_desc->attention;

	return FIELD_GET(RX_ATTENTION_INFO1_MPDU_LEN_ERR,
			 __le32_to_cpu(rx_attn->info1));
}

static u32 ath11k_dp_rxdesc_get_decap_format(struct hal_rx_desc *rx_desc)
{
	struct rx_msdu_start *rx_msdu_start;

	rx_msdu_start = &rx_desc->msdu_start;

	return FIELD_GET(RX_MSDU_START_INFO2_DECAP_FORMAT,
			 __le32_to_cpu(rx_msdu_start->info2));
}

static u8 *ath11k_dp_rxdesc_get_80211hdr(struct hal_rx_desc *rx_desc)
{
	u8 *rx_pkt_hdr;

	rx_pkt_hdr = &rx_desc->msdu_payload[0];

	return rx_pkt_hdr;
}

static bool ath11k_dp_rxdesc_mpdu_valid(struct hal_rx_desc *rx_desc)
{
	u32 tlv_tag;

	tlv_tag = FIELD_GET(HAL_TLV_HDR_TAG,
			    __le32_to_cpu(rx_desc->mpdu_start_tag));

	return tlv_tag == HAL_RX_MPDU_START;
}

static u32 ath11k_dp_rxdesc_get_ppduid(struct hal_rx_desc *rx_desc)
{
	return __le16_to_cpu(rx_desc->mpdu_start.phy_ppdu_id);
}

/* Returns number of Rx buffers replenished */
int ath11k_dp_rxbufs_replenish(struct ath11k_base *ab, int mac_id,
			       struct dp_rxdma_ring *rx_ring,
			       int req_entries,
			       enum hal_rx_buf_return_buf_manager mgr,
			       gfp_t gfp)
{
	struct hal_srng *srng;
	u32 *desc;
	struct sk_buff *skb;
	int num_free;
	int num_remain;
	int buf_id;
	u32 cookie;
	dma_addr_t paddr;

	req_entries = min(req_entries, rx_ring->bufs_max);

	srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	num_free = ath11k_hal_srng_src_num_free(ab, srng, true);
	if (!req_entries && (num_free > (rx_ring->bufs_max * 3) / 4))
		req_entries = num_free;

	req_entries = min(num_free, req_entries);
	num_remain = req_entries;

	while (num_remain > 0) {
		skb = dev_alloc_skb(DP_RX_BUFFER_SIZE +
				    DP_RX_BUFFER_ALIGN_SIZE);
		if (!skb)
			break;

		if (!IS_ALIGNED((unsigned long)skb->data,
				DP_RX_BUFFER_ALIGN_SIZE)) {
			skb_pull(skb,
				 PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) -
				 skb->data);
		}

		paddr = dma_map_single(ab->dev, skb->data,
				       skb->len + skb_tailroom(skb),
				       DMA_FROM_DEVICE);
		if (dma_mapping_error(ab->dev, paddr))
			goto fail_free_skb;

		spin_lock_bh(&rx_ring->idr_lock);
		buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0,
				   rx_ring->bufs_max * 3, gfp);
		spin_unlock_bh(&rx_ring->idr_lock);
		if (buf_id < 0)
			goto fail_dma_unmap;

		desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
		if (!desc)
			goto fail_idr_remove;

		ATH11K_SKB_RXCB(skb)->paddr = paddr;

		cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) |
			 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);

		num_remain--;

		ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr);
	}

	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	return req_entries - num_remain;

fail_idr_remove:
	spin_lock_bh(&rx_ring->idr_lock);
	idr_remove(&rx_ring->bufs_idr, buf_id);
	spin_unlock_bh(&rx_ring->idr_lock);
fail_dma_unmap:
	dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
			 DMA_FROM_DEVICE);
fail_free_skb:
	dev_kfree_skb_any(skb);

	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	return req_entries - num_remain;
}

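/* Free all buffers posted to the given RXDMA buffer ring and to the monitor
 * status refill ring: unmap and free every skb tracked in the ring's IDR,
 * then destroy the IDR.
 */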
static int ath11k_dp_rxdma_buf_ring_free(struct ath11k *ar,
					 struct dp_rxdma_ring *rx_ring)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct sk_buff *skb;
	int buf_id;

	spin_lock_bh(&rx_ring->idr_lock);
	idr_for_each_entry(&rx_ring->bufs_idr, skb, buf_id) {
		idr_remove(&rx_ring->bufs_idr, buf_id);
317d5c65159SKalle Valo 		/* TODO: Understand where internal driver does this dma_unmap of
318d5c65159SKalle Valo 		 * of rxdma_buffer.
319d5c65159SKalle Valo 		 */
		dma_unmap_single(ar->ab->dev, ATH11K_SKB_RXCB(skb)->paddr,
				 skb->len + skb_tailroom(skb), DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
	}

	idr_destroy(&rx_ring->bufs_idr);
	spin_unlock_bh(&rx_ring->idr_lock);

	rx_ring = &dp->rx_mon_status_refill_ring;

	spin_lock_bh(&rx_ring->idr_lock);
	idr_for_each_entry(&rx_ring->bufs_idr, skb, buf_id) {
		idr_remove(&rx_ring->bufs_idr, buf_id);
		/* XXX: Understand where the internal driver does this
		 * dma_unmap of the rxdma_buffer.
		 */
		dma_unmap_single(ar->ab->dev, ATH11K_SKB_RXCB(skb)->paddr,
				 skb->len + skb_tailroom(skb), DMA_BIDIRECTIONAL);
		dev_kfree_skb_any(skb);
	}

	idr_destroy(&rx_ring->bufs_idr);
	spin_unlock_bh(&rx_ring->idr_lock);
	return 0;
}

static int ath11k_dp_rxdma_pdev_buf_free(struct ath11k *ar)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;

	ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);

	rx_ring = &dp->rxdma_mon_buf_ring;
	ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);

	rx_ring = &dp->rx_mon_status_refill_ring;
	ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);
	return 0;
}

static int ath11k_dp_rxdma_ring_buf_setup(struct ath11k *ar,
					  struct dp_rxdma_ring *rx_ring,
					  u32 ringtype)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	int num_entries;

	num_entries = rx_ring->refill_buf_ring.size /
		      ath11k_hal_srng_get_entrysize(ringtype);

	rx_ring->bufs_max = num_entries;
	ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id, rx_ring, num_entries,
				   HAL_RX_BUF_RBM_SW3_BM, GFP_KERNEL);
	return 0;
}

static int ath11k_dp_rxdma_pdev_buf_setup(struct ath11k *ar)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;

	ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_BUF);

	rx_ring = &dp->rxdma_mon_buf_ring;
	ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_MONITOR_BUF);

	rx_ring = &dp->rx_mon_status_refill_ring;
	ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_MONITOR_STATUS);

	return 0;
}

static void ath11k_dp_rx_pdev_srng_free(struct ath11k *ar)
{
	struct ath11k_pdev_dp *dp = &ar->dp;

	ath11k_dp_srng_cleanup(ar->ab, &dp->rx_refill_buf_ring.refill_buf_ring);
	ath11k_dp_srng_cleanup(ar->ab, &dp->rxdma_err_dst_ring);
	ath11k_dp_srng_cleanup(ar->ab, &dp->rx_mon_status_refill_ring.refill_buf_ring);
	ath11k_dp_srng_cleanup(ar->ab, &dp->rxdma_mon_buf_ring.refill_buf_ring);
}

void ath11k_dp_pdev_reo_cleanup(struct ath11k_base *ab)
{
	struct ath11k_pdev_dp *dp;
	struct ath11k *ar;
	int i;

	for (i = 0; i < ab->num_radios; i++) {
		ar = ab->pdevs[i].ar;
		dp = &ar->dp;
		ath11k_dp_srng_cleanup(ab, &dp->reo_dst_ring);
	}
}

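/* Allocate one REO destination ring per radio (pdev); on any failure the
 * rings set up so far are cleaned up again.
 */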
int ath11k_dp_pdev_reo_setup(struct ath11k_base *ab)
{
	struct ath11k *ar;
	struct ath11k_pdev_dp *dp;
	int ret;
	int i;

	for (i = 0; i < ab->num_radios; i++) {
		ar = ab->pdevs[i].ar;
		dp = &ar->dp;
		ret = ath11k_dp_srng_setup(ab, &dp->reo_dst_ring, HAL_REO_DST,
					   dp->mac_id, dp->mac_id,
					   DP_REO_DST_RING_SIZE);
		if (ret) {
			ath11k_warn(ar->ab, "failed to setup reo_dst_ring\n");
			goto err_reo_cleanup;
		}
	}

	return 0;

err_reo_cleanup:
	ath11k_dp_pdev_reo_cleanup(ab);

	return ret;
}

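/* Allocate the per-pdev rx SRNGs: the refill buffer ring, the RXDMA error
 * destination ring and the monitor status/buf/dst/desc rings.
 */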
static int ath11k_dp_rx_pdev_srng_alloc(struct ath11k *ar)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct dp_srng *srng = NULL;
	int ret;

	ret = ath11k_dp_srng_setup(ar->ab,
				   &dp->rx_refill_buf_ring.refill_buf_ring,
				   HAL_RXDMA_BUF, 0,
				   dp->mac_id, DP_RXDMA_BUF_RING_SIZE);
	if (ret) {
		ath11k_warn(ar->ab, "failed to setup rx_refill_buf_ring\n");
		return ret;
	}

	ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_err_dst_ring,
				   HAL_RXDMA_DST, 0, dp->mac_id,
				   DP_RXDMA_ERR_DST_RING_SIZE);
	if (ret) {
		ath11k_warn(ar->ab, "failed to setup rxdma_err_dst_ring\n");
		return ret;
	}

	srng = &dp->rx_mon_status_refill_ring.refill_buf_ring;
	ret = ath11k_dp_srng_setup(ar->ab,
				   srng,
				   HAL_RXDMA_MONITOR_STATUS, 0, dp->mac_id,
				   DP_RXDMA_MON_STATUS_RING_SIZE);
	if (ret) {
		ath11k_warn(ar->ab,
			    "failed to setup rx_mon_status_refill_ring\n");
		return ret;
	}
	ret = ath11k_dp_srng_setup(ar->ab,
				   &dp->rxdma_mon_buf_ring.refill_buf_ring,
				   HAL_RXDMA_MONITOR_BUF, 0, dp->mac_id,
				   DP_RXDMA_MONITOR_BUF_RING_SIZE);
	if (ret) {
		ath11k_warn(ar->ab,
			    "failed to setup HAL_RXDMA_MONITOR_BUF\n");
		return ret;
	}

	ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_mon_dst_ring,
				   HAL_RXDMA_MONITOR_DST, 0, dp->mac_id,
				   DP_RXDMA_MONITOR_DST_RING_SIZE);
	if (ret) {
		ath11k_warn(ar->ab,
			    "failed to setup HAL_RXDMA_MONITOR_DST\n");
		return ret;
	}

	ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_mon_desc_ring,
				   HAL_RXDMA_MONITOR_DESC, 0, dp->mac_id,
				   DP_RXDMA_MONITOR_DESC_RING_SIZE);
	if (ret) {
		ath11k_warn(ar->ab,
			    "failed to setup HAL_RXDMA_MONITOR_DESC\n");
		return ret;
	}

	return 0;
}

void ath11k_dp_reo_cmd_list_cleanup(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	struct dp_reo_cmd *cmd, *tmp;
	struct dp_reo_cache_flush_elem *cmd_cache, *tmp_cache;

	spin_lock_bh(&dp->reo_cmd_lock);
	list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
		list_del(&cmd->list);
		dma_unmap_single(ab->dev, cmd->data.paddr,
				 cmd->data.size, DMA_BIDIRECTIONAL);
		kfree(cmd->data.vaddr);
		kfree(cmd);
	}

	list_for_each_entry_safe(cmd_cache, tmp_cache,
				 &dp->reo_cmd_cache_flush_list, list) {
		list_del(&cmd_cache->list);
		dma_unmap_single(ab->dev, cmd_cache->data.paddr,
				 cmd_cache->data.size, DMA_BIDIRECTIONAL);
		kfree(cmd_cache->data.vaddr);
		kfree(cmd_cache);
	}
	spin_unlock_bh(&dp->reo_cmd_lock);
}

static void ath11k_dp_reo_cmd_free(struct ath11k_dp *dp, void *ctx,
				   enum hal_reo_cmd_status status)
{
	struct dp_rx_tid *rx_tid = ctx;

	if (status != HAL_REO_CMD_SUCCESS)
		ath11k_warn(dp->ab, "failed to flush rx tid hw desc, tid %d status %d\n",
			    rx_tid->tid, status);

	dma_unmap_single(dp->ab->dev, rx_tid->paddr, rx_tid->size,
			 DMA_BIDIRECTIONAL);
	kfree(rx_tid->vaddr);
}

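/* Flush the hardware REO cache for a torn-down rx tid. The queue descriptor
 * is flushed in chunks of the non-QoS descriptor size; the final command
 * requests status so ath11k_dp_reo_cmd_free() can unmap and free the memory.
 */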
static void ath11k_dp_reo_cache_flush(struct ath11k_base *ab,
				      struct dp_rx_tid *rx_tid)
{
	struct ath11k_hal_reo_cmd cmd = {0};
	unsigned long tot_desc_sz, desc_sz;
	int ret;

	tot_desc_sz = rx_tid->size;
	desc_sz = ath11k_hal_reo_qdesc_size(0, HAL_DESC_REO_NON_QOS_TID);

	while (tot_desc_sz > desc_sz) {
		tot_desc_sz -= desc_sz;
		cmd.addr_lo = lower_32_bits(rx_tid->paddr + tot_desc_sz);
		cmd.addr_hi = upper_32_bits(rx_tid->paddr);
		ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid,
						HAL_REO_CMD_FLUSH_CACHE, &cmd,
						NULL);
		if (ret)
			ath11k_warn(ab,
				    "failed to send HAL_REO_CMD_FLUSH_CACHE, tid %d (%d)\n",
				    rx_tid->tid, ret);
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.addr_lo = lower_32_bits(rx_tid->paddr);
	cmd.addr_hi = upper_32_bits(rx_tid->paddr);
	cmd.flag |= HAL_REO_CMD_FLG_NEED_STATUS;
	ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid,
					HAL_REO_CMD_FLUSH_CACHE,
					&cmd, ath11k_dp_reo_cmd_free);
	if (ret) {
		ath11k_err(ab, "failed to send HAL_REO_CMD_FLUSH_CACHE cmd, tid %d (%d)\n",
			   rx_tid->tid, ret);
		dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
				 DMA_BIDIRECTIONAL);
		kfree(rx_tid->vaddr);
	}
}

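/* Completion handler for the UPDATE_RX_QUEUE command sent from
 * ath11k_peer_rx_tid_delete(): queue the descriptor on the cache flush list
 * and flush entries that have aged past DP_REO_DESC_FREE_TIMEOUT_MS.
 */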
static void ath11k_dp_rx_tid_del_func(struct ath11k_dp *dp, void *ctx,
				      enum hal_reo_cmd_status status)
{
	struct ath11k_base *ab = dp->ab;
	struct dp_rx_tid *rx_tid = ctx;
	struct dp_reo_cache_flush_elem *elem, *tmp;

	if (status == HAL_REO_CMD_DRAIN) {
		goto free_desc;
	} else if (status != HAL_REO_CMD_SUCCESS) {
		/* Shouldn't happen! Cleanup in case of other failure? */
		ath11k_warn(ab, "failed to delete rx tid %d hw descriptor %d\n",
			    rx_tid->tid, status);
		return;
	}

	elem = kzalloc(sizeof(*elem), GFP_ATOMIC);
	if (!elem)
		goto free_desc;

	elem->ts = jiffies;
	memcpy(&elem->data, rx_tid, sizeof(*rx_tid));

	spin_lock_bh(&dp->reo_cmd_lock);
	list_add_tail(&elem->list, &dp->reo_cmd_cache_flush_list);
	spin_unlock_bh(&dp->reo_cmd_lock);

	/* Flush and invalidate aged REO desc from HW cache */
	spin_lock_bh(&dp->reo_cmd_lock);
	list_for_each_entry_safe(elem, tmp, &dp->reo_cmd_cache_flush_list,
				 list) {
		if (time_after(jiffies, elem->ts +
			       msecs_to_jiffies(DP_REO_DESC_FREE_TIMEOUT_MS))) {
			list_del(&elem->list);
			spin_unlock_bh(&dp->reo_cmd_lock);

			ath11k_dp_reo_cache_flush(ab, &elem->data);
			kfree(elem);
			spin_lock_bh(&dp->reo_cmd_lock);
		}
	}
	spin_unlock_bh(&dp->reo_cmd_lock);

	return;
free_desc:
	dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
			 DMA_BIDIRECTIONAL);
	kfree(rx_tid->vaddr);
}

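/* Tear down the hardware rx reorder queue for one tid: mark the queue
 * descriptor invalid via HAL_REO_CMD_UPDATE_RX_QUEUE and defer freeing of
 * the descriptor to ath11k_dp_rx_tid_del_func().
 */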
void ath11k_peer_rx_tid_delete(struct ath11k *ar,
			       struct ath11k_peer *peer, u8 tid)
{
	struct ath11k_hal_reo_cmd cmd = {0};
	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
	int ret;

	if (!rx_tid->active)
		return;

	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
	cmd.addr_lo = lower_32_bits(rx_tid->paddr);
	cmd.addr_hi = upper_32_bits(rx_tid->paddr);
	cmd.upd0 |= HAL_REO_CMD_UPD0_VLD;
	ret = ath11k_dp_tx_send_reo_cmd(ar->ab, rx_tid,
					HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
					ath11k_dp_rx_tid_del_func);
	if (ret) {
		ath11k_err(ar->ab, "failed to send HAL_REO_CMD_UPDATE_RX_QUEUE cmd, tid %d (%d)\n",
			   tid, ret);
		dma_unmap_single(ar->ab->dev, rx_tid->paddr, rx_tid->size,
				 DMA_BIDIRECTIONAL);
		kfree(rx_tid->vaddr);
	}

	rx_tid->active = false;
}

void ath11k_peer_rx_tid_cleanup(struct ath11k *ar, struct ath11k_peer *peer)
{
	int i;

	for (i = 0; i <= IEEE80211_NUM_TIDS; i++)
		ath11k_peer_rx_tid_delete(ar, peer, i);
}

static int ath11k_peer_rx_tid_reo_update(struct ath11k *ar,
					 struct ath11k_peer *peer,
					 struct dp_rx_tid *rx_tid,
					 u32 ba_win_sz, u16 ssn,
					 bool update_ssn)
{
	struct ath11k_hal_reo_cmd cmd = {0};
	int ret;

	cmd.addr_lo = lower_32_bits(rx_tid->paddr);
	cmd.addr_hi = upper_32_bits(rx_tid->paddr);
	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
	cmd.upd0 = HAL_REO_CMD_UPD0_BA_WINDOW_SIZE;
	cmd.ba_window_size = ba_win_sz;

	if (update_ssn) {
		cmd.upd0 |= HAL_REO_CMD_UPD0_SSN;
		cmd.upd2 = FIELD_PREP(HAL_REO_CMD_UPD2_SSN, ssn);
	}

	ret = ath11k_dp_tx_send_reo_cmd(ar->ab, rx_tid,
					HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
					NULL);
	if (ret) {
		ath11k_warn(ar->ab, "failed to update rx tid queue, tid %d (%d)\n",
			    rx_tid->tid, ret);
		return ret;
	}

	rx_tid->ba_win_sz = ba_win_sz;

	return 0;
}

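/* Unmap and free the rx tid queue descriptor when the WMI reorder queue
 * setup for an already allocated tid fails.
 */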
static void ath11k_dp_rx_tid_mem_free(struct ath11k_base *ab,
				      const u8 *peer_mac, int vdev_id, u8 tid)
{
	struct ath11k_peer *peer;
	struct dp_rx_tid *rx_tid;

	spin_lock_bh(&ab->base_lock);

	peer = ath11k_peer_find(ab, vdev_id, peer_mac);
	if (!peer) {
		ath11k_warn(ab, "failed to find the peer to free up rx tid mem\n");
		goto unlock_exit;
	}

	rx_tid = &peer->rx_tid[tid];
	if (!rx_tid->active)
		goto unlock_exit;

	dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
			 DMA_BIDIRECTIONAL);
	kfree(rx_tid->vaddr);

	rx_tid->active = false;

unlock_exit:
	spin_unlock_bh(&ab->base_lock);
}

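/* Allocate and initialize the hardware rx reorder queue descriptor for a
 * peer/tid (or update BA window size and SSN if it already exists) and tell
 * the firmware about it via WMI.
 */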
int ath11k_peer_rx_tid_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id,
			     u8 tid, u32 ba_win_sz, u16 ssn)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_peer *peer;
	struct dp_rx_tid *rx_tid;
	u32 hw_desc_sz;
	u32 *addr_aligned;
	void *vaddr;
	dma_addr_t paddr;
	int ret;

	spin_lock_bh(&ab->base_lock);

	peer = ath11k_peer_find(ab, vdev_id, peer_mac);
	if (!peer) {
		ath11k_warn(ab, "failed to find the peer to set up rx tid\n");
		spin_unlock_bh(&ab->base_lock);
		return -ENOENT;
	}

	rx_tid = &peer->rx_tid[tid];
	/* Update the tid queue if it is already setup */
	if (rx_tid->active) {
		paddr = rx_tid->paddr;
		ret = ath11k_peer_rx_tid_reo_update(ar, peer, rx_tid,
						    ba_win_sz, ssn, true);
		spin_unlock_bh(&ab->base_lock);
		if (ret) {
			ath11k_warn(ab, "failed to update reo for rx tid %d\n", tid);
			return ret;
		}

		ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id,
							     peer_mac, paddr,
							     tid, 1, ba_win_sz);
		if (ret)
			ath11k_warn(ab, "failed to send wmi command to update rx reorder queue, tid :%d (%d)\n",
				    tid, ret);
		return ret;
	}

	rx_tid->tid = tid;

	rx_tid->ba_win_sz = ba_win_sz;

	/* TODO: Optimize the memory allocation for qos tid based on
	 * the actual BA window size in REO tid update path.
	 */
	if (tid == HAL_DESC_REO_NON_QOS_TID)
		hw_desc_sz = ath11k_hal_reo_qdesc_size(ba_win_sz, tid);
	else
		hw_desc_sz = ath11k_hal_reo_qdesc_size(DP_BA_WIN_SZ_MAX, tid);

	vaddr = kzalloc(hw_desc_sz + HAL_LINK_DESC_ALIGN - 1, GFP_KERNEL);
	if (!vaddr) {
		spin_unlock_bh(&ab->base_lock);
		return -ENOMEM;
	}

	addr_aligned = PTR_ALIGN(vaddr, HAL_LINK_DESC_ALIGN);

	ath11k_hal_reo_qdesc_setup(addr_aligned, tid, ba_win_sz, ssn);

	paddr = dma_map_single(ab->dev, addr_aligned, hw_desc_sz,
			       DMA_BIDIRECTIONAL);

	ret = dma_mapping_error(ab->dev, paddr);
	if (ret) {
		spin_unlock_bh(&ab->base_lock);
		goto err_mem_free;
	}

	rx_tid->vaddr = vaddr;
	rx_tid->paddr = paddr;
	rx_tid->size = hw_desc_sz;
	rx_tid->active = true;

	spin_unlock_bh(&ab->base_lock);

	ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, peer_mac,
						     paddr, tid, 1, ba_win_sz);
	if (ret) {
		ath11k_warn(ar->ab, "failed to setup rx reorder queue, tid :%d (%d)\n",
			    tid, ret);
		ath11k_dp_rx_tid_mem_free(ab, peer_mac, vdev_id, tid);
	}

	return ret;

err_mem_free:
	kfree(vaddr);

	return ret;
}

int ath11k_dp_rx_ampdu_start(struct ath11k *ar,
			     struct ieee80211_ampdu_params *params)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_sta *arsta = (void *)params->sta->drv_priv;
	int vdev_id = arsta->arvif->vdev_id;
	int ret;

	ret = ath11k_peer_rx_tid_setup(ar, params->sta->addr, vdev_id,
				       params->tid, params->buf_size,
				       params->ssn);
	if (ret)
		ath11k_warn(ab, "failed to setup rx tid: %d\n", ret);

	return ret;
}

int ath11k_dp_rx_ampdu_stop(struct ath11k *ar,
			    struct ieee80211_ampdu_params *params)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_peer *peer;
	struct ath11k_sta *arsta = (void *)params->sta->drv_priv;
	int vdev_id = arsta->arvif->vdev_id;
	dma_addr_t paddr;
	bool active;
	int ret;

	spin_lock_bh(&ab->base_lock);

	peer = ath11k_peer_find(ab, vdev_id, params->sta->addr);
	if (!peer) {
		ath11k_warn(ab, "failed to find the peer to stop rx aggregation\n");
		spin_unlock_bh(&ab->base_lock);
		return -ENOENT;
	}

	paddr = peer->rx_tid[params->tid].paddr;
	active = peer->rx_tid[params->tid].active;

	if (!active) {
		spin_unlock_bh(&ab->base_lock);
		return 0;
	}

	ret = ath11k_peer_rx_tid_reo_update(ar, peer, peer->rx_tid, 1, 0, false);
	spin_unlock_bh(&ab->base_lock);
	if (ret) {
		ath11k_warn(ab, "failed to update reo for rx tid %d: %d\n",
			    params->tid, ret);
		return ret;
	}

	ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id,
						     params->sta->addr, paddr,
						     params->tid, 1, 1);
	if (ret)
		ath11k_warn(ab, "failed to send wmi to delete rx tid: %d\n",
			    ret);

	return ret;
}

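/* Find the PPDU stats slot for a peer id, or the first unused slot if the
 * peer has not been seen in this PPDU yet.
 */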
static int ath11k_get_ppdu_user_index(struct htt_ppdu_stats *ppdu_stats,
				      u16 peer_id)
{
	int i;

	for (i = 0; i < HTT_PPDU_STATS_MAX_USERS - 1; i++) {
		if (ppdu_stats->user_stats[i].is_valid_peer_id) {
			if (peer_id == ppdu_stats->user_stats[i].peer_id)
				return i;
		} else {
			return i;
		}
	}

	return -EINVAL;
}

static int ath11k_htt_tlv_ppdu_stats_parse(struct ath11k_base *ab,
					   u16 tag, u16 len, const void *ptr,
					   void *data)
{
	struct htt_ppdu_stats_info *ppdu_info;
	struct htt_ppdu_user_stats *user_stats;
	int cur_user;
	u16 peer_id;

	ppdu_info = (struct htt_ppdu_stats_info *)data;

	switch (tag) {
	case HTT_PPDU_STATS_TAG_COMMON:
		if (len < sizeof(struct htt_ppdu_stats_common)) {
			ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}
		memcpy((void *)&ppdu_info->ppdu_stats.common, ptr,
		       sizeof(struct htt_ppdu_stats_common));
		break;
	case HTT_PPDU_STATS_TAG_USR_RATE:
		if (len < sizeof(struct htt_ppdu_stats_user_rate)) {
			ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}

		peer_id = ((struct htt_ppdu_stats_user_rate *)ptr)->sw_peer_id;
		cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
						      peer_id);
		if (cur_user < 0)
			return -EINVAL;
		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
		user_stats->peer_id = peer_id;
		user_stats->is_valid_peer_id = true;
		memcpy((void *)&user_stats->rate, ptr,
		       sizeof(struct htt_ppdu_stats_user_rate));
		user_stats->tlv_flags |= BIT(tag);
		break;
	case HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON:
		if (len < sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn)) {
			ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}

		peer_id = ((struct htt_ppdu_stats_usr_cmpltn_cmn *)ptr)->sw_peer_id;
		cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
						      peer_id);
		if (cur_user < 0)
			return -EINVAL;
		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
		user_stats->peer_id = peer_id;
		user_stats->is_valid_peer_id = true;
		memcpy((void *)&user_stats->cmpltn_cmn, ptr,
		       sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn));
		user_stats->tlv_flags |= BIT(tag);
		break;
	case HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS:
		if (len <
		    sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status)) {
			ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}

		peer_id =
		((struct htt_ppdu_stats_usr_cmpltn_ack_ba_status *)ptr)->sw_peer_id;
		cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
						      peer_id);
		if (cur_user < 0)
			return -EINVAL;
		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
		user_stats->peer_id = peer_id;
		user_stats->is_valid_peer_id = true;
		memcpy((void *)&user_stats->ack_ba, ptr,
		       sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status));
		user_stats->tlv_flags |= BIT(tag);
		break;
	}
	return 0;
}

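/* Walk a buffer of HTT TLVs, validating each header and the remaining
 * length, and call @iter for every TLV payload.
 */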
int ath11k_dp_htt_tlv_iter(struct ath11k_base *ab, const void *ptr, size_t len,
			   int (*iter)(struct ath11k_base *ar, u16 tag, u16 len,
				       const void *ptr, void *data),
			   void *data)
{
	const struct htt_tlv *tlv;
	const void *begin = ptr;
	u16 tlv_tag, tlv_len;
	int ret = -EINVAL;

	while (len > 0) {
		if (len < sizeof(*tlv)) {
			ath11k_err(ab, "htt tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
				   ptr - begin, len, sizeof(*tlv));
			return -EINVAL;
		}
		tlv = (struct htt_tlv *)ptr;
		tlv_tag = FIELD_GET(HTT_TLV_TAG, tlv->header);
		tlv_len = FIELD_GET(HTT_TLV_LEN, tlv->header);
		ptr += sizeof(*tlv);
		len -= sizeof(*tlv);

		if (tlv_len > len) {
			ath11k_err(ab, "htt tlv parse failure of tag %u at byte %zd (%zu bytes left, %u expected)\n",
				   tlv_tag, ptr - begin, len, tlv_len);
			return -EINVAL;
		}
		ret = iter(ab, tlv_tag, tlv_len, ptr, data);
		if (ret == -ENOMEM)
			return ret;

		ptr += tlv_len;
		len -= tlv_len;
	}
	return 0;
}

static inline u32 ath11k_he_gi_to_nl80211_he_gi(u8 sgi)
{
	u32 ret = 0;

	switch (sgi) {
	case RX_MSDU_START_SGI_0_8_US:
		ret = NL80211_RATE_INFO_HE_GI_0_8;
		break;
	case RX_MSDU_START_SGI_1_6_US:
		ret = NL80211_RATE_INFO_HE_GI_1_6;
		break;
	case RX_MSDU_START_SGI_3_2_US:
		ret = NL80211_RATE_INFO_HE_GI_3_2;
		break;
	}

	return ret;
}

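/* Translate the rate info reported in a PPDU stats TLV into the station's
 * tx rate (arsta->txrate) and accumulate per-peer tx statistics.
 */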
static void
ath11k_update_per_peer_tx_stats(struct ath11k *ar,
				struct htt_ppdu_stats *ppdu_stats, u8 user)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_peer *peer;
	struct ieee80211_sta *sta;
	struct ath11k_sta *arsta;
	struct htt_ppdu_stats_user_rate *user_rate;
	struct ath11k_per_peer_tx_stats *peer_stats = &ar->peer_tx_stats;
	struct htt_ppdu_user_stats *usr_stats = &ppdu_stats->user_stats[user];
	struct htt_ppdu_stats_common *common = &ppdu_stats->common;
	int ret;
	u8 flags, mcs, nss, bw, sgi, dcm, rate_idx = 0;
	u32 succ_bytes = 0;
	u16 rate = 0, succ_pkts = 0;
	u32 tx_duration = 0;
	u8 tid = HTT_PPDU_STATS_NON_QOS_TID;
	bool is_ampdu = false;

	if (!usr_stats)
		return;

	if (!(usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_RATE)))
		return;

	if (usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON))
		is_ampdu =
			HTT_USR_CMPLTN_IS_AMPDU(usr_stats->cmpltn_cmn.flags);

	if (usr_stats->tlv_flags &
	    BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS)) {
		succ_bytes = usr_stats->ack_ba.success_bytes;
		succ_pkts = FIELD_GET(HTT_PPDU_STATS_ACK_BA_INFO_NUM_MSDU_M,
				      usr_stats->ack_ba.info);
		tid = FIELD_GET(HTT_PPDU_STATS_ACK_BA_INFO_TID_NUM,
				usr_stats->ack_ba.info);
	}

	if (common->fes_duration_us)
		tx_duration = common->fes_duration_us;

	user_rate = &usr_stats->rate;
	flags = HTT_USR_RATE_PREAMBLE(user_rate->rate_flags);
	bw = HTT_USR_RATE_BW(user_rate->rate_flags) - 2;
	nss = HTT_USR_RATE_NSS(user_rate->rate_flags) + 1;
	mcs = HTT_USR_RATE_MCS(user_rate->rate_flags);
	sgi = HTT_USR_RATE_GI(user_rate->rate_flags);
	dcm = HTT_USR_RATE_DCM(user_rate->rate_flags);

	/* Note: If the host configured fixed rates, or in some other special
	 * cases, broadcast/management frames are sent at different rates.
	 * Should firmware rate control be skipped for those?
	 */

	if (flags == WMI_RATE_PREAMBLE_HE && mcs > ATH11K_HE_MCS_MAX) {
		ath11k_warn(ab, "Invalid HE mcs %hhd peer stats", mcs);
		return;
	}
11146a0c3702SJohn Crispin 
11156a0c3702SJohn Crispin 	if (flags == WMI_RATE_PREAMBLE_VHT && mcs > ATH11K_VHT_MCS_MAX) {
1116d5c65159SKalle Valo 		ath11k_warn(ab, "Invalid VHT mcs %hhd peer stats",  mcs);
1117d5c65159SKalle Valo 		return;
1118d5c65159SKalle Valo 	}
1119d5c65159SKalle Valo 
11206a0c3702SJohn Crispin 	if (flags == WMI_RATE_PREAMBLE_HT && (mcs > ATH11K_HT_MCS_MAX || nss < 1)) {
1121d5c65159SKalle Valo 		ath11k_warn(ab, "Invalid HT mcs %hhd nss %hhd peer stats",
1122d5c65159SKalle Valo 			    mcs, nss);
1123d5c65159SKalle Valo 		return;
1124d5c65159SKalle Valo 	}
1125d5c65159SKalle Valo 
1126d5c65159SKalle Valo 	if (flags == WMI_RATE_PREAMBLE_CCK || flags == WMI_RATE_PREAMBLE_OFDM) {
1127d5c65159SKalle Valo 		ret = ath11k_mac_hw_ratecode_to_legacy_rate(mcs,
1128d5c65159SKalle Valo 							    flags,
1129d5c65159SKalle Valo 							    &rate_idx,
1130d5c65159SKalle Valo 							    &rate);
1131d5c65159SKalle Valo 		if (ret < 0)
1132d5c65159SKalle Valo 			return;
1133d5c65159SKalle Valo 	}
1134d5c65159SKalle Valo 
1135d5c65159SKalle Valo 	rcu_read_lock();
1136d5c65159SKalle Valo 	spin_lock_bh(&ab->base_lock);
1137d5c65159SKalle Valo 	peer = ath11k_peer_find_by_id(ab, usr_stats->peer_id);
1138d5c65159SKalle Valo 
1139d5c65159SKalle Valo 	if (!peer || !peer->sta) {
1140d5c65159SKalle Valo 		spin_unlock_bh(&ab->base_lock);
1141d5c65159SKalle Valo 		rcu_read_unlock();
1142d5c65159SKalle Valo 		return;
1143d5c65159SKalle Valo 	}
1144d5c65159SKalle Valo 
1145d5c65159SKalle Valo 	sta = peer->sta;
1146d5c65159SKalle Valo 	arsta = (struct ath11k_sta *)sta->drv_priv;
1147d5c65159SKalle Valo 
1148d5c65159SKalle Valo 	memset(&arsta->txrate, 0, sizeof(arsta->txrate));
1149d5c65159SKalle Valo 
1150d5c65159SKalle Valo 	switch (flags) {
1151d5c65159SKalle Valo 	case WMI_RATE_PREAMBLE_OFDM:
1154d5c65159SKalle Valo 	case WMI_RATE_PREAMBLE_CCK:
1155d5c65159SKalle Valo 		arsta->txrate.legacy = rate;
1156d5c65159SKalle Valo 		break;
1157d5c65159SKalle Valo 	case WMI_RATE_PREAMBLE_HT:
1158d5c65159SKalle Valo 		arsta->txrate.mcs = mcs + 8 * (nss - 1);
1159d5c65159SKalle Valo 		arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
1160be43ce64SJohn Crispin 		if (sgi)
1161d5c65159SKalle Valo 			arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
1162d5c65159SKalle Valo 		break;
1163d5c65159SKalle Valo 	case WMI_RATE_PREAMBLE_VHT:
1164d5c65159SKalle Valo 		arsta->txrate.mcs = mcs;
1165d5c65159SKalle Valo 		arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
1166be43ce64SJohn Crispin 		if (sgi)
1167d5c65159SKalle Valo 			arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
1168d5c65159SKalle Valo 		break;
11696a0c3702SJohn Crispin 	case WMI_RATE_PREAMBLE_HE:
11706a0c3702SJohn Crispin 		arsta->txrate.mcs = mcs;
11716a0c3702SJohn Crispin 		arsta->txrate.flags = RATE_INFO_FLAGS_HE_MCS;
11726a0c3702SJohn Crispin 		arsta->txrate.he_dcm = dcm;
11736a0c3702SJohn Crispin 		arsta->txrate.he_gi = ath11k_he_gi_to_nl80211_he_gi(sgi);
11746a0c3702SJohn Crispin 		arsta->txrate.he_ru_alloc = ath11k_he_ru_tones_to_nl80211_he_ru_alloc(
11756a0c3702SJohn Crispin 						(user_rate->ru_end -
11766a0c3702SJohn Crispin 						 user_rate->ru_start) + 1);
11776a0c3702SJohn Crispin 		break;
1178d5c65159SKalle Valo 	}
1179d5c65159SKalle Valo 
1180d5c65159SKalle Valo 	arsta->txrate.nss = nss;
118139e81c6aSTamizh chelvam 	arsta->txrate.bw = ath11k_mac_bw_to_mac80211_bw(bw);
1182a9e945eaSVenkateswara Naralasetty 	arsta->tx_duration += tx_duration;
1183d5c65159SKalle Valo 	memcpy(&arsta->last_txrate, &arsta->txrate, sizeof(struct rate_info));
1184d5c65159SKalle Valo 
1185b9269a07SVenkateswara Naralasetty 	/* PPDU stats reported for mgmt packets don't carry valid tx bytes,
1186b9269a07SVenkateswara Naralasetty 	 * so skip the peer stats update for mgmt packets.
1187b9269a07SVenkateswara Naralasetty 	 */
1188b9269a07SVenkateswara Naralasetty 	if (tid < HTT_PPDU_STATS_NON_QOS_TID) {
1189d5c65159SKalle Valo 		memset(peer_stats, 0, sizeof(*peer_stats));
1190d5c65159SKalle Valo 		peer_stats->succ_pkts = succ_pkts;
1191d5c65159SKalle Valo 		peer_stats->succ_bytes = succ_bytes;
1192d5c65159SKalle Valo 		peer_stats->is_ampdu = is_ampdu;
1193d5c65159SKalle Valo 		peer_stats->duration = tx_duration;
1194d5c65159SKalle Valo 		peer_stats->ba_fails =
1195d5c65159SKalle Valo 			HTT_USR_CMPLTN_LONG_RETRY(usr_stats->cmpltn_cmn.flags) +
1196d5c65159SKalle Valo 			HTT_USR_CMPLTN_SHORT_RETRY(usr_stats->cmpltn_cmn.flags);
1197d5c65159SKalle Valo 
1198d5c65159SKalle Valo 		if (ath11k_debug_is_extd_tx_stats_enabled(ar))
1199d5c65159SKalle Valo 			ath11k_accumulate_per_peer_tx_stats(arsta,
1200d5c65159SKalle Valo 							    peer_stats, rate_idx);
1201b9269a07SVenkateswara Naralasetty 	}
1202d5c65159SKalle Valo 
1203d5c65159SKalle Valo 	spin_unlock_bh(&ab->base_lock);
1204d5c65159SKalle Valo 	rcu_read_unlock();
1205d5c65159SKalle Valo }
1206d5c65159SKalle Valo 
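/* Update the per-peer tx stats from every user entry of this PPDU
 * descriptor.
 */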
1207d5c65159SKalle Valo static void ath11k_htt_update_ppdu_stats(struct ath11k *ar,
1208d5c65159SKalle Valo 					 struct htt_ppdu_stats *ppdu_stats)
1209d5c65159SKalle Valo {
1210d5c65159SKalle Valo 	u8 user;
1211d5c65159SKalle Valo 
1212d5c65159SKalle Valo 	for (user = 0; user < HTT_PPDU_STATS_MAX_USERS - 1; user++)
1213d5c65159SKalle Valo 		ath11k_update_per_peer_tx_stats(ar, ppdu_stats, user);
1214d5c65159SKalle Valo }
1215d5c65159SKalle Valo 
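/* Return the cached PPDU stats descriptor for ppdu_id, if one exists.
 * Otherwise, when the list has grown beyond HTT_PPDU_DESC_MAX_DEPTH,
 * flush the oldest entry into the per-peer tx stats and free it, then
 * allocate a fresh descriptor and queue it on ar->ppdu_stats_info.
 */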
1216d5c65159SKalle Valo static
1217d5c65159SKalle Valo struct htt_ppdu_stats_info *ath11k_dp_htt_get_ppdu_desc(struct ath11k *ar,
1218d5c65159SKalle Valo 							u32 ppdu_id)
1219d5c65159SKalle Valo {
1220269663f1SDan Carpenter 	struct htt_ppdu_stats_info *ppdu_info;
1221d5c65159SKalle Valo 
1222d5c65159SKalle Valo 	spin_lock_bh(&ar->data_lock);
1223d5c65159SKalle Valo 	if (!list_empty(&ar->ppdu_stats_info)) {
1224d5c65159SKalle Valo 		list_for_each_entry(ppdu_info, &ar->ppdu_stats_info, list) {
1225269663f1SDan Carpenter 			if (ppdu_info->ppdu_id == ppdu_id) {
1226d5c65159SKalle Valo 				spin_unlock_bh(&ar->data_lock);
1227d5c65159SKalle Valo 				return ppdu_info;
1228d5c65159SKalle Valo 			}
1229d5c65159SKalle Valo 		}
1230d5c65159SKalle Valo 
1231d5c65159SKalle Valo 		if (ar->ppdu_stat_list_depth > HTT_PPDU_DESC_MAX_DEPTH) {
1232d5c65159SKalle Valo 			ppdu_info = list_first_entry(&ar->ppdu_stats_info,
1233d5c65159SKalle Valo 						     typeof(*ppdu_info), list);
1234d5c65159SKalle Valo 			list_del(&ppdu_info->list);
1235d5c65159SKalle Valo 			ar->ppdu_stat_list_depth--;
1236d5c65159SKalle Valo 			ath11k_htt_update_ppdu_stats(ar, &ppdu_info->ppdu_stats);
1237d5c65159SKalle Valo 			kfree(ppdu_info);
1238d5c65159SKalle Valo 		}
1239d5c65159SKalle Valo 	}
1240d5c65159SKalle Valo 	spin_unlock_bh(&ar->data_lock);
1241d5c65159SKalle Valo 
1242d5c65159SKalle Valo 	ppdu_info = kzalloc(sizeof(*ppdu_info), GFP_KERNEL);
1243d5c65159SKalle Valo 	if (!ppdu_info)
1244d5c65159SKalle Valo 		return NULL;
1245d5c65159SKalle Valo 
1246d5c65159SKalle Valo 	spin_lock_bh(&ar->data_lock);
1247d5c65159SKalle Valo 	list_add_tail(&ppdu_info->list, &ar->ppdu_stats_info);
1248d5c65159SKalle Valo 	ar->ppdu_stat_list_depth++;
1249d5c65159SKalle Valo 	spin_unlock_bh(&ar->data_lock);
1250d5c65159SKalle Valo 
1251d5c65159SKalle Valo 	return ppdu_info;
1252d5c65159SKalle Valo }
1253d5c65159SKalle Valo 
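/* Handle an HTT PPDU stats indication: look up the pdev from the message
 * header, fetch (or allocate) the matching PPDU descriptor and parse the
 * TLV payload into it.
 */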
1254d5c65159SKalle Valo static int ath11k_htt_pull_ppdu_stats(struct ath11k_base *ab,
1255d5c65159SKalle Valo 				      struct sk_buff *skb)
1256d5c65159SKalle Valo {
1257d5c65159SKalle Valo 	struct ath11k_htt_ppdu_stats_msg *msg;
1258d5c65159SKalle Valo 	struct htt_ppdu_stats_info *ppdu_info;
1259d5c65159SKalle Valo 	struct ath11k *ar;
1260d5c65159SKalle Valo 	int ret;
1261d5c65159SKalle Valo 	u8 pdev_id;
1262d5c65159SKalle Valo 	u32 ppdu_id, len;
1263d5c65159SKalle Valo 
1264d5c65159SKalle Valo 	msg = (struct ath11k_htt_ppdu_stats_msg *)skb->data;
1265d5c65159SKalle Valo 	len = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PAYLOAD_SIZE, msg->info);
1266d5c65159SKalle Valo 	pdev_id = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PDEV_ID, msg->info);
1267d5c65159SKalle Valo 	ppdu_id = msg->ppdu_id;
1268d5c65159SKalle Valo 
1269d5c65159SKalle Valo 	rcu_read_lock();
1270d5c65159SKalle Valo 	ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id);
1271d5c65159SKalle Valo 	if (!ar) {
1272d5c65159SKalle Valo 		ret = -EINVAL;
1273d5c65159SKalle Valo 		goto exit;
1274d5c65159SKalle Valo 	}
1275d5c65159SKalle Valo 
1276d5c65159SKalle Valo 	if (ath11k_debug_is_pktlog_lite_mode_enabled(ar))
1277d5c65159SKalle Valo 		trace_ath11k_htt_ppdu_stats(ar, skb->data, len);
1278d5c65159SKalle Valo 
1279d5c65159SKalle Valo 	ppdu_info = ath11k_dp_htt_get_ppdu_desc(ar, ppdu_id);
1280d5c65159SKalle Valo 	if (!ppdu_info) {
1281d5c65159SKalle Valo 		ret = -EINVAL;
1282d5c65159SKalle Valo 		goto exit;
1283d5c65159SKalle Valo 	}
1284d5c65159SKalle Valo 
1285d5c65159SKalle Valo 	ppdu_info->ppdu_id = ppdu_id;
1286d5c65159SKalle Valo 	ret = ath11k_dp_htt_tlv_iter(ab, msg->data, len,
1287d5c65159SKalle Valo 				     ath11k_htt_tlv_ppdu_stats_parse,
1288d5c65159SKalle Valo 				     (void *)ppdu_info);
1289d5c65159SKalle Valo 	if (ret) {
1290d5c65159SKalle Valo 		ath11k_warn(ab, "Failed to parse tlv %d\n", ret);
1291d5c65159SKalle Valo 		goto exit;
1292d5c65159SKalle Valo 	}
1293d5c65159SKalle Valo 
1294d5c65159SKalle Valo exit:
1295d5c65159SKalle Valo 	rcu_read_unlock();
1296d5c65159SKalle Valo 
1297d5c65159SKalle Valo 	return ret;
1298d5c65159SKalle Valo }
1299d5c65159SKalle Valo 
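/* Forward an HTT pktlog message to the pktlog tracepoint of the pdev
 * identified in the message header.
 */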
1300d5c65159SKalle Valo static void ath11k_htt_pktlog(struct ath11k_base *ab, struct sk_buff *skb)
1301d5c65159SKalle Valo {
1302d5c65159SKalle Valo 	struct htt_pktlog_msg *data = (struct htt_pktlog_msg *)skb->data;
1303443d2ee7SAnilkumar Kolli 	struct ath_pktlog_hdr *hdr = (struct ath_pktlog_hdr *)data;
1304d5c65159SKalle Valo 	struct ath11k *ar;
1305d5c65159SKalle Valo 	u8 pdev_id;
1306d5c65159SKalle Valo 
1307d5c65159SKalle Valo 	pdev_id = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PDEV_ID, data->hdr);
1308d0f390eaSAnilkumar Kolli 	ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id);
1309d0f390eaSAnilkumar Kolli 	if (!ar) {
1310d0f390eaSAnilkumar Kolli 		ath11k_warn(ab, "invalid pdev id %d on htt pktlog\n", pdev_id);
1311d0f390eaSAnilkumar Kolli 		return;
1312d0f390eaSAnilkumar Kolli 	}
1313d5c65159SKalle Valo 
1314443d2ee7SAnilkumar Kolli 	trace_ath11k_htt_pktlog(ar, data->payload, hdr->size);
1315d5c65159SKalle Valo }
1316d5c65159SKalle Valo 
1317d5c65159SKalle Valo void ath11k_dp_htt_htc_t2h_msg_handler(struct ath11k_base *ab,
1318d5c65159SKalle Valo 				       struct sk_buff *skb)
1319d5c65159SKalle Valo {
1320d5c65159SKalle Valo 	struct ath11k_dp *dp = &ab->dp;
1321d5c65159SKalle Valo 	struct htt_resp_msg *resp = (struct htt_resp_msg *)skb->data;
1322d5c65159SKalle Valo 	enum htt_t2h_msg_type type = FIELD_GET(HTT_T2H_MSG_TYPE, *(u32 *)resp);
1323d5c65159SKalle Valo 	u16 peer_id;
1324d5c65159SKalle Valo 	u8 vdev_id;
1325d5c65159SKalle Valo 	u8 mac_addr[ETH_ALEN];
1326d5c65159SKalle Valo 	u16 peer_mac_h16;
1327d5c65159SKalle Valo 	u16 ast_hash;
1328d5c65159SKalle Valo 
1329d5c65159SKalle Valo 	ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "dp_htt rx msg type: 0x%x\n", type);
1330d5c65159SKalle Valo 
1331d5c65159SKalle Valo 	switch (type) {
1332d5c65159SKalle Valo 	case HTT_T2H_MSG_TYPE_VERSION_CONF:
1333d5c65159SKalle Valo 		dp->htt_tgt_ver_major = FIELD_GET(HTT_T2H_VERSION_CONF_MAJOR,
1334d5c65159SKalle Valo 						  resp->version_msg.version);
1335d5c65159SKalle Valo 		dp->htt_tgt_ver_minor = FIELD_GET(HTT_T2H_VERSION_CONF_MINOR,
1336d5c65159SKalle Valo 						  resp->version_msg.version);
1337d5c65159SKalle Valo 		complete(&dp->htt_tgt_version_received);
1338d5c65159SKalle Valo 		break;
1339d5c65159SKalle Valo 	case HTT_T2H_MSG_TYPE_PEER_MAP:
1340d5c65159SKalle Valo 		vdev_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_VDEV_ID,
1341d5c65159SKalle Valo 				    resp->peer_map_ev.info);
1342d5c65159SKalle Valo 		peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_PEER_ID,
1343d5c65159SKalle Valo 				    resp->peer_map_ev.info);
1344d5c65159SKalle Valo 		peer_mac_h16 = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16,
1345d5c65159SKalle Valo 					 resp->peer_map_ev.info1);
1346d5c65159SKalle Valo 		ath11k_dp_get_mac_addr(resp->peer_map_ev.mac_addr_l32,
1347d5c65159SKalle Valo 				       peer_mac_h16, mac_addr);
1348d5c65159SKalle Valo 		ast_hash = FIELD_GET(HTT_T2H_PEER_MAP_INFO2_AST_HASH_VAL,
13490f37fbf4SAnilkumar Kolli 				     resp->peer_map_ev.info2);
1350d5c65159SKalle Valo 		ath11k_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash);
1351d5c65159SKalle Valo 		break;
1352d5c65159SKalle Valo 	case HTT_T2H_MSG_TYPE_PEER_UNMAP:
1353d5c65159SKalle Valo 		peer_id = FIELD_GET(HTT_T2H_PEER_UNMAP_INFO_PEER_ID,
1354d5c65159SKalle Valo 				    resp->peer_unmap_ev.info);
1355d5c65159SKalle Valo 		ath11k_peer_unmap_event(ab, peer_id);
1356d5c65159SKalle Valo 		break;
1357d5c65159SKalle Valo 	case HTT_T2H_MSG_TYPE_PPDU_STATS_IND:
1358d5c65159SKalle Valo 		ath11k_htt_pull_ppdu_stats(ab, skb);
1359d5c65159SKalle Valo 		break;
1360d5c65159SKalle Valo 	case HTT_T2H_MSG_TYPE_EXT_STATS_CONF:
1361d5c65159SKalle Valo 		ath11k_dbg_htt_ext_stats_handler(ab, skb);
1362d5c65159SKalle Valo 		break;
1363d5c65159SKalle Valo 	case HTT_T2H_MSG_TYPE_PKTLOG:
1364d5c65159SKalle Valo 		ath11k_htt_pktlog(ab, skb);
1365d5c65159SKalle Valo 		break;
1366d5c65159SKalle Valo 	default:
1367d5c65159SKalle Valo 		ath11k_warn(ab, "htt event %d not handled\n", type);
1368d5c65159SKalle Valo 		break;
1369d5c65159SKalle Valo 	}
1370d5c65159SKalle Valo 
1371d5c65159SKalle Valo 	dev_kfree_skb_any(skb);
1372d5c65159SKalle Valo }
1373d5c65159SKalle Valo 
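/* Coalesce an MSDU that is scattered across multiple rx buffers into the
 * first skb: strip the HAL descriptor and L3 padding from the first buffer,
 * copy the end TLVs from the last buffer's descriptor, grow the tailroom if
 * needed and append the payload of the remaining buffers, freeing them as
 * they are consumed.
 */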
1374d5c65159SKalle Valo static int ath11k_dp_rx_msdu_coalesce(struct ath11k *ar,
1375d5c65159SKalle Valo 				      struct sk_buff_head *msdu_list,
1376d5c65159SKalle Valo 				      struct sk_buff *first, struct sk_buff *last,
1377d5c65159SKalle Valo 				      u8 l3pad_bytes, int msdu_len)
1378d5c65159SKalle Valo {
1379d5c65159SKalle Valo 	struct sk_buff *skb;
1380d5c65159SKalle Valo 	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(first);
1381d2f510faSSriram R 	int buf_first_hdr_len, buf_first_len;
1382d5c65159SKalle Valo 	struct hal_rx_desc *ldesc;
1383d5c65159SKalle Valo 	int space_extra;
1384d5c65159SKalle Valo 	int rem_len;
1385d5c65159SKalle Valo 	int buf_len;
1386d5c65159SKalle Valo 
1387d2f510faSSriram R 	/* As the msdu is spread across multiple rx buffers,
1388d2f510faSSriram R 	 * find the offset to the start of msdu for computing
1389d2f510faSSriram R 	 * the length of the msdu in the first buffer.
1390d2f510faSSriram R 	 */
1391d2f510faSSriram R 	buf_first_hdr_len = HAL_RX_DESC_SIZE + l3pad_bytes;
1392d2f510faSSriram R 	buf_first_len = DP_RX_BUFFER_SIZE - buf_first_hdr_len;
1393d2f510faSSriram R 
1394d2f510faSSriram R 	if (WARN_ON_ONCE(msdu_len <= buf_first_len)) {
1395d2f510faSSriram R 		skb_put(first, buf_first_hdr_len + msdu_len);
1396d2f510faSSriram R 		skb_pull(first, buf_first_hdr_len);
1397d5c65159SKalle Valo 		return 0;
1398d5c65159SKalle Valo 	}
1399d5c65159SKalle Valo 
1400d5c65159SKalle Valo 	ldesc = (struct hal_rx_desc *)last->data;
1401d5c65159SKalle Valo 	rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(ldesc);
1402d5c65159SKalle Valo 	rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(ldesc);
1403d5c65159SKalle Valo 
1404d5c65159SKalle Valo 	/* The MSDU spans multiple buffers because its length exceeds the room
1405d5c65159SKalle Valo 	 * left in the first buffer after the HAL descriptor and L3 padding.
1406d5c65159SKalle Valo 	 * Treat the first buffer as completely filled and strip that header.
1407d5c65159SKalle Valo 	 */
1408d5c65159SKalle Valo 	skb_put(first, DP_RX_BUFFER_SIZE);
1409d2f510faSSriram R 	skb_pull(first, buf_first_hdr_len);
1410d5c65159SKalle Valo 
141130679ec4SKarthikeyan Periyasamy 	/* When an MSDU spreads over multiple buffers, the attention, MSDU_END
141230679ec4SKarthikeyan Periyasamy 	 * and MPDU_END TLVs are valid only in the last buffer. Copy those TLVs.
141330679ec4SKarthikeyan Periyasamy 	 */
141430679ec4SKarthikeyan Periyasamy 	ath11k_dp_rx_desc_end_tlv_copy(rxcb->rx_desc, ldesc);
141530679ec4SKarthikeyan Periyasamy 
1416d2f510faSSriram R 	space_extra = msdu_len - (buf_first_len + skb_tailroom(first));
1417d5c65159SKalle Valo 	if (space_extra > 0 &&
1418d5c65159SKalle Valo 	    (pskb_expand_head(first, 0, space_extra, GFP_ATOMIC) < 0)) {
1419d5c65159SKalle Valo 		/* Free up all buffers of the MSDU */
1420d5c65159SKalle Valo 		while ((skb = __skb_dequeue(msdu_list)) != NULL) {
1421d5c65159SKalle Valo 			rxcb = ATH11K_SKB_RXCB(skb);
1422d5c65159SKalle Valo 			if (!rxcb->is_continuation) {
1423d5c65159SKalle Valo 				dev_kfree_skb_any(skb);
1424d5c65159SKalle Valo 				break;
1425d5c65159SKalle Valo 			}
1426d5c65159SKalle Valo 			dev_kfree_skb_any(skb);
1427d5c65159SKalle Valo 		}
1428d5c65159SKalle Valo 		return -ENOMEM;
1429d5c65159SKalle Valo 	}
1430d5c65159SKalle Valo 
1431d2f510faSSriram R 	rem_len = msdu_len - buf_first_len;
1432d5c65159SKalle Valo 	while ((skb = __skb_dequeue(msdu_list)) != NULL && rem_len > 0) {
1433d5c65159SKalle Valo 		rxcb = ATH11K_SKB_RXCB(skb);
1434d5c65159SKalle Valo 		if (rxcb->is_continuation)
1435d5c65159SKalle Valo 			buf_len = DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE;
1436d5c65159SKalle Valo 		else
1437d5c65159SKalle Valo 			buf_len = rem_len;
1438d5c65159SKalle Valo 
1439d5c65159SKalle Valo 		if (buf_len > (DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE)) {
1440d5c65159SKalle Valo 			WARN_ON_ONCE(1);
1441d5c65159SKalle Valo 			dev_kfree_skb_any(skb);
1442d5c65159SKalle Valo 			return -EINVAL;
1443d5c65159SKalle Valo 		}
1444d5c65159SKalle Valo 
1445d5c65159SKalle Valo 		skb_put(skb, buf_len + HAL_RX_DESC_SIZE);
1446d5c65159SKalle Valo 		skb_pull(skb, HAL_RX_DESC_SIZE);
1447d5c65159SKalle Valo 		skb_copy_from_linear_data(skb, skb_put(first, buf_len),
1448d5c65159SKalle Valo 					  buf_len);
1449d5c65159SKalle Valo 		dev_kfree_skb_any(skb);
1450d5c65159SKalle Valo 
1451d5c65159SKalle Valo 		rem_len -= buf_len;
1452d5c65159SKalle Valo 		if (!rxcb->is_continuation)
1453d5c65159SKalle Valo 			break;
1454d5c65159SKalle Valo 	}
1455d5c65159SKalle Valo 
1456d5c65159SKalle Valo 	return 0;
1457d5c65159SKalle Valo }
1458d5c65159SKalle Valo 
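/* Return the buffer holding the last part of the MSDU starting at 'first',
 * i.e. the first entry in msdu_list whose continuation bit is not set.
 */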
1459d5c65159SKalle Valo static struct sk_buff *ath11k_dp_rx_get_msdu_last_buf(struct sk_buff_head *msdu_list,
1460d5c65159SKalle Valo 						      struct sk_buff *first)
1461d5c65159SKalle Valo {
1462d5c65159SKalle Valo 	struct sk_buff *skb;
1463d5c65159SKalle Valo 	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(first);
1464d5c65159SKalle Valo 
1465d5c65159SKalle Valo 	if (!rxcb->is_continuation)
1466d5c65159SKalle Valo 		return first;
1467d5c65159SKalle Valo 
1468d5c65159SKalle Valo 	skb_queue_walk(msdu_list, skb) {
1469d5c65159SKalle Valo 		rxcb = ATH11K_SKB_RXCB(skb);
1470d5c65159SKalle Valo 		if (!rxcb->is_continuation)
1471d5c65159SKalle Valo 			return skb;
1472d5c65159SKalle Valo 	}
1473d5c65159SKalle Valo 
1474d5c65159SKalle Valo 	return NULL;
1475d5c65159SKalle Valo }
1476d5c65159SKalle Valo 
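/* Unlink the MSDUs of the next A-MSDU from msdu_list, strip the HAL
 * descriptor and L3 padding from each (coalescing multi-buffer MSDUs as
 * needed) and queue them on amsdu_list until the last-MSDU flag is seen.
 * Only data frames are accepted.
 */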
1477d5c65159SKalle Valo static int ath11k_dp_rx_retrieve_amsdu(struct ath11k *ar,
1478d5c65159SKalle Valo 				       struct sk_buff_head *msdu_list,
1479d5c65159SKalle Valo 				       struct sk_buff_head *amsdu_list)
1480d5c65159SKalle Valo {
1481d5c65159SKalle Valo 	struct sk_buff *msdu = skb_peek(msdu_list);
1482d5c65159SKalle Valo 	struct sk_buff *last_buf;
1483d5c65159SKalle Valo 	struct ath11k_skb_rxcb *rxcb;
1484d5c65159SKalle Valo 	struct ieee80211_hdr *hdr;
1485d5c65159SKalle Valo 	struct hal_rx_desc *rx_desc, *lrx_desc;
1486d5c65159SKalle Valo 	u16 msdu_len;
1487d5c65159SKalle Valo 	u8 l3_pad_bytes;
1488d5c65159SKalle Valo 	u8 *hdr_status;
1489d5c65159SKalle Valo 	int ret;
1490d5c65159SKalle Valo 
1491d5c65159SKalle Valo 	if (!msdu)
1492d5c65159SKalle Valo 		return -ENOENT;
1493d5c65159SKalle Valo 
1494d5c65159SKalle Valo 	rx_desc = (struct hal_rx_desc *)msdu->data;
1495d5c65159SKalle Valo 	hdr_status = ath11k_dp_rx_h_80211_hdr(rx_desc);
1496d5c65159SKalle Valo 	hdr = (struct ieee80211_hdr *)hdr_status;
1497d5c65159SKalle Valo 	/* Process only data frames */
1498d5c65159SKalle Valo 	if (!ieee80211_is_data(hdr->frame_control)) {
1499d5c65159SKalle Valo 		__skb_unlink(msdu, msdu_list);
1500d5c65159SKalle Valo 		dev_kfree_skb_any(msdu);
1501d5c65159SKalle Valo 		return -EINVAL;
1502d5c65159SKalle Valo 	}
1503d5c65159SKalle Valo 
1504d5c65159SKalle Valo 	do {
1505d5c65159SKalle Valo 		__skb_unlink(msdu, msdu_list);
1506d5c65159SKalle Valo 		last_buf = ath11k_dp_rx_get_msdu_last_buf(msdu_list, msdu);
1507d5c65159SKalle Valo 		if (!last_buf) {
1508d5c65159SKalle Valo 			ath11k_warn(ar->ab,
1509d5c65159SKalle Valo 				    "No valid Rx buffer to access Atten/MSDU_END/MPDU_END tlvs\n");
1510d5c65159SKalle Valo 			ret = -EIO;
1511d5c65159SKalle Valo 			goto free_out;
1512d5c65159SKalle Valo 		}
1513d5c65159SKalle Valo 
1514d5c65159SKalle Valo 		rx_desc = (struct hal_rx_desc *)msdu->data;
1515d5c65159SKalle Valo 		lrx_desc = (struct hal_rx_desc *)last_buf->data;
1516d5c65159SKalle Valo 
1517d5c65159SKalle Valo 		if (!ath11k_dp_rx_h_attn_msdu_done(lrx_desc)) {
1518d5c65159SKalle Valo 			ath11k_warn(ar->ab, "msdu_done bit in attention is not set\n");
1519d5c65159SKalle Valo 			ret = -EIO;
1520d5c65159SKalle Valo 			goto free_out;
1521d5c65159SKalle Valo 		}
1522d5c65159SKalle Valo 
1523d5c65159SKalle Valo 		rxcb = ATH11K_SKB_RXCB(msdu);
1524d5c65159SKalle Valo 		rxcb->rx_desc = rx_desc;
1525d5c65159SKalle Valo 		msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(rx_desc);
1526d5c65159SKalle Valo 		l3_pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(lrx_desc);
1527d5c65159SKalle Valo 
1528d5c65159SKalle Valo 		if (!rxcb->is_continuation) {
1529d5c65159SKalle Valo 			skb_put(msdu, HAL_RX_DESC_SIZE + l3_pad_bytes + msdu_len);
1530d5c65159SKalle Valo 			skb_pull(msdu, HAL_RX_DESC_SIZE + l3_pad_bytes);
1531d5c65159SKalle Valo 		} else {
1532d5c65159SKalle Valo 			ret = ath11k_dp_rx_msdu_coalesce(ar, msdu_list,
1533d5c65159SKalle Valo 							 msdu, last_buf,
1534d5c65159SKalle Valo 							 l3_pad_bytes, msdu_len);
1535d5c65159SKalle Valo 			if (ret) {
1536d5c65159SKalle Valo 				ath11k_warn(ar->ab,
1537d5c65159SKalle Valo 					    "failed to coalesce msdu rx buffer %d\n", ret);
1538d5c65159SKalle Valo 				goto free_out;
1539d5c65159SKalle Valo 			}
1540d5c65159SKalle Valo 		}
1541d5c65159SKalle Valo 		__skb_queue_tail(amsdu_list, msdu);
1542d5c65159SKalle Valo 
1543d5c65159SKalle Valo 		/* Should we also consider msdu_cnt from mpdu_meta while
1544d5c65159SKalle Valo 		 * preparing amsdu list?
1545d5c65159SKalle Valo 		 */
1546d5c65159SKalle Valo 		if (rxcb->is_last_msdu)
1547d5c65159SKalle Valo 			break;
1548d5c65159SKalle Valo 	} while ((msdu = skb_peek(msdu_list)) != NULL);
1549d5c65159SKalle Valo 
1550d5c65159SKalle Valo 	return 0;
1551d5c65159SKalle Valo 
1552d5c65159SKalle Valo free_out:
1553d5c65159SKalle Valo 	dev_kfree_skb_any(msdu);
1554d5c65159SKalle Valo 	__skb_queue_purge(amsdu_list);
1555d5c65159SKalle Valo 
1556d5c65159SKalle Valo 	return ret;
1557d5c65159SKalle Valo }
1558d5c65159SKalle Valo 
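/* Report hardware checksum verification to the stack: mark the skb
 * CHECKSUM_UNNECESSARY unless the rx descriptor flags an IP or L4
 * checksum failure.
 */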
1559d5c65159SKalle Valo static void ath11k_dp_rx_h_csum_offload(struct sk_buff *msdu)
1560d5c65159SKalle Valo {
1561d5c65159SKalle Valo 	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
1562d5c65159SKalle Valo 	bool ip_csum_fail, l4_csum_fail;
1563d5c65159SKalle Valo 
1564d5c65159SKalle Valo 	ip_csum_fail = ath11k_dp_rx_h_attn_ip_cksum_fail(rxcb->rx_desc);
1565d5c65159SKalle Valo 	l4_csum_fail = ath11k_dp_rx_h_attn_l4_cksum_fail(rxcb->rx_desc);
1566d5c65159SKalle Valo 
1567d5c65159SKalle Valo 	msdu->ip_summed = (ip_csum_fail || l4_csum_fail) ?
1568d5c65159SKalle Valo 			  CHECKSUM_NONE : CHECKSUM_UNNECESSARY;
1569d5c65159SKalle Valo }
1570d5c65159SKalle Valo 
1571d5c65159SKalle Valo static int ath11k_dp_rx_crypto_mic_len(struct ath11k *ar,
1572d5c65159SKalle Valo 				       enum hal_encrypt_type enctype)
1573d5c65159SKalle Valo {
1574d5c65159SKalle Valo 	switch (enctype) {
1575d5c65159SKalle Valo 	case HAL_ENCRYPT_TYPE_OPEN:
1576d5c65159SKalle Valo 	case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
1577d5c65159SKalle Valo 	case HAL_ENCRYPT_TYPE_TKIP_MIC:
1578d5c65159SKalle Valo 		return 0;
1579d5c65159SKalle Valo 	case HAL_ENCRYPT_TYPE_CCMP_128:
1580d5c65159SKalle Valo 		return IEEE80211_CCMP_MIC_LEN;
1581d5c65159SKalle Valo 	case HAL_ENCRYPT_TYPE_CCMP_256:
1582d5c65159SKalle Valo 		return IEEE80211_CCMP_256_MIC_LEN;
1583d5c65159SKalle Valo 	case HAL_ENCRYPT_TYPE_GCMP_128:
1584d5c65159SKalle Valo 	case HAL_ENCRYPT_TYPE_AES_GCMP_256:
1585d5c65159SKalle Valo 		return IEEE80211_GCMP_MIC_LEN;
1586d5c65159SKalle Valo 	case HAL_ENCRYPT_TYPE_WEP_40:
1587d5c65159SKalle Valo 	case HAL_ENCRYPT_TYPE_WEP_104:
1588d5c65159SKalle Valo 	case HAL_ENCRYPT_TYPE_WEP_128:
1589d5c65159SKalle Valo 	case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
1590d5c65159SKalle Valo 	case HAL_ENCRYPT_TYPE_WAPI:
1591d5c65159SKalle Valo 		break;
1592d5c65159SKalle Valo 	}
1593d5c65159SKalle Valo 
1594d5c65159SKalle Valo 	ath11k_warn(ar->ab, "unsupported encryption type %d for mic len\n", enctype);
1595d5c65159SKalle Valo 	return 0;
1596d5c65159SKalle Valo }
1597d5c65159SKalle Valo 
1598d5c65159SKalle Valo static int ath11k_dp_rx_crypto_param_len(struct ath11k *ar,
1599d5c65159SKalle Valo 					 enum hal_encrypt_type enctype)
1600d5c65159SKalle Valo {
1601d5c65159SKalle Valo 	switch (enctype) {
1602d5c65159SKalle Valo 	case HAL_ENCRYPT_TYPE_OPEN:
1603d5c65159SKalle Valo 		return 0;
1604d5c65159SKalle Valo 	case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
1605d5c65159SKalle Valo 	case HAL_ENCRYPT_TYPE_TKIP_MIC:
1606d5c65159SKalle Valo 		return IEEE80211_TKIP_IV_LEN;
1607d5c65159SKalle Valo 	case HAL_ENCRYPT_TYPE_CCMP_128:
1608d5c65159SKalle Valo 		return IEEE80211_CCMP_HDR_LEN;
1609d5c65159SKalle Valo 	case HAL_ENCRYPT_TYPE_CCMP_256:
1610d5c65159SKalle Valo 		return IEEE80211_CCMP_256_HDR_LEN;
1611d5c65159SKalle Valo 	case HAL_ENCRYPT_TYPE_GCMP_128:
1612d5c65159SKalle Valo 	case HAL_ENCRYPT_TYPE_AES_GCMP_256:
1613d5c65159SKalle Valo 		return IEEE80211_GCMP_HDR_LEN;
1614d5c65159SKalle Valo 	case HAL_ENCRYPT_TYPE_WEP_40:
1615d5c65159SKalle Valo 	case HAL_ENCRYPT_TYPE_WEP_104:
1616d5c65159SKalle Valo 	case HAL_ENCRYPT_TYPE_WEP_128:
1617d5c65159SKalle Valo 	case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
1618d5c65159SKalle Valo 	case HAL_ENCRYPT_TYPE_WAPI:
1619d5c65159SKalle Valo 		break;
1620d5c65159SKalle Valo 	}
1621d5c65159SKalle Valo 
1622d5c65159SKalle Valo 	ath11k_warn(ar->ab, "unsupported encryption type %d\n", enctype);
1623d5c65159SKalle Valo 	return 0;
1624d5c65159SKalle Valo }
1625d5c65159SKalle Valo 
1626d5c65159SKalle Valo static int ath11k_dp_rx_crypto_icv_len(struct ath11k *ar,
1627d5c65159SKalle Valo 				       enum hal_encrypt_type enctype)
1628d5c65159SKalle Valo {
1629d5c65159SKalle Valo 	switch (enctype) {
1630d5c65159SKalle Valo 	case HAL_ENCRYPT_TYPE_OPEN:
1631d5c65159SKalle Valo 	case HAL_ENCRYPT_TYPE_CCMP_128:
1632d5c65159SKalle Valo 	case HAL_ENCRYPT_TYPE_CCMP_256:
1633d5c65159SKalle Valo 	case HAL_ENCRYPT_TYPE_GCMP_128:
1634d5c65159SKalle Valo 	case HAL_ENCRYPT_TYPE_AES_GCMP_256:
1635d5c65159SKalle Valo 		return 0;
1636d5c65159SKalle Valo 	case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
1637d5c65159SKalle Valo 	case HAL_ENCRYPT_TYPE_TKIP_MIC:
1638d5c65159SKalle Valo 		return IEEE80211_TKIP_ICV_LEN;
1639d5c65159SKalle Valo 	case HAL_ENCRYPT_TYPE_WEP_40:
1640d5c65159SKalle Valo 	case HAL_ENCRYPT_TYPE_WEP_104:
1641d5c65159SKalle Valo 	case HAL_ENCRYPT_TYPE_WEP_128:
1642d5c65159SKalle Valo 	case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
1643d5c65159SKalle Valo 	case HAL_ENCRYPT_TYPE_WAPI:
1644d5c65159SKalle Valo 		break;
1645d5c65159SKalle Valo 	}
1646d5c65159SKalle Valo 
1647d5c65159SKalle Valo 	ath11k_warn(ar->ab, "unsupported encryption type %d\n", enctype);
1648d5c65159SKalle Valo 	return 0;
1649d5c65159SKalle Valo }
1650d5c65159SKalle Valo 
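/* Rebuild a full 802.11 frame from a native-wifi decapped MSDU: pull the
 * decapped header, push back the original 802.11 header (and the crypto
 * parameters when the IV has not been stripped) and restore the per-MSDU
 * DA/SA.
 */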
1651d5c65159SKalle Valo static void ath11k_dp_rx_h_undecap_nwifi(struct ath11k *ar,
1652d5c65159SKalle Valo 					 struct sk_buff *msdu,
1653d5c65159SKalle Valo 					 u8 *first_hdr,
1654d5c65159SKalle Valo 					 enum hal_encrypt_type enctype,
1655d5c65159SKalle Valo 					 struct ieee80211_rx_status *status)
1656d5c65159SKalle Valo {
1657d5c65159SKalle Valo 	struct ieee80211_hdr *hdr;
1658d5c65159SKalle Valo 	size_t hdr_len;
1659d5c65159SKalle Valo 	u8 da[ETH_ALEN];
1660d5c65159SKalle Valo 	u8 sa[ETH_ALEN];
1661d5c65159SKalle Valo 
1662d5c65159SKalle Valo 	/* pull decapped header and copy SA & DA */
1663d5c65159SKalle Valo 	hdr = (struct ieee80211_hdr *)msdu->data;
1664d5c65159SKalle Valo 	ether_addr_copy(da, ieee80211_get_DA(hdr));
1665d5c65159SKalle Valo 	ether_addr_copy(sa, ieee80211_get_SA(hdr));
1666d5c65159SKalle Valo 	skb_pull(msdu, ieee80211_hdrlen(hdr->frame_control));
1667d5c65159SKalle Valo 
1668d5c65159SKalle Valo 	/* push original 802.11 header */
1669d5c65159SKalle Valo 	hdr = (struct ieee80211_hdr *)first_hdr;
1670d5c65159SKalle Valo 	hdr_len = ieee80211_hdrlen(hdr->frame_control);
1671d5c65159SKalle Valo 
1672d5c65159SKalle Valo 	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
1673d5c65159SKalle Valo 		memcpy(skb_push(msdu,
1674d5c65159SKalle Valo 				ath11k_dp_rx_crypto_param_len(ar, enctype)),
1675d5c65159SKalle Valo 		       (void *)hdr + hdr_len,
1676d5c65159SKalle Valo 		       ath11k_dp_rx_crypto_param_len(ar, enctype));
1677d5c65159SKalle Valo 	}
1678d5c65159SKalle Valo 
1679d5c65159SKalle Valo 	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
1680d5c65159SKalle Valo 
1681d5c65159SKalle Valo 	/* original 802.11 header has a different DA and in
1682d5c65159SKalle Valo 	 * case of 4addr it may also have different SA
1683d5c65159SKalle Valo 	 */
1684d5c65159SKalle Valo 	hdr = (struct ieee80211_hdr *)msdu->data;
1685d5c65159SKalle Valo 	ether_addr_copy(ieee80211_get_DA(hdr), da);
1686d5c65159SKalle Valo 	ether_addr_copy(ieee80211_get_SA(hdr), sa);
1687d5c65159SKalle Valo }
1688d5c65159SKalle Valo 
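/* For raw decap, trim the FCS and, for decrypted frames, the trailing
 * MIC/ICV and the leading crypto parameters according to which parts the
 * hardware has already stripped.
 */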
1689d5c65159SKalle Valo static void ath11k_dp_rx_h_undecap_raw(struct ath11k *ar, struct sk_buff *msdu,
1690d5c65159SKalle Valo 				       enum hal_encrypt_type enctype,
1691d5c65159SKalle Valo 				       struct ieee80211_rx_status *status,
1692d5c65159SKalle Valo 				       bool decrypted)
1693d5c65159SKalle Valo {
1694d5c65159SKalle Valo 	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
1695d5c65159SKalle Valo 	struct ieee80211_hdr *hdr;
1696d5c65159SKalle Valo 	size_t hdr_len;
1697d5c65159SKalle Valo 	size_t crypto_len;
1698d5c65159SKalle Valo 
1699d5c65159SKalle Valo 	if (!rxcb->is_first_msdu ||
1700d5c65159SKalle Valo 	    !(rxcb->is_first_msdu && rxcb->is_last_msdu)) {
1701d5c65159SKalle Valo 		WARN_ON_ONCE(1);
1702d5c65159SKalle Valo 		return;
1703d5c65159SKalle Valo 	}
1704d5c65159SKalle Valo 
1705d5c65159SKalle Valo 	skb_trim(msdu, msdu->len - FCS_LEN);
1706d5c65159SKalle Valo 
1707d5c65159SKalle Valo 	if (!decrypted)
1708d5c65159SKalle Valo 		return;
1709d5c65159SKalle Valo 
1710d5c65159SKalle Valo 	hdr = (void *)msdu->data;
1711d5c65159SKalle Valo 
1712d5c65159SKalle Valo 	/* Tail */
1713d5c65159SKalle Valo 	if (status->flag & RX_FLAG_IV_STRIPPED) {
1714d5c65159SKalle Valo 		skb_trim(msdu, msdu->len -
1715d5c65159SKalle Valo 			 ath11k_dp_rx_crypto_mic_len(ar, enctype));
1716d5c65159SKalle Valo 
1717d5c65159SKalle Valo 		skb_trim(msdu, msdu->len -
1718d5c65159SKalle Valo 			 ath11k_dp_rx_crypto_icv_len(ar, enctype));
1719d5c65159SKalle Valo 	} else {
1720d5c65159SKalle Valo 		/* MIC */
1721d5c65159SKalle Valo 		if (status->flag & RX_FLAG_MIC_STRIPPED)
1722d5c65159SKalle Valo 			skb_trim(msdu, msdu->len -
1723d5c65159SKalle Valo 				 ath11k_dp_rx_crypto_mic_len(ar, enctype));
1724d5c65159SKalle Valo 
1725d5c65159SKalle Valo 		/* ICV */
1726d5c65159SKalle Valo 		if (status->flag & RX_FLAG_ICV_STRIPPED)
1727d5c65159SKalle Valo 			skb_trim(msdu, msdu->len -
1728d5c65159SKalle Valo 				 ath11k_dp_rx_crypto_icv_len(ar, enctype));
1729d5c65159SKalle Valo 	}
1730d5c65159SKalle Valo 
1731d5c65159SKalle Valo 	/* MMIC */
1732d5c65159SKalle Valo 	if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
1733d5c65159SKalle Valo 	    !ieee80211_has_morefrags(hdr->frame_control) &&
1734d5c65159SKalle Valo 	    enctype == HAL_ENCRYPT_TYPE_TKIP_MIC)
1735d5c65159SKalle Valo 		skb_trim(msdu, msdu->len - IEEE80211_CCMP_MIC_LEN);
1736d5c65159SKalle Valo 
1737d5c65159SKalle Valo 	/* Head */
1738d5c65159SKalle Valo 	if (status->flag & RX_FLAG_IV_STRIPPED) {
1739d5c65159SKalle Valo 		hdr_len = ieee80211_hdrlen(hdr->frame_control);
1740d5c65159SKalle Valo 		crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype);
1741d5c65159SKalle Valo 
1742d5c65159SKalle Valo 		memmove((void *)msdu->data + crypto_len,
1743d5c65159SKalle Valo 			(void *)msdu->data, hdr_len);
1744d5c65159SKalle Valo 		skb_pull(msdu, crypto_len);
1745d5c65159SKalle Valo 	}
1746d5c65159SKalle Valo }
1747d5c65159SKalle Valo 
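/* Locate the RFC 1042 (LLC/SNAP) header inside the buffered 802.11 header
 * of the rx descriptor, skipping the 802.11 header and crypto parameters on
 * the first MSDU and the A-MSDU subframe header when present.
 */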
1748d5c65159SKalle Valo static void *ath11k_dp_rx_h_find_rfc1042(struct ath11k *ar,
1749d5c65159SKalle Valo 					 struct sk_buff *msdu,
1750d5c65159SKalle Valo 					 enum hal_encrypt_type enctype)
1751d5c65159SKalle Valo {
1752d5c65159SKalle Valo 	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
1753d5c65159SKalle Valo 	struct ieee80211_hdr *hdr;
1754d5c65159SKalle Valo 	size_t hdr_len, crypto_len;
1755d5c65159SKalle Valo 	void *rfc1042;
1756d5c65159SKalle Valo 	bool is_amsdu;
1757d5c65159SKalle Valo 
1758d5c65159SKalle Valo 	is_amsdu = !(rxcb->is_first_msdu && rxcb->is_last_msdu);
1759d5c65159SKalle Valo 	hdr = (struct ieee80211_hdr *)ath11k_dp_rx_h_80211_hdr(rxcb->rx_desc);
1760d5c65159SKalle Valo 	rfc1042 = hdr;
1761d5c65159SKalle Valo 
1762d5c65159SKalle Valo 	if (rxcb->is_first_msdu) {
1763d5c65159SKalle Valo 		hdr_len = ieee80211_hdrlen(hdr->frame_control);
1764d5c65159SKalle Valo 		crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype);
1765d5c65159SKalle Valo 
1766d5c65159SKalle Valo 		rfc1042 += hdr_len + crypto_len;
1767d5c65159SKalle Valo 	}
1768d5c65159SKalle Valo 
1769d5c65159SKalle Valo 	if (is_amsdu)
1770d5c65159SKalle Valo 		rfc1042 += sizeof(struct ath11k_dp_amsdu_subframe_hdr);
1771d5c65159SKalle Valo 
1772d5c65159SKalle Valo 	return rfc1042;
1773d5c65159SKalle Valo }
1774d5c65159SKalle Valo 
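/* Rebuild a full 802.11 frame from an Ethernet II decapped MSDU: replace
 * the Ethernet header with the saved RFC 1042 header and the original
 * 802.11 header, then restore the per-MSDU DA/SA.
 */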
1775d5c65159SKalle Valo static void ath11k_dp_rx_h_undecap_eth(struct ath11k *ar,
1776d5c65159SKalle Valo 				       struct sk_buff *msdu,
1777d5c65159SKalle Valo 				       u8 *first_hdr,
1778d5c65159SKalle Valo 				       enum hal_encrypt_type enctype,
1779d5c65159SKalle Valo 				       struct ieee80211_rx_status *status)
1780d5c65159SKalle Valo {
1781d5c65159SKalle Valo 	struct ieee80211_hdr *hdr;
1782d5c65159SKalle Valo 	struct ethhdr *eth;
1783d5c65159SKalle Valo 	size_t hdr_len;
1784d5c65159SKalle Valo 	u8 da[ETH_ALEN];
1785d5c65159SKalle Valo 	u8 sa[ETH_ALEN];
1786d5c65159SKalle Valo 	void *rfc1042;
1787d5c65159SKalle Valo 
1788d5c65159SKalle Valo 	rfc1042 = ath11k_dp_rx_h_find_rfc1042(ar, msdu, enctype);
1789d5c65159SKalle Valo 	if (WARN_ON_ONCE(!rfc1042))
1790d5c65159SKalle Valo 		return;
1791d5c65159SKalle Valo 
1792d5c65159SKalle Valo 	/* pull decapped header and copy SA & DA */
1793d5c65159SKalle Valo 	eth = (struct ethhdr *)msdu->data;
1794d5c65159SKalle Valo 	ether_addr_copy(da, eth->h_dest);
1795d5c65159SKalle Valo 	ether_addr_copy(sa, eth->h_source);
1796d5c65159SKalle Valo 	skb_pull(msdu, sizeof(struct ethhdr));
1797d5c65159SKalle Valo 
1798d5c65159SKalle Valo 	/* push rfc1042/llc/snap */
1799d5c65159SKalle Valo 	memcpy(skb_push(msdu, sizeof(struct ath11k_dp_rfc1042_hdr)), rfc1042,
1800d5c65159SKalle Valo 	       sizeof(struct ath11k_dp_rfc1042_hdr));
1801d5c65159SKalle Valo 
1802d5c65159SKalle Valo 	/* push original 802.11 header */
1803d5c65159SKalle Valo 	hdr = (struct ieee80211_hdr *)first_hdr;
1804d5c65159SKalle Valo 	hdr_len = ieee80211_hdrlen(hdr->frame_control);
1805d5c65159SKalle Valo 
1806d5c65159SKalle Valo 	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
1807d5c65159SKalle Valo 		memcpy(skb_push(msdu,
1808d5c65159SKalle Valo 				ath11k_dp_rx_crypto_param_len(ar, enctype)),
1809d5c65159SKalle Valo 		       (void *)hdr + hdr_len,
1810d5c65159SKalle Valo 		       ath11k_dp_rx_crypto_param_len(ar, enctype));
1811d5c65159SKalle Valo 	}
1812d5c65159SKalle Valo 
1813d5c65159SKalle Valo 	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
1814d5c65159SKalle Valo 
1815d5c65159SKalle Valo 	/* original 802.11 header has a different DA and in
1816d5c65159SKalle Valo 	 * case of 4addr it may also have different SA
1817d5c65159SKalle Valo 	 */
1818d5c65159SKalle Valo 	hdr = (struct ieee80211_hdr *)msdu->data;
1819d5c65159SKalle Valo 	ether_addr_copy(ieee80211_get_DA(hdr), da);
1820d5c65159SKalle Valo 	ether_addr_copy(ieee80211_get_SA(hdr), sa);
1821d5c65159SKalle Valo }
1822d5c65159SKalle Valo 
1823d5c65159SKalle Valo static void ath11k_dp_rx_h_undecap(struct ath11k *ar, struct sk_buff *msdu,
1824d5c65159SKalle Valo 				   struct hal_rx_desc *rx_desc,
1825d5c65159SKalle Valo 				   enum hal_encrypt_type enctype,
1826d5c65159SKalle Valo 				   struct ieee80211_rx_status *status,
1827d5c65159SKalle Valo 				   bool decrypted)
1828d5c65159SKalle Valo {
1829d5c65159SKalle Valo 	u8 *first_hdr;
1830d5c65159SKalle Valo 	u8 decap;
1831d5c65159SKalle Valo 
1832d5c65159SKalle Valo 	first_hdr = ath11k_dp_rx_h_80211_hdr(rx_desc);
1833d5c65159SKalle Valo 	decap = ath11k_dp_rx_h_mpdu_start_decap_type(rx_desc);
1834d5c65159SKalle Valo 
1835d5c65159SKalle Valo 	switch (decap) {
1836d5c65159SKalle Valo 	case DP_RX_DECAP_TYPE_NATIVE_WIFI:
1837d5c65159SKalle Valo 		ath11k_dp_rx_h_undecap_nwifi(ar, msdu, first_hdr,
1838d5c65159SKalle Valo 					     enctype, status);
1839d5c65159SKalle Valo 		break;
1840d5c65159SKalle Valo 	case DP_RX_DECAP_TYPE_RAW:
1841d5c65159SKalle Valo 		ath11k_dp_rx_h_undecap_raw(ar, msdu, enctype, status,
1842d5c65159SKalle Valo 					   decrypted);
1843d5c65159SKalle Valo 		break;
1844d5c65159SKalle Valo 	case DP_RX_DECAP_TYPE_ETHERNET2_DIX:
1845d5c65159SKalle Valo 		ath11k_dp_rx_h_undecap_eth(ar, msdu, first_hdr,
1846d5c65159SKalle Valo 					   enctype, status);
1847d5c65159SKalle Valo 		break;
1848d5c65159SKalle Valo 	case DP_RX_DECAP_TYPE_8023:
1849d5c65159SKalle Valo 		/* TODO: Handle undecap for these formats */
1850d5c65159SKalle Valo 		break;
1851d5c65159SKalle Valo 	}
1852d5c65159SKalle Valo }
1853d5c65159SKalle Valo 
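/* Post-process the subframes of an A-MSDU: strip the A-MSDU bit from the
 * buffered QoS control, derive FCS/TKIP-MIC error and decryption flags from
 * the attention fields of the last MSDU, then apply checksum offload and
 * undecap to each subframe.
 */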
1854d5c65159SKalle Valo static void ath11k_dp_rx_h_mpdu(struct ath11k *ar,
1855d5c65159SKalle Valo 				struct sk_buff_head *amsdu_list,
1856d5c65159SKalle Valo 				struct hal_rx_desc *rx_desc,
1857d5c65159SKalle Valo 				struct ieee80211_rx_status *rx_status)
1858d5c65159SKalle Valo {
1859d5c65159SKalle Valo 	struct ieee80211_hdr *hdr;
1860d5c65159SKalle Valo 	enum hal_encrypt_type enctype;
1861d5c65159SKalle Valo 	struct sk_buff *last_msdu;
1862d5c65159SKalle Valo 	struct sk_buff *msdu;
1863d5c65159SKalle Valo 	struct ath11k_skb_rxcb *last_rxcb;
1864d5c65159SKalle Valo 	bool is_decrypted;
1865d5c65159SKalle Valo 	u32 err_bitmap;
1866d5c65159SKalle Valo 	u8 *qos;
1867d5c65159SKalle Valo 
1868d5c65159SKalle Valo 	if (skb_queue_empty(amsdu_list))
1869d5c65159SKalle Valo 		return;
1870d5c65159SKalle Valo 
1871d5c65159SKalle Valo 	hdr = (struct ieee80211_hdr *)ath11k_dp_rx_h_80211_hdr(rx_desc);
1872d5c65159SKalle Valo 
1873d5c65159SKalle Valo 	/* Each A-MSDU subframe will use the original header as the base and be
1874d5c65159SKalle Valo 	 * reported as a separate MSDU so strip the A-MSDU bit from QoS Ctl.
1875d5c65159SKalle Valo 	 */
1876d5c65159SKalle Valo 	if (ieee80211_is_data_qos(hdr->frame_control)) {
1877d5c65159SKalle Valo 		qos = ieee80211_get_qos_ctl(hdr);
1878d5c65159SKalle Valo 		qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
1879d5c65159SKalle Valo 	}
1880d5c65159SKalle Valo 
1881d5c65159SKalle Valo 	is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_desc);
1882d5c65159SKalle Valo 	enctype = ath11k_dp_rx_h_mpdu_start_enctype(rx_desc);
1883d5c65159SKalle Valo 
1884d5c65159SKalle Valo 	/* Some attention flags are valid only in the last MSDU. */
1885d5c65159SKalle Valo 	last_msdu = skb_peek_tail(amsdu_list);
1886d5c65159SKalle Valo 	last_rxcb = ATH11K_SKB_RXCB(last_msdu);
1887d5c65159SKalle Valo 
1888d5c65159SKalle Valo 	err_bitmap = ath11k_dp_rx_h_attn_mpdu_err(last_rxcb->rx_desc);
1889d5c65159SKalle Valo 
1890d5c65159SKalle Valo 	/* Clear per-MPDU flags while leaving per-PPDU flags intact. */
1891d5c65159SKalle Valo 	rx_status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
1892d5c65159SKalle Valo 			     RX_FLAG_MMIC_ERROR |
1893d5c65159SKalle Valo 			     RX_FLAG_DECRYPTED |
1894d5c65159SKalle Valo 			     RX_FLAG_IV_STRIPPED |
1895d5c65159SKalle Valo 			     RX_FLAG_MMIC_STRIPPED);
1896d5c65159SKalle Valo 
1897d5c65159SKalle Valo 	if (err_bitmap & DP_RX_MPDU_ERR_FCS)
1898d5c65159SKalle Valo 		rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
1899d5c65159SKalle Valo 
1900d5c65159SKalle Valo 	if (err_bitmap & DP_RX_MPDU_ERR_TKIP_MIC)
1901d5c65159SKalle Valo 		rx_status->flag |= RX_FLAG_MMIC_ERROR;
1902d5c65159SKalle Valo 
1903d5c65159SKalle Valo 	if (is_decrypted)
1904d5c65159SKalle Valo 		rx_status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MMIC_STRIPPED |
1905d5c65159SKalle Valo 				   RX_FLAG_MIC_STRIPPED | RX_FLAG_ICV_STRIPPED;
1906d5c65159SKalle Valo 
1907d5c65159SKalle Valo 	skb_queue_walk(amsdu_list, msdu) {
1908d5c65159SKalle Valo 		ath11k_dp_rx_h_csum_offload(msdu);
1909d5c65159SKalle Valo 		ath11k_dp_rx_h_undecap(ar, msdu, rx_desc,
1910d5c65159SKalle Valo 				       enctype, rx_status, is_decrypted);
1911d5c65159SKalle Valo 	}
1912d5c65159SKalle Valo }
1913d5c65159SKalle Valo 
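/* Translate the PHY rate fields of the rx descriptor (packet type, MCS,
 * NSS, bandwidth, GI) into mac80211 rx_status encoding and rate info.
 */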
1914d5c65159SKalle Valo static void ath11k_dp_rx_h_rate(struct ath11k *ar, struct hal_rx_desc *rx_desc,
1915d5c65159SKalle Valo 				struct ieee80211_rx_status *rx_status)
1916d5c65159SKalle Valo {
1917d5c65159SKalle Valo 	struct ieee80211_supported_band *sband;
1918d5c65159SKalle Valo 	enum rx_msdu_start_pkt_type pkt_type;
1919d5c65159SKalle Valo 	u8 bw;
1920d5c65159SKalle Valo 	u8 rate_mcs, nss;
1921d5c65159SKalle Valo 	u8 sgi;
1922d5c65159SKalle Valo 	bool is_cck;
1923d5c65159SKalle Valo 
1924d5c65159SKalle Valo 	pkt_type = ath11k_dp_rx_h_msdu_start_pkt_type(rx_desc);
1925d5c65159SKalle Valo 	bw = ath11k_dp_rx_h_msdu_start_rx_bw(rx_desc);
1926d5c65159SKalle Valo 	rate_mcs = ath11k_dp_rx_h_msdu_start_rate_mcs(rx_desc);
1927d5c65159SKalle Valo 	nss = ath11k_dp_rx_h_msdu_start_nss(rx_desc);
1928d5c65159SKalle Valo 	sgi = ath11k_dp_rx_h_msdu_start_sgi(rx_desc);
1929d5c65159SKalle Valo 
1930d5c65159SKalle Valo 	switch (pkt_type) {
1931d5c65159SKalle Valo 	case RX_MSDU_START_PKT_TYPE_11A:
1932d5c65159SKalle Valo 	case RX_MSDU_START_PKT_TYPE_11B:
1933d5c65159SKalle Valo 		is_cck = (pkt_type == RX_MSDU_START_PKT_TYPE_11B);
1934d5c65159SKalle Valo 		sband = &ar->mac.sbands[rx_status->band];
1935d5c65159SKalle Valo 		rx_status->rate_idx = ath11k_mac_hw_rate_to_idx(sband, rate_mcs,
1936d5c65159SKalle Valo 								is_cck);
1937d5c65159SKalle Valo 		break;
1938d5c65159SKalle Valo 	case RX_MSDU_START_PKT_TYPE_11N:
1939d5c65159SKalle Valo 		rx_status->encoding = RX_ENC_HT;
1940d5c65159SKalle Valo 		if (rate_mcs > ATH11K_HT_MCS_MAX) {
1941d5c65159SKalle Valo 			ath11k_warn(ar->ab,
1942d5c65159SKalle Valo 				    "Received invalid mcs %d in HT mode\n",
1943d5c65159SKalle Valo 				    rate_mcs);
1944d5c65159SKalle Valo 			break;
1945d5c65159SKalle Valo 		}
1946d5c65159SKalle Valo 		rx_status->rate_idx = rate_mcs + (8 * (nss - 1));
1947d5c65159SKalle Valo 		if (sgi)
1948d5c65159SKalle Valo 			rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
194939e81c6aSTamizh chelvam 		rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw);
1950d5c65159SKalle Valo 		break;
1951d5c65159SKalle Valo 	case RX_MSDU_START_PKT_TYPE_11AC:
1952d5c65159SKalle Valo 		rx_status->encoding = RX_ENC_VHT;
1953d5c65159SKalle Valo 		rx_status->rate_idx = rate_mcs;
1954d5c65159SKalle Valo 		if (rate_mcs > ATH11K_VHT_MCS_MAX) {
1955d5c65159SKalle Valo 			ath11k_warn(ar->ab,
1956d5c65159SKalle Valo 				    "Received invalid mcs %d in VHT mode\n",
1957d5c65159SKalle Valo 				    rate_mcs);
1958d5c65159SKalle Valo 			break;
1959d5c65159SKalle Valo 		}
1960d5c65159SKalle Valo 		rx_status->nss = nss;
1961d5c65159SKalle Valo 		if (sgi)
1962d5c65159SKalle Valo 			rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
196339e81c6aSTamizh chelvam 		rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw);
1964d5c65159SKalle Valo 		break;
1965d5c65159SKalle Valo 	case RX_MSDU_START_PKT_TYPE_11AX:
1966d5c65159SKalle Valo 		rx_status->rate_idx = rate_mcs;
1967d5c65159SKalle Valo 		if (rate_mcs > ATH11K_HE_MCS_MAX) {
1968d5c65159SKalle Valo 			ath11k_warn(ar->ab,
1969d5c65159SKalle Valo 				    "Received invalid mcs %d in HE mode\n",
1970d5c65159SKalle Valo 				    rate_mcs);
1971d5c65159SKalle Valo 			break;
1972d5c65159SKalle Valo 		}
1973d5c65159SKalle Valo 		rx_status->encoding = RX_ENC_HE;
1974d5c65159SKalle Valo 		rx_status->nss = nss;
19756a0c3702SJohn Crispin 		rx_status->he_gi = ath11k_he_gi_to_nl80211_he_gi(sgi);
197639e81c6aSTamizh chelvam 		rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw);
1977d5c65159SKalle Valo 		break;
1978d5c65159SKalle Valo 	}
1979d5c65159SKalle Valo }
1980d5c65159SKalle Valo 
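/* Fill the PPDU level rx_status fields: reset them to defaults, derive the
 * band and frequency from the channel number in the descriptor and then
 * fill in the rate information.
 */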
1981d5c65159SKalle Valo static void ath11k_dp_rx_h_ppdu(struct ath11k *ar, struct hal_rx_desc *rx_desc,
1982d5c65159SKalle Valo 				struct ieee80211_rx_status *rx_status)
1983d5c65159SKalle Valo {
1984d5c65159SKalle Valo 	u8 channel_num;
1985d5c65159SKalle Valo 
1986d5c65159SKalle Valo 	rx_status->freq = 0;
1987d5c65159SKalle Valo 	rx_status->rate_idx = 0;
1988d5c65159SKalle Valo 	rx_status->nss = 0;
1989d5c65159SKalle Valo 	rx_status->encoding = RX_ENC_LEGACY;
1990d5c65159SKalle Valo 	rx_status->bw = RATE_INFO_BW_20;
1991d5c65159SKalle Valo 
1992d5c65159SKalle Valo 	rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
1993d5c65159SKalle Valo 
1994d5c65159SKalle Valo 	channel_num = ath11k_dp_rx_h_msdu_start_freq(rx_desc);
1995d5c65159SKalle Valo 
1996d5c65159SKalle Valo 	if (channel_num >= 1 && channel_num <= 14) {
1997d5c65159SKalle Valo 		rx_status->band = NL80211_BAND_2GHZ;
1998d5c65159SKalle Valo 	} else if (channel_num >= 36 && channel_num <= 173) {
1999d5c65159SKalle Valo 		rx_status->band = NL80211_BAND_5GHZ;
2000d5c65159SKalle Valo 	} else {
2001d5c65159SKalle Valo 		ath11k_warn(ar->ab, "Unsupported Channel info received %d\n",
2002d5c65159SKalle Valo 			    channel_num);
2003d5c65159SKalle Valo 		return;
2004d5c65159SKalle Valo 	}
2005d5c65159SKalle Valo 
2006d5c65159SKalle Valo 	rx_status->freq = ieee80211_channel_to_frequency(channel_num,
2007d5c65159SKalle Valo 							 rx_status->band);
2008d5c65159SKalle Valo 
2009d5c65159SKalle Valo 	ath11k_dp_rx_h_rate(ar, rx_desc, rx_status);
2010d5c65159SKalle Valo }
2011d5c65159SKalle Valo 
2012d5c65159SKalle Valo static void ath11k_dp_rx_process_amsdu(struct ath11k *ar,
2013d5c65159SKalle Valo 				       struct sk_buff_head *amsdu_list,
2014d5c65159SKalle Valo 				       struct ieee80211_rx_status *rx_status)
2015d5c65159SKalle Valo {
2016d5c65159SKalle Valo 	struct sk_buff *first;
2017d5c65159SKalle Valo 	struct ath11k_skb_rxcb *rxcb;
2018d5c65159SKalle Valo 	struct hal_rx_desc *rx_desc;
2019d5c65159SKalle Valo 	bool first_mpdu;
2020d5c65159SKalle Valo 
2021d5c65159SKalle Valo 	if (skb_queue_empty(amsdu_list))
2022d5c65159SKalle Valo 		return;
2023d5c65159SKalle Valo 
2024d5c65159SKalle Valo 	first = skb_peek(amsdu_list);
2025d5c65159SKalle Valo 	rxcb = ATH11K_SKB_RXCB(first);
2026d5c65159SKalle Valo 	rx_desc = rxcb->rx_desc;
2027d5c65159SKalle Valo 
2028d5c65159SKalle Valo 	first_mpdu = ath11k_dp_rx_h_attn_first_mpdu(rx_desc);
2029d5c65159SKalle Valo 	if (first_mpdu)
2030d5c65159SKalle Valo 		ath11k_dp_rx_h_ppdu(ar, rx_desc, rx_status);
2031d5c65159SKalle Valo 
2032d5c65159SKalle Valo 	ath11k_dp_rx_h_mpdu(ar, amsdu_list, rx_desc, rx_status);
2033d5c65159SKalle Valo }
2034d5c65159SKalle Valo 
2035d5c65159SKalle Valo static char *ath11k_print_get_tid(struct ieee80211_hdr *hdr, char *out,
2036d5c65159SKalle Valo 				  size_t size)
2037d5c65159SKalle Valo {
2038d5c65159SKalle Valo 	u8 *qc;
2039d5c65159SKalle Valo 	int tid;
2040d5c65159SKalle Valo 
2041d5c65159SKalle Valo 	if (!ieee80211_is_data_qos(hdr->frame_control))
2042d5c65159SKalle Valo 		return "";
2043d5c65159SKalle Valo 
2044d5c65159SKalle Valo 	qc = ieee80211_get_qos_ctl(hdr);
2045d5c65159SKalle Valo 	tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
2046d5c65159SKalle Valo 	snprintf(out, size, "tid %d", tid);
2047d5c65159SKalle Valo 
2048d5c65159SKalle Valo 	return out;
2049d5c65159SKalle Valo }
2050d5c65159SKalle Valo 
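/* Hand a fully prepared MSDU to mac80211, prepending a radiotap HE header
 * for HE frames and logging a debug summary of the rx status.
 */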
2051d5c65159SKalle Valo static void ath11k_dp_rx_deliver_msdu(struct ath11k *ar, struct napi_struct *napi,
2052d5c65159SKalle Valo 				      struct sk_buff *msdu)
2053d5c65159SKalle Valo {
2054e4eb7b5cSJohn Crispin 	static const struct ieee80211_radiotap_he known = {
205593634c61SJohn Crispin 		.data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
205693634c61SJohn Crispin 				     IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN),
2057e4eb7b5cSJohn Crispin 		.data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN),
2058e4eb7b5cSJohn Crispin 	};
2059d5c65159SKalle Valo 	struct ieee80211_rx_status *status;
2060d5c65159SKalle Valo 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
2061e4eb7b5cSJohn Crispin 	struct ieee80211_radiotap_he *he = NULL;
2062d5c65159SKalle Valo 	char tid[32];
2063d5c65159SKalle Valo 
2064d5c65159SKalle Valo 	status = IEEE80211_SKB_RXCB(msdu);
2065e4eb7b5cSJohn Crispin 	if (status->encoding == RX_ENC_HE) {
2066e4eb7b5cSJohn Crispin 		he = skb_push(msdu, sizeof(known));
2067e4eb7b5cSJohn Crispin 		memcpy(he, &known, sizeof(known));
2068e4eb7b5cSJohn Crispin 		status->flag |= RX_FLAG_RADIOTAP_HE;
2069e4eb7b5cSJohn Crispin 	}
2070d5c65159SKalle Valo 
2071d5c65159SKalle Valo 	ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
2072d5c65159SKalle Valo 		   "rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
2073d5c65159SKalle Valo 		   msdu,
2074d5c65159SKalle Valo 		   msdu->len,
2075d5c65159SKalle Valo 		   ieee80211_get_SA(hdr),
2076d5c65159SKalle Valo 		   ath11k_print_get_tid(hdr, tid, sizeof(tid)),
2077d5c65159SKalle Valo 		   is_multicast_ether_addr(ieee80211_get_DA(hdr)) ?
2078d5c65159SKalle Valo 							"mcast" : "ucast",
2079d5c65159SKalle Valo 		   (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4,
2080d5c65159SKalle Valo 		   (status->encoding == RX_ENC_LEGACY) ? "legacy" : "",
2081d5c65159SKalle Valo 		   (status->encoding == RX_ENC_HT) ? "ht" : "",
2082d5c65159SKalle Valo 		   (status->encoding == RX_ENC_VHT) ? "vht" : "",
2083d5c65159SKalle Valo 		   (status->encoding == RX_ENC_HE) ? "he" : "",
2084d5c65159SKalle Valo 		   (status->bw == RATE_INFO_BW_40) ? "40" : "",
2085d5c65159SKalle Valo 		   (status->bw == RATE_INFO_BW_80) ? "80" : "",
2086d5c65159SKalle Valo 		   (status->bw == RATE_INFO_BW_160) ? "160" : "",
2087d5c65159SKalle Valo 		   status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "",
2088d5c65159SKalle Valo 		   status->rate_idx,
2089d5c65159SKalle Valo 		   status->nss,
2090d5c65159SKalle Valo 		   status->freq,
2091d5c65159SKalle Valo 		   status->band, status->flag,
2092d5c65159SKalle Valo 		   !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
2093d5c65159SKalle Valo 		   !!(status->flag & RX_FLAG_MMIC_ERROR),
2094d5c65159SKalle Valo 		   !!(status->flag & RX_FLAG_AMSDU_MORE));
2095d5c65159SKalle Valo 
2096d5c65159SKalle Valo 	/* TODO: trace rx packet */
2097d5c65159SKalle Valo 
2098d5c65159SKalle Valo 	ieee80211_rx_napi(ar->hw, NULL, msdu, napi);
2099d5c65159SKalle Valo }
2100d5c65159SKalle Valo 
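/* Copy the shared rx status into each subframe of the A-MSDU and set the
 * per-MSDU A-MSDU, PN-check and monitor-skip flags before delivery.
 */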
2101d5c65159SKalle Valo static void ath11k_dp_rx_pre_deliver_amsdu(struct ath11k *ar,
2102d5c65159SKalle Valo 					   struct sk_buff_head *amsdu_list,
2103d5c65159SKalle Valo 					   struct ieee80211_rx_status *rxs)
2104d5c65159SKalle Valo {
2105d5c65159SKalle Valo 	struct sk_buff *msdu;
2106d5c65159SKalle Valo 	struct sk_buff *first_subframe;
2107d5c65159SKalle Valo 	struct ieee80211_rx_status *status;
2108d5c65159SKalle Valo 
2109d5c65159SKalle Valo 	first_subframe = skb_peek(amsdu_list);
2110d5c65159SKalle Valo 
2111d5c65159SKalle Valo 	skb_queue_walk(amsdu_list, msdu) {
2112d5c65159SKalle Valo 		/* Setup per-MSDU flags */
2113d5c65159SKalle Valo 		if (skb_queue_is_last(amsdu_list, msdu))
2114d5c65159SKalle Valo 			rxs->flag &= ~RX_FLAG_AMSDU_MORE;
2115d5c65159SKalle Valo 		else
2116d5c65159SKalle Valo 			rxs->flag |= RX_FLAG_AMSDU_MORE;
2117d5c65159SKalle Valo 
2118d5c65159SKalle Valo 		if (msdu == first_subframe) {
2119d5c65159SKalle Valo 			first_subframe = NULL;
2120d5c65159SKalle Valo 			rxs->flag &= ~RX_FLAG_ALLOW_SAME_PN;
2121d5c65159SKalle Valo 		} else {
2122d5c65159SKalle Valo 			rxs->flag |= RX_FLAG_ALLOW_SAME_PN;
2123d5c65159SKalle Valo 		}
2124d5c65159SKalle Valo 		rxs->flag |= RX_FLAG_SKIP_MONITOR;
2125d5c65159SKalle Valo 
2126d5c65159SKalle Valo 		status = IEEE80211_SKB_RXCB(msdu);
2127d5c65159SKalle Valo 		*status = *rxs;
2128d5c65159SKalle Valo 	}
2129d5c65159SKalle Valo }
2130d5c65159SKalle Valo 
2131d5c65159SKalle Valo static void ath11k_dp_rx_process_pending_packets(struct ath11k_base *ab,
2132d5c65159SKalle Valo 						 struct napi_struct *napi,
2133d5c65159SKalle Valo 						 struct sk_buff_head *pending_q,
2134d5c65159SKalle Valo 						 int *quota, u8 mac_id)
2135d5c65159SKalle Valo {
2136d5c65159SKalle Valo 	struct ath11k *ar;
2137d5c65159SKalle Valo 	struct sk_buff *msdu;
2138d5c65159SKalle Valo 	struct ath11k_pdev *pdev;
2139d5c65159SKalle Valo 
2140d5c65159SKalle Valo 	if (skb_queue_empty(pending_q))
2141d5c65159SKalle Valo 		return;
2142d5c65159SKalle Valo 
2143d5c65159SKalle Valo 	ar = ab->pdevs[mac_id].ar;
2144d5c65159SKalle Valo 
2145d5c65159SKalle Valo 	rcu_read_lock();
2146d5c65159SKalle Valo 	pdev = rcu_dereference(ab->pdevs_active[mac_id]);
2147d5c65159SKalle Valo 
2148d5c65159SKalle Valo 	while (*quota && (msdu = __skb_dequeue(pending_q))) {
2149d5c65159SKalle Valo 		if (!pdev) {
2150d5c65159SKalle Valo 			dev_kfree_skb_any(msdu);
2151d5c65159SKalle Valo 			continue;
2152d5c65159SKalle Valo 		}
2153d5c65159SKalle Valo 
2154d5c65159SKalle Valo 		ath11k_dp_rx_deliver_msdu(ar, napi, msdu);
2155d5c65159SKalle Valo 		(*quota)--;
2156d5c65159SKalle Valo 	}
2157d5c65159SKalle Valo 	rcu_read_unlock();
2158d5c65159SKalle Valo }
2159d5c65159SKalle Valo 
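/* NAPI poll handler for one pdev's REO destination ring: deliver MSDUs left
 * pending from the previous poll, then reap new entries from the ring up to
 * the remaining quota for A-MSDU processing.
 */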
2160d5c65159SKalle Valo int ath11k_dp_process_rx(struct ath11k_base *ab, int mac_id,
2161d5c65159SKalle Valo 			 struct napi_struct *napi, struct sk_buff_head *pending_q,
2162d5c65159SKalle Valo 			 int budget)
2163d5c65159SKalle Valo {
2164d5c65159SKalle Valo 	struct ath11k *ar = ab->pdevs[mac_id].ar;
2165d5c65159SKalle Valo 	struct ath11k_pdev_dp *dp = &ar->dp;
2166d5c65159SKalle Valo 	struct ieee80211_rx_status *rx_status = &dp->rx_status;
2167d5c65159SKalle Valo 	struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
2168d5c65159SKalle Valo 	struct hal_srng *srng;
2169d5c65159SKalle Valo 	struct sk_buff *msdu;
2170d5c65159SKalle Valo 	struct sk_buff_head msdu_list;
2171d5c65159SKalle Valo 	struct sk_buff_head amsdu_list;
2172d5c65159SKalle Valo 	struct ath11k_skb_rxcb *rxcb;
2173d5c65159SKalle Valo 	u32 *rx_desc;
2174d5c65159SKalle Valo 	int buf_id;
2175d5c65159SKalle Valo 	int num_buffs_reaped = 0;
2176d5c65159SKalle Valo 	int quota = budget;
2177d5c65159SKalle Valo 	int ret;
2178d5c65159SKalle Valo 	bool done = false;
2179d5c65159SKalle Valo 
2180d5c65159SKalle Valo 	/* Process any packets pending from the previous napi poll.
2181d5c65159SKalle Valo 	 * Note: All MSDUs in this pending_q correspond to the same mac id
2182d5c65159SKalle Valo 	 * because of the pdev based REO dest mapping and because each irq
2183d5c65159SKalle Valo 	 * group id maps to a specific REO dest ring.
2184d5c65159SKalle Valo 	 */
2185d5c65159SKalle Valo 	ath11k_dp_rx_process_pending_packets(ab, napi, pending_q, &quota,
2186d5c65159SKalle Valo 					     mac_id);
2187d5c65159SKalle Valo 
2188d5c65159SKalle Valo 	/* If the whole quota was exhausted by processing the pending_q,
2189d5c65159SKalle Valo 	 * wait for the next napi poll to reap new entries.
2190d5c65159SKalle Valo 	 */
2191d5c65159SKalle Valo 	if (!quota)
2192d5c65159SKalle Valo 		goto exit;
2193d5c65159SKalle Valo 
2194d5c65159SKalle Valo 	__skb_queue_head_init(&msdu_list);
2195d5c65159SKalle Valo 
2196d5c65159SKalle Valo 	srng = &ab->hal.srng_list[dp->reo_dst_ring.ring_id];
2197d5c65159SKalle Valo 
2198d5c65159SKalle Valo 	spin_lock_bh(&srng->lock);
2199d5c65159SKalle Valo 
2200d5c65159SKalle Valo 	ath11k_hal_srng_access_begin(ab, srng);
2201d5c65159SKalle Valo 
2202d5c65159SKalle Valo try_again:
2203d5c65159SKalle Valo 	while ((rx_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
2204293cb583SJohn Crispin 		struct hal_reo_dest_ring *desc = (struct hal_reo_dest_ring *)rx_desc;
2205293cb583SJohn Crispin 		enum hal_reo_dest_ring_push_reason push_reason;
2206293cb583SJohn Crispin 		u32 cookie;
2207d5c65159SKalle Valo 
2208293cb583SJohn Crispin 		cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
2209293cb583SJohn Crispin 				   desc->buf_addr_info.info1);
2210d5c65159SKalle Valo 		buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
2211293cb583SJohn Crispin 				   cookie);
2212d5c65159SKalle Valo 		spin_lock_bh(&rx_ring->idr_lock);
2213d5c65159SKalle Valo 		msdu = idr_find(&rx_ring->bufs_idr, buf_id);
2214d5c65159SKalle Valo 		if (!msdu) {
2215d5c65159SKalle Valo 			ath11k_warn(ab, "frame rx with invalid buf_id %d\n",
2216d5c65159SKalle Valo 				    buf_id);
2217d5c65159SKalle Valo 			spin_unlock_bh(&rx_ring->idr_lock);
2218d5c65159SKalle Valo 			continue;
2219d5c65159SKalle Valo 		}
2220d5c65159SKalle Valo 
2221d5c65159SKalle Valo 		idr_remove(&rx_ring->bufs_idr, buf_id);
2222d5c65159SKalle Valo 		spin_unlock_bh(&rx_ring->idr_lock);
2223d5c65159SKalle Valo 
2224d5c65159SKalle Valo 		rxcb = ATH11K_SKB_RXCB(msdu);
2225d5c65159SKalle Valo 		dma_unmap_single(ab->dev, rxcb->paddr,
2226d5c65159SKalle Valo 				 msdu->len + skb_tailroom(msdu),
2227d5c65159SKalle Valo 				 DMA_FROM_DEVICE);
2228d5c65159SKalle Valo 
2229d5c65159SKalle Valo 		num_buffs_reaped++;
2230d5c65159SKalle Valo 
2231293cb583SJohn Crispin 		push_reason = FIELD_GET(HAL_REO_DEST_RING_INFO0_PUSH_REASON,
2232293cb583SJohn Crispin 					desc->info0);
2233293cb583SJohn Crispin 		if (push_reason !=
2234d5c65159SKalle Valo 		    HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION) {
2235d5c65159SKalle Valo 			/* TODO: Check if the msdu can be sent up for processing */
2236d5c65159SKalle Valo 			dev_kfree_skb_any(msdu);
2237d5c65159SKalle Valo 			ab->soc_stats.hal_reo_error[dp->reo_dst_ring.ring_id]++;
2238d5c65159SKalle Valo 			continue;
2239d5c65159SKalle Valo 		}
2240d5c65159SKalle Valo 
2241293cb583SJohn Crispin 		rxcb->is_first_msdu = !!(desc->rx_msdu_info.info0 &
2242293cb583SJohn Crispin 					 RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU);
2243293cb583SJohn Crispin 		rxcb->is_last_msdu = !!(desc->rx_msdu_info.info0 &
2244293cb583SJohn Crispin 					RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU);
2245293cb583SJohn Crispin 		rxcb->is_continuation = !!(desc->rx_msdu_info.info0 &
2246293cb583SJohn Crispin 					   RX_MSDU_DESC_INFO0_MSDU_CONTINUATION);
2247d5c65159SKalle Valo 		rxcb->mac_id = mac_id;
2248d5c65159SKalle Valo 		__skb_queue_tail(&msdu_list, msdu);
2249d5c65159SKalle Valo 
2250d5c65159SKalle Valo 		/* Stop reaping from the ring once the quota is exhausted
2251d5c65159SKalle Valo 		 * and we have received all msdus of the A-MSDU. Any
2252d5c65159SKalle Valo 		 * additional msdus reaped in excess of the quota here are
2253d5c65159SKalle Valo 		 * pushed into the pending queue to be processed during
2254d5c65159SKalle Valo 		 * the next napi poll.
2255d5c65159SKalle Valo 		 * Note: more profiling can be done to see the impact on
2256d5c65159SKalle Valo 		 * pending_q and throughput under various traffic loads and
2257d5c65159SKalle Valo 		 * densities, and how using budget instead of quota affects it.
2258d5c65159SKalle Valo 		 */
2259d5c65159SKalle Valo 		if (num_buffs_reaped >= quota && rxcb->is_last_msdu &&
2260d5c65159SKalle Valo 		    !rxcb->is_continuation) {
2261d5c65159SKalle Valo 			done = true;
2262d5c65159SKalle Valo 			break;
2263d5c65159SKalle Valo 		}
2264d5c65159SKalle Valo 	}
2265d5c65159SKalle Valo 
2266d5c65159SKalle Valo 	/* Hw might have updated the head pointer after we cached it.
2267d5c65159SKalle Valo 	 * In this case, even though there are entries in the ring, we'll
2268d5c65159SKalle Valo 	 * get a NULL rx_desc. Give the read another try with the updated
2269d5c65159SKalle Valo 	 * cached head pointer so that we can reap the complete MPDU in the
2270d5c65159SKalle Valo 	 * current rx processing.
2271d5c65159SKalle Valo 	 */
2272d5c65159SKalle Valo 	if (!done && ath11k_hal_srng_dst_num_free(ab, srng, true)) {
2273d5c65159SKalle Valo 		ath11k_hal_srng_access_end(ab, srng);
2274d5c65159SKalle Valo 		goto try_again;
2275d5c65159SKalle Valo 	}
2276d5c65159SKalle Valo 
2277d5c65159SKalle Valo 	ath11k_hal_srng_access_end(ab, srng);
2278d5c65159SKalle Valo 
2279d5c65159SKalle Valo 	spin_unlock_bh(&srng->lock);
2280d5c65159SKalle Valo 
2281d5c65159SKalle Valo 	if (!num_buffs_reaped)
2282d5c65159SKalle Valo 		goto exit;
2283d5c65159SKalle Valo 
2284d5c65159SKalle Valo 	/* Should we reschedule it later if we are not able to replenish all
2285d5c65159SKalle Valo 	 * the buffers?
2286d5c65159SKalle Valo 	 */
2287d5c65159SKalle Valo 	ath11k_dp_rxbufs_replenish(ab, mac_id, rx_ring, num_buffs_reaped,
2288d5c65159SKalle Valo 				   HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC);
2289d5c65159SKalle Valo 
2290d5c65159SKalle Valo 	rcu_read_lock();
2291d5c65159SKalle Valo 	if (!rcu_dereference(ab->pdevs_active[mac_id])) {
2292d5c65159SKalle Valo 		__skb_queue_purge(&msdu_list);
2293d5c65159SKalle Valo 		goto rcu_unlock;
2294d5c65159SKalle Valo 	}
2295d5c65159SKalle Valo 
2296d5c65159SKalle Valo 	if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) {
2297d5c65159SKalle Valo 		__skb_queue_purge(&msdu_list);
2298d5c65159SKalle Valo 		goto rcu_unlock;
2299d5c65159SKalle Valo 	}
2300d5c65159SKalle Valo 
2301d5c65159SKalle Valo 	while (!skb_queue_empty(&msdu_list)) {
2302d5c65159SKalle Valo 		__skb_queue_head_init(&amsdu_list);
2303d5c65159SKalle Valo 		ret = ath11k_dp_rx_retrieve_amsdu(ar, &msdu_list, &amsdu_list);
2304d5c65159SKalle Valo 		if (ret) {
2305d5c65159SKalle Valo 			if (ret == -EIO) {
2306d5c65159SKalle Valo 				ath11k_err(ab, "rx ring got corrupted %d\n", ret);
2307d5c65159SKalle Valo 				__skb_queue_purge(&msdu_list);
2308d5c65159SKalle Valo 				/* Should we stop processing any more rx
2309d5c65159SKalle Valo 				 * from this ring in the future?
2310d5c65159SKalle Valo 				 */
2311d5c65159SKalle Valo 				goto rcu_unlock;
2312d5c65159SKalle Valo 			}
2313d5c65159SKalle Valo 
2314d5c65159SKalle Valo 			/* A-MSDU retrieval failed due to a non-fatal condition;
2315d5c65159SKalle Valo 			 * continue processing with the next msdu.
2316d5c65159SKalle Valo 			 */
2317d5c65159SKalle Valo 			continue;
2318d5c65159SKalle Valo 		}
2319d5c65159SKalle Valo 
2320d5c65159SKalle Valo 		ath11k_dp_rx_process_amsdu(ar, &amsdu_list, rx_status);
2321d5c65159SKalle Valo 
2322d5c65159SKalle Valo 		ath11k_dp_rx_pre_deliver_amsdu(ar, &amsdu_list, rx_status);
2323d5c65159SKalle Valo 		skb_queue_splice_tail(&amsdu_list, pending_q);
2324d5c65159SKalle Valo 	}
2325d5c65159SKalle Valo 
2326d5c65159SKalle Valo 	while (quota && (msdu = __skb_dequeue(pending_q))) {
2327d5c65159SKalle Valo 		ath11k_dp_rx_deliver_msdu(ar, napi, msdu);
2328d5c65159SKalle Valo 		quota--;
2329d5c65159SKalle Valo 	}
2330d5c65159SKalle Valo 
2331d5c65159SKalle Valo rcu_unlock:
2332d5c65159SKalle Valo 	rcu_read_unlock();
2333d5c65159SKalle Valo exit:
2334d5c65159SKalle Valo 	return budget - quota;
2335d5c65159SKalle Valo }
2336d5c65159SKalle Valo 
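/* Accumulate per-msdu rx statistics from a parsed monitor status PPDU into
 * the station's rx_stats (msdu/mpdu counters and nss/mcs/gi/bw/tid/preamble
 * histograms), and update the combined RSSI and rx duration. For 11a/11b
 * preambles nss, mcs and tid are normalized to fixed values before the
 * histograms are updated.
 */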
2337d5c65159SKalle Valo static void ath11k_dp_rx_update_peer_stats(struct ath11k_sta *arsta,
2338d5c65159SKalle Valo 					   struct hal_rx_mon_ppdu_info *ppdu_info)
2339d5c65159SKalle Valo {
2340d5c65159SKalle Valo 	struct ath11k_rx_peer_stats *rx_stats = arsta->rx_stats;
2341d5c65159SKalle Valo 	u32 num_msdu;
2342d5c65159SKalle Valo 
2343d5c65159SKalle Valo 	if (!rx_stats)
2344d5c65159SKalle Valo 		return;
2345d5c65159SKalle Valo 
2346d5c65159SKalle Valo 	num_msdu = ppdu_info->tcp_msdu_count + ppdu_info->tcp_ack_msdu_count +
2347d5c65159SKalle Valo 		   ppdu_info->udp_msdu_count + ppdu_info->other_msdu_count;
2348d5c65159SKalle Valo 
2349d5c65159SKalle Valo 	rx_stats->num_msdu += num_msdu;
2350d5c65159SKalle Valo 	rx_stats->tcp_msdu_count += ppdu_info->tcp_msdu_count +
2351d5c65159SKalle Valo 				    ppdu_info->tcp_ack_msdu_count;
2352d5c65159SKalle Valo 	rx_stats->udp_msdu_count += ppdu_info->udp_msdu_count;
2353d5c65159SKalle Valo 	rx_stats->other_msdu_count += ppdu_info->other_msdu_count;
2354d5c65159SKalle Valo 
2355d5c65159SKalle Valo 	if (ppdu_info->preamble_type == HAL_RX_PREAMBLE_11A ||
2356d5c65159SKalle Valo 	    ppdu_info->preamble_type == HAL_RX_PREAMBLE_11B) {
2357d5c65159SKalle Valo 		ppdu_info->nss = 1;
2358d5c65159SKalle Valo 		ppdu_info->mcs = HAL_RX_MAX_MCS;
2359d5c65159SKalle Valo 		ppdu_info->tid = IEEE80211_NUM_TIDS;
2360d5c65159SKalle Valo 	}
2361d5c65159SKalle Valo 
2362d5c65159SKalle Valo 	if (ppdu_info->nss > 0 && ppdu_info->nss <= HAL_RX_MAX_NSS)
2363d5c65159SKalle Valo 		rx_stats->nss_count[ppdu_info->nss - 1] += num_msdu;
2364d5c65159SKalle Valo 
2365d5c65159SKalle Valo 	if (ppdu_info->mcs <= HAL_RX_MAX_MCS)
2366d5c65159SKalle Valo 		rx_stats->mcs_count[ppdu_info->mcs] += num_msdu;
2367d5c65159SKalle Valo 
2368d5c65159SKalle Valo 	if (ppdu_info->gi < HAL_RX_GI_MAX)
2369d5c65159SKalle Valo 		rx_stats->gi_count[ppdu_info->gi] += num_msdu;
2370d5c65159SKalle Valo 
2371d5c65159SKalle Valo 	if (ppdu_info->bw < HAL_RX_BW_MAX)
2372d5c65159SKalle Valo 		rx_stats->bw_count[ppdu_info->bw] += num_msdu;
2373d5c65159SKalle Valo 
2374d5c65159SKalle Valo 	if (ppdu_info->ldpc < HAL_RX_SU_MU_CODING_MAX)
2375d5c65159SKalle Valo 		rx_stats->coding_count[ppdu_info->ldpc] += num_msdu;
2376d5c65159SKalle Valo 
2377d5c65159SKalle Valo 	if (ppdu_info->tid <= IEEE80211_NUM_TIDS)
2378d5c65159SKalle Valo 		rx_stats->tid_count[ppdu_info->tid] += num_msdu;
2379d5c65159SKalle Valo 
2380d5c65159SKalle Valo 	if (ppdu_info->preamble_type < HAL_RX_PREAMBLE_MAX)
2381d5c65159SKalle Valo 		rx_stats->pream_cnt[ppdu_info->preamble_type] += num_msdu;
2382d5c65159SKalle Valo 
2383d5c65159SKalle Valo 	if (ppdu_info->reception_type < HAL_RX_RECEPTION_TYPE_MAX)
2384d5c65159SKalle Valo 		rx_stats->reception_type[ppdu_info->reception_type] += num_msdu;
2385d5c65159SKalle Valo 
2386d5c65159SKalle Valo 	if (ppdu_info->is_stbc)
2387d5c65159SKalle Valo 		rx_stats->stbc_count += num_msdu;
2388d5c65159SKalle Valo 
2389d5c65159SKalle Valo 	if (ppdu_info->beamformed)
2390d5c65159SKalle Valo 		rx_stats->beamformed_count += num_msdu;
2391d5c65159SKalle Valo 
2392d5c65159SKalle Valo 	if (ppdu_info->num_mpdu_fcs_ok > 1)
2393d5c65159SKalle Valo 		rx_stats->ampdu_msdu_count += num_msdu;
2394d5c65159SKalle Valo 	else
2395d5c65159SKalle Valo 		rx_stats->non_ampdu_msdu_count += num_msdu;
2396d5c65159SKalle Valo 
2397d5c65159SKalle Valo 	rx_stats->num_mpdu_fcs_ok += ppdu_info->num_mpdu_fcs_ok;
2398d5c65159SKalle Valo 	rx_stats->num_mpdu_fcs_err += ppdu_info->num_mpdu_fcs_err;
23996a0c3702SJohn Crispin 	rx_stats->dcm_count += ppdu_info->dcm;
24006a0c3702SJohn Crispin 	rx_stats->ru_alloc_cnt[ppdu_info->ru_alloc] += num_msdu;
2401d5c65159SKalle Valo 
2402d5c65159SKalle Valo 	arsta->rssi_comb = ppdu_info->rssi_comb;
2403d5c65159SKalle Valo 	rx_stats->rx_duration += ppdu_info->rx_duration;
2404d5c65159SKalle Valo 	arsta->rx_duration = rx_stats->rx_duration;
2405d5c65159SKalle Valo }
2406d5c65159SKalle Valo 
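/* Allocate and DMA-map one buffer for the rx monitor status ring. The skb
 * data pointer is aligned to DP_RX_BUFFER_ALIGN_SIZE and the buffer is
 * tracked in the ring's idr; *buf_id receives the allocated idr slot.
 * Returns NULL if allocation, mapping or idr insertion fails.
 */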
2407d5c65159SKalle Valo static struct sk_buff *ath11k_dp_rx_alloc_mon_status_buf(struct ath11k_base *ab,
2408d5c65159SKalle Valo 							 struct dp_rxdma_ring *rx_ring,
2409d5c65159SKalle Valo 							 int *buf_id, gfp_t gfp)
2410d5c65159SKalle Valo {
2411d5c65159SKalle Valo 	struct sk_buff *skb;
2412d5c65159SKalle Valo 	dma_addr_t paddr;
2413d5c65159SKalle Valo 
2414d5c65159SKalle Valo 	skb = dev_alloc_skb(DP_RX_BUFFER_SIZE +
2415d5c65159SKalle Valo 			    DP_RX_BUFFER_ALIGN_SIZE);
2416d5c65159SKalle Valo 
2417d5c65159SKalle Valo 	if (!skb)
2418d5c65159SKalle Valo 		goto fail_alloc_skb;
2419d5c65159SKalle Valo 
2420d5c65159SKalle Valo 	if (!IS_ALIGNED((unsigned long)skb->data,
2421d5c65159SKalle Valo 			DP_RX_BUFFER_ALIGN_SIZE)) {
2422d5c65159SKalle Valo 		skb_pull(skb, PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) -
2423d5c65159SKalle Valo 			 skb->data);
2424d5c65159SKalle Valo 	}
2425d5c65159SKalle Valo 
2426d5c65159SKalle Valo 	paddr = dma_map_single(ab->dev, skb->data,
2427d5c65159SKalle Valo 			       skb->len + skb_tailroom(skb),
2428d5c65159SKalle Valo 			       DMA_BIDIRECTIONAL);
2429d5c65159SKalle Valo 	if (unlikely(dma_mapping_error(ab->dev, paddr)))
2430d5c65159SKalle Valo 		goto fail_free_skb;
2431d5c65159SKalle Valo 
2432d5c65159SKalle Valo 	spin_lock_bh(&rx_ring->idr_lock);
2433d5c65159SKalle Valo 	*buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0,
2434d5c65159SKalle Valo 			    rx_ring->bufs_max, gfp);
2435d5c65159SKalle Valo 	spin_unlock_bh(&rx_ring->idr_lock);
2436d5c65159SKalle Valo 	if (*buf_id < 0)
2437d5c65159SKalle Valo 		goto fail_dma_unmap;
2438d5c65159SKalle Valo 
2439d5c65159SKalle Valo 	ATH11K_SKB_RXCB(skb)->paddr = paddr;
2440d5c65159SKalle Valo 	return skb;
2441d5c65159SKalle Valo 
2442d5c65159SKalle Valo fail_dma_unmap:
2443d5c65159SKalle Valo 	dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
2444d5c65159SKalle Valo 			 DMA_BIDIRECTIONAL);
2445d5c65159SKalle Valo fail_free_skb:
2446d5c65159SKalle Valo 	dev_kfree_skb_any(skb);
2447d5c65159SKalle Valo fail_alloc_skb:
2448d5c65159SKalle Valo 	return NULL;
2449d5c65159SKalle Valo }
2450d5c65159SKalle Valo 
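/* Replenish the rx monitor status refill ring with up to req_entries newly
 * allocated buffers. Each ring descriptor is programmed with the buffer's
 * DMA address and a cookie encoding the pdev id and idr buf_id. Returns the
 * number of entries actually programmed.
 */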
2451d5c65159SKalle Valo int ath11k_dp_rx_mon_status_bufs_replenish(struct ath11k_base *ab, int mac_id,
2452d5c65159SKalle Valo 					   struct dp_rxdma_ring *rx_ring,
2453d5c65159SKalle Valo 					   int req_entries,
2454d5c65159SKalle Valo 					   enum hal_rx_buf_return_buf_manager mgr,
2455d5c65159SKalle Valo 					   gfp_t gfp)
2456d5c65159SKalle Valo {
2457d5c65159SKalle Valo 	struct hal_srng *srng;
2458d5c65159SKalle Valo 	u32 *desc;
2459d5c65159SKalle Valo 	struct sk_buff *skb;
2460d5c65159SKalle Valo 	int num_free;
2461d5c65159SKalle Valo 	int num_remain;
2462d5c65159SKalle Valo 	int buf_id;
2463d5c65159SKalle Valo 	u32 cookie;
2464d5c65159SKalle Valo 	dma_addr_t paddr;
2465d5c65159SKalle Valo 
2466d5c65159SKalle Valo 	req_entries = min(req_entries, rx_ring->bufs_max);
2467d5c65159SKalle Valo 
2468d5c65159SKalle Valo 	srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];
2469d5c65159SKalle Valo 
2470d5c65159SKalle Valo 	spin_lock_bh(&srng->lock);
2471d5c65159SKalle Valo 
2472d5c65159SKalle Valo 	ath11k_hal_srng_access_begin(ab, srng);
2473d5c65159SKalle Valo 
2474d5c65159SKalle Valo 	num_free = ath11k_hal_srng_src_num_free(ab, srng, true);
2475d5c65159SKalle Valo 
2476d5c65159SKalle Valo 	req_entries = min(num_free, req_entries);
2477d5c65159SKalle Valo 	num_remain = req_entries;
2478d5c65159SKalle Valo 
2479d5c65159SKalle Valo 	while (num_remain > 0) {
2480d5c65159SKalle Valo 		skb = ath11k_dp_rx_alloc_mon_status_buf(ab, rx_ring,
2481d5c65159SKalle Valo 							&buf_id, gfp);
2482d5c65159SKalle Valo 		if (!skb)
2483d5c65159SKalle Valo 			break;
2484d5c65159SKalle Valo 		paddr = ATH11K_SKB_RXCB(skb)->paddr;
2485d5c65159SKalle Valo 
2486d5c65159SKalle Valo 		desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
2487d5c65159SKalle Valo 		if (!desc)
2488d5c65159SKalle Valo 			goto fail_desc_get;
2489d5c65159SKalle Valo 
2490d5c65159SKalle Valo 		cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) |
2491d5c65159SKalle Valo 			 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);
2492d5c65159SKalle Valo 
2493d5c65159SKalle Valo 		num_remain--;
2494d5c65159SKalle Valo 
2495d5c65159SKalle Valo 		ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr);
2496d5c65159SKalle Valo 	}
2497d5c65159SKalle Valo 
2498d5c65159SKalle Valo 	ath11k_hal_srng_access_end(ab, srng);
2499d5c65159SKalle Valo 
2500d5c65159SKalle Valo 	spin_unlock_bh(&srng->lock);
2501d5c65159SKalle Valo 
2502d5c65159SKalle Valo 	return req_entries - num_remain;
2503d5c65159SKalle Valo 
2504d5c65159SKalle Valo fail_desc_get:
2505d5c65159SKalle Valo 	spin_lock_bh(&rx_ring->idr_lock);
2506d5c65159SKalle Valo 	idr_remove(&rx_ring->bufs_idr, buf_id);
2507d5c65159SKalle Valo 	spin_unlock_bh(&rx_ring->idr_lock);
2508d5c65159SKalle Valo 	dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
2509d5c65159SKalle Valo 			 DMA_BIDIRECTIONAL);
2510d5c65159SKalle Valo 	dev_kfree_skb_any(skb);
2511d5c65159SKalle Valo 	ath11k_hal_srng_access_end(ab, srng);
2512d5c65159SKalle Valo 	spin_unlock_bh(&srng->lock);
2513d5c65159SKalle Valo 
2514d5c65159SKalle Valo 	return req_entries - num_remain;
2515d5c65159SKalle Valo }
2516d5c65159SKalle Valo 
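/* Reap up to *budget entries from the rx monitor status ring. Status
 * buffers that carry a HAL_RX_STATUS_BUFFER_DONE TLV are unmapped and moved
 * to skb_list, and fresh buffers are allocated to re-arm the ring. Returns
 * the number of ring entries processed.
 */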
2517d5c65159SKalle Valo static int ath11k_dp_rx_reap_mon_status_ring(struct ath11k_base *ab, int mac_id,
2518d5c65159SKalle Valo 					     int *budget, struct sk_buff_head *skb_list)
2519d5c65159SKalle Valo {
2520d5c65159SKalle Valo 	struct ath11k *ar = ab->pdevs[mac_id].ar;
2521d5c65159SKalle Valo 	struct ath11k_pdev_dp *dp = &ar->dp;
2522d5c65159SKalle Valo 	struct dp_rxdma_ring *rx_ring = &dp->rx_mon_status_refill_ring;
2523d5c65159SKalle Valo 	struct hal_srng *srng;
2524d5c65159SKalle Valo 	void *rx_mon_status_desc;
2525d5c65159SKalle Valo 	struct sk_buff *skb;
2526d5c65159SKalle Valo 	struct ath11k_skb_rxcb *rxcb;
2527d5c65159SKalle Valo 	struct hal_tlv_hdr *tlv;
2528d5c65159SKalle Valo 	u32 cookie;
2529d5c65159SKalle Valo 	int buf_id;
2530d5c65159SKalle Valo 	dma_addr_t paddr;
2531d5c65159SKalle Valo 	u8 rbm;
2532d5c65159SKalle Valo 	int num_buffs_reaped = 0;
2533d5c65159SKalle Valo 
2534d5c65159SKalle Valo 	srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];
2535d5c65159SKalle Valo 
2536d5c65159SKalle Valo 	spin_lock_bh(&srng->lock);
2537d5c65159SKalle Valo 
2538d5c65159SKalle Valo 	ath11k_hal_srng_access_begin(ab, srng);
2539d5c65159SKalle Valo 	while (*budget) {
2540d5c65159SKalle Valo 		*budget -= 1;
2541d5c65159SKalle Valo 		rx_mon_status_desc =
2542d5c65159SKalle Valo 			ath11k_hal_srng_src_peek(ab, srng);
2543d5c65159SKalle Valo 		if (!rx_mon_status_desc)
2544d5c65159SKalle Valo 			break;
2545d5c65159SKalle Valo 
2546d5c65159SKalle Valo 		ath11k_hal_rx_buf_addr_info_get(rx_mon_status_desc, &paddr,
2547d5c65159SKalle Valo 						&cookie, &rbm);
2548d5c65159SKalle Valo 		if (paddr) {
2549d5c65159SKalle Valo 			buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, cookie);
2550d5c65159SKalle Valo 
2551d5c65159SKalle Valo 			spin_lock_bh(&rx_ring->idr_lock);
2552d5c65159SKalle Valo 			skb = idr_find(&rx_ring->bufs_idr, buf_id);
2553d5c65159SKalle Valo 			if (!skb) {
2554d5c65159SKalle Valo 				ath11k_warn(ab, "rx monitor status with invalid buf_id %d\n",
2555d5c65159SKalle Valo 					    buf_id);
2556d5c65159SKalle Valo 				spin_unlock_bh(&rx_ring->idr_lock);
2557d5c65159SKalle Valo 				continue;
2558d5c65159SKalle Valo 			}
2559d5c65159SKalle Valo 
2560d5c65159SKalle Valo 			idr_remove(&rx_ring->bufs_idr, buf_id);
2561d5c65159SKalle Valo 			spin_unlock_bh(&rx_ring->idr_lock);
2562d5c65159SKalle Valo 
2563d5c65159SKalle Valo 			rxcb = ATH11K_SKB_RXCB(skb);
2564d5c65159SKalle Valo 
2565d5c65159SKalle Valo 			dma_sync_single_for_cpu(ab->dev, rxcb->paddr,
2566d5c65159SKalle Valo 						skb->len + skb_tailroom(skb),
2567d5c65159SKalle Valo 						DMA_FROM_DEVICE);
2568d5c65159SKalle Valo 
2569d5c65159SKalle Valo 			dma_unmap_single(ab->dev, rxcb->paddr,
2570d5c65159SKalle Valo 					 skb->len + skb_tailroom(skb),
2571d5c65159SKalle Valo 					 DMA_BIDIRECTIONAL);
2572d5c65159SKalle Valo 
2573d5c65159SKalle Valo 			tlv = (struct hal_tlv_hdr *)skb->data;
2574d5c65159SKalle Valo 			if (FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl) !=
2575d5c65159SKalle Valo 					HAL_RX_STATUS_BUFFER_DONE) {
2576d5c65159SKalle Valo 				ath11k_hal_srng_src_get_next_entry(ab, srng);
2577d5c65159SKalle Valo 				continue;
2578d5c65159SKalle Valo 			}
2579d5c65159SKalle Valo 
2580d5c65159SKalle Valo 			__skb_queue_tail(skb_list, skb);
2581d5c65159SKalle Valo 		}
2582d5c65159SKalle Valo 
2583d5c65159SKalle Valo 		skb = ath11k_dp_rx_alloc_mon_status_buf(ab, rx_ring,
2584d5c65159SKalle Valo 							&buf_id, GFP_ATOMIC);
2585d5c65159SKalle Valo 
2586d5c65159SKalle Valo 		if (!skb) {
2587d5c65159SKalle Valo 			ath11k_hal_rx_buf_addr_info_set(rx_mon_status_desc, 0, 0,
2588d5c65159SKalle Valo 							HAL_RX_BUF_RBM_SW3_BM);
2589d5c65159SKalle Valo 			num_buffs_reaped++;
2590d5c65159SKalle Valo 			break;
2591d5c65159SKalle Valo 		}
2592d5c65159SKalle Valo 		rxcb = ATH11K_SKB_RXCB(skb);
2593d5c65159SKalle Valo 
2594d5c65159SKalle Valo 		cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) |
2595d5c65159SKalle Valo 			 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);
2596d5c65159SKalle Valo 
2597d5c65159SKalle Valo 		ath11k_hal_rx_buf_addr_info_set(rx_mon_status_desc, rxcb->paddr,
2598d5c65159SKalle Valo 						cookie, HAL_RX_BUF_RBM_SW3_BM);
2599d5c65159SKalle Valo 		ath11k_hal_srng_src_get_next_entry(ab, srng);
2600d5c65159SKalle Valo 		num_buffs_reaped++;
2601d5c65159SKalle Valo 	}
2602d5c65159SKalle Valo 	ath11k_hal_srng_access_end(ab, srng);
2603d5c65159SKalle Valo 	spin_unlock_bh(&srng->lock);
2604d5c65159SKalle Valo 
2605d5c65159SKalle Valo 	return num_buffs_reaped;
2606d5c65159SKalle Valo }
2607d5c65159SKalle Valo 
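/* NAPI handler for the rx monitor status ring. Each reaped status buffer is
 * parsed into a hal_rx_mon_ppdu_info; for completed PPDUs with a valid
 * peer id the per-peer rx statistics are updated under ab->base_lock.
 * Returns the number of status buffers reaped.
 */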
2608d5c65159SKalle Valo int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id,
2609d5c65159SKalle Valo 				    struct napi_struct *napi, int budget)
2610d5c65159SKalle Valo {
2611d5c65159SKalle Valo 	struct ath11k *ar = ab->pdevs[mac_id].ar;
2612d5c65159SKalle Valo 	enum hal_rx_mon_status hal_status;
2613d5c65159SKalle Valo 	struct sk_buff *skb;
2614d5c65159SKalle Valo 	struct sk_buff_head skb_list;
2615d5c65159SKalle Valo 	struct hal_rx_mon_ppdu_info ppdu_info;
2616d5c65159SKalle Valo 	struct ath11k_peer *peer;
2617d5c65159SKalle Valo 	struct ath11k_sta *arsta;
2618d5c65159SKalle Valo 	int num_buffs_reaped = 0;
2619d5c65159SKalle Valo 
2620d5c65159SKalle Valo 	__skb_queue_head_init(&skb_list);
2621d5c65159SKalle Valo 
2622d5c65159SKalle Valo 	num_buffs_reaped = ath11k_dp_rx_reap_mon_status_ring(ab, mac_id, &budget,
2623d5c65159SKalle Valo 							     &skb_list);
2624d5c65159SKalle Valo 	if (!num_buffs_reaped)
2625d5c65159SKalle Valo 		goto exit;
2626d5c65159SKalle Valo 
2627d5c65159SKalle Valo 	while ((skb = __skb_dequeue(&skb_list))) {
2628d5c65159SKalle Valo 		memset(&ppdu_info, 0, sizeof(ppdu_info));
2629d5c65159SKalle Valo 		ppdu_info.peer_id = HAL_INVALID_PEERID;
2630d5c65159SKalle Valo 
2631d5c65159SKalle Valo 		if (ath11k_debug_is_pktlog_rx_stats_enabled(ar))
2632d5c65159SKalle Valo 			trace_ath11k_htt_rxdesc(ar, skb->data, DP_RX_BUFFER_SIZE);
2633d5c65159SKalle Valo 
2634d5c65159SKalle Valo 		hal_status = ath11k_hal_rx_parse_mon_status(ab, &ppdu_info, skb);
2635d5c65159SKalle Valo 
2636d5c65159SKalle Valo 		if (ppdu_info.peer_id == HAL_INVALID_PEERID ||
2637d5c65159SKalle Valo 		    hal_status != HAL_RX_MON_STATUS_PPDU_DONE) {
2638d5c65159SKalle Valo 			dev_kfree_skb_any(skb);
2639d5c65159SKalle Valo 			continue;
2640d5c65159SKalle Valo 		}
2641d5c65159SKalle Valo 
2642d5c65159SKalle Valo 		rcu_read_lock();
2643d5c65159SKalle Valo 		spin_lock_bh(&ab->base_lock);
2644d5c65159SKalle Valo 		peer = ath11k_peer_find_by_id(ab, ppdu_info.peer_id);
2645d5c65159SKalle Valo 
2646d5c65159SKalle Valo 		if (!peer || !peer->sta) {
26472dab7d22SJohn Crispin 			ath11k_dbg(ab, ATH11K_DBG_DATA,
26482dab7d22SJohn Crispin 				   "failed to find the peer with peer_id %d\n",
2649d5c65159SKalle Valo 				   ppdu_info.peer_id);
2650d5c65159SKalle Valo 			spin_unlock_bh(&ab->base_lock);
2651d5c65159SKalle Valo 			rcu_read_unlock();
2652d5c65159SKalle Valo 			dev_kfree_skb_any(skb);
2653d5c65159SKalle Valo 			continue;
2654d5c65159SKalle Valo 		}
2655d5c65159SKalle Valo 
2656d5c65159SKalle Valo 		arsta = (struct ath11k_sta *)peer->sta->drv_priv;
2657d5c65159SKalle Valo 		ath11k_dp_rx_update_peer_stats(arsta, &ppdu_info);
2658d5c65159SKalle Valo 
2659d5c65159SKalle Valo 		if (ath11k_debug_is_pktlog_peer_valid(ar, peer->addr))
2660d5c65159SKalle Valo 			trace_ath11k_htt_rxdesc(ar, skb->data, DP_RX_BUFFER_SIZE);
2661d5c65159SKalle Valo 
2662d5c65159SKalle Valo 		spin_unlock_bh(&ab->base_lock);
2663d5c65159SKalle Valo 		rcu_read_unlock();
2664d5c65159SKalle Valo 
2665d5c65159SKalle Valo 		dev_kfree_skb_any(skb);
2666d5c65159SKalle Valo 	}
2667d5c65159SKalle Valo exit:
2668d5c65159SKalle Valo 	return num_buffs_reaped;
2669d5c65159SKalle Valo }
2670d5c65159SKalle Valo 
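/* Hand an MSDU link descriptor back to the WBM release ring with the given
 * buffer manager action (e.g. return it to the WBM idle list). Returns
 * -ENOBUFS if no space is available in the release ring.
 */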
2671d5c65159SKalle Valo static int ath11k_dp_rx_link_desc_return(struct ath11k_base *ab,
2672d5c65159SKalle Valo 					 u32 *link_desc,
2673d5c65159SKalle Valo 					 enum hal_wbm_rel_bm_act action)
2674d5c65159SKalle Valo {
2675d5c65159SKalle Valo 	struct ath11k_dp *dp = &ab->dp;
2676d5c65159SKalle Valo 	struct hal_srng *srng;
2677d5c65159SKalle Valo 	u32 *desc;
2678d5c65159SKalle Valo 	int ret = 0;
2679d5c65159SKalle Valo 
2680d5c65159SKalle Valo 	srng = &ab->hal.srng_list[dp->wbm_desc_rel_ring.ring_id];
2681d5c65159SKalle Valo 
2682d5c65159SKalle Valo 	spin_lock_bh(&srng->lock);
2683d5c65159SKalle Valo 
2684d5c65159SKalle Valo 	ath11k_hal_srng_access_begin(ab, srng);
2685d5c65159SKalle Valo 
2686d5c65159SKalle Valo 	desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
2687d5c65159SKalle Valo 	if (!desc) {
2688d5c65159SKalle Valo 		ret = -ENOBUFS;
2689d5c65159SKalle Valo 		goto exit;
2690d5c65159SKalle Valo 	}
2691d5c65159SKalle Valo 
2692d5c65159SKalle Valo 	ath11k_hal_rx_msdu_link_desc_set(ab, (void *)desc, (void *)link_desc,
2693d5c65159SKalle Valo 					 action);
2694d5c65159SKalle Valo 
2695d5c65159SKalle Valo exit:
2696d5c65159SKalle Valo 	ath11k_hal_srng_access_end(ab, srng);
2697d5c65159SKalle Valo 
2698d5c65159SKalle Valo 	spin_unlock_bh(&srng->lock);
2699d5c65159SKalle Valo 
2700d5c65159SKalle Valo 	return ret;
2701d5c65159SKalle Valo }
2702d5c65159SKalle Valo 
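/* Fill in the rx status for a fragment received in raw mode: error flags
 * from the attention descriptor, band and frequency derived from the rx
 * channel number, and legacy rate info. The FCS is trimmed and, for
 * decrypted frames, the crypto MIC is stripped as well.
 */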
2703d5c65159SKalle Valo static void ath11k_dp_rx_frag_h_mpdu(struct ath11k *ar,
2704d5c65159SKalle Valo 				     struct sk_buff *msdu,
2705d5c65159SKalle Valo 				     struct hal_rx_desc *rx_desc,
2706d5c65159SKalle Valo 				     struct ieee80211_rx_status *rx_status)
2707d5c65159SKalle Valo {
2708d5c65159SKalle Valo 	u8 rx_channel;
2709d5c65159SKalle Valo 	enum hal_encrypt_type enctype;
2710d5c65159SKalle Valo 	bool is_decrypted;
2711d5c65159SKalle Valo 	u32 err_bitmap;
2712d5c65159SKalle Valo 
2713d5c65159SKalle Valo 	is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_desc);
2714d5c65159SKalle Valo 	enctype = ath11k_dp_rx_h_mpdu_start_enctype(rx_desc);
2715d5c65159SKalle Valo 	err_bitmap = ath11k_dp_rx_h_attn_mpdu_err(rx_desc);
2716d5c65159SKalle Valo 
2717d5c65159SKalle Valo 	if (err_bitmap & DP_RX_MPDU_ERR_FCS)
2718d5c65159SKalle Valo 		rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
2719d5c65159SKalle Valo 
2720d5c65159SKalle Valo 	if (err_bitmap & DP_RX_MPDU_ERR_TKIP_MIC)
2721d5c65159SKalle Valo 		rx_status->flag |= RX_FLAG_MMIC_ERROR;
2722d5c65159SKalle Valo 
2723d5c65159SKalle Valo 	rx_status->encoding = RX_ENC_LEGACY;
2724d5c65159SKalle Valo 	rx_status->bw = RATE_INFO_BW_20;
2725d5c65159SKalle Valo 
2726d5c65159SKalle Valo 	rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
2727d5c65159SKalle Valo 
2728d5c65159SKalle Valo 	rx_channel = ath11k_dp_rx_h_msdu_start_freq(rx_desc);
2729d5c65159SKalle Valo 
2730d5c65159SKalle Valo 	if (rx_channel >= 1 && rx_channel <= 14) {
2731d5c65159SKalle Valo 		rx_status->band = NL80211_BAND_2GHZ;
2732d5c65159SKalle Valo 	} else if (rx_channel >= 36 && rx_channel <= 173) {
2733d5c65159SKalle Valo 		rx_status->band = NL80211_BAND_5GHZ;
2734d5c65159SKalle Valo 	} else {
2735d5c65159SKalle Valo 		ath11k_warn(ar->ab, "Unsupported Channel info received %d\n",
2736d5c65159SKalle Valo 			    rx_channel);
2737d5c65159SKalle Valo 		return;
2738d5c65159SKalle Valo 	}
2739d5c65159SKalle Valo 
2740d5c65159SKalle Valo 	rx_status->freq = ieee80211_channel_to_frequency(rx_channel,
2741d5c65159SKalle Valo 							 rx_status->band);
2742d5c65159SKalle Valo 	ath11k_dp_rx_h_rate(ar, rx_desc, rx_status);
2743d5c65159SKalle Valo 
2744d5c65159SKalle Valo 	/* Rx fragments are received in raw mode */
2745d5c65159SKalle Valo 	skb_trim(msdu, msdu->len - FCS_LEN);
2746d5c65159SKalle Valo 
2747d5c65159SKalle Valo 	if (is_decrypted) {
2748d5c65159SKalle Valo 		rx_status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MIC_STRIPPED;
2749d5c65159SKalle Valo 		skb_trim(msdu, msdu->len -
2750d5c65159SKalle Valo 			 ath11k_dp_rx_crypto_mic_len(ar, enctype));
2751d5c65159SKalle Valo 	}
2752d5c65159SKalle Valo }
2753d5c65159SKalle Valo 
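/* Reclaim one rx buffer referenced from the REO exception path. Non-fragment
 * msdus are simply freed; fragments are fixed up via the raw-mode fragment
 * handler and delivered to mac80211. Returns -EINVAL if the cookie does not
 * map to a known buffer.
 */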
2754d5c65159SKalle Valo static int
2755d5c65159SKalle Valo ath11k_dp_process_rx_err_buf(struct ath11k *ar, struct napi_struct *napi,
2756d5c65159SKalle Valo 			     int buf_id, bool frag)
2757d5c65159SKalle Valo {
2758d5c65159SKalle Valo 	struct ath11k_pdev_dp *dp = &ar->dp;
2759d5c65159SKalle Valo 	struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
2760d5c65159SKalle Valo 	struct ieee80211_rx_status rx_status = {0};
2761d5c65159SKalle Valo 	struct sk_buff *msdu;
2762d5c65159SKalle Valo 	struct ath11k_skb_rxcb *rxcb;
2763d5c65159SKalle Valo 	struct ieee80211_rx_status *status;
2764d5c65159SKalle Valo 	struct hal_rx_desc *rx_desc;
2765d5c65159SKalle Valo 	u16 msdu_len;
2766d5c65159SKalle Valo 
2767d5c65159SKalle Valo 	spin_lock_bh(&rx_ring->idr_lock);
2768d5c65159SKalle Valo 	msdu = idr_find(&rx_ring->bufs_idr, buf_id);
2769d5c65159SKalle Valo 	if (!msdu) {
2770d5c65159SKalle Valo 		ath11k_warn(ar->ab, "rx err buf with invalid buf_id %d\n",
2771d5c65159SKalle Valo 			    buf_id);
2772d5c65159SKalle Valo 		spin_unlock_bh(&rx_ring->idr_lock);
2773d5c65159SKalle Valo 		return -EINVAL;
2774d5c65159SKalle Valo 	}
2775d5c65159SKalle Valo 
2776d5c65159SKalle Valo 	idr_remove(&rx_ring->bufs_idr, buf_id);
2777d5c65159SKalle Valo 	spin_unlock_bh(&rx_ring->idr_lock);
2778d5c65159SKalle Valo 
2779d5c65159SKalle Valo 	rxcb = ATH11K_SKB_RXCB(msdu);
2780d5c65159SKalle Valo 	dma_unmap_single(ar->ab->dev, rxcb->paddr,
2781d5c65159SKalle Valo 			 msdu->len + skb_tailroom(msdu),
2782d5c65159SKalle Valo 			 DMA_FROM_DEVICE);
2783d5c65159SKalle Valo 
2784d5c65159SKalle Valo 	if (!frag) {
2785d5c65159SKalle Valo 		/* Only rx fragments are processed below; msdus indicated
2786d5c65159SKalle Valo 		 * because of error conditions are dropped.
2787d5c65159SKalle Valo 		 */
2788d5c65159SKalle Valo 		dev_kfree_skb_any(msdu);
2789d5c65159SKalle Valo 		return 0;
2790d5c65159SKalle Valo 	}
2791d5c65159SKalle Valo 
2792d5c65159SKalle Valo 	rcu_read_lock();
2793d5c65159SKalle Valo 	if (!rcu_dereference(ar->ab->pdevs_active[ar->pdev_idx])) {
2794d5c65159SKalle Valo 		dev_kfree_skb_any(msdu);
2795d5c65159SKalle Valo 		goto exit;
2796d5c65159SKalle Valo 	}
2797d5c65159SKalle Valo 
2798d5c65159SKalle Valo 	if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) {
2799d5c65159SKalle Valo 		dev_kfree_skb_any(msdu);
2800d5c65159SKalle Valo 		goto exit;
2801d5c65159SKalle Valo 	}
2802d5c65159SKalle Valo 
2803d5c65159SKalle Valo 	rx_desc = (struct hal_rx_desc *)msdu->data;
2804d5c65159SKalle Valo 	msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(rx_desc);
2805d5c65159SKalle Valo 	skb_put(msdu, HAL_RX_DESC_SIZE + msdu_len);
2806d5c65159SKalle Valo 	skb_pull(msdu, HAL_RX_DESC_SIZE);
2807d5c65159SKalle Valo 
2808d5c65159SKalle Valo 	ath11k_dp_rx_frag_h_mpdu(ar, msdu, rx_desc, &rx_status);
2809d5c65159SKalle Valo 
2810d5c65159SKalle Valo 	status = IEEE80211_SKB_RXCB(msdu);
2811d5c65159SKalle Valo 
2812d5c65159SKalle Valo 	*status = rx_status;
2813d5c65159SKalle Valo 
2814d5c65159SKalle Valo 	ath11k_dp_rx_deliver_msdu(ar, napi, msdu);
2815d5c65159SKalle Valo 
2816d5c65159SKalle Valo exit:
2817d5c65159SKalle Valo 	rcu_read_unlock();
2818d5c65159SKalle Valo 	return 0;
2819d5c65159SKalle Valo }
2820d5c65159SKalle Valo 
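/* NAPI handler for the REO exception ring. Each entry points to an MSDU
 * link descriptor; the descriptor is returned to the WBM idle list and
 * every msdu cookie it carries is processed (fragments delivered, other
 * error msdus dropped). Consumed rx buffers are replenished per pdev.
 * Returns the number of buffers reaped, capped at the budget.
 */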
2821d5c65159SKalle Valo int ath11k_dp_process_rx_err(struct ath11k_base *ab, struct napi_struct *napi,
2822d5c65159SKalle Valo 			     int budget)
2823d5c65159SKalle Valo {
2824293cb583SJohn Crispin 	u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC];
2825d5c65159SKalle Valo 	struct dp_link_desc_bank *link_desc_banks;
2826d5c65159SKalle Valo 	enum hal_rx_buf_return_buf_manager rbm;
2827d5c65159SKalle Valo 	int tot_n_bufs_reaped, quota, ret, i;
2828d5c65159SKalle Valo 	int n_bufs_reaped[MAX_RADIOS] = {0};
2829d5c65159SKalle Valo 	struct dp_rxdma_ring *rx_ring;
2830d5c65159SKalle Valo 	struct dp_srng *reo_except;
2831d5c65159SKalle Valo 	u32 desc_bank, num_msdus;
2832d5c65159SKalle Valo 	struct hal_srng *srng;
2833d5c65159SKalle Valo 	struct ath11k_dp *dp;
2834d5c65159SKalle Valo 	void *link_desc_va;
2835d5c65159SKalle Valo 	int buf_id, mac_id;
2836d5c65159SKalle Valo 	struct ath11k *ar;
2837d5c65159SKalle Valo 	dma_addr_t paddr;
2838d5c65159SKalle Valo 	u32 *desc;
2839d5c65159SKalle Valo 	bool is_frag;
2840d5c65159SKalle Valo 
2841d5c65159SKalle Valo 	tot_n_bufs_reaped = 0;
2842d5c65159SKalle Valo 	quota = budget;
2843d5c65159SKalle Valo 
2844d5c65159SKalle Valo 	dp = &ab->dp;
2845d5c65159SKalle Valo 	reo_except = &dp->reo_except_ring;
2846d5c65159SKalle Valo 	link_desc_banks = dp->link_desc_banks;
2847d5c65159SKalle Valo 
2848d5c65159SKalle Valo 	srng = &ab->hal.srng_list[reo_except->ring_id];
2849d5c65159SKalle Valo 
2850d5c65159SKalle Valo 	spin_lock_bh(&srng->lock);
2851d5c65159SKalle Valo 
2852d5c65159SKalle Valo 	ath11k_hal_srng_access_begin(ab, srng);
2853d5c65159SKalle Valo 
2854d5c65159SKalle Valo 	while (budget &&
2855d5c65159SKalle Valo 	       (desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
2856293cb583SJohn Crispin 		struct hal_reo_dest_ring *reo_desc = (struct hal_reo_dest_ring *)desc;
2857293cb583SJohn Crispin 
2858d5c65159SKalle Valo 		ab->soc_stats.err_ring_pkts++;
2859d5c65159SKalle Valo 		ret = ath11k_hal_desc_reo_parse_err(ab, desc, &paddr,
2860d5c65159SKalle Valo 						    &desc_bank);
2861d5c65159SKalle Valo 		if (ret) {
2862d5c65159SKalle Valo 			ath11k_warn(ab, "failed to parse error reo desc %d\n",
2863d5c65159SKalle Valo 				    ret);
2864d5c65159SKalle Valo 			continue;
2865d5c65159SKalle Valo 		}
2866d5c65159SKalle Valo 		link_desc_va = link_desc_banks[desc_bank].vaddr +
2867d5c65159SKalle Valo 			       (paddr - link_desc_banks[desc_bank].paddr);
2868293cb583SJohn Crispin 		ath11k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, msdu_cookies,
2869d5c65159SKalle Valo 						 &rbm);
2870d5c65159SKalle Valo 		if (rbm != HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST &&
2871d5c65159SKalle Valo 		    rbm != HAL_RX_BUF_RBM_SW3_BM) {
2872d5c65159SKalle Valo 			ab->soc_stats.invalid_rbm++;
2873d5c65159SKalle Valo 			ath11k_warn(ab, "invalid return buffer manager %d\n", rbm);
2874d5c65159SKalle Valo 			ath11k_dp_rx_link_desc_return(ab, desc,
2875d5c65159SKalle Valo 						      HAL_WBM_REL_BM_ACT_REL_MSDU);
2876d5c65159SKalle Valo 			continue;
2877d5c65159SKalle Valo 		}
2878d5c65159SKalle Valo 
2879293cb583SJohn Crispin 		is_frag = !!(reo_desc->rx_mpdu_info.info0 & RX_MPDU_DESC_INFO0_FRAG_FLAG);
2880d5c65159SKalle Valo 
2881d5c65159SKalle Valo 		/* Return the link desc to the wbm idle list */
2882d5c65159SKalle Valo 		ath11k_dp_rx_link_desc_return(ab, desc,
2883d5c65159SKalle Valo 					      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
2884d5c65159SKalle Valo 
2885d5c65159SKalle Valo 		for (i = 0; i < num_msdus; i++) {
2886d5c65159SKalle Valo 			buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
2887293cb583SJohn Crispin 					   msdu_cookies[i]);
2888d5c65159SKalle Valo 
2889d5c65159SKalle Valo 			mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID,
2890293cb583SJohn Crispin 					   msdu_cookies[i]);
2891d5c65159SKalle Valo 
2892d5c65159SKalle Valo 			ar = ab->pdevs[mac_id].ar;
2893d5c65159SKalle Valo 
2894d5c65159SKalle Valo 			if (!ath11k_dp_process_rx_err_buf(ar, napi, buf_id,
2895d5c65159SKalle Valo 							  is_frag)) {
2896d5c65159SKalle Valo 				n_bufs_reaped[mac_id]++;
2897d5c65159SKalle Valo 				tot_n_bufs_reaped++;
2898d5c65159SKalle Valo 			}
2899d5c65159SKalle Valo 		}
2900d5c65159SKalle Valo 
2901d5c65159SKalle Valo 		if (tot_n_bufs_reaped >= quota) {
2902d5c65159SKalle Valo 			tot_n_bufs_reaped = quota;
2903d5c65159SKalle Valo 			goto exit;
2904d5c65159SKalle Valo 		}
2905d5c65159SKalle Valo 
2906d5c65159SKalle Valo 		budget = quota - tot_n_bufs_reaped;
2907d5c65159SKalle Valo 	}
2908d5c65159SKalle Valo 
2909d5c65159SKalle Valo exit:
2910d5c65159SKalle Valo 	ath11k_hal_srng_access_end(ab, srng);
2911d5c65159SKalle Valo 
2912d5c65159SKalle Valo 	spin_unlock_bh(&srng->lock);
2913d5c65159SKalle Valo 
2914d5c65159SKalle Valo 	for (i = 0; i < ab->num_radios; i++) {
2915d5c65159SKalle Valo 		if (!n_bufs_reaped[i])
2916d5c65159SKalle Valo 			continue;
2917d5c65159SKalle Valo 
2918d5c65159SKalle Valo 		ar = ab->pdevs[i].ar;
2919d5c65159SKalle Valo 		rx_ring = &ar->dp.rx_refill_buf_ring;
2920d5c65159SKalle Valo 
2921d5c65159SKalle Valo 		ath11k_dp_rxbufs_replenish(ab, i, rx_ring, n_bufs_reaped[i],
2922d5c65159SKalle Valo 					   HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC);
2923d5c65159SKalle Valo 	}
2924d5c65159SKalle Valo 
2925d5c65159SKalle Valo 	return tot_n_bufs_reaped;
2926d5c65159SKalle Valo }
2927d5c65159SKalle Valo 
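/* Drop the remaining scatter-gather buffers of an msdu that exceeded
 * DP_RX_BUFFER_SIZE and was flagged with the REO descriptor-address-zero
 * error, so they do not get processed as standalone msdus.
 */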
2928d5c65159SKalle Valo static void ath11k_dp_rx_null_q_desc_sg_drop(struct ath11k *ar,
2929d5c65159SKalle Valo 					     int msdu_len,
2930d5c65159SKalle Valo 					     struct sk_buff_head *msdu_list)
2931d5c65159SKalle Valo {
2932d5c65159SKalle Valo 	struct sk_buff *skb, *tmp;
2933d5c65159SKalle Valo 	struct ath11k_skb_rxcb *rxcb;
2934d5c65159SKalle Valo 	int n_buffs;
2935d5c65159SKalle Valo 
2936d5c65159SKalle Valo 	n_buffs = DIV_ROUND_UP(msdu_len,
2937d5c65159SKalle Valo 			       (DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE));
2938d5c65159SKalle Valo 
2939d5c65159SKalle Valo 	skb_queue_walk_safe(msdu_list, skb, tmp) {
2940d5c65159SKalle Valo 		rxcb = ATH11K_SKB_RXCB(skb);
2941d5c65159SKalle Valo 		if (rxcb->err_rel_src == HAL_WBM_REL_SRC_MODULE_REO &&
2942d5c65159SKalle Valo 		    rxcb->err_code == HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO) {
2943d5c65159SKalle Valo 			if (!n_buffs)
2944d5c65159SKalle Valo 				break;
2945d5c65159SKalle Valo 			__skb_unlink(skb, msdu_list);
2946d5c65159SKalle Valo 			dev_kfree_skb_any(skb);
2947d5c65159SKalle Valo 			n_buffs--;
2948d5c65159SKalle Valo 		}
2949d5c65159SKalle Valo 	}
2950d5c65159SKalle Valo }
2951d5c65159SKalle Valo 
2952d5c65159SKalle Valo static int ath11k_dp_rx_h_null_q_desc(struct ath11k *ar, struct sk_buff *msdu,
2953d5c65159SKalle Valo 				      struct ieee80211_rx_status *status,
2954d5c65159SKalle Valo 				      struct sk_buff_head *msdu_list)
2955d5c65159SKalle Valo {
2956d5c65159SKalle Valo 	struct sk_buff_head amsdu_list;
2957d5c65159SKalle Valo 	u16 msdu_len;
2958d5c65159SKalle Valo 	struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
2959d5c65159SKalle Valo 	u8 l3pad_bytes;
2960d5c65159SKalle Valo 	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
2961d5c65159SKalle Valo 
2962d5c65159SKalle Valo 	msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(desc);
2963d5c65159SKalle Valo 
2964d5c65159SKalle Valo 	if ((msdu_len + HAL_RX_DESC_SIZE) > DP_RX_BUFFER_SIZE) {
2965d5c65159SKalle Valo 		/* First buffer will be freed by the caller, so deduct its length */
2966d5c65159SKalle Valo 		msdu_len = msdu_len - (DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE);
2967d5c65159SKalle Valo 		ath11k_dp_rx_null_q_desc_sg_drop(ar, msdu_len, msdu_list);
2968d5c65159SKalle Valo 		return -EINVAL;
2969d5c65159SKalle Valo 	}
2970d5c65159SKalle Valo 
2971d5c65159SKalle Valo 	if (!ath11k_dp_rx_h_attn_msdu_done(desc)) {
2972d5c65159SKalle Valo 		ath11k_warn(ar->ab,
2973d5c65159SKalle Valo 			    "msdu_done bit not set in null_q_desc processing\n");
2974d5c65159SKalle Valo 		__skb_queue_purge(msdu_list);
2975d5c65159SKalle Valo 		return -EIO;
2976d5c65159SKalle Valo 	}
2977d5c65159SKalle Valo 
2978d5c65159SKalle Valo 	/* Handle NULL queue descriptor violations arising out of a missing
2979d5c65159SKalle Valo 	 * REO queue for a given peer or a given TID. This typically
2980d5c65159SKalle Valo 	 * happens if a packet is received on a QoS enabled TID before the
2981d5c65159SKalle Valo 	 * ADDBA negotiation for that TID has set up the TID queue. It may
2982d5c65159SKalle Valo 	 * also happen for MC/BC frames if they are not routed to the
2983d5c65159SKalle Valo 	 * non-QoS TID queue, in the absence of any other default TID queue.
2984d5c65159SKalle Valo 	 * This error can show up in both REO destination and WBM release rings.
2985d5c65159SKalle Valo 	 */
2986d5c65159SKalle Valo 
2987d5c65159SKalle Valo 	__skb_queue_head_init(&amsdu_list);
2988d5c65159SKalle Valo 
2989d5c65159SKalle Valo 	rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(desc);
2990d5c65159SKalle Valo 	rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(desc);
2991d5c65159SKalle Valo 
2992d5c65159SKalle Valo 	l3pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(desc);
2993d5c65159SKalle Valo 
2994d5c65159SKalle Valo 	if ((HAL_RX_DESC_SIZE + l3pad_bytes + msdu_len) > DP_RX_BUFFER_SIZE)
2995d5c65159SKalle Valo 		return -EINVAL;
2996d5c65159SKalle Valo 
2997d5c65159SKalle Valo 	skb_put(msdu, HAL_RX_DESC_SIZE + l3pad_bytes + msdu_len);
2998d5c65159SKalle Valo 	skb_pull(msdu, HAL_RX_DESC_SIZE + l3pad_bytes);
2999d5c65159SKalle Valo 
3000d5c65159SKalle Valo 	ath11k_dp_rx_h_ppdu(ar, desc, status);
3001d5c65159SKalle Valo 
3002d5c65159SKalle Valo 	__skb_queue_tail(&amsdu_list, msdu);
3003d5c65159SKalle Valo 
3004d5c65159SKalle Valo 	ath11k_dp_rx_h_mpdu(ar, &amsdu_list, desc, status);
3005d5c65159SKalle Valo 
3006d5c65159SKalle Valo 	/* Note that the caller still has access to the msdu and completes
3007d5c65159SKalle Valo 	 * rx with mac80211, so there is no need to clean up amsdu_list here.
3008d5c65159SKalle Valo 	 */
3009d5c65159SKalle Valo 
3010d5c65159SKalle Valo 	return 0;
3011d5c65159SKalle Valo }
3012d5c65159SKalle Valo 
3013d5c65159SKalle Valo static bool ath11k_dp_rx_h_reo_err(struct ath11k *ar, struct sk_buff *msdu,
3014d5c65159SKalle Valo 				   struct ieee80211_rx_status *status,
3015d5c65159SKalle Valo 				   struct sk_buff_head *msdu_list)
3016d5c65159SKalle Valo {
3017d5c65159SKalle Valo 	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
3018d5c65159SKalle Valo 	bool drop = false;
3019d5c65159SKalle Valo 
3020d5c65159SKalle Valo 	ar->ab->soc_stats.reo_error[rxcb->err_code]++;
3021d5c65159SKalle Valo 
3022d5c65159SKalle Valo 	switch (rxcb->err_code) {
3023d5c65159SKalle Valo 	case HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO:
3024d5c65159SKalle Valo 		if (ath11k_dp_rx_h_null_q_desc(ar, msdu, status, msdu_list))
3025d5c65159SKalle Valo 			drop = true;
3026d5c65159SKalle Valo 		break;
3027d5c65159SKalle Valo 	default:
3028d5c65159SKalle Valo 		/* TODO: Review other errors and report them to mac80211
3029d5c65159SKalle Valo 		 * as appropriate.
3030d5c65159SKalle Valo 		 */
3031d5c65159SKalle Valo 		drop = true;
3032d5c65159SKalle Valo 		break;
3033d5c65159SKalle Valo 	}
3034d5c65159SKalle Valo 
3035d5c65159SKalle Valo 	return drop;
3036d5c65159SKalle Valo }
3037d5c65159SKalle Valo 
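/* Prepare an msdu that failed the TKIP MIC check for delivery to mac80211:
 * populate the PPDU status, flag the frame as decrypted with the MIC
 * stripped and a MIC error, and undecap it so mac80211 can run its TKIP
 * countermeasures.
 */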
3038d5c65159SKalle Valo static void ath11k_dp_rx_h_tkip_mic_err(struct ath11k *ar, struct sk_buff *msdu,
3039d5c65159SKalle Valo 					struct ieee80211_rx_status *status)
3040d5c65159SKalle Valo {
3041d5c65159SKalle Valo 	u16 msdu_len;
3042d5c65159SKalle Valo 	struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
3043d5c65159SKalle Valo 	u8 l3pad_bytes;
3044d5c65159SKalle Valo 	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
3045d5c65159SKalle Valo 
3046d5c65159SKalle Valo 	rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(desc);
3047d5c65159SKalle Valo 	rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(desc);
3048d5c65159SKalle Valo 
3049d5c65159SKalle Valo 	l3pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(desc);
3050d5c65159SKalle Valo 	msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(desc);
3051d5c65159SKalle Valo 	skb_put(msdu, HAL_RX_DESC_SIZE + l3pad_bytes + msdu_len);
3052d5c65159SKalle Valo 	skb_pull(msdu, HAL_RX_DESC_SIZE + l3pad_bytes);
3053d5c65159SKalle Valo 
3054d5c65159SKalle Valo 	ath11k_dp_rx_h_ppdu(ar, desc, status);
3055d5c65159SKalle Valo 
3056d5c65159SKalle Valo 	status->flag |= (RX_FLAG_MMIC_STRIPPED | RX_FLAG_MMIC_ERROR |
3057d5c65159SKalle Valo 			 RX_FLAG_DECRYPTED);
3058d5c65159SKalle Valo 
3059d5c65159SKalle Valo 	ath11k_dp_rx_h_undecap(ar, msdu, desc,
3060d5c65159SKalle Valo 			       HAL_ENCRYPT_TYPE_TKIP_MIC, status, false);
3061d5c65159SKalle Valo }
3062d5c65159SKalle Valo 
3063d5c65159SKalle Valo static bool ath11k_dp_rx_h_rxdma_err(struct ath11k *ar, struct sk_buff *msdu,
3064d5c65159SKalle Valo 				     struct ieee80211_rx_status *status)
3065d5c65159SKalle Valo {
3066d5c65159SKalle Valo 	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
3067d5c65159SKalle Valo 	bool drop = false;
3068d5c65159SKalle Valo 
3069d5c65159SKalle Valo 	ar->ab->soc_stats.rxdma_error[rxcb->err_code]++;
3070d5c65159SKalle Valo 
3071d5c65159SKalle Valo 	switch (rxcb->err_code) {
3072d5c65159SKalle Valo 	case HAL_REO_ENTR_RING_RXDMA_ECODE_TKIP_MIC_ERR:
3073d5c65159SKalle Valo 		ath11k_dp_rx_h_tkip_mic_err(ar, msdu, status);
3074d5c65159SKalle Valo 		break;
3075d5c65159SKalle Valo 	default:
3076d5c65159SKalle Valo 		/* TODO: Review the other rxdma error codes to check if anything
3077d5c65159SKalle Valo 		 * is worth reporting to mac80211.
3078d5c65159SKalle Valo 		 */
3079d5c65159SKalle Valo 		drop = true;
3080d5c65159SKalle Valo 		break;
3081d5c65159SKalle Valo 	}
3082d5c65159SKalle Valo 
3083d5c65159SKalle Valo 	return drop;
3084d5c65159SKalle Valo }
3085d5c65159SKalle Valo 
3086d5c65159SKalle Valo static void ath11k_dp_rx_wbm_err(struct ath11k *ar,
3087d5c65159SKalle Valo 				 struct napi_struct *napi,
3088d5c65159SKalle Valo 				 struct sk_buff *msdu,
3089d5c65159SKalle Valo 				 struct sk_buff_head *msdu_list)
3090d5c65159SKalle Valo {
3091d5c65159SKalle Valo 	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
3092d5c65159SKalle Valo 	struct ieee80211_rx_status rxs = {0};
3093d5c65159SKalle Valo 	struct ieee80211_rx_status *status;
3094d5c65159SKalle Valo 	bool drop = true;
3095d5c65159SKalle Valo 
3096d5c65159SKalle Valo 	switch (rxcb->err_rel_src) {
3097d5c65159SKalle Valo 	case HAL_WBM_REL_SRC_MODULE_REO:
3098d5c65159SKalle Valo 		drop = ath11k_dp_rx_h_reo_err(ar, msdu, &rxs, msdu_list);
3099d5c65159SKalle Valo 		break;
3100d5c65159SKalle Valo 	case HAL_WBM_REL_SRC_MODULE_RXDMA:
3101d5c65159SKalle Valo 		drop = ath11k_dp_rx_h_rxdma_err(ar, msdu, &rxs);
3102d5c65159SKalle Valo 		break;
3103d5c65159SKalle Valo 	default:
3104d5c65159SKalle Valo 		/* msdu will get freed */
3105d5c65159SKalle Valo 		break;
3106d5c65159SKalle Valo 	}
3107d5c65159SKalle Valo 
3108d5c65159SKalle Valo 	if (drop) {
3109d5c65159SKalle Valo 		dev_kfree_skb_any(msdu);
3110d5c65159SKalle Valo 		return;
3111d5c65159SKalle Valo 	}
3112d5c65159SKalle Valo 
3113d5c65159SKalle Valo 	status = IEEE80211_SKB_RXCB(msdu);
3114d5c65159SKalle Valo 	*status = rxs;
3115d5c65159SKalle Valo 
3116d5c65159SKalle Valo 	ath11k_dp_rx_deliver_msdu(ar, napi, msdu);
3117d5c65159SKalle Valo }
3118d5c65159SKalle Valo 
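/* NAPI handler for the WBM error release ring. Reaped msdus are sorted per
 * pdev and handled according to the module that released them (REO or
 * RXDMA); consumed rx buffers are replenished before the msdus are either
 * delivered to mac80211 or dropped. Returns the number of buffers reaped.
 */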
3119d5c65159SKalle Valo int ath11k_dp_rx_process_wbm_err(struct ath11k_base *ab,
3120d5c65159SKalle Valo 				 struct napi_struct *napi, int budget)
3121d5c65159SKalle Valo {
3122d5c65159SKalle Valo 	struct ath11k *ar;
3123d5c65159SKalle Valo 	struct ath11k_dp *dp = &ab->dp;
3124d5c65159SKalle Valo 	struct dp_rxdma_ring *rx_ring;
3125d5c65159SKalle Valo 	struct hal_rx_wbm_rel_info err_info;
3126d5c65159SKalle Valo 	struct hal_srng *srng;
3127d5c65159SKalle Valo 	struct sk_buff *msdu;
3128d5c65159SKalle Valo 	struct sk_buff_head msdu_list[MAX_RADIOS];
3129d5c65159SKalle Valo 	struct ath11k_skb_rxcb *rxcb;
3130d5c65159SKalle Valo 	u32 *rx_desc;
3131d5c65159SKalle Valo 	int buf_id, mac_id;
3132d5c65159SKalle Valo 	int num_buffs_reaped[MAX_RADIOS] = {0};
3133d5c65159SKalle Valo 	int total_num_buffs_reaped = 0;
3134d5c65159SKalle Valo 	int ret, i;
3135d5c65159SKalle Valo 
3136d5c65159SKalle Valo 	for (i = 0; i < MAX_RADIOS; i++)
3137d5c65159SKalle Valo 		__skb_queue_head_init(&msdu_list[i]);
3138d5c65159SKalle Valo 
3139d5c65159SKalle Valo 	srng = &ab->hal.srng_list[dp->rx_rel_ring.ring_id];
3140d5c65159SKalle Valo 
3141d5c65159SKalle Valo 	spin_lock_bh(&srng->lock);
3142d5c65159SKalle Valo 
3143d5c65159SKalle Valo 	ath11k_hal_srng_access_begin(ab, srng);
3144d5c65159SKalle Valo 
3145d5c65159SKalle Valo 	while (budget) {
3146d5c65159SKalle Valo 		rx_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng);
3147d5c65159SKalle Valo 		if (!rx_desc)
3148d5c65159SKalle Valo 			break;
3149d5c65159SKalle Valo 
3150d5c65159SKalle Valo 		ret = ath11k_hal_wbm_desc_parse_err(ab, rx_desc, &err_info);
3151d5c65159SKalle Valo 		if (ret) {
3152d5c65159SKalle Valo 			ath11k_warn(ab,
3153d5c65159SKalle Valo 				    "failed to parse rx error in wbm_rel ring desc %d\n",
3154d5c65159SKalle Valo 				    ret);
3155d5c65159SKalle Valo 			continue;
3156d5c65159SKalle Valo 		}
3157d5c65159SKalle Valo 
3158d5c65159SKalle Valo 		buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, err_info.cookie);
3159d5c65159SKalle Valo 		mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, err_info.cookie);
3160d5c65159SKalle Valo 
3161d5c65159SKalle Valo 		ar = ab->pdevs[mac_id].ar;
3162d5c65159SKalle Valo 		rx_ring = &ar->dp.rx_refill_buf_ring;
3163d5c65159SKalle Valo 
3164d5c65159SKalle Valo 		spin_lock_bh(&rx_ring->idr_lock);
3165d5c65159SKalle Valo 		msdu = idr_find(&rx_ring->bufs_idr, buf_id);
3166d5c65159SKalle Valo 		if (!msdu) {
3167d5c65159SKalle Valo 			ath11k_warn(ab, "frame rx with invalid buf_id %d pdev %d\n",
3168d5c65159SKalle Valo 				    buf_id, mac_id);
3169d5c65159SKalle Valo 			spin_unlock_bh(&rx_ring->idr_lock);
3170d5c65159SKalle Valo 			continue;
3171d5c65159SKalle Valo 		}
3172d5c65159SKalle Valo 
3173d5c65159SKalle Valo 		idr_remove(&rx_ring->bufs_idr, buf_id);
3174d5c65159SKalle Valo 		spin_unlock_bh(&rx_ring->idr_lock);
3175d5c65159SKalle Valo 
3176d5c65159SKalle Valo 		rxcb = ATH11K_SKB_RXCB(msdu);
3177d5c65159SKalle Valo 		dma_unmap_single(ab->dev, rxcb->paddr,
3178d5c65159SKalle Valo 				 msdu->len + skb_tailroom(msdu),
3179d5c65159SKalle Valo 				 DMA_FROM_DEVICE);
3180d5c65159SKalle Valo 
3181d5c65159SKalle Valo 		num_buffs_reaped[mac_id]++;
3182d5c65159SKalle Valo 		total_num_buffs_reaped++;
3183d5c65159SKalle Valo 		budget--;
3184d5c65159SKalle Valo 
3185d5c65159SKalle Valo 		if (err_info.push_reason !=
3186d5c65159SKalle Valo 		    HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) {
3187d5c65159SKalle Valo 			dev_kfree_skb_any(msdu);
3188d5c65159SKalle Valo 			continue;
3189d5c65159SKalle Valo 		}
3190d5c65159SKalle Valo 
3191d5c65159SKalle Valo 		rxcb->err_rel_src = err_info.err_rel_src;
3192d5c65159SKalle Valo 		rxcb->err_code = err_info.err_code;
3193d5c65159SKalle Valo 		rxcb->rx_desc = (struct hal_rx_desc *)msdu->data;
3194d5c65159SKalle Valo 		__skb_queue_tail(&msdu_list[mac_id], msdu);
3195d5c65159SKalle Valo 	}
3196d5c65159SKalle Valo 
3197d5c65159SKalle Valo 	ath11k_hal_srng_access_end(ab, srng);
3198d5c65159SKalle Valo 
3199d5c65159SKalle Valo 	spin_unlock_bh(&srng->lock);
3200d5c65159SKalle Valo 
3201d5c65159SKalle Valo 	if (!total_num_buffs_reaped)
3202d5c65159SKalle Valo 		goto done;
3203d5c65159SKalle Valo 
3204d5c65159SKalle Valo 	for (i = 0; i < ab->num_radios; i++) {
3205d5c65159SKalle Valo 		if (!num_buffs_reaped[i])
3206d5c65159SKalle Valo 			continue;
3207d5c65159SKalle Valo 
3208d5c65159SKalle Valo 		ar = ab->pdevs[i].ar;
3209d5c65159SKalle Valo 		rx_ring = &ar->dp.rx_refill_buf_ring;
3210d5c65159SKalle Valo 
3211d5c65159SKalle Valo 		ath11k_dp_rxbufs_replenish(ab, i, rx_ring, num_buffs_reaped[i],
3212d5c65159SKalle Valo 					   HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC);
3213d5c65159SKalle Valo 	}
3214d5c65159SKalle Valo 
3215d5c65159SKalle Valo 	rcu_read_lock();
3216d5c65159SKalle Valo 	for (i = 0; i < ab->num_radios; i++) {
3217d5c65159SKalle Valo 		if (!rcu_dereference(ab->pdevs_active[i])) {
3218d5c65159SKalle Valo 			__skb_queue_purge(&msdu_list[i]);
3219d5c65159SKalle Valo 			continue;
3220d5c65159SKalle Valo 		}
3221d5c65159SKalle Valo 
3222d5c65159SKalle Valo 		ar = ab->pdevs[i].ar;
3223d5c65159SKalle Valo 
3224d5c65159SKalle Valo 		if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) {
3225d5c65159SKalle Valo 			__skb_queue_purge(&msdu_list[i]);
3226d5c65159SKalle Valo 			continue;
3227d5c65159SKalle Valo 		}
3228d5c65159SKalle Valo 
3229d5c65159SKalle Valo 		while ((msdu = __skb_dequeue(&msdu_list[i])) != NULL)
3230d5c65159SKalle Valo 			ath11k_dp_rx_wbm_err(ar, napi, msdu, &msdu_list[i]);
3231d5c65159SKalle Valo 	}
3232d5c65159SKalle Valo 	rcu_read_unlock();
3233d5c65159SKalle Valo done:
3234d5c65159SKalle Valo 	return total_num_buffs_reaped;
3235d5c65159SKalle Valo }
3236d5c65159SKalle Valo 
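/* Drain the per-pdev RXDMA destination error ring. For every entry the
 * rxdma error code is counted, all msdus referenced by the associated link
 * descriptor are unmapped and freed, and the link descriptor is returned to
 * the WBM idle list. Freed buffers are replenished afterwards.
 */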
3237d5c65159SKalle Valo int ath11k_dp_process_rxdma_err(struct ath11k_base *ab, int mac_id, int budget)
3238d5c65159SKalle Valo {
3239d5c65159SKalle Valo 	struct ath11k *ar = ab->pdevs[mac_id].ar;
3240d5c65159SKalle Valo 	struct dp_srng *err_ring = &ar->dp.rxdma_err_dst_ring;
3241d5c65159SKalle Valo 	struct dp_rxdma_ring *rx_ring = &ar->dp.rx_refill_buf_ring;
3242d5c65159SKalle Valo 	struct dp_link_desc_bank *link_desc_banks = ab->dp.link_desc_banks;
3243d5c65159SKalle Valo 	struct hal_srng *srng;
3244293cb583SJohn Crispin 	u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC];
3245d5c65159SKalle Valo 	enum hal_rx_buf_return_buf_manager rbm;
3246d5c65159SKalle Valo 	enum hal_reo_entr_rxdma_ecode rxdma_err_code;
3247d5c65159SKalle Valo 	struct ath11k_skb_rxcb *rxcb;
3248d5c65159SKalle Valo 	struct sk_buff *skb;
3249d5c65159SKalle Valo 	struct hal_reo_entrance_ring *entr_ring;
3250d5c65159SKalle Valo 	void *desc;
3251d5c65159SKalle Valo 	int num_buf_freed = 0;
3252d5c65159SKalle Valo 	int quota = budget;
3253d5c65159SKalle Valo 	dma_addr_t paddr;
3254d5c65159SKalle Valo 	u32 desc_bank;
3255d5c65159SKalle Valo 	void *link_desc_va;
3256d5c65159SKalle Valo 	int num_msdus;
3257d5c65159SKalle Valo 	int i;
3258d5c65159SKalle Valo 	int buf_id;
3259d5c65159SKalle Valo 
3260d5c65159SKalle Valo 	srng = &ab->hal.srng_list[err_ring->ring_id];
3261d5c65159SKalle Valo 
3262d5c65159SKalle Valo 	spin_lock_bh(&srng->lock);
3263d5c65159SKalle Valo 
3264d5c65159SKalle Valo 	ath11k_hal_srng_access_begin(ab, srng);
3265d5c65159SKalle Valo 
3266d5c65159SKalle Valo 	while (quota-- &&
3267d5c65159SKalle Valo 	       (desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
3268d5c65159SKalle Valo 		ath11k_hal_rx_reo_ent_paddr_get(ab, desc, &paddr, &desc_bank);
3269d5c65159SKalle Valo 
3270d5c65159SKalle Valo 		entr_ring = (struct hal_reo_entrance_ring *)desc;
3271d5c65159SKalle Valo 		rxdma_err_code =
3272d5c65159SKalle Valo 			FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE,
3273d5c65159SKalle Valo 				  entr_ring->info1);
3274d5c65159SKalle Valo 		ab->soc_stats.rxdma_error[rxdma_err_code]++;
3275d5c65159SKalle Valo 
3276d5c65159SKalle Valo 		link_desc_va = link_desc_banks[desc_bank].vaddr +
3277d5c65159SKalle Valo 			       (paddr - link_desc_banks[desc_bank].paddr);
3278293cb583SJohn Crispin 		ath11k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus,
3279293cb583SJohn Crispin 						 msdu_cookies, &rbm);
3280d5c65159SKalle Valo 
3281d5c65159SKalle Valo 		for (i = 0; i < num_msdus; i++) {
3282d5c65159SKalle Valo 			buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
3283293cb583SJohn Crispin 					   msdu_cookies[i]);
3284d5c65159SKalle Valo 
3285d5c65159SKalle Valo 			spin_lock_bh(&rx_ring->idr_lock);
3286d5c65159SKalle Valo 			skb = idr_find(&rx_ring->bufs_idr, buf_id);
3287d5c65159SKalle Valo 			if (!skb) {
3288d5c65159SKalle Valo 				ath11k_warn(ab, "rxdma error with invalid buf_id %d\n",
3289d5c65159SKalle Valo 					    buf_id);
3290d5c65159SKalle Valo 				spin_unlock_bh(&rx_ring->idr_lock);
3291d5c65159SKalle Valo 				continue;
3292d5c65159SKalle Valo 			}
3293d5c65159SKalle Valo 
3294d5c65159SKalle Valo 			idr_remove(&rx_ring->bufs_idr, buf_id);
3295d5c65159SKalle Valo 			spin_unlock_bh(&rx_ring->idr_lock);
3296d5c65159SKalle Valo 
3297d5c65159SKalle Valo 			rxcb = ATH11K_SKB_RXCB(skb);
3298d5c65159SKalle Valo 			dma_unmap_single(ab->dev, rxcb->paddr,
3299d5c65159SKalle Valo 					 skb->len + skb_tailroom(skb),
3300d5c65159SKalle Valo 					 DMA_FROM_DEVICE);
3301d5c65159SKalle Valo 			dev_kfree_skb_any(skb);
3302d5c65159SKalle Valo 
3303d5c65159SKalle Valo 			num_buf_freed++;
3304d5c65159SKalle Valo 		}
3305d5c65159SKalle Valo 
3306d5c65159SKalle Valo 		ath11k_dp_rx_link_desc_return(ab, desc,
3307d5c65159SKalle Valo 					      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
3308d5c65159SKalle Valo 	}
3309d5c65159SKalle Valo 
3310d5c65159SKalle Valo 	ath11k_hal_srng_access_end(ab, srng);
3311d5c65159SKalle Valo 
3312d5c65159SKalle Valo 	spin_unlock_bh(&srng->lock);
3313d5c65159SKalle Valo 
3314d5c65159SKalle Valo 	if (num_buf_freed)
3315d5c65159SKalle Valo 		ath11k_dp_rxbufs_replenish(ab, mac_id, rx_ring, num_buf_freed,
3316d5c65159SKalle Valo 					   HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC);
3317d5c65159SKalle Valo 
3318d5c65159SKalle Valo 	return budget - quota;
3319d5c65159SKalle Valo }
3320d5c65159SKalle Valo 
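/* Process the REO status ring: parse each status TLV into a hal_reo_status
 * and, if it matches a command queued on dp->reo_cmd_list, remove that
 * command and invoke its completion handler.
 */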
3321d5c65159SKalle Valo void ath11k_dp_process_reo_status(struct ath11k_base *ab)
3322d5c65159SKalle Valo {
3323d5c65159SKalle Valo 	struct ath11k_dp *dp = &ab->dp;
3324d5c65159SKalle Valo 	struct hal_srng *srng;
3325d5c65159SKalle Valo 	struct dp_reo_cmd *cmd, *tmp;
3326d5c65159SKalle Valo 	bool found = false;
3327d5c65159SKalle Valo 	u32 *reo_desc;
3328d5c65159SKalle Valo 	u16 tag;
3329d5c65159SKalle Valo 	struct hal_reo_status reo_status;
3330d5c65159SKalle Valo 
3331d5c65159SKalle Valo 	srng = &ab->hal.srng_list[dp->reo_status_ring.ring_id];
3332d5c65159SKalle Valo 
3333d5c65159SKalle Valo 	memset(&reo_status, 0, sizeof(reo_status));
3334d5c65159SKalle Valo 
3335d5c65159SKalle Valo 	spin_lock_bh(&srng->lock);
3336d5c65159SKalle Valo 
3337d5c65159SKalle Valo 	ath11k_hal_srng_access_begin(ab, srng);
3338d5c65159SKalle Valo 
3339d5c65159SKalle Valo 	while ((reo_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
3340d5c65159SKalle Valo 		tag = FIELD_GET(HAL_SRNG_TLV_HDR_TAG, *reo_desc);
3341d5c65159SKalle Valo 
3342d5c65159SKalle Valo 		switch (tag) {
3343d5c65159SKalle Valo 		case HAL_REO_GET_QUEUE_STATS_STATUS:
3344d5c65159SKalle Valo 			ath11k_hal_reo_status_queue_stats(ab, reo_desc,
3345d5c65159SKalle Valo 							  &reo_status);
3346d5c65159SKalle Valo 			break;
3347d5c65159SKalle Valo 		case HAL_REO_FLUSH_QUEUE_STATUS:
3348d5c65159SKalle Valo 			ath11k_hal_reo_flush_queue_status(ab, reo_desc,
3349d5c65159SKalle Valo 							  &reo_status);
3350d5c65159SKalle Valo 			break;
3351d5c65159SKalle Valo 		case HAL_REO_FLUSH_CACHE_STATUS:
3352d5c65159SKalle Valo 			ath11k_hal_reo_flush_cache_status(ab, reo_desc,
3353d5c65159SKalle Valo 							  &reo_status);
3354d5c65159SKalle Valo 			break;
3355d5c65159SKalle Valo 		case HAL_REO_UNBLOCK_CACHE_STATUS:
3356d5c65159SKalle Valo 			ath11k_hal_reo_unblk_cache_status(ab, reo_desc,
3357d5c65159SKalle Valo 							  &reo_status);
3358d5c65159SKalle Valo 			break;
3359d5c65159SKalle Valo 		case HAL_REO_FLUSH_TIMEOUT_LIST_STATUS:
3360d5c65159SKalle Valo 			ath11k_hal_reo_flush_timeout_list_status(ab, reo_desc,
3361d5c65159SKalle Valo 								 &reo_status);
3362d5c65159SKalle Valo 			break;
3363d5c65159SKalle Valo 		case HAL_REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS:
3364d5c65159SKalle Valo 			ath11k_hal_reo_desc_thresh_reached_status(ab, reo_desc,
3365d5c65159SKalle Valo 								  &reo_status);
3366d5c65159SKalle Valo 			break;
3367d5c65159SKalle Valo 		case HAL_REO_UPDATE_RX_REO_QUEUE_STATUS:
3368d5c65159SKalle Valo 			ath11k_hal_reo_update_rx_reo_queue_status(ab, reo_desc,
3369d5c65159SKalle Valo 								  &reo_status);
3370d5c65159SKalle Valo 			break;
3371d5c65159SKalle Valo 		default:
3372d5c65159SKalle Valo 			ath11k_warn(ab, "unknown reo status type %d\n", tag);
3373d5c65159SKalle Valo 			continue;
3374d5c65159SKalle Valo 		}
3375d5c65159SKalle Valo 
3376d5c65159SKalle Valo 		spin_lock_bh(&dp->reo_cmd_lock);
3377d5c65159SKalle Valo 		list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
3378d5c65159SKalle Valo 			if (reo_status.uniform_hdr.cmd_num == cmd->cmd_num) {
3379d5c65159SKalle Valo 				found = true;
3380d5c65159SKalle Valo 				list_del(&cmd->list);
3381d5c65159SKalle Valo 				break;
3382d5c65159SKalle Valo 			}
3383d5c65159SKalle Valo 		}
3384d5c65159SKalle Valo 		spin_unlock_bh(&dp->reo_cmd_lock);
3385d5c65159SKalle Valo 
3386d5c65159SKalle Valo 		if (found) {
3387d5c65159SKalle Valo 			cmd->handler(dp, (void *)&cmd->data,
3388d5c65159SKalle Valo 				     reo_status.uniform_hdr.cmd_status);
3389d5c65159SKalle Valo 			kfree(cmd);
3390d5c65159SKalle Valo 		}
3391d5c65159SKalle Valo 
3392d5c65159SKalle Valo 		found = false;
3393d5c65159SKalle Valo 	}
3394d5c65159SKalle Valo 
3395d5c65159SKalle Valo 	ath11k_hal_srng_access_end(ab, srng);
3396d5c65159SKalle Valo 
3397d5c65159SKalle Valo 	spin_unlock_bh(&srng->lock);
3398d5c65159SKalle Valo }
3399d5c65159SKalle Valo 
3400d5c65159SKalle Valo void ath11k_dp_rx_pdev_free(struct ath11k_base *ab, int mac_id)
3401d5c65159SKalle Valo {
3402d5c65159SKalle Valo 	struct ath11k *ar = ab->pdevs[mac_id].ar;
3403d5c65159SKalle Valo 
3404d5c65159SKalle Valo 	ath11k_dp_rx_pdev_srng_free(ar);
3405d5c65159SKalle Valo 	ath11k_dp_rxdma_pdev_buf_free(ar);
3406d5c65159SKalle Valo }
3407d5c65159SKalle Valo 
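/* Per-pdev rx init: allocate the rx srngs and rxdma buffers, then
 * register the refill, error destination and monitor rings with the
 * target via HTT.
 */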
3408d5c65159SKalle Valo int ath11k_dp_rx_pdev_alloc(struct ath11k_base *ab, int mac_id)
3409d5c65159SKalle Valo {
3410d5c65159SKalle Valo 	struct ath11k *ar = ab->pdevs[mac_id].ar;
3411d5c65159SKalle Valo 	struct ath11k_pdev_dp *dp = &ar->dp;
3412d5c65159SKalle Valo 	u32 ring_id;
3413d5c65159SKalle Valo 	int ret;
3414d5c65159SKalle Valo 
3415d5c65159SKalle Valo 	ret = ath11k_dp_rx_pdev_srng_alloc(ar);
3416d5c65159SKalle Valo 	if (ret) {
3417d5c65159SKalle Valo 		ath11k_warn(ab, "failed to setup rx srngs\n");
3418d5c65159SKalle Valo 		return ret;
3419d5c65159SKalle Valo 	}
3420d5c65159SKalle Valo 
3421d5c65159SKalle Valo 	ret = ath11k_dp_rxdma_pdev_buf_setup(ar);
3422d5c65159SKalle Valo 	if (ret) {
3423d5c65159SKalle Valo 		ath11k_warn(ab, "failed to setup rxdma ring\n");
3424d5c65159SKalle Valo 		return ret;
3425d5c65159SKalle Valo 	}
3426d5c65159SKalle Valo 
3427d5c65159SKalle Valo 	ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id;
3428d5c65159SKalle Valo 	ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id, HAL_RXDMA_BUF);
3429d5c65159SKalle Valo 	if (ret) {
3430d5c65159SKalle Valo 		ath11k_warn(ab, "failed to configure rx_refill_buf_ring %d\n",
3431d5c65159SKalle Valo 			    ret);
3432d5c65159SKalle Valo 		return ret;
3433d5c65159SKalle Valo 	}
3434d5c65159SKalle Valo 
3435d5c65159SKalle Valo 	ring_id = dp->rxdma_err_dst_ring.ring_id;
3436d5c65159SKalle Valo 	ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id, HAL_RXDMA_DST);
3437d5c65159SKalle Valo 	if (ret) {
3438d5c65159SKalle Valo 		ath11k_warn(ab, "failed to configure rxdma_err_dst_ring %d\n",
3439d5c65159SKalle Valo 			    ret);
3440d5c65159SKalle Valo 		return ret;
3441d5c65159SKalle Valo 	}
3442d5c65159SKalle Valo 
3443d5c65159SKalle Valo 	ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id;
3444d5c65159SKalle Valo 	ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id,
3445d5c65159SKalle Valo 					  mac_id, HAL_RXDMA_MONITOR_BUF);
3446d5c65159SKalle Valo 	if (ret) {
3447d5c65159SKalle Valo 		ath11k_warn(ab, "failed to configure rxdma_mon_buf_ring %d\n",
3448d5c65159SKalle Valo 			    ret);
3449d5c65159SKalle Valo 		return ret;
3450d5c65159SKalle Valo 	}
3451d5c65159SKalle Valo 	ret = ath11k_dp_tx_htt_srng_setup(ab,
3452d5c65159SKalle Valo 					  dp->rxdma_mon_dst_ring.ring_id,
3453d5c65159SKalle Valo 					  mac_id, HAL_RXDMA_MONITOR_DST);
3454d5c65159SKalle Valo 	if (ret) {
3455d5c65159SKalle Valo 		ath11k_warn(ab, "failed to configure rxdma_mon_dst_ring %d\n",
3456d5c65159SKalle Valo 			    ret);
3457d5c65159SKalle Valo 		return ret;
3458d5c65159SKalle Valo 	}
3459d5c65159SKalle Valo 	ret = ath11k_dp_tx_htt_srng_setup(ab,
3460d5c65159SKalle Valo 					  dp->rxdma_mon_desc_ring.ring_id,
3461d5c65159SKalle Valo 					  mac_id, HAL_RXDMA_MONITOR_DESC);
3462d5c65159SKalle Valo 	if (ret) {
3463d5c65159SKalle Valo 		ath11k_warn(ab, "failed to configure rxdma_mon_desc_ring %d\n",
3464d5c65159SKalle Valo 			    ret);
3465d5c65159SKalle Valo 		return ret;
3466d5c65159SKalle Valo 	}
3467d5c65159SKalle Valo 	ring_id = dp->rx_mon_status_refill_ring.refill_buf_ring.ring_id;
3468d5c65159SKalle Valo 	ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id,
3469d5c65159SKalle Valo 					  HAL_RXDMA_MONITOR_STATUS);
3470d5c65159SKalle Valo 	if (ret) {
3471d5c65159SKalle Valo 		ath11k_warn(ab,
3472d5c65159SKalle Valo 			    "failed to configure mon_status_refill_ring %d\n",
3473d5c65159SKalle Valo 			    ret);
3474d5c65159SKalle Valo 		return ret;
3475d5c65159SKalle Valo 	}
3476d5c65159SKalle Valo 	return 0;
3477d5c65159SKalle Valo }
3478d5c65159SKalle Valo 
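/* Split the remaining length of a multi-buffer MSDU into per-buffer
 * fragments: each fragment is capped at DP_RX_BUFFER_SIZE minus the rx
 * descriptor, and *total_len is reduced by the amount consumed.
 */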
3479d5c65159SKalle Valo static void ath11k_dp_mon_set_frag_len(u32 *total_len, u32 *frag_len)
3480d5c65159SKalle Valo {
3481d5c65159SKalle Valo 	if (*total_len >= (DP_RX_BUFFER_SIZE - sizeof(struct hal_rx_desc))) {
3482d5c65159SKalle Valo 		*frag_len = DP_RX_BUFFER_SIZE - sizeof(struct hal_rx_desc);
3483d5c65159SKalle Valo 		*total_len -= *frag_len;
3484d5c65159SKalle Valo 	} else {
3485d5c65159SKalle Valo 		*frag_len = *total_len;
3486d5c65159SKalle Valo 		*total_len = 0;
3487d5c65159SKalle Valo 	}
3488d5c65159SKalle Valo }
3489d5c65159SKalle Valo 
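/* Return a used monitor link descriptor by copying its buffer address
 * info into the next free entry of the rxdma monitor descriptor ring.
 */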
3490d5c65159SKalle Valo static
3491d5c65159SKalle Valo int ath11k_dp_rx_monitor_link_desc_return(struct ath11k *ar,
3492d5c65159SKalle Valo 					  void *p_last_buf_addr_info,
3493d5c65159SKalle Valo 					  u8 mac_id)
3494d5c65159SKalle Valo {
3495d5c65159SKalle Valo 	struct ath11k_pdev_dp *dp = &ar->dp;
3496d5c65159SKalle Valo 	struct dp_srng *dp_srng;
3497d5c65159SKalle Valo 	void *hal_srng;
3498d5c65159SKalle Valo 	void *src_srng_desc;
3499d5c65159SKalle Valo 	int ret = 0;
3500d5c65159SKalle Valo 
3501d5c65159SKalle Valo 	dp_srng = &dp->rxdma_mon_desc_ring;
3502d5c65159SKalle Valo 	hal_srng = &ar->ab->hal.srng_list[dp_srng->ring_id];
3503d5c65159SKalle Valo 
3504d5c65159SKalle Valo 	ath11k_hal_srng_access_begin(ar->ab, hal_srng);
3505d5c65159SKalle Valo 
3506d5c65159SKalle Valo 	src_srng_desc = ath11k_hal_srng_src_get_next_entry(ar->ab, hal_srng);
3507d5c65159SKalle Valo 
3508d5c65159SKalle Valo 	if (src_srng_desc) {
3509d5c65159SKalle Valo 		struct ath11k_buffer_addr *src_desc =
3510d5c65159SKalle Valo 				(struct ath11k_buffer_addr *)src_srng_desc;
3511d5c65159SKalle Valo 
3512d5c65159SKalle Valo 		*src_desc = *((struct ath11k_buffer_addr *)p_last_buf_addr_info);
3513d5c65159SKalle Valo 	} else {
3514d5c65159SKalle Valo 		ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
3515d5c65159SKalle Valo 			   "Monitor Link Desc Ring %d Full", mac_id);
3516d5c65159SKalle Valo 		ret = -ENOMEM;
3517d5c65159SKalle Valo 	}
3518d5c65159SKalle Valo 
3519d5c65159SKalle Valo 	ath11k_hal_srng_access_end(ar->ab, hal_srng);
3520d5c65159SKalle Valo 	return ret;
3521d5c65159SKalle Valo }
3522d5c65159SKalle Valo 
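/* Fetch the physical address and sw cookie of the next link descriptor
 * from the buffer address info embedded in the current MSDU link
 * descriptor.
 */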
3523d5c65159SKalle Valo static
3524d5c65159SKalle Valo void ath11k_dp_rx_mon_next_link_desc_get(void *rx_msdu_link_desc,
3525d5c65159SKalle Valo 					 dma_addr_t *paddr, u32 *sw_cookie,
3526d5c65159SKalle Valo 					 void **pp_buf_addr_info)
3527d5c65159SKalle Valo {
3528d5c65159SKalle Valo 	struct hal_rx_msdu_link *msdu_link =
3529d5c65159SKalle Valo 			(struct hal_rx_msdu_link *)rx_msdu_link_desc;
3530d5c65159SKalle Valo 	struct ath11k_buffer_addr *buf_addr_info;
3531d5c65159SKalle Valo 	u8 rbm = 0;
3532d5c65159SKalle Valo 
3533d5c65159SKalle Valo 	buf_addr_info = (struct ath11k_buffer_addr *)&msdu_link->buf_addr_info;
3534d5c65159SKalle Valo 
3535d5c65159SKalle Valo 	ath11k_hal_rx_buf_addr_info_get(buf_addr_info, paddr, sw_cookie, &rbm);
3536d5c65159SKalle Valo 
3537d5c65159SKalle Valo 	*pp_buf_addr_info = (void *)buf_addr_info;
3538d5c65159SKalle Valo }
3539d5c65159SKalle Valo 
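/* Force skb->len to exactly @len: trim when the skb is longer,
 * otherwise grow it, expanding the head when tailroom is short. On
 * allocation failure the skb is freed and -ENOMEM is returned.
 */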
3540d5c65159SKalle Valo static int ath11k_dp_pkt_set_pktlen(struct sk_buff *skb, u32 len)
3541d5c65159SKalle Valo {
3542d5c65159SKalle Valo 	if (skb->len > len) {
3543d5c65159SKalle Valo 		skb_trim(skb, len);
3544d5c65159SKalle Valo 	} else {
3545d5c65159SKalle Valo 		if (skb_tailroom(skb) < len - skb->len) {
3546d5c65159SKalle Valo 			if ((pskb_expand_head(skb, 0,
3547d5c65159SKalle Valo 					      len - skb->len - skb_tailroom(skb),
3548d5c65159SKalle Valo 					      GFP_ATOMIC))) {
3549d5c65159SKalle Valo 				dev_kfree_skb_any(skb);
3550d5c65159SKalle Valo 				return -ENOMEM;
3551d5c65159SKalle Valo 			}
3552d5c65159SKalle Valo 		}
3553d5c65159SKalle Valo 		skb_put(skb, (len - skb->len));
3554d5c65159SKalle Valo 	}
3555d5c65159SKalle Valo 	return 0;
3556d5c65159SKalle Valo }
3557d5c65159SKalle Valo 
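/* Walk an MSDU link descriptor and record each MSDU's flags, length,
 * sw cookie and return buffer manager in @msdu_list. The walk stops at
 * the first entry with a zero buffer address, marking the previous
 * MSDU as the last one in the MPDU.
 */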
3558d5c65159SKalle Valo static void ath11k_hal_rx_msdu_list_get(struct ath11k *ar,
3559d5c65159SKalle Valo 					void *msdu_link_desc,
3560d5c65159SKalle Valo 					struct hal_rx_msdu_list *msdu_list,
3561d5c65159SKalle Valo 					u16 *num_msdus)
3562d5c65159SKalle Valo {
3563d5c65159SKalle Valo 	struct hal_rx_msdu_details *msdu_details = NULL;
3564d5c65159SKalle Valo 	struct rx_msdu_desc *msdu_desc_info = NULL;
3565d5c65159SKalle Valo 	struct hal_rx_msdu_link *msdu_link = NULL;
3566d5c65159SKalle Valo 	int i;
3567d5c65159SKalle Valo 	u32 last = FIELD_PREP(RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU, 1);
3568d5c65159SKalle Valo 	u32 first = FIELD_PREP(RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU, 1);
3569d5c65159SKalle Valo 	u8  tmp  = 0;
3570d5c65159SKalle Valo 
3571d5c65159SKalle Valo 	msdu_link = (struct hal_rx_msdu_link *)msdu_link_desc;
3572d5c65159SKalle Valo 	msdu_details = &msdu_link->msdu_link[0];
3573d5c65159SKalle Valo 
3574d5c65159SKalle Valo 	for (i = 0; i < HAL_RX_NUM_MSDU_DESC; i++) {
3575d5c65159SKalle Valo 		if (FIELD_GET(BUFFER_ADDR_INFO0_ADDR,
3576d5c65159SKalle Valo 			      msdu_details[i].buf_addr_info.info0) == 0) {
3577d5c65159SKalle Valo 			msdu_desc_info = &msdu_details[i - 1].rx_msdu_info;
3578d5c65159SKalle Valo 			msdu_desc_info->info0 |= last;
3580d5c65159SKalle Valo 			break;
3581d5c65159SKalle Valo 		}
3582d5c65159SKalle Valo 		msdu_desc_info = &msdu_details[i].rx_msdu_info;
3583d5c65159SKalle Valo 
3584d5c65159SKalle Valo 		if (!i)
3585d5c65159SKalle Valo 			msdu_desc_info->info0 |= first;
3586d5c65159SKalle Valo 		else if (i == (HAL_RX_NUM_MSDU_DESC - 1))
3587d5c65159SKalle Valo 			msdu_desc_info->info0 |= last;
3588d5c65159SKalle Valo 		msdu_list->msdu_info[i].msdu_flags = msdu_desc_info->info0;
3589d5c65159SKalle Valo 		msdu_list->msdu_info[i].msdu_len =
3590d5c65159SKalle Valo 			 HAL_RX_MSDU_PKT_LENGTH_GET(msdu_desc_info->info0);
3591d5c65159SKalle Valo 		msdu_list->sw_cookie[i] =
3592d5c65159SKalle Valo 			FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
3593d5c65159SKalle Valo 				  msdu_details[i].buf_addr_info.info1);
3594d5c65159SKalle Valo 		tmp = FIELD_GET(BUFFER_ADDR_INFO1_RET_BUF_MGR,
3595d5c65159SKalle Valo 				msdu_details[i].buf_addr_info.info1);
3596d5c65159SKalle Valo 		msdu_list->rbm[i] = tmp;
3597d5c65159SKalle Valo 	}
3598d5c65159SKalle Valo 	*num_msdus = i;
3599d5c65159SKalle Valo }
3600d5c65159SKalle Valo 
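/* Compare the ppdu id seen on the monitor destination ring against the
 * status ring ppdu id, allowing for wrap-around. When the destination
 * ring has advanced to a newer ppdu (or the status ppdu id has
 * wrapped), *ppdu_id is updated and the new id is returned; otherwise
 * 0 is returned.
 */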
3601d5c65159SKalle Valo static u32 ath11k_dp_rx_mon_comp_ppduid(u32 msdu_ppdu_id, u32 *ppdu_id,
3602d5c65159SKalle Valo 					u32 *rx_bufs_used)
3603d5c65159SKalle Valo {
3604d5c65159SKalle Valo 	u32 ret = 0;
3605d5c65159SKalle Valo 
3606d5c65159SKalle Valo 	if ((*ppdu_id < msdu_ppdu_id) &&
3607d5c65159SKalle Valo 	    ((msdu_ppdu_id - *ppdu_id) < DP_NOT_PPDU_ID_WRAP_AROUND)) {
3608d5c65159SKalle Valo 		*ppdu_id = msdu_ppdu_id;
3609d5c65159SKalle Valo 		ret = msdu_ppdu_id;
3610d5c65159SKalle Valo 	} else if ((*ppdu_id > msdu_ppdu_id) &&
3611d5c65159SKalle Valo 		((*ppdu_id - msdu_ppdu_id) > DP_NOT_PPDU_ID_WRAP_AROUND)) {
3612d5c65159SKalle Valo 		/* mon_dst is behind mon_status,
3613d5c65159SKalle Valo 		 * so skip this dst_ring entry and free it
3614d5c65159SKalle Valo 		 */
3615d5c65159SKalle Valo 		*rx_bufs_used += 1;
3616d5c65159SKalle Valo 		*ppdu_id = msdu_ppdu_id;
3617d5c65159SKalle Valo 		ret = msdu_ppdu_id;
3618d5c65159SKalle Valo 	}
3619d5c65159SKalle Valo 	return ret;
3620d5c65159SKalle Valo }
3621d5c65159SKalle Valo 
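/* Work out how many payload bytes of this MSDU live in the current rx
 * buffer: continuation MSDUs split info->msdu_len across buffers via
 * ath11k_dp_mon_set_frag_len(), while a non-continued buffer carries
 * the remaining length (or the whole MSDU when it was never split) and
 * completes one MSDU.
 */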
3622d5c65159SKalle Valo static void ath11k_dp_mon_get_buf_len(struct hal_rx_msdu_desc_info *info,
3623d5c65159SKalle Valo 				      bool *is_frag, u32 *total_len,
3624d5c65159SKalle Valo 				      u32 *frag_len, u32 *msdu_cnt)
3625d5c65159SKalle Valo {
3626d5c65159SKalle Valo 	if (info->msdu_flags & RX_MSDU_DESC_INFO0_MSDU_CONTINUATION) {
3627d5c65159SKalle Valo 		if (!*is_frag) {
3628d5c65159SKalle Valo 			*total_len = info->msdu_len;
3629d5c65159SKalle Valo 			*is_frag = true;
3630d5c65159SKalle Valo 		}
3631d5c65159SKalle Valo 		ath11k_dp_mon_set_frag_len(total_len,
3632d5c65159SKalle Valo 					   frag_len);
3633d5c65159SKalle Valo 	} else {
3634d5c65159SKalle Valo 		if (*is_frag) {
3635d5c65159SKalle Valo 			ath11k_dp_mon_set_frag_len(total_len,
3636d5c65159SKalle Valo 						   frag_len);
3637d5c65159SKalle Valo 		} else {
3638d5c65159SKalle Valo 			*frag_len = info->msdu_len;
3639d5c65159SKalle Valo 		}
3640d5c65159SKalle Valo 		*is_frag = false;
3641d5c65159SKalle Valo 		*msdu_cnt -= 1;
3642d5c65159SKalle Valo 	}
3643d5c65159SKalle Valo }
3644d5c65159SKalle Valo 
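/* Pop one monitor destination ring entry: walk the MSDU link
 * descriptors it references, unmap the rx buffers and chain them into
 * a head/tail MSDU list for the current ppdu. Buffers hit by rxdma
 * errors, duplicate cookies or a ppdu id mismatch are dropped. Returns
 * the number of rx buffers consumed so the caller can replenish them.
 */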
3645d5c65159SKalle Valo static u32
3646d5c65159SKalle Valo ath11k_dp_rx_mon_mpdu_pop(struct ath11k *ar,
3647d5c65159SKalle Valo 			  void *ring_entry, struct sk_buff **head_msdu,
3648d5c65159SKalle Valo 			  struct sk_buff **tail_msdu, u32 *npackets,
3649d5c65159SKalle Valo 			  u32 *ppdu_id)
3650d5c65159SKalle Valo {
3651d5c65159SKalle Valo 	struct ath11k_pdev_dp *dp = &ar->dp;
3652d5c65159SKalle Valo 	struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
3653d5c65159SKalle Valo 	struct dp_rxdma_ring *rx_ring = &dp->rxdma_mon_buf_ring;
3654d5c65159SKalle Valo 	struct sk_buff *msdu = NULL, *last = NULL;
3655d5c65159SKalle Valo 	struct hal_rx_msdu_list msdu_list;
3656d5c65159SKalle Valo 	void *p_buf_addr_info, *p_last_buf_addr_info;
3657d5c65159SKalle Valo 	struct hal_rx_desc *rx_desc;
3658d5c65159SKalle Valo 	void *rx_msdu_link_desc;
3659d5c65159SKalle Valo 	dma_addr_t paddr;
3660d5c65159SKalle Valo 	u16 num_msdus = 0;
3661d5c65159SKalle Valo 	u32 rx_buf_size, rx_pkt_offset, sw_cookie;
3662d5c65159SKalle Valo 	u32 rx_bufs_used = 0, i = 0;
3663d5c65159SKalle Valo 	u32 msdu_ppdu_id = 0, msdu_cnt = 0;
3664d5c65159SKalle Valo 	u32 total_len = 0, frag_len = 0;
3665d5c65159SKalle Valo 	bool is_frag, is_first_msdu;
3666d5c65159SKalle Valo 	bool drop_mpdu = false;
3667d5c65159SKalle Valo 	struct ath11k_skb_rxcb *rxcb;
3668d5c65159SKalle Valo 	struct hal_reo_entrance_ring *ent_desc =
3669d5c65159SKalle Valo 			(struct hal_reo_entrance_ring *)ring_entry;
3670d5c65159SKalle Valo 	int buf_id;
3671d5c65159SKalle Valo 
3672d5c65159SKalle Valo 	ath11k_hal_rx_reo_ent_buf_paddr_get(ring_entry, &paddr,
3673d5c65159SKalle Valo 					    &sw_cookie, &p_last_buf_addr_info,
3674d5c65159SKalle Valo 					    &msdu_cnt);
3675d5c65159SKalle Valo 
3676d5c65159SKalle Valo 	if (FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_PUSH_REASON,
3677d5c65159SKalle Valo 		      ent_desc->info1) ==
3678d5c65159SKalle Valo 		      HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) {
3679d5c65159SKalle Valo 		u8 rxdma_err =
3680d5c65159SKalle Valo 			FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE,
3681d5c65159SKalle Valo 				  ent_desc->info1);
3682d5c65159SKalle Valo 		if (rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_FLUSH_REQUEST_ERR ||
3683d5c65159SKalle Valo 		    rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_MPDU_LEN_ERR ||
3684d5c65159SKalle Valo 		    rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_OVERFLOW_ERR) {
3685d5c65159SKalle Valo 			drop_mpdu = true;
3686d5c65159SKalle Valo 			pmon->rx_mon_stats.dest_mpdu_drop++;
3687d5c65159SKalle Valo 		}
3688d5c65159SKalle Valo 	}
3689d5c65159SKalle Valo 
3690d5c65159SKalle Valo 	is_frag = false;
3691d5c65159SKalle Valo 	is_first_msdu = true;
3692d5c65159SKalle Valo 
3693d5c65159SKalle Valo 	do {
3694d5c65159SKalle Valo 		if (pmon->mon_last_linkdesc_paddr == paddr) {
3695d5c65159SKalle Valo 			pmon->rx_mon_stats.dup_mon_linkdesc_cnt++;
3696d5c65159SKalle Valo 			return rx_bufs_used;
3697d5c65159SKalle Valo 		}
3698d5c65159SKalle Valo 
3699d5c65159SKalle Valo 		rx_msdu_link_desc =
3700d5c65159SKalle Valo 			(void *)pmon->link_desc_banks[sw_cookie].vaddr +
3701d5c65159SKalle Valo 			(paddr - pmon->link_desc_banks[sw_cookie].paddr);
3702d5c65159SKalle Valo 
3703d5c65159SKalle Valo 		ath11k_hal_rx_msdu_list_get(ar, rx_msdu_link_desc, &msdu_list,
3704d5c65159SKalle Valo 					    &num_msdus);
3705d5c65159SKalle Valo 
3706d5c65159SKalle Valo 		for (i = 0; i < num_msdus; i++) {
3707d5c65159SKalle Valo 			u32 l2_hdr_offset;
3708d5c65159SKalle Valo 
3709d5c65159SKalle Valo 			if (pmon->mon_last_buf_cookie == msdu_list.sw_cookie[i]) {
3710d5c65159SKalle Valo 				ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
3711d5c65159SKalle Valo 					   "i %d last_cookie %d is same\n",
3712d5c65159SKalle Valo 					   i, pmon->mon_last_buf_cookie);
3713d5c65159SKalle Valo 				drop_mpdu = true;
3714d5c65159SKalle Valo 				pmon->rx_mon_stats.dup_mon_buf_cnt++;
3715d5c65159SKalle Valo 				continue;
3716d5c65159SKalle Valo 			}
3717d5c65159SKalle Valo 			buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
3718d5c65159SKalle Valo 					   msdu_list.sw_cookie[i]);
3719d5c65159SKalle Valo 
3720d5c65159SKalle Valo 			spin_lock_bh(&rx_ring->idr_lock);
3721d5c65159SKalle Valo 			msdu = idr_find(&rx_ring->bufs_idr, buf_id);
3722d5c65159SKalle Valo 			spin_unlock_bh(&rx_ring->idr_lock);
3723d5c65159SKalle Valo 			if (!msdu) {
3724d5c65159SKalle Valo 				ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
3725d5c65159SKalle Valo 					   "msdu_pop: invalid buf_id %d\n", buf_id);
3726d5c65159SKalle Valo 				break;
3727d5c65159SKalle Valo 			}
3728d5c65159SKalle Valo 			rxcb = ATH11K_SKB_RXCB(msdu);
3729d5c65159SKalle Valo 			if (!rxcb->unmapped) {
3730d5c65159SKalle Valo 				dma_unmap_single(ar->ab->dev, rxcb->paddr,
3731d5c65159SKalle Valo 						 msdu->len +
3732d5c65159SKalle Valo 						 skb_tailroom(msdu),
3733d5c65159SKalle Valo 						 DMA_FROM_DEVICE);
3734d5c65159SKalle Valo 				rxcb->unmapped = 1;
3735d5c65159SKalle Valo 			}
3736d5c65159SKalle Valo 			if (drop_mpdu) {
3737d5c65159SKalle Valo 				ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
3738d5c65159SKalle Valo 					   "i %d drop msdu %p *ppdu_id %x\n",
3739d5c65159SKalle Valo 					   i, msdu, *ppdu_id);
3740d5c65159SKalle Valo 				dev_kfree_skb_any(msdu);
3741d5c65159SKalle Valo 				msdu = NULL;
3742d5c65159SKalle Valo 				goto next_msdu;
3743d5c65159SKalle Valo 			}
3744d5c65159SKalle Valo 
3745d5c65159SKalle Valo 			rx_desc = (struct hal_rx_desc *)msdu->data;
3746d5c65159SKalle Valo 
3747d5c65159SKalle Valo 			rx_pkt_offset = sizeof(struct hal_rx_desc);
3748d5c65159SKalle Valo 			l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad(rx_desc);
3749d5c65159SKalle Valo 
3750d5c65159SKalle Valo 			if (is_first_msdu) {
3751d5c65159SKalle Valo 				if (!ath11k_dp_rxdesc_mpdu_valid(rx_desc)) {
3752d5c65159SKalle Valo 					drop_mpdu = true;
3753d5c65159SKalle Valo 					dev_kfree_skb_any(msdu);
3754d5c65159SKalle Valo 					msdu = NULL;
3755d5c65159SKalle Valo 					pmon->mon_last_linkdesc_paddr = paddr;
3756d5c65159SKalle Valo 					goto next_msdu;
3757d5c65159SKalle Valo 				}
3758d5c65159SKalle Valo 
3759d5c65159SKalle Valo 				msdu_ppdu_id =
3760d5c65159SKalle Valo 					ath11k_dp_rxdesc_get_ppduid(rx_desc);
3761d5c65159SKalle Valo 
3762d5c65159SKalle Valo 				if (ath11k_dp_rx_mon_comp_ppduid(msdu_ppdu_id,
3763d5c65159SKalle Valo 								 ppdu_id,
37645e02bc73SMiles Hu 								 &rx_bufs_used)) {
37655e02bc73SMiles Hu 					if (rx_bufs_used) {
37665e02bc73SMiles Hu 						drop_mpdu = true;
37675e02bc73SMiles Hu 						dev_kfree_skb_any(msdu);
37685e02bc73SMiles Hu 						msdu = NULL;
37695e02bc73SMiles Hu 						goto next_msdu;
37705e02bc73SMiles Hu 					}
3771d5c65159SKalle Valo 					return rx_bufs_used;
37725e02bc73SMiles Hu 				}
3773d5c65159SKalle Valo 				pmon->mon_last_linkdesc_paddr = paddr;
3774d5c65159SKalle Valo 				is_first_msdu = false;
3775d5c65159SKalle Valo 			}
3776d5c65159SKalle Valo 			ath11k_dp_mon_get_buf_len(&msdu_list.msdu_info[i],
3777d5c65159SKalle Valo 						  &is_frag, &total_len,
3778d5c65159SKalle Valo 						  &frag_len, &msdu_cnt);
3779d5c65159SKalle Valo 			rx_buf_size = rx_pkt_offset + l2_hdr_offset + frag_len;
3780d5c65159SKalle Valo 
			/* pktlen adjustment may free the skb on allocation failure */
			if (ath11k_dp_pkt_set_pktlen(msdu, rx_buf_size)) {
				msdu = NULL;
				goto next_msdu;
			}
3782d5c65159SKalle Valo 
3783d5c65159SKalle Valo 			if (!(*head_msdu))
3784d5c65159SKalle Valo 				*head_msdu = msdu;
3785d5c65159SKalle Valo 			else if (last)
3786d5c65159SKalle Valo 				last->next = msdu;
3787d5c65159SKalle Valo 
3788d5c65159SKalle Valo 			last = msdu;
3789d5c65159SKalle Valo next_msdu:
3790d5c65159SKalle Valo 			pmon->mon_last_buf_cookie = msdu_list.sw_cookie[i];
3791d5c65159SKalle Valo 			rx_bufs_used++;
3792d5c65159SKalle Valo 			spin_lock_bh(&rx_ring->idr_lock);
3793d5c65159SKalle Valo 			idr_remove(&rx_ring->bufs_idr, buf_id);
3794d5c65159SKalle Valo 			spin_unlock_bh(&rx_ring->idr_lock);
3795d5c65159SKalle Valo 		}
3796d5c65159SKalle Valo 
3797d5c65159SKalle Valo 		ath11k_dp_rx_mon_next_link_desc_get(rx_msdu_link_desc, &paddr,
3798d5c65159SKalle Valo 						    &sw_cookie,
3799d5c65159SKalle Valo 						    &p_buf_addr_info);
3800d5c65159SKalle Valo 
3801d5c65159SKalle Valo 		if (ath11k_dp_rx_monitor_link_desc_return(ar,
3802d5c65159SKalle Valo 							  p_last_buf_addr_info,
3803d5c65159SKalle Valo 							  dp->mac_id))
3804d5c65159SKalle Valo 			ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
3805d5c65159SKalle Valo 				   "dp_rx_monitor_link_desc_return failed");
3806d5c65159SKalle Valo 
3807d5c65159SKalle Valo 		p_last_buf_addr_info = p_buf_addr_info;
3808d5c65159SKalle Valo 
3809d5c65159SKalle Valo 	} while (paddr && msdu_cnt);
3810d5c65159SKalle Valo 
3811d5c65159SKalle Valo 	if (last)
3812d5c65159SKalle Valo 		last->next = NULL;
3813d5c65159SKalle Valo 
3814d5c65159SKalle Valo 	*tail_msdu = msdu;
3815d5c65159SKalle Valo 
3816d5c65159SKalle Valo 	if (msdu_cnt == 0)
3817d5c65159SKalle Valo 		*npackets = 1;
3818d5c65159SKalle Valo 
3819d5c65159SKalle Valo 	return rx_bufs_used;
3820d5c65159SKalle Valo }
3821d5c65159SKalle Valo 
3822d5c65159SKalle Valo static void ath11k_dp_rx_msdus_set_payload(struct sk_buff *msdu)
3823d5c65159SKalle Valo {
3824d5c65159SKalle Valo 	u32 rx_pkt_offset, l2_hdr_offset;
3825d5c65159SKalle Valo 
3826d5c65159SKalle Valo 	rx_pkt_offset = sizeof(struct hal_rx_desc);
3827d5c65159SKalle Valo 	l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad((struct hal_rx_desc *)msdu->data);
3828d5c65159SKalle Valo 	skb_pull(msdu, rx_pkt_offset + l2_hdr_offset);
3829d5c65159SKalle Valo }
3830d5c65159SKalle Valo 
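/* Prepare a popped MSDU chain for mac80211: derive rx status from the
 * first rx descriptor, strip the hw descriptor and padding from every
 * buffer and, for native-wifi decap, copy the 802.11 header (plus QoS
 * control) back in front of each MSDU. Raw decap frames only get the
 * FCS trimmed from the last buffer.
 */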
3831d5c65159SKalle Valo static struct sk_buff *
3832d5c65159SKalle Valo ath11k_dp_rx_mon_merg_msdus(struct ath11k *ar,
3833d5c65159SKalle Valo 			    u32 mac_id, struct sk_buff *head_msdu,
3834d5c65159SKalle Valo 			    struct sk_buff *last_msdu,
3835d5c65159SKalle Valo 			    struct ieee80211_rx_status *rxs)
3836d5c65159SKalle Valo {
3837d5c65159SKalle Valo 	struct sk_buff *msdu, *mpdu_buf, *prev_buf;
3838d5c65159SKalle Valo 	u32 decap_format, wifi_hdr_len;
3839d5c65159SKalle Valo 	struct hal_rx_desc *rx_desc;
3840d5c65159SKalle Valo 	char *hdr_desc;
3841d5c65159SKalle Valo 	u8 *dest;
3842d5c65159SKalle Valo 	struct ieee80211_hdr_3addr *wh;
3843d5c65159SKalle Valo 
3844d5c65159SKalle Valo 	mpdu_buf = NULL;
3845d5c65159SKalle Valo 
3846d5c65159SKalle Valo 	if (!head_msdu)
3847d5c65159SKalle Valo 		goto err_merge_fail;
3848d5c65159SKalle Valo 
3849d5c65159SKalle Valo 	rx_desc = (struct hal_rx_desc *)head_msdu->data;
3850d5c65159SKalle Valo 
3851d5c65159SKalle Valo 	if (ath11k_dp_rxdesc_get_mpdulen_err(rx_desc))
3852d5c65159SKalle Valo 		return NULL;
3853d5c65159SKalle Valo 
3854d5c65159SKalle Valo 	decap_format = ath11k_dp_rxdesc_get_decap_format(rx_desc);
3855d5c65159SKalle Valo 
3856d5c65159SKalle Valo 	ath11k_dp_rx_h_ppdu(ar, rx_desc, rxs);
3857d5c65159SKalle Valo 
3858d5c65159SKalle Valo 	if (decap_format == DP_RX_DECAP_TYPE_RAW) {
3859d5c65159SKalle Valo 		ath11k_dp_rx_msdus_set_payload(head_msdu);
3860d5c65159SKalle Valo 
3861d5c65159SKalle Valo 		prev_buf = head_msdu;
3862d5c65159SKalle Valo 		msdu = head_msdu->next;
3863d5c65159SKalle Valo 
3864d5c65159SKalle Valo 		while (msdu) {
3865d5c65159SKalle Valo 			ath11k_dp_rx_msdus_set_payload(msdu);
3866d5c65159SKalle Valo 
3867d5c65159SKalle Valo 			prev_buf = msdu;
3868d5c65159SKalle Valo 			msdu = msdu->next;
3869d5c65159SKalle Valo 		}
3870d5c65159SKalle Valo 
3871d5c65159SKalle Valo 		prev_buf->next = NULL;
3872d5c65159SKalle Valo 
3873d5c65159SKalle Valo 		skb_trim(prev_buf, prev_buf->len - HAL_RX_FCS_LEN);
3874d5c65159SKalle Valo 	} else if (decap_format == DP_RX_DECAP_TYPE_NATIVE_WIFI) {
3875d5c65159SKalle Valo 		__le16 qos_field;
3876d5c65159SKalle Valo 		u8 qos_pkt = 0;
3877d5c65159SKalle Valo 
3878d5c65159SKalle Valo 		rx_desc = (struct hal_rx_desc *)head_msdu->data;
3879d5c65159SKalle Valo 		hdr_desc = ath11k_dp_rxdesc_get_80211hdr(rx_desc);
3880d5c65159SKalle Valo 
3881d5c65159SKalle Valo 		/* Base size */
3882d5c65159SKalle Valo 		wifi_hdr_len = sizeof(struct ieee80211_hdr_3addr);
3883d5c65159SKalle Valo 		wh = (struct ieee80211_hdr_3addr *)hdr_desc;
3884d5c65159SKalle Valo 
3885d5c65159SKalle Valo 		if (ieee80211_is_data_qos(wh->frame_control)) {
3886d5c65159SKalle Valo 			struct ieee80211_qos_hdr *qwh =
3887d5c65159SKalle Valo 					(struct ieee80211_qos_hdr *)hdr_desc;
3888d5c65159SKalle Valo 
3889d5c65159SKalle Valo 			qos_field = qwh->qos_ctrl;
3890d5c65159SKalle Valo 			qos_pkt = 1;
3891d5c65159SKalle Valo 		}
3892d5c65159SKalle Valo 		msdu = head_msdu;
3893d5c65159SKalle Valo 
3894d5c65159SKalle Valo 		while (msdu) {
3895d5c65159SKalle Valo 			rx_desc = (struct hal_rx_desc *)msdu->data;
3896d5c65159SKalle Valo 			hdr_desc = ath11k_dp_rxdesc_get_80211hdr(rx_desc);
3897d5c65159SKalle Valo 
3898d5c65159SKalle Valo 			if (qos_pkt) {
3899d5c65159SKalle Valo 				dest = skb_push(msdu, sizeof(__le16));
3900d5c65159SKalle Valo 				if (!dest)
3901d5c65159SKalle Valo 					goto err_merge_fail;
3902d5c65159SKalle Valo 				memcpy(dest, hdr_desc, wifi_hdr_len);
3903d5c65159SKalle Valo 				memcpy(dest + wifi_hdr_len,
3904d5c65159SKalle Valo 				       (u8 *)&qos_field, sizeof(__le16));
3905d5c65159SKalle Valo 			}
3906d5c65159SKalle Valo 			ath11k_dp_rx_msdus_set_payload(msdu);
3907d5c65159SKalle Valo 			prev_buf = msdu;
3908d5c65159SKalle Valo 			msdu = msdu->next;
3909d5c65159SKalle Valo 		}
3910d5c65159SKalle Valo 		dest = skb_put(prev_buf, HAL_RX_FCS_LEN);
3911d5c65159SKalle Valo 		if (!dest)
3912d5c65159SKalle Valo 			goto err_merge_fail;
3913d5c65159SKalle Valo 
3914d5c65159SKalle Valo 		ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
3915d5c65159SKalle Valo 			   "prev_buf %pK prev_buf->len %u",
3916d5c65159SKalle Valo 			   prev_buf, prev_buf->len);
3917d5c65159SKalle Valo 	} else {
3918d5c65159SKalle Valo 		ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
3919d5c65159SKalle Valo 			   "decap format %d is not supported!\n",
3920d5c65159SKalle Valo 			   decap_format);
3921d5c65159SKalle Valo 		goto err_merge_fail;
3922d5c65159SKalle Valo 	}
3923d5c65159SKalle Valo 
3924d5c65159SKalle Valo 	return head_msdu;
3925d5c65159SKalle Valo 
3926d5c65159SKalle Valo err_merge_fail:
3927d5c65159SKalle Valo 	if (mpdu_buf && decap_format != DP_RX_DECAP_TYPE_RAW) {
3928d5c65159SKalle Valo 		ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
3929d5c65159SKalle Valo 			   "err_merge_fail mpdu_buf %pK", mpdu_buf);
3930d5c65159SKalle Valo 		/* Free the head buffer */
3931d5c65159SKalle Valo 		dev_kfree_skb_any(mpdu_buf);
3932d5c65159SKalle Valo 	}
3933d5c65159SKalle Valo 	return NULL;
3934d5c65159SKalle Valo }
3935d5c65159SKalle Valo 
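/* Merge the MSDU chain of one MPDU and hand each resulting buffer to
 * mac80211 through the monitor path, flagging all but the last buffer
 * as A-MSDU continuations. On merge failure the whole chain is freed.
 */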
3936d5c65159SKalle Valo static int ath11k_dp_rx_mon_deliver(struct ath11k *ar, u32 mac_id,
3937d5c65159SKalle Valo 				    struct sk_buff *head_msdu,
3938d5c65159SKalle Valo 				    struct sk_buff *tail_msdu,
3939d5c65159SKalle Valo 				    struct napi_struct *napi)
3940d5c65159SKalle Valo {
3941d5c65159SKalle Valo 	struct ath11k_pdev_dp *dp = &ar->dp;
3942d5c65159SKalle Valo 	struct sk_buff *mon_skb, *skb_next, *header;
3943d5c65159SKalle Valo 	struct ieee80211_rx_status *rxs = &dp->rx_status, *status;
3944d5c65159SKalle Valo 
3945d5c65159SKalle Valo 	mon_skb = ath11k_dp_rx_mon_merg_msdus(ar, mac_id, head_msdu,
3946d5c65159SKalle Valo 					      tail_msdu, rxs);
3947d5c65159SKalle Valo 
3948d5c65159SKalle Valo 	if (!mon_skb)
3949d5c65159SKalle Valo 		goto mon_deliver_fail;
3950d5c65159SKalle Valo 
3951d5c65159SKalle Valo 	header = mon_skb;
3952d5c65159SKalle Valo 
3953d5c65159SKalle Valo 	rxs->flag = 0;
3954d5c65159SKalle Valo 	do {
3955d5c65159SKalle Valo 		skb_next = mon_skb->next;
3956d5c65159SKalle Valo 		if (!skb_next)
3957d5c65159SKalle Valo 			rxs->flag &= ~RX_FLAG_AMSDU_MORE;
3958d5c65159SKalle Valo 		else
3959d5c65159SKalle Valo 			rxs->flag |= RX_FLAG_AMSDU_MORE;
3960d5c65159SKalle Valo 
3961d5c65159SKalle Valo 		if (mon_skb == header) {
3962d5c65159SKalle Valo 			header = NULL;
3963d5c65159SKalle Valo 			rxs->flag &= ~RX_FLAG_ALLOW_SAME_PN;
3964d5c65159SKalle Valo 		} else {
3965d5c65159SKalle Valo 			rxs->flag |= RX_FLAG_ALLOW_SAME_PN;
3966d5c65159SKalle Valo 		}
3967d5c65159SKalle Valo 		rxs->flag |= RX_FLAG_ONLY_MONITOR;
3968d5c65159SKalle Valo 
3969d5c65159SKalle Valo 		status = IEEE80211_SKB_RXCB(mon_skb);
3970d5c65159SKalle Valo 		*status = *rxs;
3971d5c65159SKalle Valo 
3972d5c65159SKalle Valo 		ath11k_dp_rx_deliver_msdu(ar, napi, mon_skb);
3973d5c65159SKalle Valo 		mon_skb = skb_next;
39745e02bc73SMiles Hu 	} while (mon_skb);
3975d5c65159SKalle Valo 	rxs->flag = 0;
3976d5c65159SKalle Valo 
3977d5c65159SKalle Valo 	return 0;
3978d5c65159SKalle Valo 
3979d5c65159SKalle Valo mon_deliver_fail:
3980d5c65159SKalle Valo 	mon_skb = head_msdu;
3981d5c65159SKalle Valo 	while (mon_skb) {
3982d5c65159SKalle Valo 		skb_next = mon_skb->next;
3983d5c65159SKalle Valo 		dev_kfree_skb_any(mon_skb);
3984d5c65159SKalle Valo 		mon_skb = skb_next;
3985d5c65159SKalle Valo 	}
3986d5c65159SKalle Valo 	return -EINVAL;
3987d5c65159SKalle Valo }
3988d5c65159SKalle Valo 
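/* Reap the monitor destination ring for the ppdu currently described
 * by the status ring: pop and deliver complete MPDUs, stop when a new
 * ppdu id shows up, and replenish the rx buffers that were consumed.
 */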
3989d5c65159SKalle Valo static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, u32 quota,
3990d5c65159SKalle Valo 					  struct napi_struct *napi)
3991d5c65159SKalle Valo {
3992d5c65159SKalle Valo 	struct ath11k_pdev_dp *dp = &ar->dp;
3993d5c65159SKalle Valo 	struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
3994d5c65159SKalle Valo 	void *ring_entry;
3995d5c65159SKalle Valo 	void *mon_dst_srng;
3996d5c65159SKalle Valo 	u32 ppdu_id;
3997d5c65159SKalle Valo 	u32 rx_bufs_used;
3998d5c65159SKalle Valo 	struct ath11k_pdev_mon_stats *rx_mon_stats;
3999d5c65159SKalle Valo 	u32 npackets = 0;
4000d5c65159SKalle Valo 
4001d5c65159SKalle Valo 	mon_dst_srng = &ar->ab->hal.srng_list[dp->rxdma_mon_dst_ring.ring_id];
4002d5c65159SKalle Valo 
4003d5c65159SKalle Valo 	if (!mon_dst_srng) {
4004d5c65159SKalle Valo 		ath11k_warn(ar->ab,
4005d5c65159SKalle Valo 			    "HAL Monitor Destination Ring Init Failed -- %pK",
4006d5c65159SKalle Valo 			    mon_dst_srng);
4007d5c65159SKalle Valo 		return;
4008d5c65159SKalle Valo 	}
4009d5c65159SKalle Valo 
4010d5c65159SKalle Valo 	spin_lock_bh(&pmon->mon_lock);
4011d5c65159SKalle Valo 
4012d5c65159SKalle Valo 	ath11k_hal_srng_access_begin(ar->ab, mon_dst_srng);
4013d5c65159SKalle Valo 
4014d5c65159SKalle Valo 	ppdu_id = pmon->mon_ppdu_info.ppdu_id;
4015d5c65159SKalle Valo 	rx_bufs_used = 0;
4016d5c65159SKalle Valo 	rx_mon_stats = &pmon->rx_mon_stats;
4017d5c65159SKalle Valo 
4018d5c65159SKalle Valo 	while ((ring_entry = ath11k_hal_srng_dst_peek(ar->ab, mon_dst_srng))) {
4019d5c65159SKalle Valo 		struct sk_buff *head_msdu, *tail_msdu;
4020d5c65159SKalle Valo 
4021d5c65159SKalle Valo 		head_msdu = NULL;
4022d5c65159SKalle Valo 		tail_msdu = NULL;
4023d5c65159SKalle Valo 
4024d5c65159SKalle Valo 		rx_bufs_used += ath11k_dp_rx_mon_mpdu_pop(ar, ring_entry,
4025d5c65159SKalle Valo 							  &head_msdu,
4026d5c65159SKalle Valo 							  &tail_msdu,
4027d5c65159SKalle Valo 							  &npackets, &ppdu_id);
4028d5c65159SKalle Valo 
4029d5c65159SKalle Valo 		if (ppdu_id != pmon->mon_ppdu_info.ppdu_id) {
4030d5c65159SKalle Valo 			pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
4031d5c65159SKalle Valo 			ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
4032d5c65159SKalle Valo 				   "dest_rx: new ppdu_id %x != status ppdu_id %x",
4033d5c65159SKalle Valo 				   ppdu_id, pmon->mon_ppdu_info.ppdu_id);
4034d5c65159SKalle Valo 			break;
4035d5c65159SKalle Valo 		}
4036d5c65159SKalle Valo 		if (head_msdu && tail_msdu) {
4037d5c65159SKalle Valo 			ath11k_dp_rx_mon_deliver(ar, dp->mac_id, head_msdu,
4038d5c65159SKalle Valo 						 tail_msdu, napi);
4039d5c65159SKalle Valo 			rx_mon_stats->dest_mpdu_done++;
4040d5c65159SKalle Valo 		}
4041d5c65159SKalle Valo 
4042d5c65159SKalle Valo 		ring_entry = ath11k_hal_srng_dst_get_next_entry(ar->ab,
4043d5c65159SKalle Valo 								mon_dst_srng);
4044d5c65159SKalle Valo 	}
4045d5c65159SKalle Valo 	ath11k_hal_srng_access_end(ar->ab, mon_dst_srng);
4046d5c65159SKalle Valo 
4047d5c65159SKalle Valo 	spin_unlock_bh(&pmon->mon_lock);
4048d5c65159SKalle Valo 
4049d5c65159SKalle Valo 	if (rx_bufs_used) {
4050d5c65159SKalle Valo 		rx_mon_stats->dest_ppdu_done++;
4051d5c65159SKalle Valo 		ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id,
4052d5c65159SKalle Valo 					   &dp->rxdma_mon_buf_ring,
4053d5c65159SKalle Valo 					   rx_bufs_used,
4054d5c65159SKalle Valo 					   HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC);
4055d5c65159SKalle Valo 	}
4056d5c65159SKalle Valo }
4057d5c65159SKalle Valo 
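/* Parse queued monitor status buffers TLV by TLV; each time a complete
 * ppdu of status has been seen, process the matching destination ring
 * entries before moving on to the next status buffer.
 */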
4058d5c65159SKalle Valo static void ath11k_dp_rx_mon_status_process_tlv(struct ath11k *ar,
4059d5c65159SKalle Valo 						u32 quota,
4060d5c65159SKalle Valo 						struct napi_struct *napi)
4061d5c65159SKalle Valo {
4062d5c65159SKalle Valo 	struct ath11k_pdev_dp *dp = &ar->dp;
4063d5c65159SKalle Valo 	struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
4064d5c65159SKalle Valo 	struct hal_rx_mon_ppdu_info *ppdu_info;
4065d5c65159SKalle Valo 	struct sk_buff *status_skb;
4066d5c65159SKalle Valo 	u32 tlv_status = HAL_TLV_STATUS_BUF_DONE;
4067d5c65159SKalle Valo 	struct ath11k_pdev_mon_stats *rx_mon_stats;
4068d5c65159SKalle Valo 
4069d5c65159SKalle Valo 	ppdu_info = &pmon->mon_ppdu_info;
4070d5c65159SKalle Valo 	rx_mon_stats = &pmon->rx_mon_stats;
4071d5c65159SKalle Valo 
4072d5c65159SKalle Valo 	if (pmon->mon_ppdu_status != DP_PPDU_STATUS_START)
4073d5c65159SKalle Valo 		return;
4074d5c65159SKalle Valo 
4075d5c65159SKalle Valo 	while (!skb_queue_empty(&pmon->rx_status_q)) {
4076d5c65159SKalle Valo 		status_skb = skb_dequeue(&pmon->rx_status_q);
4077d5c65159SKalle Valo 
4078d5c65159SKalle Valo 		tlv_status = ath11k_hal_rx_parse_mon_status(ar->ab, ppdu_info,
4079d5c65159SKalle Valo 							    status_skb);
4080d5c65159SKalle Valo 		if (tlv_status == HAL_TLV_STATUS_PPDU_DONE) {
4081d5c65159SKalle Valo 			rx_mon_stats->status_ppdu_done++;
4082d5c65159SKalle Valo 			pmon->mon_ppdu_status = DP_PPDU_STATUS_DONE;
4083d5c65159SKalle Valo 			ath11k_dp_rx_mon_dest_process(ar, quota, napi);
4084d5c65159SKalle Valo 			pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
4085d5c65159SKalle Valo 		}
4086d5c65159SKalle Valo 		dev_kfree_skb_any(status_skb);
4087d5c65159SKalle Valo 	}
4088d5c65159SKalle Valo }
4089d5c65159SKalle Valo 
4090d5c65159SKalle Valo static int ath11k_dp_mon_process_rx(struct ath11k_base *ab, int mac_id,
4091d5c65159SKalle Valo 				    struct napi_struct *napi, int budget)
4092d5c65159SKalle Valo {
4093d5c65159SKalle Valo 	struct ath11k *ar = ab->pdevs[mac_id].ar;
4094d5c65159SKalle Valo 	struct ath11k_pdev_dp *dp = &ar->dp;
4095d5c65159SKalle Valo 	struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
4096d5c65159SKalle Valo 	int num_buffs_reaped = 0;
4097d5c65159SKalle Valo 
4098d5c65159SKalle Valo 	num_buffs_reaped = ath11k_dp_rx_reap_mon_status_ring(ar->ab, dp->mac_id, &budget,
4099d5c65159SKalle Valo 							     &pmon->rx_status_q);
4100d5c65159SKalle Valo 	if (num_buffs_reaped)
4101d5c65159SKalle Valo 		ath11k_dp_rx_mon_status_process_tlv(ar, budget, napi);
4102d5c65159SKalle Valo 
4103d5c65159SKalle Valo 	return num_buffs_reaped;
4104d5c65159SKalle Valo }
4105d5c65159SKalle Valo 
4106d5c65159SKalle Valo int ath11k_dp_rx_process_mon_rings(struct ath11k_base *ab, int mac_id,
4107d5c65159SKalle Valo 				   struct napi_struct *napi, int budget)
4108d5c65159SKalle Valo {
4109d5c65159SKalle Valo 	struct ath11k *ar = ab->pdevs[mac_id].ar;
4110d5c65159SKalle Valo 	int ret = 0;
4111d5c65159SKalle Valo 
4112d5c65159SKalle Valo 	if (test_bit(ATH11K_FLAG_MONITOR_ENABLED, &ar->monitor_flags))
4113d5c65159SKalle Valo 		ret = ath11k_dp_mon_process_rx(ab, mac_id, napi, budget);
4114d5c65159SKalle Valo 	else
4115d5c65159SKalle Valo 		ret = ath11k_dp_rx_process_mon_status(ab, mac_id, napi, budget);
4116d5c65159SKalle Valo 	return ret;
4117d5c65159SKalle Valo }
4118d5c65159SKalle Valo 
4119d5c65159SKalle Valo static int ath11k_dp_rx_pdev_mon_status_attach(struct ath11k *ar)
4120d5c65159SKalle Valo {
4121d5c65159SKalle Valo 	struct ath11k_pdev_dp *dp = &ar->dp;
4122d5c65159SKalle Valo 	struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
4123d5c65159SKalle Valo 
4124d5c65159SKalle Valo 	skb_queue_head_init(&pmon->rx_status_q);
4125d5c65159SKalle Valo 
4126d5c65159SKalle Valo 	pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
4127d5c65159SKalle Valo 
4128d5c65159SKalle Valo 	memset(&pmon->rx_mon_stats, 0,
4129d5c65159SKalle Valo 	       sizeof(pmon->rx_mon_stats));
4130d5c65159SKalle Valo 	return 0;
4131d5c65159SKalle Valo }
4132d5c65159SKalle Valo 
4133d5c65159SKalle Valo int ath11k_dp_rx_pdev_mon_attach(struct ath11k *ar)
4134d5c65159SKalle Valo {
4135d5c65159SKalle Valo 	struct ath11k_pdev_dp *dp = &ar->dp;
4136d5c65159SKalle Valo 	struct ath11k_mon_data *pmon = &dp->mon_data;
4137d5c65159SKalle Valo 	struct hal_srng *mon_desc_srng = NULL;
4138d5c65159SKalle Valo 	struct dp_srng *dp_srng;
4139d5c65159SKalle Valo 	int ret = 0;
4140d5c65159SKalle Valo 	u32 n_link_desc = 0;
4141d5c65159SKalle Valo 
4142d5c65159SKalle Valo 	ret = ath11k_dp_rx_pdev_mon_status_attach(ar);
4143d5c65159SKalle Valo 	if (ret) {
4144d5c65159SKalle Valo 		ath11k_warn(ar->ab, "pdev_mon_status_attach() failed");
4145d5c65159SKalle Valo 		return ret;
4146d5c65159SKalle Valo 	}
4147d5c65159SKalle Valo 
4148d5c65159SKalle Valo 	dp_srng = &dp->rxdma_mon_desc_ring;
4149d5c65159SKalle Valo 	n_link_desc = dp_srng->size /
4150d5c65159SKalle Valo 		ath11k_hal_srng_get_entrysize(HAL_RXDMA_MONITOR_DESC);
4151d5c65159SKalle Valo 	mon_desc_srng =
4152d5c65159SKalle Valo 		&ar->ab->hal.srng_list[dp->rxdma_mon_desc_ring.ring_id];
4153d5c65159SKalle Valo 
4154d5c65159SKalle Valo 	ret = ath11k_dp_link_desc_setup(ar->ab, pmon->link_desc_banks,
4155d5c65159SKalle Valo 					HAL_RXDMA_MONITOR_DESC, mon_desc_srng,
4156d5c65159SKalle Valo 					n_link_desc);
4157d5c65159SKalle Valo 	if (ret) {
4158d5c65159SKalle Valo 		ath11k_warn(ar->ab, "mon_link_desc_pool_setup() failed");
4159d5c65159SKalle Valo 		return ret;
4160d5c65159SKalle Valo 	}
4161d5c65159SKalle Valo 	pmon->mon_last_linkdesc_paddr = 0;
4162d5c65159SKalle Valo 	pmon->mon_last_buf_cookie = DP_RX_DESC_COOKIE_MAX + 1;
4163d5c65159SKalle Valo 	spin_lock_init(&pmon->mon_lock);
4164d5c65159SKalle Valo 	return 0;
4165d5c65159SKalle Valo }
4166d5c65159SKalle Valo 
4167d5c65159SKalle Valo static int ath11k_dp_mon_link_free(struct ath11k *ar)
4168d5c65159SKalle Valo {
4169d5c65159SKalle Valo 	struct ath11k_pdev_dp *dp = &ar->dp;
4170d5c65159SKalle Valo 	struct ath11k_mon_data *pmon = &dp->mon_data;
4171d5c65159SKalle Valo 
4172d5c65159SKalle Valo 	ath11k_dp_link_desc_cleanup(ar->ab, pmon->link_desc_banks,
4173d5c65159SKalle Valo 				    HAL_RXDMA_MONITOR_DESC,
4174d5c65159SKalle Valo 				    &dp->rxdma_mon_desc_ring);
4175d5c65159SKalle Valo 	return 0;
4176d5c65159SKalle Valo }
4177d5c65159SKalle Valo 
4178d5c65159SKalle Valo int ath11k_dp_rx_pdev_mon_detach(struct ath11k *ar)
4179d5c65159SKalle Valo {
4180d5c65159SKalle Valo 	ath11k_dp_mon_link_free(ar);
4181d5c65159SKalle Valo 	return 0;
4182d5c65159SKalle Valo }
4183