// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
 */

#include <linux/ieee80211.h>
#include "core.h"
#include "debug.h"
#include "hal_desc.h"
#include "hw.h"
#include "dp_rx.h"
#include "hal_rx.h"
#include "dp_tx.h"
#include "peer.h"

static u8 *ath11k_dp_rx_h_80211_hdr(struct hal_rx_desc *desc)
{
	return desc->hdr_status;
}

static enum hal_encrypt_type ath11k_dp_rx_h_mpdu_start_enctype(struct hal_rx_desc *desc)
{
	if (!(__le32_to_cpu(desc->mpdu_start.info1) &
	    RX_MPDU_START_INFO1_ENCRYPT_INFO_VALID))
		return HAL_ENCRYPT_TYPE_OPEN;

	return FIELD_GET(RX_MPDU_START_INFO2_ENC_TYPE,
			 __le32_to_cpu(desc->mpdu_start.info2));
}

static u8 ath11k_dp_rx_h_mpdu_start_decap_type(struct hal_rx_desc *desc)
{
	return FIELD_GET(RX_MPDU_START_INFO5_DECAP_TYPE,
			 __le32_to_cpu(desc->mpdu_start.info5));
}

static bool ath11k_dp_rx_h_attn_msdu_done(struct hal_rx_desc *desc)
{
	return !!FIELD_GET(RX_ATTENTION_INFO2_MSDU_DONE,
			   __le32_to_cpu(desc->attention.info2));
}

static bool ath11k_dp_rx_h_attn_first_mpdu(struct hal_rx_desc *desc)
{
	return !!FIELD_GET(RX_ATTENTION_INFO1_FIRST_MPDU,
			   __le32_to_cpu(desc->attention.info1));
}

static bool ath11k_dp_rx_h_attn_l4_cksum_fail(struct hal_rx_desc *desc)
{
	return !!FIELD_GET(RX_ATTENTION_INFO1_TCP_UDP_CKSUM_FAIL,
			   __le32_to_cpu(desc->attention.info1));
}

static bool ath11k_dp_rx_h_attn_ip_cksum_fail(struct hal_rx_desc *desc)
{
	return !!FIELD_GET(RX_ATTENTION_INFO1_IP_CKSUM_FAIL,
			   __le32_to_cpu(desc->attention.info1));
}

static bool ath11k_dp_rx_h_attn_is_decrypted(struct hal_rx_desc *desc)
{
	return (FIELD_GET(RX_ATTENTION_INFO2_DCRYPT_STATUS_CODE,
			  __le32_to_cpu(desc->attention.info2)) ==
		RX_DESC_DECRYPT_STATUS_CODE_OK);
}

static u32 ath11k_dp_rx_h_attn_mpdu_err(struct hal_rx_desc *desc)
{
	u32 info = __le32_to_cpu(desc->attention.info1);
	u32 errmap = 0;

	if (info & RX_ATTENTION_INFO1_FCS_ERR)
		errmap |= DP_RX_MPDU_ERR_FCS;

	if (info & RX_ATTENTION_INFO1_DECRYPT_ERR)
		errmap |= DP_RX_MPDU_ERR_DECRYPT;

	if (info & RX_ATTENTION_INFO1_TKIP_MIC_ERR)
		errmap |= DP_RX_MPDU_ERR_TKIP_MIC;

	if (info & RX_ATTENTION_INFO1_A_MSDU_ERROR)
		errmap |= DP_RX_MPDU_ERR_AMSDU_ERR;

	if (info & RX_ATTENTION_INFO1_OVERFLOW_ERR)
		errmap |= DP_RX_MPDU_ERR_OVERFLOW;

	if (info & RX_ATTENTION_INFO1_MSDU_LEN_ERR)
		errmap |= DP_RX_MPDU_ERR_MSDU_LEN;

	if (info & RX_ATTENTION_INFO1_MPDU_LEN_ERR)
		errmap |= DP_RX_MPDU_ERR_MPDU_LEN;

	return errmap;
}

static u16 ath11k_dp_rx_h_msdu_start_msdu_len(struct hal_rx_desc *desc)
{
	return FIELD_GET(RX_MSDU_START_INFO1_MSDU_LENGTH,
			 __le32_to_cpu(desc->msdu_start.info1));
}

static u8 ath11k_dp_rx_h_msdu_start_sgi(struct hal_rx_desc *desc)
{
	return FIELD_GET(RX_MSDU_START_INFO3_SGI,
			 __le32_to_cpu(desc->msdu_start.info3));
}

static u8 ath11k_dp_rx_h_msdu_start_rate_mcs(struct hal_rx_desc *desc)
{
	return FIELD_GET(RX_MSDU_START_INFO3_RATE_MCS,
			 __le32_to_cpu(desc->msdu_start.info3));
}

static u8 ath11k_dp_rx_h_msdu_start_rx_bw(struct hal_rx_desc *desc)
{
	return FIELD_GET(RX_MSDU_START_INFO3_RECV_BW,
			 __le32_to_cpu(desc->msdu_start.info3));
}

static u32 ath11k_dp_rx_h_msdu_start_freq(struct hal_rx_desc *desc)
{
	return __le32_to_cpu(desc->msdu_start.phy_meta_data);
}

static u8 ath11k_dp_rx_h_msdu_start_pkt_type(struct hal_rx_desc *desc)
{
	return FIELD_GET(RX_MSDU_START_INFO3_PKT_TYPE,
			 __le32_to_cpu(desc->msdu_start.info3));
}

static u8 ath11k_dp_rx_h_msdu_start_nss(struct hal_rx_desc *desc)
{
	u8 mimo_ss_bitmap = FIELD_GET(RX_MSDU_START_INFO3_MIMO_SS_BITMAP,
				      __le32_to_cpu(desc->msdu_start.info3));

	return hweight8(mimo_ss_bitmap);
}

static u8 ath11k_dp_rx_h_msdu_end_l3pad(struct hal_rx_desc *desc)
{
	return FIELD_GET(RX_MSDU_END_INFO2_L3_HDR_PADDING,
			 __le32_to_cpu(desc->msdu_end.info2));
}

static bool ath11k_dp_rx_h_msdu_end_first_msdu(struct hal_rx_desc *desc)
{
	return !!FIELD_GET(RX_MSDU_END_INFO2_FIRST_MSDU,
			   __le32_to_cpu(desc->msdu_end.info2));
}

static bool ath11k_dp_rx_h_msdu_end_last_msdu(struct hal_rx_desc *desc)
{
	return !!FIELD_GET(RX_MSDU_END_INFO2_LAST_MSDU,
			   __le32_to_cpu(desc->msdu_end.info2));
}

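/* Copy the msdu_end, attention and mpdu_end TLVs from the last MSDU's
 * rx descriptor into the first MSDU's descriptor, so that the first
 * descriptor also carries the end-of-MPDU status fields.
 */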
static void ath11k_dp_rx_desc_end_tlv_copy(struct hal_rx_desc *fdesc,
					   struct hal_rx_desc *ldesc)
{
	memcpy((u8 *)&fdesc->msdu_end, (u8 *)&ldesc->msdu_end,
	       sizeof(struct rx_msdu_end));
	memcpy((u8 *)&fdesc->attention, (u8 *)&ldesc->attention,
	       sizeof(struct rx_attention));
	memcpy((u8 *)&fdesc->mpdu_end, (u8 *)&ldesc->mpdu_end,
	       sizeof(struct rx_mpdu_end));
}

static u32 ath11k_dp_rxdesc_get_mpdulen_err(struct hal_rx_desc *rx_desc)
{
	struct rx_attention *rx_attn;

	rx_attn = &rx_desc->attention;

	return FIELD_GET(RX_ATTENTION_INFO1_MPDU_LEN_ERR,
			 __le32_to_cpu(rx_attn->info1));
}

static u32 ath11k_dp_rxdesc_get_decap_format(struct hal_rx_desc *rx_desc)
{
	struct rx_msdu_start *rx_msdu_start;

	rx_msdu_start = &rx_desc->msdu_start;

	return FIELD_GET(RX_MSDU_START_INFO2_DECAP_FORMAT,
			 __le32_to_cpu(rx_msdu_start->info2));
}

static u8 *ath11k_dp_rxdesc_get_80211hdr(struct hal_rx_desc *rx_desc)
{
	u8 *rx_pkt_hdr;

	rx_pkt_hdr = &rx_desc->msdu_payload[0];

	return rx_pkt_hdr;
}

static bool ath11k_dp_rxdesc_mpdu_valid(struct hal_rx_desc *rx_desc)
{
	u32 tlv_tag;

	tlv_tag = FIELD_GET(HAL_TLV_HDR_TAG,
			    __le32_to_cpu(rx_desc->mpdu_start_tag));

	return tlv_tag == HAL_RX_MPDU_START;
}

static u32 ath11k_dp_rxdesc_get_ppduid(struct hal_rx_desc *rx_desc)
{
	return __le16_to_cpu(rx_desc->mpdu_start.phy_ppdu_id);
}

/* Returns number of Rx buffers replenished */
int ath11k_dp_rxbufs_replenish(struct ath11k_base *ab, int mac_id,
			       struct dp_rxdma_ring *rx_ring,
			       int req_entries,
			       enum hal_rx_buf_return_buf_manager mgr,
			       gfp_t gfp)
{
	struct hal_srng *srng;
	u32 *desc;
	struct sk_buff *skb;
	int num_free;
	int num_remain;
	int buf_id;
	u32 cookie;
	dma_addr_t paddr;

	req_entries = min(req_entries, rx_ring->bufs_max);

	srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	num_free = ath11k_hal_srng_src_num_free(ab, srng, true);
	if (!req_entries && (num_free > (rx_ring->bufs_max * 3) / 4))
		req_entries = num_free;

	req_entries = min(num_free, req_entries);
	num_remain = req_entries;

	while (num_remain > 0) {
		skb = dev_alloc_skb(DP_RX_BUFFER_SIZE +
				    DP_RX_BUFFER_ALIGN_SIZE);
		if (!skb)
			break;

		if (!IS_ALIGNED((unsigned long)skb->data,
				DP_RX_BUFFER_ALIGN_SIZE)) {
			skb_pull(skb,
				 PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) -
				 skb->data);
		}

		paddr = dma_map_single(ab->dev, skb->data,
				       skb->len + skb_tailroom(skb),
				       DMA_FROM_DEVICE);
		if (dma_mapping_error(ab->dev, paddr))
			goto fail_free_skb;

		spin_lock_bh(&rx_ring->idr_lock);
		buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0,
				   rx_ring->bufs_max * 3, gfp);
		spin_unlock_bh(&rx_ring->idr_lock);
		if (buf_id < 0)
			goto fail_dma_unmap;

		desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
		if (!desc)
			goto fail_idr_remove;

		ATH11K_SKB_RXCB(skb)->paddr = paddr;

		cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) |
			 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);

		num_remain--;

		ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr);
	}

	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	return req_entries - num_remain;

fail_idr_remove:
	spin_lock_bh(&rx_ring->idr_lock);
	idr_remove(&rx_ring->bufs_idr, buf_id);
	spin_unlock_bh(&rx_ring->idr_lock);
fail_dma_unmap:
	dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
			 DMA_FROM_DEVICE);
fail_free_skb:
	dev_kfree_skb_any(skb);

	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	return req_entries - num_remain;
}

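/* Unmap and free every skb tracked in the ring's buf IDR and destroy the
 * IDR. Note that this also drains dp->rx_mon_status_refill_ring regardless
 * of which ring was passed in.
 */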
static int ath11k_dp_rxdma_buf_ring_free(struct ath11k *ar,
					 struct dp_rxdma_ring *rx_ring)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct sk_buff *skb;
	int buf_id;

	spin_lock_bh(&rx_ring->idr_lock);
	idr_for_each_entry(&rx_ring->bufs_idr, skb, buf_id) {
		idr_remove(&rx_ring->bufs_idr, buf_id);
		/* TODO: Understand where the internal driver does this
		 * dma_unmap of the rxdma_buffer.
		 */
		dma_unmap_single(ar->ab->dev, ATH11K_SKB_RXCB(skb)->paddr,
				 skb->len + skb_tailroom(skb), DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
	}

	idr_destroy(&rx_ring->bufs_idr);
	spin_unlock_bh(&rx_ring->idr_lock);

	rx_ring = &dp->rx_mon_status_refill_ring;

	spin_lock_bh(&rx_ring->idr_lock);
	idr_for_each_entry(&rx_ring->bufs_idr, skb, buf_id) {
		idr_remove(&rx_ring->bufs_idr, buf_id);
		/* XXX: Understand where the internal driver does this
		 * dma_unmap of the rxdma_buffer.
		 */
		dma_unmap_single(ar->ab->dev, ATH11K_SKB_RXCB(skb)->paddr,
				 skb->len + skb_tailroom(skb), DMA_BIDIRECTIONAL);
		dev_kfree_skb_any(skb);
	}

	idr_destroy(&rx_ring->bufs_idr);
	spin_unlock_bh(&rx_ring->idr_lock);
	return 0;
}

static int ath11k_dp_rxdma_pdev_buf_free(struct ath11k *ar)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;

	ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);

	rx_ring = &dp->rxdma_mon_buf_ring;
	ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);

	rx_ring = &dp->rx_mon_status_refill_ring;
	ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);
	return 0;
}

static int ath11k_dp_rxdma_ring_buf_setup(struct ath11k *ar,
					  struct dp_rxdma_ring *rx_ring,
					  u32 ringtype)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	int num_entries;

	num_entries = rx_ring->refill_buf_ring.size /
		      ath11k_hal_srng_get_entrysize(ringtype);

	rx_ring->bufs_max = num_entries;
	ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id, rx_ring, num_entries,
				   HAL_RX_BUF_RBM_SW3_BM, GFP_KERNEL);
	return 0;
}

static int ath11k_dp_rxdma_pdev_buf_setup(struct ath11k *ar)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;

	ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_BUF);

	rx_ring = &dp->rxdma_mon_buf_ring;
	ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_MONITOR_BUF);

	rx_ring = &dp->rx_mon_status_refill_ring;
	ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_MONITOR_STATUS);

	return 0;
}

static void ath11k_dp_rx_pdev_srng_free(struct ath11k *ar)
{
	struct ath11k_pdev_dp *dp = &ar->dp;

	ath11k_dp_srng_cleanup(ar->ab, &dp->rx_refill_buf_ring.refill_buf_ring);
	ath11k_dp_srng_cleanup(ar->ab, &dp->reo_dst_ring);
	ath11k_dp_srng_cleanup(ar->ab, &dp->rxdma_err_dst_ring);
	ath11k_dp_srng_cleanup(ar->ab, &dp->rx_mon_status_refill_ring.refill_buf_ring);
	ath11k_dp_srng_cleanup(ar->ab, &dp->rxdma_mon_buf_ring.refill_buf_ring);
}

static int ath11k_dp_rx_pdev_srng_alloc(struct ath11k *ar)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct dp_srng *srng = NULL;
	int ret;

	ret = ath11k_dp_srng_setup(ar->ab,
				   &dp->rx_refill_buf_ring.refill_buf_ring,
				   HAL_RXDMA_BUF, 0,
				   dp->mac_id, DP_RXDMA_BUF_RING_SIZE);
	if (ret) {
		ath11k_warn(ar->ab, "failed to setup rx_refill_buf_ring\n");
		return ret;
	}

	ret = ath11k_dp_srng_setup(ar->ab, &dp->reo_dst_ring, HAL_REO_DST,
				   dp->mac_id, dp->mac_id,
				   DP_REO_DST_RING_SIZE);
	if (ret) {
		ath11k_warn(ar->ab, "failed to setup reo_dst_ring\n");
		return ret;
	}

	ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_err_dst_ring,
				   HAL_RXDMA_DST, 0, dp->mac_id,
				   DP_RXDMA_ERR_DST_RING_SIZE);
	if (ret) {
		ath11k_warn(ar->ab, "failed to setup rxdma_err_dst_ring\n");
		return ret;
	}

	srng = &dp->rx_mon_status_refill_ring.refill_buf_ring;
	ret = ath11k_dp_srng_setup(ar->ab,
				   srng,
				   HAL_RXDMA_MONITOR_STATUS, 0, dp->mac_id,
				   DP_RXDMA_MON_STATUS_RING_SIZE);
	if (ret) {
		ath11k_warn(ar->ab,
			    "failed to setup rx_mon_status_refill_ring\n");
		return ret;
	}
	ret = ath11k_dp_srng_setup(ar->ab,
				   &dp->rxdma_mon_buf_ring.refill_buf_ring,
				   HAL_RXDMA_MONITOR_BUF, 0, dp->mac_id,
				   DP_RXDMA_MONITOR_BUF_RING_SIZE);
	if (ret) {
		ath11k_warn(ar->ab,
			    "failed to setup HAL_RXDMA_MONITOR_BUF\n");
		return ret;
	}

	ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_mon_dst_ring,
				   HAL_RXDMA_MONITOR_DST, 0, dp->mac_id,
				   DP_RXDMA_MONITOR_DST_RING_SIZE);
	if (ret) {
		ath11k_warn(ar->ab,
			    "failed to setup HAL_RXDMA_MONITOR_DST\n");
		return ret;
	}

	ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_mon_desc_ring,
				   HAL_RXDMA_MONITOR_DESC, 0, dp->mac_id,
				   DP_RXDMA_MONITOR_DESC_RING_SIZE);
	if (ret) {
		ath11k_warn(ar->ab,
			    "failed to setup HAL_RXDMA_MONITOR_DESC\n");
		return ret;
	}

	return 0;
}

void ath11k_dp_reo_cmd_list_cleanup(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	struct dp_reo_cmd *cmd, *tmp;
	struct dp_reo_cache_flush_elem *cmd_cache, *tmp_cache;

	spin_lock_bh(&dp->reo_cmd_lock);
	list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
		list_del(&cmd->list);
		dma_unmap_single(ab->dev, cmd->data.paddr,
				 cmd->data.size, DMA_BIDIRECTIONAL);
		kfree(cmd->data.vaddr);
		kfree(cmd);
	}

	list_for_each_entry_safe(cmd_cache, tmp_cache,
				 &dp->reo_cmd_cache_flush_list, list) {
		list_del(&cmd_cache->list);
		dma_unmap_single(ab->dev, cmd_cache->data.paddr,
				 cmd_cache->data.size, DMA_BIDIRECTIONAL);
		kfree(cmd_cache->data.vaddr);
		kfree(cmd_cache);
	}
	spin_unlock_bh(&dp->reo_cmd_lock);
}

static void ath11k_dp_reo_cmd_free(struct ath11k_dp *dp, void *ctx,
				   enum hal_reo_cmd_status status)
{
	struct dp_rx_tid *rx_tid = ctx;

	if (status != HAL_REO_CMD_SUCCESS)
		ath11k_warn(dp->ab, "failed to flush rx tid hw desc, tid %d status %d\n",
			    rx_tid->tid, status);

	dma_unmap_single(dp->ab->dev, rx_tid->paddr, rx_tid->size,
			 DMA_BIDIRECTIONAL);
	kfree(rx_tid->vaddr);
}

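/* Flush the HW-cached REO queue descriptor for this TID, one qdesc-sized
 * chunk at a time. Only the final FLUSH_CACHE command requests status, so
 * ath11k_dp_reo_cmd_free() can unmap and free the host memory once the
 * flush completes.
 */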
static void ath11k_dp_reo_cache_flush(struct ath11k_base *ab,
				      struct dp_rx_tid *rx_tid)
{
	struct ath11k_hal_reo_cmd cmd = {0};
	unsigned long tot_desc_sz, desc_sz;
	int ret;

	tot_desc_sz = rx_tid->size;
	desc_sz = ath11k_hal_reo_qdesc_size(0, HAL_DESC_REO_NON_QOS_TID);

	while (tot_desc_sz > desc_sz) {
		tot_desc_sz -= desc_sz;
		cmd.addr_lo = lower_32_bits(rx_tid->paddr + tot_desc_sz);
		cmd.addr_hi = upper_32_bits(rx_tid->paddr);
		ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid,
						HAL_REO_CMD_FLUSH_CACHE, &cmd,
						NULL);
		if (ret)
			ath11k_warn(ab,
				    "failed to send HAL_REO_CMD_FLUSH_CACHE, tid %d (%d)\n",
				    rx_tid->tid, ret);
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.addr_lo = lower_32_bits(rx_tid->paddr);
	cmd.addr_hi = upper_32_bits(rx_tid->paddr);
	cmd.flag |= HAL_REO_CMD_FLG_NEED_STATUS;
	ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid,
					HAL_REO_CMD_FLUSH_CACHE,
					&cmd, ath11k_dp_reo_cmd_free);
	if (ret) {
		ath11k_err(ab, "failed to send HAL_REO_CMD_FLUSH_CACHE cmd, tid %d (%d)\n",
			   rx_tid->tid, ret);
		dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
				 DMA_BIDIRECTIONAL);
		kfree(rx_tid->vaddr);
	}
}

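/* Status callback for the HAL_REO_CMD_UPDATE_RX_QUEUE command sent from
 * ath11k_peer_rx_tid_delete(). On success the TID descriptor is queued on
 * reo_cmd_cache_flush_list; entries older than DP_REO_DESC_FREE_TIMEOUT_MS
 * are then flushed from the HW cache and freed. On HAL_REO_CMD_DRAIN the
 * descriptor is freed immediately.
 */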
static void ath11k_dp_rx_tid_del_func(struct ath11k_dp *dp, void *ctx,
				      enum hal_reo_cmd_status status)
{
	struct ath11k_base *ab = dp->ab;
	struct dp_rx_tid *rx_tid = ctx;
	struct dp_reo_cache_flush_elem *elem, *tmp;

	if (status == HAL_REO_CMD_DRAIN) {
		goto free_desc;
	} else if (status != HAL_REO_CMD_SUCCESS) {
		/* Shouldn't happen! Cleanup in case of other failure? */
		ath11k_warn(ab, "failed to delete rx tid %d hw descriptor %d\n",
			    rx_tid->tid, status);
		return;
	}

	elem = kzalloc(sizeof(*elem), GFP_ATOMIC);
	if (!elem)
		goto free_desc;

	elem->ts = jiffies;
	memcpy(&elem->data, rx_tid, sizeof(*rx_tid));

	spin_lock_bh(&dp->reo_cmd_lock);
	list_add_tail(&elem->list, &dp->reo_cmd_cache_flush_list);
	spin_unlock_bh(&dp->reo_cmd_lock);

	/* Flush and invalidate aged REO desc from HW cache */
	spin_lock_bh(&dp->reo_cmd_lock);
	list_for_each_entry_safe(elem, tmp, &dp->reo_cmd_cache_flush_list,
				 list) {
		if (time_after(jiffies, elem->ts +
			       msecs_to_jiffies(DP_REO_DESC_FREE_TIMEOUT_MS))) {
			list_del(&elem->list);
			spin_unlock_bh(&dp->reo_cmd_lock);

			ath11k_dp_reo_cache_flush(ab, &elem->data);
			kfree(elem);
			spin_lock_bh(&dp->reo_cmd_lock);
		}
	}
	spin_unlock_bh(&dp->reo_cmd_lock);

	return;
free_desc:
	dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
			 DMA_BIDIRECTIONAL);
	kfree(rx_tid->vaddr);
}

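/* Mark the peer's RX TID queue invalid in the REO HW and hand the queue
 * descriptor to ath11k_dp_rx_tid_del_func() for deferred unmap/free.
 */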
static void ath11k_peer_rx_tid_delete(struct ath11k *ar,
				      struct ath11k_peer *peer, u8 tid)
{
	struct ath11k_hal_reo_cmd cmd = {0};
	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
	int ret;

	if (!rx_tid->active)
		return;

	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
	cmd.addr_lo = lower_32_bits(rx_tid->paddr);
	cmd.addr_hi = upper_32_bits(rx_tid->paddr);
	cmd.upd0 |= HAL_REO_CMD_UPD0_VLD;
	ret = ath11k_dp_tx_send_reo_cmd(ar->ab, rx_tid,
					HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
					ath11k_dp_rx_tid_del_func);
	if (ret) {
		ath11k_err(ar->ab, "failed to send HAL_REO_CMD_UPDATE_RX_QUEUE cmd, tid %d (%d)\n",
			   tid, ret);
		dma_unmap_single(ar->ab->dev, rx_tid->paddr, rx_tid->size,
				 DMA_BIDIRECTIONAL);
		kfree(rx_tid->vaddr);
	}

	rx_tid->active = false;
}

void ath11k_peer_rx_tid_cleanup(struct ath11k *ar, struct ath11k_peer *peer)
{
	int i;

	for (i = 0; i <= IEEE80211_NUM_TIDS; i++)
		ath11k_peer_rx_tid_delete(ar, peer, i);
}

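/* Update the BA window size and SSN of an already active RX TID queue via
 * a HAL_REO_CMD_UPDATE_RX_QUEUE command.
 */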
static int ath11k_peer_rx_tid_reo_update(struct ath11k *ar,
					 struct ath11k_peer *peer,
					 struct dp_rx_tid *rx_tid,
					 u32 ba_win_sz, u16 ssn)
{
	struct ath11k_hal_reo_cmd cmd = {0};
	int ret;

	cmd.addr_lo = lower_32_bits(rx_tid->paddr);
	cmd.addr_hi = upper_32_bits(rx_tid->paddr);
	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
	cmd.upd0 = HAL_REO_CMD_UPD0_BA_WINDOW_SIZE |
		   HAL_REO_CMD_UPD0_SSN;
	cmd.ba_window_size = ba_win_sz;
	cmd.upd2 = FIELD_PREP(HAL_REO_CMD_UPD2_SSN, ssn);

	ret = ath11k_dp_tx_send_reo_cmd(ar->ab, rx_tid,
					HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
					NULL);
	if (ret) {
		ath11k_warn(ar->ab, "failed to update rx tid queue, tid %d (%d)\n",
			    rx_tid->tid, ret);
		return ret;
	}

	rx_tid->ba_win_sz = ba_win_sz;

	return 0;
}

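/* Unmap and free the peer's RX TID queue descriptor and mark the TID
 * inactive; used to roll back ath11k_peer_rx_tid_setup() when the WMI
 * reorder queue setup fails.
 */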
static void ath11k_dp_rx_tid_mem_free(struct ath11k_base *ab,
				      const u8 *peer_mac, int vdev_id, u8 tid)
{
	struct ath11k_peer *peer;
	struct dp_rx_tid *rx_tid;

	spin_lock_bh(&ab->base_lock);

	peer = ath11k_peer_find(ab, vdev_id, peer_mac);
	if (!peer) {
		ath11k_warn(ab, "failed to find the peer to free up rx tid mem\n");
		goto unlock_exit;
	}

	rx_tid = &peer->rx_tid[tid];
	if (!rx_tid->active)
		goto unlock_exit;

	dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
			 DMA_BIDIRECTIONAL);
	kfree(rx_tid->vaddr);

	rx_tid->active = false;

unlock_exit:
	spin_unlock_bh(&ab->base_lock);
}

int ath11k_peer_rx_tid_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id,
			     u8 tid, u32 ba_win_sz, u16 ssn)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_peer *peer;
	struct dp_rx_tid *rx_tid;
	u32 hw_desc_sz;
	u32 *addr_aligned;
	void *vaddr;
	dma_addr_t paddr;
	int ret;

	spin_lock_bh(&ab->base_lock);

	peer = ath11k_peer_find(ab, vdev_id, peer_mac);
	if (!peer) {
		ath11k_warn(ab, "failed to find the peer to set up rx tid\n");
		spin_unlock_bh(&ab->base_lock);
		return -ENOENT;
	}

	rx_tid = &peer->rx_tid[tid];
	/* Update the tid queue if it is already setup */
	if (rx_tid->active) {
		paddr = rx_tid->paddr;
		ret = ath11k_peer_rx_tid_reo_update(ar, peer, rx_tid,
						    ba_win_sz, ssn);
		spin_unlock_bh(&ab->base_lock);
		if (ret) {
			ath11k_warn(ab, "failed to update reo for rx tid %d\n", tid);
			return ret;
		}

		ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id,
							     peer_mac, paddr,
							     tid, 1, ba_win_sz);
		if (ret)
			ath11k_warn(ab, "failed to send wmi command to update rx reorder queue, tid :%d (%d)\n",
				    tid, ret);
		return ret;
	}

	rx_tid->tid = tid;

	rx_tid->ba_win_sz = ba_win_sz;

	/* TODO: Optimize the memory allocation for qos tid based on
	 * the actual BA window size in REO tid update path.
	 */
	if (tid == HAL_DESC_REO_NON_QOS_TID)
		hw_desc_sz = ath11k_hal_reo_qdesc_size(ba_win_sz, tid);
	else
		hw_desc_sz = ath11k_hal_reo_qdesc_size(DP_BA_WIN_SZ_MAX, tid);

	vaddr = kzalloc(hw_desc_sz + HAL_LINK_DESC_ALIGN - 1, GFP_KERNEL);
	if (!vaddr) {
		spin_unlock_bh(&ab->base_lock);
		return -ENOMEM;
	}

	addr_aligned = PTR_ALIGN(vaddr, HAL_LINK_DESC_ALIGN);

	ath11k_hal_reo_qdesc_setup(addr_aligned, tid, ba_win_sz, ssn);

	paddr = dma_map_single(ab->dev, addr_aligned, hw_desc_sz,
			       DMA_BIDIRECTIONAL);

	ret = dma_mapping_error(ab->dev, paddr);
	if (ret) {
		spin_unlock_bh(&ab->base_lock);
		goto err_mem_free;
	}

	rx_tid->vaddr = vaddr;
	rx_tid->paddr = paddr;
	rx_tid->size = hw_desc_sz;
	rx_tid->active = true;

	spin_unlock_bh(&ab->base_lock);

	ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, peer_mac,
						     paddr, tid, 1, ba_win_sz);
	if (ret) {
		ath11k_warn(ar->ab, "failed to setup rx reorder queue, tid :%d (%d)\n",
			    tid, ret);
		ath11k_dp_rx_tid_mem_free(ab, peer_mac, vdev_id, tid);
	}

	return ret;

err_mem_free:
	kfree(vaddr);

	return ret;
}

int ath11k_dp_rx_ampdu_start(struct ath11k *ar,
			     struct ieee80211_ampdu_params *params)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_sta *arsta = (void *)params->sta->drv_priv;
	int vdev_id = arsta->arvif->vdev_id;
	int ret;

	ret = ath11k_peer_rx_tid_setup(ar, params->sta->addr, vdev_id,
				       params->tid, params->buf_size,
				       params->ssn);
	if (ret)
		ath11k_warn(ab, "failed to setup rx tid %d\n", ret);

	return ret;
}

int ath11k_dp_rx_ampdu_stop(struct ath11k *ar,
			    struct ieee80211_ampdu_params *params)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_peer *peer;
	struct ath11k_sta *arsta = (void *)params->sta->drv_priv;
	int vdev_id = arsta->arvif->vdev_id;
	dma_addr_t paddr;
	bool active;
	int ret;

	spin_lock_bh(&ab->base_lock);

	peer = ath11k_peer_find(ab, vdev_id, params->sta->addr);
	if (!peer) {
		ath11k_warn(ab, "failed to find the peer to stop rx aggregation\n");
		spin_unlock_bh(&ab->base_lock);
		return -ENOENT;
	}

	paddr = peer->rx_tid[params->tid].paddr;
	active = peer->rx_tid[params->tid].active;

	ath11k_peer_rx_tid_delete(ar, peer, params->tid);

	spin_unlock_bh(&ab->base_lock);

	if (!active)
		return 0;

	ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id,
						     params->sta->addr, paddr,
						     params->tid, 1, 1);
	if (ret)
		ath11k_warn(ab, "failed to send wmi to delete rx tid %d\n",
			    ret);

	return ret;
}

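/* Return the user_stats slot already assigned to peer_id, or the first
 * unused slot if none matches; -EINVAL if all slots are taken.
 */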
static int ath11k_get_ppdu_user_index(struct htt_ppdu_stats *ppdu_stats,
				      u16 peer_id)
{
	int i;

	for (i = 0; i < HTT_PPDU_STATS_MAX_USERS - 1; i++) {
		if (ppdu_stats->user_stats[i].is_valid_peer_id) {
			if (peer_id == ppdu_stats->user_stats[i].peer_id)
				return i;
		} else {
			return i;
		}
	}

	return -EINVAL;
}

static int ath11k_htt_tlv_ppdu_stats_parse(struct ath11k_base *ab,
					   u16 tag, u16 len, const void *ptr,
					   void *data)
{
	struct htt_ppdu_stats_info *ppdu_info;
	struct htt_ppdu_user_stats *user_stats;
	int cur_user;
	u16 peer_id;

	ppdu_info = (struct htt_ppdu_stats_info *)data;

	switch (tag) {
	case HTT_PPDU_STATS_TAG_COMMON:
		if (len < sizeof(struct htt_ppdu_stats_common)) {
			ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}
		memcpy((void *)&ppdu_info->ppdu_stats.common, ptr,
		       sizeof(struct htt_ppdu_stats_common));
		break;
	case HTT_PPDU_STATS_TAG_USR_RATE:
		if (len < sizeof(struct htt_ppdu_stats_user_rate)) {
			ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}

		peer_id = ((struct htt_ppdu_stats_user_rate *)ptr)->sw_peer_id;
		cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
						      peer_id);
		if (cur_user < 0)
			return -EINVAL;
		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
		user_stats->peer_id = peer_id;
		user_stats->is_valid_peer_id = true;
		memcpy((void *)&user_stats->rate, ptr,
		       sizeof(struct htt_ppdu_stats_user_rate));
		user_stats->tlv_flags |= BIT(tag);
		break;
	case HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON:
		if (len < sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn)) {
			ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}

		peer_id = ((struct htt_ppdu_stats_usr_cmpltn_cmn *)ptr)->sw_peer_id;
		cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
						      peer_id);
		if (cur_user < 0)
			return -EINVAL;
		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
		user_stats->peer_id = peer_id;
		user_stats->is_valid_peer_id = true;
		memcpy((void *)&user_stats->cmpltn_cmn, ptr,
		       sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn));
		user_stats->tlv_flags |= BIT(tag);
		break;
	case HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS:
		if (len <
		    sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status)) {
			ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}

		peer_id =
		((struct htt_ppdu_stats_usr_cmpltn_ack_ba_status *)ptr)->sw_peer_id;
		cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
						      peer_id);
		if (cur_user < 0)
			return -EINVAL;
		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
		user_stats->peer_id = peer_id;
		user_stats->is_valid_peer_id = true;
		memcpy((void *)&user_stats->ack_ba, ptr,
		       sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status));
		user_stats->tlv_flags |= BIT(tag);
		break;
	}
	return 0;
}

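/* Walk a buffer of HTT TLVs and invoke @iter for each one. Parsing stops
 * with -EINVAL on a truncated TLV; an -ENOMEM return from @iter aborts the
 * walk, other iterator errors are ignored.
 */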
int ath11k_dp_htt_tlv_iter(struct ath11k_base *ab, const void *ptr, size_t len,
			   int (*iter)(struct ath11k_base *ar, u16 tag, u16 len,
				       const void *ptr, void *data),
			   void *data)
{
	const struct htt_tlv *tlv;
	const void *begin = ptr;
	u16 tlv_tag, tlv_len;
	int ret = -EINVAL;

	while (len > 0) {
		if (len < sizeof(*tlv)) {
			ath11k_err(ab, "htt tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
				   ptr - begin, len, sizeof(*tlv));
			return -EINVAL;
		}
		tlv = (struct htt_tlv *)ptr;
		tlv_tag = FIELD_GET(HTT_TLV_TAG, tlv->header);
		tlv_len = FIELD_GET(HTT_TLV_LEN, tlv->header);
		ptr += sizeof(*tlv);
		len -= sizeof(*tlv);

		if (tlv_len > len) {
			ath11k_err(ab, "htt tlv parse failure of tag %hhu at byte %zd (%zu bytes left, %hhu expected)\n",
				   tlv_tag, ptr - begin, len, tlv_len);
			return -EINVAL;
		}
		ret = iter(ab, tlv_tag, tlv_len, ptr, data);
		if (ret == -ENOMEM)
			return ret;

		ptr += tlv_len;
		len -= tlv_len;
	}
	return 0;
}

static u32 ath11k_bw_to_mac80211_bwflags(u8 bw)
{
	u32 bwflags = 0;

	switch (bw) {
	case ATH11K_BW_40:
		bwflags = IEEE80211_TX_RC_40_MHZ_WIDTH;
		break;
	case ATH11K_BW_80:
		bwflags = IEEE80211_TX_RC_80_MHZ_WIDTH;
		break;
	case ATH11K_BW_160:
		bwflags = IEEE80211_TX_RC_160_MHZ_WIDTH;
		break;
	}

	return bwflags;
}

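/* Translate one user's HTT PPDU stats into mac80211 tx rate info for the
 * corresponding station and, when extended tx stats are enabled, accumulate
 * them into the per-peer stats.
 */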
static void
ath11k_update_per_peer_tx_stats(struct ath11k *ar,
				struct htt_ppdu_stats *ppdu_stats, u8 user)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_peer *peer;
	struct ieee80211_sta *sta;
	struct ath11k_sta *arsta;
	struct htt_ppdu_stats_user_rate *user_rate;
	struct ieee80211_chanctx_conf *conf = NULL;
	struct ath11k_per_peer_tx_stats *peer_stats = &ar->peer_tx_stats;
	struct htt_ppdu_user_stats *usr_stats = &ppdu_stats->user_stats[user];
	struct htt_ppdu_stats_common *common = &ppdu_stats->common;
	int ret;
	u8 flags, mcs, nss, bw, sgi, rate_idx = 0;
	u32 succ_bytes = 0;
	u16 rate = 0, succ_pkts = 0;
	u32 tx_duration = 0;
	bool is_ampdu = false;

	if (!usr_stats)
		return;

	if (!(usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_RATE)))
		return;

	if (usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON))
		is_ampdu =
			HTT_USR_CMPLTN_IS_AMPDU(usr_stats->cmpltn_cmn.flags);

	if (usr_stats->tlv_flags &
	    BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS)) {
		succ_bytes = usr_stats->ack_ba.success_bytes;
		succ_pkts = FIELD_GET(HTT_PPDU_STATS_ACK_BA_INFO_NUM_MSDU_M,
				      usr_stats->ack_ba.info);
	}

	if (common->fes_duration_us)
		tx_duration = common->fes_duration_us;

	user_rate = &usr_stats->rate;
	flags = HTT_USR_RATE_PREAMBLE(user_rate->rate_flags);
	bw = HTT_USR_RATE_BW(user_rate->rate_flags) - 2;
	nss = HTT_USR_RATE_NSS(user_rate->rate_flags) + 1;
	mcs = HTT_USR_RATE_MCS(user_rate->rate_flags);
	sgi = HTT_USR_RATE_GI(user_rate->rate_flags);

	/* Note: If the host has configured fixed rates, or in some other
	 * special cases, broadcast/management frames are sent at different
	 * rates. Should firmware rate control be skipped for those?
	 */

	if (flags == WMI_RATE_PREAMBLE_VHT && mcs > 9) {
		ath11k_warn(ab, "Invalid VHT mcs %hhd peer stats",  mcs);
		return;
	}

	if (flags == WMI_RATE_PREAMBLE_HT && (mcs > 7 || nss < 1)) {
		ath11k_warn(ab, "Invalid HT mcs %hhd nss %hhd peer stats",
			    mcs, nss);
		return;
	}

	if (flags == WMI_RATE_PREAMBLE_CCK || flags == WMI_RATE_PREAMBLE_OFDM) {
		ret = ath11k_mac_hw_ratecode_to_legacy_rate(mcs,
							    flags,
							    &rate_idx,
							    &rate);
		if (ret < 0)
			return;
	}

	rcu_read_lock();
	spin_lock_bh(&ab->base_lock);
	peer = ath11k_peer_find_by_id(ab, usr_stats->peer_id);

	if (!peer || !peer->sta) {
		spin_unlock_bh(&ab->base_lock);
		rcu_read_unlock();
		return;
	}

	sta = peer->sta;
	arsta = (struct ath11k_sta *)sta->drv_priv;

	memset(&arsta->txrate, 0, sizeof(arsta->txrate));
	memset(&arsta->tx_info.status, 0, sizeof(arsta->tx_info.status));

	switch (flags) {
	case WMI_RATE_PREAMBLE_OFDM:
		arsta->txrate.legacy = rate;
		if (arsta->arvif && arsta->arvif->vif)
			conf = rcu_dereference(arsta->arvif->vif->chanctx_conf);
		if (conf && conf->def.chan->band == NL80211_BAND_5GHZ)
			arsta->tx_info.status.rates[0].idx = rate_idx - 4;
		break;
	case WMI_RATE_PREAMBLE_CCK:
		arsta->txrate.legacy = rate;
		arsta->tx_info.status.rates[0].idx = rate_idx;
		if (mcs > ATH11K_HW_RATE_CCK_LP_1M &&
		    mcs <= ATH11K_HW_RATE_CCK_SP_2M)
			arsta->tx_info.status.rates[0].flags |=
					IEEE80211_TX_RC_USE_SHORT_PREAMBLE;
		break;
	case WMI_RATE_PREAMBLE_HT:
		arsta->txrate.mcs = mcs + 8 * (nss - 1);
		arsta->tx_info.status.rates[0].idx = arsta->txrate.mcs;
		arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
		arsta->tx_info.status.rates[0].flags |= IEEE80211_TX_RC_MCS;
		if (sgi) {
			arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
			arsta->tx_info.status.rates[0].flags |=
					IEEE80211_TX_RC_SHORT_GI;
		}
		break;
	case WMI_RATE_PREAMBLE_VHT:
		arsta->txrate.mcs = mcs;
		ieee80211_rate_set_vht(&arsta->tx_info.status.rates[0], mcs, nss);
		arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
		arsta->tx_info.status.rates[0].flags |= IEEE80211_TX_RC_VHT_MCS;
		if (sgi) {
			arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
			arsta->tx_info.status.rates[0].flags |=
						IEEE80211_TX_RC_SHORT_GI;
		}
		break;
	}

	arsta->txrate.nss = nss;
	arsta->txrate.bw = ath11k_mac_bw_to_mac80211_bw(bw);
	arsta->tx_info.status.rates[0].flags |= ath11k_bw_to_mac80211_bwflags(bw);

	memcpy(&arsta->last_txrate, &arsta->txrate, sizeof(struct rate_info));

	if (succ_pkts) {
		arsta->tx_info.flags = IEEE80211_TX_STAT_ACK;
		arsta->tx_info.status.rates[0].count = 1;
		ieee80211_tx_rate_update(ar->hw, sta, &arsta->tx_info);
	}

	memset(peer_stats, 0, sizeof(*peer_stats));

	peer_stats->succ_pkts = succ_pkts;
	peer_stats->succ_bytes = succ_bytes;
	peer_stats->is_ampdu = is_ampdu;
	peer_stats->duration = tx_duration;
	peer_stats->ba_fails =
		HTT_USR_CMPLTN_LONG_RETRY(usr_stats->cmpltn_cmn.flags) +
		HTT_USR_CMPLTN_SHORT_RETRY(usr_stats->cmpltn_cmn.flags);

	if (ath11k_debug_is_extd_tx_stats_enabled(ar))
		ath11k_accumulate_per_peer_tx_stats(arsta,
						    peer_stats, rate_idx);

	spin_unlock_bh(&ab->base_lock);
	rcu_read_unlock();
}

static void ath11k_htt_update_ppdu_stats(struct ath11k *ar,
					 struct htt_ppdu_stats *ppdu_stats)
{
	u8 user;

	for (user = 0; user < HTT_PPDU_STATS_MAX_USERS - 1; user++)
		ath11k_update_per_peer_tx_stats(ar, ppdu_stats, user);
}

1176d5c65159SKalle Valo static
1177d5c65159SKalle Valo struct htt_ppdu_stats_info *ath11k_dp_htt_get_ppdu_desc(struct ath11k *ar,
1178d5c65159SKalle Valo 							u32 ppdu_id)
1179d5c65159SKalle Valo {
1180d5c65159SKalle Valo 	struct htt_ppdu_stats_info *ppdu_info = NULL;
1181d5c65159SKalle Valo 
1182d5c65159SKalle Valo 	spin_lock_bh(&ar->data_lock);
1183d5c65159SKalle Valo 	if (!list_empty(&ar->ppdu_stats_info)) {
1184d5c65159SKalle Valo 		list_for_each_entry(ppdu_info, &ar->ppdu_stats_info, list) {
1185d5c65159SKalle Valo 			if (ppdu_info && ppdu_info->ppdu_id == ppdu_id) {
1186d5c65159SKalle Valo 				spin_unlock_bh(&ar->data_lock);
1187d5c65159SKalle Valo 				return ppdu_info;
1188d5c65159SKalle Valo 			}
1189d5c65159SKalle Valo 		}
1190d5c65159SKalle Valo 
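		/* The list is a small cache of in-flight PPDU descriptors;
		 * once it grows past HTT_PPDU_DESC_MAX_DEPTH the oldest
		 * entry is flushed into the per-peer tx stats and freed.
		 */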
1191d5c65159SKalle Valo 		if (ar->ppdu_stat_list_depth > HTT_PPDU_DESC_MAX_DEPTH) {
1192d5c65159SKalle Valo 			ppdu_info = list_first_entry(&ar->ppdu_stats_info,
1193d5c65159SKalle Valo 						     typeof(*ppdu_info), list);
1194d5c65159SKalle Valo 			list_del(&ppdu_info->list);
1195d5c65159SKalle Valo 			ar->ppdu_stat_list_depth--;
1196d5c65159SKalle Valo 			ath11k_htt_update_ppdu_stats(ar, &ppdu_info->ppdu_stats);
1197d5c65159SKalle Valo 			kfree(ppdu_info);
1198d5c65159SKalle Valo 		}
1199d5c65159SKalle Valo 	}
1200d5c65159SKalle Valo 	spin_unlock_bh(&ar->data_lock);
1201d5c65159SKalle Valo 
1202d5c65159SKalle Valo 	ppdu_info = kzalloc(sizeof(*ppdu_info), GFP_KERNEL);
1203d5c65159SKalle Valo 	if (!ppdu_info)
1204d5c65159SKalle Valo 		return NULL;
1205d5c65159SKalle Valo 
1206d5c65159SKalle Valo 	spin_lock_bh(&ar->data_lock);
1207d5c65159SKalle Valo 	list_add_tail(&ppdu_info->list, &ar->ppdu_stats_info);
1208d5c65159SKalle Valo 	ar->ppdu_stat_list_depth++;
1209d5c65159SKalle Valo 	spin_unlock_bh(&ar->data_lock);
1210d5c65159SKalle Valo 
1211d5c65159SKalle Valo 	return ppdu_info;
1212d5c65159SKalle Valo }
1213d5c65159SKalle Valo 
1214d5c65159SKalle Valo static int ath11k_htt_pull_ppdu_stats(struct ath11k_base *ab,
1215d5c65159SKalle Valo 				      struct sk_buff *skb)
1216d5c65159SKalle Valo {
1217d5c65159SKalle Valo 	struct ath11k_htt_ppdu_stats_msg *msg;
1218d5c65159SKalle Valo 	struct htt_ppdu_stats_info *ppdu_info;
1219d5c65159SKalle Valo 	struct ath11k *ar;
1220d5c65159SKalle Valo 	int ret;
1221d5c65159SKalle Valo 	u8 pdev_id;
1222d5c65159SKalle Valo 	u32 ppdu_id, len;
1223d5c65159SKalle Valo 
1224d5c65159SKalle Valo 	msg = (struct ath11k_htt_ppdu_stats_msg *)skb->data;
1225d5c65159SKalle Valo 	len = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PAYLOAD_SIZE, msg->info);
1226d5c65159SKalle Valo 	pdev_id = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PDEV_ID, msg->info);
1227d5c65159SKalle Valo 	ppdu_id = msg->ppdu_id;
1228d5c65159SKalle Valo 
1229d5c65159SKalle Valo 	rcu_read_lock();
1230d5c65159SKalle Valo 	ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id);
1231d5c65159SKalle Valo 	if (!ar) {
1232d5c65159SKalle Valo 		ret = -EINVAL;
1233d5c65159SKalle Valo 		goto exit;
1234d5c65159SKalle Valo 	}
1235d5c65159SKalle Valo 
1236d5c65159SKalle Valo 	if (ath11k_debug_is_pktlog_lite_mode_enabled(ar))
1237d5c65159SKalle Valo 		trace_ath11k_htt_ppdu_stats(ar, skb->data, len);
1238d5c65159SKalle Valo 
1239d5c65159SKalle Valo 	ppdu_info = ath11k_dp_htt_get_ppdu_desc(ar, ppdu_id);
1240d5c65159SKalle Valo 	if (!ppdu_info) {
1241d5c65159SKalle Valo 		ret = -EINVAL;
1242d5c65159SKalle Valo 		goto exit;
1243d5c65159SKalle Valo 	}
1244d5c65159SKalle Valo 
1245d5c65159SKalle Valo 	ppdu_info->ppdu_id = ppdu_id;
1246d5c65159SKalle Valo 	ret = ath11k_dp_htt_tlv_iter(ab, msg->data, len,
1247d5c65159SKalle Valo 				     ath11k_htt_tlv_ppdu_stats_parse,
1248d5c65159SKalle Valo 				     (void *)ppdu_info);
1249d5c65159SKalle Valo 	if (ret) {
1250d5c65159SKalle Valo 		ath11k_warn(ab, "failed to parse tlv %d\n", ret);
1251d5c65159SKalle Valo 		goto exit;
1252d5c65159SKalle Valo 	}
1253d5c65159SKalle Valo 
1254d5c65159SKalle Valo exit:
1255d5c65159SKalle Valo 	rcu_read_unlock();
1256d5c65159SKalle Valo 
1257d5c65159SKalle Valo 	return ret;
1258d5c65159SKalle Valo }
1259d5c65159SKalle Valo 
1260d5c65159SKalle Valo static void ath11k_htt_pktlog(struct ath11k_base *ab, struct sk_buff *skb)
1261d5c65159SKalle Valo {
1262d5c65159SKalle Valo 	struct htt_pktlog_msg *data = (struct htt_pktlog_msg *)skb->data;
1263d5c65159SKalle Valo 	struct ath11k *ar;
1264d5c65159SKalle Valo 	u32 len;
1265d5c65159SKalle Valo 	u8 pdev_id;
1266d5c65159SKalle Valo 
1267d5c65159SKalle Valo 	len = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PAYLOAD_SIZE, data->hdr);
1268d5c65159SKalle Valo 
1269d5c65159SKalle Valo 	if (len > ATH11K_HTT_PKTLOG_MAX_SIZE) {
1270d5c65159SKalle Valo 		ath11k_warn(ab, "htt pktlog buffer size %d, expected < %d\n",
1271d5c65159SKalle Valo 			    len,
1272d5c65159SKalle Valo 			    ATH11K_HTT_PKTLOG_MAX_SIZE);
1273d5c65159SKalle Valo 		return;
1274d5c65159SKalle Valo 	}
1275d5c65159SKalle Valo 
1276d5c65159SKalle Valo 	pdev_id = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PDEV_ID, data->hdr);
1277d5c65159SKalle Valo 	pdev_id = DP_HW2SW_MACID(pdev_id);
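	/* The target reports a HW mac id; convert it to the driver's
	 * zero-based pdev index before indexing ab->pdevs[].
	 */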
1278d5c65159SKalle Valo 	ar = ab->pdevs[pdev_id].ar;
1279d5c65159SKalle Valo 
1280d5c65159SKalle Valo 	trace_ath11k_htt_pktlog(ar, data->payload, len);
1281d5c65159SKalle Valo }
1282d5c65159SKalle Valo 
1283d5c65159SKalle Valo void ath11k_dp_htt_htc_t2h_msg_handler(struct ath11k_base *ab,
1284d5c65159SKalle Valo 				       struct sk_buff *skb)
1285d5c65159SKalle Valo {
1286d5c65159SKalle Valo 	struct ath11k_dp *dp = &ab->dp;
1287d5c65159SKalle Valo 	struct htt_resp_msg *resp = (struct htt_resp_msg *)skb->data;
1288d5c65159SKalle Valo 	enum htt_t2h_msg_type type = FIELD_GET(HTT_T2H_MSG_TYPE, *(u32 *)resp);
1289d5c65159SKalle Valo 	u16 peer_id;
1290d5c65159SKalle Valo 	u8 vdev_id;
1291d5c65159SKalle Valo 	u8 mac_addr[ETH_ALEN];
1292d5c65159SKalle Valo 	u16 peer_mac_h16;
1293d5c65159SKalle Valo 	u16 ast_hash;
1294d5c65159SKalle Valo 
1295d5c65159SKalle Valo 	ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "dp_htt rx msg type: 0x%x\n", type);
1296d5c65159SKalle Valo 
1297d5c65159SKalle Valo 	switch (type) {
1298d5c65159SKalle Valo 	case HTT_T2H_MSG_TYPE_VERSION_CONF:
1299d5c65159SKalle Valo 		dp->htt_tgt_ver_major = FIELD_GET(HTT_T2H_VERSION_CONF_MAJOR,
1300d5c65159SKalle Valo 						  resp->version_msg.version);
1301d5c65159SKalle Valo 		dp->htt_tgt_ver_minor = FIELD_GET(HTT_T2H_VERSION_CONF_MINOR,
1302d5c65159SKalle Valo 						  resp->version_msg.version);
1303d5c65159SKalle Valo 		complete(&dp->htt_tgt_version_received);
1304d5c65159SKalle Valo 		break;
1305d5c65159SKalle Valo 	case HTT_T2H_MSG_TYPE_PEER_MAP:
1306d5c65159SKalle Valo 		vdev_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_VDEV_ID,
1307d5c65159SKalle Valo 				    resp->peer_map_ev.info);
1308d5c65159SKalle Valo 		peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_PEER_ID,
1309d5c65159SKalle Valo 				    resp->peer_map_ev.info);
1310d5c65159SKalle Valo 		peer_mac_h16 = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16,
1311d5c65159SKalle Valo 					 resp->peer_map_ev.info1);
1312d5c65159SKalle Valo 		ath11k_dp_get_mac_addr(resp->peer_map_ev.mac_addr_l32,
1313d5c65159SKalle Valo 				       peer_mac_h16, mac_addr);
1314d5c65159SKalle Valo 		ast_hash = FIELD_GET(HTT_T2H_PEER_MAP_INFO2_AST_HASH_VAL,
13150f37fbf4SAnilkumar Kolli 				     resp->peer_map_ev.info2);
1316d5c65159SKalle Valo 		ath11k_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash);
1317d5c65159SKalle Valo 		break;
1318d5c65159SKalle Valo 	case HTT_T2H_MSG_TYPE_PEER_UNMAP:
1319d5c65159SKalle Valo 		peer_id = FIELD_GET(HTT_T2H_PEER_UNMAP_INFO_PEER_ID,
1320d5c65159SKalle Valo 				    resp->peer_unmap_ev.info);
1321d5c65159SKalle Valo 		ath11k_peer_unmap_event(ab, peer_id);
1322d5c65159SKalle Valo 		break;
1323d5c65159SKalle Valo 	case HTT_T2H_MSG_TYPE_PPDU_STATS_IND:
1324d5c65159SKalle Valo 		ath11k_htt_pull_ppdu_stats(ab, skb);
1325d5c65159SKalle Valo 		break;
1326d5c65159SKalle Valo 	case HTT_T2H_MSG_TYPE_EXT_STATS_CONF:
1327d5c65159SKalle Valo 		ath11k_dbg_htt_ext_stats_handler(ab, skb);
1328d5c65159SKalle Valo 		break;
1329d5c65159SKalle Valo 	case HTT_T2H_MSG_TYPE_PKTLOG:
1330d5c65159SKalle Valo 		ath11k_htt_pktlog(ab, skb);
1331d5c65159SKalle Valo 		break;
1332d5c65159SKalle Valo 	default:
1333d5c65159SKalle Valo 		ath11k_warn(ab, "htt event %d not handled\n", type);
1334d5c65159SKalle Valo 		break;
1335d5c65159SKalle Valo 	}
1336d5c65159SKalle Valo 
1337d5c65159SKalle Valo 	dev_kfree_skb_any(skb);
1338d5c65159SKalle Valo }
1339d5c65159SKalle Valo 
1340d5c65159SKalle Valo static int ath11k_dp_rx_msdu_coalesce(struct ath11k *ar,
1341d5c65159SKalle Valo 				      struct sk_buff_head *msdu_list,
1342d5c65159SKalle Valo 				      struct sk_buff *first, struct sk_buff *last,
1343d5c65159SKalle Valo 				      u8 l3pad_bytes, int msdu_len)
1344d5c65159SKalle Valo {
1345d5c65159SKalle Valo 	struct sk_buff *skb;
1346d5c65159SKalle Valo 	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(first);
1347d5c65159SKalle Valo 	struct hal_rx_desc *ldesc;
1348d5c65159SKalle Valo 	int space_extra;
1349d5c65159SKalle Valo 	int rem_len;
1350d5c65159SKalle Valo 	int buf_len;
1351d5c65159SKalle Valo 
1352d5c65159SKalle Valo 	if (WARN_ON_ONCE(msdu_len <= (DP_RX_BUFFER_SIZE -
1353d5c65159SKalle Valo 			 (HAL_RX_DESC_SIZE + l3pad_bytes)))) {
1354d5c65159SKalle Valo 		skb_put(first, HAL_RX_DESC_SIZE + l3pad_bytes + msdu_len);
1355d5c65159SKalle Valo 		skb_pull(first, HAL_RX_DESC_SIZE + l3pad_bytes);
1356d5c65159SKalle Valo 		return 0;
1357d5c65159SKalle Valo 	}
1358d5c65159SKalle Valo 
1359d5c65159SKalle Valo 	ldesc = (struct hal_rx_desc *)last->data;
1360d5c65159SKalle Valo 	rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(ldesc);
1361d5c65159SKalle Valo 	rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(ldesc);
1362d5c65159SKalle Valo 
1363d5c65159SKalle Valo 	/* MSDU spans multiple buffers because the length of the MSDU
1364d5c65159SKalle Valo 	 * exceeds DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE. So assume the data
1365d5c65159SKalle Valo 	 * in the first buf is of length DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE.
1366d5c65159SKalle Valo 	 */
1367d5c65159SKalle Valo 	skb_put(first, DP_RX_BUFFER_SIZE);
1368d5c65159SKalle Valo 	skb_pull(first, HAL_RX_DESC_SIZE + l3pad_bytes);
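	/* After the pull the first buffer contributes
	 * DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE - l3pad_bytes bytes of MSDU
	 * payload; rem_len below accounts for exactly that amount.
	 */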
1369d5c65159SKalle Valo 
1370d5c65159SKalle Valo 	space_extra = msdu_len - (DP_RX_BUFFER_SIZE + skb_tailroom(first));
1371d5c65159SKalle Valo 	if (space_extra > 0 &&
1372d5c65159SKalle Valo 	    (pskb_expand_head(first, 0, space_extra, GFP_ATOMIC) < 0)) {
1373d5c65159SKalle Valo 		/* Free up all buffers of the MSDU */
1374d5c65159SKalle Valo 		while ((skb = __skb_dequeue(msdu_list)) != NULL) {
1375d5c65159SKalle Valo 			rxcb = ATH11K_SKB_RXCB(skb);
1376d5c65159SKalle Valo 			if (!rxcb->is_continuation) {
1377d5c65159SKalle Valo 				dev_kfree_skb_any(skb);
1378d5c65159SKalle Valo 				break;
1379d5c65159SKalle Valo 			}
1380d5c65159SKalle Valo 			dev_kfree_skb_any(skb);
1381d5c65159SKalle Valo 		}
1382d5c65159SKalle Valo 		return -ENOMEM;
1383d5c65159SKalle Valo 	}
1384d5c65159SKalle Valo 
1385d5c65159SKalle Valo 	/* When an MSDU spans multiple buffers, the attention, MSDU_END and
1386d5c65159SKalle Valo 	 * MPDU_END TLVs are valid only in the last buffer. Copy those TLVs.
1387d5c65159SKalle Valo 	 */
1388d5c65159SKalle Valo 	ath11k_dp_rx_desc_end_tlv_copy(rxcb->rx_desc, ldesc);
1389d5c65159SKalle Valo 
1390d5c65159SKalle Valo 	rem_len = msdu_len -
1391d5c65159SKalle Valo 		  (DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE - l3pad_bytes);
1392d5c65159SKalle Valo 	while ((skb = __skb_dequeue(msdu_list)) != NULL && rem_len > 0) {
1393d5c65159SKalle Valo 		rxcb = ATH11K_SKB_RXCB(skb);
1394d5c65159SKalle Valo 		if (rxcb->is_continuation)
1395d5c65159SKalle Valo 			buf_len = DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE;
1396d5c65159SKalle Valo 		else
1397d5c65159SKalle Valo 			buf_len = rem_len;
1398d5c65159SKalle Valo 
1399d5c65159SKalle Valo 		if (buf_len > (DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE)) {
1400d5c65159SKalle Valo 			WARN_ON_ONCE(1);
1401d5c65159SKalle Valo 			dev_kfree_skb_any(skb);
1402d5c65159SKalle Valo 			return -EINVAL;
1403d5c65159SKalle Valo 		}
1404d5c65159SKalle Valo 
1405d5c65159SKalle Valo 		skb_put(skb, buf_len + HAL_RX_DESC_SIZE);
1406d5c65159SKalle Valo 		skb_pull(skb, HAL_RX_DESC_SIZE);
1407d5c65159SKalle Valo 		skb_copy_from_linear_data(skb, skb_put(first, buf_len),
1408d5c65159SKalle Valo 					  buf_len);
1409d5c65159SKalle Valo 		dev_kfree_skb_any(skb);
1410d5c65159SKalle Valo 
1411d5c65159SKalle Valo 		rem_len -= buf_len;
1412d5c65159SKalle Valo 		if (!rxcb->is_continuation)
1413d5c65159SKalle Valo 			break;
1414d5c65159SKalle Valo 	}
1415d5c65159SKalle Valo 
1416d5c65159SKalle Valo 	return 0;
1417d5c65159SKalle Valo }
1418d5c65159SKalle Valo 
1419d5c65159SKalle Valo static struct sk_buff *ath11k_dp_rx_get_msdu_last_buf(struct sk_buff_head *msdu_list,
1420d5c65159SKalle Valo 						      struct sk_buff *first)
1421d5c65159SKalle Valo {
1422d5c65159SKalle Valo 	struct sk_buff *skb;
1423d5c65159SKalle Valo 	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(first);
1424d5c65159SKalle Valo 
1425d5c65159SKalle Valo 	if (!rxcb->is_continuation)
1426d5c65159SKalle Valo 		return first;
1427d5c65159SKalle Valo 
1428d5c65159SKalle Valo 	skb_queue_walk(msdu_list, skb) {
1429d5c65159SKalle Valo 		rxcb = ATH11K_SKB_RXCB(skb);
1430d5c65159SKalle Valo 		if (!rxcb->is_continuation)
1431d5c65159SKalle Valo 			return skb;
1432d5c65159SKalle Valo 	}
1433d5c65159SKalle Valo 
1434d5c65159SKalle Valo 	return NULL;
1435d5c65159SKalle Valo }
1436d5c65159SKalle Valo 
1437d5c65159SKalle Valo static int ath11k_dp_rx_retrieve_amsdu(struct ath11k *ar,
1438d5c65159SKalle Valo 				       struct sk_buff_head *msdu_list,
1439d5c65159SKalle Valo 				       struct sk_buff_head *amsdu_list)
1440d5c65159SKalle Valo {
1441d5c65159SKalle Valo 	struct sk_buff *msdu = skb_peek(msdu_list);
1442d5c65159SKalle Valo 	struct sk_buff *last_buf;
1443d5c65159SKalle Valo 	struct ath11k_skb_rxcb *rxcb;
1444d5c65159SKalle Valo 	struct ieee80211_hdr *hdr;
1445d5c65159SKalle Valo 	struct hal_rx_desc *rx_desc, *lrx_desc;
1446d5c65159SKalle Valo 	u16 msdu_len;
1447d5c65159SKalle Valo 	u8 l3_pad_bytes;
1448d5c65159SKalle Valo 	u8 *hdr_status;
1449d5c65159SKalle Valo 	int ret;
1450d5c65159SKalle Valo 
1451d5c65159SKalle Valo 	if (!msdu)
1452d5c65159SKalle Valo 		return -ENOENT;
1453d5c65159SKalle Valo 
1454d5c65159SKalle Valo 	rx_desc = (struct hal_rx_desc *)msdu->data;
1455d5c65159SKalle Valo 	hdr_status = ath11k_dp_rx_h_80211_hdr(rx_desc);
1456d5c65159SKalle Valo 	hdr = (struct ieee80211_hdr *)hdr_status;
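	/* hdr_status holds a copy of the 802.11 header captured in the rx
	 * descriptor, so the frame type can be checked before any decap.
	 */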
1457d5c65159SKalle Valo 	/* Process only data frames */
1458d5c65159SKalle Valo 	if (!ieee80211_is_data(hdr->frame_control)) {
1459d5c65159SKalle Valo 		__skb_unlink(msdu, msdu_list);
1460d5c65159SKalle Valo 		dev_kfree_skb_any(msdu);
1461d5c65159SKalle Valo 		return -EINVAL;
1462d5c65159SKalle Valo 	}
1463d5c65159SKalle Valo 
1464d5c65159SKalle Valo 	do {
1465d5c65159SKalle Valo 		__skb_unlink(msdu, msdu_list);
1466d5c65159SKalle Valo 		last_buf = ath11k_dp_rx_get_msdu_last_buf(msdu_list, msdu);
1467d5c65159SKalle Valo 		if (!last_buf) {
1468d5c65159SKalle Valo 			ath11k_warn(ar->ab,
1469d5c65159SKalle Valo 				    "No valid Rx buffer to access Atten/MSDU_END/MPDU_END tlvs\n");
1470d5c65159SKalle Valo 			ret = -EIO;
1471d5c65159SKalle Valo 			goto free_out;
1472d5c65159SKalle Valo 		}
1473d5c65159SKalle Valo 
1474d5c65159SKalle Valo 		rx_desc = (struct hal_rx_desc *)msdu->data;
1475d5c65159SKalle Valo 		lrx_desc = (struct hal_rx_desc *)last_buf->data;
1476d5c65159SKalle Valo 
1477d5c65159SKalle Valo 		if (!ath11k_dp_rx_h_attn_msdu_done(lrx_desc)) {
1478d5c65159SKalle Valo 			ath11k_warn(ar->ab, "msdu_done bit in attention is not set\n");
1479d5c65159SKalle Valo 			ret = -EIO;
1480d5c65159SKalle Valo 			goto free_out;
1481d5c65159SKalle Valo 		}
1482d5c65159SKalle Valo 
1483d5c65159SKalle Valo 		rxcb = ATH11K_SKB_RXCB(msdu);
1484d5c65159SKalle Valo 		rxcb->rx_desc = rx_desc;
1485d5c65159SKalle Valo 		msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(rx_desc);
1486d5c65159SKalle Valo 		l3_pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(lrx_desc);
1487d5c65159SKalle Valo 
1488d5c65159SKalle Valo 		if (!rxcb->is_continuation) {
1489d5c65159SKalle Valo 			skb_put(msdu, HAL_RX_DESC_SIZE + l3_pad_bytes + msdu_len);
1490d5c65159SKalle Valo 			skb_pull(msdu, HAL_RX_DESC_SIZE + l3_pad_bytes);
1491d5c65159SKalle Valo 		} else {
1492d5c65159SKalle Valo 			ret = ath11k_dp_rx_msdu_coalesce(ar, msdu_list,
1493d5c65159SKalle Valo 							 msdu, last_buf,
1494d5c65159SKalle Valo 							 l3_pad_bytes, msdu_len);
1495d5c65159SKalle Valo 			if (ret) {
1496d5c65159SKalle Valo 				ath11k_warn(ar->ab,
1497d5c65159SKalle Valo 					    "failed to coalesce msdu rx buffer: %d\n", ret);
1498d5c65159SKalle Valo 				goto free_out;
1499d5c65159SKalle Valo 			}
1500d5c65159SKalle Valo 		}
1501d5c65159SKalle Valo 		__skb_queue_tail(amsdu_list, msdu);
1502d5c65159SKalle Valo 
1503d5c65159SKalle Valo 		/* Should we also consider msdu_cnt from mpdu_meta while
1504d5c65159SKalle Valo 		 * preparing amsdu list?
1505d5c65159SKalle Valo 		 */
1506d5c65159SKalle Valo 		if (rxcb->is_last_msdu)
1507d5c65159SKalle Valo 			break;
1508d5c65159SKalle Valo 	} while ((msdu = skb_peek(msdu_list)) != NULL);
1509d5c65159SKalle Valo 
1510d5c65159SKalle Valo 	return 0;
1511d5c65159SKalle Valo 
1512d5c65159SKalle Valo free_out:
1513d5c65159SKalle Valo 	dev_kfree_skb_any(msdu);
1514d5c65159SKalle Valo 	__skb_queue_purge(amsdu_list);
1515d5c65159SKalle Valo 
1516d5c65159SKalle Valo 	return ret;
1517d5c65159SKalle Valo }
1518d5c65159SKalle Valo 
1519d5c65159SKalle Valo static void ath11k_dp_rx_h_csum_offload(struct sk_buff *msdu)
1520d5c65159SKalle Valo {
1521d5c65159SKalle Valo 	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
1522d5c65159SKalle Valo 	bool ip_csum_fail, l4_csum_fail;
1523d5c65159SKalle Valo 
1524d5c65159SKalle Valo 	ip_csum_fail = ath11k_dp_rx_h_attn_ip_cksum_fail(rxcb->rx_desc);
1525d5c65159SKalle Valo 	l4_csum_fail = ath11k_dp_rx_h_attn_l4_cksum_fail(rxcb->rx_desc);
1526d5c65159SKalle Valo 
1527d5c65159SKalle Valo 	msdu->ip_summed = (ip_csum_fail || l4_csum_fail) ?
1528d5c65159SKalle Valo 			  CHECKSUM_NONE : CHECKSUM_UNNECESSARY;
1529d5c65159SKalle Valo }
1530d5c65159SKalle Valo 
1531d5c65159SKalle Valo static int ath11k_dp_rx_crypto_mic_len(struct ath11k *ar,
1532d5c65159SKalle Valo 				       enum hal_encrypt_type enctype)
1533d5c65159SKalle Valo {
1534d5c65159SKalle Valo 	switch (enctype) {
1535d5c65159SKalle Valo 	case HAL_ENCRYPT_TYPE_OPEN:
1536d5c65159SKalle Valo 	case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
1537d5c65159SKalle Valo 	case HAL_ENCRYPT_TYPE_TKIP_MIC:
1538d5c65159SKalle Valo 		return 0;
1539d5c65159SKalle Valo 	case HAL_ENCRYPT_TYPE_CCMP_128:
1540d5c65159SKalle Valo 		return IEEE80211_CCMP_MIC_LEN;
1541d5c65159SKalle Valo 	case HAL_ENCRYPT_TYPE_CCMP_256:
1542d5c65159SKalle Valo 		return IEEE80211_CCMP_256_MIC_LEN;
1543d5c65159SKalle Valo 	case HAL_ENCRYPT_TYPE_GCMP_128:
1544d5c65159SKalle Valo 	case HAL_ENCRYPT_TYPE_AES_GCMP_256:
1545d5c65159SKalle Valo 		return IEEE80211_GCMP_MIC_LEN;
1546d5c65159SKalle Valo 	case HAL_ENCRYPT_TYPE_WEP_40:
1547d5c65159SKalle Valo 	case HAL_ENCRYPT_TYPE_WEP_104:
1548d5c65159SKalle Valo 	case HAL_ENCRYPT_TYPE_WEP_128:
1549d5c65159SKalle Valo 	case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
1550d5c65159SKalle Valo 	case HAL_ENCRYPT_TYPE_WAPI:
1551d5c65159SKalle Valo 		break;
1552d5c65159SKalle Valo 	}
1553d5c65159SKalle Valo 
1554d5c65159SKalle Valo 	ath11k_warn(ar->ab, "unsupported encryption type %d for mic len\n", enctype);
1555d5c65159SKalle Valo 	return 0;
1556d5c65159SKalle Valo }
1557d5c65159SKalle Valo 
1558d5c65159SKalle Valo static int ath11k_dp_rx_crypto_param_len(struct ath11k *ar,
1559d5c65159SKalle Valo 					 enum hal_encrypt_type enctype)
1560d5c65159SKalle Valo {
1561d5c65159SKalle Valo 	switch (enctype) {
1562d5c65159SKalle Valo 	case HAL_ENCRYPT_TYPE_OPEN:
1563d5c65159SKalle Valo 		return 0;
1564d5c65159SKalle Valo 	case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
1565d5c65159SKalle Valo 	case HAL_ENCRYPT_TYPE_TKIP_MIC:
1566d5c65159SKalle Valo 		return IEEE80211_TKIP_IV_LEN;
1567d5c65159SKalle Valo 	case HAL_ENCRYPT_TYPE_CCMP_128:
1568d5c65159SKalle Valo 		return IEEE80211_CCMP_HDR_LEN;
1569d5c65159SKalle Valo 	case HAL_ENCRYPT_TYPE_CCMP_256:
1570d5c65159SKalle Valo 		return IEEE80211_CCMP_256_HDR_LEN;
1571d5c65159SKalle Valo 	case HAL_ENCRYPT_TYPE_GCMP_128:
1572d5c65159SKalle Valo 	case HAL_ENCRYPT_TYPE_AES_GCMP_256:
1573d5c65159SKalle Valo 		return IEEE80211_GCMP_HDR_LEN;
1574d5c65159SKalle Valo 	case HAL_ENCRYPT_TYPE_WEP_40:
1575d5c65159SKalle Valo 	case HAL_ENCRYPT_TYPE_WEP_104:
1576d5c65159SKalle Valo 	case HAL_ENCRYPT_TYPE_WEP_128:
1577d5c65159SKalle Valo 	case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
1578d5c65159SKalle Valo 	case HAL_ENCRYPT_TYPE_WAPI:
1579d5c65159SKalle Valo 		break;
1580d5c65159SKalle Valo 	}
1581d5c65159SKalle Valo 
1582d5c65159SKalle Valo 	ath11k_warn(ar->ab, "unsupported encryption type %d\n", enctype);
1583d5c65159SKalle Valo 	return 0;
1584d5c65159SKalle Valo }
1585d5c65159SKalle Valo 
1586d5c65159SKalle Valo static int ath11k_dp_rx_crypto_icv_len(struct ath11k *ar,
1587d5c65159SKalle Valo 				       enum hal_encrypt_type enctype)
1588d5c65159SKalle Valo {
1589d5c65159SKalle Valo 	switch (enctype) {
1590d5c65159SKalle Valo 	case HAL_ENCRYPT_TYPE_OPEN:
1591d5c65159SKalle Valo 	case HAL_ENCRYPT_TYPE_CCMP_128:
1592d5c65159SKalle Valo 	case HAL_ENCRYPT_TYPE_CCMP_256:
1593d5c65159SKalle Valo 	case HAL_ENCRYPT_TYPE_GCMP_128:
1594d5c65159SKalle Valo 	case HAL_ENCRYPT_TYPE_AES_GCMP_256:
1595d5c65159SKalle Valo 		return 0;
1596d5c65159SKalle Valo 	case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
1597d5c65159SKalle Valo 	case HAL_ENCRYPT_TYPE_TKIP_MIC:
1598d5c65159SKalle Valo 		return IEEE80211_TKIP_ICV_LEN;
1599d5c65159SKalle Valo 	case HAL_ENCRYPT_TYPE_WEP_40:
1600d5c65159SKalle Valo 	case HAL_ENCRYPT_TYPE_WEP_104:
1601d5c65159SKalle Valo 	case HAL_ENCRYPT_TYPE_WEP_128:
1602d5c65159SKalle Valo 	case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
1603d5c65159SKalle Valo 	case HAL_ENCRYPT_TYPE_WAPI:
1604d5c65159SKalle Valo 		break;
1605d5c65159SKalle Valo 	}
1606d5c65159SKalle Valo 
1607d5c65159SKalle Valo 	ath11k_warn(ar->ab, "unsupported encryption type %d\n", enctype);
1608d5c65159SKalle Valo 	return 0;
1609d5c65159SKalle Valo }
1610d5c65159SKalle Valo 
1611d5c65159SKalle Valo static void ath11k_dp_rx_h_undecap_nwifi(struct ath11k *ar,
1612d5c65159SKalle Valo 					 struct sk_buff *msdu,
1613d5c65159SKalle Valo 					 u8 *first_hdr,
1614d5c65159SKalle Valo 					 enum hal_encrypt_type enctype,
1615d5c65159SKalle Valo 					 struct ieee80211_rx_status *status)
1616d5c65159SKalle Valo {
1617d5c65159SKalle Valo 	struct ieee80211_hdr *hdr;
1618d5c65159SKalle Valo 	size_t hdr_len;
1619d5c65159SKalle Valo 	u8 da[ETH_ALEN];
1620d5c65159SKalle Valo 	u8 sa[ETH_ALEN];
1621d5c65159SKalle Valo 
1622d5c65159SKalle Valo 	/* pull decapped header and copy SA & DA */
1623d5c65159SKalle Valo 	hdr = (struct ieee80211_hdr *)msdu->data;
1624d5c65159SKalle Valo 	ether_addr_copy(da, ieee80211_get_DA(hdr));
1625d5c65159SKalle Valo 	ether_addr_copy(sa, ieee80211_get_SA(hdr));
1626d5c65159SKalle Valo 	skb_pull(msdu, ieee80211_hdrlen(hdr->frame_control));
1627d5c65159SKalle Valo 
1628d5c65159SKalle Valo 	/* push original 802.11 header */
1629d5c65159SKalle Valo 	hdr = (struct ieee80211_hdr *)first_hdr;
1630d5c65159SKalle Valo 	hdr_len = ieee80211_hdrlen(hdr->frame_control);
1631d5c65159SKalle Valo 
1632d5c65159SKalle Valo 	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
1633d5c65159SKalle Valo 		memcpy(skb_push(msdu,
1634d5c65159SKalle Valo 				ath11k_dp_rx_crypto_param_len(ar, enctype)),
1635d5c65159SKalle Valo 		       (void *)hdr + hdr_len,
1636d5c65159SKalle Valo 		       ath11k_dp_rx_crypto_param_len(ar, enctype));
1637d5c65159SKalle Valo 	}
1638d5c65159SKalle Valo 
1639d5c65159SKalle Valo 	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
1640d5c65159SKalle Valo 
1641d5c65159SKalle Valo 	/* The original 802.11 header has a different DA and, in the
1642d5c65159SKalle Valo 	 * case of 4addr frames, may also have a different SA.
1643d5c65159SKalle Valo 	 */
1644d5c65159SKalle Valo 	hdr = (struct ieee80211_hdr *)msdu->data;
1645d5c65159SKalle Valo 	ether_addr_copy(ieee80211_get_DA(hdr), da);
1646d5c65159SKalle Valo 	ether_addr_copy(ieee80211_get_SA(hdr), sa);
1647d5c65159SKalle Valo }
1648d5c65159SKalle Valo 
1649d5c65159SKalle Valo static void ath11k_dp_rx_h_undecap_raw(struct ath11k *ar, struct sk_buff *msdu,
1650d5c65159SKalle Valo 				       enum hal_encrypt_type enctype,
1651d5c65159SKalle Valo 				       struct ieee80211_rx_status *status,
1652d5c65159SKalle Valo 				       bool decrypted)
1653d5c65159SKalle Valo {
1654d5c65159SKalle Valo 	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
1655d5c65159SKalle Valo 	struct ieee80211_hdr *hdr;
1656d5c65159SKalle Valo 	size_t hdr_len;
1657d5c65159SKalle Valo 	size_t crypto_len;
1658d5c65159SKalle Valo 
1659d5c65159SKalle Valo 	if (!(rxcb->is_first_msdu && rxcb->is_last_msdu)) {
1661d5c65159SKalle Valo 		WARN_ON_ONCE(1);
1662d5c65159SKalle Valo 		return;
1663d5c65159SKalle Valo 	}
1664d5c65159SKalle Valo 
1665d5c65159SKalle Valo 	skb_trim(msdu, msdu->len - FCS_LEN);
1666d5c65159SKalle Valo 
1667d5c65159SKalle Valo 	if (!decrypted)
1668d5c65159SKalle Valo 		return;
1669d5c65159SKalle Valo 
1670d5c65159SKalle Valo 	hdr = (void *)msdu->data;
1671d5c65159SKalle Valo 
1672d5c65159SKalle Valo 	/* Tail */
1673d5c65159SKalle Valo 	if (status->flag & RX_FLAG_IV_STRIPPED) {
1674d5c65159SKalle Valo 		skb_trim(msdu, msdu->len -
1675d5c65159SKalle Valo 			 ath11k_dp_rx_crypto_mic_len(ar, enctype));
1676d5c65159SKalle Valo 
1677d5c65159SKalle Valo 		skb_trim(msdu, msdu->len -
1678d5c65159SKalle Valo 			 ath11k_dp_rx_crypto_icv_len(ar, enctype));
1679d5c65159SKalle Valo 	} else {
1680d5c65159SKalle Valo 		/* MIC */
1681d5c65159SKalle Valo 		if (status->flag & RX_FLAG_MIC_STRIPPED)
1682d5c65159SKalle Valo 			skb_trim(msdu, msdu->len -
1683d5c65159SKalle Valo 				 ath11k_dp_rx_crypto_mic_len(ar, enctype));
1684d5c65159SKalle Valo 
1685d5c65159SKalle Valo 		/* ICV */
1686d5c65159SKalle Valo 		if (status->flag & RX_FLAG_ICV_STRIPPED)
1687d5c65159SKalle Valo 			skb_trim(msdu, msdu->len -
1688d5c65159SKalle Valo 				 ath11k_dp_rx_crypto_icv_len(ar, enctype));
1689d5c65159SKalle Valo 	}
1690d5c65159SKalle Valo 
1691d5c65159SKalle Valo 	/* MMIC */
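	/* The Michael MIC is 8 bytes; IEEE80211_CCMP_MIC_LEN (also 8 bytes)
	 * is reused below as the trim length for TKIP frames.
	 */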
1692d5c65159SKalle Valo 	if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
1693d5c65159SKalle Valo 	    !ieee80211_has_morefrags(hdr->frame_control) &&
1694d5c65159SKalle Valo 	    enctype == HAL_ENCRYPT_TYPE_TKIP_MIC)
1695d5c65159SKalle Valo 		skb_trim(msdu, msdu->len - IEEE80211_CCMP_MIC_LEN);
1696d5c65159SKalle Valo 
1697d5c65159SKalle Valo 	/* Head */
1698d5c65159SKalle Valo 	if (status->flag & RX_FLAG_IV_STRIPPED) {
1699d5c65159SKalle Valo 		hdr_len = ieee80211_hdrlen(hdr->frame_control);
1700d5c65159SKalle Valo 		crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype);
1701d5c65159SKalle Valo 
1702d5c65159SKalle Valo 		memmove((void *)msdu->data + crypto_len,
1703d5c65159SKalle Valo 			(void *)msdu->data, hdr_len);
1704d5c65159SKalle Valo 		skb_pull(msdu, crypto_len);
1705d5c65159SKalle Valo 	}
1706d5c65159SKalle Valo }
1707d5c65159SKalle Valo 
1708d5c65159SKalle Valo static void *ath11k_dp_rx_h_find_rfc1042(struct ath11k *ar,
1709d5c65159SKalle Valo 					 struct sk_buff *msdu,
1710d5c65159SKalle Valo 					 enum hal_encrypt_type enctype)
1711d5c65159SKalle Valo {
1712d5c65159SKalle Valo 	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
1713d5c65159SKalle Valo 	struct ieee80211_hdr *hdr;
1714d5c65159SKalle Valo 	size_t hdr_len, crypto_len;
1715d5c65159SKalle Valo 	void *rfc1042;
1716d5c65159SKalle Valo 	bool is_amsdu;
1717d5c65159SKalle Valo 
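	/* Locate the LLC/SNAP header inside the descriptor's header snapshot:
	 * skip the 802.11 header and security IV on the first MSDU, plus the
	 * A-MSDU subframe header when the frame is an A-MSDU.
	 */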
1718d5c65159SKalle Valo 	is_amsdu = !(rxcb->is_first_msdu && rxcb->is_last_msdu);
1719d5c65159SKalle Valo 	hdr = (struct ieee80211_hdr *)ath11k_dp_rx_h_80211_hdr(rxcb->rx_desc);
1720d5c65159SKalle Valo 	rfc1042 = hdr;
1721d5c65159SKalle Valo 
1722d5c65159SKalle Valo 	if (rxcb->is_first_msdu) {
1723d5c65159SKalle Valo 		hdr_len = ieee80211_hdrlen(hdr->frame_control);
1724d5c65159SKalle Valo 		crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype);
1725d5c65159SKalle Valo 
1726d5c65159SKalle Valo 		rfc1042 += hdr_len + crypto_len;
1727d5c65159SKalle Valo 	}
1728d5c65159SKalle Valo 
1729d5c65159SKalle Valo 	if (is_amsdu)
1730d5c65159SKalle Valo 		rfc1042 += sizeof(struct ath11k_dp_amsdu_subframe_hdr);
1731d5c65159SKalle Valo 
1732d5c65159SKalle Valo 	return rfc1042;
1733d5c65159SKalle Valo }
1734d5c65159SKalle Valo 
1735d5c65159SKalle Valo static void ath11k_dp_rx_h_undecap_eth(struct ath11k *ar,
1736d5c65159SKalle Valo 				       struct sk_buff *msdu,
1737d5c65159SKalle Valo 				       u8 *first_hdr,
1738d5c65159SKalle Valo 				       enum hal_encrypt_type enctype,
1739d5c65159SKalle Valo 				       struct ieee80211_rx_status *status)
1740d5c65159SKalle Valo {
1741d5c65159SKalle Valo 	struct ieee80211_hdr *hdr;
1742d5c65159SKalle Valo 	struct ethhdr *eth;
1743d5c65159SKalle Valo 	size_t hdr_len;
1744d5c65159SKalle Valo 	u8 da[ETH_ALEN];
1745d5c65159SKalle Valo 	u8 sa[ETH_ALEN];
1746d5c65159SKalle Valo 	void *rfc1042;
1747d5c65159SKalle Valo 
1748d5c65159SKalle Valo 	rfc1042 = ath11k_dp_rx_h_find_rfc1042(ar, msdu, enctype);
1749d5c65159SKalle Valo 	if (WARN_ON_ONCE(!rfc1042))
1750d5c65159SKalle Valo 		return;
1751d5c65159SKalle Valo 
1752d5c65159SKalle Valo 	/* pull decapped header and copy SA & DA */
1753d5c65159SKalle Valo 	eth = (struct ethhdr *)msdu->data;
1754d5c65159SKalle Valo 	ether_addr_copy(da, eth->h_dest);
1755d5c65159SKalle Valo 	ether_addr_copy(sa, eth->h_source);
1756d5c65159SKalle Valo 	skb_pull(msdu, sizeof(struct ethhdr));
1757d5c65159SKalle Valo 
1758d5c65159SKalle Valo 	/* push rfc1042/llc/snap */
1759d5c65159SKalle Valo 	memcpy(skb_push(msdu, sizeof(struct ath11k_dp_rfc1042_hdr)), rfc1042,
1760d5c65159SKalle Valo 	       sizeof(struct ath11k_dp_rfc1042_hdr));
1761d5c65159SKalle Valo 
1762d5c65159SKalle Valo 	/* push original 802.11 header */
1763d5c65159SKalle Valo 	hdr = (struct ieee80211_hdr *)first_hdr;
1764d5c65159SKalle Valo 	hdr_len = ieee80211_hdrlen(hdr->frame_control);
1765d5c65159SKalle Valo 
1766d5c65159SKalle Valo 	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
1767d5c65159SKalle Valo 		memcpy(skb_push(msdu,
1768d5c65159SKalle Valo 				ath11k_dp_rx_crypto_param_len(ar, enctype)),
1769d5c65159SKalle Valo 		       (void *)hdr + hdr_len,
1770d5c65159SKalle Valo 		       ath11k_dp_rx_crypto_param_len(ar, enctype));
1771d5c65159SKalle Valo 	}
1772d5c65159SKalle Valo 
1773d5c65159SKalle Valo 	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
1774d5c65159SKalle Valo 
1775d5c65159SKalle Valo 	/* The original 802.11 header has a different DA and, in the
1776d5c65159SKalle Valo 	 * case of 4addr frames, may also have a different SA.
1777d5c65159SKalle Valo 	 */
1778d5c65159SKalle Valo 	hdr = (struct ieee80211_hdr *)msdu->data;
1779d5c65159SKalle Valo 	ether_addr_copy(ieee80211_get_DA(hdr), da);
1780d5c65159SKalle Valo 	ether_addr_copy(ieee80211_get_SA(hdr), sa);
1781d5c65159SKalle Valo }
1782d5c65159SKalle Valo 
1783d5c65159SKalle Valo static void ath11k_dp_rx_h_undecap(struct ath11k *ar, struct sk_buff *msdu,
1784d5c65159SKalle Valo 				   struct hal_rx_desc *rx_desc,
1785d5c65159SKalle Valo 				   enum hal_encrypt_type enctype,
1786d5c65159SKalle Valo 				   struct ieee80211_rx_status *status,
1787d5c65159SKalle Valo 				   bool decrypted)
1788d5c65159SKalle Valo {
1789d5c65159SKalle Valo 	u8 *first_hdr;
1790d5c65159SKalle Valo 	u8 decap;
1791d5c65159SKalle Valo 
1792d5c65159SKalle Valo 	first_hdr = ath11k_dp_rx_h_80211_hdr(rx_desc);
1793d5c65159SKalle Valo 	decap = ath11k_dp_rx_h_mpdu_start_decap_type(rx_desc);
1794d5c65159SKalle Valo 
1795d5c65159SKalle Valo 	switch (decap) {
1796d5c65159SKalle Valo 	case DP_RX_DECAP_TYPE_NATIVE_WIFI:
1797d5c65159SKalle Valo 		ath11k_dp_rx_h_undecap_nwifi(ar, msdu, first_hdr,
1798d5c65159SKalle Valo 					     enctype, status);
1799d5c65159SKalle Valo 		break;
1800d5c65159SKalle Valo 	case DP_RX_DECAP_TYPE_RAW:
1801d5c65159SKalle Valo 		ath11k_dp_rx_h_undecap_raw(ar, msdu, enctype, status,
1802d5c65159SKalle Valo 					   decrypted);
1803d5c65159SKalle Valo 		break;
1804d5c65159SKalle Valo 	case DP_RX_DECAP_TYPE_ETHERNET2_DIX:
1805d5c65159SKalle Valo 		ath11k_dp_rx_h_undecap_eth(ar, msdu, first_hdr,
1806d5c65159SKalle Valo 					   enctype, status);
1807d5c65159SKalle Valo 		break;
1808d5c65159SKalle Valo 	case DP_RX_DECAP_TYPE_8023:
1809d5c65159SKalle Valo 		/* TODO: Handle undecap for these formats */
1810d5c65159SKalle Valo 		break;
1811d5c65159SKalle Valo 	}
1812d5c65159SKalle Valo }
1813d5c65159SKalle Valo 
1814d5c65159SKalle Valo static void ath11k_dp_rx_h_mpdu(struct ath11k *ar,
1815d5c65159SKalle Valo 				struct sk_buff_head *amsdu_list,
1816d5c65159SKalle Valo 				struct hal_rx_desc *rx_desc,
1817d5c65159SKalle Valo 				struct ieee80211_rx_status *rx_status)
1818d5c65159SKalle Valo {
1819d5c65159SKalle Valo 	struct ieee80211_hdr *hdr;
1820d5c65159SKalle Valo 	enum hal_encrypt_type enctype;
1821d5c65159SKalle Valo 	struct sk_buff *last_msdu;
1822d5c65159SKalle Valo 	struct sk_buff *msdu;
1823d5c65159SKalle Valo 	struct ath11k_skb_rxcb *last_rxcb;
1824d5c65159SKalle Valo 	bool is_decrypted;
1825d5c65159SKalle Valo 	u32 err_bitmap;
1826d5c65159SKalle Valo 	u8 *qos;
1827d5c65159SKalle Valo 
1828d5c65159SKalle Valo 	if (skb_queue_empty(amsdu_list))
1829d5c65159SKalle Valo 		return;
1830d5c65159SKalle Valo 
1831d5c65159SKalle Valo 	hdr = (struct ieee80211_hdr *)ath11k_dp_rx_h_80211_hdr(rx_desc);
1832d5c65159SKalle Valo 
1833d5c65159SKalle Valo 	/* Each A-MSDU subframe will use the original header as the base and be
1834d5c65159SKalle Valo 	 * reported as a separate MSDU so strip the A-MSDU bit from QoS Ctl.
1835d5c65159SKalle Valo 	 */
1836d5c65159SKalle Valo 	if (ieee80211_is_data_qos(hdr->frame_control)) {
1837d5c65159SKalle Valo 		qos = ieee80211_get_qos_ctl(hdr);
1838d5c65159SKalle Valo 		qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
1839d5c65159SKalle Valo 	}
1840d5c65159SKalle Valo 
1841d5c65159SKalle Valo 	is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_desc);
1842d5c65159SKalle Valo 	enctype = ath11k_dp_rx_h_mpdu_start_enctype(rx_desc);
1843d5c65159SKalle Valo 
1844d5c65159SKalle Valo 	/* Some attention flags are valid only in the last MSDU. */
1845d5c65159SKalle Valo 	last_msdu = skb_peek_tail(amsdu_list);
1846d5c65159SKalle Valo 	last_rxcb = ATH11K_SKB_RXCB(last_msdu);
1847d5c65159SKalle Valo 
1848d5c65159SKalle Valo 	err_bitmap = ath11k_dp_rx_h_attn_mpdu_err(last_rxcb->rx_desc);
1849d5c65159SKalle Valo 
1850d5c65159SKalle Valo 	/* Clear per-MPDU flags while leaving per-PPDU flags intact. */
1851d5c65159SKalle Valo 	rx_status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
1852d5c65159SKalle Valo 			     RX_FLAG_MMIC_ERROR |
1853d5c65159SKalle Valo 			     RX_FLAG_DECRYPTED |
1854d5c65159SKalle Valo 			     RX_FLAG_IV_STRIPPED |
1855d5c65159SKalle Valo 			     RX_FLAG_MMIC_STRIPPED);
1856d5c65159SKalle Valo 
1857d5c65159SKalle Valo 	if (err_bitmap & DP_RX_MPDU_ERR_FCS)
1858d5c65159SKalle Valo 		rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
1859d5c65159SKalle Valo 
1860d5c65159SKalle Valo 	if (err_bitmap & DP_RX_MPDU_ERR_TKIP_MIC)
1861d5c65159SKalle Valo 		rx_status->flag |= RX_FLAG_MMIC_ERROR;
1862d5c65159SKalle Valo 
1863d5c65159SKalle Valo 	if (is_decrypted)
1864d5c65159SKalle Valo 		rx_status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MMIC_STRIPPED |
1865d5c65159SKalle Valo 				   RX_FLAG_MIC_STRIPPED | RX_FLAG_ICV_STRIPPED;
1866d5c65159SKalle Valo 
1867d5c65159SKalle Valo 	skb_queue_walk(amsdu_list, msdu) {
1868d5c65159SKalle Valo 		ath11k_dp_rx_h_csum_offload(msdu);
1869d5c65159SKalle Valo 		ath11k_dp_rx_h_undecap(ar, msdu, rx_desc,
1870d5c65159SKalle Valo 				       enctype, rx_status, is_decrypted);
1871d5c65159SKalle Valo 	}
1872d5c65159SKalle Valo }
1873d5c65159SKalle Valo 
1874d5c65159SKalle Valo static void ath11k_dp_rx_h_rate(struct ath11k *ar, struct hal_rx_desc *rx_desc,
1875d5c65159SKalle Valo 				struct ieee80211_rx_status *rx_status)
1876d5c65159SKalle Valo {
1877d5c65159SKalle Valo 	struct ieee80211_supported_band *sband;
1878d5c65159SKalle Valo 	enum rx_msdu_start_pkt_type pkt_type;
1879d5c65159SKalle Valo 	u8 bw;
1880d5c65159SKalle Valo 	u8 rate_mcs, nss;
1881d5c65159SKalle Valo 	u8 sgi;
1882d5c65159SKalle Valo 	bool is_cck;
1883d5c65159SKalle Valo 
1884d5c65159SKalle Valo 	pkt_type = ath11k_dp_rx_h_msdu_start_pkt_type(rx_desc);
1885d5c65159SKalle Valo 	bw = ath11k_dp_rx_h_msdu_start_rx_bw(rx_desc);
1886d5c65159SKalle Valo 	rate_mcs = ath11k_dp_rx_h_msdu_start_rate_mcs(rx_desc);
1887d5c65159SKalle Valo 	nss = ath11k_dp_rx_h_msdu_start_nss(rx_desc);
1888d5c65159SKalle Valo 	sgi = ath11k_dp_rx_h_msdu_start_sgi(rx_desc);
1889d5c65159SKalle Valo 
1890d5c65159SKalle Valo 	switch (pkt_type) {
1891d5c65159SKalle Valo 	case RX_MSDU_START_PKT_TYPE_11A:
1892d5c65159SKalle Valo 	case RX_MSDU_START_PKT_TYPE_11B:
1893d5c65159SKalle Valo 		is_cck = (pkt_type == RX_MSDU_START_PKT_TYPE_11B);
1894d5c65159SKalle Valo 		sband = &ar->mac.sbands[rx_status->band];
1895d5c65159SKalle Valo 		rx_status->rate_idx = ath11k_mac_hw_rate_to_idx(sband, rate_mcs,
1896d5c65159SKalle Valo 								is_cck);
1897d5c65159SKalle Valo 		break;
1898d5c65159SKalle Valo 	case RX_MSDU_START_PKT_TYPE_11N:
1899d5c65159SKalle Valo 		rx_status->encoding = RX_ENC_HT;
1900d5c65159SKalle Valo 		if (rate_mcs > ATH11K_HT_MCS_MAX) {
1901d5c65159SKalle Valo 			ath11k_warn(ar->ab,
1902d5c65159SKalle Valo 				    "received invalid mcs %d in HT mode\n",
1903d5c65159SKalle Valo 				    rate_mcs);
1904d5c65159SKalle Valo 			break;
1905d5c65159SKalle Valo 		}
1906d5c65159SKalle Valo 		rx_status->rate_idx = rate_mcs + (8 * (nss - 1));
1907d5c65159SKalle Valo 		if (sgi)
1908d5c65159SKalle Valo 			rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
190939e81c6aSTamizh chelvam 		rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw);
1910d5c65159SKalle Valo 		break;
1911d5c65159SKalle Valo 	case RX_MSDU_START_PKT_TYPE_11AC:
1912d5c65159SKalle Valo 		rx_status->encoding = RX_ENC_VHT;
1913d5c65159SKalle Valo 		rx_status->rate_idx = rate_mcs;
1914d5c65159SKalle Valo 		if (rate_mcs > ATH11K_VHT_MCS_MAX) {
1915d5c65159SKalle Valo 			ath11k_warn(ar->ab,
1916d5c65159SKalle Valo 				    "received invalid mcs %d in VHT mode\n",
1917d5c65159SKalle Valo 				    rate_mcs);
1918d5c65159SKalle Valo 			break;
1919d5c65159SKalle Valo 		}
1920d5c65159SKalle Valo 		rx_status->nss = nss;
1921d5c65159SKalle Valo 		if (sgi)
1922d5c65159SKalle Valo 			rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
192339e81c6aSTamizh chelvam 		rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw);
1924d5c65159SKalle Valo 		break;
1925d5c65159SKalle Valo 	case RX_MSDU_START_PKT_TYPE_11AX:
1926d5c65159SKalle Valo 		rx_status->rate_idx = rate_mcs;
1927d5c65159SKalle Valo 		if (rate_mcs > ATH11K_HE_MCS_MAX) {
1928d5c65159SKalle Valo 			ath11k_warn(ar->ab,
1929d5c65159SKalle Valo 				    "received invalid mcs %d in HE mode\n",
1930d5c65159SKalle Valo 				    rate_mcs);
1931d5c65159SKalle Valo 			break;
1932d5c65159SKalle Valo 		}
1933d5c65159SKalle Valo 		rx_status->encoding = RX_ENC_HE;
1934d5c65159SKalle Valo 		rx_status->nss = nss;
193539e81c6aSTamizh chelvam 		rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw);
1936d5c65159SKalle Valo 		break;
1937d5c65159SKalle Valo 	}
1938d5c65159SKalle Valo }
1939d5c65159SKalle Valo 
1940d5c65159SKalle Valo static void ath11k_dp_rx_h_ppdu(struct ath11k *ar, struct hal_rx_desc *rx_desc,
1941d5c65159SKalle Valo 				struct ieee80211_rx_status *rx_status)
1942d5c65159SKalle Valo {
1943d5c65159SKalle Valo 	u8 channel_num;
1944d5c65159SKalle Valo 
1945d5c65159SKalle Valo 	rx_status->freq = 0;
1946d5c65159SKalle Valo 	rx_status->rate_idx = 0;
1947d5c65159SKalle Valo 	rx_status->nss = 0;
1948d5c65159SKalle Valo 	rx_status->encoding = RX_ENC_LEGACY;
1949d5c65159SKalle Valo 	rx_status->bw = RATE_INFO_BW_20;
1950d5c65159SKalle Valo 
1951d5c65159SKalle Valo 	rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
1952d5c65159SKalle Valo 
1953d5c65159SKalle Valo 	channel_num = ath11k_dp_rx_h_msdu_start_freq(rx_desc);
1954d5c65159SKalle Valo 
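	/* The value reported by the hardware is a channel number, so the
	 * band is inferred from its range before converting to a center
	 * frequency.
	 */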
1955d5c65159SKalle Valo 	if (channel_num >= 1 && channel_num <= 14) {
1956d5c65159SKalle Valo 		rx_status->band = NL80211_BAND_2GHZ;
1957d5c65159SKalle Valo 	} else if (channel_num >= 36 && channel_num <= 173) {
1958d5c65159SKalle Valo 		rx_status->band = NL80211_BAND_5GHZ;
1959d5c65159SKalle Valo 	} else {
1960d5c65159SKalle Valo 		ath11k_warn(ar->ab, "unsupported channel number %d\n",
1961d5c65159SKalle Valo 			    channel_num);
1962d5c65159SKalle Valo 		return;
1963d5c65159SKalle Valo 	}
1964d5c65159SKalle Valo 
1965d5c65159SKalle Valo 	rx_status->freq = ieee80211_channel_to_frequency(channel_num,
1966d5c65159SKalle Valo 							 rx_status->band);
1967d5c65159SKalle Valo 
1968d5c65159SKalle Valo 	ath11k_dp_rx_h_rate(ar, rx_desc, rx_status);
1969d5c65159SKalle Valo }
1970d5c65159SKalle Valo 
1971d5c65159SKalle Valo static void ath11k_dp_rx_process_amsdu(struct ath11k *ar,
1972d5c65159SKalle Valo 				       struct sk_buff_head *amsdu_list,
1973d5c65159SKalle Valo 				       struct ieee80211_rx_status *rx_status)
1974d5c65159SKalle Valo {
1975d5c65159SKalle Valo 	struct sk_buff *first;
1976d5c65159SKalle Valo 	struct ath11k_skb_rxcb *rxcb;
1977d5c65159SKalle Valo 	struct hal_rx_desc *rx_desc;
1978d5c65159SKalle Valo 	bool first_mpdu;
1979d5c65159SKalle Valo 
1980d5c65159SKalle Valo 	if (skb_queue_empty(amsdu_list))
1981d5c65159SKalle Valo 		return;
1982d5c65159SKalle Valo 
1983d5c65159SKalle Valo 	first = skb_peek(amsdu_list);
1984d5c65159SKalle Valo 	rxcb = ATH11K_SKB_RXCB(first);
1985d5c65159SKalle Valo 	rx_desc = rxcb->rx_desc;
1986d5c65159SKalle Valo 
1987d5c65159SKalle Valo 	first_mpdu = ath11k_dp_rx_h_attn_first_mpdu(rx_desc);
1988d5c65159SKalle Valo 	if (first_mpdu)
1989d5c65159SKalle Valo 		ath11k_dp_rx_h_ppdu(ar, rx_desc, rx_status);
1990d5c65159SKalle Valo 
1991d5c65159SKalle Valo 	ath11k_dp_rx_h_mpdu(ar, amsdu_list, rx_desc, rx_status);
1992d5c65159SKalle Valo }
1993d5c65159SKalle Valo 
1994d5c65159SKalle Valo static char *ath11k_print_get_tid(struct ieee80211_hdr *hdr, char *out,
1995d5c65159SKalle Valo 				  size_t size)
1996d5c65159SKalle Valo {
1997d5c65159SKalle Valo 	u8 *qc;
1998d5c65159SKalle Valo 	int tid;
1999d5c65159SKalle Valo 
2000d5c65159SKalle Valo 	if (!ieee80211_is_data_qos(hdr->frame_control))
2001d5c65159SKalle Valo 		return "";
2002d5c65159SKalle Valo 
2003d5c65159SKalle Valo 	qc = ieee80211_get_qos_ctl(hdr);
2004d5c65159SKalle Valo 	tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
2005d5c65159SKalle Valo 	snprintf(out, size, "tid %d", tid);
2006d5c65159SKalle Valo 
2007d5c65159SKalle Valo 	return out;
2008d5c65159SKalle Valo }
2009d5c65159SKalle Valo 
2010d5c65159SKalle Valo static void ath11k_dp_rx_deliver_msdu(struct ath11k *ar, struct napi_struct *napi,
2011d5c65159SKalle Valo 				      struct sk_buff *msdu)
2012d5c65159SKalle Valo {
2013e4eb7b5cSJohn Crispin 	static const struct ieee80211_radiotap_he known = {
2014e4eb7b5cSJohn Crispin 		.data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN),
2015e4eb7b5cSJohn Crispin 		.data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN),
2016e4eb7b5cSJohn Crispin 	};
2017d5c65159SKalle Valo 	struct ieee80211_rx_status *status;
2018d5c65159SKalle Valo 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
2019e4eb7b5cSJohn Crispin 	struct ieee80211_radiotap_he *he = NULL;
2020d5c65159SKalle Valo 	char tid[32];
2021d5c65159SKalle Valo 
2022d5c65159SKalle Valo 	status = IEEE80211_SKB_RXCB(msdu);
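	/* For HE frames a minimal radiotap HE descriptor is pushed in front
	 * of the 802.11 header; RX_FLAG_RADIOTAP_HE tells mac80211 to pick
	 * it up when building the radiotap header.
	 */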
2023e4eb7b5cSJohn Crispin 	if (status->encoding == RX_ENC_HE) {
2024e4eb7b5cSJohn Crispin 		he = skb_push(msdu, sizeof(known));
2025e4eb7b5cSJohn Crispin 		memcpy(he, &known, sizeof(known));
2026e4eb7b5cSJohn Crispin 		status->flag |= RX_FLAG_RADIOTAP_HE;
2027e4eb7b5cSJohn Crispin 	}
2028d5c65159SKalle Valo 
2029d5c65159SKalle Valo 	ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
2030d5c65159SKalle Valo 		   "rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
2031d5c65159SKalle Valo 		   msdu,
2032d5c65159SKalle Valo 		   msdu->len,
2033d5c65159SKalle Valo 		   ieee80211_get_SA(hdr),
2034d5c65159SKalle Valo 		   ath11k_print_get_tid(hdr, tid, sizeof(tid)),
2035d5c65159SKalle Valo 		   is_multicast_ether_addr(ieee80211_get_DA(hdr)) ?
2036d5c65159SKalle Valo 							"mcast" : "ucast",
2037d5c65159SKalle Valo 		   (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4,
2038d5c65159SKalle Valo 		   (status->encoding == RX_ENC_LEGACY) ? "legacy" : "",
2039d5c65159SKalle Valo 		   (status->encoding == RX_ENC_HT) ? "ht" : "",
2040d5c65159SKalle Valo 		   (status->encoding == RX_ENC_VHT) ? "vht" : "",
2041d5c65159SKalle Valo 		   (status->encoding == RX_ENC_HE) ? "he" : "",
2042d5c65159SKalle Valo 		   (status->bw == RATE_INFO_BW_40) ? "40" : "",
2043d5c65159SKalle Valo 		   (status->bw == RATE_INFO_BW_80) ? "80" : "",
2044d5c65159SKalle Valo 		   (status->bw == RATE_INFO_BW_160) ? "160" : "",
2045d5c65159SKalle Valo 		   status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "",
2046d5c65159SKalle Valo 		   status->rate_idx,
2047d5c65159SKalle Valo 		   status->nss,
2048d5c65159SKalle Valo 		   status->freq,
2049d5c65159SKalle Valo 		   status->band, status->flag,
2050d5c65159SKalle Valo 		   !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
2051d5c65159SKalle Valo 		   !!(status->flag & RX_FLAG_MMIC_ERROR),
2052d5c65159SKalle Valo 		   !!(status->flag & RX_FLAG_AMSDU_MORE));
2053d5c65159SKalle Valo 
2054d5c65159SKalle Valo 	/* TODO: trace rx packet */
2055d5c65159SKalle Valo 
2056d5c65159SKalle Valo 	ieee80211_rx_napi(ar->hw, NULL, msdu, napi);
2057d5c65159SKalle Valo }
2058d5c65159SKalle Valo 
2059d5c65159SKalle Valo static void ath11k_dp_rx_pre_deliver_amsdu(struct ath11k *ar,
2060d5c65159SKalle Valo 					   struct sk_buff_head *amsdu_list,
2061d5c65159SKalle Valo 					   struct ieee80211_rx_status *rxs)
2062d5c65159SKalle Valo {
2063d5c65159SKalle Valo 	struct sk_buff *msdu;
2064d5c65159SKalle Valo 	struct sk_buff *first_subframe;
2065d5c65159SKalle Valo 	struct ieee80211_rx_status *status;
2066d5c65159SKalle Valo 
2067d5c65159SKalle Valo 	first_subframe = skb_peek(amsdu_list);
2068d5c65159SKalle Valo 
2069d5c65159SKalle Valo 	skb_queue_walk(amsdu_list, msdu) {
2070d5c65159SKalle Valo 		/* Setup per-MSDU flags */
2071d5c65159SKalle Valo 		if (skb_queue_is_last(amsdu_list, msdu))
2072d5c65159SKalle Valo 			rxs->flag &= ~RX_FLAG_AMSDU_MORE;
2073d5c65159SKalle Valo 		else
2074d5c65159SKalle Valo 			rxs->flag |= RX_FLAG_AMSDU_MORE;
2075d5c65159SKalle Valo 
2076d5c65159SKalle Valo 		if (msdu == first_subframe) {
2077d5c65159SKalle Valo 			first_subframe = NULL;
2078d5c65159SKalle Valo 			rxs->flag &= ~RX_FLAG_ALLOW_SAME_PN;
2079d5c65159SKalle Valo 		} else {
2080d5c65159SKalle Valo 			rxs->flag |= RX_FLAG_ALLOW_SAME_PN;
2081d5c65159SKalle Valo 		}
2082d5c65159SKalle Valo 		rxs->flag |= RX_FLAG_SKIP_MONITOR;
2083d5c65159SKalle Valo 
2084d5c65159SKalle Valo 		status = IEEE80211_SKB_RXCB(msdu);
2085d5c65159SKalle Valo 		*status = *rxs;
2086d5c65159SKalle Valo 	}
2087d5c65159SKalle Valo }
2088d5c65159SKalle Valo 
2089d5c65159SKalle Valo static void ath11k_dp_rx_process_pending_packets(struct ath11k_base *ab,
2090d5c65159SKalle Valo 						 struct napi_struct *napi,
2091d5c65159SKalle Valo 						 struct sk_buff_head *pending_q,
2092d5c65159SKalle Valo 						 int *quota, u8 mac_id)
2093d5c65159SKalle Valo {
2094d5c65159SKalle Valo 	struct ath11k *ar;
2095d5c65159SKalle Valo 	struct sk_buff *msdu;
2096d5c65159SKalle Valo 	struct ath11k_pdev *pdev;
2097d5c65159SKalle Valo 
2098d5c65159SKalle Valo 	if (skb_queue_empty(pending_q))
2099d5c65159SKalle Valo 		return;
2100d5c65159SKalle Valo 
2101d5c65159SKalle Valo 	ar = ab->pdevs[mac_id].ar;
2102d5c65159SKalle Valo 
2103d5c65159SKalle Valo 	rcu_read_lock();
2104d5c65159SKalle Valo 	pdev = rcu_dereference(ab->pdevs_active[mac_id]);
2105d5c65159SKalle Valo 
2106d5c65159SKalle Valo 	while (*quota && (msdu = __skb_dequeue(pending_q))) {
2107d5c65159SKalle Valo 		if (!pdev) {
2108d5c65159SKalle Valo 			dev_kfree_skb_any(msdu);
2109d5c65159SKalle Valo 			continue;
2110d5c65159SKalle Valo 		}
2111d5c65159SKalle Valo 
2112d5c65159SKalle Valo 		ath11k_dp_rx_deliver_msdu(ar, napi, msdu);
2113d5c65159SKalle Valo 		(*quota)--;
2114d5c65159SKalle Valo 	}
2115d5c65159SKalle Valo 	rcu_read_unlock();
2116d5c65159SKalle Valo }
2117d5c65159SKalle Valo 
2118d5c65159SKalle Valo int ath11k_dp_process_rx(struct ath11k_base *ab, int mac_id,
2119d5c65159SKalle Valo 			 struct napi_struct *napi, struct sk_buff_head *pending_q,
2120d5c65159SKalle Valo 			 int budget)
2121d5c65159SKalle Valo {
2122d5c65159SKalle Valo 	struct ath11k *ar = ab->pdevs[mac_id].ar;
2123d5c65159SKalle Valo 	struct ath11k_pdev_dp *dp = &ar->dp;
2124d5c65159SKalle Valo 	struct ieee80211_rx_status *rx_status = &dp->rx_status;
2125d5c65159SKalle Valo 	struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
2126d5c65159SKalle Valo 	struct hal_srng *srng;
2127d5c65159SKalle Valo 	struct sk_buff *msdu;
2128d5c65159SKalle Valo 	struct sk_buff_head msdu_list;
2129d5c65159SKalle Valo 	struct sk_buff_head amsdu_list;
2130d5c65159SKalle Valo 	struct ath11k_skb_rxcb *rxcb;
2131d5c65159SKalle Valo 	u32 *rx_desc;
2132d5c65159SKalle Valo 	int buf_id;
2133d5c65159SKalle Valo 	int num_buffs_reaped = 0;
2134d5c65159SKalle Valo 	int quota = budget;
2135d5c65159SKalle Valo 	int ret;
2136d5c65159SKalle Valo 	bool done = false;
2137d5c65159SKalle Valo 
2138d5c65159SKalle Valo 	/* Process any pending packets from the previous napi poll.
2139d5c65159SKalle Valo 	 * Note: All MSDUs in this pending_q correspond to the same mac id
2140d5c65159SKalle Valo 	 * because of the pdev based reo dest mapping and because each irq
2141d5c65159SKalle Valo 	 * group id maps to a specific reo dest ring.
2142d5c65159SKalle Valo 	 */
2143d5c65159SKalle Valo 	ath11k_dp_rx_process_pending_packets(ab, napi, pending_q, &quota,
2144d5c65159SKalle Valo 					     mac_id);
2145d5c65159SKalle Valo 
2146d5c65159SKalle Valo 	/* If all quota is exhausted by processing the pending_q,
2147d5c65159SKalle Valo 	 * Wait for the next napi poll to reap the new info
2148d5c65159SKalle Valo 	 */
2149d5c65159SKalle Valo 	if (!quota)
2150d5c65159SKalle Valo 		goto exit;
2151d5c65159SKalle Valo 
2152d5c65159SKalle Valo 	__skb_queue_head_init(&msdu_list);
2153d5c65159SKalle Valo 
2154d5c65159SKalle Valo 	srng = &ab->hal.srng_list[dp->reo_dst_ring.ring_id];
2155d5c65159SKalle Valo 
2156d5c65159SKalle Valo 	spin_lock_bh(&srng->lock);
2157d5c65159SKalle Valo 
2158d5c65159SKalle Valo 	ath11k_hal_srng_access_begin(ab, srng);
2159d5c65159SKalle Valo 
2160d5c65159SKalle Valo try_again:
2161d5c65159SKalle Valo 	while ((rx_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
2162293cb583SJohn Crispin 		struct hal_reo_dest_ring *desc = (struct hal_reo_dest_ring *)rx_desc;
2163293cb583SJohn Crispin 		enum hal_reo_dest_ring_push_reason push_reason;
2164293cb583SJohn Crispin 		u32 cookie;
2165d5c65159SKalle Valo 
2166293cb583SJohn Crispin 		cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
2167293cb583SJohn Crispin 				   desc->buf_addr_info.info1);
2168d5c65159SKalle Valo 		buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
2169293cb583SJohn Crispin 				   cookie);
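		/* The SW cookie carries the idr buf_id stashed when the rx
		 * buffer was posted; use it to look up the skb.
		 */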
2170d5c65159SKalle Valo 		spin_lock_bh(&rx_ring->idr_lock);
2171d5c65159SKalle Valo 		msdu = idr_find(&rx_ring->bufs_idr, buf_id);
2172d5c65159SKalle Valo 		if (!msdu) {
2173d5c65159SKalle Valo 			ath11k_warn(ab, "frame rx with invalid buf_id %d\n",
2174d5c65159SKalle Valo 				    buf_id);
2175d5c65159SKalle Valo 			spin_unlock_bh(&rx_ring->idr_lock);
2176d5c65159SKalle Valo 			continue;
2177d5c65159SKalle Valo 		}
2178d5c65159SKalle Valo 
2179d5c65159SKalle Valo 		idr_remove(&rx_ring->bufs_idr, buf_id);
2180d5c65159SKalle Valo 		spin_unlock_bh(&rx_ring->idr_lock);
2181d5c65159SKalle Valo 
2182d5c65159SKalle Valo 		rxcb = ATH11K_SKB_RXCB(msdu);
2183d5c65159SKalle Valo 		dma_unmap_single(ab->dev, rxcb->paddr,
2184d5c65159SKalle Valo 				 msdu->len + skb_tailroom(msdu),
2185d5c65159SKalle Valo 				 DMA_FROM_DEVICE);
2186d5c65159SKalle Valo 
2187d5c65159SKalle Valo 		num_buffs_reaped++;
2188d5c65159SKalle Valo 
2189293cb583SJohn Crispin 		push_reason = FIELD_GET(HAL_REO_DEST_RING_INFO0_PUSH_REASON,
2190293cb583SJohn Crispin 					desc->info0);
2191293cb583SJohn Crispin 		if (push_reason !=
2192d5c65159SKalle Valo 		    HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION) {
2193d5c65159SKalle Valo 			/* TODO: Check if the msdu can be sent up for processing */
2194d5c65159SKalle Valo 			dev_kfree_skb_any(msdu);
2195d5c65159SKalle Valo 			ab->soc_stats.hal_reo_error[dp->reo_dst_ring.ring_id]++;
2196d5c65159SKalle Valo 			continue;
2197d5c65159SKalle Valo 		}
2198d5c65159SKalle Valo 
2199293cb583SJohn Crispin 		rxcb->is_first_msdu = !!(desc->rx_msdu_info.info0 &
2200293cb583SJohn Crispin 					 RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU);
2201293cb583SJohn Crispin 		rxcb->is_last_msdu = !!(desc->rx_msdu_info.info0 &
2202293cb583SJohn Crispin 					RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU);
2203293cb583SJohn Crispin 		rxcb->is_continuation = !!(desc->rx_msdu_info.info0 &
2204293cb583SJohn Crispin 					   RX_MSDU_DESC_INFO0_MSDU_CONTINUATION);
2205d5c65159SKalle Valo 		rxcb->mac_id = mac_id;
2206d5c65159SKalle Valo 		__skb_queue_tail(&msdu_list, msdu);
2207d5c65159SKalle Valo 
2208d5c65159SKalle Valo 		/* Stop reaping from the ring once quota is exhausted
2209d5c65159SKalle Valo 		 * and we've received all MSDUs in the A-MSDU. The
2210d5c65159SKalle Valo 		 * additional msdu's reaped in excess of quota here would
2211d5c65159SKalle Valo 		 * be pushed into the pending queue to be processed during
2212d5c65159SKalle Valo 		 * the next napi poll.
2213d5c65159SKalle Valo 		 * Note: More profiling can be done to see the impact on
2214d5c65159SKalle Valo 		 * pending_q and throughput during various traffic & density
2215d5c65159SKalle Valo 		 * and how use of budget instead of remaining quota affects it.
2216d5c65159SKalle Valo 		 */
2217d5c65159SKalle Valo 		if (num_buffs_reaped >= quota && rxcb->is_last_msdu &&
2218d5c65159SKalle Valo 		    !rxcb->is_continuation) {
2219d5c65159SKalle Valo 			done = true;
2220d5c65159SKalle Valo 			break;
2221d5c65159SKalle Valo 		}
2222d5c65159SKalle Valo 	}
2223d5c65159SKalle Valo 
2224d5c65159SKalle Valo 	/* The HW might have updated the head pointer after we cached it.
2225d5c65159SKalle Valo 	 * In that case, even though there are entries in the ring, we will
2226d5c65159SKalle Valo 	 * get a NULL rx_desc. Give the read another try with the updated
2227d5c65159SKalle Valo 	 * cached head pointer so that we can reap the complete MPDU in the
2228d5c65159SKalle Valo 	 * current rx processing.
2229d5c65159SKalle Valo 	 */
2230d5c65159SKalle Valo 	if (!done && ath11k_hal_srng_dst_num_free(ab, srng, true)) {
2231d5c65159SKalle Valo 		ath11k_hal_srng_access_end(ab, srng);
2232d5c65159SKalle Valo 		goto try_again;
2233d5c65159SKalle Valo 	}
2234d5c65159SKalle Valo 
2235d5c65159SKalle Valo 	ath11k_hal_srng_access_end(ab, srng);
2236d5c65159SKalle Valo 
2237d5c65159SKalle Valo 	spin_unlock_bh(&srng->lock);
2238d5c65159SKalle Valo 
2239d5c65159SKalle Valo 	if (!num_buffs_reaped)
2240d5c65159SKalle Valo 		goto exit;
2241d5c65159SKalle Valo 
2242d5c65159SKalle Valo 	/* Should we reschedule it later if we are not able to replenish all
2243d5c65159SKalle Valo 	 * the buffers?
2244d5c65159SKalle Valo 	 */
2245d5c65159SKalle Valo 	ath11k_dp_rxbufs_replenish(ab, mac_id, rx_ring, num_buffs_reaped,
2246d5c65159SKalle Valo 				   HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC);
2247d5c65159SKalle Valo 
2248d5c65159SKalle Valo 	rcu_read_lock();
2249d5c65159SKalle Valo 	if (!rcu_dereference(ab->pdevs_active[mac_id])) {
2250d5c65159SKalle Valo 		__skb_queue_purge(&msdu_list);
2251d5c65159SKalle Valo 		goto rcu_unlock;
2252d5c65159SKalle Valo 	}
2253d5c65159SKalle Valo 
2254d5c65159SKalle Valo 	if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) {
2255d5c65159SKalle Valo 		__skb_queue_purge(&msdu_list);
2256d5c65159SKalle Valo 		goto rcu_unlock;
2257d5c65159SKalle Valo 	}
2258d5c65159SKalle Valo 
2259d5c65159SKalle Valo 	while (!skb_queue_empty(&msdu_list)) {
2260d5c65159SKalle Valo 		__skb_queue_head_init(&amsdu_list);
2261d5c65159SKalle Valo 		ret = ath11k_dp_rx_retrieve_amsdu(ar, &msdu_list, &amsdu_list);
2262d5c65159SKalle Valo 		if (ret) {
2263d5c65159SKalle Valo 			if (ret == -EIO) {
2264d5c65159SKalle Valo 				ath11k_err(ab, "rx ring got corrupted %d\n", ret);
2265d5c65159SKalle Valo 				__skb_queue_purge(&msdu_list);
2266d5c65159SKalle Valo 				/* Should we stop processing any further rx
2267d5c65159SKalle Valo 				 * from this ring?
2268d5c65159SKalle Valo 				 */
2269d5c65159SKalle Valo 				goto rcu_unlock;
2270d5c65159SKalle Valo 			}
2271d5c65159SKalle Valo 
2272d5c65159SKalle Valo 			/* A-MSDU retrieval failed due to a non-fatal condition;
2273d5c65159SKalle Valo 			 * continue processing with the next MSDU.
2274d5c65159SKalle Valo 			 */
2275d5c65159SKalle Valo 			continue;
2276d5c65159SKalle Valo 		}
2277d5c65159SKalle Valo 
2278d5c65159SKalle Valo 		ath11k_dp_rx_process_amsdu(ar, &amsdu_list, rx_status);
2279d5c65159SKalle Valo 
2280d5c65159SKalle Valo 		ath11k_dp_rx_pre_deliver_amsdu(ar, &amsdu_list, rx_status);
2281d5c65159SKalle Valo 		skb_queue_splice_tail(&amsdu_list, pending_q);
2282d5c65159SKalle Valo 	}
2283d5c65159SKalle Valo 
2284d5c65159SKalle Valo 	while (quota && (msdu = __skb_dequeue(pending_q))) {
2285d5c65159SKalle Valo 		ath11k_dp_rx_deliver_msdu(ar, napi, msdu);
2286d5c65159SKalle Valo 		quota--;
2287d5c65159SKalle Valo 	}
2288d5c65159SKalle Valo 
2289d5c65159SKalle Valo rcu_unlock:
2290d5c65159SKalle Valo 	rcu_read_unlock();
2291d5c65159SKalle Valo exit:
2292d5c65159SKalle Valo 	return budget - quota;
2293d5c65159SKalle Valo }
2294d5c65159SKalle Valo 
2295d5c65159SKalle Valo static void ath11k_dp_rx_update_peer_stats(struct ath11k_sta *arsta,
2296d5c65159SKalle Valo 					   struct hal_rx_mon_ppdu_info *ppdu_info)
2297d5c65159SKalle Valo {
2298d5c65159SKalle Valo 	struct ath11k_rx_peer_stats *rx_stats = arsta->rx_stats;
2299d5c65159SKalle Valo 	u32 num_msdu;
2300d5c65159SKalle Valo 
2301d5c65159SKalle Valo 	if (!rx_stats)
2302d5c65159SKalle Valo 		return;
2303d5c65159SKalle Valo 
2304d5c65159SKalle Valo 	num_msdu = ppdu_info->tcp_msdu_count + ppdu_info->tcp_ack_msdu_count +
2305d5c65159SKalle Valo 		   ppdu_info->udp_msdu_count + ppdu_info->other_msdu_count;
2306d5c65159SKalle Valo 
2307d5c65159SKalle Valo 	rx_stats->num_msdu += num_msdu;
2308d5c65159SKalle Valo 	rx_stats->tcp_msdu_count += ppdu_info->tcp_msdu_count +
2309d5c65159SKalle Valo 				    ppdu_info->tcp_ack_msdu_count;
2310d5c65159SKalle Valo 	rx_stats->udp_msdu_count += ppdu_info->udp_msdu_count;
2311d5c65159SKalle Valo 	rx_stats->other_msdu_count += ppdu_info->other_msdu_count;
2312d5c65159SKalle Valo 
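	/* Legacy (11a/11b) preambles carry no HT/VHT rate information, so
	 * nss is fixed to 1 and mcs/tid are set to their maximum sentinel
	 * values; these MSDUs then land in the last bucket of the
	 * corresponding histograms below.
	 */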
2313d5c65159SKalle Valo 	if (ppdu_info->preamble_type == HAL_RX_PREAMBLE_11A ||
2314d5c65159SKalle Valo 	    ppdu_info->preamble_type == HAL_RX_PREAMBLE_11B) {
2315d5c65159SKalle Valo 		ppdu_info->nss = 1;
2316d5c65159SKalle Valo 		ppdu_info->mcs = HAL_RX_MAX_MCS;
2317d5c65159SKalle Valo 		ppdu_info->tid = IEEE80211_NUM_TIDS;
2318d5c65159SKalle Valo 	}
2319d5c65159SKalle Valo 
2320d5c65159SKalle Valo 	if (ppdu_info->nss > 0 && ppdu_info->nss <= HAL_RX_MAX_NSS)
2321d5c65159SKalle Valo 		rx_stats->nss_count[ppdu_info->nss - 1] += num_msdu;
2322d5c65159SKalle Valo 
2323d5c65159SKalle Valo 	if (ppdu_info->mcs <= HAL_RX_MAX_MCS)
2324d5c65159SKalle Valo 		rx_stats->mcs_count[ppdu_info->mcs] += num_msdu;
2325d5c65159SKalle Valo 
2326d5c65159SKalle Valo 	if (ppdu_info->gi < HAL_RX_GI_MAX)
2327d5c65159SKalle Valo 		rx_stats->gi_count[ppdu_info->gi] += num_msdu;
2328d5c65159SKalle Valo 
2329d5c65159SKalle Valo 	if (ppdu_info->bw < HAL_RX_BW_MAX)
2330d5c65159SKalle Valo 		rx_stats->bw_count[ppdu_info->bw] += num_msdu;
2331d5c65159SKalle Valo 
2332d5c65159SKalle Valo 	if (ppdu_info->ldpc < HAL_RX_SU_MU_CODING_MAX)
2333d5c65159SKalle Valo 		rx_stats->coding_count[ppdu_info->ldpc] += num_msdu;
2334d5c65159SKalle Valo 
2335d5c65159SKalle Valo 	if (ppdu_info->tid <= IEEE80211_NUM_TIDS)
2336d5c65159SKalle Valo 		rx_stats->tid_count[ppdu_info->tid] += num_msdu;
2337d5c65159SKalle Valo 
2338d5c65159SKalle Valo 	if (ppdu_info->preamble_type < HAL_RX_PREAMBLE_MAX)
2339d5c65159SKalle Valo 		rx_stats->pream_cnt[ppdu_info->preamble_type] += num_msdu;
2340d5c65159SKalle Valo 
2341d5c65159SKalle Valo 	if (ppdu_info->reception_type < HAL_RX_RECEPTION_TYPE_MAX)
2342d5c65159SKalle Valo 		rx_stats->reception_type[ppdu_info->reception_type] += num_msdu;
2343d5c65159SKalle Valo 
2344d5c65159SKalle Valo 	if (ppdu_info->is_stbc)
2345d5c65159SKalle Valo 		rx_stats->stbc_count += num_msdu;
2346d5c65159SKalle Valo 
2347d5c65159SKalle Valo 	if (ppdu_info->beamformed)
2348d5c65159SKalle Valo 		rx_stats->beamformed_count += num_msdu;
2349d5c65159SKalle Valo 
2350d5c65159SKalle Valo 	if (ppdu_info->num_mpdu_fcs_ok > 1)
2351d5c65159SKalle Valo 		rx_stats->ampdu_msdu_count += num_msdu;
2352d5c65159SKalle Valo 	else
2353d5c65159SKalle Valo 		rx_stats->non_ampdu_msdu_count += num_msdu;
2354d5c65159SKalle Valo 
2355d5c65159SKalle Valo 	rx_stats->num_mpdu_fcs_ok += ppdu_info->num_mpdu_fcs_ok;
2356d5c65159SKalle Valo 	rx_stats->num_mpdu_fcs_err += ppdu_info->num_mpdu_fcs_err;
2357d5c65159SKalle Valo 
2358d5c65159SKalle Valo 	arsta->rssi_comb = ppdu_info->rssi_comb;
2359d5c65159SKalle Valo 	rx_stats->rx_duration += ppdu_info->rx_duration;
2360d5c65159SKalle Valo 	arsta->rx_duration = rx_stats->rx_duration;
2361d5c65159SKalle Valo }
2362d5c65159SKalle Valo 
2363d5c65159SKalle Valo static struct sk_buff *ath11k_dp_rx_alloc_mon_status_buf(struct ath11k_base *ab,
2364d5c65159SKalle Valo 							 struct dp_rxdma_ring *rx_ring,
2365d5c65159SKalle Valo 							 int *buf_id, gfp_t gfp)
2366d5c65159SKalle Valo {
2367d5c65159SKalle Valo 	struct sk_buff *skb;
2368d5c65159SKalle Valo 	dma_addr_t paddr;
2369d5c65159SKalle Valo 
2370d5c65159SKalle Valo 	skb = dev_alloc_skb(DP_RX_BUFFER_SIZE +
2371d5c65159SKalle Valo 			    DP_RX_BUFFER_ALIGN_SIZE);
2372d5c65159SKalle Valo 
2373d5c65159SKalle Valo 	if (!skb)
2374d5c65159SKalle Valo 		goto fail_alloc_skb;
2375d5c65159SKalle Valo 
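	/* Pull skb->data forward so that the buffer handed to the hardware
	 * starts on a DP_RX_BUFFER_ALIGN_SIZE boundary; the extra headroom
	 * for this was reserved in the allocation above.
	 */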
2376d5c65159SKalle Valo 	if (!IS_ALIGNED((unsigned long)skb->data,
2377d5c65159SKalle Valo 			DP_RX_BUFFER_ALIGN_SIZE)) {
2378d5c65159SKalle Valo 		skb_pull(skb, PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) -
2379d5c65159SKalle Valo 			 skb->data);
2380d5c65159SKalle Valo 	}
2381d5c65159SKalle Valo 
2382d5c65159SKalle Valo 	paddr = dma_map_single(ab->dev, skb->data,
2383d5c65159SKalle Valo 			       skb->len + skb_tailroom(skb),
2384d5c65159SKalle Valo 			       DMA_BIDIRECTIONAL);
2385d5c65159SKalle Valo 	if (unlikely(dma_mapping_error(ab->dev, paddr)))
2386d5c65159SKalle Valo 		goto fail_free_skb;
2387d5c65159SKalle Valo 
2388d5c65159SKalle Valo 	spin_lock_bh(&rx_ring->idr_lock);
2389d5c65159SKalle Valo 	*buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0,
2390d5c65159SKalle Valo 			    rx_ring->bufs_max, gfp);
2391d5c65159SKalle Valo 	spin_unlock_bh(&rx_ring->idr_lock);
2392d5c65159SKalle Valo 	if (*buf_id < 0)
2393d5c65159SKalle Valo 		goto fail_dma_unmap;
2394d5c65159SKalle Valo 
2395d5c65159SKalle Valo 	ATH11K_SKB_RXCB(skb)->paddr = paddr;
2396d5c65159SKalle Valo 	return skb;
2397d5c65159SKalle Valo 
2398d5c65159SKalle Valo fail_dma_unmap:
2399d5c65159SKalle Valo 	dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
2400d5c65159SKalle Valo 			 DMA_BIDIRECTIONAL);
2401d5c65159SKalle Valo fail_free_skb:
2402d5c65159SKalle Valo 	dev_kfree_skb_any(skb);
2403d5c65159SKalle Valo fail_alloc_skb:
2404d5c65159SKalle Valo 	return NULL;
2405d5c65159SKalle Valo }
2406d5c65159SKalle Valo 
2407d5c65159SKalle Valo int ath11k_dp_rx_mon_status_bufs_replenish(struct ath11k_base *ab, int mac_id,
2408d5c65159SKalle Valo 					   struct dp_rxdma_ring *rx_ring,
2409d5c65159SKalle Valo 					   int req_entries,
2410d5c65159SKalle Valo 					   enum hal_rx_buf_return_buf_manager mgr,
2411d5c65159SKalle Valo 					   gfp_t gfp)
2412d5c65159SKalle Valo {
2413d5c65159SKalle Valo 	struct hal_srng *srng;
2414d5c65159SKalle Valo 	u32 *desc;
2415d5c65159SKalle Valo 	struct sk_buff *skb;
2416d5c65159SKalle Valo 	int num_free;
2417d5c65159SKalle Valo 	int num_remain;
2418d5c65159SKalle Valo 	int buf_id;
2419d5c65159SKalle Valo 	u32 cookie;
2420d5c65159SKalle Valo 	dma_addr_t paddr;
2421d5c65159SKalle Valo 
2422d5c65159SKalle Valo 	req_entries = min(req_entries, rx_ring->bufs_max);
2423d5c65159SKalle Valo 
2424d5c65159SKalle Valo 	srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];
2425d5c65159SKalle Valo 
2426d5c65159SKalle Valo 	spin_lock_bh(&srng->lock);
2427d5c65159SKalle Valo 
2428d5c65159SKalle Valo 	ath11k_hal_srng_access_begin(ab, srng);
2429d5c65159SKalle Valo 
2430d5c65159SKalle Valo 	num_free = ath11k_hal_srng_src_num_free(ab, srng, true);
2431d5c65159SKalle Valo 
2432d5c65159SKalle Valo 	req_entries = min(num_free, req_entries);
2433d5c65159SKalle Valo 	num_remain = req_entries;
2434d5c65159SKalle Valo 
2435d5c65159SKalle Valo 	while (num_remain > 0) {
2436d5c65159SKalle Valo 		skb = ath11k_dp_rx_alloc_mon_status_buf(ab, rx_ring,
2437d5c65159SKalle Valo 							&buf_id, gfp);
2438d5c65159SKalle Valo 		if (!skb)
2439d5c65159SKalle Valo 			break;
2440d5c65159SKalle Valo 		paddr = ATH11K_SKB_RXCB(skb)->paddr;
2441d5c65159SKalle Valo 
2442d5c65159SKalle Valo 		desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
2443d5c65159SKalle Valo 		if (!desc)
2444d5c65159SKalle Valo 			goto fail_desc_get;
2445d5c65159SKalle Valo 
2446d5c65159SKalle Valo 		cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) |
2447d5c65159SKalle Valo 			 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);
2448d5c65159SKalle Valo 
2449d5c65159SKalle Valo 		num_remain--;
2450d5c65159SKalle Valo 
2451d5c65159SKalle Valo 		ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr);
2452d5c65159SKalle Valo 	}
2453d5c65159SKalle Valo 
2454d5c65159SKalle Valo 	ath11k_hal_srng_access_end(ab, srng);
2455d5c65159SKalle Valo 
2456d5c65159SKalle Valo 	spin_unlock_bh(&srng->lock);
2457d5c65159SKalle Valo 
2458d5c65159SKalle Valo 	return req_entries - num_remain;
2459d5c65159SKalle Valo 
2460d5c65159SKalle Valo fail_desc_get:
2461d5c65159SKalle Valo 	spin_lock_bh(&rx_ring->idr_lock);
2462d5c65159SKalle Valo 	idr_remove(&rx_ring->bufs_idr, buf_id);
2463d5c65159SKalle Valo 	spin_unlock_bh(&rx_ring->idr_lock);
2464d5c65159SKalle Valo 	dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
2465d5c65159SKalle Valo 			 DMA_BIDIRECTIONAL);
2466d5c65159SKalle Valo 	dev_kfree_skb_any(skb);
2467d5c65159SKalle Valo 	ath11k_hal_srng_access_end(ab, srng);
2468d5c65159SKalle Valo 	spin_unlock_bh(&srng->lock);
2469d5c65159SKalle Valo 
2470d5c65159SKalle Valo 	return req_entries - num_remain;
2471d5c65159SKalle Valo }
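
/* Illustrative sketch only (not part of the driver): how the RXDMA buffer
 * cookie used throughout this file is composed from the pdev (mac) id and
 * the IDR buffer id, and decomposed again on completion. The helper names
 * below are hypothetical and exist purely for illustration; the masks and
 * the FIELD_PREP()/FIELD_GET() usage mirror the code above.
 */
static inline u32 ath11k_example_rx_cookie(int mac_id, int buf_id)
{
	/* Pack pdev (mac) id and IDR buf id into one 32-bit cookie */
	return FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) |
	       FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);
}

static inline void ath11k_example_rx_cookie_parse(u32 cookie, int *mac_id,
						  int *buf_id)
{
	/* Recover the ids from a cookie reported back by the hardware */
	*mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, cookie);
	*buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, cookie);
}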
2472d5c65159SKalle Valo 
2473d5c65159SKalle Valo static int ath11k_dp_rx_reap_mon_status_ring(struct ath11k_base *ab, int mac_id,
2474d5c65159SKalle Valo 					     int *budget, struct sk_buff_head *skb_list)
2475d5c65159SKalle Valo {
2476d5c65159SKalle Valo 	struct ath11k *ar = ab->pdevs[mac_id].ar;
2477d5c65159SKalle Valo 	struct ath11k_pdev_dp *dp = &ar->dp;
2478d5c65159SKalle Valo 	struct dp_rxdma_ring *rx_ring = &dp->rx_mon_status_refill_ring;
2479d5c65159SKalle Valo 	struct hal_srng *srng;
2480d5c65159SKalle Valo 	void *rx_mon_status_desc;
2481d5c65159SKalle Valo 	struct sk_buff *skb;
2482d5c65159SKalle Valo 	struct ath11k_skb_rxcb *rxcb;
2483d5c65159SKalle Valo 	struct hal_tlv_hdr *tlv;
2484d5c65159SKalle Valo 	u32 cookie;
2485d5c65159SKalle Valo 	int buf_id;
2486d5c65159SKalle Valo 	dma_addr_t paddr;
2487d5c65159SKalle Valo 	u8 rbm;
2488d5c65159SKalle Valo 	int num_buffs_reaped = 0;
2489d5c65159SKalle Valo 
2490d5c65159SKalle Valo 	srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];
2491d5c65159SKalle Valo 
2492d5c65159SKalle Valo 	spin_lock_bh(&srng->lock);
2493d5c65159SKalle Valo 
2494d5c65159SKalle Valo 	ath11k_hal_srng_access_begin(ab, srng);
2495d5c65159SKalle Valo 	while (*budget) {
2496d5c65159SKalle Valo 		*budget -= 1;
2497d5c65159SKalle Valo 		rx_mon_status_desc =
2498d5c65159SKalle Valo 			ath11k_hal_srng_src_peek(ab, srng);
2499d5c65159SKalle Valo 		if (!rx_mon_status_desc)
2500d5c65159SKalle Valo 			break;
2501d5c65159SKalle Valo 
2502d5c65159SKalle Valo 		ath11k_hal_rx_buf_addr_info_get(rx_mon_status_desc, &paddr,
2503d5c65159SKalle Valo 						&cookie, &rbm);
2504d5c65159SKalle Valo 		if (paddr) {
2505d5c65159SKalle Valo 			buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, cookie);
2506d5c65159SKalle Valo 
2507d5c65159SKalle Valo 			spin_lock_bh(&rx_ring->idr_lock);
2508d5c65159SKalle Valo 			skb = idr_find(&rx_ring->bufs_idr, buf_id);
2509d5c65159SKalle Valo 			if (!skb) {
2510d5c65159SKalle Valo 				ath11k_warn(ab, "rx monitor status with invalid buf_id %d\n",
2511d5c65159SKalle Valo 					    buf_id);
2512d5c65159SKalle Valo 				spin_unlock_bh(&rx_ring->idr_lock);
2513d5c65159SKalle Valo 				continue;
2514d5c65159SKalle Valo 			}
2515d5c65159SKalle Valo 
2516d5c65159SKalle Valo 			idr_remove(&rx_ring->bufs_idr, buf_id);
2517d5c65159SKalle Valo 			spin_unlock_bh(&rx_ring->idr_lock);
2518d5c65159SKalle Valo 
2519d5c65159SKalle Valo 			rxcb = ATH11K_SKB_RXCB(skb);
2520d5c65159SKalle Valo 
2521d5c65159SKalle Valo 			dma_sync_single_for_cpu(ab->dev, rxcb->paddr,
2522d5c65159SKalle Valo 						skb->len + skb_tailroom(skb),
2523d5c65159SKalle Valo 						DMA_FROM_DEVICE);
2524d5c65159SKalle Valo 
2525d5c65159SKalle Valo 			dma_unmap_single(ab->dev, rxcb->paddr,
2526d5c65159SKalle Valo 					 skb->len + skb_tailroom(skb),
2527d5c65159SKalle Valo 					 DMA_BIDIRECTIONAL);
2528d5c65159SKalle Valo 
2529d5c65159SKalle Valo 			tlv = (struct hal_tlv_hdr *)skb->data;
2530d5c65159SKalle Valo 			if (FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl) !=
2531d5c65159SKalle Valo 					HAL_RX_STATUS_BUFFER_DONE) {
2532d5c65159SKalle Valo 				ath11k_hal_srng_src_get_next_entry(ab, srng);
2533d5c65159SKalle Valo 				continue;
2534d5c65159SKalle Valo 			}
2535d5c65159SKalle Valo 
2536d5c65159SKalle Valo 			__skb_queue_tail(skb_list, skb);
2537d5c65159SKalle Valo 		}
2538d5c65159SKalle Valo 
2539d5c65159SKalle Valo 		skb = ath11k_dp_rx_alloc_mon_status_buf(ab, rx_ring,
2540d5c65159SKalle Valo 							&buf_id, GFP_ATOMIC);
2541d5c65159SKalle Valo 
2542d5c65159SKalle Valo 		if (!skb) {
2543d5c65159SKalle Valo 			ath11k_hal_rx_buf_addr_info_set(rx_mon_status_desc, 0, 0,
2544d5c65159SKalle Valo 							HAL_RX_BUF_RBM_SW3_BM);
2545d5c65159SKalle Valo 			num_buffs_reaped++;
2546d5c65159SKalle Valo 			break;
2547d5c65159SKalle Valo 		}
2548d5c65159SKalle Valo 		rxcb = ATH11K_SKB_RXCB(skb);
2549d5c65159SKalle Valo 
2550d5c65159SKalle Valo 		cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) |
2551d5c65159SKalle Valo 			 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);
2552d5c65159SKalle Valo 
2553d5c65159SKalle Valo 		ath11k_hal_rx_buf_addr_info_set(rx_mon_status_desc, rxcb->paddr,
2554d5c65159SKalle Valo 						cookie, HAL_RX_BUF_RBM_SW3_BM);
2555d5c65159SKalle Valo 		ath11k_hal_srng_src_get_next_entry(ab, srng);
2556d5c65159SKalle Valo 		num_buffs_reaped++;
2557d5c65159SKalle Valo 	}
2558d5c65159SKalle Valo 	ath11k_hal_srng_access_end(ab, srng);
2559d5c65159SKalle Valo 	spin_unlock_bh(&srng->lock);
2560d5c65159SKalle Valo 
2561d5c65159SKalle Valo 	return num_buffs_reaped;
2562d5c65159SKalle Valo }
2563d5c65159SKalle Valo 
2564d5c65159SKalle Valo int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id,
2565d5c65159SKalle Valo 				    struct napi_struct *napi, int budget)
2566d5c65159SKalle Valo {
2567d5c65159SKalle Valo 	struct ath11k *ar = ab->pdevs[mac_id].ar;
2568d5c65159SKalle Valo 	enum hal_rx_mon_status hal_status;
2569d5c65159SKalle Valo 	struct sk_buff *skb;
2570d5c65159SKalle Valo 	struct sk_buff_head skb_list;
2571d5c65159SKalle Valo 	struct hal_rx_mon_ppdu_info ppdu_info;
2572d5c65159SKalle Valo 	struct ath11k_peer *peer;
2573d5c65159SKalle Valo 	struct ath11k_sta *arsta;
2574d5c65159SKalle Valo 	int num_buffs_reaped = 0;
2575d5c65159SKalle Valo 
2576d5c65159SKalle Valo 	__skb_queue_head_init(&skb_list);
2577d5c65159SKalle Valo 
2578d5c65159SKalle Valo 	num_buffs_reaped = ath11k_dp_rx_reap_mon_status_ring(ab, mac_id, &budget,
2579d5c65159SKalle Valo 							     &skb_list);
2580d5c65159SKalle Valo 	if (!num_buffs_reaped)
2581d5c65159SKalle Valo 		goto exit;
2582d5c65159SKalle Valo 
2583d5c65159SKalle Valo 	while ((skb = __skb_dequeue(&skb_list))) {
2584d5c65159SKalle Valo 		memset(&ppdu_info, 0, sizeof(ppdu_info));
2585d5c65159SKalle Valo 		ppdu_info.peer_id = HAL_INVALID_PEERID;
2586d5c65159SKalle Valo 
2587d5c65159SKalle Valo 		if (ath11k_debug_is_pktlog_rx_stats_enabled(ar))
2588d5c65159SKalle Valo 			trace_ath11k_htt_rxdesc(ar, skb->data, DP_RX_BUFFER_SIZE);
2589d5c65159SKalle Valo 
2590d5c65159SKalle Valo 		hal_status = ath11k_hal_rx_parse_mon_status(ab, &ppdu_info, skb);
2591d5c65159SKalle Valo 
2592d5c65159SKalle Valo 		if (ppdu_info.peer_id == HAL_INVALID_PEERID ||
2593d5c65159SKalle Valo 		    hal_status != HAL_RX_MON_STATUS_PPDU_DONE) {
2594d5c65159SKalle Valo 			dev_kfree_skb_any(skb);
2595d5c65159SKalle Valo 			continue;
2596d5c65159SKalle Valo 		}
2597d5c65159SKalle Valo 
2598d5c65159SKalle Valo 		rcu_read_lock();
2599d5c65159SKalle Valo 		spin_lock_bh(&ab->base_lock);
2600d5c65159SKalle Valo 		peer = ath11k_peer_find_by_id(ab, ppdu_info.peer_id);
2601d5c65159SKalle Valo 
2602d5c65159SKalle Valo 		if (!peer || !peer->sta) {
26032dab7d22SJohn Crispin 			ath11k_dbg(ab, ATH11K_DBG_DATA,
26042dab7d22SJohn Crispin 				   "failed to find the peer with peer_id %d\n",
2605d5c65159SKalle Valo 				   ppdu_info.peer_id);
2606d5c65159SKalle Valo 			spin_unlock_bh(&ab->base_lock);
2607d5c65159SKalle Valo 			rcu_read_unlock();
2608d5c65159SKalle Valo 			dev_kfree_skb_any(skb);
2609d5c65159SKalle Valo 			continue;
2610d5c65159SKalle Valo 		}
2611d5c65159SKalle Valo 
2612d5c65159SKalle Valo 		arsta = (struct ath11k_sta *)peer->sta->drv_priv;
2613d5c65159SKalle Valo 		ath11k_dp_rx_update_peer_stats(arsta, &ppdu_info);
2614d5c65159SKalle Valo 
2615d5c65159SKalle Valo 		if (ath11k_debug_is_pktlog_peer_valid(ar, peer->addr))
2616d5c65159SKalle Valo 			trace_ath11k_htt_rxdesc(ar, skb->data, DP_RX_BUFFER_SIZE);
2617d5c65159SKalle Valo 
2618d5c65159SKalle Valo 		spin_unlock_bh(&ab->base_lock);
2619d5c65159SKalle Valo 		rcu_read_unlock();
2620d5c65159SKalle Valo 
2621d5c65159SKalle Valo 		dev_kfree_skb_any(skb);
2622d5c65159SKalle Valo 	}
2623d5c65159SKalle Valo exit:
2624d5c65159SKalle Valo 	return num_buffs_reaped;
2625d5c65159SKalle Valo }
2626d5c65159SKalle Valo 
2627d5c65159SKalle Valo static int ath11k_dp_rx_link_desc_return(struct ath11k_base *ab,
2628d5c65159SKalle Valo 					 u32 *link_desc,
2629d5c65159SKalle Valo 					 enum hal_wbm_rel_bm_act action)
2630d5c65159SKalle Valo {
2631d5c65159SKalle Valo 	struct ath11k_dp *dp = &ab->dp;
2632d5c65159SKalle Valo 	struct hal_srng *srng;
2633d5c65159SKalle Valo 	u32 *desc;
2634d5c65159SKalle Valo 	int ret = 0;
2635d5c65159SKalle Valo 
2636d5c65159SKalle Valo 	srng = &ab->hal.srng_list[dp->wbm_desc_rel_ring.ring_id];
2637d5c65159SKalle Valo 
2638d5c65159SKalle Valo 	spin_lock_bh(&srng->lock);
2639d5c65159SKalle Valo 
2640d5c65159SKalle Valo 	ath11k_hal_srng_access_begin(ab, srng);
2641d5c65159SKalle Valo 
2642d5c65159SKalle Valo 	desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
2643d5c65159SKalle Valo 	if (!desc) {
2644d5c65159SKalle Valo 		ret = -ENOBUFS;
2645d5c65159SKalle Valo 		goto exit;
2646d5c65159SKalle Valo 	}
2647d5c65159SKalle Valo 
2648d5c65159SKalle Valo 	ath11k_hal_rx_msdu_link_desc_set(ab, (void *)desc, (void *)link_desc,
2649d5c65159SKalle Valo 					 action);
2650d5c65159SKalle Valo 
2651d5c65159SKalle Valo exit:
2652d5c65159SKalle Valo 	ath11k_hal_srng_access_end(ab, srng);
2653d5c65159SKalle Valo 
2654d5c65159SKalle Valo 	spin_unlock_bh(&srng->lock);
2655d5c65159SKalle Valo 
2656d5c65159SKalle Valo 	return ret;
2657d5c65159SKalle Valo }
2658d5c65159SKalle Valo 
2659d5c65159SKalle Valo static void ath11k_dp_rx_frag_h_mpdu(struct ath11k *ar,
2660d5c65159SKalle Valo 				     struct sk_buff *msdu,
2661d5c65159SKalle Valo 				     struct hal_rx_desc *rx_desc,
2662d5c65159SKalle Valo 				     struct ieee80211_rx_status *rx_status)
2663d5c65159SKalle Valo {
2664d5c65159SKalle Valo 	u8 rx_channel;
2665d5c65159SKalle Valo 	enum hal_encrypt_type enctype;
2666d5c65159SKalle Valo 	bool is_decrypted;
2667d5c65159SKalle Valo 	u32 err_bitmap;
2668d5c65159SKalle Valo 
2669d5c65159SKalle Valo 	is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_desc);
2670d5c65159SKalle Valo 	enctype = ath11k_dp_rx_h_mpdu_start_enctype(rx_desc);
2671d5c65159SKalle Valo 	err_bitmap = ath11k_dp_rx_h_attn_mpdu_err(rx_desc);
2672d5c65159SKalle Valo 
2673d5c65159SKalle Valo 	if (err_bitmap & DP_RX_MPDU_ERR_FCS)
2674d5c65159SKalle Valo 		rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
2675d5c65159SKalle Valo 
2676d5c65159SKalle Valo 	if (err_bitmap & DP_RX_MPDU_ERR_TKIP_MIC)
2677d5c65159SKalle Valo 		rx_status->flag |= RX_FLAG_MMIC_ERROR;
2678d5c65159SKalle Valo 
2679d5c65159SKalle Valo 	rx_status->encoding = RX_ENC_LEGACY;
2680d5c65159SKalle Valo 	rx_status->bw = RATE_INFO_BW_20;
2681d5c65159SKalle Valo 
2682d5c65159SKalle Valo 	rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
2683d5c65159SKalle Valo 
2684d5c65159SKalle Valo 	rx_channel = ath11k_dp_rx_h_msdu_start_freq(rx_desc);
2685d5c65159SKalle Valo 
2686d5c65159SKalle Valo 	if (rx_channel >= 1 && rx_channel <= 14) {
2687d5c65159SKalle Valo 		rx_status->band = NL80211_BAND_2GHZ;
2688d5c65159SKalle Valo 	} else if (rx_channel >= 36 && rx_channel <= 173) {
2689d5c65159SKalle Valo 		rx_status->band = NL80211_BAND_5GHZ;
2690d5c65159SKalle Valo 	} else {
2691d5c65159SKalle Valo 		ath11k_warn(ar->ab, "Unsupported Channel info received %d\n",
2692d5c65159SKalle Valo 			    rx_channel);
2693d5c65159SKalle Valo 		return;
2694d5c65159SKalle Valo 	}
2695d5c65159SKalle Valo 
2696d5c65159SKalle Valo 	rx_status->freq = ieee80211_channel_to_frequency(rx_channel,
2697d5c65159SKalle Valo 							 rx_status->band);
2698d5c65159SKalle Valo 	ath11k_dp_rx_h_rate(ar, rx_desc, rx_status);
2699d5c65159SKalle Valo 
2700d5c65159SKalle Valo 	/* Rx fragments are received in raw mode */
2701d5c65159SKalle Valo 	skb_trim(msdu, msdu->len - FCS_LEN);
2702d5c65159SKalle Valo 
2703d5c65159SKalle Valo 	if (is_decrypted) {
2704d5c65159SKalle Valo 		rx_status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MIC_STRIPPED;
2705d5c65159SKalle Valo 		skb_trim(msdu, msdu->len -
2706d5c65159SKalle Valo 			 ath11k_dp_rx_crypto_mic_len(ar, enctype));
2707d5c65159SKalle Valo 	}
2708d5c65159SKalle Valo }
2709d5c65159SKalle Valo 
2710d5c65159SKalle Valo static int
2711d5c65159SKalle Valo ath11k_dp_process_rx_err_buf(struct ath11k *ar, struct napi_struct *napi,
2712d5c65159SKalle Valo 			     int buf_id, bool frag)
2713d5c65159SKalle Valo {
2714d5c65159SKalle Valo 	struct ath11k_pdev_dp *dp = &ar->dp;
2715d5c65159SKalle Valo 	struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
2716d5c65159SKalle Valo 	struct ieee80211_rx_status rx_status = {0};
2717d5c65159SKalle Valo 	struct sk_buff *msdu;
2718d5c65159SKalle Valo 	struct ath11k_skb_rxcb *rxcb;
2719d5c65159SKalle Valo 	struct ieee80211_rx_status *status;
2720d5c65159SKalle Valo 	struct hal_rx_desc *rx_desc;
2721d5c65159SKalle Valo 	u16 msdu_len;
2722d5c65159SKalle Valo 
2723d5c65159SKalle Valo 	spin_lock_bh(&rx_ring->idr_lock);
2724d5c65159SKalle Valo 	msdu = idr_find(&rx_ring->bufs_idr, buf_id);
2725d5c65159SKalle Valo 	if (!msdu) {
2726d5c65159SKalle Valo 		ath11k_warn(ar->ab, "rx err buf with invalid buf_id %d\n",
2727d5c65159SKalle Valo 			    buf_id);
2728d5c65159SKalle Valo 		spin_unlock_bh(&rx_ring->idr_lock);
2729d5c65159SKalle Valo 		return -EINVAL;
2730d5c65159SKalle Valo 	}
2731d5c65159SKalle Valo 
2732d5c65159SKalle Valo 	idr_remove(&rx_ring->bufs_idr, buf_id);
2733d5c65159SKalle Valo 	spin_unlock_bh(&rx_ring->idr_lock);
2734d5c65159SKalle Valo 
2735d5c65159SKalle Valo 	rxcb = ATH11K_SKB_RXCB(msdu);
2736d5c65159SKalle Valo 	dma_unmap_single(ar->ab->dev, rxcb->paddr,
2737d5c65159SKalle Valo 			 msdu->len + skb_tailroom(msdu),
2738d5c65159SKalle Valo 			 DMA_FROM_DEVICE);
2739d5c65159SKalle Valo 
2740d5c65159SKalle Valo 	if (!frag) {
2741d5c65159SKalle Valo 		/* Only rx fragments are processed below; MSDUs
2742d5c65159SKalle Valo 		 * indicated due to errors are dropped.
2743d5c65159SKalle Valo 		 */
2744d5c65159SKalle Valo 		dev_kfree_skb_any(msdu);
2745d5c65159SKalle Valo 		return 0;
2746d5c65159SKalle Valo 	}
2747d5c65159SKalle Valo 
2748d5c65159SKalle Valo 	rcu_read_lock();
2749d5c65159SKalle Valo 	if (!rcu_dereference(ar->ab->pdevs_active[ar->pdev_idx])) {
2750d5c65159SKalle Valo 		dev_kfree_skb_any(msdu);
2751d5c65159SKalle Valo 		goto exit;
2752d5c65159SKalle Valo 	}
2753d5c65159SKalle Valo 
2754d5c65159SKalle Valo 	if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) {
2755d5c65159SKalle Valo 		dev_kfree_skb_any(msdu);
2756d5c65159SKalle Valo 		goto exit;
2757d5c65159SKalle Valo 	}
2758d5c65159SKalle Valo 
2759d5c65159SKalle Valo 	rx_desc = (struct hal_rx_desc *)msdu->data;
2760d5c65159SKalle Valo 	msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(rx_desc);
2761d5c65159SKalle Valo 	skb_put(msdu, HAL_RX_DESC_SIZE + msdu_len);
2762d5c65159SKalle Valo 	skb_pull(msdu, HAL_RX_DESC_SIZE);
2763d5c65159SKalle Valo 
2764d5c65159SKalle Valo 	ath11k_dp_rx_frag_h_mpdu(ar, msdu, rx_desc, &rx_status);
2765d5c65159SKalle Valo 
2766d5c65159SKalle Valo 	status = IEEE80211_SKB_RXCB(msdu);
2767d5c65159SKalle Valo 
2768d5c65159SKalle Valo 	*status = rx_status;
2769d5c65159SKalle Valo 
2770d5c65159SKalle Valo 	ath11k_dp_rx_deliver_msdu(ar, napi, msdu);
2771d5c65159SKalle Valo 
2772d5c65159SKalle Valo exit:
2773d5c65159SKalle Valo 	rcu_read_unlock();
2774d5c65159SKalle Valo 	return 0;
2775d5c65159SKalle Valo }
2776d5c65159SKalle Valo 
2777d5c65159SKalle Valo int ath11k_dp_process_rx_err(struct ath11k_base *ab, struct napi_struct *napi,
2778d5c65159SKalle Valo 			     int budget)
2779d5c65159SKalle Valo {
2780293cb583SJohn Crispin 	u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC];
2781d5c65159SKalle Valo 	struct dp_link_desc_bank *link_desc_banks;
2782d5c65159SKalle Valo 	enum hal_rx_buf_return_buf_manager rbm;
2783d5c65159SKalle Valo 	int tot_n_bufs_reaped, quota, ret, i;
2784d5c65159SKalle Valo 	int n_bufs_reaped[MAX_RADIOS] = {0};
2785d5c65159SKalle Valo 	struct dp_rxdma_ring *rx_ring;
2786d5c65159SKalle Valo 	struct dp_srng *reo_except;
2787d5c65159SKalle Valo 	u32 desc_bank, num_msdus;
2788d5c65159SKalle Valo 	struct hal_srng *srng;
2789d5c65159SKalle Valo 	struct ath11k_dp *dp;
2790d5c65159SKalle Valo 	void *link_desc_va;
2791d5c65159SKalle Valo 	int buf_id, mac_id;
2792d5c65159SKalle Valo 	struct ath11k *ar;
2793d5c65159SKalle Valo 	dma_addr_t paddr;
2794d5c65159SKalle Valo 	u32 *desc;
2795d5c65159SKalle Valo 	bool is_frag;
2796d5c65159SKalle Valo 
2797d5c65159SKalle Valo 	tot_n_bufs_reaped = 0;
2798d5c65159SKalle Valo 	quota = budget;
2799d5c65159SKalle Valo 
2800d5c65159SKalle Valo 	dp = &ab->dp;
2801d5c65159SKalle Valo 	reo_except = &dp->reo_except_ring;
2802d5c65159SKalle Valo 	link_desc_banks = dp->link_desc_banks;
2803d5c65159SKalle Valo 
2804d5c65159SKalle Valo 	srng = &ab->hal.srng_list[reo_except->ring_id];
2805d5c65159SKalle Valo 
2806d5c65159SKalle Valo 	spin_lock_bh(&srng->lock);
2807d5c65159SKalle Valo 
2808d5c65159SKalle Valo 	ath11k_hal_srng_access_begin(ab, srng);
2809d5c65159SKalle Valo 
2810d5c65159SKalle Valo 	while (budget &&
2811d5c65159SKalle Valo 	       (desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
2812293cb583SJohn Crispin 		struct hal_reo_dest_ring *reo_desc = (struct hal_reo_dest_ring *)desc;
2813293cb583SJohn Crispin 
2814d5c65159SKalle Valo 		ab->soc_stats.err_ring_pkts++;
2815d5c65159SKalle Valo 		ret = ath11k_hal_desc_reo_parse_err(ab, desc, &paddr,
2816d5c65159SKalle Valo 						    &desc_bank);
2817d5c65159SKalle Valo 		if (ret) {
2818d5c65159SKalle Valo 			ath11k_warn(ab, "failed to parse error reo desc %d\n",
2819d5c65159SKalle Valo 				    ret);
2820d5c65159SKalle Valo 			continue;
2821d5c65159SKalle Valo 		}
2822d5c65159SKalle Valo 		link_desc_va = link_desc_banks[desc_bank].vaddr +
2823d5c65159SKalle Valo 			       (paddr - link_desc_banks[desc_bank].paddr);
2824293cb583SJohn Crispin 		ath11k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, msdu_cookies,
2825d5c65159SKalle Valo 						 &rbm);
2826d5c65159SKalle Valo 		if (rbm != HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST &&
2827d5c65159SKalle Valo 		    rbm != HAL_RX_BUF_RBM_SW3_BM) {
2828d5c65159SKalle Valo 			ab->soc_stats.invalid_rbm++;
2829d5c65159SKalle Valo 			ath11k_warn(ab, "invalid return buffer manager %d\n", rbm);
2830d5c65159SKalle Valo 			ath11k_dp_rx_link_desc_return(ab, desc,
2831d5c65159SKalle Valo 						      HAL_WBM_REL_BM_ACT_REL_MSDU);
2832d5c65159SKalle Valo 			continue;
2833d5c65159SKalle Valo 		}
2834d5c65159SKalle Valo 
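		/* Record whether this MPDU is a fragment before the link
		 * descriptor is returned; only fragments are forwarded to
		 * mac80211 by ath11k_dp_process_rx_err_buf(), everything
		 * else is dropped there.
		 */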
2835293cb583SJohn Crispin 		is_frag = !!(reo_desc->rx_mpdu_info.info0 & RX_MPDU_DESC_INFO0_FRAG_FLAG);
2836d5c65159SKalle Valo 
2837d5c65159SKalle Valo 		/* Return the link desc to the wbm idle list */
2838d5c65159SKalle Valo 		ath11k_dp_rx_link_desc_return(ab, desc,
2839d5c65159SKalle Valo 					      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
2840d5c65159SKalle Valo 
2841d5c65159SKalle Valo 		for (i = 0; i < num_msdus; i++) {
2842d5c65159SKalle Valo 			buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
2843293cb583SJohn Crispin 					   msdu_cookies[i]);
2844d5c65159SKalle Valo 
2845d5c65159SKalle Valo 			mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID,
2846293cb583SJohn Crispin 					   msdu_cookies[i]);
2847d5c65159SKalle Valo 
2848d5c65159SKalle Valo 			ar = ab->pdevs[mac_id].ar;
2849d5c65159SKalle Valo 
2850d5c65159SKalle Valo 			if (!ath11k_dp_process_rx_err_buf(ar, napi, buf_id,
2851d5c65159SKalle Valo 							  is_frag)) {
2852d5c65159SKalle Valo 				n_bufs_reaped[mac_id]++;
2853d5c65159SKalle Valo 				tot_n_bufs_reaped++;
2854d5c65159SKalle Valo 			}
2855d5c65159SKalle Valo 		}
2856d5c65159SKalle Valo 
2857d5c65159SKalle Valo 		if (tot_n_bufs_reaped >= quota) {
2858d5c65159SKalle Valo 			tot_n_bufs_reaped = quota;
2859d5c65159SKalle Valo 			goto exit;
2860d5c65159SKalle Valo 		}
2861d5c65159SKalle Valo 
2862d5c65159SKalle Valo 		budget = quota - tot_n_bufs_reaped;
2863d5c65159SKalle Valo 	}
2864d5c65159SKalle Valo 
2865d5c65159SKalle Valo exit:
2866d5c65159SKalle Valo 	ath11k_hal_srng_access_end(ab, srng);
2867d5c65159SKalle Valo 
2868d5c65159SKalle Valo 	spin_unlock_bh(&srng->lock);
2869d5c65159SKalle Valo 
2870d5c65159SKalle Valo 	for (i = 0; i < ab->num_radios; i++) {
2871d5c65159SKalle Valo 		if (!n_bufs_reaped[i])
2872d5c65159SKalle Valo 			continue;
2873d5c65159SKalle Valo 
2874d5c65159SKalle Valo 		ar = ab->pdevs[i].ar;
2875d5c65159SKalle Valo 		rx_ring = &ar->dp.rx_refill_buf_ring;
2876d5c65159SKalle Valo 
2877d5c65159SKalle Valo 		ath11k_dp_rxbufs_replenish(ab, i, rx_ring, n_bufs_reaped[i],
2878d5c65159SKalle Valo 					   HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC);
2879d5c65159SKalle Valo 	}
2880d5c65159SKalle Valo 
2881d5c65159SKalle Valo 	return tot_n_bufs_reaped;
2882d5c65159SKalle Valo }
2883d5c65159SKalle Valo 
2884d5c65159SKalle Valo static void ath11k_dp_rx_null_q_desc_sg_drop(struct ath11k *ar,
2885d5c65159SKalle Valo 					     int msdu_len,
2886d5c65159SKalle Valo 					     struct sk_buff_head *msdu_list)
2887d5c65159SKalle Valo {
2888d5c65159SKalle Valo 	struct sk_buff *skb, *tmp;
2889d5c65159SKalle Valo 	struct ath11k_skb_rxcb *rxcb;
2890d5c65159SKalle Valo 	int n_buffs;
2891d5c65159SKalle Valo 
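	/* Each rx buffer also holds HAL_RX_DESC_SIZE bytes of descriptor, so
	 * only DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE bytes of MSDU payload fit
	 * per buffer; compute how many buffers the remaining length spans.
	 */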
2892d5c65159SKalle Valo 	n_buffs = DIV_ROUND_UP(msdu_len,
2893d5c65159SKalle Valo 			       (DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE));
2894d5c65159SKalle Valo 
2895d5c65159SKalle Valo 	skb_queue_walk_safe(msdu_list, skb, tmp) {
2896d5c65159SKalle Valo 		rxcb = ATH11K_SKB_RXCB(skb);
2897d5c65159SKalle Valo 		if (rxcb->err_rel_src == HAL_WBM_REL_SRC_MODULE_REO &&
2898d5c65159SKalle Valo 		    rxcb->err_code == HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO) {
2899d5c65159SKalle Valo 			if (!n_buffs)
2900d5c65159SKalle Valo 				break;
2901d5c65159SKalle Valo 			__skb_unlink(skb, msdu_list);
2902d5c65159SKalle Valo 			dev_kfree_skb_any(skb);
2903d5c65159SKalle Valo 			n_buffs--;
2904d5c65159SKalle Valo 		}
2905d5c65159SKalle Valo 	}
2906d5c65159SKalle Valo }
2907d5c65159SKalle Valo 
2908d5c65159SKalle Valo static int ath11k_dp_rx_h_null_q_desc(struct ath11k *ar, struct sk_buff *msdu,
2909d5c65159SKalle Valo 				      struct ieee80211_rx_status *status,
2910d5c65159SKalle Valo 				      struct sk_buff_head *msdu_list)
2911d5c65159SKalle Valo {
2912d5c65159SKalle Valo 	struct sk_buff_head amsdu_list;
2913d5c65159SKalle Valo 	u16 msdu_len;
2914d5c65159SKalle Valo 	struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
2915d5c65159SKalle Valo 	u8 l3pad_bytes;
2916d5c65159SKalle Valo 	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
2917d5c65159SKalle Valo 
2918d5c65159SKalle Valo 	msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(desc);
2919d5c65159SKalle Valo 
2920d5c65159SKalle Valo 	if ((msdu_len + HAL_RX_DESC_SIZE) > DP_RX_BUFFER_SIZE) {
2921d5c65159SKalle Valo 		/* The first buffer will be freed by the caller, so deduct its length */
2922d5c65159SKalle Valo 		msdu_len = msdu_len - (DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE);
2923d5c65159SKalle Valo 		ath11k_dp_rx_null_q_desc_sg_drop(ar, msdu_len, msdu_list);
2924d5c65159SKalle Valo 		return -EINVAL;
2925d5c65159SKalle Valo 	}
2926d5c65159SKalle Valo 
2927d5c65159SKalle Valo 	if (!ath11k_dp_rx_h_attn_msdu_done(desc)) {
2928d5c65159SKalle Valo 		ath11k_warn(ar->ab,
2929d5c65159SKalle Valo 			    "msdu_done bit not set in null_q_desc processing\n");
2930d5c65159SKalle Valo 		__skb_queue_purge(msdu_list);
2931d5c65159SKalle Valo 		return -EIO;
2932d5c65159SKalle Valo 	}
2933d5c65159SKalle Valo 
2934d5c65159SKalle Valo 	/* Handle NULL queue descriptor violations arising out of a missing
2935d5c65159SKalle Valo 	 * REO queue for a given peer or a given TID. This typically
2936d5c65159SKalle Valo 	 * happens if a packet is received on a QoS-enabled TID before the
2937d5c65159SKalle Valo 	 * ADDBA negotiation for that TID, which is when the TID queue is set up.
2938d5c65159SKalle Valo 	 * It may also happen for MC/BC frames if they are not routed to the
2939d5c65159SKalle Valo 	 * non-QoS TID queue, in the absence of any other default TID queue.
2940d5c65159SKalle Valo 	 * This error can show up in both the REO destination and WBM release rings.
2941d5c65159SKalle Valo 	 */
2942d5c65159SKalle Valo 
2943d5c65159SKalle Valo 	__skb_queue_head_init(&amsdu_list);
2944d5c65159SKalle Valo 
2945d5c65159SKalle Valo 	rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(desc);
2946d5c65159SKalle Valo 	rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(desc);
2947d5c65159SKalle Valo 
2948d5c65159SKalle Valo 	l3pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(desc);
2949d5c65159SKalle Valo 
2950d5c65159SKalle Valo 	if ((HAL_RX_DESC_SIZE + l3pad_bytes + msdu_len) > DP_RX_BUFFER_SIZE)
2951d5c65159SKalle Valo 		return -EINVAL;
2952d5c65159SKalle Valo 
2953d5c65159SKalle Valo 	skb_put(msdu, HAL_RX_DESC_SIZE + l3pad_bytes + msdu_len);
2954d5c65159SKalle Valo 	skb_pull(msdu, HAL_RX_DESC_SIZE + l3pad_bytes);
2955d5c65159SKalle Valo 
2956d5c65159SKalle Valo 	ath11k_dp_rx_h_ppdu(ar, desc, status);
2957d5c65159SKalle Valo 
2958d5c65159SKalle Valo 	__skb_queue_tail(&amsdu_list, msdu);
2959d5c65159SKalle Valo 
2960d5c65159SKalle Valo 	ath11k_dp_rx_h_mpdu(ar, &amsdu_list, desc, status);
2961d5c65159SKalle Valo 
2962d5c65159SKalle Valo 	/* Note that the caller still has access to the msdu and will complete
2963d5c65159SKalle Valo 	 * rx with mac80211, so there is no need to clean up amsdu_list here.
2964d5c65159SKalle Valo 	 */
2965d5c65159SKalle Valo 
2966d5c65159SKalle Valo 	return 0;
2967d5c65159SKalle Valo }
2968d5c65159SKalle Valo 
2969d5c65159SKalle Valo static bool ath11k_dp_rx_h_reo_err(struct ath11k *ar, struct sk_buff *msdu,
2970d5c65159SKalle Valo 				   struct ieee80211_rx_status *status,
2971d5c65159SKalle Valo 				   struct sk_buff_head *msdu_list)
2972d5c65159SKalle Valo {
2973d5c65159SKalle Valo 	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
2974d5c65159SKalle Valo 	bool drop = false;
2975d5c65159SKalle Valo 
2976d5c65159SKalle Valo 	ar->ab->soc_stats.reo_error[rxcb->err_code]++;
2977d5c65159SKalle Valo 
2978d5c65159SKalle Valo 	switch (rxcb->err_code) {
2979d5c65159SKalle Valo 	case HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO:
2980d5c65159SKalle Valo 		if (ath11k_dp_rx_h_null_q_desc(ar, msdu, status, msdu_list))
2981d5c65159SKalle Valo 			drop = true;
2982d5c65159SKalle Valo 		break;
2983d5c65159SKalle Valo 	default:
2984d5c65159SKalle Valo 		/* TODO: Review other errors and report them to mac80211
2985d5c65159SKalle Valo 		 * as appropriate.
2986d5c65159SKalle Valo 		 */
2987d5c65159SKalle Valo 		drop = true;
2988d5c65159SKalle Valo 		break;
2989d5c65159SKalle Valo 	}
2990d5c65159SKalle Valo 
2991d5c65159SKalle Valo 	return drop;
2992d5c65159SKalle Valo }
2993d5c65159SKalle Valo 
2994d5c65159SKalle Valo static void ath11k_dp_rx_h_tkip_mic_err(struct ath11k *ar, struct sk_buff *msdu,
2995d5c65159SKalle Valo 					struct ieee80211_rx_status *status)
2996d5c65159SKalle Valo {
2997d5c65159SKalle Valo 	u16 msdu_len;
2998d5c65159SKalle Valo 	struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
2999d5c65159SKalle Valo 	u8 l3pad_bytes;
3000d5c65159SKalle Valo 	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
3001d5c65159SKalle Valo 
3002d5c65159SKalle Valo 	rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(desc);
3003d5c65159SKalle Valo 	rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(desc);
3004d5c65159SKalle Valo 
3005d5c65159SKalle Valo 	l3pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(desc);
3006d5c65159SKalle Valo 	msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(desc);
3007d5c65159SKalle Valo 	skb_put(msdu, HAL_RX_DESC_SIZE + l3pad_bytes + msdu_len);
3008d5c65159SKalle Valo 	skb_pull(msdu, HAL_RX_DESC_SIZE + l3pad_bytes);
3009d5c65159SKalle Valo 
3010d5c65159SKalle Valo 	ath11k_dp_rx_h_ppdu(ar, desc, status);
3011d5c65159SKalle Valo 
3012d5c65159SKalle Valo 	status->flag |= (RX_FLAG_MMIC_STRIPPED | RX_FLAG_MMIC_ERROR |
3013d5c65159SKalle Valo 			 RX_FLAG_DECRYPTED);
3014d5c65159SKalle Valo 
3015d5c65159SKalle Valo 	ath11k_dp_rx_h_undecap(ar, msdu, desc,
3016d5c65159SKalle Valo 			       HAL_ENCRYPT_TYPE_TKIP_MIC, status, false);
3017d5c65159SKalle Valo }
3018d5c65159SKalle Valo 
3019d5c65159SKalle Valo static bool ath11k_dp_rx_h_rxdma_err(struct ath11k *ar,  struct sk_buff *msdu,
3020d5c65159SKalle Valo 				     struct ieee80211_rx_status *status)
3021d5c65159SKalle Valo {
3022d5c65159SKalle Valo 	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
3023d5c65159SKalle Valo 	bool drop = false;
3024d5c65159SKalle Valo 
3025d5c65159SKalle Valo 	ar->ab->soc_stats.rxdma_error[rxcb->err_code]++;
3026d5c65159SKalle Valo 
3027d5c65159SKalle Valo 	switch (rxcb->err_code) {
3028d5c65159SKalle Valo 	case HAL_REO_ENTR_RING_RXDMA_ECODE_TKIP_MIC_ERR:
3029d5c65159SKalle Valo 		ath11k_dp_rx_h_tkip_mic_err(ar, msdu, status);
3030d5c65159SKalle Valo 		break;
3031d5c65159SKalle Valo 	default:
3032d5c65159SKalle Valo 		/* TODO: Review the other rxdma error codes to check if anything
3033d5c65159SKalle Valo 		 * is worth reporting to mac80211.
3034d5c65159SKalle Valo 		 */
3035d5c65159SKalle Valo 		drop = true;
3036d5c65159SKalle Valo 		break;
3037d5c65159SKalle Valo 	}
3038d5c65159SKalle Valo 
3039d5c65159SKalle Valo 	return drop;
3040d5c65159SKalle Valo }
3041d5c65159SKalle Valo 
3042d5c65159SKalle Valo static void ath11k_dp_rx_wbm_err(struct ath11k *ar,
3043d5c65159SKalle Valo 				 struct napi_struct *napi,
3044d5c65159SKalle Valo 				 struct sk_buff *msdu,
3045d5c65159SKalle Valo 				 struct sk_buff_head *msdu_list)
3046d5c65159SKalle Valo {
3047d5c65159SKalle Valo 	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
3048d5c65159SKalle Valo 	struct ieee80211_rx_status rxs = {0};
3049d5c65159SKalle Valo 	struct ieee80211_rx_status *status;
3050d5c65159SKalle Valo 	bool drop = true;
3051d5c65159SKalle Valo 
3052d5c65159SKalle Valo 	switch (rxcb->err_rel_src) {
3053d5c65159SKalle Valo 	case HAL_WBM_REL_SRC_MODULE_REO:
3054d5c65159SKalle Valo 		drop = ath11k_dp_rx_h_reo_err(ar, msdu, &rxs, msdu_list);
3055d5c65159SKalle Valo 		break;
3056d5c65159SKalle Valo 	case HAL_WBM_REL_SRC_MODULE_RXDMA:
3057d5c65159SKalle Valo 		drop = ath11k_dp_rx_h_rxdma_err(ar, msdu, &rxs);
3058d5c65159SKalle Valo 		break;
3059d5c65159SKalle Valo 	default:
3060d5c65159SKalle Valo 		/* msdu will get freed */
3061d5c65159SKalle Valo 		break;
3062d5c65159SKalle Valo 	}
3063d5c65159SKalle Valo 
3064d5c65159SKalle Valo 	if (drop) {
3065d5c65159SKalle Valo 		dev_kfree_skb_any(msdu);
3066d5c65159SKalle Valo 		return;
3067d5c65159SKalle Valo 	}
3068d5c65159SKalle Valo 
3069d5c65159SKalle Valo 	status = IEEE80211_SKB_RXCB(msdu);
3070d5c65159SKalle Valo 	*status = rxs;
3071d5c65159SKalle Valo 
3072d5c65159SKalle Valo 	ath11k_dp_rx_deliver_msdu(ar, napi, msdu);
3073d5c65159SKalle Valo }
3074d5c65159SKalle Valo 
3075d5c65159SKalle Valo int ath11k_dp_rx_process_wbm_err(struct ath11k_base *ab,
3076d5c65159SKalle Valo 				 struct napi_struct *napi, int budget)
3077d5c65159SKalle Valo {
3078d5c65159SKalle Valo 	struct ath11k *ar;
3079d5c65159SKalle Valo 	struct ath11k_dp *dp = &ab->dp;
3080d5c65159SKalle Valo 	struct dp_rxdma_ring *rx_ring;
3081d5c65159SKalle Valo 	struct hal_rx_wbm_rel_info err_info;
3082d5c65159SKalle Valo 	struct hal_srng *srng;
3083d5c65159SKalle Valo 	struct sk_buff *msdu;
3084d5c65159SKalle Valo 	struct sk_buff_head msdu_list[MAX_RADIOS];
3085d5c65159SKalle Valo 	struct ath11k_skb_rxcb *rxcb;
3086d5c65159SKalle Valo 	u32 *rx_desc;
3087d5c65159SKalle Valo 	int buf_id, mac_id;
3088d5c65159SKalle Valo 	int num_buffs_reaped[MAX_RADIOS] = {0};
3089d5c65159SKalle Valo 	int total_num_buffs_reaped = 0;
3090d5c65159SKalle Valo 	int ret, i;
3091d5c65159SKalle Valo 
3092d5c65159SKalle Valo 	for (i = 0; i < MAX_RADIOS; i++)
3093d5c65159SKalle Valo 		__skb_queue_head_init(&msdu_list[i]);
3094d5c65159SKalle Valo 
3095d5c65159SKalle Valo 	srng = &ab->hal.srng_list[dp->rx_rel_ring.ring_id];
3096d5c65159SKalle Valo 
3097d5c65159SKalle Valo 	spin_lock_bh(&srng->lock);
3098d5c65159SKalle Valo 
3099d5c65159SKalle Valo 	ath11k_hal_srng_access_begin(ab, srng);
3100d5c65159SKalle Valo 
3101d5c65159SKalle Valo 	while (budget) {
3102d5c65159SKalle Valo 		rx_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng);
3103d5c65159SKalle Valo 		if (!rx_desc)
3104d5c65159SKalle Valo 			break;
3105d5c65159SKalle Valo 
3106d5c65159SKalle Valo 		ret = ath11k_hal_wbm_desc_parse_err(ab, rx_desc, &err_info);
3107d5c65159SKalle Valo 		if (ret) {
3108d5c65159SKalle Valo 			ath11k_warn(ab,
3109d5c65159SKalle Valo 				    "failed to parse rx error in wbm_rel ring desc %d\n",
3110d5c65159SKalle Valo 				    ret);
3111d5c65159SKalle Valo 			continue;
3112d5c65159SKalle Valo 		}
3113d5c65159SKalle Valo 
3114d5c65159SKalle Valo 		buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, err_info.cookie);
3115d5c65159SKalle Valo 		mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, err_info.cookie);
3116d5c65159SKalle Valo 
3117d5c65159SKalle Valo 		ar = ab->pdevs[mac_id].ar;
3118d5c65159SKalle Valo 		rx_ring = &ar->dp.rx_refill_buf_ring;
3119d5c65159SKalle Valo 
3120d5c65159SKalle Valo 		spin_lock_bh(&rx_ring->idr_lock);
3121d5c65159SKalle Valo 		msdu = idr_find(&rx_ring->bufs_idr, buf_id);
3122d5c65159SKalle Valo 		if (!msdu) {
3123d5c65159SKalle Valo 			ath11k_warn(ab, "frame rx with invalid buf_id %d pdev %d\n",
3124d5c65159SKalle Valo 				    buf_id, mac_id);
3125d5c65159SKalle Valo 			spin_unlock_bh(&rx_ring->idr_lock);
3126d5c65159SKalle Valo 			continue;
3127d5c65159SKalle Valo 		}
3128d5c65159SKalle Valo 
3129d5c65159SKalle Valo 		idr_remove(&rx_ring->bufs_idr, buf_id);
3130d5c65159SKalle Valo 		spin_unlock_bh(&rx_ring->idr_lock);
3131d5c65159SKalle Valo 
3132d5c65159SKalle Valo 		rxcb = ATH11K_SKB_RXCB(msdu);
3133d5c65159SKalle Valo 		dma_unmap_single(ab->dev, rxcb->paddr,
3134d5c65159SKalle Valo 				 msdu->len + skb_tailroom(msdu),
3135d5c65159SKalle Valo 				 DMA_FROM_DEVICE);
3136d5c65159SKalle Valo 
3137d5c65159SKalle Valo 		num_buffs_reaped[mac_id]++;
3138d5c65159SKalle Valo 		total_num_buffs_reaped++;
3139d5c65159SKalle Valo 		budget--;
3140d5c65159SKalle Valo 
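		/* Only frames pushed due to a detected error are processed
		 * further; anything else is dropped here.
		 */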
3141d5c65159SKalle Valo 		if (err_info.push_reason !=
3142d5c65159SKalle Valo 		    HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) {
3143d5c65159SKalle Valo 			dev_kfree_skb_any(msdu);
3144d5c65159SKalle Valo 			continue;
3145d5c65159SKalle Valo 		}
3146d5c65159SKalle Valo 
3147d5c65159SKalle Valo 		rxcb->err_rel_src = err_info.err_rel_src;
3148d5c65159SKalle Valo 		rxcb->err_code = err_info.err_code;
3149d5c65159SKalle Valo 		rxcb->rx_desc = (struct hal_rx_desc *)msdu->data;
3150d5c65159SKalle Valo 		__skb_queue_tail(&msdu_list[mac_id], msdu);
3151d5c65159SKalle Valo 	}
3152d5c65159SKalle Valo 
3153d5c65159SKalle Valo 	ath11k_hal_srng_access_end(ab, srng);
3154d5c65159SKalle Valo 
3155d5c65159SKalle Valo 	spin_unlock_bh(&srng->lock);
3156d5c65159SKalle Valo 
3157d5c65159SKalle Valo 	if (!total_num_buffs_reaped)
3158d5c65159SKalle Valo 		goto done;
3159d5c65159SKalle Valo 
3160d5c65159SKalle Valo 	for (i = 0; i < ab->num_radios; i++) {
3161d5c65159SKalle Valo 		if (!num_buffs_reaped[i])
3162d5c65159SKalle Valo 			continue;
3163d5c65159SKalle Valo 
3164d5c65159SKalle Valo 		ar = ab->pdevs[i].ar;
3165d5c65159SKalle Valo 		rx_ring = &ar->dp.rx_refill_buf_ring;
3166d5c65159SKalle Valo 
3167d5c65159SKalle Valo 		ath11k_dp_rxbufs_replenish(ab, i, rx_ring, num_buffs_reaped[i],
3168d5c65159SKalle Valo 					   HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC);
3169d5c65159SKalle Valo 	}
3170d5c65159SKalle Valo 
3171d5c65159SKalle Valo 	rcu_read_lock();
3172d5c65159SKalle Valo 	for (i = 0; i < ab->num_radios; i++) {
3173d5c65159SKalle Valo 		if (!rcu_dereference(ab->pdevs_active[i])) {
3174d5c65159SKalle Valo 			__skb_queue_purge(&msdu_list[i]);
3175d5c65159SKalle Valo 			continue;
3176d5c65159SKalle Valo 		}
3177d5c65159SKalle Valo 
3178d5c65159SKalle Valo 		ar = ab->pdevs[i].ar;
3179d5c65159SKalle Valo 
3180d5c65159SKalle Valo 		if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) {
3181d5c65159SKalle Valo 			__skb_queue_purge(&msdu_list[i]);
3182d5c65159SKalle Valo 			continue;
3183d5c65159SKalle Valo 		}
3184d5c65159SKalle Valo 
3185d5c65159SKalle Valo 		while ((msdu = __skb_dequeue(&msdu_list[i])) != NULL)
3186d5c65159SKalle Valo 			ath11k_dp_rx_wbm_err(ar, napi, msdu, &msdu_list[i]);
3187d5c65159SKalle Valo 	}
3188d5c65159SKalle Valo 	rcu_read_unlock();
3189d5c65159SKalle Valo done:
3190d5c65159SKalle Valo 	return total_num_buffs_reaped;
3191d5c65159SKalle Valo }
3192d5c65159SKalle Valo 
3193d5c65159SKalle Valo int ath11k_dp_process_rxdma_err(struct ath11k_base *ab, int mac_id, int budget)
3194d5c65159SKalle Valo {
3195d5c65159SKalle Valo 	struct ath11k *ar = ab->pdevs[mac_id].ar;
3196d5c65159SKalle Valo 	struct dp_srng *err_ring = &ar->dp.rxdma_err_dst_ring;
3197d5c65159SKalle Valo 	struct dp_rxdma_ring *rx_ring = &ar->dp.rx_refill_buf_ring;
3198d5c65159SKalle Valo 	struct dp_link_desc_bank *link_desc_banks = ab->dp.link_desc_banks;
3199d5c65159SKalle Valo 	struct hal_srng *srng;
3200293cb583SJohn Crispin 	u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC];
3201d5c65159SKalle Valo 	enum hal_rx_buf_return_buf_manager rbm;
3202d5c65159SKalle Valo 	enum hal_reo_entr_rxdma_ecode rxdma_err_code;
3203d5c65159SKalle Valo 	struct ath11k_skb_rxcb *rxcb;
3204d5c65159SKalle Valo 	struct sk_buff *skb;
3205d5c65159SKalle Valo 	struct hal_reo_entrance_ring *entr_ring;
3206d5c65159SKalle Valo 	void *desc;
3207d5c65159SKalle Valo 	int num_buf_freed = 0;
3208d5c65159SKalle Valo 	int quota = budget;
3209d5c65159SKalle Valo 	dma_addr_t paddr;
3210d5c65159SKalle Valo 	u32 desc_bank;
3211d5c65159SKalle Valo 	void *link_desc_va;
3212d5c65159SKalle Valo 	int num_msdus;
3213d5c65159SKalle Valo 	int i;
3214d5c65159SKalle Valo 	int buf_id;
3215d5c65159SKalle Valo 
3216d5c65159SKalle Valo 	srng = &ab->hal.srng_list[err_ring->ring_id];
3217d5c65159SKalle Valo 
3218d5c65159SKalle Valo 	spin_lock_bh(&srng->lock);
3219d5c65159SKalle Valo 
3220d5c65159SKalle Valo 	ath11k_hal_srng_access_begin(ab, srng);
3221d5c65159SKalle Valo 
3222d5c65159SKalle Valo 	while (quota-- &&
3223d5c65159SKalle Valo 	       (desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
3224d5c65159SKalle Valo 		ath11k_hal_rx_reo_ent_paddr_get(ab, desc, &paddr, &desc_bank);
3225d5c65159SKalle Valo 
3226d5c65159SKalle Valo 		entr_ring = (struct hal_reo_entrance_ring *)desc;
3227d5c65159SKalle Valo 		rxdma_err_code =
3228d5c65159SKalle Valo 			FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE,
3229d5c65159SKalle Valo 				  entr_ring->info1);
3230d5c65159SKalle Valo 		ab->soc_stats.rxdma_error[rxdma_err_code]++;
3231d5c65159SKalle Valo 
3232d5c65159SKalle Valo 		link_desc_va = link_desc_banks[desc_bank].vaddr +
3233d5c65159SKalle Valo 			       (paddr - link_desc_banks[desc_bank].paddr);
3234293cb583SJohn Crispin 		ath11k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus,
3235293cb583SJohn Crispin 						 msdu_cookies, &rbm);
3236d5c65159SKalle Valo 
3237d5c65159SKalle Valo 		for (i = 0; i < num_msdus; i++) {
3238d5c65159SKalle Valo 			buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
3239293cb583SJohn Crispin 					   msdu_cookies[i]);
3240d5c65159SKalle Valo 
3241d5c65159SKalle Valo 			spin_lock_bh(&rx_ring->idr_lock);
3242d5c65159SKalle Valo 			skb = idr_find(&rx_ring->bufs_idr, buf_id);
3243d5c65159SKalle Valo 			if (!skb) {
3244d5c65159SKalle Valo 				ath11k_warn(ab, "rxdma error with invalid buf_id %d\n",
3245d5c65159SKalle Valo 					    buf_id);
3246d5c65159SKalle Valo 				spin_unlock_bh(&rx_ring->idr_lock);
3247d5c65159SKalle Valo 				continue;
3248d5c65159SKalle Valo 			}
3249d5c65159SKalle Valo 
3250d5c65159SKalle Valo 			idr_remove(&rx_ring->bufs_idr, buf_id);
3251d5c65159SKalle Valo 			spin_unlock_bh(&rx_ring->idr_lock);
3252d5c65159SKalle Valo 
3253d5c65159SKalle Valo 			rxcb = ATH11K_SKB_RXCB(skb);
3254d5c65159SKalle Valo 			dma_unmap_single(ab->dev, rxcb->paddr,
3255d5c65159SKalle Valo 					 skb->len + skb_tailroom(skb),
3256d5c65159SKalle Valo 					 DMA_FROM_DEVICE);
3257d5c65159SKalle Valo 			dev_kfree_skb_any(skb);
3258d5c65159SKalle Valo 
3259d5c65159SKalle Valo 			num_buf_freed++;
3260d5c65159SKalle Valo 		}
3261d5c65159SKalle Valo 
3262d5c65159SKalle Valo 		ath11k_dp_rx_link_desc_return(ab, desc,
3263d5c65159SKalle Valo 					      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
3264d5c65159SKalle Valo 	}
3265d5c65159SKalle Valo 
3266d5c65159SKalle Valo 	ath11k_hal_srng_access_end(ab, srng);
3267d5c65159SKalle Valo 
3268d5c65159SKalle Valo 	spin_unlock_bh(&srng->lock);
3269d5c65159SKalle Valo 
3270d5c65159SKalle Valo 	if (num_buf_freed)
3271d5c65159SKalle Valo 		ath11k_dp_rxbufs_replenish(ab, mac_id, rx_ring, num_buf_freed,
3272d5c65159SKalle Valo 					   HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC);
3273d5c65159SKalle Valo 
3274d5c65159SKalle Valo 	return budget - quota;
3275d5c65159SKalle Valo }
3276d5c65159SKalle Valo 
3277d5c65159SKalle Valo void ath11k_dp_process_reo_status(struct ath11k_base *ab)
3278d5c65159SKalle Valo {
3279d5c65159SKalle Valo 	struct ath11k_dp *dp = &ab->dp;
3280d5c65159SKalle Valo 	struct hal_srng *srng;
3281d5c65159SKalle Valo 	struct dp_reo_cmd *cmd, *tmp;
3282d5c65159SKalle Valo 	bool found = false;
3283d5c65159SKalle Valo 	u32 *reo_desc;
3284d5c65159SKalle Valo 	u16 tag;
3285d5c65159SKalle Valo 	struct hal_reo_status reo_status;
3286d5c65159SKalle Valo 
3287d5c65159SKalle Valo 	srng = &ab->hal.srng_list[dp->reo_status_ring.ring_id];
3288d5c65159SKalle Valo 
3289d5c65159SKalle Valo 	memset(&reo_status, 0, sizeof(reo_status));
3290d5c65159SKalle Valo 
3291d5c65159SKalle Valo 	spin_lock_bh(&srng->lock);
3292d5c65159SKalle Valo 
3293d5c65159SKalle Valo 	ath11k_hal_srng_access_begin(ab, srng);
3294d5c65159SKalle Valo 
3295d5c65159SKalle Valo 	while ((reo_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
3296d5c65159SKalle Valo 		tag = FIELD_GET(HAL_SRNG_TLV_HDR_TAG, *reo_desc);
3297d5c65159SKalle Valo 
3298d5c65159SKalle Valo 		switch (tag) {
3299d5c65159SKalle Valo 		case HAL_REO_GET_QUEUE_STATS_STATUS:
3300d5c65159SKalle Valo 			ath11k_hal_reo_status_queue_stats(ab, reo_desc,
3301d5c65159SKalle Valo 							  &reo_status);
3302d5c65159SKalle Valo 			break;
3303d5c65159SKalle Valo 		case HAL_REO_FLUSH_QUEUE_STATUS:
3304d5c65159SKalle Valo 			ath11k_hal_reo_flush_queue_status(ab, reo_desc,
3305d5c65159SKalle Valo 							  &reo_status);
3306d5c65159SKalle Valo 			break;
3307d5c65159SKalle Valo 		case HAL_REO_FLUSH_CACHE_STATUS:
3308d5c65159SKalle Valo 			ath11k_hal_reo_flush_cache_status(ab, reo_desc,
3309d5c65159SKalle Valo 							  &reo_status);
3310d5c65159SKalle Valo 			break;
3311d5c65159SKalle Valo 		case HAL_REO_UNBLOCK_CACHE_STATUS:
3312d5c65159SKalle Valo 			ath11k_hal_reo_unblk_cache_status(ab, reo_desc,
3313d5c65159SKalle Valo 							  &reo_status);
3314d5c65159SKalle Valo 			break;
3315d5c65159SKalle Valo 		case HAL_REO_FLUSH_TIMEOUT_LIST_STATUS:
3316d5c65159SKalle Valo 			ath11k_hal_reo_flush_timeout_list_status(ab, reo_desc,
3317d5c65159SKalle Valo 								 &reo_status);
3318d5c65159SKalle Valo 			break;
3319d5c65159SKalle Valo 		case HAL_REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS:
3320d5c65159SKalle Valo 			ath11k_hal_reo_desc_thresh_reached_status(ab, reo_desc,
3321d5c65159SKalle Valo 								  &reo_status);
3322d5c65159SKalle Valo 			break;
3323d5c65159SKalle Valo 		case HAL_REO_UPDATE_RX_REO_QUEUE_STATUS:
3324d5c65159SKalle Valo 			ath11k_hal_reo_update_rx_reo_queue_status(ab, reo_desc,
3325d5c65159SKalle Valo 								  &reo_status);
3326d5c65159SKalle Valo 			break;
3327d5c65159SKalle Valo 		default:
3328d5c65159SKalle Valo 			ath11k_warn(ab, "Unknown reo status type %d\n", tag);
3329d5c65159SKalle Valo 			continue;
3330d5c65159SKalle Valo 		}
3331d5c65159SKalle Valo 
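		/* Match the status against a previously issued REO command by
		 * cmd_num and, if found, invoke its completion handler outside
		 * of the command list lock.
		 */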
3332d5c65159SKalle Valo 		spin_lock_bh(&dp->reo_cmd_lock);
3333d5c65159SKalle Valo 		list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
3334d5c65159SKalle Valo 			if (reo_status.uniform_hdr.cmd_num == cmd->cmd_num) {
3335d5c65159SKalle Valo 				found = true;
3336d5c65159SKalle Valo 				list_del(&cmd->list);
3337d5c65159SKalle Valo 				break;
3338d5c65159SKalle Valo 			}
3339d5c65159SKalle Valo 		}
3340d5c65159SKalle Valo 		spin_unlock_bh(&dp->reo_cmd_lock);
3341d5c65159SKalle Valo 
3342d5c65159SKalle Valo 		if (found) {
3343d5c65159SKalle Valo 			cmd->handler(dp, (void *)&cmd->data,
3344d5c65159SKalle Valo 				     reo_status.uniform_hdr.cmd_status);
3345d5c65159SKalle Valo 			kfree(cmd);
3346d5c65159SKalle Valo 		}
3347d5c65159SKalle Valo 
3348d5c65159SKalle Valo 		found = false;
3349d5c65159SKalle Valo 	}
3350d5c65159SKalle Valo 
3351d5c65159SKalle Valo 	ath11k_hal_srng_access_end(ab, srng);
3352d5c65159SKalle Valo 
3353d5c65159SKalle Valo 	spin_unlock_bh(&srng->lock);
3354d5c65159SKalle Valo }
3355d5c65159SKalle Valo 
3356d5c65159SKalle Valo void ath11k_dp_rx_pdev_free(struct ath11k_base *ab, int mac_id)
3357d5c65159SKalle Valo {
3358d5c65159SKalle Valo 	struct ath11k *ar = ab->pdevs[mac_id].ar;
3359d5c65159SKalle Valo 
3360d5c65159SKalle Valo 	ath11k_dp_rx_pdev_srng_free(ar);
3361d5c65159SKalle Valo 	ath11k_dp_rxdma_pdev_buf_free(ar);
3362d5c65159SKalle Valo }
3363d5c65159SKalle Valo 
3364d5c65159SKalle Valo int ath11k_dp_rx_pdev_alloc(struct ath11k_base *ab, int mac_id)
3365d5c65159SKalle Valo {
3366d5c65159SKalle Valo 	struct ath11k *ar = ab->pdevs[mac_id].ar;
3367d5c65159SKalle Valo 	struct ath11k_pdev_dp *dp = &ar->dp;
3368d5c65159SKalle Valo 	u32 ring_id;
3369d5c65159SKalle Valo 	int ret;
3370d5c65159SKalle Valo 
3371d5c65159SKalle Valo 	ret = ath11k_dp_rx_pdev_srng_alloc(ar);
3372d5c65159SKalle Valo 	if (ret) {
3373d5c65159SKalle Valo 		ath11k_warn(ab, "failed to setup rx srngs\n");
3374d5c65159SKalle Valo 		return ret;
3375d5c65159SKalle Valo 	}
3376d5c65159SKalle Valo 
3377d5c65159SKalle Valo 	ret = ath11k_dp_rxdma_pdev_buf_setup(ar);
3378d5c65159SKalle Valo 	if (ret) {
3379d5c65159SKalle Valo 		ath11k_warn(ab, "failed to setup rxdma ring\n");
3380d5c65159SKalle Valo 		return ret;
3381d5c65159SKalle Valo 	}
3382d5c65159SKalle Valo 
3383d5c65159SKalle Valo 	ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id;
3384d5c65159SKalle Valo 	ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id, HAL_RXDMA_BUF);
3385d5c65159SKalle Valo 	if (ret) {
3386d5c65159SKalle Valo 		ath11k_warn(ab, "failed to configure rx_refill_buf_ring %d\n",
3387d5c65159SKalle Valo 			    ret);
3388d5c65159SKalle Valo 		return ret;
3389d5c65159SKalle Valo 	}
3390d5c65159SKalle Valo 
3391d5c65159SKalle Valo 	ring_id = dp->rxdma_err_dst_ring.ring_id;
3392d5c65159SKalle Valo 	ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id, HAL_RXDMA_DST);
3393d5c65159SKalle Valo 	if (ret) {
3394d5c65159SKalle Valo 		ath11k_warn(ab, "failed to configure rxdma_err_dest_ring %d\n",
3395d5c65159SKalle Valo 			    ret);
3396d5c65159SKalle Valo 		return ret;
3397d5c65159SKalle Valo 	}
3398d5c65159SKalle Valo 
3399d5c65159SKalle Valo 	ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id;
3400d5c65159SKalle Valo 	ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id,
3401d5c65159SKalle Valo 					  mac_id, HAL_RXDMA_MONITOR_BUF);
3402d5c65159SKalle Valo 	if (ret) {
3403d5c65159SKalle Valo 		ath11k_warn(ab, "failed to configure rxdma_mon_buf_ring %d\n",
3404d5c65159SKalle Valo 			    ret);
3405d5c65159SKalle Valo 		return ret;
3406d5c65159SKalle Valo 	}
3407d5c65159SKalle Valo 	ret = ath11k_dp_tx_htt_srng_setup(ab,
3408d5c65159SKalle Valo 					  dp->rxdma_mon_dst_ring.ring_id,
3409d5c65159SKalle Valo 					  mac_id, HAL_RXDMA_MONITOR_DST);
3410d5c65159SKalle Valo 	if (ret) {
3411d5c65159SKalle Valo 		ath11k_warn(ab, "failed to configure rxdma_mon_dst_ring %d\n",
3412d5c65159SKalle Valo 			    ret);
3413d5c65159SKalle Valo 		return ret;
3414d5c65159SKalle Valo 	}
3415d5c65159SKalle Valo 	ret = ath11k_dp_tx_htt_srng_setup(ab,
3416d5c65159SKalle Valo 					  dp->rxdma_mon_desc_ring.ring_id,
3417d5c65159SKalle Valo 					  mac_id, HAL_RXDMA_MONITOR_DESC);
3418d5c65159SKalle Valo 	if (ret) {
3419d5c65159SKalle Valo 		ath11k_warn(ab, "failed to configure rxdma_mon_desc_ring %d\n",
3420d5c65159SKalle Valo 			    ret);
3421d5c65159SKalle Valo 		return ret;
3422d5c65159SKalle Valo 	}
3423d5c65159SKalle Valo 	ring_id = dp->rx_mon_status_refill_ring.refill_buf_ring.ring_id;
3424d5c65159SKalle Valo 	ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id,
3425d5c65159SKalle Valo 					  HAL_RXDMA_MONITOR_STATUS);
3426d5c65159SKalle Valo 	if (ret) {
3427d5c65159SKalle Valo 		ath11k_warn(ab,
3428d5c65159SKalle Valo 			    "failed to configure mon_status_refill_ring %d\n",
3429d5c65159SKalle Valo 			    ret);
3430d5c65159SKalle Valo 		return ret;
3431d5c65159SKalle Valo 	}
3432d5c65159SKalle Valo 	return 0;
3433d5c65159SKalle Valo }
3434d5c65159SKalle Valo 
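/*
 * Split the remaining length of a multi-buffer monitor MSDU into the
 * portion carried by the current RX buffer.  Each buffer holds at most
 * DP_RX_BUFFER_SIZE - sizeof(struct hal_rx_desc) bytes of payload;
 * *frag_len receives that chunk (or the remainder, once it fits) and
 * *total_len is reduced accordingly.
 *
 * Illustrative walk-through, assuming a per-buffer payload of 1664
 * bytes purely for the example:
 *   total_len = 4000 -> frag_len = 1664, total_len = 2336
 *   total_len = 2336 -> frag_len = 1664, total_len =  672
 *   total_len =  672 -> frag_len =  672, total_len =    0
 */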
3435d5c65159SKalle Valo static void ath11k_dp_mon_set_frag_len(u32 *total_len, u32 *frag_len)
3436d5c65159SKalle Valo {
3437d5c65159SKalle Valo 	if (*total_len >= (DP_RX_BUFFER_SIZE - sizeof(struct hal_rx_desc))) {
3438d5c65159SKalle Valo 		*frag_len = DP_RX_BUFFER_SIZE - sizeof(struct hal_rx_desc);
3439d5c65159SKalle Valo 		*total_len -= *frag_len;
3440d5c65159SKalle Valo 	} else {
3441d5c65159SKalle Valo 		*frag_len = *total_len;
3442d5c65159SKalle Valo 		*total_len = 0;
3443d5c65159SKalle Valo 	}
3444d5c65159SKalle Valo }
3445d5c65159SKalle Valo 
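/*
 * Hand a consumed MSDU link descriptor back to the pdev's RXDMA monitor
 * descriptor ring by copying its buffer address info into the next free
 * source ring entry, so the hardware can reuse the link descriptor.
 * Returns -ENOMEM when the ring has no free entry.
 */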
3446d5c65159SKalle Valo static
3447d5c65159SKalle Valo int ath11k_dp_rx_monitor_link_desc_return(struct ath11k *ar,
3448d5c65159SKalle Valo 					  void *p_last_buf_addr_info,
3449d5c65159SKalle Valo 					  u8 mac_id)
3450d5c65159SKalle Valo {
3451d5c65159SKalle Valo 	struct ath11k_pdev_dp *dp = &ar->dp;
3452d5c65159SKalle Valo 	struct dp_srng *dp_srng;
3453d5c65159SKalle Valo 	void *hal_srng;
3454d5c65159SKalle Valo 	void *src_srng_desc;
3455d5c65159SKalle Valo 	int ret = 0;
3456d5c65159SKalle Valo 
3457d5c65159SKalle Valo 	dp_srng = &dp->rxdma_mon_desc_ring;
3458d5c65159SKalle Valo 	hal_srng = &ar->ab->hal.srng_list[dp_srng->ring_id];
3459d5c65159SKalle Valo 
3460d5c65159SKalle Valo 	ath11k_hal_srng_access_begin(ar->ab, hal_srng);
3461d5c65159SKalle Valo 
3462d5c65159SKalle Valo 	src_srng_desc = ath11k_hal_srng_src_get_next_entry(ar->ab, hal_srng);
3463d5c65159SKalle Valo 
3464d5c65159SKalle Valo 	if (src_srng_desc) {
3465d5c65159SKalle Valo 		struct ath11k_buffer_addr *src_desc =
3466d5c65159SKalle Valo 				(struct ath11k_buffer_addr *)src_srng_desc;
3467d5c65159SKalle Valo 
3468d5c65159SKalle Valo 		*src_desc = *((struct ath11k_buffer_addr *)p_last_buf_addr_info);
3469d5c65159SKalle Valo 	} else {
3470d5c65159SKalle Valo 		ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
3471d5c65159SKalle Valo 			   "Monitor Link Desc Ring %d Full", mac_id);
3472d5c65159SKalle Valo 		ret = -ENOMEM;
3473d5c65159SKalle Valo 	}
3474d5c65159SKalle Valo 
3475d5c65159SKalle Valo 	ath11k_hal_srng_access_end(ar->ab, hal_srng);
3476d5c65159SKalle Valo 	return ret;
3477d5c65159SKalle Valo }
3478d5c65159SKalle Valo 
3479d5c65159SKalle Valo static
3480d5c65159SKalle Valo void ath11k_dp_rx_mon_next_link_desc_get(void *rx_msdu_link_desc,
3481d5c65159SKalle Valo 					 dma_addr_t *paddr, u32 *sw_cookie,
3482d5c65159SKalle Valo 					 void **pp_buf_addr_info)
3483d5c65159SKalle Valo {
3484d5c65159SKalle Valo 	struct hal_rx_msdu_link *msdu_link =
3485d5c65159SKalle Valo 			(struct hal_rx_msdu_link *)rx_msdu_link_desc;
3486d5c65159SKalle Valo 	struct ath11k_buffer_addr *buf_addr_info;
3487d5c65159SKalle Valo 	u8 rbm = 0;
3488d5c65159SKalle Valo 
3489d5c65159SKalle Valo 	buf_addr_info = (struct ath11k_buffer_addr *)&msdu_link->buf_addr_info;
3490d5c65159SKalle Valo 
3491d5c65159SKalle Valo 	ath11k_hal_rx_buf_addr_info_get(buf_addr_info, paddr, sw_cookie, &rbm);
3492d5c65159SKalle Valo 
3493d5c65159SKalle Valo 	*pp_buf_addr_info = (void *)buf_addr_info;
3494d5c65159SKalle Valo }
3495d5c65159SKalle Valo 
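/*
 * Force skb->len to exactly 'len': trim when the skb is longer, or grow
 * the tailroom (pskb_expand_head) and skb_put the difference when it is
 * shorter.  On allocation failure the skb is freed and -ENOMEM is
 * returned, so callers must not touch the skb after an error.
 */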
3496d5c65159SKalle Valo static int ath11k_dp_pkt_set_pktlen(struct sk_buff *skb, u32 len)
3497d5c65159SKalle Valo {
3498d5c65159SKalle Valo 	if (skb->len > len) {
3499d5c65159SKalle Valo 		skb_trim(skb, len);
3500d5c65159SKalle Valo 	} else {
3501d5c65159SKalle Valo 		if (skb_tailroom(skb) < len - skb->len) {
3502d5c65159SKalle Valo 			if ((pskb_expand_head(skb, 0,
3503d5c65159SKalle Valo 					      len - skb->len - skb_tailroom(skb),
3504d5c65159SKalle Valo 					      GFP_ATOMIC))) {
3505d5c65159SKalle Valo 				dev_kfree_skb_any(skb);
3506d5c65159SKalle Valo 				return -ENOMEM;
3507d5c65159SKalle Valo 			}
3508d5c65159SKalle Valo 		}
3509d5c65159SKalle Valo 		skb_put(skb, (len - skb->len));
3510d5c65159SKalle Valo 	}
3511d5c65159SKalle Valo 	return 0;
3512d5c65159SKalle Valo }
3513d5c65159SKalle Valo 
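/*
 * Expand one MSDU link descriptor into msdu_list: per-MSDU flags,
 * packet length, SW cookie and return buffer manager are copied for up
 * to HAL_RX_NUM_MSDU_DESC entries.  A zero buffer address terminates
 * the walk and flags the preceding entry as the last MSDU of the MPDU;
 * *num_msdus reports how many entries were filled.
 */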
3514d5c65159SKalle Valo static void ath11k_hal_rx_msdu_list_get(struct ath11k *ar,
3515d5c65159SKalle Valo 					void *msdu_link_desc,
3516d5c65159SKalle Valo 					struct hal_rx_msdu_list *msdu_list,
3517d5c65159SKalle Valo 					u16 *num_msdus)
3518d5c65159SKalle Valo {
3519d5c65159SKalle Valo 	struct hal_rx_msdu_details *msdu_details = NULL;
3520d5c65159SKalle Valo 	struct rx_msdu_desc *msdu_desc_info = NULL;
3521d5c65159SKalle Valo 	struct hal_rx_msdu_link *msdu_link = NULL;
3522d5c65159SKalle Valo 	int i;
3523d5c65159SKalle Valo 	u32 last = FIELD_PREP(RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU, 1);
3524d5c65159SKalle Valo 	u32 first = FIELD_PREP(RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU, 1);
3525d5c65159SKalle Valo 	u8  tmp  = 0;
3526d5c65159SKalle Valo 
3527d5c65159SKalle Valo 	msdu_link = (struct hal_rx_msdu_link *)msdu_link_desc;
3528d5c65159SKalle Valo 	msdu_details = &msdu_link->msdu_link[0];
3529d5c65159SKalle Valo 
3530d5c65159SKalle Valo 	for (i = 0; i < HAL_RX_NUM_MSDU_DESC; i++) {
3531d5c65159SKalle Valo 		if (FIELD_GET(BUFFER_ADDR_INFO0_ADDR,
3532d5c65159SKalle Valo 			      msdu_details[i].buf_addr_info.info0) == 0) {
3533d5c65159SKalle Valo 			msdu_desc_info = &msdu_details[i - 1].rx_msdu_info;
3534d5c65159SKalle Valo 			msdu_desc_info->info0 |= last;
3536d5c65159SKalle Valo 			break;
3537d5c65159SKalle Valo 		}
3538d5c65159SKalle Valo 		msdu_desc_info = &msdu_details[i].rx_msdu_info;
3539d5c65159SKalle Valo 
3540d5c65159SKalle Valo 		if (!i)
3541d5c65159SKalle Valo 			msdu_desc_info->info0 |= first;
3542d5c65159SKalle Valo 		else if (i == (HAL_RX_NUM_MSDU_DESC - 1))
3543d5c65159SKalle Valo 			msdu_desc_info->info0 |= last;
3544d5c65159SKalle Valo 		msdu_list->msdu_info[i].msdu_flags = msdu_desc_info->info0;
3545d5c65159SKalle Valo 		msdu_list->msdu_info[i].msdu_len =
3546d5c65159SKalle Valo 			 HAL_RX_MSDU_PKT_LENGTH_GET(msdu_desc_info->info0);
3547d5c65159SKalle Valo 		msdu_list->sw_cookie[i] =
3548d5c65159SKalle Valo 			FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
3549d5c65159SKalle Valo 				  msdu_details[i].buf_addr_info.info1);
3550d5c65159SKalle Valo 		tmp = FIELD_GET(BUFFER_ADDR_INFO1_RET_BUF_MGR,
3551d5c65159SKalle Valo 				msdu_details[i].buf_addr_info.info1);
3552d5c65159SKalle Valo 		msdu_list->rbm[i] = tmp;
3553d5c65159SKalle Valo 	}
3554d5c65159SKalle Valo 	*num_msdus = i;
3555d5c65159SKalle Valo }
3556d5c65159SKalle Valo 
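/*
 * Compare the PPDU id of a popped MSDU against the PPDU currently being
 * tracked from the status ring.  Returns 0 while the MSDU still belongs
 * to that PPDU; otherwise *ppdu_id is advanced to the MSDU's id and the
 * new id is returned so the caller can stop popping.  When the
 * destination ring is behind by more than DP_NOT_PPDU_ID_WRAP_AROUND
 * the stale entry is accounted in *rx_bufs_used so it gets recycled.
 */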
3557d5c65159SKalle Valo static u32 ath11k_dp_rx_mon_comp_ppduid(u32 msdu_ppdu_id, u32 *ppdu_id,
3558d5c65159SKalle Valo 					u32 *rx_bufs_used)
3559d5c65159SKalle Valo {
3560d5c65159SKalle Valo 	u32 ret = 0;
3561d5c65159SKalle Valo 
3562d5c65159SKalle Valo 	if ((*ppdu_id < msdu_ppdu_id) &&
3563d5c65159SKalle Valo 	    ((msdu_ppdu_id - *ppdu_id) < DP_NOT_PPDU_ID_WRAP_AROUND)) {
3564d5c65159SKalle Valo 		*ppdu_id = msdu_ppdu_id;
3565d5c65159SKalle Valo 		ret = msdu_ppdu_id;
3566d5c65159SKalle Valo 	} else if ((*ppdu_id > msdu_ppdu_id) &&
3567d5c65159SKalle Valo 		((*ppdu_id - msdu_ppdu_id) > DP_NOT_PPDU_ID_WRAP_AROUND)) {
3568d5c65159SKalle Valo 		/* mon_dst is behind mon_status,
3569d5c65159SKalle Valo 		 * so skip this dst_ring entry and free it
3570d5c65159SKalle Valo 		 */
3571d5c65159SKalle Valo 		*rx_bufs_used += 1;
3572d5c65159SKalle Valo 		*ppdu_id = msdu_ppdu_id;
3573d5c65159SKalle Valo 		ret = msdu_ppdu_id;
3574d5c65159SKalle Valo 	}
3575d5c65159SKalle Valo 	return ret;
3576d5c65159SKalle Valo }
3577d5c65159SKalle Valo 
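/*
 * Work out how many bytes of the current RX buffer belong to the MSDU.
 * While the MSDU_CONTINUATION flag is set the MSDU spans several
 * buffers, so the running total is split buffer by buffer via
 * ath11k_dp_mon_set_frag_len(); the final buffer of a fragmented MSDU
 * takes whatever is left, an unfragmented MSDU takes its full msdu_len,
 * and in both cases the per-MPDU MSDU count is decremented.
 */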
3578d5c65159SKalle Valo static void ath11k_dp_mon_get_buf_len(struct hal_rx_msdu_desc_info *info,
3579d5c65159SKalle Valo 				      bool *is_frag, u32 *total_len,
3580d5c65159SKalle Valo 				      u32 *frag_len, u32 *msdu_cnt)
3581d5c65159SKalle Valo {
3582d5c65159SKalle Valo 	if (info->msdu_flags & RX_MSDU_DESC_INFO0_MSDU_CONTINUATION) {
3583d5c65159SKalle Valo 		if (!*is_frag) {
3584d5c65159SKalle Valo 			*total_len = info->msdu_len;
3585d5c65159SKalle Valo 			*is_frag = true;
3586d5c65159SKalle Valo 		}
3587d5c65159SKalle Valo 		ath11k_dp_mon_set_frag_len(total_len,
3588d5c65159SKalle Valo 					   frag_len);
3589d5c65159SKalle Valo 	} else {
3590d5c65159SKalle Valo 		if (*is_frag) {
3591d5c65159SKalle Valo 			ath11k_dp_mon_set_frag_len(total_len,
3592d5c65159SKalle Valo 						   frag_len);
3593d5c65159SKalle Valo 		} else {
3594d5c65159SKalle Valo 			*frag_len = info->msdu_len;
3595d5c65159SKalle Valo 		}
3596d5c65159SKalle Valo 		*is_frag = false;
3597d5c65159SKalle Valo 		*msdu_cnt -= 1;
3598d5c65159SKalle Valo 	}
3599d5c65159SKalle Valo }
3600d5c65159SKalle Valo 
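/*
 * Pop one monitor MPDU from a REO entrance ring entry: decide up front
 * whether RXDMA flagged an error that forces the MPDU to be dropped,
 * then walk the chain of MSDU link descriptors, look up each RX buffer
 * by its IDR cookie, unmap it, size it with the fragment length and
 * link it into the head/tail MSDU list.  Consumed link descriptors are
 * returned to the monitor descriptor ring as the walk advances.
 * Returns the number of RX buffers consumed (to be replenished by the
 * caller) and sets *npackets once the whole MPDU has been popped.
 */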
3601d5c65159SKalle Valo static u32
3602d5c65159SKalle Valo ath11k_dp_rx_mon_mpdu_pop(struct ath11k *ar,
3603d5c65159SKalle Valo 			  void *ring_entry, struct sk_buff **head_msdu,
3604d5c65159SKalle Valo 			  struct sk_buff **tail_msdu, u32 *npackets,
3605d5c65159SKalle Valo 			  u32 *ppdu_id)
3606d5c65159SKalle Valo {
3607d5c65159SKalle Valo 	struct ath11k_pdev_dp *dp = &ar->dp;
3608d5c65159SKalle Valo 	struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
3609d5c65159SKalle Valo 	struct dp_rxdma_ring *rx_ring = &dp->rxdma_mon_buf_ring;
3610d5c65159SKalle Valo 	struct sk_buff *msdu = NULL, *last = NULL;
3611d5c65159SKalle Valo 	struct hal_rx_msdu_list msdu_list;
3612d5c65159SKalle Valo 	void *p_buf_addr_info, *p_last_buf_addr_info;
3613d5c65159SKalle Valo 	struct hal_rx_desc *rx_desc;
3614d5c65159SKalle Valo 	void *rx_msdu_link_desc;
3615d5c65159SKalle Valo 	dma_addr_t paddr;
3616d5c65159SKalle Valo 	u16 num_msdus = 0;
3617d5c65159SKalle Valo 	u32 rx_buf_size, rx_pkt_offset, sw_cookie;
3618d5c65159SKalle Valo 	u32 rx_bufs_used = 0, i = 0;
3619d5c65159SKalle Valo 	u32 msdu_ppdu_id = 0, msdu_cnt = 0;
3620d5c65159SKalle Valo 	u32 total_len = 0, frag_len = 0;
3621d5c65159SKalle Valo 	bool is_frag, is_first_msdu;
3622d5c65159SKalle Valo 	bool drop_mpdu = false;
3623d5c65159SKalle Valo 	struct ath11k_skb_rxcb *rxcb;
3624d5c65159SKalle Valo 	struct hal_reo_entrance_ring *ent_desc =
3625d5c65159SKalle Valo 			(struct hal_reo_entrance_ring *)ring_entry;
3626d5c65159SKalle Valo 	int buf_id;
3627d5c65159SKalle Valo 
3628d5c65159SKalle Valo 	ath11k_hal_rx_reo_ent_buf_paddr_get(ring_entry, &paddr,
3629d5c65159SKalle Valo 					    &sw_cookie, &p_last_buf_addr_info,
3630d5c65159SKalle Valo 					    &msdu_cnt);
3631d5c65159SKalle Valo 
3632d5c65159SKalle Valo 	if (FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_PUSH_REASON,
3633d5c65159SKalle Valo 		      ent_desc->info1) ==
3634d5c65159SKalle Valo 		      HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) {
3635d5c65159SKalle Valo 		u8 rxdma_err =
3636d5c65159SKalle Valo 			FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE,
3637d5c65159SKalle Valo 				  ent_desc->info1);
3638d5c65159SKalle Valo 		if (rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_FLUSH_REQUEST_ERR ||
3639d5c65159SKalle Valo 		    rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_MPDU_LEN_ERR ||
3640d5c65159SKalle Valo 		    rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_OVERFLOW_ERR) {
3641d5c65159SKalle Valo 			drop_mpdu = true;
3642d5c65159SKalle Valo 			pmon->rx_mon_stats.dest_mpdu_drop++;
3643d5c65159SKalle Valo 		}
3644d5c65159SKalle Valo 	}
3645d5c65159SKalle Valo 
3646d5c65159SKalle Valo 	is_frag = false;
3647d5c65159SKalle Valo 	is_first_msdu = true;
3648d5c65159SKalle Valo 
3649d5c65159SKalle Valo 	do {
3650d5c65159SKalle Valo 		if (pmon->mon_last_linkdesc_paddr == paddr) {
3651d5c65159SKalle Valo 			pmon->rx_mon_stats.dup_mon_linkdesc_cnt++;
3652d5c65159SKalle Valo 			return rx_bufs_used;
3653d5c65159SKalle Valo 		}
3654d5c65159SKalle Valo 
3655d5c65159SKalle Valo 		rx_msdu_link_desc =
3656d5c65159SKalle Valo 			(void *)pmon->link_desc_banks[sw_cookie].vaddr +
3657d5c65159SKalle Valo 			(paddr - pmon->link_desc_banks[sw_cookie].paddr);
3658d5c65159SKalle Valo 
3659d5c65159SKalle Valo 		ath11k_hal_rx_msdu_list_get(ar, rx_msdu_link_desc, &msdu_list,
3660d5c65159SKalle Valo 					    &num_msdus);
3661d5c65159SKalle Valo 
3662d5c65159SKalle Valo 		for (i = 0; i < num_msdus; i++) {
3663d5c65159SKalle Valo 			u32 l2_hdr_offset;
3664d5c65159SKalle Valo 
3665d5c65159SKalle Valo 			if (pmon->mon_last_buf_cookie == msdu_list.sw_cookie[i]) {
3666d5c65159SKalle Valo 				ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
3667d5c65159SKalle Valo 					   "i %d last_cookie %d is the same\n",
3668d5c65159SKalle Valo 					   i, pmon->mon_last_buf_cookie);
3669d5c65159SKalle Valo 				drop_mpdu = true;
3670d5c65159SKalle Valo 				pmon->rx_mon_stats.dup_mon_buf_cnt++;
3671d5c65159SKalle Valo 				continue;
3672d5c65159SKalle Valo 			}
3673d5c65159SKalle Valo 			buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
3674d5c65159SKalle Valo 					   msdu_list.sw_cookie[i]);
3675d5c65159SKalle Valo 
3676d5c65159SKalle Valo 			spin_lock_bh(&rx_ring->idr_lock);
3677d5c65159SKalle Valo 			msdu = idr_find(&rx_ring->bufs_idr, buf_id);
3678d5c65159SKalle Valo 			spin_unlock_bh(&rx_ring->idr_lock);
3679d5c65159SKalle Valo 			if (!msdu) {
3680d5c65159SKalle Valo 				ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
3681d5c65159SKalle Valo 					   "msdu_pop: invalid buf_id %d\n", buf_id);
3682d5c65159SKalle Valo 				break;
3683d5c65159SKalle Valo 			}
3684d5c65159SKalle Valo 			rxcb = ATH11K_SKB_RXCB(msdu);
3685d5c65159SKalle Valo 			if (!rxcb->unmapped) {
3686d5c65159SKalle Valo 				dma_unmap_single(ar->ab->dev, rxcb->paddr,
3687d5c65159SKalle Valo 						 msdu->len +
3688d5c65159SKalle Valo 						 skb_tailroom(msdu),
3689d5c65159SKalle Valo 						 DMA_FROM_DEVICE);
3690d5c65159SKalle Valo 				rxcb->unmapped = 1;
3691d5c65159SKalle Valo 			}
3692d5c65159SKalle Valo 			if (drop_mpdu) {
3693d5c65159SKalle Valo 				ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
3694d5c65159SKalle Valo 					   "i %d drop msdu %p *ppdu_id %x\n",
3695d5c65159SKalle Valo 					   i, msdu, *ppdu_id);
3696d5c65159SKalle Valo 				dev_kfree_skb_any(msdu);
3697d5c65159SKalle Valo 				msdu = NULL;
3698d5c65159SKalle Valo 				goto next_msdu;
3699d5c65159SKalle Valo 			}
3700d5c65159SKalle Valo 
3701d5c65159SKalle Valo 			rx_desc = (struct hal_rx_desc *)msdu->data;
3702d5c65159SKalle Valo 
3703d5c65159SKalle Valo 			rx_pkt_offset = sizeof(struct hal_rx_desc);
3704d5c65159SKalle Valo 			l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad(rx_desc);
3705d5c65159SKalle Valo 
3706d5c65159SKalle Valo 			if (is_first_msdu) {
3707d5c65159SKalle Valo 				if (!ath11k_dp_rxdesc_mpdu_valid(rx_desc)) {
3708d5c65159SKalle Valo 					drop_mpdu = true;
3709d5c65159SKalle Valo 					dev_kfree_skb_any(msdu);
3710d5c65159SKalle Valo 					msdu = NULL;
3711d5c65159SKalle Valo 					pmon->mon_last_linkdesc_paddr = paddr;
3712d5c65159SKalle Valo 					goto next_msdu;
3713d5c65159SKalle Valo 				}
3714d5c65159SKalle Valo 
3715d5c65159SKalle Valo 				msdu_ppdu_id =
3716d5c65159SKalle Valo 					ath11k_dp_rxdesc_get_ppduid(rx_desc);
3717d5c65159SKalle Valo 
3718d5c65159SKalle Valo 				if (ath11k_dp_rx_mon_comp_ppduid(msdu_ppdu_id,
3719d5c65159SKalle Valo 								 ppdu_id,
3720d5c65159SKalle Valo 								 &rx_bufs_used))
3721d5c65159SKalle Valo 					return rx_bufs_used;
3722d5c65159SKalle Valo 				pmon->mon_last_linkdesc_paddr = paddr;
3723d5c65159SKalle Valo 				is_first_msdu = false;
3724d5c65159SKalle Valo 			}
3725d5c65159SKalle Valo 			ath11k_dp_mon_get_buf_len(&msdu_list.msdu_info[i],
3726d5c65159SKalle Valo 						  &is_frag, &total_len,
3727d5c65159SKalle Valo 						  &frag_len, &msdu_cnt);
3728d5c65159SKalle Valo 			rx_buf_size = rx_pkt_offset + l2_hdr_offset + frag_len;
3729d5c65159SKalle Valo 
3730d5c65159SKalle Valo 			ath11k_dp_pkt_set_pktlen(msdu, rx_buf_size);
3731d5c65159SKalle Valo 
3732d5c65159SKalle Valo 			if (!(*head_msdu))
3733d5c65159SKalle Valo 				*head_msdu = msdu;
3734d5c65159SKalle Valo 			else if (last)
3735d5c65159SKalle Valo 				last->next = msdu;
3736d5c65159SKalle Valo 
3737d5c65159SKalle Valo 			last = msdu;
3738d5c65159SKalle Valo next_msdu:
3739d5c65159SKalle Valo 			pmon->mon_last_buf_cookie = msdu_list.sw_cookie[i];
3740d5c65159SKalle Valo 			rx_bufs_used++;
3741d5c65159SKalle Valo 			spin_lock_bh(&rx_ring->idr_lock);
3742d5c65159SKalle Valo 			idr_remove(&rx_ring->bufs_idr, buf_id);
3743d5c65159SKalle Valo 			spin_unlock_bh(&rx_ring->idr_lock);
3744d5c65159SKalle Valo 		}
3745d5c65159SKalle Valo 
3746d5c65159SKalle Valo 		ath11k_dp_rx_mon_next_link_desc_get(rx_msdu_link_desc, &paddr,
3747d5c65159SKalle Valo 						    &sw_cookie,
3748d5c65159SKalle Valo 						    &p_buf_addr_info);
3749d5c65159SKalle Valo 
3750d5c65159SKalle Valo 		if (ath11k_dp_rx_monitor_link_desc_return(ar,
3751d5c65159SKalle Valo 							  p_last_buf_addr_info,
3752d5c65159SKalle Valo 							  dp->mac_id))
3753d5c65159SKalle Valo 			ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
3754d5c65159SKalle Valo 				   "dp_rx_monitor_link_desc_return failed");
3755d5c65159SKalle Valo 
3756d5c65159SKalle Valo 		p_last_buf_addr_info = p_buf_addr_info;
3757d5c65159SKalle Valo 
3758d5c65159SKalle Valo 	} while (paddr && msdu_cnt);
3759d5c65159SKalle Valo 
3760d5c65159SKalle Valo 	if (last)
3761d5c65159SKalle Valo 		last->next = NULL;
3762d5c65159SKalle Valo 
3763d5c65159SKalle Valo 	*tail_msdu = msdu;
3764d5c65159SKalle Valo 
3765d5c65159SKalle Valo 	if (msdu_cnt == 0)
3766d5c65159SKalle Valo 		*npackets = 1;
3767d5c65159SKalle Valo 
3768d5c65159SKalle Valo 	return rx_bufs_used;
3769d5c65159SKalle Valo }
3770d5c65159SKalle Valo 
3771d5c65159SKalle Valo static void ath11k_dp_rx_msdus_set_payload(struct sk_buff *msdu)
3772d5c65159SKalle Valo {
3773d5c65159SKalle Valo 	u32 rx_pkt_offset, l2_hdr_offset;
3774d5c65159SKalle Valo 
3775d5c65159SKalle Valo 	rx_pkt_offset = sizeof(struct hal_rx_desc);
3776d5c65159SKalle Valo 	l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad((struct hal_rx_desc *)msdu->data);
3777d5c65159SKalle Valo 	skb_pull(msdu, rx_pkt_offset + l2_hdr_offset);
3778d5c65159SKalle Valo }
3779d5c65159SKalle Valo 
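/*
 * Prepare the popped MSDU chain for mac80211: fill the RX status from
 * the first descriptor and normalize the buffers according to the
 * decap format.  RAW frames only need the HW descriptor and L3 pad
 * stripped plus the FCS trimmed from the last buffer; Native WiFi
 * frames get the 802.11 header (plus QoS control, when present) copied
 * back in for each MSDU.  Other decap formats are rejected and NULL is
 * returned, as it is for an MPDU length error on the first descriptor.
 */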
3780d5c65159SKalle Valo static struct sk_buff *
3781d5c65159SKalle Valo ath11k_dp_rx_mon_merg_msdus(struct ath11k *ar,
3782d5c65159SKalle Valo 			    u32 mac_id, struct sk_buff *head_msdu,
3783d5c65159SKalle Valo 			    struct sk_buff *last_msdu,
3784d5c65159SKalle Valo 			    struct ieee80211_rx_status *rxs)
3785d5c65159SKalle Valo {
3786d5c65159SKalle Valo 	struct sk_buff *msdu, *mpdu_buf, *prev_buf;
3787d5c65159SKalle Valo 	u32 decap_format, wifi_hdr_len;
3788d5c65159SKalle Valo 	struct hal_rx_desc *rx_desc;
3789d5c65159SKalle Valo 	char *hdr_desc;
3790d5c65159SKalle Valo 	u8 *dest;
3791d5c65159SKalle Valo 	struct ieee80211_hdr_3addr *wh;
3792d5c65159SKalle Valo 
3793d5c65159SKalle Valo 	mpdu_buf = NULL;
3794d5c65159SKalle Valo 
3795d5c65159SKalle Valo 	if (!head_msdu)
3796d5c65159SKalle Valo 		goto err_merge_fail;
3797d5c65159SKalle Valo 
3798d5c65159SKalle Valo 	rx_desc = (struct hal_rx_desc *)head_msdu->data;
3799d5c65159SKalle Valo 
3800d5c65159SKalle Valo 	if (ath11k_dp_rxdesc_get_mpdulen_err(rx_desc))
3801d5c65159SKalle Valo 		return NULL;
3802d5c65159SKalle Valo 
3803d5c65159SKalle Valo 	decap_format = ath11k_dp_rxdesc_get_decap_format(rx_desc);
3804d5c65159SKalle Valo 
3805d5c65159SKalle Valo 	ath11k_dp_rx_h_ppdu(ar, rx_desc, rxs);
3806d5c65159SKalle Valo 
3807d5c65159SKalle Valo 	if (decap_format == DP_RX_DECAP_TYPE_RAW) {
3808d5c65159SKalle Valo 		ath11k_dp_rx_msdus_set_payload(head_msdu);
3809d5c65159SKalle Valo 
3810d5c65159SKalle Valo 		prev_buf = head_msdu;
3811d5c65159SKalle Valo 		msdu = head_msdu->next;
3812d5c65159SKalle Valo 
3813d5c65159SKalle Valo 		while (msdu) {
3814d5c65159SKalle Valo 			ath11k_dp_rx_msdus_set_payload(msdu);
3815d5c65159SKalle Valo 
3816d5c65159SKalle Valo 			prev_buf = msdu;
3817d5c65159SKalle Valo 			msdu = msdu->next;
3818d5c65159SKalle Valo 		}
3819d5c65159SKalle Valo 
3820d5c65159SKalle Valo 		prev_buf->next = NULL;
3821d5c65159SKalle Valo 
3822d5c65159SKalle Valo 		skb_trim(prev_buf, prev_buf->len - HAL_RX_FCS_LEN);
3823d5c65159SKalle Valo 	} else if (decap_format == DP_RX_DECAP_TYPE_NATIVE_WIFI) {
3824d5c65159SKalle Valo 		__le16 qos_field;
3825d5c65159SKalle Valo 		u8 qos_pkt = 0;
3826d5c65159SKalle Valo 
3827d5c65159SKalle Valo 		rx_desc = (struct hal_rx_desc *)head_msdu->data;
3828d5c65159SKalle Valo 		hdr_desc = ath11k_dp_rxdesc_get_80211hdr(rx_desc);
3829d5c65159SKalle Valo 
3830d5c65159SKalle Valo 		/* Base size */
3831d5c65159SKalle Valo 		wifi_hdr_len = sizeof(struct ieee80211_hdr_3addr);
3832d5c65159SKalle Valo 		wh = (struct ieee80211_hdr_3addr *)hdr_desc;
3833d5c65159SKalle Valo 
3834d5c65159SKalle Valo 		if (ieee80211_is_data_qos(wh->frame_control)) {
3835d5c65159SKalle Valo 			struct ieee80211_qos_hdr *qwh =
3836d5c65159SKalle Valo 					(struct ieee80211_qos_hdr *)hdr_desc;
3837d5c65159SKalle Valo 
3838d5c65159SKalle Valo 			qos_field = qwh->qos_ctrl;
3839d5c65159SKalle Valo 			qos_pkt = 1;
3840d5c65159SKalle Valo 		}
3841d5c65159SKalle Valo 		msdu = head_msdu;
3842d5c65159SKalle Valo 
3843d5c65159SKalle Valo 		while (msdu) {
3844d5c65159SKalle Valo 			rx_desc = (struct hal_rx_desc *)msdu->data;
3845d5c65159SKalle Valo 			hdr_desc = ath11k_dp_rxdesc_get_80211hdr(rx_desc);
3846d5c65159SKalle Valo 
3847d5c65159SKalle Valo 			if (qos_pkt) {
3848d5c65159SKalle Valo 				dest = skb_push(msdu, sizeof(__le16));
3849d5c65159SKalle Valo 				if (!dest)
3850d5c65159SKalle Valo 					goto err_merge_fail;
3851d5c65159SKalle Valo 				memcpy(dest, hdr_desc, wifi_hdr_len);
3852d5c65159SKalle Valo 				memcpy(dest + wifi_hdr_len,
3853d5c65159SKalle Valo 				       (u8 *)&qos_field, sizeof(__le16));
3854d5c65159SKalle Valo 			}
3855d5c65159SKalle Valo 			ath11k_dp_rx_msdus_set_payload(msdu);
3856d5c65159SKalle Valo 			prev_buf = msdu;
3857d5c65159SKalle Valo 			msdu = msdu->next;
3858d5c65159SKalle Valo 		}
3859d5c65159SKalle Valo 		dest = skb_put(prev_buf, HAL_RX_FCS_LEN);
3860d5c65159SKalle Valo 		if (!dest)
3861d5c65159SKalle Valo 			goto err_merge_fail;
3862d5c65159SKalle Valo 
3863d5c65159SKalle Valo 		ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
3864d5c65159SKalle Valo 			   "prev_buf %pK prev_buf->len %u",
3865d5c65159SKalle Valo 			   prev_buf, prev_buf->len);
3866d5c65159SKalle Valo 	} else {
3867d5c65159SKalle Valo 		ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
3868d5c65159SKalle Valo 			   "decap format %d is not supported!\n",
3869d5c65159SKalle Valo 			   decap_format);
3870d5c65159SKalle Valo 		goto err_merge_fail;
3871d5c65159SKalle Valo 	}
3872d5c65159SKalle Valo 
3873d5c65159SKalle Valo 	return head_msdu;
3874d5c65159SKalle Valo 
3875d5c65159SKalle Valo err_merge_fail:
3876d5c65159SKalle Valo 	if (mpdu_buf && decap_format != DP_RX_DECAP_TYPE_RAW) {
3877d5c65159SKalle Valo 		ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
3878d5c65159SKalle Valo 			   "err_merge_fail mpdu_buf %pK", mpdu_buf);
3879d5c65159SKalle Valo 		/* Free the head buffer */
3880d5c65159SKalle Valo 		dev_kfree_skb_any(mpdu_buf);
3881d5c65159SKalle Valo 	}
3882d5c65159SKalle Valo 	return NULL;
3883d5c65159SKalle Valo }
3884d5c65159SKalle Valo 
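/*
 * Merge the popped MSDU chain and hand the resulting buffers to
 * mac80211 through ath11k_dp_rx_deliver_msdu(), tagged as
 * RX_FLAG_ONLY_MONITOR; RX_FLAG_AMSDU_MORE is set on every buffer that
 * has a successor and RX_FLAG_ALLOW_SAME_PN on everything after the
 * first.  If merging fails the whole chain is freed and -EINVAL is
 * returned.
 */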
3885d5c65159SKalle Valo static int ath11k_dp_rx_mon_deliver(struct ath11k *ar, u32 mac_id,
3886d5c65159SKalle Valo 				    struct sk_buff *head_msdu,
3887d5c65159SKalle Valo 				    struct sk_buff *tail_msdu,
3888d5c65159SKalle Valo 				    struct napi_struct *napi)
3889d5c65159SKalle Valo {
3890d5c65159SKalle Valo 	struct ath11k_pdev_dp *dp = &ar->dp;
3891d5c65159SKalle Valo 	struct sk_buff *mon_skb, *skb_next, *header;
3892d5c65159SKalle Valo 	struct ieee80211_rx_status *rxs = &dp->rx_status, *status;
3893d5c65159SKalle Valo 
3894d5c65159SKalle Valo 	mon_skb = ath11k_dp_rx_mon_merg_msdus(ar, mac_id, head_msdu,
3895d5c65159SKalle Valo 					      tail_msdu, rxs);
3896d5c65159SKalle Valo 
3897d5c65159SKalle Valo 	if (!mon_skb)
3898d5c65159SKalle Valo 		goto mon_deliver_fail;
3899d5c65159SKalle Valo 
3900d5c65159SKalle Valo 	header = mon_skb;
3901d5c65159SKalle Valo 
3902d5c65159SKalle Valo 	rxs->flag = 0;
3903d5c65159SKalle Valo 	do {
3904d5c65159SKalle Valo 		skb_next = mon_skb->next;
3905d5c65159SKalle Valo 		if (!skb_next)
3906d5c65159SKalle Valo 			rxs->flag &= ~RX_FLAG_AMSDU_MORE;
3907d5c65159SKalle Valo 		else
3908d5c65159SKalle Valo 			rxs->flag |= RX_FLAG_AMSDU_MORE;
3909d5c65159SKalle Valo 
3910d5c65159SKalle Valo 		if (mon_skb == header) {
3911d5c65159SKalle Valo 			header = NULL;
3912d5c65159SKalle Valo 			rxs->flag &= ~RX_FLAG_ALLOW_SAME_PN;
3913d5c65159SKalle Valo 		} else {
3914d5c65159SKalle Valo 			rxs->flag |= RX_FLAG_ALLOW_SAME_PN;
3915d5c65159SKalle Valo 		}
3916d5c65159SKalle Valo 		rxs->flag |= RX_FLAG_ONLY_MONITOR;
3917d5c65159SKalle Valo 
3918d5c65159SKalle Valo 		status = IEEE80211_SKB_RXCB(mon_skb);
3919d5c65159SKalle Valo 		*status = *rxs;
3920d5c65159SKalle Valo 
3921d5c65159SKalle Valo 		ath11k_dp_rx_deliver_msdu(ar, napi, mon_skb);
3922d5c65159SKalle Valo 		mon_skb = skb_next;
3923d5c65159SKalle Valo 	} while (mon_skb && (mon_skb != tail_msdu));
3924d5c65159SKalle Valo 	rxs->flag = 0;
3925d5c65159SKalle Valo 
3926d5c65159SKalle Valo 	return 0;
3927d5c65159SKalle Valo 
3928d5c65159SKalle Valo mon_deliver_fail:
3929d5c65159SKalle Valo 	mon_skb = head_msdu;
3930d5c65159SKalle Valo 	while (mon_skb) {
3931d5c65159SKalle Valo 		skb_next = mon_skb->next;
3932d5c65159SKalle Valo 		dev_kfree_skb_any(mon_skb);
3933d5c65159SKalle Valo 		mon_skb = skb_next;
3934d5c65159SKalle Valo 	}
3935d5c65159SKalle Valo 	return -EINVAL;
3936d5c65159SKalle Valo }
3937d5c65159SKalle Valo 
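/*
 * Drain the RXDMA monitor destination ring for the PPDU currently
 * described by pmon->mon_ppdu_info (under pmon->mon_lock): pop each
 * MPDU, deliver complete ones to mac80211 and stop as soon as an entry
 * for a different PPDU id shows up, flipping the status machine back
 * to DP_PPDU_STATUS_START.  Any buffers consumed along the way are
 * replenished into the monitor buffer ring afterwards.
 */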
3938d5c65159SKalle Valo static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, u32 quota,
3939d5c65159SKalle Valo 					  struct napi_struct *napi)
3940d5c65159SKalle Valo {
3941d5c65159SKalle Valo 	struct ath11k_pdev_dp *dp = &ar->dp;
3942d5c65159SKalle Valo 	struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
3943d5c65159SKalle Valo 	void *ring_entry;
3944d5c65159SKalle Valo 	void *mon_dst_srng;
3945d5c65159SKalle Valo 	u32 ppdu_id;
3946d5c65159SKalle Valo 	u32 rx_bufs_used;
3947d5c65159SKalle Valo 	struct ath11k_pdev_mon_stats *rx_mon_stats;
3948d5c65159SKalle Valo 	u32 npackets = 0;
3949d5c65159SKalle Valo 
3950d5c65159SKalle Valo 	mon_dst_srng = &ar->ab->hal.srng_list[dp->rxdma_mon_dst_ring.ring_id];
3951d5c65159SKalle Valo 
3952d5c65159SKalle Valo 	if (!mon_dst_srng) {
3953d5c65159SKalle Valo 		ath11k_warn(ar->ab,
3954d5c65159SKalle Valo 			    "HAL Monitor Destination Ring Init Failed -- %pK",
3955d5c65159SKalle Valo 			    mon_dst_srng);
3956d5c65159SKalle Valo 		return;
3957d5c65159SKalle Valo 	}
3958d5c65159SKalle Valo 
3959d5c65159SKalle Valo 	spin_lock_bh(&pmon->mon_lock);
3960d5c65159SKalle Valo 
3961d5c65159SKalle Valo 	ath11k_hal_srng_access_begin(ar->ab, mon_dst_srng);
3962d5c65159SKalle Valo 
3963d5c65159SKalle Valo 	ppdu_id = pmon->mon_ppdu_info.ppdu_id;
3964d5c65159SKalle Valo 	rx_bufs_used = 0;
3965d5c65159SKalle Valo 	rx_mon_stats = &pmon->rx_mon_stats;
3966d5c65159SKalle Valo 
3967d5c65159SKalle Valo 	while ((ring_entry = ath11k_hal_srng_dst_peek(ar->ab, mon_dst_srng))) {
3968d5c65159SKalle Valo 		struct sk_buff *head_msdu, *tail_msdu;
3969d5c65159SKalle Valo 
3970d5c65159SKalle Valo 		head_msdu = NULL;
3971d5c65159SKalle Valo 		tail_msdu = NULL;
3972d5c65159SKalle Valo 
3973d5c65159SKalle Valo 		rx_bufs_used += ath11k_dp_rx_mon_mpdu_pop(ar, ring_entry,
3974d5c65159SKalle Valo 							  &head_msdu,
3975d5c65159SKalle Valo 							  &tail_msdu,
3976d5c65159SKalle Valo 							  &npackets, &ppdu_id);
3977d5c65159SKalle Valo 
3978d5c65159SKalle Valo 		if (ppdu_id != pmon->mon_ppdu_info.ppdu_id) {
3979d5c65159SKalle Valo 			pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
3980d5c65159SKalle Valo 			ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
3981d5c65159SKalle Valo 				   "dest_rx: new ppdu_id %x != status ppdu_id %x",
3982d5c65159SKalle Valo 				   ppdu_id, pmon->mon_ppdu_info.ppdu_id);
3983d5c65159SKalle Valo 			break;
3984d5c65159SKalle Valo 		}
3985d5c65159SKalle Valo 		if (head_msdu && tail_msdu) {
3986d5c65159SKalle Valo 			ath11k_dp_rx_mon_deliver(ar, dp->mac_id, head_msdu,
3987d5c65159SKalle Valo 						 tail_msdu, napi);
3988d5c65159SKalle Valo 			rx_mon_stats->dest_mpdu_done++;
3989d5c65159SKalle Valo 		}
3990d5c65159SKalle Valo 
3991d5c65159SKalle Valo 		ring_entry = ath11k_hal_srng_dst_get_next_entry(ar->ab,
3992d5c65159SKalle Valo 								mon_dst_srng);
3993d5c65159SKalle Valo 	}
3994d5c65159SKalle Valo 	ath11k_hal_srng_access_end(ar->ab, mon_dst_srng);
3995d5c65159SKalle Valo 
3996d5c65159SKalle Valo 	spin_unlock_bh(&pmon->mon_lock);
3997d5c65159SKalle Valo 
3998d5c65159SKalle Valo 	if (rx_bufs_used) {
3999d5c65159SKalle Valo 		rx_mon_stats->dest_ppdu_done++;
4000d5c65159SKalle Valo 		ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id,
4001d5c65159SKalle Valo 					   &dp->rxdma_mon_buf_ring,
4002d5c65159SKalle Valo 					   rx_bufs_used,
4003d5c65159SKalle Valo 					   HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC);
4004d5c65159SKalle Valo 	}
4005d5c65159SKalle Valo }
4006d5c65159SKalle Valo 
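/*
 * Consume the monitor status skbs queued by the status ring reaper and
 * feed their TLVs to the HAL parser.  Once a complete PPDU worth of
 * status TLVs has been seen, the matching destination ring entries are
 * processed and the PPDU state machine is re-armed for the next PPDU.
 */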
4007d5c65159SKalle Valo static void ath11k_dp_rx_mon_status_process_tlv(struct ath11k *ar,
4008d5c65159SKalle Valo 						u32 quota,
4009d5c65159SKalle Valo 						struct napi_struct *napi)
4010d5c65159SKalle Valo {
4011d5c65159SKalle Valo 	struct ath11k_pdev_dp *dp = &ar->dp;
4012d5c65159SKalle Valo 	struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
4013d5c65159SKalle Valo 	struct hal_rx_mon_ppdu_info *ppdu_info;
4014d5c65159SKalle Valo 	struct sk_buff *status_skb;
4015d5c65159SKalle Valo 	u32 tlv_status = HAL_TLV_STATUS_BUF_DONE;
4016d5c65159SKalle Valo 	struct ath11k_pdev_mon_stats *rx_mon_stats;
4017d5c65159SKalle Valo 
4018d5c65159SKalle Valo 	ppdu_info = &pmon->mon_ppdu_info;
4019d5c65159SKalle Valo 	rx_mon_stats = &pmon->rx_mon_stats;
4020d5c65159SKalle Valo 
4021d5c65159SKalle Valo 	if (pmon->mon_ppdu_status != DP_PPDU_STATUS_START)
4022d5c65159SKalle Valo 		return;
4023d5c65159SKalle Valo 
4024d5c65159SKalle Valo 	while (!skb_queue_empty(&pmon->rx_status_q)) {
4025d5c65159SKalle Valo 		status_skb = skb_dequeue(&pmon->rx_status_q);
4026d5c65159SKalle Valo 
4027d5c65159SKalle Valo 		tlv_status = ath11k_hal_rx_parse_mon_status(ar->ab, ppdu_info,
4028d5c65159SKalle Valo 							    status_skb);
4029d5c65159SKalle Valo 		if (tlv_status == HAL_TLV_STATUS_PPDU_DONE) {
4030d5c65159SKalle Valo 			rx_mon_stats->status_ppdu_done++;
4031d5c65159SKalle Valo 			pmon->mon_ppdu_status = DP_PPDU_STATUS_DONE;
4032d5c65159SKalle Valo 			ath11k_dp_rx_mon_dest_process(ar, quota, napi);
4033d5c65159SKalle Valo 			pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
4034d5c65159SKalle Valo 		}
4035d5c65159SKalle Valo 		dev_kfree_skb_any(status_skb);
4036d5c65159SKalle Valo 	}
4037d5c65159SKalle Valo }
4038d5c65159SKalle Valo 
4039d5c65159SKalle Valo static int ath11k_dp_mon_process_rx(struct ath11k_base *ab, int mac_id,
4040d5c65159SKalle Valo 				    struct napi_struct *napi, int budget)
4041d5c65159SKalle Valo {
4042d5c65159SKalle Valo 	struct ath11k *ar = ab->pdevs[mac_id].ar;
4043d5c65159SKalle Valo 	struct ath11k_pdev_dp *dp = &ar->dp;
4044d5c65159SKalle Valo 	struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
4045d5c65159SKalle Valo 	int num_buffs_reaped = 0;
4046d5c65159SKalle Valo 
4047d5c65159SKalle Valo 	num_buffs_reaped = ath11k_dp_rx_reap_mon_status_ring(ar->ab, dp->mac_id, &budget,
4048d5c65159SKalle Valo 							     &pmon->rx_status_q);
4049d5c65159SKalle Valo 	if (num_buffs_reaped)
4050d5c65159SKalle Valo 		ath11k_dp_rx_mon_status_process_tlv(ar, budget, napi);
4051d5c65159SKalle Valo 
4052d5c65159SKalle Valo 	return num_buffs_reaped;
4053d5c65159SKalle Valo }
4054d5c65159SKalle Valo 
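/*
 * NAPI entry point for the monitor rings: with monitor mode enabled the
 * full path runs (reap the status ring, then parse TLVs and process the
 * destination ring); otherwise only the monitor status ring is serviced
 * via ath11k_dp_rx_process_mon_status().
 */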
4055d5c65159SKalle Valo int ath11k_dp_rx_process_mon_rings(struct ath11k_base *ab, int mac_id,
4056d5c65159SKalle Valo 				   struct napi_struct *napi, int budget)
4057d5c65159SKalle Valo {
4058d5c65159SKalle Valo 	struct ath11k *ar = ab->pdevs[mac_id].ar;
4059d5c65159SKalle Valo 	int ret = 0;
4060d5c65159SKalle Valo 
4061d5c65159SKalle Valo 	if (test_bit(ATH11K_FLAG_MONITOR_ENABLED, &ar->monitor_flags))
4062d5c65159SKalle Valo 		ret = ath11k_dp_mon_process_rx(ab, mac_id, napi, budget);
4063d5c65159SKalle Valo 	else
4064d5c65159SKalle Valo 		ret = ath11k_dp_rx_process_mon_status(ab, mac_id, napi, budget);
4065d5c65159SKalle Valo 	return ret;
4066d5c65159SKalle Valo }
4067d5c65159SKalle Valo 
4068d5c65159SKalle Valo static int ath11k_dp_rx_pdev_mon_status_attach(struct ath11k *ar)
4069d5c65159SKalle Valo {
4070d5c65159SKalle Valo 	struct ath11k_pdev_dp *dp = &ar->dp;
4071d5c65159SKalle Valo 	struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
4072d5c65159SKalle Valo 
4073d5c65159SKalle Valo 	skb_queue_head_init(&pmon->rx_status_q);
4074d5c65159SKalle Valo 
4075d5c65159SKalle Valo 	pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
4076d5c65159SKalle Valo 
4077d5c65159SKalle Valo 	memset(&pmon->rx_mon_stats, 0,
4078d5c65159SKalle Valo 	       sizeof(pmon->rx_mon_stats));
4079d5c65159SKalle Valo 	return 0;
4080d5c65159SKalle Valo }
4081d5c65159SKalle Valo 
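/*
 * Per-pdev monitor attach: initialize the status skb queue and monitor
 * stats, size the link descriptor pool from the monitor descriptor
 * ring, set up the link descriptor banks behind that ring, and prime
 * the duplicate-detection cookies and the monitor lock.
 */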
4082d5c65159SKalle Valo int ath11k_dp_rx_pdev_mon_attach(struct ath11k *ar)
4083d5c65159SKalle Valo {
4084d5c65159SKalle Valo 	struct ath11k_pdev_dp *dp = &ar->dp;
4085d5c65159SKalle Valo 	struct ath11k_mon_data *pmon = &dp->mon_data;
4086d5c65159SKalle Valo 	struct hal_srng *mon_desc_srng = NULL;
4087d5c65159SKalle Valo 	struct dp_srng *dp_srng;
4088d5c65159SKalle Valo 	int ret = 0;
4089d5c65159SKalle Valo 	u32 n_link_desc = 0;
4090d5c65159SKalle Valo 
4091d5c65159SKalle Valo 	ret = ath11k_dp_rx_pdev_mon_status_attach(ar);
4092d5c65159SKalle Valo 	if (ret) {
4093d5c65159SKalle Valo 		ath11k_warn(ar->ab, "pdev_mon_status_attach() failed");
4094d5c65159SKalle Valo 		return ret;
4095d5c65159SKalle Valo 	}
4096d5c65159SKalle Valo 
4097d5c65159SKalle Valo 	dp_srng = &dp->rxdma_mon_desc_ring;
4098d5c65159SKalle Valo 	n_link_desc = dp_srng->size /
4099d5c65159SKalle Valo 		ath11k_hal_srng_get_entrysize(HAL_RXDMA_MONITOR_DESC);
4100d5c65159SKalle Valo 	mon_desc_srng =
4101d5c65159SKalle Valo 		&ar->ab->hal.srng_list[dp->rxdma_mon_desc_ring.ring_id];
4102d5c65159SKalle Valo 
4103d5c65159SKalle Valo 	ret = ath11k_dp_link_desc_setup(ar->ab, pmon->link_desc_banks,
4104d5c65159SKalle Valo 					HAL_RXDMA_MONITOR_DESC, mon_desc_srng,
4105d5c65159SKalle Valo 					n_link_desc);
4106d5c65159SKalle Valo 	if (ret) {
4107d5c65159SKalle Valo 		ath11k_warn(ar->ab, "mon_link_desc_pool_setup() failed");
4108d5c65159SKalle Valo 		return ret;
4109d5c65159SKalle Valo 	}
4110d5c65159SKalle Valo 	pmon->mon_last_linkdesc_paddr = 0;
4111d5c65159SKalle Valo 	pmon->mon_last_buf_cookie = DP_RX_DESC_COOKIE_MAX + 1;
4112d5c65159SKalle Valo 	spin_lock_init(&pmon->mon_lock);
4113d5c65159SKalle Valo 	return 0;
4114d5c65159SKalle Valo }
4115d5c65159SKalle Valo 
4116d5c65159SKalle Valo static int ath11k_dp_mon_link_free(struct ath11k *ar)
4117d5c65159SKalle Valo {
4118d5c65159SKalle Valo 	struct ath11k_pdev_dp *dp = &ar->dp;
4119d5c65159SKalle Valo 	struct ath11k_mon_data *pmon = &dp->mon_data;
4120d5c65159SKalle Valo 
4121d5c65159SKalle Valo 	ath11k_dp_link_desc_cleanup(ar->ab, pmon->link_desc_banks,
4122d5c65159SKalle Valo 				    HAL_RXDMA_MONITOR_DESC,
4123d5c65159SKalle Valo 				    &dp->rxdma_mon_desc_ring);
4124d5c65159SKalle Valo 	return 0;
4125d5c65159SKalle Valo }
4126d5c65159SKalle Valo 
4127d5c65159SKalle Valo int ath11k_dp_rx_pdev_mon_detach(struct ath11k *ar)
4128d5c65159SKalle Valo {
4129d5c65159SKalle Valo 	ath11k_dp_mon_link_free(ar);
4130d5c65159SKalle Valo 	return 0;
4131d5c65159SKalle Valo }