1 // SPDX-License-Identifier: BSD-3-Clause-Clear
2 /*
3  * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
4  */
5 
6 #include <linux/ieee80211.h>
7 #include <linux/kernel.h>
8 #include <linux/skbuff.h>
9 #include <crypto/hash.h>
10 #include "core.h"
11 #include "debug.h"
12 #include "hal_desc.h"
13 #include "hw.h"
14 #include "dp_rx.h"
15 #include "hal_rx.h"
16 #include "dp_tx.h"
17 #include "peer.h"
18 
19 #define ATH11K_DP_RX_FRAGMENT_TIMEOUT_MS (2 * HZ)
20 
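/* Helpers for extracting fields from the hardware rx descriptor
 * (struct hal_rx_desc). The descriptor is prepended to every received
 * buffer and carries the attention, mpdu_start, msdu_start, msdu_end and
 * mpdu_end TLVs filled in by the hardware.
 */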
21 static u8 *ath11k_dp_rx_h_80211_hdr(struct hal_rx_desc *desc)
22 {
23 	return desc->hdr_status;
24 }
25 
26 static enum hal_encrypt_type ath11k_dp_rx_h_mpdu_start_enctype(struct hal_rx_desc *desc)
27 {
28 	if (!(__le32_to_cpu(desc->mpdu_start.info1) &
29 	    RX_MPDU_START_INFO1_ENCRYPT_INFO_VALID))
30 		return HAL_ENCRYPT_TYPE_OPEN;
31 
32 	return FIELD_GET(RX_MPDU_START_INFO2_ENC_TYPE,
33 			 __le32_to_cpu(desc->mpdu_start.info2));
34 }
35 
36 static u8 ath11k_dp_rx_h_msdu_start_decap_type(struct hal_rx_desc *desc)
37 {
38 	return FIELD_GET(RX_MSDU_START_INFO2_DECAP_FORMAT,
39 			 __le32_to_cpu(desc->msdu_start.info2));
40 }
41 
42 static u8 ath11k_dp_rx_h_msdu_start_mesh_ctl_present(struct hal_rx_desc *desc)
43 {
44 	return FIELD_GET(RX_MSDU_START_INFO2_MESH_CTRL_PRESENT,
45 			 __le32_to_cpu(desc->msdu_start.info2));
46 }
47 
48 static bool ath11k_dp_rx_h_mpdu_start_seq_ctrl_valid(struct hal_rx_desc *desc)
49 {
50 	return !!FIELD_GET(RX_MPDU_START_INFO1_MPDU_SEQ_CTRL_VALID,
51 			   __le32_to_cpu(desc->mpdu_start.info1));
52 }
53 
54 static bool ath11k_dp_rx_h_mpdu_start_fc_valid(struct hal_rx_desc *desc)
55 {
56 	return !!FIELD_GET(RX_MPDU_START_INFO1_MPDU_FCTRL_VALID,
57 			   __le32_to_cpu(desc->mpdu_start.info1));
58 }
59 
60 static bool ath11k_dp_rx_h_mpdu_start_more_frags(struct sk_buff *skb)
61 {
62 	struct ieee80211_hdr *hdr;
63 
64 	hdr = (struct ieee80211_hdr *)(skb->data + HAL_RX_DESC_SIZE);
65 	return ieee80211_has_morefrags(hdr->frame_control);
66 }
67 
68 static u16 ath11k_dp_rx_h_mpdu_start_frag_no(struct sk_buff *skb)
69 {
70 	struct ieee80211_hdr *hdr;
71 
72 	hdr = (struct ieee80211_hdr *)(skb->data + HAL_RX_DESC_SIZE);
73 	return le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
74 }
75 
76 static u16 ath11k_dp_rx_h_mpdu_start_seq_no(struct hal_rx_desc *desc)
77 {
78 	return FIELD_GET(RX_MPDU_START_INFO1_MPDU_SEQ_NUM,
79 			 __le32_to_cpu(desc->mpdu_start.info1));
80 }
81 
82 static bool ath11k_dp_rx_h_attn_msdu_done(struct hal_rx_desc *desc)
83 {
84 	return !!FIELD_GET(RX_ATTENTION_INFO2_MSDU_DONE,
85 			   __le32_to_cpu(desc->attention.info2));
86 }
87 
88 static bool ath11k_dp_rx_h_attn_l4_cksum_fail(struct hal_rx_desc *desc)
89 {
90 	return !!FIELD_GET(RX_ATTENTION_INFO1_TCP_UDP_CKSUM_FAIL,
91 			   __le32_to_cpu(desc->attention.info1));
92 }
93 
94 static bool ath11k_dp_rx_h_attn_ip_cksum_fail(struct hal_rx_desc *desc)
95 {
96 	return !!FIELD_GET(RX_ATTENTION_INFO1_IP_CKSUM_FAIL,
97 			   __le32_to_cpu(desc->attention.info1));
98 }
99 
100 static bool ath11k_dp_rx_h_attn_is_decrypted(struct hal_rx_desc *desc)
101 {
102 	return (FIELD_GET(RX_ATTENTION_INFO2_DCRYPT_STATUS_CODE,
103 			  __le32_to_cpu(desc->attention.info2)) ==
104 		RX_DESC_DECRYPT_STATUS_CODE_OK);
105 }
106 
107 static u32 ath11k_dp_rx_h_attn_mpdu_err(struct hal_rx_desc *desc)
108 {
109 	u32 info = __le32_to_cpu(desc->attention.info1);
110 	u32 errmap = 0;
111 
112 	if (info & RX_ATTENTION_INFO1_FCS_ERR)
113 		errmap |= DP_RX_MPDU_ERR_FCS;
114 
115 	if (info & RX_ATTENTION_INFO1_DECRYPT_ERR)
116 		errmap |= DP_RX_MPDU_ERR_DECRYPT;
117 
118 	if (info & RX_ATTENTION_INFO1_TKIP_MIC_ERR)
119 		errmap |= DP_RX_MPDU_ERR_TKIP_MIC;
120 
121 	if (info & RX_ATTENTION_INFO1_A_MSDU_ERROR)
122 		errmap |= DP_RX_MPDU_ERR_AMSDU_ERR;
123 
124 	if (info & RX_ATTENTION_INFO1_OVERFLOW_ERR)
125 		errmap |= DP_RX_MPDU_ERR_OVERFLOW;
126 
127 	if (info & RX_ATTENTION_INFO1_MSDU_LEN_ERR)
128 		errmap |= DP_RX_MPDU_ERR_MSDU_LEN;
129 
130 	if (info & RX_ATTENTION_INFO1_MPDU_LEN_ERR)
131 		errmap |= DP_RX_MPDU_ERR_MPDU_LEN;
132 
133 	return errmap;
134 }
135 
136 static u16 ath11k_dp_rx_h_msdu_start_msdu_len(struct hal_rx_desc *desc)
137 {
138 	return FIELD_GET(RX_MSDU_START_INFO1_MSDU_LENGTH,
139 			 __le32_to_cpu(desc->msdu_start.info1));
140 }
141 
142 static u8 ath11k_dp_rx_h_msdu_start_sgi(struct hal_rx_desc *desc)
143 {
144 	return FIELD_GET(RX_MSDU_START_INFO3_SGI,
145 			 __le32_to_cpu(desc->msdu_start.info3));
146 }
147 
148 static u8 ath11k_dp_rx_h_msdu_start_rate_mcs(struct hal_rx_desc *desc)
149 {
150 	return FIELD_GET(RX_MSDU_START_INFO3_RATE_MCS,
151 			 __le32_to_cpu(desc->msdu_start.info3));
152 }
153 
154 static u8 ath11k_dp_rx_h_msdu_start_rx_bw(struct hal_rx_desc *desc)
155 {
156 	return FIELD_GET(RX_MSDU_START_INFO3_RECV_BW,
157 			 __le32_to_cpu(desc->msdu_start.info3));
158 }
159 
160 static u32 ath11k_dp_rx_h_msdu_start_freq(struct hal_rx_desc *desc)
161 {
162 	return __le32_to_cpu(desc->msdu_start.phy_meta_data);
163 }
164 
165 static u8 ath11k_dp_rx_h_msdu_start_pkt_type(struct hal_rx_desc *desc)
166 {
167 	return FIELD_GET(RX_MSDU_START_INFO3_PKT_TYPE,
168 			 __le32_to_cpu(desc->msdu_start.info3));
169 }
170 
171 static u8 ath11k_dp_rx_h_msdu_start_nss(struct hal_rx_desc *desc)
172 {
173 	u8 mimo_ss_bitmap = FIELD_GET(RX_MSDU_START_INFO3_MIMO_SS_BITMAP,
174 				      __le32_to_cpu(desc->msdu_start.info3));
175 
176 	return hweight8(mimo_ss_bitmap);
177 }
178 
179 static u8 ath11k_dp_rx_h_mpdu_start_tid(struct hal_rx_desc *desc)
180 {
181 	return FIELD_GET(RX_MPDU_START_INFO2_TID,
182 			 __le32_to_cpu(desc->mpdu_start.info2));
183 }
184 
185 static u16 ath11k_dp_rx_h_mpdu_start_peer_id(struct hal_rx_desc *desc)
186 {
187 	return __le16_to_cpu(desc->mpdu_start.sw_peer_id);
188 }
189 
190 static u8 ath11k_dp_rx_h_msdu_end_l3pad(struct hal_rx_desc *desc)
191 {
192 	return FIELD_GET(RX_MSDU_END_INFO2_L3_HDR_PADDING,
193 			 __le32_to_cpu(desc->msdu_end.info2));
194 }
195 
196 static bool ath11k_dp_rx_h_msdu_end_first_msdu(struct hal_rx_desc *desc)
197 {
198 	return !!FIELD_GET(RX_MSDU_END_INFO2_FIRST_MSDU,
199 			   __le32_to_cpu(desc->msdu_end.info2));
200 }
201 
202 static bool ath11k_dp_rx_h_msdu_end_last_msdu(struct hal_rx_desc *desc)
203 {
204 	return !!FIELD_GET(RX_MSDU_END_INFO2_LAST_MSDU,
205 			   __le32_to_cpu(desc->msdu_end.info2));
206 }
207 
208 static void ath11k_dp_rx_desc_end_tlv_copy(struct hal_rx_desc *fdesc,
209 					   struct hal_rx_desc *ldesc)
210 {
211 	memcpy((u8 *)&fdesc->msdu_end, (u8 *)&ldesc->msdu_end,
212 	       sizeof(struct rx_msdu_end));
213 	memcpy((u8 *)&fdesc->attention, (u8 *)&ldesc->attention,
214 	       sizeof(struct rx_attention));
215 	memcpy((u8 *)&fdesc->mpdu_end, (u8 *)&ldesc->mpdu_end,
216 	       sizeof(struct rx_mpdu_end));
217 }
218 
219 static u32 ath11k_dp_rxdesc_get_mpdulen_err(struct hal_rx_desc *rx_desc)
220 {
221 	struct rx_attention *rx_attn;
222 
223 	rx_attn = &rx_desc->attention;
224 
225 	return FIELD_GET(RX_ATTENTION_INFO1_MPDU_LEN_ERR,
226 			 __le32_to_cpu(rx_attn->info1));
227 }
228 
229 static u32 ath11k_dp_rxdesc_get_decap_format(struct hal_rx_desc *rx_desc)
230 {
231 	struct rx_msdu_start *rx_msdu_start;
232 
233 	rx_msdu_start = &rx_desc->msdu_start;
234 
235 	return FIELD_GET(RX_MSDU_START_INFO2_DECAP_FORMAT,
236 			 __le32_to_cpu(rx_msdu_start->info2));
237 }
238 
239 static u8 *ath11k_dp_rxdesc_get_80211hdr(struct hal_rx_desc *rx_desc)
240 {
241 	u8 *rx_pkt_hdr;
242 
243 	rx_pkt_hdr = &rx_desc->msdu_payload[0];
244 
245 	return rx_pkt_hdr;
246 }
247 
248 static bool ath11k_dp_rxdesc_mpdu_valid(struct hal_rx_desc *rx_desc)
249 {
250 	u32 tlv_tag;
251 
252 	tlv_tag = FIELD_GET(HAL_TLV_HDR_TAG,
253 			    __le32_to_cpu(rx_desc->mpdu_start_tag));
254 
	return tlv_tag == HAL_RX_MPDU_START;
256 }
257 
258 static u32 ath11k_dp_rxdesc_get_ppduid(struct hal_rx_desc *rx_desc)
259 {
260 	return __le16_to_cpu(rx_desc->mpdu_start.phy_ppdu_id);
261 }
262 
263 /* Returns number of Rx buffers replenished */
264 int ath11k_dp_rxbufs_replenish(struct ath11k_base *ab, int mac_id,
265 			       struct dp_rxdma_ring *rx_ring,
266 			       int req_entries,
267 			       enum hal_rx_buf_return_buf_manager mgr,
268 			       gfp_t gfp)
269 {
270 	struct hal_srng *srng;
271 	u32 *desc;
272 	struct sk_buff *skb;
273 	int num_free;
274 	int num_remain;
275 	int buf_id;
276 	u32 cookie;
277 	dma_addr_t paddr;
278 
279 	req_entries = min(req_entries, rx_ring->bufs_max);
280 
281 	srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];
282 
283 	spin_lock_bh(&srng->lock);
284 
285 	ath11k_hal_srng_access_begin(ab, srng);
286 
287 	num_free = ath11k_hal_srng_src_num_free(ab, srng, true);
288 	if (!req_entries && (num_free > (rx_ring->bufs_max * 3) / 4))
289 		req_entries = num_free;
290 
291 	req_entries = min(num_free, req_entries);
292 	num_remain = req_entries;
293 
294 	while (num_remain > 0) {
295 		skb = dev_alloc_skb(DP_RX_BUFFER_SIZE +
296 				    DP_RX_BUFFER_ALIGN_SIZE);
297 		if (!skb)
298 			break;
299 
300 		if (!IS_ALIGNED((unsigned long)skb->data,
301 				DP_RX_BUFFER_ALIGN_SIZE)) {
302 			skb_pull(skb,
303 				 PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) -
304 				 skb->data);
305 		}
306 
307 		paddr = dma_map_single(ab->dev, skb->data,
308 				       skb->len + skb_tailroom(skb),
309 				       DMA_FROM_DEVICE);
310 		if (dma_mapping_error(ab->dev, paddr))
311 			goto fail_free_skb;
312 
313 		spin_lock_bh(&rx_ring->idr_lock);
314 		buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0,
315 				   rx_ring->bufs_max * 3, gfp);
316 		spin_unlock_bh(&rx_ring->idr_lock);
317 		if (buf_id < 0)
318 			goto fail_dma_unmap;
319 
320 		desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
321 		if (!desc)
322 			goto fail_idr_remove;
323 
324 		ATH11K_SKB_RXCB(skb)->paddr = paddr;
325 
326 		cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) |
327 			 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);
328 
329 		num_remain--;
330 
331 		ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr);
332 	}
333 
334 	ath11k_hal_srng_access_end(ab, srng);
335 
336 	spin_unlock_bh(&srng->lock);
337 
338 	return req_entries - num_remain;
339 
340 fail_idr_remove:
341 	spin_lock_bh(&rx_ring->idr_lock);
342 	idr_remove(&rx_ring->bufs_idr, buf_id);
343 	spin_unlock_bh(&rx_ring->idr_lock);
344 fail_dma_unmap:
345 	dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
346 			 DMA_FROM_DEVICE);
347 fail_free_skb:
348 	dev_kfree_skb_any(skb);
349 
350 	ath11k_hal_srng_access_end(ab, srng);
351 
352 	spin_unlock_bh(&srng->lock);
353 
354 	return req_entries - num_remain;
355 }
356 
357 static int ath11k_dp_rxdma_buf_ring_free(struct ath11k *ar,
358 					 struct dp_rxdma_ring *rx_ring)
359 {
360 	struct ath11k_pdev_dp *dp = &ar->dp;
361 	struct sk_buff *skb;
362 	int buf_id;
363 
364 	spin_lock_bh(&rx_ring->idr_lock);
365 	idr_for_each_entry(&rx_ring->bufs_idr, skb, buf_id) {
366 		idr_remove(&rx_ring->bufs_idr, buf_id);
		/* TODO: Understand where the internal driver does this dma_unmap
		 * of rxdma_buffer.
		 */
370 		dma_unmap_single(ar->ab->dev, ATH11K_SKB_RXCB(skb)->paddr,
371 				 skb->len + skb_tailroom(skb), DMA_FROM_DEVICE);
372 		dev_kfree_skb_any(skb);
373 	}
374 
375 	idr_destroy(&rx_ring->bufs_idr);
376 	spin_unlock_bh(&rx_ring->idr_lock);
377 
378 	rx_ring = &dp->rx_mon_status_refill_ring;
379 
380 	spin_lock_bh(&rx_ring->idr_lock);
381 	idr_for_each_entry(&rx_ring->bufs_idr, skb, buf_id) {
382 		idr_remove(&rx_ring->bufs_idr, buf_id);
		/* TODO: Understand where the internal driver does this dma_unmap
		 * of rxdma_buffer.
		 */
386 		dma_unmap_single(ar->ab->dev, ATH11K_SKB_RXCB(skb)->paddr,
387 				 skb->len + skb_tailroom(skb), DMA_BIDIRECTIONAL);
388 		dev_kfree_skb_any(skb);
389 	}
390 
391 	idr_destroy(&rx_ring->bufs_idr);
392 	spin_unlock_bh(&rx_ring->idr_lock);
393 	return 0;
394 }
395 
396 static int ath11k_dp_rxdma_pdev_buf_free(struct ath11k *ar)
397 {
398 	struct ath11k_pdev_dp *dp = &ar->dp;
399 	struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
400 
401 	ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);
402 
403 	rx_ring = &dp->rxdma_mon_buf_ring;
404 	ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);
405 
406 	rx_ring = &dp->rx_mon_status_refill_ring;
407 	ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);
408 	return 0;
409 }
410 
411 static int ath11k_dp_rxdma_ring_buf_setup(struct ath11k *ar,
412 					  struct dp_rxdma_ring *rx_ring,
413 					  u32 ringtype)
414 {
415 	struct ath11k_pdev_dp *dp = &ar->dp;
416 	int num_entries;
417 
418 	num_entries = rx_ring->refill_buf_ring.size /
419 		      ath11k_hal_srng_get_entrysize(ringtype);
420 
421 	rx_ring->bufs_max = num_entries;
422 	ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id, rx_ring, num_entries,
423 				   HAL_RX_BUF_RBM_SW3_BM, GFP_KERNEL);
424 	return 0;
425 }
426 
427 static int ath11k_dp_rxdma_pdev_buf_setup(struct ath11k *ar)
428 {
429 	struct ath11k_pdev_dp *dp = &ar->dp;
430 	struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
431 
432 	ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_BUF);
433 
434 	rx_ring = &dp->rxdma_mon_buf_ring;
435 	ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_MONITOR_BUF);
436 
437 	rx_ring = &dp->rx_mon_status_refill_ring;
438 	ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_MONITOR_STATUS);
439 
440 	return 0;
441 }
442 
443 static void ath11k_dp_rx_pdev_srng_free(struct ath11k *ar)
444 {
445 	struct ath11k_pdev_dp *dp = &ar->dp;
446 
447 	ath11k_dp_srng_cleanup(ar->ab, &dp->rx_refill_buf_ring.refill_buf_ring);
448 	ath11k_dp_srng_cleanup(ar->ab, &dp->rxdma_err_dst_ring);
449 	ath11k_dp_srng_cleanup(ar->ab, &dp->rx_mon_status_refill_ring.refill_buf_ring);
450 	ath11k_dp_srng_cleanup(ar->ab, &dp->rxdma_mon_buf_ring.refill_buf_ring);
451 }
452 
453 void ath11k_dp_pdev_reo_cleanup(struct ath11k_base *ab)
454 {
455 	struct ath11k_dp *dp = &ab->dp;
456 	int i;
457 
458 	for (i = 0; i < DP_REO_DST_RING_MAX; i++)
459 		ath11k_dp_srng_cleanup(ab, &dp->reo_dst_ring[i]);
460 }
461 
462 int ath11k_dp_pdev_reo_setup(struct ath11k_base *ab)
463 {
464 	struct ath11k_dp *dp = &ab->dp;
465 	int ret;
466 	int i;
467 
468 	for (i = 0; i < DP_REO_DST_RING_MAX; i++) {
469 		ret = ath11k_dp_srng_setup(ab, &dp->reo_dst_ring[i],
470 					   HAL_REO_DST, i, 0,
471 					   DP_REO_DST_RING_SIZE);
472 		if (ret) {
473 			ath11k_warn(ab, "failed to setup reo_dst_ring\n");
474 			goto err_reo_cleanup;
475 		}
476 	}
477 
478 	return 0;
479 
480 err_reo_cleanup:
481 	ath11k_dp_pdev_reo_cleanup(ab);
482 
483 	return ret;
484 }
485 
486 static int ath11k_dp_rx_pdev_srng_alloc(struct ath11k *ar)
487 {
488 	struct ath11k_pdev_dp *dp = &ar->dp;
489 	struct dp_srng *srng = NULL;
490 	int ret;
491 
492 	ret = ath11k_dp_srng_setup(ar->ab,
493 				   &dp->rx_refill_buf_ring.refill_buf_ring,
494 				   HAL_RXDMA_BUF, 0,
495 				   dp->mac_id, DP_RXDMA_BUF_RING_SIZE);
496 	if (ret) {
497 		ath11k_warn(ar->ab, "failed to setup rx_refill_buf_ring\n");
498 		return ret;
499 	}
500 
501 	ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_err_dst_ring,
502 				   HAL_RXDMA_DST, 0, dp->mac_id,
503 				   DP_RXDMA_ERR_DST_RING_SIZE);
504 	if (ret) {
505 		ath11k_warn(ar->ab, "failed to setup rxdma_err_dst_ring\n");
506 		return ret;
507 	}
508 
509 	srng = &dp->rx_mon_status_refill_ring.refill_buf_ring;
510 	ret = ath11k_dp_srng_setup(ar->ab,
511 				   srng,
512 				   HAL_RXDMA_MONITOR_STATUS, 0, dp->mac_id,
513 				   DP_RXDMA_MON_STATUS_RING_SIZE);
514 	if (ret) {
515 		ath11k_warn(ar->ab,
516 			    "failed to setup rx_mon_status_refill_ring\n");
517 		return ret;
518 	}
519 	ret = ath11k_dp_srng_setup(ar->ab,
520 				   &dp->rxdma_mon_buf_ring.refill_buf_ring,
521 				   HAL_RXDMA_MONITOR_BUF, 0, dp->mac_id,
522 				   DP_RXDMA_MONITOR_BUF_RING_SIZE);
523 	if (ret) {
524 		ath11k_warn(ar->ab,
525 			    "failed to setup HAL_RXDMA_MONITOR_BUF\n");
526 		return ret;
527 	}
528 
529 	ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_mon_dst_ring,
530 				   HAL_RXDMA_MONITOR_DST, 0, dp->mac_id,
531 				   DP_RXDMA_MONITOR_DST_RING_SIZE);
532 	if (ret) {
533 		ath11k_warn(ar->ab,
534 			    "failed to setup HAL_RXDMA_MONITOR_DST\n");
535 		return ret;
536 	}
537 
538 	ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_mon_desc_ring,
539 				   HAL_RXDMA_MONITOR_DESC, 0, dp->mac_id,
540 				   DP_RXDMA_MONITOR_DESC_RING_SIZE);
541 	if (ret) {
542 		ath11k_warn(ar->ab,
543 			    "failed to setup HAL_RXDMA_MONITOR_DESC\n");
544 		return ret;
545 	}
546 
547 	return 0;
548 }
549 
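/* Drop all REO commands still waiting for completion and all queued
 * cache-flush elements, unmapping and freeing their queue descriptors.
 */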
550 void ath11k_dp_reo_cmd_list_cleanup(struct ath11k_base *ab)
551 {
552 	struct ath11k_dp *dp = &ab->dp;
553 	struct dp_reo_cmd *cmd, *tmp;
554 	struct dp_reo_cache_flush_elem *cmd_cache, *tmp_cache;
555 
556 	spin_lock_bh(&dp->reo_cmd_lock);
557 	list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
558 		list_del(&cmd->list);
559 		dma_unmap_single(ab->dev, cmd->data.paddr,
560 				 cmd->data.size, DMA_BIDIRECTIONAL);
561 		kfree(cmd->data.vaddr);
562 		kfree(cmd);
563 	}
564 
565 	list_for_each_entry_safe(cmd_cache, tmp_cache,
566 				 &dp->reo_cmd_cache_flush_list, list) {
567 		list_del(&cmd_cache->list);
568 		dma_unmap_single(ab->dev, cmd_cache->data.paddr,
569 				 cmd_cache->data.size, DMA_BIDIRECTIONAL);
570 		kfree(cmd_cache->data.vaddr);
571 		kfree(cmd_cache);
572 	}
573 	spin_unlock_bh(&dp->reo_cmd_lock);
574 }
575 
576 static void ath11k_dp_reo_cmd_free(struct ath11k_dp *dp, void *ctx,
577 				   enum hal_reo_cmd_status status)
578 {
579 	struct dp_rx_tid *rx_tid = ctx;
580 
581 	if (status != HAL_REO_CMD_SUCCESS)
582 		ath11k_warn(dp->ab, "failed to flush rx tid hw desc, tid %d status %d\n",
583 			    rx_tid->tid, status);
584 
585 	dma_unmap_single(dp->ab->dev, rx_tid->paddr, rx_tid->size,
586 			 DMA_BIDIRECTIONAL);
587 	kfree(rx_tid->vaddr);
588 }
589 
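/* Flush a released rx queue descriptor out of the REO hardware cache. The
 * descriptor is flushed in chunks of the non-QoS TID descriptor size; only
 * the final FLUSH_CACHE command requests a status callback, which unmaps
 * and frees the descriptor memory.
 */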
590 static void ath11k_dp_reo_cache_flush(struct ath11k_base *ab,
591 				      struct dp_rx_tid *rx_tid)
592 {
593 	struct ath11k_hal_reo_cmd cmd = {0};
594 	unsigned long tot_desc_sz, desc_sz;
595 	int ret;
596 
597 	tot_desc_sz = rx_tid->size;
598 	desc_sz = ath11k_hal_reo_qdesc_size(0, HAL_DESC_REO_NON_QOS_TID);
599 
600 	while (tot_desc_sz > desc_sz) {
601 		tot_desc_sz -= desc_sz;
602 		cmd.addr_lo = lower_32_bits(rx_tid->paddr + tot_desc_sz);
603 		cmd.addr_hi = upper_32_bits(rx_tid->paddr);
604 		ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid,
605 						HAL_REO_CMD_FLUSH_CACHE, &cmd,
606 						NULL);
607 		if (ret)
608 			ath11k_warn(ab,
609 				    "failed to send HAL_REO_CMD_FLUSH_CACHE, tid %d (%d)\n",
610 				    rx_tid->tid, ret);
611 	}
612 
613 	memset(&cmd, 0, sizeof(cmd));
614 	cmd.addr_lo = lower_32_bits(rx_tid->paddr);
615 	cmd.addr_hi = upper_32_bits(rx_tid->paddr);
616 	cmd.flag |= HAL_REO_CMD_FLG_NEED_STATUS;
617 	ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid,
618 					HAL_REO_CMD_FLUSH_CACHE,
619 					&cmd, ath11k_dp_reo_cmd_free);
620 	if (ret) {
621 		ath11k_err(ab, "failed to send HAL_REO_CMD_FLUSH_CACHE cmd, tid %d (%d)\n",
622 			   rx_tid->tid, ret);
623 		dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
624 				 DMA_BIDIRECTIONAL);
625 		kfree(rx_tid->vaddr);
626 	}
627 }
628 
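/* Completion handler for the rx queue delete (UPDATE_RX_QUEUE) command. On
 * success the queue descriptor is parked on reo_cmd_cache_flush_list rather
 * than freed right away; entries older than DP_REO_DESC_FREE_TIMEOUT_MS are
 * then flushed from the REO cache and released.
 */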
629 static void ath11k_dp_rx_tid_del_func(struct ath11k_dp *dp, void *ctx,
630 				      enum hal_reo_cmd_status status)
631 {
632 	struct ath11k_base *ab = dp->ab;
633 	struct dp_rx_tid *rx_tid = ctx;
634 	struct dp_reo_cache_flush_elem *elem, *tmp;
635 
636 	if (status == HAL_REO_CMD_DRAIN) {
637 		goto free_desc;
638 	} else if (status != HAL_REO_CMD_SUCCESS) {
639 		/* Shouldn't happen! Cleanup in case of other failure? */
640 		ath11k_warn(ab, "failed to delete rx tid %d hw descriptor %d\n",
641 			    rx_tid->tid, status);
642 		return;
643 	}
644 
645 	elem = kzalloc(sizeof(*elem), GFP_ATOMIC);
646 	if (!elem)
647 		goto free_desc;
648 
649 	elem->ts = jiffies;
650 	memcpy(&elem->data, rx_tid, sizeof(*rx_tid));
651 
652 	spin_lock_bh(&dp->reo_cmd_lock);
653 	list_add_tail(&elem->list, &dp->reo_cmd_cache_flush_list);
654 	spin_unlock_bh(&dp->reo_cmd_lock);
655 
656 	/* Flush and invalidate aged REO desc from HW cache */
657 	spin_lock_bh(&dp->reo_cmd_lock);
658 	list_for_each_entry_safe(elem, tmp, &dp->reo_cmd_cache_flush_list,
659 				 list) {
660 		if (time_after(jiffies, elem->ts +
661 			       msecs_to_jiffies(DP_REO_DESC_FREE_TIMEOUT_MS))) {
662 			list_del(&elem->list);
663 			spin_unlock_bh(&dp->reo_cmd_lock);
664 
665 			ath11k_dp_reo_cache_flush(ab, &elem->data);
666 			kfree(elem);
667 			spin_lock_bh(&dp->reo_cmd_lock);
668 		}
669 	}
670 	spin_unlock_bh(&dp->reo_cmd_lock);
671 
672 	return;
673 free_desc:
674 	dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
675 			 DMA_BIDIRECTIONAL);
676 	kfree(rx_tid->vaddr);
677 }
678 
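/* Invalidate the REO rx queue for a peer/TID. The queue descriptor is freed
 * from ath11k_dp_rx_tid_del_func() once the command completes, or
 * immediately here if sending the command fails.
 */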
679 void ath11k_peer_rx_tid_delete(struct ath11k *ar,
680 			       struct ath11k_peer *peer, u8 tid)
681 {
682 	struct ath11k_hal_reo_cmd cmd = {0};
683 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
684 	int ret;
685 
686 	if (!rx_tid->active)
687 		return;
688 
689 	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
690 	cmd.addr_lo = lower_32_bits(rx_tid->paddr);
691 	cmd.addr_hi = upper_32_bits(rx_tid->paddr);
692 	cmd.upd0 |= HAL_REO_CMD_UPD0_VLD;
693 	ret = ath11k_dp_tx_send_reo_cmd(ar->ab, rx_tid,
694 					HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
695 					ath11k_dp_rx_tid_del_func);
696 	if (ret) {
697 		ath11k_err(ar->ab, "failed to send HAL_REO_CMD_UPDATE_RX_QUEUE cmd, tid %d (%d)\n",
698 			   tid, ret);
699 		dma_unmap_single(ar->ab->dev, rx_tid->paddr, rx_tid->size,
700 				 DMA_BIDIRECTIONAL);
701 		kfree(rx_tid->vaddr);
702 	}
703 
704 	rx_tid->active = false;
705 }
706 
707 static int ath11k_dp_rx_link_desc_return(struct ath11k_base *ab,
708 					 u32 *link_desc,
709 					 enum hal_wbm_rel_bm_act action)
710 {
711 	struct ath11k_dp *dp = &ab->dp;
712 	struct hal_srng *srng;
713 	u32 *desc;
714 	int ret = 0;
715 
716 	srng = &ab->hal.srng_list[dp->wbm_desc_rel_ring.ring_id];
717 
718 	spin_lock_bh(&srng->lock);
719 
720 	ath11k_hal_srng_access_begin(ab, srng);
721 
722 	desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
723 	if (!desc) {
724 		ret = -ENOBUFS;
725 		goto exit;
726 	}
727 
728 	ath11k_hal_rx_msdu_link_desc_set(ab, (void *)desc, (void *)link_desc,
729 					 action);
730 
731 exit:
732 	ath11k_hal_srng_access_end(ab, srng);
733 
734 	spin_unlock_bh(&srng->lock);
735 
736 	return ret;
737 }
738 
739 static void ath11k_dp_rx_frags_cleanup(struct dp_rx_tid *rx_tid, bool rel_link_desc)
740 {
741 	struct ath11k_base *ab = rx_tid->ab;
742 
743 	lockdep_assert_held(&ab->base_lock);
744 
745 	if (rx_tid->dst_ring_desc) {
746 		if (rel_link_desc)
747 			ath11k_dp_rx_link_desc_return(ab, (u32 *)rx_tid->dst_ring_desc,
748 						      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
749 		kfree(rx_tid->dst_ring_desc);
750 		rx_tid->dst_ring_desc = NULL;
751 	}
752 
753 	rx_tid->cur_sn = 0;
754 	rx_tid->last_frag_no = 0;
755 	rx_tid->rx_frag_bitmap = 0;
756 	__skb_queue_purge(&rx_tid->rx_frags);
757 }
758 
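/* Tear down every rx TID of a peer: delete the REO queues, drop pending
 * fragments and cancel the per-TID fragment timers. base_lock is dropped
 * around del_timer_sync(), presumably because the fragment timer handler
 * takes base_lock itself.
 */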
759 void ath11k_peer_rx_tid_cleanup(struct ath11k *ar, struct ath11k_peer *peer)
760 {
761 	struct dp_rx_tid *rx_tid;
762 	int i;
763 
764 	lockdep_assert_held(&ar->ab->base_lock);
765 
766 	for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
767 		rx_tid = &peer->rx_tid[i];
768 
769 		ath11k_peer_rx_tid_delete(ar, peer, i);
770 		ath11k_dp_rx_frags_cleanup(rx_tid, true);
771 
772 		spin_unlock_bh(&ar->ab->base_lock);
773 		del_timer_sync(&rx_tid->frag_timer);
774 		spin_lock_bh(&ar->ab->base_lock);
775 	}
776 }
777 
778 static int ath11k_peer_rx_tid_reo_update(struct ath11k *ar,
779 					 struct ath11k_peer *peer,
780 					 struct dp_rx_tid *rx_tid,
781 					 u32 ba_win_sz, u16 ssn,
782 					 bool update_ssn)
783 {
784 	struct ath11k_hal_reo_cmd cmd = {0};
785 	int ret;
786 
787 	cmd.addr_lo = lower_32_bits(rx_tid->paddr);
788 	cmd.addr_hi = upper_32_bits(rx_tid->paddr);
789 	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
790 	cmd.upd0 = HAL_REO_CMD_UPD0_BA_WINDOW_SIZE;
791 	cmd.ba_window_size = ba_win_sz;
792 
793 	if (update_ssn) {
794 		cmd.upd0 |= HAL_REO_CMD_UPD0_SSN;
795 		cmd.upd2 = FIELD_PREP(HAL_REO_CMD_UPD2_SSN, ssn);
796 	}
797 
798 	ret = ath11k_dp_tx_send_reo_cmd(ar->ab, rx_tid,
799 					HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
800 					NULL);
801 	if (ret) {
802 		ath11k_warn(ar->ab, "failed to update rx tid queue, tid %d (%d)\n",
803 			    rx_tid->tid, ret);
804 		return ret;
805 	}
806 
807 	rx_tid->ba_win_sz = ba_win_sz;
808 
809 	return 0;
810 }
811 
812 static void ath11k_dp_rx_tid_mem_free(struct ath11k_base *ab,
813 				      const u8 *peer_mac, int vdev_id, u8 tid)
814 {
815 	struct ath11k_peer *peer;
816 	struct dp_rx_tid *rx_tid;
817 
818 	spin_lock_bh(&ab->base_lock);
819 
820 	peer = ath11k_peer_find(ab, vdev_id, peer_mac);
821 	if (!peer) {
822 		ath11k_warn(ab, "failed to find the peer to free up rx tid mem\n");
823 		goto unlock_exit;
824 	}
825 
826 	rx_tid = &peer->rx_tid[tid];
827 	if (!rx_tid->active)
828 		goto unlock_exit;
829 
830 	dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
831 			 DMA_BIDIRECTIONAL);
832 	kfree(rx_tid->vaddr);
833 
834 	rx_tid->active = false;
835 
836 unlock_exit:
837 	spin_unlock_bh(&ab->base_lock);
838 }
839 
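/* Set up (or update) the REO rx queue for a peer/TID: allocate and DMA-map
 * an aligned hardware queue descriptor, then point firmware at it via the
 * WMI rx reorder queue setup command.
 */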
840 int ath11k_peer_rx_tid_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id,
841 			     u8 tid, u32 ba_win_sz, u16 ssn,
842 			     enum hal_pn_type pn_type)
843 {
844 	struct ath11k_base *ab = ar->ab;
845 	struct ath11k_peer *peer;
846 	struct dp_rx_tid *rx_tid;
847 	u32 hw_desc_sz;
848 	u32 *addr_aligned;
849 	void *vaddr;
850 	dma_addr_t paddr;
851 	int ret;
852 
853 	spin_lock_bh(&ab->base_lock);
854 
855 	peer = ath11k_peer_find(ab, vdev_id, peer_mac);
856 	if (!peer) {
857 		ath11k_warn(ab, "failed to find the peer to set up rx tid\n");
858 		spin_unlock_bh(&ab->base_lock);
859 		return -ENOENT;
860 	}
861 
862 	rx_tid = &peer->rx_tid[tid];
863 	/* Update the tid queue if it is already setup */
864 	if (rx_tid->active) {
865 		paddr = rx_tid->paddr;
866 		ret = ath11k_peer_rx_tid_reo_update(ar, peer, rx_tid,
867 						    ba_win_sz, ssn, true);
868 		spin_unlock_bh(&ab->base_lock);
869 		if (ret) {
870 			ath11k_warn(ab, "failed to update reo for rx tid %d\n", tid);
871 			return ret;
872 		}
873 
874 		ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id,
875 							     peer_mac, paddr,
876 							     tid, 1, ba_win_sz);
877 		if (ret)
			ath11k_warn(ab, "failed to send wmi command to update rx reorder queue, tid %d (%d)\n",
879 				    tid, ret);
880 		return ret;
881 	}
882 
883 	rx_tid->tid = tid;
884 
885 	rx_tid->ba_win_sz = ba_win_sz;
886 
	/* TODO: Optimize the memory allocation for qos tid based on
	 * the actual BA window size in REO tid update path.
	 */
890 	if (tid == HAL_DESC_REO_NON_QOS_TID)
891 		hw_desc_sz = ath11k_hal_reo_qdesc_size(ba_win_sz, tid);
892 	else
893 		hw_desc_sz = ath11k_hal_reo_qdesc_size(DP_BA_WIN_SZ_MAX, tid);
894 
895 	vaddr = kzalloc(hw_desc_sz + HAL_LINK_DESC_ALIGN - 1, GFP_KERNEL);
896 	if (!vaddr) {
897 		spin_unlock_bh(&ab->base_lock);
898 		return -ENOMEM;
899 	}
900 
901 	addr_aligned = PTR_ALIGN(vaddr, HAL_LINK_DESC_ALIGN);
902 
903 	ath11k_hal_reo_qdesc_setup(addr_aligned, tid, ba_win_sz,
904 				   ssn, pn_type);
905 
906 	paddr = dma_map_single(ab->dev, addr_aligned, hw_desc_sz,
907 			       DMA_BIDIRECTIONAL);
908 
909 	ret = dma_mapping_error(ab->dev, paddr);
910 	if (ret) {
911 		spin_unlock_bh(&ab->base_lock);
912 		goto err_mem_free;
913 	}
914 
915 	rx_tid->vaddr = vaddr;
916 	rx_tid->paddr = paddr;
917 	rx_tid->size = hw_desc_sz;
918 	rx_tid->active = true;
919 
920 	spin_unlock_bh(&ab->base_lock);
921 
922 	ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, peer_mac,
923 						     paddr, tid, 1, ba_win_sz);
924 	if (ret) {
		ath11k_warn(ar->ab, "failed to setup rx reorder queue, tid %d (%d)\n",
926 			    tid, ret);
927 		ath11k_dp_rx_tid_mem_free(ab, peer_mac, vdev_id, tid);
928 	}
929 
930 	return ret;
931 
932 err_mem_free:
933 	kfree(vaddr);
934 
935 	return ret;
936 }
937 
938 int ath11k_dp_rx_ampdu_start(struct ath11k *ar,
939 			     struct ieee80211_ampdu_params *params)
940 {
941 	struct ath11k_base *ab = ar->ab;
942 	struct ath11k_sta *arsta = (void *)params->sta->drv_priv;
943 	int vdev_id = arsta->arvif->vdev_id;
944 	int ret;
945 
946 	ret = ath11k_peer_rx_tid_setup(ar, params->sta->addr, vdev_id,
947 				       params->tid, params->buf_size,
948 				       params->ssn, arsta->pn_type);
949 	if (ret)
		ath11k_warn(ab, "failed to setup rx tid %d: %d\n",
			    params->tid, ret);
951 
952 	return ret;
953 }
954 
955 int ath11k_dp_rx_ampdu_stop(struct ath11k *ar,
956 			    struct ieee80211_ampdu_params *params)
957 {
958 	struct ath11k_base *ab = ar->ab;
959 	struct ath11k_peer *peer;
960 	struct ath11k_sta *arsta = (void *)params->sta->drv_priv;
961 	int vdev_id = arsta->arvif->vdev_id;
962 	dma_addr_t paddr;
963 	bool active;
964 	int ret;
965 
966 	spin_lock_bh(&ab->base_lock);
967 
968 	peer = ath11k_peer_find(ab, vdev_id, params->sta->addr);
969 	if (!peer) {
970 		ath11k_warn(ab, "failed to find the peer to stop rx aggregation\n");
971 		spin_unlock_bh(&ab->base_lock);
972 		return -ENOENT;
973 	}
974 
975 	paddr = peer->rx_tid[params->tid].paddr;
976 	active = peer->rx_tid[params->tid].active;
977 
978 	if (!active) {
979 		spin_unlock_bh(&ab->base_lock);
980 		return 0;
981 	}
982 
983 	ret = ath11k_peer_rx_tid_reo_update(ar, peer, peer->rx_tid, 1, 0, false);
984 	spin_unlock_bh(&ab->base_lock);
985 	if (ret) {
986 		ath11k_warn(ab, "failed to update reo for rx tid %d: %d\n",
987 			    params->tid, ret);
988 		return ret;
989 	}
990 
991 	ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id,
992 						     params->sta->addr, paddr,
993 						     params->tid, 1, 1);
994 	if (ret)
		ath11k_warn(ab, "failed to send wmi to delete rx tid %d: %d\n",
			    params->tid, ret);
997 
998 	return ret;
999 }
1000 
1001 int ath11k_dp_peer_rx_pn_replay_config(struct ath11k_vif *arvif,
1002 				       const u8 *peer_addr,
1003 				       enum set_key_cmd key_cmd,
1004 				       struct ieee80211_key_conf *key)
1005 {
1006 	struct ath11k *ar = arvif->ar;
1007 	struct ath11k_base *ab = ar->ab;
1008 	struct ath11k_hal_reo_cmd cmd = {0};
1009 	struct ath11k_peer *peer;
1010 	struct dp_rx_tid *rx_tid;
1011 	u8 tid;
1012 	int ret = 0;
1013 
1014 	/* NOTE: Enable PN/TSC replay check offload only for unicast frames.
1015 	 * We use mac80211 PN/TSC replay check functionality for bcast/mcast
1016 	 * for now.
1017 	 */
1018 	if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
1019 		return 0;
1020 
1021 	cmd.flag |= HAL_REO_CMD_FLG_NEED_STATUS;
1022 	cmd.upd0 |= HAL_REO_CMD_UPD0_PN |
1023 		    HAL_REO_CMD_UPD0_PN_SIZE |
1024 		    HAL_REO_CMD_UPD0_PN_VALID |
1025 		    HAL_REO_CMD_UPD0_PN_CHECK |
1026 		    HAL_REO_CMD_UPD0_SVLD;
1027 
1028 	switch (key->cipher) {
1029 	case WLAN_CIPHER_SUITE_TKIP:
1030 	case WLAN_CIPHER_SUITE_CCMP:
1031 	case WLAN_CIPHER_SUITE_CCMP_256:
1032 	case WLAN_CIPHER_SUITE_GCMP:
1033 	case WLAN_CIPHER_SUITE_GCMP_256:
1034 		if (key_cmd == SET_KEY) {
1035 			cmd.upd1 |= HAL_REO_CMD_UPD1_PN_CHECK;
1036 			cmd.pn_size = 48;
1037 		}
1038 		break;
1039 	default:
1040 		break;
1041 	}
1042 
1043 	spin_lock_bh(&ab->base_lock);
1044 
1045 	peer = ath11k_peer_find(ab, arvif->vdev_id, peer_addr);
1046 	if (!peer) {
1047 		ath11k_warn(ab, "failed to find the peer to configure pn replay detection\n");
1048 		spin_unlock_bh(&ab->base_lock);
1049 		return -ENOENT;
1050 	}
1051 
1052 	for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) {
1053 		rx_tid = &peer->rx_tid[tid];
1054 		if (!rx_tid->active)
1055 			continue;
1056 		cmd.addr_lo = lower_32_bits(rx_tid->paddr);
1057 		cmd.addr_hi = upper_32_bits(rx_tid->paddr);
1058 		ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid,
1059 						HAL_REO_CMD_UPDATE_RX_QUEUE,
1060 						&cmd, NULL);
1061 		if (ret) {
1062 			ath11k_warn(ab, "failed to configure rx tid %d queue for pn replay detection %d\n",
1063 				    tid, ret);
1064 			break;
1065 		}
1066 	}
1067 
1068 	spin_unlock_bh(&ar->ab->base_lock);
1069 
1070 	return ret;
1071 }
1072 
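/* Find the user_stats slot already holding @peer_id, or the first unused
 * slot; returns -EINVAL when all slots are occupied by other peers.
 */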
1073 static inline int ath11k_get_ppdu_user_index(struct htt_ppdu_stats *ppdu_stats,
1074 					     u16 peer_id)
1075 {
1076 	int i;
1077 
1078 	for (i = 0; i < HTT_PPDU_STATS_MAX_USERS - 1; i++) {
1079 		if (ppdu_stats->user_stats[i].is_valid_peer_id) {
1080 			if (peer_id == ppdu_stats->user_stats[i].peer_id)
1081 				return i;
1082 		} else {
1083 			return i;
1084 		}
1085 	}
1086 
1087 	return -EINVAL;
1088 }
1089 
1090 static int ath11k_htt_tlv_ppdu_stats_parse(struct ath11k_base *ab,
1091 					   u16 tag, u16 len, const void *ptr,
1092 					   void *data)
1093 {
1094 	struct htt_ppdu_stats_info *ppdu_info;
1095 	struct htt_ppdu_user_stats *user_stats;
1096 	int cur_user;
1097 	u16 peer_id;
1098 
1099 	ppdu_info = (struct htt_ppdu_stats_info *)data;
1100 
1101 	switch (tag) {
1102 	case HTT_PPDU_STATS_TAG_COMMON:
1103 		if (len < sizeof(struct htt_ppdu_stats_common)) {
1104 			ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
1105 				    len, tag);
1106 			return -EINVAL;
1107 		}
1108 		memcpy((void *)&ppdu_info->ppdu_stats.common, ptr,
1109 		       sizeof(struct htt_ppdu_stats_common));
1110 		break;
1111 	case HTT_PPDU_STATS_TAG_USR_RATE:
1112 		if (len < sizeof(struct htt_ppdu_stats_user_rate)) {
1113 			ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
1114 				    len, tag);
1115 			return -EINVAL;
1116 		}
1117 
1118 		peer_id = ((struct htt_ppdu_stats_user_rate *)ptr)->sw_peer_id;
1119 		cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
1120 						      peer_id);
1121 		if (cur_user < 0)
1122 			return -EINVAL;
1123 		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
1124 		user_stats->peer_id = peer_id;
1125 		user_stats->is_valid_peer_id = true;
1126 		memcpy((void *)&user_stats->rate, ptr,
1127 		       sizeof(struct htt_ppdu_stats_user_rate));
1128 		user_stats->tlv_flags |= BIT(tag);
1129 		break;
1130 	case HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON:
1131 		if (len < sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn)) {
1132 			ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
1133 				    len, tag);
1134 			return -EINVAL;
1135 		}
1136 
1137 		peer_id = ((struct htt_ppdu_stats_usr_cmpltn_cmn *)ptr)->sw_peer_id;
1138 		cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
1139 						      peer_id);
1140 		if (cur_user < 0)
1141 			return -EINVAL;
1142 		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
1143 		user_stats->peer_id = peer_id;
1144 		user_stats->is_valid_peer_id = true;
1145 		memcpy((void *)&user_stats->cmpltn_cmn, ptr,
1146 		       sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn));
1147 		user_stats->tlv_flags |= BIT(tag);
1148 		break;
1149 	case HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS:
1150 		if (len <
1151 		    sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status)) {
1152 			ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
1153 				    len, tag);
1154 			return -EINVAL;
1155 		}
1156 
1157 		peer_id =
1158 		((struct htt_ppdu_stats_usr_cmpltn_ack_ba_status *)ptr)->sw_peer_id;
1159 		cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
1160 						      peer_id);
1161 		if (cur_user < 0)
1162 			return -EINVAL;
1163 		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
1164 		user_stats->peer_id = peer_id;
1165 		user_stats->is_valid_peer_id = true;
1166 		memcpy((void *)&user_stats->ack_ba, ptr,
1167 		       sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status));
1168 		user_stats->tlv_flags |= BIT(tag);
1169 		break;
1170 	}
1171 	return 0;
1172 }
1173 
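/* Walk a buffer of HTT TLVs and invoke @iter on each tag/value pair, bailing
 * out on truncated headers or TLVs that overrun the remaining length.
 */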
1174 int ath11k_dp_htt_tlv_iter(struct ath11k_base *ab, const void *ptr, size_t len,
1175 			   int (*iter)(struct ath11k_base *ar, u16 tag, u16 len,
1176 				       const void *ptr, void *data),
1177 			   void *data)
1178 {
1179 	const struct htt_tlv *tlv;
1180 	const void *begin = ptr;
1181 	u16 tlv_tag, tlv_len;
1182 	int ret = -EINVAL;
1183 
1184 	while (len > 0) {
1185 		if (len < sizeof(*tlv)) {
1186 			ath11k_err(ab, "htt tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
1187 				   ptr - begin, len, sizeof(*tlv));
1188 			return -EINVAL;
1189 		}
1190 		tlv = (struct htt_tlv *)ptr;
1191 		tlv_tag = FIELD_GET(HTT_TLV_TAG, tlv->header);
1192 		tlv_len = FIELD_GET(HTT_TLV_LEN, tlv->header);
1193 		ptr += sizeof(*tlv);
1194 		len -= sizeof(*tlv);
1195 
1196 		if (tlv_len > len) {
			ath11k_err(ab, "htt tlv parse failure of tag %u at byte %zd (%zu bytes left, %u expected)\n",
1198 				   tlv_tag, ptr - begin, len, tlv_len);
1199 			return -EINVAL;
1200 		}
1201 		ret = iter(ab, tlv_tag, tlv_len, ptr, data);
1202 		if (ret == -ENOMEM)
1203 			return ret;
1204 
1205 		ptr += tlv_len;
1206 		len -= tlv_len;
1207 	}
1208 	return 0;
1209 }
1210 
1211 static inline u32 ath11k_he_gi_to_nl80211_he_gi(u8 sgi)
1212 {
1213 	u32 ret = 0;
1214 
1215 	switch (sgi) {
1216 	case RX_MSDU_START_SGI_0_8_US:
1217 		ret = NL80211_RATE_INFO_HE_GI_0_8;
1218 		break;
1219 	case RX_MSDU_START_SGI_1_6_US:
1220 		ret = NL80211_RATE_INFO_HE_GI_1_6;
1221 		break;
1222 	case RX_MSDU_START_SGI_3_2_US:
1223 		ret = NL80211_RATE_INFO_HE_GI_3_2;
1224 		break;
1225 	}
1226 
1227 	return ret;
1228 }
1229 
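/* Convert one user's PPDU stats TLVs into a mac80211 rate_info for the
 * station and, if extended tx stats are enabled, fold the completion counts
 * into the per-peer statistics.
 */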
1230 static void
1231 ath11k_update_per_peer_tx_stats(struct ath11k *ar,
1232 				struct htt_ppdu_stats *ppdu_stats, u8 user)
1233 {
1234 	struct ath11k_base *ab = ar->ab;
1235 	struct ath11k_peer *peer;
1236 	struct ieee80211_sta *sta;
1237 	struct ath11k_sta *arsta;
1238 	struct htt_ppdu_stats_user_rate *user_rate;
1239 	struct ath11k_per_peer_tx_stats *peer_stats = &ar->peer_tx_stats;
1240 	struct htt_ppdu_user_stats *usr_stats = &ppdu_stats->user_stats[user];
1241 	struct htt_ppdu_stats_common *common = &ppdu_stats->common;
1242 	int ret;
1243 	u8 flags, mcs, nss, bw, sgi, dcm, rate_idx = 0;
1244 	u32 succ_bytes = 0;
1245 	u16 rate = 0, succ_pkts = 0;
1246 	u32 tx_duration = 0;
1247 	u8 tid = HTT_PPDU_STATS_NON_QOS_TID;
1248 	bool is_ampdu = false;
1249 
1250 	if (!usr_stats)
1251 		return;
1252 
1253 	if (!(usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_RATE)))
1254 		return;
1255 
1256 	if (usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON))
1257 		is_ampdu =
1258 			HTT_USR_CMPLTN_IS_AMPDU(usr_stats->cmpltn_cmn.flags);
1259 
1260 	if (usr_stats->tlv_flags &
1261 	    BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS)) {
1262 		succ_bytes = usr_stats->ack_ba.success_bytes;
1263 		succ_pkts = FIELD_GET(HTT_PPDU_STATS_ACK_BA_INFO_NUM_MSDU_M,
1264 				      usr_stats->ack_ba.info);
1265 		tid = FIELD_GET(HTT_PPDU_STATS_ACK_BA_INFO_TID_NUM,
1266 				usr_stats->ack_ba.info);
1267 	}
1268 
1269 	if (common->fes_duration_us)
1270 		tx_duration = common->fes_duration_us;
1271 
1272 	user_rate = &usr_stats->rate;
1273 	flags = HTT_USR_RATE_PREAMBLE(user_rate->rate_flags);
1274 	bw = HTT_USR_RATE_BW(user_rate->rate_flags) - 2;
1275 	nss = HTT_USR_RATE_NSS(user_rate->rate_flags) + 1;
1276 	mcs = HTT_USR_RATE_MCS(user_rate->rate_flags);
1277 	sgi = HTT_USR_RATE_GI(user_rate->rate_flags);
1278 	dcm = HTT_USR_RATE_DCM(user_rate->rate_flags);
1279 
	/* Note: If the host configured fixed rates, or in some other special
	 * cases, broadcast/management frames are sent at different rates.
	 * Should firmware rate control be skipped for these?
	 */
1284 
1290 	if (flags == WMI_RATE_PREAMBLE_HE && mcs > ATH11K_HE_MCS_MAX) {
1291 		ath11k_warn(ab, "Invalid HE mcs %hhd peer stats",  mcs);
1292 		return;
1293 	}
1294 
1295 	if (flags == WMI_RATE_PREAMBLE_VHT && mcs > ATH11K_VHT_MCS_MAX) {
1296 		ath11k_warn(ab, "Invalid VHT mcs %hhd peer stats",  mcs);
1297 		return;
1298 	}
1299 
1300 	if (flags == WMI_RATE_PREAMBLE_HT && (mcs > ATH11K_HT_MCS_MAX || nss < 1)) {
1301 		ath11k_warn(ab, "Invalid HT mcs %hhd nss %hhd peer stats",
1302 			    mcs, nss);
1303 		return;
1304 	}
1305 
1306 	if (flags == WMI_RATE_PREAMBLE_CCK || flags == WMI_RATE_PREAMBLE_OFDM) {
1307 		ret = ath11k_mac_hw_ratecode_to_legacy_rate(mcs,
1308 							    flags,
1309 							    &rate_idx,
1310 							    &rate);
1311 		if (ret < 0)
1312 			return;
1313 	}
1314 
1315 	rcu_read_lock();
1316 	spin_lock_bh(&ab->base_lock);
1317 	peer = ath11k_peer_find_by_id(ab, usr_stats->peer_id);
1318 
1319 	if (!peer || !peer->sta) {
1320 		spin_unlock_bh(&ab->base_lock);
1321 		rcu_read_unlock();
1322 		return;
1323 	}
1324 
1325 	sta = peer->sta;
1326 	arsta = (struct ath11k_sta *)sta->drv_priv;
1327 
1328 	memset(&arsta->txrate, 0, sizeof(arsta->txrate));
1329 
1330 	switch (flags) {
1331 	case WMI_RATE_PREAMBLE_OFDM:
1332 		arsta->txrate.legacy = rate;
1333 		break;
1334 	case WMI_RATE_PREAMBLE_CCK:
1335 		arsta->txrate.legacy = rate;
1336 		break;
1337 	case WMI_RATE_PREAMBLE_HT:
1338 		arsta->txrate.mcs = mcs + 8 * (nss - 1);
1339 		arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
1340 		if (sgi)
1341 			arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
1342 		break;
1343 	case WMI_RATE_PREAMBLE_VHT:
1344 		arsta->txrate.mcs = mcs;
1345 		arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
1346 		if (sgi)
1347 			arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
1348 		break;
1349 	case WMI_RATE_PREAMBLE_HE:
1350 		arsta->txrate.mcs = mcs;
1351 		arsta->txrate.flags = RATE_INFO_FLAGS_HE_MCS;
1352 		arsta->txrate.he_dcm = dcm;
1353 		arsta->txrate.he_gi = ath11k_he_gi_to_nl80211_he_gi(sgi);
1354 		arsta->txrate.he_ru_alloc = ath11k_he_ru_tones_to_nl80211_he_ru_alloc(
1355 						(user_rate->ru_end -
1356 						 user_rate->ru_start) + 1);
1357 		break;
1358 	}
1359 
1360 	arsta->txrate.nss = nss;
1361 	arsta->txrate.bw = ath11k_mac_bw_to_mac80211_bw(bw);
1362 	arsta->tx_duration += tx_duration;
1363 	memcpy(&arsta->last_txrate, &arsta->txrate, sizeof(struct rate_info));
1364 
1365 	/* PPDU stats reported for mgmt packet doesn't have valid tx bytes.
1366 	 * So skip peer stats update for mgmt packets.
1367 	 */
1368 	if (tid < HTT_PPDU_STATS_NON_QOS_TID) {
1369 		memset(peer_stats, 0, sizeof(*peer_stats));
1370 		peer_stats->succ_pkts = succ_pkts;
1371 		peer_stats->succ_bytes = succ_bytes;
1372 		peer_stats->is_ampdu = is_ampdu;
1373 		peer_stats->duration = tx_duration;
1374 		peer_stats->ba_fails =
1375 			HTT_USR_CMPLTN_LONG_RETRY(usr_stats->cmpltn_cmn.flags) +
1376 			HTT_USR_CMPLTN_SHORT_RETRY(usr_stats->cmpltn_cmn.flags);
1377 
1378 		if (ath11k_debug_is_extd_tx_stats_enabled(ar))
1379 			ath11k_accumulate_per_peer_tx_stats(arsta,
1380 							    peer_stats, rate_idx);
1381 	}
1382 
1383 	spin_unlock_bh(&ab->base_lock);
1384 	rcu_read_unlock();
1385 }
1386 
1387 static void ath11k_htt_update_ppdu_stats(struct ath11k *ar,
1388 					 struct htt_ppdu_stats *ppdu_stats)
1389 {
1390 	u8 user;
1391 
1392 	for (user = 0; user < HTT_PPDU_STATS_MAX_USERS - 1; user++)
1393 		ath11k_update_per_peer_tx_stats(ar, ppdu_stats, user);
1394 }
1395 
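/* Return the ppdu_stats_info entry for @ppdu_id, allocating one if it is not
 * already queued. If the list has grown past HTT_PPDU_DESC_MAX_DEPTH the
 * oldest entry is flushed into the per-peer tx stats and freed first.
 */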
1396 static
1397 struct htt_ppdu_stats_info *ath11k_dp_htt_get_ppdu_desc(struct ath11k *ar,
1398 							u32 ppdu_id)
1399 {
1400 	struct htt_ppdu_stats_info *ppdu_info;
1401 
1402 	spin_lock_bh(&ar->data_lock);
1403 	if (!list_empty(&ar->ppdu_stats_info)) {
1404 		list_for_each_entry(ppdu_info, &ar->ppdu_stats_info, list) {
1405 			if (ppdu_info->ppdu_id == ppdu_id) {
1406 				spin_unlock_bh(&ar->data_lock);
1407 				return ppdu_info;
1408 			}
1409 		}
1410 
1411 		if (ar->ppdu_stat_list_depth > HTT_PPDU_DESC_MAX_DEPTH) {
1412 			ppdu_info = list_first_entry(&ar->ppdu_stats_info,
1413 						     typeof(*ppdu_info), list);
1414 			list_del(&ppdu_info->list);
1415 			ar->ppdu_stat_list_depth--;
1416 			ath11k_htt_update_ppdu_stats(ar, &ppdu_info->ppdu_stats);
1417 			kfree(ppdu_info);
1418 		}
1419 	}
1420 	spin_unlock_bh(&ar->data_lock);
1421 
1422 	ppdu_info = kzalloc(sizeof(*ppdu_info), GFP_KERNEL);
1423 	if (!ppdu_info)
1424 		return NULL;
1425 
1426 	spin_lock_bh(&ar->data_lock);
1427 	list_add_tail(&ppdu_info->list, &ar->ppdu_stats_info);
1428 	ar->ppdu_stat_list_depth++;
1429 	spin_unlock_bh(&ar->data_lock);
1430 
1431 	return ppdu_info;
1432 }
1433 
1434 static int ath11k_htt_pull_ppdu_stats(struct ath11k_base *ab,
1435 				      struct sk_buff *skb)
1436 {
1437 	struct ath11k_htt_ppdu_stats_msg *msg;
1438 	struct htt_ppdu_stats_info *ppdu_info;
1439 	struct ath11k *ar;
1440 	int ret;
1441 	u8 pdev_id;
1442 	u32 ppdu_id, len;
1443 
1444 	msg = (struct ath11k_htt_ppdu_stats_msg *)skb->data;
1445 	len = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PAYLOAD_SIZE, msg->info);
1446 	pdev_id = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PDEV_ID, msg->info);
1447 	ppdu_id = msg->ppdu_id;
1448 
1449 	rcu_read_lock();
1450 	ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id);
1451 	if (!ar) {
1452 		ret = -EINVAL;
1453 		goto exit;
1454 	}
1455 
1456 	if (ath11k_debug_is_pktlog_lite_mode_enabled(ar))
1457 		trace_ath11k_htt_ppdu_stats(ar, skb->data, len);
1458 
1459 	ppdu_info = ath11k_dp_htt_get_ppdu_desc(ar, ppdu_id);
1460 	if (!ppdu_info) {
1461 		ret = -EINVAL;
1462 		goto exit;
1463 	}
1464 
1465 	ppdu_info->ppdu_id = ppdu_id;
1466 	ret = ath11k_dp_htt_tlv_iter(ab, msg->data, len,
1467 				     ath11k_htt_tlv_ppdu_stats_parse,
1468 				     (void *)ppdu_info);
1469 	if (ret) {
1470 		ath11k_warn(ab, "Failed to parse tlv %d\n", ret);
1471 		goto exit;
1472 	}
1473 
1474 exit:
1475 	rcu_read_unlock();
1476 
1477 	return ret;
1478 }
1479 
1480 static void ath11k_htt_pktlog(struct ath11k_base *ab, struct sk_buff *skb)
1481 {
1482 	struct htt_pktlog_msg *data = (struct htt_pktlog_msg *)skb->data;
1483 	struct ath_pktlog_hdr *hdr = (struct ath_pktlog_hdr *)data;
1484 	struct ath11k *ar;
1485 	u8 pdev_id;
1486 
1487 	pdev_id = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PDEV_ID, data->hdr);
1488 	ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id);
1489 	if (!ar) {
1490 		ath11k_warn(ab, "invalid pdev id %d on htt pktlog\n", pdev_id);
1491 		return;
1492 	}
1493 
1494 	trace_ath11k_htt_pktlog(ar, data->payload, hdr->size);
1495 }
1496 
1497 static void ath11k_htt_backpressure_event_handler(struct ath11k_base *ab,
1498 						  struct sk_buff *skb)
1499 {
1500 	u32 *data = (u32 *)skb->data;
1501 	u8 pdev_id, ring_type, ring_id;
1502 	u16 hp, tp;
1503 	u32 backpressure_time;
1504 
1505 	pdev_id = FIELD_GET(HTT_BACKPRESSURE_EVENT_PDEV_ID_M, *data);
1506 	ring_type = FIELD_GET(HTT_BACKPRESSURE_EVENT_RING_TYPE_M, *data);
1507 	ring_id = FIELD_GET(HTT_BACKPRESSURE_EVENT_RING_ID_M, *data);
1508 	++data;
1509 
1510 	hp = FIELD_GET(HTT_BACKPRESSURE_EVENT_HP_M, *data);
1511 	tp = FIELD_GET(HTT_BACKPRESSURE_EVENT_TP_M, *data);
1512 	++data;
1513 
1514 	backpressure_time = *data;
1515 
	ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "htt backpressure event, pdev %d, ring type %d, ring id %d, hp %d tp %d, backpressure time %d\n",
1517 		   pdev_id, ring_type, ring_id, hp, tp, backpressure_time);
1518 }
1519 
1520 void ath11k_dp_htt_htc_t2h_msg_handler(struct ath11k_base *ab,
1521 				       struct sk_buff *skb)
1522 {
1523 	struct ath11k_dp *dp = &ab->dp;
1524 	struct htt_resp_msg *resp = (struct htt_resp_msg *)skb->data;
1525 	enum htt_t2h_msg_type type = FIELD_GET(HTT_T2H_MSG_TYPE, *(u32 *)resp);
1526 	u16 peer_id;
1527 	u8 vdev_id;
1528 	u8 mac_addr[ETH_ALEN];
1529 	u16 peer_mac_h16;
1530 	u16 ast_hash;
1531 
	ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "dp_htt rx msg type: 0x%x\n", type);
1533 
1534 	switch (type) {
1535 	case HTT_T2H_MSG_TYPE_VERSION_CONF:
1536 		dp->htt_tgt_ver_major = FIELD_GET(HTT_T2H_VERSION_CONF_MAJOR,
1537 						  resp->version_msg.version);
1538 		dp->htt_tgt_ver_minor = FIELD_GET(HTT_T2H_VERSION_CONF_MINOR,
1539 						  resp->version_msg.version);
1540 		complete(&dp->htt_tgt_version_received);
1541 		break;
1542 	case HTT_T2H_MSG_TYPE_PEER_MAP:
1543 		vdev_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_VDEV_ID,
1544 				    resp->peer_map_ev.info);
1545 		peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_PEER_ID,
1546 				    resp->peer_map_ev.info);
1547 		peer_mac_h16 = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16,
1548 					 resp->peer_map_ev.info1);
1549 		ath11k_dp_get_mac_addr(resp->peer_map_ev.mac_addr_l32,
1550 				       peer_mac_h16, mac_addr);
1551 		ast_hash = FIELD_GET(HTT_T2H_PEER_MAP_INFO2_AST_HASH_VAL,
1552 				     resp->peer_map_ev.info2);
1553 		ath11k_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash);
1554 		break;
1555 	case HTT_T2H_MSG_TYPE_PEER_UNMAP:
1556 		peer_id = FIELD_GET(HTT_T2H_PEER_UNMAP_INFO_PEER_ID,
1557 				    resp->peer_unmap_ev.info);
1558 		ath11k_peer_unmap_event(ab, peer_id);
1559 		break;
1560 	case HTT_T2H_MSG_TYPE_PPDU_STATS_IND:
1561 		ath11k_htt_pull_ppdu_stats(ab, skb);
1562 		break;
1563 	case HTT_T2H_MSG_TYPE_EXT_STATS_CONF:
1564 		ath11k_dbg_htt_ext_stats_handler(ab, skb);
1565 		break;
1566 	case HTT_T2H_MSG_TYPE_PKTLOG:
1567 		ath11k_htt_pktlog(ab, skb);
1568 		break;
1569 	case HTT_T2H_MSG_TYPE_BKPRESSURE_EVENT_IND:
1570 		ath11k_htt_backpressure_event_handler(ab, skb);
1571 		break;
1572 	default:
1573 		ath11k_warn(ab, "htt event %d not handled\n", type);
1574 		break;
1575 	}
1576 
1577 	dev_kfree_skb_any(skb);
1578 }
1579 
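/* Reassemble an MSDU that spans multiple rx buffers into @first: copy the
 * end TLVs from the last buffer, grow the first skb (expanding its head if
 * needed) and append the payload of every continuation buffer.
 */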
1580 static int ath11k_dp_rx_msdu_coalesce(struct ath11k *ar,
1581 				      struct sk_buff_head *msdu_list,
1582 				      struct sk_buff *first, struct sk_buff *last,
1583 				      u8 l3pad_bytes, int msdu_len)
1584 {
1585 	struct sk_buff *skb;
1586 	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(first);
1587 	int buf_first_hdr_len, buf_first_len;
1588 	struct hal_rx_desc *ldesc;
1589 	int space_extra;
1590 	int rem_len;
1591 	int buf_len;
1592 
1593 	/* As the msdu is spread across multiple rx buffers,
1594 	 * find the offset to the start of msdu for computing
1595 	 * the length of the msdu in the first buffer.
1596 	 */
1597 	buf_first_hdr_len = HAL_RX_DESC_SIZE + l3pad_bytes;
1598 	buf_first_len = DP_RX_BUFFER_SIZE - buf_first_hdr_len;
1599 
1600 	if (WARN_ON_ONCE(msdu_len <= buf_first_len)) {
1601 		skb_put(first, buf_first_hdr_len + msdu_len);
1602 		skb_pull(first, buf_first_hdr_len);
1603 		return 0;
1604 	}
1605 
1606 	ldesc = (struct hal_rx_desc *)last->data;
1607 	rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(ldesc);
1608 	rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(ldesc);
1609 
1610 	/* MSDU spans over multiple buffers because the length of the MSDU
1611 	 * exceeds DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE. So assume the data
1612 	 * in the first buf is of length DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE.
1613 	 */
1614 	skb_put(first, DP_RX_BUFFER_SIZE);
1615 	skb_pull(first, buf_first_hdr_len);
1616 
	/* When an MSDU is spread over multiple buffers, the attention, MSDU_END
	 * and MPDU_END TLVs are valid only in the last buffer. Copy those TLVs.
	 */
1620 	ath11k_dp_rx_desc_end_tlv_copy(rxcb->rx_desc, ldesc);
1621 
1622 	space_extra = msdu_len - (buf_first_len + skb_tailroom(first));
1623 	if (space_extra > 0 &&
1624 	    (pskb_expand_head(first, 0, space_extra, GFP_ATOMIC) < 0)) {
1625 		/* Free up all buffers of the MSDU */
1626 		while ((skb = __skb_dequeue(msdu_list)) != NULL) {
1627 			rxcb = ATH11K_SKB_RXCB(skb);
1628 			if (!rxcb->is_continuation) {
1629 				dev_kfree_skb_any(skb);
1630 				break;
1631 			}
1632 			dev_kfree_skb_any(skb);
1633 		}
1634 		return -ENOMEM;
1635 	}
1636 
1637 	rem_len = msdu_len - buf_first_len;
1638 	while ((skb = __skb_dequeue(msdu_list)) != NULL && rem_len > 0) {
1639 		rxcb = ATH11K_SKB_RXCB(skb);
1640 		if (rxcb->is_continuation)
1641 			buf_len = DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE;
1642 		else
1643 			buf_len = rem_len;
1644 
1645 		if (buf_len > (DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE)) {
1646 			WARN_ON_ONCE(1);
1647 			dev_kfree_skb_any(skb);
1648 			return -EINVAL;
1649 		}
1650 
1651 		skb_put(skb, buf_len + HAL_RX_DESC_SIZE);
1652 		skb_pull(skb, HAL_RX_DESC_SIZE);
1653 		skb_copy_from_linear_data(skb, skb_put(first, buf_len),
1654 					  buf_len);
1655 		dev_kfree_skb_any(skb);
1656 
1657 		rem_len -= buf_len;
1658 		if (!rxcb->is_continuation)
1659 			break;
1660 	}
1661 
1662 	return 0;
1663 }
1664 
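/* Return the buffer carrying the final part of the MSDU that starts at
 * @first, i.e. the next entry in @msdu_list without the continuation flag.
 */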
1665 static struct sk_buff *ath11k_dp_rx_get_msdu_last_buf(struct sk_buff_head *msdu_list,
1666 						      struct sk_buff *first)
1667 {
1668 	struct sk_buff *skb;
1669 	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(first);
1670 
1671 	if (!rxcb->is_continuation)
1672 		return first;
1673 
1674 	skb_queue_walk(msdu_list, skb) {
1675 		rxcb = ATH11K_SKB_RXCB(skb);
1676 		if (!rxcb->is_continuation)
1677 			return skb;
1678 	}
1679 
1680 	return NULL;
1681 }
1682 
1683 static void ath11k_dp_rx_h_csum_offload(struct sk_buff *msdu)
1684 {
1685 	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
1686 	bool ip_csum_fail, l4_csum_fail;
1687 
1688 	ip_csum_fail = ath11k_dp_rx_h_attn_ip_cksum_fail(rxcb->rx_desc);
1689 	l4_csum_fail = ath11k_dp_rx_h_attn_l4_cksum_fail(rxcb->rx_desc);
1690 
1691 	msdu->ip_summed = (ip_csum_fail || l4_csum_fail) ?
1692 			  CHECKSUM_NONE : CHECKSUM_UNNECESSARY;
1693 }
1694 
1695 static int ath11k_dp_rx_crypto_mic_len(struct ath11k *ar,
1696 				       enum hal_encrypt_type enctype)
1697 {
1698 	switch (enctype) {
1699 	case HAL_ENCRYPT_TYPE_OPEN:
1700 	case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
1701 	case HAL_ENCRYPT_TYPE_TKIP_MIC:
1702 		return 0;
1703 	case HAL_ENCRYPT_TYPE_CCMP_128:
1704 		return IEEE80211_CCMP_MIC_LEN;
1705 	case HAL_ENCRYPT_TYPE_CCMP_256:
1706 		return IEEE80211_CCMP_256_MIC_LEN;
1707 	case HAL_ENCRYPT_TYPE_GCMP_128:
1708 	case HAL_ENCRYPT_TYPE_AES_GCMP_256:
1709 		return IEEE80211_GCMP_MIC_LEN;
1710 	case HAL_ENCRYPT_TYPE_WEP_40:
1711 	case HAL_ENCRYPT_TYPE_WEP_104:
1712 	case HAL_ENCRYPT_TYPE_WEP_128:
1713 	case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
1714 	case HAL_ENCRYPT_TYPE_WAPI:
1715 		break;
1716 	}
1717 
1718 	ath11k_warn(ar->ab, "unsupported encryption type %d for mic len\n", enctype);
1719 	return 0;
1720 }
1721 
1722 static int ath11k_dp_rx_crypto_param_len(struct ath11k *ar,
1723 					 enum hal_encrypt_type enctype)
1724 {
1725 	switch (enctype) {
1726 	case HAL_ENCRYPT_TYPE_OPEN:
1727 		return 0;
1728 	case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
1729 	case HAL_ENCRYPT_TYPE_TKIP_MIC:
1730 		return IEEE80211_TKIP_IV_LEN;
1731 	case HAL_ENCRYPT_TYPE_CCMP_128:
1732 		return IEEE80211_CCMP_HDR_LEN;
1733 	case HAL_ENCRYPT_TYPE_CCMP_256:
1734 		return IEEE80211_CCMP_256_HDR_LEN;
1735 	case HAL_ENCRYPT_TYPE_GCMP_128:
1736 	case HAL_ENCRYPT_TYPE_AES_GCMP_256:
1737 		return IEEE80211_GCMP_HDR_LEN;
1738 	case HAL_ENCRYPT_TYPE_WEP_40:
1739 	case HAL_ENCRYPT_TYPE_WEP_104:
1740 	case HAL_ENCRYPT_TYPE_WEP_128:
1741 	case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
1742 	case HAL_ENCRYPT_TYPE_WAPI:
1743 		break;
1744 	}
1745 
1746 	ath11k_warn(ar->ab, "unsupported encryption type %d\n", enctype);
1747 	return 0;
1748 }
1749 
1750 static int ath11k_dp_rx_crypto_icv_len(struct ath11k *ar,
1751 				       enum hal_encrypt_type enctype)
1752 {
1753 	switch (enctype) {
1754 	case HAL_ENCRYPT_TYPE_OPEN:
1755 	case HAL_ENCRYPT_TYPE_CCMP_128:
1756 	case HAL_ENCRYPT_TYPE_CCMP_256:
1757 	case HAL_ENCRYPT_TYPE_GCMP_128:
1758 	case HAL_ENCRYPT_TYPE_AES_GCMP_256:
1759 		return 0;
1760 	case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
1761 	case HAL_ENCRYPT_TYPE_TKIP_MIC:
1762 		return IEEE80211_TKIP_ICV_LEN;
1763 	case HAL_ENCRYPT_TYPE_WEP_40:
1764 	case HAL_ENCRYPT_TYPE_WEP_104:
1765 	case HAL_ENCRYPT_TYPE_WEP_128:
1766 	case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
1767 	case HAL_ENCRYPT_TYPE_WAPI:
1768 		break;
1769 	}
1770 
1771 	ath11k_warn(ar->ab, "unsupported encryption type %d\n", enctype);
1772 	return 0;
1773 }
1774 
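/* Undecap a "native wifi" formatted MSDU back into a full 802.11 frame:
 * restore the original 802.11 header (rebuilding the QoS control field for
 * non-first A-MSDU subframes) and re-insert the crypto IV when it was not
 * stripped.
 */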
1775 static void ath11k_dp_rx_h_undecap_nwifi(struct ath11k *ar,
1776 					 struct sk_buff *msdu,
1777 					 u8 *first_hdr,
1778 					 enum hal_encrypt_type enctype,
1779 					 struct ieee80211_rx_status *status)
1780 {
1781 	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
1782 	u8 decap_hdr[DP_MAX_NWIFI_HDR_LEN];
1783 	struct ieee80211_hdr *hdr;
1784 	size_t hdr_len;
1785 	u8 da[ETH_ALEN];
1786 	u8 sa[ETH_ALEN];
1787 	u16 qos_ctl = 0;
1788 	u8 *qos;
1789 
1790 	/* copy SA & DA and pull decapped header */
1791 	hdr = (struct ieee80211_hdr *)msdu->data;
1792 	hdr_len = ieee80211_hdrlen(hdr->frame_control);
1793 	ether_addr_copy(da, ieee80211_get_DA(hdr));
1794 	ether_addr_copy(sa, ieee80211_get_SA(hdr));
1795 	skb_pull(msdu, ieee80211_hdrlen(hdr->frame_control));
1796 
1797 	if (rxcb->is_first_msdu) {
1798 		/* original 802.11 header is valid for the first msdu
1799 		 * hence we can reuse the same header
1800 		 */
1801 		hdr = (struct ieee80211_hdr *)first_hdr;
1802 		hdr_len = ieee80211_hdrlen(hdr->frame_control);
1803 
1804 		/* Each A-MSDU subframe will be reported as a separate MSDU,
1805 		 * so strip the A-MSDU bit from QoS Ctl.
1806 		 */
1807 		if (ieee80211_is_data_qos(hdr->frame_control)) {
1808 			qos = ieee80211_get_qos_ctl(hdr);
1809 			qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
1810 		}
1811 	} else {
1812 		/*  Rebuild qos header if this is a middle/last msdu */
1813 		hdr->frame_control |= __cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
1814 
1815 		/* Reset the order bit as the HT_Control header is stripped */
1816 		hdr->frame_control &= ~(__cpu_to_le16(IEEE80211_FCTL_ORDER));
1817 
1818 		qos_ctl = rxcb->tid;
1819 
1820 		if (ath11k_dp_rx_h_msdu_start_mesh_ctl_present(rxcb->rx_desc))
1821 			qos_ctl |= IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT;
1822 
1823 		/* TODO Add other QoS ctl fields when required */
1824 
1825 		/* copy decap header before overwriting for reuse below */
1826 		memcpy(decap_hdr, (u8 *)hdr, hdr_len);
1827 	}
1828 
1829 	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
1830 		memcpy(skb_push(msdu,
1831 				ath11k_dp_rx_crypto_param_len(ar, enctype)),
1832 		       (void *)hdr + hdr_len,
1833 		       ath11k_dp_rx_crypto_param_len(ar, enctype));
1834 	}
1835 
1836 	if (!rxcb->is_first_msdu) {
1837 		memcpy(skb_push(msdu,
1838 				IEEE80211_QOS_CTL_LEN), &qos_ctl,
1839 				IEEE80211_QOS_CTL_LEN);
1840 		memcpy(skb_push(msdu, hdr_len), decap_hdr, hdr_len);
1841 		return;
1842 	}
1843 
1844 	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
1845 
1846 	/* The original 802.11 header has a different DA and in
1847 	 * case of 4addr it may also have a different SA
1848 	 */
1849 	hdr = (struct ieee80211_hdr *)msdu->data;
1850 	ether_addr_copy(ieee80211_get_DA(hdr), da);
1851 	ether_addr_copy(ieee80211_get_SA(hdr), sa);
1852 }
1853 
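/* Undecap a raw (802.11) MSDU: strip the FCS and, for decrypted frames,
 * trim the MIC/ICV trailers and remove the IV/crypto params from the
 * header depending on which RX_FLAG_*_STRIPPED flags are set. Only
 * non-A-MSDU frames are expected here.
 */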
1854 static void ath11k_dp_rx_h_undecap_raw(struct ath11k *ar, struct sk_buff *msdu,
1855 				       enum hal_encrypt_type enctype,
1856 				       struct ieee80211_rx_status *status,
1857 				       bool decrypted)
1858 {
1859 	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
1860 	struct ieee80211_hdr *hdr;
1861 	size_t hdr_len;
1862 	size_t crypto_len;
1863 
1864 	if (!rxcb->is_first_msdu ||
1865 	    !(rxcb->is_first_msdu && rxcb->is_last_msdu)) {
1866 		WARN_ON_ONCE(1);
1867 		return;
1868 	}
1869 
1870 	skb_trim(msdu, msdu->len - FCS_LEN);
1871 
1872 	if (!decrypted)
1873 		return;
1874 
1875 	hdr = (void *)msdu->data;
1876 
1877 	/* Tail */
1878 	if (status->flag & RX_FLAG_IV_STRIPPED) {
1879 		skb_trim(msdu, msdu->len -
1880 			 ath11k_dp_rx_crypto_mic_len(ar, enctype));
1881 
1882 		skb_trim(msdu, msdu->len -
1883 			 ath11k_dp_rx_crypto_icv_len(ar, enctype));
1884 	} else {
1885 		/* MIC */
1886 		if (status->flag & RX_FLAG_MIC_STRIPPED)
1887 			skb_trim(msdu, msdu->len -
1888 				 ath11k_dp_rx_crypto_mic_len(ar, enctype));
1889 
1890 		/* ICV */
1891 		if (status->flag & RX_FLAG_ICV_STRIPPED)
1892 			skb_trim(msdu, msdu->len -
1893 				 ath11k_dp_rx_crypto_icv_len(ar, enctype));
1894 	}
1895 
1896 	/* MMIC */
1897 	if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
1898 	    !ieee80211_has_morefrags(hdr->frame_control) &&
1899 	    enctype == HAL_ENCRYPT_TYPE_TKIP_MIC)
1900 		skb_trim(msdu, msdu->len - IEEE80211_CCMP_MIC_LEN);
1901 
1902 	/* Head */
1903 	if (status->flag & RX_FLAG_IV_STRIPPED) {
1904 		hdr_len = ieee80211_hdrlen(hdr->frame_control);
1905 		crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype);
1906 
1907 		memmove((void *)msdu->data + crypto_len,
1908 			(void *)msdu->data, hdr_len);
1909 		skb_pull(msdu, crypto_len);
1910 	}
1911 }
1912 
1913 static void *ath11k_dp_rx_h_find_rfc1042(struct ath11k *ar,
1914 					 struct sk_buff *msdu,
1915 					 enum hal_encrypt_type enctype)
1916 {
1917 	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
1918 	struct ieee80211_hdr *hdr;
1919 	size_t hdr_len, crypto_len;
1920 	void *rfc1042;
1921 	bool is_amsdu;
1922 
1923 	is_amsdu = !(rxcb->is_first_msdu && rxcb->is_last_msdu);
1924 	hdr = (struct ieee80211_hdr *)ath11k_dp_rx_h_80211_hdr(rxcb->rx_desc);
1925 	rfc1042 = hdr;
1926 
1927 	if (rxcb->is_first_msdu) {
1928 		hdr_len = ieee80211_hdrlen(hdr->frame_control);
1929 		crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype);
1930 
1931 		rfc1042 += hdr_len + crypto_len;
1932 	}
1933 
1934 	if (is_amsdu)
1935 		rfc1042 += sizeof(struct ath11k_dp_amsdu_subframe_hdr);
1936 
1937 	return rfc1042;
1938 }
1939 
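/* Undecap an ethernet-formatted MSDU: save DA/SA from the ethernet
 * header, replace it with the rfc1042/LLC/SNAP header recovered from the
 * rx descriptor, re-insert crypto params if the IV was not stripped,
 * prepend the original 802.11 header and restore DA/SA into it.
 */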
1940 static void ath11k_dp_rx_h_undecap_eth(struct ath11k *ar,
1941 				       struct sk_buff *msdu,
1942 				       u8 *first_hdr,
1943 				       enum hal_encrypt_type enctype,
1944 				       struct ieee80211_rx_status *status)
1945 {
1946 	struct ieee80211_hdr *hdr;
1947 	struct ethhdr *eth;
1948 	size_t hdr_len;
1949 	u8 da[ETH_ALEN];
1950 	u8 sa[ETH_ALEN];
1951 	void *rfc1042;
1952 
1953 	rfc1042 = ath11k_dp_rx_h_find_rfc1042(ar, msdu, enctype);
1954 	if (WARN_ON_ONCE(!rfc1042))
1955 		return;
1956 
1957 	/* pull decapped header and copy SA & DA */
1958 	eth = (struct ethhdr *)msdu->data;
1959 	ether_addr_copy(da, eth->h_dest);
1960 	ether_addr_copy(sa, eth->h_source);
1961 	skb_pull(msdu, sizeof(struct ethhdr));
1962 
1963 	/* push rfc1042/llc/snap */
1964 	memcpy(skb_push(msdu, sizeof(struct ath11k_dp_rfc1042_hdr)), rfc1042,
1965 	       sizeof(struct ath11k_dp_rfc1042_hdr));
1966 
1967 	/* push original 802.11 header */
1968 	hdr = (struct ieee80211_hdr *)first_hdr;
1969 	hdr_len = ieee80211_hdrlen(hdr->frame_control);
1970 
1971 	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
1972 		memcpy(skb_push(msdu,
1973 				ath11k_dp_rx_crypto_param_len(ar, enctype)),
1974 		       (void *)hdr + hdr_len,
1975 		       ath11k_dp_rx_crypto_param_len(ar, enctype));
1976 	}
1977 
1978 	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
1979 
1980 	/* The original 802.11 header has a different DA and in
1981 	 * case of 4addr it may also have a different SA
1982 	 */
1983 	hdr = (struct ieee80211_hdr *)msdu->data;
1984 	ether_addr_copy(ieee80211_get_DA(hdr), da);
1985 	ether_addr_copy(ieee80211_get_SA(hdr), sa);
1986 }
1987 
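/* Dispatch undecap handling based on the decap format reported in the
 * rx descriptor (native wifi, raw, ethernet or 802.3).
 */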
1988 static void ath11k_dp_rx_h_undecap(struct ath11k *ar, struct sk_buff *msdu,
1989 				   struct hal_rx_desc *rx_desc,
1990 				   enum hal_encrypt_type enctype,
1991 				   struct ieee80211_rx_status *status,
1992 				   bool decrypted)
1993 {
1994 	u8 *first_hdr;
1995 	u8 decap;
1996 
1997 	first_hdr = ath11k_dp_rx_h_80211_hdr(rx_desc);
1998 	decap = ath11k_dp_rx_h_msdu_start_decap_type(rx_desc);
1999 
2000 	switch (decap) {
2001 	case DP_RX_DECAP_TYPE_NATIVE_WIFI:
2002 		ath11k_dp_rx_h_undecap_nwifi(ar, msdu, first_hdr,
2003 					     enctype, status);
2004 		break;
2005 	case DP_RX_DECAP_TYPE_RAW:
2006 		ath11k_dp_rx_h_undecap_raw(ar, msdu, enctype, status,
2007 					   decrypted);
2008 		break;
2009 	case DP_RX_DECAP_TYPE_ETHERNET2_DIX:
2010 		/* TODO: undecap support for middle/last msdus of an A-MSDU */
2011 		ath11k_dp_rx_h_undecap_eth(ar, msdu, first_hdr,
2012 					   enctype, status);
2013 		break;
2014 	case DP_RX_DECAP_TYPE_8023:
2015 		/* TODO: Handle undecap for these formats */
2016 		break;
2017 	}
2018 }
2019 
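/* Per-MPDU rx status handling: derive the cipher from the peer entry
 * (group vs pairwise key), translate the attention error bitmap and
 * decryption status into mac80211 rx flags, apply checksum offload and
 * undecap. For multicast frames the crypto header is left in place so
 * mac80211 can perform the PN check; for validated unicast frames the
 * protected bit is cleared.
 */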
2020 static void ath11k_dp_rx_h_mpdu(struct ath11k *ar,
2021 				struct sk_buff *msdu,
2022 				struct hal_rx_desc *rx_desc,
2023 				struct ieee80211_rx_status *rx_status)
2024 {
2025 	bool fill_crypto_hdr, mcast;
2026 	enum hal_encrypt_type enctype;
2027 	bool is_decrypted = false;
2028 	struct ieee80211_hdr *hdr;
2029 	struct ath11k_peer *peer;
2030 	u32 err_bitmap;
2031 
2032 	hdr = (struct ieee80211_hdr *)msdu->data;
2033 
2034 	/* PN for multicast packets will be checked in mac80211 */
2035 
2036 	mcast = is_multicast_ether_addr(hdr->addr1);
2037 	fill_crypto_hdr = mcast;
2038 
2039 	is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_desc);
2040 
2041 	spin_lock_bh(&ar->ab->base_lock);
2042 	peer = ath11k_peer_find_by_addr(ar->ab, hdr->addr2);
2043 	if (peer) {
2044 		if (mcast)
2045 			enctype = peer->sec_type_grp;
2046 		else
2047 			enctype = peer->sec_type;
2048 	} else {
2049 		enctype = HAL_ENCRYPT_TYPE_OPEN;
2050 	}
2051 	spin_unlock_bh(&ar->ab->base_lock);
2052 
2053 	err_bitmap = ath11k_dp_rx_h_attn_mpdu_err(rx_desc);
2054 
2055 	/* Clear per-MPDU flags while leaving per-PPDU flags intact */
2056 	rx_status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
2057 			     RX_FLAG_MMIC_ERROR |
2058 			     RX_FLAG_DECRYPTED |
2059 			     RX_FLAG_IV_STRIPPED |
2060 			     RX_FLAG_MMIC_STRIPPED);
2061 
2062 	if (err_bitmap & DP_RX_MPDU_ERR_FCS)
2063 		rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
2064 	if (err_bitmap & DP_RX_MPDU_ERR_TKIP_MIC)
2065 		rx_status->flag |= RX_FLAG_MMIC_ERROR;
2066 
2067 	if (is_decrypted) {
2068 		rx_status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MMIC_STRIPPED;
2069 
2070 		if (fill_crypto_hdr)
2071 			rx_status->flag |= RX_FLAG_MIC_STRIPPED |
2072 					RX_FLAG_ICV_STRIPPED;
2073 		else
2074 			rx_status->flag |= RX_FLAG_IV_STRIPPED |
2075 					   RX_FLAG_PN_VALIDATED;
2076 	}
2077 
2078 	ath11k_dp_rx_h_csum_offload(msdu);
2079 	ath11k_dp_rx_h_undecap(ar, msdu, rx_desc,
2080 			       enctype, rx_status, is_decrypted);
2081 
2082 	if (!is_decrypted || fill_crypto_hdr)
2083 		return;
2084 
2085 	hdr = (void *)msdu->data;
2086 	hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
2087 }
2088 
2089 static void ath11k_dp_rx_h_rate(struct ath11k *ar, struct hal_rx_desc *rx_desc,
2090 				struct ieee80211_rx_status *rx_status)
2091 {
2092 	struct ieee80211_supported_band *sband;
2093 	enum rx_msdu_start_pkt_type pkt_type;
2094 	u8 bw;
2095 	u8 rate_mcs, nss;
2096 	u8 sgi;
2097 	bool is_cck;
2098 
2099 	pkt_type = ath11k_dp_rx_h_msdu_start_pkt_type(rx_desc);
2100 	bw = ath11k_dp_rx_h_msdu_start_rx_bw(rx_desc);
2101 	rate_mcs = ath11k_dp_rx_h_msdu_start_rate_mcs(rx_desc);
2102 	nss = ath11k_dp_rx_h_msdu_start_nss(rx_desc);
2103 	sgi = ath11k_dp_rx_h_msdu_start_sgi(rx_desc);
2104 
2105 	switch (pkt_type) {
2106 	case RX_MSDU_START_PKT_TYPE_11A:
2107 	case RX_MSDU_START_PKT_TYPE_11B:
2108 		is_cck = (pkt_type == RX_MSDU_START_PKT_TYPE_11B);
2109 		sband = &ar->mac.sbands[rx_status->band];
2110 		rx_status->rate_idx = ath11k_mac_hw_rate_to_idx(sband, rate_mcs,
2111 								is_cck);
2112 		break;
2113 	case RX_MSDU_START_PKT_TYPE_11N:
2114 		rx_status->encoding = RX_ENC_HT;
2115 		if (rate_mcs > ATH11K_HT_MCS_MAX) {
2116 			ath11k_warn(ar->ab,
2117 				    "Received with invalid mcs in HT mode %d\n",
2118 				     rate_mcs);
2119 			break;
2120 		}
2121 		rx_status->rate_idx = rate_mcs + (8 * (nss - 1));
2122 		if (sgi)
2123 			rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
2124 		rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw);
2125 		break;
2126 	case RX_MSDU_START_PKT_TYPE_11AC:
2127 		rx_status->encoding = RX_ENC_VHT;
2128 		rx_status->rate_idx = rate_mcs;
2129 		if (rate_mcs > ATH11K_VHT_MCS_MAX) {
2130 			ath11k_warn(ar->ab,
2131 				    "Received with invalid mcs in VHT mode %d\n",
2132 				     rate_mcs);
2133 			break;
2134 		}
2135 		rx_status->nss = nss;
2136 		if (sgi)
2137 			rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
2138 		rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw);
2139 		break;
2140 	case RX_MSDU_START_PKT_TYPE_11AX:
2141 		rx_status->rate_idx = rate_mcs;
2142 		if (rate_mcs > ATH11K_HE_MCS_MAX) {
2143 			ath11k_warn(ar->ab,
2144 				    "Received with invalid mcs in HE mode %d\n",
2145 				    rate_mcs);
2146 			break;
2147 		}
2148 		rx_status->encoding = RX_ENC_HE;
2149 		rx_status->nss = nss;
2150 		rx_status->he_gi = ath11k_he_gi_to_nl80211_he_gi(sgi);
2151 		rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw);
2152 		break;
2153 	}
2154 }
2155 
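/* Fill per-PPDU rx status: band and frequency derived from the channel
 * number in the rx descriptor (falling back to the current rx channel),
 * followed by the rate/bandwidth/NSS fields.
 */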
2156 static void ath11k_dp_rx_h_ppdu(struct ath11k *ar, struct hal_rx_desc *rx_desc,
2157 				struct ieee80211_rx_status *rx_status)
2158 {
2159 	u8 channel_num;
2160 
2161 	rx_status->freq = 0;
2162 	rx_status->rate_idx = 0;
2163 	rx_status->nss = 0;
2164 	rx_status->encoding = RX_ENC_LEGACY;
2165 	rx_status->bw = RATE_INFO_BW_20;
2166 
2167 	rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
2168 
2169 	channel_num = ath11k_dp_rx_h_msdu_start_freq(rx_desc);
2170 
2171 	if (channel_num >= 1 && channel_num <= 14) {
2172 		rx_status->band = NL80211_BAND_2GHZ;
2173 	} else if (channel_num >= 36 && channel_num <= 173) {
2174 		rx_status->band = NL80211_BAND_5GHZ;
2175 	} else {
2176 		spin_lock_bh(&ar->data_lock);
2177 		rx_status->band = ar->rx_channel->band;
2178 		channel_num =
2179 			ieee80211_frequency_to_channel(ar->rx_channel->center_freq);
2180 		spin_unlock_bh(&ar->data_lock);
2181 		ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "rx_desc: ",
2182 				rx_desc, sizeof(struct hal_rx_desc));
2183 	}
2184 
2185 	rx_status->freq = ieee80211_channel_to_frequency(channel_num,
2186 							 rx_status->band);
2187 
2188 	ath11k_dp_rx_h_rate(ar, rx_desc, rx_status);
2189 }
2190 
2191 static char *ath11k_print_get_tid(struct ieee80211_hdr *hdr, char *out,
2192 				  size_t size)
2193 {
2194 	u8 *qc;
2195 	int tid;
2196 
2197 	if (!ieee80211_is_data_qos(hdr->frame_control))
2198 		return "";
2199 
2200 	qc = ieee80211_get_qos_ctl(hdr);
2201 	tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
2202 	snprintf(out, size, "tid %d", tid);
2203 
2204 	return out;
2205 }
2206 
2207 static void ath11k_dp_rx_deliver_msdu(struct ath11k *ar, struct napi_struct *napi,
2208 				      struct sk_buff *msdu)
2209 {
2210 	static const struct ieee80211_radiotap_he known = {
2211 		.data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
2212 				     IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN),
2213 		.data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN),
2214 	};
2215 	struct ieee80211_rx_status *status;
2216 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
2217 	struct ieee80211_radiotap_he *he = NULL;
2218 	char tid[32];
2219 
2220 	status = IEEE80211_SKB_RXCB(msdu);
2221 	if (status->encoding == RX_ENC_HE) {
2222 		he = skb_push(msdu, sizeof(known));
2223 		memcpy(he, &known, sizeof(known));
2224 		status->flag |= RX_FLAG_RADIOTAP_HE;
2225 	}
2226 
2227 	ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
2228 		   "rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
2229 		   msdu,
2230 		   msdu->len,
2231 		   ieee80211_get_SA(hdr),
2232 		   ath11k_print_get_tid(hdr, tid, sizeof(tid)),
2233 		   is_multicast_ether_addr(ieee80211_get_DA(hdr)) ?
2234 							"mcast" : "ucast",
2235 		   (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4,
2236 		   (status->encoding == RX_ENC_LEGACY) ? "legacy" : "",
2237 		   (status->encoding == RX_ENC_HT) ? "ht" : "",
2238 		   (status->encoding == RX_ENC_VHT) ? "vht" : "",
2239 		   (status->encoding == RX_ENC_HE) ? "he" : "",
2240 		   (status->bw == RATE_INFO_BW_40) ? "40" : "",
2241 		   (status->bw == RATE_INFO_BW_80) ? "80" : "",
2242 		   (status->bw == RATE_INFO_BW_160) ? "160" : "",
2243 		   status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "",
2244 		   status->rate_idx,
2245 		   status->nss,
2246 		   status->freq,
2247 		   status->band, status->flag,
2248 		   !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
2249 		   !!(status->flag & RX_FLAG_MMIC_ERROR),
2250 		   !!(status->flag & RX_FLAG_AMSDU_MORE));
2251 
2252 	/* TODO: trace rx packet */
2253 
2254 	ieee80211_rx_napi(ar->hw, NULL, msdu, napi);
2255 }
2256 
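/* Prepare a single MSDU for delivery: locate the last buffer of the MSDU
 * to read the attention/MSDU_END TLVs, strip or coalesce the hal rx
 * descriptor(s), drop non-data frames and fill the mac80211 rx status.
 */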
2257 static int ath11k_dp_rx_process_msdu(struct ath11k *ar,
2258 				     struct sk_buff *msdu,
2259 				     struct sk_buff_head *msdu_list)
2260 {
2261 	struct hal_rx_desc *rx_desc, *lrx_desc;
2262 	struct ieee80211_rx_status rx_status = {0};
2263 	struct ieee80211_rx_status *status;
2264 	struct ath11k_skb_rxcb *rxcb;
2265 	struct ieee80211_hdr *hdr;
2266 	struct sk_buff *last_buf;
2267 	u8 l3_pad_bytes;
2268 	u16 msdu_len;
2269 	int ret;
2270 
2271 	last_buf = ath11k_dp_rx_get_msdu_last_buf(msdu_list, msdu);
2272 	if (!last_buf) {
2273 		ath11k_warn(ar->ab,
2274 			    "No valid Rx buffer to access Atten/MSDU_END/MPDU_END tlvs\n");
2275 		ret = -EIO;
2276 		goto free_out;
2277 	}
2278 
2279 	rx_desc = (struct hal_rx_desc *)msdu->data;
2280 	lrx_desc = (struct hal_rx_desc *)last_buf->data;
2281 	if (!ath11k_dp_rx_h_attn_msdu_done(lrx_desc)) {
2282 		ath11k_warn(ar->ab, "msdu_done bit in attention is not set\n");
2283 		ret = -EIO;
2284 		goto free_out;
2285 	}
2286 
2287 	rxcb = ATH11K_SKB_RXCB(msdu);
2288 	rxcb->rx_desc = rx_desc;
2289 	msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(rx_desc);
2290 	l3_pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(lrx_desc);
2291 
2292 	if (rxcb->is_frag) {
2293 		skb_pull(msdu, HAL_RX_DESC_SIZE);
2294 	} else if (!rxcb->is_continuation) {
2295 		if ((msdu_len + HAL_RX_DESC_SIZE) > DP_RX_BUFFER_SIZE) {
2296 			ret = -EINVAL;
2297 			ath11k_warn(ar->ab, "invalid msdu len %u\n", msdu_len);
2298 			goto free_out;
2299 		}
2300 		skb_put(msdu, HAL_RX_DESC_SIZE + l3_pad_bytes + msdu_len);
2301 		skb_pull(msdu, HAL_RX_DESC_SIZE + l3_pad_bytes);
2302 	} else {
2303 		ret = ath11k_dp_rx_msdu_coalesce(ar, msdu_list,
2304 						 msdu, last_buf,
2305 						 l3_pad_bytes, msdu_len);
2306 		if (ret) {
2307 			ath11k_warn(ar->ab,
2308 				    "failed to coalesce msdu rx buffer%d\n", ret);
2309 			goto free_out;
2310 		}
2311 	}
2312 
2313 	hdr = (struct ieee80211_hdr *)msdu->data;
2314 
2315 	/* Process only data frames */
2316 	if (!ieee80211_is_data(hdr->frame_control))
2317 		return -EINVAL;
2318 
2319 	ath11k_dp_rx_h_ppdu(ar, rx_desc, &rx_status);
2320 	ath11k_dp_rx_h_mpdu(ar, msdu, rx_desc, &rx_status);
2321 
2322 	rx_status.flag |= RX_FLAG_SKIP_MONITOR | RX_FLAG_DUP_VALIDATED;
2323 
2324 	status = IEEE80211_SKB_RXCB(msdu);
2325 	*status = rx_status;
2326 	return 0;
2327 
2328 free_out:
2329 	return ret;
2330 }
2331 
2332 static void ath11k_dp_rx_process_received_packets(struct ath11k_base *ab,
2333 						  struct napi_struct *napi,
2334 						  struct sk_buff_head *msdu_list,
2335 						  int *quota, int ring_id)
2336 {
2337 	struct ath11k_skb_rxcb *rxcb;
2338 	struct sk_buff *msdu;
2339 	struct ath11k *ar;
2340 	u8 mac_id;
2341 	int ret;
2342 
2343 	if (skb_queue_empty(msdu_list))
2344 		return;
2345 
2346 	rcu_read_lock();
2347 
2348 	while (*quota && (msdu = __skb_dequeue(msdu_list))) {
2349 		rxcb = ATH11K_SKB_RXCB(msdu);
2350 		mac_id = rxcb->mac_id;
2351 		ar = ab->pdevs[mac_id].ar;
2352 		if (!rcu_dereference(ab->pdevs_active[mac_id])) {
2353 			dev_kfree_skb_any(msdu);
2354 			continue;
2355 		}
2356 
2357 		if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) {
2358 			dev_kfree_skb_any(msdu);
2359 			continue;
2360 		}
2361 
2362 		ret = ath11k_dp_rx_process_msdu(ar, msdu, msdu_list);
2363 		if (ret) {
2364 			ath11k_dbg(ab, ATH11K_DBG_DATA,
2365 				   "Unable to process msdu %d", ret);
2366 			dev_kfree_skb_any(msdu);
2367 			continue;
2368 		}
2369 
2370 		ath11k_dp_rx_deliver_msdu(ar, napi, msdu);
2371 		(*quota)--;
2372 	}
2373 
2374 	rcu_read_unlock();
2375 }
2376 
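/* NAPI handler for a REO destination ring: reap up to 'budget' entries,
 * unmap and collect the corresponding rx buffers, replenish the rxdma
 * refill rings per radio and hand the reaped MSDUs to
 * ath11k_dp_rx_process_received_packets().
 */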
2377 int ath11k_dp_process_rx(struct ath11k_base *ab, int ring_id,
2378 			 struct napi_struct *napi, int budget)
2379 {
2380 	struct ath11k_dp *dp = &ab->dp;
2381 	struct dp_rxdma_ring *rx_ring;
2382 	int num_buffs_reaped[MAX_RADIOS] = {0};
2383 	struct sk_buff_head msdu_list;
2384 	struct ath11k_skb_rxcb *rxcb;
2385 	int total_msdu_reaped = 0;
2386 	struct hal_srng *srng;
2387 	struct sk_buff *msdu;
2388 	int quota = budget;
2389 	bool done = false;
2390 	int buf_id, mac_id;
2391 	struct ath11k *ar;
2392 	u32 *rx_desc;
2393 	int i;
2394 
2395 	__skb_queue_head_init(&msdu_list);
2396 
2397 	srng = &ab->hal.srng_list[dp->reo_dst_ring[ring_id].ring_id];
2398 
2399 	spin_lock_bh(&srng->lock);
2400 
2401 	ath11k_hal_srng_access_begin(ab, srng);
2402 
2403 try_again:
2404 	while ((rx_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
2405 		struct hal_reo_dest_ring *desc = (struct hal_reo_dest_ring *)rx_desc;
2406 		enum hal_reo_dest_ring_push_reason push_reason;
2407 		u32 cookie;
2408 
2409 		cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
2410 				   desc->buf_addr_info.info1);
2411 		buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
2412 				   cookie);
2413 		mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, cookie);
2414 
2415 		ar = ab->pdevs[mac_id].ar;
2416 		rx_ring = &ar->dp.rx_refill_buf_ring;
2417 		spin_lock_bh(&rx_ring->idr_lock);
2418 		msdu = idr_find(&rx_ring->bufs_idr, buf_id);
2419 		if (!msdu) {
2420 			ath11k_warn(ab, "frame rx with invalid buf_id %d\n",
2421 				    buf_id);
2422 			spin_unlock_bh(&rx_ring->idr_lock);
2423 			continue;
2424 		}
2425 
2426 		idr_remove(&rx_ring->bufs_idr, buf_id);
2427 		spin_unlock_bh(&rx_ring->idr_lock);
2428 
2429 		rxcb = ATH11K_SKB_RXCB(msdu);
2430 		dma_unmap_single(ab->dev, rxcb->paddr,
2431 				 msdu->len + skb_tailroom(msdu),
2432 				 DMA_FROM_DEVICE);
2433 
2434 		num_buffs_reaped[mac_id]++;
2435 		total_msdu_reaped++;
2436 
2437 		push_reason = FIELD_GET(HAL_REO_DEST_RING_INFO0_PUSH_REASON,
2438 					desc->info0);
2439 		if (push_reason !=
2440 		    HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION) {
2441 			dev_kfree_skb_any(msdu);
2442 			ab->soc_stats.hal_reo_error[dp->reo_dst_ring[ring_id].ring_id]++;
2443 			continue;
2444 		}
2445 
2446 		rxcb->is_first_msdu = !!(desc->rx_msdu_info.info0 &
2447 					 RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU);
2448 		rxcb->is_last_msdu = !!(desc->rx_msdu_info.info0 &
2449 					RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU);
2450 		rxcb->is_continuation = !!(desc->rx_msdu_info.info0 &
2451 					   RX_MSDU_DESC_INFO0_MSDU_CONTINUATION);
2452 		rxcb->mac_id = mac_id;
2453 		rxcb->tid = FIELD_GET(HAL_REO_DEST_RING_INFO0_RX_QUEUE_NUM,
2454 				      desc->info0);
2455 
2456 		__skb_queue_tail(&msdu_list, msdu);
2457 
2458 		if (total_msdu_reaped >= quota && !rxcb->is_continuation) {
2459 			done = true;
2460 			break;
2461 		}
2462 	}
2463 
2464 	/* Hw might have updated the head pointer after we cached it.
2465 	 * In this case, even though there are entries in the ring we'll
2466 	 * get rx_desc NULL. Give the read another try with updated cached
2467 	 * head pointer so that we can reap complete MPDU in the current
2468 	 * rx processing.
2469 	 */
2470 	if (!done && ath11k_hal_srng_dst_num_free(ab, srng, true)) {
2471 		ath11k_hal_srng_access_end(ab, srng);
2472 		goto try_again;
2473 	}
2474 
2475 	ath11k_hal_srng_access_end(ab, srng);
2476 
2477 	spin_unlock_bh(&srng->lock);
2478 
2479 	if (!total_msdu_reaped)
2480 		goto exit;
2481 
2482 	for (i = 0; i < ab->num_radios; i++) {
2483 		if (!num_buffs_reaped[i])
2484 			continue;
2485 
2486 		ar = ab->pdevs[i].ar;
2487 		rx_ring = &ar->dp.rx_refill_buf_ring;
2488 
2489 		ath11k_dp_rxbufs_replenish(ab, i, rx_ring, num_buffs_reaped[i],
2490 					   HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC);
2491 	}
2492 
2493 	ath11k_dp_rx_process_received_packets(ab, napi, &msdu_list,
2494 					      &quota, ring_id);
2495 
2496 exit:
2497 	return budget - quota;
2498 }
2499 
2500 static void ath11k_dp_rx_update_peer_stats(struct ath11k_sta *arsta,
2501 					   struct hal_rx_mon_ppdu_info *ppdu_info)
2502 {
2503 	struct ath11k_rx_peer_stats *rx_stats = arsta->rx_stats;
2504 	u32 num_msdu;
2505 
2506 	if (!rx_stats)
2507 		return;
2508 
2509 	num_msdu = ppdu_info->tcp_msdu_count + ppdu_info->tcp_ack_msdu_count +
2510 		   ppdu_info->udp_msdu_count + ppdu_info->other_msdu_count;
2511 
2512 	rx_stats->num_msdu += num_msdu;
2513 	rx_stats->tcp_msdu_count += ppdu_info->tcp_msdu_count +
2514 				    ppdu_info->tcp_ack_msdu_count;
2515 	rx_stats->udp_msdu_count += ppdu_info->udp_msdu_count;
2516 	rx_stats->other_msdu_count += ppdu_info->other_msdu_count;
2517 
2518 	if (ppdu_info->preamble_type == HAL_RX_PREAMBLE_11A ||
2519 	    ppdu_info->preamble_type == HAL_RX_PREAMBLE_11B) {
2520 		ppdu_info->nss = 1;
2521 		ppdu_info->mcs = HAL_RX_MAX_MCS;
2522 		ppdu_info->tid = IEEE80211_NUM_TIDS;
2523 	}
2524 
2525 	if (ppdu_info->nss > 0 && ppdu_info->nss <= HAL_RX_MAX_NSS)
2526 		rx_stats->nss_count[ppdu_info->nss - 1] += num_msdu;
2527 
2528 	if (ppdu_info->mcs <= HAL_RX_MAX_MCS)
2529 		rx_stats->mcs_count[ppdu_info->mcs] += num_msdu;
2530 
2531 	if (ppdu_info->gi < HAL_RX_GI_MAX)
2532 		rx_stats->gi_count[ppdu_info->gi] += num_msdu;
2533 
2534 	if (ppdu_info->bw < HAL_RX_BW_MAX)
2535 		rx_stats->bw_count[ppdu_info->bw] += num_msdu;
2536 
2537 	if (ppdu_info->ldpc < HAL_RX_SU_MU_CODING_MAX)
2538 		rx_stats->coding_count[ppdu_info->ldpc] += num_msdu;
2539 
2540 	if (ppdu_info->tid <= IEEE80211_NUM_TIDS)
2541 		rx_stats->tid_count[ppdu_info->tid] += num_msdu;
2542 
2543 	if (ppdu_info->preamble_type < HAL_RX_PREAMBLE_MAX)
2544 		rx_stats->pream_cnt[ppdu_info->preamble_type] += num_msdu;
2545 
2546 	if (ppdu_info->reception_type < HAL_RX_RECEPTION_TYPE_MAX)
2547 		rx_stats->reception_type[ppdu_info->reception_type] += num_msdu;
2548 
2549 	if (ppdu_info->is_stbc)
2550 		rx_stats->stbc_count += num_msdu;
2551 
2552 	if (ppdu_info->beamformed)
2553 		rx_stats->beamformed_count += num_msdu;
2554 
2555 	if (ppdu_info->num_mpdu_fcs_ok > 1)
2556 		rx_stats->ampdu_msdu_count += num_msdu;
2557 	else
2558 		rx_stats->non_ampdu_msdu_count += num_msdu;
2559 
2560 	rx_stats->num_mpdu_fcs_ok += ppdu_info->num_mpdu_fcs_ok;
2561 	rx_stats->num_mpdu_fcs_err += ppdu_info->num_mpdu_fcs_err;
2562 	rx_stats->dcm_count += ppdu_info->dcm;
2563 	rx_stats->ru_alloc_cnt[ppdu_info->ru_alloc] += num_msdu;
2564 
2565 	arsta->rssi_comb = ppdu_info->rssi_comb;
2566 	rx_stats->rx_duration += ppdu_info->rx_duration;
2567 	arsta->rx_duration = rx_stats->rx_duration;
2568 }
2569 
2570 static struct sk_buff *ath11k_dp_rx_alloc_mon_status_buf(struct ath11k_base *ab,
2571 							 struct dp_rxdma_ring *rx_ring,
2572 							 int *buf_id, gfp_t gfp)
2573 {
2574 	struct sk_buff *skb;
2575 	dma_addr_t paddr;
2576 
2577 	skb = dev_alloc_skb(DP_RX_BUFFER_SIZE +
2578 			    DP_RX_BUFFER_ALIGN_SIZE);
2579 
2580 	if (!skb)
2581 		goto fail_alloc_skb;
2582 
2583 	if (!IS_ALIGNED((unsigned long)skb->data,
2584 			DP_RX_BUFFER_ALIGN_SIZE)) {
2585 		skb_pull(skb, PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) -
2586 			 skb->data);
2587 	}
2588 
2589 	paddr = dma_map_single(ab->dev, skb->data,
2590 			       skb->len + skb_tailroom(skb),
2591 			       DMA_BIDIRECTIONAL);
2592 	if (unlikely(dma_mapping_error(ab->dev, paddr)))
2593 		goto fail_free_skb;
2594 
2595 	spin_lock_bh(&rx_ring->idr_lock);
2596 	*buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0,
2597 			    rx_ring->bufs_max, gfp);
2598 	spin_unlock_bh(&rx_ring->idr_lock);
2599 	if (*buf_id < 0)
2600 		goto fail_dma_unmap;
2601 
2602 	ATH11K_SKB_RXCB(skb)->paddr = paddr;
2603 	return skb;
2604 
2605 fail_dma_unmap:
2606 	dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
2607 			 DMA_BIDIRECTIONAL);
2608 fail_free_skb:
2609 	dev_kfree_skb_any(skb);
2610 fail_alloc_skb:
2611 	return NULL;
2612 }
2613 
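/* Replenish the monitor status refill ring with up to req_entries newly
 * allocated, DMA-mapped status buffers. Returns the number of buffers
 * actually placed on the ring.
 */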
2614 int ath11k_dp_rx_mon_status_bufs_replenish(struct ath11k_base *ab, int mac_id,
2615 					   struct dp_rxdma_ring *rx_ring,
2616 					   int req_entries,
2617 					   enum hal_rx_buf_return_buf_manager mgr,
2618 					   gfp_t gfp)
2619 {
2620 	struct hal_srng *srng;
2621 	u32 *desc;
2622 	struct sk_buff *skb;
2623 	int num_free;
2624 	int num_remain;
2625 	int buf_id;
2626 	u32 cookie;
2627 	dma_addr_t paddr;
2628 
2629 	req_entries = min(req_entries, rx_ring->bufs_max);
2630 
2631 	srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];
2632 
2633 	spin_lock_bh(&srng->lock);
2634 
2635 	ath11k_hal_srng_access_begin(ab, srng);
2636 
2637 	num_free = ath11k_hal_srng_src_num_free(ab, srng, true);
2638 
2639 	req_entries = min(num_free, req_entries);
2640 	num_remain = req_entries;
2641 
2642 	while (num_remain > 0) {
2643 		skb = ath11k_dp_rx_alloc_mon_status_buf(ab, rx_ring,
2644 							&buf_id, gfp);
2645 		if (!skb)
2646 			break;
2647 		paddr = ATH11K_SKB_RXCB(skb)->paddr;
2648 
2649 		desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
2650 		if (!desc)
2651 			goto fail_desc_get;
2652 
2653 		cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) |
2654 			 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);
2655 
2656 		num_remain--;
2657 
2658 		ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr);
2659 	}
2660 
2661 	ath11k_hal_srng_access_end(ab, srng);
2662 
2663 	spin_unlock_bh(&srng->lock);
2664 
2665 	return req_entries - num_remain;
2666 
2667 fail_desc_get:
2668 	spin_lock_bh(&rx_ring->idr_lock);
2669 	idr_remove(&rx_ring->bufs_idr, buf_id);
2670 	spin_unlock_bh(&rx_ring->idr_lock);
2671 	dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
2672 			 DMA_BIDIRECTIONAL);
2673 	dev_kfree_skb_any(skb);
2674 	ath11k_hal_srng_access_end(ab, srng);
2675 	spin_unlock_bh(&srng->lock);
2676 
2677 	return req_entries - num_remain;
2678 }
2679 
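/* Reap completed monitor status buffers from the status refill ring.
 * Buffers whose first TLV indicates HAL_RX_STATUS_BUFFER_DONE are moved
 * to skb_list; each reaped ring entry is refilled with a freshly
 * allocated status buffer.
 */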
2680 static int ath11k_dp_rx_reap_mon_status_ring(struct ath11k_base *ab, int mac_id,
2681 					     int *budget, struct sk_buff_head *skb_list)
2682 {
2683 	struct ath11k *ar = ab->pdevs[mac_id].ar;
2684 	struct ath11k_pdev_dp *dp = &ar->dp;
2685 	struct dp_rxdma_ring *rx_ring = &dp->rx_mon_status_refill_ring;
2686 	struct hal_srng *srng;
2687 	void *rx_mon_status_desc;
2688 	struct sk_buff *skb;
2689 	struct ath11k_skb_rxcb *rxcb;
2690 	struct hal_tlv_hdr *tlv;
2691 	u32 cookie;
2692 	int buf_id;
2693 	dma_addr_t paddr;
2694 	u8 rbm;
2695 	int num_buffs_reaped = 0;
2696 
2697 	srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];
2698 
2699 	spin_lock_bh(&srng->lock);
2700 
2701 	ath11k_hal_srng_access_begin(ab, srng);
2702 	while (*budget) {
2703 		*budget -= 1;
2704 		rx_mon_status_desc =
2705 			ath11k_hal_srng_src_peek(ab, srng);
2706 		if (!rx_mon_status_desc)
2707 			break;
2708 
2709 		ath11k_hal_rx_buf_addr_info_get(rx_mon_status_desc, &paddr,
2710 						&cookie, &rbm);
2711 		if (paddr) {
2712 			buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, cookie);
2713 
2714 			spin_lock_bh(&rx_ring->idr_lock);
2715 			skb = idr_find(&rx_ring->bufs_idr, buf_id);
2716 			if (!skb) {
2717 				ath11k_warn(ab, "rx monitor status with invalid buf_id %d\n",
2718 					    buf_id);
2719 				spin_unlock_bh(&rx_ring->idr_lock);
2720 				continue;
2721 			}
2722 
2723 			idr_remove(&rx_ring->bufs_idr, buf_id);
2724 			spin_unlock_bh(&rx_ring->idr_lock);
2725 
2726 			rxcb = ATH11K_SKB_RXCB(skb);
2727 
2728 			dma_sync_single_for_cpu(ab->dev, rxcb->paddr,
2729 						skb->len + skb_tailroom(skb),
2730 						DMA_FROM_DEVICE);
2731 
2732 			dma_unmap_single(ab->dev, rxcb->paddr,
2733 					 skb->len + skb_tailroom(skb),
2734 					 DMA_BIDIRECTIONAL);
2735 
2736 			tlv = (struct hal_tlv_hdr *)skb->data;
2737 			if (FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl) !=
2738 					HAL_RX_STATUS_BUFFER_DONE) {
2739 				ath11k_hal_srng_src_get_next_entry(ab, srng);
2740 				continue;
2741 			}
2742 
2743 			__skb_queue_tail(skb_list, skb);
2744 		}
2745 
2746 		skb = ath11k_dp_rx_alloc_mon_status_buf(ab, rx_ring,
2747 							&buf_id, GFP_ATOMIC);
2748 
2749 		if (!skb) {
2750 			ath11k_hal_rx_buf_addr_info_set(rx_mon_status_desc, 0, 0,
2751 							HAL_RX_BUF_RBM_SW3_BM);
2752 			num_buffs_reaped++;
2753 			break;
2754 		}
2755 		rxcb = ATH11K_SKB_RXCB(skb);
2756 
2757 		cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) |
2758 			 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);
2759 
2760 		ath11k_hal_rx_buf_addr_info_set(rx_mon_status_desc, rxcb->paddr,
2761 						cookie, HAL_RX_BUF_RBM_SW3_BM);
2762 		ath11k_hal_srng_src_get_next_entry(ab, srng);
2763 		num_buffs_reaped++;
2764 	}
2765 	ath11k_hal_srng_access_end(ab, srng);
2766 	spin_unlock_bh(&srng->lock);
2767 
2768 	return num_buffs_reaped;
2769 }
2770 
2771 int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id,
2772 				    struct napi_struct *napi, int budget)
2773 {
2774 	struct ath11k *ar = ab->pdevs[mac_id].ar;
2775 	enum hal_rx_mon_status hal_status;
2776 	struct sk_buff *skb;
2777 	struct sk_buff_head skb_list;
2778 	struct hal_rx_mon_ppdu_info ppdu_info;
2779 	struct ath11k_peer *peer;
2780 	struct ath11k_sta *arsta;
2781 	int num_buffs_reaped = 0;
2782 
2783 	__skb_queue_head_init(&skb_list);
2784 
2785 	num_buffs_reaped = ath11k_dp_rx_reap_mon_status_ring(ab, mac_id, &budget,
2786 							     &skb_list);
2787 	if (!num_buffs_reaped)
2788 		goto exit;
2789 
2790 	while ((skb = __skb_dequeue(&skb_list))) {
2791 		memset(&ppdu_info, 0, sizeof(ppdu_info));
2792 		ppdu_info.peer_id = HAL_INVALID_PEERID;
2793 
2794 		if (ath11k_debug_is_pktlog_rx_stats_enabled(ar))
2795 			trace_ath11k_htt_rxdesc(ar, skb->data, DP_RX_BUFFER_SIZE);
2796 
2797 		hal_status = ath11k_hal_rx_parse_mon_status(ab, &ppdu_info, skb);
2798 
2799 		if (ppdu_info.peer_id == HAL_INVALID_PEERID ||
2800 		    hal_status != HAL_RX_MON_STATUS_PPDU_DONE) {
2801 			dev_kfree_skb_any(skb);
2802 			continue;
2803 		}
2804 
2805 		rcu_read_lock();
2806 		spin_lock_bh(&ab->base_lock);
2807 		peer = ath11k_peer_find_by_id(ab, ppdu_info.peer_id);
2808 
2809 		if (!peer || !peer->sta) {
2810 			ath11k_dbg(ab, ATH11K_DBG_DATA,
2811 				   "failed to find the peer with peer_id %d\n",
2812 				   ppdu_info.peer_id);
2813 			spin_unlock_bh(&ab->base_lock);
2814 			rcu_read_unlock();
2815 			dev_kfree_skb_any(skb);
2816 			continue;
2817 		}
2818 
2819 		arsta = (struct ath11k_sta *)peer->sta->drv_priv;
2820 		ath11k_dp_rx_update_peer_stats(arsta, &ppdu_info);
2821 
2822 		if (ath11k_debug_is_pktlog_peer_valid(ar, peer->addr))
2823 			trace_ath11k_htt_rxdesc(ar, skb->data, DP_RX_BUFFER_SIZE);
2824 
2825 		spin_unlock_bh(&ab->base_lock);
2826 		rcu_read_unlock();
2827 
2828 		dev_kfree_skb_any(skb);
2829 	}
2830 exit:
2831 	return num_buffs_reaped;
2832 }
2833 
2834 static void ath11k_dp_rx_frag_timer(struct timer_list *timer)
2835 {
2836 	struct dp_rx_tid *rx_tid = from_timer(rx_tid, timer, frag_timer);
2837 
2838 	spin_lock_bh(&rx_tid->ab->base_lock);
2839 	if (rx_tid->last_frag_no &&
2840 	    rx_tid->rx_frag_bitmap == GENMASK(rx_tid->last_frag_no, 0)) {
2841 		spin_unlock_bh(&rx_tid->ab->base_lock);
2842 		return;
2843 	}
2844 	ath11k_dp_rx_frags_cleanup(rx_tid, true);
2845 	spin_unlock_bh(&rx_tid->ab->base_lock);
2846 }
2847 
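/* Set up rx fragment handling for a peer: allocate a michael_mic
 * transform for TKIP MIC verification and initialize the per-TID
 * fragment queues and reassembly timers.
 */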
2848 int ath11k_peer_rx_frag_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id)
2849 {
2850 	struct ath11k_base *ab = ar->ab;
2851 	struct crypto_shash *tfm;
2852 	struct ath11k_peer *peer;
2853 	struct dp_rx_tid *rx_tid;
2854 	int i;
2855 
2856 	tfm = crypto_alloc_shash("michael_mic", 0, 0);
2857 	if (IS_ERR(tfm))
2858 		return PTR_ERR(tfm);
2859 
2860 	spin_lock_bh(&ab->base_lock);
2861 
2862 	peer = ath11k_peer_find(ab, vdev_id, peer_mac);
2863 	if (!peer) {
2864 		ath11k_warn(ab, "failed to find the peer to set up fragment info\n");
2865 		spin_unlock_bh(&ab->base_lock);
2866 		return -ENOENT;
2867 	}
2868 
2869 	for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
2870 		rx_tid = &peer->rx_tid[i];
2871 		rx_tid->ab = ab;
2872 		timer_setup(&rx_tid->frag_timer, ath11k_dp_rx_frag_timer, 0);
2873 		skb_queue_head_init(&rx_tid->rx_frags);
2874 	}
2875 
2876 	peer->tfm_mmic = tfm;
2877 	spin_unlock_bh(&ab->base_lock);
2878 
2879 	return 0;
2880 }
2881 
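/* Compute the TKIP Michael MIC over the MIC header (DA, SA, priority)
 * and the given payload using the peer's michael_mic transform.
 */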
2882 static int ath11k_dp_rx_h_michael_mic(struct crypto_shash *tfm, u8 *key,
2883 				      struct ieee80211_hdr *hdr, u8 *data,
2884 				      size_t data_len, u8 *mic)
2885 {
2886 	SHASH_DESC_ON_STACK(desc, tfm);
2887 	u8 mic_hdr[16] = {0};
2888 	u8 tid = 0;
2889 	int ret;
2890 
2891 	if (!tfm)
2892 		return -EINVAL;
2893 
2894 	desc->tfm = tfm;
2895 
2896 	ret = crypto_shash_setkey(tfm, key, 8);
2897 	if (ret)
2898 		goto out;
2899 
2900 	ret = crypto_shash_init(desc);
2901 	if (ret)
2902 		goto out;
2903 
2904 	/* TKIP MIC header */
2905 	memcpy(mic_hdr, ieee80211_get_DA(hdr), ETH_ALEN);
2906 	memcpy(mic_hdr + ETH_ALEN, ieee80211_get_SA(hdr), ETH_ALEN);
2907 	if (ieee80211_is_data_qos(hdr->frame_control))
2908 		tid = ieee80211_get_tid(hdr);
2909 	mic_hdr[12] = tid;
2910 
2911 	ret = crypto_shash_update(desc, mic_hdr, 16);
2912 	if (ret)
2913 		goto out;
2914 	ret = crypto_shash_update(desc, data, data_len);
2915 	if (ret)
2916 		goto out;
2917 	ret = crypto_shash_final(desc, mic);
2918 out:
2919 	shash_desc_zero(desc);
2920 	return ret;
2921 }
2922 
2923 static int ath11k_dp_rx_h_verify_tkip_mic(struct ath11k *ar, struct ath11k_peer *peer,
2924 					  struct sk_buff *msdu)
2925 {
2926 	struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)msdu->data;
2927 	struct ieee80211_rx_status *rxs = IEEE80211_SKB_RXCB(msdu);
2928 	struct ieee80211_key_conf *key_conf;
2929 	struct ieee80211_hdr *hdr;
2930 	u8 mic[IEEE80211_CCMP_MIC_LEN];
2931 	int head_len, tail_len, ret;
2932 	size_t data_len;
2933 	u32 hdr_len;
2934 	u8 *key, *data;
2935 	u8 key_idx;
2936 
2937 	if (ath11k_dp_rx_h_mpdu_start_enctype(rx_desc) != HAL_ENCRYPT_TYPE_TKIP_MIC)
2938 		return 0;
2939 
2940 	hdr = (struct ieee80211_hdr *)(msdu->data + HAL_RX_DESC_SIZE);
2941 	hdr_len = ieee80211_hdrlen(hdr->frame_control);
2942 	head_len = hdr_len + HAL_RX_DESC_SIZE + IEEE80211_TKIP_IV_LEN;
2943 	tail_len = IEEE80211_CCMP_MIC_LEN + IEEE80211_TKIP_ICV_LEN + FCS_LEN;
2944 
2945 	if (!is_multicast_ether_addr(hdr->addr1))
2946 		key_idx = peer->ucast_keyidx;
2947 	else
2948 		key_idx = peer->mcast_keyidx;
2949 
2950 	key_conf = peer->keys[key_idx];
2951 
2952 	data = msdu->data + head_len;
2953 	data_len = msdu->len - head_len - tail_len;
2954 	key = &key_conf->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY];
2955 
2956 	ret = ath11k_dp_rx_h_michael_mic(peer->tfm_mmic, key, hdr, data, data_len, mic);
2957 	if (ret || memcmp(mic, data + data_len, IEEE80211_CCMP_MIC_LEN))
2958 		goto mic_fail;
2959 
2960 	return 0;
2961 
2962 mic_fail:
2963 	(ATH11K_SKB_RXCB(msdu))->is_first_msdu = 1;
2964 	(ATH11K_SKB_RXCB(msdu))->is_last_msdu = 1;
2965 
2966 	rxs->flag |= RX_FLAG_MMIC_ERROR | RX_FLAG_MMIC_STRIPPED |
2967 		    RX_FLAG_IV_STRIPPED | RX_FLAG_DECRYPTED;
2968 	skb_pull(msdu, HAL_RX_DESC_SIZE);
2969 
2970 	ath11k_dp_rx_h_ppdu(ar, rx_desc, rxs);
2971 	ath11k_dp_rx_h_undecap(ar, msdu, rx_desc,
2972 			       HAL_ENCRYPT_TYPE_TKIP_MIC, rxs, true);
2973 	ieee80211_rx(ar->hw, msdu);
2974 	return -EINVAL;
2975 }
2976 
2977 static void ath11k_dp_rx_h_undecap_frag(struct ath11k *ar, struct sk_buff *msdu,
2978 					enum hal_encrypt_type enctype, u32 flags)
2979 {
2980 	struct ieee80211_hdr *hdr;
2981 	size_t hdr_len;
2982 	size_t crypto_len;
2983 
2984 	if (!flags)
2985 		return;
2986 
2987 	hdr = (struct ieee80211_hdr *)(msdu->data + HAL_RX_DESC_SIZE);
2988 
2989 	if (flags & RX_FLAG_MIC_STRIPPED)
2990 		skb_trim(msdu, msdu->len -
2991 			 ath11k_dp_rx_crypto_mic_len(ar, enctype));
2992 
2993 	if (flags & RX_FLAG_ICV_STRIPPED)
2994 		skb_trim(msdu, msdu->len -
2995 			 ath11k_dp_rx_crypto_icv_len(ar, enctype));
2996 
2997 	if (flags & RX_FLAG_IV_STRIPPED) {
2998 		hdr_len = ieee80211_hdrlen(hdr->frame_control);
2999 		crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype);
3000 
3001 		memmove((void *)msdu->data + HAL_RX_DESC_SIZE + crypto_len,
3002 			(void *)msdu->data + HAL_RX_DESC_SIZE, hdr_len);
3003 		skb_pull(msdu, crypto_len);
3004 	}
3005 }
3006 
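/* Reassemble the fragments queued on rx_tid into a single MPDU: strip
 * per-fragment FCS/crypto material and 802.11 headers as needed, copy
 * the subsequent fragment payloads into the first fragment, clear the
 * morefrags bit and verify the TKIP MIC if applicable.
 */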
3007 static int ath11k_dp_rx_h_defrag(struct ath11k *ar,
3008 				 struct ath11k_peer *peer,
3009 				 struct dp_rx_tid *rx_tid,
3010 				 struct sk_buff **defrag_skb)
3011 {
3012 	struct hal_rx_desc *rx_desc;
3013 	struct sk_buff *skb, *first_frag, *last_frag;
3014 	struct ieee80211_hdr *hdr;
3015 	enum hal_encrypt_type enctype;
3016 	bool is_decrypted = false;
3017 	int msdu_len = 0;
3018 	int extra_space;
3019 	u32 flags;
3020 
3021 	first_frag = skb_peek(&rx_tid->rx_frags);
3022 	last_frag = skb_peek_tail(&rx_tid->rx_frags);
3023 
3024 	skb_queue_walk(&rx_tid->rx_frags, skb) {
3025 		flags = 0;
3026 		rx_desc = (struct hal_rx_desc *)skb->data;
3027 		hdr = (struct ieee80211_hdr *)(skb->data + HAL_RX_DESC_SIZE);
3028 
3029 		enctype = ath11k_dp_rx_h_mpdu_start_enctype(rx_desc);
3030 		if (enctype != HAL_ENCRYPT_TYPE_OPEN)
3031 			is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_desc);
3032 
3033 		if (is_decrypted) {
3034 			if (skb != first_frag)
3035 				flags |= RX_FLAG_IV_STRIPPED;
3036 			if (skb != last_frag)
3037 				flags |= RX_FLAG_ICV_STRIPPED |
3038 					 RX_FLAG_MIC_STRIPPED;
3039 		}
3040 
3041 		/* RX fragments are always raw packets */
3042 		if (skb != last_frag)
3043 			skb_trim(skb, skb->len - FCS_LEN);
3044 		ath11k_dp_rx_h_undecap_frag(ar, skb, enctype, flags);
3045 
3046 		if (skb != first_frag)
3047 			skb_pull(skb, HAL_RX_DESC_SIZE +
3048 				      ieee80211_hdrlen(hdr->frame_control));
3049 		msdu_len += skb->len;
3050 	}
3051 
3052 	extra_space = msdu_len - (DP_RX_BUFFER_SIZE + skb_tailroom(first_frag));
3053 	if (extra_space > 0 &&
3054 	    (pskb_expand_head(first_frag, 0, extra_space, GFP_ATOMIC) < 0))
3055 		return -ENOMEM;
3056 
3057 	__skb_unlink(first_frag, &rx_tid->rx_frags);
3058 	while ((skb = __skb_dequeue(&rx_tid->rx_frags))) {
3059 		skb_put_data(first_frag, skb->data, skb->len);
3060 		dev_kfree_skb_any(skb);
3061 	}
3062 
3063 	hdr = (struct ieee80211_hdr *)(first_frag->data + HAL_RX_DESC_SIZE);
3064 	hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_MOREFRAGS);
3065 	ATH11K_SKB_RXCB(first_frag)->is_frag = 1;
3066 
3067 	if (ath11k_dp_rx_h_verify_tkip_mic(ar, peer, first_frag))
3068 		first_frag = NULL;
3069 
3070 	*defrag_skb = first_frag;
3071 	return 0;
3072 }
3073 
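/* Reinject a defragmented MPDU back into the REO entrance ring so it is
 * processed through the regular rx path: rewrite the msdu link
 * descriptor and hal rx descriptor length, DMA-map the reassembled skb
 * and fill a new REO entrance descriptor pointing at it.
 */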
3074 static int ath11k_dp_rx_h_defrag_reo_reinject(struct ath11k *ar, struct dp_rx_tid *rx_tid,
3075 					      struct sk_buff *defrag_skb)
3076 {
3077 	struct ath11k_base *ab = ar->ab;
3078 	struct ath11k_pdev_dp *dp = &ar->dp;
3079 	struct dp_rxdma_ring *rx_refill_ring = &dp->rx_refill_buf_ring;
3080 	struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)defrag_skb->data;
3081 	struct hal_reo_entrance_ring *reo_ent_ring;
3082 	struct hal_reo_dest_ring *reo_dest_ring;
3083 	struct dp_link_desc_bank *link_desc_banks;
3084 	struct hal_rx_msdu_link *msdu_link;
3085 	struct hal_rx_msdu_details *msdu0;
3086 	struct hal_srng *srng;
3087 	dma_addr_t paddr;
3088 	u32 desc_bank, msdu_info, mpdu_info;
3089 	u32 dst_idx, cookie;
3090 	u32 *msdu_len_offset;
3091 	int ret, buf_id;
3092 
3093 	link_desc_banks = ab->dp.link_desc_banks;
3094 	reo_dest_ring = rx_tid->dst_ring_desc;
3095 
3096 	ath11k_hal_rx_reo_ent_paddr_get(ab, reo_dest_ring, &paddr, &desc_bank);
3097 	msdu_link = (struct hal_rx_msdu_link *)(link_desc_banks[desc_bank].vaddr +
3098 			(paddr - link_desc_banks[desc_bank].paddr));
3099 	msdu0 = &msdu_link->msdu_link[0];
3100 	dst_idx = FIELD_GET(RX_MSDU_DESC_INFO0_REO_DEST_IND, msdu0->rx_msdu_info.info0);
3101 	memset(msdu0, 0, sizeof(*msdu0));
3102 
3103 	msdu_info = FIELD_PREP(RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU, 1) |
3104 		    FIELD_PREP(RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU, 1) |
3105 		    FIELD_PREP(RX_MSDU_DESC_INFO0_MSDU_CONTINUATION, 0) |
3106 		    FIELD_PREP(RX_MSDU_DESC_INFO0_MSDU_LENGTH,
3107 			       defrag_skb->len - HAL_RX_DESC_SIZE) |
3108 		    FIELD_PREP(RX_MSDU_DESC_INFO0_REO_DEST_IND, dst_idx) |
3109 		    FIELD_PREP(RX_MSDU_DESC_INFO0_VALID_SA, 1) |
3110 		    FIELD_PREP(RX_MSDU_DESC_INFO0_VALID_DA, 1);
3111 	msdu0->rx_msdu_info.info0 = msdu_info;
3112 
3113 	/* change msdu len in hal rx desc */
3114 	msdu_len_offset = (u32 *)&rx_desc->msdu_start;
3115 	*msdu_len_offset &= ~(RX_MSDU_START_INFO1_MSDU_LENGTH);
3116 	*msdu_len_offset |= defrag_skb->len - HAL_RX_DESC_SIZE;
3117 
3118 	paddr = dma_map_single(ab->dev, defrag_skb->data,
3119 			       defrag_skb->len + skb_tailroom(defrag_skb),
3120 			       DMA_FROM_DEVICE);
3121 	if (dma_mapping_error(ab->dev, paddr))
3122 		return -ENOMEM;
3123 
3124 	spin_lock_bh(&rx_refill_ring->idr_lock);
3125 	buf_id = idr_alloc(&rx_refill_ring->bufs_idr, defrag_skb, 0,
3126 			   rx_refill_ring->bufs_max * 3, GFP_ATOMIC);
3127 	spin_unlock_bh(&rx_refill_ring->idr_lock);
3128 	if (buf_id < 0) {
3129 		ret = -ENOMEM;
3130 		goto err_unmap_dma;
3131 	}
3132 
3133 	ATH11K_SKB_RXCB(defrag_skb)->paddr = paddr;
3134 	cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, dp->mac_id) |
3135 		 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);
3136 
3137 	ath11k_hal_rx_buf_addr_info_set(msdu0, paddr, cookie, HAL_RX_BUF_RBM_SW3_BM);
3138 
3139 	/* Fill mpdu details into reo entrance ring */
3140 	srng = &ab->hal.srng_list[ab->dp.reo_reinject_ring.ring_id];
3141 
3142 	spin_lock_bh(&srng->lock);
3143 	ath11k_hal_srng_access_begin(ab, srng);
3144 
3145 	reo_ent_ring = (struct hal_reo_entrance_ring *)
3146 			ath11k_hal_srng_src_get_next_entry(ab, srng);
3147 	if (!reo_ent_ring) {
3148 		ath11k_hal_srng_access_end(ab, srng);
3149 		spin_unlock_bh(&srng->lock);
3150 		ret = -ENOSPC;
3151 		goto err_free_idr;
3152 	}
3153 	memset(reo_ent_ring, 0, sizeof(*reo_ent_ring));
3154 
3155 	ath11k_hal_rx_reo_ent_paddr_get(ab, reo_dest_ring, &paddr, &desc_bank);
3156 	ath11k_hal_rx_buf_addr_info_set(reo_ent_ring, paddr, desc_bank,
3157 					HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST);
3158 
3159 	mpdu_info = FIELD_PREP(RX_MPDU_DESC_INFO0_MSDU_COUNT, 1) |
3160 		    FIELD_PREP(RX_MPDU_DESC_INFO0_SEQ_NUM, rx_tid->cur_sn) |
3161 		    FIELD_PREP(RX_MPDU_DESC_INFO0_FRAG_FLAG, 0) |
3162 		    FIELD_PREP(RX_MPDU_DESC_INFO0_VALID_SA, 1) |
3163 		    FIELD_PREP(RX_MPDU_DESC_INFO0_VALID_DA, 1) |
3164 		    FIELD_PREP(RX_MPDU_DESC_INFO0_RAW_MPDU, 1) |
3165 		    FIELD_PREP(RX_MPDU_DESC_INFO0_VALID_PN, 1);
3166 
3167 	reo_ent_ring->rx_mpdu_info.info0 = mpdu_info;
3168 	reo_ent_ring->rx_mpdu_info.meta_data = reo_dest_ring->rx_mpdu_info.meta_data;
3169 	reo_ent_ring->queue_addr_lo = reo_dest_ring->queue_addr_lo;
3170 	reo_ent_ring->info0 = FIELD_PREP(HAL_REO_ENTR_RING_INFO0_QUEUE_ADDR_HI,
3171 					 FIELD_GET(HAL_REO_DEST_RING_INFO0_QUEUE_ADDR_HI,
3172 						   reo_dest_ring->info0)) |
3173 			      FIELD_PREP(HAL_REO_ENTR_RING_INFO0_DEST_IND, dst_idx);
3174 	ath11k_hal_srng_access_end(ab, srng);
3175 	spin_unlock_bh(&srng->lock);
3176 
3177 	return 0;
3178 
3179 err_free_idr:
3180 	spin_lock_bh(&rx_refill_ring->idr_lock);
3181 	idr_remove(&rx_refill_ring->bufs_idr, buf_id);
3182 	spin_unlock_bh(&rx_refill_ring->idr_lock);
3183 err_unmap_dma:
3184 	dma_unmap_single(ab->dev, paddr, defrag_skb->len + skb_tailroom(defrag_skb),
3185 			 DMA_FROM_DEVICE);
3186 	return ret;
3187 }
3188 
3189 static int ath11k_dp_rx_h_cmp_frags(struct sk_buff *a, struct sk_buff *b)
3190 {
3191 	int frag1, frag2;
3192 
3193 	frag1 = ath11k_dp_rx_h_mpdu_start_frag_no(a);
3194 	frag2 = ath11k_dp_rx_h_mpdu_start_frag_no(b);
3195 
3196 	return frag1 - frag2;
3197 }
3198 
3199 static void ath11k_dp_rx_h_sort_frags(struct sk_buff_head *frag_list,
3200 				      struct sk_buff *cur_frag)
3201 {
3202 	struct sk_buff *skb;
3203 	int cmp;
3204 
3205 	skb_queue_walk(frag_list, skb) {
3206 		cmp = ath11k_dp_rx_h_cmp_frags(skb, cur_frag);
3207 		if (cmp < 0)
3208 			continue;
3209 		__skb_queue_before(frag_list, skb, cur_frag);
3210 		return;
3211 	}
3212 	__skb_queue_tail(frag_list, cur_frag);
3213 }
3214 
3215 static u64 ath11k_dp_rx_h_get_pn(struct sk_buff *skb)
3216 {
3217 	struct ieee80211_hdr *hdr;
3218 	u64 pn = 0;
3219 	u8 *ehdr;
3220 
3221 	hdr = (struct ieee80211_hdr *)(skb->data + HAL_RX_DESC_SIZE);
3222 	ehdr = skb->data + HAL_RX_DESC_SIZE + ieee80211_hdrlen(hdr->frame_control);
3223 
3224 	pn = ehdr[0];
3225 	pn |= (u64)ehdr[1] << 8;
3226 	pn |= (u64)ehdr[4] << 16;
3227 	pn |= (u64)ehdr[5] << 24;
3228 	pn |= (u64)ehdr[6] << 32;
3229 	pn |= (u64)ehdr[7] << 40;
3230 
3231 	return pn;
3232 }
3233 
3234 static bool
3235 ath11k_dp_rx_h_defrag_validate_incr_pn(struct ath11k *ar, struct dp_rx_tid *rx_tid)
3236 {
3237 	enum hal_encrypt_type encrypt_type;
3238 	struct sk_buff *first_frag, *skb;
3239 	struct hal_rx_desc *desc;
3240 	u64 last_pn;
3241 	u64 cur_pn;
3242 
3243 	first_frag = skb_peek(&rx_tid->rx_frags);
3244 	desc = (struct hal_rx_desc *)first_frag->data;
3245 
3246 	encrypt_type = ath11k_dp_rx_h_mpdu_start_enctype(desc);
3247 	if (encrypt_type != HAL_ENCRYPT_TYPE_CCMP_128 &&
3248 	    encrypt_type != HAL_ENCRYPT_TYPE_CCMP_256 &&
3249 	    encrypt_type != HAL_ENCRYPT_TYPE_GCMP_128 &&
3250 	    encrypt_type != HAL_ENCRYPT_TYPE_AES_GCMP_256)
3251 		return true;
3252 
3253 	last_pn = ath11k_dp_rx_h_get_pn(first_frag);
3254 	skb_queue_walk(&rx_tid->rx_frags, skb) {
3255 		if (skb == first_frag)
3256 			continue;
3257 
3258 		cur_pn = ath11k_dp_rx_h_get_pn(skb);
3259 		if (cur_pn != last_pn + 1)
3260 			return false;
3261 		last_pn = cur_pn;
3262 	}
3263 	return true;
3264 }
3265 
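/* Handle one rx fragment from the REO exception path: validate its
 * sequence/frame control, queue it in per-TID order, arm the reassembly
 * timer and, once all fragments of the sequence have arrived, defragment
 * and reinject the MPDU via the REO entrance ring.
 */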
3266 static int ath11k_dp_rx_frag_h_mpdu(struct ath11k *ar,
3267 				    struct sk_buff *msdu,
3268 				    u32 *ring_desc)
3269 {
3270 	struct ath11k_base *ab = ar->ab;
3271 	struct hal_rx_desc *rx_desc;
3272 	struct ath11k_peer *peer;
3273 	struct dp_rx_tid *rx_tid;
3274 	struct sk_buff *defrag_skb = NULL;
3275 	u32 peer_id;
3276 	u16 seqno, frag_no;
3277 	u8 tid;
3278 	int ret = 0;
3279 	bool more_frags;
3280 
3281 	rx_desc = (struct hal_rx_desc *)msdu->data;
3282 	peer_id = ath11k_dp_rx_h_mpdu_start_peer_id(rx_desc);
3283 	tid = ath11k_dp_rx_h_mpdu_start_tid(rx_desc);
3284 	seqno = ath11k_dp_rx_h_mpdu_start_seq_no(rx_desc);
3285 	frag_no = ath11k_dp_rx_h_mpdu_start_frag_no(msdu);
3286 	more_frags = ath11k_dp_rx_h_mpdu_start_more_frags(msdu);
3287 
3288 	if (!ath11k_dp_rx_h_mpdu_start_seq_ctrl_valid(rx_desc) ||
3289 	    !ath11k_dp_rx_h_mpdu_start_fc_valid(rx_desc) ||
3290 	    tid > IEEE80211_NUM_TIDS)
3291 		return -EINVAL;
3292 
3293 	/* An unfragmented packet was received in the reo
3294 	 * exception ring; this shouldn't happen, as these
3295 	 * packets typically come from the
3296 	 * reo2sw srngs.
3297 	 */
3298 	if (WARN_ON_ONCE(!frag_no && !more_frags))
3299 		return -EINVAL;
3300 
3301 	spin_lock_bh(&ab->base_lock);
3302 	peer = ath11k_peer_find_by_id(ab, peer_id);
3303 	if (!peer) {
3304 		ath11k_warn(ab, "failed to find the peer to de-fragment received fragment peer_id %d\n",
3305 			    peer_id);
3306 		ret = -ENOENT;
3307 		goto out_unlock;
3308 	}
3309 	rx_tid = &peer->rx_tid[tid];
3310 
3311 	if ((!skb_queue_empty(&rx_tid->rx_frags) && seqno != rx_tid->cur_sn) ||
3312 	    skb_queue_empty(&rx_tid->rx_frags)) {
3313 		/* Flush stored fragments and start a new sequence */
3314 		ath11k_dp_rx_frags_cleanup(rx_tid, true);
3315 		rx_tid->cur_sn = seqno;
3316 	}
3317 
3318 	if (rx_tid->rx_frag_bitmap & BIT(frag_no)) {
3319 		/* Fragment already present */
3320 		ret = -EINVAL;
3321 		goto out_unlock;
3322 	}
3323 
3324 	if (frag_no > __fls(rx_tid->rx_frag_bitmap))
3325 		__skb_queue_tail(&rx_tid->rx_frags, msdu);
3326 	else
3327 		ath11k_dp_rx_h_sort_frags(&rx_tid->rx_frags, msdu);
3328 
3329 	rx_tid->rx_frag_bitmap |= BIT(frag_no);
3330 	if (!more_frags)
3331 		rx_tid->last_frag_no = frag_no;
3332 
3333 	if (frag_no == 0) {
3334 		rx_tid->dst_ring_desc = kmemdup(ring_desc,
3335 						sizeof(*rx_tid->dst_ring_desc),
3336 						GFP_ATOMIC);
3337 		if (!rx_tid->dst_ring_desc) {
3338 			ret = -ENOMEM;
3339 			goto out_unlock;
3340 		}
3341 	} else {
3342 		ath11k_dp_rx_link_desc_return(ab, ring_desc,
3343 					      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
3344 	}
3345 
3346 	if (!rx_tid->last_frag_no ||
3347 	    rx_tid->rx_frag_bitmap != GENMASK(rx_tid->last_frag_no, 0)) {
3348 		mod_timer(&rx_tid->frag_timer, jiffies +
3349 					       ATH11K_DP_RX_FRAGMENT_TIMEOUT_MS);
3350 		goto out_unlock;
3351 	}
3352 
3353 	spin_unlock_bh(&ab->base_lock);
3354 	del_timer_sync(&rx_tid->frag_timer);
3355 	spin_lock_bh(&ab->base_lock);
3356 
3357 	peer = ath11k_peer_find_by_id(ab, peer_id);
3358 	if (!peer)
3359 		goto err_frags_cleanup;
3360 
3361 	if (!ath11k_dp_rx_h_defrag_validate_incr_pn(ar, rx_tid))
3362 		goto err_frags_cleanup;
3363 
3364 	if (ath11k_dp_rx_h_defrag(ar, peer, rx_tid, &defrag_skb))
3365 		goto err_frags_cleanup;
3366 
3367 	if (!defrag_skb)
3368 		goto err_frags_cleanup;
3369 
3370 	if (ath11k_dp_rx_h_defrag_reo_reinject(ar, rx_tid, defrag_skb))
3371 		goto err_frags_cleanup;
3372 
3373 	ath11k_dp_rx_frags_cleanup(rx_tid, false);
3374 	goto out_unlock;
3375 
3376 err_frags_cleanup:
3377 	dev_kfree_skb_any(defrag_skb);
3378 	ath11k_dp_rx_frags_cleanup(rx_tid, true);
3379 out_unlock:
3380 	spin_unlock_bh(&ab->base_lock);
3381 	return ret;
3382 }
3383 
3384 static int
3385 ath11k_dp_process_rx_err_buf(struct ath11k *ar, u32 *ring_desc, int buf_id, bool drop)
3386 {
3387 	struct ath11k_pdev_dp *dp = &ar->dp;
3388 	struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
3389 	struct sk_buff *msdu;
3390 	struct ath11k_skb_rxcb *rxcb;
3391 	struct hal_rx_desc *rx_desc;
3392 	u16 msdu_len;
3393 
3394 	spin_lock_bh(&rx_ring->idr_lock);
3395 	msdu = idr_find(&rx_ring->bufs_idr, buf_id);
3396 	if (!msdu) {
3397 		ath11k_warn(ar->ab, "rx err buf with invalid buf_id %d\n",
3398 			    buf_id);
3399 		spin_unlock_bh(&rx_ring->idr_lock);
3400 		return -EINVAL;
3401 	}
3402 
3403 	idr_remove(&rx_ring->bufs_idr, buf_id);
3404 	spin_unlock_bh(&rx_ring->idr_lock);
3405 
3406 	rxcb = ATH11K_SKB_RXCB(msdu);
3407 	dma_unmap_single(ar->ab->dev, rxcb->paddr,
3408 			 msdu->len + skb_tailroom(msdu),
3409 			 DMA_FROM_DEVICE);
3410 
3411 	if (drop) {
3412 		dev_kfree_skb_any(msdu);
3413 		return 0;
3414 	}
3415 
3416 	rcu_read_lock();
3417 	if (!rcu_dereference(ar->ab->pdevs_active[ar->pdev_idx])) {
3418 		dev_kfree_skb_any(msdu);
3419 		goto exit;
3420 	}
3421 
3422 	if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) {
3423 		dev_kfree_skb_any(msdu);
3424 		goto exit;
3425 	}
3426 
3427 	rx_desc = (struct hal_rx_desc *)msdu->data;
3428 	msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(rx_desc);
3429 	skb_put(msdu, HAL_RX_DESC_SIZE + msdu_len);
3430 
3431 	if (ath11k_dp_rx_frag_h_mpdu(ar, msdu, ring_desc)) {
3432 		dev_kfree_skb_any(msdu);
3433 		ath11k_dp_rx_link_desc_return(ar->ab, ring_desc,
3434 					      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
3435 	}
3436 exit:
3437 	rcu_read_unlock();
3438 	return 0;
3439 }
3440 
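/* NAPI handler for the REO exception ring: parse each error descriptor,
 * hand back link descriptors that cannot be processed, drop entries that
 * are not single-MSDU fragments, route fragments into the
 * defragmentation path and replenish the reaped rx buffers per radio.
 */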
3441 int ath11k_dp_process_rx_err(struct ath11k_base *ab, struct napi_struct *napi,
3442 			     int budget)
3443 {
3444 	u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC];
3445 	struct dp_link_desc_bank *link_desc_banks;
3446 	enum hal_rx_buf_return_buf_manager rbm;
3447 	int tot_n_bufs_reaped, quota, ret, i;
3448 	int n_bufs_reaped[MAX_RADIOS] = {0};
3449 	struct dp_rxdma_ring *rx_ring;
3450 	struct dp_srng *reo_except;
3451 	u32 desc_bank, num_msdus;
3452 	struct hal_srng *srng;
3453 	struct ath11k_dp *dp;
3454 	void *link_desc_va;
3455 	int buf_id, mac_id;
3456 	struct ath11k *ar;
3457 	dma_addr_t paddr;
3458 	u32 *desc;
3459 	bool is_frag;
3460 	u8 drop = 0;
3461 
3462 	tot_n_bufs_reaped = 0;
3463 	quota = budget;
3464 
3465 	dp = &ab->dp;
3466 	reo_except = &dp->reo_except_ring;
3467 	link_desc_banks = dp->link_desc_banks;
3468 
3469 	srng = &ab->hal.srng_list[reo_except->ring_id];
3470 
3471 	spin_lock_bh(&srng->lock);
3472 
3473 	ath11k_hal_srng_access_begin(ab, srng);
3474 
3475 	while (budget &&
3476 	       (desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
3477 		struct hal_reo_dest_ring *reo_desc = (struct hal_reo_dest_ring *)desc;
3478 
3479 		ab->soc_stats.err_ring_pkts++;
3480 		ret = ath11k_hal_desc_reo_parse_err(ab, desc, &paddr,
3481 						    &desc_bank);
3482 		if (ret) {
3483 			ath11k_warn(ab, "failed to parse error reo desc %d\n",
3484 				    ret);
3485 			continue;
3486 		}
3487 		link_desc_va = link_desc_banks[desc_bank].vaddr +
3488 			       (paddr - link_desc_banks[desc_bank].paddr);
3489 		ath11k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, msdu_cookies,
3490 						 &rbm);
3491 		if (rbm != HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST &&
3492 		    rbm != HAL_RX_BUF_RBM_SW3_BM) {
3493 			ab->soc_stats.invalid_rbm++;
3494 			ath11k_warn(ab, "invalid return buffer manager %d\n", rbm);
3495 			ath11k_dp_rx_link_desc_return(ab, desc,
3496 						      HAL_WBM_REL_BM_ACT_REL_MSDU);
3497 			continue;
3498 		}
3499 
3500 		is_frag = !!(reo_desc->rx_mpdu_info.info0 & RX_MPDU_DESC_INFO0_FRAG_FLAG);
3501 
3502 		/* Process only rx fragments with one msdu per link desc below, and drop
3503 		 * msdus indicated due to error reasons.
3504 		 */
3505 		if (!is_frag || num_msdus > 1) {
3506 			drop = 1;
3507 			/* Return the link desc back to wbm idle list */
3508 			ath11k_dp_rx_link_desc_return(ab, desc,
3509 						      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
3510 		}
3511 
3512 		for (i = 0; i < num_msdus; i++) {
3513 			buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
3514 					   msdu_cookies[i]);
3515 
3516 			mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID,
3517 					   msdu_cookies[i]);
3518 
3519 			ar = ab->pdevs[mac_id].ar;
3520 
3521 			if (!ath11k_dp_process_rx_err_buf(ar, desc, buf_id, drop)) {
3522 				n_bufs_reaped[mac_id]++;
3523 				tot_n_bufs_reaped++;
3524 			}
3525 		}
3526 
3527 		if (tot_n_bufs_reaped >= quota) {
3528 			tot_n_bufs_reaped = quota;
3529 			goto exit;
3530 		}
3531 
3532 		budget = quota - tot_n_bufs_reaped;
3533 	}
3534 
3535 exit:
3536 	ath11k_hal_srng_access_end(ab, srng);
3537 
3538 	spin_unlock_bh(&srng->lock);
3539 
3540 	for (i = 0; i < ab->num_radios; i++) {
3541 		if (!n_bufs_reaped[i])
3542 			continue;
3543 
3544 		ar = ab->pdevs[i].ar;
3545 		rx_ring = &ar->dp.rx_refill_buf_ring;
3546 
3547 		ath11k_dp_rxbufs_replenish(ab, i, rx_ring, n_bufs_reaped[i],
3548 					   HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC);
3549 	}
3550 
3551 	return tot_n_bufs_reaped;
3552 }
3553 
3554 static void ath11k_dp_rx_null_q_desc_sg_drop(struct ath11k *ar,
3555 					     int msdu_len,
3556 					     struct sk_buff_head *msdu_list)
3557 {
3558 	struct sk_buff *skb, *tmp;
3559 	struct ath11k_skb_rxcb *rxcb;
3560 	int n_buffs;
3561 
3562 	n_buffs = DIV_ROUND_UP(msdu_len,
3563 			       (DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE));
3564 
3565 	skb_queue_walk_safe(msdu_list, skb, tmp) {
3566 		rxcb = ATH11K_SKB_RXCB(skb);
3567 		if (rxcb->err_rel_src == HAL_WBM_REL_SRC_MODULE_REO &&
3568 		    rxcb->err_code == HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO) {
3569 			if (!n_buffs)
3570 				break;
3571 			__skb_unlink(skb, msdu_list);
3572 			dev_kfree_skb_any(skb);
3573 			n_buffs--;
3574 		}
3575 	}
3576 }
3577 
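/* Handle an MSDU rejected by REO with a NULL queue descriptor error: drop
 * oversized scattered MSDUs, otherwise strip the rx descriptor (and L3 pad
 * for non-fragments), fill in the rx status and let the caller deliver the
 * frame to mac80211. Returns 0 on success or a negative error to drop.
 */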
3578 static int ath11k_dp_rx_h_null_q_desc(struct ath11k *ar, struct sk_buff *msdu,
3579 				      struct ieee80211_rx_status *status,
3580 				      struct sk_buff_head *msdu_list)
3581 {
3582 	u16 msdu_len;
3583 	struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
3584 	u8 l3pad_bytes;
3585 	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
3586 
3587 	msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(desc);
3588 
3589 	if (!rxcb->is_frag && ((msdu_len + HAL_RX_DESC_SIZE) > DP_RX_BUFFER_SIZE)) {
3590 		/* First buffer will be freed by the caller, so deduct its length */
3591 		msdu_len = msdu_len - (DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE);
3592 		ath11k_dp_rx_null_q_desc_sg_drop(ar, msdu_len, msdu_list);
3593 		return -EINVAL;
3594 	}
3595 
3596 	if (!ath11k_dp_rx_h_attn_msdu_done(desc)) {
3597 		ath11k_warn(ar->ab,
3598 			    "msdu_done bit not set in null_q_des processing\n");
3599 		__skb_queue_purge(msdu_list);
3600 		return -EIO;
3601 	}
3602 
3603 	/* Handle NULL queue descriptor violations arising out of a missing
3604 	 * REO queue for a given peer or a given TID. This typically
3605 	 * happens if a packet is received on a QoS-enabled TID before the
3606 	 * ADDBA negotiation for that TID has set up the TID queue. It may
3607 	 * also happen for MC/BC frames if they are not routed to the
3608 	 * non-QoS TID queue, in the absence of any other default TID queue.
3609 	 * This error can show up in either a REO destination or WBM release ring.
3610 	 */
3611 
3612 	rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(desc);
3613 	rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(desc);
3614 
3615 	if (rxcb->is_frag) {
3616 		skb_pull(msdu, HAL_RX_DESC_SIZE);
3617 	} else {
3618 		l3pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(desc);
3619 
3620 		if ((HAL_RX_DESC_SIZE + l3pad_bytes + msdu_len) > DP_RX_BUFFER_SIZE)
3621 			return -EINVAL;
3622 
3623 		skb_put(msdu, HAL_RX_DESC_SIZE + l3pad_bytes + msdu_len);
3624 		skb_pull(msdu, HAL_RX_DESC_SIZE + l3pad_bytes);
3625 	}
3626 	ath11k_dp_rx_h_ppdu(ar, desc, status);
3627 
3628 	ath11k_dp_rx_h_mpdu(ar, msdu, desc, status);
3629 
3630 	rxcb->tid = ath11k_dp_rx_h_mpdu_start_tid(desc);
3631 
3632 	/* Note that the caller still has access to the msdu and will complete
3633 	 * rx with mac80211. There is no need to clean up amsdu_list here.
3634 	 */
3635 
3636 	return 0;
3637 }
3638 
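/* Dispatch MSDUs released by the REO module with an error code. Returns
 * true if the caller should drop the MSDU, false if it has been prepared
 * for delivery to mac80211.
 */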
3639 static bool ath11k_dp_rx_h_reo_err(struct ath11k *ar, struct sk_buff *msdu,
3640 				   struct ieee80211_rx_status *status,
3641 				   struct sk_buff_head *msdu_list)
3642 {
3643 	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
3644 	bool drop = false;
3645 
3646 	ar->ab->soc_stats.reo_error[rxcb->err_code]++;
3647 
3648 	switch (rxcb->err_code) {
3649 	case HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO:
3650 		if (ath11k_dp_rx_h_null_q_desc(ar, msdu, status, msdu_list))
3651 			drop = true;
3652 		break;
3653 	case HAL_REO_DEST_RING_ERROR_CODE_PN_CHECK_FAILED:
3654 		/* TODO: Do not drop PN failed packets in the driver;
3655 		 * instead, it is good to drop such packets in mac80211
3656 		 * after incrementing the replay counters.
3657 		 */
3658 
3659 		/* fall through */
3660 	default:
3661 		/* TODO: Review other errors and process them to mac80211
3662 		 * as appropriate.
3663 		 */
3664 		drop = true;
3665 		break;
3666 	}
3667 
3668 	return drop;
3669 }
3670 
3671 static void ath11k_dp_rx_h_tkip_mic_err(struct ath11k *ar, struct sk_buff *msdu,
3672 					struct ieee80211_rx_status *status)
3673 {
3674 	u16 msdu_len;
3675 	struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
3676 	u8 l3pad_bytes;
3677 	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
3678 
3679 	rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(desc);
3680 	rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(desc);
3681 
3682 	l3pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(desc);
3683 	msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(desc);
3684 	skb_put(msdu, HAL_RX_DESC_SIZE + l3pad_bytes + msdu_len);
3685 	skb_pull(msdu, HAL_RX_DESC_SIZE + l3pad_bytes);
3686 
3687 	ath11k_dp_rx_h_ppdu(ar, desc, status);
3688 
3689 	status->flag |= (RX_FLAG_MMIC_STRIPPED | RX_FLAG_MMIC_ERROR |
3690 			 RX_FLAG_DECRYPTED);
3691 
3692 	ath11k_dp_rx_h_undecap(ar, msdu, desc,
3693 			       HAL_ENCRYPT_TYPE_TKIP_MIC, status, false);
3694 }
3695 
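/* Dispatch MSDUs released by the RXDMA module with an error code. Only
 * TKIP MIC failures are passed up, so mac80211 can account the MIC error;
 * everything else is dropped.
 */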
3696 static bool ath11k_dp_rx_h_rxdma_err(struct ath11k *ar, struct sk_buff *msdu,
3697 				     struct ieee80211_rx_status *status)
3698 {
3699 	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
3700 	bool drop = false;
3701 
3702 	ar->ab->soc_stats.rxdma_error[rxcb->err_code]++;
3703 
3704 	switch (rxcb->err_code) {
3705 	case HAL_REO_ENTR_RING_RXDMA_ECODE_TKIP_MIC_ERR:
3706 		ath11k_dp_rx_h_tkip_mic_err(ar, msdu, status);
3707 		break;
3708 	default:
3709 		/* TODO: Review other rxdma error codes to check if anything is
3710 		 * worth reporting to mac80211
3711 		 */
3712 		drop = true;
3713 		break;
3714 	}
3715 
3716 	return drop;
3717 }
3718 
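/* Handle one MSDU from the WBM error release ring: run the REO or RXDMA
 * error handler based on the release source and either free the MSDU or
 * deliver it to mac80211 with the populated rx status.
 */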
3719 static void ath11k_dp_rx_wbm_err(struct ath11k *ar,
3720 				 struct napi_struct *napi,
3721 				 struct sk_buff *msdu,
3722 				 struct sk_buff_head *msdu_list)
3723 {
3724 	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
3725 	struct ieee80211_rx_status rxs = {0};
3726 	struct ieee80211_rx_status *status;
3727 	bool drop = true;
3728 
3729 	switch (rxcb->err_rel_src) {
3730 	case HAL_WBM_REL_SRC_MODULE_REO:
3731 		drop = ath11k_dp_rx_h_reo_err(ar, msdu, &rxs, msdu_list);
3732 		break;
3733 	case HAL_WBM_REL_SRC_MODULE_RXDMA:
3734 		drop = ath11k_dp_rx_h_rxdma_err(ar, msdu, &rxs);
3735 		break;
3736 	default:
3737 		/* msdu will get freed */
3738 		break;
3739 	}
3740 
3741 	if (drop) {
3742 		dev_kfree_skb_any(msdu);
3743 		return;
3744 	}
3745 
3746 	status = IEEE80211_SKB_RXCB(msdu);
3747 	*status = rxs;
3748 
3749 	ath11k_dp_rx_deliver_msdu(ar, napi, msdu);
3750 }
3751 
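/* NAPI handler for the WBM error release ring. Reaps up to budget
 * descriptors, unmaps and queues the matching rx buffers per pdev,
 * replenishes the refill rings and then passes each MSDU to
 * ath11k_dp_rx_wbm_err(). Returns the number of buffers reaped.
 */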
3752 int ath11k_dp_rx_process_wbm_err(struct ath11k_base *ab,
3753 				 struct napi_struct *napi, int budget)
3754 {
3755 	struct ath11k *ar;
3756 	struct ath11k_dp *dp = &ab->dp;
3757 	struct dp_rxdma_ring *rx_ring;
3758 	struct hal_rx_wbm_rel_info err_info;
3759 	struct hal_srng *srng;
3760 	struct sk_buff *msdu;
3761 	struct sk_buff_head msdu_list[MAX_RADIOS];
3762 	struct ath11k_skb_rxcb *rxcb;
3763 	u32 *rx_desc;
3764 	int buf_id, mac_id;
3765 	int num_buffs_reaped[MAX_RADIOS] = {0};
3766 	int total_num_buffs_reaped = 0;
3767 	int ret, i;
3768 
3769 	for (i = 0; i < MAX_RADIOS; i++)
3770 		__skb_queue_head_init(&msdu_list[i]);
3771 
3772 	srng = &ab->hal.srng_list[dp->rx_rel_ring.ring_id];
3773 
3774 	spin_lock_bh(&srng->lock);
3775 
3776 	ath11k_hal_srng_access_begin(ab, srng);
3777 
3778 	while (budget) {
3779 		rx_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng);
3780 		if (!rx_desc)
3781 			break;
3782 
3783 		ret = ath11k_hal_wbm_desc_parse_err(ab, rx_desc, &err_info);
3784 		if (ret) {
3785 			ath11k_warn(ab,
3786 				    "failed to parse rx error in wbm_rel ring desc %d\n",
3787 				    ret);
3788 			continue;
3789 		}
3790 
3791 		buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, err_info.cookie);
3792 		mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, err_info.cookie);
3793 
3794 		ar = ab->pdevs[mac_id].ar;
3795 		rx_ring = &ar->dp.rx_refill_buf_ring;
3796 
3797 		spin_lock_bh(&rx_ring->idr_lock);
3798 		msdu = idr_find(&rx_ring->bufs_idr, buf_id);
3799 		if (!msdu) {
3800 			ath11k_warn(ab, "frame rx with invalid buf_id %d pdev %d\n",
3801 				    buf_id, mac_id);
3802 			spin_unlock_bh(&rx_ring->idr_lock);
3803 			continue;
3804 		}
3805 
3806 		idr_remove(&rx_ring->bufs_idr, buf_id);
3807 		spin_unlock_bh(&rx_ring->idr_lock);
3808 
3809 		rxcb = ATH11K_SKB_RXCB(msdu);
3810 		dma_unmap_single(ab->dev, rxcb->paddr,
3811 				 msdu->len + skb_tailroom(msdu),
3812 				 DMA_FROM_DEVICE);
3813 
3814 		num_buffs_reaped[mac_id]++;
3815 		total_num_buffs_reaped++;
3816 		budget--;
3817 
3818 		if (err_info.push_reason !=
3819 		    HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) {
3820 			dev_kfree_skb_any(msdu);
3821 			continue;
3822 		}
3823 
3824 		rxcb->err_rel_src = err_info.err_rel_src;
3825 		rxcb->err_code = err_info.err_code;
3826 		rxcb->rx_desc = (struct hal_rx_desc *)msdu->data;
3827 		__skb_queue_tail(&msdu_list[mac_id], msdu);
3828 	}
3829 
3830 	ath11k_hal_srng_access_end(ab, srng);
3831 
3832 	spin_unlock_bh(&srng->lock);
3833 
3834 	if (!total_num_buffs_reaped)
3835 		goto done;
3836 
3837 	for (i = 0; i < ab->num_radios; i++) {
3838 		if (!num_buffs_reaped[i])
3839 			continue;
3840 
3841 		ar = ab->pdevs[i].ar;
3842 		rx_ring = &ar->dp.rx_refill_buf_ring;
3843 
3844 		ath11k_dp_rxbufs_replenish(ab, i, rx_ring, num_buffs_reaped[i],
3845 					   HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC);
3846 	}
3847 
3848 	rcu_read_lock();
3849 	for (i = 0; i < ab->num_radios; i++) {
3850 		if (!rcu_dereference(ab->pdevs_active[i])) {
3851 			__skb_queue_purge(&msdu_list[i]);
3852 			continue;
3853 		}
3854 
3855 		ar = ab->pdevs[i].ar;
3856 
3857 		if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) {
3858 			__skb_queue_purge(&msdu_list[i]);
3859 			continue;
3860 		}
3861 
3862 		while ((msdu = __skb_dequeue(&msdu_list[i])) != NULL)
3863 			ath11k_dp_rx_wbm_err(ar, napi, msdu, &msdu_list[i]);
3864 	}
3865 	rcu_read_unlock();
3866 done:
3867 	return total_num_buffs_reaped;
3868 }
3869 
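/* Process the per-pdev RXDMA error destination ring: account the error
 * code, free every rx buffer referenced by the MSDU link descriptor and
 * return the link descriptor to the WBM idle list. Freed buffers are
 * replenished before returning the number of ring entries processed.
 */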
3870 int ath11k_dp_process_rxdma_err(struct ath11k_base *ab, int mac_id, int budget)
3871 {
3872 	struct ath11k *ar = ab->pdevs[mac_id].ar;
3873 	struct dp_srng *err_ring = &ar->dp.rxdma_err_dst_ring;
3874 	struct dp_rxdma_ring *rx_ring = &ar->dp.rx_refill_buf_ring;
3875 	struct dp_link_desc_bank *link_desc_banks = ab->dp.link_desc_banks;
3876 	struct hal_srng *srng;
3877 	u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC];
3878 	enum hal_rx_buf_return_buf_manager rbm;
3879 	enum hal_reo_entr_rxdma_ecode rxdma_err_code;
3880 	struct ath11k_skb_rxcb *rxcb;
3881 	struct sk_buff *skb;
3882 	struct hal_reo_entrance_ring *entr_ring;
3883 	void *desc;
3884 	int num_buf_freed = 0;
3885 	int quota = budget;
3886 	dma_addr_t paddr;
3887 	u32 desc_bank;
3888 	void *link_desc_va;
3889 	int num_msdus;
3890 	int i;
3891 	int buf_id;
3892 
3893 	srng = &ab->hal.srng_list[err_ring->ring_id];
3894 
3895 	spin_lock_bh(&srng->lock);
3896 
3897 	ath11k_hal_srng_access_begin(ab, srng);
3898 
3899 	while (quota-- &&
3900 	       (desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
3901 		ath11k_hal_rx_reo_ent_paddr_get(ab, desc, &paddr, &desc_bank);
3902 
3903 		entr_ring = (struct hal_reo_entrance_ring *)desc;
3904 		rxdma_err_code =
3905 			FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE,
3906 				  entr_ring->info1);
3907 		ab->soc_stats.rxdma_error[rxdma_err_code]++;
3908 
3909 		link_desc_va = link_desc_banks[desc_bank].vaddr +
3910 			       (paddr - link_desc_banks[desc_bank].paddr);
3911 		ath11k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus,
3912 						 msdu_cookies, &rbm);
3913 
3914 		for (i = 0; i < num_msdus; i++) {
3915 			buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
3916 					   msdu_cookies[i]);
3917 
3918 			spin_lock_bh(&rx_ring->idr_lock);
3919 			skb = idr_find(&rx_ring->bufs_idr, buf_id);
3920 			if (!skb) {
3921 				ath11k_warn(ab, "rxdma error with invalid buf_id %d\n",
3922 					    buf_id);
3923 				spin_unlock_bh(&rx_ring->idr_lock);
3924 				continue;
3925 			}
3926 
3927 			idr_remove(&rx_ring->bufs_idr, buf_id);
3928 			spin_unlock_bh(&rx_ring->idr_lock);
3929 
3930 			rxcb = ATH11K_SKB_RXCB(skb);
3931 			dma_unmap_single(ab->dev, rxcb->paddr,
3932 					 skb->len + skb_tailroom(skb),
3933 					 DMA_FROM_DEVICE);
3934 			dev_kfree_skb_any(skb);
3935 
3936 			num_buf_freed++;
3937 		}
3938 
3939 		ath11k_dp_rx_link_desc_return(ab, desc,
3940 					      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
3941 	}
3942 
3943 	ath11k_hal_srng_access_end(ab, srng);
3944 
3945 	spin_unlock_bh(&srng->lock);
3946 
3947 	if (num_buf_freed)
3948 		ath11k_dp_rxbufs_replenish(ab, mac_id, rx_ring, num_buf_freed,
3949 					   HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC);
3950 
3951 	return budget - quota;
3952 }
3953 
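/* Drain the REO status ring and complete the matching commands from
 * dp->reo_cmd_list: parse the status TLV by tag, look up the command by
 * cmd_num and invoke its handler before freeing the command.
 */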
3954 void ath11k_dp_process_reo_status(struct ath11k_base *ab)
3955 {
3956 	struct ath11k_dp *dp = &ab->dp;
3957 	struct hal_srng *srng;
3958 	struct dp_reo_cmd *cmd, *tmp;
3959 	bool found = false;
3960 	u32 *reo_desc;
3961 	u16 tag;
3962 	struct hal_reo_status reo_status;
3963 
3964 	srng = &ab->hal.srng_list[dp->reo_status_ring.ring_id];
3965 
3966 	memset(&reo_status, 0, sizeof(reo_status));
3967 
3968 	spin_lock_bh(&srng->lock);
3969 
3970 	ath11k_hal_srng_access_begin(ab, srng);
3971 
3972 	while ((reo_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
3973 		tag = FIELD_GET(HAL_SRNG_TLV_HDR_TAG, *reo_desc);
3974 
3975 		switch (tag) {
3976 		case HAL_REO_GET_QUEUE_STATS_STATUS:
3977 			ath11k_hal_reo_status_queue_stats(ab, reo_desc,
3978 							  &reo_status);
3979 			break;
3980 		case HAL_REO_FLUSH_QUEUE_STATUS:
3981 			ath11k_hal_reo_flush_queue_status(ab, reo_desc,
3982 							  &reo_status);
3983 			break;
3984 		case HAL_REO_FLUSH_CACHE_STATUS:
3985 			ath11k_hal_reo_flush_cache_status(ab, reo_desc,
3986 							  &reo_status);
3987 			break;
3988 		case HAL_REO_UNBLOCK_CACHE_STATUS:
3989 			ath11k_hal_reo_unblk_cache_status(ab, reo_desc,
3990 							  &reo_status);
3991 			break;
3992 		case HAL_REO_FLUSH_TIMEOUT_LIST_STATUS:
3993 			ath11k_hal_reo_flush_timeout_list_status(ab, reo_desc,
3994 								 &reo_status);
3995 			break;
3996 		case HAL_REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS:
3997 			ath11k_hal_reo_desc_thresh_reached_status(ab, reo_desc,
3998 								  &reo_status);
3999 			break;
4000 		case HAL_REO_UPDATE_RX_REO_QUEUE_STATUS:
4001 			ath11k_hal_reo_update_rx_reo_queue_status(ab, reo_desc,
4002 								  &reo_status);
4003 			break;
4004 		default:
4005 			ath11k_warn(ab, "Unknown reo status type %d\n", tag);
4006 			continue;
4007 		}
4008 
4009 		spin_lock_bh(&dp->reo_cmd_lock);
4010 		list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
4011 			if (reo_status.uniform_hdr.cmd_num == cmd->cmd_num) {
4012 				found = true;
4013 				list_del(&cmd->list);
4014 				break;
4015 			}
4016 		}
4017 		spin_unlock_bh(&dp->reo_cmd_lock);
4018 
4019 		if (found) {
4020 			cmd->handler(dp, (void *)&cmd->data,
4021 				     reo_status.uniform_hdr.cmd_status);
4022 			kfree(cmd);
4023 		}
4024 
4025 		found = false;
4026 	}
4027 
4028 	ath11k_hal_srng_access_end(ab, srng);
4029 
4030 	spin_unlock_bh(&srng->lock);
4031 }
4032 
4033 void ath11k_dp_rx_pdev_free(struct ath11k_base *ab, int mac_id)
4034 {
4035 	struct ath11k *ar = ab->pdevs[mac_id].ar;
4036 
4037 	ath11k_dp_rx_pdev_srng_free(ar);
4038 	ath11k_dp_rxdma_pdev_buf_free(ar);
4039 }
4040 
4041 int ath11k_dp_rx_pdev_alloc(struct ath11k_base *ab, int mac_id)
4042 {
4043 	struct ath11k *ar = ab->pdevs[mac_id].ar;
4044 	struct ath11k_pdev_dp *dp = &ar->dp;
4045 	u32 ring_id;
4046 	int ret;
4047 
4048 	ret = ath11k_dp_rx_pdev_srng_alloc(ar);
4049 	if (ret) {
4050 		ath11k_warn(ab, "failed to setup rx srngs\n");
4051 		return ret;
4052 	}
4053 
4054 	ret = ath11k_dp_rxdma_pdev_buf_setup(ar);
4055 	if (ret) {
4056 		ath11k_warn(ab, "failed to setup rxdma ring\n");
4057 		return ret;
4058 	}
4059 
4060 	ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id;
4061 	ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id, HAL_RXDMA_BUF);
4062 	if (ret) {
4063 		ath11k_warn(ab, "failed to configure rx_refill_buf_ring %d\n",
4064 			    ret);
4065 		return ret;
4066 	}
4067 
4068 	ring_id = dp->rxdma_err_dst_ring.ring_id;
4069 	ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id, HAL_RXDMA_DST);
4070 	if (ret) {
4071 		ath11k_warn(ab, "failed to configure rxdma_err_dest_ring %d\n",
4072 			    ret);
4073 		return ret;
4074 	}
4075 
4076 	ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id;
4077 	ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id,
4078 					  mac_id, HAL_RXDMA_MONITOR_BUF);
4079 	if (ret) {
4080 		ath11k_warn(ab, "failed to configure rxdma_mon_buf_ring %d\n",
4081 			    ret);
4082 		return ret;
4083 	}
4084 	ret = ath11k_dp_tx_htt_srng_setup(ab,
4085 					  dp->rxdma_mon_dst_ring.ring_id,
4086 					  mac_id, HAL_RXDMA_MONITOR_DST);
4087 	if (ret) {
4088 		ath11k_warn(ab, "failed to configure rxdma_mon_dst_ring %d\n",
4089 			    ret);
4090 		return ret;
4091 	}
4092 	ret = ath11k_dp_tx_htt_srng_setup(ab,
4093 					  dp->rxdma_mon_desc_ring.ring_id,
4094 					  mac_id, HAL_RXDMA_MONITOR_DESC);
4095 	if (ret) {
4096 		ath11k_warn(ab, "failed to configure rxdma_mon_dst_ring %d\n",
4097 			    ret);
4098 		return ret;
4099 	}
4100 	ring_id = dp->rx_mon_status_refill_ring.refill_buf_ring.ring_id;
4101 	ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id,
4102 					  HAL_RXDMA_MONITOR_STATUS);
4103 	if (ret) {
4104 		ath11k_warn(ab,
4105 			    "failed to configure mon_status_refill_ring %d\n",
4106 			    ret);
4107 		return ret;
4108 	}
4109 	return 0;
4110 }
4111 
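/* Split the remaining MSDU length of a scattered monitor frame into the
 * portion that fits in one rx buffer (frag_len) and the length still left
 * to be reaped (total_len).
 */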
4112 static void ath11k_dp_mon_set_frag_len(u32 *total_len, u32 *frag_len)
4113 {
4114 	if (*total_len >= (DP_RX_BUFFER_SIZE - sizeof(struct hal_rx_desc))) {
4115 		*frag_len = DP_RX_BUFFER_SIZE - sizeof(struct hal_rx_desc);
4116 		*total_len -= *frag_len;
4117 	} else {
4118 		*frag_len = *total_len;
4119 		*total_len = 0;
4120 	}
4121 }
4122 
4123 static
4124 int ath11k_dp_rx_monitor_link_desc_return(struct ath11k *ar,
4125 					  void *p_last_buf_addr_info,
4126 					  u8 mac_id)
4127 {
4128 	struct ath11k_pdev_dp *dp = &ar->dp;
4129 	struct dp_srng *dp_srng;
4130 	void *hal_srng;
4131 	void *src_srng_desc;
4132 	int ret = 0;
4133 
4134 	dp_srng = &dp->rxdma_mon_desc_ring;
4135 	hal_srng = &ar->ab->hal.srng_list[dp_srng->ring_id];
4136 
4137 	ath11k_hal_srng_access_begin(ar->ab, hal_srng);
4138 
4139 	src_srng_desc = ath11k_hal_srng_src_get_next_entry(ar->ab, hal_srng);
4140 
4141 	if (src_srng_desc) {
4142 		struct ath11k_buffer_addr *src_desc =
4143 				(struct ath11k_buffer_addr *)src_srng_desc;
4144 
4145 		*src_desc = *((struct ath11k_buffer_addr *)p_last_buf_addr_info);
4146 	} else {
4147 		ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
4148 			   "Monitor Link Desc Ring %d Full", mac_id);
4149 		ret = -ENOMEM;
4150 	}
4151 
4152 	ath11k_hal_srng_access_end(ar->ab, hal_srng);
4153 	return ret;
4154 }
4155 
4156 static
4157 void ath11k_dp_rx_mon_next_link_desc_get(void *rx_msdu_link_desc,
4158 					 dma_addr_t *paddr, u32 *sw_cookie,
4159 					 void **pp_buf_addr_info)
4160 {
4161 	struct hal_rx_msdu_link *msdu_link =
4162 			(struct hal_rx_msdu_link *)rx_msdu_link_desc;
4163 	struct ath11k_buffer_addr *buf_addr_info;
4164 	u8 rbm = 0;
4165 
4166 	buf_addr_info = (struct ath11k_buffer_addr *)&msdu_link->buf_addr_info;
4167 
4168 	ath11k_hal_rx_buf_addr_info_get(buf_addr_info, paddr, sw_cookie, &rbm);
4169 
4170 	*pp_buf_addr_info = (void *)buf_addr_info;
4171 }
4172 
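/* Force the skb data length to len, trimming or growing tailroom as
 * needed. The skb is freed and -ENOMEM returned if the head cannot be
 * expanded.
 */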
4173 static int ath11k_dp_pkt_set_pktlen(struct sk_buff *skb, u32 len)
4174 {
4175 	if (skb->len > len) {
4176 		skb_trim(skb, len);
4177 	} else {
4178 		if (skb_tailroom(skb) < len - skb->len) {
4179 			if ((pskb_expand_head(skb, 0,
4180 					      len - skb->len - skb_tailroom(skb),
4181 					      GFP_ATOMIC))) {
4182 				dev_kfree_skb_any(skb);
4183 				return -ENOMEM;
4184 			}
4185 		}
4186 		skb_put(skb, (len - skb->len));
4187 	}
4188 	return 0;
4189 }
4190 
4191 static void ath11k_hal_rx_msdu_list_get(struct ath11k *ar,
4192 					void *msdu_link_desc,
4193 					struct hal_rx_msdu_list *msdu_list,
4194 					u16 *num_msdus)
4195 {
4196 	struct hal_rx_msdu_details *msdu_details = NULL;
4197 	struct rx_msdu_desc *msdu_desc_info = NULL;
4198 	struct hal_rx_msdu_link *msdu_link = NULL;
4199 	int i;
4200 	u32 last = FIELD_PREP(RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU, 1);
4201 	u32 first = FIELD_PREP(RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU, 1);
4202 	u8 tmp = 0;
4203 
4204 	msdu_link = (struct hal_rx_msdu_link *)msdu_link_desc;
4205 	msdu_details = &msdu_link->msdu_link[0];
4206 
4207 	for (i = 0; i < HAL_RX_NUM_MSDU_DESC; i++) {
4208 		if (FIELD_GET(BUFFER_ADDR_INFO0_ADDR,
4209 			      msdu_details[i].buf_addr_info.info0) == 0) {
4210 			msdu_desc_info = &msdu_details[i - 1].rx_msdu_info;
4211 			msdu_desc_info->info0 |= last;
4213 			break;
4214 		}
4215 		msdu_desc_info = &msdu_details[i].rx_msdu_info;
4216 
4217 		if (!i)
4218 			msdu_desc_info->info0 |= first;
4219 		else if (i == (HAL_RX_NUM_MSDU_DESC - 1))
4220 			msdu_desc_info->info0 |= last;
4221 		msdu_list->msdu_info[i].msdu_flags = msdu_desc_info->info0;
4222 		msdu_list->msdu_info[i].msdu_len =
4223 			 HAL_RX_MSDU_PKT_LENGTH_GET(msdu_desc_info->info0);
4224 		msdu_list->sw_cookie[i] =
4225 			FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
4226 				  msdu_details[i].buf_addr_info.info1);
4227 		tmp = FIELD_GET(BUFFER_ADDR_INFO1_RET_BUF_MGR,
4228 				msdu_details[i].buf_addr_info.info1);
4229 		msdu_list->rbm[i] = tmp;
4230 	}
4231 	*num_msdus = i;
4232 }
4233 
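/* Compare the ppdu_id of a monitor destination MSDU against the ppdu_id
 * tracked from the status ring. Returns the MSDU's ppdu_id (updating
 * *ppdu_id) when the entry does not belong to the PPDU currently being
 * processed, or 0 when it does; *rx_bufs_used is bumped when the stale
 * destination entry should simply be freed.
 */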
4234 static u32 ath11k_dp_rx_mon_comp_ppduid(u32 msdu_ppdu_id, u32 *ppdu_id,
4235 					u32 *rx_bufs_used)
4236 {
4237 	u32 ret = 0;
4238 
4239 	if ((*ppdu_id < msdu_ppdu_id) &&
4240 	    ((msdu_ppdu_id - *ppdu_id) < DP_NOT_PPDU_ID_WRAP_AROUND)) {
4241 		*ppdu_id = msdu_ppdu_id;
4242 		ret = msdu_ppdu_id;
4243 	} else if ((*ppdu_id > msdu_ppdu_id) &&
4244 		((*ppdu_id - msdu_ppdu_id) > DP_NOT_PPDU_ID_WRAP_AROUND)) {
4245 			/* mon_dst is behind mon_status,
4246 			 * skip this dst_ring entry and free it
4247 			 */
4248 		*rx_bufs_used += 1;
4249 		*ppdu_id = msdu_ppdu_id;
4250 		ret = msdu_ppdu_id;
4251 	}
4252 	return ret;
4253 }
4254 
4255 static void ath11k_dp_mon_get_buf_len(struct hal_rx_msdu_desc_info *info,
4256 				      bool *is_frag, u32 *total_len,
4257 				      u32 *frag_len, u32 *msdu_cnt)
4258 {
4259 	if (info->msdu_flags & RX_MSDU_DESC_INFO0_MSDU_CONTINUATION) {
4260 		if (!*is_frag) {
4261 			*total_len = info->msdu_len;
4262 			*is_frag = true;
4263 		}
4264 		ath11k_dp_mon_set_frag_len(total_len,
4265 					   frag_len);
4266 	} else {
4267 		if (*is_frag) {
4268 			ath11k_dp_mon_set_frag_len(total_len,
4269 						   frag_len);
4270 		} else {
4271 			*frag_len = info->msdu_len;
4272 		}
4273 		*is_frag = false;
4274 		*msdu_cnt -= 1;
4275 	}
4276 }
4277 
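/* Walk one REO entrance ring entry of the monitor destination path and pop
 * the MSDUs of the MPDU it describes: follow the MSDU link descriptor
 * chain, unmap each buffer, chain the skbs into head_msdu/tail_msdu and
 * hand every consumed link descriptor back to the monitor desc ring.
 * Returns the number of rx buffers used so they can be replenished.
 */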
4278 static u32
4279 ath11k_dp_rx_mon_mpdu_pop(struct ath11k *ar,
4280 			  void *ring_entry, struct sk_buff **head_msdu,
4281 			  struct sk_buff **tail_msdu, u32 *npackets,
4282 			  u32 *ppdu_id)
4283 {
4284 	struct ath11k_pdev_dp *dp = &ar->dp;
4285 	struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
4286 	struct dp_rxdma_ring *rx_ring = &dp->rxdma_mon_buf_ring;
4287 	struct sk_buff *msdu = NULL, *last = NULL;
4288 	struct hal_rx_msdu_list msdu_list;
4289 	void *p_buf_addr_info, *p_last_buf_addr_info;
4290 	struct hal_rx_desc *rx_desc;
4291 	void *rx_msdu_link_desc;
4292 	dma_addr_t paddr;
4293 	u16 num_msdus = 0;
4294 	u32 rx_buf_size, rx_pkt_offset, sw_cookie;
4295 	u32 rx_bufs_used = 0, i = 0;
4296 	u32 msdu_ppdu_id = 0, msdu_cnt = 0;
4297 	u32 total_len = 0, frag_len = 0;
4298 	bool is_frag, is_first_msdu;
4299 	bool drop_mpdu = false;
4300 	struct ath11k_skb_rxcb *rxcb;
4301 	struct hal_reo_entrance_ring *ent_desc =
4302 			(struct hal_reo_entrance_ring *)ring_entry;
4303 	int buf_id;
4304 
4305 	ath11k_hal_rx_reo_ent_buf_paddr_get(ring_entry, &paddr,
4306 					    &sw_cookie, &p_last_buf_addr_info,
4307 					    &msdu_cnt);
4308 
4309 	if (FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_PUSH_REASON,
4310 		      ent_desc->info1) ==
4311 		      HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) {
4312 		u8 rxdma_err =
4313 			FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE,
4314 				  ent_desc->info1);
4315 		if (rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_FLUSH_REQUEST_ERR ||
4316 		    rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_MPDU_LEN_ERR ||
4317 		    rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_OVERFLOW_ERR) {
4318 			drop_mpdu = true;
4319 			pmon->rx_mon_stats.dest_mpdu_drop++;
4320 		}
4321 	}
4322 
4323 	is_frag = false;
4324 	is_first_msdu = true;
4325 
4326 	do {
4327 		if (pmon->mon_last_linkdesc_paddr == paddr) {
4328 			pmon->rx_mon_stats.dup_mon_linkdesc_cnt++;
4329 			return rx_bufs_used;
4330 		}
4331 
4332 		rx_msdu_link_desc =
4333 			(void *)pmon->link_desc_banks[sw_cookie].vaddr +
4334 			(paddr - pmon->link_desc_banks[sw_cookie].paddr);
4335 
4336 		ath11k_hal_rx_msdu_list_get(ar, rx_msdu_link_desc, &msdu_list,
4337 					    &num_msdus);
4338 
4339 		for (i = 0; i < num_msdus; i++) {
4340 			u32 l2_hdr_offset;
4341 
4342 			if (pmon->mon_last_buf_cookie == msdu_list.sw_cookie[i]) {
4343 				ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
4344 					   "i %d last_cookie %d is same\n",
4345 					   i, pmon->mon_last_buf_cookie);
4346 				drop_mpdu = true;
4347 				pmon->rx_mon_stats.dup_mon_buf_cnt++;
4348 				continue;
4349 			}
4350 			buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
4351 					   msdu_list.sw_cookie[i]);
4352 
4353 			spin_lock_bh(&rx_ring->idr_lock);
4354 			msdu = idr_find(&rx_ring->bufs_idr, buf_id);
4355 			spin_unlock_bh(&rx_ring->idr_lock);
4356 			if (!msdu) {
4357 				ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
4358 					   "msdu_pop: invalid buf_id %d\n", buf_id);
4359 				break;
4360 			}
4361 			rxcb = ATH11K_SKB_RXCB(msdu);
4362 			if (!rxcb->unmapped) {
4363 				dma_unmap_single(ar->ab->dev, rxcb->paddr,
4364 						 msdu->len +
4365 						 skb_tailroom(msdu),
4366 						 DMA_FROM_DEVICE);
4367 				rxcb->unmapped = 1;
4368 			}
4369 			if (drop_mpdu) {
4370 				ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
4371 					   "i %d drop msdu %p *ppdu_id %x\n",
4372 					   i, msdu, *ppdu_id);
4373 				dev_kfree_skb_any(msdu);
4374 				msdu = NULL;
4375 				goto next_msdu;
4376 			}
4377 
4378 			rx_desc = (struct hal_rx_desc *)msdu->data;
4379 
4380 			rx_pkt_offset = sizeof(struct hal_rx_desc);
4381 			l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad(rx_desc);
4382 
4383 			if (is_first_msdu) {
4384 				if (!ath11k_dp_rxdesc_mpdu_valid(rx_desc)) {
4385 					drop_mpdu = true;
4386 					dev_kfree_skb_any(msdu);
4387 					msdu = NULL;
4388 					pmon->mon_last_linkdesc_paddr = paddr;
4389 					goto next_msdu;
4390 				}
4391 
4392 				msdu_ppdu_id =
4393 					ath11k_dp_rxdesc_get_ppduid(rx_desc);
4394 
4395 				if (ath11k_dp_rx_mon_comp_ppduid(msdu_ppdu_id,
4396 								 ppdu_id,
4397 								 &rx_bufs_used)) {
4398 					if (rx_bufs_used) {
4399 						drop_mpdu = true;
4400 						dev_kfree_skb_any(msdu);
4401 						msdu = NULL;
4402 						goto next_msdu;
4403 					}
4404 					return rx_bufs_used;
4405 				}
4406 				pmon->mon_last_linkdesc_paddr = paddr;
4407 				is_first_msdu = false;
4408 			}
4409 			ath11k_dp_mon_get_buf_len(&msdu_list.msdu_info[i],
4410 						  &is_frag, &total_len,
4411 						  &frag_len, &msdu_cnt);
4412 			rx_buf_size = rx_pkt_offset + l2_hdr_offset + frag_len;
4413 
4414 			ath11k_dp_pkt_set_pktlen(msdu, rx_buf_size);
4415 
4416 			if (!(*head_msdu))
4417 				*head_msdu = msdu;
4418 			else if (last)
4419 				last->next = msdu;
4420 
4421 			last = msdu;
4422 next_msdu:
4423 			pmon->mon_last_buf_cookie = msdu_list.sw_cookie[i];
4424 			rx_bufs_used++;
4425 			spin_lock_bh(&rx_ring->idr_lock);
4426 			idr_remove(&rx_ring->bufs_idr, buf_id);
4427 			spin_unlock_bh(&rx_ring->idr_lock);
4428 		}
4429 
4430 		ath11k_dp_rx_mon_next_link_desc_get(rx_msdu_link_desc, &paddr,
4431 						    &sw_cookie,
4432 						    &p_buf_addr_info);
4433 
4434 		if (ath11k_dp_rx_monitor_link_desc_return(ar,
4435 							  p_last_buf_addr_info,
4436 							  dp->mac_id))
4437 			ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
4438 				   "dp_rx_monitor_link_desc_return failed");
4439 
4440 		p_last_buf_addr_info = p_buf_addr_info;
4441 
4442 	} while (paddr && msdu_cnt);
4443 
4444 	if (last)
4445 		last->next = NULL;
4446 
4447 	*tail_msdu = msdu;
4448 
4449 	if (msdu_cnt == 0)
4450 		*npackets = 1;
4451 
4452 	return rx_bufs_used;
4453 }
4454 
4455 static void ath11k_dp_rx_msdus_set_payload(struct sk_buff *msdu)
4456 {
4457 	u32 rx_pkt_offset, l2_hdr_offset;
4458 
4459 	rx_pkt_offset = sizeof(struct hal_rx_desc);
4460 	l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad((struct hal_rx_desc *)msdu->data);
4461 	skb_pull(msdu, rx_pkt_offset + l2_hdr_offset);
4462 }
4463 
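/* Stitch the popped MSDU chain back into a single monitor MPDU. For raw
 * decap only the rx descriptors and FCS are stripped; for native wifi
 * decap the 802.11 QoS header is rebuilt in front of each MSDU payload.
 * Returns the head skb or NULL on failure.
 */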
4464 static struct sk_buff *
4465 ath11k_dp_rx_mon_merg_msdus(struct ath11k *ar,
4466 			    u32 mac_id, struct sk_buff *head_msdu,
4467 			    struct sk_buff *last_msdu,
4468 			    struct ieee80211_rx_status *rxs)
4469 {
4470 	struct sk_buff *msdu, *mpdu_buf, *prev_buf;
4471 	u32 decap_format, wifi_hdr_len;
4472 	struct hal_rx_desc *rx_desc;
4473 	char *hdr_desc;
4474 	u8 *dest;
4475 	struct ieee80211_hdr_3addr *wh;
4476 
4477 	mpdu_buf = NULL;
4478 
4479 	if (!head_msdu)
4480 		goto err_merge_fail;
4481 
4482 	rx_desc = (struct hal_rx_desc *)head_msdu->data;
4483 
4484 	if (ath11k_dp_rxdesc_get_mpdulen_err(rx_desc))
4485 		return NULL;
4486 
4487 	decap_format = ath11k_dp_rxdesc_get_decap_format(rx_desc);
4488 
4489 	ath11k_dp_rx_h_ppdu(ar, rx_desc, rxs);
4490 
4491 	if (decap_format == DP_RX_DECAP_TYPE_RAW) {
4492 		ath11k_dp_rx_msdus_set_payload(head_msdu);
4493 
4494 		prev_buf = head_msdu;
4495 		msdu = head_msdu->next;
4496 
4497 		while (msdu) {
4498 			ath11k_dp_rx_msdus_set_payload(msdu);
4499 
4500 			prev_buf = msdu;
4501 			msdu = msdu->next;
4502 		}
4503 
4504 		prev_buf->next = NULL;
4505 
4506 		skb_trim(prev_buf, prev_buf->len - HAL_RX_FCS_LEN);
4507 	} else if (decap_format == DP_RX_DECAP_TYPE_NATIVE_WIFI) {
4508 		__le16 qos_field;
4509 		u8 qos_pkt = 0;
4510 
4511 		rx_desc = (struct hal_rx_desc *)head_msdu->data;
4512 		hdr_desc = ath11k_dp_rxdesc_get_80211hdr(rx_desc);
4513 
4514 		/* Base size */
4515 		wifi_hdr_len = sizeof(struct ieee80211_hdr_3addr);
4516 		wh = (struct ieee80211_hdr_3addr *)hdr_desc;
4517 
4518 		if (ieee80211_is_data_qos(wh->frame_control)) {
4519 			struct ieee80211_qos_hdr *qwh =
4520 					(struct ieee80211_qos_hdr *)hdr_desc;
4521 
4522 			qos_field = qwh->qos_ctrl;
4523 			qos_pkt = 1;
4524 		}
4525 		msdu = head_msdu;
4526 
4527 		while (msdu) {
4528 			rx_desc = (struct hal_rx_desc *)msdu->data;
4529 			hdr_desc = ath11k_dp_rxdesc_get_80211hdr(rx_desc);
4530 
4531 			if (qos_pkt) {
4532 				dest = skb_push(msdu, sizeof(__le16));
4533 				if (!dest)
4534 					goto err_merge_fail;
4535 				memcpy(dest, hdr_desc, wifi_hdr_len);
4536 				memcpy(dest + wifi_hdr_len,
4537 				       (u8 *)&qos_field, sizeof(__le16));
4538 			}
4539 			ath11k_dp_rx_msdus_set_payload(msdu);
4540 			prev_buf = msdu;
4541 			msdu = msdu->next;
4542 		}
4543 		dest = skb_put(prev_buf, HAL_RX_FCS_LEN);
4544 		if (!dest)
4545 			goto err_merge_fail;
4546 
4547 		ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
4548 			   "mpdu_buf %pK mpdu_buf->len %u",
4549 			   prev_buf, prev_buf->len);
4550 	} else {
4551 		ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
4552 			   "decap format %d is not supported!\n",
4553 			   decap_format);
4554 		goto err_merge_fail;
4555 	}
4556 
4557 	return head_msdu;
4558 
4559 err_merge_fail:
4560 	if (mpdu_buf && decap_format != DP_RX_DECAP_TYPE_RAW) {
4561 		ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
4562 			   "err_merge_fail mpdu_buf %pK", mpdu_buf);
4563 		/* Free the head buffer */
4564 		dev_kfree_skb_any(mpdu_buf);
4565 	}
4566 	return NULL;
4567 }
4568 
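/* Deliver one reassembled monitor MPDU to mac80211: merge the MSDU chain,
 * copy the rx status into each skb with RX_FLAG_ONLY_MONITOR set and pass
 * them up, freeing the chain on merge failure.
 */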
4569 static int ath11k_dp_rx_mon_deliver(struct ath11k *ar, u32 mac_id,
4570 				    struct sk_buff *head_msdu,
4571 				    struct sk_buff *tail_msdu,
4572 				    struct napi_struct *napi)
4573 {
4574 	struct ath11k_pdev_dp *dp = &ar->dp;
4575 	struct sk_buff *mon_skb, *skb_next, *header;
4576 	struct ieee80211_rx_status *rxs = &dp->rx_status, *status;
4577 
4578 	mon_skb = ath11k_dp_rx_mon_merg_msdus(ar, mac_id, head_msdu,
4579 					      tail_msdu, rxs);
4580 
4581 	if (!mon_skb)
4582 		goto mon_deliver_fail;
4583 
4584 	header = mon_skb;
4585 
4586 	rxs->flag = 0;
4587 	do {
4588 		skb_next = mon_skb->next;
4589 		if (!skb_next)
4590 			rxs->flag &= ~RX_FLAG_AMSDU_MORE;
4591 		else
4592 			rxs->flag |= RX_FLAG_AMSDU_MORE;
4593 
4594 		if (mon_skb == header) {
4595 			header = NULL;
4596 			rxs->flag &= ~RX_FLAG_ALLOW_SAME_PN;
4597 		} else {
4598 			rxs->flag |= RX_FLAG_ALLOW_SAME_PN;
4599 		}
4600 		rxs->flag |= RX_FLAG_ONLY_MONITOR;
4601 
4602 		status = IEEE80211_SKB_RXCB(mon_skb);
4603 		*status = *rxs;
4604 
4605 		ath11k_dp_rx_deliver_msdu(ar, napi, mon_skb);
4606 		mon_skb = skb_next;
4607 	} while (mon_skb);
4608 	rxs->flag = 0;
4609 
4610 	return 0;
4611 
4612 mon_deliver_fail:
4613 	mon_skb = head_msdu;
4614 	while (mon_skb) {
4615 		skb_next = mon_skb->next;
4616 		dev_kfree_skb_any(mon_skb);
4617 		mon_skb = skb_next;
4618 	}
4619 	return -EINVAL;
4620 }
4621 
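/* Reap the monitor destination ring for the PPDU whose status has just
 * been parsed: pop each MPDU, deliver the reassembled frames to mac80211
 * and replenish the monitor buffer ring with the buffers consumed.
 */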
4622 static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, u32 quota,
4623 					  struct napi_struct *napi)
4624 {
4625 	struct ath11k_pdev_dp *dp = &ar->dp;
4626 	struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
4627 	void *ring_entry;
4628 	void *mon_dst_srng;
4629 	u32 ppdu_id;
4630 	u32 rx_bufs_used;
4631 	struct ath11k_pdev_mon_stats *rx_mon_stats;
4632 	u32 npackets = 0;
4633 
4634 	mon_dst_srng = &ar->ab->hal.srng_list[dp->rxdma_mon_dst_ring.ring_id];
4635 
4636 	if (!mon_dst_srng) {
4637 		ath11k_warn(ar->ab,
4638 			    "HAL Monitor Destination Ring Init Failed -- %pK",
4639 			    mon_dst_srng);
4640 		return;
4641 	}
4642 
4643 	spin_lock_bh(&pmon->mon_lock);
4644 
4645 	ath11k_hal_srng_access_begin(ar->ab, mon_dst_srng);
4646 
4647 	ppdu_id = pmon->mon_ppdu_info.ppdu_id;
4648 	rx_bufs_used = 0;
4649 	rx_mon_stats = &pmon->rx_mon_stats;
4650 
4651 	while ((ring_entry = ath11k_hal_srng_dst_peek(ar->ab, mon_dst_srng))) {
4652 		struct sk_buff *head_msdu, *tail_msdu;
4653 
4654 		head_msdu = NULL;
4655 		tail_msdu = NULL;
4656 
4657 		rx_bufs_used += ath11k_dp_rx_mon_mpdu_pop(ar, ring_entry,
4658 							  &head_msdu,
4659 							  &tail_msdu,
4660 							  &npackets, &ppdu_id);
4661 
4662 		if (ppdu_id != pmon->mon_ppdu_info.ppdu_id) {
4663 			pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
4664 			ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
4665 				   "dest_rx: new ppdu_id %x != status ppdu_id %x",
4666 				   ppdu_id, pmon->mon_ppdu_info.ppdu_id);
4667 			break;
4668 		}
4669 		if (head_msdu && tail_msdu) {
4670 			ath11k_dp_rx_mon_deliver(ar, dp->mac_id, head_msdu,
4671 						 tail_msdu, napi);
4672 			rx_mon_stats->dest_mpdu_done++;
4673 		}
4674 
4675 		ring_entry = ath11k_hal_srng_dst_get_next_entry(ar->ab,
4676 								mon_dst_srng);
4677 	}
4678 	ath11k_hal_srng_access_end(ar->ab, mon_dst_srng);
4679 
4680 	spin_unlock_bh(&pmon->mon_lock);
4681 
4682 	if (rx_bufs_used) {
4683 		rx_mon_stats->dest_ppdu_done++;
4684 		ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id,
4685 					   &dp->rxdma_mon_buf_ring,
4686 					   rx_bufs_used,
4687 					   HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC);
4688 	}
4689 }
4690 
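/* Parse the queued monitor status buffers TLV by TLV. Whenever a complete
 * PPDU has been seen, process the corresponding monitor destination ring
 * entries before moving on to the next status buffer.
 */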
4691 static void ath11k_dp_rx_mon_status_process_tlv(struct ath11k *ar,
4692 						u32 quota,
4693 						struct napi_struct *napi)
4694 {
4695 	struct ath11k_pdev_dp *dp = &ar->dp;
4696 	struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
4697 	struct hal_rx_mon_ppdu_info *ppdu_info;
4698 	struct sk_buff *status_skb;
4699 	u32 tlv_status = HAL_TLV_STATUS_BUF_DONE;
4700 	struct ath11k_pdev_mon_stats *rx_mon_stats;
4701 
4702 	ppdu_info = &pmon->mon_ppdu_info;
4703 	rx_mon_stats = &pmon->rx_mon_stats;
4704 
4705 	if (pmon->mon_ppdu_status != DP_PPDU_STATUS_START)
4706 		return;
4707 
4708 	while (!skb_queue_empty(&pmon->rx_status_q)) {
4709 		status_skb = skb_dequeue(&pmon->rx_status_q);
4710 
4711 		tlv_status = ath11k_hal_rx_parse_mon_status(ar->ab, ppdu_info,
4712 							    status_skb);
4713 		if (tlv_status == HAL_TLV_STATUS_PPDU_DONE) {
4714 			rx_mon_stats->status_ppdu_done++;
4715 			pmon->mon_ppdu_status = DP_PPDU_STATUS_DONE;
4716 			ath11k_dp_rx_mon_dest_process(ar, quota, napi);
4717 			pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
4718 		}
4719 		dev_kfree_skb_any(status_skb);
4720 	}
4721 }
4722 
4723 static int ath11k_dp_mon_process_rx(struct ath11k_base *ab, int mac_id,
4724 				    struct napi_struct *napi, int budget)
4725 {
4726 	struct ath11k *ar = ab->pdevs[mac_id].ar;
4727 	struct ath11k_pdev_dp *dp = &ar->dp;
4728 	struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
4729 	int num_buffs_reaped = 0;
4730 
4731 	num_buffs_reaped = ath11k_dp_rx_reap_mon_status_ring(ar->ab, dp->mac_id, &budget,
4732 							     &pmon->rx_status_q);
4733 	if (num_buffs_reaped)
4734 		ath11k_dp_rx_mon_status_process_tlv(ar, budget, napi);
4735 
4736 	return num_buffs_reaped;
4737 }
4738 
4739 int ath11k_dp_rx_process_mon_rings(struct ath11k_base *ab, int mac_id,
4740 				   struct napi_struct *napi, int budget)
4741 {
4742 	struct ath11k *ar = ab->pdevs[mac_id].ar;
4743 	int ret = 0;
4744 
4745 	if (test_bit(ATH11K_FLAG_MONITOR_ENABLED, &ar->monitor_flags))
4746 		ret = ath11k_dp_mon_process_rx(ab, mac_id, napi, budget);
4747 	else
4748 		ret = ath11k_dp_rx_process_mon_status(ab, mac_id, napi, budget);
4749 	return ret;
4750 }
4751 
4752 static int ath11k_dp_rx_pdev_mon_status_attach(struct ath11k *ar)
4753 {
4754 	struct ath11k_pdev_dp *dp = &ar->dp;
4755 	struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
4756 
4757 	skb_queue_head_init(&pmon->rx_status_q);
4758 
4759 	pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
4760 
4761 	memset(&pmon->rx_mon_stats, 0,
4762 	       sizeof(pmon->rx_mon_stats));
4763 	return 0;
4764 }
4765 
4766 int ath11k_dp_rx_pdev_mon_attach(struct ath11k *ar)
4767 {
4768 	struct ath11k_pdev_dp *dp = &ar->dp;
4769 	struct ath11k_mon_data *pmon = &dp->mon_data;
4770 	struct hal_srng *mon_desc_srng = NULL;
4771 	struct dp_srng *dp_srng;
4772 	int ret = 0;
4773 	u32 n_link_desc = 0;
4774 
4775 	ret = ath11k_dp_rx_pdev_mon_status_attach(ar);
4776 	if (ret) {
4777 		ath11k_warn(ar->ab, "pdev_mon_status_attach() failed");
4778 		return ret;
4779 	}
4780 
4781 	dp_srng = &dp->rxdma_mon_desc_ring;
4782 	n_link_desc = dp_srng->size /
4783 		ath11k_hal_srng_get_entrysize(HAL_RXDMA_MONITOR_DESC);
4784 	mon_desc_srng =
4785 		&ar->ab->hal.srng_list[dp->rxdma_mon_desc_ring.ring_id];
4786 
4787 	ret = ath11k_dp_link_desc_setup(ar->ab, pmon->link_desc_banks,
4788 					HAL_RXDMA_MONITOR_DESC, mon_desc_srng,
4789 					n_link_desc);
4790 	if (ret) {
4791 		ath11k_warn(ar->ab, "mon_link_desc_pool_setup() failed");
4792 		return ret;
4793 	}
4794 	pmon->mon_last_linkdesc_paddr = 0;
4795 	pmon->mon_last_buf_cookie = DP_RX_DESC_COOKIE_MAX + 1;
4796 	spin_lock_init(&pmon->mon_lock);
4797 	return 0;
4798 }
4799 
4800 static int ath11k_dp_mon_link_free(struct ath11k *ar)
4801 {
4802 	struct ath11k_pdev_dp *dp = &ar->dp;
4803 	struct ath11k_mon_data *pmon = &dp->mon_data;
4804 
4805 	ath11k_dp_link_desc_cleanup(ar->ab, pmon->link_desc_banks,
4806 				    HAL_RXDMA_MONITOR_DESC,
4807 				    &dp->rxdma_mon_desc_ring);
4808 	return 0;
4809 }
4810 
4811 int ath11k_dp_rx_pdev_mon_detach(struct ath11k *ar)
4812 {
4813 	ath11k_dp_mon_link_free(ar);
4814 	return 0;
4815 }
4816