// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/ieee80211.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <crypto/hash.h>
#include "core.h"
#include "debug.h"
#include "hal_desc.h"
#include "hw.h"
#include "dp_rx.h"
#include "hal_rx.h"
#include "dp_tx.h"
#include "peer.h"
#include "dp_mon.h"

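/* Fragment reassembly timeout. Note the value is expressed in jiffies
 * (2 * HZ == 2 seconds) even though the macro name carries an _MS suffix.
 */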
#define ATH12K_DP_RX_FRAGMENT_TIMEOUT_MS (2 * HZ)

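/* The helpers below are thin wrappers around the per-chip hal_ops vtable in
 * hw_params. They keep the rx data path independent of the exact rx
 * descriptor layout, which differs between hardware generations.
 */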
static enum hal_encrypt_type ath12k_dp_rx_h_enctype(struct ath12k_base *ab,
						    struct hal_rx_desc *desc)
{
	if (!ab->hw_params->hal_ops->rx_desc_encrypt_valid(desc))
		return HAL_ENCRYPT_TYPE_OPEN;

	return ab->hw_params->hal_ops->rx_desc_get_encrypt_type(desc);
}

u8 ath12k_dp_rx_h_decap_type(struct ath12k_base *ab,
			     struct hal_rx_desc *desc)
{
	return ab->hw_params->hal_ops->rx_desc_get_decap_type(desc);
}

static u8 ath12k_dp_rx_h_mesh_ctl_present(struct ath12k_base *ab,
					  struct hal_rx_desc *desc)
{
	return ab->hw_params->hal_ops->rx_desc_get_mesh_ctl(desc);
}

static bool ath12k_dp_rx_h_seq_ctrl_valid(struct ath12k_base *ab,
					  struct hal_rx_desc *desc)
{
	return ab->hw_params->hal_ops->rx_desc_get_mpdu_seq_ctl_vld(desc);
}

static bool ath12k_dp_rx_h_fc_valid(struct ath12k_base *ab,
				    struct hal_rx_desc *desc)
{
	return ab->hw_params->hal_ops->rx_desc_get_mpdu_fc_valid(desc);
}

static bool ath12k_dp_rx_h_more_frags(struct ath12k_base *ab,
				      struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;

	hdr = (struct ieee80211_hdr *)(skb->data + ab->hw_params->hal_desc_sz);
	return ieee80211_has_morefrags(hdr->frame_control);
}

static u16 ath12k_dp_rx_h_frag_no(struct ath12k_base *ab,
				  struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;

	hdr = (struct ieee80211_hdr *)(skb->data + ab->hw_params->hal_desc_sz);
	return le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
}

static u16 ath12k_dp_rx_h_seq_no(struct ath12k_base *ab,
				 struct hal_rx_desc *desc)
{
	return ab->hw_params->hal_ops->rx_desc_get_mpdu_start_seq_no(desc);
}

static bool ath12k_dp_rx_h_msdu_done(struct ath12k_base *ab,
				     struct hal_rx_desc *desc)
{
	return ab->hw_params->hal_ops->dp_rx_h_msdu_done(desc);
}

static bool ath12k_dp_rx_h_l4_cksum_fail(struct ath12k_base *ab,
					 struct hal_rx_desc *desc)
{
	return ab->hw_params->hal_ops->dp_rx_h_l4_cksum_fail(desc);
}

static bool ath12k_dp_rx_h_ip_cksum_fail(struct ath12k_base *ab,
					 struct hal_rx_desc *desc)
{
	return ab->hw_params->hal_ops->dp_rx_h_ip_cksum_fail(desc);
}

static bool ath12k_dp_rx_h_is_decrypted(struct ath12k_base *ab,
					struct hal_rx_desc *desc)
{
	return ab->hw_params->hal_ops->dp_rx_h_is_decrypted(desc);
}

u32 ath12k_dp_rx_h_mpdu_err(struct ath12k_base *ab,
			    struct hal_rx_desc *desc)
{
	return ab->hw_params->hal_ops->dp_rx_h_mpdu_err(desc);
}

static u16 ath12k_dp_rx_h_msdu_len(struct ath12k_base *ab,
				   struct hal_rx_desc *desc)
{
	return ab->hw_params->hal_ops->rx_desc_get_msdu_len(desc);
}

static u8 ath12k_dp_rx_h_sgi(struct ath12k_base *ab,
			     struct hal_rx_desc *desc)
{
	return ab->hw_params->hal_ops->rx_desc_get_msdu_sgi(desc);
}

static u8 ath12k_dp_rx_h_rate_mcs(struct ath12k_base *ab,
				  struct hal_rx_desc *desc)
{
	return ab->hw_params->hal_ops->rx_desc_get_msdu_rate_mcs(desc);
}

static u8 ath12k_dp_rx_h_rx_bw(struct ath12k_base *ab,
			       struct hal_rx_desc *desc)
{
	return ab->hw_params->hal_ops->rx_desc_get_msdu_rx_bw(desc);
}

static u32 ath12k_dp_rx_h_freq(struct ath12k_base *ab,
			       struct hal_rx_desc *desc)
{
	return ab->hw_params->hal_ops->rx_desc_get_msdu_freq(desc);
}

static u8 ath12k_dp_rx_h_pkt_type(struct ath12k_base *ab,
				  struct hal_rx_desc *desc)
{
	return ab->hw_params->hal_ops->rx_desc_get_msdu_pkt_type(desc);
}

static u8 ath12k_dp_rx_h_nss(struct ath12k_base *ab,
			     struct hal_rx_desc *desc)
{
	return hweight8(ab->hw_params->hal_ops->rx_desc_get_msdu_nss(desc));
}

static u8 ath12k_dp_rx_h_tid(struct ath12k_base *ab,
			     struct hal_rx_desc *desc)
{
	return ab->hw_params->hal_ops->rx_desc_get_mpdu_tid(desc);
}

static u16 ath12k_dp_rx_h_peer_id(struct ath12k_base *ab,
				  struct hal_rx_desc *desc)
{
	return ab->hw_params->hal_ops->rx_desc_get_mpdu_peer_id(desc);
}

u8 ath12k_dp_rx_h_l3pad(struct ath12k_base *ab,
			struct hal_rx_desc *desc)
{
	return ab->hw_params->hal_ops->rx_desc_get_l3_pad_bytes(desc);
}

static bool ath12k_dp_rx_h_first_msdu(struct ath12k_base *ab,
				      struct hal_rx_desc *desc)
{
	return ab->hw_params->hal_ops->rx_desc_get_first_msdu(desc);
}

static bool ath12k_dp_rx_h_last_msdu(struct ath12k_base *ab,
				     struct hal_rx_desc *desc)
{
	return ab->hw_params->hal_ops->rx_desc_get_last_msdu(desc);
}

static void ath12k_dp_rx_desc_end_tlv_copy(struct ath12k_base *ab,
					   struct hal_rx_desc *fdesc,
					   struct hal_rx_desc *ldesc)
{
	ab->hw_params->hal_ops->rx_desc_copy_end_tlv(fdesc, ldesc);
}

static void ath12k_dp_rxdesc_set_msdu_len(struct ath12k_base *ab,
					  struct hal_rx_desc *desc,
					  u16 len)
{
	ab->hw_params->hal_ops->rx_desc_set_msdu_len(desc, len);
}

static bool ath12k_dp_rx_h_is_da_mcbc(struct ath12k_base *ab,
				      struct hal_rx_desc *desc)
{
	return (ath12k_dp_rx_h_first_msdu(ab, desc) &&
		ab->hw_params->hal_ops->rx_desc_is_da_mcbc(desc));
}

static bool ath12k_dp_rxdesc_mac_addr2_valid(struct ath12k_base *ab,
					     struct hal_rx_desc *desc)
{
	return ab->hw_params->hal_ops->rx_desc_mac_addr2_valid(desc);
}

static u8 *ath12k_dp_rxdesc_get_mpdu_start_addr2(struct ath12k_base *ab,
						 struct hal_rx_desc *desc)
{
	return ab->hw_params->hal_ops->rx_desc_mpdu_start_addr2(desc);
}

static void ath12k_dp_rx_desc_get_dot11_hdr(struct ath12k_base *ab,
					    struct hal_rx_desc *desc,
					    struct ieee80211_hdr *hdr)
{
	ab->hw_params->hal_ops->rx_desc_get_dot11_hdr(desc, hdr);
}

static void ath12k_dp_rx_desc_get_crypto_header(struct ath12k_base *ab,
						struct hal_rx_desc *desc,
						u8 *crypto_hdr,
						enum hal_encrypt_type enctype)
{
	ab->hw_params->hal_ops->rx_desc_get_crypto_header(desc, crypto_hdr, enctype);
}

static u16 ath12k_dp_rxdesc_get_mpdu_frame_ctrl(struct ath12k_base *ab,
						struct hal_rx_desc *desc)
{
	return ab->hw_params->hal_ops->rx_desc_get_mpdu_frame_ctl(desc);
}

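/* Drain the monitor destination rings in budget-sized passes until a pass
 * reaps fewer entries than the budget (ring empty) or the purge timeout
 * expires.
 */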
static int ath12k_dp_purge_mon_ring(struct ath12k_base *ab)
{
	int i, reaped = 0;
	unsigned long timeout = jiffies + msecs_to_jiffies(DP_MON_PURGE_TIMEOUT_MS);

	do {
		for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++)
			reaped += ath12k_dp_mon_process_ring(ab, i, NULL,
							     DP_MON_SERVICE_BUDGET,
							     ATH12K_DP_RX_MONITOR_MODE);

		/* nothing more to reap */
		if (reaped < DP_MON_SERVICE_BUDGET)
			return 0;

	} while (time_before(jiffies, timeout));

	ath12k_warn(ab, "dp mon ring purge timeout\n");

	return -ETIMEDOUT;
}

/* Returns number of Rx buffers replenished */
int ath12k_dp_rx_bufs_replenish(struct ath12k_base *ab, int mac_id,
				struct dp_rxdma_ring *rx_ring,
				int req_entries,
				enum hal_rx_buf_return_buf_manager mgr,
				bool hw_cc)
{
	struct ath12k_buffer_addr *desc;
	struct hal_srng *srng;
	struct sk_buff *skb;
	int num_free;
	int num_remain;
	int buf_id;
	u32 cookie;
	dma_addr_t paddr;
	struct ath12k_dp *dp = &ab->dp;
	struct ath12k_rx_desc_info *rx_desc;

	req_entries = min(req_entries, rx_ring->bufs_max);

	srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];

	spin_lock_bh(&srng->lock);

	ath12k_hal_srng_access_begin(ab, srng);

	num_free = ath12k_hal_srng_src_num_free(ab, srng, true);
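	/* With req_entries == 0 this is an opportunistic refill: only top up
	 * when more than 3/4 of the ring is empty.
	 */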
	if (!req_entries && (num_free > (rx_ring->bufs_max * 3) / 4))
		req_entries = num_free;

	req_entries = min(num_free, req_entries);
	num_remain = req_entries;

	while (num_remain > 0) {
		skb = dev_alloc_skb(DP_RX_BUFFER_SIZE +
				    DP_RX_BUFFER_ALIGN_SIZE);
		if (!skb)
			break;

		if (!IS_ALIGNED((unsigned long)skb->data,
				DP_RX_BUFFER_ALIGN_SIZE)) {
			skb_pull(skb,
				 PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) -
				 skb->data);
		}

		paddr = dma_map_single(ab->dev, skb->data,
				       skb->len + skb_tailroom(skb),
				       DMA_FROM_DEVICE);
		if (dma_mapping_error(ab->dev, paddr))
			goto fail_free_skb;

		if (hw_cc) {
			spin_lock_bh(&dp->rx_desc_lock);

			/* Get desc from free list and store in used list
			 * for cleanup purposes
			 *
			 * TODO: pass the removed descs rather than
			 * add/read to optimize
			 */
			rx_desc = list_first_entry_or_null(&dp->rx_desc_free_list,
							   struct ath12k_rx_desc_info,
							   list);
			if (!rx_desc) {
				spin_unlock_bh(&dp->rx_desc_lock);
				goto fail_dma_unmap;
			}

			rx_desc->skb = skb;
			cookie = rx_desc->cookie;
			list_del(&rx_desc->list);
			list_add_tail(&rx_desc->list, &dp->rx_desc_used_list);

			spin_unlock_bh(&dp->rx_desc_lock);
		} else {
			spin_lock_bh(&rx_ring->idr_lock);
			buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0,
					   rx_ring->bufs_max * 3, GFP_ATOMIC);
			spin_unlock_bh(&rx_ring->idr_lock);
			if (buf_id < 0)
				goto fail_dma_unmap;
			cookie = u32_encode_bits(mac_id,
						 DP_RXDMA_BUF_COOKIE_PDEV_ID) |
				 u32_encode_bits(buf_id,
						 DP_RXDMA_BUF_COOKIE_BUF_ID);
		}

		desc = ath12k_hal_srng_src_get_next_entry(ab, srng);
		if (!desc)
			goto fail_buf_unassign;

		ATH12K_SKB_RXCB(skb)->paddr = paddr;

		num_remain--;

		ath12k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr);
	}

	ath12k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	return req_entries - num_remain;

fail_buf_unassign:
	if (hw_cc) {
		spin_lock_bh(&dp->rx_desc_lock);
		list_del(&rx_desc->list);
		list_add_tail(&rx_desc->list, &dp->rx_desc_free_list);
		rx_desc->skb = NULL;
		spin_unlock_bh(&dp->rx_desc_lock);
	} else {
		spin_lock_bh(&rx_ring->idr_lock);
		idr_remove(&rx_ring->bufs_idr, buf_id);
		spin_unlock_bh(&rx_ring->idr_lock);
	}
fail_dma_unmap:
	dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
			 DMA_FROM_DEVICE);
fail_free_skb:
	dev_kfree_skb_any(skb);

	ath12k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	return req_entries - num_remain;
}

static int ath12k_dp_rxdma_buf_ring_free(struct ath12k_base *ab,
					 struct dp_rxdma_ring *rx_ring)
{
	struct sk_buff *skb;
	int buf_id;

	spin_lock_bh(&rx_ring->idr_lock);
	idr_for_each_entry(&rx_ring->bufs_idr, skb, buf_id) {
		idr_remove(&rx_ring->bufs_idr, buf_id);
		/* TODO: Understand where internal driver does this dma_unmap
		 * of rxdma_buffer.
		 */
		dma_unmap_single(ab->dev, ATH12K_SKB_RXCB(skb)->paddr,
				 skb->len + skb_tailroom(skb), DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
	}

	idr_destroy(&rx_ring->bufs_idr);
	spin_unlock_bh(&rx_ring->idr_lock);

	return 0;
}

static int ath12k_dp_rxdma_buf_free(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = &ab->dp;
	struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;

	ath12k_dp_rxdma_buf_ring_free(ab, rx_ring);

	rx_ring = &dp->rxdma_mon_buf_ring;
	ath12k_dp_rxdma_buf_ring_free(ab, rx_ring);

	rx_ring = &dp->tx_mon_buf_ring;
	ath12k_dp_rxdma_buf_ring_free(ab, rx_ring);

	return 0;
}

static int ath12k_dp_rxdma_ring_buf_setup(struct ath12k_base *ab,
					  struct dp_rxdma_ring *rx_ring,
					  u32 ringtype)
{
	int num_entries;

	num_entries = rx_ring->refill_buf_ring.size /
		ath12k_hal_srng_get_entrysize(ab, ringtype);

	rx_ring->bufs_max = num_entries;
	if ((ringtype == HAL_RXDMA_MONITOR_BUF) || (ringtype == HAL_TX_MONITOR_BUF))
		ath12k_dp_mon_buf_replenish(ab, rx_ring, num_entries);
	else
		ath12k_dp_rx_bufs_replenish(ab, 0, rx_ring, num_entries,
					    ab->hw_params->hal_params->rx_buf_rbm,
					    ringtype == HAL_RXDMA_BUF);
	return 0;
}

static int ath12k_dp_rxdma_buf_setup(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = &ab->dp;
	struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
	int ret;

	ret = ath12k_dp_rxdma_ring_buf_setup(ab, rx_ring,
					     HAL_RXDMA_BUF);
	if (ret) {
		ath12k_warn(ab,
			    "failed to setup HAL_RXDMA_BUF\n");
		return ret;
	}

	if (ab->hw_params->rxdma1_enable) {
		rx_ring = &dp->rxdma_mon_buf_ring;
		ret = ath12k_dp_rxdma_ring_buf_setup(ab, rx_ring,
						     HAL_RXDMA_MONITOR_BUF);
		if (ret) {
			ath12k_warn(ab,
				    "failed to setup HAL_RXDMA_MONITOR_BUF\n");
			return ret;
		}

		rx_ring = &dp->tx_mon_buf_ring;
		ret = ath12k_dp_rxdma_ring_buf_setup(ab, rx_ring,
						     HAL_TX_MONITOR_BUF);
		if (ret) {
			ath12k_warn(ab,
				    "failed to setup HAL_TX_MONITOR_BUF\n");
			return ret;
		}
	}

	return 0;
}

static void ath12k_dp_rx_pdev_srng_free(struct ath12k *ar)
{
	struct ath12k_pdev_dp *dp = &ar->dp;
	struct ath12k_base *ab = ar->ab;
	int i;

	for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
		ath12k_dp_srng_cleanup(ab, &dp->rxdma_mon_dst_ring[i]);
		ath12k_dp_srng_cleanup(ab, &dp->tx_mon_dst_ring[i]);
	}
}

void ath12k_dp_rx_pdev_reo_cleanup(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = &ab->dp;
	int i;

	for (i = 0; i < DP_REO_DST_RING_MAX; i++)
		ath12k_dp_srng_cleanup(ab, &dp->reo_dst_ring[i]);
}

int ath12k_dp_rx_pdev_reo_setup(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = &ab->dp;
	int ret;
	int i;

	for (i = 0; i < DP_REO_DST_RING_MAX; i++) {
		ret = ath12k_dp_srng_setup(ab, &dp->reo_dst_ring[i],
					   HAL_REO_DST, i, 0,
					   DP_REO_DST_RING_SIZE);
		if (ret) {
			ath12k_warn(ab, "failed to setup reo_dst_ring\n");
			goto err_reo_cleanup;
		}
	}

	return 0;

err_reo_cleanup:
	ath12k_dp_rx_pdev_reo_cleanup(ab);

	return ret;
}

static int ath12k_dp_rx_pdev_srng_alloc(struct ath12k *ar)
{
	struct ath12k_pdev_dp *dp = &ar->dp;
	struct ath12k_base *ab = ar->ab;
	int i;
	int ret;
	u32 mac_id = dp->mac_id;

	for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
		ret = ath12k_dp_srng_setup(ar->ab,
					   &dp->rxdma_mon_dst_ring[i],
					   HAL_RXDMA_MONITOR_DST,
					   0, mac_id + i,
					   DP_RXDMA_MONITOR_DST_RING_SIZE);
		if (ret) {
			ath12k_warn(ar->ab,
				    "failed to setup HAL_RXDMA_MONITOR_DST\n");
			return ret;
		}

		ret = ath12k_dp_srng_setup(ar->ab,
					   &dp->tx_mon_dst_ring[i],
					   HAL_TX_MONITOR_DST,
					   0, mac_id + i,
					   DP_TX_MONITOR_DEST_RING_SIZE);
		if (ret) {
			ath12k_warn(ar->ab,
				    "failed to setup HAL_TX_MONITOR_DST\n");
			return ret;
		}
	}

	return 0;
}

void ath12k_dp_rx_reo_cmd_list_cleanup(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = &ab->dp;
	struct ath12k_dp_rx_reo_cmd *cmd, *tmp;
	struct ath12k_dp_rx_reo_cache_flush_elem *cmd_cache, *tmp_cache;

	spin_lock_bh(&dp->reo_cmd_lock);
	list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
		list_del(&cmd->list);
		dma_unmap_single(ab->dev, cmd->data.paddr,
				 cmd->data.size, DMA_BIDIRECTIONAL);
		kfree(cmd->data.vaddr);
		kfree(cmd);
	}

	list_for_each_entry_safe(cmd_cache, tmp_cache,
				 &dp->reo_cmd_cache_flush_list, list) {
		list_del(&cmd_cache->list);
		dp->reo_cmd_cache_flush_count--;
		dma_unmap_single(ab->dev, cmd_cache->data.paddr,
				 cmd_cache->data.size, DMA_BIDIRECTIONAL);
		kfree(cmd_cache->data.vaddr);
		kfree(cmd_cache);
	}
	spin_unlock_bh(&dp->reo_cmd_lock);
}

static void ath12k_dp_reo_cmd_free(struct ath12k_dp *dp, void *ctx,
				   enum hal_reo_cmd_status status)
{
	struct ath12k_dp_rx_tid *rx_tid = ctx;

	if (status != HAL_REO_CMD_SUCCESS)
		ath12k_warn(dp->ab, "failed to flush rx tid hw desc, tid %d status %d\n",
			    rx_tid->tid, status);

	dma_unmap_single(dp->ab->dev, rx_tid->paddr, rx_tid->size,
			 DMA_BIDIRECTIONAL);
	kfree(rx_tid->vaddr);
	rx_tid->vaddr = NULL;
}

static int ath12k_dp_reo_cmd_send(struct ath12k_base *ab, struct ath12k_dp_rx_tid *rx_tid,
				  enum hal_reo_cmd_type type,
				  struct ath12k_hal_reo_cmd *cmd,
				  void (*cb)(struct ath12k_dp *dp, void *ctx,
					     enum hal_reo_cmd_status status))
{
	struct ath12k_dp *dp = &ab->dp;
	struct ath12k_dp_rx_reo_cmd *dp_cmd;
	struct hal_srng *cmd_ring;
	int cmd_num;

	cmd_ring = &ab->hal.srng_list[dp->reo_cmd_ring.ring_id];
	cmd_num = ath12k_hal_reo_cmd_send(ab, cmd_ring, type, cmd);

	/* cmd_num should start from 1, during failure return the error code */
	if (cmd_num < 0)
		return cmd_num;

	/* reo cmd ring descriptors has cmd_num starting from 1 */
	if (cmd_num == 0)
		return -EINVAL;

	if (!cb)
		return 0;

	/* Can this be optimized so that we keep the pending command list only
	 * for tid delete command to free up the resource on the command status
	 * indication?
	 */
	dp_cmd = kzalloc(sizeof(*dp_cmd), GFP_ATOMIC);

	if (!dp_cmd)
		return -ENOMEM;

	memcpy(&dp_cmd->data, rx_tid, sizeof(*rx_tid));
	dp_cmd->cmd_num = cmd_num;
	dp_cmd->handler = cb;

	spin_lock_bh(&dp->reo_cmd_lock);
	list_add_tail(&dp_cmd->list, &dp->reo_cmd_list);
	spin_unlock_bh(&dp->reo_cmd_lock);

	return 0;
}

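/* Flush the rx tid hw queue descriptor out of the REO cache in desc_sz
 * chunks, starting from the tail, then issue a final status-generating
 * flush for the base address; its completion handler
 * (ath12k_dp_reo_cmd_free) unmaps and frees the host memory.
 */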
static void ath12k_dp_reo_cache_flush(struct ath12k_base *ab,
				      struct ath12k_dp_rx_tid *rx_tid)
{
	struct ath12k_hal_reo_cmd cmd = {0};
	unsigned long tot_desc_sz, desc_sz;
	int ret;

	tot_desc_sz = rx_tid->size;
	desc_sz = ath12k_hal_reo_qdesc_size(0, HAL_DESC_REO_NON_QOS_TID);

	while (tot_desc_sz > desc_sz) {
		tot_desc_sz -= desc_sz;
		cmd.addr_lo = lower_32_bits(rx_tid->paddr + tot_desc_sz);
		cmd.addr_hi = upper_32_bits(rx_tid->paddr);
		ret = ath12k_dp_reo_cmd_send(ab, rx_tid,
					     HAL_REO_CMD_FLUSH_CACHE, &cmd,
					     NULL);
		if (ret)
			ath12k_warn(ab,
				    "failed to send HAL_REO_CMD_FLUSH_CACHE, tid %d (%d)\n",
				    rx_tid->tid, ret);
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.addr_lo = lower_32_bits(rx_tid->paddr);
	cmd.addr_hi = upper_32_bits(rx_tid->paddr);
	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
	ret = ath12k_dp_reo_cmd_send(ab, rx_tid,
				     HAL_REO_CMD_FLUSH_CACHE,
				     &cmd, ath12k_dp_reo_cmd_free);
	if (ret) {
		ath12k_err(ab, "failed to send HAL_REO_CMD_FLUSH_CACHE cmd, tid %d (%d)\n",
			   rx_tid->tid, ret);
		dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
				 DMA_BIDIRECTIONAL);
		kfree(rx_tid->vaddr);
		rx_tid->vaddr = NULL;
	}
}

static void ath12k_dp_rx_tid_del_func(struct ath12k_dp *dp, void *ctx,
				      enum hal_reo_cmd_status status)
{
	struct ath12k_base *ab = dp->ab;
	struct ath12k_dp_rx_tid *rx_tid = ctx;
	struct ath12k_dp_rx_reo_cache_flush_elem *elem, *tmp;

	if (status == HAL_REO_CMD_DRAIN) {
		goto free_desc;
	} else if (status != HAL_REO_CMD_SUCCESS) {
		/* Shouldn't happen! Cleanup in case of other failure? */
		ath12k_warn(ab, "failed to delete rx tid %d hw descriptor %d\n",
			    rx_tid->tid, status);
		return;
	}

	elem = kzalloc(sizeof(*elem), GFP_ATOMIC);
	if (!elem)
		goto free_desc;

	elem->ts = jiffies;
	memcpy(&elem->data, rx_tid, sizeof(*rx_tid));

	spin_lock_bh(&dp->reo_cmd_lock);
	list_add_tail(&elem->list, &dp->reo_cmd_cache_flush_list);
	dp->reo_cmd_cache_flush_count++;

	/* Flush and invalidate aged REO desc from HW cache */
	list_for_each_entry_safe(elem, tmp, &dp->reo_cmd_cache_flush_list,
				 list) {
		if (dp->reo_cmd_cache_flush_count > ATH12K_DP_RX_REO_DESC_FREE_THRES ||
		    time_after(jiffies, elem->ts +
			       msecs_to_jiffies(ATH12K_DP_RX_REO_DESC_FREE_TIMEOUT_MS))) {
			list_del(&elem->list);
			dp->reo_cmd_cache_flush_count--;

			/* Unlock the reo_cmd_lock before using ath12k_dp_reo_cmd_send()
			 * within ath12k_dp_reo_cache_flush. The reo_cmd_cache_flush_list
			 * is used in only two contexts, one is in this function called
			 * from napi and the other in ath12k_dp_free during core destroy.
			 * Before dp_free, the irqs would be disabled and would wait to
			 * synchronize. Hence there wouldn't be any race against add or
			 * delete to this list. Hence unlock-lock is safe here.
			 */
			spin_unlock_bh(&dp->reo_cmd_lock);

			ath12k_dp_reo_cache_flush(ab, &elem->data);
			kfree(elem);
			spin_lock_bh(&dp->reo_cmd_lock);
		}
	}
	spin_unlock_bh(&dp->reo_cmd_lock);

	return;
free_desc:
	dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
			 DMA_BIDIRECTIONAL);
	kfree(rx_tid->vaddr);
	rx_tid->vaddr = NULL;
}

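/* Program the REO queue descriptor address into the host-resident REO
 * queue LUT slot indexed by (peer_id, tid).
 */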
static void ath12k_peer_rx_tid_qref_setup(struct ath12k_base *ab, u16 peer_id, u16 tid,
					  dma_addr_t paddr)
{
	struct ath12k_reo_queue_ref *qref;
	struct ath12k_dp *dp = &ab->dp;

	if (!ab->hw_params->reoq_lut_support)
		return;

	/* TODO: based on ML peer or not, select the LUT. below assumes non
	 * ML peer
	 */
	qref = (struct ath12k_reo_queue_ref *)dp->reoq_lut.vaddr +
			(peer_id * (IEEE80211_NUM_TIDS + 1) + tid);

	qref->info0 = u32_encode_bits(lower_32_bits(paddr),
				      BUFFER_ADDR_INFO0_ADDR);
	qref->info1 = u32_encode_bits(upper_32_bits(paddr),
				      BUFFER_ADDR_INFO1_ADDR) |
		      u32_encode_bits(tid, DP_REO_QREF_NUM);
}

static void ath12k_peer_rx_tid_qref_reset(struct ath12k_base *ab, u16 peer_id, u16 tid)
{
	struct ath12k_reo_queue_ref *qref;
	struct ath12k_dp *dp = &ab->dp;

	if (!ab->hw_params->reoq_lut_support)
		return;

	/* TODO: based on ML peer or not, select the LUT. below assumes non
	 * ML peer
	 */
	qref = (struct ath12k_reo_queue_ref *)dp->reoq_lut.vaddr +
			(peer_id * (IEEE80211_NUM_TIDS + 1) + tid);

	qref->info0 = u32_encode_bits(0, BUFFER_ADDR_INFO0_ADDR);
	qref->info1 = u32_encode_bits(0, BUFFER_ADDR_INFO1_ADDR) |
		      u32_encode_bits(tid, DP_REO_QREF_NUM);
}

void ath12k_dp_rx_peer_tid_delete(struct ath12k *ar,
				  struct ath12k_peer *peer, u8 tid)
{
	struct ath12k_hal_reo_cmd cmd = {0};
	struct ath12k_dp_rx_tid *rx_tid = &peer->rx_tid[tid];
	int ret;

	if (!rx_tid->active)
		return;

	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
	cmd.addr_lo = lower_32_bits(rx_tid->paddr);
	cmd.addr_hi = upper_32_bits(rx_tid->paddr);
	cmd.upd0 = HAL_REO_CMD_UPD0_VLD;
	ret = ath12k_dp_reo_cmd_send(ar->ab, rx_tid,
				     HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
				     ath12k_dp_rx_tid_del_func);
	if (ret) {
		ath12k_err(ar->ab, "failed to send HAL_REO_CMD_UPDATE_RX_QUEUE cmd, tid %d (%d)\n",
			   tid, ret);
		dma_unmap_single(ar->ab->dev, rx_tid->paddr, rx_tid->size,
				 DMA_BIDIRECTIONAL);
		kfree(rx_tid->vaddr);
		rx_tid->vaddr = NULL;
	}

	ath12k_peer_rx_tid_qref_reset(ar->ab, peer->peer_id, tid);

	rx_tid->active = false;
}

/* TODO: it's strange (and ugly) that struct hal_reo_dest_ring is converted
 * to struct hal_wbm_release_ring, I couldn't figure out the logic behind
 * that.
 */
static int ath12k_dp_rx_link_desc_return(struct ath12k_base *ab,
					 struct hal_reo_dest_ring *ring,
					 enum hal_wbm_rel_bm_act action)
{
	struct hal_wbm_release_ring *link_desc = (struct hal_wbm_release_ring *)ring;
	struct hal_wbm_release_ring *desc;
	struct ath12k_dp *dp = &ab->dp;
	struct hal_srng *srng;
	int ret = 0;

	srng = &ab->hal.srng_list[dp->wbm_desc_rel_ring.ring_id];

	spin_lock_bh(&srng->lock);

	ath12k_hal_srng_access_begin(ab, srng);

	desc = ath12k_hal_srng_src_get_next_entry(ab, srng);
	if (!desc) {
		ret = -ENOBUFS;
		goto exit;
	}

	ath12k_hal_rx_msdu_link_desc_set(ab, desc, link_desc, action);

exit:
	ath12k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	return ret;
}

static void ath12k_dp_rx_frags_cleanup(struct ath12k_dp_rx_tid *rx_tid,
				       bool rel_link_desc)
{
	struct ath12k_base *ab = rx_tid->ab;

	lockdep_assert_held(&ab->base_lock);

	if (rx_tid->dst_ring_desc) {
		if (rel_link_desc)
			ath12k_dp_rx_link_desc_return(ab, rx_tid->dst_ring_desc,
						      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
		kfree(rx_tid->dst_ring_desc);
		rx_tid->dst_ring_desc = NULL;
	}

	rx_tid->cur_sn = 0;
	rx_tid->last_frag_no = 0;
	rx_tid->rx_frag_bitmap = 0;
	__skb_queue_purge(&rx_tid->rx_frags);
}

void ath12k_dp_rx_peer_tid_cleanup(struct ath12k *ar, struct ath12k_peer *peer)
{
	struct ath12k_dp_rx_tid *rx_tid;
	int i;

	lockdep_assert_held(&ar->ab->base_lock);

	for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
		rx_tid = &peer->rx_tid[i];

		ath12k_dp_rx_peer_tid_delete(ar, peer, i);
		ath12k_dp_rx_frags_cleanup(rx_tid, true);

		spin_unlock_bh(&ar->ab->base_lock);
		del_timer_sync(&rx_tid->frag_timer);
		spin_lock_bh(&ar->ab->base_lock);
	}
}

static int ath12k_peer_rx_tid_reo_update(struct ath12k *ar,
					 struct ath12k_peer *peer,
					 struct ath12k_dp_rx_tid *rx_tid,
					 u32 ba_win_sz, u16 ssn,
					 bool update_ssn)
{
	struct ath12k_hal_reo_cmd cmd = {0};
	int ret;

	cmd.addr_lo = lower_32_bits(rx_tid->paddr);
	cmd.addr_hi = upper_32_bits(rx_tid->paddr);
	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
	cmd.upd0 = HAL_REO_CMD_UPD0_BA_WINDOW_SIZE;
	cmd.ba_window_size = ba_win_sz;

	if (update_ssn) {
		cmd.upd0 |= HAL_REO_CMD_UPD0_SSN;
		cmd.upd2 = u32_encode_bits(ssn, HAL_REO_CMD_UPD2_SSN);
	}

	ret = ath12k_dp_reo_cmd_send(ar->ab, rx_tid,
				     HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
				     NULL);
	if (ret) {
		ath12k_warn(ar->ab, "failed to update rx tid queue, tid %d (%d)\n",
			    rx_tid->tid, ret);
		return ret;
	}

	rx_tid->ba_win_sz = ba_win_sz;

	return 0;
}

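/* Allocate and map a REO queue descriptor for the given peer/tid and point
 * the hardware at it, either through the REO queue LUT (when supported) or
 * via a WMI reorder queue setup command. If the tid queue is already
 * active, only the BA window size and SSN are updated.
 */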
int ath12k_dp_rx_peer_tid_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_id,
				u8 tid, u32 ba_win_sz, u16 ssn,
				enum hal_pn_type pn_type)
{
	struct ath12k_base *ab = ar->ab;
	struct ath12k_dp *dp = &ab->dp;
	struct hal_rx_reo_queue *addr_aligned;
	struct ath12k_peer *peer;
	struct ath12k_dp_rx_tid *rx_tid;
	u32 hw_desc_sz;
	void *vaddr;
	dma_addr_t paddr;
	int ret;

	spin_lock_bh(&ab->base_lock);

	peer = ath12k_peer_find(ab, vdev_id, peer_mac);
	if (!peer) {
		spin_unlock_bh(&ab->base_lock);
		ath12k_warn(ab, "failed to find the peer to set up rx tid\n");
		return -ENOENT;
	}

	if (ab->hw_params->reoq_lut_support && !dp->reoq_lut.vaddr) {
		spin_unlock_bh(&ab->base_lock);
		ath12k_warn(ab, "reo qref table is not setup\n");
		return -EINVAL;
	}

	if (peer->peer_id > DP_MAX_PEER_ID || tid > IEEE80211_NUM_TIDS) {
		ath12k_warn(ab, "peer id of peer %d or tid %d doesn't allow reoq setup\n",
			    peer->peer_id, tid);
		spin_unlock_bh(&ab->base_lock);
		return -EINVAL;
	}

	rx_tid = &peer->rx_tid[tid];
	/* Update the tid queue if it is already setup */
	if (rx_tid->active) {
		paddr = rx_tid->paddr;
		ret = ath12k_peer_rx_tid_reo_update(ar, peer, rx_tid,
						    ba_win_sz, ssn, true);
		spin_unlock_bh(&ab->base_lock);
		if (ret) {
			ath12k_warn(ab, "failed to update reo for rx tid %d\n", tid);
			return ret;
		}

		if (!ab->hw_params->reoq_lut_support) {
			ret = ath12k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id,
								     peer_mac,
								     paddr, tid, 1,
								     ba_win_sz);
			if (ret) {
				ath12k_warn(ab, "failed to setup peer rx reorder queue for tid %d: %d\n",
					    tid, ret);
				return ret;
			}
		}

		return 0;
	}

	rx_tid->tid = tid;

	rx_tid->ba_win_sz = ba_win_sz;

	/* TODO: Optimize the memory allocation for qos tid based on
	 * the actual BA window size in REO tid update path.
	 */
	if (tid == HAL_DESC_REO_NON_QOS_TID)
		hw_desc_sz = ath12k_hal_reo_qdesc_size(ba_win_sz, tid);
	else
		hw_desc_sz = ath12k_hal_reo_qdesc_size(DP_BA_WIN_SZ_MAX, tid);

	vaddr = kzalloc(hw_desc_sz + HAL_LINK_DESC_ALIGN - 1, GFP_ATOMIC);
	if (!vaddr) {
		spin_unlock_bh(&ab->base_lock);
		return -ENOMEM;
	}

	addr_aligned = PTR_ALIGN(vaddr, HAL_LINK_DESC_ALIGN);

	ath12k_hal_reo_qdesc_setup(addr_aligned, tid, ba_win_sz,
				   ssn, pn_type);

	paddr = dma_map_single(ab->dev, addr_aligned, hw_desc_sz,
			       DMA_BIDIRECTIONAL);

	ret = dma_mapping_error(ab->dev, paddr);
	if (ret) {
		spin_unlock_bh(&ab->base_lock);
		goto err_mem_free;
	}

	rx_tid->vaddr = vaddr;
	rx_tid->paddr = paddr;
	rx_tid->size = hw_desc_sz;
	rx_tid->active = true;

	if (ab->hw_params->reoq_lut_support) {
		/* Update the REO queue LUT at the corresponding peer id
		 * and tid with qaddr.
		 */
		ath12k_peer_rx_tid_qref_setup(ab, peer->peer_id, tid, paddr);
		spin_unlock_bh(&ab->base_lock);
	} else {
		spin_unlock_bh(&ab->base_lock);
		ret = ath12k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, peer_mac,
							     paddr, tid, 1, ba_win_sz);
	}

	return ret;

err_mem_free:
	kfree(vaddr);

	return ret;
}

int ath12k_dp_rx_ampdu_start(struct ath12k *ar,
			     struct ieee80211_ampdu_params *params)
{
	struct ath12k_base *ab = ar->ab;
	struct ath12k_sta *arsta = (void *)params->sta->drv_priv;
	int vdev_id = arsta->arvif->vdev_id;
	int ret;

	ret = ath12k_dp_rx_peer_tid_setup(ar, params->sta->addr, vdev_id,
					  params->tid, params->buf_size,
					  params->ssn, arsta->pn_type);
	if (ret)
		ath12k_warn(ab, "failed to setup rx tid %d\n", ret);

	return ret;
}

int ath12k_dp_rx_ampdu_stop(struct ath12k *ar,
			    struct ieee80211_ampdu_params *params)
{
	struct ath12k_base *ab = ar->ab;
	struct ath12k_peer *peer;
	struct ath12k_sta *arsta = (void *)params->sta->drv_priv;
	int vdev_id = arsta->arvif->vdev_id;
	bool active;
	int ret;

	spin_lock_bh(&ab->base_lock);

	peer = ath12k_peer_find(ab, vdev_id, params->sta->addr);
	if (!peer) {
		spin_unlock_bh(&ab->base_lock);
		ath12k_warn(ab, "failed to find the peer to stop rx aggregation\n");
		return -ENOENT;
	}

	active = peer->rx_tid[params->tid].active;

	if (!active) {
		spin_unlock_bh(&ab->base_lock);
		return 0;
	}

	ret = ath12k_peer_rx_tid_reo_update(ar, peer, peer->rx_tid, 1, 0, false);
	spin_unlock_bh(&ab->base_lock);
	if (ret) {
		ath12k_warn(ab, "failed to update reo for rx tid %d: %d\n",
			    params->tid, ret);
		return ret;
	}

	return ret;
}

int ath12k_dp_rx_peer_pn_replay_config(struct ath12k_vif *arvif,
				       const u8 *peer_addr,
				       enum set_key_cmd key_cmd,
				       struct ieee80211_key_conf *key)
{
	struct ath12k *ar = arvif->ar;
	struct ath12k_base *ab = ar->ab;
	struct ath12k_hal_reo_cmd cmd = {0};
	struct ath12k_peer *peer;
	struct ath12k_dp_rx_tid *rx_tid;
	u8 tid;
	int ret = 0;

	/* NOTE: Enable PN/TSC replay check offload only for unicast frames.
	 * We use mac80211 PN/TSC replay check functionality for bcast/mcast
	 * for now.
	 */
	if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
		return 0;

	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
	cmd.upd0 = HAL_REO_CMD_UPD0_PN |
		    HAL_REO_CMD_UPD0_PN_SIZE |
		    HAL_REO_CMD_UPD0_PN_VALID |
		    HAL_REO_CMD_UPD0_PN_CHECK |
		    HAL_REO_CMD_UPD0_SVLD;

	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_TKIP:
	case WLAN_CIPHER_SUITE_CCMP:
	case WLAN_CIPHER_SUITE_CCMP_256:
	case WLAN_CIPHER_SUITE_GCMP:
	case WLAN_CIPHER_SUITE_GCMP_256:
		if (key_cmd == SET_KEY) {
			cmd.upd1 |= HAL_REO_CMD_UPD1_PN_CHECK;
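			/* TKIP TSC and CCMP/GCMP packet numbers are 48 bits wide */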
			cmd.pn_size = 48;
		}
		break;
	default:
		break;
	}

	spin_lock_bh(&ab->base_lock);

	peer = ath12k_peer_find(ab, arvif->vdev_id, peer_addr);
	if (!peer) {
		spin_unlock_bh(&ab->base_lock);
		ath12k_warn(ab, "failed to find the peer %pM to configure pn replay detection\n",
			    peer_addr);
		return -ENOENT;
	}

	for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) {
		rx_tid = &peer->rx_tid[tid];
		if (!rx_tid->active)
			continue;
		cmd.addr_lo = lower_32_bits(rx_tid->paddr);
		cmd.addr_hi = upper_32_bits(rx_tid->paddr);
		ret = ath12k_dp_reo_cmd_send(ab, rx_tid,
					     HAL_REO_CMD_UPDATE_RX_QUEUE,
					     &cmd, NULL);
		if (ret) {
			ath12k_warn(ab, "failed to configure rx tid %d queue of peer %pM for pn replay detection %d\n",
				    tid, peer_addr, ret);
			break;
		}
	}

	spin_unlock_bh(&ab->base_lock);

	return ret;
}

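/* Return the user_stats slot already assigned to peer_id, or else the
 * first unused slot; -EINVAL when all slots are taken.
 */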
static int ath12k_get_ppdu_user_index(struct htt_ppdu_stats *ppdu_stats,
				      u16 peer_id)
{
	int i;

	for (i = 0; i < HTT_PPDU_STATS_MAX_USERS - 1; i++) {
		if (ppdu_stats->user_stats[i].is_valid_peer_id) {
			if (peer_id == ppdu_stats->user_stats[i].peer_id)
				return i;
		} else {
			return i;
		}
	}

	return -EINVAL;
}

static int ath12k_htt_tlv_ppdu_stats_parse(struct ath12k_base *ab,
					   u16 tag, u16 len, const void *ptr,
					   void *data)
{
	const struct htt_ppdu_stats_usr_cmpltn_ack_ba_status *ba_status;
	const struct htt_ppdu_stats_usr_cmpltn_cmn *cmplt_cmn;
	const struct htt_ppdu_stats_user_rate *user_rate;
	struct htt_ppdu_stats_info *ppdu_info;
	struct htt_ppdu_user_stats *user_stats;
	int cur_user;
	u16 peer_id;

	ppdu_info = data;

	switch (tag) {
	case HTT_PPDU_STATS_TAG_COMMON:
		if (len < sizeof(struct htt_ppdu_stats_common)) {
			ath12k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}
		memcpy(&ppdu_info->ppdu_stats.common, ptr,
		       sizeof(struct htt_ppdu_stats_common));
		break;
	case HTT_PPDU_STATS_TAG_USR_RATE:
		if (len < sizeof(struct htt_ppdu_stats_user_rate)) {
			ath12k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}
		user_rate = ptr;
		peer_id = le16_to_cpu(user_rate->sw_peer_id);
		cur_user = ath12k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
						      peer_id);
		if (cur_user < 0)
			return -EINVAL;
		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
		user_stats->peer_id = peer_id;
		user_stats->is_valid_peer_id = true;
		memcpy(&user_stats->rate, ptr,
		       sizeof(struct htt_ppdu_stats_user_rate));
		user_stats->tlv_flags |= BIT(tag);
		break;
	case HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON:
		if (len < sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn)) {
			ath12k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}

		cmplt_cmn = ptr;
		peer_id = le16_to_cpu(cmplt_cmn->sw_peer_id);
		cur_user = ath12k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
						      peer_id);
		if (cur_user < 0)
			return -EINVAL;
		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
		user_stats->peer_id = peer_id;
		user_stats->is_valid_peer_id = true;
		memcpy(&user_stats->cmpltn_cmn, ptr,
		       sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn));
		user_stats->tlv_flags |= BIT(tag);
		break;
	case HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS:
		if (len <
		    sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status)) {
			ath12k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}

		ba_status = ptr;
		peer_id = le16_to_cpu(ba_status->sw_peer_id);
		cur_user = ath12k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
						      peer_id);
		if (cur_user < 0)
			return -EINVAL;
		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
		user_stats->peer_id = peer_id;
		user_stats->is_valid_peer_id = true;
		memcpy(&user_stats->ack_ba, ptr,
		       sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status));
		user_stats->tlv_flags |= BIT(tag);
		break;
	}
	return 0;
}

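/* Walk a buffer of HTT TLVs, validating each header and length before
 * handing the value portion to the iterator callback.
 */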
static int ath12k_dp_htt_tlv_iter(struct ath12k_base *ab, const void *ptr, size_t len,
				  int (*iter)(struct ath12k_base *ar, u16 tag, u16 len,
					      const void *ptr, void *data),
				  void *data)
{
	const struct htt_tlv *tlv;
	const void *begin = ptr;
	u16 tlv_tag, tlv_len;
	int ret = -EINVAL;

	while (len > 0) {
		if (len < sizeof(*tlv)) {
			ath12k_err(ab, "htt tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
				   ptr - begin, len, sizeof(*tlv));
			return -EINVAL;
		}
		tlv = (struct htt_tlv *)ptr;
		tlv_tag = le32_get_bits(tlv->header, HTT_TLV_TAG);
		tlv_len = le32_get_bits(tlv->header, HTT_TLV_LEN);
		ptr += sizeof(*tlv);
		len -= sizeof(*tlv);

		if (tlv_len > len) {
			ath12k_err(ab, "htt tlv parse failure of tag %u at byte %zd (%zu bytes left, %u expected)\n",
				   tlv_tag, ptr - begin, len, tlv_len);
			return -EINVAL;
		}
		ret = iter(ab, tlv_tag, tlv_len, ptr, data);
		if (ret == -ENOMEM)
			return ret;

		ptr += tlv_len;
		len -= tlv_len;
	}
	return 0;
}

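/* Translate the HTT per-user rate and completion TLVs of a PPDU into the
 * station's rate_info and the per-peer tx stats.
 */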
static void
ath12k_update_per_peer_tx_stats(struct ath12k *ar,
				struct htt_ppdu_stats *ppdu_stats, u8 user)
{
	struct ath12k_base *ab = ar->ab;
	struct ath12k_peer *peer;
	struct ieee80211_sta *sta;
	struct ath12k_sta *arsta;
	struct htt_ppdu_stats_user_rate *user_rate;
	struct ath12k_per_peer_tx_stats *peer_stats = &ar->peer_tx_stats;
	struct htt_ppdu_user_stats *usr_stats = &ppdu_stats->user_stats[user];
	struct htt_ppdu_stats_common *common = &ppdu_stats->common;
	int ret;
	u8 flags, mcs, nss, bw, sgi, dcm, rate_idx = 0;
	u32 v, succ_bytes = 0;
	u16 tones, rate = 0, succ_pkts = 0;
	u32 tx_duration = 0;
	u8 tid = HTT_PPDU_STATS_NON_QOS_TID;
	bool is_ampdu = false;

	if (!usr_stats)
		return;

	if (!(usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_RATE)))
		return;

	if (usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON))
		is_ampdu =
			HTT_USR_CMPLTN_IS_AMPDU(usr_stats->cmpltn_cmn.flags);

	if (usr_stats->tlv_flags &
	    BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS)) {
		succ_bytes = le32_to_cpu(usr_stats->ack_ba.success_bytes);
		succ_pkts = le32_get_bits(usr_stats->ack_ba.info,
					  HTT_PPDU_STATS_ACK_BA_INFO_NUM_MSDU_M);
		tid = le32_get_bits(usr_stats->ack_ba.info,
				    HTT_PPDU_STATS_ACK_BA_INFO_TID_NUM);
	}

	if (common->fes_duration_us)
		tx_duration = le32_to_cpu(common->fes_duration_us);

	user_rate = &usr_stats->rate;
	flags = HTT_USR_RATE_PREAMBLE(user_rate->rate_flags);
	bw = HTT_USR_RATE_BW(user_rate->rate_flags) - 2;
	nss = HTT_USR_RATE_NSS(user_rate->rate_flags) + 1;
	mcs = HTT_USR_RATE_MCS(user_rate->rate_flags);
	sgi = HTT_USR_RATE_GI(user_rate->rate_flags);
	dcm = HTT_USR_RATE_DCM(user_rate->rate_flags);

	/* Note: If host configured fixed rates and in some other special
	 * cases, the broadcast/management frames are sent in different rates.
	 * Firmware rate's control to be skipped for this?
	 */

	if (flags == WMI_RATE_PREAMBLE_HE && mcs > ATH12K_HE_MCS_MAX) {
		ath12k_warn(ab, "Invalid HE mcs %d peer stats", mcs);
		return;
	}

	if (flags == WMI_RATE_PREAMBLE_VHT && mcs > ATH12K_VHT_MCS_MAX) {
		ath12k_warn(ab, "Invalid VHT mcs %d peer stats", mcs);
		return;
	}

	if (flags == WMI_RATE_PREAMBLE_HT && (mcs > ATH12K_HT_MCS_MAX || nss < 1)) {
		ath12k_warn(ab, "Invalid HT mcs %d nss %d peer stats",
			    mcs, nss);
		return;
	}

	if (flags == WMI_RATE_PREAMBLE_CCK || flags == WMI_RATE_PREAMBLE_OFDM) {
		ret = ath12k_mac_hw_ratecode_to_legacy_rate(mcs,
							    flags,
							    &rate_idx,
							    &rate);
		if (ret < 0)
			return;
	}

	rcu_read_lock();
	spin_lock_bh(&ab->base_lock);
	peer = ath12k_peer_find_by_id(ab, usr_stats->peer_id);

	if (!peer || !peer->sta) {
		spin_unlock_bh(&ab->base_lock);
		rcu_read_unlock();
		return;
	}

	sta = peer->sta;
	arsta = (struct ath12k_sta *)sta->drv_priv;

	memset(&arsta->txrate, 0, sizeof(arsta->txrate));

	switch (flags) {
	case WMI_RATE_PREAMBLE_OFDM:
		arsta->txrate.legacy = rate;
		break;
	case WMI_RATE_PREAMBLE_CCK:
		arsta->txrate.legacy = rate;
		break;
	case WMI_RATE_PREAMBLE_HT:
		arsta->txrate.mcs = mcs + 8 * (nss - 1);
		arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
		if (sgi)
			arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
		break;
	case WMI_RATE_PREAMBLE_VHT:
		arsta->txrate.mcs = mcs;
		arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
		if (sgi)
			arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
		break;
	case WMI_RATE_PREAMBLE_HE:
		arsta->txrate.mcs = mcs;
		arsta->txrate.flags = RATE_INFO_FLAGS_HE_MCS;
		arsta->txrate.he_dcm = dcm;
		arsta->txrate.he_gi = ath12k_he_gi_to_nl80211_he_gi(sgi);
		tones = le16_to_cpu(user_rate->ru_end) -
			le16_to_cpu(user_rate->ru_start) + 1;
		v = ath12k_he_ru_tones_to_nl80211_he_ru_alloc(tones);
		arsta->txrate.he_ru_alloc = v;
		break;
	}

	arsta->txrate.nss = nss;
	arsta->txrate.bw = ath12k_mac_bw_to_mac80211_bw(bw);
	arsta->tx_duration += tx_duration;
	memcpy(&arsta->last_txrate, &arsta->txrate, sizeof(struct rate_info));

	/* PPDU stats reported for mgmt packet doesn't have valid tx bytes.
	 * So skip peer stats update for mgmt packets.
	 */
	if (tid < HTT_PPDU_STATS_NON_QOS_TID) {
		memset(peer_stats, 0, sizeof(*peer_stats));
		peer_stats->succ_pkts = succ_pkts;
		peer_stats->succ_bytes = succ_bytes;
		peer_stats->is_ampdu = is_ampdu;
		peer_stats->duration = tx_duration;
		peer_stats->ba_fails =
			HTT_USR_CMPLTN_LONG_RETRY(usr_stats->cmpltn_cmn.flags) +
			HTT_USR_CMPLTN_SHORT_RETRY(usr_stats->cmpltn_cmn.flags);
	}

	spin_unlock_bh(&ab->base_lock);
	rcu_read_unlock();
}

static void ath12k_htt_update_ppdu_stats(struct ath12k *ar,
					 struct htt_ppdu_stats *ppdu_stats)
{
	u8 user;

	for (user = 0; user < HTT_PPDU_STATS_MAX_USERS - 1; user++)
		ath12k_update_per_peer_tx_stats(ar, ppdu_stats, user);
}

static
struct htt_ppdu_stats_info *ath12k_dp_htt_get_ppdu_desc(struct ath12k *ar,
							u32 ppdu_id)
{
	struct htt_ppdu_stats_info *ppdu_info;

	lockdep_assert_held(&ar->data_lock);
	if (!list_empty(&ar->ppdu_stats_info)) {
		list_for_each_entry(ppdu_info, &ar->ppdu_stats_info, list) {
			if (ppdu_info->ppdu_id == ppdu_id)
				return ppdu_info;
		}

		if (ar->ppdu_stat_list_depth > HTT_PPDU_DESC_MAX_DEPTH) {
			ppdu_info = list_first_entry(&ar->ppdu_stats_info,
						     typeof(*ppdu_info), list);
			list_del(&ppdu_info->list);
			ar->ppdu_stat_list_depth--;
			ath12k_htt_update_ppdu_stats(ar, &ppdu_info->ppdu_stats);
			kfree(ppdu_info);
		}
	}

	ppdu_info = kzalloc(sizeof(*ppdu_info), GFP_ATOMIC);
	if (!ppdu_info)
		return NULL;

	list_add_tail(&ppdu_info->list, &ar->ppdu_stats_info);
	ar->ppdu_stat_list_depth++;

	return ppdu_info;
}

static void ath12k_copy_to_delay_stats(struct ath12k_peer *peer,
				       struct htt_ppdu_user_stats *usr_stats)
{
	peer->ppdu_stats_delayba.sw_peer_id = le16_to_cpu(usr_stats->rate.sw_peer_id);
	peer->ppdu_stats_delayba.info0 = le32_to_cpu(usr_stats->rate.info0);
	peer->ppdu_stats_delayba.ru_end = le16_to_cpu(usr_stats->rate.ru_end);
	peer->ppdu_stats_delayba.ru_start = le16_to_cpu(usr_stats->rate.ru_start);
	peer->ppdu_stats_delayba.info1 = le32_to_cpu(usr_stats->rate.info1);
	peer->ppdu_stats_delayba.rate_flags = le32_to_cpu(usr_stats->rate.rate_flags);
	peer->ppdu_stats_delayba.resp_rate_flags =
		le32_to_cpu(usr_stats->rate.resp_rate_flags);

	peer->delayba_flag = true;
}

static void ath12k_copy_to_bar(struct ath12k_peer *peer,
			       struct htt_ppdu_user_stats *usr_stats)
{
	usr_stats->rate.sw_peer_id = cpu_to_le16(peer->ppdu_stats_delayba.sw_peer_id);
	usr_stats->rate.info0 = cpu_to_le32(peer->ppdu_stats_delayba.info0);
	usr_stats->rate.ru_end = cpu_to_le16(peer->ppdu_stats_delayba.ru_end);
	usr_stats->rate.ru_start = cpu_to_le16(peer->ppdu_stats_delayba.ru_start);
	usr_stats->rate.info1 = cpu_to_le32(peer->ppdu_stats_delayba.info1);
	usr_stats->rate.rate_flags = cpu_to_le32(peer->ppdu_stats_delayba.rate_flags);
	usr_stats->rate.resp_rate_flags =
		cpu_to_le32(peer->ppdu_stats_delayba.resp_rate_flags);

	peer->delayba_flag = false;
}

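/* Parse an HTT PPDU stats indication. For data PPDUs with delayed BA the
 * per-user rate TLVs are saved per peer, and for (MU-)BAR PPDUs the saved
 * TLVs are copied back into the user stats before they are consumed.
 */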
static int ath12k_htt_pull_ppdu_stats(struct ath12k_base *ab,
				      struct sk_buff *skb)
{
	struct ath12k_htt_ppdu_stats_msg *msg;
	struct htt_ppdu_stats_info *ppdu_info;
	struct ath12k_peer *peer = NULL;
	struct htt_ppdu_user_stats *usr_stats = NULL;
	u32 peer_id = 0;
	struct ath12k *ar;
	int ret, i;
	u8 pdev_id;
	u32 ppdu_id, len;

	msg = (struct ath12k_htt_ppdu_stats_msg *)skb->data;
	len = le32_get_bits(msg->info, HTT_T2H_PPDU_STATS_INFO_PAYLOAD_SIZE);
	if (len > (skb->len - struct_size(msg, data, 0))) {
		ath12k_warn(ab,
			    "HTT PPDU STATS event has unexpected payload size %u, should be smaller than %u\n",
			    len, skb->len);
		return -EINVAL;
	}

	pdev_id = le32_get_bits(msg->info, HTT_T2H_PPDU_STATS_INFO_PDEV_ID);
	ppdu_id = le32_to_cpu(msg->ppdu_id);

	rcu_read_lock();
	ar = ath12k_mac_get_ar_by_pdev_id(ab, pdev_id);
	if (!ar) {
		ret = -EINVAL;
		goto exit;
	}

	spin_lock_bh(&ar->data_lock);
	ppdu_info = ath12k_dp_htt_get_ppdu_desc(ar, ppdu_id);
	if (!ppdu_info) {
		spin_unlock_bh(&ar->data_lock);
		ret = -EINVAL;
		goto exit;
	}

	ppdu_info->ppdu_id = ppdu_id;
	ret = ath12k_dp_htt_tlv_iter(ab, msg->data, len,
				     ath12k_htt_tlv_ppdu_stats_parse,
				     (void *)ppdu_info);
	if (ret) {
		spin_unlock_bh(&ar->data_lock);
		ath12k_warn(ab, "Failed to parse tlv %d\n", ret);
		goto exit;
	}

	if (ppdu_info->ppdu_stats.common.num_users >= HTT_PPDU_STATS_MAX_USERS) {
		spin_unlock_bh(&ar->data_lock);
		ath12k_warn(ab,
			    "HTT PPDU STATS event has unexpected num_users %u, should be smaller than %u\n",
			    ppdu_info->ppdu_stats.common.num_users,
			    HTT_PPDU_STATS_MAX_USERS);
		ret = -EINVAL;
		goto exit;
	}

	/* back up data rate tlv for all peers */
	if (ppdu_info->frame_type == HTT_STATS_PPDU_FTYPE_DATA &&
	    (ppdu_info->tlv_bitmap & (1 << HTT_PPDU_STATS_TAG_USR_COMMON)) &&
	    ppdu_info->delay_ba) {
		for (i = 0; i < ppdu_info->ppdu_stats.common.num_users; i++) {
			peer_id = ppdu_info->ppdu_stats.user_stats[i].peer_id;
			spin_lock_bh(&ab->base_lock);
			peer = ath12k_peer_find_by_id(ab, peer_id);
			if (!peer) {
				spin_unlock_bh(&ab->base_lock);
				continue;
			}

			usr_stats = &ppdu_info->ppdu_stats.user_stats[i];
			if (usr_stats->delay_ba)
				ath12k_copy_to_delay_stats(peer, usr_stats);
			spin_unlock_bh(&ab->base_lock);
		}
	}

	/* restore all peers' data rate tlv to mu-bar tlv */
	if (ppdu_info->frame_type == HTT_STATS_PPDU_FTYPE_BAR &&
	    (ppdu_info->tlv_bitmap & (1 << HTT_PPDU_STATS_TAG_USR_COMMON))) {
		for (i = 0; i < ppdu_info->bar_num_users; i++) {
			peer_id = ppdu_info->ppdu_stats.user_stats[i].peer_id;
			spin_lock_bh(&ab->base_lock);
			peer = ath12k_peer_find_by_id(ab, peer_id);
			if (!peer) {
				spin_unlock_bh(&ab->base_lock);
				continue;
			}

			usr_stats = &ppdu_info->ppdu_stats.user_stats[i];
			if (peer->delayba_flag)
				ath12k_copy_to_bar(peer, usr_stats);
			spin_unlock_bh(&ab->base_lock);
		}
	}

	spin_unlock_bh(&ar->data_lock);

exit:
	rcu_read_unlock();

	return ret;
}

static void ath12k_htt_mlo_offset_event_handler(struct ath12k_base *ab,
						struct sk_buff *skb)
{
	struct ath12k_htt_mlo_offset_msg *msg;
	struct ath12k_pdev *pdev;
	struct ath12k *ar;
	u8 pdev_id;

	msg = (struct ath12k_htt_mlo_offset_msg *)skb->data;
	pdev_id = u32_get_bits(__le32_to_cpu(msg->info),
			       HTT_T2H_MLO_OFFSET_INFO_PDEV_ID);

	rcu_read_lock();
	ar = ath12k_mac_get_ar_by_pdev_id(ab, pdev_id);
	if (!ar) {
		ath12k_warn(ab, "invalid pdev id %d on htt mlo offset\n", pdev_id);
		goto exit;
	}

	spin_lock_bh(&ar->data_lock);
	pdev = ar->pdev;

	pdev->timestamp.info = __le32_to_cpu(msg->info);
	pdev->timestamp.sync_timestamp_lo_us = __le32_to_cpu(msg->sync_timestamp_lo_us);
	pdev->timestamp.sync_timestamp_hi_us = __le32_to_cpu(msg->sync_timestamp_hi_us);
	pdev->timestamp.mlo_offset_lo = __le32_to_cpu(msg->mlo_offset_lo);
	pdev->timestamp.mlo_offset_hi = __le32_to_cpu(msg->mlo_offset_hi);
	pdev->timestamp.mlo_offset_clks = __le32_to_cpu(msg->mlo_offset_clks);
	pdev->timestamp.mlo_comp_clks = __le32_to_cpu(msg->mlo_comp_clks);
	pdev->timestamp.mlo_comp_timer = __le32_to_cpu(msg->mlo_comp_timer);

	spin_unlock_bh(&ar->data_lock);
exit:
	rcu_read_unlock();
}

void ath12k_dp_htt_htc_t2h_msg_handler(struct ath12k_base *ab,
				       struct sk_buff *skb)
{
	struct ath12k_dp *dp = &ab->dp;
	struct htt_resp_msg *resp = (struct htt_resp_msg *)skb->data;
	enum htt_t2h_msg_type type;
	u16 peer_id;
	u8 vdev_id;
	u8 mac_addr[ETH_ALEN];
	u16 peer_mac_h16;
	u16 ast_hash = 0;
	u16 hw_peer_id;

	type = le32_get_bits(resp->version_msg.version, HTT_T2H_MSG_TYPE);

	ath12k_dbg(ab, ATH12K_DBG_DP_HTT, "dp_htt rx msg type: 0x%x\n", type);

1703 	switch (type) {
1704 	case HTT_T2H_MSG_TYPE_VERSION_CONF:
1705 		dp->htt_tgt_ver_major = le32_get_bits(resp->version_msg.version,
1706 						      HTT_T2H_VERSION_CONF_MAJOR);
1707 		dp->htt_tgt_ver_minor = le32_get_bits(resp->version_msg.version,
1708 						      HTT_T2H_VERSION_CONF_MINOR);
1709 		complete(&dp->htt_tgt_version_received);
1710 		break;
1711 	/* TODO: remove unused peer map versions after testing */
1712 	case HTT_T2H_MSG_TYPE_PEER_MAP:
1713 		vdev_id = le32_get_bits(resp->peer_map_ev.info,
1714 					HTT_T2H_PEER_MAP_INFO_VDEV_ID);
1715 		peer_id = le32_get_bits(resp->peer_map_ev.info,
1716 					HTT_T2H_PEER_MAP_INFO_PEER_ID);
1717 		peer_mac_h16 = le32_get_bits(resp->peer_map_ev.info1,
1718 					     HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16);
1719 		ath12k_dp_get_mac_addr(le32_to_cpu(resp->peer_map_ev.mac_addr_l32),
1720 				       peer_mac_h16, mac_addr);
1721 		ath12k_peer_map_event(ab, vdev_id, peer_id, mac_addr, 0, 0);
1722 		break;
1723 	case HTT_T2H_MSG_TYPE_PEER_MAP2:
1724 		vdev_id = le32_get_bits(resp->peer_map_ev.info,
1725 					HTT_T2H_PEER_MAP_INFO_VDEV_ID);
1726 		peer_id = le32_get_bits(resp->peer_map_ev.info,
1727 					HTT_T2H_PEER_MAP_INFO_PEER_ID);
1728 		peer_mac_h16 = le32_get_bits(resp->peer_map_ev.info1,
1729 					     HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16);
1730 		ath12k_dp_get_mac_addr(le32_to_cpu(resp->peer_map_ev.mac_addr_l32),
1731 				       peer_mac_h16, mac_addr);
1732 		ast_hash = le32_get_bits(resp->peer_map_ev.info2,
1733 					 HTT_T2H_PEER_MAP_INFO2_AST_HASH_VAL);
1734 		hw_peer_id = le32_get_bits(resp->peer_map_ev.info1,
1735 					   HTT_T2H_PEER_MAP_INFO1_HW_PEER_ID);
1736 		ath12k_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash,
1737 				      hw_peer_id);
1738 		break;
1739 	case HTT_T2H_MSG_TYPE_PEER_MAP3:
1740 		vdev_id = le32_get_bits(resp->peer_map_ev.info,
1741 					HTT_T2H_PEER_MAP_INFO_VDEV_ID);
1742 		peer_id = le32_get_bits(resp->peer_map_ev.info,
1743 					HTT_T2H_PEER_MAP_INFO_PEER_ID);
1744 		peer_mac_h16 = le32_get_bits(resp->peer_map_ev.info1,
1745 					     HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16);
1746 		ath12k_dp_get_mac_addr(le32_to_cpu(resp->peer_map_ev.mac_addr_l32),
1747 				       peer_mac_h16, mac_addr);
1748 		ath12k_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash,
1749 				      peer_id);
1750 		break;
1751 	case HTT_T2H_MSG_TYPE_PEER_UNMAP:
1752 	case HTT_T2H_MSG_TYPE_PEER_UNMAP2:
1753 		peer_id = le32_get_bits(resp->peer_unmap_ev.info,
1754 					HTT_T2H_PEER_UNMAP_INFO_PEER_ID);
1755 		ath12k_peer_unmap_event(ab, peer_id);
1756 		break;
1757 	case HTT_T2H_MSG_TYPE_PPDU_STATS_IND:
1758 		ath12k_htt_pull_ppdu_stats(ab, skb);
1759 		break;
1760 	case HTT_T2H_MSG_TYPE_EXT_STATS_CONF:
1761 		break;
1762 	case HTT_T2H_MSG_TYPE_MLO_TIMESTAMP_OFFSET_IND:
1763 		ath12k_htt_mlo_offset_event_handler(ab, skb);
1764 		break;
1765 	default:
1766 		ath12k_dbg(ab, ATH12K_DBG_DP_HTT, "dp_htt event %d not handled\n",
1767 			   type);
1768 		break;
1769 	}
1770 
1771 	dev_kfree_skb_any(skb);
1772 }
1773 
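/* Coalesce an MSDU that the DMA engine split across multiple rx buffers
 * back into the first skb. A worked example with illustrative numbers
 * (the real values come from hw_params): with DP_RX_BUFFER_SIZE = 2048,
 * hal_desc_sz = 128 and l3pad_bytes = 2, the first buffer carries
 * 2048 - 130 = 1918 payload bytes, each continuation buffer carries up
 * to 2048 - 128 = 1920 payload bytes, and the last buffer carries
 * whatever remains of msdu_len.
 */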
1774 static int ath12k_dp_rx_msdu_coalesce(struct ath12k *ar,
1775 				      struct sk_buff_head *msdu_list,
1776 				      struct sk_buff *first, struct sk_buff *last,
1777 				      u8 l3pad_bytes, int msdu_len)
1778 {
1779 	struct ath12k_base *ab = ar->ab;
1780 	struct sk_buff *skb;
1781 	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(first);
1782 	int buf_first_hdr_len, buf_first_len;
1783 	struct hal_rx_desc *ldesc;
1784 	int space_extra, rem_len, buf_len;
1785 	u32 hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;
1786 
	/* As the MSDU is spread across multiple rx buffers, find the
	 * offset to the start of the MSDU for computing the length of
	 * the MSDU in the first buffer.
	 */
1791 	buf_first_hdr_len = hal_rx_desc_sz + l3pad_bytes;
1792 	buf_first_len = DP_RX_BUFFER_SIZE - buf_first_hdr_len;
1793 
1794 	if (WARN_ON_ONCE(msdu_len <= buf_first_len)) {
1795 		skb_put(first, buf_first_hdr_len + msdu_len);
1796 		skb_pull(first, buf_first_hdr_len);
1797 		return 0;
1798 	}
1799 
1800 	ldesc = (struct hal_rx_desc *)last->data;
1801 	rxcb->is_first_msdu = ath12k_dp_rx_h_first_msdu(ab, ldesc);
1802 	rxcb->is_last_msdu = ath12k_dp_rx_h_last_msdu(ab, ldesc);
1803 
	/* The MSDU spans multiple buffers because its length exceeds
	 * DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE. So assume the data in the
	 * first buffer is of length DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE.
	 */
1808 	skb_put(first, DP_RX_BUFFER_SIZE);
1809 	skb_pull(first, buf_first_hdr_len);
1810 
	/* When an MSDU is spread over multiple buffers, the MSDU_END
	 * TLVs are valid only in the last buffer. Copy those TLVs.
	 */
1814 	ath12k_dp_rx_desc_end_tlv_copy(ab, rxcb->rx_desc, ldesc);
1815 
1816 	space_extra = msdu_len - (buf_first_len + skb_tailroom(first));
1817 	if (space_extra > 0 &&
1818 	    (pskb_expand_head(first, 0, space_extra, GFP_ATOMIC) < 0)) {
1819 		/* Free up all buffers of the MSDU */
1820 		while ((skb = __skb_dequeue(msdu_list)) != NULL) {
1821 			rxcb = ATH12K_SKB_RXCB(skb);
1822 			if (!rxcb->is_continuation) {
1823 				dev_kfree_skb_any(skb);
1824 				break;
1825 			}
1826 			dev_kfree_skb_any(skb);
1827 		}
1828 		return -ENOMEM;
1829 	}
1830 
1831 	rem_len = msdu_len - buf_first_len;
1832 	while ((skb = __skb_dequeue(msdu_list)) != NULL && rem_len > 0) {
1833 		rxcb = ATH12K_SKB_RXCB(skb);
1834 		if (rxcb->is_continuation)
1835 			buf_len = DP_RX_BUFFER_SIZE - hal_rx_desc_sz;
1836 		else
1837 			buf_len = rem_len;
1838 
1839 		if (buf_len > (DP_RX_BUFFER_SIZE - hal_rx_desc_sz)) {
1840 			WARN_ON_ONCE(1);
1841 			dev_kfree_skb_any(skb);
1842 			return -EINVAL;
1843 		}
1844 
1845 		skb_put(skb, buf_len + hal_rx_desc_sz);
1846 		skb_pull(skb, hal_rx_desc_sz);
1847 		skb_copy_from_linear_data(skb, skb_put(first, buf_len),
1848 					  buf_len);
1849 		dev_kfree_skb_any(skb);
1850 
1851 		rem_len -= buf_len;
1852 		if (!rxcb->is_continuation)
1853 			break;
1854 	}
1855 
1856 	return 0;
1857 }
1858 
1859 static struct sk_buff *ath12k_dp_rx_get_msdu_last_buf(struct sk_buff_head *msdu_list,
1860 						      struct sk_buff *first)
1861 {
1862 	struct sk_buff *skb;
1863 	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(first);
1864 
1865 	if (!rxcb->is_continuation)
1866 		return first;
1867 
1868 	skb_queue_walk(msdu_list, skb) {
1869 		rxcb = ATH12K_SKB_RXCB(skb);
1870 		if (!rxcb->is_continuation)
1871 			return skb;
1872 	}
1873 
1874 	return NULL;
1875 }
1876 
1877 static void ath12k_dp_rx_h_csum_offload(struct ath12k *ar, struct sk_buff *msdu)
1878 {
1879 	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
1880 	struct ath12k_base *ab = ar->ab;
1881 	bool ip_csum_fail, l4_csum_fail;
1882 
1883 	ip_csum_fail = ath12k_dp_rx_h_ip_cksum_fail(ab, rxcb->rx_desc);
1884 	l4_csum_fail = ath12k_dp_rx_h_l4_cksum_fail(ab, rxcb->rx_desc);
1885 
1886 	msdu->ip_summed = (ip_csum_fail || l4_csum_fail) ?
1887 			  CHECKSUM_NONE : CHECKSUM_UNNECESSARY;
1888 }
1889 
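/* Per-cipher crypto lengths used by the helpers below, as defined by
 * mac80211 (include/linux/ieee80211.h): TKIP has an 8 byte IV and a
 * 4 byte ICV; CCMP-128 has an 8 byte header and an 8 byte MIC;
 * CCMP-256 has an 8 byte header and a 16 byte MIC; GCMP-128/256 have
 * an 8 byte header and a 16 byte MIC. Note that the 8 byte Michael MIC
 * trimmed elsewhere in this file happens to equal IEEE80211_CCMP_MIC_LEN,
 * which is why that constant is reused in the TKIP MMIC paths.
 */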
1890 static int ath12k_dp_rx_crypto_mic_len(struct ath12k *ar,
1891 				       enum hal_encrypt_type enctype)
1892 {
1893 	switch (enctype) {
1894 	case HAL_ENCRYPT_TYPE_OPEN:
1895 	case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
1896 	case HAL_ENCRYPT_TYPE_TKIP_MIC:
1897 		return 0;
1898 	case HAL_ENCRYPT_TYPE_CCMP_128:
1899 		return IEEE80211_CCMP_MIC_LEN;
1900 	case HAL_ENCRYPT_TYPE_CCMP_256:
1901 		return IEEE80211_CCMP_256_MIC_LEN;
1902 	case HAL_ENCRYPT_TYPE_GCMP_128:
1903 	case HAL_ENCRYPT_TYPE_AES_GCMP_256:
1904 		return IEEE80211_GCMP_MIC_LEN;
1905 	case HAL_ENCRYPT_TYPE_WEP_40:
1906 	case HAL_ENCRYPT_TYPE_WEP_104:
1907 	case HAL_ENCRYPT_TYPE_WEP_128:
1908 	case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
1909 	case HAL_ENCRYPT_TYPE_WAPI:
1910 		break;
1911 	}
1912 
1913 	ath12k_warn(ar->ab, "unsupported encryption type %d for mic len\n", enctype);
1914 	return 0;
1915 }
1916 
1917 static int ath12k_dp_rx_crypto_param_len(struct ath12k *ar,
1918 					 enum hal_encrypt_type enctype)
1919 {
1920 	switch (enctype) {
1921 	case HAL_ENCRYPT_TYPE_OPEN:
1922 		return 0;
1923 	case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
1924 	case HAL_ENCRYPT_TYPE_TKIP_MIC:
1925 		return IEEE80211_TKIP_IV_LEN;
1926 	case HAL_ENCRYPT_TYPE_CCMP_128:
1927 		return IEEE80211_CCMP_HDR_LEN;
1928 	case HAL_ENCRYPT_TYPE_CCMP_256:
1929 		return IEEE80211_CCMP_256_HDR_LEN;
1930 	case HAL_ENCRYPT_TYPE_GCMP_128:
1931 	case HAL_ENCRYPT_TYPE_AES_GCMP_256:
1932 		return IEEE80211_GCMP_HDR_LEN;
1933 	case HAL_ENCRYPT_TYPE_WEP_40:
1934 	case HAL_ENCRYPT_TYPE_WEP_104:
1935 	case HAL_ENCRYPT_TYPE_WEP_128:
1936 	case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
1937 	case HAL_ENCRYPT_TYPE_WAPI:
1938 		break;
1939 	}
1940 
1941 	ath12k_warn(ar->ab, "unsupported encryption type %d\n", enctype);
1942 	return 0;
1943 }
1944 
1945 static int ath12k_dp_rx_crypto_icv_len(struct ath12k *ar,
1946 				       enum hal_encrypt_type enctype)
1947 {
1948 	switch (enctype) {
1949 	case HAL_ENCRYPT_TYPE_OPEN:
1950 	case HAL_ENCRYPT_TYPE_CCMP_128:
1951 	case HAL_ENCRYPT_TYPE_CCMP_256:
1952 	case HAL_ENCRYPT_TYPE_GCMP_128:
1953 	case HAL_ENCRYPT_TYPE_AES_GCMP_256:
1954 		return 0;
1955 	case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
1956 	case HAL_ENCRYPT_TYPE_TKIP_MIC:
1957 		return IEEE80211_TKIP_ICV_LEN;
1958 	case HAL_ENCRYPT_TYPE_WEP_40:
1959 	case HAL_ENCRYPT_TYPE_WEP_104:
1960 	case HAL_ENCRYPT_TYPE_WEP_128:
1961 	case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
1962 	case HAL_ENCRYPT_TYPE_WAPI:
1963 		break;
1964 	}
1965 
1966 	ath12k_warn(ar->ab, "unsupported encryption type %d\n", enctype);
1967 	return 0;
1968 }
1969 
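/* Native-wifi undecap: the target strips the QoS control (and HT
 * control) fields from the 802.11 header, so rebuild what mac80211
 * expects: pull the decapped header, re-add the QoS control field from
 * the TID stored in the rxcb, optionally push the crypto parameters
 * back in front, and then push the saved header again.
 */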
1970 static void ath12k_dp_rx_h_undecap_nwifi(struct ath12k *ar,
1971 					 struct sk_buff *msdu,
1972 					 enum hal_encrypt_type enctype,
1973 					 struct ieee80211_rx_status *status)
1974 {
1975 	struct ath12k_base *ab = ar->ab;
1976 	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
1977 	u8 decap_hdr[DP_MAX_NWIFI_HDR_LEN];
1978 	struct ieee80211_hdr *hdr;
1979 	size_t hdr_len;
1980 	u8 *crypto_hdr;
1981 	u16 qos_ctl;
1982 
1983 	/* pull decapped header */
1984 	hdr = (struct ieee80211_hdr *)msdu->data;
1985 	hdr_len = ieee80211_hdrlen(hdr->frame_control);
1986 	skb_pull(msdu, hdr_len);
1987 
1988 	/*  Rebuild qos header */
1989 	hdr->frame_control |= __cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
1990 
1991 	/* Reset the order bit as the HT_Control header is stripped */
1992 	hdr->frame_control &= ~(__cpu_to_le16(IEEE80211_FCTL_ORDER));
1993 
1994 	qos_ctl = rxcb->tid;
1995 
1996 	if (ath12k_dp_rx_h_mesh_ctl_present(ab, rxcb->rx_desc))
1997 		qos_ctl |= IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT;
1998 
1999 	/* TODO: Add other QoS ctl fields when required */
2000 
2001 	/* copy decap header before overwriting for reuse below */
2002 	memcpy(decap_hdr, hdr, hdr_len);
2003 
2004 	/* Rebuild crypto header for mac80211 use */
2005 	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
2006 		crypto_hdr = skb_push(msdu, ath12k_dp_rx_crypto_param_len(ar, enctype));
2007 		ath12k_dp_rx_desc_get_crypto_header(ar->ab,
2008 						    rxcb->rx_desc, crypto_hdr,
2009 						    enctype);
2010 	}
2011 
	memcpy(skb_push(msdu, IEEE80211_QOS_CTL_LEN), &qos_ctl,
	       IEEE80211_QOS_CTL_LEN);
2015 	memcpy(skb_push(msdu, hdr_len), decap_hdr, hdr_len);
2016 }
2017 
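/* Raw undecap: the frame is still a full 802.11 MPDU of the form
 * [802.11 hdr][IV][payload][MIC][ICV][FCS]. Trim the FCS always, trim
 * MIC/ICV depending on which RX_FLAG_*_STRIPPED flags are set, and if
 * the IV was stripped, move the header forward over the crypto params.
 */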
2018 static void ath12k_dp_rx_h_undecap_raw(struct ath12k *ar, struct sk_buff *msdu,
2019 				       enum hal_encrypt_type enctype,
2020 				       struct ieee80211_rx_status *status,
2021 				       bool decrypted)
2022 {
2023 	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
2024 	struct ieee80211_hdr *hdr;
2025 	size_t hdr_len;
2026 	size_t crypto_len;
2027 
	if (!(rxcb->is_first_msdu && rxcb->is_last_msdu)) {
2030 		WARN_ON_ONCE(1);
2031 		return;
2032 	}
2033 
2034 	skb_trim(msdu, msdu->len - FCS_LEN);
2035 
2036 	if (!decrypted)
2037 		return;
2038 
2039 	hdr = (void *)msdu->data;
2040 
2041 	/* Tail */
2042 	if (status->flag & RX_FLAG_IV_STRIPPED) {
2043 		skb_trim(msdu, msdu->len -
2044 			 ath12k_dp_rx_crypto_mic_len(ar, enctype));
2045 
2046 		skb_trim(msdu, msdu->len -
2047 			 ath12k_dp_rx_crypto_icv_len(ar, enctype));
2048 	} else {
2049 		/* MIC */
2050 		if (status->flag & RX_FLAG_MIC_STRIPPED)
2051 			skb_trim(msdu, msdu->len -
2052 				 ath12k_dp_rx_crypto_mic_len(ar, enctype));
2053 
2054 		/* ICV */
2055 		if (status->flag & RX_FLAG_ICV_STRIPPED)
2056 			skb_trim(msdu, msdu->len -
2057 				 ath12k_dp_rx_crypto_icv_len(ar, enctype));
2058 	}
2059 
2060 	/* MMIC */
2061 	if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
2062 	    !ieee80211_has_morefrags(hdr->frame_control) &&
2063 	    enctype == HAL_ENCRYPT_TYPE_TKIP_MIC)
2064 		skb_trim(msdu, msdu->len - IEEE80211_CCMP_MIC_LEN);
2065 
2066 	/* Head */
2067 	if (status->flag & RX_FLAG_IV_STRIPPED) {
2068 		hdr_len = ieee80211_hdrlen(hdr->frame_control);
2069 		crypto_len = ath12k_dp_rx_crypto_param_len(ar, enctype);
2070 
2071 		memmove(msdu->data + crypto_len, msdu->data, hdr_len);
2072 		skb_pull(msdu, crypto_len);
2073 	}
2074 }
2075 
2076 static void ath12k_get_dot11_hdr_from_rx_desc(struct ath12k *ar,
2077 					      struct sk_buff *msdu,
2078 					      struct ath12k_skb_rxcb *rxcb,
2079 					      struct ieee80211_rx_status *status,
2080 					      enum hal_encrypt_type enctype)
2081 {
2082 	struct hal_rx_desc *rx_desc = rxcb->rx_desc;
2083 	struct ath12k_base *ab = ar->ab;
2084 	size_t hdr_len, crypto_len;
2085 	struct ieee80211_hdr *hdr;
2086 	u16 qos_ctl;
2087 	__le16 fc;
2088 	u8 *crypto_hdr;
2089 
2090 	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
2091 		crypto_len = ath12k_dp_rx_crypto_param_len(ar, enctype);
2092 		crypto_hdr = skb_push(msdu, crypto_len);
2093 		ath12k_dp_rx_desc_get_crypto_header(ab, rx_desc, crypto_hdr, enctype);
2094 	}
2095 
2096 	fc = cpu_to_le16(ath12k_dp_rxdesc_get_mpdu_frame_ctrl(ab, rx_desc));
2097 	hdr_len = ieee80211_hdrlen(fc);
2098 	skb_push(msdu, hdr_len);
2099 	hdr = (struct ieee80211_hdr *)msdu->data;
2100 	hdr->frame_control = fc;
2101 
2102 	/* Get wifi header from rx_desc */
2103 	ath12k_dp_rx_desc_get_dot11_hdr(ab, rx_desc, hdr);
2104 
2105 	if (rxcb->is_mcbc)
2106 		status->flag &= ~RX_FLAG_PN_VALIDATED;
2107 
2108 	/* Add QOS header */
2109 	if (ieee80211_is_data_qos(hdr->frame_control)) {
2110 		qos_ctl = rxcb->tid;
2111 		if (ath12k_dp_rx_h_mesh_ctl_present(ab, rx_desc))
2112 			qos_ctl |= IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT;
2113 
2114 		/* TODO: Add other QoS ctl fields when required */
2115 		memcpy(msdu->data + (hdr_len - IEEE80211_QOS_CTL_LEN),
2116 		       &qos_ctl, IEEE80211_QOS_CTL_LEN);
2117 	}
2118 }
2119 
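/* Ethernet undecap: convert a hw-decapped Ethernet II frame back to
 * 802.11 + RFC 1042 LLC/SNAP so mac80211 can process it: save DA/SA,
 * replace the Ethernet header with a SNAP header carrying the original
 * ethertype, rebuild the 802.11 header from the rx descriptor, and
 * restore DA/SA into the rebuilt header.
 */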
2120 static void ath12k_dp_rx_h_undecap_eth(struct ath12k *ar,
2121 				       struct sk_buff *msdu,
2122 				       enum hal_encrypt_type enctype,
2123 				       struct ieee80211_rx_status *status)
2124 {
2125 	struct ieee80211_hdr *hdr;
2126 	struct ethhdr *eth;
2127 	u8 da[ETH_ALEN];
2128 	u8 sa[ETH_ALEN];
2129 	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
2130 	struct ath12k_dp_rx_rfc1042_hdr rfc = {0xaa, 0xaa, 0x03, {0x00, 0x00, 0x00}};
2131 
2132 	eth = (struct ethhdr *)msdu->data;
2133 	ether_addr_copy(da, eth->h_dest);
2134 	ether_addr_copy(sa, eth->h_source);
2135 	rfc.snap_type = eth->h_proto;
2136 	skb_pull(msdu, sizeof(*eth));
2137 	memcpy(skb_push(msdu, sizeof(rfc)), &rfc,
2138 	       sizeof(rfc));
2139 	ath12k_get_dot11_hdr_from_rx_desc(ar, msdu, rxcb, status, enctype);
2140 
2141 	/* original 802.11 header has a different DA and in
2142 	 * case of 4addr it may also have different SA
2143 	 */
2144 	hdr = (struct ieee80211_hdr *)msdu->data;
2145 	ether_addr_copy(ieee80211_get_DA(hdr), da);
2146 	ether_addr_copy(ieee80211_get_SA(hdr), sa);
2147 }
2148 
2149 static void ath12k_dp_rx_h_undecap(struct ath12k *ar, struct sk_buff *msdu,
2150 				   struct hal_rx_desc *rx_desc,
2151 				   enum hal_encrypt_type enctype,
2152 				   struct ieee80211_rx_status *status,
2153 				   bool decrypted)
2154 {
2155 	struct ath12k_base *ab = ar->ab;
2156 	u8 decap;
2157 	struct ethhdr *ehdr;
2158 
2159 	decap = ath12k_dp_rx_h_decap_type(ab, rx_desc);
2160 
2161 	switch (decap) {
2162 	case DP_RX_DECAP_TYPE_NATIVE_WIFI:
2163 		ath12k_dp_rx_h_undecap_nwifi(ar, msdu, enctype, status);
2164 		break;
2165 	case DP_RX_DECAP_TYPE_RAW:
2166 		ath12k_dp_rx_h_undecap_raw(ar, msdu, enctype, status,
2167 					   decrypted);
2168 		break;
2169 	case DP_RX_DECAP_TYPE_ETHERNET2_DIX:
2170 		ehdr = (struct ethhdr *)msdu->data;
2171 
2172 		/* mac80211 allows fast path only for authorized STA */
2173 		if (ehdr->h_proto == cpu_to_be16(ETH_P_PAE)) {
2174 			ATH12K_SKB_RXCB(msdu)->is_eapol = true;
2175 			ath12k_dp_rx_h_undecap_eth(ar, msdu, enctype, status);
2176 			break;
2177 		}
2178 
2179 		/* PN for mcast packets will be validated in mac80211;
2180 		 * remove eth header and add 802.11 header.
2181 		 */
2182 		if (ATH12K_SKB_RXCB(msdu)->is_mcbc && decrypted)
2183 			ath12k_dp_rx_h_undecap_eth(ar, msdu, enctype, status);
2184 		break;
2185 	case DP_RX_DECAP_TYPE_8023:
2186 		/* TODO: Handle undecap for these formats */
2187 		break;
2188 	}
2189 }
2190 
2191 struct ath12k_peer *
2192 ath12k_dp_rx_h_find_peer(struct ath12k_base *ab, struct sk_buff *msdu)
2193 {
2194 	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
2195 	struct hal_rx_desc *rx_desc = rxcb->rx_desc;
2196 	struct ath12k_peer *peer = NULL;
2197 
2198 	lockdep_assert_held(&ab->base_lock);
2199 
2200 	if (rxcb->peer_id)
2201 		peer = ath12k_peer_find_by_id(ab, rxcb->peer_id);
2202 
2203 	if (peer)
2204 		return peer;
2205 
2206 	if (!rx_desc || !(ath12k_dp_rxdesc_mac_addr2_valid(ab, rx_desc)))
2207 		return NULL;
2208 
2209 	peer = ath12k_peer_find_by_addr(ab,
2210 					ath12k_dp_rxdesc_get_mpdu_start_addr2(ab,
2211 									      rx_desc));
2212 	return peer;
2213 }
2214 
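/* Per-MPDU rx handling: classify multicast/broadcast frames (whose PN
 * is validated by mac80211, hence fill_crypto_hdr), look up the cipher
 * from the peer's unicast or group security type, translate the hw
 * error and decryption status into mac80211 RX_FLAG_* bits, apply the
 * checksum offload result and undecap the frame.
 */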
2215 static void ath12k_dp_rx_h_mpdu(struct ath12k *ar,
2216 				struct sk_buff *msdu,
2217 				struct hal_rx_desc *rx_desc,
2218 				struct ieee80211_rx_status *rx_status)
2219 {
	bool fill_crypto_hdr;
2221 	struct ath12k_base *ab = ar->ab;
2222 	struct ath12k_skb_rxcb *rxcb;
2223 	enum hal_encrypt_type enctype;
2224 	bool is_decrypted = false;
2225 	struct ieee80211_hdr *hdr;
2226 	struct ath12k_peer *peer;
2227 	u32 err_bitmap;
2228 
2229 	/* PN for multicast packets will be checked in mac80211 */
2230 	rxcb = ATH12K_SKB_RXCB(msdu);
2231 	fill_crypto_hdr = ath12k_dp_rx_h_is_da_mcbc(ar->ab, rx_desc);
2232 	rxcb->is_mcbc = fill_crypto_hdr;
2233 
2234 	if (rxcb->is_mcbc)
2235 		rxcb->peer_id = ath12k_dp_rx_h_peer_id(ar->ab, rx_desc);
2236 
2237 	spin_lock_bh(&ar->ab->base_lock);
2238 	peer = ath12k_dp_rx_h_find_peer(ar->ab, msdu);
2239 	if (peer) {
2240 		if (rxcb->is_mcbc)
2241 			enctype = peer->sec_type_grp;
2242 		else
2243 			enctype = peer->sec_type;
2244 	} else {
2245 		enctype = HAL_ENCRYPT_TYPE_OPEN;
2246 	}
2247 	spin_unlock_bh(&ar->ab->base_lock);
2248 
2249 	err_bitmap = ath12k_dp_rx_h_mpdu_err(ab, rx_desc);
2250 	if (enctype != HAL_ENCRYPT_TYPE_OPEN && !err_bitmap)
2251 		is_decrypted = ath12k_dp_rx_h_is_decrypted(ab, rx_desc);
2252 
2253 	/* Clear per-MPDU flags while leaving per-PPDU flags intact */
2254 	rx_status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
2255 			     RX_FLAG_MMIC_ERROR |
2256 			     RX_FLAG_DECRYPTED |
2257 			     RX_FLAG_IV_STRIPPED |
2258 			     RX_FLAG_MMIC_STRIPPED);
2259 
2260 	if (err_bitmap & HAL_RX_MPDU_ERR_FCS)
2261 		rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
2262 	if (err_bitmap & HAL_RX_MPDU_ERR_TKIP_MIC)
2263 		rx_status->flag |= RX_FLAG_MMIC_ERROR;
2264 
2265 	if (is_decrypted) {
2266 		rx_status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MMIC_STRIPPED;
2267 
2268 		if (fill_crypto_hdr)
2269 			rx_status->flag |= RX_FLAG_MIC_STRIPPED |
					   RX_FLAG_ICV_STRIPPED;
2271 		else
2272 			rx_status->flag |= RX_FLAG_IV_STRIPPED |
2273 					   RX_FLAG_PN_VALIDATED;
2274 	}
2275 
2276 	ath12k_dp_rx_h_csum_offload(ar, msdu);
2277 	ath12k_dp_rx_h_undecap(ar, msdu, rx_desc,
2278 			       enctype, rx_status, is_decrypted);
2279 
2280 	if (!is_decrypted || fill_crypto_hdr)
2281 		return;
2282 
2283 	if (ath12k_dp_rx_h_decap_type(ar->ab, rx_desc) !=
2284 	    DP_RX_DECAP_TYPE_ETHERNET2_DIX) {
2285 		hdr = (void *)msdu->data;
2286 		hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
2287 	}
2288 }
2289 
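/* Translate the hw rate fields into mac80211's rx_status encoding.
 * For HT, mac80211 expects a flat MCS index across spatial streams,
 * i.e. rate_idx = mcs + 8 * (nss - 1); for example, mcs 7 at nss 2
 * becomes rate_idx 15.
 */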
2290 static void ath12k_dp_rx_h_rate(struct ath12k *ar, struct hal_rx_desc *rx_desc,
2291 				struct ieee80211_rx_status *rx_status)
2292 {
2293 	struct ath12k_base *ab = ar->ab;
2294 	struct ieee80211_supported_band *sband;
2295 	enum rx_msdu_start_pkt_type pkt_type;
2296 	u8 bw;
2297 	u8 rate_mcs, nss;
2298 	u8 sgi;
2299 	bool is_cck;
2300 
2301 	pkt_type = ath12k_dp_rx_h_pkt_type(ab, rx_desc);
2302 	bw = ath12k_dp_rx_h_rx_bw(ab, rx_desc);
2303 	rate_mcs = ath12k_dp_rx_h_rate_mcs(ab, rx_desc);
2304 	nss = ath12k_dp_rx_h_nss(ab, rx_desc);
2305 	sgi = ath12k_dp_rx_h_sgi(ab, rx_desc);
2306 
2307 	switch (pkt_type) {
2308 	case RX_MSDU_START_PKT_TYPE_11A:
2309 	case RX_MSDU_START_PKT_TYPE_11B:
2310 		is_cck = (pkt_type == RX_MSDU_START_PKT_TYPE_11B);
2311 		sband = &ar->mac.sbands[rx_status->band];
2312 		rx_status->rate_idx = ath12k_mac_hw_rate_to_idx(sband, rate_mcs,
2313 								is_cck);
2314 		break;
2315 	case RX_MSDU_START_PKT_TYPE_11N:
2316 		rx_status->encoding = RX_ENC_HT;
2317 		if (rate_mcs > ATH12K_HT_MCS_MAX) {
2318 			ath12k_warn(ar->ab,
2319 				    "Received with invalid mcs in HT mode %d\n",
2320 				     rate_mcs);
2321 			break;
2322 		}
2323 		rx_status->rate_idx = rate_mcs + (8 * (nss - 1));
2324 		if (sgi)
2325 			rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
2326 		rx_status->bw = ath12k_mac_bw_to_mac80211_bw(bw);
2327 		break;
2328 	case RX_MSDU_START_PKT_TYPE_11AC:
2329 		rx_status->encoding = RX_ENC_VHT;
2330 		rx_status->rate_idx = rate_mcs;
2331 		if (rate_mcs > ATH12K_VHT_MCS_MAX) {
2332 			ath12k_warn(ar->ab,
2333 				    "Received with invalid mcs in VHT mode %d\n",
2334 				     rate_mcs);
2335 			break;
2336 		}
2337 		rx_status->nss = nss;
2338 		if (sgi)
2339 			rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
2340 		rx_status->bw = ath12k_mac_bw_to_mac80211_bw(bw);
2341 		break;
2342 	case RX_MSDU_START_PKT_TYPE_11AX:
2343 		rx_status->rate_idx = rate_mcs;
2344 		if (rate_mcs > ATH12K_HE_MCS_MAX) {
2345 			ath12k_warn(ar->ab,
2346 				    "Received with invalid mcs in HE mode %d\n",
2347 				    rate_mcs);
2348 			break;
2349 		}
2350 		rx_status->encoding = RX_ENC_HE;
2351 		rx_status->nss = nss;
2352 		rx_status->he_gi = ath12k_he_gi_to_nl80211_he_gi(sgi);
2353 		rx_status->bw = ath12k_mac_bw_to_mac80211_bw(bw);
2354 		break;
2355 	}
2356 }
2357 
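/* Fill the PPDU-level rx_status fields. The frequency meta data from
 * the rx descriptor packs the channel number in its lower 16 bits and
 * the center frequency in its upper 16 bits; the band is inferred from
 * those, falling back to the current rx channel when neither range
 * matches.
 */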
2358 void ath12k_dp_rx_h_ppdu(struct ath12k *ar, struct hal_rx_desc *rx_desc,
2359 			 struct ieee80211_rx_status *rx_status)
2360 {
2361 	struct ath12k_base *ab = ar->ab;
2362 	u8 channel_num;
2363 	u32 center_freq, meta_data;
2364 	struct ieee80211_channel *channel;
2365 
2366 	rx_status->freq = 0;
2367 	rx_status->rate_idx = 0;
2368 	rx_status->nss = 0;
2369 	rx_status->encoding = RX_ENC_LEGACY;
2370 	rx_status->bw = RATE_INFO_BW_20;
2371 	rx_status->enc_flags = 0;
2372 
2373 	rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
2374 
2375 	meta_data = ath12k_dp_rx_h_freq(ab, rx_desc);
2376 	channel_num = meta_data;
2377 	center_freq = meta_data >> 16;
2378 
2379 	if (center_freq >= 5935 && center_freq <= 7105) {
2380 		rx_status->band = NL80211_BAND_6GHZ;
2381 	} else if (channel_num >= 1 && channel_num <= 14) {
2382 		rx_status->band = NL80211_BAND_2GHZ;
2383 	} else if (channel_num >= 36 && channel_num <= 173) {
2384 		rx_status->band = NL80211_BAND_5GHZ;
2385 	} else {
2386 		spin_lock_bh(&ar->data_lock);
2387 		channel = ar->rx_channel;
2388 		if (channel) {
2389 			rx_status->band = channel->band;
2390 			channel_num =
2391 				ieee80211_frequency_to_channel(channel->center_freq);
2392 		}
2393 		spin_unlock_bh(&ar->data_lock);
2394 		ath12k_dbg_dump(ar->ab, ATH12K_DBG_DATA, NULL, "rx_desc: ",
2395 				rx_desc, sizeof(*rx_desc));
2396 	}
2397 
2398 	rx_status->freq = ieee80211_channel_to_frequency(channel_num,
2399 							 rx_status->band);
2400 
2401 	ath12k_dp_rx_h_rate(ar, rx_desc, rx_status);
2402 }
2403 
2404 static void ath12k_dp_rx_deliver_msdu(struct ath12k *ar, struct napi_struct *napi,
2405 				      struct sk_buff *msdu,
2406 				      struct ieee80211_rx_status *status)
2407 {
2408 	struct ath12k_base *ab = ar->ab;
2409 	static const struct ieee80211_radiotap_he known = {
2410 		.data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
2411 				     IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN),
2412 		.data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN),
2413 	};
2414 	struct ieee80211_radiotap_he *he;
2415 	struct ieee80211_rx_status *rx_status;
2416 	struct ieee80211_sta *pubsta;
2417 	struct ath12k_peer *peer;
2418 	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
2419 	u8 decap = DP_RX_DECAP_TYPE_RAW;
2420 	bool is_mcbc = rxcb->is_mcbc;
2421 	bool is_eapol = rxcb->is_eapol;
2422 
2423 	if (status->encoding == RX_ENC_HE && !(status->flag & RX_FLAG_RADIOTAP_HE) &&
2424 	    !(status->flag & RX_FLAG_SKIP_MONITOR)) {
2425 		he = skb_push(msdu, sizeof(known));
2426 		memcpy(he, &known, sizeof(known));
2427 		status->flag |= RX_FLAG_RADIOTAP_HE;
2428 	}
2429 
2430 	if (!(status->flag & RX_FLAG_ONLY_MONITOR))
2431 		decap = ath12k_dp_rx_h_decap_type(ab, rxcb->rx_desc);
2432 
2433 	spin_lock_bh(&ab->base_lock);
2434 	peer = ath12k_dp_rx_h_find_peer(ab, msdu);
2435 
2436 	pubsta = peer ? peer->sta : NULL;
2437 
2438 	spin_unlock_bh(&ab->base_lock);
2439 
2440 	ath12k_dbg(ab, ATH12K_DBG_DATA,
2441 		   "rx skb %pK len %u peer %pM %d %s sn %u %s%s%s%s%s%s%s%s rate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
2442 		   msdu,
2443 		   msdu->len,
2444 		   peer ? peer->addr : NULL,
2445 		   rxcb->tid,
2446 		   is_mcbc ? "mcast" : "ucast",
2447 		   ath12k_dp_rx_h_seq_no(ab, rxcb->rx_desc),
2448 		   (status->encoding == RX_ENC_LEGACY) ? "legacy" : "",
2449 		   (status->encoding == RX_ENC_HT) ? "ht" : "",
2450 		   (status->encoding == RX_ENC_VHT) ? "vht" : "",
2451 		   (status->encoding == RX_ENC_HE) ? "he" : "",
2452 		   (status->bw == RATE_INFO_BW_40) ? "40" : "",
2453 		   (status->bw == RATE_INFO_BW_80) ? "80" : "",
2454 		   (status->bw == RATE_INFO_BW_160) ? "160" : "",
2455 		   status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "",
2456 		   status->rate_idx,
2457 		   status->nss,
2458 		   status->freq,
2459 		   status->band, status->flag,
2460 		   !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
2461 		   !!(status->flag & RX_FLAG_MMIC_ERROR),
2462 		   !!(status->flag & RX_FLAG_AMSDU_MORE));
2463 
2464 	ath12k_dbg_dump(ab, ATH12K_DBG_DP_RX, NULL, "dp rx msdu: ",
2465 			msdu->data, msdu->len);
2466 
2467 	rx_status = IEEE80211_SKB_RXCB(msdu);
2468 	*rx_status = *status;
2469 
2470 	/* TODO: trace rx packet */
2471 
	/* PN for multicast packets is not validated in HW,
	 * so skip the 802.3 rx path.
	 * Also, fast_rx expects the STA to be authorized, hence
	 * EAPOL packets are sent in the slow path.
	 */
2477 	if (decap == DP_RX_DECAP_TYPE_ETHERNET2_DIX && !is_eapol &&
2478 	    !(is_mcbc && rx_status->flag & RX_FLAG_DECRYPTED))
2479 		rx_status->flag |= RX_FLAG_8023;
2480 
2481 	ieee80211_rx_napi(ar->hw, pubsta, msdu, napi);
2482 }
2483 
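/* Process one reaped MSDU: find the buffer holding the MSDU_END TLVs
 * (the last buffer when the MSDU spans several), verify the msdu_done
 * bit, then fix up the skb in one of three ways: a fragment only needs
 * the rx descriptor pulled, a single-buffer MSDU is trimmed to
 * l3pad + msdu_len, and a multi-buffer MSDU is coalesced.
 */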
2484 static int ath12k_dp_rx_process_msdu(struct ath12k *ar,
2485 				     struct sk_buff *msdu,
2486 				     struct sk_buff_head *msdu_list,
2487 				     struct ieee80211_rx_status *rx_status)
2488 {
2489 	struct ath12k_base *ab = ar->ab;
2490 	struct hal_rx_desc *rx_desc, *lrx_desc;
2491 	struct ath12k_skb_rxcb *rxcb;
2492 	struct sk_buff *last_buf;
2493 	u8 l3_pad_bytes;
2494 	u16 msdu_len;
2495 	int ret;
2496 	u32 hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;
2497 
2498 	last_buf = ath12k_dp_rx_get_msdu_last_buf(msdu_list, msdu);
2499 	if (!last_buf) {
2500 		ath12k_warn(ab,
2501 			    "No valid Rx buffer to access MSDU_END tlv\n");
2502 		ret = -EIO;
2503 		goto free_out;
2504 	}
2505 
2506 	rx_desc = (struct hal_rx_desc *)msdu->data;
2507 	lrx_desc = (struct hal_rx_desc *)last_buf->data;
2508 	if (!ath12k_dp_rx_h_msdu_done(ab, lrx_desc)) {
2509 		ath12k_warn(ab, "msdu_done bit in msdu_end is not set\n");
2510 		ret = -EIO;
2511 		goto free_out;
2512 	}
2513 
2514 	rxcb = ATH12K_SKB_RXCB(msdu);
2515 	rxcb->rx_desc = rx_desc;
2516 	msdu_len = ath12k_dp_rx_h_msdu_len(ab, lrx_desc);
2517 	l3_pad_bytes = ath12k_dp_rx_h_l3pad(ab, lrx_desc);
2518 
2519 	if (rxcb->is_frag) {
2520 		skb_pull(msdu, hal_rx_desc_sz);
2521 	} else if (!rxcb->is_continuation) {
2522 		if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) {
2523 			ret = -EINVAL;
2524 			ath12k_warn(ab, "invalid msdu len %u\n", msdu_len);
2525 			ath12k_dbg_dump(ab, ATH12K_DBG_DATA, NULL, "", rx_desc,
2526 					sizeof(*rx_desc));
2527 			goto free_out;
2528 		}
2529 		skb_put(msdu, hal_rx_desc_sz + l3_pad_bytes + msdu_len);
2530 		skb_pull(msdu, hal_rx_desc_sz + l3_pad_bytes);
2531 	} else {
2532 		ret = ath12k_dp_rx_msdu_coalesce(ar, msdu_list,
2533 						 msdu, last_buf,
2534 						 l3_pad_bytes, msdu_len);
2535 		if (ret) {
			ath12k_warn(ab,
				    "failed to coalesce msdu rx buffer %d\n", ret);
2538 			goto free_out;
2539 		}
2540 	}
2541 
2542 	ath12k_dp_rx_h_ppdu(ar, rx_desc, rx_status);
2543 	ath12k_dp_rx_h_mpdu(ar, msdu, rx_desc, rx_status);
2544 
2545 	rx_status->flag |= RX_FLAG_SKIP_MONITOR | RX_FLAG_DUP_VALIDATED;
2546 
2547 	return 0;
2548 
2549 free_out:
2550 	return ret;
2551 }
2552 
2553 static void ath12k_dp_rx_process_received_packets(struct ath12k_base *ab,
2554 						  struct napi_struct *napi,
2555 						  struct sk_buff_head *msdu_list,
2556 						  int ring_id)
2557 {
2558 	struct ieee80211_rx_status rx_status = {0};
2559 	struct ath12k_skb_rxcb *rxcb;
2560 	struct sk_buff *msdu;
2561 	struct ath12k *ar;
2562 	u8 mac_id, pdev_id;
2563 	int ret;
2564 
2565 	if (skb_queue_empty(msdu_list))
2566 		return;
2567 
2568 	rcu_read_lock();
2569 
2570 	while ((msdu = __skb_dequeue(msdu_list))) {
2571 		rxcb = ATH12K_SKB_RXCB(msdu);
2572 		mac_id = rxcb->mac_id;
2573 		pdev_id = ath12k_hw_mac_id_to_pdev_id(ab->hw_params, mac_id);
2574 		ar = ab->pdevs[pdev_id].ar;
2575 		if (!rcu_dereference(ab->pdevs_active[pdev_id])) {
2576 			dev_kfree_skb_any(msdu);
2577 			continue;
2578 		}
2579 
2580 		if (test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) {
2581 			dev_kfree_skb_any(msdu);
2582 			continue;
2583 		}
2584 
2585 		ret = ath12k_dp_rx_process_msdu(ar, msdu, msdu_list, &rx_status);
2586 		if (ret) {
			ath12k_dbg(ab, ATH12K_DBG_DATA,
				   "Unable to process msdu %d\n", ret);
2589 			dev_kfree_skb_any(msdu);
2590 			continue;
2591 		}
2592 
2593 		ath12k_dp_rx_deliver_msdu(ar, napi, msdu, &rx_status);
2594 	}
2595 
2596 	rcu_read_unlock();
2597 }
2598 
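/* NAPI poll for a REO destination ring. Each ring entry carries the
 * virtual address of its rx descriptor in buf_va_hi/buf_va_lo (with a
 * cookie-based lookup as fallback), so the flow per entry is: recover
 * the desc_info, unmap and collect the skb, and track MSDU boundaries
 * via the continuation bit so the budget is accounted per MSDU rather
 * than per buffer. Reaped buffers are replenished before the collected
 * list is processed.
 */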
2599 int ath12k_dp_rx_process(struct ath12k_base *ab, int ring_id,
2600 			 struct napi_struct *napi, int budget)
2601 {
2602 	struct ath12k_rx_desc_info *desc_info;
2603 	struct ath12k_dp *dp = &ab->dp;
2604 	struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
2605 	struct hal_reo_dest_ring *desc;
2606 	int num_buffs_reaped = 0;
2607 	struct sk_buff_head msdu_list;
2608 	struct ath12k_skb_rxcb *rxcb;
2609 	int total_msdu_reaped = 0;
2610 	struct hal_srng *srng;
2611 	struct sk_buff *msdu;
2612 	bool done = false;
2613 	int mac_id;
2614 	u64 desc_va;
2615 
2616 	__skb_queue_head_init(&msdu_list);
2617 
2618 	srng = &ab->hal.srng_list[dp->reo_dst_ring[ring_id].ring_id];
2619 
2620 	spin_lock_bh(&srng->lock);
2621 
2622 try_again:
2623 	ath12k_hal_srng_access_begin(ab, srng);
2624 
2625 	while ((desc = ath12k_hal_srng_dst_get_next_entry(ab, srng))) {
2626 		enum hal_reo_dest_ring_push_reason push_reason;
2627 		u32 cookie;
2628 
2629 		cookie = le32_get_bits(desc->buf_addr_info.info1,
2630 				       BUFFER_ADDR_INFO1_SW_COOKIE);
2631 
2632 		mac_id = le32_get_bits(desc->info0,
2633 				       HAL_REO_DEST_RING_INFO0_SRC_LINK_ID);
2634 
2635 		desc_va = ((u64)le32_to_cpu(desc->buf_va_hi) << 32 |
2636 			   le32_to_cpu(desc->buf_va_lo));
2637 		desc_info = (struct ath12k_rx_desc_info *)((unsigned long)desc_va);
2638 
2639 		/* retry manual desc retrieval */
2640 		if (!desc_info) {
2641 			desc_info = ath12k_dp_get_rx_desc(ab, cookie);
2642 			if (!desc_info) {
				ath12k_warn(ab, "Invalid cookie in manual desc retrieval\n");
2644 				continue;
2645 			}
2646 		}
2647 
		if (desc_info->magic != ATH12K_DP_RX_DESC_MAGIC)
			ath12k_warn(ab, "Check HW CC implementation\n");
2650 
2651 		msdu = desc_info->skb;
2652 		desc_info->skb = NULL;
2653 
2654 		spin_lock_bh(&dp->rx_desc_lock);
2655 		list_move_tail(&desc_info->list, &dp->rx_desc_free_list);
2656 		spin_unlock_bh(&dp->rx_desc_lock);
2657 
2658 		rxcb = ATH12K_SKB_RXCB(msdu);
2659 		dma_unmap_single(ab->dev, rxcb->paddr,
2660 				 msdu->len + skb_tailroom(msdu),
2661 				 DMA_FROM_DEVICE);
2662 
2663 		num_buffs_reaped++;
2664 
2665 		push_reason = le32_get_bits(desc->info0,
2666 					    HAL_REO_DEST_RING_INFO0_PUSH_REASON);
2667 		if (push_reason !=
2668 		    HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION) {
2669 			dev_kfree_skb_any(msdu);
2670 			ab->soc_stats.hal_reo_error[dp->reo_dst_ring[ring_id].ring_id]++;
2671 			continue;
2672 		}
2673 
2674 		rxcb->is_first_msdu = !!(le32_to_cpu(desc->rx_msdu_info.info0) &
2675 					 RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU);
2676 		rxcb->is_last_msdu = !!(le32_to_cpu(desc->rx_msdu_info.info0) &
2677 					RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU);
2678 		rxcb->is_continuation = !!(le32_to_cpu(desc->rx_msdu_info.info0) &
2679 					   RX_MSDU_DESC_INFO0_MSDU_CONTINUATION);
2680 		rxcb->mac_id = mac_id;
2681 		rxcb->peer_id = le32_get_bits(desc->rx_mpdu_info.peer_meta_data,
2682 					      RX_MPDU_DESC_META_DATA_PEER_ID);
2683 		rxcb->tid = le32_get_bits(desc->rx_mpdu_info.info0,
2684 					  RX_MPDU_DESC_INFO0_TID);
2685 
2686 		__skb_queue_tail(&msdu_list, msdu);
2687 
2688 		if (!rxcb->is_continuation) {
2689 			total_msdu_reaped++;
2690 			done = true;
2691 		} else {
2692 			done = false;
2693 		}
2694 
2695 		if (total_msdu_reaped >= budget)
2696 			break;
2697 	}
2698 
	/* HW might have updated the head pointer after we cached it.
	 * In this case, even though there are entries in the ring, we'll
	 * get rx_desc NULL. Give the read another try with an updated cached
	 * head pointer so that we can reap complete MPDUs in the current
	 * rx processing.
	 */
2705 	if (!done && ath12k_hal_srng_dst_num_free(ab, srng, true)) {
2706 		ath12k_hal_srng_access_end(ab, srng);
2707 		goto try_again;
2708 	}
2709 
2710 	ath12k_hal_srng_access_end(ab, srng);
2711 
2712 	spin_unlock_bh(&srng->lock);
2713 
2714 	if (!total_msdu_reaped)
2715 		goto exit;
2716 
2717 	/* TODO: Move to implicit BM? */
2718 	ath12k_dp_rx_bufs_replenish(ab, 0, rx_ring, num_buffs_reaped,
2719 				    ab->hw_params->hal_params->rx_buf_rbm, true);
2720 
2721 	ath12k_dp_rx_process_received_packets(ab, napi, &msdu_list,
2722 					      ring_id);
2723 
2724 exit:
2725 	return total_msdu_reaped;
2726 }
2727 
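/* Fragment reassembly timeout: if the fragment bitmap is still
 * incomplete when the timer fires, drop the partial sequence.
 */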
2728 static void ath12k_dp_rx_frag_timer(struct timer_list *timer)
2729 {
2730 	struct ath12k_dp_rx_tid *rx_tid = from_timer(rx_tid, timer, frag_timer);
2731 
2732 	spin_lock_bh(&rx_tid->ab->base_lock);
2733 	if (rx_tid->last_frag_no &&
2734 	    rx_tid->rx_frag_bitmap == GENMASK(rx_tid->last_frag_no, 0)) {
2735 		spin_unlock_bh(&rx_tid->ab->base_lock);
2736 		return;
2737 	}
2738 	ath12k_dp_rx_frags_cleanup(rx_tid, true);
2739 	spin_unlock_bh(&rx_tid->ab->base_lock);
2740 }
2741 
2742 int ath12k_dp_rx_peer_frag_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_id)
2743 {
2744 	struct ath12k_base *ab = ar->ab;
2745 	struct crypto_shash *tfm;
2746 	struct ath12k_peer *peer;
2747 	struct ath12k_dp_rx_tid *rx_tid;
2748 	int i;
2749 
2750 	tfm = crypto_alloc_shash("michael_mic", 0, 0);
2751 	if (IS_ERR(tfm))
2752 		return PTR_ERR(tfm);
2753 
2754 	spin_lock_bh(&ab->base_lock);
2755 
2756 	peer = ath12k_peer_find(ab, vdev_id, peer_mac);
2757 	if (!peer) {
2758 		spin_unlock_bh(&ab->base_lock);
2759 		ath12k_warn(ab, "failed to find the peer to set up fragment info\n");
2760 		return -ENOENT;
2761 	}
2762 
2763 	for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
2764 		rx_tid = &peer->rx_tid[i];
2765 		rx_tid->ab = ab;
2766 		timer_setup(&rx_tid->frag_timer, ath12k_dp_rx_frag_timer, 0);
2767 		skb_queue_head_init(&rx_tid->rx_frags);
2768 	}
2769 
2770 	peer->tfm_mmic = tfm;
2771 	peer->dp_setup_done = true;
2772 	spin_unlock_bh(&ab->base_lock);
2773 
2774 	return 0;
2775 }
2776 
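/* Compute the Michael MIC over a 16 byte pseudo-header followed by the
 * payload. The pseudo-header layout is DA (6 bytes), SA (6 bytes),
 * priority/TID (1 byte) and 3 zero bytes, matching the TKIP MMIC
 * definition; the 8 byte rx MIC key is used as the michael_mic shash
 * key.
 */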
2777 static int ath12k_dp_rx_h_michael_mic(struct crypto_shash *tfm, u8 *key,
2778 				      struct ieee80211_hdr *hdr, u8 *data,
2779 				      size_t data_len, u8 *mic)
2780 {
2781 	SHASH_DESC_ON_STACK(desc, tfm);
2782 	u8 mic_hdr[16] = {0};
2783 	u8 tid = 0;
2784 	int ret;
2785 
2786 	if (!tfm)
2787 		return -EINVAL;
2788 
2789 	desc->tfm = tfm;
2790 
2791 	ret = crypto_shash_setkey(tfm, key, 8);
2792 	if (ret)
2793 		goto out;
2794 
2795 	ret = crypto_shash_init(desc);
2796 	if (ret)
2797 		goto out;
2798 
2799 	/* TKIP MIC header */
2800 	memcpy(mic_hdr, ieee80211_get_DA(hdr), ETH_ALEN);
2801 	memcpy(mic_hdr + ETH_ALEN, ieee80211_get_SA(hdr), ETH_ALEN);
2802 	if (ieee80211_is_data_qos(hdr->frame_control))
2803 		tid = ieee80211_get_tid(hdr);
2804 	mic_hdr[12] = tid;
2805 
2806 	ret = crypto_shash_update(desc, mic_hdr, 16);
2807 	if (ret)
2808 		goto out;
2809 	ret = crypto_shash_update(desc, data, data_len);
2810 	if (ret)
2811 		goto out;
2812 	ret = crypto_shash_final(desc, mic);
2813 out:
2814 	shash_desc_zero(desc);
2815 	return ret;
2816 }
2817 
2818 static int ath12k_dp_rx_h_verify_tkip_mic(struct ath12k *ar, struct ath12k_peer *peer,
2819 					  struct sk_buff *msdu)
2820 {
2821 	struct ath12k_base *ab = ar->ab;
2822 	struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)msdu->data;
2823 	struct ieee80211_rx_status *rxs = IEEE80211_SKB_RXCB(msdu);
2824 	struct ieee80211_key_conf *key_conf;
2825 	struct ieee80211_hdr *hdr;
2826 	u8 mic[IEEE80211_CCMP_MIC_LEN];
2827 	int head_len, tail_len, ret;
2828 	size_t data_len;
2829 	u32 hdr_len, hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;
2830 	u8 *key, *data;
2831 	u8 key_idx;
2832 
2833 	if (ath12k_dp_rx_h_enctype(ab, rx_desc) != HAL_ENCRYPT_TYPE_TKIP_MIC)
2834 		return 0;
2835 
2836 	hdr = (struct ieee80211_hdr *)(msdu->data + hal_rx_desc_sz);
2837 	hdr_len = ieee80211_hdrlen(hdr->frame_control);
2838 	head_len = hdr_len + hal_rx_desc_sz + IEEE80211_TKIP_IV_LEN;
2839 	tail_len = IEEE80211_CCMP_MIC_LEN + IEEE80211_TKIP_ICV_LEN + FCS_LEN;
2840 
2841 	if (!is_multicast_ether_addr(hdr->addr1))
2842 		key_idx = peer->ucast_keyidx;
2843 	else
2844 		key_idx = peer->mcast_keyidx;
2845 
2846 	key_conf = peer->keys[key_idx];
2847 
2848 	data = msdu->data + head_len;
2849 	data_len = msdu->len - head_len - tail_len;
2850 	key = &key_conf->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY];
2851 
2852 	ret = ath12k_dp_rx_h_michael_mic(peer->tfm_mmic, key, hdr, data, data_len, mic);
2853 	if (ret || memcmp(mic, data + data_len, IEEE80211_CCMP_MIC_LEN))
2854 		goto mic_fail;
2855 
2856 	return 0;
2857 
2858 mic_fail:
2859 	(ATH12K_SKB_RXCB(msdu))->is_first_msdu = true;
2860 	(ATH12K_SKB_RXCB(msdu))->is_last_msdu = true;
2861 
	rxs->flag |= RX_FLAG_MMIC_ERROR | RX_FLAG_MMIC_STRIPPED |
		     RX_FLAG_IV_STRIPPED | RX_FLAG_DECRYPTED;
2864 	skb_pull(msdu, hal_rx_desc_sz);
2865 
2866 	ath12k_dp_rx_h_ppdu(ar, rx_desc, rxs);
2867 	ath12k_dp_rx_h_undecap(ar, msdu, rx_desc,
2868 			       HAL_ENCRYPT_TYPE_TKIP_MIC, rxs, true);
2869 	ieee80211_rx(ar->hw, msdu);
2870 	return -EINVAL;
2871 }
2872 
2873 static void ath12k_dp_rx_h_undecap_frag(struct ath12k *ar, struct sk_buff *msdu,
2874 					enum hal_encrypt_type enctype, u32 flags)
2875 {
2876 	struct ieee80211_hdr *hdr;
2877 	size_t hdr_len;
2878 	size_t crypto_len;
2879 	u32 hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;
2880 
2881 	if (!flags)
2882 		return;
2883 
2884 	hdr = (struct ieee80211_hdr *)(msdu->data + hal_rx_desc_sz);
2885 
2886 	if (flags & RX_FLAG_MIC_STRIPPED)
2887 		skb_trim(msdu, msdu->len -
2888 			 ath12k_dp_rx_crypto_mic_len(ar, enctype));
2889 
2890 	if (flags & RX_FLAG_ICV_STRIPPED)
2891 		skb_trim(msdu, msdu->len -
2892 			 ath12k_dp_rx_crypto_icv_len(ar, enctype));
2893 
2894 	if (flags & RX_FLAG_IV_STRIPPED) {
2895 		hdr_len = ieee80211_hdrlen(hdr->frame_control);
2896 		crypto_len = ath12k_dp_rx_crypto_param_len(ar, enctype);
2897 
2898 		memmove(msdu->data + hal_rx_desc_sz + crypto_len,
2899 			msdu->data + hal_rx_desc_sz, hdr_len);
2900 		skb_pull(msdu, crypto_len);
2901 	}
2902 }
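/* Merge a complete fragment list into the first fragment: per fragment,
 * strip the FCS (except on the last), strip crypto material depending
 * on what the hw already removed, pull the rx descriptor plus 802.11
 * header from all but the first fragment, then append the remaining
 * payloads to the first skb and clear the morefrags bit.
 */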
2903 
2904 static int ath12k_dp_rx_h_defrag(struct ath12k *ar,
2905 				 struct ath12k_peer *peer,
2906 				 struct ath12k_dp_rx_tid *rx_tid,
2907 				 struct sk_buff **defrag_skb)
2908 {
2909 	struct ath12k_base *ab = ar->ab;
2910 	struct hal_rx_desc *rx_desc;
2911 	struct sk_buff *skb, *first_frag, *last_frag;
2912 	struct ieee80211_hdr *hdr;
2913 	enum hal_encrypt_type enctype;
2914 	bool is_decrypted = false;
2915 	int msdu_len = 0;
2916 	int extra_space;
2917 	u32 flags, hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;
2918 
2919 	first_frag = skb_peek(&rx_tid->rx_frags);
2920 	last_frag = skb_peek_tail(&rx_tid->rx_frags);
2921 
2922 	skb_queue_walk(&rx_tid->rx_frags, skb) {
2923 		flags = 0;
2924 		rx_desc = (struct hal_rx_desc *)skb->data;
2925 		hdr = (struct ieee80211_hdr *)(skb->data + hal_rx_desc_sz);
2926 
2927 		enctype = ath12k_dp_rx_h_enctype(ab, rx_desc);
2928 		if (enctype != HAL_ENCRYPT_TYPE_OPEN)
2929 			is_decrypted = ath12k_dp_rx_h_is_decrypted(ab,
2930 								   rx_desc);
2931 
2932 		if (is_decrypted) {
2933 			if (skb != first_frag)
2934 				flags |= RX_FLAG_IV_STRIPPED;
2935 			if (skb != last_frag)
2936 				flags |= RX_FLAG_ICV_STRIPPED |
2937 					 RX_FLAG_MIC_STRIPPED;
2938 		}
2939 
2940 		/* RX fragments are always raw packets */
2941 		if (skb != last_frag)
2942 			skb_trim(skb, skb->len - FCS_LEN);
2943 		ath12k_dp_rx_h_undecap_frag(ar, skb, enctype, flags);
2944 
2945 		if (skb != first_frag)
2946 			skb_pull(skb, hal_rx_desc_sz +
2947 				      ieee80211_hdrlen(hdr->frame_control));
2948 		msdu_len += skb->len;
2949 	}
2950 
2951 	extra_space = msdu_len - (DP_RX_BUFFER_SIZE + skb_tailroom(first_frag));
2952 	if (extra_space > 0 &&
2953 	    (pskb_expand_head(first_frag, 0, extra_space, GFP_ATOMIC) < 0))
2954 		return -ENOMEM;
2955 
2956 	__skb_unlink(first_frag, &rx_tid->rx_frags);
2957 	while ((skb = __skb_dequeue(&rx_tid->rx_frags))) {
2958 		skb_put_data(first_frag, skb->data, skb->len);
2959 		dev_kfree_skb_any(skb);
2960 	}
2961 
2962 	hdr = (struct ieee80211_hdr *)(first_frag->data + hal_rx_desc_sz);
2963 	hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_MOREFRAGS);
2964 	ATH12K_SKB_RXCB(first_frag)->is_frag = 1;
2965 
2966 	if (ath12k_dp_rx_h_verify_tkip_mic(ar, peer, first_frag))
2967 		first_frag = NULL;
2968 
2969 	*defrag_skb = first_frag;
2970 	return 0;
2971 }
2972 
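/* Reinject a defragmented MPDU into the REO entrance ring so that hw
 * delivers it through the normal rx path: reuse the saved destination
 * ring descriptor to locate the MSDU link descriptor, rewrite its first
 * MSDU entry to describe the single coalesced buffer, DMA-map the new
 * skb with a fresh rx descriptor, and queue an entrance ring entry
 * carrying the original sequence number and destination indication.
 */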
2973 static int ath12k_dp_rx_h_defrag_reo_reinject(struct ath12k *ar,
2974 					      struct ath12k_dp_rx_tid *rx_tid,
2975 					      struct sk_buff *defrag_skb)
2976 {
2977 	struct ath12k_base *ab = ar->ab;
2978 	struct ath12k_dp *dp = &ab->dp;
2979 	struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)defrag_skb->data;
2980 	struct hal_reo_entrance_ring *reo_ent_ring;
2981 	struct hal_reo_dest_ring *reo_dest_ring;
2982 	struct dp_link_desc_bank *link_desc_banks;
2983 	struct hal_rx_msdu_link *msdu_link;
2984 	struct hal_rx_msdu_details *msdu0;
2985 	struct hal_srng *srng;
2986 	dma_addr_t link_paddr, buf_paddr;
2987 	u32 desc_bank, msdu_info, msdu_ext_info, mpdu_info;
2988 	u32 cookie, hal_rx_desc_sz, dest_ring_info0;
2989 	int ret;
2990 	struct ath12k_rx_desc_info *desc_info;
2991 	u8 dst_ind;
2992 
2993 	hal_rx_desc_sz = ab->hw_params->hal_desc_sz;
2994 	link_desc_banks = dp->link_desc_banks;
2995 	reo_dest_ring = rx_tid->dst_ring_desc;
2996 
2997 	ath12k_hal_rx_reo_ent_paddr_get(ab, &reo_dest_ring->buf_addr_info,
2998 					&link_paddr, &cookie);
2999 	desc_bank = u32_get_bits(cookie, DP_LINK_DESC_BANK_MASK);
3000 
3001 	msdu_link = (struct hal_rx_msdu_link *)(link_desc_banks[desc_bank].vaddr +
3002 			(link_paddr - link_desc_banks[desc_bank].paddr));
3003 	msdu0 = &msdu_link->msdu_link[0];
3004 	msdu_ext_info = le32_to_cpu(msdu0->rx_msdu_ext_info.info0);
3005 	dst_ind = u32_get_bits(msdu_ext_info, RX_MSDU_EXT_DESC_INFO0_REO_DEST_IND);
3006 
3007 	memset(msdu0, 0, sizeof(*msdu0));
3008 
3009 	msdu_info = u32_encode_bits(1, RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU) |
3010 		    u32_encode_bits(1, RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU) |
3011 		    u32_encode_bits(0, RX_MSDU_DESC_INFO0_MSDU_CONTINUATION) |
3012 		    u32_encode_bits(defrag_skb->len - hal_rx_desc_sz,
3013 				    RX_MSDU_DESC_INFO0_MSDU_LENGTH) |
3014 		    u32_encode_bits(1, RX_MSDU_DESC_INFO0_VALID_SA) |
3015 		    u32_encode_bits(1, RX_MSDU_DESC_INFO0_VALID_DA);
3016 	msdu0->rx_msdu_info.info0 = cpu_to_le32(msdu_info);
3017 	msdu0->rx_msdu_ext_info.info0 = cpu_to_le32(msdu_ext_info);
3018 
3019 	/* change msdu len in hal rx desc */
3020 	ath12k_dp_rxdesc_set_msdu_len(ab, rx_desc, defrag_skb->len - hal_rx_desc_sz);
3021 
3022 	buf_paddr = dma_map_single(ab->dev, defrag_skb->data,
3023 				   defrag_skb->len + skb_tailroom(defrag_skb),
3024 				   DMA_FROM_DEVICE);
3025 	if (dma_mapping_error(ab->dev, buf_paddr))
3026 		return -ENOMEM;
3027 
3028 	spin_lock_bh(&dp->rx_desc_lock);
3029 	desc_info = list_first_entry_or_null(&dp->rx_desc_free_list,
3030 					     struct ath12k_rx_desc_info,
3031 					     list);
3032 	if (!desc_info) {
3033 		spin_unlock_bh(&dp->rx_desc_lock);
3034 		ath12k_warn(ab, "failed to find rx desc for reinject\n");
3035 		ret = -ENOMEM;
3036 		goto err_unmap_dma;
3037 	}
3038 
3039 	desc_info->skb = defrag_skb;
3040 
3041 	list_del(&desc_info->list);
3042 	list_add_tail(&desc_info->list, &dp->rx_desc_used_list);
3043 	spin_unlock_bh(&dp->rx_desc_lock);
3044 
3045 	ATH12K_SKB_RXCB(defrag_skb)->paddr = buf_paddr;
3046 
3047 	ath12k_hal_rx_buf_addr_info_set(&msdu0->buf_addr_info, buf_paddr,
3048 					desc_info->cookie,
3049 					HAL_RX_BUF_RBM_SW3_BM);
3050 
3051 	/* Fill mpdu details into reo entrance ring */
3052 	srng = &ab->hal.srng_list[dp->reo_reinject_ring.ring_id];
3053 
3054 	spin_lock_bh(&srng->lock);
3055 	ath12k_hal_srng_access_begin(ab, srng);
3056 
3057 	reo_ent_ring = ath12k_hal_srng_src_get_next_entry(ab, srng);
3058 	if (!reo_ent_ring) {
3059 		ath12k_hal_srng_access_end(ab, srng);
3060 		spin_unlock_bh(&srng->lock);
3061 		ret = -ENOSPC;
3062 		goto err_free_desc;
3063 	}
3064 	memset(reo_ent_ring, 0, sizeof(*reo_ent_ring));
3065 
3066 	ath12k_hal_rx_buf_addr_info_set(&reo_ent_ring->buf_addr_info, link_paddr,
3067 					cookie,
3068 					HAL_RX_BUF_RBM_WBM_CHIP0_IDLE_DESC_LIST);
3069 
3070 	mpdu_info = u32_encode_bits(1, RX_MPDU_DESC_INFO0_MSDU_COUNT) |
3071 		    u32_encode_bits(0, RX_MPDU_DESC_INFO0_FRAG_FLAG) |
3072 		    u32_encode_bits(1, RX_MPDU_DESC_INFO0_RAW_MPDU) |
3073 		    u32_encode_bits(1, RX_MPDU_DESC_INFO0_VALID_PN) |
3074 		    u32_encode_bits(rx_tid->tid, RX_MPDU_DESC_INFO0_TID);
3075 
3076 	reo_ent_ring->rx_mpdu_info.info0 = cpu_to_le32(mpdu_info);
3077 	reo_ent_ring->rx_mpdu_info.peer_meta_data =
3078 		reo_dest_ring->rx_mpdu_info.peer_meta_data;
3079 
	/* Firmware expects the physical address to be filled in queue_addr_lo
	 * in the MLO scenario; in the non-MLO case the peer meta data needs
	 * to be filled instead.
	 * TODO: Handle the MLO scenario.
	 */
3085 	reo_ent_ring->queue_addr_lo = reo_dest_ring->rx_mpdu_info.peer_meta_data;
3086 	reo_ent_ring->info0 = le32_encode_bits(dst_ind,
3087 					       HAL_REO_ENTR_RING_INFO0_DEST_IND);
3088 
3089 	reo_ent_ring->info1 = le32_encode_bits(rx_tid->cur_sn,
3090 					       HAL_REO_ENTR_RING_INFO1_MPDU_SEQ_NUM);
3091 	dest_ring_info0 = le32_get_bits(reo_dest_ring->info0,
3092 					HAL_REO_DEST_RING_INFO0_SRC_LINK_ID);
3093 	reo_ent_ring->info2 =
3094 		cpu_to_le32(u32_get_bits(dest_ring_info0,
3095 					 HAL_REO_ENTR_RING_INFO2_SRC_LINK_ID));
3096 
3097 	ath12k_hal_srng_access_end(ab, srng);
3098 	spin_unlock_bh(&srng->lock);
3099 
3100 	return 0;
3101 
3102 err_free_desc:
3103 	spin_lock_bh(&dp->rx_desc_lock);
3104 	list_del(&desc_info->list);
3105 	list_add_tail(&desc_info->list, &dp->rx_desc_free_list);
3106 	desc_info->skb = NULL;
3107 	spin_unlock_bh(&dp->rx_desc_lock);
3108 err_unmap_dma:
3109 	dma_unmap_single(ab->dev, buf_paddr, defrag_skb->len + skb_tailroom(defrag_skb),
3110 			 DMA_FROM_DEVICE);
3111 	return ret;
3112 }
3113 
3114 static int ath12k_dp_rx_h_cmp_frags(struct ath12k_base *ab,
3115 				    struct sk_buff *a, struct sk_buff *b)
3116 {
3117 	int frag1, frag2;
3118 
3119 	frag1 = ath12k_dp_rx_h_frag_no(ab, a);
3120 	frag2 = ath12k_dp_rx_h_frag_no(ab, b);
3121 
3122 	return frag1 - frag2;
3123 }
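/* Keep the pending fragment list ordered by fragment number; fragments
 * may arrive out of order, so insert each one before the first entry
 * that compares greater.
 */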
3124 
3125 static void ath12k_dp_rx_h_sort_frags(struct ath12k_base *ab,
3126 				      struct sk_buff_head *frag_list,
3127 				      struct sk_buff *cur_frag)
3128 {
3129 	struct sk_buff *skb;
3130 	int cmp;
3131 
3132 	skb_queue_walk(frag_list, skb) {
3133 		cmp = ath12k_dp_rx_h_cmp_frags(ab, skb, cur_frag);
3134 		if (cmp < 0)
3135 			continue;
3136 		__skb_queue_before(frag_list, skb, cur_frag);
3137 		return;
3138 	}
3139 	__skb_queue_tail(frag_list, cur_frag);
3140 }
3141 
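/* Extract the 48-bit CCMP/GCMP packet number from the crypto header
 * that follows the 802.11 header. The header bytes are PN0, PN1,
 * reserved, key-id, PN2, PN3, PN4, PN5, so the PN is assembled as
 * PN0 | PN1 << 8 | PN2 << 16 | ... | PN5 << 40.
 */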
3142 static u64 ath12k_dp_rx_h_get_pn(struct ath12k *ar, struct sk_buff *skb)
3143 {
3144 	struct ieee80211_hdr *hdr;
3145 	u64 pn = 0;
3146 	u8 *ehdr;
3147 	u32 hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;
3148 
3149 	hdr = (struct ieee80211_hdr *)(skb->data + hal_rx_desc_sz);
3150 	ehdr = skb->data + hal_rx_desc_sz + ieee80211_hdrlen(hdr->frame_control);
3151 
3152 	pn = ehdr[0];
3153 	pn |= (u64)ehdr[1] << 8;
3154 	pn |= (u64)ehdr[4] << 16;
3155 	pn |= (u64)ehdr[5] << 24;
3156 	pn |= (u64)ehdr[6] << 32;
3157 	pn |= (u64)ehdr[7] << 40;
3158 
3159 	return pn;
3160 }
3161 
3162 static bool
3163 ath12k_dp_rx_h_defrag_validate_incr_pn(struct ath12k *ar, struct ath12k_dp_rx_tid *rx_tid)
3164 {
3165 	struct ath12k_base *ab = ar->ab;
3166 	enum hal_encrypt_type encrypt_type;
3167 	struct sk_buff *first_frag, *skb;
3168 	struct hal_rx_desc *desc;
3169 	u64 last_pn;
3170 	u64 cur_pn;
3171 
3172 	first_frag = skb_peek(&rx_tid->rx_frags);
3173 	desc = (struct hal_rx_desc *)first_frag->data;
3174 
3175 	encrypt_type = ath12k_dp_rx_h_enctype(ab, desc);
3176 	if (encrypt_type != HAL_ENCRYPT_TYPE_CCMP_128 &&
3177 	    encrypt_type != HAL_ENCRYPT_TYPE_CCMP_256 &&
3178 	    encrypt_type != HAL_ENCRYPT_TYPE_GCMP_128 &&
3179 	    encrypt_type != HAL_ENCRYPT_TYPE_AES_GCMP_256)
3180 		return true;
3181 
3182 	last_pn = ath12k_dp_rx_h_get_pn(ar, first_frag);
3183 	skb_queue_walk(&rx_tid->rx_frags, skb) {
3184 		if (skb == first_frag)
3185 			continue;
3186 
3187 		cur_pn = ath12k_dp_rx_h_get_pn(ar, skb);
3188 		if (cur_pn != last_pn + 1)
3189 			return false;
3190 		last_pn = cur_pn;
3191 	}
3192 	return true;
3193 }
3194 
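/* Defragmentation entry point for MSDUs landing in the reo exception
 * ring: validate the frame, restart the sequence when the sequence
 * number changes, slot the fragment into the per-TID list and bitmap,
 * and once the bitmap is complete (no more-frags on the highest
 * fragment) verify PN continuity and the TKIP MIC, merge the fragments
 * and reinject the result.
 */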
3195 static int ath12k_dp_rx_frag_h_mpdu(struct ath12k *ar,
3196 				    struct sk_buff *msdu,
3197 				    struct hal_reo_dest_ring *ring_desc)
3198 {
3199 	struct ath12k_base *ab = ar->ab;
3200 	struct hal_rx_desc *rx_desc;
3201 	struct ath12k_peer *peer;
3202 	struct ath12k_dp_rx_tid *rx_tid;
3203 	struct sk_buff *defrag_skb = NULL;
3204 	u32 peer_id;
3205 	u16 seqno, frag_no;
3206 	u8 tid;
3207 	int ret = 0;
3208 	bool more_frags;
3209 
3210 	rx_desc = (struct hal_rx_desc *)msdu->data;
3211 	peer_id = ath12k_dp_rx_h_peer_id(ab, rx_desc);
3212 	tid = ath12k_dp_rx_h_tid(ab, rx_desc);
3213 	seqno = ath12k_dp_rx_h_seq_no(ab, rx_desc);
3214 	frag_no = ath12k_dp_rx_h_frag_no(ab, msdu);
3215 	more_frags = ath12k_dp_rx_h_more_frags(ab, msdu);
3216 
3217 	if (!ath12k_dp_rx_h_seq_ctrl_valid(ab, rx_desc) ||
3218 	    !ath12k_dp_rx_h_fc_valid(ab, rx_desc) ||
3219 	    tid > IEEE80211_NUM_TIDS)
3220 		return -EINVAL;
3221 
	/* An unfragmented packet received in the reo exception ring
	 * shouldn't happen, as such packets typically come from the
	 * reo2sw srngs.
	 */
3227 	if (WARN_ON_ONCE(!frag_no && !more_frags))
3228 		return -EINVAL;
3229 
3230 	spin_lock_bh(&ab->base_lock);
3231 	peer = ath12k_peer_find_by_id(ab, peer_id);
3232 	if (!peer) {
3233 		ath12k_warn(ab, "failed to find the peer to de-fragment received fragment peer_id %d\n",
3234 			    peer_id);
3235 		ret = -ENOENT;
3236 		goto out_unlock;
3237 	}
3238 
3239 	if (!peer->dp_setup_done) {
3240 		ath12k_warn(ab, "The peer %pM [%d] has uninitialized datapath\n",
3241 			    peer->addr, peer_id);
3242 		ret = -ENOENT;
3243 		goto out_unlock;
3244 	}
3245 
3246 	rx_tid = &peer->rx_tid[tid];
3247 
3248 	if ((!skb_queue_empty(&rx_tid->rx_frags) && seqno != rx_tid->cur_sn) ||
3249 	    skb_queue_empty(&rx_tid->rx_frags)) {
3250 		/* Flush stored fragments and start a new sequence */
3251 		ath12k_dp_rx_frags_cleanup(rx_tid, true);
3252 		rx_tid->cur_sn = seqno;
3253 	}
3254 
3255 	if (rx_tid->rx_frag_bitmap & BIT(frag_no)) {
3256 		/* Fragment already present */
3257 		ret = -EINVAL;
3258 		goto out_unlock;
3259 	}
3260 
	if (!rx_tid->rx_frag_bitmap || frag_no > __fls(rx_tid->rx_frag_bitmap))
3262 		__skb_queue_tail(&rx_tid->rx_frags, msdu);
3263 	else
3264 		ath12k_dp_rx_h_sort_frags(ab, &rx_tid->rx_frags, msdu);
3265 
3266 	rx_tid->rx_frag_bitmap |= BIT(frag_no);
3267 	if (!more_frags)
3268 		rx_tid->last_frag_no = frag_no;
3269 
3270 	if (frag_no == 0) {
3271 		rx_tid->dst_ring_desc = kmemdup(ring_desc,
3272 						sizeof(*rx_tid->dst_ring_desc),
3273 						GFP_ATOMIC);
3274 		if (!rx_tid->dst_ring_desc) {
3275 			ret = -ENOMEM;
3276 			goto out_unlock;
3277 		}
3278 	} else {
3279 		ath12k_dp_rx_link_desc_return(ab, ring_desc,
3280 					      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
3281 	}
3282 
3283 	if (!rx_tid->last_frag_no ||
3284 	    rx_tid->rx_frag_bitmap != GENMASK(rx_tid->last_frag_no, 0)) {
3285 		mod_timer(&rx_tid->frag_timer, jiffies +
3286 					       ATH12K_DP_RX_FRAGMENT_TIMEOUT_MS);
3287 		goto out_unlock;
3288 	}
3289 
3290 	spin_unlock_bh(&ab->base_lock);
3291 	del_timer_sync(&rx_tid->frag_timer);
3292 	spin_lock_bh(&ab->base_lock);
3293 
3294 	peer = ath12k_peer_find_by_id(ab, peer_id);
3295 	if (!peer)
3296 		goto err_frags_cleanup;
3297 
3298 	if (!ath12k_dp_rx_h_defrag_validate_incr_pn(ar, rx_tid))
3299 		goto err_frags_cleanup;
3300 
3301 	if (ath12k_dp_rx_h_defrag(ar, peer, rx_tid, &defrag_skb))
3302 		goto err_frags_cleanup;
3303 
3304 	if (!defrag_skb)
3305 		goto err_frags_cleanup;
3306 
3307 	if (ath12k_dp_rx_h_defrag_reo_reinject(ar, rx_tid, defrag_skb))
3308 		goto err_frags_cleanup;
3309 
3310 	ath12k_dp_rx_frags_cleanup(rx_tid, false);
3311 	goto out_unlock;
3312 
3313 err_frags_cleanup:
3314 	dev_kfree_skb_any(defrag_skb);
3315 	ath12k_dp_rx_frags_cleanup(rx_tid, true);
3316 out_unlock:
3317 	spin_unlock_bh(&ab->base_lock);
3318 	return ret;
3319 }
3320 
3321 static int
3322 ath12k_dp_process_rx_err_buf(struct ath12k *ar, struct hal_reo_dest_ring *desc,
3323 			     bool drop, u32 cookie)
3324 {
3325 	struct ath12k_base *ab = ar->ab;
3326 	struct sk_buff *msdu;
3327 	struct ath12k_skb_rxcb *rxcb;
3328 	struct hal_rx_desc *rx_desc;
3329 	u16 msdu_len;
3330 	u32 hal_rx_desc_sz = ab->hw_params->hal_desc_sz;
3331 	struct ath12k_rx_desc_info *desc_info;
3332 	u64 desc_va;
3333 
3334 	desc_va = ((u64)le32_to_cpu(desc->buf_va_hi) << 32 |
3335 		   le32_to_cpu(desc->buf_va_lo));
3336 	desc_info = (struct ath12k_rx_desc_info *)((unsigned long)desc_va);
3337 
3338 	/* retry manual desc retrieval */
3339 	if (!desc_info) {
3340 		desc_info = ath12k_dp_get_rx_desc(ab, cookie);
3341 		if (!desc_info) {
			ath12k_warn(ab, "Invalid cookie in manual desc retrieval\n");
3343 			return -EINVAL;
3344 		}
3345 	}
3346 
	if (desc_info->magic != ATH12K_DP_RX_DESC_MAGIC)
		ath12k_warn(ab, "RX exception, check HW CC implementation\n");
3349 
3350 	msdu = desc_info->skb;
3351 	desc_info->skb = NULL;
3352 	spin_lock_bh(&ab->dp.rx_desc_lock);
3353 	list_move_tail(&desc_info->list, &ab->dp.rx_desc_free_list);
3354 	spin_unlock_bh(&ab->dp.rx_desc_lock);
3355 
3356 	rxcb = ATH12K_SKB_RXCB(msdu);
3357 	dma_unmap_single(ar->ab->dev, rxcb->paddr,
3358 			 msdu->len + skb_tailroom(msdu),
3359 			 DMA_FROM_DEVICE);
3360 
3361 	if (drop) {
3362 		dev_kfree_skb_any(msdu);
3363 		return 0;
3364 	}
3365 
3366 	rcu_read_lock();
3367 	if (!rcu_dereference(ar->ab->pdevs_active[ar->pdev_idx])) {
3368 		dev_kfree_skb_any(msdu);
3369 		goto exit;
3370 	}
3371 
3372 	if (test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) {
3373 		dev_kfree_skb_any(msdu);
3374 		goto exit;
3375 	}
3376 
3377 	rx_desc = (struct hal_rx_desc *)msdu->data;
3378 	msdu_len = ath12k_dp_rx_h_msdu_len(ar->ab, rx_desc);
3379 	if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) {
		ath12k_warn(ar->ab, "invalid msdu len %u\n", msdu_len);
3381 		ath12k_dbg_dump(ar->ab, ATH12K_DBG_DATA, NULL, "", rx_desc,
3382 				sizeof(*rx_desc));
3383 		dev_kfree_skb_any(msdu);
3384 		goto exit;
3385 	}
3386 
3387 	skb_put(msdu, hal_rx_desc_sz + msdu_len);
3388 
3389 	if (ath12k_dp_rx_frag_h_mpdu(ar, msdu, desc)) {
3390 		dev_kfree_skb_any(msdu);
3391 		ath12k_dp_rx_link_desc_return(ar->ab, desc,
3392 					      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
3393 	}
3394 exit:
3395 	rcu_read_unlock();
3396 	return 0;
3397 }
3398 
3399 int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi,
3400 			     int budget)
3401 {
3402 	u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC];
3403 	struct dp_link_desc_bank *link_desc_banks;
3404 	enum hal_rx_buf_return_buf_manager rbm;
3405 	struct hal_rx_msdu_link *link_desc_va;
3406 	int tot_n_bufs_reaped, quota, ret, i;
3407 	struct hal_reo_dest_ring *reo_desc;
3408 	struct dp_rxdma_ring *rx_ring;
3409 	struct dp_srng *reo_except;
3410 	u32 desc_bank, num_msdus;
3411 	struct hal_srng *srng;
3412 	struct ath12k_dp *dp;
3413 	int mac_id;
3414 	struct ath12k *ar;
3415 	dma_addr_t paddr;
3416 	bool is_frag;
3417 	bool drop = false;
3418 	int pdev_id;
3419 
3420 	tot_n_bufs_reaped = 0;
3421 	quota = budget;
3422 
3423 	dp = &ab->dp;
3424 	reo_except = &dp->reo_except_ring;
3425 	link_desc_banks = dp->link_desc_banks;
3426 
3427 	srng = &ab->hal.srng_list[reo_except->ring_id];
3428 
3429 	spin_lock_bh(&srng->lock);
3430 
3431 	ath12k_hal_srng_access_begin(ab, srng);
3432 
3433 	while (budget &&
3434 	       (reo_desc = ath12k_hal_srng_dst_get_next_entry(ab, srng))) {
3435 		ab->soc_stats.err_ring_pkts++;
3436 		ret = ath12k_hal_desc_reo_parse_err(ab, reo_desc, &paddr,
3437 						    &desc_bank);
3438 		if (ret) {
3439 			ath12k_warn(ab, "failed to parse error reo desc %d\n",
3440 				    ret);
3441 			continue;
3442 		}
3443 		link_desc_va = link_desc_banks[desc_bank].vaddr +
3444 			       (paddr - link_desc_banks[desc_bank].paddr);
3445 		ath12k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, msdu_cookies,
3446 						 &rbm);
3447 		if (rbm != HAL_RX_BUF_RBM_WBM_CHIP0_IDLE_DESC_LIST &&
3448 		    rbm != HAL_RX_BUF_RBM_SW3_BM &&
3449 		    rbm != ab->hw_params->hal_params->rx_buf_rbm) {
3450 			ab->soc_stats.invalid_rbm++;
3451 			ath12k_warn(ab, "invalid return buffer manager %d\n", rbm);
3452 			ath12k_dp_rx_link_desc_return(ab, reo_desc,
3453 						      HAL_WBM_REL_BM_ACT_REL_MSDU);
3454 			continue;
3455 		}
3456 
3457 		is_frag = !!(le32_to_cpu(reo_desc->rx_mpdu_info.info0) &
3458 			     RX_MPDU_DESC_INFO0_FRAG_FLAG);
3459 
		/* Below, process only rx fragments with one MSDU per link
		 * desc, and drop MSDUs indicated due to error reasons.
		 */
3463 		if (!is_frag || num_msdus > 1) {
3464 			drop = true;
3465 			/* Return the link desc back to wbm idle list */
3466 			ath12k_dp_rx_link_desc_return(ab, reo_desc,
3467 						      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
3468 		}
3469 
3470 		for (i = 0; i < num_msdus; i++) {
3471 			mac_id = le32_get_bits(reo_desc->info0,
3472 					       HAL_REO_DEST_RING_INFO0_SRC_LINK_ID);
3473 
3474 			pdev_id = ath12k_hw_mac_id_to_pdev_id(ab->hw_params, mac_id);
3475 			ar = ab->pdevs[pdev_id].ar;
3476 
3477 			if (!ath12k_dp_process_rx_err_buf(ar, reo_desc, drop,
3478 							  msdu_cookies[i]))
3479 				tot_n_bufs_reaped++;
3480 		}
3481 
3482 		if (tot_n_bufs_reaped >= quota) {
3483 			tot_n_bufs_reaped = quota;
3484 			goto exit;
3485 		}
3486 
3487 		budget = quota - tot_n_bufs_reaped;
3488 	}
3489 
3490 exit:
3491 	ath12k_hal_srng_access_end(ab, srng);
3492 
3493 	spin_unlock_bh(&srng->lock);
3494 
3495 	rx_ring = &dp->rx_refill_buf_ring;
3496 
3497 	ath12k_dp_rx_bufs_replenish(ab, 0, rx_ring, tot_n_bufs_reaped,
3498 				    ab->hw_params->hal_params->rx_buf_rbm, true);
3499 
3500 	return tot_n_bufs_reaped;
3501 }
3502 
3503 static void ath12k_dp_rx_null_q_desc_sg_drop(struct ath12k *ar,
3504 					     int msdu_len,
3505 					     struct sk_buff_head *msdu_list)
3506 {
3507 	struct sk_buff *skb, *tmp;
3508 	struct ath12k_skb_rxcb *rxcb;
3509 	int n_buffs;
3510 
3511 	n_buffs = DIV_ROUND_UP(msdu_len,
3512 			       (DP_RX_BUFFER_SIZE - ar->ab->hw_params->hal_desc_sz));
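	/* i.e. the number of rx buffers the remaining msdu still spans, each
	 * buffer losing hal_desc_sz bytes to the hal rx descriptor
	 */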
3513 
3514 	skb_queue_walk_safe(msdu_list, skb, tmp) {
3515 		rxcb = ATH12K_SKB_RXCB(skb);
3516 		if (rxcb->err_rel_src == HAL_WBM_REL_SRC_MODULE_REO &&
3517 		    rxcb->err_code == HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO) {
3518 			if (!n_buffs)
3519 				break;
3520 			__skb_unlink(skb, msdu_list);
3521 			dev_kfree_skb_any(skb);
3522 			n_buffs--;
3523 		}
3524 	}
3525 }
3526 
3527 static int ath12k_dp_rx_h_null_q_desc(struct ath12k *ar, struct sk_buff *msdu,
3528 				      struct ieee80211_rx_status *status,
3529 				      struct sk_buff_head *msdu_list)
3530 {
3531 	struct ath12k_base *ab = ar->ab;
3532 	u16 msdu_len, peer_id;
3533 	struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
3534 	u8 l3pad_bytes;
3535 	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
3536 	u32 hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;
3537 
3538 	msdu_len = ath12k_dp_rx_h_msdu_len(ab, desc);
3539 	peer_id = ath12k_dp_rx_h_peer_id(ab, desc);
3540 
3541 	spin_lock(&ab->base_lock);
3542 	if (!ath12k_peer_find_by_id(ab, peer_id)) {
3543 		spin_unlock(&ab->base_lock);
		ath12k_dbg(ab, ATH12K_DBG_DATA, "invalid peer id %d received in wbm err pkt\n",
			   peer_id);
3546 		return -EINVAL;
3547 	}
3548 	spin_unlock(&ab->base_lock);
3549 
3550 	if (!rxcb->is_frag && ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE)) {
		/* First buffer will be freed by the caller, so deduct its length */
3552 		msdu_len = msdu_len - (DP_RX_BUFFER_SIZE - hal_rx_desc_sz);
3553 		ath12k_dp_rx_null_q_desc_sg_drop(ar, msdu_len, msdu_list);
3554 		return -EINVAL;
3555 	}
3556 
	/* Even after the sg buffers are cleaned up from the msdu list by the
	 * check above, any msdu received with the continuation flag set must
	 * be dropped as invalid. This protects against stray error frames
	 * arriving with the continuation flag.
	 */
3561 	if (rxcb->is_continuation)
3562 		return -EINVAL;
3563 
3564 	if (!ath12k_dp_rx_h_msdu_done(ab, desc)) {
		ath12k_warn(ar->ab,
			    "msdu_done bit not set in null_q_desc processing\n");
3567 		__skb_queue_purge(msdu_list);
3568 		return -EIO;
3569 	}
3570 
	/* Handle NULL queue descriptor violations arising out of a missing
	 * REO queue for a given peer or TID. This typically happens when a
	 * packet is received on a QoS-enabled TID before the ADDBA
	 * negotiation for that TID completes, i.e. before the TID queue is
	 * set up. It may also happen for MC/BC frames if they are not routed
	 * to the non-QoS TID queue, in the absence of any other default TID
	 * queue. This error can show up both in a REO destination ring and
	 * in the WBM release ring.
	 */
3579 
3580 	if (rxcb->is_frag) {
3581 		skb_pull(msdu, hal_rx_desc_sz);
3582 	} else {
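		/* Non-fragmented msdus carry l3 header padding; size the skb
		 * to cover the full msdu, then trim both the hal rx
		 * descriptor and the padding.
		 */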
3583 		l3pad_bytes = ath12k_dp_rx_h_l3pad(ab, desc);
3584 
3585 		if ((hal_rx_desc_sz + l3pad_bytes + msdu_len) > DP_RX_BUFFER_SIZE)
3586 			return -EINVAL;
3587 
3588 		skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len);
3589 		skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes);
3590 	}
3591 	ath12k_dp_rx_h_ppdu(ar, desc, status);
3592 
3593 	ath12k_dp_rx_h_mpdu(ar, msdu, desc, status);
3594 
3595 	rxcb->tid = ath12k_dp_rx_h_tid(ab, desc);
3596 
	/* Note that the caller retains access to the msdu and completes rx
	 * with mac80211, so there is no need to clean up the amsdu_list here.
	 */
3600 
3601 	return 0;
3602 }
3603 
3604 static bool ath12k_dp_rx_h_reo_err(struct ath12k *ar, struct sk_buff *msdu,
3605 				   struct ieee80211_rx_status *status,
3606 				   struct sk_buff_head *msdu_list)
3607 {
3608 	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
3609 	bool drop = false;
3610 
3611 	ar->ab->soc_stats.reo_error[rxcb->err_code]++;
3612 
3613 	switch (rxcb->err_code) {
3614 	case HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO:
3615 		if (ath12k_dp_rx_h_null_q_desc(ar, msdu, status, msdu_list))
3616 			drop = true;
3617 		break;
3618 	case HAL_REO_DEST_RING_ERROR_CODE_PN_CHECK_FAILED:
3619 		/* TODO: Do not drop PN failed packets in the driver;
3620 		 * instead, it is good to drop such packets in mac80211
3621 		 * after incrementing the replay counters.
3622 		 */
3623 		fallthrough;
3624 	default:
3625 		/* TODO: Review other errors and process them to mac80211
3626 		 * as appropriate.
3627 		 */
3628 		drop = true;
3629 		break;
3630 	}
3631 
3632 	return drop;
3633 }
3634 
3635 static void ath12k_dp_rx_h_tkip_mic_err(struct ath12k *ar, struct sk_buff *msdu,
3636 					struct ieee80211_rx_status *status)
3637 {
3638 	struct ath12k_base *ab = ar->ab;
3639 	u16 msdu_len;
3640 	struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
3641 	u8 l3pad_bytes;
3642 	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
3643 	u32 hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;
3644 
3645 	rxcb->is_first_msdu = ath12k_dp_rx_h_first_msdu(ab, desc);
3646 	rxcb->is_last_msdu = ath12k_dp_rx_h_last_msdu(ab, desc);
3647 
3648 	l3pad_bytes = ath12k_dp_rx_h_l3pad(ab, desc);
3649 	msdu_len = ath12k_dp_rx_h_msdu_len(ab, desc);
3650 	skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len);
3651 	skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes);
3652 
3653 	ath12k_dp_rx_h_ppdu(ar, desc, status);
3654 
3655 	status->flag |= (RX_FLAG_MMIC_STRIPPED | RX_FLAG_MMIC_ERROR |
3656 			 RX_FLAG_DECRYPTED);
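
	/* With RX_FLAG_MMIC_ERROR set, mac80211 accounts the Michael MIC
	 * failure and can run TKIP countermeasures; the MIC itself has
	 * already been stripped (RX_FLAG_MMIC_STRIPPED).
	 */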
3657 
3658 	ath12k_dp_rx_h_undecap(ar, msdu, desc,
3659 			       HAL_ENCRYPT_TYPE_TKIP_MIC, status, false);
3660 }
3661 
static bool ath12k_dp_rx_h_rxdma_err(struct ath12k *ar, struct sk_buff *msdu,
3663 				     struct ieee80211_rx_status *status)
3664 {
3665 	struct ath12k_base *ab = ar->ab;
3666 	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
3667 	struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)msdu->data;
3668 	bool drop = false;
3669 	u32 err_bitmap;
3670 
3671 	ar->ab->soc_stats.rxdma_error[rxcb->err_code]++;
3672 
3673 	switch (rxcb->err_code) {
3674 	case HAL_REO_ENTR_RING_RXDMA_ECODE_DECRYPT_ERR:
3675 	case HAL_REO_ENTR_RING_RXDMA_ECODE_TKIP_MIC_ERR:
3676 		err_bitmap = ath12k_dp_rx_h_mpdu_err(ab, rx_desc);
3677 		if (err_bitmap & HAL_RX_MPDU_ERR_TKIP_MIC) {
3678 			ath12k_dp_rx_h_tkip_mic_err(ar, msdu, status);
3679 			break;
3680 		}
3681 		fallthrough;
3682 	default:
		/* TODO: Review other rxdma error codes to check if anything is
		 * worth reporting to mac80211.
		 */
3686 		drop = true;
3687 		break;
3688 	}
3689 
3690 	return drop;
3691 }
3692 
3693 static void ath12k_dp_rx_wbm_err(struct ath12k *ar,
3694 				 struct napi_struct *napi,
3695 				 struct sk_buff *msdu,
3696 				 struct sk_buff_head *msdu_list)
3697 {
3698 	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
3699 	struct ieee80211_rx_status rxs = {0};
3700 	bool drop = true;
3701 
3702 	switch (rxcb->err_rel_src) {
3703 	case HAL_WBM_REL_SRC_MODULE_REO:
3704 		drop = ath12k_dp_rx_h_reo_err(ar, msdu, &rxs, msdu_list);
3705 		break;
3706 	case HAL_WBM_REL_SRC_MODULE_RXDMA:
3707 		drop = ath12k_dp_rx_h_rxdma_err(ar, msdu, &rxs);
3708 		break;
3709 	default:
3710 		/* msdu will get freed */
3711 		break;
3712 	}
3713 
3714 	if (drop) {
3715 		dev_kfree_skb_any(msdu);
3716 		return;
3717 	}
3718 
3719 	ath12k_dp_rx_deliver_msdu(ar, napi, msdu, &rxs);
3720 }
3721 
3722 int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
3723 				 struct napi_struct *napi, int budget)
3724 {
3725 	struct ath12k *ar;
3726 	struct ath12k_dp *dp = &ab->dp;
3727 	struct dp_rxdma_ring *rx_ring;
3728 	struct hal_rx_wbm_rel_info err_info;
3729 	struct hal_srng *srng;
3730 	struct sk_buff *msdu;
3731 	struct sk_buff_head msdu_list[MAX_RADIOS];
3732 	struct ath12k_skb_rxcb *rxcb;
3733 	void *rx_desc;
3734 	int mac_id;
3735 	int num_buffs_reaped = 0;
3736 	struct ath12k_rx_desc_info *desc_info;
3737 	int ret, i;
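
	/* one list of errored msdus per radio; delivered to mac80211 only
	 * after the WBM error ring has been fully reaped
	 */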
3738 
3739 	for (i = 0; i < ab->num_radios; i++)
3740 		__skb_queue_head_init(&msdu_list[i]);
3741 
3742 	srng = &ab->hal.srng_list[dp->rx_rel_ring.ring_id];
3743 	rx_ring = &dp->rx_refill_buf_ring;
3744 
3745 	spin_lock_bh(&srng->lock);
3746 
3747 	ath12k_hal_srng_access_begin(ab, srng);
3748 
3749 	while (budget) {
3750 		rx_desc = ath12k_hal_srng_dst_get_next_entry(ab, srng);
3751 		if (!rx_desc)
3752 			break;
3753 
3754 		ret = ath12k_hal_wbm_desc_parse_err(ab, rx_desc, &err_info);
3755 		if (ret) {
3756 			ath12k_warn(ab,
3757 				    "failed to parse rx error in wbm_rel ring desc %d\n",
3758 				    ret);
3759 			continue;
3760 		}
3761 
3762 		desc_info = (struct ath12k_rx_desc_info *)err_info.rx_desc;
3763 
		/* retry manual desc retrieval if hw cookie conversion (CC) is
		 * not done
		 */
3765 		if (!desc_info) {
3766 			desc_info = ath12k_dp_get_rx_desc(ab, err_info.cookie);
3767 			if (!desc_info) {
				ath12k_warn(ab, "invalid cookie in manual desc retrieval\n");
3769 				continue;
3770 			}
3771 		}
3772 
		/* FIXME: Extract mac id correctly. Since descs are not tied
		 * to a mac, it can be extracted from the vdev id in the ring
		 * desc.
		 */
3776 		mac_id = 0;
3777 
3778 		if (desc_info->magic != ATH12K_DP_RX_DESC_MAGIC)
			ath12k_warn(ab, "WBM RX err, check HW CC implementation\n");
3780 
3781 		msdu = desc_info->skb;
3782 		desc_info->skb = NULL;
3783 
3784 		spin_lock_bh(&dp->rx_desc_lock);
3785 		list_move_tail(&desc_info->list, &dp->rx_desc_free_list);
3786 		spin_unlock_bh(&dp->rx_desc_lock);
3787 
3788 		rxcb = ATH12K_SKB_RXCB(msdu);
3789 		dma_unmap_single(ab->dev, rxcb->paddr,
3790 				 msdu->len + skb_tailroom(msdu),
3791 				 DMA_FROM_DEVICE);
3792 
3793 		num_buffs_reaped++;
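
		/* Buffers flagged as continuations are parts of the same
		 * msdu, so only whole msdus count against the NAPI budget.
		 */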
3794 
3795 		if (!err_info.continuation)
3796 			budget--;
3797 
3798 		if (err_info.push_reason !=
3799 		    HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) {
3800 			dev_kfree_skb_any(msdu);
3801 			continue;
3802 		}
3803 
3804 		rxcb->err_rel_src = err_info.err_rel_src;
3805 		rxcb->err_code = err_info.err_code;
3806 		rxcb->rx_desc = (struct hal_rx_desc *)msdu->data;
3807 		__skb_queue_tail(&msdu_list[mac_id], msdu);
3808 
3809 		rxcb->is_first_msdu = err_info.first_msdu;
3810 		rxcb->is_last_msdu = err_info.last_msdu;
3811 		rxcb->is_continuation = err_info.continuation;
3812 	}
3813 
3814 	ath12k_hal_srng_access_end(ab, srng);
3815 
3816 	spin_unlock_bh(&srng->lock);
3817 
3818 	if (!num_buffs_reaped)
3819 		goto done;
3820 
3821 	ath12k_dp_rx_bufs_replenish(ab, 0, rx_ring, num_buffs_reaped,
3822 				    ab->hw_params->hal_params->rx_buf_rbm, true);
3823 
3824 	rcu_read_lock();
	for (i = 0; i < ab->num_radios; i++) {
3826 		if (!rcu_dereference(ab->pdevs_active[i])) {
3827 			__skb_queue_purge(&msdu_list[i]);
3828 			continue;
3829 		}
3830 
3831 		ar = ab->pdevs[i].ar;
3832 
3833 		if (test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) {
3834 			__skb_queue_purge(&msdu_list[i]);
3835 			continue;
3836 		}
3837 
3838 		while ((msdu = __skb_dequeue(&msdu_list[i])) != NULL)
3839 			ath12k_dp_rx_wbm_err(ar, napi, msdu, &msdu_list[i]);
3840 	}
3841 	rcu_read_unlock();
3842 done:
3843 	return num_buffs_reaped;
3844 }
3845 
3846 void ath12k_dp_rx_process_reo_status(struct ath12k_base *ab)
3847 {
3848 	struct ath12k_dp *dp = &ab->dp;
3849 	struct hal_tlv_64_hdr *hdr;
3850 	struct hal_srng *srng;
3851 	struct ath12k_dp_rx_reo_cmd *cmd, *tmp;
3852 	bool found = false;
3853 	u16 tag;
3854 	struct hal_reo_status reo_status;
3855 
3856 	srng = &ab->hal.srng_list[dp->reo_status_ring.ring_id];
3857 
3858 	memset(&reo_status, 0, sizeof(reo_status));
3859 
3860 	spin_lock_bh(&srng->lock);
3861 
3862 	ath12k_hal_srng_access_begin(ab, srng);
3863 
3864 	while ((hdr = ath12k_hal_srng_dst_get_next_entry(ab, srng))) {
3865 		tag = u64_get_bits(hdr->tl, HAL_SRNG_TLV_HDR_TAG);
3866 
3867 		switch (tag) {
3868 		case HAL_REO_GET_QUEUE_STATS_STATUS:
3869 			ath12k_hal_reo_status_queue_stats(ab, hdr,
3870 							  &reo_status);
3871 			break;
3872 		case HAL_REO_FLUSH_QUEUE_STATUS:
3873 			ath12k_hal_reo_flush_queue_status(ab, hdr,
3874 							  &reo_status);
3875 			break;
3876 		case HAL_REO_FLUSH_CACHE_STATUS:
3877 			ath12k_hal_reo_flush_cache_status(ab, hdr,
3878 							  &reo_status);
3879 			break;
3880 		case HAL_REO_UNBLOCK_CACHE_STATUS:
3881 			ath12k_hal_reo_unblk_cache_status(ab, hdr,
3882 							  &reo_status);
3883 			break;
3884 		case HAL_REO_FLUSH_TIMEOUT_LIST_STATUS:
3885 			ath12k_hal_reo_flush_timeout_list_status(ab, hdr,
3886 								 &reo_status);
3887 			break;
3888 		case HAL_REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS:
3889 			ath12k_hal_reo_desc_thresh_reached_status(ab, hdr,
3890 								  &reo_status);
3891 			break;
3892 		case HAL_REO_UPDATE_RX_REO_QUEUE_STATUS:
3893 			ath12k_hal_reo_update_rx_reo_queue_status(ab, hdr,
3894 								  &reo_status);
3895 			break;
3896 		default:
3897 			ath12k_warn(ab, "Unknown reo status type %d\n", tag);
3898 			continue;
3899 		}
3900 
3901 		spin_lock_bh(&dp->reo_cmd_lock);
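		/* The status's cmd_num identifies the pending REO command it
		 * completes; detach that command under reo_cmd_lock and
		 * invoke its handler afterwards without the lock held.
		 */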
3902 		list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
3903 			if (reo_status.uniform_hdr.cmd_num == cmd->cmd_num) {
3904 				found = true;
3905 				list_del(&cmd->list);
3906 				break;
3907 			}
3908 		}
3909 		spin_unlock_bh(&dp->reo_cmd_lock);
3910 
3911 		if (found) {
3912 			cmd->handler(dp, (void *)&cmd->data,
3913 				     reo_status.uniform_hdr.cmd_status);
3914 			kfree(cmd);
3915 		}
3916 
3917 		found = false;
3918 	}
3919 
3920 	ath12k_hal_srng_access_end(ab, srng);
3921 
3922 	spin_unlock_bh(&srng->lock);
3923 }
3924 
3925 void ath12k_dp_rx_free(struct ath12k_base *ab)
3926 {
3927 	struct ath12k_dp *dp = &ab->dp;
3928 	int i;
3929 
3930 	ath12k_dp_srng_cleanup(ab, &dp->rx_refill_buf_ring.refill_buf_ring);
3931 
3932 	for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
3933 		if (ab->hw_params->rx_mac_buf_ring)
3934 			ath12k_dp_srng_cleanup(ab, &dp->rx_mac_buf_ring[i]);
3935 	}
3936 
3937 	for (i = 0; i < ab->hw_params->num_rxdma_dst_ring; i++)
3938 		ath12k_dp_srng_cleanup(ab, &dp->rxdma_err_dst_ring[i]);
3939 
3940 	ath12k_dp_srng_cleanup(ab, &dp->rxdma_mon_buf_ring.refill_buf_ring);
3941 	ath12k_dp_srng_cleanup(ab, &dp->tx_mon_buf_ring.refill_buf_ring);
3942 
3943 	ath12k_dp_rxdma_buf_free(ab);
3944 }
3945 
3946 void ath12k_dp_rx_pdev_free(struct ath12k_base *ab, int mac_id)
3947 {
3948 	struct ath12k *ar = ab->pdevs[mac_id].ar;
3949 
3950 	ath12k_dp_rx_pdev_srng_free(ar);
3951 }
3952 
3953 int ath12k_dp_rxdma_ring_sel_config_qcn9274(struct ath12k_base *ab)
3954 {
3955 	struct ath12k_dp *dp = &ab->dp;
3956 	struct htt_rx_ring_tlv_filter tlv_filter = {0};
3957 	u32 ring_id;
3958 	int ret;
3959 	u32 hal_rx_desc_sz = ab->hw_params->hal_desc_sz;
3960 
3961 	ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id;
3962 
3963 	tlv_filter.rx_filter = HTT_RX_TLV_FLAGS_RXDMA_RING;
3964 	tlv_filter.pkt_filter_flags2 = HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_BAR;
3965 	tlv_filter.pkt_filter_flags3 = HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_MCAST |
3966 					HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_UCAST |
3967 					HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA;
3968 	tlv_filter.offset_valid = true;
3969 	tlv_filter.rx_packet_offset = hal_rx_desc_sz;
3970 
3971 	tlv_filter.rx_mpdu_start_offset =
3972 			ab->hw_params->hal_ops->rx_desc_get_mpdu_start_offset();
3973 	tlv_filter.rx_msdu_end_offset =
3974 		ab->hw_params->hal_ops->rx_desc_get_msdu_end_offset();
3975 
	/* TODO: Selectively subscribe to the required qwords within msdu_end
	 * and mpdu_start, set up the mask in the message below, and modify
	 * the rx_desc struct accordingly.
	 */
3980 	ret = ath12k_dp_tx_htt_rx_filter_setup(ab, ring_id, 0,
3981 					       HAL_RXDMA_BUF,
3982 					       DP_RXDMA_REFILL_RING_SIZE,
3983 					       &tlv_filter);
3984 
3985 	return ret;
3986 }
3987 
3988 int ath12k_dp_rxdma_ring_sel_config_wcn7850(struct ath12k_base *ab)
3989 {
3990 	struct ath12k_dp *dp = &ab->dp;
3991 	struct htt_rx_ring_tlv_filter tlv_filter = {0};
3992 	u32 ring_id;
3993 	int ret;
3994 	u32 hal_rx_desc_sz = ab->hw_params->hal_desc_sz;
3995 	int i;
3996 
3997 	ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id;
3998 
3999 	tlv_filter.rx_filter = HTT_RX_TLV_FLAGS_RXDMA_RING;
4000 	tlv_filter.pkt_filter_flags2 = HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_BAR;
4001 	tlv_filter.pkt_filter_flags3 = HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_MCAST |
4002 					HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_UCAST |
4003 					HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA;
4004 	tlv_filter.offset_valid = true;
4005 	tlv_filter.rx_packet_offset = hal_rx_desc_sz;
4006 
4007 	tlv_filter.rx_header_offset = offsetof(struct hal_rx_desc_wcn7850, pkt_hdr_tlv);
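	/* Unlike qcn9274, wcn7850 also subscribes to the packet header TLV,
	 * hence the extra offset programmed above.
	 */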
4008 
4009 	tlv_filter.rx_mpdu_start_offset =
4010 			ab->hw_params->hal_ops->rx_desc_get_mpdu_start_offset();
4011 	tlv_filter.rx_msdu_end_offset =
4012 		ab->hw_params->hal_ops->rx_desc_get_msdu_end_offset();
4013 
	/* TODO: Selectively subscribe to the required qwords within msdu_end
	 * and mpdu_start, set up the mask in the message below, and modify
	 * the rx_desc struct accordingly.
	 */
4018 
	for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
		ring_id = dp->rx_mac_buf_ring[i].ring_id;
		ret = ath12k_dp_tx_htt_rx_filter_setup(ab, ring_id, i,
						       HAL_RXDMA_BUF,
						       DP_RXDMA_REFILL_RING_SIZE,
						       &tlv_filter);
		if (ret)
			return ret;
	}

	return 0;
4028 }
4029 
4030 int ath12k_dp_rx_htt_setup(struct ath12k_base *ab)
4031 {
4032 	struct ath12k_dp *dp = &ab->dp;
4033 	u32 ring_id;
4034 	int i, ret;
4035 
4036 	/* TODO: Need to verify the HTT setup for QCN9224 */
4037 	ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id;
4038 	ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id, 0, HAL_RXDMA_BUF);
4039 	if (ret) {
4040 		ath12k_warn(ab, "failed to configure rx_refill_buf_ring %d\n",
4041 			    ret);
4042 		return ret;
4043 	}
4044 
4045 	if (ab->hw_params->rx_mac_buf_ring) {
4046 		for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
4047 			ring_id = dp->rx_mac_buf_ring[i].ring_id;
4048 			ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
4049 							  i, HAL_RXDMA_BUF);
4050 			if (ret) {
4051 				ath12k_warn(ab, "failed to configure rx_mac_buf_ring%d %d\n",
4052 					    i, ret);
4053 				return ret;
4054 			}
4055 		}
4056 	}
4057 
4058 	for (i = 0; i < ab->hw_params->num_rxdma_dst_ring; i++) {
4059 		ring_id = dp->rxdma_err_dst_ring[i].ring_id;
4060 		ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
4061 						  i, HAL_RXDMA_DST);
4062 		if (ret) {
4063 			ath12k_warn(ab, "failed to configure rxdma_err_dest_ring%d %d\n",
4064 				    i, ret);
4065 			return ret;
4066 		}
4067 	}
4068 
4069 	if (ab->hw_params->rxdma1_enable) {
4070 		ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id;
4071 		ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
4072 						  0, HAL_RXDMA_MONITOR_BUF);
4073 		if (ret) {
4074 			ath12k_warn(ab, "failed to configure rxdma_mon_buf_ring %d\n",
4075 				    ret);
4076 			return ret;
4077 		}
4078 
4079 		ring_id = dp->tx_mon_buf_ring.refill_buf_ring.ring_id;
4080 		ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
4081 						  0, HAL_TX_MONITOR_BUF);
4082 		if (ret) {
			ath12k_warn(ab, "failed to configure tx_mon_buf_ring %d\n",
				    ret);
4085 			return ret;
4086 		}
4087 	}
4088 
4089 	ret = ab->hw_params->hw_ops->rxdma_ring_sel_config(ab);
4090 	if (ret) {
4091 		ath12k_warn(ab, "failed to setup rxdma ring selection config\n");
4092 		return ret;
4093 	}
4094 
4095 	return 0;
4096 }
4097 
4098 int ath12k_dp_rx_alloc(struct ath12k_base *ab)
4099 {
4100 	struct ath12k_dp *dp = &ab->dp;
4101 	int i, ret;
4102 
4103 	idr_init(&dp->rx_refill_buf_ring.bufs_idr);
4104 	spin_lock_init(&dp->rx_refill_buf_ring.idr_lock);
4105 
4106 	idr_init(&dp->rxdma_mon_buf_ring.bufs_idr);
4107 	spin_lock_init(&dp->rxdma_mon_buf_ring.idr_lock);
4108 
4109 	idr_init(&dp->tx_mon_buf_ring.bufs_idr);
4110 	spin_lock_init(&dp->tx_mon_buf_ring.idr_lock);
4111 
4112 	ret = ath12k_dp_srng_setup(ab,
4113 				   &dp->rx_refill_buf_ring.refill_buf_ring,
4114 				   HAL_RXDMA_BUF, 0, 0,
4115 				   DP_RXDMA_BUF_RING_SIZE);
4116 	if (ret) {
4117 		ath12k_warn(ab, "failed to setup rx_refill_buf_ring\n");
4118 		return ret;
4119 	}
4120 
4121 	if (ab->hw_params->rx_mac_buf_ring) {
4122 		for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
4123 			ret = ath12k_dp_srng_setup(ab,
4124 						   &dp->rx_mac_buf_ring[i],
4125 						   HAL_RXDMA_BUF, 1,
4126 						   i, 1024);
4127 			if (ret) {
4128 				ath12k_warn(ab, "failed to setup rx_mac_buf_ring %d\n",
4129 					    i);
4130 				return ret;
4131 			}
4132 		}
4133 	}
4134 
4135 	for (i = 0; i < ab->hw_params->num_rxdma_dst_ring; i++) {
4136 		ret = ath12k_dp_srng_setup(ab, &dp->rxdma_err_dst_ring[i],
4137 					   HAL_RXDMA_DST, 0, i,
4138 					   DP_RXDMA_ERR_DST_RING_SIZE);
4139 		if (ret) {
4140 			ath12k_warn(ab, "failed to setup rxdma_err_dst_ring %d\n", i);
4141 			return ret;
4142 		}
4143 	}
4144 
4145 	if (ab->hw_params->rxdma1_enable) {
4146 		ret = ath12k_dp_srng_setup(ab,
4147 					   &dp->rxdma_mon_buf_ring.refill_buf_ring,
4148 					   HAL_RXDMA_MONITOR_BUF, 0, 0,
4149 					   DP_RXDMA_MONITOR_BUF_RING_SIZE);
4150 		if (ret) {
4151 			ath12k_warn(ab, "failed to setup HAL_RXDMA_MONITOR_BUF\n");
4152 			return ret;
4153 		}
4154 
4155 		ret = ath12k_dp_srng_setup(ab,
4156 					   &dp->tx_mon_buf_ring.refill_buf_ring,
4157 					   HAL_TX_MONITOR_BUF, 0, 0,
4158 					   DP_TX_MONITOR_BUF_RING_SIZE);
4159 		if (ret) {
			ath12k_warn(ab, "failed to setup HAL_TX_MONITOR_BUF\n");
4161 			return ret;
4162 		}
4163 	}
4164 
4165 	ret = ath12k_dp_rxdma_buf_setup(ab);
4166 	if (ret) {
4167 		ath12k_warn(ab, "failed to setup rxdma ring\n");
4168 		return ret;
4169 	}
4170 
4171 	return 0;
4172 }
4173 
4174 int ath12k_dp_rx_pdev_alloc(struct ath12k_base *ab, int mac_id)
4175 {
4176 	struct ath12k *ar = ab->pdevs[mac_id].ar;
4177 	struct ath12k_pdev_dp *dp = &ar->dp;
4178 	u32 ring_id;
4179 	int i;
4180 	int ret;
4181 
4182 	if (!ab->hw_params->rxdma1_enable)
4183 		goto out;
4184 
4185 	ret = ath12k_dp_rx_pdev_srng_alloc(ar);
4186 	if (ret) {
4187 		ath12k_warn(ab, "failed to setup rx srngs\n");
4188 		return ret;
4189 	}
4190 
4191 	for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
4192 		ring_id = dp->rxdma_mon_dst_ring[i].ring_id;
4193 		ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
4194 						  mac_id + i,
4195 						  HAL_RXDMA_MONITOR_DST);
4196 		if (ret) {
4197 			ath12k_warn(ab,
4198 				    "failed to configure rxdma_mon_dst_ring %d %d\n",
4199 				    i, ret);
4200 			return ret;
4201 		}
4202 
4203 		ring_id = dp->tx_mon_dst_ring[i].ring_id;
4204 		ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
4205 						  mac_id + i,
4206 						  HAL_TX_MONITOR_DST);
4207 		if (ret) {
4208 			ath12k_warn(ab,
4209 				    "failed to configure tx_mon_dst_ring %d %d\n",
4210 				    i, ret);
4211 			return ret;
4212 		}
4213 	}
4214 out:
4215 	return 0;
4216 }
4217 
4218 static int ath12k_dp_rx_pdev_mon_status_attach(struct ath12k *ar)
4219 {
4220 	struct ath12k_pdev_dp *dp = &ar->dp;
4221 	struct ath12k_mon_data *pmon = (struct ath12k_mon_data *)&dp->mon_data;
4222 
4223 	skb_queue_head_init(&pmon->rx_status_q);
4224 
4225 	pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
4226 
4227 	memset(&pmon->rx_mon_stats, 0,
4228 	       sizeof(pmon->rx_mon_stats));
4229 	return 0;
4230 }
4231 
4232 int ath12k_dp_rx_pdev_mon_attach(struct ath12k *ar)
4233 {
4234 	struct ath12k_pdev_dp *dp = &ar->dp;
4235 	struct ath12k_mon_data *pmon = &dp->mon_data;
4236 	int ret = 0;
4237 
4238 	ret = ath12k_dp_rx_pdev_mon_status_attach(ar);
4239 	if (ret) {
		ath12k_warn(ar->ab, "pdev_mon_status_attach() failed\n");
4241 		return ret;
4242 	}
4243 
4244 	/* if rxdma1_enable is false, no need to setup
4245 	 * rxdma_mon_desc_ring.
4246 	 */
4247 	if (!ar->ab->hw_params->rxdma1_enable)
4248 		return 0;
4249 
4250 	pmon->mon_last_linkdesc_paddr = 0;
4251 	pmon->mon_last_buf_cookie = DP_RX_DESC_COOKIE_MAX + 1;
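	/* start past the valid cookie range so the first reaped buffer can
	 * never match a stale "last cookie" value
	 */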
4252 	spin_lock_init(&pmon->mon_lock);
4253 
4254 	return 0;
4255 }
4256 
4257 int ath12k_dp_rx_pktlog_start(struct ath12k_base *ab)
4258 {
4259 	/* start reap timer */
4260 	mod_timer(&ab->mon_reap_timer,
4261 		  jiffies + msecs_to_jiffies(ATH12K_MON_TIMER_INTERVAL));
4262 
4263 	return 0;
4264 }
4265 
4266 int ath12k_dp_rx_pktlog_stop(struct ath12k_base *ab, bool stop_timer)
4267 {
4268 	int ret;
4269 
4270 	if (stop_timer)
4271 		del_timer_sync(&ab->mon_reap_timer);
4272 
4273 	/* reap all the monitor related rings */
4274 	ret = ath12k_dp_purge_mon_ring(ab);
4275 	if (ret) {
4276 		ath12k_warn(ab, "failed to purge dp mon ring: %d\n", ret);
4277 		return ret;
4278 	}
4279 
4280 	return 0;
4281 }
4282