/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "core.h"
#include "htc.h"
#include "htt.h"
#include "txrx.h"
#include "debug.h"
#include "trace.h"
#include "mac.h"

#include <linux/log2.h>

/* when under memory pressure rx ring refill may fail and needs a retry */
#define HTT_RX_RING_REFILL_RETRY_MS 50

#define HTT_RX_RING_REFILL_RESCHED_MS 5

static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);

static struct sk_buff *
ath10k_htt_rx_find_skb_paddr(struct ath10k *ar, u64 paddr)
{
	struct ath10k_skb_rxcb *rxcb;

	hash_for_each_possible(ar->htt.rx_ring.skb_table, rxcb, hlist, paddr)
		if (rxcb->paddr == paddr)
			return ATH10K_RXCB_SKB(rxcb);

	WARN_ON_ONCE(1);
	return NULL;
}
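
/* Illustrative sketch (not part of the driver): the lookup above pairs with
 * the hash_add() done while filling the ring in __ath10k_htt_rx_ring_fill_n()
 * below. The buffer's DMA address doubles as the hash key:
 *
 *	rxcb = ATH10K_SKB_RXCB(skb);
 *	rxcb->paddr = paddr;
 *	hash_add(ar->htt.rx_ring.skb_table, &rxcb->hlist, paddr);
 *
 *	skb = ath10k_htt_rx_find_skb_paddr(ar, paddr);
 */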

static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt)
{
	struct sk_buff *skb;
	struct ath10k_skb_rxcb *rxcb;
	struct hlist_node *n;
	int i;

	if (htt->rx_ring.in_ord_rx) {
		hash_for_each_safe(htt->rx_ring.skb_table, i, n, rxcb, hlist) {
			skb = ATH10K_RXCB_SKB(rxcb);
			dma_unmap_single(htt->ar->dev, rxcb->paddr,
					 skb->len + skb_tailroom(skb),
					 DMA_FROM_DEVICE);
			hash_del(&rxcb->hlist);
			dev_kfree_skb_any(skb);
		}
	} else {
		for (i = 0; i < htt->rx_ring.size; i++) {
			skb = htt->rx_ring.netbufs_ring[i];
			if (!skb)
				continue;

			rxcb = ATH10K_SKB_RXCB(skb);
			dma_unmap_single(htt->ar->dev, rxcb->paddr,
					 skb->len + skb_tailroom(skb),
					 DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);
		}
	}

	htt->rx_ring.fill_cnt = 0;
	hash_init(htt->rx_ring.skb_table);
	memset(htt->rx_ring.netbufs_ring, 0,
	       htt->rx_ring.size * sizeof(htt->rx_ring.netbufs_ring[0]));
}

static size_t ath10k_htt_get_rx_ring_size_32(struct ath10k_htt *htt)
{
	/* size per entry is the ring element (__le32), not the pointer */
	return htt->rx_ring.size * sizeof(*htt->rx_ring.paddrs_ring_32);
}

static size_t ath10k_htt_get_rx_ring_size_64(struct ath10k_htt *htt)
{
	return htt->rx_ring.size * sizeof(*htt->rx_ring.paddrs_ring_64);
}

static void ath10k_htt_config_paddrs_ring_32(struct ath10k_htt *htt,
					     void *vaddr)
{
	htt->rx_ring.paddrs_ring_32 = vaddr;
}

static void ath10k_htt_config_paddrs_ring_64(struct ath10k_htt *htt,
					     void *vaddr)
{
	htt->rx_ring.paddrs_ring_64 = vaddr;
}

static void ath10k_htt_set_paddrs_ring_32(struct ath10k_htt *htt,
					  dma_addr_t paddr, int idx)
{
	htt->rx_ring.paddrs_ring_32[idx] = __cpu_to_le32(paddr);
}

static void ath10k_htt_set_paddrs_ring_64(struct ath10k_htt *htt,
					  dma_addr_t paddr, int idx)
{
	htt->rx_ring.paddrs_ring_64[idx] = __cpu_to_le64(paddr);
}

static void ath10k_htt_reset_paddrs_ring_32(struct ath10k_htt *htt, int idx)
{
	htt->rx_ring.paddrs_ring_32[idx] = 0;
}

static void ath10k_htt_reset_paddrs_ring_64(struct ath10k_htt *htt, int idx)
{
	htt->rx_ring.paddrs_ring_64[idx] = 0;
}

static void *ath10k_htt_get_vaddr_ring_32(struct ath10k_htt *htt)
{
	return (void *)htt->rx_ring.paddrs_ring_32;
}

static void *ath10k_htt_get_vaddr_ring_64(struct ath10k_htt *htt)
{
	return (void *)htt->rx_ring.paddrs_ring_64;
}
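
/* These 32/64-bit accessors are consumed indirectly through htt->rx_ops.
 * A sketch of how the 32-bit variants could be wired up, assuming the
 * struct ath10k_htt_rx_ops callback table declared in htt.h:
 *
 *	static const struct ath10k_htt_rx_ops htt_rx_ops_32 = {
 *		.htt_get_rx_ring_size = ath10k_htt_get_rx_ring_size_32,
 *		.htt_config_paddrs_ring = ath10k_htt_config_paddrs_ring_32,
 *		.htt_set_paddrs_ring = ath10k_htt_set_paddrs_ring_32,
 *		.htt_get_vaddr_ring = ath10k_htt_get_vaddr_ring_32,
 *		.htt_reset_paddrs_ring = ath10k_htt_reset_paddrs_ring_32,
 *	};
 */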

static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
	struct htt_rx_desc *rx_desc;
	struct ath10k_skb_rxcb *rxcb;
	struct sk_buff *skb;
	dma_addr_t paddr;
	int ret = 0, idx;

	/* The Full Rx Reorder firmware has no way of telling the host
	 * implicitly when it has copied HTT Rx Ring buffers to the MAC Rx
	 * Ring. To keep things simple make sure the ring is never more than
	 * half full. This guarantees there can be no replenishment overruns.
	 */
	BUILD_BUG_ON(HTT_RX_RING_FILL_LEVEL >= HTT_RX_RING_SIZE / 2);

	idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);
	while (num > 0) {
		skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN);
		if (!skb) {
			ret = -ENOMEM;
			goto fail;
		}

		if (!IS_ALIGNED((unsigned long)skb->data, HTT_RX_DESC_ALIGN))
			skb_pull(skb,
				 PTR_ALIGN(skb->data, HTT_RX_DESC_ALIGN) -
				 skb->data);

		/* Clear rx_desc attention word before posting to Rx ring */
		rx_desc = (struct htt_rx_desc *)skb->data;
		rx_desc->attention.flags = __cpu_to_le32(0);

		paddr = dma_map_single(htt->ar->dev, skb->data,
				       skb->len + skb_tailroom(skb),
				       DMA_FROM_DEVICE);

		if (unlikely(dma_mapping_error(htt->ar->dev, paddr))) {
			dev_kfree_skb_any(skb);
			ret = -ENOMEM;
			goto fail;
		}

		rxcb = ATH10K_SKB_RXCB(skb);
		rxcb->paddr = paddr;
		htt->rx_ring.netbufs_ring[idx] = skb;
		htt->rx_ops->htt_set_paddrs_ring(htt, paddr, idx);
		htt->rx_ring.fill_cnt++;

		if (htt->rx_ring.in_ord_rx) {
			hash_add(htt->rx_ring.skb_table,
				 &ATH10K_SKB_RXCB(skb)->hlist,
				 paddr);
		}

		num--;
		idx++;
		idx &= htt->rx_ring.size_mask;
	}

fail:
	/*
	 * Make sure the rx buffer is updated before available buffer
	 * index to avoid any potential rx ring corruption.
	 */
	mb();
	*htt->rx_ring.alloc_idx.vaddr = __cpu_to_le32(idx);
	return ret;
}
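
/* The "idx &= size_mask" wrap-around above relies on the ring size being a
 * power of two (enforced with is_power_of_2() in ath10k_htt_rx_alloc()).
 * A small worked example with hypothetical numbers:
 *
 *	size = 512;			ring entries, power of two
 *	size_mask = size - 1;		0x1ff
 *	idx = 511;
 *	idx = (idx + 1) & size_mask;	wraps to 0, same as (idx + 1) % size
 */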

static int ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
	lockdep_assert_held(&htt->rx_ring.lock);
	return __ath10k_htt_rx_ring_fill_n(htt, num);
}

static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
{
	int ret, num_deficit, num_to_fill;

	/* Refilling the whole RX ring buffer proves to be a bad idea. The
	 * reason is RX may take a significant amount of CPU cycles and starve
	 * other tasks, e.g. TX on an ethernet device while acting as a bridge
	 * with the ath10k wlan interface. This ended up with very poor
	 * performance once the host system's CPU was overwhelmed with RX on
	 * ath10k.
	 *
	 * By limiting the number of refills the replenishing occurs
	 * progressively. This in turn makes use of the fact that tasklets are
	 * processed in FIFO order. This means actual RX processing can starve
	 * out refilling. If there aren't enough buffers on the RX ring the FW
	 * will not report RX until the ring is refilled with enough buffers.
	 * This automatically balances load with respect to CPU power.
	 *
	 * This probably comes at a cost of lower maximum throughput but
	 * improves the average and stability.
	 */
	spin_lock_bh(&htt->rx_ring.lock);
	num_deficit = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
	num_to_fill = min(ATH10K_HTT_MAX_NUM_REFILL, num_deficit);
	num_deficit -= num_to_fill;
	ret = ath10k_htt_rx_ring_fill_n(htt, num_to_fill);
	if (ret == -ENOMEM) {
		/*
		 * Failed to fill it to the desired level -
		 * we'll start a timer and try again next time.
		 * As long as enough buffers are left in the ring for
		 * another A-MPDU rx, no special recovery is needed.
		 */
		mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
			  msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS));
	} else if (num_deficit > 0) {
		mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
			  msecs_to_jiffies(HTT_RX_RING_REFILL_RESCHED_MS));
	}
	spin_unlock_bh(&htt->rx_ring.lock);
}
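
/* A worked example of the progressive refill above, using hypothetical
 * numbers (the real ATH10K_HTT_MAX_NUM_REFILL is defined in htt.h):
 *
 *	fill_level = 1000, fill_cnt = 900	-> num_deficit = 100
 *	ATH10K_HTT_MAX_NUM_REFILL = 16		-> num_to_fill = 16
 *	num_deficit = 100 - 16 = 84 > 0		-> reschedule after
 *						   HTT_RX_RING_REFILL_RESCHED_MS
 *
 * If the fill itself fails with -ENOMEM the longer
 * HTT_RX_RING_REFILL_RETRY_MS timer is armed instead.
 */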

static void ath10k_htt_rx_ring_refill_retry(struct timer_list *t)
{
	struct ath10k_htt *htt = from_timer(htt, t, rx_ring.refill_retry_timer);

	ath10k_htt_rx_msdu_buff_replenish(htt);
}

int ath10k_htt_rx_ring_refill(struct ath10k *ar)
{
	struct ath10k_htt *htt = &ar->htt;
	int ret;

	spin_lock_bh(&htt->rx_ring.lock);
	ret = ath10k_htt_rx_ring_fill_n(htt, (htt->rx_ring.fill_level -
					      htt->rx_ring.fill_cnt));
	spin_unlock_bh(&htt->rx_ring.lock);

	if (ret)
		ath10k_htt_rx_ring_free(htt);

	return ret;
}

void ath10k_htt_rx_free(struct ath10k_htt *htt)
{
	del_timer_sync(&htt->rx_ring.refill_retry_timer);

	skb_queue_purge(&htt->rx_msdus_q);
	skb_queue_purge(&htt->rx_in_ord_compl_q);
	skb_queue_purge(&htt->tx_fetch_ind_q);

	ath10k_htt_rx_ring_free(htt);

	dma_free_coherent(htt->ar->dev,
			  htt->rx_ops->htt_get_rx_ring_size(htt),
			  htt->rx_ops->htt_get_vaddr_ring(htt),
			  htt->rx_ring.base_paddr);

	dma_free_coherent(htt->ar->dev,
			  sizeof(*htt->rx_ring.alloc_idx.vaddr),
			  htt->rx_ring.alloc_idx.vaddr,
			  htt->rx_ring.alloc_idx.paddr);

	kfree(htt->rx_ring.netbufs_ring);
}

static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	int idx;
	struct sk_buff *msdu;

	lockdep_assert_held(&htt->rx_ring.lock);

	if (htt->rx_ring.fill_cnt == 0) {
		ath10k_warn(ar, "tried to pop sk_buff from an empty rx ring\n");
		return NULL;
	}

	idx = htt->rx_ring.sw_rd_idx.msdu_payld;
	msdu = htt->rx_ring.netbufs_ring[idx];
	htt->rx_ring.netbufs_ring[idx] = NULL;
	htt->rx_ops->htt_reset_paddrs_ring(htt, idx);

	idx++;
	idx &= htt->rx_ring.size_mask;
	htt->rx_ring.sw_rd_idx.msdu_payld = idx;
	htt->rx_ring.fill_cnt--;

	dma_unmap_single(htt->ar->dev,
			 ATH10K_SKB_RXCB(msdu)->paddr,
			 msdu->len + skb_tailroom(msdu),
			 DMA_FROM_DEVICE);
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
			msdu->data, msdu->len + skb_tailroom(msdu));

	return msdu;
}

/* return: < 0 fatal error, 0 - non-chained msdu, 1 - chained msdu */
static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
				   struct sk_buff_head *amsdu)
{
	struct ath10k *ar = htt->ar;
	int msdu_len, msdu_chaining = 0;
	struct sk_buff *msdu;
	struct htt_rx_desc *rx_desc;

	lockdep_assert_held(&htt->rx_ring.lock);

	for (;;) {
		int last_msdu, msdu_len_invalid, msdu_chained;

		msdu = ath10k_htt_rx_netbuf_pop(htt);
		if (!msdu) {
			__skb_queue_purge(amsdu);
			return -ENOENT;
		}

		__skb_queue_tail(amsdu, msdu);

		rx_desc = (struct htt_rx_desc *)msdu->data;

		/* FIXME: we must report msdu payload since this is what caller
		 * expects now
		 */
		skb_put(msdu, offsetof(struct htt_rx_desc, msdu_payload));
		skb_pull(msdu, offsetof(struct htt_rx_desc, msdu_payload));

		/*
		 * Sanity check - confirm the HW is finished filling in the
		 * rx data.
		 * If the HW and SW are working correctly, then it's guaranteed
		 * that the HW's MAC DMA is done before this point in the SW.
		 * To prevent the case that we handle a stale Rx descriptor,
		 * just assert for now until we have a way to recover.
		 */
		if (!(__le32_to_cpu(rx_desc->attention.flags)
				& RX_ATTENTION_FLAGS_MSDU_DONE)) {
			__skb_queue_purge(amsdu);
			return -EIO;
		}

		msdu_len_invalid = !!(__le32_to_cpu(rx_desc->attention.flags)
					& (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR |
					   RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR));
		msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.common.info0),
			      RX_MSDU_START_INFO0_MSDU_LENGTH);
		msdu_chained = rx_desc->frag_info.ring2_more_count;

		if (msdu_len_invalid)
			msdu_len = 0;

		skb_trim(msdu, 0);
		skb_put(msdu, min(msdu_len, HTT_RX_MSDU_SIZE));
		msdu_len -= msdu->len;

		/* Note: Chained buffers do not contain an rx descriptor */
		while (msdu_chained--) {
			msdu = ath10k_htt_rx_netbuf_pop(htt);
			if (!msdu) {
				__skb_queue_purge(amsdu);
				return -ENOENT;
			}

			__skb_queue_tail(amsdu, msdu);
			skb_trim(msdu, 0);
			skb_put(msdu, min(msdu_len, HTT_RX_BUF_SIZE));
			msdu_len -= msdu->len;
			msdu_chaining = 1;
		}

		last_msdu = __le32_to_cpu(rx_desc->msdu_end.common.info0) &
				RX_MSDU_END_INFO0_LAST_MSDU;

		trace_ath10k_htt_rx_desc(ar, &rx_desc->attention,
					 sizeof(*rx_desc) - sizeof(u32));

		if (last_msdu)
			break;
	}

	if (skb_queue_empty(amsdu))
		msdu_chaining = -1;

	/*
	 * Don't refill the ring yet.
	 *
	 * First, the elements popped here are still in use - it is not
	 * safe to overwrite them until the matching call to
	 * mpdu_desc_list_next. Second, for efficiency it is preferable to
	 * refill the rx ring with 1 PPDU's worth of rx buffers (something
	 * like 32 x 3 buffers), rather than one MPDU's worth of rx buffers
	 * (something like 3 buffers). Consequently, we'll rely on the txrx
	 * SW to tell us when it is done pulling all the PPDU's rx buffers
	 * out of the rx ring, and then refill it just once.
	 */

	return msdu_chaining;
}
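
/* How msdu_len is spread over a chained MSDU in the loop above, with
 * hypothetical buffer sizes (the real HTT_RX_MSDU_SIZE and HTT_RX_BUF_SIZE
 * come from htt.h):
 *
 *	msdu_len = 4000, HTT_RX_MSDU_SIZE = 1560, HTT_RX_BUF_SIZE = 1920
 *	head buffer:	skb_put(msdu, min(4000, 1560)) -> 1560, 2440 left
 *	chained buf 1:	skb_put(msdu, min(2440, 1920)) -> 1920,  520 left
 *	chained buf 2:	skb_put(msdu, min( 520, 1920)) ->  520,    0 left
 */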

static struct sk_buff *ath10k_htt_rx_pop_paddr(struct ath10k_htt *htt,
					       u64 paddr)
{
	struct ath10k *ar = htt->ar;
	struct ath10k_skb_rxcb *rxcb;
	struct sk_buff *msdu;

	lockdep_assert_held(&htt->rx_ring.lock);

	msdu = ath10k_htt_rx_find_skb_paddr(ar, paddr);
	if (!msdu)
		return NULL;

	rxcb = ATH10K_SKB_RXCB(msdu);
	hash_del(&rxcb->hlist);
	htt->rx_ring.fill_cnt--;

	dma_unmap_single(htt->ar->dev, rxcb->paddr,
			 msdu->len + skb_tailroom(msdu),
			 DMA_FROM_DEVICE);
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
			msdu->data, msdu->len + skb_tailroom(msdu));

	return msdu;
}

static int ath10k_htt_rx_pop_paddr32_list(struct ath10k_htt *htt,
					  struct htt_rx_in_ord_ind *ev,
					  struct sk_buff_head *list)
{
	struct ath10k *ar = htt->ar;
	struct htt_rx_in_ord_msdu_desc *msdu_desc = ev->msdu_descs32;
	struct htt_rx_desc *rxd;
	struct sk_buff *msdu;
	int msdu_count;
	bool is_offload;
	u32 paddr;

	lockdep_assert_held(&htt->rx_ring.lock);

	msdu_count = __le16_to_cpu(ev->msdu_count);
	is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);

	while (msdu_count--) {
		paddr = __le32_to_cpu(msdu_desc->msdu_paddr);

		msdu = ath10k_htt_rx_pop_paddr(htt, paddr);
		if (!msdu) {
			__skb_queue_purge(list);
			return -ENOENT;
		}

		__skb_queue_tail(list, msdu);

		if (!is_offload) {
			rxd = (void *)msdu->data;

			trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));

			skb_put(msdu, sizeof(*rxd));
			skb_pull(msdu, sizeof(*rxd));
			skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len));

			if (!(__le32_to_cpu(rxd->attention.flags) &
			      RX_ATTENTION_FLAGS_MSDU_DONE)) {
				ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n");
				return -EIO;
			}
		}

		msdu_desc++;
	}

	return 0;
}

static int ath10k_htt_rx_pop_paddr64_list(struct ath10k_htt *htt,
					  struct htt_rx_in_ord_ind *ev,
					  struct sk_buff_head *list)
{
	struct ath10k *ar = htt->ar;
	struct htt_rx_in_ord_msdu_desc_ext *msdu_desc = ev->msdu_descs64;
	struct htt_rx_desc *rxd;
	struct sk_buff *msdu;
	int msdu_count;
	bool is_offload;
	u64 paddr;

	lockdep_assert_held(&htt->rx_ring.lock);

	msdu_count = __le16_to_cpu(ev->msdu_count);
	is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);

	while (msdu_count--) {
		paddr = __le64_to_cpu(msdu_desc->msdu_paddr);
		msdu = ath10k_htt_rx_pop_paddr(htt, paddr);
		if (!msdu) {
			__skb_queue_purge(list);
			return -ENOENT;
		}

		__skb_queue_tail(list, msdu);

		if (!is_offload) {
			rxd = (void *)msdu->data;

			trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));

			skb_put(msdu, sizeof(*rxd));
			skb_pull(msdu, sizeof(*rxd));
			skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len));

			if (!(__le32_to_cpu(rxd->attention.flags) &
			      RX_ATTENTION_FLAGS_MSDU_DONE)) {
				ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n");
				return -EIO;
			}
		}

		msdu_desc++;
	}

	return 0;
}

int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	dma_addr_t paddr;
	void *vaddr, *vaddr_ring;
	size_t size;
	struct timer_list *timer = &htt->rx_ring.refill_retry_timer;

	htt->rx_confused = false;

	/* XXX: The fill level could be changed during runtime in response to
	 * the host processing latency. Is this really worth it?
	 */
	htt->rx_ring.size = HTT_RX_RING_SIZE;
	htt->rx_ring.size_mask = htt->rx_ring.size - 1;
	htt->rx_ring.fill_level = ar->hw_params.rx_ring_fill_level;

	if (!is_power_of_2(htt->rx_ring.size)) {
		ath10k_warn(ar, "htt rx ring size is not power of 2\n");
		return -EINVAL;
	}

	htt->rx_ring.netbufs_ring =
		kzalloc(htt->rx_ring.size * sizeof(struct sk_buff *),
			GFP_KERNEL);
	if (!htt->rx_ring.netbufs_ring)
		goto err_netbuf;

	size = htt->rx_ops->htt_get_rx_ring_size(htt);

	vaddr_ring = dma_alloc_coherent(htt->ar->dev, size, &paddr, GFP_KERNEL);
	if (!vaddr_ring)
		goto err_dma_ring;

	htt->rx_ops->htt_config_paddrs_ring(htt, vaddr_ring);
	htt->rx_ring.base_paddr = paddr;

	vaddr = dma_alloc_coherent(htt->ar->dev,
				   sizeof(*htt->rx_ring.alloc_idx.vaddr),
				   &paddr, GFP_KERNEL);
	if (!vaddr)
		goto err_dma_idx;

	htt->rx_ring.alloc_idx.vaddr = vaddr;
	htt->rx_ring.alloc_idx.paddr = paddr;
	htt->rx_ring.sw_rd_idx.msdu_payld = htt->rx_ring.size_mask;
	*htt->rx_ring.alloc_idx.vaddr = 0;

	/* Initialize the Rx refill retry timer */
	timer_setup(timer, ath10k_htt_rx_ring_refill_retry, 0);

	spin_lock_init(&htt->rx_ring.lock);

	htt->rx_ring.fill_cnt = 0;
	htt->rx_ring.sw_rd_idx.msdu_payld = 0;
	hash_init(htt->rx_ring.skb_table);

	skb_queue_head_init(&htt->rx_msdus_q);
	skb_queue_head_init(&htt->rx_in_ord_compl_q);
	skb_queue_head_init(&htt->tx_fetch_ind_q);
	atomic_set(&htt->num_mpdus_ready, 0);

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
		   htt->rx_ring.size, htt->rx_ring.fill_level);
	return 0;

err_dma_idx:
	dma_free_coherent(htt->ar->dev,
			  htt->rx_ops->htt_get_rx_ring_size(htt),
			  vaddr_ring,
			  htt->rx_ring.base_paddr);
err_dma_ring:
	kfree(htt->rx_ring.netbufs_ring);
err_netbuf:
	return -ENOMEM;
}
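
/* A minimal sketch of the rx ring lifecycle as driven by the rest of the
 * driver, using only functions from this file:
 *
 *	ret = ath10k_htt_rx_alloc(htt);			ring + alloc index DMA
 *	if (!ret)
 *		ret = ath10k_htt_rx_ring_refill(htt->ar);  post initial buffers
 *	...
 *	ath10k_htt_rx_free(htt);			timer, queues, DMA teardown
 */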

static int ath10k_htt_rx_crypto_param_len(struct ath10k *ar,
					  enum htt_rx_mpdu_encrypt_type type)
{
	switch (type) {
	case HTT_RX_MPDU_ENCRYPT_NONE:
		return 0;
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
		return IEEE80211_WEP_IV_LEN;
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
		return IEEE80211_TKIP_IV_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
		return IEEE80211_CCMP_HDR_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
		return IEEE80211_CCMP_256_HDR_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
		return IEEE80211_GCMP_HDR_LEN;
	case HTT_RX_MPDU_ENCRYPT_WEP128:
	case HTT_RX_MPDU_ENCRYPT_WAPI:
		break;
	}

	ath10k_warn(ar, "unsupported encryption type %d\n", type);
	return 0;
}

#define MICHAEL_MIC_LEN 8

static int ath10k_htt_rx_crypto_mic_len(struct ath10k *ar,
					enum htt_rx_mpdu_encrypt_type type)
{
	switch (type) {
	case HTT_RX_MPDU_ENCRYPT_NONE:
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
		return 0;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
		return IEEE80211_CCMP_MIC_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
		return IEEE80211_CCMP_256_MIC_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
		return IEEE80211_GCMP_MIC_LEN;
	case HTT_RX_MPDU_ENCRYPT_WEP128:
	case HTT_RX_MPDU_ENCRYPT_WAPI:
		break;
	}

	ath10k_warn(ar, "unsupported encryption type %d\n", type);
	return 0;
}

static int ath10k_htt_rx_crypto_icv_len(struct ath10k *ar,
					enum htt_rx_mpdu_encrypt_type type)
{
	switch (type) {
	case HTT_RX_MPDU_ENCRYPT_NONE:
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
	case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
		return 0;
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
		return IEEE80211_WEP_ICV_LEN;
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
		return IEEE80211_TKIP_ICV_LEN;
	case HTT_RX_MPDU_ENCRYPT_WEP128:
	case HTT_RX_MPDU_ENCRYPT_WAPI:
		break;
	}

	ath10k_warn(ar, "unsupported encryption type %d\n", type);
	return 0;
}
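
/* The three helpers above can be combined; a hypothetical helper (not part
 * of the driver) computing the total per-MPDU crypto overhead:
 *
 *	static int ath10k_htt_rx_crypto_total_len(struct ath10k *ar,
 *						  enum htt_rx_mpdu_encrypt_type type)
 *	{
 *		return ath10k_htt_rx_crypto_param_len(ar, type) +
 *		       ath10k_htt_rx_crypto_mic_len(ar, type) +
 *		       ath10k_htt_rx_crypto_icv_len(ar, type);
 *	}
 *
 * e.g. CCMP-128: 8 (PN header) + 8 (MIC) + 0 (ICV) = 16 bytes.
 */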

struct amsdu_subframe_hdr {
	u8 dst[ETH_ALEN];
	u8 src[ETH_ALEN];
	__be16 len;
} __packed;

#define GROUP_ID_IS_SU_MIMO(x) ((x) == 0 || (x) == 63)

static void ath10k_htt_rx_h_rates(struct ath10k *ar,
				  struct ieee80211_rx_status *status,
				  struct htt_rx_desc *rxd)
{
	struct ieee80211_supported_band *sband;
	u8 cck, rate, bw, sgi, mcs, nss;
	u8 preamble = 0;
	u8 group_id;
	u32 info1, info2, info3;

	info1 = __le32_to_cpu(rxd->ppdu_start.info1);
	info2 = __le32_to_cpu(rxd->ppdu_start.info2);
	info3 = __le32_to_cpu(rxd->ppdu_start.info3);

	preamble = MS(info1, RX_PPDU_START_INFO1_PREAMBLE_TYPE);

	switch (preamble) {
	case HTT_RX_LEGACY:
		/* To get the legacy rate index the band is required. Since the
		 * band can't be undefined, check if freq is non-zero.
		 */
		if (!status->freq)
			return;

		cck = info1 & RX_PPDU_START_INFO1_L_SIG_RATE_SELECT;
		rate = MS(info1, RX_PPDU_START_INFO1_L_SIG_RATE);
		rate &= ~RX_PPDU_START_RATE_FLAG;

		sband = &ar->mac.sbands[status->band];
		status->rate_idx = ath10k_mac_hw_rate_to_idx(sband, rate, cck);
		break;
	case HTT_RX_HT:
	case HTT_RX_HT_WITH_TXBF:
		/* HT-SIG - Table 20-11 in info2 and info3 */
		mcs = info2 & 0x1F;
		nss = mcs >> 3;
		bw = (info2 >> 7) & 1;
		sgi = (info3 >> 7) & 1;

		status->rate_idx = mcs;
		status->encoding = RX_ENC_HT;
		if (sgi)
			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
		if (bw)
			status->bw = RATE_INFO_BW_40;
		break;
	case HTT_RX_VHT:
	case HTT_RX_VHT_WITH_TXBF:
		/* VHT-SIG-A1 in info2, VHT-SIG-A2 in info3
		 * TODO check this
		 */
		bw = info2 & 3;
		sgi = info3 & 1;
		group_id = (info2 >> 4) & 0x3F;

		if (GROUP_ID_IS_SU_MIMO(group_id)) {
			mcs = (info3 >> 4) & 0x0F;
			nss = ((info2 >> 10) & 0x07) + 1;
		} else {
			/* Hardware doesn't decode VHT-SIG-B into the Rx
			 * descriptor so it's impossible to decode the MCS.
			 * Also, since the firmware consumes Group Id
			 * Management frames the host has no knowledge
			 * regarding the group/user position mapping, so it's
			 * impossible to pick the correct Nsts from VHT-SIG-A1.
			 *
			 * Bandwidth and SGI are valid so report the rateinfo
			 * on a best-effort basis.
			 */
			mcs = 0;
			nss = 1;
		}

		if (mcs > 0x09) {
			ath10k_warn(ar, "invalid MCS received %u\n", mcs);
			ath10k_warn(ar, "rxd %08x mpdu start %08x %08x msdu start %08x %08x ppdu start %08x %08x %08x %08x %08x\n",
				    __le32_to_cpu(rxd->attention.flags),
				    __le32_to_cpu(rxd->mpdu_start.info0),
				    __le32_to_cpu(rxd->mpdu_start.info1),
				    __le32_to_cpu(rxd->msdu_start.common.info0),
				    __le32_to_cpu(rxd->msdu_start.common.info1),
				    __le32_to_cpu(rxd->ppdu_start.info0),
				    __le32_to_cpu(rxd->ppdu_start.info1),
				    __le32_to_cpu(rxd->ppdu_start.info2),
				    __le32_to_cpu(rxd->ppdu_start.info3),
				    __le32_to_cpu(rxd->ppdu_start.info4));

			ath10k_warn(ar, "msdu end %08x mpdu end %08x\n",
				    __le32_to_cpu(rxd->msdu_end.common.info0),
				    __le32_to_cpu(rxd->mpdu_end.info0));

			ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL,
					"rx desc msdu payload: ",
					rxd->msdu_payload, 50);
		}

		status->rate_idx = mcs;
		status->nss = nss;

		if (sgi)
			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;

		switch (bw) {
		/* 20MHZ */
		case 0:
			break;
		/* 40MHZ */
		case 1:
			status->bw = RATE_INFO_BW_40;
			break;
		/* 80MHZ */
		case 2:
			status->bw = RATE_INFO_BW_80;
			break;
		/* 160MHZ */
		case 3:
			status->bw = RATE_INFO_BW_160;
			break;
		}

		status->encoding = RX_ENC_VHT;
		break;
	default:
		break;
	}
}

static struct ieee80211_channel *
ath10k_htt_rx_h_peer_channel(struct ath10k *ar, struct htt_rx_desc *rxd)
{
	struct ath10k_peer *peer;
	struct ath10k_vif *arvif;
	struct cfg80211_chan_def def;
	u16 peer_id;

	lockdep_assert_held(&ar->data_lock);

	if (!rxd)
		return NULL;

	if (rxd->attention.flags &
	    __cpu_to_le32(RX_ATTENTION_FLAGS_PEER_IDX_INVALID))
		return NULL;

	if (!(rxd->msdu_end.common.info0 &
	      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU)))
		return NULL;

	peer_id = MS(__le32_to_cpu(rxd->mpdu_start.info0),
		     RX_MPDU_START_INFO0_PEER_IDX);

	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer)
		return NULL;

	arvif = ath10k_get_arvif(ar, peer->vdev_id);
	if (WARN_ON_ONCE(!arvif))
		return NULL;

	if (ath10k_mac_vif_chan(arvif->vif, &def))
		return NULL;

	return def.chan;
}

static struct ieee80211_channel *
ath10k_htt_rx_h_vdev_channel(struct ath10k *ar, u32 vdev_id)
{
	struct ath10k_vif *arvif;
	struct cfg80211_chan_def def;

	lockdep_assert_held(&ar->data_lock);

	list_for_each_entry(arvif, &ar->arvifs, list) {
		if (arvif->vdev_id == vdev_id &&
		    ath10k_mac_vif_chan(arvif->vif, &def) == 0)
			return def.chan;
	}

	return NULL;
}

static void
ath10k_htt_rx_h_any_chan_iter(struct ieee80211_hw *hw,
			      struct ieee80211_chanctx_conf *conf,
			      void *data)
{
	struct cfg80211_chan_def *def = data;

	*def = conf->def;
}

static struct ieee80211_channel *
ath10k_htt_rx_h_any_channel(struct ath10k *ar)
{
	struct cfg80211_chan_def def = {};

	ieee80211_iter_chan_contexts_atomic(ar->hw,
					    ath10k_htt_rx_h_any_chan_iter,
					    &def);

	return def.chan;
}

static bool ath10k_htt_rx_h_channel(struct ath10k *ar,
				    struct ieee80211_rx_status *status,
				    struct htt_rx_desc *rxd,
				    u32 vdev_id)
{
	struct ieee80211_channel *ch;

	spin_lock_bh(&ar->data_lock);
	ch = ar->scan_channel;
	if (!ch)
		ch = ar->rx_channel;
	if (!ch)
		ch = ath10k_htt_rx_h_peer_channel(ar, rxd);
	if (!ch)
		ch = ath10k_htt_rx_h_vdev_channel(ar, vdev_id);
	if (!ch)
		ch = ath10k_htt_rx_h_any_channel(ar);
	if (!ch)
		ch = ar->tgt_oper_chan;
	spin_unlock_bh(&ar->data_lock);

	if (!ch)
		return false;

	status->band = ch->band;
	status->freq = ch->center_freq;

	return true;
}

static void ath10k_htt_rx_h_signal(struct ath10k *ar,
				   struct ieee80211_rx_status *status,
				   struct htt_rx_desc *rxd)
{
	int i;

	for (i = 0; i < IEEE80211_MAX_CHAINS; i++) {
		status->chains &= ~BIT(i);

		if (rxd->ppdu_start.rssi_chains[i].pri20_mhz != 0x80) {
			status->chain_signal[i] = ATH10K_DEFAULT_NOISE_FLOOR +
				rxd->ppdu_start.rssi_chains[i].pri20_mhz;

			status->chains |= BIT(i);
		}
	}

	/* FIXME: Get real NF */
	status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
			 rxd->ppdu_start.rssi_comb;
	status->flag &= ~RX_FLAG_NO_SIGNAL_VAL;
}
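
/* A worked example of the signal computation above, assuming
 * ATH10K_DEFAULT_NOISE_FLOOR is -95 dBm (see core.h):
 *
 *	rssi_comb = 35		-> status->signal = -95 + 35 = -60 dBm
 *	chain 0 pri20_mhz = 30	-> chain_signal[0] = -65 dBm, chain bit set
 *	chain 1 pri20_mhz = 0x80 -> invalid, chain bit stays cleared
 */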

static void ath10k_htt_rx_h_mactime(struct ath10k *ar,
				    struct ieee80211_rx_status *status,
				    struct htt_rx_desc *rxd)
{
	/* FIXME: TSF is known only at the end of PPDU, in the last MPDU. This
	 * means all prior MSDUs in a PPDU are reported to mac80211 without the
	 * TSF. Is it worth holding frames until end of PPDU is known?
	 *
	 * FIXME: Can we get/compute 64bit TSF?
	 */
	status->mactime = __le32_to_cpu(rxd->ppdu_end.common.tsf_timestamp);
	status->flag |= RX_FLAG_MACTIME_END;
}

static void ath10k_htt_rx_h_ppdu(struct ath10k *ar,
				 struct sk_buff_head *amsdu,
				 struct ieee80211_rx_status *status,
				 u32 vdev_id)
{
	struct sk_buff *first;
	struct htt_rx_desc *rxd;
	bool is_first_ppdu;
	bool is_last_ppdu;

	if (skb_queue_empty(amsdu))
		return;

	first = skb_peek(amsdu);
	rxd = (void *)first->data - sizeof(*rxd);

	is_first_ppdu = !!(rxd->attention.flags &
			   __cpu_to_le32(RX_ATTENTION_FLAGS_FIRST_MPDU));
	is_last_ppdu = !!(rxd->attention.flags &
			  __cpu_to_le32(RX_ATTENTION_FLAGS_LAST_MPDU));

	if (is_first_ppdu) {
		/* New PPDU starts so clear out the old per-PPDU status. */
		status->freq = 0;
		status->rate_idx = 0;
		status->nss = 0;
		status->encoding = RX_ENC_LEGACY;
		status->bw = RATE_INFO_BW_20;

		status->flag &= ~RX_FLAG_MACTIME_END;
		status->flag |= RX_FLAG_NO_SIGNAL_VAL;

		status->flag &= ~(RX_FLAG_AMPDU_IS_LAST);
		status->flag |= RX_FLAG_AMPDU_DETAILS | RX_FLAG_AMPDU_LAST_KNOWN;
		status->ampdu_reference = ar->ampdu_reference;

		ath10k_htt_rx_h_signal(ar, status, rxd);
		ath10k_htt_rx_h_channel(ar, status, rxd, vdev_id);
		ath10k_htt_rx_h_rates(ar, status, rxd);
	}

	if (is_last_ppdu) {
		ath10k_htt_rx_h_mactime(ar, status, rxd);

		/* set ampdu last segment flag */
		status->flag |= RX_FLAG_AMPDU_IS_LAST;
		ar->ampdu_reference++;
	}
}

static const char * const tid_to_ac[] = {
	"BE",
	"BK",
	"BK",
	"BE",
	"VI",
	"VI",
	"VO",
	"VO",
};

static char *ath10k_get_tid(struct ieee80211_hdr *hdr, char *out, size_t size)
{
	u8 *qc;
	int tid;

	if (!ieee80211_is_data_qos(hdr->frame_control))
		return "";

	qc = ieee80211_get_qos_ctl(hdr);
	tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
	if (tid < 8)
		snprintf(out, size, "tid %d (%s)", tid, tid_to_ac[tid]);
	else
		snprintf(out, size, "tid %d", tid);

	return out;
}
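
/* Example use of the helper above, as done in ath10k_process_rx() below:
 *
 *	char tid[32];
 *
 *	ath10k_dbg(ar, ATH10K_DBG_DATA, "rx %s\n",
 *		   ath10k_get_tid(hdr, tid, sizeof(tid)));
 */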

static void ath10k_htt_rx_h_queue_msdu(struct ath10k *ar,
				       struct ieee80211_rx_status *rx_status,
				       struct sk_buff *skb)
{
	struct ieee80211_rx_status *status;

	status = IEEE80211_SKB_RXCB(skb);
	*status = *rx_status;

	__skb_queue_tail(&ar->htt.rx_msdus_q, skb);
}

static void ath10k_process_rx(struct ath10k *ar, struct sk_buff *skb)
{
	struct ieee80211_rx_status *status;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	char tid[32];

	status = IEEE80211_SKB_RXCB(skb);

	ath10k_dbg(ar, ATH10K_DBG_DATA,
		   "rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
		   skb,
		   skb->len,
		   ieee80211_get_SA(hdr),
		   ath10k_get_tid(hdr, tid, sizeof(tid)),
		   is_multicast_ether_addr(ieee80211_get_DA(hdr)) ?
							"mcast" : "ucast",
		   (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4,
		   (status->encoding == RX_ENC_LEGACY) ? "legacy" : "",
		   (status->encoding == RX_ENC_HT) ? "ht" : "",
		   (status->encoding == RX_ENC_VHT) ? "vht" : "",
		   (status->bw == RATE_INFO_BW_40) ? "40" : "",
		   (status->bw == RATE_INFO_BW_80) ? "80" : "",
		   (status->bw == RATE_INFO_BW_160) ? "160" : "",
		   status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "",
		   status->rate_idx,
		   status->nss,
		   status->freq,
		   status->band, status->flag,
		   !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
		   !!(status->flag & RX_FLAG_MMIC_ERROR),
		   !!(status->flag & RX_FLAG_AMSDU_MORE));
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
			skb->data, skb->len);
	trace_ath10k_rx_hdr(ar, skb->data, skb->len);
	trace_ath10k_rx_payload(ar, skb->data, skb->len);

	ieee80211_rx_napi(ar->hw, NULL, skb, &ar->napi);
}

static int ath10k_htt_rx_nwifi_hdrlen(struct ath10k *ar,
				      struct ieee80211_hdr *hdr)
{
	int len = ieee80211_hdrlen(hdr->frame_control);

	if (!test_bit(ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING,
		      ar->running_fw->fw_file.fw_features))
		len = round_up(len, 4);

	return len;
}

static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
					struct sk_buff *msdu,
					struct ieee80211_rx_status *status,
					enum htt_rx_mpdu_encrypt_type enctype,
					bool is_decrypted)
{
	struct ieee80211_hdr *hdr;
	struct htt_rx_desc *rxd;
	size_t hdr_len;
	size_t crypto_len;
	bool is_first;
	bool is_last;

	rxd = (void *)msdu->data - sizeof(*rxd);
	is_first = !!(rxd->msdu_end.common.info0 &
		      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
	is_last = !!(rxd->msdu_end.common.info0 &
		     __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));

	/* Delivered decapped frame:
	 * [802.11 header]
	 * [crypto param] <-- can be trimmed if !fcs_err &&
	 *                    !decrypt_err && !peer_idx_invalid
	 * [amsdu header] <-- only if A-MSDU
	 * [rfc1042/llc]
	 * [payload]
	 * [FCS] <-- at end, needs to be trimmed
	 */

	/* This probably shouldn't happen but warn just in case */
	if (unlikely(WARN_ON_ONCE(!is_first)))
		return;

	/* This probably shouldn't happen but warn just in case */
	if (unlikely(WARN_ON_ONCE(!(is_first && is_last))))
		return;

	skb_trim(msdu, msdu->len - FCS_LEN);

	/* In most cases this will be true for sniffed frames. It makes sense
	 * to deliver them as-is without stripping the crypto param. This is
	 * necessary for software-based decryption.
	 *
	 * If there's no error then the frame is decrypted. At least that is
	 * the case for frames that come in via fragmented rx indication.
	 */
	if (!is_decrypted)
		return;

	/* The payload is decrypted so strip crypto params. Start from the
	 * tail since hdr is still needed to compute header and crypto
	 * lengths below.
	 */

	hdr = (void *)msdu->data;

	/* Tail */
	if (status->flag & RX_FLAG_IV_STRIPPED) {
		skb_trim(msdu, msdu->len -
			 ath10k_htt_rx_crypto_mic_len(ar, enctype));

		skb_trim(msdu, msdu->len -
			 ath10k_htt_rx_crypto_icv_len(ar, enctype));
	} else {
		/* MIC */
		if (status->flag & RX_FLAG_MIC_STRIPPED)
			skb_trim(msdu, msdu->len -
				 ath10k_htt_rx_crypto_mic_len(ar, enctype));

		/* ICV */
		if (status->flag & RX_FLAG_ICV_STRIPPED)
			skb_trim(msdu, msdu->len -
				 ath10k_htt_rx_crypto_icv_len(ar, enctype));
	}

	/* MMIC */
	if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
	    !ieee80211_has_morefrags(hdr->frame_control) &&
	    enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
		skb_trim(msdu, msdu->len - MICHAEL_MIC_LEN);

	/* Head */
	if (status->flag & RX_FLAG_IV_STRIPPED) {
		hdr_len = ieee80211_hdrlen(hdr->frame_control);
		crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);

		memmove((void *)msdu->data + crypto_len,
			(void *)msdu->data, hdr_len);
		skb_pull(msdu, crypto_len);
	}
}

static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
					  struct sk_buff *msdu,
					  struct ieee80211_rx_status *status,
					  const u8 first_hdr[64],
					  enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ieee80211_hdr *hdr;
	struct htt_rx_desc *rxd;
	size_t hdr_len;
	u8 da[ETH_ALEN];
	u8 sa[ETH_ALEN];
	int l3_pad_bytes;
	int bytes_aligned = ar->hw_params.decap_align_bytes;

	/* Delivered decapped frame:
	 * [nwifi 802.11 header] <-- replaced with 802.11 hdr
	 * [rfc1042/llc]
	 *
	 * Note: The nwifi header doesn't have QoS Control and is
	 * (always?) a 3addr frame.
	 *
	 * Note2: There's no A-MSDU subframe header. Even if it's part
	 * of an A-MSDU.
	 */

	/* pull decapped header and copy SA & DA */
	rxd = (void *)msdu->data - sizeof(*rxd);

	l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
	skb_put(msdu, l3_pad_bytes);

	hdr = (struct ieee80211_hdr *)(msdu->data + l3_pad_bytes);

	hdr_len = ath10k_htt_rx_nwifi_hdrlen(ar, hdr);
	ether_addr_copy(da, ieee80211_get_DA(hdr));
	ether_addr_copy(sa, ieee80211_get_SA(hdr));
	skb_pull(msdu, hdr_len);

	/* push original 802.11 header */
	hdr = (struct ieee80211_hdr *)first_hdr;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);

	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		memcpy(skb_push(msdu,
				ath10k_htt_rx_crypto_param_len(ar, enctype)),
		       (void *)hdr + round_up(hdr_len, bytes_aligned),
			ath10k_htt_rx_crypto_param_len(ar, enctype));
	}

	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);

	/* original 802.11 header has a different DA and in
	 * case of 4addr it may also have different SA
	 */
	hdr = (struct ieee80211_hdr *)msdu->data;
	ether_addr_copy(ieee80211_get_DA(hdr), da);
	ether_addr_copy(ieee80211_get_SA(hdr), sa);
}

static void *ath10k_htt_rx_h_find_rfc1042(struct ath10k *ar,
					  struct sk_buff *msdu,
					  enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ieee80211_hdr *hdr;
	struct htt_rx_desc *rxd;
	size_t hdr_len, crypto_len;
	void *rfc1042;
	bool is_first, is_last, is_amsdu;
	int bytes_aligned = ar->hw_params.decap_align_bytes;

	rxd = (void *)msdu->data - sizeof(*rxd);
	hdr = (void *)rxd->rx_hdr_status;

	is_first = !!(rxd->msdu_end.common.info0 &
		      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
	is_last = !!(rxd->msdu_end.common.info0 &
		     __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
	is_amsdu = !(is_first && is_last);

	rfc1042 = hdr;

	if (is_first) {
		hdr_len = ieee80211_hdrlen(hdr->frame_control);
		crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);

		rfc1042 += round_up(hdr_len, bytes_aligned) +
			   round_up(crypto_len, bytes_aligned);
	}

	if (is_amsdu)
		rfc1042 += sizeof(struct amsdu_subframe_hdr);

	return rfc1042;
}
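
/* A worked example of the offset computed above for the first subframe of an
 * encrypted A-MSDU, with hypothetical values (bytes_aligned = 4):
 *
 *	QoS data header:	hdr_len = 26	-> round_up(26, 4) = 28
 *	CCMP:			crypto_len = 8	-> round_up(8, 4) = 8
 *	A-MSDU:			+ sizeof(struct amsdu_subframe_hdr) = 14
 *
 *	rfc1042 = rx_hdr_status + 28 + 8 + 14 = rx_hdr_status + 50
 */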

static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar,
					struct sk_buff *msdu,
					struct ieee80211_rx_status *status,
					const u8 first_hdr[64],
					enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ieee80211_hdr *hdr;
	struct ethhdr *eth;
	size_t hdr_len;
	void *rfc1042;
	u8 da[ETH_ALEN];
	u8 sa[ETH_ALEN];
	int l3_pad_bytes;
	struct htt_rx_desc *rxd;
	int bytes_aligned = ar->hw_params.decap_align_bytes;

	/* Delivered decapped frame:
	 * [eth header] <-- replaced with 802.11 hdr & rfc1042/llc
	 * [payload]
	 */

	rfc1042 = ath10k_htt_rx_h_find_rfc1042(ar, msdu, enctype);
	if (WARN_ON_ONCE(!rfc1042))
		return;

	rxd = (void *)msdu->data - sizeof(*rxd);
	l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
	skb_put(msdu, l3_pad_bytes);
	skb_pull(msdu, l3_pad_bytes);

	/* pull decapped header and copy SA & DA */
	eth = (struct ethhdr *)msdu->data;
	ether_addr_copy(da, eth->h_dest);
	ether_addr_copy(sa, eth->h_source);
	skb_pull(msdu, sizeof(struct ethhdr));

	/* push rfc1042/llc/snap */
	memcpy(skb_push(msdu, sizeof(struct rfc1042_hdr)), rfc1042,
	       sizeof(struct rfc1042_hdr));

	/* push original 802.11 header */
	hdr = (struct ieee80211_hdr *)first_hdr;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);

	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		memcpy(skb_push(msdu,
				ath10k_htt_rx_crypto_param_len(ar, enctype)),
		       (void *)hdr + round_up(hdr_len, bytes_aligned),
			ath10k_htt_rx_crypto_param_len(ar, enctype));
	}

	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);

	/* original 802.11 header has a different DA and in
	 * case of 4addr it may also have different SA
	 */
	hdr = (struct ieee80211_hdr *)msdu->data;
	ether_addr_copy(ieee80211_get_DA(hdr), da);
	ether_addr_copy(ieee80211_get_SA(hdr), sa);
}

static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar,
					 struct sk_buff *msdu,
					 struct ieee80211_rx_status *status,
					 const u8 first_hdr[64],
					 enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ieee80211_hdr *hdr;
	size_t hdr_len;
	int l3_pad_bytes;
	struct htt_rx_desc *rxd;
	int bytes_aligned = ar->hw_params.decap_align_bytes;

	/* Delivered decapped frame:
	 * [amsdu header] <-- replaced with 802.11 hdr
	 * [rfc1042/llc]
	 * [payload]
	 */

	rxd = (void *)msdu->data - sizeof(*rxd);
	l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);

	skb_put(msdu, l3_pad_bytes);
	skb_pull(msdu, sizeof(struct amsdu_subframe_hdr) + l3_pad_bytes);

	hdr = (struct ieee80211_hdr *)first_hdr;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);

	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		memcpy(skb_push(msdu,
				ath10k_htt_rx_crypto_param_len(ar, enctype)),
		       (void *)hdr + round_up(hdr_len, bytes_aligned),
			ath10k_htt_rx_crypto_param_len(ar, enctype));
	}

	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
}

static void ath10k_htt_rx_h_undecap(struct ath10k *ar,
				    struct sk_buff *msdu,
				    struct ieee80211_rx_status *status,
				    u8 first_hdr[64],
				    enum htt_rx_mpdu_encrypt_type enctype,
				    bool is_decrypted)
{
	struct htt_rx_desc *rxd;
	enum rx_msdu_decap_format decap;

	/* First msdu's decapped header:
	 * [802.11 header] <-- padded to 4 bytes long
	 * [crypto param] <-- padded to 4 bytes long
	 * [amsdu header] <-- only if A-MSDU
	 * [rfc1042/llc]
	 *
	 * Other (2nd, 3rd, ..) msdu's decapped header:
	 * [amsdu header] <-- only if A-MSDU
	 * [rfc1042/llc]
	 */

	rxd = (void *)msdu->data - sizeof(*rxd);
	decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1),
		   RX_MSDU_START_INFO1_DECAP_FORMAT);

	switch (decap) {
	case RX_MSDU_DECAP_RAW:
		ath10k_htt_rx_h_undecap_raw(ar, msdu, status, enctype,
					    is_decrypted);
		break;
	case RX_MSDU_DECAP_NATIVE_WIFI:
		ath10k_htt_rx_h_undecap_nwifi(ar, msdu, status, first_hdr,
					      enctype);
		break;
	case RX_MSDU_DECAP_ETHERNET2_DIX:
		ath10k_htt_rx_h_undecap_eth(ar, msdu, status, first_hdr, enctype);
		break;
	case RX_MSDU_DECAP_8023_SNAP_LLC:
		ath10k_htt_rx_h_undecap_snap(ar, msdu, status, first_hdr,
					     enctype);
		break;
	}
}

static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
{
	struct htt_rx_desc *rxd;
	u32 flags, info;
	bool is_ip4, is_ip6;
	bool is_tcp, is_udp;
	bool ip_csum_ok, tcpudp_csum_ok;

	rxd = (void *)skb->data - sizeof(*rxd);
	flags = __le32_to_cpu(rxd->attention.flags);
	info = __le32_to_cpu(rxd->msdu_start.common.info1);

	is_ip4 = !!(info & RX_MSDU_START_INFO1_IPV4_PROTO);
	is_ip6 = !!(info & RX_MSDU_START_INFO1_IPV6_PROTO);
	is_tcp = !!(info & RX_MSDU_START_INFO1_TCP_PROTO);
	is_udp = !!(info & RX_MSDU_START_INFO1_UDP_PROTO);
	ip_csum_ok = !(flags & RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL);
	tcpudp_csum_ok = !(flags & RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL);

	if (!is_ip4 && !is_ip6)
		return CHECKSUM_NONE;
	if (!is_tcp && !is_udp)
		return CHECKSUM_NONE;
	if (!ip_csum_ok)
		return CHECKSUM_NONE;
	if (!tcpudp_csum_ok)
		return CHECKSUM_NONE;

	return CHECKSUM_UNNECESSARY;
}

static void ath10k_htt_rx_h_csum_offload(struct sk_buff *msdu)
{
	msdu->ip_summed = ath10k_htt_rx_get_csum_state(msdu);
}

static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
				 struct sk_buff_head *amsdu,
				 struct ieee80211_rx_status *status,
				 bool fill_crypt_header)
{
	struct sk_buff *first;
	struct sk_buff *last;
	struct sk_buff *msdu;
	struct htt_rx_desc *rxd;
	struct ieee80211_hdr *hdr;
	enum htt_rx_mpdu_encrypt_type enctype;
	u8 first_hdr[64];
	u8 *qos;
	bool has_fcs_err;
	bool has_crypto_err;
	bool has_tkip_err;
	bool has_peer_idx_invalid;
	bool is_decrypted;
	bool is_mgmt;
	u32 attention;

	if (skb_queue_empty(amsdu))
		return;

	first = skb_peek(amsdu);
	rxd = (void *)first->data - sizeof(*rxd);

	is_mgmt = !!(rxd->attention.flags &
		     __cpu_to_le32(RX_ATTENTION_FLAGS_MGMT_TYPE));

	enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
		     RX_MPDU_START_INFO0_ENCRYPT_TYPE);

	/* First MSDU's Rx descriptor in an A-MSDU contains the full 802.11
	 * decapped header. It'll be used for undecapping of each MSDU.
	 */
	hdr = (void *)rxd->rx_hdr_status;
	memcpy(first_hdr, hdr, RX_HTT_HDR_STATUS_LEN);

	/* Each A-MSDU subframe will use the original header as the base and be
	 * reported as a separate MSDU so strip the A-MSDU bit from QoS Ctl.
	 */
	hdr = (void *)first_hdr;

	if (ieee80211_is_data_qos(hdr->frame_control)) {
		qos = ieee80211_get_qos_ctl(hdr);
		qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
	}

	/* Some attention flags are valid only in the last MSDU. */
	last = skb_peek_tail(amsdu);
	rxd = (void *)last->data - sizeof(*rxd);
	attention = __le32_to_cpu(rxd->attention.flags);

	has_fcs_err = !!(attention & RX_ATTENTION_FLAGS_FCS_ERR);
	has_crypto_err = !!(attention & RX_ATTENTION_FLAGS_DECRYPT_ERR);
	has_tkip_err = !!(attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR);
	has_peer_idx_invalid = !!(attention & RX_ATTENTION_FLAGS_PEER_IDX_INVALID);

	/* Note: If hardware captures an encrypted frame that it can't decrypt,
	 * e.g. due to fcs error, missing peer or invalid key data, it will
	 * report the frame as raw.
	 */
	is_decrypted = (enctype != HTT_RX_MPDU_ENCRYPT_NONE &&
			!has_fcs_err &&
			!has_crypto_err &&
			!has_peer_idx_invalid);

	/* Clear per-MPDU flags while leaving per-PPDU flags intact. */
	status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
			  RX_FLAG_MMIC_ERROR |
			  RX_FLAG_DECRYPTED |
			  RX_FLAG_IV_STRIPPED |
			  RX_FLAG_ONLY_MONITOR |
			  RX_FLAG_MMIC_STRIPPED);

	if (has_fcs_err)
		status->flag |= RX_FLAG_FAILED_FCS_CRC;

	if (has_tkip_err)
		status->flag |= RX_FLAG_MMIC_ERROR;

	/* Firmware reports all necessary management frames via WMI already.
	 * They are not reported to monitor interfaces at all so pass the ones
	 * coming via HTT to monitor interfaces instead. This simplifies
	 * matters a lot.
	 */
	if (is_mgmt)
		status->flag |= RX_FLAG_ONLY_MONITOR;

	if (is_decrypted) {
		status->flag |= RX_FLAG_DECRYPTED;

		if (likely(!is_mgmt))
			status->flag |= RX_FLAG_MMIC_STRIPPED;

		if (fill_crypt_header)
			status->flag |= RX_FLAG_MIC_STRIPPED |
					RX_FLAG_ICV_STRIPPED;
		else
			status->flag |= RX_FLAG_IV_STRIPPED;
	}

	skb_queue_walk(amsdu, msdu) {
		ath10k_htt_rx_h_csum_offload(msdu);
		ath10k_htt_rx_h_undecap(ar, msdu, status, first_hdr, enctype,
					is_decrypted);

		/* Undecapping involves copying the original 802.11 header back
		 * to sk_buff. If frame is protected and hardware has decrypted
		 * it then remove the protected bit.
		 */
		if (!is_decrypted)
			continue;
		if (is_mgmt)
			continue;

		if (fill_crypt_header)
			continue;

		hdr = (void *)msdu->data;
		hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
	}
}

static void ath10k_htt_rx_h_enqueue(struct ath10k *ar,
				    struct sk_buff_head *amsdu,
				    struct ieee80211_rx_status *status)
{
	struct sk_buff *msdu;
	struct sk_buff *first_subframe;

	first_subframe = skb_peek(amsdu);

	while ((msdu = __skb_dequeue(amsdu))) {
		/* Setup per-MSDU flags */
		if (skb_queue_empty(amsdu))
			status->flag &= ~RX_FLAG_AMSDU_MORE;
		else
			status->flag |= RX_FLAG_AMSDU_MORE;

		if (msdu == first_subframe) {
			first_subframe = NULL;
			status->flag &= ~RX_FLAG_ALLOW_SAME_PN;
		} else {
			status->flag |= RX_FLAG_ALLOW_SAME_PN;
		}

		ath10k_htt_rx_h_queue_msdu(ar, status, msdu);
	}
}

static int ath10k_unchain_msdu(struct sk_buff_head *amsdu)
{
	struct sk_buff *skb, *first;
	int space;
	int total_len = 0;

	/* TODO: We might optimize this by using skb_try_coalesce or a
	 * similar method to decrease copying, or maybe get mac80211 to
	 * provide a way to just receive a list of skbs?
	 */

	first = __skb_dequeue(amsdu);

	/* Allocate total length all at once. */
	skb_queue_walk(amsdu, skb)
		total_len += skb->len;

	space = total_len - skb_tailroom(first);
	if ((space > 0) &&
	    (pskb_expand_head(first, 0, space, GFP_ATOMIC) < 0)) {
		/* TODO: bump some rx-oom error stat */
		/* put it back together so we can free the
		 * whole list at once.
		 */
		__skb_queue_head(amsdu, first);
		return -1;
	}

	/* Walk list again, copying contents into
	 * msdu_head
	 */
	while ((skb = __skb_dequeue(amsdu))) {
		skb_copy_from_linear_data(skb, skb_put(first, skb->len),
					  skb->len);
		dev_kfree_skb_any(skb);
	}

	__skb_queue_head(amsdu, first);
	return 0;
}
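
/* A worked example of the headroom math in ath10k_unchain_msdu() above,
 * with hypothetical lengths:
 *
 *	first skb:	len = 1560, skb_tailroom(first) = 360
 *	trailing skbs:	1920 + 520		-> total_len = 2440
 *	space = 2440 - 360 = 2080		-> pskb_expand_head(first, 0,
 *						   2080, GFP_ATOMIC)
 *
 * After expansion each trailing skb is copied into first's tailroom and
 * freed, leaving a single linear skb on the queue.
 */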
1696 
1697 static void ath10k_htt_rx_h_unchain(struct ath10k *ar,
1698 				    struct sk_buff_head *amsdu)
1699 {
1700 	struct sk_buff *first;
1701 	struct htt_rx_desc *rxd;
1702 	enum rx_msdu_decap_format decap;
1703 
1704 	first = skb_peek(amsdu);
1705 	rxd = (void *)first->data - sizeof(*rxd);
1706 	decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1),
1707 		   RX_MSDU_START_INFO1_DECAP_FORMAT);
1708 
1709 	/* FIXME: Current unchaining logic can only handle simple case of raw
1710 	 * msdu chaining. If decapping is other than raw the chaining may be
1711 	 * more complex and this isn't handled by the current code. Don't even
1712 	 * try re-constructing such frames - it'll be pretty much garbage.
1713 	 */
1714 	if (decap != RX_MSDU_DECAP_RAW ||
1715 	    skb_queue_len(amsdu) != 1 + rxd->frag_info.ring2_more_count) {
1716 		__skb_queue_purge(amsdu);
1717 		return;
1718 	}
1719 
1720 	ath10k_unchain_msdu(amsdu);
1721 }
1722 
1723 static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar,
1724 					struct sk_buff_head *amsdu,
1725 					struct ieee80211_rx_status *rx_status)
1726 {
1727 	/* FIXME: It might be a good idea to do some fuzzy-testing to drop
1728 	 * invalid/dangerous frames.
1729 	 */
1730 
1731 	if (!rx_status->freq) {
1732 		ath10k_dbg(ar, ATH10K_DBG_HTT, "no channel configured; ignoring frame(s)!\n");
1733 		return false;
1734 	}
1735 
1736 	if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) {
1737 		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx cac running\n");
1738 		return false;
1739 	}
1740 
1741 	return true;
1742 }
1743 
1744 static void ath10k_htt_rx_h_filter(struct ath10k *ar,
1745 				   struct sk_buff_head *amsdu,
1746 				   struct ieee80211_rx_status *rx_status)
1747 {
1748 	if (skb_queue_empty(amsdu))
1749 		return;
1750 
1751 	if (ath10k_htt_rx_amsdu_allowed(ar, amsdu, rx_status))
1752 		return;
1753 
1754 	__skb_queue_purge(amsdu);
1755 }
1756 
1757 static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
1758 {
1759 	struct ath10k *ar = htt->ar;
1760 	struct ieee80211_rx_status *rx_status = &htt->rx_status;
1761 	struct sk_buff_head amsdu;
1762 	int ret;
1763 
1764 	__skb_queue_head_init(&amsdu);
1765 
1766 	spin_lock_bh(&htt->rx_ring.lock);
1767 	if (htt->rx_confused) {
1768 		spin_unlock_bh(&htt->rx_ring.lock);
1769 		return -EIO;
1770 	}
1771 	ret = ath10k_htt_rx_amsdu_pop(htt, &amsdu);
1772 	spin_unlock_bh(&htt->rx_ring.lock);
1773 
1774 	if (ret < 0) {
1775 		ath10k_warn(ar, "rx ring became corrupted: %d\n", ret);
1776 		__skb_queue_purge(&amsdu);
1777 		/* FIXME: It's probably a good idea to reboot the
1778 		 * device instead of leaving it inoperable.
1779 		 */
1780 		htt->rx_confused = true;
1781 		return ret;
1782 	}
1783 
1784 	ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);
1785 
1786 	/* only for ret = 1 indicates chained msdus */
1787 	if (ret > 0)
1788 		ath10k_htt_rx_h_unchain(ar, &amsdu);
1789 
1790 	ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
1791 	ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status, true);
1792 	ath10k_htt_rx_h_enqueue(ar, &amsdu, rx_status);
1793 
1794 	return 0;
1795 }
1796 
1797 static void ath10k_htt_rx_proc_rx_ind(struct ath10k_htt *htt,
1798 				      struct htt_rx_indication *rx)
1799 {
1800 	struct ath10k *ar = htt->ar;
1801 	struct htt_rx_indication_mpdu_range *mpdu_ranges;
1802 	int num_mpdu_ranges;
1803 	int i, mpdu_count = 0;
1804 
1805 	num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
1806 			     HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
1807 	mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx);
1808 
1809 	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
1810 			rx, sizeof(*rx) +
1811 			(sizeof(struct htt_rx_indication_mpdu_range) *
1812 				num_mpdu_ranges));
1813 
1814 	for (i = 0; i < num_mpdu_ranges; i++)
1815 		mpdu_count += mpdu_ranges[i].mpdu_count;
1816 
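	/* Only the MPDU count is recorded here; the frames themselves are
	 * popped off the rx ring later, from NAPI poll context, by
	 * ath10k_htt_rx_handle_amsdu().
	 */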
1817 	atomic_add(mpdu_count, &htt->num_mpdus_ready);
1818 }
1819 
1820 static void ath10k_htt_rx_tx_compl_ind(struct ath10k *ar,
1821 				       struct sk_buff *skb)
1822 {
1823 	struct ath10k_htt *htt = &ar->htt;
1824 	struct htt_resp *resp = (struct htt_resp *)skb->data;
1825 	struct htt_tx_done tx_done = {};
1826 	int status = MS(resp->data_tx_completion.flags, HTT_DATA_TX_STATUS);
1827 	__le16 msdu_id;
1828 	int i;
1829 
1830 	switch (status) {
1831 	case HTT_DATA_TX_STATUS_NO_ACK:
1832 		tx_done.status = HTT_TX_COMPL_STATE_NOACK;
1833 		break;
1834 	case HTT_DATA_TX_STATUS_OK:
1835 		tx_done.status = HTT_TX_COMPL_STATE_ACK;
1836 		break;
1837 	case HTT_DATA_TX_STATUS_DISCARD:
1838 	case HTT_DATA_TX_STATUS_POSTPONE:
1839 	case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL:
1840 		tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
1841 		break;
1842 	default:
1843 		ath10k_warn(ar, "unhandled tx completion status %d\n", status);
1844 		tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
1845 		break;
1846 	}
1847 
1848 	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
1849 		   resp->data_tx_completion.num_msdus);
1850 
1851 	for (i = 0; i < resp->data_tx_completion.num_msdus; i++) {
1852 		msdu_id = resp->data_tx_completion.msdus[i];
1853 		tx_done.msdu_id = __le16_to_cpu(msdu_id);
1854 
1855 		/* kfifo_put: In practice firmware shouldn't fire off per-CE
1856 		 * interrupt and main interrupt (MSI/-X range case) for the same
1857 		 * HTC service so it should be safe to use kfifo_put w/o lock.
1858 		 *
1859 		 * From kfifo_put() documentation:
1860 		 *  Note that with only one concurrent reader and one concurrent
1861 		 *  writer, you don't need extra locking to use these macros.
1862 		 */
1863 		if (!kfifo_put(&htt->txdone_fifo, tx_done)) {
1864 			ath10k_warn(ar, "txdone fifo overrun, msdu_id %d status %d\n",
1865 				    tx_done.msdu_id, tx_done.status);
1866 			ath10k_txrx_tx_unref(htt, &tx_done);
1867 		}
1868 	}
1869 }
1870 
1871 static void ath10k_htt_rx_addba(struct ath10k *ar, struct htt_resp *resp)
1872 {
1873 	struct htt_rx_addba *ev = &resp->rx_addba;
1874 	struct ath10k_peer *peer;
1875 	struct ath10k_vif *arvif;
1876 	u16 info0, tid, peer_id;
1877 
1878 	info0 = __le16_to_cpu(ev->info0);
1879 	tid = MS(info0, HTT_RX_BA_INFO0_TID);
1880 	peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);
1881 
1882 	ath10k_dbg(ar, ATH10K_DBG_HTT,
1883 		   "htt rx addba tid %hu peer_id %hu size %hhu\n",
1884 		   tid, peer_id, ev->window_size);
1885 
1886 	spin_lock_bh(&ar->data_lock);
1887 	peer = ath10k_peer_find_by_id(ar, peer_id);
1888 	if (!peer) {
1889 		ath10k_warn(ar, "received addba event for invalid peer_id: %hu\n",
1890 			    peer_id);
1891 		spin_unlock_bh(&ar->data_lock);
1892 		return;
1893 	}
1894 
1895 	arvif = ath10k_get_arvif(ar, peer->vdev_id);
1896 	if (!arvif) {
1897 		ath10k_warn(ar, "received addba event for invalid vdev_id: %u\n",
1898 			    peer->vdev_id);
1899 		spin_unlock_bh(&ar->data_lock);
1900 		return;
1901 	}
1902 
1903 	ath10k_dbg(ar, ATH10K_DBG_HTT,
1904 		   "htt rx start rx ba session sta %pM tid %hu size %hhu\n",
1905 		   peer->addr, tid, ev->window_size);
1906 
1907 	ieee80211_start_rx_ba_session_offl(arvif->vif, peer->addr, tid);
1908 	spin_unlock_bh(&ar->data_lock);
1909 }
1910 
1911 static void ath10k_htt_rx_delba(struct ath10k *ar, struct htt_resp *resp)
1912 {
1913 	struct htt_rx_delba *ev = &resp->rx_delba;
1914 	struct ath10k_peer *peer;
1915 	struct ath10k_vif *arvif;
1916 	u16 info0, tid, peer_id;
1917 
1918 	info0 = __le16_to_cpu(ev->info0);
1919 	tid = MS(info0, HTT_RX_BA_INFO0_TID);
1920 	peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);
1921 
1922 	ath10k_dbg(ar, ATH10K_DBG_HTT,
1923 		   "htt rx delba tid %hu peer_id %hu\n",
1924 		   tid, peer_id);
1925 
1926 	spin_lock_bh(&ar->data_lock);
1927 	peer = ath10k_peer_find_by_id(ar, peer_id);
1928 	if (!peer) {
1929 		ath10k_warn(ar, "received addba event for invalid peer_id: %hu\n",
1930 			    peer_id);
1931 		spin_unlock_bh(&ar->data_lock);
1932 		return;
1933 	}
1934 
1935 	arvif = ath10k_get_arvif(ar, peer->vdev_id);
1936 	if (!arvif) {
1937 		ath10k_warn(ar, "received addba event for invalid vdev_id: %u\n",
1938 			    peer->vdev_id);
1939 		spin_unlock_bh(&ar->data_lock);
1940 		return;
1941 	}
1942 
1943 	ath10k_dbg(ar, ATH10K_DBG_HTT,
1944 		   "htt rx stop rx ba session sta %pM tid %hu\n",
1945 		   peer->addr, tid);
1946 
1947 	ieee80211_stop_rx_ba_session_offl(arvif->vif, peer->addr, tid);
1948 	spin_unlock_bh(&ar->data_lock);
1949 }
1950 
1951 static int ath10k_htt_rx_extract_amsdu(struct sk_buff_head *list,
1952 				       struct sk_buff_head *amsdu)
1953 {
1954 	struct sk_buff *msdu;
1955 	struct htt_rx_desc *rxd;
1956 
1957 	if (skb_queue_empty(list))
1958 		return -ENOBUFS;
1959 
1960 	if (WARN_ON(!skb_queue_empty(amsdu)))
1961 		return -EINVAL;
1962 
1963 	while ((msdu = __skb_dequeue(list))) {
1964 		__skb_queue_tail(amsdu, msdu);
1965 
1966 		rxd = (void *)msdu->data - sizeof(*rxd);
1967 		if (rxd->msdu_end.common.info0 &
1968 		    __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))
1969 			break;
1970 	}
1971 
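	/* If the list ran out before a LAST_MSDU marker was seen, the A-MSDU is
	 * incomplete: splice the MSDUs back onto the head of the list and let
	 * the caller decide (the in-order rx path treats this as a fatal ring
	 * inconsistency).
	 */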
1972 	msdu = skb_peek_tail(amsdu);
1973 	rxd = (void *)msdu->data - sizeof(*rxd);
1974 	if (!(rxd->msdu_end.common.info0 &
1975 	      __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))) {
1976 		skb_queue_splice_init(amsdu, list);
1977 		return -EAGAIN;
1978 	}
1979 
1980 	return 0;
1981 }
1982 
1983 static void ath10k_htt_rx_h_rx_offload_prot(struct ieee80211_rx_status *status,
1984 					    struct sk_buff *skb)
1985 {
1986 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1987 
1988 	if (!ieee80211_has_protected(hdr->frame_control))
1989 		return;
1990 
1991 	/* Offloaded frames are already decrypted but firmware insists they are
1992 	 * protected in the 802.11 header. Strip the flag.  Otherwise mac80211
1993 	 * will drop the frame.
1994 	 */
1995 
1996 	hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
1997 	status->flag |= RX_FLAG_DECRYPTED |
1998 			RX_FLAG_IV_STRIPPED |
1999 			RX_FLAG_MMIC_STRIPPED;
2000 }
2001 
2002 static void ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
2003 				       struct sk_buff_head *list)
2004 {
2005 	struct ath10k_htt *htt = &ar->htt;
2006 	struct ieee80211_rx_status *status = &htt->rx_status;
2007 	struct htt_rx_offload_msdu *rx;
2008 	struct sk_buff *msdu;
2009 	size_t offset;
2010 
2011 	while ((msdu = __skb_dequeue(list))) {
2012 		/* Offloaded frames don't have an Rx descriptor. Instead they
2013 		 * carry a short meta-information header.
2014 		 */
2015 
2016 		rx = (void *)msdu->data;
2017 
2018 		skb_put(msdu, sizeof(*rx));
2019 		skb_pull(msdu, sizeof(*rx));
2020 
2021 		if (skb_tailroom(msdu) < __le16_to_cpu(rx->msdu_len)) {
2022 			ath10k_warn(ar, "dropping frame: offloaded rx msdu is too long!\n");
2023 			dev_kfree_skb_any(msdu);
2024 			continue;
2025 		}
2026 
2027 		skb_put(msdu, __le16_to_cpu(rx->msdu_len));
2028 
2029 		/* Offloaded rx header length isn't a multiple of 2 or 4, so the
2030 		 * actual payload is unaligned. Align the frame.  Otherwise
2031 		 * mac80211 complains.  This shouldn't reduce performance much
2032 		 * because these offloaded frames are rare.
2033 		 */
2034 		offset = 4 - ((unsigned long)msdu->data & 3);
2035 		skb_put(msdu, offset);
2036 		memmove(msdu->data + offset, msdu->data, msdu->len);
2037 		skb_pull(msdu, offset);
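		/* Worked example: if msdu->data ends in ...01b, offset is
		 * 4 - 1 = 3 and the payload shifts up by 3 bytes, leaving the
		 * data pointer 4-byte aligned. An already aligned buffer gets
		 * offset 4 and simply shifts by a full word.
		 */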
2038 
2039 		/* FIXME: The frame is NWifi. Re-construct QoS Control
2040 		 * if possible later.
2041 		 */
2042 
2043 		memset(status, 0, sizeof(*status));
2044 		status->flag |= RX_FLAG_NO_SIGNAL_VAL;
2045 
2046 		ath10k_htt_rx_h_rx_offload_prot(status, msdu);
2047 		ath10k_htt_rx_h_channel(ar, status, NULL, rx->vdev_id);
2048 		ath10k_htt_rx_h_queue_msdu(ar, status, msdu);
2049 	}
2050 }
2051 
2052 static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
2053 {
2054 	struct ath10k_htt *htt = &ar->htt;
2055 	struct htt_resp *resp = (void *)skb->data;
2056 	struct ieee80211_rx_status *status = &htt->rx_status;
2057 	struct sk_buff_head list;
2058 	struct sk_buff_head amsdu;
2059 	u16 peer_id;
2060 	u16 msdu_count;
2061 	u8 vdev_id;
2062 	u8 tid;
2063 	bool offload;
2064 	bool frag;
2065 	int ret;
2066 
2067 	lockdep_assert_held(&htt->rx_ring.lock);
2068 
2069 	if (htt->rx_confused)
2070 		return -EIO;
2071 
2072 	skb_pull(skb, sizeof(resp->hdr));
2073 	skb_pull(skb, sizeof(resp->rx_in_ord_ind));
2074 
2075 	peer_id = __le16_to_cpu(resp->rx_in_ord_ind.peer_id);
2076 	msdu_count = __le16_to_cpu(resp->rx_in_ord_ind.msdu_count);
2077 	vdev_id = resp->rx_in_ord_ind.vdev_id;
2078 	tid = SM(resp->rx_in_ord_ind.info, HTT_RX_IN_ORD_IND_INFO_TID);
2079 	offload = !!(resp->rx_in_ord_ind.info &
2080 			HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);
2081 	frag = !!(resp->rx_in_ord_ind.info & HTT_RX_IN_ORD_IND_INFO_FRAG_MASK);
2082 
2083 	ath10k_dbg(ar, ATH10K_DBG_HTT,
2084 		   "htt rx in ord vdev %i peer %i tid %i offload %i frag %i msdu count %i\n",
2085 		   vdev_id, peer_id, tid, offload, frag, msdu_count);
2086 
2087 	if (skb->len < msdu_count * sizeof(*resp->rx_in_ord_ind.msdu_descs32)) {
2088 		ath10k_warn(ar, "dropping invalid in order rx indication\n");
2089 		return -EINVAL;
2090 	}
2091 
2092 	/* The event can deliver more than 1 A-MSDU. Each A-MSDU is later
2093 	 * extracted and processed.
2094 	 */
2095 	__skb_queue_head_init(&list);
2096 	if (ar->hw_params.target_64bit)
2097 		ret = ath10k_htt_rx_pop_paddr64_list(htt, &resp->rx_in_ord_ind,
2098 						     &list);
2099 	else
2100 		ret = ath10k_htt_rx_pop_paddr32_list(htt, &resp->rx_in_ord_ind,
2101 						     &list);
2102 
2103 	if (ret < 0) {
2104 		ath10k_warn(ar, "failed to pop paddr list: %d\n", ret);
2105 		htt->rx_confused = true;
2106 		return -EIO;
2107 	}
2108 
2109 	/* Offloaded frames are very different and need to be handled
2110 	 * separately.
2111 	 */
2112 	if (offload)
2113 		ath10k_htt_rx_h_rx_offload(ar, &list);
2114 
2115 	while (!skb_queue_empty(&list)) {
2116 		__skb_queue_head_init(&amsdu);
2117 		ret = ath10k_htt_rx_extract_amsdu(&list, &amsdu);
2118 		switch (ret) {
2119 		case 0:
2120 			/* Note: The in-order indication may report interleaved
2121 			 * frames from different PPDUs, so the rx rate reported
2122 			 * to mac80211 isn't accurate/reliable. It's still
2123 			 * better to report something than nothing though. This
2124 			 * should still give an idea about rx rate to the user.
2125 			 */
2126 			ath10k_htt_rx_h_ppdu(ar, &amsdu, status, vdev_id);
2127 			ath10k_htt_rx_h_filter(ar, &amsdu, status);
2128 			ath10k_htt_rx_h_mpdu(ar, &amsdu, status, false);
2129 			ath10k_htt_rx_h_enqueue(ar, &amsdu, status);
2130 			break;
2131 		case -EAGAIN:
2132 			/* fall through */
2133 		default:
2134 			/* Should not happen: the rx ring state can no longer be trusted. */
2135 			ath10k_warn(ar, "failed to extract amsdu: %d\n", ret);
2136 			htt->rx_confused = true;
2137 			__skb_queue_purge(&list);
2138 			return -EIO;
2139 		}
2140 	}
2141 	return ret;
2142 }
2143 
2144 static void ath10k_htt_rx_tx_fetch_resp_id_confirm(struct ath10k *ar,
2145 						   const __le32 *resp_ids,
2146 						   int num_resp_ids)
2147 {
2148 	int i;
2149 	u32 resp_id;
2150 
2151 	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm num_resp_ids %d\n",
2152 		   num_resp_ids);
2153 
2154 	for (i = 0; i < num_resp_ids; i++) {
2155 		resp_id = le32_to_cpu(resp_ids[i]);
2156 
2157 		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm resp_id %u\n",
2158 			   resp_id);
2159 
2160 		/* TODO: free resp_id */
2161 	}
2162 }
2163 
2164 static void ath10k_htt_rx_tx_fetch_ind(struct ath10k *ar, struct sk_buff *skb)
2165 {
2166 	struct ieee80211_hw *hw = ar->hw;
2167 	struct ieee80211_txq *txq;
2168 	struct htt_resp *resp = (struct htt_resp *)skb->data;
2169 	struct htt_tx_fetch_record *record;
2170 	size_t len;
2171 	size_t max_num_bytes;
2172 	size_t max_num_msdus;
2173 	size_t num_bytes;
2174 	size_t num_msdus;
2175 	const __le32 *resp_ids;
2176 	u16 num_records;
2177 	u16 num_resp_ids;
2178 	u16 peer_id;
2179 	u8 tid;
2180 	int ret;
2181 	int i;
2182 
2183 	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind\n");
2184 
2185 	len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_ind);
2186 	if (unlikely(skb->len < len)) {
2187 		ath10k_warn(ar, "received corrupted tx_fetch_ind event: buffer too short\n");
2188 		return;
2189 	}
2190 
2191 	num_records = le16_to_cpu(resp->tx_fetch_ind.num_records);
2192 	num_resp_ids = le16_to_cpu(resp->tx_fetch_ind.num_resp_ids);
2193 
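	/* len so far covers only the fixed message header; account for the
	 * variable-length record and resp_id arrays before dereferencing them.
	 */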
2194 	len += sizeof(resp->tx_fetch_ind.records[0]) * num_records;
2195 	len += sizeof(resp->tx_fetch_ind.resp_ids[0]) * num_resp_ids;
2196 
2197 	if (unlikely(skb->len < len)) {
2198 		ath10k_warn(ar, "received corrupted tx_fetch_ind event: too many records/resp_ids\n");
2199 		return;
2200 	}
2201 
2202 	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind num records %hu num resps %hu seq %hu\n",
2203 		   num_records, num_resp_ids,
2204 		   le16_to_cpu(resp->tx_fetch_ind.fetch_seq_num));
2205 
2206 	if (!ar->htt.tx_q_state.enabled) {
2207 		ath10k_warn(ar, "received unexpected tx_fetch_ind event: not enabled\n");
2208 		return;
2209 	}
2210 
2211 	if (ar->htt.tx_q_state.mode == HTT_TX_MODE_SWITCH_PUSH) {
2212 		ath10k_warn(ar, "received unexpected tx_fetch_ind event: in push mode\n");
2213 		return;
2214 	}
2215 
2216 	rcu_read_lock();
2217 
2218 	for (i = 0; i < num_records; i++) {
2219 		record = &resp->tx_fetch_ind.records[i];
2220 		peer_id = MS(le16_to_cpu(record->info),
2221 			     HTT_TX_FETCH_RECORD_INFO_PEER_ID);
2222 		tid = MS(le16_to_cpu(record->info),
2223 			 HTT_TX_FETCH_RECORD_INFO_TID);
2224 		max_num_msdus = le16_to_cpu(record->num_msdus);
2225 		max_num_bytes = le32_to_cpu(record->num_bytes);
2226 
2227 		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch record %i peer_id %hu tid %hhu msdus %zu bytes %zu\n",
2228 			   i, peer_id, tid, max_num_msdus, max_num_bytes);
2229 
2230 		if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
2231 		    unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
2232 			ath10k_warn(ar, "received out of range peer_id %hu tid %hhu\n",
2233 				    peer_id, tid);
2234 			continue;
2235 		}
2236 
2237 		spin_lock_bh(&ar->data_lock);
2238 		txq = ath10k_mac_txq_lookup(ar, peer_id, tid);
2239 		spin_unlock_bh(&ar->data_lock);
2240 
2241 		/* It is okay to release the lock and use txq because RCU read
2242 		 * lock is held.
2243 		 */
2244 
2245 		if (unlikely(!txq)) {
2246 			ath10k_warn(ar, "failed to lookup txq for peer_id %hu tid %hhu\n",
2247 				    peer_id, tid);
2248 			continue;
2249 		}
2250 
2251 		num_msdus = 0;
2252 		num_bytes = 0;
2253 
2254 		while (num_msdus < max_num_msdus &&
2255 		       num_bytes < max_num_bytes) {
2256 			ret = ath10k_mac_tx_push_txq(hw, txq);
2257 			if (ret < 0)
2258 				break;
2259 
2260 			num_msdus++;
2261 			num_bytes += ret;
2262 		}
2263 
2264 		record->num_msdus = cpu_to_le16(num_msdus);
2265 		record->num_bytes = cpu_to_le32(num_bytes);
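		/* The record now holds the counts actually pushed; it is
		 * echoed back to firmware in the fetch response below.
		 */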
2266 
2267 		ath10k_htt_tx_txq_recalc(hw, txq);
2268 	}
2269 
2270 	rcu_read_unlock();
2271 
2272 	resp_ids = ath10k_htt_get_tx_fetch_ind_resp_ids(&resp->tx_fetch_ind);
2273 	ath10k_htt_rx_tx_fetch_resp_id_confirm(ar, resp_ids, num_resp_ids);
2274 
2275 	ret = ath10k_htt_tx_fetch_resp(ar,
2276 				       resp->tx_fetch_ind.token,
2277 				       resp->tx_fetch_ind.fetch_seq_num,
2278 				       resp->tx_fetch_ind.records,
2279 				       num_records);
2280 	if (unlikely(ret)) {
2281 		ath10k_warn(ar, "failed to submit tx fetch resp for token 0x%08x: %d\n",
2282 			    le32_to_cpu(resp->tx_fetch_ind.token), ret);
2283 		/* FIXME: request fw restart */
2284 	}
2285 
2286 	ath10k_htt_tx_txq_sync(ar);
2287 }
2288 
2289 static void ath10k_htt_rx_tx_fetch_confirm(struct ath10k *ar,
2290 					   struct sk_buff *skb)
2291 {
2292 	const struct htt_resp *resp = (void *)skb->data;
2293 	size_t len;
2294 	int num_resp_ids;
2295 
2296 	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm\n");
2297 
2298 	len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_confirm);
2299 	if (unlikely(skb->len < len)) {
2300 		ath10k_warn(ar, "received corrupted tx_fetch_confirm event: buffer too short\n");
2301 		return;
2302 	}
2303 
2304 	num_resp_ids = le16_to_cpu(resp->tx_fetch_confirm.num_resp_ids);
2305 	len += sizeof(resp->tx_fetch_confirm.resp_ids[0]) * num_resp_ids;
2306 
2307 	if (unlikely(skb->len < len)) {
2308 		ath10k_warn(ar, "received corrupted tx_fetch_confirm event: resp_ids buffer overflow\n");
2309 		return;
2310 	}
2311 
2312 	ath10k_htt_rx_tx_fetch_resp_id_confirm(ar,
2313 					       resp->tx_fetch_confirm.resp_ids,
2314 					       num_resp_ids);
2315 }
2316 
2317 static void ath10k_htt_rx_tx_mode_switch_ind(struct ath10k *ar,
2318 					     struct sk_buff *skb)
2319 {
2320 	const struct htt_resp *resp = (void *)skb->data;
2321 	const struct htt_tx_mode_switch_record *record;
2322 	struct ieee80211_txq *txq;
2323 	struct ath10k_txq *artxq;
2324 	size_t len;
2325 	size_t num_records;
2326 	enum htt_tx_mode_switch_mode mode;
2327 	bool enable;
2328 	u16 info0;
2329 	u16 info1;
2330 	u16 threshold;
2331 	u16 peer_id;
2332 	u8 tid;
2333 	int i;
2334 
2335 	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx mode switch ind\n");
2336 
2337 	len = sizeof(resp->hdr) + sizeof(resp->tx_mode_switch_ind);
2338 	if (unlikely(skb->len < len)) {
2339 		ath10k_warn(ar, "received corrupted tx_mode_switch_ind event: buffer too short\n");
2340 		return;
2341 	}
2342 
2343 	info0 = le16_to_cpu(resp->tx_mode_switch_ind.info0);
2344 	info1 = le16_to_cpu(resp->tx_mode_switch_ind.info1);
2345 
2346 	enable = !!(info0 & HTT_TX_MODE_SWITCH_IND_INFO0_ENABLE);
2347 	num_records = MS(info0, HTT_TX_MODE_SWITCH_IND_INFO0_NUM_RECORDS);
2348 	mode = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_MODE);
2349 	threshold = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD);
2350 
2351 	ath10k_dbg(ar, ATH10K_DBG_HTT,
2352 		   "htt rx tx mode switch ind info0 0x%04hx info1 0x%04hx enable %d num records %zd mode %d threshold %hu\n",
2353 		   info0, info1, enable, num_records, mode, threshold);
2354 
2355 	len += sizeof(resp->tx_mode_switch_ind.records[0]) * num_records;
2356 
2357 	if (unlikely(skb->len < len)) {
2358 		ath10k_warn(ar, "received corrupted tx_mode_switch_mode_ind event: too many records\n");
2359 		return;
2360 	}
2361 
2362 	switch (mode) {
2363 	case HTT_TX_MODE_SWITCH_PUSH:
2364 	case HTT_TX_MODE_SWITCH_PUSH_PULL:
2365 		break;
2366 	default:
2367 		ath10k_warn(ar, "received invalid tx_mode_switch_mode_ind mode %d, ignoring\n",
2368 			    mode);
2369 		return;
2370 	}
2371 
2372 	if (!enable)
2373 		return;
2374 
2375 	ar->htt.tx_q_state.enabled = enable;
2376 	ar->htt.tx_q_state.mode = mode;
2377 	ar->htt.tx_q_state.num_push_allowed = threshold;
2378 
2379 	rcu_read_lock();
2380 
2381 	for (i = 0; i < num_records; i++) {
2382 		record = &resp->tx_mode_switch_ind.records[i];
2383 		info0 = le16_to_cpu(record->info0);
2384 		peer_id = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_PEER_ID);
2385 		tid = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_TID);
2386 
2387 		if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
2388 		    unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
2389 			ath10k_warn(ar, "received out of range peer_id %hu tid %hhu\n",
2390 				    peer_id, tid);
2391 			continue;
2392 		}
2393 
2394 		spin_lock_bh(&ar->data_lock);
2395 		txq = ath10k_mac_txq_lookup(ar, peer_id, tid);
2396 		spin_unlock_bh(&ar->data_lock);
2397 
2398 		/* It is okay to release the lock and use txq because RCU read
2399 		 * lock is held.
2400 		 */
2401 
2402 		if (unlikely(!txq)) {
2403 			ath10k_warn(ar, "failed to lookup txq for peer_id %hu tid %hhu\n",
2404 				    peer_id, tid);
2405 			continue;
2406 		}
2407 
2408 		spin_lock_bh(&ar->htt.tx_lock);
2409 		artxq = (void *)txq->drv_priv;
2410 		artxq->num_push_allowed = le16_to_cpu(record->num_max_msdus);
2411 		spin_unlock_bh(&ar->htt.tx_lock);
2412 	}
2413 
2414 	rcu_read_unlock();
2415 
2416 	ath10k_mac_tx_push_pending(ar);
2417 }
2418 
2419 void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
2420 {
2421 	bool release;
2422 
2423 	release = ath10k_htt_t2h_msg_handler(ar, skb);
2424 
2425 	/* Free the indication buffer */
2426 	if (release)
2427 		dev_kfree_skb_any(skb);
2428 }
2429 
2430 static inline bool is_valid_legacy_rate(u8 rate)
2431 {
2432 	static const u8 legacy_rates[] = {1, 2, 5, 11, 6, 9, 12,
2433 					  18, 24, 36, 48, 54};
2434 	int i;
2435 
2436 	for (i = 0; i < ARRAY_SIZE(legacy_rates); i++) {
2437 		if (rate == legacy_rates[i])
2438 			return true;
2439 	}
2440 
2441 	return false;
2442 }
2443 
2444 static void
2445 ath10k_update_per_peer_tx_stats(struct ath10k *ar,
2446 				struct ieee80211_sta *sta,
2447 				struct ath10k_per_peer_tx_stats *peer_stats)
2448 {
2449 	struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
2450 	u8 rate = 0, sgi;
2451 	struct rate_info txrate;
2452 
2453 	lockdep_assert_held(&ar->data_lock);
2454 
2455 	txrate.flags = ATH10K_HW_PREAMBLE(peer_stats->ratecode);
2456 	txrate.bw = ATH10K_HW_BW(peer_stats->flags);
2457 	txrate.nss = ATH10K_HW_NSS(peer_stats->ratecode);
2458 	txrate.mcs = ATH10K_HW_MCS_RATE(peer_stats->ratecode);
2459 	sgi = ATH10K_HW_GI(peer_stats->flags);
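	/* The firmware ratecode packs preamble type, NSS and MCS, while
	 * bandwidth and guard interval travel in the flags byte; the
	 * ATH10K_HW_* accessors above unpack those fields.
	 */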
2460 
2461 	if (txrate.flags == WMI_RATE_PREAMBLE_VHT && txrate.mcs > 9) {
2462 		ath10k_warn(ar, "Invalid VHT mcs %hhd peer stats",  txrate.mcs);
2463 		return;
2464 	}
2465 
2466 	if (txrate.flags == WMI_RATE_PREAMBLE_HT &&
2467 	    (txrate.mcs > 7 || txrate.nss < 1)) {
2468 		ath10k_warn(ar, "Invalid HT mcs %hhd nss %hhd peer stats",
2469 			    txrate.mcs, txrate.nss);
2470 		return;
2471 	}
2472 
2473 	memset(&arsta->txrate, 0, sizeof(arsta->txrate));
2474 
2475 	if (txrate.flags == WMI_RATE_PREAMBLE_CCK ||
2476 	    txrate.flags == WMI_RATE_PREAMBLE_OFDM) {
2477 		rate = ATH10K_HW_LEGACY_RATE(peer_stats->ratecode);
2478 
2479 		if (!is_valid_legacy_rate(rate)) {
2480 			ath10k_warn(ar, "Invalid legacy rate %hhd peer stats",
2481 				    rate);
2482 			return;
2483 		}
2484 
2485 		/* This is hacky, FW sends CCK rate 5.5Mbps as 6 */
2486 		rate *= 10;
2487 		if (rate == 60 && txrate.flags == WMI_RATE_PREAMBLE_CCK)
2488 			rate = rate - 5;
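		/* e.g. 5.5 Mbps CCK arrives as 6: 6 * 10 = 60, then
		 * 60 - 5 = 55, i.e. 5.5 Mbps in the 100 kbps units used by
		 * struct rate_info (54 Mbps OFDM likewise becomes 540).
		 */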
2489 		arsta->txrate.legacy = rate;
2490 	} else if (txrate.flags == WMI_RATE_PREAMBLE_HT) {
2491 		arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
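		/* HT rate_info expects a flat MCS index across spatial
		 * streams, e.g. mcs 7 at nss 2 maps to HT MCS 15.
		 */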
2492 		arsta->txrate.mcs = txrate.mcs + 8 * (txrate.nss - 1);
2493 	} else {
2494 		arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
2495 		arsta->txrate.mcs = txrate.mcs;
2496 	}
2497 
2498 	if (sgi)
2499 		arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
2500 
2501 	arsta->txrate.nss = txrate.nss;
2502 	arsta->txrate.bw = txrate.bw + RATE_INFO_BW_20;
2503 }
2504 
2505 static void ath10k_htt_fetch_peer_stats(struct ath10k *ar,
2506 					struct sk_buff *skb)
2507 {
2508 	struct htt_resp *resp = (struct htt_resp *)skb->data;
2509 	struct ath10k_per_peer_tx_stats *p_tx_stats = &ar->peer_tx_stats;
2510 	struct htt_per_peer_tx_stats_ind *tx_stats;
2511 	struct ieee80211_sta *sta;
2512 	struct ath10k_peer *peer;
2513 	int peer_id, i;
2514 	u8 ppdu_len, num_ppdu;
2515 
2516 	num_ppdu = resp->peer_tx_stats.num_ppdu;
2517 	ppdu_len = resp->peer_tx_stats.ppdu_len * sizeof(__le32);
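	/* ppdu_len is reported by firmware in 32-bit words; the multiply
	 * above converts it to bytes for the length check and payload walk.
	 */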
2518 
2519 	if (skb->len < sizeof(struct htt_resp_hdr) + num_ppdu * ppdu_len) {
2520 		ath10k_warn(ar, "Invalid peer stats buf length %d\n", skb->len);
2521 		return;
2522 	}
2523 
2524 	tx_stats = (struct htt_per_peer_tx_stats_ind *)
2525 			(resp->peer_tx_stats.payload);
2526 	peer_id = __le16_to_cpu(tx_stats->peer_id);
2527 
2528 	rcu_read_lock();
2529 	spin_lock_bh(&ar->data_lock);
2530 	peer = ath10k_peer_find_by_id(ar, peer_id);
2531 	if (!peer) {
2532 		ath10k_warn(ar, "Invalid peer id %d peer stats buffer\n",
2533 			    peer_id);
2534 		goto out;
2535 	}
2536 
2537 	sta = peer->sta;
2538 	for (i = 0; i < num_ppdu; i++) {
2539 		tx_stats = (struct htt_per_peer_tx_stats_ind *)
2540 			   (resp->peer_tx_stats.payload + i * ppdu_len);
2541 
2542 		p_tx_stats->succ_bytes = __le32_to_cpu(tx_stats->succ_bytes);
2543 		p_tx_stats->retry_bytes = __le32_to_cpu(tx_stats->retry_bytes);
2544 		p_tx_stats->failed_bytes =
2545 				__le32_to_cpu(tx_stats->failed_bytes);
2546 		p_tx_stats->ratecode = tx_stats->ratecode;
2547 		p_tx_stats->flags = tx_stats->flags;
2548 		p_tx_stats->succ_pkts = __le16_to_cpu(tx_stats->succ_pkts);
2549 		p_tx_stats->retry_pkts = __le16_to_cpu(tx_stats->retry_pkts);
2550 		p_tx_stats->failed_pkts = __le16_to_cpu(tx_stats->failed_pkts);
2551 
2552 		ath10k_update_per_peer_tx_stats(ar, sta, p_tx_stats);
2553 	}
2554 
2555 out:
2556 	spin_unlock_bh(&ar->data_lock);
2557 	rcu_read_unlock();
2558 }
2559 
2560 static void ath10k_fetch_10_2_tx_stats(struct ath10k *ar, u8 *data)
2561 {
2562 	struct ath10k_pktlog_hdr *hdr = (struct ath10k_pktlog_hdr *)data;
2563 	struct ath10k_per_peer_tx_stats *p_tx_stats = &ar->peer_tx_stats;
2564 	struct ath10k_10_2_peer_tx_stats *tx_stats;
2565 	struct ieee80211_sta *sta;
2566 	struct ath10k_peer *peer;
2567 	u16 log_type = __le16_to_cpu(hdr->log_type);
2568 	u32 peer_id = 0, i;
2569 
2570 	if (log_type != ATH_PKTLOG_TYPE_TX_STAT)
2571 		return;
2572 
2573 	tx_stats = (struct ath10k_10_2_peer_tx_stats *)((hdr->payload) +
2574 		    ATH10K_10_2_TX_STATS_OFFSET);
2575 
2576 	if (!tx_stats->tx_ppdu_cnt)
2577 		return;
2578 
2579 	peer_id = tx_stats->peer_id;
2580 
2581 	rcu_read_lock();
2582 	spin_lock_bh(&ar->data_lock);
2583 	peer = ath10k_peer_find_by_id(ar, peer_id);
2584 	if (!peer) {
2585 		ath10k_warn(ar, "Invalid peer id %d in peer stats buffer\n",
2586 			    peer_id);
2587 		goto out;
2588 	}
2589 
2590 	sta = peer->sta;
2591 	for (i = 0; i < tx_stats->tx_ppdu_cnt; i++) {
2592 		p_tx_stats->succ_bytes =
2593 			__le16_to_cpu(tx_stats->success_bytes[i]);
2594 		p_tx_stats->retry_bytes =
2595 			__le16_to_cpu(tx_stats->retry_bytes[i]);
2596 		p_tx_stats->failed_bytes =
2597 			__le16_to_cpu(tx_stats->failed_bytes[i]);
2598 		p_tx_stats->ratecode = tx_stats->ratecode[i];
2599 		p_tx_stats->flags = tx_stats->flags[i];
2600 		p_tx_stats->succ_pkts = tx_stats->success_pkts[i];
2601 		p_tx_stats->retry_pkts = tx_stats->retry_pkts[i];
2602 		p_tx_stats->failed_pkts = tx_stats->failed_pkts[i];
2603 
2604 		ath10k_update_per_peer_tx_stats(ar, sta, p_tx_stats);
2605 	}
2606 	spin_unlock_bh(&ar->data_lock);
2607 	rcu_read_unlock();
2608 
2609 	return;
2610 
2611 out:
2612 	spin_unlock_bh(&ar->data_lock);
2613 	rcu_read_unlock();
2614 }
2615 
2616 bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
2617 {
2618 	struct ath10k_htt *htt = &ar->htt;
2619 	struct htt_resp *resp = (struct htt_resp *)skb->data;
2620 	enum htt_t2h_msg_type type;
2621 
2622 	/* confirm alignment */
2623 	if (!IS_ALIGNED((unsigned long)skb->data, 4))
2624 		ath10k_warn(ar, "unaligned htt message, expect trouble\n");
2625 
2626 	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, msg_type: 0x%0X\n",
2627 		   resp->hdr.msg_type);
2628 
2629 	if (resp->hdr.msg_type >= ar->htt.t2h_msg_types_max) {
2630 		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, unsupported msg_type: 0x%0X\n max: 0x%0X",
2631 			   resp->hdr.msg_type, ar->htt.t2h_msg_types_max);
2632 		return true;
2633 	}
2634 	type = ar->htt.t2h_msg_types[resp->hdr.msg_type];
2635 
2636 	switch (type) {
2637 	case HTT_T2H_MSG_TYPE_VERSION_CONF: {
2638 		htt->target_version_major = resp->ver_resp.major;
2639 		htt->target_version_minor = resp->ver_resp.minor;
2640 		complete(&htt->target_version_received);
2641 		break;
2642 	}
2643 	case HTT_T2H_MSG_TYPE_RX_IND:
2644 		ath10k_htt_rx_proc_rx_ind(htt, &resp->rx_ind);
2645 		break;
2646 	case HTT_T2H_MSG_TYPE_PEER_MAP: {
2647 		struct htt_peer_map_event ev = {
2648 			.vdev_id = resp->peer_map.vdev_id,
2649 			.peer_id = __le16_to_cpu(resp->peer_map.peer_id),
2650 		};
2651 		memcpy(ev.addr, resp->peer_map.addr, sizeof(ev.addr));
2652 		ath10k_peer_map_event(htt, &ev);
2653 		break;
2654 	}
2655 	case HTT_T2H_MSG_TYPE_PEER_UNMAP: {
2656 		struct htt_peer_unmap_event ev = {
2657 			.peer_id = __le16_to_cpu(resp->peer_unmap.peer_id),
2658 		};
2659 		ath10k_peer_unmap_event(htt, &ev);
2660 		break;
2661 	}
2662 	case HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION: {
2663 		struct htt_tx_done tx_done = {};
2664 		int status = __le32_to_cpu(resp->mgmt_tx_completion.status);
2665 
2666 		tx_done.msdu_id = __le32_to_cpu(resp->mgmt_tx_completion.desc_id);
2667 
2668 		switch (status) {
2669 		case HTT_MGMT_TX_STATUS_OK:
2670 			tx_done.status = HTT_TX_COMPL_STATE_ACK;
2671 			break;
2672 		case HTT_MGMT_TX_STATUS_RETRY:
2673 			tx_done.status = HTT_TX_COMPL_STATE_NOACK;
2674 			break;
2675 		case HTT_MGMT_TX_STATUS_DROP:
2676 			tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
2677 			break;
2678 		}
2679 
2680 		status = ath10k_txrx_tx_unref(htt, &tx_done);
2681 		if (!status) {
2682 			spin_lock_bh(&htt->tx_lock);
2683 			ath10k_htt_tx_mgmt_dec_pending(htt);
2684 			spin_unlock_bh(&htt->tx_lock);
2685 		}
2686 		break;
2687 	}
2688 	case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
2689 		ath10k_htt_rx_tx_compl_ind(htt->ar, skb);
2690 		break;
2691 	case HTT_T2H_MSG_TYPE_SEC_IND: {
2692 		struct ath10k *ar = htt->ar;
2693 		struct htt_security_indication *ev = &resp->security_indication;
2694 
2695 		ath10k_dbg(ar, ATH10K_DBG_HTT,
2696 			   "sec ind peer_id %d unicast %d type %d\n",
2697 			  __le16_to_cpu(ev->peer_id),
2698 			  !!(ev->flags & HTT_SECURITY_IS_UNICAST),
2699 			  MS(ev->flags, HTT_SECURITY_TYPE));
2700 		complete(&ar->install_key_done);
2701 		break;
2702 	}
2703 	case HTT_T2H_MSG_TYPE_RX_FRAG_IND: {
2704 		ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
2705 				skb->data, skb->len);
2706 		atomic_inc(&htt->num_mpdus_ready);
2707 		break;
2708 	}
2709 	case HTT_T2H_MSG_TYPE_TEST:
2710 		break;
2711 	case HTT_T2H_MSG_TYPE_STATS_CONF:
2712 		trace_ath10k_htt_stats(ar, skb->data, skb->len);
2713 		break;
2714 	case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
2715 		/* Firmware can return tx frames if it's unable to fully
2716 		 * process them and suspects the host may be able to fix them.
2717 		 * ath10k sends all tx frames as already inspected, so this
2718 		 * shouldn't happen unless the firmware has a bug.
2719 		 */
2720 		ath10k_warn(ar, "received an unexpected htt tx inspect event\n");
2721 		break;
2722 	case HTT_T2H_MSG_TYPE_RX_ADDBA:
2723 		ath10k_htt_rx_addba(ar, resp);
2724 		break;
2725 	case HTT_T2H_MSG_TYPE_RX_DELBA:
2726 		ath10k_htt_rx_delba(ar, resp);
2727 		break;
2728 	case HTT_T2H_MSG_TYPE_PKTLOG: {
2729 		trace_ath10k_htt_pktlog(ar, resp->pktlog_msg.payload,
2730 					skb->len -
2731 					offsetof(struct htt_resp,
2732 						 pktlog_msg.payload));
2733 
2734 		if (ath10k_peer_stats_enabled(ar))
2735 			ath10k_fetch_10_2_tx_stats(ar,
2736 						   resp->pktlog_msg.payload);
2737 		break;
2738 	}
2739 	case HTT_T2H_MSG_TYPE_RX_FLUSH: {
2740 		/* Ignore this event because mac80211 takes care of Rx
2741 		 * aggregation reordering.
2742 		 */
2743 		break;
2744 	}
2745 	case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND: {
2746 		__skb_queue_tail(&htt->rx_in_ord_compl_q, skb);
2747 		return false;
2748 	}
2749 	case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND:
2750 		break;
2751 	case HTT_T2H_MSG_TYPE_CHAN_CHANGE: {
2752 		u32 phymode = __le32_to_cpu(resp->chan_change.phymode);
2753 		u32 freq = __le32_to_cpu(resp->chan_change.freq);
2754 
2755 		ar->tgt_oper_chan = ieee80211_get_channel(ar->hw->wiphy, freq);
2756 		ath10k_dbg(ar, ATH10K_DBG_HTT,
2757 			   "htt chan change freq %u phymode %s\n",
2758 			   freq, ath10k_wmi_phymode_str(phymode));
2759 		break;
2760 	}
2761 	case HTT_T2H_MSG_TYPE_AGGR_CONF:
2762 		break;
2763 	case HTT_T2H_MSG_TYPE_TX_FETCH_IND: {
2764 		struct sk_buff *tx_fetch_ind = skb_copy(skb, GFP_ATOMIC);
2765 
2766 		if (!tx_fetch_ind) {
2767 			ath10k_warn(ar, "failed to copy htt tx fetch ind\n");
2768 			break;
2769 		}
2770 		skb_queue_tail(&htt->tx_fetch_ind_q, tx_fetch_ind);
2771 		break;
2772 	}
2773 	case HTT_T2H_MSG_TYPE_TX_FETCH_CONFIRM:
2774 		ath10k_htt_rx_tx_fetch_confirm(ar, skb);
2775 		break;
2776 	case HTT_T2H_MSG_TYPE_TX_MODE_SWITCH_IND:
2777 		ath10k_htt_rx_tx_mode_switch_ind(ar, skb);
2778 		break;
2779 	case HTT_T2H_MSG_TYPE_PEER_STATS:
2780 		ath10k_htt_fetch_peer_stats(ar, skb);
2781 		break;
2782 	case HTT_T2H_MSG_TYPE_EN_STATS:
2783 	default:
2784 		ath10k_warn(ar, "htt event (%d) not handled\n",
2785 			    resp->hdr.msg_type);
2786 		ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
2787 				skb->data, skb->len);
2788 		break;
2789 	}
2790 	return true;
2791 }
2792 EXPORT_SYMBOL(ath10k_htt_t2h_msg_handler);
2793 
2794 void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar,
2795 					     struct sk_buff *skb)
2796 {
2797 	trace_ath10k_htt_pktlog(ar, skb->data, skb->len);
2798 	dev_kfree_skb_any(skb);
2799 }
2800 EXPORT_SYMBOL(ath10k_htt_rx_pktlog_completion_handler);
2801 
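/* Deliver previously queued rx MSDUs to mac80211, stopping once the NAPI
 * budget is exhausted; returns the updated amount of work done (quota).
 */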
2802 static int ath10k_htt_rx_deliver_msdu(struct ath10k *ar, int quota, int budget)
2803 {
2804 	struct sk_buff *skb;
2805 
2806 	while (quota < budget) {
2807 		if (skb_queue_empty(&ar->htt.rx_msdus_q))
2808 			break;
2809 
2810 		skb = __skb_dequeue(&ar->htt.rx_msdus_q);
2811 		if (!skb)
2812 			break;
2813 		ath10k_process_rx(ar, skb);
2814 		quota++;
2815 	}
2816 
2817 	return quota;
2818 }
2819 
2820 int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget)
2821 {
2822 	struct ath10k_htt *htt = &ar->htt;
2823 	struct htt_tx_done tx_done = {};
2824 	struct sk_buff_head tx_ind_q;
2825 	struct sk_buff *skb;
2826 	unsigned long flags;
2827 	int quota = 0, done, ret;
2828 	bool resched_napi = false;
2829 
2830 	__skb_queue_head_init(&tx_ind_q);
2831 
2832 	/* Process pending frames before dequeuing more data
2833 	 * from hardware.
2834 	 */
2835 	quota = ath10k_htt_rx_deliver_msdu(ar, quota, budget);
2836 	if (quota == budget) {
2837 		resched_napi = true;
2838 		goto exit;
2839 	}
2840 
2841 	while ((skb = __skb_dequeue(&htt->rx_in_ord_compl_q))) {
2842 		spin_lock_bh(&htt->rx_ring.lock);
2843 		ret = ath10k_htt_rx_in_ord_ind(ar, skb);
2844 		spin_unlock_bh(&htt->rx_ring.lock);
2845 
2846 		dev_kfree_skb_any(skb);
2847 		if (ret == -EIO) {
2848 			resched_napi = true;
2849 			goto exit;
2850 		}
2851 	}
2852 
2853 	while (atomic_read(&htt->num_mpdus_ready)) {
2854 		ret = ath10k_htt_rx_handle_amsdu(htt);
2855 		if (ret == -EIO) {
2856 			resched_napi = true;
2857 			goto exit;
2858 		}
2859 		atomic_dec(&htt->num_mpdus_ready);
2860 	}
2861 
2862 	/* Deliver received data after processing data from hardware */
2863 	quota = ath10k_htt_rx_deliver_msdu(ar, quota, budget);
2864 
2865 	/* From NAPI documentation:
2866 	 *  The napi poll() function may also process TX completions, in which
2867 	 *  case if it processes the entire TX ring then it should count that
2868 	 *  work as the rest of the budget.
2869 	 */
2870 	if ((quota < budget) && !kfifo_is_empty(&htt->txdone_fifo))
2871 		quota = budget;
2872 
2873 	/* kfifo_get: called only from the NAPI poll so it's neatly serialized.
2874 	 * From kfifo_get() documentation:
2875 	 *  Note that with only one concurrent reader and one concurrent writer,
2876 	 *  you don't need extra locking to use these macros.
2877 	 */
2878 	while (kfifo_get(&htt->txdone_fifo, &tx_done))
2879 		ath10k_txrx_tx_unref(htt, &tx_done);
2880 
2881 	ath10k_mac_tx_push_pending(ar);
2882 
2883 	spin_lock_irqsave(&htt->tx_fetch_ind_q.lock, flags);
2884 	skb_queue_splice_init(&htt->tx_fetch_ind_q, &tx_ind_q);
2885 	spin_unlock_irqrestore(&htt->tx_fetch_ind_q.lock, flags);
2886 
2887 	while ((skb = __skb_dequeue(&tx_ind_q))) {
2888 		ath10k_htt_rx_tx_fetch_ind(ar, skb);
2889 		dev_kfree_skb_any(skb);
2890 	}
2891 
2892 exit:
2893 	ath10k_htt_rx_msdu_buff_replenish(htt);
2894 	/* In case of rx failure or more data to read, report budget
2895 	 * to reschedule NAPI poll
2896 	 */
2897 	done = resched_napi ? budget : quota;
2898 
2899 	return done;
2900 }
2901 EXPORT_SYMBOL(ath10k_htt_txrx_compl_task);
2902 
2903 static const struct ath10k_htt_rx_ops htt_rx_ops_32 = {
2904 	.htt_get_rx_ring_size = ath10k_htt_get_rx_ring_size_32,
2905 	.htt_config_paddrs_ring = ath10k_htt_config_paddrs_ring_32,
2906 	.htt_set_paddrs_ring = ath10k_htt_set_paddrs_ring_32,
2907 	.htt_get_vaddr_ring = ath10k_htt_get_vaddr_ring_32,
2908 	.htt_reset_paddrs_ring = ath10k_htt_reset_paddrs_ring_32,
2909 };
2910 
2911 static const struct ath10k_htt_rx_ops htt_rx_ops_64 = {
2912 	.htt_get_rx_ring_size = ath10k_htt_get_rx_ring_size_64,
2913 	.htt_config_paddrs_ring = ath10k_htt_config_paddrs_ring_64,
2914 	.htt_set_paddrs_ring = ath10k_htt_set_paddrs_ring_64,
2915 	.htt_get_vaddr_ring = ath10k_htt_get_vaddr_ring_64,
2916 	.htt_reset_paddrs_ring = ath10k_htt_reset_paddrs_ring_64,
2917 };
2918 
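/* Targets with 64-bit rx descriptors (e.g. WCN3990) need the 64-bit paddr
 * ring ops; everything else uses the 32-bit variants.
 */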
2919 void ath10k_htt_set_rx_ops(struct ath10k_htt *htt)
2920 {
2921 	struct ath10k *ar = htt->ar;
2922 
2923 	if (ar->hw_params.target_64bit)
2924 		htt->rx_ops = &htt_rx_ops_64;
2925 	else
2926 		htt->rx_ops = &htt_rx_ops_32;
2927 }
2928