// SPDX-License-Identifier: ISC
/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
 * Copyright (c) 2018, The Linux Foundation. All rights reserved.
 */

#include "core.h"
#include "htc.h"
#include "htt.h"
#include "txrx.h"
#include "debug.h"
#include "trace.h"
#include "mac.h"

#include <linux/log2.h>
#include <linux/bitfield.h>

/* when under memory pressure rx ring refill may fail and needs a retry */
#define HTT_RX_RING_REFILL_RETRY_MS 50

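/* interval used to reschedule a refill when a single replenish pass could
 * not clear the whole deficit; see ath10k_htt_rx_msdu_buff_replenish()
 */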
#define HTT_RX_RING_REFILL_RESCHED_MS 5

/* shortcut to interpret a raw memory buffer as a rx descriptor */
#define HTT_RX_BUF_TO_RX_DESC(hw, buf) ath10k_htt_rx_desc_from_raw_buffer(hw, buf)

static int ath10k_htt_rx_get_csum_state(struct ath10k_hw_params *hw, struct sk_buff *skb);

static struct sk_buff *
ath10k_htt_rx_find_skb_paddr(struct ath10k *ar, u64 paddr)
{
	struct ath10k_skb_rxcb *rxcb;

	hash_for_each_possible(ar->htt.rx_ring.skb_table, rxcb, hlist, paddr)
		if (rxcb->paddr == paddr)
			return ATH10K_RXCB_SKB(rxcb);

	WARN_ON_ONCE(1);
	return NULL;
}

static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt)
{
	struct sk_buff *skb;
	struct ath10k_skb_rxcb *rxcb;
	struct hlist_node *n;
	int i;

	if (htt->rx_ring.in_ord_rx) {
		hash_for_each_safe(htt->rx_ring.skb_table, i, n, rxcb, hlist) {
			skb = ATH10K_RXCB_SKB(rxcb);
			dma_unmap_single(htt->ar->dev, rxcb->paddr,
					 skb->len + skb_tailroom(skb),
					 DMA_FROM_DEVICE);
			hash_del(&rxcb->hlist);
			dev_kfree_skb_any(skb);
		}
	} else {
		for (i = 0; i < htt->rx_ring.size; i++) {
			skb = htt->rx_ring.netbufs_ring[i];
			if (!skb)
				continue;

			rxcb = ATH10K_SKB_RXCB(skb);
			dma_unmap_single(htt->ar->dev, rxcb->paddr,
					 skb->len + skb_tailroom(skb),
					 DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);
		}
	}

	htt->rx_ring.fill_cnt = 0;
	hash_init(htt->rx_ring.skb_table);
	memset(htt->rx_ring.netbufs_ring, 0,
	       htt->rx_ring.size * sizeof(htt->rx_ring.netbufs_ring[0]));
}

static size_t ath10k_htt_get_rx_ring_size_32(struct ath10k_htt *htt)
{
	return htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring_32);
}

static size_t ath10k_htt_get_rx_ring_size_64(struct ath10k_htt *htt)
{
	return htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring_64);
}

static void ath10k_htt_config_paddrs_ring_32(struct ath10k_htt *htt,
					     void *vaddr)
{
	htt->rx_ring.paddrs_ring_32 = vaddr;
}

static void ath10k_htt_config_paddrs_ring_64(struct ath10k_htt *htt,
					     void *vaddr)
{
	htt->rx_ring.paddrs_ring_64 = vaddr;
}

static void ath10k_htt_set_paddrs_ring_32(struct ath10k_htt *htt,
					  dma_addr_t paddr, int idx)
{
	htt->rx_ring.paddrs_ring_32[idx] = __cpu_to_le32(paddr);
}

static void ath10k_htt_set_paddrs_ring_64(struct ath10k_htt *htt,
					  dma_addr_t paddr, int idx)
{
	htt->rx_ring.paddrs_ring_64[idx] = __cpu_to_le64(paddr);
}

static void ath10k_htt_reset_paddrs_ring_32(struct ath10k_htt *htt, int idx)
{
	htt->rx_ring.paddrs_ring_32[idx] = 0;
}

static void ath10k_htt_reset_paddrs_ring_64(struct ath10k_htt *htt, int idx)
{
	htt->rx_ring.paddrs_ring_64[idx] = 0;
}

static void *ath10k_htt_get_vaddr_ring_32(struct ath10k_htt *htt)
{
	return (void *)htt->rx_ring.paddrs_ring_32;
}

static void *ath10k_htt_get_vaddr_ring_64(struct ath10k_htt *htt)
{
	return (void *)htt->rx_ring.paddrs_ring_64;
}

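/* The _32/_64 accessor pairs above exist because targets address the rx ring
 * with either 32-bit or 64-bit physical addresses; the matching set is
 * presumably selected once at setup time and reached through the generic
 * ath10k_htt_*_paddrs_ring()/ath10k_htt_get_rx_ring_size() wrappers used
 * below.
 */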
__ath10k_htt_rx_ring_fill_n(struct ath10k_htt * htt,int num)132 static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
133 {
134 	struct ath10k_hw_params *hw = &htt->ar->hw_params;
135 	struct htt_rx_desc *rx_desc;
136 	struct ath10k_skb_rxcb *rxcb;
137 	struct sk_buff *skb;
138 	dma_addr_t paddr;
139 	int ret = 0, idx;
140 
	/* The Full Rx Reorder firmware has no way of telling the host
	 * implicitly when it has copied HTT Rx Ring buffers to the MAC Rx
	 * Ring. To keep things simple, make sure the ring is always at least
	 * half empty. This guarantees that replenishment can never overrun.
	 */
	BUILD_BUG_ON(HTT_RX_RING_FILL_LEVEL >= HTT_RX_RING_SIZE / 2);

	idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);

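	/* alloc_idx.vaddr lives in DMA-coherent memory shared with the
	 * firmware, so an out-of-range value here points at a firmware (or
	 * DMA) problem rather than a host bug - hence the range check below.
	 */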
	if (idx < 0 || idx >= htt->rx_ring.size) {
		ath10k_err(htt->ar, "rx ring index is not valid, firmware malfunctioning?\n");
		idx &= htt->rx_ring.size_mask;
		ret = -ENOMEM;
		goto fail;
	}

	while (num > 0) {
		skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN);
		if (!skb) {
			ret = -ENOMEM;
			goto fail;
		}

		if (!IS_ALIGNED((unsigned long)skb->data, HTT_RX_DESC_ALIGN))
			skb_pull(skb,
				 PTR_ALIGN(skb->data, HTT_RX_DESC_ALIGN) -
				 skb->data);

		/* Clear rx_desc attention word before posting to Rx ring */
		rx_desc = HTT_RX_BUF_TO_RX_DESC(hw, skb->data);
		ath10k_htt_rx_desc_get_attention(hw, rx_desc)->flags = __cpu_to_le32(0);

		paddr = dma_map_single(htt->ar->dev, skb->data,
				       skb->len + skb_tailroom(skb),
				       DMA_FROM_DEVICE);

		if (unlikely(dma_mapping_error(htt->ar->dev, paddr))) {
			dev_kfree_skb_any(skb);
			ret = -ENOMEM;
			goto fail;
		}

		rxcb = ATH10K_SKB_RXCB(skb);
		rxcb->paddr = paddr;
		htt->rx_ring.netbufs_ring[idx] = skb;
		ath10k_htt_set_paddrs_ring(htt, paddr, idx);
		htt->rx_ring.fill_cnt++;

		if (htt->rx_ring.in_ord_rx) {
			hash_add(htt->rx_ring.skb_table,
				 &ATH10K_SKB_RXCB(skb)->hlist,
				 paddr);
		}

		num--;
		idx++;
		idx &= htt->rx_ring.size_mask;
	}

fail:
	/*
	 * Make sure the rx buffer writes are visible before the available
	 * buffer index is updated, to avoid any potential rx ring corruption.
	 */
	mb();
	*htt->rx_ring.alloc_idx.vaddr = __cpu_to_le32(idx);
	return ret;
}

static int ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
	lockdep_assert_held(&htt->rx_ring.lock);
	return __ath10k_htt_rx_ring_fill_n(htt, num);
}

static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
{
	int ret, num_deficit, num_to_fill;

	/* Refilling the whole RX ring buffer proves to be a bad idea. The
	 * reason is that RX may take up a significant amount of CPU cycles
	 * and starve other tasks, e.g. TX on an ethernet device while acting
	 * as a bridge with an ath10k wlan interface. This ended up with very
	 * poor performance once the CPU of the host system was overwhelmed
	 * with RX on ath10k.
	 *
	 * By limiting the number of refills the replenishing occurs
	 * progressively. This in turn makes use of the fact that tasklets are
	 * processed in FIFO order. This means actual RX processing can starve
	 * out refilling. If there aren't enough buffers on the RX ring the FW
	 * will not report RX until the ring is refilled with enough buffers.
	 * This automatically balances load with respect to CPU power.
	 *
	 * This probably comes at the cost of lower maximum throughput but
	 * improves the average and stability.
	 */
	spin_lock_bh(&htt->rx_ring.lock);
	num_deficit = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
	num_to_fill = min(ATH10K_HTT_MAX_NUM_REFILL, num_deficit);
	num_deficit -= num_to_fill;
	ret = ath10k_htt_rx_ring_fill_n(htt, num_to_fill);
	if (ret == -ENOMEM) {
		/*
		 * Failed to fill it to the desired level -
		 * we'll start a timer and try again next time.
		 * As long as enough buffers are left in the ring for
		 * another A-MPDU rx, no special recovery is needed.
		 */
		mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
			  msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS));
	} else if (num_deficit > 0) {
		mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
			  msecs_to_jiffies(HTT_RX_RING_REFILL_RESCHED_MS));
	}
	spin_unlock_bh(&htt->rx_ring.lock);
}

static void ath10k_htt_rx_ring_refill_retry(struct timer_list *t)
{
	struct ath10k_htt *htt = from_timer(htt, t, rx_ring.refill_retry_timer);

	ath10k_htt_rx_msdu_buff_replenish(htt);
}

int ath10k_htt_rx_ring_refill(struct ath10k *ar)
{
	struct ath10k_htt *htt = &ar->htt;
	int ret;

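	/* High-latency targets (e.g. SDIO/USB based) do not use a
	 * host-managed rx ring, so there is nothing to refill here.
	 */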
	if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
		return 0;

	spin_lock_bh(&htt->rx_ring.lock);
	ret = ath10k_htt_rx_ring_fill_n(htt, (htt->rx_ring.fill_level -
					      htt->rx_ring.fill_cnt));

	if (ret)
		ath10k_htt_rx_ring_free(htt);

	spin_unlock_bh(&htt->rx_ring.lock);

	return ret;
}

void ath10k_htt_rx_free(struct ath10k_htt *htt)
{
	if (htt->ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
		return;

	del_timer_sync(&htt->rx_ring.refill_retry_timer);

	skb_queue_purge(&htt->rx_msdus_q);
	skb_queue_purge(&htt->rx_in_ord_compl_q);
	skb_queue_purge(&htt->tx_fetch_ind_q);

	spin_lock_bh(&htt->rx_ring.lock);
	ath10k_htt_rx_ring_free(htt);
	spin_unlock_bh(&htt->rx_ring.lock);

	dma_free_coherent(htt->ar->dev,
			  ath10k_htt_get_rx_ring_size(htt),
			  ath10k_htt_get_vaddr_ring(htt),
			  htt->rx_ring.base_paddr);

	ath10k_htt_config_paddrs_ring(htt, NULL);

	dma_free_coherent(htt->ar->dev,
			  sizeof(*htt->rx_ring.alloc_idx.vaddr),
			  htt->rx_ring.alloc_idx.vaddr,
			  htt->rx_ring.alloc_idx.paddr);
	htt->rx_ring.alloc_idx.vaddr = NULL;

	kfree(htt->rx_ring.netbufs_ring);
	htt->rx_ring.netbufs_ring = NULL;
}

static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	int idx;
	struct sk_buff *msdu;

	lockdep_assert_held(&htt->rx_ring.lock);

	if (htt->rx_ring.fill_cnt == 0) {
		ath10k_warn(ar, "tried to pop sk_buff from an empty rx ring\n");
		return NULL;
	}

	idx = htt->rx_ring.sw_rd_idx.msdu_payld;
	msdu = htt->rx_ring.netbufs_ring[idx];
	htt->rx_ring.netbufs_ring[idx] = NULL;
	ath10k_htt_reset_paddrs_ring(htt, idx);

	idx++;
	idx &= htt->rx_ring.size_mask;
	htt->rx_ring.sw_rd_idx.msdu_payld = idx;
	htt->rx_ring.fill_cnt--;

	dma_unmap_single(htt->ar->dev,
			 ATH10K_SKB_RXCB(msdu)->paddr,
			 msdu->len + skb_tailroom(msdu),
			 DMA_FROM_DEVICE);
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
			msdu->data, msdu->len + skb_tailroom(msdu));

	return msdu;
}

/* return: < 0 fatal error, 0 - non-chained msdu, 1 - chained msdu */
static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
				   struct sk_buff_head *amsdu)
{
	struct ath10k *ar = htt->ar;
	struct ath10k_hw_params *hw = &ar->hw_params;
	int msdu_len, msdu_chaining = 0;
	struct sk_buff *msdu;
	struct htt_rx_desc *rx_desc;
	struct rx_attention *rx_desc_attention;
	struct rx_frag_info_common *rx_desc_frag_info_common;
	struct rx_msdu_start_common *rx_desc_msdu_start_common;
	struct rx_msdu_end_common *rx_desc_msdu_end_common;

	lockdep_assert_held(&htt->rx_ring.lock);

	for (;;) {
		int last_msdu, msdu_len_invalid, msdu_chained;

		msdu = ath10k_htt_rx_netbuf_pop(htt);
		if (!msdu) {
			__skb_queue_purge(amsdu);
			return -ENOENT;
		}

		__skb_queue_tail(amsdu, msdu);

		rx_desc = HTT_RX_BUF_TO_RX_DESC(hw, msdu->data);
		rx_desc_attention = ath10k_htt_rx_desc_get_attention(hw, rx_desc);
		rx_desc_msdu_start_common = ath10k_htt_rx_desc_get_msdu_start(hw,
									      rx_desc);
		rx_desc_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rx_desc);
		rx_desc_frag_info_common = ath10k_htt_rx_desc_get_frag_info(hw, rx_desc);

		/* FIXME: we must report msdu payload since this is what caller
		 * expects now
		 */
		skb_put(msdu, hw->rx_desc_ops->rx_desc_msdu_payload_offset);
		skb_pull(msdu, hw->rx_desc_ops->rx_desc_msdu_payload_offset);

		/*
		 * Sanity check - confirm the HW is finished filling in the
		 * rx data.
		 * If the HW and SW are working correctly, then it's guaranteed
		 * that the HW's MAC DMA is done before this point in the SW.
		 * To prevent the case that we handle a stale Rx descriptor,
		 * just assert for now until we have a way to recover.
		 */
		if (!(__le32_to_cpu(rx_desc_attention->flags)
				& RX_ATTENTION_FLAGS_MSDU_DONE)) {
			__skb_queue_purge(amsdu);
			return -EIO;
		}

		msdu_len_invalid = !!(__le32_to_cpu(rx_desc_attention->flags)
					& (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR |
					   RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR));
		msdu_len = MS(__le32_to_cpu(rx_desc_msdu_start_common->info0),
			      RX_MSDU_START_INFO0_MSDU_LENGTH);
		msdu_chained = rx_desc_frag_info_common->ring2_more_count;

		if (msdu_len_invalid)
			msdu_len = 0;

		skb_trim(msdu, 0);
		skb_put(msdu, min(msdu_len, ath10k_htt_rx_msdu_size(hw)));
		msdu_len -= msdu->len;

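		/* msdu_len now holds the payload bytes that did not fit in
		 * the first buffer; each chained buffer below carries up to
		 * HTT_RX_BUF_SIZE bytes of the remainder.
		 */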
		/* Note: Chained buffers do not contain rx descriptor */
		while (msdu_chained--) {
			msdu = ath10k_htt_rx_netbuf_pop(htt);
			if (!msdu) {
				__skb_queue_purge(amsdu);
				return -ENOENT;
			}

			__skb_queue_tail(amsdu, msdu);
			skb_trim(msdu, 0);
			skb_put(msdu, min(msdu_len, HTT_RX_BUF_SIZE));
			msdu_len -= msdu->len;
			msdu_chaining = 1;
		}

		last_msdu = __le32_to_cpu(rx_desc_msdu_end_common->info0) &
				RX_MSDU_END_INFO0_LAST_MSDU;

		/* FIXME: why are we skipping the first part of the rx_desc? */
		trace_ath10k_htt_rx_desc(ar, (void *)rx_desc + sizeof(u32),
					 hw->rx_desc_ops->rx_desc_size - sizeof(u32));

		if (last_msdu)
			break;
	}

	if (skb_queue_empty(amsdu))
		msdu_chaining = -1;

	/*
	 * Don't refill the ring yet.
	 *
	 * First, the elements popped here are still in use - it is not
	 * safe to overwrite them until the matching call to
	 * mpdu_desc_list_next. Second, for efficiency it is preferable to
	 * refill the rx ring with 1 PPDU's worth of rx buffers (something
	 * like 32 x 3 buffers), rather than one MPDU's worth of rx buffers
	 * (something like 3 buffers). Consequently, we'll rely on the txrx
	 * SW to tell us when it is done pulling all the PPDU's rx buffers
	 * out of the rx ring, and then refill it just once.
	 */

	return msdu_chaining;
}

static struct sk_buff *ath10k_htt_rx_pop_paddr(struct ath10k_htt *htt,
					       u64 paddr)
{
	struct ath10k *ar = htt->ar;
	struct ath10k_skb_rxcb *rxcb;
	struct sk_buff *msdu;

	lockdep_assert_held(&htt->rx_ring.lock);

	msdu = ath10k_htt_rx_find_skb_paddr(ar, paddr);
	if (!msdu)
		return NULL;

	rxcb = ATH10K_SKB_RXCB(msdu);
	hash_del(&rxcb->hlist);
	htt->rx_ring.fill_cnt--;

	dma_unmap_single(htt->ar->dev, rxcb->paddr,
			 msdu->len + skb_tailroom(msdu),
			 DMA_FROM_DEVICE);
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
			msdu->data, msdu->len + skb_tailroom(msdu));

	return msdu;
}

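/* Attach a chain of follow-on buffers to the head skb via its frag_list.
 * Note the skb accounting: data_len counts only the fragment bytes while
 * len covers the head plus all fragments.
 */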
static inline void ath10k_htt_append_frag_list(struct sk_buff *skb_head,
					       struct sk_buff *frag_list,
					       unsigned int frag_len)
{
	skb_shinfo(skb_head)->frag_list = frag_list;
	skb_head->data_len = frag_len;
	skb_head->len += skb_head->data_len;
}

static int ath10k_htt_rx_handle_amsdu_mon_32(struct ath10k_htt *htt,
					     struct sk_buff *msdu,
					     struct htt_rx_in_ord_msdu_desc **msdu_desc)
{
	struct ath10k *ar = htt->ar;
	struct ath10k_hw_params *hw = &ar->hw_params;
	u32 paddr;
	struct sk_buff *frag_buf;
	struct sk_buff *prev_frag_buf;
	u8 last_frag;
	struct htt_rx_in_ord_msdu_desc *ind_desc = *msdu_desc;
	struct htt_rx_desc *rxd;
	int amsdu_len = __le16_to_cpu(ind_desc->msdu_len);

	rxd = HTT_RX_BUF_TO_RX_DESC(hw, msdu->data);
	trace_ath10k_htt_rx_desc(ar, rxd, hw->rx_desc_ops->rx_desc_size);

	skb_put(msdu, hw->rx_desc_ops->rx_desc_size);
	skb_pull(msdu, hw->rx_desc_ops->rx_desc_size);
	skb_put(msdu, min(amsdu_len, ath10k_htt_rx_msdu_size(hw)));
	amsdu_len -= msdu->len;

	last_frag = ind_desc->reserved;
	if (last_frag) {
		if (amsdu_len) {
			ath10k_warn(ar, "invalid amsdu len %u, left %d",
				    __le16_to_cpu(ind_desc->msdu_len),
				    amsdu_len);
		}
		return 0;
	}

	ind_desc++;
	paddr = __le32_to_cpu(ind_desc->msdu_paddr);
	frag_buf = ath10k_htt_rx_pop_paddr(htt, paddr);
	if (!frag_buf) {
		ath10k_warn(ar, "failed to pop frag-1 paddr: 0x%x", paddr);
		return -ENOENT;
	}

	skb_put(frag_buf, min(amsdu_len, HTT_RX_BUF_SIZE));
	ath10k_htt_append_frag_list(msdu, frag_buf, amsdu_len);

	amsdu_len -= frag_buf->len;
	prev_frag_buf = frag_buf;
	last_frag = ind_desc->reserved;
	while (!last_frag) {
		ind_desc++;
		paddr = __le32_to_cpu(ind_desc->msdu_paddr);
		frag_buf = ath10k_htt_rx_pop_paddr(htt, paddr);
		if (!frag_buf) {
			ath10k_warn(ar, "failed to pop frag-n paddr: 0x%x",
				    paddr);
			prev_frag_buf->next = NULL;
			return -ENOENT;
		}

		skb_put(frag_buf, min(amsdu_len, HTT_RX_BUF_SIZE));
		last_frag = ind_desc->reserved;
		amsdu_len -= frag_buf->len;

		prev_frag_buf->next = frag_buf;
		prev_frag_buf = frag_buf;
	}

	if (amsdu_len) {
		ath10k_warn(ar, "invalid amsdu len %u, left %d",
			    __le16_to_cpu(ind_desc->msdu_len), amsdu_len);
	}

	*msdu_desc = ind_desc;

	prev_frag_buf->next = NULL;
	return 0;
}

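/* 64-bit variant of the handler above; identical logic, differing only in
 * the in-order indication descriptor type and the width of msdu_paddr.
 */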
static int
ath10k_htt_rx_handle_amsdu_mon_64(struct ath10k_htt *htt,
				  struct sk_buff *msdu,
				  struct htt_rx_in_ord_msdu_desc_ext **msdu_desc)
{
	struct ath10k *ar = htt->ar;
	struct ath10k_hw_params *hw = &ar->hw_params;
	u64 paddr;
	struct sk_buff *frag_buf;
	struct sk_buff *prev_frag_buf;
	u8 last_frag;
	struct htt_rx_in_ord_msdu_desc_ext *ind_desc = *msdu_desc;
	struct htt_rx_desc *rxd;
	int amsdu_len = __le16_to_cpu(ind_desc->msdu_len);

	rxd = HTT_RX_BUF_TO_RX_DESC(hw, msdu->data);
	trace_ath10k_htt_rx_desc(ar, rxd, hw->rx_desc_ops->rx_desc_size);

	skb_put(msdu, hw->rx_desc_ops->rx_desc_size);
	skb_pull(msdu, hw->rx_desc_ops->rx_desc_size);
	skb_put(msdu, min(amsdu_len, ath10k_htt_rx_msdu_size(hw)));
	amsdu_len -= msdu->len;

	last_frag = ind_desc->reserved;
	if (last_frag) {
		if (amsdu_len) {
			ath10k_warn(ar, "invalid amsdu len %u, left %d",
				    __le16_to_cpu(ind_desc->msdu_len),
				    amsdu_len);
		}
		return 0;
	}

	ind_desc++;
	paddr = __le64_to_cpu(ind_desc->msdu_paddr);
	frag_buf = ath10k_htt_rx_pop_paddr(htt, paddr);
	if (!frag_buf) {
		ath10k_warn(ar, "failed to pop frag-1 paddr: 0x%llx", paddr);
		return -ENOENT;
	}

	skb_put(frag_buf, min(amsdu_len, HTT_RX_BUF_SIZE));
	ath10k_htt_append_frag_list(msdu, frag_buf, amsdu_len);

	amsdu_len -= frag_buf->len;
	prev_frag_buf = frag_buf;
	last_frag = ind_desc->reserved;
	while (!last_frag) {
		ind_desc++;
		paddr = __le64_to_cpu(ind_desc->msdu_paddr);
		frag_buf = ath10k_htt_rx_pop_paddr(htt, paddr);
		if (!frag_buf) {
			ath10k_warn(ar, "failed to pop frag-n paddr: 0x%llx",
				    paddr);
			prev_frag_buf->next = NULL;
			return -ENOENT;
		}

		skb_put(frag_buf, min(amsdu_len, HTT_RX_BUF_SIZE));
		last_frag = ind_desc->reserved;
		amsdu_len -= frag_buf->len;

		prev_frag_buf->next = frag_buf;
		prev_frag_buf = frag_buf;
	}

	if (amsdu_len) {
		ath10k_warn(ar, "invalid amsdu len %u, left %d",
			    __le16_to_cpu(ind_desc->msdu_len), amsdu_len);
	}

	*msdu_desc = ind_desc;

	prev_frag_buf->next = NULL;
	return 0;
}

static int ath10k_htt_rx_pop_paddr32_list(struct ath10k_htt *htt,
					  struct htt_rx_in_ord_ind *ev,
					  struct sk_buff_head *list)
{
	struct ath10k *ar = htt->ar;
	struct ath10k_hw_params *hw = &ar->hw_params;
	struct htt_rx_in_ord_msdu_desc *msdu_desc = ev->msdu_descs32;
	struct htt_rx_desc *rxd;
	struct rx_attention *rxd_attention;
	struct sk_buff *msdu;
	int msdu_count, ret;
	bool is_offload;
	u32 paddr;

	lockdep_assert_held(&htt->rx_ring.lock);

	msdu_count = __le16_to_cpu(ev->msdu_count);
	is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);

	while (msdu_count--) {
		paddr = __le32_to_cpu(msdu_desc->msdu_paddr);

		msdu = ath10k_htt_rx_pop_paddr(htt, paddr);
		if (!msdu) {
			__skb_queue_purge(list);
			return -ENOENT;
		}

		if (!is_offload && ar->monitor_arvif) {
			ret = ath10k_htt_rx_handle_amsdu_mon_32(htt, msdu,
								&msdu_desc);
			if (ret) {
				__skb_queue_purge(list);
				return ret;
			}
			__skb_queue_tail(list, msdu);
			msdu_desc++;
			continue;
		}

		__skb_queue_tail(list, msdu);

		if (!is_offload) {
			rxd = HTT_RX_BUF_TO_RX_DESC(hw, msdu->data);
			rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);

			trace_ath10k_htt_rx_desc(ar, rxd, hw->rx_desc_ops->rx_desc_size);

			skb_put(msdu, hw->rx_desc_ops->rx_desc_size);
			skb_pull(msdu, hw->rx_desc_ops->rx_desc_size);
			skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len));

			if (!(__le32_to_cpu(rxd_attention->flags) &
			      RX_ATTENTION_FLAGS_MSDU_DONE)) {
				ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n");
				return -EIO;
			}
		}

		msdu_desc++;
	}

	return 0;
}

static int ath10k_htt_rx_pop_paddr64_list(struct ath10k_htt *htt,
					  struct htt_rx_in_ord_ind *ev,
					  struct sk_buff_head *list)
{
	struct ath10k *ar = htt->ar;
	struct ath10k_hw_params *hw = &ar->hw_params;
	struct htt_rx_in_ord_msdu_desc_ext *msdu_desc = ev->msdu_descs64;
	struct htt_rx_desc *rxd;
	struct rx_attention *rxd_attention;
	struct sk_buff *msdu;
	int msdu_count, ret;
	bool is_offload;
	u64 paddr;

	lockdep_assert_held(&htt->rx_ring.lock);

	msdu_count = __le16_to_cpu(ev->msdu_count);
	is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);

	while (msdu_count--) {
		paddr = __le64_to_cpu(msdu_desc->msdu_paddr);
		msdu = ath10k_htt_rx_pop_paddr(htt, paddr);
		if (!msdu) {
			__skb_queue_purge(list);
			return -ENOENT;
		}

		if (!is_offload && ar->monitor_arvif) {
			ret = ath10k_htt_rx_handle_amsdu_mon_64(htt, msdu,
								&msdu_desc);
			if (ret) {
				__skb_queue_purge(list);
				return ret;
			}
			__skb_queue_tail(list, msdu);
			msdu_desc++;
			continue;
		}

		__skb_queue_tail(list, msdu);

		if (!is_offload) {
			rxd = HTT_RX_BUF_TO_RX_DESC(hw, msdu->data);
			rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);

			trace_ath10k_htt_rx_desc(ar, rxd, hw->rx_desc_ops->rx_desc_size);

			skb_put(msdu, hw->rx_desc_ops->rx_desc_size);
			skb_pull(msdu, hw->rx_desc_ops->rx_desc_size);
			skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len));

			if (!(__le32_to_cpu(rxd_attention->flags) &
			      RX_ATTENTION_FLAGS_MSDU_DONE)) {
				ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n");
				return -EIO;
			}
		}

		msdu_desc++;
	}

	return 0;
}

int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	dma_addr_t paddr;
	void *vaddr, *vaddr_ring;
	size_t size;
	struct timer_list *timer = &htt->rx_ring.refill_retry_timer;

	if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
		return 0;

	htt->rx_confused = false;

	/* XXX: The fill level could be changed during runtime in response to
	 * the host processing latency. Is this really worth it?
	 */
	htt->rx_ring.size = HTT_RX_RING_SIZE;
	htt->rx_ring.size_mask = htt->rx_ring.size - 1;
	htt->rx_ring.fill_level = ar->hw_params.rx_ring_fill_level;

	if (!is_power_of_2(htt->rx_ring.size)) {
		ath10k_warn(ar, "htt rx ring size is not power of 2\n");
		return -EINVAL;
	}

	htt->rx_ring.netbufs_ring =
		kcalloc(htt->rx_ring.size, sizeof(struct sk_buff *),
			GFP_KERNEL);
	if (!htt->rx_ring.netbufs_ring)
		goto err_netbuf;

	size = ath10k_htt_get_rx_ring_size(htt);

	vaddr_ring = dma_alloc_coherent(htt->ar->dev, size, &paddr, GFP_KERNEL);
	if (!vaddr_ring)
		goto err_dma_ring;

	ath10k_htt_config_paddrs_ring(htt, vaddr_ring);
	htt->rx_ring.base_paddr = paddr;

	vaddr = dma_alloc_coherent(htt->ar->dev,
				   sizeof(*htt->rx_ring.alloc_idx.vaddr),
				   &paddr, GFP_KERNEL);
	if (!vaddr)
		goto err_dma_idx;

	htt->rx_ring.alloc_idx.vaddr = vaddr;
	htt->rx_ring.alloc_idx.paddr = paddr;
	htt->rx_ring.sw_rd_idx.msdu_payld = htt->rx_ring.size_mask;
	*htt->rx_ring.alloc_idx.vaddr = 0;

	/* Initialize the Rx refill retry timer */
	timer_setup(timer, ath10k_htt_rx_ring_refill_retry, 0);

	spin_lock_init(&htt->rx_ring.lock);

	htt->rx_ring.fill_cnt = 0;
	htt->rx_ring.sw_rd_idx.msdu_payld = 0;
	hash_init(htt->rx_ring.skb_table);

	skb_queue_head_init(&htt->rx_msdus_q);
	skb_queue_head_init(&htt->rx_in_ord_compl_q);
	skb_queue_head_init(&htt->tx_fetch_ind_q);
	atomic_set(&htt->num_mpdus_ready, 0);

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
		   htt->rx_ring.size, htt->rx_ring.fill_level);
	return 0;

err_dma_idx:
	dma_free_coherent(htt->ar->dev,
			  ath10k_htt_get_rx_ring_size(htt),
			  vaddr_ring,
			  htt->rx_ring.base_paddr);
	ath10k_htt_config_paddrs_ring(htt, NULL);
err_dma_ring:
	kfree(htt->rx_ring.netbufs_ring);
	htt->rx_ring.netbufs_ring = NULL;
err_netbuf:
	return -ENOMEM;
}

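/* Per-encryption-type lengths used when stripping crypto material from
 * decrypted frames: the crypto header (IV/PN) length below, then the
 * trailing MIC and ICV lengths in the two helpers that follow.
 */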
static int ath10k_htt_rx_crypto_param_len(struct ath10k *ar,
					  enum htt_rx_mpdu_encrypt_type type)
{
	switch (type) {
	case HTT_RX_MPDU_ENCRYPT_NONE:
		return 0;
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
		return IEEE80211_WEP_IV_LEN;
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
		return IEEE80211_TKIP_IV_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
		return IEEE80211_CCMP_HDR_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
		return IEEE80211_CCMP_256_HDR_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
		return IEEE80211_GCMP_HDR_LEN;
	case HTT_RX_MPDU_ENCRYPT_WEP128:
	case HTT_RX_MPDU_ENCRYPT_WAPI:
		break;
	}

	ath10k_warn(ar, "unsupported encryption type %d\n", type);
	return 0;
}

#define MICHAEL_MIC_LEN 8

static int ath10k_htt_rx_crypto_mic_len(struct ath10k *ar,
					enum htt_rx_mpdu_encrypt_type type)
{
	switch (type) {
	case HTT_RX_MPDU_ENCRYPT_NONE:
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
		return 0;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
		return IEEE80211_CCMP_MIC_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
		return IEEE80211_CCMP_256_MIC_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
		return IEEE80211_GCMP_MIC_LEN;
	case HTT_RX_MPDU_ENCRYPT_WEP128:
	case HTT_RX_MPDU_ENCRYPT_WAPI:
		break;
	}

	ath10k_warn(ar, "unsupported encryption type %d\n", type);
	return 0;
}

static int ath10k_htt_rx_crypto_icv_len(struct ath10k *ar,
					enum htt_rx_mpdu_encrypt_type type)
{
	switch (type) {
	case HTT_RX_MPDU_ENCRYPT_NONE:
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
	case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
		return 0;
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
		return IEEE80211_WEP_ICV_LEN;
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
		return IEEE80211_TKIP_ICV_LEN;
	case HTT_RX_MPDU_ENCRYPT_WEP128:
	case HTT_RX_MPDU_ENCRYPT_WAPI:
		break;
	}

	ath10k_warn(ar, "unsupported encryption type %d\n", type);
	return 0;
}

struct amsdu_subframe_hdr {
	u8 dst[ETH_ALEN];
	u8 src[ETH_ALEN];
	__be16 len;
} __packed;

#define GROUP_ID_IS_SU_MIMO(x) ((x) == 0 || (x) == 63)
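
/* In VHT-SIG-A, group IDs 0 and 63 denote single-user transmissions;
 * everything else is MU-MIMO (IEEE 802.11ac).
 */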

static inline u8 ath10k_bw_to_mac80211_bw(u8 bw)
{
	u8 ret = 0;

	switch (bw) {
	case 0:
		ret = RATE_INFO_BW_20;
		break;
	case 1:
		ret = RATE_INFO_BW_40;
		break;
	case 2:
		ret = RATE_INFO_BW_80;
		break;
	case 3:
		ret = RATE_INFO_BW_160;
		break;
	}

	return ret;
}

static void ath10k_htt_rx_h_rates(struct ath10k *ar,
				  struct ieee80211_rx_status *status,
				  struct htt_rx_desc *rxd)
{
	struct ath10k_hw_params *hw = &ar->hw_params;
	struct rx_attention *rxd_attention;
	struct rx_mpdu_start *rxd_mpdu_start;
	struct rx_mpdu_end *rxd_mpdu_end;
	struct rx_msdu_start_common *rxd_msdu_start_common;
	struct rx_msdu_end_common *rxd_msdu_end_common;
	struct rx_ppdu_start *rxd_ppdu_start;
	struct ieee80211_supported_band *sband;
	u8 cck, rate, bw, sgi, mcs, nss;
	u8 *rxd_msdu_payload;
	u8 preamble = 0;
	u8 group_id;
	u32 info1, info2, info3;
	u32 stbc, nsts_su;

	rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);
	rxd_mpdu_start = ath10k_htt_rx_desc_get_mpdu_start(hw, rxd);
	rxd_mpdu_end = ath10k_htt_rx_desc_get_mpdu_end(hw, rxd);
	rxd_msdu_start_common = ath10k_htt_rx_desc_get_msdu_start(hw, rxd);
	rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
	rxd_ppdu_start = ath10k_htt_rx_desc_get_ppdu_start(hw, rxd);
	rxd_msdu_payload = ath10k_htt_rx_desc_get_msdu_payload(hw, rxd);

	info1 = __le32_to_cpu(rxd_ppdu_start->info1);
	info2 = __le32_to_cpu(rxd_ppdu_start->info2);
	info3 = __le32_to_cpu(rxd_ppdu_start->info3);

	preamble = MS(info1, RX_PPDU_START_INFO1_PREAMBLE_TYPE);

	switch (preamble) {
	case HTT_RX_LEGACY:
		/* The band is needed to derive the legacy rate index. Since
		 * the band can't be undefined, check that freq is non-zero.
		 */
		if (!status->freq)
			return;

		cck = info1 & RX_PPDU_START_INFO1_L_SIG_RATE_SELECT;
		rate = MS(info1, RX_PPDU_START_INFO1_L_SIG_RATE);
		rate &= ~RX_PPDU_START_RATE_FLAG;

		sband = &ar->mac.sbands[status->band];
		status->rate_idx = ath10k_mac_hw_rate_to_idx(sband, rate, cck);
		break;
	case HTT_RX_HT:
	case HTT_RX_HT_WITH_TXBF:
		/* HT-SIG - Table 20-11 in info2 and info3 */
		mcs = info2 & 0x1F;
		nss = mcs >> 3;
		bw = (info2 >> 7) & 1;
		sgi = (info3 >> 7) & 1;

		status->rate_idx = mcs;
		status->encoding = RX_ENC_HT;
		if (sgi)
			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
		if (bw)
			status->bw = RATE_INFO_BW_40;
		break;
	case HTT_RX_VHT:
	case HTT_RX_VHT_WITH_TXBF:
		/* VHT-SIG-A1 in info2, VHT-SIG-A2 in info3
		 * TODO check this
		 */
		bw = info2 & 3;
		sgi = info3 & 1;
		stbc = (info2 >> 3) & 1;
		group_id = (info2 >> 4) & 0x3F;

		if (GROUP_ID_IS_SU_MIMO(group_id)) {
			mcs = (info3 >> 4) & 0x0F;
			nsts_su = ((info2 >> 10) & 0x07);
			if (stbc)
				nss = (nsts_su >> 2) + 1;
			else
				nss = (nsts_su + 1);
		} else {
			/* Hardware doesn't decode VHT-SIG-B into the Rx
			 * descriptor so it's impossible to decode the MCS.
			 * Also, since the firmware consumes Group Id
			 * Management frames, the host has no knowledge of the
			 * group/user position mapping, so it's impossible to
			 * pick the correct Nsts from VHT-SIG-A1.
			 *
			 * Bandwidth and SGI are valid so report the rateinfo
			 * on a best-effort basis.
			 */
			mcs = 0;
			nss = 1;
		}

		if (mcs > 0x09) {
			ath10k_warn(ar, "invalid MCS received %u\n", mcs);
			ath10k_warn(ar, "rxd %08x mpdu start %08x %08x msdu start %08x %08x ppdu start %08x %08x %08x %08x %08x\n",
				    __le32_to_cpu(rxd_attention->flags),
				    __le32_to_cpu(rxd_mpdu_start->info0),
				    __le32_to_cpu(rxd_mpdu_start->info1),
				    __le32_to_cpu(rxd_msdu_start_common->info0),
				    __le32_to_cpu(rxd_msdu_start_common->info1),
				    __le32_to_cpu(rxd_ppdu_start->info0),
				    __le32_to_cpu(rxd_ppdu_start->info1),
				    __le32_to_cpu(rxd_ppdu_start->info2),
				    __le32_to_cpu(rxd_ppdu_start->info3),
				    __le32_to_cpu(rxd_ppdu_start->info4));

			ath10k_warn(ar, "msdu end %08x mpdu end %08x\n",
				    __le32_to_cpu(rxd_msdu_end_common->info0),
				    __le32_to_cpu(rxd_mpdu_end->info0));

			ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL,
					"rx desc msdu payload: ",
					rxd_msdu_payload, 50);
		}

		status->rate_idx = mcs;
		status->nss = nss;

		if (sgi)
			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;

		status->bw = ath10k_bw_to_mac80211_bw(bw);
		status->encoding = RX_ENC_VHT;
		break;
	default:
		break;
	}
}

static struct ieee80211_channel *
ath10k_htt_rx_h_peer_channel(struct ath10k *ar, struct htt_rx_desc *rxd)
{
	struct ath10k_hw_params *hw = &ar->hw_params;
	struct rx_attention *rxd_attention;
	struct rx_msdu_end_common *rxd_msdu_end_common;
	struct rx_mpdu_start *rxd_mpdu_start;
	struct ath10k_peer *peer;
	struct ath10k_vif *arvif;
	struct cfg80211_chan_def def;
	u16 peer_id;

	lockdep_assert_held(&ar->data_lock);

	if (!rxd)
		return NULL;

	rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);
	rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
	rxd_mpdu_start = ath10k_htt_rx_desc_get_mpdu_start(hw, rxd);

	if (rxd_attention->flags &
	    __cpu_to_le32(RX_ATTENTION_FLAGS_PEER_IDX_INVALID))
		return NULL;

	if (!(rxd_msdu_end_common->info0 &
	      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU)))
		return NULL;

	peer_id = MS(__le32_to_cpu(rxd_mpdu_start->info0),
		     RX_MPDU_START_INFO0_PEER_IDX);

	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer)
		return NULL;

	arvif = ath10k_get_arvif(ar, peer->vdev_id);
	if (WARN_ON_ONCE(!arvif))
		return NULL;

	if (ath10k_mac_vif_chan(arvif->vif, &def))
		return NULL;

	return def.chan;
}

static struct ieee80211_channel *
ath10k_htt_rx_h_vdev_channel(struct ath10k *ar, u32 vdev_id)
{
	struct ath10k_vif *arvif;
	struct cfg80211_chan_def def;

	lockdep_assert_held(&ar->data_lock);

	list_for_each_entry(arvif, &ar->arvifs, list) {
		if (arvif->vdev_id == vdev_id &&
		    ath10k_mac_vif_chan(arvif->vif, &def) == 0)
			return def.chan;
	}

	return NULL;
}

static void
ath10k_htt_rx_h_any_chan_iter(struct ieee80211_hw *hw,
			      struct ieee80211_chanctx_conf *conf,
			      void *data)
{
	struct cfg80211_chan_def *def = data;

	*def = conf->def;
}

static struct ieee80211_channel *
ath10k_htt_rx_h_any_channel(struct ath10k *ar)
{
	struct cfg80211_chan_def def = {};

	ieee80211_iter_chan_contexts_atomic(ar->hw,
					    ath10k_htt_rx_h_any_chan_iter,
					    &def);

	return def.chan;
}

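/* Resolve the channel a frame was received on, trying increasingly generic
 * sources: the scan channel, the current rx channel, the peer's vif channel,
 * the vdev's channel, any active channel context and finally the target's
 * operating channel.
 */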
static bool ath10k_htt_rx_h_channel(struct ath10k *ar,
				    struct ieee80211_rx_status *status,
				    struct htt_rx_desc *rxd,
				    u32 vdev_id)
{
	struct ieee80211_channel *ch;

	spin_lock_bh(&ar->data_lock);
	ch = ar->scan_channel;
	if (!ch)
		ch = ar->rx_channel;
	if (!ch)
		ch = ath10k_htt_rx_h_peer_channel(ar, rxd);
	if (!ch)
		ch = ath10k_htt_rx_h_vdev_channel(ar, vdev_id);
	if (!ch)
		ch = ath10k_htt_rx_h_any_channel(ar);
	if (!ch)
		ch = ar->tgt_oper_chan;
	spin_unlock_bh(&ar->data_lock);

	if (!ch)
		return false;

	status->band = ch->band;
	status->freq = ch->center_freq;

	return true;
}

static void ath10k_htt_rx_h_signal(struct ath10k *ar,
				   struct ieee80211_rx_status *status,
				   struct htt_rx_desc *rxd)
{
	struct ath10k_hw_params *hw = &ar->hw_params;
	struct rx_ppdu_start *rxd_ppdu_start = ath10k_htt_rx_desc_get_ppdu_start(hw, rxd);
	int i;

	for (i = 0; i < IEEE80211_MAX_CHAINS; i++) {
		status->chains &= ~BIT(i);

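		/* 0x80 in pri20_mhz appears to mark an invalid/unmeasured
		 * chain; only chains with a real RSSI sample are reported.
		 */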
		if (rxd_ppdu_start->rssi_chains[i].pri20_mhz != 0x80) {
			status->chain_signal[i] = ATH10K_DEFAULT_NOISE_FLOOR +
				rxd_ppdu_start->rssi_chains[i].pri20_mhz;

			status->chains |= BIT(i);
		}
	}

	/* FIXME: Get real NF */
	status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
			 rxd_ppdu_start->rssi_comb;
	status->flag &= ~RX_FLAG_NO_SIGNAL_VAL;
}

static void ath10k_htt_rx_h_mactime(struct ath10k *ar,
				    struct ieee80211_rx_status *status,
				    struct htt_rx_desc *rxd)
{
	struct ath10k_hw_params *hw = &ar->hw_params;
	struct rx_ppdu_end_common *rxd_ppdu_end_common;

	rxd_ppdu_end_common = ath10k_htt_rx_desc_get_ppdu_end(hw, rxd);

	/* FIXME: TSF is known only at the end of PPDU, in the last MPDU. This
	 * means all prior MSDUs in a PPDU are reported to mac80211 without the
	 * TSF. Is it worth holding frames until end of PPDU is known?
	 *
	 * FIXME: Can we get/compute 64bit TSF?
	 */
	status->mactime = __le32_to_cpu(rxd_ppdu_end_common->tsf_timestamp);
	status->flag |= RX_FLAG_MACTIME_END;
}

static void ath10k_htt_rx_h_ppdu(struct ath10k *ar,
				 struct sk_buff_head *amsdu,
				 struct ieee80211_rx_status *status,
				 u32 vdev_id)
{
	struct sk_buff *first;
	struct ath10k_hw_params *hw = &ar->hw_params;
	struct htt_rx_desc *rxd;
	struct rx_attention *rxd_attention;
	bool is_first_ppdu;
	bool is_last_ppdu;

	if (skb_queue_empty(amsdu))
		return;

	first = skb_peek(amsdu);
	rxd = HTT_RX_BUF_TO_RX_DESC(hw,
				    (void *)first->data - hw->rx_desc_ops->rx_desc_size);

	rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);

	is_first_ppdu = !!(rxd_attention->flags &
			   __cpu_to_le32(RX_ATTENTION_FLAGS_FIRST_MPDU));
	is_last_ppdu = !!(rxd_attention->flags &
			  __cpu_to_le32(RX_ATTENTION_FLAGS_LAST_MPDU));

	if (is_first_ppdu) {
		/* New PPDU starts so clear out the old per-PPDU status. */
		status->freq = 0;
		status->rate_idx = 0;
		status->nss = 0;
		status->encoding = RX_ENC_LEGACY;
		status->bw = RATE_INFO_BW_20;

		status->flag &= ~RX_FLAG_MACTIME_END;
		status->flag |= RX_FLAG_NO_SIGNAL_VAL;

		status->flag &= ~(RX_FLAG_AMPDU_IS_LAST);
		status->flag |= RX_FLAG_AMPDU_DETAILS | RX_FLAG_AMPDU_LAST_KNOWN;
		status->ampdu_reference = ar->ampdu_reference;

		ath10k_htt_rx_h_signal(ar, status, rxd);
		ath10k_htt_rx_h_channel(ar, status, rxd, vdev_id);
		ath10k_htt_rx_h_rates(ar, status, rxd);
	}

	if (is_last_ppdu) {
		ath10k_htt_rx_h_mactime(ar, status, rxd);

		/* set ampdu last segment flag */
		status->flag |= RX_FLAG_AMPDU_IS_LAST;
		ar->ampdu_reference++;
	}
}

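/* TID to WMM access category mapping per the IEEE 802.1D user priorities;
 * used only for the debug print in ath10k_get_tid() below.
 */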
static const char * const tid_to_ac[] = {
	"BE",
	"BK",
	"BK",
	"BE",
	"VI",
	"VI",
	"VO",
	"VO",
};

static char *ath10k_get_tid(struct ieee80211_hdr *hdr, char *out, size_t size)
{
	u8 *qc;
	int tid;

	if (!ieee80211_is_data_qos(hdr->frame_control))
		return "";

	qc = ieee80211_get_qos_ctl(hdr);
	tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
	if (tid < 8)
		snprintf(out, size, "tid %d (%s)", tid, tid_to_ac[tid]);
	else
		snprintf(out, size, "tid %d", tid);

	return out;
}

static void ath10k_htt_rx_h_queue_msdu(struct ath10k *ar,
				       struct ieee80211_rx_status *rx_status,
				       struct sk_buff *skb)
{
	struct ieee80211_rx_status *status;

	status = IEEE80211_SKB_RXCB(skb);
	*status = *rx_status;

	skb_queue_tail(&ar->htt.rx_msdus_q, skb);
}

static void ath10k_process_rx(struct ath10k *ar, struct sk_buff *skb)
{
	struct ieee80211_rx_status *status;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	char tid[32];

	status = IEEE80211_SKB_RXCB(skb);

	if (!(ar->filter_flags & FIF_FCSFAIL) &&
	    status->flag & RX_FLAG_FAILED_FCS_CRC) {
		ar->stats.rx_crc_err_drop++;
		dev_kfree_skb_any(skb);
		return;
	}

	ath10k_dbg(ar, ATH10K_DBG_DATA,
		   "rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
		   skb,
		   skb->len,
		   ieee80211_get_SA(hdr),
		   ath10k_get_tid(hdr, tid, sizeof(tid)),
		   is_multicast_ether_addr(ieee80211_get_DA(hdr)) ?
							"mcast" : "ucast",
		   IEEE80211_SEQ_TO_SN(__le16_to_cpu(hdr->seq_ctrl)),
		   (status->encoding == RX_ENC_LEGACY) ? "legacy" : "",
		   (status->encoding == RX_ENC_HT) ? "ht" : "",
		   (status->encoding == RX_ENC_VHT) ? "vht" : "",
		   (status->bw == RATE_INFO_BW_40) ? "40" : "",
		   (status->bw == RATE_INFO_BW_80) ? "80" : "",
		   (status->bw == RATE_INFO_BW_160) ? "160" : "",
		   status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "",
		   status->rate_idx,
		   status->nss,
		   status->freq,
		   status->band, status->flag,
		   !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
		   !!(status->flag & RX_FLAG_MMIC_ERROR),
		   !!(status->flag & RX_FLAG_AMSDU_MORE));
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
			skb->data, skb->len);
	trace_ath10k_rx_hdr(ar, skb->data, skb->len);
	trace_ath10k_rx_payload(ar, skb->data, skb->len);

	ieee80211_rx_napi(ar->hw, NULL, skb, &ar->napi);
}

static int ath10k_htt_rx_nwifi_hdrlen(struct ath10k *ar,
				      struct ieee80211_hdr *hdr)
{
	int len = ieee80211_hdrlen(hdr->frame_control);

	if (!test_bit(ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING,
		      ar->running_fw->fw_file.fw_features))
		len = round_up(len, 4);

	return len;
}

static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
					struct sk_buff *msdu,
					struct ieee80211_rx_status *status,
					enum htt_rx_mpdu_encrypt_type enctype,
					bool is_decrypted,
					const u8 first_hdr[64])
{
	struct ieee80211_hdr *hdr;
	struct ath10k_hw_params *hw = &ar->hw_params;
	struct htt_rx_desc *rxd;
	struct rx_msdu_end_common *rxd_msdu_end_common;
	size_t hdr_len;
	size_t crypto_len;
	bool is_first;
	bool is_last;
	bool msdu_limit_err;
	int bytes_aligned = ar->hw_params.decap_align_bytes;
	u8 *qos;

	rxd = HTT_RX_BUF_TO_RX_DESC(hw,
				    (void *)msdu->data - hw->rx_desc_ops->rx_desc_size);

	rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
	is_first = !!(rxd_msdu_end_common->info0 &
		      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
	is_last = !!(rxd_msdu_end_common->info0 &
		     __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));

	/* Delivered decapped frame:
	 * [802.11 header]
	 * [crypto param] <-- can be trimmed if !fcs_err &&
	 *                    !decrypt_err && !peer_idx_invalid
	 * [amsdu header] <-- only if A-MSDU
	 * [rfc1042/llc]
	 * [payload]
	 * [FCS] <-- at end, needs to be trimmed
	 */

	/* Some hardware (QCA99x0 variants) limits the number of msdus per
	 * a-msdu when deaggregating, so that unwanted MSDU-deaggregation is
	 * avoided for error packets. If the limit is exceeded, the hw sends
	 * all remaining MSDUs as a single last MSDU with the msdu limit
	 * error set.
	 */
	msdu_limit_err = ath10k_htt_rx_desc_msdu_limit_error(hw, rxd);

	/* If an MSDU limit error occurred then don't warn; a partial raw
	 * MSDU without the first MSDU is expected in that case and is
	 * handled further below.
	 */
	/* This probably shouldn't happen but warn just in case */
	if (WARN_ON_ONCE(!is_first && !msdu_limit_err))
		return;

	/* This probably shouldn't happen but warn just in case */
	if (WARN_ON_ONCE(!(is_first && is_last) && !msdu_limit_err))
		return;

	skb_trim(msdu, msdu->len - FCS_LEN);

	/* Push original 80211 header */
	if (unlikely(msdu_limit_err)) {
		hdr = (struct ieee80211_hdr *)first_hdr;
		hdr_len = ieee80211_hdrlen(hdr->frame_control);
		crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);

		if (ieee80211_is_data_qos(hdr->frame_control)) {
			qos = ieee80211_get_qos_ctl(hdr);
			qos[0] |= IEEE80211_QOS_CTL_A_MSDU_PRESENT;
		}

		if (crypto_len)
			memcpy(skb_push(msdu, crypto_len),
			       (void *)hdr + round_up(hdr_len, bytes_aligned),
			       crypto_len);

		memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
	}

	/* In most cases this will be true for sniffed frames. It makes sense
	 * to deliver them as-is without stripping the crypto param. This is
	 * necessary for software based decryption.
	 *
	 * If there's no error then the frame is decrypted. At least that is
	 * the case for frames that come in via fragmented rx indication.
	 */
	if (!is_decrypted)
		return;

	/* The payload is decrypted so strip crypto params. Start from tail
	 * since hdr is used to compute some stuff.
	 */

	hdr = (void *)msdu->data;

	/* Tail */
	if (status->flag & RX_FLAG_IV_STRIPPED) {
		skb_trim(msdu, msdu->len -
			 ath10k_htt_rx_crypto_mic_len(ar, enctype));

		skb_trim(msdu, msdu->len -
			 ath10k_htt_rx_crypto_icv_len(ar, enctype));
	} else {
		/* MIC */
		if (status->flag & RX_FLAG_MIC_STRIPPED)
			skb_trim(msdu, msdu->len -
				 ath10k_htt_rx_crypto_mic_len(ar, enctype));

		/* ICV */
		if (status->flag & RX_FLAG_ICV_STRIPPED)
			skb_trim(msdu, msdu->len -
				 ath10k_htt_rx_crypto_icv_len(ar, enctype));
	}

	/* MMIC */
	if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
	    !ieee80211_has_morefrags(hdr->frame_control) &&
	    enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
		skb_trim(msdu, msdu->len - MICHAEL_MIC_LEN);

	/* Head */
	if (status->flag & RX_FLAG_IV_STRIPPED) {
		hdr_len = ieee80211_hdrlen(hdr->frame_control);
		crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);

		memmove((void *)msdu->data + crypto_len,
			(void *)msdu->data, hdr_len);
		skb_pull(msdu, crypto_len);
	}
}

static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
					  struct sk_buff *msdu,
					  struct ieee80211_rx_status *status,
					  const u8 first_hdr[64],
					  enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ath10k_hw_params *hw = &ar->hw_params;
	struct ieee80211_hdr *hdr;
	struct htt_rx_desc *rxd;
	size_t hdr_len;
	u8 da[ETH_ALEN];
	u8 sa[ETH_ALEN];
	int l3_pad_bytes;
	int bytes_aligned = ar->hw_params.decap_align_bytes;

	/* Delivered decapped frame:
	 * [nwifi 802.11 header] <-- replaced with 802.11 hdr
	 * [rfc1042/llc]
	 *
	 * Note: The nwifi header doesn't have QoS Control and is
	 * (always?) a 3addr frame.
	 *
	 * Note2: There's no A-MSDU subframe header. Even if it's part
	 * of an A-MSDU.
	 */

	/* pull decapped header and copy SA & DA */
	rxd = HTT_RX_BUF_TO_RX_DESC(hw, (void *)msdu->data -
				    hw->rx_desc_ops->rx_desc_size);

	l3_pad_bytes = ath10k_htt_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
	skb_put(msdu, l3_pad_bytes);

	hdr = (struct ieee80211_hdr *)(msdu->data + l3_pad_bytes);

	hdr_len = ath10k_htt_rx_nwifi_hdrlen(ar, hdr);
	ether_addr_copy(da, ieee80211_get_DA(hdr));
	ether_addr_copy(sa, ieee80211_get_SA(hdr));
	skb_pull(msdu, hdr_len);

	/* push original 802.11 header */
	hdr = (struct ieee80211_hdr *)first_hdr;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);

	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		memcpy(skb_push(msdu,
				ath10k_htt_rx_crypto_param_len(ar, enctype)),
		       (void *)hdr + round_up(hdr_len, bytes_aligned),
			ath10k_htt_rx_crypto_param_len(ar, enctype));
	}

	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);

	/* the original 802.11 header has a different DA and, in the
	 * case of 4addr, it may also have a different SA
	 */
	hdr = (struct ieee80211_hdr *)msdu->data;
	ether_addr_copy(ieee80211_get_DA(hdr), da);
	ether_addr_copy(ieee80211_get_SA(hdr), sa);
}

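/* Locate the RFC1042/LLC header within the raw 802.11 header snapshot the
 * hardware keeps in rx_hdr_status: for a first MSDU it follows the (aligned)
 * 802.11 header and crypto params, and for A-MSDU subframes it additionally
 * follows the subframe header.
 */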
static void *ath10k_htt_rx_h_find_rfc1042(struct ath10k *ar,
					  struct sk_buff *msdu,
					  enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ieee80211_hdr *hdr;
	struct ath10k_hw_params *hw = &ar->hw_params;
	struct htt_rx_desc *rxd;
	struct rx_msdu_end_common *rxd_msdu_end_common;
	u8 *rxd_rx_hdr_status;
	size_t hdr_len, crypto_len;
	void *rfc1042;
	bool is_first, is_last, is_amsdu;
	int bytes_aligned = ar->hw_params.decap_align_bytes;

	rxd = HTT_RX_BUF_TO_RX_DESC(hw,
				    (void *)msdu->data - hw->rx_desc_ops->rx_desc_size);

	rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
	rxd_rx_hdr_status = ath10k_htt_rx_desc_get_rx_hdr_status(hw, rxd);
	hdr = (void *)rxd_rx_hdr_status;

	is_first = !!(rxd_msdu_end_common->info0 &
		      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
	is_last = !!(rxd_msdu_end_common->info0 &
		     __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
	is_amsdu = !(is_first && is_last);

	rfc1042 = hdr;

	if (is_first) {
		hdr_len = ieee80211_hdrlen(hdr->frame_control);
		crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);

		rfc1042 += round_up(hdr_len, bytes_aligned) +
			   round_up(crypto_len, bytes_aligned);
	}

	if (is_amsdu)
		rfc1042 += sizeof(struct amsdu_subframe_hdr);

	return rfc1042;
}

static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar,
					struct sk_buff *msdu,
					struct ieee80211_rx_status *status,
					const u8 first_hdr[64],
					enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ath10k_hw_params *hw = &ar->hw_params;
	struct ieee80211_hdr *hdr;
	struct ethhdr *eth;
	size_t hdr_len;
	void *rfc1042;
	u8 da[ETH_ALEN];
	u8 sa[ETH_ALEN];
	int l3_pad_bytes;
	struct htt_rx_desc *rxd;
	int bytes_aligned = ar->hw_params.decap_align_bytes;

	/* Delivered decapped frame:
	 * [eth header] <-- replaced with 802.11 hdr & rfc1042/llc
	 * [payload]
	 */

	rfc1042 = ath10k_htt_rx_h_find_rfc1042(ar, msdu, enctype);
	if (WARN_ON_ONCE(!rfc1042))
		return;

	rxd = HTT_RX_BUF_TO_RX_DESC(hw,
				    (void *)msdu->data - hw->rx_desc_ops->rx_desc_size);

	l3_pad_bytes = ath10k_htt_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
	skb_put(msdu, l3_pad_bytes);
	skb_pull(msdu, l3_pad_bytes);

	/* pull decapped header and copy SA & DA */
	eth = (struct ethhdr *)msdu->data;
	ether_addr_copy(da, eth->h_dest);
	ether_addr_copy(sa, eth->h_source);
	skb_pull(msdu, sizeof(struct ethhdr));

	/* push rfc1042/llc/snap */
	memcpy(skb_push(msdu, sizeof(struct rfc1042_hdr)), rfc1042,
	       sizeof(struct rfc1042_hdr));

	/* push original 802.11 header */
	hdr = (struct ieee80211_hdr *)first_hdr;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);

	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		memcpy(skb_push(msdu,
				ath10k_htt_rx_crypto_param_len(ar, enctype)),
		       (void *)hdr + round_up(hdr_len, bytes_aligned),
			ath10k_htt_rx_crypto_param_len(ar, enctype));
	}

	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);

1706 	/* original 802.11 header has a different DA and in
1707 	 * case of 4addr it may also have different SA
1708 	 */
1709 	hdr = (struct ieee80211_hdr *)msdu->data;
1710 	ether_addr_copy(ieee80211_get_DA(hdr), da);
1711 	ether_addr_copy(ieee80211_get_SA(hdr), sa);
1712 }
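
/* Illustration of the rebuild above, step by step:
 *
 *   [eth hdr][payload]                       pull ethhdr, save DA/SA
 *   [rfc1042/llc (8)][payload]               push the llc/snap header
 *   [crypto param][rfc1042/llc][payload]     only if IV wasn't stripped
 *   [802.11 hdr][...][payload]               push original header, then
 *                                            rewrite DA/SA
 */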
1713 
1714 static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar,
1715 					 struct sk_buff *msdu,
1716 					 struct ieee80211_rx_status *status,
1717 					 const u8 first_hdr[64],
1718 					 enum htt_rx_mpdu_encrypt_type enctype)
1719 {
1720 	struct ath10k_hw_params *hw = &ar->hw_params;
1721 	struct ieee80211_hdr *hdr;
1722 	size_t hdr_len;
1723 	int l3_pad_bytes;
1724 	struct htt_rx_desc *rxd;
1725 	int bytes_aligned = ar->hw_params.decap_align_bytes;
1726 
1727 	/* Delivered decapped frame:
1728 	 * [amsdu header] <-- replaced with 802.11 hdr
1729 	 * [rfc1042/llc]
1730 	 * [payload]
1731 	 */
1732 
1733 	rxd = HTT_RX_BUF_TO_RX_DESC(hw,
1734 				    (void *)msdu->data - hw->rx_desc_ops->rx_desc_size);
1735 
1736 	l3_pad_bytes = ath10k_htt_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
1737 
1738 	skb_put(msdu, l3_pad_bytes);
1739 	skb_pull(msdu, sizeof(struct amsdu_subframe_hdr) + l3_pad_bytes);
1740 
1741 	hdr = (struct ieee80211_hdr *)first_hdr;
1742 	hdr_len = ieee80211_hdrlen(hdr->frame_control);
1743 
1744 	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
1745 		memcpy(skb_push(msdu,
1746 				ath10k_htt_rx_crypto_param_len(ar, enctype)),
1747 		       (void *)hdr + round_up(hdr_len, bytes_aligned),
1748 			ath10k_htt_rx_crypto_param_len(ar, enctype));
1749 	}
1750 
1751 	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
1752 }
1753 
1754 static void ath10k_htt_rx_h_undecap(struct ath10k *ar,
1755 				    struct sk_buff *msdu,
1756 				    struct ieee80211_rx_status *status,
1757 				    u8 first_hdr[64],
1758 				    enum htt_rx_mpdu_encrypt_type enctype,
1759 				    bool is_decrypted)
1760 {
1761 	struct ath10k_hw_params *hw = &ar->hw_params;
1762 	struct htt_rx_desc *rxd;
1763 	struct rx_msdu_start_common *rxd_msdu_start_common;
1764 	enum rx_msdu_decap_format decap;
1765 
1766 	/* First msdu's decapped header:
1767 	 * [802.11 header] <-- padded to 4 bytes long
1768 	 * [crypto param] <-- padded to 4 bytes long
1769 	 * [amsdu header] <-- only if A-MSDU
1770 	 * [rfc1042/llc]
1771 	 *
1772 	 * Other (2nd, 3rd, ..) msdu's decapped header:
1773 	 * [amsdu header] <-- only if A-MSDU
1774 	 * [rfc1042/llc]
1775 	 */
1776 
1777 	rxd = HTT_RX_BUF_TO_RX_DESC(hw,
1778 				    (void *)msdu->data - hw->rx_desc_ops->rx_desc_size);
1779 
1780 	rxd_msdu_start_common = ath10k_htt_rx_desc_get_msdu_start(hw, rxd);
1781 	decap = MS(__le32_to_cpu(rxd_msdu_start_common->info1),
1782 		   RX_MSDU_START_INFO1_DECAP_FORMAT);
1783 
1784 	switch (decap) {
1785 	case RX_MSDU_DECAP_RAW:
1786 		ath10k_htt_rx_h_undecap_raw(ar, msdu, status, enctype,
1787 					    is_decrypted, first_hdr);
1788 		break;
1789 	case RX_MSDU_DECAP_NATIVE_WIFI:
1790 		ath10k_htt_rx_h_undecap_nwifi(ar, msdu, status, first_hdr,
1791 					      enctype);
1792 		break;
1793 	case RX_MSDU_DECAP_ETHERNET2_DIX:
1794 		ath10k_htt_rx_h_undecap_eth(ar, msdu, status, first_hdr, enctype);
1795 		break;
1796 	case RX_MSDU_DECAP_8023_SNAP_LLC:
1797 		ath10k_htt_rx_h_undecap_snap(ar, msdu, status, first_hdr,
1798 					     enctype);
1799 		break;
1800 	}
1801 }
1802 
1803 static int ath10k_htt_rx_get_csum_state(struct ath10k_hw_params *hw, struct sk_buff *skb)
1804 {
1805 	struct htt_rx_desc *rxd;
1806 	struct rx_attention *rxd_attention;
1807 	struct rx_msdu_start_common *rxd_msdu_start_common;
1808 	u32 flags, info;
1809 	bool is_ip4, is_ip6;
1810 	bool is_tcp, is_udp;
1811 	bool ip_csum_ok, tcpudp_csum_ok;
1812 
1813 	rxd = HTT_RX_BUF_TO_RX_DESC(hw,
1814 				    (void *)skb->data - hw->rx_desc_ops->rx_desc_size);
1815 
1816 	rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);
1817 	rxd_msdu_start_common = ath10k_htt_rx_desc_get_msdu_start(hw, rxd);
1818 	flags = __le32_to_cpu(rxd_attention->flags);
1819 	info = __le32_to_cpu(rxd_msdu_start_common->info1);
1820 
1821 	is_ip4 = !!(info & RX_MSDU_START_INFO1_IPV4_PROTO);
1822 	is_ip6 = !!(info & RX_MSDU_START_INFO1_IPV6_PROTO);
1823 	is_tcp = !!(info & RX_MSDU_START_INFO1_TCP_PROTO);
1824 	is_udp = !!(info & RX_MSDU_START_INFO1_UDP_PROTO);
1825 	ip_csum_ok = !(flags & RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL);
1826 	tcpudp_csum_ok = !(flags & RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL);
1827 
1828 	if (!is_ip4 && !is_ip6)
1829 		return CHECKSUM_NONE;
1830 	if (!is_tcp && !is_udp)
1831 		return CHECKSUM_NONE;
1832 	if (!ip_csum_ok)
1833 		return CHECKSUM_NONE;
1834 	if (!tcpudp_csum_ok)
1835 		return CHECKSUM_NONE;
1836 
1837 	return CHECKSUM_UNNECESSARY;
1838 }
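
/* Summary of the decision above as a truth table (derived from the
 * checks, not from hardware documentation):
 *
 *   ip4/ip6  tcp/udp  ip csum ok  l4 csum ok  ->  result
 *   no       -        -           -           ->  CHECKSUM_NONE
 *   yes      no       -           -           ->  CHECKSUM_NONE
 *   yes      yes      no          -           ->  CHECKSUM_NONE
 *   yes      yes      yes         no          ->  CHECKSUM_NONE
 *   yes      yes      yes         yes         ->  CHECKSUM_UNNECESSARY
 */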
1839 
1840 static void ath10k_htt_rx_h_csum_offload(struct ath10k_hw_params *hw,
1841 					 struct sk_buff *msdu)
1842 {
1843 	msdu->ip_summed = ath10k_htt_rx_get_csum_state(hw, msdu);
1844 }
1845 
1846 static u64 ath10k_htt_rx_h_get_pn(struct ath10k *ar, struct sk_buff *skb,
1847 				  enum htt_rx_mpdu_encrypt_type enctype)
1848 {
1849 	struct ieee80211_hdr *hdr;
1850 	u64 pn = 0;
1851 	u8 *ehdr;
1852 
1853 	hdr = (struct ieee80211_hdr *)skb->data;
1854 	ehdr = skb->data + ieee80211_hdrlen(hdr->frame_control);
1855 
1856 	if (enctype == HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2) {
1857 		pn = ehdr[0];
1858 		pn |= (u64)ehdr[1] << 8;
1859 		pn |= (u64)ehdr[4] << 16;
1860 		pn |= (u64)ehdr[5] << 24;
1861 		pn |= (u64)ehdr[6] << 32;
1862 		pn |= (u64)ehdr[7] << 40;
1863 	}
1864 	return pn;
1865 }
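
/* Illustration of the CCMP header bytes consumed above (ehdr points
 * just past the 802.11 header):
 *
 *   ehdr[0] PN0   ehdr[1] PN1   ehdr[2] rsvd  ehdr[3] ExtIV/key id
 *   ehdr[4] PN2   ehdr[5] PN3   ehdr[6] PN4   ehdr[7] PN5
 *
 * e.g. ehdr = { 0x01, 0x02, 0x00, 0x20, 0x03, 0x04, 0x05, 0x06 }
 * assembles to pn = 0x060504030201.
 */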
1866 
1867 static bool ath10k_htt_rx_h_frag_multicast_check(struct ath10k *ar,
1868 						 struct sk_buff *skb)
1869 {
1870 	struct ieee80211_hdr *hdr;
1871 
1872 	hdr = (struct ieee80211_hdr *)skb->data;
1873 	return !is_multicast_ether_addr(hdr->addr1);
1874 }
1875 
1876 static bool ath10k_htt_rx_h_frag_pn_check(struct ath10k *ar,
1877 					  struct sk_buff *skb,
1878 					  u16 peer_id,
1879 					  enum htt_rx_mpdu_encrypt_type enctype)
1880 {
1881 	struct ath10k_peer *peer;
1882 	union htt_rx_pn_t *last_pn, new_pn = {0};
1883 	struct ieee80211_hdr *hdr;
1884 	u8 tid, frag_number;
1885 	u32 seq;
1886 
1887 	peer = ath10k_peer_find_by_id(ar, peer_id);
1888 	if (!peer) {
1889 		ath10k_dbg(ar, ATH10K_DBG_HTT, "invalid peer for frag pn check\n");
1890 		return false;
1891 	}
1892 
1893 	hdr = (struct ieee80211_hdr *)skb->data;
1894 	if (ieee80211_is_data_qos(hdr->frame_control))
1895 		tid = ieee80211_get_tid(hdr);
1896 	else
1897 		tid = ATH10K_TXRX_NON_QOS_TID;
1898 
1899 	last_pn = &peer->frag_tids_last_pn[tid];
1900 	new_pn.pn48 = ath10k_htt_rx_h_get_pn(ar, skb, enctype);
1901 	frag_number = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
1902 	seq = IEEE80211_SEQ_TO_SN(__le16_to_cpu(hdr->seq_ctrl));
1903 
1904 	if (frag_number == 0) {
1905 		last_pn->pn48 = new_pn.pn48;
1906 		peer->frag_tids_seq[tid] = seq;
1907 	} else {
1908 		if (seq != peer->frag_tids_seq[tid])
1909 			return false;
1910 
1911 		if (new_pn.pn48 != last_pn->pn48 + 1)
1912 			return false;
1913 
1914 		last_pn->pn48 = new_pn.pn48;
1915 	}
1916 
1917 	return true;
1918 }
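
/* Example of the rule above for fragments of one MPDU with seq 100:
 *
 *   frag 0, pn 1000  ->  accepted, state := (seq 100, pn 1000)
 *   frag 1, pn 1001  ->  accepted
 *   frag 2, pn 1003  ->  rejected (pn 1002 expected)
 *
 * Any fragment whose sequence number no longer matches the stored one
 * is rejected as well.
 */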
1919 
1920 static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
1921 				 struct sk_buff_head *amsdu,
1922 				 struct ieee80211_rx_status *status,
1923 				 bool fill_crypt_header,
1924 				 u8 *rx_hdr,
1925 				 enum ath10k_pkt_rx_err *err,
1926 				 u16 peer_id,
1927 				 bool frag)
1928 {
1929 	struct sk_buff *first;
1930 	struct sk_buff *last;
1931 	struct sk_buff *msdu, *temp;
1932 	struct ath10k_hw_params *hw = &ar->hw_params;
1933 	struct htt_rx_desc *rxd;
1934 	struct rx_attention *rxd_attention;
1935 	struct rx_mpdu_start *rxd_mpdu_start;
1936 
1937 	struct ieee80211_hdr *hdr;
1938 	enum htt_rx_mpdu_encrypt_type enctype;
1939 	u8 first_hdr[64];
1940 	u8 *qos;
1941 	bool has_fcs_err;
1942 	bool has_crypto_err;
1943 	bool has_tkip_err;
1944 	bool has_peer_idx_invalid;
1945 	bool is_decrypted;
1946 	bool is_mgmt;
1947 	u32 attention;
1948 	bool frag_pn_check = true, multicast_check = true;
1949 
1950 	if (skb_queue_empty(amsdu))
1951 		return;
1952 
1953 	first = skb_peek(amsdu);
1954 	rxd = HTT_RX_BUF_TO_RX_DESC(hw,
1955 				    (void *)first->data - hw->rx_desc_ops->rx_desc_size);
1956 
1957 	rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);
1958 	rxd_mpdu_start = ath10k_htt_rx_desc_get_mpdu_start(hw, rxd);
1959 
1960 	is_mgmt = !!(rxd_attention->flags &
1961 		     __cpu_to_le32(RX_ATTENTION_FLAGS_MGMT_TYPE));
1962 
1963 	enctype = MS(__le32_to_cpu(rxd_mpdu_start->info0),
1964 		     RX_MPDU_START_INFO0_ENCRYPT_TYPE);
1965 
1966 	/* First MSDU's Rx descriptor in an A-MSDU contains full 802.11
1967 	 * decapped header. It'll be used for undecapping of each MSDU.
1968 	 */
1969 	hdr = (void *)ath10k_htt_rx_desc_get_rx_hdr_status(hw, rxd);
1970 	memcpy(first_hdr, hdr, RX_HTT_HDR_STATUS_LEN);
1971 
1972 	if (rx_hdr)
1973 		memcpy(rx_hdr, hdr, RX_HTT_HDR_STATUS_LEN);
1974 
1975 	/* Each A-MSDU subframe will use the original header as the base and be
1976 	 * reported as a separate MSDU so strip the A-MSDU bit from QoS Ctl.
1977 	 */
1978 	hdr = (void *)first_hdr;
1979 
1980 	if (ieee80211_is_data_qos(hdr->frame_control)) {
1981 		qos = ieee80211_get_qos_ctl(hdr);
1982 		qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
1983 	}
1984 
1985 	/* Some attention flags are valid only in the last MSDU. */
1986 	last = skb_peek_tail(amsdu);
1987 	rxd = HTT_RX_BUF_TO_RX_DESC(hw,
1988 				    (void *)last->data - hw->rx_desc_ops->rx_desc_size);
1989 
1990 	rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);
1991 	attention = __le32_to_cpu(rxd_attention->flags);
1992 
1993 	has_fcs_err = !!(attention & RX_ATTENTION_FLAGS_FCS_ERR);
1994 	has_crypto_err = !!(attention & RX_ATTENTION_FLAGS_DECRYPT_ERR);
1995 	has_tkip_err = !!(attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR);
1996 	has_peer_idx_invalid = !!(attention & RX_ATTENTION_FLAGS_PEER_IDX_INVALID);
1997 
1998 	/* Note: If hardware captures an encrypted frame that it can't decrypt,
1999 	 * e.g. due to an FCS error, a missing peer or invalid key data, it
2000 	 * will report the frame as raw.
2001 	 */
2002 	is_decrypted = (enctype != HTT_RX_MPDU_ENCRYPT_NONE &&
2003 			!has_fcs_err &&
2004 			!has_crypto_err &&
2005 			!has_peer_idx_invalid);
2006 
2007 	/* Clear per-MPDU flags while leaving per-PPDU flags intact. */
2008 	status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
2009 			  RX_FLAG_MMIC_ERROR |
2010 			  RX_FLAG_DECRYPTED |
2011 			  RX_FLAG_IV_STRIPPED |
2012 			  RX_FLAG_ONLY_MONITOR |
2013 			  RX_FLAG_MMIC_STRIPPED);
2014 
2015 	if (has_fcs_err)
2016 		status->flag |= RX_FLAG_FAILED_FCS_CRC;
2017 
2018 	if (has_tkip_err)
2019 		status->flag |= RX_FLAG_MMIC_ERROR;
2020 
2021 	if (err) {
2022 		if (has_fcs_err)
2023 			*err = ATH10K_PKT_RX_ERR_FCS;
2024 		else if (has_tkip_err)
2025 			*err = ATH10K_PKT_RX_ERR_TKIP;
2026 		else if (has_crypto_err)
2027 			*err = ATH10K_PKT_RX_ERR_CRYPT;
2028 		else if (has_peer_idx_invalid)
2029 			*err = ATH10K_PKT_RX_ERR_PEER_IDX_INVAL;
2030 	}
2031 
2032 	/* Firmware reports all necessary management frames via WMI already.
2033 	 * They are not reported to monitor interfaces at all so pass the ones
2034 	 * coming via HTT to monitor interfaces instead. This simplifies
2035 	 * matters a lot.
2036 	 */
2037 	if (is_mgmt)
2038 		status->flag |= RX_FLAG_ONLY_MONITOR;
2039 
2040 	if (is_decrypted) {
2041 		status->flag |= RX_FLAG_DECRYPTED;
2042 
2043 		if (likely(!is_mgmt))
2044 			status->flag |= RX_FLAG_MMIC_STRIPPED;
2045 
2046 		if (fill_crypt_header)
2047 			status->flag |= RX_FLAG_MIC_STRIPPED |
2048 					RX_FLAG_ICV_STRIPPED;
2049 		else
2050 			status->flag |= RX_FLAG_IV_STRIPPED;
2051 	}
2052 
2053 	skb_queue_walk(amsdu, msdu) {
2054 		if (frag && !fill_crypt_header && is_decrypted &&
2055 		    enctype == HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2)
2056 			frag_pn_check = ath10k_htt_rx_h_frag_pn_check(ar,
2057 								      msdu,
2058 								      peer_id,
2059 								      enctype);
2060 
2061 		if (frag)
2062 			multicast_check = ath10k_htt_rx_h_frag_multicast_check(ar,
2063 									       msdu);
2064 
2065 		if (!frag_pn_check || !multicast_check) {
2066 			/* Discard the fragment with invalid PN or multicast DA
2067 			 */
2068 			temp = msdu->prev;
2069 			__skb_unlink(msdu, amsdu);
2070 			dev_kfree_skb_any(msdu);
2071 			msdu = temp;
2072 			frag_pn_check = true;
2073 			multicast_check = true;
2074 			continue;
2075 		}
2076 
2077 		ath10k_htt_rx_h_csum_offload(&ar->hw_params, msdu);
2078 
2079 		if (frag && !fill_crypt_header &&
2080 		    enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
2081 			status->flag &= ~RX_FLAG_MMIC_STRIPPED;
2082 
2083 		ath10k_htt_rx_h_undecap(ar, msdu, status, first_hdr, enctype,
2084 					is_decrypted);
2085 
2086 		/* Undecapping involves copying the original 802.11 header back
2087 		 * to sk_buff. If frame is protected and hardware has decrypted
2088 		 * it then remove the protected bit.
2089 		 */
2090 		if (!is_decrypted)
2091 			continue;
2092 		if (is_mgmt)
2093 			continue;
2094 
2095 		if (fill_crypt_header)
2096 			continue;
2097 
2098 		hdr = (void *)msdu->data;
2099 		hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
2100 
2101 		if (frag && !fill_crypt_header &&
2102 		    enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
2103 			status->flag &= ~RX_FLAG_IV_STRIPPED &
2104 					~RX_FLAG_MMIC_STRIPPED;
2105 	}
2106 }
2107 
2108 static void ath10k_htt_rx_h_enqueue(struct ath10k *ar,
2109 				    struct sk_buff_head *amsdu,
2110 				    struct ieee80211_rx_status *status)
2111 {
2112 	struct sk_buff *msdu;
2113 	struct sk_buff *first_subframe;
2114 
2115 	first_subframe = skb_peek(amsdu);
2116 
2117 	while ((msdu = __skb_dequeue(amsdu))) {
2118 		/* Setup per-MSDU flags */
2119 		if (skb_queue_empty(amsdu))
2120 			status->flag &= ~RX_FLAG_AMSDU_MORE;
2121 		else
2122 			status->flag |= RX_FLAG_AMSDU_MORE;
2123 
2124 		if (msdu == first_subframe) {
2125 			first_subframe = NULL;
2126 			status->flag &= ~RX_FLAG_ALLOW_SAME_PN;
2127 		} else {
2128 			status->flag |= RX_FLAG_ALLOW_SAME_PN;
2129 		}
2130 
2131 		ath10k_htt_rx_h_queue_msdu(ar, status, msdu);
2132 	}
2133 }
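
/* Illustration: for a 3-subframe A-MSDU the loop above emits
 *
 *   msdu 1: AMSDU_MORE set,   ALLOW_SAME_PN clear (first subframe)
 *   msdu 2: AMSDU_MORE set,   ALLOW_SAME_PN set
 *   msdu 3: AMSDU_MORE clear, ALLOW_SAME_PN set
 *
 * i.e. AMSDU_MORE is cleared only on the last subframe and a repeated
 * PN is tolerated on every subframe except the first.
 */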
2134 
2135 static int ath10k_unchain_msdu(struct sk_buff_head *amsdu,
2136 			       unsigned long *unchain_cnt)
2137 {
2138 	struct sk_buff *skb, *first;
2139 	int space;
2140 	int total_len = 0;
2141 	int amsdu_len = skb_queue_len(amsdu);
2142 
2143 	/* TODO: We might be able to optimize this by using
2144 	 * skb_try_coalesce or a similar method to
2145 	 * decrease copying, or maybe get mac80211 to
2146 	 * provide a way to just receive a list of
2147 	 * skbs?
2148 	 */
2149 
2150 	first = __skb_dequeue(amsdu);
2151 
2152 	/* Allocate total length all at once. */
2153 	skb_queue_walk(amsdu, skb)
2154 		total_len += skb->len;
2155 
2156 	space = total_len - skb_tailroom(first);
2157 	if ((space > 0) &&
2158 	    (pskb_expand_head(first, 0, space, GFP_ATOMIC) < 0)) {
2159 		/* TODO:  bump some rx-oom error stat */
2160 		/* put it back together so we can free the
2161 		 * whole list at once.
2162 		 */
2163 		__skb_queue_head(amsdu, first);
2164 		return -1;
2165 	}
2166 
2167 	/* Walk list again, copying contents into
2168 	 * msdu_head
2169 	 */
2170 	while ((skb = __skb_dequeue(amsdu))) {
2171 		skb_copy_from_linear_data(skb, skb_put(first, skb->len),
2172 					  skb->len);
2173 		dev_kfree_skb_any(skb);
2174 	}
2175 
2176 	__skb_queue_head(amsdu, first);
2177 
2178 	*unchain_cnt += amsdu_len - 1;
2179 
2180 	return 0;
2181 }
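
/* A sizing example for the coalescing above: with a chain of three
 * buffers of 1600/1600/400 bytes the head skb must absorb the
 * remaining 2000 bytes, so
 *
 *   space = (1600 + 400) - skb_tailroom(first)
 *
 * and pskb_expand_head() runs only when the existing tailroom is
 * insufficient (space > 0).
 */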
2182 
2183 static void ath10k_htt_rx_h_unchain(struct ath10k *ar,
2184 				    struct sk_buff_head *amsdu,
2185 				    unsigned long *drop_cnt,
2186 				    unsigned long *unchain_cnt)
2187 {
2188 	struct sk_buff *first;
2189 	struct ath10k_hw_params *hw = &ar->hw_params;
2190 	struct htt_rx_desc *rxd;
2191 	struct rx_msdu_start_common *rxd_msdu_start_common;
2192 	struct rx_frag_info_common *rxd_frag_info;
2193 	enum rx_msdu_decap_format decap;
2194 
2195 	first = skb_peek(amsdu);
2196 	rxd = HTT_RX_BUF_TO_RX_DESC(hw,
2197 				    (void *)first->data - hw->rx_desc_ops->rx_desc_size);
2198 
2199 	rxd_msdu_start_common = ath10k_htt_rx_desc_get_msdu_start(hw, rxd);
2200 	rxd_frag_info = ath10k_htt_rx_desc_get_frag_info(hw, rxd);
2201 	decap = MS(__le32_to_cpu(rxd_msdu_start_common->info1),
2202 		   RX_MSDU_START_INFO1_DECAP_FORMAT);
2203 
2204 	/* FIXME: Current unchaining logic can only handle simple case of raw
2205 	 * msdu chaining. If decapping is other than raw the chaining may be
2206 	 * more complex and this isn't handled by the current code. Don't even
2207 	 * try re-constructing such frames - it'll be pretty much garbage.
2208 	 */
2209 	if (decap != RX_MSDU_DECAP_RAW ||
2210 	    skb_queue_len(amsdu) != 1 + rxd_frag_info->ring2_more_count) {
2211 		*drop_cnt += skb_queue_len(amsdu);
2212 		__skb_queue_purge(amsdu);
2213 		return;
2214 	}
2215 
2216 	ath10k_unchain_msdu(amsdu, unchain_cnt);
2217 }
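
/* Note on the check above: a chained MPDU is expected to arrive as
 * exactly 1 + ring2_more_count buffers, so any other queue length
 * suggests an incomplete or mixed-up chain and the frame is dropped
 * rather than reassembled.
 */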
2218 
2219 static bool ath10k_htt_rx_validate_amsdu(struct ath10k *ar,
2220 					 struct sk_buff_head *amsdu)
2221 {
2222 	u8 *subframe_hdr;
2223 	struct sk_buff *first;
2224 	bool is_first, is_last;
2225 	struct ath10k_hw_params *hw = &ar->hw_params;
2226 	struct htt_rx_desc *rxd;
2227 	struct rx_msdu_end_common *rxd_msdu_end_common;
2228 	struct rx_mpdu_start *rxd_mpdu_start;
2229 	struct ieee80211_hdr *hdr;
2230 	size_t hdr_len, crypto_len;
2231 	enum htt_rx_mpdu_encrypt_type enctype;
2232 	int bytes_aligned = ar->hw_params.decap_align_bytes;
2233 
2234 	first = skb_peek(amsdu);
2235 
2236 	rxd = HTT_RX_BUF_TO_RX_DESC(hw,
2237 				    (void *)first->data - hw->rx_desc_ops->rx_desc_size);
2238 
2239 	rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
2240 	rxd_mpdu_start = ath10k_htt_rx_desc_get_mpdu_start(hw, rxd);
2241 	hdr = (void *)ath10k_htt_rx_desc_get_rx_hdr_status(hw, rxd);
2242 
2243 	is_first = !!(rxd_msdu_end_common->info0 &
2244 		      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
2245 	is_last = !!(rxd_msdu_end_common->info0 &
2246 		     __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
2247 
2248 	/* Return in case of non-aggregated msdu */
2249 	if (is_first && is_last)
2250 		return true;
2251 
2252 	/* First msdu flag is not set for the first msdu of the list */
2253 	if (!is_first)
2254 		return false;
2255 
2256 	enctype = MS(__le32_to_cpu(rxd_mpdu_start->info0),
2257 		     RX_MPDU_START_INFO0_ENCRYPT_TYPE);
2258 
2259 	hdr_len = ieee80211_hdrlen(hdr->frame_control);
2260 	crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);
2261 
2262 	subframe_hdr = (u8 *)hdr + round_up(hdr_len, bytes_aligned) +
2263 		       crypto_len;
2264 
2265 	/* Validate that the A-MSDU has a proper first subframe.
2266 	 * A single MSDU may be received as an A-MSDU when the
2267 	 * unauthenticated A-MSDU flag of a QoS header
2268 	 * gets flipped in non-SPP A-MSDUs; in such cases the first
2269 	 * subframe contains an llc/snap header in place of a valid DA.
2270 	 * Return false if the DA matches the rfc1042 pattern.
2271 	 */
2272 	if (ether_addr_equal(subframe_hdr, rfc1042_header))
2273 		return false;
2274 
2275 	return true;
2276 }
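
/* rfc1042_header is the 6-byte LLC/SNAP prefix aa:aa:03:00:00:00, so
 * the check above rejects a "first subframe" whose DA field actually
 * begins with an llc/snap header.
 */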
2277 
2278 static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar,
2279 					struct sk_buff_head *amsdu,
2280 					struct ieee80211_rx_status *rx_status)
2281 {
2282 	if (!rx_status->freq) {
2283 		ath10k_dbg(ar, ATH10K_DBG_HTT, "no channel configured; ignoring frame(s)!\n");
2284 		return false;
2285 	}
2286 
2287 	if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) {
2288 		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx cac running\n");
2289 		return false;
2290 	}
2291 
2292 	if (!ath10k_htt_rx_validate_amsdu(ar, amsdu)) {
2293 		ath10k_dbg(ar, ATH10K_DBG_HTT, "invalid amsdu received\n");
2294 		return false;
2295 	}
2296 
2297 	return true;
2298 }
2299 
2300 static void ath10k_htt_rx_h_filter(struct ath10k *ar,
2301 				   struct sk_buff_head *amsdu,
2302 				   struct ieee80211_rx_status *rx_status,
2303 				   unsigned long *drop_cnt)
2304 {
2305 	if (skb_queue_empty(amsdu))
2306 		return;
2307 
2308 	if (ath10k_htt_rx_amsdu_allowed(ar, amsdu, rx_status))
2309 		return;
2310 
2311 	if (drop_cnt)
2312 		*drop_cnt += skb_queue_len(amsdu);
2313 
2314 	__skb_queue_purge(amsdu);
2315 }
2316 
2317 static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
2318 {
2319 	struct ath10k *ar = htt->ar;
2320 	struct ieee80211_rx_status *rx_status = &htt->rx_status;
2321 	struct sk_buff_head amsdu;
2322 	int ret;
2323 	unsigned long drop_cnt = 0;
2324 	unsigned long unchain_cnt = 0;
2325 	unsigned long drop_cnt_filter = 0;
2326 	unsigned long msdus_to_queue, num_msdus;
2327 	enum ath10k_pkt_rx_err err = ATH10K_PKT_RX_ERR_MAX;
2328 	u8 first_hdr[RX_HTT_HDR_STATUS_LEN];
2329 
2330 	__skb_queue_head_init(&amsdu);
2331 
2332 	spin_lock_bh(&htt->rx_ring.lock);
2333 	if (htt->rx_confused) {
2334 		spin_unlock_bh(&htt->rx_ring.lock);
2335 		return -EIO;
2336 	}
2337 	ret = ath10k_htt_rx_amsdu_pop(htt, &amsdu);
2338 	spin_unlock_bh(&htt->rx_ring.lock);
2339 
2340 	if (ret < 0) {
2341 		ath10k_warn(ar, "rx ring became corrupted: %d\n", ret);
2342 		__skb_queue_purge(&amsdu);
2343 		/* FIXME: It's probably a good idea to reboot the
2344 		 * device instead of leaving it inoperable.
2345 		 */
2346 		htt->rx_confused = true;
2347 		return ret;
2348 	}
2349 
2350 	num_msdus = skb_queue_len(&amsdu);
2351 
2352 	ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);
2353 
2354 	/* ret > 0 indicates chained msdus */
2355 	if (ret > 0)
2356 		ath10k_htt_rx_h_unchain(ar, &amsdu, &drop_cnt, &unchain_cnt);
2357 
2358 	ath10k_htt_rx_h_filter(ar, &amsdu, rx_status, &drop_cnt_filter);
2359 	ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status, true, first_hdr, &err, 0,
2360 			     false);
2361 	msdus_to_queue = skb_queue_len(&amsdu);
2362 	ath10k_htt_rx_h_enqueue(ar, &amsdu, rx_status);
2363 
2364 	ath10k_sta_update_rx_tid_stats(ar, first_hdr, num_msdus, err,
2365 				       unchain_cnt, drop_cnt, drop_cnt_filter,
2366 				       msdus_to_queue);
2367 
2368 	return 0;
2369 }
2370 
2371 static void ath10k_htt_rx_mpdu_desc_pn_hl(struct htt_hl_rx_desc *rx_desc,
2372 					  union htt_rx_pn_t *pn,
2373 					  int pn_len_bits)
2374 {
2375 	switch (pn_len_bits) {
2376 	case 48:
2377 		pn->pn48 = __le32_to_cpu(rx_desc->pn_31_0) +
2378 			   ((u64)(__le32_to_cpu(rx_desc->u0.pn_63_32) & 0xFFFF) << 32);
2379 		break;
2380 	case 24:
2381 		pn->pn24 = __le32_to_cpu(rx_desc->pn_31_0);
2382 		break;
2383 	}
2384 }
2385 
2386 static bool ath10k_htt_rx_pn_cmp48(union htt_rx_pn_t *new_pn,
2387 				   union htt_rx_pn_t *old_pn)
2388 {
2389 	return ((new_pn->pn48 & 0xffffffffffffULL) <=
2390 		(old_pn->pn48 & 0xffffffffffffULL));
2391 }
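
/* Example: both PNs are masked to 48 bits before comparing, so
 * new_pn = 0x1000000000000 (bit 48 set) masks to 0 and compares as a
 * replay against any non-zero old_pn; a 48-bit PN wrap is thus treated
 * as invalid rather than as a restart.
 */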
2392 
2393 static bool ath10k_htt_rx_pn_check_replay_hl(struct ath10k *ar,
2394 					     struct ath10k_peer *peer,
2395 					     struct htt_rx_indication_hl *rx)
2396 {
2397 	bool last_pn_valid, pn_invalid = false;
2398 	enum htt_txrx_sec_cast_type sec_index;
2399 	enum htt_security_types sec_type;
2400 	union htt_rx_pn_t new_pn = {0};
2401 	struct htt_hl_rx_desc *rx_desc;
2402 	union htt_rx_pn_t *last_pn;
2403 	u32 rx_desc_info, tid;
2404 	int num_mpdu_ranges;
2405 
2406 	lockdep_assert_held(&ar->data_lock);
2407 
2408 	if (!peer)
2409 		return false;
2410 
2411 	if (!(rx->fw_desc.flags & FW_RX_DESC_FLAGS_FIRST_MSDU))
2412 		return false;
2413 
2414 	num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
2415 			     HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
2416 
2417 	rx_desc = (struct htt_hl_rx_desc *)&rx->mpdu_ranges[num_mpdu_ranges];
2418 	rx_desc_info = __le32_to_cpu(rx_desc->info);
2419 
2420 	if (!MS(rx_desc_info, HTT_RX_DESC_HL_INFO_ENCRYPTED))
2421 		return false;
2422 
2423 	tid = MS(rx->hdr.info0, HTT_RX_INDICATION_INFO0_EXT_TID);
2424 	last_pn_valid = peer->tids_last_pn_valid[tid];
2425 	last_pn = &peer->tids_last_pn[tid];
2426 
2427 	if (MS(rx_desc_info, HTT_RX_DESC_HL_INFO_MCAST_BCAST))
2428 		sec_index = HTT_TXRX_SEC_MCAST;
2429 	else
2430 		sec_index = HTT_TXRX_SEC_UCAST;
2431 
2432 	sec_type = peer->rx_pn[sec_index].sec_type;
2433 	ath10k_htt_rx_mpdu_desc_pn_hl(rx_desc, &new_pn, peer->rx_pn[sec_index].pn_len);
2434 
2435 	if (sec_type != HTT_SECURITY_AES_CCMP &&
2436 	    sec_type != HTT_SECURITY_TKIP &&
2437 	    sec_type != HTT_SECURITY_TKIP_NOMIC)
2438 		return false;
2439 
2440 	if (last_pn_valid)
2441 		pn_invalid = ath10k_htt_rx_pn_cmp48(&new_pn, last_pn);
2442 	else
2443 		peer->tids_last_pn_valid[tid] = true;
2444 
2445 	if (!pn_invalid)
2446 		last_pn->pn48 = new_pn.pn48;
2447 
2448 	return pn_invalid;
2449 }
2450 
2451 static bool ath10k_htt_rx_proc_rx_ind_hl(struct ath10k_htt *htt,
2452 					 struct htt_rx_indication_hl *rx,
2453 					 struct sk_buff *skb,
2454 					 enum htt_rx_pn_check_type check_pn_type,
2455 					 enum htt_rx_tkip_demic_type tkip_mic_type)
2456 {
2457 	struct ath10k *ar = htt->ar;
2458 	struct ath10k_peer *peer;
2459 	struct htt_rx_indication_mpdu_range *mpdu_ranges;
2460 	struct fw_rx_desc_hl *fw_desc;
2461 	enum htt_txrx_sec_cast_type sec_index;
2462 	enum htt_security_types sec_type;
2463 	union htt_rx_pn_t new_pn = {0};
2464 	struct htt_hl_rx_desc *rx_desc;
2465 	struct ieee80211_hdr *hdr;
2466 	struct ieee80211_rx_status *rx_status;
2467 	u16 peer_id;
2468 	u8 rx_desc_len;
2469 	int num_mpdu_ranges;
2470 	size_t tot_hdr_len;
2471 	struct ieee80211_channel *ch;
2472 	bool pn_invalid, qos, first_msdu;
2473 	u32 tid, rx_desc_info;
2474 
2475 	peer_id = __le16_to_cpu(rx->hdr.peer_id);
2476 	tid = MS(rx->hdr.info0, HTT_RX_INDICATION_INFO0_EXT_TID);
2477 
2478 	spin_lock_bh(&ar->data_lock);
2479 	peer = ath10k_peer_find_by_id(ar, peer_id);
2480 	spin_unlock_bh(&ar->data_lock);
2481 	if (!peer && peer_id != HTT_INVALID_PEERID)
2482 		ath10k_warn(ar, "Got RX ind from invalid peer: %u\n", peer_id);
2483 
2484 	if (!peer)
2485 		return true;
2486 
2487 	num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
2488 			     HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
2489 	mpdu_ranges = htt_rx_ind_get_mpdu_ranges_hl(rx);
2490 	fw_desc = &rx->fw_desc;
2491 	rx_desc_len = fw_desc->len;
2492 
2493 	if (fw_desc->u.bits.discard) {
2494 		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt discard mpdu\n");
2495 		goto err;
2496 	}
2497 
2498 	/* I have not yet seen any case where num_mpdu_ranges > 1.
2499 	 * qcacld does not seem to handle that case either, so we introduce the
2500 	 * same limitation here as well.
2501 	 */
2502 	if (num_mpdu_ranges > 1)
2503 		ath10k_warn(ar,
2504 			    "Unsupported number of MPDU ranges: %d, ignoring all but the first\n",
2505 			    num_mpdu_ranges);
2506 
2507 	if (mpdu_ranges->mpdu_range_status !=
2508 	    HTT_RX_IND_MPDU_STATUS_OK &&
2509 	    mpdu_ranges->mpdu_range_status !=
2510 	    HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR) {
2511 		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt mpdu_range_status %d\n",
2512 			   mpdu_ranges->mpdu_range_status);
2513 		goto err;
2514 	}
2515 
2516 	rx_desc = (struct htt_hl_rx_desc *)&rx->mpdu_ranges[num_mpdu_ranges];
2517 	rx_desc_info = __le32_to_cpu(rx_desc->info);
2518 
2519 	if (MS(rx_desc_info, HTT_RX_DESC_HL_INFO_MCAST_BCAST))
2520 		sec_index = HTT_TXRX_SEC_MCAST;
2521 	else
2522 		sec_index = HTT_TXRX_SEC_UCAST;
2523 
2524 	sec_type = peer->rx_pn[sec_index].sec_type;
2525 	first_msdu = rx->fw_desc.flags & FW_RX_DESC_FLAGS_FIRST_MSDU;
2526 
2527 	ath10k_htt_rx_mpdu_desc_pn_hl(rx_desc, &new_pn, peer->rx_pn[sec_index].pn_len);
2528 
2529 	if (check_pn_type == HTT_RX_PN_CHECK && tid >= IEEE80211_NUM_TIDS) {
2530 		spin_lock_bh(&ar->data_lock);
2531 		pn_invalid = ath10k_htt_rx_pn_check_replay_hl(ar, peer, rx);
2532 		spin_unlock_bh(&ar->data_lock);
2533 
2534 		if (pn_invalid)
2535 			goto err;
2536 	}
2537 
2538 	/* Strip off all headers before the MAC header before delivery to
2539 	 * mac80211
2540 	 */
2541 	tot_hdr_len = sizeof(struct htt_resp_hdr) + sizeof(rx->hdr) +
2542 		      sizeof(rx->ppdu) + sizeof(rx->prefix) +
2543 		      sizeof(rx->fw_desc) +
2544 		      sizeof(*mpdu_ranges) * num_mpdu_ranges + rx_desc_len;
2545 
2546 	skb_pull(skb, tot_hdr_len);
2547 
2548 	hdr = (struct ieee80211_hdr *)skb->data;
2549 	qos = ieee80211_is_data_qos(hdr->frame_control);
2550 
2551 	rx_status = IEEE80211_SKB_RXCB(skb);
2552 	memset(rx_status, 0, sizeof(*rx_status));
2553 
2554 	if (rx->ppdu.combined_rssi == 0) {
2555 		/* SDIO firmware does not provide signal */
2556 		rx_status->signal = 0;
2557 		rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
2558 	} else {
2559 		rx_status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
2560 			rx->ppdu.combined_rssi;
2561 		rx_status->flag &= ~RX_FLAG_NO_SIGNAL_VAL;
2562 	}
2563 
2564 	spin_lock_bh(&ar->data_lock);
2565 	ch = ar->scan_channel;
2566 	if (!ch)
2567 		ch = ar->rx_channel;
2568 	if (!ch)
2569 		ch = ath10k_htt_rx_h_any_channel(ar);
2570 	if (!ch)
2571 		ch = ar->tgt_oper_chan;
2572 	spin_unlock_bh(&ar->data_lock);
2573 
2574 	if (ch) {
2575 		rx_status->band = ch->band;
2576 		rx_status->freq = ch->center_freq;
2577 	}
2578 	if (rx->fw_desc.flags & FW_RX_DESC_FLAGS_LAST_MSDU)
2579 		rx_status->flag &= ~RX_FLAG_AMSDU_MORE;
2580 	else
2581 		rx_status->flag |= RX_FLAG_AMSDU_MORE;
2582 
2583 	/* Not entirely sure about this, but all frames from the chipset have
2584 	 * the protected flag set even though they have already been decrypted.
2585 	 * Clearing this flag is necessary in order for mac80211 not to drop
2586 	 * the frame.
2587 	 * TODO: Verify this is always the case or find out a way to check
2588 	 * if there has been hw decryption.
2589 	 */
2590 	if (ieee80211_has_protected(hdr->frame_control)) {
2591 		hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
2592 		rx_status->flag |= RX_FLAG_DECRYPTED |
2593 				   RX_FLAG_IV_STRIPPED |
2594 				   RX_FLAG_MMIC_STRIPPED;
2595 
2596 		if (tid < IEEE80211_NUM_TIDS &&
2597 		    first_msdu &&
2598 		    check_pn_type == HTT_RX_PN_CHECK &&
2599 		   (sec_type == HTT_SECURITY_AES_CCMP ||
2600 		    sec_type == HTT_SECURITY_TKIP ||
2601 		    sec_type == HTT_SECURITY_TKIP_NOMIC)) {
2602 			u8 offset, *ivp, i;
2603 			s8 keyidx = 0;
2604 			__le64 pn48 = cpu_to_le64(new_pn.pn48);
2605 
2606 			hdr = (struct ieee80211_hdr *)skb->data;
2607 			offset = ieee80211_hdrlen(hdr->frame_control);
2608 			hdr->frame_control |= __cpu_to_le16(IEEE80211_FCTL_PROTECTED);
2609 			rx_status->flag &= ~RX_FLAG_IV_STRIPPED;
2610 
2611 			memmove(skb->data - IEEE80211_CCMP_HDR_LEN,
2612 				skb->data, offset);
2613 			skb_push(skb, IEEE80211_CCMP_HDR_LEN);
2614 			ivp = skb->data + offset;
2615 			memset(skb->data + offset, 0, IEEE80211_CCMP_HDR_LEN);
2616 			/* Ext IV */
2617 			ivp[IEEE80211_WEP_IV_LEN - 1] |= ATH10K_IEEE80211_EXTIV;
2618 
2619 			for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
2620 				if (peer->keys[i] &&
2621 				    peer->keys[i]->flags & IEEE80211_KEY_FLAG_PAIRWISE)
2622 					keyidx = peer->keys[i]->keyidx;
2623 			}
2624 
2625 			/* Key ID */
2626 			ivp[IEEE80211_WEP_IV_LEN - 1] |= keyidx << 6;
2627 
2628 			if (sec_type == HTT_SECURITY_AES_CCMP) {
2629 				rx_status->flag |= RX_FLAG_MIC_STRIPPED;
2630 				/* pn 0, pn 1 */
2631 				memcpy(skb->data + offset, &pn48, 2);
2632 				/* pn 2, pn 3, pn 4, pn 5 */
2633 				memcpy(skb->data + offset + 4, ((u8 *)&pn48) + 2, 4);
2634 			} else {
2635 				rx_status->flag |= RX_FLAG_ICV_STRIPPED;
2636 				/* TSC 0 */
2637 				memcpy(skb->data + offset + 2, &pn48, 1);
2638 				/* TSC 1 */
2639 				memcpy(skb->data + offset, ((u8 *)&pn48) + 1, 1);
2640 				/* TSC 2 , TSC 3 , TSC 4 , TSC 5*/
2641 				memcpy(skb->data + offset + 4, ((u8 *)&pn48) + 2, 4);
2642 			}
2643 		}
2644 	}
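
	/* Illustration of the CCMP header rebuilt above for
	 * pn48 = 0x060504030201 (stored little endian as
	 * 01 02 03 04 05 06 00 00):
	 *
	 *   ivp[0]=01 (PN0)  ivp[1]=02 (PN1)  ivp[2]=00 (rsvd)
	 *   ivp[3]=ExtIV | keyidx << 6
	 *   ivp[4]=03 (PN2)  ivp[5]=04 (PN3)  ivp[6]=05 (PN4)  ivp[7]=06 (PN5)
	 */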
2645 
2646 	if (tkip_mic_type == HTT_RX_TKIP_MIC)
2647 		rx_status->flag &= ~RX_FLAG_IV_STRIPPED &
2648 				   ~RX_FLAG_MMIC_STRIPPED;
2649 
2650 	if (mpdu_ranges->mpdu_range_status == HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR)
2651 		rx_status->flag |= RX_FLAG_MMIC_ERROR;
2652 
2653 	if (!qos && tid < IEEE80211_NUM_TIDS) {
2654 		u8 offset;
2655 		__le16 qos_ctrl = 0;
2656 
2657 		hdr = (struct ieee80211_hdr *)skb->data;
2658 		offset = ieee80211_hdrlen(hdr->frame_control);
2659 
2660 		hdr->frame_control |= cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
2661 		memmove(skb->data - IEEE80211_QOS_CTL_LEN, skb->data, offset);
2662 		skb_push(skb, IEEE80211_QOS_CTL_LEN);
2663 		qos_ctrl = cpu_to_le16(tid);
2664 		memcpy(skb->data + offset, &qos_ctrl, IEEE80211_QOS_CTL_LEN);
2665 	}
2666 
2667 	if (ar->napi.dev)
2668 		ieee80211_rx_napi(ar->hw, NULL, skb, &ar->napi);
2669 	else
2670 		ieee80211_rx_ni(ar->hw, skb);
2671 
2672 	/* We have delivered the skb to the upper layers (mac80211) so we
2673 	 * must not free it.
2674 	 */
2675 	return false;
2676 err:
2677 	/* Tell the caller that it must free the skb since we have not
2678 	 * consumed it
2679 	 */
2680 	return true;
2681 }
2682 
2683 static int ath10k_htt_rx_frag_tkip_decap_nomic(struct sk_buff *skb,
2684 					       u16 head_len,
2685 					       u16 hdr_len)
2686 {
2687 	u8 *ivp, *orig_hdr;
2688 
2689 	orig_hdr = skb->data;
2690 	ivp = orig_hdr + hdr_len + head_len;
2691 
2692 	/* the ExtIV bit is always set to 1 for TKIP */
2693 	if (!(ivp[IEEE80211_WEP_IV_LEN - 1] & ATH10K_IEEE80211_EXTIV))
2694 		return -EINVAL;
2695 
2696 	memmove(orig_hdr + IEEE80211_TKIP_IV_LEN, orig_hdr, head_len + hdr_len);
2697 	skb_pull(skb, IEEE80211_TKIP_IV_LEN);
2698 	skb_trim(skb, skb->len - ATH10K_IEEE80211_TKIP_MICLEN);
2699 	return 0;
2700 }
2701 
2702 static int ath10k_htt_rx_frag_tkip_decap_withmic(struct sk_buff *skb,
2703 						 u16 head_len,
2704 						 u16 hdr_len)
2705 {
2706 	u8 *ivp, *orig_hdr;
2707 
2708 	orig_hdr = skb->data;
2709 	ivp = orig_hdr + hdr_len + head_len;
2710 
2711 	/* the ExtIV bit is always set to 1 for TKIP */
2712 	if (!(ivp[IEEE80211_WEP_IV_LEN - 1] & ATH10K_IEEE80211_EXTIV))
2713 		return -EINVAL;
2714 
2715 	memmove(orig_hdr + IEEE80211_TKIP_IV_LEN, orig_hdr, head_len + hdr_len);
2716 	skb_pull(skb, IEEE80211_TKIP_IV_LEN);
2717 	skb_trim(skb, skb->len - IEEE80211_TKIP_ICV_LEN);
2718 	return 0;
2719 }
2720 
2721 static int ath10k_htt_rx_frag_ccmp_decap(struct sk_buff *skb,
2722 					 u16 head_len,
2723 					 u16 hdr_len)
2724 {
2725 	u8 *ivp, *orig_hdr;
2726 
2727 	orig_hdr = skb->data;
2728 	ivp = orig_hdr + hdr_len + head_len;
2729 
2730 	/* the ExtIV bit is always set to 1 for CCMP */
2731 	if (!(ivp[IEEE80211_WEP_IV_LEN - 1] & ATH10K_IEEE80211_EXTIV))
2732 		return -EINVAL;
2733 
2734 	skb_trim(skb, skb->len - IEEE80211_CCMP_MIC_LEN);
2735 	memmove(orig_hdr + IEEE80211_CCMP_HDR_LEN, orig_hdr, head_len + hdr_len);
2736 	skb_pull(skb, IEEE80211_CCMP_HDR_LEN);
2737 	return 0;
2738 }
2739 
2740 static int ath10k_htt_rx_frag_wep_decap(struct sk_buff *skb,
2741 					u16 head_len,
2742 					u16 hdr_len)
2743 {
2744 	u8 *orig_hdr;
2745 
2746 	orig_hdr = skb->data;
2747 
2748 	memmove(orig_hdr + IEEE80211_WEP_IV_LEN,
2749 		orig_hdr, head_len + hdr_len);
2750 	skb_pull(skb, IEEE80211_WEP_IV_LEN);
2751 	skb_trim(skb, skb->len - IEEE80211_WEP_ICV_LEN);
2752 	return 0;
2753 }
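
/* A sketch of what the four fragment decap helpers above do, derived
 * from their skb operations (header bytes moved up over the security
 * header, trailer trimmed):
 *
 *   helper             head strip       tail trim
 *   tkip (with MIC)    8 (TKIP IV)      4 (ICV)
 *   tkip (no MIC)      8 (TKIP IV)      8 (MIC)
 *   ccmp               8 (CCMP hdr)     8 (CCMP MIC)
 *   wep                4 (WEP IV)       4 (WEP ICV)
 */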
2754 
2755 static bool ath10k_htt_rx_proc_rx_frag_ind_hl(struct ath10k_htt *htt,
2756 					      struct htt_rx_fragment_indication *rx,
2757 					      struct sk_buff *skb)
2758 {
2759 	struct ath10k *ar = htt->ar;
2760 	enum htt_rx_tkip_demic_type tkip_mic = HTT_RX_NON_TKIP_MIC;
2761 	enum htt_txrx_sec_cast_type sec_index;
2762 	struct htt_rx_indication_hl *rx_hl;
2763 	enum htt_security_types sec_type;
2764 	u32 tid, frag, seq, rx_desc_info;
2765 	union htt_rx_pn_t new_pn = {0};
2766 	struct htt_hl_rx_desc *rx_desc;
2767 	u16 peer_id, sc, hdr_space;
2768 	union htt_rx_pn_t *last_pn;
2769 	struct ieee80211_hdr *hdr;
2770 	int ret, num_mpdu_ranges;
2771 	struct ath10k_peer *peer;
2772 	struct htt_resp *resp;
2773 	size_t tot_hdr_len;
2774 
2775 	resp = (struct htt_resp *)(skb->data + HTT_RX_FRAG_IND_INFO0_HEADER_LEN);
2776 	skb_pull(skb, HTT_RX_FRAG_IND_INFO0_HEADER_LEN);
2777 	skb_trim(skb, skb->len - FCS_LEN);
2778 
2779 	peer_id = __le16_to_cpu(rx->peer_id);
2780 	rx_hl = (struct htt_rx_indication_hl *)(&resp->rx_ind_hl);
2781 
2782 	spin_lock_bh(&ar->data_lock);
2783 	peer = ath10k_peer_find_by_id(ar, peer_id);
2784 	if (!peer) {
2785 		ath10k_dbg(ar, ATH10K_DBG_HTT, "invalid peer: %u\n", peer_id);
2786 		goto err;
2787 	}
2788 
2789 	num_mpdu_ranges = MS(__le32_to_cpu(rx_hl->hdr.info1),
2790 			     HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
2791 
2792 	tot_hdr_len = sizeof(struct htt_resp_hdr) +
2793 		      sizeof(rx_hl->hdr) +
2794 		      sizeof(rx_hl->ppdu) +
2795 		      sizeof(rx_hl->prefix) +
2796 		      sizeof(rx_hl->fw_desc) +
2797 		      sizeof(struct htt_rx_indication_mpdu_range) * num_mpdu_ranges;
2798 
2799 	tid = MS(rx_hl->hdr.info0, HTT_RX_INDICATION_INFO0_EXT_TID);
2800 	rx_desc = (struct htt_hl_rx_desc *)(skb->data + tot_hdr_len);
2801 	rx_desc_info = __le32_to_cpu(rx_desc->info);
2802 
2803 	hdr = (struct ieee80211_hdr *)((u8 *)rx_desc + rx_hl->fw_desc.len);
2804 
2805 	if (is_multicast_ether_addr(hdr->addr1)) {
2806 		/* Discard the fragment with multicast DA */
2807 		goto err;
2808 	}
2809 
2810 	if (!MS(rx_desc_info, HTT_RX_DESC_HL_INFO_ENCRYPTED)) {
2811 		spin_unlock_bh(&ar->data_lock);
2812 		return ath10k_htt_rx_proc_rx_ind_hl(htt, &resp->rx_ind_hl, skb,
2813 						    HTT_RX_NON_PN_CHECK,
2814 						    HTT_RX_NON_TKIP_MIC);
2815 	}
2816 
2817 	if (ieee80211_has_retry(hdr->frame_control))
2818 		goto err;
2819 
2820 	hdr_space = ieee80211_hdrlen(hdr->frame_control);
2821 	sc = __le16_to_cpu(hdr->seq_ctrl);
2822 	seq = IEEE80211_SEQ_TO_SN(sc);
2823 	frag = sc & IEEE80211_SCTL_FRAG;
2824 
2825 	sec_index = MS(rx_desc_info, HTT_RX_DESC_HL_INFO_MCAST_BCAST) ?
2826 		    HTT_TXRX_SEC_MCAST : HTT_TXRX_SEC_UCAST;
2827 	sec_type = peer->rx_pn[sec_index].sec_type;
2828 	ath10k_htt_rx_mpdu_desc_pn_hl(rx_desc, &new_pn, peer->rx_pn[sec_index].pn_len);
2829 
2830 	switch (sec_type) {
2831 	case HTT_SECURITY_TKIP:
2832 		tkip_mic = HTT_RX_TKIP_MIC;
2833 		ret = ath10k_htt_rx_frag_tkip_decap_withmic(skb,
2834 							    tot_hdr_len +
2835 							    rx_hl->fw_desc.len,
2836 							    hdr_space);
2837 		if (ret)
2838 			goto err;
2839 		break;
2840 	case HTT_SECURITY_TKIP_NOMIC:
2841 		ret = ath10k_htt_rx_frag_tkip_decap_nomic(skb,
2842 							  tot_hdr_len +
2843 							  rx_hl->fw_desc.len,
2844 							  hdr_space);
2845 		if (ret)
2846 			goto err;
2847 		break;
2848 	case HTT_SECURITY_AES_CCMP:
2849 		ret = ath10k_htt_rx_frag_ccmp_decap(skb,
2850 						    tot_hdr_len + rx_hl->fw_desc.len,
2851 						    hdr_space);
2852 		if (ret)
2853 			goto err;
2854 		break;
2855 	case HTT_SECURITY_WEP128:
2856 	case HTT_SECURITY_WEP104:
2857 	case HTT_SECURITY_WEP40:
2858 		ret = ath10k_htt_rx_frag_wep_decap(skb,
2859 						   tot_hdr_len + rx_hl->fw_desc.len,
2860 						   hdr_space);
2861 		if (ret)
2862 			goto err;
2863 		break;
2864 	default:
2865 		break;
2866 	}
2867 
2868 	resp = (struct htt_resp *)(skb->data);
2869 
2870 	if (sec_type != HTT_SECURITY_AES_CCMP &&
2871 	    sec_type != HTT_SECURITY_TKIP &&
2872 	    sec_type != HTT_SECURITY_TKIP_NOMIC) {
2873 		spin_unlock_bh(&ar->data_lock);
2874 		return ath10k_htt_rx_proc_rx_ind_hl(htt, &resp->rx_ind_hl, skb,
2875 						    HTT_RX_NON_PN_CHECK,
2876 						    HTT_RX_NON_TKIP_MIC);
2877 	}
2878 
2879 	last_pn = &peer->frag_tids_last_pn[tid];
2880 
2881 	if (frag == 0) {
2882 		if (ath10k_htt_rx_pn_check_replay_hl(ar, peer, &resp->rx_ind_hl))
2883 			goto err;
2884 
2885 		last_pn->pn48 = new_pn.pn48;
2886 		peer->frag_tids_seq[tid] = seq;
2887 	} else if (sec_type == HTT_SECURITY_AES_CCMP) {
2888 		if (seq != peer->frag_tids_seq[tid])
2889 			goto err;
2890 
2891 		if (new_pn.pn48 != last_pn->pn48 + 1)
2892 			goto err;
2893 
2894 		last_pn->pn48 = new_pn.pn48;
2895 		last_pn = &peer->tids_last_pn[tid];
2896 		last_pn->pn48 = new_pn.pn48;
2897 	}
2898 
2899 	spin_unlock_bh(&ar->data_lock);
2900 
2901 	return ath10k_htt_rx_proc_rx_ind_hl(htt, &resp->rx_ind_hl, skb,
2902 					    HTT_RX_NON_PN_CHECK, tkip_mic);
2903 
2904 err:
2905 	spin_unlock_bh(&ar->data_lock);
2906 
2907 	/* Tell the caller that it must free the skb since we have not
2908 	 * consumed it
2909 	 */
2910 	return true;
2911 }
2912 
2913 static void ath10k_htt_rx_proc_rx_ind_ll(struct ath10k_htt *htt,
2914 					 struct htt_rx_indication *rx)
2915 {
2916 	struct ath10k *ar = htt->ar;
2917 	struct htt_rx_indication_mpdu_range *mpdu_ranges;
2918 	int num_mpdu_ranges;
2919 	int i, mpdu_count = 0;
2920 	u16 peer_id;
2921 	u8 tid;
2922 
2923 	num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
2924 			     HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
2925 	peer_id = __le16_to_cpu(rx->hdr.peer_id);
2926 	tid = MS(rx->hdr.info0, HTT_RX_INDICATION_INFO0_EXT_TID);
2927 
2928 	mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx);
2929 
2930 	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
2931 			rx, struct_size(rx, mpdu_ranges, num_mpdu_ranges));
2932 
2933 	for (i = 0; i < num_mpdu_ranges; i++)
2934 		mpdu_count += mpdu_ranges[i].mpdu_count;
2935 
2936 	atomic_add(mpdu_count, &htt->num_mpdus_ready);
2937 
2938 	ath10k_sta_update_rx_tid_stats_ampdu(ar, peer_id, tid, mpdu_ranges,
2939 					     num_mpdu_ranges);
2940 }
2941 
2942 static void ath10k_htt_rx_tx_compl_ind(struct ath10k *ar,
2943 				       struct sk_buff *skb)
2944 {
2945 	struct ath10k_htt *htt = &ar->htt;
2946 	struct htt_resp *resp = (struct htt_resp *)skb->data;
2947 	struct htt_tx_done tx_done = {};
2948 	int status = MS(resp->data_tx_completion.flags, HTT_DATA_TX_STATUS);
2949 	__le16 msdu_id, *msdus;
2950 	bool rssi_enabled = false;
2951 	u8 msdu_count = 0, num_airtime_records, tid;
2952 	int i, htt_pad = 0;
2953 	struct htt_data_tx_compl_ppdu_dur *ppdu_info;
2954 	struct ath10k_peer *peer;
2955 	u16 ppdu_info_offset = 0, peer_id;
2956 	u32 tx_duration;
2957 
2958 	switch (status) {
2959 	case HTT_DATA_TX_STATUS_NO_ACK:
2960 		tx_done.status = HTT_TX_COMPL_STATE_NOACK;
2961 		break;
2962 	case HTT_DATA_TX_STATUS_OK:
2963 		tx_done.status = HTT_TX_COMPL_STATE_ACK;
2964 		break;
2965 	case HTT_DATA_TX_STATUS_DISCARD:
2966 	case HTT_DATA_TX_STATUS_POSTPONE:
2967 	case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL:
2968 		tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
2969 		break;
2970 	default:
2971 		ath10k_warn(ar, "unhandled tx completion status %d\n", status);
2972 		tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
2973 		break;
2974 	}
2975 
2976 	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
2977 		   resp->data_tx_completion.num_msdus);
2978 
2979 	msdu_count = resp->data_tx_completion.num_msdus;
2980 	msdus = resp->data_tx_completion.msdus;
2981 	rssi_enabled = ath10k_is_rssi_enable(&ar->hw_params, resp);
2982 
2983 	if (rssi_enabled)
2984 		htt_pad = ath10k_tx_data_rssi_get_pad_bytes(&ar->hw_params,
2985 							    resp);
2986 
2987 	for (i = 0; i < msdu_count; i++) {
2988 		msdu_id = msdus[i];
2989 		tx_done.msdu_id = __le16_to_cpu(msdu_id);
2990 
2991 		if (rssi_enabled) {
2992 			/* The total number of MSDUs should be even;
2993 			 * if an odd number of MSDUs is sent, firmware
2994 			 * fills the last msdu id with 0xffff.
2995 			 */
2996 			if (msdu_count & 0x01) {
2997 				msdu_id = msdus[msdu_count + i + 1 + htt_pad];
2998 				tx_done.ack_rssi = __le16_to_cpu(msdu_id);
2999 			} else {
3000 				msdu_id = msdus[msdu_count + i + htt_pad];
3001 				tx_done.ack_rssi = __le16_to_cpu(msdu_id);
3002 			}
3003 		}
3004 
3005 		/* kfifo_put: In practice firmware shouldn't fire off per-CE
3006 		 * interrupt and main interrupt (MSI/-X range case) for the same
3007 		 * HTC service so it should be safe to use kfifo_put w/o lock.
3008 		 *
3009 		 * From kfifo_put() documentation:
3010 		 *  Note that with only one concurrent reader and one concurrent
3011 		 *  writer, you don't need extra locking to use these macros.
3012 		 */
3013 		if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL) {
3014 			ath10k_txrx_tx_unref(htt, &tx_done);
3015 		} else if (!kfifo_put(&htt->txdone_fifo, tx_done)) {
3016 			ath10k_warn(ar, "txdone fifo overrun, msdu_id %d status %d\n",
3017 				    tx_done.msdu_id, tx_done.status);
3018 			ath10k_txrx_tx_unref(htt, &tx_done);
3019 		}
3020 	}
3021 
3022 	if (!(resp->data_tx_completion.flags2 & HTT_TX_CMPL_FLAG_PPDU_DURATION_PRESENT))
3023 		return;
3024 
3025 	ppdu_info_offset = (msdu_count & 0x01) ? msdu_count + 1 : msdu_count;
3026 
3027 	if (rssi_enabled)
3028 		ppdu_info_offset += ppdu_info_offset;
3029 
3030 	if (resp->data_tx_completion.flags2 &
3031 	    (HTT_TX_CMPL_FLAG_PPID_PRESENT | HTT_TX_CMPL_FLAG_PA_PRESENT))
3032 		ppdu_info_offset += 2;
3033 
3034 	ppdu_info = (struct htt_data_tx_compl_ppdu_dur *)&msdus[ppdu_info_offset];
3035 	num_airtime_records = FIELD_GET(HTT_TX_COMPL_PPDU_DUR_INFO0_NUM_ENTRIES_MASK,
3036 					__le32_to_cpu(ppdu_info->info0));
3037 
3038 	for (i = 0; i < num_airtime_records; i++) {
3039 		struct htt_data_tx_ppdu_dur *ppdu_dur;
3040 		u32 info0;
3041 
3042 		ppdu_dur = &ppdu_info->ppdu_dur[i];
3043 		info0 = __le32_to_cpu(ppdu_dur->info0);
3044 
3045 		peer_id = FIELD_GET(HTT_TX_PPDU_DUR_INFO0_PEER_ID_MASK,
3046 				    info0);
3047 		rcu_read_lock();
3048 		spin_lock_bh(&ar->data_lock);
3049 
3050 		peer = ath10k_peer_find_by_id(ar, peer_id);
3051 		if (!peer || !peer->sta) {
3052 			spin_unlock_bh(&ar->data_lock);
3053 			rcu_read_unlock();
3054 			continue;
3055 		}
3056 
3057 		tid = FIELD_GET(HTT_TX_PPDU_DUR_INFO0_TID_MASK, info0) &
3058 						IEEE80211_QOS_CTL_TID_MASK;
3059 		tx_duration = __le32_to_cpu(ppdu_dur->tx_duration);
3060 
3061 		ieee80211_sta_register_airtime(peer->sta, tid, tx_duration, 0);
3062 
3063 		spin_unlock_bh(&ar->data_lock);
3064 		rcu_read_unlock();
3065 	}
3066 }
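
/* Illustration of the completion payload indexed above, assuming rssi
 * reporting with msdu_count = 2 and htt_pad = 0:
 *
 *   msdus[0..1]  msdu ids
 *   msdus[2..3]  per-msdu ack rssi (tx_done.ack_rssi for i = 0, 1)
 *
 * With an odd msdu_count the firmware pads the id list with 0xffff,
 * which is why the rssi lookup uses msdu_count + i + 1 in that case.
 */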
3067 
3068 static void ath10k_htt_rx_addba(struct ath10k *ar, struct htt_resp *resp)
3069 {
3070 	struct htt_rx_addba *ev = &resp->rx_addba;
3071 	struct ath10k_peer *peer;
3072 	struct ath10k_vif *arvif;
3073 	u16 info0, tid, peer_id;
3074 
3075 	info0 = __le16_to_cpu(ev->info0);
3076 	tid = MS(info0, HTT_RX_BA_INFO0_TID);
3077 	peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);
3078 
3079 	ath10k_dbg(ar, ATH10K_DBG_HTT,
3080 		   "htt rx addba tid %u peer_id %u size %u\n",
3081 		   tid, peer_id, ev->window_size);
3082 
3083 	spin_lock_bh(&ar->data_lock);
3084 	peer = ath10k_peer_find_by_id(ar, peer_id);
3085 	if (!peer) {
3086 		ath10k_warn(ar, "received addba event for invalid peer_id: %u\n",
3087 			    peer_id);
3088 		spin_unlock_bh(&ar->data_lock);
3089 		return;
3090 	}
3091 
3092 	arvif = ath10k_get_arvif(ar, peer->vdev_id);
3093 	if (!arvif) {
3094 		ath10k_warn(ar, "received addba event for invalid vdev_id: %u\n",
3095 			    peer->vdev_id);
3096 		spin_unlock_bh(&ar->data_lock);
3097 		return;
3098 	}
3099 
3100 	ath10k_dbg(ar, ATH10K_DBG_HTT,
3101 		   "htt rx start rx ba session sta %pM tid %u size %u\n",
3102 		   peer->addr, tid, ev->window_size);
3103 
3104 	ieee80211_start_rx_ba_session_offl(arvif->vif, peer->addr, tid);
3105 	spin_unlock_bh(&ar->data_lock);
3106 }
3107 
3108 static void ath10k_htt_rx_delba(struct ath10k *ar, struct htt_resp *resp)
3109 {
3110 	struct htt_rx_delba *ev = &resp->rx_delba;
3111 	struct ath10k_peer *peer;
3112 	struct ath10k_vif *arvif;
3113 	u16 info0, tid, peer_id;
3114 
3115 	info0 = __le16_to_cpu(ev->info0);
3116 	tid = MS(info0, HTT_RX_BA_INFO0_TID);
3117 	peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);
3118 
3119 	ath10k_dbg(ar, ATH10K_DBG_HTT,
3120 		   "htt rx delba tid %u peer_id %u\n",
3121 		   tid, peer_id);
3122 
3123 	spin_lock_bh(&ar->data_lock);
3124 	peer = ath10k_peer_find_by_id(ar, peer_id);
3125 	if (!peer) {
3126 		ath10k_warn(ar, "received delba event for invalid peer_id: %u\n",
3127 			    peer_id);
3128 		spin_unlock_bh(&ar->data_lock);
3129 		return;
3130 	}
3131 
3132 	arvif = ath10k_get_arvif(ar, peer->vdev_id);
3133 	if (!arvif) {
3134 		ath10k_warn(ar, "received delba event for invalid vdev_id: %u\n",
3135 			    peer->vdev_id);
3136 		spin_unlock_bh(&ar->data_lock);
3137 		return;
3138 	}
3139 
3140 	ath10k_dbg(ar, ATH10K_DBG_HTT,
3141 		   "htt rx stop rx ba session sta %pM tid %u\n",
3142 		   peer->addr, tid);
3143 
3144 	ieee80211_stop_rx_ba_session_offl(arvif->vif, peer->addr, tid);
3145 	spin_unlock_bh(&ar->data_lock);
3146 }
3147 
3148 static int ath10k_htt_rx_extract_amsdu(struct ath10k_hw_params *hw,
3149 				       struct sk_buff_head *list,
3150 				       struct sk_buff_head *amsdu)
3151 {
3152 	struct sk_buff *msdu;
3153 	struct htt_rx_desc *rxd;
3154 	struct rx_msdu_end_common *rxd_msdu_end_common;
3155 
3156 	if (skb_queue_empty(list))
3157 		return -ENOBUFS;
3158 
3159 	if (WARN_ON(!skb_queue_empty(amsdu)))
3160 		return -EINVAL;
3161 
3162 	while ((msdu = __skb_dequeue(list))) {
3163 		__skb_queue_tail(amsdu, msdu);
3164 
3165 		rxd = HTT_RX_BUF_TO_RX_DESC(hw,
3166 					    (void *)msdu->data -
3167 					    hw->rx_desc_ops->rx_desc_size);
3168 
3169 		rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
3170 		if (rxd_msdu_end_common->info0 &
3171 		    __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))
3172 			break;
3173 	}
3174 
3175 	msdu = skb_peek_tail(amsdu);
3176 	rxd = HTT_RX_BUF_TO_RX_DESC(hw,
3177 				    (void *)msdu->data - hw->rx_desc_ops->rx_desc_size);
3178 
3179 	rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
3180 	if (!(rxd_msdu_end_common->info0 &
3181 	      __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))) {
3182 		skb_queue_splice_init(amsdu, list);
3183 		return -EAGAIN;
3184 	}
3185 
3186 	return 0;
3187 }
3188 
3189 static void ath10k_htt_rx_h_rx_offload_prot(struct ieee80211_rx_status *status,
3190 					    struct sk_buff *skb)
3191 {
3192 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
3193 
3194 	if (!ieee80211_has_protected(hdr->frame_control))
3195 		return;
3196 
3197 	/* Offloaded frames are already decrypted but firmware insists they are
3198 	 * protected in the 802.11 header. Strip the flag.  Otherwise mac80211
3199 	 * will drop the frame.
3200 	 */
3201 
3202 	hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
3203 	status->flag |= RX_FLAG_DECRYPTED |
3204 			RX_FLAG_IV_STRIPPED |
3205 			RX_FLAG_MMIC_STRIPPED;
3206 }
3207 
3208 static void ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
3209 				       struct sk_buff_head *list)
3210 {
3211 	struct ath10k_htt *htt = &ar->htt;
3212 	struct ieee80211_rx_status *status = &htt->rx_status;
3213 	struct htt_rx_offload_msdu *rx;
3214 	struct sk_buff *msdu;
3215 	size_t offset;
3216 
3217 	while ((msdu = __skb_dequeue(list))) {
3218 		/* Offloaded frames don't have Rx descriptor. Instead they have
3219 		 * a short meta information header.
3220 		 */
3221 
3222 		rx = (void *)msdu->data;
3223 
3224 		skb_put(msdu, sizeof(*rx));
3225 		skb_pull(msdu, sizeof(*rx));
3226 
3227 		if (skb_tailroom(msdu) < __le16_to_cpu(rx->msdu_len)) {
3228 			ath10k_warn(ar, "dropping frame: offloaded rx msdu is too long!\n");
3229 			dev_kfree_skb_any(msdu);
3230 			continue;
3231 		}
3232 
3233 		skb_put(msdu, __le16_to_cpu(rx->msdu_len));
3234 
3235 		/* The offloaded rx header length isn't a multiple of 2 or 4 so
3236 		 * the actual payload is unaligned. Align the frame. Otherwise
3237 		 * mac80211 complains. This shouldn't reduce performance much
3238 		 * because these offloaded frames are rare.
3239 		 */
3240 		offset = 4 - ((unsigned long)msdu->data & 3);
3241 		skb_put(msdu, offset);
3242 		memmove(msdu->data + offset, msdu->data, msdu->len);
3243 		skb_pull(msdu, offset);
3244 
3245 		/* FIXME: The frame is NWifi. Re-construct QoS Control
3246 		 * if possible later.
3247 		 */
3248 
3249 		memset(status, 0, sizeof(*status));
3250 		status->flag |= RX_FLAG_NO_SIGNAL_VAL;
3251 
3252 		ath10k_htt_rx_h_rx_offload_prot(status, msdu);
3253 		ath10k_htt_rx_h_channel(ar, status, NULL, rx->vdev_id);
3254 		ath10k_htt_rx_h_queue_msdu(ar, status, msdu);
3255 	}
3256 }
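
/* An alignment example for the fixup above: if msdu->data ends in
 * binary ...01 then offset = 4 - 1 = 3 and the payload is shifted up
 * by 3 bytes; when data is already 4-byte aligned, offset = 4 and the
 * frame is shifted by a full word, which keeps it aligned.
 */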
3257 
3258 static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
3259 {
3260 	struct ath10k_htt *htt = &ar->htt;
3261 	struct htt_resp *resp = (void *)skb->data;
3262 	struct ieee80211_rx_status *status = &htt->rx_status;
3263 	struct sk_buff_head list;
3264 	struct sk_buff_head amsdu;
3265 	u16 peer_id;
3266 	u16 msdu_count;
3267 	u8 vdev_id;
3268 	u8 tid;
3269 	bool offload;
3270 	bool frag;
3271 	int ret;
3272 
3273 	lockdep_assert_held(&htt->rx_ring.lock);
3274 
3275 	if (htt->rx_confused)
3276 		return -EIO;
3277 
3278 	skb_pull(skb, sizeof(resp->hdr));
3279 	skb_pull(skb, sizeof(resp->rx_in_ord_ind));
3280 
3281 	peer_id = __le16_to_cpu(resp->rx_in_ord_ind.peer_id);
3282 	msdu_count = __le16_to_cpu(resp->rx_in_ord_ind.msdu_count);
3283 	vdev_id = resp->rx_in_ord_ind.vdev_id;
3284 	tid = SM(resp->rx_in_ord_ind.info, HTT_RX_IN_ORD_IND_INFO_TID);
3285 	offload = !!(resp->rx_in_ord_ind.info &
3286 			HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);
3287 	frag = !!(resp->rx_in_ord_ind.info & HTT_RX_IN_ORD_IND_INFO_FRAG_MASK);
3288 
3289 	ath10k_dbg(ar, ATH10K_DBG_HTT,
3290 		   "htt rx in ord vdev %i peer %i tid %i offload %i frag %i msdu count %i\n",
3291 		   vdev_id, peer_id, tid, offload, frag, msdu_count);
3292 
3293 	if (skb->len < msdu_count * sizeof(*resp->rx_in_ord_ind.msdu_descs32)) {
3294 		ath10k_warn(ar, "dropping invalid in order rx indication\n");
3295 		return -EINVAL;
3296 	}
3297 
3298 	/* The event can deliver more than 1 A-MSDU. Each A-MSDU is later
3299 	 * extracted and processed.
3300 	 */
3301 	__skb_queue_head_init(&list);
3302 	if (ar->hw_params.target_64bit)
3303 		ret = ath10k_htt_rx_pop_paddr64_list(htt, &resp->rx_in_ord_ind,
3304 						     &list);
3305 	else
3306 		ret = ath10k_htt_rx_pop_paddr32_list(htt, &resp->rx_in_ord_ind,
3307 						     &list);
3308 
3309 	if (ret < 0) {
3310 		ath10k_warn(ar, "failed to pop paddr list: %d\n", ret);
3311 		htt->rx_confused = true;
3312 		return -EIO;
3313 	}
3314 
3315 	/* Offloaded frames are very different and need to be handled
3316 	 * separately.
3317 	 */
3318 	if (offload)
3319 		ath10k_htt_rx_h_rx_offload(ar, &list);
3320 
3321 	while (!skb_queue_empty(&list)) {
3322 		__skb_queue_head_init(&amsdu);
3323 		ret = ath10k_htt_rx_extract_amsdu(&ar->hw_params, &list, &amsdu);
3324 		switch (ret) {
3325 		case 0:
3326 			/* Note: The in-order indication may report interleaved
3327 			 * frames from different PPDUs meaning reported rx rate
3328 			 * to mac80211 isn't accurate/reliable. It's still
3329 			 * better to report something than nothing though. This
3330 			 * should still give an idea about rx rate to the user.
3331 			 */
3332 			ath10k_htt_rx_h_ppdu(ar, &amsdu, status, vdev_id);
3333 			ath10k_htt_rx_h_filter(ar, &amsdu, status, NULL);
3334 			ath10k_htt_rx_h_mpdu(ar, &amsdu, status, false, NULL,
3335 					     NULL, peer_id, frag);
3336 			ath10k_htt_rx_h_enqueue(ar, &amsdu, status);
3337 			break;
3338 		case -EAGAIN:
3339 			fallthrough;
3340 		default:
3341 			/* Should not happen. */
3342 			ath10k_warn(ar, "failed to extract amsdu: %d\n", ret);
3343 			htt->rx_confused = true;
3344 			__skb_queue_purge(&list);
3345 			return -EIO;
3346 		}
3347 	}
3348 	return ret;
3349 }
3350 
3351 static void ath10k_htt_rx_tx_fetch_resp_id_confirm(struct ath10k *ar,
3352 						   const __le32 *resp_ids,
3353 						   int num_resp_ids)
3354 {
3355 	int i;
3356 	u32 resp_id;
3357 
3358 	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm num_resp_ids %d\n",
3359 		   num_resp_ids);
3360 
3361 	for (i = 0; i < num_resp_ids; i++) {
3362 		resp_id = le32_to_cpu(resp_ids[i]);
3363 
3364 		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm resp_id %u\n",
3365 			   resp_id);
3366 
3367 		/* TODO: free resp_id */
3368 	}
3369 }
3370 
3371 static void ath10k_htt_rx_tx_fetch_ind(struct ath10k *ar, struct sk_buff *skb)
3372 {
3373 	struct ieee80211_hw *hw = ar->hw;
3374 	struct ieee80211_txq *txq;
3375 	struct htt_resp *resp = (struct htt_resp *)skb->data;
3376 	struct htt_tx_fetch_record *record;
3377 	size_t len;
3378 	size_t max_num_bytes;
3379 	size_t max_num_msdus;
3380 	size_t num_bytes;
3381 	size_t num_msdus;
3382 	const __le32 *resp_ids;
3383 	u16 num_records;
3384 	u16 num_resp_ids;
3385 	u16 peer_id;
3386 	u8 tid;
3387 	int ret;
3388 	int i;
3389 	bool may_tx;
3390 
3391 	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind\n");
3392 
3393 	len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_ind);
3394 	if (unlikely(skb->len < len)) {
3395 		ath10k_warn(ar, "received corrupted tx_fetch_ind event: buffer too short\n");
3396 		return;
3397 	}
3398 
3399 	num_records = le16_to_cpu(resp->tx_fetch_ind.num_records);
3400 	num_resp_ids = le16_to_cpu(resp->tx_fetch_ind.num_resp_ids);
3401 
3402 	len += sizeof(resp->tx_fetch_ind.records[0]) * num_records;
3403 	len += sizeof(resp->tx_fetch_ind.resp_ids[0]) * num_resp_ids;
3404 
3405 	if (unlikely(skb->len < len)) {
3406 		ath10k_warn(ar, "received corrupted tx_fetch_ind event: too many records/resp_ids\n");
3407 		return;
3408 	}
3409 
3410 	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind num records %u num resps %u seq %u\n",
3411 		   num_records, num_resp_ids,
3412 		   le16_to_cpu(resp->tx_fetch_ind.fetch_seq_num));
3413 
3414 	if (!ar->htt.tx_q_state.enabled) {
3415 		ath10k_warn(ar, "received unexpected tx_fetch_ind event: not enabled\n");
3416 		return;
3417 	}
3418 
3419 	if (ar->htt.tx_q_state.mode == HTT_TX_MODE_SWITCH_PUSH) {
3420 		ath10k_warn(ar, "received unexpected tx_fetch_ind event: in push mode\n");
3421 		return;
3422 	}
3423 
3424 	rcu_read_lock();
3425 
3426 	for (i = 0; i < num_records; i++) {
3427 		record = &resp->tx_fetch_ind.records[i];
3428 		peer_id = MS(le16_to_cpu(record->info),
3429 			     HTT_TX_FETCH_RECORD_INFO_PEER_ID);
3430 		tid = MS(le16_to_cpu(record->info),
3431 			 HTT_TX_FETCH_RECORD_INFO_TID);
3432 		max_num_msdus = le16_to_cpu(record->num_msdus);
3433 		max_num_bytes = le32_to_cpu(record->num_bytes);
3434 
3435 		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch record %i peer_id %u tid %u msdus %zu bytes %zu\n",
3436 			   i, peer_id, tid, max_num_msdus, max_num_bytes);
3437 
3438 		if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
3439 		    unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
3440 			ath10k_warn(ar, "received out of range peer_id %u tid %u\n",
3441 				    peer_id, tid);
3442 			continue;
3443 		}
3444 
3445 		spin_lock_bh(&ar->data_lock);
3446 		txq = ath10k_mac_txq_lookup(ar, peer_id, tid);
3447 		spin_unlock_bh(&ar->data_lock);
3448 
3449 		/* It is okay to release the lock and use txq because RCU read
3450 		 * lock is held.
3451 		 */
3452 
3453 		if (unlikely(!txq)) {
3454 			ath10k_warn(ar, "failed to lookup txq for peer_id %u tid %u\n",
3455 				    peer_id, tid);
3456 			continue;
3457 		}
3458 
3459 		num_msdus = 0;
3460 		num_bytes = 0;
3461 
3462 		ieee80211_txq_schedule_start(hw, txq->ac);
3463 		may_tx = ieee80211_txq_may_transmit(hw, txq);
3464 		while (num_msdus < max_num_msdus &&
3465 		       num_bytes < max_num_bytes) {
3466 			if (!may_tx)
3467 				break;
3468 
3469 			ret = ath10k_mac_tx_push_txq(hw, txq);
3470 			if (ret < 0)
3471 				break;
3472 
3473 			num_msdus++;
3474 			num_bytes += ret;
3475 		}
3476 		ieee80211_return_txq(hw, txq, false);
3477 		ieee80211_txq_schedule_end(hw, txq->ac);
3478 
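		/* Report back to firmware how many MSDUs and bytes were
		 * actually pushed for this record.
		 */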
3479 		record->num_msdus = cpu_to_le16(num_msdus);
3480 		record->num_bytes = cpu_to_le32(num_bytes);
3481 
3482 		ath10k_htt_tx_txq_recalc(hw, txq);
3483 	}
3484 
3485 	rcu_read_unlock();
3486 
3487 	resp_ids = ath10k_htt_get_tx_fetch_ind_resp_ids(&resp->tx_fetch_ind);
3488 	ath10k_htt_rx_tx_fetch_resp_id_confirm(ar, resp_ids, num_resp_ids);
3489 
3490 	ret = ath10k_htt_tx_fetch_resp(ar,
3491 				       resp->tx_fetch_ind.token,
3492 				       resp->tx_fetch_ind.fetch_seq_num,
3493 				       resp->tx_fetch_ind.records,
3494 				       num_records);
3495 	if (unlikely(ret)) {
3496 		ath10k_warn(ar, "failed to submit tx fetch resp for token 0x%08x: %d\n",
3497 			    le32_to_cpu(resp->tx_fetch_ind.token), ret);
3498 		/* FIXME: request fw restart */
3499 	}
3500 
3501 	ath10k_htt_tx_txq_sync(ar);
3502 }
3503 
3504 static void ath10k_htt_rx_tx_fetch_confirm(struct ath10k *ar,
3505 					   struct sk_buff *skb)
3506 {
3507 	const struct htt_resp *resp = (void *)skb->data;
3508 	size_t len;
3509 	int num_resp_ids;
3510 
3511 	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm\n");
3512 
3513 	len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_confirm);
3514 	if (unlikely(skb->len < len)) {
3515 		ath10k_warn(ar, "received corrupted tx_fetch_confirm event: buffer too short\n");
3516 		return;
3517 	}
3518 
3519 	num_resp_ids = le16_to_cpu(resp->tx_fetch_confirm.num_resp_ids);
3520 	len += sizeof(resp->tx_fetch_confirm.resp_ids[0]) * num_resp_ids;
3521 
3522 	if (unlikely(skb->len < len)) {
3523 		ath10k_warn(ar, "received corrupted tx_fetch_confirm event: resp_ids buffer overflow\n");
3524 		return;
3525 	}
3526 
3527 	ath10k_htt_rx_tx_fetch_resp_id_confirm(ar,
3528 					       resp->tx_fetch_confirm.resp_ids,
3529 					       num_resp_ids);
3530 }
3531 
3532 static void ath10k_htt_rx_tx_mode_switch_ind(struct ath10k *ar,
3533 					     struct sk_buff *skb)
3534 {
3535 	const struct htt_resp *resp = (void *)skb->data;
3536 	const struct htt_tx_mode_switch_record *record;
3537 	struct ieee80211_txq *txq;
3538 	struct ath10k_txq *artxq;
3539 	size_t len;
3540 	size_t num_records;
3541 	enum htt_tx_mode_switch_mode mode;
3542 	bool enable;
3543 	u16 info0;
3544 	u16 info1;
3545 	u16 threshold;
3546 	u16 peer_id;
3547 	u8 tid;
3548 	int i;
3549 
3550 	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx mode switch ind\n");
3551 
3552 	len = sizeof(resp->hdr) + sizeof(resp->tx_mode_switch_ind);
3553 	if (unlikely(skb->len < len)) {
3554 		ath10k_warn(ar, "received corrupted tx_mode_switch_ind event: buffer too short\n");
3555 		return;
3556 	}
3557 
3558 	info0 = le16_to_cpu(resp->tx_mode_switch_ind.info0);
3559 	info1 = le16_to_cpu(resp->tx_mode_switch_ind.info1);
3560 
3561 	enable = !!(info0 & HTT_TX_MODE_SWITCH_IND_INFO0_ENABLE);
3562 	num_records = MS(info0, HTT_TX_MODE_SWITCH_IND_INFO0_NUM_RECORDS);
3563 	mode = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_MODE);
3564 	threshold = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD);
3565 
3566 	ath10k_dbg(ar, ATH10K_DBG_HTT,
3567 		   "htt rx tx mode switch ind info0 0x%04x info1 0x%04x enable %d num records %zd mode %d threshold %u\n",
3568 		   info0, info1, enable, num_records, mode, threshold);
3569 
3570 	len += sizeof(resp->tx_mode_switch_ind.records[0]) * num_records;
3571 
3572 	if (unlikely(skb->len < len)) {
3573 		ath10k_warn(ar, "received corrupted tx_mode_switch_mode_ind event: too many records\n");
3574 		return;
3575 	}
3576 
3577 	switch (mode) {
3578 	case HTT_TX_MODE_SWITCH_PUSH:
3579 	case HTT_TX_MODE_SWITCH_PUSH_PULL:
3580 		break;
3581 	default:
3582 		ath10k_warn(ar, "received invalid tx_mode_switch_mode_ind mode %d, ignoring\n",
3583 			    mode);
3584 		return;
3585 	}
3586 
3587 	if (!enable)
3588 		return;
3589 
3590 	ar->htt.tx_q_state.enabled = enable;
3591 	ar->htt.tx_q_state.mode = mode;
3592 	ar->htt.tx_q_state.num_push_allowed = threshold;
3593 
3594 	rcu_read_lock();
3595 
3596 	for (i = 0; i < num_records; i++) {
3597 		record = &resp->tx_mode_switch_ind.records[i];
3598 		info0 = le16_to_cpu(record->info0);
3599 		peer_id = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_PEER_ID);
3600 		tid = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_TID);
3601 
3602 		if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
3603 		    unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
3604 			ath10k_warn(ar, "received out of range peer_id %u tid %u\n",
3605 				    peer_id, tid);
3606 			continue;
3607 		}
3608 
3609 		spin_lock_bh(&ar->data_lock);
3610 		txq = ath10k_mac_txq_lookup(ar, peer_id, tid);
3611 		spin_unlock_bh(&ar->data_lock);
3612 
3613 		/* It is okay to release the lock and use txq because RCU read
3614 		 * lock is held.
3615 		 */
3616 
3617 		if (unlikely(!txq)) {
3618 			ath10k_warn(ar, "failed to lookup txq for peer_id %u tid %u\n",
3619 				    peer_id, tid);
3620 			continue;
3621 		}
3622 
3623 		spin_lock_bh(&ar->htt.tx_lock);
3624 		artxq = (void *)txq->drv_priv;
3625 		artxq->num_push_allowed = le16_to_cpu(record->num_max_msdus);
3626 		spin_unlock_bh(&ar->htt.tx_lock);
3627 	}
3628 
3629 	rcu_read_unlock();
3630 
3631 	ath10k_mac_tx_push_pending(ar);
3632 }
3633 
3634 void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
3635 {
3636 	bool release;
3637 
3638 	release = ath10k_htt_t2h_msg_handler(ar, skb);
3639 
3640 	/* Free the indication buffer */
3641 	if (release)
3642 		dev_kfree_skb_any(skb);
3643 }
3644 
3645 static inline s8 ath10k_get_legacy_rate_idx(struct ath10k *ar, u8 rate)
3646 {
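	/* Rates in Mbps: CCK 1, 2, 5.5 (represented as 5) and 11, followed
	 * by OFDM 6 through 54.
	 */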
3647 	static const u8 legacy_rates[] = {1, 2, 5, 11, 6, 9, 12,
3648 					  18, 24, 36, 48, 54};
3649 	int i;
3650 
3651 	for (i = 0; i < ARRAY_SIZE(legacy_rates); i++) {
3652 		if (rate == legacy_rates[i])
3653 			return i;
3654 	}
3655 
3656 	ath10k_warn(ar, "Invalid legacy rate %d peer stats", rate);
3657 	return -EINVAL;
3658 }
3659 
3660 static void
3661 ath10k_accumulate_per_peer_tx_stats(struct ath10k *ar,
3662 				    struct ath10k_sta *arsta,
3663 				    struct ath10k_per_peer_tx_stats *pstats,
3664 				    s8 legacy_rate_idx)
3665 {
3666 	struct rate_info *txrate = &arsta->txrate;
3667 	struct ath10k_htt_tx_stats *tx_stats;
3668 	int idx, ht_idx, gi, mcs, bw, nss;
3669 	unsigned long flags;
3670 
3671 	if (!arsta->tx_stats)
3672 		return;
3673 
3674 	tx_stats = arsta->tx_stats;
3675 	flags = txrate->flags;
3676 	gi = test_bit(ATH10K_RATE_INFO_FLAGS_SGI_BIT, &flags);
3677 	mcs = ATH10K_HW_MCS_RATE(pstats->ratecode);
3678 	bw = txrate->bw;
3679 	nss = txrate->nss;
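	/* Flattened table indices: ht_idx packs 8 HT MCS values per spatial
	 * stream; idx addresses rate_table with 8 slots per MCS
	 * (4 bandwidths x 2 guard intervals) and 10 MCS per stream.
	 */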
3680 	ht_idx = mcs + (nss - 1) * 8;
3681 	idx = mcs * 8 + 8 * 10 * (nss - 1);
3682 	idx += bw * 2 + gi;
3683 
3684 #define STATS_OP_FMT(name) tx_stats->stats[ATH10K_STATS_TYPE_##name]
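/* Shorthand for the stats bucket of one operation type (SUCC, FAIL, RETRY,
 * AMPDU); in each table below, row 0 accumulates bytes and row 1 packets.
 */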
3685 
3686 	if (txrate->flags & RATE_INFO_FLAGS_VHT_MCS) {
3687 		STATS_OP_FMT(SUCC).vht[0][mcs] += pstats->succ_bytes;
3688 		STATS_OP_FMT(SUCC).vht[1][mcs] += pstats->succ_pkts;
3689 		STATS_OP_FMT(FAIL).vht[0][mcs] += pstats->failed_bytes;
3690 		STATS_OP_FMT(FAIL).vht[1][mcs] += pstats->failed_pkts;
3691 		STATS_OP_FMT(RETRY).vht[0][mcs] += pstats->retry_bytes;
3692 		STATS_OP_FMT(RETRY).vht[1][mcs] += pstats->retry_pkts;
3693 	} else if (txrate->flags & RATE_INFO_FLAGS_MCS) {
3694 		STATS_OP_FMT(SUCC).ht[0][ht_idx] += pstats->succ_bytes;
3695 		STATS_OP_FMT(SUCC).ht[1][ht_idx] += pstats->succ_pkts;
3696 		STATS_OP_FMT(FAIL).ht[0][ht_idx] += pstats->failed_bytes;
3697 		STATS_OP_FMT(FAIL).ht[1][ht_idx] += pstats->failed_pkts;
3698 		STATS_OP_FMT(RETRY).ht[0][ht_idx] += pstats->retry_bytes;
3699 		STATS_OP_FMT(RETRY).ht[1][ht_idx] += pstats->retry_pkts;
3700 	} else {
3701 		mcs = legacy_rate_idx;
3702 
3703 		STATS_OP_FMT(SUCC).legacy[0][mcs] += pstats->succ_bytes;
3704 		STATS_OP_FMT(SUCC).legacy[1][mcs] += pstats->succ_pkts;
3705 		STATS_OP_FMT(FAIL).legacy[0][mcs] += pstats->failed_bytes;
3706 		STATS_OP_FMT(FAIL).legacy[1][mcs] += pstats->failed_pkts;
3707 		STATS_OP_FMT(RETRY).legacy[0][mcs] += pstats->retry_bytes;
3708 		STATS_OP_FMT(RETRY).legacy[1][mcs] += pstats->retry_pkts;
3709 	}
3710 
3711 	if (ATH10K_HW_AMPDU(pstats->flags)) {
3712 		tx_stats->ba_fails += ATH10K_HW_BA_FAIL(pstats->flags);
3713 
3714 		if (txrate->flags & RATE_INFO_FLAGS_MCS) {
3715 			STATS_OP_FMT(AMPDU).ht[0][ht_idx] +=
3716 				pstats->succ_bytes + pstats->retry_bytes;
3717 			STATS_OP_FMT(AMPDU).ht[1][ht_idx] +=
3718 				pstats->succ_pkts + pstats->retry_pkts;
3719 		} else {
3720 			STATS_OP_FMT(AMPDU).vht[0][mcs] +=
3721 				pstats->succ_bytes + pstats->retry_bytes;
3722 			STATS_OP_FMT(AMPDU).vht[1][mcs] +=
3723 				pstats->succ_pkts + pstats->retry_pkts;
3724 		}
3725 		STATS_OP_FMT(AMPDU).bw[0][bw] +=
3726 			pstats->succ_bytes + pstats->retry_bytes;
3727 		STATS_OP_FMT(AMPDU).nss[0][nss - 1] +=
3728 			pstats->succ_bytes + pstats->retry_bytes;
3729 		STATS_OP_FMT(AMPDU).gi[0][gi] +=
3730 			pstats->succ_bytes + pstats->retry_bytes;
3731 		STATS_OP_FMT(AMPDU).rate_table[0][idx] +=
3732 			pstats->succ_bytes + pstats->retry_bytes;
3733 		STATS_OP_FMT(AMPDU).bw[1][bw] +=
3734 			pstats->succ_pkts + pstats->retry_pkts;
3735 		STATS_OP_FMT(AMPDU).nss[1][nss - 1] +=
3736 			pstats->succ_pkts + pstats->retry_pkts;
3737 		STATS_OP_FMT(AMPDU).gi[1][gi] +=
3738 			pstats->succ_pkts + pstats->retry_pkts;
3739 		STATS_OP_FMT(AMPDU).rate_table[1][idx] +=
3740 			pstats->succ_pkts + pstats->retry_pkts;
3741 	} else {
3742 		tx_stats->ack_fails +=
3743 				ATH10K_HW_BA_FAIL(pstats->flags);
3744 	}
3745 
3746 	STATS_OP_FMT(SUCC).bw[0][bw] += pstats->succ_bytes;
3747 	STATS_OP_FMT(SUCC).nss[0][nss - 1] += pstats->succ_bytes;
3748 	STATS_OP_FMT(SUCC).gi[0][gi] += pstats->succ_bytes;
3749 
3750 	STATS_OP_FMT(SUCC).bw[1][bw] += pstats->succ_pkts;
3751 	STATS_OP_FMT(SUCC).nss[1][nss - 1] += pstats->succ_pkts;
3752 	STATS_OP_FMT(SUCC).gi[1][gi] += pstats->succ_pkts;
3753 
3754 	STATS_OP_FMT(FAIL).bw[0][bw] += pstats->failed_bytes;
3755 	STATS_OP_FMT(FAIL).nss[0][nss - 1] += pstats->failed_bytes;
3756 	STATS_OP_FMT(FAIL).gi[0][gi] += pstats->failed_bytes;
3757 
3758 	STATS_OP_FMT(FAIL).bw[1][bw] += pstats->failed_pkts;
3759 	STATS_OP_FMT(FAIL).nss[1][nss - 1] += pstats->failed_pkts;
3760 	STATS_OP_FMT(FAIL).gi[1][gi] += pstats->failed_pkts;
3761 
3762 	STATS_OP_FMT(RETRY).bw[0][bw] += pstats->retry_bytes;
3763 	STATS_OP_FMT(RETRY).nss[0][nss - 1] += pstats->retry_bytes;
3764 	STATS_OP_FMT(RETRY).gi[0][gi] += pstats->retry_bytes;
3765 
3766 	STATS_OP_FMT(RETRY).bw[1][bw] += pstats->retry_pkts;
3767 	STATS_OP_FMT(RETRY).nss[1][nss - 1] += pstats->retry_pkts;
3768 	STATS_OP_FMT(RETRY).gi[1][gi] += pstats->retry_pkts;
3769 
3770 	if (txrate->flags >= RATE_INFO_FLAGS_MCS) {
3771 		STATS_OP_FMT(SUCC).rate_table[0][idx] += pstats->succ_bytes;
3772 		STATS_OP_FMT(SUCC).rate_table[1][idx] += pstats->succ_pkts;
3773 		STATS_OP_FMT(FAIL).rate_table[0][idx] += pstats->failed_bytes;
3774 		STATS_OP_FMT(FAIL).rate_table[1][idx] += pstats->failed_pkts;
3775 		STATS_OP_FMT(RETRY).rate_table[0][idx] += pstats->retry_bytes;
3776 		STATS_OP_FMT(RETRY).rate_table[1][idx] += pstats->retry_pkts;
3777 	}
3778 
3779 	tx_stats->tx_duration += pstats->duration;
3780 }
3781 
3782 static void
3783 ath10k_update_per_peer_tx_stats(struct ath10k *ar,
3784 				struct ieee80211_sta *sta,
3785 				struct ath10k_per_peer_tx_stats *peer_stats)
3786 {
3787 	struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
3788 	struct ieee80211_chanctx_conf *conf = NULL;
3789 	u8 rate = 0, sgi;
3790 	s8 rate_idx = 0;
3791 	bool skip_auto_rate;
3792 	struct rate_info txrate;
3793 
3794 	lockdep_assert_held(&ar->data_lock);
3795 
3796 	txrate.flags = ATH10K_HW_PREAMBLE(peer_stats->ratecode);
3797 	txrate.bw = ATH10K_HW_BW(peer_stats->flags);
3798 	txrate.nss = ATH10K_HW_NSS(peer_stats->ratecode);
3799 	txrate.mcs = ATH10K_HW_MCS_RATE(peer_stats->ratecode);
3800 	sgi = ATH10K_HW_GI(peer_stats->flags);
3801 	skip_auto_rate = ATH10K_FW_SKIPPED_RATE_CTRL(peer_stats->flags);
3802 
3803 	/* Firmware's rate control skips broadcast/management frames, frames
3804 	 * with host-configured fixed rates and some other special cases.
3805 	 */
3806 	if (skip_auto_rate)
3807 		return;
3808 
3809 	if (txrate.flags == WMI_RATE_PREAMBLE_VHT && txrate.mcs > 9) {
3810 		ath10k_warn(ar, "Invalid VHT mcs %d peer stats",  txrate.mcs);
3811 		return;
3812 	}
3813 
3814 	if (txrate.flags == WMI_RATE_PREAMBLE_HT &&
3815 	    (txrate.mcs > 7 || txrate.nss < 1)) {
3816 		ath10k_warn(ar, "Invalid HT mcs %d nss %d peer stats",
3817 			    txrate.mcs, txrate.nss);
3818 		return;
3819 	}
3820 
3821 	memset(&arsta->txrate, 0, sizeof(arsta->txrate));
3822 	memset(&arsta->tx_info.status, 0, sizeof(arsta->tx_info.status));
3823 	if (txrate.flags == WMI_RATE_PREAMBLE_CCK ||
3824 	    txrate.flags == WMI_RATE_PREAMBLE_OFDM) {
3825 		rate = ATH10K_HW_LEGACY_RATE(peer_stats->ratecode);
3826 		/* This is hacky, FW sends CCK rate 5.5Mbps as 6 */
3827 		if (rate == 6 && txrate.flags == WMI_RATE_PREAMBLE_CCK)
3828 			rate = 5;
3829 		rate_idx = ath10k_get_legacy_rate_idx(ar, rate);
3830 		if (rate_idx < 0)
3831 			return;
3832 		arsta->txrate.legacy = rate;
3833 	} else if (txrate.flags == WMI_RATE_PREAMBLE_HT) {
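		/* mac80211 expects a single HT MCS index flattened across
		 * spatial streams (8 MCS per stream).
		 */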
3834 		arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
3835 		arsta->txrate.mcs = txrate.mcs + 8 * (txrate.nss - 1);
3836 	} else {
3837 		arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
3838 		arsta->txrate.mcs = txrate.mcs;
3839 	}
3840 
3841 	switch (txrate.flags) {
3842 	case WMI_RATE_PREAMBLE_OFDM:
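		/* The 5 GHz band has no CCK rates, so mac80211 rate indices
		 * there start at OFDM 6 Mbps; skip the four CCK entries.
		 */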
3843 		if (arsta->arvif && arsta->arvif->vif)
3844 			conf = rcu_dereference(arsta->arvif->vif->bss_conf.chanctx_conf);
3845 		if (conf && conf->def.chan->band == NL80211_BAND_5GHZ)
3846 			arsta->tx_info.status.rates[0].idx = rate_idx - 4;
3847 		break;
3848 	case WMI_RATE_PREAMBLE_CCK:
3849 		arsta->tx_info.status.rates[0].idx = rate_idx;
3850 		if (sgi)
3851 			arsta->tx_info.status.rates[0].flags |=
3852 				(IEEE80211_TX_RC_USE_SHORT_PREAMBLE |
3853 				 IEEE80211_TX_RC_SHORT_GI);
3854 		break;
3855 	case WMI_RATE_PREAMBLE_HT:
3856 		arsta->tx_info.status.rates[0].idx =
3857 				txrate.mcs + ((txrate.nss - 1) * 8);
3858 		if (sgi)
3859 			arsta->tx_info.status.rates[0].flags |=
3860 					IEEE80211_TX_RC_SHORT_GI;
3861 		arsta->tx_info.status.rates[0].flags |= IEEE80211_TX_RC_MCS;
3862 		break;
3863 	case WMI_RATE_PREAMBLE_VHT:
3864 		ieee80211_rate_set_vht(&arsta->tx_info.status.rates[0],
3865 				       txrate.mcs, txrate.nss);
3866 		if (sgi)
3867 			arsta->tx_info.status.rates[0].flags |=
3868 						IEEE80211_TX_RC_SHORT_GI;
3869 		arsta->tx_info.status.rates[0].flags |= IEEE80211_TX_RC_VHT_MCS;
3870 		break;
3871 	}
3872 
3873 	arsta->txrate.nss = txrate.nss;
3874 	arsta->txrate.bw = ath10k_bw_to_mac80211_bw(txrate.bw);
3875 	arsta->last_tx_bitrate = cfg80211_calculate_bitrate(&arsta->txrate);
3876 	if (sgi)
3877 		arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
3878 
3879 	switch (arsta->txrate.bw) {
3880 	case RATE_INFO_BW_40:
3881 		arsta->tx_info.status.rates[0].flags |=
3882 				IEEE80211_TX_RC_40_MHZ_WIDTH;
3883 		break;
3884 	case RATE_INFO_BW_80:
3885 		arsta->tx_info.status.rates[0].flags |=
3886 				IEEE80211_TX_RC_80_MHZ_WIDTH;
3887 		break;
3888 	case RATE_INFO_BW_160:
3889 		arsta->tx_info.status.rates[0].flags |=
3890 				IEEE80211_TX_RC_160_MHZ_WIDTH;
3891 		break;
3892 	}
3893 
3894 	if (peer_stats->succ_pkts) {
3895 		arsta->tx_info.flags = IEEE80211_TX_STAT_ACK;
3896 		arsta->tx_info.status.rates[0].count = 1;
3897 		ieee80211_tx_rate_update(ar->hw, sta, &arsta->tx_info);
3898 	}
3899 
3900 	if (ar->htt.disable_tx_comp) {
3901 		arsta->tx_failed += peer_stats->failed_pkts;
3902 		ath10k_dbg(ar, ATH10K_DBG_HTT, "tx failed %d\n",
3903 			   arsta->tx_failed);
3904 	}
3905 
3906 	arsta->tx_retries += peer_stats->retry_pkts;
3907 	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx retries %d", arsta->tx_retries);
3908 
3909 	if (ath10k_debug_is_extd_tx_stats_enabled(ar))
3910 		ath10k_accumulate_per_peer_tx_stats(ar, arsta, peer_stats,
3911 						    rate_idx);
3912 }
3913 
3914 static void ath10k_htt_fetch_peer_stats(struct ath10k *ar,
3915 					struct sk_buff *skb)
3916 {
3917 	struct htt_resp *resp = (struct htt_resp *)skb->data;
3918 	struct ath10k_per_peer_tx_stats *p_tx_stats = &ar->peer_tx_stats;
3919 	struct htt_per_peer_tx_stats_ind *tx_stats;
3920 	struct ieee80211_sta *sta;
3921 	struct ath10k_peer *peer;
3922 	int peer_id, i;
3923 	u8 ppdu_len, num_ppdu;
3924 
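	/* ppdu_len is reported in units of 32-bit words; convert to bytes */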
3925 	num_ppdu = resp->peer_tx_stats.num_ppdu;
3926 	ppdu_len = resp->peer_tx_stats.ppdu_len * sizeof(__le32);
3927 
3928 	if (skb->len < sizeof(struct htt_resp_hdr) + num_ppdu * ppdu_len) {
3929 		ath10k_warn(ar, "Invalid peer stats buf length %d\n", skb->len);
3930 		return;
3931 	}
3932 
3933 	tx_stats = (struct htt_per_peer_tx_stats_ind *)
3934 			(resp->peer_tx_stats.payload);
3935 	peer_id = __le16_to_cpu(tx_stats->peer_id);
3936 
3937 	rcu_read_lock();
3938 	spin_lock_bh(&ar->data_lock);
3939 	peer = ath10k_peer_find_by_id(ar, peer_id);
3940 	if (!peer || !peer->sta) {
3941 		ath10k_warn(ar, "Invalid peer id %d peer stats buffer\n",
3942 			    peer_id);
3943 		goto out;
3944 	}
3945 
3946 	sta = peer->sta;
3947 	for (i = 0; i < num_ppdu; i++) {
3948 		tx_stats = (struct htt_per_peer_tx_stats_ind *)
3949 			   (resp->peer_tx_stats.payload + i * ppdu_len);
3950 
3951 		p_tx_stats->succ_bytes = __le32_to_cpu(tx_stats->succ_bytes);
3952 		p_tx_stats->retry_bytes = __le32_to_cpu(tx_stats->retry_bytes);
3953 		p_tx_stats->failed_bytes =
3954 				__le32_to_cpu(tx_stats->failed_bytes);
3955 		p_tx_stats->ratecode = tx_stats->ratecode;
3956 		p_tx_stats->flags = tx_stats->flags;
3957 		p_tx_stats->succ_pkts = __le16_to_cpu(tx_stats->succ_pkts);
3958 		p_tx_stats->retry_pkts = __le16_to_cpu(tx_stats->retry_pkts);
3959 		p_tx_stats->failed_pkts = __le16_to_cpu(tx_stats->failed_pkts);
3960 		p_tx_stats->duration = __le16_to_cpu(tx_stats->tx_duration);
3961 
3962 		ath10k_update_per_peer_tx_stats(ar, sta, p_tx_stats);
3963 	}
3964 
3965 out:
3966 	spin_unlock_bh(&ar->data_lock);
3967 	rcu_read_unlock();
3968 }
3969 
3970 static void ath10k_fetch_10_2_tx_stats(struct ath10k *ar, u8 *data)
3971 {
3972 	struct ath10k_pktlog_hdr *hdr = (struct ath10k_pktlog_hdr *)data;
3973 	struct ath10k_per_peer_tx_stats *p_tx_stats = &ar->peer_tx_stats;
3974 	struct ath10k_10_2_peer_tx_stats *tx_stats;
3975 	struct ieee80211_sta *sta;
3976 	struct ath10k_peer *peer;
3977 	u16 log_type = __le16_to_cpu(hdr->log_type);
3978 	u32 peer_id = 0, i;
3979 
3980 	if (log_type != ATH_PKTLOG_TYPE_TX_STAT)
3981 		return;
3982 
3983 	tx_stats = (struct ath10k_10_2_peer_tx_stats *)((hdr->payload) +
3984 		    ATH10K_10_2_TX_STATS_OFFSET);
3985 
3986 	if (!tx_stats->tx_ppdu_cnt)
3987 		return;
3988 
3989 	peer_id = tx_stats->peer_id;
3990 
3991 	rcu_read_lock();
3992 	spin_lock_bh(&ar->data_lock);
3993 	peer = ath10k_peer_find_by_id(ar, peer_id);
3994 	if (!peer || !peer->sta) {
3995 		ath10k_warn(ar, "Invalid peer id %d in peer stats buffer\n",
3996 			    peer_id);
3997 		goto out;
3998 	}
3999 
4000 	sta = peer->sta;
4001 	for (i = 0; i < tx_stats->tx_ppdu_cnt; i++) {
4002 		p_tx_stats->succ_bytes =
4003 			__le16_to_cpu(tx_stats->success_bytes[i]);
4004 		p_tx_stats->retry_bytes =
4005 			__le16_to_cpu(tx_stats->retry_bytes[i]);
4006 		p_tx_stats->failed_bytes =
4007 			__le16_to_cpu(tx_stats->failed_bytes[i]);
4008 		p_tx_stats->ratecode = tx_stats->ratecode[i];
4009 		p_tx_stats->flags = tx_stats->flags[i];
4010 		p_tx_stats->succ_pkts = tx_stats->success_pkts[i];
4011 		p_tx_stats->retry_pkts = tx_stats->retry_pkts[i];
4012 		p_tx_stats->failed_pkts = tx_stats->failed_pkts[i];
4013 
4014 		ath10k_update_per_peer_tx_stats(ar, sta, p_tx_stats);
4015 	}
4016 	spin_unlock_bh(&ar->data_lock);
4017 	rcu_read_unlock();
4018 
4019 	return;
4020 
4021 out:
4022 	spin_unlock_bh(&ar->data_lock);
4023 	rcu_read_unlock();
4024 }
4025 
4026 static int ath10k_htt_rx_pn_len(enum htt_security_types sec_type)
4027 {
4028 	switch (sec_type) {
4029 	case HTT_SECURITY_TKIP:
4030 	case HTT_SECURITY_TKIP_NOMIC:
4031 	case HTT_SECURITY_AES_CCMP:
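		/* TKIP and CCMP both use a 48-bit packet number */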
4032 		return 48;
4033 	default:
4034 		return 0;
4035 	}
4036 }
4037 
4038 static void ath10k_htt_rx_sec_ind_handler(struct ath10k *ar,
4039 					  struct htt_security_indication *ev)
4040 {
4041 	enum htt_txrx_sec_cast_type sec_index;
4042 	enum htt_security_types sec_type;
4043 	struct ath10k_peer *peer;
4044 
4045 	spin_lock_bh(&ar->data_lock);
4046 
4047 	peer = ath10k_peer_find_by_id(ar, __le16_to_cpu(ev->peer_id));
4048 	if (!peer) {
4049 		ath10k_warn(ar, "failed to find peer id %d for security indication",
4050 			    __le16_to_cpu(ev->peer_id));
4051 		goto out;
4052 	}
4053 
4054 	sec_type = MS(ev->flags, HTT_SECURITY_TYPE);
4055 
4056 	if (ev->flags & HTT_SECURITY_IS_UNICAST)
4057 		sec_index = HTT_TXRX_SEC_UCAST;
4058 	else
4059 		sec_index = HTT_TXRX_SEC_MCAST;
4060 
4061 	peer->rx_pn[sec_index].sec_type = sec_type;
4062 	peer->rx_pn[sec_index].pn_len = ath10k_htt_rx_pn_len(sec_type);
4063 
4064 	memset(peer->tids_last_pn_valid, 0, sizeof(peer->tids_last_pn_valid));
4065 	memset(peer->tids_last_pn, 0, sizeof(peer->tids_last_pn));
4066 
4067 out:
4068 	spin_unlock_bh(&ar->data_lock);
4069 }
4070 
4071 bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
4072 {
4073 	struct ath10k_htt *htt = &ar->htt;
4074 	struct htt_resp *resp = (struct htt_resp *)skb->data;
4075 	enum htt_t2h_msg_type type;
4076 
4077 	/* confirm alignment */
4078 	if (!IS_ALIGNED((unsigned long)skb->data, 4))
4079 		ath10k_warn(ar, "unaligned htt message, expect trouble\n");
4080 
4081 	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, msg_type: 0x%0X\n",
4082 		   resp->hdr.msg_type);
4083 
4084 	if (resp->hdr.msg_type >= ar->htt.t2h_msg_types_max) {
4085 		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, unsupported msg_type: 0x%0X\n max: 0x%0X",
4086 			   resp->hdr.msg_type, ar->htt.t2h_msg_types_max);
4087 		return true;
4088 	}
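	/* Translate the firmware-specific msg_type value into the common
	 * enum via the per-firmware lookup table.
	 */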
4089 	type = ar->htt.t2h_msg_types[resp->hdr.msg_type];
4090 
4091 	switch (type) {
4092 	case HTT_T2H_MSG_TYPE_VERSION_CONF: {
4093 		htt->target_version_major = resp->ver_resp.major;
4094 		htt->target_version_minor = resp->ver_resp.minor;
4095 		complete(&htt->target_version_received);
4096 		break;
4097 	}
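	/* High-latency targets defer rx indications to NAPI context;
	 * returning false keeps the skb alive until it is processed there.
	 */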
4098 	case HTT_T2H_MSG_TYPE_RX_IND:
4099 		if (ar->bus_param.dev_type != ATH10K_DEV_TYPE_HL) {
4100 			ath10k_htt_rx_proc_rx_ind_ll(htt, &resp->rx_ind);
4101 		} else {
4102 			skb_queue_tail(&htt->rx_indication_head, skb);
4103 			return false;
4104 		}
4105 		break;
4106 	case HTT_T2H_MSG_TYPE_PEER_MAP: {
4107 		struct htt_peer_map_event ev = {
4108 			.vdev_id = resp->peer_map.vdev_id,
4109 			.peer_id = __le16_to_cpu(resp->peer_map.peer_id),
4110 		};
4111 		memcpy(ev.addr, resp->peer_map.addr, sizeof(ev.addr));
4112 		ath10k_peer_map_event(htt, &ev);
4113 		break;
4114 	}
4115 	case HTT_T2H_MSG_TYPE_PEER_UNMAP: {
4116 		struct htt_peer_unmap_event ev = {
4117 			.peer_id = __le16_to_cpu(resp->peer_unmap.peer_id),
4118 		};
4119 		ath10k_peer_unmap_event(htt, &ev);
4120 		break;
4121 	}
4122 	case HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION: {
4123 		struct htt_tx_done tx_done = {};
4124 		struct ath10k_htt *htt = &ar->htt;
4125 		struct ath10k_htc *htc = &ar->htc;
4126 		struct ath10k_htc_ep *ep = &ar->htc.endpoint[htt->eid];
4127 		int status = __le32_to_cpu(resp->mgmt_tx_completion.status);
4128 		int info = __le32_to_cpu(resp->mgmt_tx_completion.info);
4129 
4130 		tx_done.msdu_id = __le32_to_cpu(resp->mgmt_tx_completion.desc_id);
4131 
4132 		switch (status) {
4133 		case HTT_MGMT_TX_STATUS_OK:
4134 			tx_done.status = HTT_TX_COMPL_STATE_ACK;
4135 			if (test_bit(WMI_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS,
4136 				     ar->wmi.svc_map) &&
4137 			    (resp->mgmt_tx_completion.flags &
4138 			     HTT_MGMT_TX_CMPL_FLAG_ACK_RSSI)) {
4139 				tx_done.ack_rssi =
4140 				FIELD_GET(HTT_MGMT_TX_CMPL_INFO_ACK_RSSI_MASK,
4141 					  info);
4142 			}
4143 			break;
4144 		case HTT_MGMT_TX_STATUS_RETRY:
4145 			tx_done.status = HTT_TX_COMPL_STATE_NOACK;
4146 			break;
4147 		case HTT_MGMT_TX_STATUS_DROP:
4148 			tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
4149 			break;
4150 		}
4151 
4152 		if (htt->disable_tx_comp) {
4153 			spin_lock_bh(&htc->tx_lock);
4154 			ep->tx_credits++;
4155 			spin_unlock_bh(&htc->tx_lock);
4156 		}
4157 
4158 		status = ath10k_txrx_tx_unref(htt, &tx_done);
4159 		if (!status) {
4160 			spin_lock_bh(&htt->tx_lock);
4161 			ath10k_htt_tx_mgmt_dec_pending(htt);
4162 			spin_unlock_bh(&htt->tx_lock);
4163 		}
4164 		break;
4165 	}
4166 	case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
4167 		ath10k_htt_rx_tx_compl_ind(htt->ar, skb);
4168 		break;
4169 	case HTT_T2H_MSG_TYPE_SEC_IND: {
4170 		struct ath10k *ar = htt->ar;
4171 		struct htt_security_indication *ev = &resp->security_indication;
4172 
4173 		ath10k_htt_rx_sec_ind_handler(ar, ev);
4174 		ath10k_dbg(ar, ATH10K_DBG_HTT,
4175 			   "sec ind peer_id %d unicast %d type %d\n",
4176 			  __le16_to_cpu(ev->peer_id),
4177 			  !!(ev->flags & HTT_SECURITY_IS_UNICAST),
4178 			  MS(ev->flags, HTT_SECURITY_TYPE));
4179 		complete(&ar->install_key_done);
4180 		break;
4181 	}
4182 	case HTT_T2H_MSG_TYPE_RX_FRAG_IND: {
4183 		ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
4184 				skb->data, skb->len);
4185 		atomic_inc(&htt->num_mpdus_ready);
4186 
4187 		return ath10k_htt_rx_proc_rx_frag_ind(htt,
4188 						      &resp->rx_frag_ind,
4189 						      skb);
4190 	}
4191 	case HTT_T2H_MSG_TYPE_TEST:
4192 		break;
4193 	case HTT_T2H_MSG_TYPE_STATS_CONF:
4194 		trace_ath10k_htt_stats(ar, skb->data, skb->len);
4195 		break;
4196 	case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
4197 		/* Firmware can return tx frames if it's unable to fully
4198 		 * process them and suspects host may be able to fix it. ath10k
4199 		 * sends all tx frames as already inspected so this shouldn't
4200 		 * happen unless fw has a bug.
4201 		 */
4202 		ath10k_warn(ar, "received an unexpected htt tx inspect event\n");
4203 		break;
4204 	case HTT_T2H_MSG_TYPE_RX_ADDBA:
4205 		ath10k_htt_rx_addba(ar, resp);
4206 		break;
4207 	case HTT_T2H_MSG_TYPE_RX_DELBA:
4208 		ath10k_htt_rx_delba(ar, resp);
4209 		break;
4210 	case HTT_T2H_MSG_TYPE_PKTLOG: {
4211 		trace_ath10k_htt_pktlog(ar, resp->pktlog_msg.payload,
4212 					skb->len -
4213 					offsetof(struct htt_resp,
4214 						 pktlog_msg.payload));
4215 
4216 		if (ath10k_peer_stats_enabled(ar))
4217 			ath10k_fetch_10_2_tx_stats(ar,
4218 						   resp->pktlog_msg.payload);
4219 		break;
4220 	}
4221 	case HTT_T2H_MSG_TYPE_RX_FLUSH: {
4222 		/* Ignore this event because mac80211 takes care of Rx
4223 		 * aggregation reordering.
4224 		 */
4225 		break;
4226 	}
4227 	case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND: {
4228 		skb_queue_tail(&htt->rx_in_ord_compl_q, skb);
4229 		return false;
4230 	}
4231 	case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND: {
4232 		struct ath10k_htt *htt = &ar->htt;
4233 		struct ath10k_htc *htc = &ar->htc;
4234 		struct ath10k_htc_ep *ep = &ar->htc.endpoint[htt->eid];
4235 		u32 msg_word = __le32_to_cpu(*(__le32 *)resp);
4236 		int htt_credit_delta;
4237 
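		/* The credit delta is sign-magnitude encoded: an absolute
		 * value field plus a separate sign bit.
		 */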
4238 		htt_credit_delta = HTT_TX_CREDIT_DELTA_ABS_GET(msg_word);
4239 		if (HTT_TX_CREDIT_SIGN_BIT_GET(msg_word))
4240 			htt_credit_delta = -htt_credit_delta;
4241 
4242 		ath10k_dbg(ar, ATH10K_DBG_HTT,
4243 			   "htt credit update delta %d\n",
4244 			   htt_credit_delta);
4245 
4246 		if (htt->disable_tx_comp) {
4247 			spin_lock_bh(&htc->tx_lock);
4248 			ep->tx_credits += htt_credit_delta;
4249 			spin_unlock_bh(&htc->tx_lock);
4250 			ath10k_dbg(ar, ATH10K_DBG_HTT,
4251 				   "htt credit total %d\n",
4252 				   ep->tx_credits);
4253 			ep->ep_ops.ep_tx_credits(htc->ar);
4254 		}
4255 		break;
4256 	}
4257 	case HTT_T2H_MSG_TYPE_CHAN_CHANGE: {
4258 		u32 phymode = __le32_to_cpu(resp->chan_change.phymode);
4259 		u32 freq = __le32_to_cpu(resp->chan_change.freq);
4260 
4261 		ar->tgt_oper_chan = ieee80211_get_channel(ar->hw->wiphy, freq);
4262 		ath10k_dbg(ar, ATH10K_DBG_HTT,
4263 			   "htt chan change freq %u phymode %s\n",
4264 			   freq, ath10k_wmi_phymode_str(phymode));
4265 		break;
4266 	}
4267 	case HTT_T2H_MSG_TYPE_AGGR_CONF:
4268 		break;
4269 	case HTT_T2H_MSG_TYPE_TX_FETCH_IND: {
4270 		struct sk_buff *tx_fetch_ind = skb_copy(skb, GFP_ATOMIC);
4271 
4272 		if (!tx_fetch_ind) {
4273 			ath10k_warn(ar, "failed to copy htt tx fetch ind\n");
4274 			break;
4275 		}
4276 		skb_queue_tail(&htt->tx_fetch_ind_q, tx_fetch_ind);
4277 		break;
4278 	}
4279 	case HTT_T2H_MSG_TYPE_TX_FETCH_CONFIRM:
4280 		ath10k_htt_rx_tx_fetch_confirm(ar, skb);
4281 		break;
4282 	case HTT_T2H_MSG_TYPE_TX_MODE_SWITCH_IND:
4283 		ath10k_htt_rx_tx_mode_switch_ind(ar, skb);
4284 		break;
4285 	case HTT_T2H_MSG_TYPE_PEER_STATS:
4286 		ath10k_htt_fetch_peer_stats(ar, skb);
4287 		break;
4288 	case HTT_T2H_MSG_TYPE_EN_STATS:
4289 	default:
4290 		ath10k_warn(ar, "htt event (%d) not handled\n",
4291 			    resp->hdr.msg_type);
4292 		ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
4293 				skb->data, skb->len);
4294 		break;
4295 	}
4296 	return true;
4297 }
4298 EXPORT_SYMBOL(ath10k_htt_t2h_msg_handler);
4299 
4300 void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar,
4301 					     struct sk_buff *skb)
4302 {
4303 	trace_ath10k_htt_pktlog(ar, skb->data, skb->len);
4304 	dev_kfree_skb_any(skb);
4305 }
4306 EXPORT_SYMBOL(ath10k_htt_rx_pktlog_completion_handler);
4307 
4308 static int ath10k_htt_rx_deliver_msdu(struct ath10k *ar, int quota, int budget)
4309 {
4310 	struct sk_buff *skb;
4311 
4312 	while (quota < budget) {
4313 		if (skb_queue_empty(&ar->htt.rx_msdus_q))
4314 			break;
4315 
4316 		skb = skb_dequeue(&ar->htt.rx_msdus_q);
4317 		if (!skb)
4318 			break;
4319 		ath10k_process_rx(ar, skb);
4320 		quota++;
4321 	}
4322 
4323 	return quota;
4324 }
4325 
4326 int ath10k_htt_rx_hl_indication(struct ath10k *ar, int budget)
4327 {
4328 	struct htt_resp *resp;
4329 	struct ath10k_htt *htt = &ar->htt;
4330 	struct sk_buff *skb;
4331 	bool release;
4332 	int quota;
4333 
4334 	for (quota = 0; quota < budget; quota++) {
4335 		skb = skb_dequeue(&htt->rx_indication_head);
4336 		if (!skb)
4337 			break;
4338 
4339 		resp = (struct htt_resp *)skb->data;
4340 
4341 		release = ath10k_htt_rx_proc_rx_ind_hl(htt,
4342 						       &resp->rx_ind_hl,
4343 						       skb,
4344 						       HTT_RX_PN_CHECK,
4345 						       HTT_RX_NON_TKIP_MIC);
4346 
4347 		if (release)
4348 			dev_kfree_skb_any(skb);
4349 
4350 		ath10k_dbg(ar, ATH10K_DBG_HTT, "rx indication poll pending count:%d\n",
4351 			   skb_queue_len(&htt->rx_indication_head));
4352 	}
4353 	return quota;
4354 }
4355 EXPORT_SYMBOL(ath10k_htt_rx_hl_indication);
4356 
4357 int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget)
4358 {
4359 	struct ath10k_htt *htt = &ar->htt;
4360 	struct htt_tx_done tx_done = {};
4361 	struct sk_buff_head tx_ind_q;
4362 	struct sk_buff *skb;
4363 	unsigned long flags;
4364 	int quota = 0, done, ret;
4365 	bool resched_napi = false;
4366 
4367 	__skb_queue_head_init(&tx_ind_q);
4368 
4369 	/* Process pending frames before dequeuing more data
4370 	 * from hardware.
4371 	 */
4372 	quota = ath10k_htt_rx_deliver_msdu(ar, quota, budget);
4373 	if (quota == budget) {
4374 		resched_napi = true;
4375 		goto exit;
4376 	}
4377 
4378 	while ((skb = skb_dequeue(&htt->rx_in_ord_compl_q))) {
4379 		spin_lock_bh(&htt->rx_ring.lock);
4380 		ret = ath10k_htt_rx_in_ord_ind(ar, skb);
4381 		spin_unlock_bh(&htt->rx_ring.lock);
4382 
4383 		dev_kfree_skb_any(skb);
4384 		if (ret == -EIO) {
4385 			resched_napi = true;
4386 			goto exit;
4387 		}
4388 	}
4389 
4390 	while (atomic_read(&htt->num_mpdus_ready)) {
4391 		ret = ath10k_htt_rx_handle_amsdu(htt);
4392 		if (ret == -EIO) {
4393 			resched_napi = true;
4394 			goto exit;
4395 		}
4396 		atomic_dec(&htt->num_mpdus_ready);
4397 	}
4398 
4399 	/* Deliver received data after processing data from hardware */
4400 	quota = ath10k_htt_rx_deliver_msdu(ar, quota, budget);
4401 
4402 	/* From NAPI documentation:
4403 	 *  The napi poll() function may also process TX completions, in which
4404 	 *  case if it processes the entire TX ring then it should count that
4405 	 *  work as the rest of the budget.
4406 	 */
4407 	if ((quota < budget) && !kfifo_is_empty(&htt->txdone_fifo))
4408 		quota = budget;
4409 
4410 	/* kfifo_get: called only from this NAPI poll context so it's neatly
4411 	 * serialized. From kfifo_get() documentation:
4412 	 *  Note that with only one concurrent reader and one concurrent writer,
4413 	 *  you don't need extra locking to use these macros.
4414 	 */
4415 	while (kfifo_get(&htt->txdone_fifo, &tx_done))
4416 		ath10k_txrx_tx_unref(htt, &tx_done);
4417 
4418 	ath10k_mac_tx_push_pending(ar);
4419 
4420 	spin_lock_irqsave(&htt->tx_fetch_ind_q.lock, flags);
4421 	skb_queue_splice_init(&htt->tx_fetch_ind_q, &tx_ind_q);
4422 	spin_unlock_irqrestore(&htt->tx_fetch_ind_q.lock, flags);
4423 
4424 	while ((skb = __skb_dequeue(&tx_ind_q))) {
4425 		ath10k_htt_rx_tx_fetch_ind(ar, skb);
4426 		dev_kfree_skb_any(skb);
4427 	}
4428 
4429 exit:
4430 	ath10k_htt_rx_msdu_buff_replenish(htt);
4431 	/* In case of rx failure or more data to read, report budget
4432 	 * to reschedule NAPI poll
4433 	 */
4434 	done = resched_napi ? budget : quota;
4435 
4436 	return done;
4437 }
4438 EXPORT_SYMBOL(ath10k_htt_txrx_compl_task);
4439 
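/* The 32/64-bit rx ring ops differ only in the physical address width of
 * the ring entries; high-latency targets have no host-managed rx ring, so
 * only the fragment indication handler is populated for them.
 */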
4440 static const struct ath10k_htt_rx_ops htt_rx_ops_32 = {
4441 	.htt_get_rx_ring_size = ath10k_htt_get_rx_ring_size_32,
4442 	.htt_config_paddrs_ring = ath10k_htt_config_paddrs_ring_32,
4443 	.htt_set_paddrs_ring = ath10k_htt_set_paddrs_ring_32,
4444 	.htt_get_vaddr_ring = ath10k_htt_get_vaddr_ring_32,
4445 	.htt_reset_paddrs_ring = ath10k_htt_reset_paddrs_ring_32,
4446 };
4447 
4448 static const struct ath10k_htt_rx_ops htt_rx_ops_64 = {
4449 	.htt_get_rx_ring_size = ath10k_htt_get_rx_ring_size_64,
4450 	.htt_config_paddrs_ring = ath10k_htt_config_paddrs_ring_64,
4451 	.htt_set_paddrs_ring = ath10k_htt_set_paddrs_ring_64,
4452 	.htt_get_vaddr_ring = ath10k_htt_get_vaddr_ring_64,
4453 	.htt_reset_paddrs_ring = ath10k_htt_reset_paddrs_ring_64,
4454 };
4455 
4456 static const struct ath10k_htt_rx_ops htt_rx_ops_hl = {
4457 	.htt_rx_proc_rx_frag_ind = ath10k_htt_rx_proc_rx_frag_ind_hl,
4458 };
4459 
4460 void ath10k_htt_set_rx_ops(struct ath10k_htt *htt)
4461 {
4462 	struct ath10k *ar = htt->ar;
4463 
4464 	if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
4465 		htt->rx_ops = &htt_rx_ops_hl;
4466 	else if (ar->hw_params.target_64bit)
4467 		htt->rx_ops = &htt_rx_ops_64;
4468 	else
4469 		htt->rx_ops = &htt_rx_ops_32;
4470 }
4471