/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "core.h"
#include "htc.h"
#include "htt.h"
#include "txrx.h"
#include "debug.h"

#include <linux/log2.h>

/* slightly larger than one large A-MPDU */
#define HTT_RX_RING_SIZE_MIN 128

/* roughly 20 ms @ 1 Gbps of 1500B MSDUs */
#define HTT_RX_RING_SIZE_MAX 2048

#define HTT_RX_AVG_FRM_BYTES 1000

/* ms, very conservative */
#define HTT_RX_HOST_LATENCY_MAX_MS 20

/* ms, conservative */
#define HTT_RX_HOST_LATENCY_WORST_LIKELY_MS 10

/* when under memory pressure, rx ring refill may fail and need a retry */
#define HTT_RX_RING_REFILL_RETRY_MS 50

static int ath10k_htt_rx_ring_size(struct ath10k_htt *htt)
{
	int size;

	/*
	 * It is expected that the host CPU will typically be able to
	 * service the rx indication from one A-MPDU before the rx
	 * indication from the subsequent A-MPDU happens, roughly 1-2 ms
	 * later. However, the rx ring should be sized very conservatively,
	 * to accommodate the worst reasonable delay before the host CPU
	 * services a rx indication interrupt.
	 *
	 * The rx ring need not be kept full of empty buffers. In theory,
	 * the htt host SW can dynamically track the low-water mark in the
	 * rx ring, and dynamically adjust the level to which the rx ring
	 * is filled with empty buffers, to dynamically meet the desired
	 * low-water mark.
	 *
	 * In contrast, it's difficult to resize the rx ring itself, once
	 * it's in use. Thus, the ring itself should be sized very
	 * conservatively, while the degree to which the ring is filled
	 * with empty buffers should be sized moderately conservatively.
	 */

	/* 1e6 bps/mbps / 1e3 ms per sec = 1000 */
	size =
	    htt->max_throughput_mbps *
	    1000  /
	    (8 * HTT_RX_AVG_FRM_BYTES) * HTT_RX_HOST_LATENCY_MAX_MS;

	if (size < HTT_RX_RING_SIZE_MIN)
		size = HTT_RX_RING_SIZE_MIN;

	if (size > HTT_RX_RING_SIZE_MAX)
		size = HTT_RX_RING_SIZE_MAX;

	size = roundup_pow_of_two(size);

	return size;
}

static int ath10k_htt_rx_ring_fill_level(struct ath10k_htt *htt)
{
	int size;

	/* 1e6 bps/mbps / 1e3 ms per sec = 1000 */
	size =
	    htt->max_throughput_mbps *
	    1000  /
	    (8 * HTT_RX_AVG_FRM_BYTES) * HTT_RX_HOST_LATENCY_WORST_LIKELY_MS;

	/*
	 * Make sure the fill level is at least 1 less than the ring size.
	 * Leaving 1 element empty allows the SW to easily distinguish
	 * between a full and an empty ring.
	 */
	if (size >= htt->rx_ring.size)
		size = htt->rx_ring.size - 1;

	return size;
}

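/* Unmap and free every buffer currently posted to the rx ring. */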
static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt)
{
	struct sk_buff *skb;
	struct ath10k_skb_cb *cb;
	int i;

	for (i = 0; i < htt->rx_ring.fill_cnt; i++) {
		skb = htt->rx_ring.netbufs_ring[i];
		cb = ATH10K_SKB_CB(skb);
		dma_unmap_single(htt->ar->dev, cb->paddr,
				 skb->len + skb_tailroom(skb),
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
	}

	htt->rx_ring.fill_cnt = 0;
}

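/* Post up to @num fresh rx buffers to the ring and publish the new alloc
 * index to the target. Callers must serialize ring access (see
 * ath10k_htt_rx_ring_fill_n).
 */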
static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
	struct htt_rx_desc *rx_desc;
	struct sk_buff *skb;
	dma_addr_t paddr;
	int ret = 0, idx;

	idx = __le32_to_cpu(*(htt->rx_ring.alloc_idx.vaddr));
	while (num > 0) {
		skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN);
		if (!skb) {
			ret = -ENOMEM;
			goto fail;
		}

		if (!IS_ALIGNED((unsigned long)skb->data, HTT_RX_DESC_ALIGN))
			skb_pull(skb,
				 PTR_ALIGN(skb->data, HTT_RX_DESC_ALIGN) -
				 skb->data);

		/* Clear rx_desc attention word before posting to Rx ring */
		rx_desc = (struct htt_rx_desc *)skb->data;
		rx_desc->attention.flags = __cpu_to_le32(0);

		paddr = dma_map_single(htt->ar->dev, skb->data,
				       skb->len + skb_tailroom(skb),
				       DMA_FROM_DEVICE);

		if (unlikely(dma_mapping_error(htt->ar->dev, paddr))) {
			dev_kfree_skb_any(skb);
			ret = -ENOMEM;
			goto fail;
		}

		ATH10K_SKB_CB(skb)->paddr = paddr;
		htt->rx_ring.netbufs_ring[idx] = skb;
		htt->rx_ring.paddrs_ring[idx] = __cpu_to_le32(paddr);
		htt->rx_ring.fill_cnt++;

		num--;
		idx++;
		idx &= htt->rx_ring.size_mask;
	}

fail:
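	/* The alloc index lives in coherent DMA memory shared with the
	 * target, so publish however many buffers were actually posted,
	 * even on a partial fill.
	 */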
	*(htt->rx_ring.alloc_idx.vaddr) = __cpu_to_le32(idx);
	return ret;
}

static int ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
	lockdep_assert_held(&htt->rx_ring.lock);
	return __ath10k_htt_rx_ring_fill_n(htt, num);
}

static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
{
	int ret, num_to_fill;

	spin_lock_bh(&htt->rx_ring.lock);
	num_to_fill = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
	ret = ath10k_htt_rx_ring_fill_n(htt, num_to_fill);
	if (ret == -ENOMEM) {
		/*
		 * Failed to fill it to the desired level -
		 * we'll start a timer and try again next time.
		 * As long as enough buffers are left in the ring for
		 * another A-MPDU rx, no special recovery is needed.
		 */
		mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
			  msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS));
	}
	spin_unlock_bh(&htt->rx_ring.lock);
}

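/* Timer callback: retry a refill that previously failed under memory
 * pressure.
 */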
static void ath10k_htt_rx_ring_refill_retry(unsigned long arg)
{
	struct ath10k_htt *htt = (struct ath10k_htt *)arg;
	ath10k_htt_rx_msdu_buff_replenish(htt);
}

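/* Number of filled ring entries the host has not yet consumed: the distance
 * from the SW read index to the alloc index, modulo the ring size.
 */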
static unsigned ath10k_htt_rx_ring_elems(struct ath10k_htt *htt)
{
	return (__le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr) -
		htt->rx_ring.sw_rd_idx.msdu_payld) & htt->rx_ring.size_mask;
}

void ath10k_htt_rx_detach(struct ath10k_htt *htt)
{
	int sw_rd_idx = htt->rx_ring.sw_rd_idx.msdu_payld;

	del_timer_sync(&htt->rx_ring.refill_retry_timer);

	while (sw_rd_idx != __le32_to_cpu(*(htt->rx_ring.alloc_idx.vaddr))) {
		struct sk_buff *skb =
				htt->rx_ring.netbufs_ring[sw_rd_idx];
		struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);

		dma_unmap_single(htt->ar->dev, cb->paddr,
				 skb->len + skb_tailroom(skb),
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(htt->rx_ring.netbufs_ring[sw_rd_idx]);
		sw_rd_idx++;
		sw_rd_idx &= htt->rx_ring.size_mask;
	}

	dma_free_coherent(htt->ar->dev,
			  (htt->rx_ring.size *
			   sizeof(htt->rx_ring.paddrs_ring)),
			  htt->rx_ring.paddrs_ring,
			  htt->rx_ring.base_paddr);

	dma_free_coherent(htt->ar->dev,
			  sizeof(*htt->rx_ring.alloc_idx.vaddr),
			  htt->rx_ring.alloc_idx.vaddr,
			  htt->rx_ring.alloc_idx.paddr);

	kfree(htt->rx_ring.netbufs_ring);
}

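/* Pop the oldest posted buffer off the rx ring and advance the SW read
 * index. The caller takes ownership of the skb and is responsible for
 * unmapping its DMA mapping.
 */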
static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
{
	int idx;
	struct sk_buff *msdu;

	spin_lock_bh(&htt->rx_ring.lock);

	if (ath10k_htt_rx_ring_elems(htt) == 0)
		ath10k_warn("htt rx ring is empty!\n");

	idx = htt->rx_ring.sw_rd_idx.msdu_payld;
	msdu = htt->rx_ring.netbufs_ring[idx];

	idx++;
	idx &= htt->rx_ring.size_mask;
	htt->rx_ring.sw_rd_idx.msdu_payld = idx;
	htt->rx_ring.fill_cnt--;

	spin_unlock_bh(&htt->rx_ring.lock);
	return msdu;
}

static void ath10k_htt_rx_free_msdu_chain(struct sk_buff *skb)
{
	struct sk_buff *next;

	while (skb) {
		next = skb->next;
		dev_kfree_skb_any(skb);
		skb = next;
	}
}

static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
				   u8 **fw_desc, int *fw_desc_len,
				   struct sk_buff **head_msdu,
				   struct sk_buff **tail_msdu)
{
	int msdu_len, msdu_chaining = 0;
	struct sk_buff *msdu;
	struct htt_rx_desc *rx_desc;

	if (ath10k_htt_rx_ring_elems(htt) == 0)
		ath10k_warn("htt rx ring is empty!\n");

	if (htt->rx_confused) {
		ath10k_warn("htt is confused. refusing rx\n");
		return 0;
	}

	msdu = *head_msdu = ath10k_htt_rx_netbuf_pop(htt);
	while (msdu) {
		int last_msdu, msdu_len_invalid, msdu_chained;

		dma_unmap_single(htt->ar->dev,
				 ATH10K_SKB_CB(msdu)->paddr,
				 msdu->len + skb_tailroom(msdu),
				 DMA_FROM_DEVICE);

		ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx: ",
				msdu->data, msdu->len + skb_tailroom(msdu));

		rx_desc = (struct htt_rx_desc *)msdu->data;

		/* FIXME: we must report msdu payload since this is what caller
		 *        expects now */
		skb_put(msdu, offsetof(struct htt_rx_desc, msdu_payload));
		skb_pull(msdu, offsetof(struct htt_rx_desc, msdu_payload));

		/*
		 * Sanity check - confirm the HW is finished filling in the
		 * rx data.
		 * If the HW and SW are working correctly, then it's guaranteed
		 * that the HW's MAC DMA is done before this point in the SW.
		 * To prevent the case that we handle a stale Rx descriptor,
		 * just assert for now until we have a way to recover.
		 */
		if (!(__le32_to_cpu(rx_desc->attention.flags)
				& RX_ATTENTION_FLAGS_MSDU_DONE)) {
			ath10k_htt_rx_free_msdu_chain(*head_msdu);
			*head_msdu = NULL;
			msdu = NULL;
			ath10k_err("htt rx stopped. cannot recover\n");
			htt->rx_confused = true;
			break;
		}

		/*
		 * Copy the FW rx descriptor for this MSDU from the rx
		 * indication message into the MSDU's netbuf. HL uses the
		 * same rx indication message definition as LL, and simply
		 * appends new info (fields from the HW rx desc, and the
		 * MSDU payload itself). So, the offset into the rx
		 * indication message only has to account for the standard
		 * offset of the per-MSDU FW rx desc info within the
		 * message, and how many bytes of the per-MSDU FW rx desc
		 * info have already been consumed. (And the endianness of
		 * the host, since for a big-endian host, the rx ind
		 * message contents, including the per-MSDU rx desc bytes,
		 * were byteswapped during upload.)
		 */
		if (*fw_desc_len > 0) {
			rx_desc->fw_desc.info0 = **fw_desc;
			/*
			 * The target is expected to only provide the basic
			 * per-MSDU rx descriptors. Just to be sure, verify
			 * that the target has not attached extension data
			 * (e.g. LRO flow ID).
			 */

			/* or more, if there's extension data */
			(*fw_desc)++;
			(*fw_desc_len)--;
		} else {
			/*
			 * When an oversized A-MSDU happens, the FW will lose
			 * some of the MSDU status - in that case, the FW
			 * descriptors provided will be fewer than the actual
			 * MSDUs inside this MPDU. Mark the FW descriptors so
			 * that the MPDU is still delivered to the upper
			 * stack if there is no CRC error for this MPDU.
			 *
			 * FIX THIS - the FW descriptors are actually for
			 * MSDUs at the end of this A-MSDU instead of the
			 * beginning.
			 */
			rx_desc->fw_desc.info0 = 0;
		}

		msdu_len_invalid = !!(__le32_to_cpu(rx_desc->attention.flags)
					& (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR |
					   RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR));
		msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.info0),
			      RX_MSDU_START_INFO0_MSDU_LENGTH);
		msdu_chained = rx_desc->frag_info.ring2_more_count;

		if (msdu_len_invalid)
			msdu_len = 0;

		skb_trim(msdu, 0);
		skb_put(msdu, min(msdu_len, HTT_RX_MSDU_SIZE));
		msdu_len -= msdu->len;

		/* FIXME: Do chained buffers include htt_rx_desc or not? */
		while (msdu_chained--) {
			struct sk_buff *next = ath10k_htt_rx_netbuf_pop(htt);

			dma_unmap_single(htt->ar->dev,
					 ATH10K_SKB_CB(next)->paddr,
					 next->len + skb_tailroom(next),
					 DMA_FROM_DEVICE);

			ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx: ",
					next->data,
					next->len + skb_tailroom(next));

			skb_trim(next, 0);
			skb_put(next, min(msdu_len, HTT_RX_BUF_SIZE));
			msdu_len -= next->len;

			msdu->next = next;
			msdu = next;
			msdu_chaining = 1;
		}

		if (msdu_len > 0) {
			/* This may suggest a FW bug */
			ath10k_warn("htt rx msdu len not consumed (%d)\n",
				    msdu_len);
		}

		last_msdu = __le32_to_cpu(rx_desc->msdu_end.info0) &
				RX_MSDU_END_INFO0_LAST_MSDU;

		if (last_msdu) {
			msdu->next = NULL;
			break;
		} else {
			struct sk_buff *next = ath10k_htt_rx_netbuf_pop(htt);
			msdu->next = next;
			msdu = next;
		}
	}
	*tail_msdu = msdu;

	/*
	 * Don't refill the ring yet.
	 *
	 * First, the elements popped here are still in use - it is not
	 * safe to overwrite them until the matching call to
	 * mpdu_desc_list_next. Second, for efficiency it is preferable to
	 * refill the rx ring with 1 PPDU's worth of rx buffers (something
	 * like 32 x 3 buffers), rather than one MPDU's worth of rx buffers
	 * (something like 3 buffers). Consequently, we'll rely on the txrx
	 * SW to tell us when it is done pulling all the PPDU's rx buffers
	 * out of the rx ring, and then refill it just once.
	 */

	return msdu_chaining;
}

int ath10k_htt_rx_attach(struct ath10k_htt *htt)
{
	dma_addr_t paddr;
	void *vaddr;
	struct timer_list *timer = &htt->rx_ring.refill_retry_timer;

	htt->rx_ring.size = ath10k_htt_rx_ring_size(htt);
	if (!is_power_of_2(htt->rx_ring.size)) {
		ath10k_warn("htt rx ring size is not power of 2\n");
		return -EINVAL;
	}

	htt->rx_ring.size_mask = htt->rx_ring.size - 1;

	/*
	 * Set the initial value for the level to which the rx ring
	 * should be filled, based on the max throughput and the
	 * worst likely latency for the host to fill the rx ring
	 * with new buffers. In theory, this fill level can be
	 * dynamically adjusted from the initial value set here, to
	 * reflect the actual host latency rather than a
	 * conservative assumption about the host latency.
	 */
	htt->rx_ring.fill_level = ath10k_htt_rx_ring_fill_level(htt);

	htt->rx_ring.netbufs_ring =
		kmalloc(htt->rx_ring.size * sizeof(struct sk_buff *),
			GFP_KERNEL);
	if (!htt->rx_ring.netbufs_ring)
		goto err_netbuf;

	vaddr = dma_alloc_coherent(htt->ar->dev,
		   (htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring)),
		   &paddr, GFP_DMA);
	if (!vaddr)
		goto err_dma_ring;

	htt->rx_ring.paddrs_ring = vaddr;
	htt->rx_ring.base_paddr = paddr;

	vaddr = dma_alloc_coherent(htt->ar->dev,
				   sizeof(*htt->rx_ring.alloc_idx.vaddr),
				   &paddr, GFP_DMA);
	if (!vaddr)
		goto err_dma_idx;

	htt->rx_ring.alloc_idx.vaddr = vaddr;
	htt->rx_ring.alloc_idx.paddr = paddr;
	htt->rx_ring.sw_rd_idx.msdu_payld = 0;
	*htt->rx_ring.alloc_idx.vaddr = 0;

	/* Initialize the Rx refill retry timer */
	setup_timer(timer, ath10k_htt_rx_ring_refill_retry, (unsigned long)htt);

	spin_lock_init(&htt->rx_ring.lock);

	htt->rx_ring.fill_cnt = 0;
	if (__ath10k_htt_rx_ring_fill_n(htt, htt->rx_ring.fill_level))
		goto err_fill_ring;

	ath10k_dbg(ATH10K_DBG_HTT, "HTT RX ring size: %d, fill_level: %d\n",
		   htt->rx_ring.size, htt->rx_ring.fill_level);
	return 0;

err_fill_ring:
	ath10k_htt_rx_ring_free(htt);
	dma_free_coherent(htt->ar->dev,
			  sizeof(*htt->rx_ring.alloc_idx.vaddr),
			  htt->rx_ring.alloc_idx.vaddr,
			  htt->rx_ring.alloc_idx.paddr);
err_dma_idx:
	dma_free_coherent(htt->ar->dev,
			  (htt->rx_ring.size *
			   sizeof(htt->rx_ring.paddrs_ring)),
			  htt->rx_ring.paddrs_ring,
			  htt->rx_ring.base_paddr);
err_dma_ring:
	kfree(htt->rx_ring.netbufs_ring);
err_netbuf:
	return -ENOMEM;
}

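/* Length in bytes of the crypto parameters (IV/key ID) that sit between the
 * 802.11 header and the frame body for a given cipher.
 */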
static int ath10k_htt_rx_crypto_param_len(enum htt_rx_mpdu_encrypt_type type)
{
	switch (type) {
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
		return 4;
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_WEP128: /* not tested */
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
	case HTT_RX_MPDU_ENCRYPT_WAPI: /* not tested */
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
		return 8;
	case HTT_RX_MPDU_ENCRYPT_NONE:
		return 0;
	}

	ath10k_warn("unknown encryption type %d\n", type);
	return 0;
}

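/* Length in bytes of the crypto trailer (ICV/MIC) that this driver strips
 * from the end of the frame body for a given cipher.
 */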
static int ath10k_htt_rx_crypto_tail_len(enum htt_rx_mpdu_encrypt_type type)
{
	switch (type) {
	case HTT_RX_MPDU_ENCRYPT_NONE:
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
	case HTT_RX_MPDU_ENCRYPT_WEP128:
	case HTT_RX_MPDU_ENCRYPT_WAPI:
		return 0;
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
		return 4;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
		return 8;
	}

	ath10k_warn("unknown encryption type %d\n", type);
	return 0;
}

/* Applies for first msdu in chain, before altering it. */
static struct ieee80211_hdr *ath10k_htt_rx_skb_get_hdr(struct sk_buff *skb)
{
	struct htt_rx_desc *rxd;
	enum rx_msdu_decap_format fmt;

	rxd = (void *)skb->data - sizeof(*rxd);
	fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
			RX_MSDU_START_INFO1_DECAP_FORMAT);

	if (fmt == RX_MSDU_DECAP_RAW)
		return (void *)skb->data;
	else
		return (void *)skb->data - RX_HTT_HDR_STATUS_LEN;
}

/* This function only applies for first msdu in an msdu chain */
static bool ath10k_htt_rx_hdr_is_amsdu(struct ieee80211_hdr *hdr)
{
	if (ieee80211_is_data_qos(hdr->frame_control)) {
		u8 *qc = ieee80211_get_qos_ctl(hdr);
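		/* Bit 7 of the QoS control field signals an A-MSDU */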
		if (qc[0] & 0x80)
			return true;
	}
	return false;
}

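/* Rebuild one 802.11 frame from a chain of A-MSDU subframe skbs by copying
 * the original 802.11 header and each decapped subframe (plus padding) into
 * a single, freshly allocated skb.
 */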
static int ath10k_htt_rx_amsdu(struct ath10k_htt *htt,
			struct htt_rx_info *info)
{
	struct htt_rx_desc *rxd;
	struct sk_buff *amsdu;
	struct sk_buff *first;
	struct ieee80211_hdr *hdr;
	struct sk_buff *skb = info->skb;
	enum rx_msdu_decap_format fmt;
	enum htt_rx_mpdu_encrypt_type enctype;
	unsigned int hdr_len;
	int crypto_len;

	rxd = (void *)skb->data - sizeof(*rxd);
	fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
			RX_MSDU_START_INFO1_DECAP_FORMAT);
	enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
			RX_MPDU_START_INFO0_ENCRYPT_TYPE);

	/* FIXME: No idea what assumptions are safe here. Need logs */
	if ((fmt == RX_MSDU_DECAP_RAW && skb->next) ||
	    (fmt == RX_MSDU_DECAP_8023_SNAP_LLC)) {
		ath10k_htt_rx_free_msdu_chain(skb->next);
		skb->next = NULL;
		return -ENOTSUPP;
	}

	/* A-MSDU max is a little less than 8K */
	amsdu = dev_alloc_skb(8*1024);
	if (!amsdu) {
		ath10k_warn("A-MSDU allocation failed\n");
		ath10k_htt_rx_free_msdu_chain(skb->next);
		skb->next = NULL;
		return -ENOMEM;
	}

	if (fmt >= RX_MSDU_DECAP_NATIVE_WIFI) {
		int hdrlen;

		hdr = (void *)rxd->rx_hdr_status;
		hdrlen = ieee80211_hdrlen(hdr->frame_control);
		memcpy(skb_put(amsdu, hdrlen), hdr, hdrlen);
	}

	first = skb;
	while (skb) {
		void *decap_hdr;
		int decap_len = 0;

		rxd = (void *)skb->data - sizeof(*rxd);
		fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
				RX_MSDU_START_INFO1_DECAP_FORMAT);
		decap_hdr = (void *)rxd->rx_hdr_status;

		if (skb == first) {
			/* We receive linked A-MSDU subframe skbuffs. The
			 * first one contains the original 802.11 header (and
			 * possible crypto param) in the RX descriptor. The
			 * A-MSDU subframe header follows that. Each part is
			 * aligned to 4 byte boundary. */

			hdr = (void *)amsdu->data;
			hdr_len = ieee80211_hdrlen(hdr->frame_control);
			crypto_len = ath10k_htt_rx_crypto_param_len(enctype);

			decap_hdr += roundup(hdr_len, 4);
			decap_hdr += roundup(crypto_len, 4);
		}

		if (fmt == RX_MSDU_DECAP_ETHERNET2_DIX) {
			/* Ethernet2 decap inserts ethernet header in place of
			 * A-MSDU subframe header. */
			skb_pull(skb, 6 + 6 + 2);

			/* A-MSDU subframe header length */
			decap_len += 6 + 6 + 2;

			/* Ethernet2 decap also strips the LLC/SNAP so we need
			 * to re-insert it. The LLC/SNAP follows A-MSDU
			 * subframe header. */
			/* FIXME: Not all LLCs are 8 bytes long */
			decap_len += 8;

			memcpy(skb_put(amsdu, decap_len), decap_hdr, decap_len);
		}

		if (fmt == RX_MSDU_DECAP_NATIVE_WIFI) {
			/* Native Wifi decap inserts regular 802.11 header
			 * in place of A-MSDU subframe header. */
			hdr = (struct ieee80211_hdr *)skb->data;
			skb_pull(skb, ieee80211_hdrlen(hdr->frame_control));

			/* A-MSDU subframe header length */
			decap_len += 6 + 6 + 2;

			memcpy(skb_put(amsdu, decap_len), decap_hdr, decap_len);
		}

		if (fmt == RX_MSDU_DECAP_RAW)
			skb_trim(skb, skb->len - 4); /* remove FCS */

		memcpy(skb_put(amsdu, skb->len), skb->data, skb->len);

		/* A-MSDU subframes are padded to 4 bytes
		 * but relative to the first subframe, not the whole MPDU */
		if (skb->next && ((decap_len + skb->len) & 3)) {
			int padlen = 4 - ((decap_len + skb->len) & 3);
			memset(skb_put(amsdu, padlen), 0, padlen);
		}

		skb = skb->next;
	}

	info->skb = amsdu;
	info->encrypt_type = enctype;

	ath10k_htt_rx_free_msdu_chain(first);

	return 0;
}

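/* Decap a single, non-aggregated MSDU in place according to its decap
 * format, re-inserting the 802.11 header (and LLC/SNAP where needed) for
 * the Ethernet-style decap formats.
 */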
static int ath10k_htt_rx_msdu(struct ath10k_htt *htt, struct htt_rx_info *info)
{
	struct sk_buff *skb = info->skb;
	struct htt_rx_desc *rxd;
	struct ieee80211_hdr *hdr;
	enum rx_msdu_decap_format fmt;
	enum htt_rx_mpdu_encrypt_type enctype;

	/* This shouldn't happen. If it does then it may be a FW bug. */
	if (skb->next) {
		ath10k_warn("received chained non A-MSDU frame\n");
		ath10k_htt_rx_free_msdu_chain(skb->next);
		skb->next = NULL;
	}

	rxd = (void *)skb->data - sizeof(*rxd);
	fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
			RX_MSDU_START_INFO1_DECAP_FORMAT);
	enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
			RX_MPDU_START_INFO0_ENCRYPT_TYPE);
	hdr = (void *)skb->data - RX_HTT_HDR_STATUS_LEN;

	switch (fmt) {
	case RX_MSDU_DECAP_RAW:
		/* remove trailing FCS */
		skb_trim(skb, skb->len - 4);
		break;
	case RX_MSDU_DECAP_NATIVE_WIFI:
		/* nothing to do here */
		break;
	case RX_MSDU_DECAP_ETHERNET2_DIX:
		/* macaddr[6] + macaddr[6] + ethertype[2] */
		skb_pull(skb, 6 + 6 + 2);
		break;
	case RX_MSDU_DECAP_8023_SNAP_LLC:
		/* macaddr[6] + macaddr[6] + len[2] */
		/* we don't need this for non-A-MSDU */
		skb_pull(skb, 6 + 6 + 2);
		break;
	}

	if (fmt == RX_MSDU_DECAP_ETHERNET2_DIX) {
		void *llc;
		int llclen;

		llclen = 8;
		llc  = hdr;
		llc += roundup(ieee80211_hdrlen(hdr->frame_control), 4);
		llc += roundup(ath10k_htt_rx_crypto_param_len(enctype), 4);

		skb_push(skb, llclen);
		memcpy(skb->data, llc, llclen);
	}

	if (fmt >= RX_MSDU_DECAP_ETHERNET2_DIX) {
		int len = ieee80211_hdrlen(hdr->frame_control);
		skb_push(skb, len);
		memcpy(skb->data, hdr, len);
	}

	info->skb = skb;
	info->encrypt_type = enctype;
	return 0;
}

static bool ath10k_htt_rx_has_decrypt_err(struct sk_buff *skb)
{
	struct htt_rx_desc *rxd;
	u32 flags;

	rxd = (void *)skb->data - sizeof(*rxd);
	flags = __le32_to_cpu(rxd->attention.flags);

	if (flags & RX_ATTENTION_FLAGS_DECRYPT_ERR)
		return true;

	return false;
}

static bool ath10k_htt_rx_has_fcs_err(struct sk_buff *skb)
{
	struct htt_rx_desc *rxd;
	u32 flags;

	rxd = (void *)skb->data - sizeof(*rxd);
	flags = __le32_to_cpu(rxd->attention.flags);

	if (flags & RX_ATTENTION_FLAGS_FCS_ERR)
		return true;

	return false;
}

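/* Map the HW checksum attention bits to the skb ip_summed convention:
 * CHECKSUM_UNNECESSARY only when the HW verified both the IP header and
 * the TCP/UDP checksum, CHECKSUM_NONE otherwise.
 */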
static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
{
	struct htt_rx_desc *rxd;
	u32 flags, info;
	bool is_ip4, is_ip6;
	bool is_tcp, is_udp;
	bool ip_csum_ok, tcpudp_csum_ok;

	rxd = (void *)skb->data - sizeof(*rxd);
	flags = __le32_to_cpu(rxd->attention.flags);
	info = __le32_to_cpu(rxd->msdu_start.info1);

	is_ip4 = !!(info & RX_MSDU_START_INFO1_IPV4_PROTO);
	is_ip6 = !!(info & RX_MSDU_START_INFO1_IPV6_PROTO);
	is_tcp = !!(info & RX_MSDU_START_INFO1_TCP_PROTO);
	is_udp = !!(info & RX_MSDU_START_INFO1_UDP_PROTO);
	ip_csum_ok = !(flags & RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL);
	tcpudp_csum_ok = !(flags & RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL);

	if (!is_ip4 && !is_ip6)
		return CHECKSUM_NONE;
	if (!is_tcp && !is_udp)
		return CHECKSUM_NONE;
	if (!ip_csum_ok)
		return CHECKSUM_NONE;
	if (!tcpudp_csum_ok)
		return CHECKSUM_NONE;

	return CHECKSUM_UNNECESSARY;
}

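/* Process an HTT RX_IND message: pop each MPDU's MSDU chain off the rx
 * ring, run A-MSDU/MSDU decap and hand the resulting frames to mac80211
 * via ath10k_process_rx(), then replenish the ring.
 */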
static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
				  struct htt_rx_indication *rx)
{
	struct htt_rx_info info;
	struct htt_rx_indication_mpdu_range *mpdu_ranges;
	struct ieee80211_hdr *hdr;
	int num_mpdu_ranges;
	int fw_desc_len;
	u8 *fw_desc;
	int i, j;
	int ret;
	int ip_summed;

	memset(&info, 0, sizeof(info));

	fw_desc_len = __le16_to_cpu(rx->prefix.fw_rx_desc_bytes);
	fw_desc = (u8 *)&rx->fw_desc;

	num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
			     HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
	mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx);

	ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
			rx, sizeof(*rx) +
			(sizeof(struct htt_rx_indication_mpdu_range) *
				num_mpdu_ranges));

	for (i = 0; i < num_mpdu_ranges; i++) {
		info.status = mpdu_ranges[i].mpdu_range_status;

		for (j = 0; j < mpdu_ranges[i].mpdu_count; j++) {
			struct sk_buff *msdu_head, *msdu_tail;
			enum htt_rx_mpdu_status status;
			int msdu_chaining;

			msdu_head = NULL;
			msdu_tail = NULL;
			msdu_chaining = ath10k_htt_rx_amsdu_pop(htt,
							 &fw_desc,
							 &fw_desc_len,
							 &msdu_head,
							 &msdu_tail);

			if (!msdu_head) {
				ath10k_warn("htt rx no data!\n");
				continue;
			}

			if (msdu_head->len == 0) {
				ath10k_dbg(ATH10K_DBG_HTT,
					   "htt rx dropping due to zero-len\n");
				ath10k_htt_rx_free_msdu_chain(msdu_head);
				continue;
			}

			if (ath10k_htt_rx_has_decrypt_err(msdu_head)) {
				ath10k_htt_rx_free_msdu_chain(msdu_head);
				continue;
			}

			status = info.status;

			/* Skip mgmt frames while we handle this in WMI */
			if (status == HTT_RX_IND_MPDU_STATUS_MGMT_CTRL) {
				ath10k_htt_rx_free_msdu_chain(msdu_head);
				continue;
			}

			if (status != HTT_RX_IND_MPDU_STATUS_OK &&
			    status != HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR &&
			    !htt->ar->monitor_enabled) {
				ath10k_dbg(ATH10K_DBG_HTT,
					   "htt rx ignoring frame w/ status %d\n",
					   status);
				ath10k_htt_rx_free_msdu_chain(msdu_head);
				continue;
			}

			/* FIXME: we do not support chaining yet.
			 * this needs investigation */
			if (msdu_chaining) {
				ath10k_warn("msdu_chaining is true\n");
				ath10k_htt_rx_free_msdu_chain(msdu_head);
				continue;
			}

			/* The skb is not yet processed and may be
			 * reallocated. Since the checksum offload state is
			 * derived from the original skb, extract it now and
			 * assign it after decap. */
			ip_summed = ath10k_htt_rx_get_csum_state(msdu_head);

			info.skb     = msdu_head;
			info.fcs_err = ath10k_htt_rx_has_fcs_err(msdu_head);
			info.signal  = ATH10K_DEFAULT_NOISE_FLOOR;
			info.signal += rx->ppdu.combined_rssi;

			info.rate.info0 = rx->ppdu.info0;
			info.rate.info1 = __le32_to_cpu(rx->ppdu.info1);
			info.rate.info2 = __le32_to_cpu(rx->ppdu.info2);

			hdr = ath10k_htt_rx_skb_get_hdr(msdu_head);

			if (ath10k_htt_rx_hdr_is_amsdu(hdr))
				ret = ath10k_htt_rx_amsdu(htt, &info);
			else
				ret = ath10k_htt_rx_msdu(htt, &info);

			if (ret && !info.fcs_err) {
				ath10k_warn("error processing msdus %d\n", ret);
				dev_kfree_skb_any(info.skb);
				continue;
			}

			if (ath10k_htt_rx_hdr_is_amsdu((void *)info.skb->data))
				ath10k_dbg(ATH10K_DBG_HTT, "htt mpdu is amsdu\n");

			info.skb->ip_summed = ip_summed;

			ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt mpdu: ",
					info.skb->data, info.skb->len);
			ath10k_process_rx(htt->ar, &info);
		}
	}

	ath10k_htt_rx_msdu_buff_replenish(htt);
}

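/* Handle an HTT RX_FRAG_IND message: fragmented frames arrive one MPDU at
 * a time in raw decap, so strip the crypto parameters and trailer here
 * before handing the frame to mac80211.
 */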
static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
				struct htt_rx_fragment_indication *frag)
{
	struct sk_buff *msdu_head, *msdu_tail;
	struct htt_rx_desc *rxd;
	enum rx_msdu_decap_format fmt;
	struct htt_rx_info info = {};
	struct ieee80211_hdr *hdr;
	int msdu_chaining;
	bool tkip_mic_err;
	bool decrypt_err;
	u8 *fw_desc;
	int fw_desc_len, hdrlen, paramlen;
	int trim;

	fw_desc_len = __le16_to_cpu(frag->fw_rx_desc_bytes);
	fw_desc = (u8 *)frag->fw_msdu_rx_desc;

	msdu_head = NULL;
	msdu_tail = NULL;
	msdu_chaining = ath10k_htt_rx_amsdu_pop(htt, &fw_desc, &fw_desc_len,
						&msdu_head, &msdu_tail);

	ath10k_dbg(ATH10K_DBG_HTT_DUMP, "htt rx frag ahead\n");

	if (!msdu_head) {
		ath10k_warn("htt rx frag no data\n");
		return;
	}

	if (msdu_chaining || msdu_head != msdu_tail) {
		ath10k_warn("aggregation with fragmentation?!\n");
		ath10k_htt_rx_free_msdu_chain(msdu_head);
		return;
	}

	/* FIXME: implement signal strength */

	hdr = (struct ieee80211_hdr *)msdu_head->data;
	rxd = (void *)msdu_head->data - sizeof(*rxd);
	tkip_mic_err = !!(__le32_to_cpu(rxd->attention.flags) &
				RX_ATTENTION_FLAGS_TKIP_MIC_ERR);
	decrypt_err = !!(__le32_to_cpu(rxd->attention.flags) &
				RX_ATTENTION_FLAGS_DECRYPT_ERR);
	fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
			RX_MSDU_START_INFO1_DECAP_FORMAT);

	if (fmt != RX_MSDU_DECAP_RAW) {
		ath10k_warn("we don't support non-raw fragmented rx yet\n");
		dev_kfree_skb_any(msdu_head);
		goto end;
	}

	info.skb = msdu_head;
	info.status = HTT_RX_IND_MPDU_STATUS_OK;
	info.encrypt_type = MS(__le32_to_cpu(rxd->mpdu_start.info0),
				RX_MPDU_START_INFO0_ENCRYPT_TYPE);
	info.skb->ip_summed = ath10k_htt_rx_get_csum_state(info.skb);

	if (tkip_mic_err) {
		ath10k_warn("tkip mic error\n");
		info.status = HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR;
	}

	if (decrypt_err) {
		ath10k_warn("decryption err in fragmented rx\n");
		dev_kfree_skb_any(info.skb);
		goto end;
	}

	if (info.encrypt_type != HTT_RX_MPDU_ENCRYPT_NONE) {
		hdrlen = ieee80211_hdrlen(hdr->frame_control);
		paramlen = ath10k_htt_rx_crypto_param_len(info.encrypt_type);

		/* It is more efficient to move the header than the payload */
		memmove((void *)info.skb->data + paramlen,
			(void *)info.skb->data,
			hdrlen);
		skb_pull(info.skb, paramlen);
		hdr = (struct ieee80211_hdr *)info.skb->data;
	}

	/* remove trailing FCS */
	trim  = 4;

	/* remove crypto trailer */
	trim += ath10k_htt_rx_crypto_tail_len(info.encrypt_type);

	/* last fragment of TKIP frags has MIC */
	if (!ieee80211_has_morefrags(hdr->frame_control) &&
	    info.encrypt_type == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
		trim += 8;

	if (trim > info.skb->len) {
		ath10k_warn("htt rx fragment: trailer longer than the frame itself? drop\n");
		dev_kfree_skb_any(info.skb);
		goto end;
	}

	skb_trim(info.skb, info.skb->len - trim);

	ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt frag mpdu: ",
			info.skb->data, info.skb->len);
	ath10k_process_rx(htt->ar, &info);

end:
	if (fw_desc_len > 0) {
		ath10k_dbg(ATH10K_DBG_HTT,
			   "expecting more fragmented rx in one indication %d\n",
			   fw_desc_len);
	}
}

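/* Top-level dispatcher for HTT target-to-host messages. Consumes the skb. */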
void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_resp *resp = (struct htt_resp *)skb->data;

	/* confirm alignment */
	if (!IS_ALIGNED((unsigned long)skb->data, 4))
		ath10k_warn("unaligned htt message, expect trouble\n");

	ath10k_dbg(ATH10K_DBG_HTT, "HTT RX, msg_type: 0x%0X\n",
		   resp->hdr.msg_type);
	switch (resp->hdr.msg_type) {
	case HTT_T2H_MSG_TYPE_VERSION_CONF: {
		htt->target_version_major = resp->ver_resp.major;
		htt->target_version_minor = resp->ver_resp.minor;
		complete(&htt->target_version_received);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_IND: {
		ath10k_htt_rx_handler(htt, &resp->rx_ind);
		break;
	}
	case HTT_T2H_MSG_TYPE_PEER_MAP: {
		struct htt_peer_map_event ev = {
			.vdev_id = resp->peer_map.vdev_id,
			.peer_id = __le16_to_cpu(resp->peer_map.peer_id),
		};
		memcpy(ev.addr, resp->peer_map.addr, sizeof(ev.addr));
		ath10k_peer_map_event(htt, &ev);
		break;
	}
	case HTT_T2H_MSG_TYPE_PEER_UNMAP: {
		struct htt_peer_unmap_event ev = {
			.peer_id = __le16_to_cpu(resp->peer_unmap.peer_id),
		};
		ath10k_peer_unmap_event(htt, &ev);
		break;
	}
	case HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION: {
		struct htt_tx_done tx_done = {};
		int status = __le32_to_cpu(resp->mgmt_tx_completion.status);

		tx_done.msdu_id =
			__le32_to_cpu(resp->mgmt_tx_completion.desc_id);

		switch (status) {
		case HTT_MGMT_TX_STATUS_OK:
			break;
		case HTT_MGMT_TX_STATUS_RETRY:
			tx_done.no_ack = true;
			break;
		case HTT_MGMT_TX_STATUS_DROP:
			tx_done.discard = true;
			break;
		}

		ath10k_txrx_tx_completed(htt, &tx_done);
		break;
	}
	case HTT_T2H_MSG_TYPE_TX_COMPL_IND: {
		struct htt_tx_done tx_done = {};
		int status = MS(resp->data_tx_completion.flags,
				HTT_DATA_TX_STATUS);
		__le16 msdu_id;
		int i;

		switch (status) {
		case HTT_DATA_TX_STATUS_NO_ACK:
			tx_done.no_ack = true;
			break;
		case HTT_DATA_TX_STATUS_OK:
			break;
		case HTT_DATA_TX_STATUS_DISCARD:
		case HTT_DATA_TX_STATUS_POSTPONE:
		case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL:
			tx_done.discard = true;
			break;
		default:
			ath10k_warn("unhandled tx completion status %d\n",
				    status);
			tx_done.discard = true;
			break;
		}

		ath10k_dbg(ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
			   resp->data_tx_completion.num_msdus);

		for (i = 0; i < resp->data_tx_completion.num_msdus; i++) {
			msdu_id = resp->data_tx_completion.msdus[i];
			tx_done.msdu_id = __le16_to_cpu(msdu_id);
			ath10k_txrx_tx_completed(htt, &tx_done);
		}
		break;
	}
	case HTT_T2H_MSG_TYPE_SEC_IND: {
		struct ath10k *ar = htt->ar;
		struct htt_security_indication *ev = &resp->security_indication;

		ath10k_dbg(ATH10K_DBG_HTT,
			   "sec ind peer_id %d unicast %d type %d\n",
			  __le16_to_cpu(ev->peer_id),
			  !!(ev->flags & HTT_SECURITY_IS_UNICAST),
			  MS(ev->flags, HTT_SECURITY_TYPE));
		complete(&ar->install_key_done);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_FRAG_IND: {
		ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
				skb->data, skb->len);
		ath10k_htt_rx_frag_handler(htt, &resp->rx_frag_ind);
		break;
	}
	case HTT_T2H_MSG_TYPE_TEST:
		/* FIX THIS */
		break;
	case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
	case HTT_T2H_MSG_TYPE_STATS_CONF:
	case HTT_T2H_MSG_TYPE_RX_ADDBA:
	case HTT_T2H_MSG_TYPE_RX_DELBA:
	case HTT_T2H_MSG_TYPE_RX_FLUSH:
	default:
		ath10k_dbg(ATH10K_DBG_HTT, "htt event (%d) not handled\n",
			   resp->hdr.msg_type);
		ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
				skb->data, skb->len);
		break;
	}

	/* Free the indication buffer */
	dev_kfree_skb_any(skb);
}