/* xref: /openbmc/linux/drivers/net/wireless/ath/ath9k/recv.c (revision 089a49b6) */
/*
 * Copyright (c) 2008-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include <linux/relay.h>
#include "ath9k.h"
#include "ar9003_mac.h"

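/*
 * Stash the driver's ath_buf pointer in the skb control block (skb->cb)
 * so that the owning buffer can be recovered when the skb is pulled back
 * out of an EDMA RX FIFO, e.g.:
 *
 *	SKB_CB_ATHBUF(skb) = bf;	(store on enqueue)
 *	bf = SKB_CB_ATHBUF(skb);	(recover on dequeue)
 */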
#define SKB_CB_ATHBUF(__skb)	(*((struct ath_buf **)__skb->cb))

static inline bool ath9k_check_auto_sleep(struct ath_softc *sc)
{
	return sc->ps_enabled &&
	       (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP);
}

/*
 * Setup and link descriptors.
 *
 * 11N: we can no longer afford to self link the last descriptor.
 * The MAC acknowledges BA status as long as it copies frames to the
 * host buffer (or rx fifo). This can incorrectly acknowledge packets
 * to a sender if the last desc is self-linked.
 */
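/*
 * For example, after linking buffers A and B (in that order) the
 * hardware sees
 *
 *	RXDP -> A.desc { ds_link = B.daddr } -> B.desc { ds_link = 0 }
 *
 * and sc->rx.rxlink points at B's ds_link, ready for the next buffer.
 */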
static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_desc *ds;
	struct sk_buff *skb;

	ds = bf->bf_desc;
	ds->ds_link = 0; /* link to null */
	ds->ds_data = bf->bf_buf_addr;

	/* virtual addr of the beginning of the buffer. */
	skb = bf->bf_mpdu;
	BUG_ON(skb == NULL);
	ds->ds_vdata = skb->data;

	/*
	 * Setup rx descriptors. The rx_bufsize here tells the hardware
	 * how much data it can DMA to us and that we are prepared to
	 * process.
	 */
	ath9k_hw_setuprxdesc(ah, ds,
			     common->rx_bufsize,
			     0);

	if (sc->rx.rxlink == NULL)
		ath9k_hw_putrxbuf(ah, bf->bf_daddr);
	else
		*sc->rx.rxlink = bf->bf_daddr;

	sc->rx.rxlink = &ds->ds_link;
}

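/*
 * Hold the most recently completed buffer back for one iteration: it is
 * only re-linked into the descriptor chain once the next buffer has
 * completed, which keeps the DMA engine from reaching a descriptor that
 * software may still be processing.
 */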
static void ath_rx_buf_relink(struct ath_softc *sc, struct ath_buf *bf)
{
	if (sc->rx.buf_hold)
		ath_rx_buf_link(sc, sc->rx.buf_hold);

	sc->rx.buf_hold = bf;
}

static void ath_setdefantenna(struct ath_softc *sc, u32 antenna)
{
	/* XXX block beacon interrupts */
	ath9k_hw_setantenna(sc->sc_ah, antenna);
	sc->rx.defant = antenna;
	sc->rx.rxotherant = 0;
}

static void ath_opmode_init(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);

	u32 rfilt, mfilt[2];

	/* configure rx filter */
	rfilt = ath_calcrxfilter(sc);
	ath9k_hw_setrxfilter(ah, rfilt);

	/* configure bssid mask */
	ath_hw_setbssidmask(common);

	/* configure operational mode */
	ath9k_hw_setopmode(ah);

	/* calculate and install multicast filter */
	mfilt[0] = mfilt[1] = ~0;
	ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
}

static bool ath_rx_edma_buf_link(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_rx_edma *rx_edma;
	struct sk_buff *skb;
	struct ath_buf *bf;

	rx_edma = &sc->rx.rx_edma[qtype];
	if (skb_queue_len(&rx_edma->rx_fifo) >= rx_edma->rx_fifo_hwsize)
		return false;

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
	list_del_init(&bf->list);

	skb = bf->bf_mpdu;

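	/*
	 * Clear the status words at the head of the buffer and sync them
	 * back to the device, so a stale "done" marker is never mistaken
	 * for a completed reception.
	 */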
	memset(skb->data, 0, ah->caps.rx_status_len);
	dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
				ah->caps.rx_status_len, DMA_TO_DEVICE);

	SKB_CB_ATHBUF(skb) = bf;
	ath9k_hw_addrxbuf_edma(ah, bf->bf_buf_addr, qtype);
	__skb_queue_tail(&rx_edma->rx_fifo, skb);

	return true;
}

static void ath_rx_addbuffer_edma(struct ath_softc *sc,
				  enum ath9k_rx_qtype qtype)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_buf *bf, *tbf;

	if (list_empty(&sc->rx.rxbuf)) {
		ath_dbg(common, QUEUE, "No free rx buf available\n");
		return;
	}

	list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list)
		if (!ath_rx_edma_buf_link(sc, qtype))
			break;
}

static void ath_rx_remove_buffer(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype)
{
	struct ath_buf *bf;
	struct ath_rx_edma *rx_edma;
	struct sk_buff *skb;

	rx_edma = &sc->rx.rx_edma[qtype];

	while ((skb = __skb_dequeue(&rx_edma->rx_fifo)) != NULL) {
		bf = SKB_CB_ATHBUF(skb);
		BUG_ON(!bf);
		list_add_tail(&bf->list, &sc->rx.rxbuf);
	}
}

static void ath_rx_edma_cleanup(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_buf *bf;

	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);

	list_for_each_entry(bf, &sc->rx.rxbuf, list) {
		if (bf->bf_mpdu) {
			dma_unmap_single(sc->dev, bf->bf_buf_addr,
					common->rx_bufsize,
					DMA_BIDIRECTIONAL);
			dev_kfree_skb_any(bf->bf_mpdu);
			bf->bf_buf_addr = 0;
			bf->bf_mpdu = NULL;
		}
	}
}

static void ath_rx_edma_init_queue(struct ath_rx_edma *rx_edma, int size)
{
	__skb_queue_head_init(&rx_edma->rx_fifo);
	rx_edma->rx_fifo_hwsize = size;
}

static int ath_rx_edma_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_hw *ah = sc->sc_ah;
	struct sk_buff *skb;
	struct ath_buf *bf;
	int error = 0, i;
	u32 size;

	ath9k_hw_set_rx_bufsize(ah, common->rx_bufsize -
				    ah->caps.rx_status_len);

	ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_LP],
			       ah->caps.rx_lp_qdepth);
	ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_HP],
			       ah->caps.rx_hp_qdepth);

	size = sizeof(struct ath_buf) * nbufs;
	bf = devm_kzalloc(sc->dev, size, GFP_KERNEL);
	if (!bf)
		return -ENOMEM;

	INIT_LIST_HEAD(&sc->rx.rxbuf);

	for (i = 0; i < nbufs; i++, bf++) {
		skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_KERNEL);
		if (!skb) {
			error = -ENOMEM;
			goto rx_init_fail;
		}

		memset(skb->data, 0, common->rx_bufsize);
		bf->bf_mpdu = skb;

		bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
						 common->rx_bufsize,
						 DMA_BIDIRECTIONAL);
		if (unlikely(dma_mapping_error(sc->dev,
					       bf->bf_buf_addr))) {
			dev_kfree_skb_any(skb);
			bf->bf_mpdu = NULL;
			bf->bf_buf_addr = 0;
			ath_err(common,
				"dma_mapping_error() on RX init\n");
			error = -ENOMEM;
			goto rx_init_fail;
		}

		list_add_tail(&bf->list, &sc->rx.rxbuf);
	}

	return 0;

rx_init_fail:
	ath_rx_edma_cleanup(sc);
	return error;
}

static void ath_edma_start_recv(struct ath_softc *sc)
{
	ath9k_hw_rxena(sc->sc_ah);
	ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_HP);
	ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_LP);
	ath_opmode_init(sc);
	ath9k_hw_startpcureceive(sc->sc_ah, !!(sc->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL));
}

static void ath_edma_stop_recv(struct ath_softc *sc)
{
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
}

int ath_rx_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct sk_buff *skb;
	struct ath_buf *bf;
	int error = 0;

	spin_lock_init(&sc->sc_pcu_lock);

	common->rx_bufsize = IEEE80211_MAX_MPDU_LEN / 2 +
			     sc->sc_ah->caps.rx_status_len;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		return ath_rx_edma_init(sc, nbufs);

	ath_dbg(common, CONFIG, "cachelsz %u rxbufsize %u\n",
		common->cachelsz, common->rx_bufsize);

	/* Initialize rx descriptors */

	error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf,
				  "rx", nbufs, 1, 0);
	if (error != 0) {
		ath_err(common,
			"failed to allocate rx descriptors: %d\n",
			error);
		goto err;
	}

	list_for_each_entry(bf, &sc->rx.rxbuf, list) {
		skb = ath_rxbuf_alloc(common, common->rx_bufsize,
				      GFP_KERNEL);
		if (skb == NULL) {
			error = -ENOMEM;
			goto err;
		}

		bf->bf_mpdu = skb;
		bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
						 common->rx_bufsize,
						 DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(sc->dev,
					       bf->bf_buf_addr))) {
			dev_kfree_skb_any(skb);
			bf->bf_mpdu = NULL;
			bf->bf_buf_addr = 0;
			ath_err(common,
				"dma_mapping_error() on RX init\n");
			error = -ENOMEM;
			goto err;
		}
	}
	sc->rx.rxlink = NULL;
err:
	if (error)
		ath_rx_cleanup(sc);

	return error;
}

void ath_rx_cleanup(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct sk_buff *skb;
	struct ath_buf *bf;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		ath_rx_edma_cleanup(sc);
		return;
	}

	list_for_each_entry(bf, &sc->rx.rxbuf, list) {
		skb = bf->bf_mpdu;
		if (skb) {
			dma_unmap_single(sc->dev, bf->bf_buf_addr,
					 common->rx_bufsize,
					 DMA_FROM_DEVICE);
			dev_kfree_skb(skb);
			bf->bf_buf_addr = 0;
			bf->bf_mpdu = NULL;
		}
	}
}

/*
 * Calculate the receive filter according to the
 * operating mode and state:
 *
 * o always accept unicast, broadcast, and multicast traffic
 * o maintain current state of phy error reception (the hal
 *   may enable phy error frames for noise immunity work)
 * o probe request frames are accepted only when operating in
 *   hostap, adhoc, or monitor modes
 * o enable promiscuous mode according to the interface state
 * o accept beacons:
 *   - when operating in adhoc mode so the 802.11 layer creates
 *     node table entries for peers,
 *   - when operating in station mode for collecting rssi data when
 *     the station is otherwise quiet, or
 *   - when operating as a repeater so we see repeater-sta beacons
 *   - when scanning
 */
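/*
 * For example, a single associated station interface on an HT channel
 * typically ends up with UCAST | BCAST | MCAST | MYBEACON | COMP_BAR.
 */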

u32 ath_calcrxfilter(struct ath_softc *sc)
{
	u32 rfilt;

	rfilt = ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
		| ATH9K_RX_FILTER_MCAST;

	/* if operating on a DFS channel, enable radar pulse detection */
	if (sc->hw->conf.radar_enabled)
		rfilt |= ATH9K_RX_FILTER_PHYRADAR | ATH9K_RX_FILTER_PHYERR;

	if (sc->rx.rxfilter & FIF_PROBE_REQ)
		rfilt |= ATH9K_RX_FILTER_PROBEREQ;

	/*
	 * Set promiscuous mode when FIF_PROMISC_IN_BSS is enabled for a
	 * station mode interface or when in monitor mode. AP mode does not
	 * need this since it receives all in-BSS frames anyway.
	 */
	if (sc->sc_ah->is_monitoring)
		rfilt |= ATH9K_RX_FILTER_PROM;

	if (sc->rx.rxfilter & FIF_CONTROL)
		rfilt |= ATH9K_RX_FILTER_CONTROL;

	if ((sc->sc_ah->opmode == NL80211_IFTYPE_STATION) &&
	    (sc->nvifs <= 1) &&
	    !(sc->rx.rxfilter & FIF_BCN_PRBRESP_PROMISC))
		rfilt |= ATH9K_RX_FILTER_MYBEACON;
	else
		rfilt |= ATH9K_RX_FILTER_BEACON;

	if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) ||
	    (sc->rx.rxfilter & FIF_PSPOLL))
		rfilt |= ATH9K_RX_FILTER_PSPOLL;

	if (conf_is_ht(&sc->hw->conf))
		rfilt |= ATH9K_RX_FILTER_COMP_BAR;

	if (sc->nvifs > 1 || (sc->rx.rxfilter & FIF_OTHER_BSS)) {
		/* This is needed for older chips */
		if (sc->sc_ah->hw_version.macVersion <= AR_SREV_VERSION_9160)
			rfilt |= ATH9K_RX_FILTER_PROM;
		rfilt |= ATH9K_RX_FILTER_MCAST_BCAST_ALL;
	}

	if (AR_SREV_9550(sc->sc_ah))
		rfilt |= ATH9K_RX_FILTER_4ADDRESS;

	return rfilt;
}

int ath_startrecv(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_buf *bf, *tbf;

	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		ath_edma_start_recv(sc);
		return 0;
	}

	if (list_empty(&sc->rx.rxbuf))
		goto start_recv;

	sc->rx.buf_hold = NULL;
	sc->rx.rxlink = NULL;
	list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list) {
		ath_rx_buf_link(sc, bf);
	}

	/* We could have deleted elements so the list may be empty now */
	if (list_empty(&sc->rx.rxbuf))
		goto start_recv;

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
	ath9k_hw_putrxbuf(ah, bf->bf_daddr);
	ath9k_hw_rxena(ah);

start_recv:
	ath_opmode_init(sc);
	ath9k_hw_startpcureceive(ah, !!(sc->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL));

	return 0;
}

static void ath_flushrecv(struct ath_softc *sc)
{
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		ath_rx_tasklet(sc, 1, true);
	ath_rx_tasklet(sc, 1, false);
}

bool ath_stoprecv(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	bool stopped, reset = false;

	ath9k_hw_abortpcurecv(ah);
	ath9k_hw_setrxfilter(ah, 0);
	stopped = ath9k_hw_stopdmarecv(ah, &reset);

	ath_flushrecv(sc);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		ath_edma_stop_recv(sc);
	else
		sc->rx.rxlink = NULL;

	if (!(ah->ah_flags & AH_UNPLUGGED) &&
	    unlikely(!stopped)) {
		ath_err(ath9k_hw_common(sc->sc_ah),
			"Could not stop RX, we could be "
			"confusing the DMA engine when we start RX up\n");
		ATH_DBG_WARN_ON_ONCE(!stopped);
	}
	return stopped && !reset;
}

static bool ath_beacon_dtim_pending_cab(struct sk_buff *skb)
{
	/* Check whether the Beacon frame has DTIM indicating buffered bc/mc */
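	/*
	 * Example TIM element in a DTIM beacon (illustrative values):
	 * id 5 (WLAN_EID_TIM), elen 4, dtim_count 0, dtim_period 1,
	 * bitmap_ctrl 0x01. dtim_count == 0 marks the DTIM beacon and
	 * bit 0 of bitmap_ctrl advertises buffered bc/mc traffic, so
	 * this function returns true for such a beacon.
	 */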
	struct ieee80211_mgmt *mgmt;
	u8 *pos, *end, id, elen;
	struct ieee80211_tim_ie *tim;

	mgmt = (struct ieee80211_mgmt *)skb->data;
	pos = mgmt->u.beacon.variable;
	end = skb->data + skb->len;

	while (pos + 2 < end) {
		id = *pos++;
		elen = *pos++;
		if (pos + elen > end)
			break;

		if (id == WLAN_EID_TIM) {
			if (elen < sizeof(*tim))
				break;
			tim = (struct ieee80211_tim_ie *) pos;
			if (tim->dtim_count != 0)
				break;
			return tim->bitmap_ctrl & 0x01;
		}

		pos += elen;
	}

	return false;
}

static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);

	if (skb->len < 24 + 8 + 2 + 2)
		return;

	sc->ps_flags &= ~PS_WAIT_FOR_BEACON;

	if (sc->ps_flags & PS_BEACON_SYNC) {
		sc->ps_flags &= ~PS_BEACON_SYNC;
		ath_dbg(common, PS,
			"Reconfigure beacon timers based on synchronized timestamp\n");
		ath9k_set_beacon(sc);
	}

	if (ath_beacon_dtim_pending_cab(skb)) {
		/*
		 * Remain awake waiting for buffered broadcast/multicast
		 * frames. If the last broadcast/multicast frame is not
		 * received properly, the next beacon frame will work as
		 * a backup trigger for returning into NETWORK SLEEP state,
		 * so we are waiting for it as well.
		 */
		ath_dbg(common, PS,
			"Received DTIM beacon indicating buffered broadcast/multicast frame(s)\n");
		sc->ps_flags |= PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON;
		return;
	}

	if (sc->ps_flags & PS_WAIT_FOR_CAB) {
		/*
		 * This can happen if a broadcast frame is dropped or the AP
		 * fails to send a frame indicating that all CAB frames have
		 * been delivered.
		 */
		sc->ps_flags &= ~PS_WAIT_FOR_CAB;
		ath_dbg(common, PS, "PS wait for CAB frames timed out\n");
	}
}

static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb, bool mybeacon)
{
	struct ieee80211_hdr *hdr;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);

	hdr = (struct ieee80211_hdr *)skb->data;

	/* Process Beacon and CAB receive in PS state */
	if (((sc->ps_flags & PS_WAIT_FOR_BEACON) || ath9k_check_auto_sleep(sc))
	    && mybeacon) {
		ath_rx_ps_beacon(sc, skb);
	} else if ((sc->ps_flags & PS_WAIT_FOR_CAB) &&
		   (ieee80211_is_data(hdr->frame_control) ||
		    ieee80211_is_action(hdr->frame_control)) &&
		   is_multicast_ether_addr(hdr->addr1) &&
		   !ieee80211_has_moredata(hdr->frame_control)) {
		/*
		 * No more broadcast/multicast frames to be received at this
		 * point.
		 */
		sc->ps_flags &= ~(PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON);
		ath_dbg(common, PS,
			"All PS CAB frames received, back to sleep\n");
	} else if ((sc->ps_flags & PS_WAIT_FOR_PSPOLL_DATA) &&
		   !is_multicast_ether_addr(hdr->addr1) &&
		   !ieee80211_has_morefrags(hdr->frame_control)) {
		sc->ps_flags &= ~PS_WAIT_FOR_PSPOLL_DATA;
		ath_dbg(common, PS,
			"Going back to sleep after having received PS-Poll data (0x%lx)\n",
			sc->ps_flags & (PS_WAIT_FOR_BEACON |
					PS_WAIT_FOR_CAB |
					PS_WAIT_FOR_PSPOLL_DATA |
					PS_WAIT_FOR_TX_ACK));
	}
}

static bool ath_edma_get_buffers(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype,
				 struct ath_rx_status *rs,
				 struct ath_buf **dest)
{
	struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct sk_buff *skb;
	struct ath_buf *bf;
	int ret;

	skb = skb_peek(&rx_edma->rx_fifo);
	if (!skb)
		return false;

	bf = SKB_CB_ATHBUF(skb);
	BUG_ON(!bf);

	dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
				common->rx_bufsize, DMA_FROM_DEVICE);

	ret = ath9k_hw_process_rxdesc_edma(ah, rs, skb->data);
	if (ret == -EINPROGRESS) {
		/* let the device regain ownership of the buffer */
		dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
				common->rx_bufsize, DMA_FROM_DEVICE);
		return false;
	}

	__skb_unlink(skb, &rx_edma->rx_fifo);
	if (ret == -EINVAL) {
		/* corrupt descriptor, skip this one and the following one */
		list_add_tail(&bf->list, &sc->rx.rxbuf);
		ath_rx_edma_buf_link(sc, qtype);

		skb = skb_peek(&rx_edma->rx_fifo);
		if (skb) {
			bf = SKB_CB_ATHBUF(skb);
			BUG_ON(!bf);

			__skb_unlink(skb, &rx_edma->rx_fifo);
			list_add_tail(&bf->list, &sc->rx.rxbuf);
			ath_rx_edma_buf_link(sc, qtype);
		}

		bf = NULL;
	}

	*dest = bf;
	return true;
}

static struct ath_buf *ath_edma_get_next_rx_buf(struct ath_softc *sc,
						struct ath_rx_status *rs,
						enum ath9k_rx_qtype qtype)
{
	struct ath_buf *bf = NULL;

	while (ath_edma_get_buffers(sc, qtype, rs, &bf)) {
		if (!bf)
			continue;

		return bf;
	}
	return NULL;
}

static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
					   struct ath_rx_status *rs)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_desc *ds;
	struct ath_buf *bf;
	int ret;

	if (list_empty(&sc->rx.rxbuf)) {
		sc->rx.rxlink = NULL;
		return NULL;
	}

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
	if (bf == sc->rx.buf_hold)
		return NULL;

	ds = bf->bf_desc;

	/*
	 * Must provide the virtual address of the current
	 * descriptor, the physical address, and the virtual
	 * address of the next descriptor in the h/w chain.
	 * This allows the HAL to look ahead to see if the
	 * hardware is done with a descriptor by checking the
	 * done bit in the following descriptor and the address
	 * of the current descriptor the DMA engine is working
	 * on.  All this is necessary because of our use of
	 * a self-linked list to avoid rx overruns.
	 */
	ret = ath9k_hw_rxprocdesc(ah, ds, rs);
	if (ret == -EINPROGRESS) {
		struct ath_rx_status trs;
		struct ath_buf *tbf;
		struct ath_desc *tds;

		memset(&trs, 0, sizeof(trs));
		if (list_is_last(&bf->list, &sc->rx.rxbuf)) {
			sc->rx.rxlink = NULL;
			return NULL;
		}

		tbf = list_entry(bf->list.next, struct ath_buf, list);

		/*
		 * On some hardware the descriptor status words could
		 * get corrupted, including the done bit. Because of
		 * this, check if the next descriptor's done bit is
		 * set or not.
		 *
		 * If the next descriptor's done bit is set, the current
		 * descriptor has been corrupted. Force s/w to discard
		 * this descriptor and continue...
		 */

		tds = tbf->bf_desc;
		ret = ath9k_hw_rxprocdesc(ah, tds, &trs);
		if (ret == -EINPROGRESS)
			return NULL;

		/*
		 * mark descriptor as zero-length and set the 'more'
		 * flag to ensure that both buffers get discarded
		 */
		rs->rs_datalen = 0;
		rs->rs_more = true;
	}

	list_del(&bf->list);
	if (!bf->bf_mpdu)
		return bf;

	/*
	 * Synchronize the DMA transfer with CPU before
	 * 1. accessing the frame
	 * 2. requeueing the same buffer to h/w
	 */
	dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
			common->rx_bufsize,
			DMA_FROM_DEVICE);

	return bf;
}

/* Assumes you've already done the endian to CPU conversion */
static bool ath9k_rx_accept(struct ath_common *common,
			    struct ieee80211_hdr *hdr,
			    struct ieee80211_rx_status *rxs,
			    struct ath_rx_status *rx_stats,
			    bool *decrypt_error)
{
	struct ath_softc *sc = (struct ath_softc *) common->priv;
	bool is_mc, is_valid_tkip, strip_mic, mic_error;
	struct ath_hw *ah = common->ah;
	__le16 fc;

	fc = hdr->frame_control;

	is_mc = !!is_multicast_ether_addr(hdr->addr1);
	is_valid_tkip = rx_stats->rs_keyix != ATH9K_RXKEYIX_INVALID &&
		test_bit(rx_stats->rs_keyix, common->tkip_keymap);
	strip_mic = is_valid_tkip && ieee80211_is_data(fc) &&
		ieee80211_has_protected(fc) &&
		!(rx_stats->rs_status &
		(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_CRC | ATH9K_RXERR_MIC |
		 ATH9K_RXERR_KEYMISS));

	/*
	 * Key miss events are only relevant for pairwise keys where the
	 * descriptor does contain a valid key index. This has been observed
	 * mostly with CCMP encryption.
	 */
	if (rx_stats->rs_keyix == ATH9K_RXKEYIX_INVALID ||
	    !test_bit(rx_stats->rs_keyix, common->ccmp_keymap))
		rx_stats->rs_status &= ~ATH9K_RXERR_KEYMISS;

	mic_error = is_valid_tkip && !ieee80211_is_ctl(fc) &&
		!ieee80211_has_morefrags(fc) &&
		!(le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG) &&
		(rx_stats->rs_status & ATH9K_RXERR_MIC);

	/*
	 * The rx_stats->rs_status will not be set until the end of the
	 * chained descriptors so it can be ignored if rs_more is set. The
	 * rs_more will be false at the last element of the chained
	 * descriptors.
	 */
	if (rx_stats->rs_status != 0) {
		u8 status_mask;

		if (rx_stats->rs_status & ATH9K_RXERR_CRC) {
			rxs->flag |= RX_FLAG_FAILED_FCS_CRC;
			mic_error = false;
		}

		if ((rx_stats->rs_status & ATH9K_RXERR_DECRYPT) ||
		    (!is_mc && (rx_stats->rs_status & ATH9K_RXERR_KEYMISS))) {
			*decrypt_error = true;
			mic_error = false;
		}

		/*
		 * Reject error frames with the exception of
		 * decryption and MIC failures. For monitor mode,
		 * we also ignore the CRC error.
		 */
		status_mask = ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
			      ATH9K_RXERR_KEYMISS;

		if (ah->is_monitoring && (sc->rx.rxfilter & FIF_FCSFAIL))
			status_mask |= ATH9K_RXERR_CRC;

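		/*
		 * e.g. a frame flagged only with ATH9K_RXERR_MIC passes
		 * this check, while one with ATH9K_RXERR_PHY set is
		 * rejected.
		 */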
		if (rx_stats->rs_status & ~status_mask)
			return false;
	}

	/*
	 * For unicast frames the MIC error bit can have false positives,
	 * so all MIC error reports need to be validated in software.
	 * False negatives are not common, so skip software verification
	 * if the hardware considers the MIC valid.
	 */
	if (strip_mic)
		rxs->flag |= RX_FLAG_MMIC_STRIPPED;
	else if (is_mc && mic_error)
		rxs->flag |= RX_FLAG_MMIC_ERROR;

	return true;
}

static int ath9k_process_rate(struct ath_common *common,
			      struct ieee80211_hw *hw,
			      struct ath_rx_status *rx_stats,
			      struct ieee80211_rx_status *rxs)
{
	struct ieee80211_supported_band *sband;
	enum ieee80211_band band;
	unsigned int i = 0;
	struct ath_softc __maybe_unused *sc = common->priv;

	band = hw->conf.chandef.chan->band;
	sband = hw->wiphy->bands[band];

	switch (hw->conf.chandef.width) {
	case NL80211_CHAN_WIDTH_5:
		rxs->flag |= RX_FLAG_5MHZ;
		break;
	case NL80211_CHAN_WIDTH_10:
		rxs->flag |= RX_FLAG_10MHZ;
		break;
	default:
		break;
	}

	if (rx_stats->rs_rate & 0x80) {
		/* HT rate */
		rxs->flag |= RX_FLAG_HT;
		rxs->flag |= rx_stats->flag;
		rxs->rate_idx = rx_stats->rs_rate & 0x7f;
		return 0;
	}

	for (i = 0; i < sband->n_bitrates; i++) {
		if (sband->bitrates[i].hw_value == rx_stats->rs_rate) {
			rxs->rate_idx = i;
			return 0;
		}
		if (sband->bitrates[i].hw_value_short == rx_stats->rs_rate) {
			rxs->flag |= RX_FLAG_SHORTPRE;
			rxs->rate_idx = i;
			return 0;
		}
	}

	/*
	 * No valid hardware bitrate found -- we should not get here
	 * because hardware has already validated this frame as OK.
	 */
	ath_dbg(common, ANY,
		"unsupported hw bitrate detected 0x%02x using 1 Mbit\n",
		rx_stats->rs_rate);
	RX_STAT_INC(rx_rate_err);
	return -EINVAL;
}

static void ath9k_process_rssi(struct ath_common *common,
			       struct ieee80211_hw *hw,
			       struct ath_rx_status *rx_stats,
			       struct ieee80211_rx_status *rxs)
{
	struct ath_softc *sc = hw->priv;
	struct ath_hw *ah = common->ah;
	int last_rssi;
	int rssi = rx_stats->rs_rssi;

	/*
	 * RSSI is not available for subframes in an A-MPDU.
	 */
	if (rx_stats->rs_moreaggr) {
		rxs->flag |= RX_FLAG_NO_SIGNAL_VAL;
		return;
	}

	/*
	 * Check if the RSSI for the last subframe in an A-MPDU
	 * or an unaggregated frame is valid.
	 */
	if (rx_stats->rs_rssi == ATH9K_RSSI_BAD) {
		rxs->flag |= RX_FLAG_NO_SIGNAL_VAL;
		return;
	}

	/*
	 * Update Beacon RSSI, this is used by ANI.
	 */
	if (rx_stats->is_mybeacon &&
	    ((ah->opmode == NL80211_IFTYPE_STATION) ||
	     (ah->opmode == NL80211_IFTYPE_ADHOC))) {
		ATH_RSSI_LPF(sc->last_rssi, rx_stats->rs_rssi);
		last_rssi = sc->last_rssi;

		if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
			rssi = ATH_EP_RND(last_rssi, ATH_RSSI_EP_MULTIPLIER);
		if (rssi < 0)
			rssi = 0;

		ah->stats.avgbrssi = rssi;
	}

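	/*
	 * rs_rssi is reported in dB above the noise floor, so e.g. a
	 * noise floor of -95 dBm and rs_rssi of 35 yields -60 dBm.
	 */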
	rxs->signal = ah->noise + rx_stats->rs_rssi;
}

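/*
 * Combine the 32-bit on-air timestamp with the full 64-bit TSF to get
 * the frame's mactime, correcting for wraparound of the low word.
 * Worked example: tsf = 0x1_00000100 with rs_tstamp = 0xffffff00 means
 * the timestamp was latched just before the low 32 bits wrapped; the
 * raw combination 0x1_ffffff00 is ahead of the TSF by far more than
 * 0x10000000, so 2^32 is subtracted, giving mactime = 0xffffff00.
 */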
static void ath9k_process_tsf(struct ath_rx_status *rs,
			      struct ieee80211_rx_status *rxs,
			      u64 tsf)
{
	u32 tsf_lower = tsf & 0xffffffff;

	rxs->mactime = (tsf & ~0xffffffffULL) | rs->rs_tstamp;
	if (rs->rs_tstamp > tsf_lower &&
	    unlikely(rs->rs_tstamp - tsf_lower > 0x10000000))
		rxs->mactime -= 0x100000000ULL;

	if (rs->rs_tstamp < tsf_lower &&
	    unlikely(tsf_lower - rs->rs_tstamp > 0x10000000))
		rxs->mactime += 0x100000000ULL;
}

#ifdef CONFIG_ATH9K_DEBUGFS
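/*
 * The hardware reports this RSSI as a signed byte, with 0x80 marking an
 * invalid measurement. Example: 0x90 (144) becomes -112 after the s8
 * cast, while 0x80, which would otherwise read as -128, is mapped to 0.
 */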
static s8 fix_rssi_inv_only(u8 rssi_val)
{
	if (rssi_val == 128)
		rssi_val = 0;
	return (s8) rssi_val;
}
#endif

/* returns 1 if this was a spectral frame, even if not handled. */
static int ath_process_fft(struct ath_softc *sc, struct ieee80211_hdr *hdr,
			   struct ath_rx_status *rs, u64 tsf)
{
#ifdef CONFIG_ATH9K_DEBUGFS
	struct ath_hw *ah = sc->sc_ah;
	u8 bins[SPECTRAL_HT20_NUM_BINS];
	u8 *vdata = (u8 *)hdr;
	struct fft_sample_ht20 fft_sample;
	struct ath_radar_info *radar_info;
	struct ath_ht20_mag_info *mag_info;
	int len = rs->rs_datalen;
	int dc_pos;
	u16 length, max_magnitude;

	/* AR9280 and before report via ATH9K_PHYERR_RADAR, AR93xx and newer
	 * via ATH9K_PHYERR_SPECTRAL. Haven't seen ATH9K_PHYERR_FALSE_RADAR_EXT
	 * yet, but this is supposed to be possible as well.
	 */
	if (rs->rs_phyerr != ATH9K_PHYERR_RADAR &&
	    rs->rs_phyerr != ATH9K_PHYERR_FALSE_RADAR_EXT &&
	    rs->rs_phyerr != ATH9K_PHYERR_SPECTRAL)
		return 0;

	/* check if spectral scan bit is set. This does not have to be checked
	 * if received through a SPECTRAL phy error, but shouldn't hurt.
	 */
	radar_info = ((struct ath_radar_info *)&vdata[len]) - 1;
	if (!(radar_info->pulse_bw_info & SPECTRAL_SCAN_BITMASK))
		return 0;

	/* Variation in the data length is possible and will be fixed later.
	 * Note that we only support HT20 for now.
	 *
	 * TODO: add HT20_40 support as well.
	 */
	if ((len > SPECTRAL_HT20_TOTAL_DATA_LEN + 2) ||
	    (len < SPECTRAL_HT20_TOTAL_DATA_LEN - 1))
		return 1;

	fft_sample.tlv.type = ATH_FFT_SAMPLE_HT20;
	length = sizeof(fft_sample) - sizeof(fft_sample.tlv);
	fft_sample.tlv.length = __cpu_to_be16(length);

	fft_sample.freq = __cpu_to_be16(ah->curchan->chan->center_freq);
	fft_sample.rssi = fix_rssi_inv_only(rs->rs_rssi_ctl0);
	fft_sample.noise = ah->noise;

	switch (len - SPECTRAL_HT20_TOTAL_DATA_LEN) {
	case 0:
		/* length correct, nothing to do. */
		memcpy(bins, vdata, SPECTRAL_HT20_NUM_BINS);
		break;
	case -1:
		/* first byte missing, duplicate it. */
		memcpy(&bins[1], vdata, SPECTRAL_HT20_NUM_BINS - 1);
		bins[0] = vdata[0];
		break;
	case 2:
		/* MAC added 2 extra bytes at bin 30 and 32, remove them. */
		memcpy(bins, vdata, 30);
		bins[30] = vdata[31];
		memcpy(&bins[31], &vdata[33], SPECTRAL_HT20_NUM_BINS - 31);
		break;
	case 1:
		/* MAC added 2 extra bytes AND the first byte is missing,
		 * so the inserted bytes sit at offsets 29 and 31 of the
		 * received data, one earlier than in the complete frame.
		 * Duplicate the first byte (as in the case -1 handling)
		 * and skip the two inserted bytes.
		 */
		bins[0] = vdata[0];
		memcpy(&bins[1], vdata, 29);
		bins[30] = vdata[30];
		memcpy(&bins[31], &vdata[32], SPECTRAL_HT20_NUM_BINS - 31);
		break;
	default:
		return 1;
	}

	/* DC value (value in the middle) is the blind spot of the spectral
	 * sample and invalid, interpolate it.
	 */
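	/* e.g. with bins[27] = 10 and bins[29] = 14, the DC bin
	 * bins[28] is set to 12. */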
	dc_pos = SPECTRAL_HT20_NUM_BINS / 2;
	bins[dc_pos] = (bins[dc_pos + 1] + bins[dc_pos - 1]) / 2;

	/* mag data is at the end of the frame, in front of radar_info */
	mag_info = ((struct ath_ht20_mag_info *)radar_info) - 1;

	/* copy raw bins without scaling them */
	memcpy(fft_sample.data, bins, SPECTRAL_HT20_NUM_BINS);
	fft_sample.max_exp = mag_info->max_exp & 0xf;

	max_magnitude = spectral_max_magnitude(mag_info->all_bins);
	fft_sample.max_magnitude = __cpu_to_be16(max_magnitude);
	fft_sample.max_index = spectral_max_index(mag_info->all_bins);
	fft_sample.bitmap_weight = spectral_bitmap_weight(mag_info->all_bins);
	fft_sample.tsf = __cpu_to_be64(tsf);

	ath_debug_send_fft_sample(sc, &fft_sample.tlv);
	return 1;
#else
	return 0;
#endif
}

static bool ath9k_is_mybeacon(struct ath_softc *sc, struct ieee80211_hdr *hdr)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);

	if (ieee80211_is_beacon(hdr->frame_control)) {
		RX_STAT_INC(rx_beacons);
		if (!is_zero_ether_addr(common->curbssid) &&
		    ether_addr_equal(hdr->addr3, common->curbssid))
			return true;
	}

	return false;
}

/*
 * For Decrypt or Demic errors, we only mark the packet status here and
 * always push the frame up to let mac80211 handle the actual error case,
 * be it no decryption key or a real decryption error. This lets us keep
 * statistics there.
 */
static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
				   struct sk_buff *skb,
				   struct ath_rx_status *rx_stats,
				   struct ieee80211_rx_status *rx_status,
				   bool *decrypt_error, u64 tsf)
{
	struct ieee80211_hw *hw = sc->hw;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ieee80211_hdr *hdr;
	bool discard_current = sc->rx.discard_next;
	int ret = 0;

	/*
	 * Discard corrupt descriptors which are marked in
	 * ath_get_next_rx_buf().
	 */
	sc->rx.discard_next = rx_stats->rs_more;
	if (discard_current)
		return -EINVAL;

	/*
	 * Discard zero-length packets.
	 */
	if (!rx_stats->rs_datalen) {
		RX_STAT_INC(rx_len_err);
		return -EINVAL;
	}

	/*
	 * rs_status follows rs_datalen so if rs_datalen is too large
	 * we can take a hint that hardware corrupted it, so ignore
	 * those frames.
	 */
	if (rx_stats->rs_datalen > (common->rx_bufsize - ah->caps.rx_status_len)) {
		RX_STAT_INC(rx_len_err);
		return -EINVAL;
	}

	/* Only use status info from the last fragment */
	if (rx_stats->rs_more)
		return 0;

	/*
	 * Return immediately if the RX descriptor has been marked
	 * as corrupt based on the various error bits.
	 *
	 * This is different from the other corrupt descriptor
	 * condition handled above.
	 */
	if (rx_stats->rs_status & ATH9K_RXERR_CORRUPT_DESC) {
		ret = -EINVAL;
		goto exit;
	}

	hdr = (struct ieee80211_hdr *) (skb->data + ah->caps.rx_status_len);

	ath9k_process_tsf(rx_stats, rx_status, tsf);
	ath_debug_stat_rx(sc, rx_stats);

	/*
	 * Process PHY errors and return so that the packet
	 * can be dropped.
	 */
	if (rx_stats->rs_status & ATH9K_RXERR_PHY) {
		ath9k_dfs_process_phyerr(sc, hdr, rx_stats, rx_status->mactime);
		if (ath_process_fft(sc, hdr, rx_stats, rx_status->mactime))
			RX_STAT_INC(rx_spectral);

		ret = -EINVAL;
		goto exit;
	}

	/*
	 * everything but the rate is checked here, the rate check is done
	 * separately to avoid doing two lookups for a rate for each frame.
	 */
	if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error)) {
		ret = -EINVAL;
		goto exit;
	}

	rx_stats->is_mybeacon = ath9k_is_mybeacon(sc, hdr);
	if (rx_stats->is_mybeacon) {
		sc->hw_busy_count = 0;
		ath_start_rx_poll(sc, 3);
	}

	if (ath9k_process_rate(common, hw, rx_stats, rx_status)) {
		ret = -EINVAL;
		goto exit;
	}

	ath9k_process_rssi(common, hw, rx_stats, rx_status);

	rx_status->band = hw->conf.chandef.chan->band;
	rx_status->freq = hw->conf.chandef.chan->center_freq;
	rx_status->antenna = rx_stats->rs_antenna;
	rx_status->flag |= RX_FLAG_MACTIME_END;

#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
	if (ieee80211_is_data_present(hdr->frame_control) &&
	    !ieee80211_is_qos_nullfunc(hdr->frame_control))
		sc->rx.num_pkts++;
#endif

exit:
	sc->rx.discard_next = false;
	return ret;
}

static void ath9k_rx_skb_postprocess(struct ath_common *common,
				     struct sk_buff *skb,
				     struct ath_rx_status *rx_stats,
				     struct ieee80211_rx_status *rxs,
				     bool decrypt_error)
{
	struct ath_hw *ah = common->ah;
	struct ieee80211_hdr *hdr;
	int hdrlen, padpos, padsize;
	u8 keyix;
	__le16 fc;

	/* see if any padding is done by the hw and remove it */
	hdr = (struct ieee80211_hdr *) skb->data;
	hdrlen = ieee80211_get_hdrlen_from_skb(skb);
	fc = hdr->frame_control;
	padpos = ieee80211_hdrlen(fc);

	/* The MAC header is padded to have 32-bit boundary if the
	 * packet payload is non-zero. The general calculation for
	 * padsize would take into account odd header lengths:
	 * padsize = (4 - padpos % 4) % 4; However, since only
	 * even-length headers are used, padding can only be 0 or 2
	 * bytes and we can optimize this a bit. In addition, we must
	 * not try to remove padding from short control frames that do
	 * not have payload. */
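	/* For example, a 26-byte QoS data header gives padpos = 26 and
	 * padsize = 2, so the two hardware pad bytes are removed below. */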
	padsize = padpos & 3;
	if (padsize && skb->len >= padpos + padsize + FCS_LEN) {
		memmove(skb->data + padsize, skb->data, padpos);
		skb_pull(skb, padsize);
	}

	keyix = rx_stats->rs_keyix;

	if (keyix != ATH9K_RXKEYIX_INVALID && !decrypt_error &&
	    ieee80211_has_protected(fc)) {
		rxs->flag |= RX_FLAG_DECRYPTED;
	} else if (ieee80211_has_protected(fc)
		   && !decrypt_error && skb->len >= hdrlen + 4) {
		keyix = skb->data[hdrlen + 3] >> 6;

		if (test_bit(keyix, common->keymap))
			rxs->flag |= RX_FLAG_DECRYPTED;
	}
	if (ah->sw_mgmt_crypto &&
	    (rxs->flag & RX_FLAG_DECRYPTED) &&
	    ieee80211_is_mgmt(fc))
		/* Use software decrypt for management frames. */
		rxs->flag &= ~RX_FLAG_DECRYPTED;
}

/*
 * Run the LNA combining algorithm only in these cases:
 *
 * Standalone WLAN cards with both LNA/Antenna diversity
 * enabled in the EEPROM.
 *
 * WLAN+BT cards which are in the supported card list
 * in ath_pci_id_table and the user has loaded the
 * driver with "bt_ant_diversity" set to true.
 */
static void ath9k_antenna_check(struct ath_softc *sc,
				struct ath_rx_status *rs)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath9k_hw_capabilities *pCap = &ah->caps;
	struct ath_common *common = ath9k_hw_common(ah);

	if (!(ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB))
		return;

	/*
	 * All MPDUs in an aggregate will use the same LNA
	 * as the first MPDU.
	 */
	if (rs->rs_isaggr && !rs->rs_firstaggr)
		return;

	/*
	 * Change the default rx antenna if rx diversity
	 * chooses the other antenna 3 times in a row.
	 */
	if (sc->rx.defant != rs->rs_antenna) {
		if (++sc->rx.rxotherant >= 3)
			ath_setdefantenna(sc, rs->rs_antenna);
	} else {
		sc->rx.rxotherant = 0;
	}

	if (pCap->hw_caps & ATH9K_HW_CAP_BT_ANT_DIV) {
		if (common->bt_ant_diversity)
			ath_ant_comb_scan(sc, rs);
	} else {
		ath_ant_comb_scan(sc, rs);
	}
}

static void ath9k_apply_ampdu_details(struct ath_softc *sc,
	struct ath_rx_status *rs, struct ieee80211_rx_status *rxs)
{
	if (rs->rs_isaggr) {
		rxs->flag |= RX_FLAG_AMPDU_DETAILS | RX_FLAG_AMPDU_LAST_KNOWN;

		rxs->ampdu_reference = sc->rx.ampdu_ref;

		if (!rs->rs_moreaggr) {
			rxs->flag |= RX_FLAG_AMPDU_IS_LAST;
			sc->rx.ampdu_ref++;
		}

		if (rs->rs_flags & ATH9K_RX_DELIM_CRC_PRE)
			rxs->flag |= RX_FLAG_AMPDU_DELIM_CRC_ERROR;
	}
}

int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
{
	struct ath_buf *bf;
	struct sk_buff *skb = NULL, *requeue_skb, *hdr_skb;
	struct ieee80211_rx_status *rxs;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ieee80211_hw *hw = sc->hw;
	int retval;
	struct ath_rx_status rs;
	enum ath9k_rx_qtype qtype;
	bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
	int dma_type;
	u64 tsf = 0;
	unsigned long flags;
	dma_addr_t new_buf_addr;

	if (edma)
		dma_type = DMA_BIDIRECTIONAL;
	else
		dma_type = DMA_FROM_DEVICE;

	qtype = hp ? ATH9K_RX_QUEUE_HP : ATH9K_RX_QUEUE_LP;

	tsf = ath9k_hw_gettsf64(ah);

	do {
		bool decrypt_error = false;

		memset(&rs, 0, sizeof(rs));
		if (edma)
			bf = ath_edma_get_next_rx_buf(sc, &rs, qtype);
		else
			bf = ath_get_next_rx_buf(sc, &rs);

		if (!bf)
			break;

		skb = bf->bf_mpdu;
		if (!skb)
			continue;

		/*
		 * Take frame header from the first fragment and RX status from
		 * the last one.
		 */
		if (sc->rx.frag)
			hdr_skb = sc->rx.frag;
		else
			hdr_skb = skb;

		rxs = IEEE80211_SKB_RXCB(hdr_skb);
		memset(rxs, 0, sizeof(struct ieee80211_rx_status));

		retval = ath9k_rx_skb_preprocess(sc, hdr_skb, &rs, rxs,
						 &decrypt_error, tsf);
		if (retval)
			goto requeue_drop_frag;

		/* Ensure we always have an skb to requeue once we are done
		 * processing the current buffer's skb */
		requeue_skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_ATOMIC);

		/* If there is no memory we ignore the current RX'd frame,
		 * tell hardware it can give us a new frame using the old
		 * skb and put it at the tail of the sc->rx.rxbuf list for
		 * processing. */
		if (!requeue_skb) {
			RX_STAT_INC(rx_oom_err);
			goto requeue_drop_frag;
		}

		/* We will now give hardware our shiny new allocated skb */
		new_buf_addr = dma_map_single(sc->dev, requeue_skb->data,
					      common->rx_bufsize, dma_type);
		if (unlikely(dma_mapping_error(sc->dev, new_buf_addr))) {
			dev_kfree_skb_any(requeue_skb);
			goto requeue_drop_frag;
		}

		/* Unmap the frame */
		dma_unmap_single(sc->dev, bf->bf_buf_addr,
				 common->rx_bufsize, dma_type);

		bf->bf_mpdu = requeue_skb;
		bf->bf_buf_addr = new_buf_addr;

		skb_put(skb, rs.rs_datalen + ah->caps.rx_status_len);
		if (ah->caps.rx_status_len)
			skb_pull(skb, ah->caps.rx_status_len);

		if (!rs.rs_more)
			ath9k_rx_skb_postprocess(common, hdr_skb, &rs,
						 rxs, decrypt_error);

		if (rs.rs_more) {
			RX_STAT_INC(rx_frags);
			/*
			 * rs_more indicates chained descriptors which can be
			 * used to link buffers together for a sort of
			 * scatter-gather operation.
			 */
			if (sc->rx.frag) {
				/* too many fragments - cannot handle frame */
				dev_kfree_skb_any(sc->rx.frag);
				dev_kfree_skb_any(skb);
				RX_STAT_INC(rx_too_many_frags_err);
				skb = NULL;
			}
			sc->rx.frag = skb;
			goto requeue;
		}

		if (sc->rx.frag) {
			int space = skb->len - skb_tailroom(hdr_skb);

			if (pskb_expand_head(hdr_skb, 0, space, GFP_ATOMIC) < 0) {
				dev_kfree_skb(skb);
				RX_STAT_INC(rx_oom_err);
				goto requeue_drop_frag;
			}

			sc->rx.frag = NULL;

			skb_copy_from_linear_data(skb, skb_put(hdr_skb, skb->len),
						  skb->len);
			dev_kfree_skb_any(skb);
			skb = hdr_skb;
		}

		if (rxs->flag & RX_FLAG_MMIC_STRIPPED)
			skb_trim(skb, skb->len - 8);

		spin_lock_irqsave(&sc->sc_pm_lock, flags);
		if ((sc->ps_flags & (PS_WAIT_FOR_BEACON |
				     PS_WAIT_FOR_CAB |
				     PS_WAIT_FOR_PSPOLL_DATA)) ||
		    ath9k_check_auto_sleep(sc))
			ath_rx_ps(sc, skb, rs.is_mybeacon);
		spin_unlock_irqrestore(&sc->sc_pm_lock, flags);

		ath9k_antenna_check(sc, &rs);

		ath9k_apply_ampdu_details(sc, &rs, rxs);

		ieee80211_rx(hw, skb);

requeue_drop_frag:
		if (sc->rx.frag) {
			dev_kfree_skb_any(sc->rx.frag);
			sc->rx.frag = NULL;
		}
requeue:
		list_add_tail(&bf->list, &sc->rx.rxbuf);
		if (flush)
			continue;

		if (edma) {
			ath_rx_edma_buf_link(sc, qtype);
		} else {
			ath_rx_buf_relink(sc, bf);
			ath9k_hw_rxena(ah);
		}
	} while (1);

	if (!(ah->imask & ATH9K_INT_RXEOL)) {
		ah->imask |= (ATH9K_INT_RXEOL | ATH9K_INT_RXORN);
		ath9k_hw_set_interrupts(ah);
	}

	return 0;
}
1488