xref: /openbmc/linux/drivers/net/wireless/ath/ath9k/recv.c (revision 7dd65feb)
/*
 * Copyright (c) 2008-2009 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "ath9k.h"

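/*
 * Map a received frame to the virtual wiphy (and hence ieee80211_hw)
 * whose permanent address matches the frame's destination address.
 * Falls back to the primary wiphy when no secondary wiphy matches.
 */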
static struct ieee80211_hw *ath_get_virt_hw(struct ath_softc *sc,
					    struct ieee80211_hdr *hdr)
{
	struct ieee80211_hw *hw = sc->pri_wiphy->hw;
	int i;

	spin_lock_bh(&sc->wiphy_lock);
	for (i = 0; i < sc->num_sec_wiphy; i++) {
		struct ath_wiphy *aphy = sc->sec_wiphy[i];
		if (aphy == NULL)
			continue;
		if (compare_ether_addr(hdr->addr1, aphy->hw->wiphy->perm_addr)
		    == 0) {
			hw = aphy->hw;
			break;
		}
	}
	spin_unlock_bh(&sc->wiphy_lock);
	return hw;
}

/*
 * Setup and link descriptors.
 *
 * 11N: we can no longer afford to self-link the last descriptor.
 * The MAC acknowledges BA status as long as it copies frames to the
 * host buffer (or rx fifo), so a self-linked last descriptor can
 * cause packets to be acknowledged to the sender incorrectly.
 */
static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_desc *ds;
	struct sk_buff *skb;

	ATH_RXBUF_RESET(bf);

	ds = bf->bf_desc;
	ds->ds_link = 0; /* link to null */
	ds->ds_data = bf->bf_buf_addr;

	/* virtual addr of the beginning of the buffer. */
	skb = bf->bf_mpdu;
	BUG_ON(skb == NULL);
	ds->ds_vdata = skb->data;

	/*
	 * Setup rx descriptors. The rx_bufsize here tells the hardware
	 * how much data it can DMA to us and that we are prepared
	 * to process.
	 */
	ath9k_hw_setuprxdesc(ah, ds,
			     common->rx_bufsize,
			     0);

	if (sc->rx.rxlink == NULL)
		ath9k_hw_putrxbuf(ah, bf->bf_daddr);
	else
		*sc->rx.rxlink = bf->bf_daddr;

	sc->rx.rxlink = &ds->ds_link;
	ath9k_hw_rxena(ah);
}

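/*
 * Switch the default receive antenna and reset the diversity
 * counter so antenna selection starts fresh.
 */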
static void ath_setdefantenna(struct ath_softc *sc, u32 antenna)
{
	/* XXX block beacon interrupts */
	ath9k_hw_setantenna(sc->sc_ah, antenna);
	sc->rx.defant = antenna;
	sc->rx.rxotherant = 0;
}

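/*
 * Program the PCU for the current operating state: rx filter,
 * BSSID mask, operating mode, MAC address and multicast filter.
 */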
static void ath_opmode_init(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);

	u32 rfilt, mfilt[2];

	/* configure rx filter */
	rfilt = ath_calcrxfilter(sc);
	ath9k_hw_setrxfilter(ah, rfilt);

	/* configure bssid mask */
	if (ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
		ath_hw_setbssidmask(common);

	/* configure operational mode */
	ath9k_hw_setopmode(ah);

	/* Handle any link-level address change. */
	ath9k_hw_setmac(ah, common->macaddr);

	/* calculate and install multicast filter */
	mfilt[0] = mfilt[1] = ~0;
	ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
}

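/*
 * Allocate the rx descriptor ring and an skb for each descriptor,
 * then DMA-map every buffer, leaving the list ready for
 * ath_startrecv(). Returns 0 on success or a negative errno;
 * on failure, partially allocated resources are released via
 * ath_rx_cleanup().
 */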
int ath_rx_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct sk_buff *skb;
	struct ath_buf *bf;
	int error = 0;

	spin_lock_init(&sc->rx.rxflushlock);
	sc->sc_flags &= ~SC_OP_RXFLUSH;
	spin_lock_init(&sc->rx.rxbuflock);

	common->rx_bufsize = roundup(IEEE80211_MAX_MPDU_LEN,
				     min(common->cachelsz, (u16)64));

	ath_print(common, ATH_DBG_CONFIG, "cachelsz %u rxbufsize %u\n",
		  common->cachelsz, common->rx_bufsize);

	/* Initialize rx descriptors */

	error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf,
				  "rx", nbufs, 1);
	if (error != 0) {
		ath_print(common, ATH_DBG_FATAL,
			  "failed to allocate rx descriptors: %d\n", error);
		goto err;
	}

	list_for_each_entry(bf, &sc->rx.rxbuf, list) {
		skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_KERNEL);
		if (skb == NULL) {
			error = -ENOMEM;
			goto err;
		}

		bf->bf_mpdu = skb;
		bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
						 common->rx_bufsize,
						 DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(sc->dev,
					       bf->bf_buf_addr))) {
			dev_kfree_skb_any(skb);
			bf->bf_mpdu = NULL;
			ath_print(common, ATH_DBG_FATAL,
				  "dma_mapping_error() on RX init\n");
			error = -ENOMEM;
			goto err;
		}
		bf->bf_dmacontext = bf->bf_buf_addr;
	}
	sc->rx.rxlink = NULL;

err:
	if (error)
		ath_rx_cleanup(sc);

	return error;
}

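/*
 * Undo ath_rx_init(): unmap and free every rx skb, then release
 * the descriptor ring if it was allocated.
 */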
void ath_rx_cleanup(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct sk_buff *skb;
	struct ath_buf *bf;

	list_for_each_entry(bf, &sc->rx.rxbuf, list) {
		skb = bf->bf_mpdu;
		if (skb) {
			dma_unmap_single(sc->dev, bf->bf_buf_addr,
					 common->rx_bufsize, DMA_FROM_DEVICE);
			dev_kfree_skb(skb);
		}
	}

	if (sc->rx.rxdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->rx.rxdma, &sc->rx.rxbuf);
}

/*
 * Calculate the receive filter according to the
 * operating mode and state:
 *
 * o always accept unicast, broadcast, and multicast traffic
 * o maintain current state of phy error reception (the hal
 *   may enable phy error frames for noise immunity work)
 * o probe request frames are accepted only when operating in
 *   hostap, adhoc, or monitor modes
 * o enable promiscuous mode according to the interface state
 * o accept beacons:
 *   - when operating in adhoc mode so the 802.11 layer creates
 *     node table entries for peers,
 *   - when operating in station mode for collecting rssi data when
 *     the station is otherwise quiet, or
 *   - when operating as a repeater so we see repeater-sta beacons,
 *   - when scanning
 */

u32 ath_calcrxfilter(struct ath_softc *sc)
{
#define	RX_FILTER_PRESERVE (ATH9K_RX_FILTER_PHYERR | ATH9K_RX_FILTER_PHYRADAR)

	u32 rfilt;

	rfilt = (ath9k_hw_getrxfilter(sc->sc_ah) & RX_FILTER_PRESERVE)
		| ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
		| ATH9K_RX_FILTER_MCAST;

	/* If not a STA, enable processing of Probe Requests */
	if (sc->sc_ah->opmode != NL80211_IFTYPE_STATION)
		rfilt |= ATH9K_RX_FILTER_PROBEREQ;

	/*
	 * Set promiscuous mode when FIF_PROMISC_IN_BSS is enabled for station
	 * mode interface or when in monitor mode. AP mode does not need this
	 * since it receives all in-BSS frames anyway.
	 */
	if (((sc->sc_ah->opmode != NL80211_IFTYPE_AP) &&
	     (sc->rx.rxfilter & FIF_PROMISC_IN_BSS)) ||
	    (sc->sc_ah->opmode == NL80211_IFTYPE_MONITOR))
		rfilt |= ATH9K_RX_FILTER_PROM;

	if (sc->rx.rxfilter & FIF_CONTROL)
		rfilt |= ATH9K_RX_FILTER_CONTROL;

	if ((sc->sc_ah->opmode == NL80211_IFTYPE_STATION) &&
	    !(sc->rx.rxfilter & FIF_BCN_PRBRESP_PROMISC))
		rfilt |= ATH9K_RX_FILTER_MYBEACON;
	else
		rfilt |= ATH9K_RX_FILTER_BEACON;

	if ((AR_SREV_9280_10_OR_LATER(sc->sc_ah) ||
	    AR_SREV_9285_10_OR_LATER(sc->sc_ah)) &&
	    (sc->sc_ah->opmode == NL80211_IFTYPE_AP) &&
	    (sc->rx.rxfilter & FIF_PSPOLL))
		rfilt |= ATH9K_RX_FILTER_PSPOLL;

	if (conf_is_ht(&sc->hw->conf))
		rfilt |= ATH9K_RX_FILTER_COMP_BAR;

	if (sc->sec_wiphy || (sc->rx.rxfilter & FIF_OTHER_BSS)) {
		/* TODO: only needed if more than one BSSID is in use in
		 * station/adhoc mode */
		/* The following may also be needed for other older chips */
		if (sc->sc_ah->hw_version.macVersion == AR_SREV_VERSION_9160)
			rfilt |= ATH9K_RX_FILTER_PROM;
		rfilt |= ATH9K_RX_FILTER_MCAST_BCAST_ALL;
	}

	return rfilt;

#undef RX_FILTER_PRESERVE
}

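/*
 * (Re)start receive: relink all rx buffers into the descriptor
 * chain, hand the first descriptor to the hardware and enable
 * the PCU receive engine.
 */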
int ath_startrecv(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_buf *bf, *tbf;

	spin_lock_bh(&sc->rx.rxbuflock);
	if (list_empty(&sc->rx.rxbuf))
		goto start_recv;

	sc->rx.rxlink = NULL;
	list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list) {
		ath_rx_buf_link(sc, bf);
	}

	/* We could have deleted elements so the list may be empty now */
	if (list_empty(&sc->rx.rxbuf))
		goto start_recv;

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
	ath9k_hw_putrxbuf(ah, bf->bf_daddr);
	ath9k_hw_rxena(ah);

start_recv:
	spin_unlock_bh(&sc->rx.rxbuflock);
	ath_opmode_init(sc);
	ath9k_hw_startpcureceive(ah);

	return 0;
}

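/*
 * Stop receive: disable the PCU first so no new frames are
 * accepted, clear the rx filter, then stop receive DMA.
 * Returns true if the DMA engine stopped cleanly.
 */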
bool ath_stoprecv(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	bool stopped;

	ath9k_hw_stoppcurecv(ah);
	ath9k_hw_setrxfilter(ah, 0);
	stopped = ath9k_hw_stopdmarecv(ah);
	sc->rx.rxlink = NULL;

	return stopped;
}

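/*
 * Drain pending rx frames: run the rx tasklet in flush mode so
 * completed buffers are requeued without being delivered to mac80211.
 */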
void ath_flushrecv(struct ath_softc *sc)
{
	spin_lock_bh(&sc->rx.rxflushlock);
	sc->sc_flags |= SC_OP_RXFLUSH;
	ath_rx_tasklet(sc, 1);
	sc->sc_flags &= ~SC_OP_RXFLUSH;
	spin_unlock_bh(&sc->rx.rxflushlock);
}

static bool ath_beacon_dtim_pending_cab(struct sk_buff *skb)
{
	/* Check whether the Beacon frame has DTIM indicating buffered bc/mc */
	struct ieee80211_mgmt *mgmt;
	u8 *pos, *end, id, elen;
	struct ieee80211_tim_ie *tim;

	mgmt = (struct ieee80211_mgmt *)skb->data;
	pos = mgmt->u.beacon.variable;
	end = skb->data + skb->len;

	while (pos + 2 < end) {
		id = *pos++;
		elen = *pos++;
		if (pos + elen > end)
			break;

		if (id == WLAN_EID_TIM) {
			if (elen < sizeof(*tim))
				break;
			tim = (struct ieee80211_tim_ie *) pos;
			if (tim->dtim_count != 0)
				break;
			return tim->bitmap_ctrl & 0x01;
		}

		pos += elen;
	}

	return false;
}

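/*
 * Handle a beacon received from our AP while in powersave:
 * resynchronize the beacon timers if needed and decide whether to
 * stay awake for buffered broadcast/multicast (CAB) traffic.
 */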
static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
{
	struct ieee80211_mgmt *mgmt;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);

	/* Require frame header + timestamp + beacon interval + capability */
	if (skb->len < 24 + 8 + 2 + 2)
		return;

	mgmt = (struct ieee80211_mgmt *)skb->data;
	if (memcmp(common->curbssid, mgmt->bssid, ETH_ALEN) != 0)
		return; /* not from our current AP */

	sc->sc_flags &= ~SC_OP_WAIT_FOR_BEACON;

	if (sc->sc_flags & SC_OP_BEACON_SYNC) {
		sc->sc_flags &= ~SC_OP_BEACON_SYNC;
		ath_print(common, ATH_DBG_PS,
			  "Reconfigure Beacon timers based on "
			  "timestamp from the AP\n");
		ath_beacon_config(sc, NULL);
	}

	if (ath_beacon_dtim_pending_cab(skb)) {
		/*
		 * Remain awake waiting for buffered broadcast/multicast
		 * frames. If the last broadcast/multicast frame is not
		 * received properly, the next beacon frame will work as
		 * a backup trigger for returning into NETWORK SLEEP state,
		 * so we are waiting for it as well.
		 */
		ath_print(common, ATH_DBG_PS, "Received DTIM beacon indicating "
			  "buffered broadcast/multicast frame(s)\n");
		sc->sc_flags |= SC_OP_WAIT_FOR_CAB | SC_OP_WAIT_FOR_BEACON;
		return;
	}

	if (sc->sc_flags & SC_OP_WAIT_FOR_CAB) {
		/*
		 * This can happen if a broadcast frame is dropped or the AP
		 * fails to send a frame indicating that all CAB frames have
		 * been delivered.
		 */
		sc->sc_flags &= ~SC_OP_WAIT_FOR_CAB;
		ath_print(common, ATH_DBG_PS,
			  "PS wait for CAB frames timed out\n");
	}
}

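/*
 * Powersave rx path: inspect each received frame and clear the
 * relevant SC_OP_WAIT_FOR_* flags once the frame we were waiting
 * for (beacon, CAB or PS-Poll response) has arrived.
 */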
static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);

	hdr = (struct ieee80211_hdr *)skb->data;

	/* Process Beacon and CAB receive in PS state */
	if ((sc->sc_flags & SC_OP_WAIT_FOR_BEACON) &&
	    ieee80211_is_beacon(hdr->frame_control))
		ath_rx_ps_beacon(sc, skb);
	else if ((sc->sc_flags & SC_OP_WAIT_FOR_CAB) &&
		 (ieee80211_is_data(hdr->frame_control) ||
		  ieee80211_is_action(hdr->frame_control)) &&
		 is_multicast_ether_addr(hdr->addr1) &&
		 !ieee80211_has_moredata(hdr->frame_control)) {
		/*
		 * No more broadcast/multicast frames to be received at this
		 * point.
		 */
		sc->sc_flags &= ~SC_OP_WAIT_FOR_CAB;
		ath_print(common, ATH_DBG_PS,
			  "All PS CAB frames received, back to sleep\n");
	} else if ((sc->sc_flags & SC_OP_WAIT_FOR_PSPOLL_DATA) &&
		   !is_multicast_ether_addr(hdr->addr1) &&
		   !ieee80211_has_morefrags(hdr->frame_control)) {
		sc->sc_flags &= ~SC_OP_WAIT_FOR_PSPOLL_DATA;
		ath_print(common, ATH_DBG_PS,
			  "Going back to sleep after having received "
			  "PS-Poll data (0x%x)\n",
			  sc->sc_flags & (SC_OP_WAIT_FOR_BEACON |
					  SC_OP_WAIT_FOR_CAB |
					  SC_OP_WAIT_FOR_PSPOLL_DATA |
					  SC_OP_WAIT_FOR_TX_ACK));
	}
}

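/*
 * Hand a completed frame to mac80211. Unicast frames go to the
 * virtual wiphy selected by receiver address; broadcast/multicast
 * frames are copied to every active secondary wiphy as well.
 */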
static void ath_rx_send_to_mac80211(struct ieee80211_hw *hw,
				    struct ath_softc *sc, struct sk_buff *skb,
				    struct ieee80211_rx_status *rxs)
{
	struct ieee80211_hdr *hdr;

	hdr = (struct ieee80211_hdr *)skb->data;

	/* Send the frame to mac80211 */
	if (is_multicast_ether_addr(hdr->addr1)) {
		int i;
		/*
		 * Deliver broadcast/multicast frames to all suitable
		 * virtual wiphys.
		 */
		/* TODO: filter based on channel configuration */
		for (i = 0; i < sc->num_sec_wiphy; i++) {
			struct ath_wiphy *aphy = sc->sec_wiphy[i];
			struct sk_buff *nskb;
			if (aphy == NULL)
				continue;
			nskb = skb_copy(skb, GFP_ATOMIC);
			if (!nskb)
				continue;
			ieee80211_rx(aphy->hw, nskb);
		}
		ieee80211_rx(sc->hw, skb);
	} else {
		/* Deliver unicast frames based on receiver address */
		ieee80211_rx(hw, skb);
	}
}

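/*
 * Main receive loop, run from the rx tasklet (or from ath_flushrecv()
 * with flush set): walk the rx buffer list, process each completed
 * descriptor, deliver the frame to mac80211 and immediately requeue
 * a fresh buffer to the hardware.
 */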
int ath_rx_tasklet(struct ath_softc *sc, int flush)
{
#define PA2DESC(_sc, _pa)                                               \
	((struct ath_desc *)((caddr_t)(_sc)->rx.rxdma.dd_desc +		\
			     ((_pa) - (_sc)->rx.rxdma.dd_desc_paddr)))

	struct ath_buf *bf;
	struct ath_desc *ds;
	struct ath_rx_status *rx_stats;
	struct sk_buff *skb = NULL, *requeue_skb;
	struct ieee80211_rx_status *rxs;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	/*
	 * The hw can technically differ from common->hw when using ath9k
	 * virtual wiphy; to account for that we iterate over the active
	 * wiphys and find the appropriate wiphy and therefore hw.
	 */
	struct ieee80211_hw *hw = NULL;
	struct ieee80211_hdr *hdr;
	int retval;
	bool decrypt_error = false;

	spin_lock_bh(&sc->rx.rxbuflock);

	do {
		/* If handling rx interrupt and flush is in progress => exit */
		if ((sc->sc_flags & SC_OP_RXFLUSH) && (flush == 0))
			break;

		if (list_empty(&sc->rx.rxbuf)) {
			sc->rx.rxlink = NULL;
			break;
		}

		bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
		ds = bf->bf_desc;

		/*
		 * Must provide the virtual address of the current
		 * descriptor, the physical address, and the virtual
		 * address of the next descriptor in the h/w chain.
		 * This allows the HAL to look ahead to see if the
		 * hardware is done with a descriptor by checking the
		 * done bit in the following descriptor and the address
		 * of the current descriptor the DMA engine is working
		 * on.  All this is necessary because of our use of
		 * a self-linked list to avoid rx overruns.
		 */
		retval = ath9k_hw_rxprocdesc(ah, ds,
					     bf->bf_daddr,
					     PA2DESC(sc, ds->ds_link),
					     0);
		if (retval == -EINPROGRESS) {
			struct ath_buf *tbf;
			struct ath_desc *tds;

			if (list_is_last(&bf->list, &sc->rx.rxbuf)) {
				sc->rx.rxlink = NULL;
				break;
			}

			tbf = list_entry(bf->list.next, struct ath_buf, list);

			/*
			 * On some hardware the descriptor status words could
			 * get corrupted, including the done bit. Because of
			 * this, check if the next descriptor's done bit is
			 * set or not.
			 *
			 * If the next descriptor's done bit is set, the current
			 * descriptor has been corrupted. Force s/w to discard
			 * this descriptor and continue...
			 */
			tds = tbf->bf_desc;
			retval = ath9k_hw_rxprocdesc(ah, tds, tbf->bf_daddr,
						     PA2DESC(sc, tds->ds_link), 0);
			if (retval == -EINPROGRESS)
				break;
		}

		skb = bf->bf_mpdu;
		if (!skb)
			continue;

		/*
		 * Synchronize the DMA transfer with CPU before
		 * 1. accessing the frame
		 * 2. requeueing the same buffer to h/w
		 */
		dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
					common->rx_bufsize,
					DMA_FROM_DEVICE);

		hdr = (struct ieee80211_hdr *) skb->data;
		rxs = IEEE80211_SKB_RXCB(skb);

		hw = ath_get_virt_hw(sc, hdr);
		rx_stats = &ds->ds_rxstat;

		/*
		 * If we're asked to flush receive queue, directly
		 * chain it back at the queue without processing it.
		 */
		if (flush)
			goto requeue;

		retval = ath9k_cmn_rx_skb_preprocess(common, hw, skb, rx_stats,
						     rxs, &decrypt_error);
		if (retval)
			goto requeue;

		/* Ensure we always have an skb to requeue once we are done
		 * processing the current buffer's skb */
		requeue_skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_ATOMIC);

		/* If there is no memory we ignore the current RX'd frame,
		 * tell hardware it can give us a new frame using the old
		 * skb and put it at the tail of the sc->rx.rxbuf list for
		 * processing. */
		if (!requeue_skb)
			goto requeue;

		/* Unmap the frame */
		dma_unmap_single(sc->dev, bf->bf_buf_addr,
				 common->rx_bufsize,
				 DMA_FROM_DEVICE);

		skb_put(skb, rx_stats->rs_datalen);

		ath9k_cmn_rx_skb_postprocess(common, skb, rx_stats,
					     rxs, decrypt_error);

		/* We will now give hardware our shiny new allocated skb */
		bf->bf_mpdu = requeue_skb;
		bf->bf_buf_addr = dma_map_single(sc->dev, requeue_skb->data,
						 common->rx_bufsize,
						 DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(sc->dev,
					       bf->bf_buf_addr))) {
			dev_kfree_skb_any(requeue_skb);
			bf->bf_mpdu = NULL;
			ath_print(common, ATH_DBG_FATAL,
				  "dma_mapping_error() on RX\n");
			ath_rx_send_to_mac80211(hw, sc, skb, rxs);
			break;
		}
		bf->bf_dmacontext = bf->bf_buf_addr;

		/*
		 * Change the default rx antenna if rx diversity chooses the
		 * other antenna 3 times in a row.
		 */
		if (sc->rx.defant != rx_stats->rs_antenna) {
			if (++sc->rx.rxotherant >= 3)
				ath_setdefantenna(sc, rx_stats->rs_antenna);
		} else {
			sc->rx.rxotherant = 0;
		}

		if (unlikely(sc->sc_flags & (SC_OP_WAIT_FOR_BEACON |
					     SC_OP_WAIT_FOR_CAB |
					     SC_OP_WAIT_FOR_PSPOLL_DATA)))
			ath_rx_ps(sc, skb);

		ath_rx_send_to_mac80211(hw, sc, skb, rxs);

requeue:
		list_move_tail(&bf->list, &sc->rx.rxbuf);
		ath_rx_buf_link(sc, bf);
	} while (1);

	spin_unlock_bh(&sc->rx.rxbuflock);

	return 0;
#undef PA2DESC
}
651