xref: /openbmc/linux/drivers/net/wireless/ath/ath9k/recv.c (revision e93d083f42a126b5ad8137b5f0e8d6f900b332b8)
1203c4805SLuis R. Rodriguez /*
25b68138eSSujith Manoharan  * Copyright (c) 2008-2011 Atheros Communications Inc.
3203c4805SLuis R. Rodriguez  *
4203c4805SLuis R. Rodriguez  * Permission to use, copy, modify, and/or distribute this software for any
5203c4805SLuis R. Rodriguez  * purpose with or without fee is hereby granted, provided that the above
6203c4805SLuis R. Rodriguez  * copyright notice and this permission notice appear in all copies.
7203c4805SLuis R. Rodriguez  *
8203c4805SLuis R. Rodriguez  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9203c4805SLuis R. Rodriguez  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10203c4805SLuis R. Rodriguez  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11203c4805SLuis R. Rodriguez  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12203c4805SLuis R. Rodriguez  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13203c4805SLuis R. Rodriguez  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14203c4805SLuis R. Rodriguez  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15203c4805SLuis R. Rodriguez  */
16203c4805SLuis R. Rodriguez 
17b7f080cfSAlexey Dobriyan #include <linux/dma-mapping.h>
18*e93d083fSSimon Wunderlich #include <linux/relay.h>
19203c4805SLuis R. Rodriguez #include "ath9k.h"
20b622a720SLuis R. Rodriguez #include "ar9003_mac.h"
21203c4805SLuis R. Rodriguez 
22b5c80475SFelix Fietkau #define SKB_CB_ATHBUF(__skb)	(*((struct ath_buf **)__skb->cb))
23b5c80475SFelix Fietkau 
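/*
 * Note (derived from the uses further below; not part of the original
 * file): the skb->cb control area is reused to remember which ath_buf
 * owns an skb while it sits in an EDMA rx FIFO:
 *
 *	SKB_CB_ATHBUF(skb) = bf;	remember the buffer when queueing
 *	...
 *	bf = SKB_CB_ATHBUF(skb);	recover it on completion
 */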
24ededf1f8SVasanthakumar Thiagarajan static inline bool ath9k_check_auto_sleep(struct ath_softc *sc)
25ededf1f8SVasanthakumar Thiagarajan {
26ededf1f8SVasanthakumar Thiagarajan 	return sc->ps_enabled &&
27ededf1f8SVasanthakumar Thiagarajan 	       (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP);
28ededf1f8SVasanthakumar Thiagarajan }
29ededf1f8SVasanthakumar Thiagarajan 
30203c4805SLuis R. Rodriguez /*
31203c4805SLuis R. Rodriguez  * Setup and link descriptors.
32203c4805SLuis R. Rodriguez  *
33203c4805SLuis R. Rodriguez  * 11N: we can no longer afford to self link the last descriptor.
34203c4805SLuis R. Rodriguez  * MAC acknowledges BA status as long as it copies frames to host
35203c4805SLuis R. Rodriguez  * buffer (or rx fifo). This can incorrectly acknowledge packets
36203c4805SLuis R. Rodriguez  * to a sender if last desc is self-linked.
37203c4805SLuis R. Rodriguez  */
38203c4805SLuis R. Rodriguez static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
39203c4805SLuis R. Rodriguez {
40203c4805SLuis R. Rodriguez 	struct ath_hw *ah = sc->sc_ah;
41cc861f74SLuis R. Rodriguez 	struct ath_common *common = ath9k_hw_common(ah);
42203c4805SLuis R. Rodriguez 	struct ath_desc *ds;
43203c4805SLuis R. Rodriguez 	struct sk_buff *skb;
44203c4805SLuis R. Rodriguez 
45203c4805SLuis R. Rodriguez 	ATH_RXBUF_RESET(bf);
46203c4805SLuis R. Rodriguez 
47203c4805SLuis R. Rodriguez 	ds = bf->bf_desc;
48203c4805SLuis R. Rodriguez 	ds->ds_link = 0; /* link to null */
49203c4805SLuis R. Rodriguez 	ds->ds_data = bf->bf_buf_addr;
50203c4805SLuis R. Rodriguez 
51203c4805SLuis R. Rodriguez 	/* virtual addr of the beginning of the buffer. */
52203c4805SLuis R. Rodriguez 	skb = bf->bf_mpdu;
539680e8a3SLuis R. Rodriguez 	BUG_ON(skb == NULL);
54203c4805SLuis R. Rodriguez 	ds->ds_vdata = skb->data;
55203c4805SLuis R. Rodriguez 
56cc861f74SLuis R. Rodriguez 	/*
57cc861f74SLuis R. Rodriguez 	 * Set up the rx descriptor. The rx_bufsize here tells the hardware
58203c4805SLuis R. Rodriguez 	 * how much data it can DMA to us, i.e. how much data we are
59cc861f74SLuis R. Rodriguez 	 * prepared to process.
60cc861f74SLuis R. Rodriguez 	 */
61203c4805SLuis R. Rodriguez 	ath9k_hw_setuprxdesc(ah, ds,
62cc861f74SLuis R. Rodriguez 			     common->rx_bufsize,
63203c4805SLuis R. Rodriguez 			     0);
64203c4805SLuis R. Rodriguez 
65203c4805SLuis R. Rodriguez 	if (sc->rx.rxlink == NULL)
66203c4805SLuis R. Rodriguez 		ath9k_hw_putrxbuf(ah, bf->bf_daddr);
67203c4805SLuis R. Rodriguez 	else
68203c4805SLuis R. Rodriguez 		*sc->rx.rxlink = bf->bf_daddr;
69203c4805SLuis R. Rodriguez 
70203c4805SLuis R. Rodriguez 	sc->rx.rxlink = &ds->ds_link;
71203c4805SLuis R. Rodriguez }
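
/*
 * Note (illustration inferred from ath_rx_buf_link() above): once a few
 * buffers have been linked, the legacy rx path looks like
 *
 *	RXDP -> desc[0].ds_link -> desc[1].ds_link -> ... -> desc[n].ds_link = 0
 *
 * sc->rx.rxlink always points at the ds_link word of the last queued
 * descriptor, so appending one more buffer is just the two stores at the
 * end of the function above.
 */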
72203c4805SLuis R. Rodriguez 
73203c4805SLuis R. Rodriguez static void ath_setdefantenna(struct ath_softc *sc, u32 antenna)
74203c4805SLuis R. Rodriguez {
75203c4805SLuis R. Rodriguez 	/* XXX block beacon interrupts */
76203c4805SLuis R. Rodriguez 	ath9k_hw_setantenna(sc->sc_ah, antenna);
77203c4805SLuis R. Rodriguez 	sc->rx.defant = antenna;
78203c4805SLuis R. Rodriguez 	sc->rx.rxotherant = 0;
79203c4805SLuis R. Rodriguez }
80203c4805SLuis R. Rodriguez 
81203c4805SLuis R. Rodriguez static void ath_opmode_init(struct ath_softc *sc)
82203c4805SLuis R. Rodriguez {
83203c4805SLuis R. Rodriguez 	struct ath_hw *ah = sc->sc_ah;
841510718dSLuis R. Rodriguez 	struct ath_common *common = ath9k_hw_common(ah);
851510718dSLuis R. Rodriguez 
86203c4805SLuis R. Rodriguez 	u32 rfilt, mfilt[2];
87203c4805SLuis R. Rodriguez 
88203c4805SLuis R. Rodriguez 	/* configure rx filter */
89203c4805SLuis R. Rodriguez 	rfilt = ath_calcrxfilter(sc);
90203c4805SLuis R. Rodriguez 	ath9k_hw_setrxfilter(ah, rfilt);
91203c4805SLuis R. Rodriguez 
92203c4805SLuis R. Rodriguez 	/* configure bssid mask */
9313b81559SLuis R. Rodriguez 	ath_hw_setbssidmask(common);
94203c4805SLuis R. Rodriguez 
95203c4805SLuis R. Rodriguez 	/* configure operational mode */
96203c4805SLuis R. Rodriguez 	ath9k_hw_setopmode(ah);
97203c4805SLuis R. Rodriguez 
98203c4805SLuis R. Rodriguez 	/* calculate and install multicast filter */
99203c4805SLuis R. Rodriguez 	mfilt[0] = mfilt[1] = ~0;
100203c4805SLuis R. Rodriguez 	ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
101203c4805SLuis R. Rodriguez }
102203c4805SLuis R. Rodriguez 
103b5c80475SFelix Fietkau static bool ath_rx_edma_buf_link(struct ath_softc *sc,
104b5c80475SFelix Fietkau 				 enum ath9k_rx_qtype qtype)
105b5c80475SFelix Fietkau {
106b5c80475SFelix Fietkau 	struct ath_hw *ah = sc->sc_ah;
107b5c80475SFelix Fietkau 	struct ath_rx_edma *rx_edma;
108b5c80475SFelix Fietkau 	struct sk_buff *skb;
109b5c80475SFelix Fietkau 	struct ath_buf *bf;
110b5c80475SFelix Fietkau 
111b5c80475SFelix Fietkau 	rx_edma = &sc->rx.rx_edma[qtype];
112b5c80475SFelix Fietkau 	if (skb_queue_len(&rx_edma->rx_fifo) >= rx_edma->rx_fifo_hwsize)
113b5c80475SFelix Fietkau 		return false;
114b5c80475SFelix Fietkau 
115b5c80475SFelix Fietkau 	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
116b5c80475SFelix Fietkau 	list_del_init(&bf->list);
117b5c80475SFelix Fietkau 
118b5c80475SFelix Fietkau 	skb = bf->bf_mpdu;
119b5c80475SFelix Fietkau 
120b5c80475SFelix Fietkau 	ATH_RXBUF_RESET(bf);
121b5c80475SFelix Fietkau 	memset(skb->data, 0, ah->caps.rx_status_len);
122b5c80475SFelix Fietkau 	dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
123b5c80475SFelix Fietkau 				ah->caps.rx_status_len, DMA_TO_DEVICE);
124b5c80475SFelix Fietkau 
125b5c80475SFelix Fietkau 	SKB_CB_ATHBUF(skb) = bf;
126b5c80475SFelix Fietkau 	ath9k_hw_addrxbuf_edma(ah, bf->bf_buf_addr, qtype);
127b5c80475SFelix Fietkau 	skb_queue_tail(&rx_edma->rx_fifo, skb);
128b5c80475SFelix Fietkau 
129b5c80475SFelix Fietkau 	return true;
130b5c80475SFelix Fietkau }
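
/*
 * Note (assumption based on the code above): zeroing the first
 * rx_status_len bytes and syncing them back to the device clears the
 * self-describing rx status header at the head of the buffer, so stale
 * completion bits from a previous use cannot be mistaken for a finished
 * frame once the buffer is back in the hardware FIFO.
 */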
131b5c80475SFelix Fietkau 
132b5c80475SFelix Fietkau static void ath_rx_addbuffer_edma(struct ath_softc *sc,
133b5c80475SFelix Fietkau 				  enum ath9k_rx_qtype qtype, int size)
134b5c80475SFelix Fietkau {
135b5c80475SFelix Fietkau 	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1366a01f0c0SMohammed Shafi Shajakhan 	struct ath_buf *bf, *tbf;
137b5c80475SFelix Fietkau 
138b5c80475SFelix Fietkau 	if (list_empty(&sc->rx.rxbuf)) {
139d2182b69SJoe Perches 		ath_dbg(common, QUEUE, "No free rx buf available\n");
140b5c80475SFelix Fietkau 		return;
141b5c80475SFelix Fietkau 	}
142b5c80475SFelix Fietkau 
1436a01f0c0SMohammed Shafi Shajakhan 	list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list)
144b5c80475SFelix Fietkau 		if (!ath_rx_edma_buf_link(sc, qtype))
145b5c80475SFelix Fietkau 			break;
146b5c80475SFelix Fietkau 
147b5c80475SFelix Fietkau }
148b5c80475SFelix Fietkau 
149b5c80475SFelix Fietkau static void ath_rx_remove_buffer(struct ath_softc *sc,
150b5c80475SFelix Fietkau 				 enum ath9k_rx_qtype qtype)
151b5c80475SFelix Fietkau {
152b5c80475SFelix Fietkau 	struct ath_buf *bf;
153b5c80475SFelix Fietkau 	struct ath_rx_edma *rx_edma;
154b5c80475SFelix Fietkau 	struct sk_buff *skb;
155b5c80475SFelix Fietkau 
156b5c80475SFelix Fietkau 	rx_edma = &sc->rx.rx_edma[qtype];
157b5c80475SFelix Fietkau 
158b5c80475SFelix Fietkau 	while ((skb = skb_dequeue(&rx_edma->rx_fifo)) != NULL) {
159b5c80475SFelix Fietkau 		bf = SKB_CB_ATHBUF(skb);
160b5c80475SFelix Fietkau 		BUG_ON(!bf);
161b5c80475SFelix Fietkau 		list_add_tail(&bf->list, &sc->rx.rxbuf);
162b5c80475SFelix Fietkau 	}
163b5c80475SFelix Fietkau }
164b5c80475SFelix Fietkau 
165b5c80475SFelix Fietkau static void ath_rx_edma_cleanup(struct ath_softc *sc)
166b5c80475SFelix Fietkau {
167ba542385SMohammed Shafi Shajakhan 	struct ath_hw *ah = sc->sc_ah;
168ba542385SMohammed Shafi Shajakhan 	struct ath_common *common = ath9k_hw_common(ah);
169b5c80475SFelix Fietkau 	struct ath_buf *bf;
170b5c80475SFelix Fietkau 
171b5c80475SFelix Fietkau 	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
172b5c80475SFelix Fietkau 	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);
173b5c80475SFelix Fietkau 
174b5c80475SFelix Fietkau 	list_for_each_entry(bf, &sc->rx.rxbuf, list) {
175ba542385SMohammed Shafi Shajakhan 		if (bf->bf_mpdu) {
176ba542385SMohammed Shafi Shajakhan 			dma_unmap_single(sc->dev, bf->bf_buf_addr,
177ba542385SMohammed Shafi Shajakhan 					common->rx_bufsize,
178ba542385SMohammed Shafi Shajakhan 					DMA_BIDIRECTIONAL);
179b5c80475SFelix Fietkau 			dev_kfree_skb_any(bf->bf_mpdu);
180ba542385SMohammed Shafi Shajakhan 			bf->bf_buf_addr = 0;
181ba542385SMohammed Shafi Shajakhan 			bf->bf_mpdu = NULL;
182ba542385SMohammed Shafi Shajakhan 		}
183b5c80475SFelix Fietkau 	}
184b5c80475SFelix Fietkau }
185b5c80475SFelix Fietkau 
186b5c80475SFelix Fietkau static void ath_rx_edma_init_queue(struct ath_rx_edma *rx_edma, int size)
187b5c80475SFelix Fietkau {
188b5c80475SFelix Fietkau 	skb_queue_head_init(&rx_edma->rx_fifo);
189b5c80475SFelix Fietkau 	rx_edma->rx_fifo_hwsize = size;
190b5c80475SFelix Fietkau }
191b5c80475SFelix Fietkau 
192b5c80475SFelix Fietkau static int ath_rx_edma_init(struct ath_softc *sc, int nbufs)
193b5c80475SFelix Fietkau {
194b5c80475SFelix Fietkau 	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
195b5c80475SFelix Fietkau 	struct ath_hw *ah = sc->sc_ah;
196b5c80475SFelix Fietkau 	struct sk_buff *skb;
197b5c80475SFelix Fietkau 	struct ath_buf *bf;
198b5c80475SFelix Fietkau 	int error = 0, i;
199b5c80475SFelix Fietkau 	u32 size;
200b5c80475SFelix Fietkau 
201b5c80475SFelix Fietkau 	ath9k_hw_set_rx_bufsize(ah, common->rx_bufsize -
202b5c80475SFelix Fietkau 				    ah->caps.rx_status_len);
203b5c80475SFelix Fietkau 
204b5c80475SFelix Fietkau 	ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_LP],
205b5c80475SFelix Fietkau 			       ah->caps.rx_lp_qdepth);
206b5c80475SFelix Fietkau 	ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_HP],
207b5c80475SFelix Fietkau 			       ah->caps.rx_hp_qdepth);
208b5c80475SFelix Fietkau 
209b5c80475SFelix Fietkau 	size = sizeof(struct ath_buf) * nbufs;
210b81950b1SFelix Fietkau 	bf = devm_kzalloc(sc->dev, size, GFP_KERNEL);
211b5c80475SFelix Fietkau 	if (!bf)
212b5c80475SFelix Fietkau 		return -ENOMEM;
213b5c80475SFelix Fietkau 
214b5c80475SFelix Fietkau 	INIT_LIST_HEAD(&sc->rx.rxbuf);
215b5c80475SFelix Fietkau 
216b5c80475SFelix Fietkau 	for (i = 0; i < nbufs; i++, bf++) {
217b5c80475SFelix Fietkau 		skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_KERNEL);
218b5c80475SFelix Fietkau 		if (!skb) {
219b5c80475SFelix Fietkau 			error = -ENOMEM;
220b5c80475SFelix Fietkau 			goto rx_init_fail;
221b5c80475SFelix Fietkau 		}
222b5c80475SFelix Fietkau 
223b5c80475SFelix Fietkau 		memset(skb->data, 0, common->rx_bufsize);
224b5c80475SFelix Fietkau 		bf->bf_mpdu = skb;
225b5c80475SFelix Fietkau 
226b5c80475SFelix Fietkau 		bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
227b5c80475SFelix Fietkau 						 common->rx_bufsize,
228b5c80475SFelix Fietkau 						 DMA_BIDIRECTIONAL);
229b5c80475SFelix Fietkau 		if (unlikely(dma_mapping_error(sc->dev,
230b5c80475SFelix Fietkau 						bf->bf_buf_addr))) {
231b5c80475SFelix Fietkau 				dev_kfree_skb_any(skb);
232b5c80475SFelix Fietkau 				bf->bf_mpdu = NULL;
2336cf9e995SBen Greear 				bf->bf_buf_addr = 0;
2343800276aSJoe Perches 				ath_err(common,
235b5c80475SFelix Fietkau 					"dma_mapping_error() on RX init\n");
236b5c80475SFelix Fietkau 				error = -ENOMEM;
237b5c80475SFelix Fietkau 				goto rx_init_fail;
238b5c80475SFelix Fietkau 		}
239b5c80475SFelix Fietkau 
240b5c80475SFelix Fietkau 		list_add_tail(&bf->list, &sc->rx.rxbuf);
241b5c80475SFelix Fietkau 	}
242b5c80475SFelix Fietkau 
243b5c80475SFelix Fietkau 	return 0;
244b5c80475SFelix Fietkau 
245b5c80475SFelix Fietkau rx_init_fail:
246b5c80475SFelix Fietkau 	ath_rx_edma_cleanup(sc);
247b5c80475SFelix Fietkau 	return error;
248b5c80475SFelix Fietkau }
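
/*
 * Note: the ath_buf array above comes from devm_kzalloc(), so it is
 * released automatically when the device is detached; the error path and
 * ath_rx_edma_cleanup() only need to unmap and free the skbs attached to
 * the individual buffers.
 */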
249b5c80475SFelix Fietkau 
250b5c80475SFelix Fietkau static void ath_edma_start_recv(struct ath_softc *sc)
251b5c80475SFelix Fietkau {
252b5c80475SFelix Fietkau 	spin_lock_bh(&sc->rx.rxbuflock);
253b5c80475SFelix Fietkau 
254b5c80475SFelix Fietkau 	ath9k_hw_rxena(sc->sc_ah);
255b5c80475SFelix Fietkau 
256b5c80475SFelix Fietkau 	ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_HP,
257b5c80475SFelix Fietkau 			      sc->rx.rx_edma[ATH9K_RX_QUEUE_HP].rx_fifo_hwsize);
258b5c80475SFelix Fietkau 
259b5c80475SFelix Fietkau 	ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_LP,
260b5c80475SFelix Fietkau 			      sc->rx.rx_edma[ATH9K_RX_QUEUE_LP].rx_fifo_hwsize);
261b5c80475SFelix Fietkau 
262b5c80475SFelix Fietkau 	ath_opmode_init(sc);
263b5c80475SFelix Fietkau 
2644cb54fa3SSujith Manoharan 	ath9k_hw_startpcureceive(sc->sc_ah, !!(sc->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL));
2657583c550SLuis R. Rodriguez 
2667583c550SLuis R. Rodriguez 	spin_unlock_bh(&sc->rx.rxbuflock);
267b5c80475SFelix Fietkau }
268b5c80475SFelix Fietkau 
269b5c80475SFelix Fietkau static void ath_edma_stop_recv(struct ath_softc *sc)
270b5c80475SFelix Fietkau {
271b5c80475SFelix Fietkau 	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);
272b5c80475SFelix Fietkau 	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
273b5c80475SFelix Fietkau }
274b5c80475SFelix Fietkau 
275203c4805SLuis R. Rodriguez int ath_rx_init(struct ath_softc *sc, int nbufs)
276203c4805SLuis R. Rodriguez {
27727c51f1aSLuis R. Rodriguez 	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
278203c4805SLuis R. Rodriguez 	struct sk_buff *skb;
279203c4805SLuis R. Rodriguez 	struct ath_buf *bf;
280203c4805SLuis R. Rodriguez 	int error = 0;
281203c4805SLuis R. Rodriguez 
2824bdd1e97SLuis R. Rodriguez 	spin_lock_init(&sc->sc_pcu_lock);
283203c4805SLuis R. Rodriguez 	spin_lock_init(&sc->rx.rxbuflock);
284781b14a3SSujith Manoharan 	clear_bit(SC_OP_RXFLUSH, &sc->sc_flags);
285203c4805SLuis R. Rodriguez 
2860d95521eSFelix Fietkau 	common->rx_bufsize = IEEE80211_MAX_MPDU_LEN / 2 +
2870d95521eSFelix Fietkau 			     sc->sc_ah->caps.rx_status_len;
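
	/*
	 * Note (assumption, based on the rs_more/fragment handling later in
	 * this file): rx_bufsize is only half of the maximum MPDU length, so
	 * oversized MPDUs arrive split across two buffers and are stitched
	 * back together in the rx tasklet, which avoids higher-order skb
	 * allocations.
	 */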
2880d95521eSFelix Fietkau 
289b5c80475SFelix Fietkau 	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
290b5c80475SFelix Fietkau 		return ath_rx_edma_init(sc, nbufs);
291b5c80475SFelix Fietkau 	} else {
292d2182b69SJoe Perches 		ath_dbg(common, CONFIG, "cachelsz %u rxbufsize %u\n",
293cc861f74SLuis R. Rodriguez 			common->cachelsz, common->rx_bufsize);
294203c4805SLuis R. Rodriguez 
295203c4805SLuis R. Rodriguez 		/* Initialize rx descriptors */
296203c4805SLuis R. Rodriguez 
297203c4805SLuis R. Rodriguez 		error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf,
2984adfcdedSVasanthakumar Thiagarajan 				"rx", nbufs, 1, 0);
299203c4805SLuis R. Rodriguez 		if (error != 0) {
3003800276aSJoe Perches 			ath_err(common,
301b5c80475SFelix Fietkau 				"failed to allocate rx descriptors: %d\n",
302b5c80475SFelix Fietkau 				error);
303203c4805SLuis R. Rodriguez 			goto err;
304203c4805SLuis R. Rodriguez 		}
305203c4805SLuis R. Rodriguez 
306203c4805SLuis R. Rodriguez 		list_for_each_entry(bf, &sc->rx.rxbuf, list) {
307b5c80475SFelix Fietkau 			skb = ath_rxbuf_alloc(common, common->rx_bufsize,
308b5c80475SFelix Fietkau 					      GFP_KERNEL);
309203c4805SLuis R. Rodriguez 			if (skb == NULL) {
310203c4805SLuis R. Rodriguez 				error = -ENOMEM;
311203c4805SLuis R. Rodriguez 				goto err;
312203c4805SLuis R. Rodriguez 			}
313203c4805SLuis R. Rodriguez 
314203c4805SLuis R. Rodriguez 			bf->bf_mpdu = skb;
315203c4805SLuis R. Rodriguez 			bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
316cc861f74SLuis R. Rodriguez 					common->rx_bufsize,
317203c4805SLuis R. Rodriguez 					DMA_FROM_DEVICE);
318203c4805SLuis R. Rodriguez 			if (unlikely(dma_mapping_error(sc->dev,
319203c4805SLuis R. Rodriguez 							bf->bf_buf_addr))) {
320203c4805SLuis R. Rodriguez 				dev_kfree_skb_any(skb);
321203c4805SLuis R. Rodriguez 				bf->bf_mpdu = NULL;
3226cf9e995SBen Greear 				bf->bf_buf_addr = 0;
3233800276aSJoe Perches 				ath_err(common,
324203c4805SLuis R. Rodriguez 					"dma_mapping_error() on RX init\n");
325203c4805SLuis R. Rodriguez 				error = -ENOMEM;
326203c4805SLuis R. Rodriguez 				goto err;
327203c4805SLuis R. Rodriguez 			}
328203c4805SLuis R. Rodriguez 		}
329203c4805SLuis R. Rodriguez 		sc->rx.rxlink = NULL;
330b5c80475SFelix Fietkau 	}
331203c4805SLuis R. Rodriguez 
332203c4805SLuis R. Rodriguez err:
333203c4805SLuis R. Rodriguez 	if (error)
334203c4805SLuis R. Rodriguez 		ath_rx_cleanup(sc);
335203c4805SLuis R. Rodriguez 
336203c4805SLuis R. Rodriguez 	return error;
337203c4805SLuis R. Rodriguez }
338203c4805SLuis R. Rodriguez 
339203c4805SLuis R. Rodriguez void ath_rx_cleanup(struct ath_softc *sc)
340203c4805SLuis R. Rodriguez {
341cc861f74SLuis R. Rodriguez 	struct ath_hw *ah = sc->sc_ah;
342cc861f74SLuis R. Rodriguez 	struct ath_common *common = ath9k_hw_common(ah);
343203c4805SLuis R. Rodriguez 	struct sk_buff *skb;
344203c4805SLuis R. Rodriguez 	struct ath_buf *bf;
345203c4805SLuis R. Rodriguez 
346b5c80475SFelix Fietkau 	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
347b5c80475SFelix Fietkau 		ath_rx_edma_cleanup(sc);
348b5c80475SFelix Fietkau 		return;
349b5c80475SFelix Fietkau 	} else {
350203c4805SLuis R. Rodriguez 		list_for_each_entry(bf, &sc->rx.rxbuf, list) {
351203c4805SLuis R. Rodriguez 			skb = bf->bf_mpdu;
352203c4805SLuis R. Rodriguez 			if (skb) {
353203c4805SLuis R. Rodriguez 				dma_unmap_single(sc->dev, bf->bf_buf_addr,
354b5c80475SFelix Fietkau 						common->rx_bufsize,
355b5c80475SFelix Fietkau 						DMA_FROM_DEVICE);
356203c4805SLuis R. Rodriguez 				dev_kfree_skb(skb);
3576cf9e995SBen Greear 				bf->bf_buf_addr = 0;
3586cf9e995SBen Greear 				bf->bf_mpdu = NULL;
359203c4805SLuis R. Rodriguez 			}
360203c4805SLuis R. Rodriguez 		}
361203c4805SLuis R. Rodriguez 	}
362b5c80475SFelix Fietkau }
363203c4805SLuis R. Rodriguez 
364203c4805SLuis R. Rodriguez /*
365203c4805SLuis R. Rodriguez  * Calculate the receive filter according to the
366203c4805SLuis R. Rodriguez  * operating mode and state:
367203c4805SLuis R. Rodriguez  *
368203c4805SLuis R. Rodriguez  * o always accept unicast, broadcast, and multicast traffic
369203c4805SLuis R. Rodriguez  * o maintain current state of phy error reception (the hal
370203c4805SLuis R. Rodriguez  *   may enable phy error frames for noise immunity work)
371203c4805SLuis R. Rodriguez  * o probe request frames are accepted only when operating in
372203c4805SLuis R. Rodriguez  *   hostap, adhoc, or monitor modes
373203c4805SLuis R. Rodriguez  * o enable promiscuous mode according to the interface state
374203c4805SLuis R. Rodriguez  * o accept beacons:
375203c4805SLuis R. Rodriguez  *   - when operating in adhoc mode so the 802.11 layer creates
376203c4805SLuis R. Rodriguez  *     node table entries for peers,
377203c4805SLuis R. Rodriguez  *   - when operating in station mode for collecting rssi data when
378203c4805SLuis R. Rodriguez  *     the station is otherwise quiet, or
379203c4805SLuis R. Rodriguez  *   - when operating as a repeater so we see repeater-sta beacons
380203c4805SLuis R. Rodriguez  *   - when scanning
381203c4805SLuis R. Rodriguez  */
382203c4805SLuis R. Rodriguez 
383203c4805SLuis R. Rodriguez u32 ath_calcrxfilter(struct ath_softc *sc)
384203c4805SLuis R. Rodriguez {
385203c4805SLuis R. Rodriguez 	u32 rfilt;
386203c4805SLuis R. Rodriguez 
387ac06697cSFelix Fietkau 	rfilt = ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
388203c4805SLuis R. Rodriguez 		| ATH9K_RX_FILTER_MCAST;
389203c4805SLuis R. Rodriguez 
3909c1d8e4aSJouni Malinen 	if (sc->rx.rxfilter & FIF_PROBE_REQ)
391203c4805SLuis R. Rodriguez 		rfilt |= ATH9K_RX_FILTER_PROBEREQ;
392203c4805SLuis R. Rodriguez 
393203c4805SLuis R. Rodriguez 	/*
394203c4805SLuis R. Rodriguez 	 * Set promiscuous mode when FIF_PROMISC_IN_BSS is enabled for station
395203c4805SLuis R. Rodriguez 	 * mode interface or when in monitor mode. AP mode does not need this
396203c4805SLuis R. Rodriguez 	 * since it receives all in-BSS frames anyway.
397203c4805SLuis R. Rodriguez 	 */
3982e286947SFelix Fietkau 	if (sc->sc_ah->is_monitoring)
399203c4805SLuis R. Rodriguez 		rfilt |= ATH9K_RX_FILTER_PROM;
400203c4805SLuis R. Rodriguez 
401203c4805SLuis R. Rodriguez 	if (sc->rx.rxfilter & FIF_CONTROL)
402203c4805SLuis R. Rodriguez 		rfilt |= ATH9K_RX_FILTER_CONTROL;
403203c4805SLuis R. Rodriguez 
404203c4805SLuis R. Rodriguez 	if ((sc->sc_ah->opmode == NL80211_IFTYPE_STATION) &&
405cfda6695SBen Greear 	    (sc->nvifs <= 1) &&
406203c4805SLuis R. Rodriguez 	    !(sc->rx.rxfilter & FIF_BCN_PRBRESP_PROMISC))
407203c4805SLuis R. Rodriguez 		rfilt |= ATH9K_RX_FILTER_MYBEACON;
408203c4805SLuis R. Rodriguez 	else
409203c4805SLuis R. Rodriguez 		rfilt |= ATH9K_RX_FILTER_BEACON;
410203c4805SLuis R. Rodriguez 
411264bbec8SFelix Fietkau 	if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) ||
41266afad01SSenthil Balasubramanian 	    (sc->rx.rxfilter & FIF_PSPOLL))
413203c4805SLuis R. Rodriguez 		rfilt |= ATH9K_RX_FILTER_PSPOLL;
414203c4805SLuis R. Rodriguez 
4157ea310beSSujith 	if (conf_is_ht(&sc->hw->conf))
4167ea310beSSujith 		rfilt |= ATH9K_RX_FILTER_COMP_BAR;
4177ea310beSSujith 
4187545daf4SFelix Fietkau 	if (sc->nvifs > 1 || (sc->rx.rxfilter & FIF_OTHER_BSS)) {
419a549459cSThomas Wagner 		/* This is needed for older chips */
420a549459cSThomas Wagner 		if (sc->sc_ah->hw_version.macVersion <= AR_SREV_VERSION_9160)
4215eb6ba83SJavier Cardona 			rfilt |= ATH9K_RX_FILTER_PROM;
422203c4805SLuis R. Rodriguez 		rfilt |= ATH9K_RX_FILTER_MCAST_BCAST_ALL;
423203c4805SLuis R. Rodriguez 	}
424203c4805SLuis R. Rodriguez 
425b3d7aa43SGabor Juhos 	if (AR_SREV_9550(sc->sc_ah))
426b3d7aa43SGabor Juhos 		rfilt |= ATH9K_RX_FILTER_4ADDRESS;
427b3d7aa43SGabor Juhos 
428203c4805SLuis R. Rodriguez 	return rfilt;
429203c4805SLuis R. Rodriguez 
430203c4805SLuis R. Rodriguez }
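
/*
 * Worked example (derived from the logic above, ignoring chip-specific
 * additions): for a single associated station interface on an HT channel,
 * with no extra mac80211 filter flags requested, the result is
 *
 *	rfilt = ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST |
 *		ATH9K_RX_FILTER_MCAST | ATH9K_RX_FILTER_MYBEACON |
 *		ATH9K_RX_FILTER_COMP_BAR;
 */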
431203c4805SLuis R. Rodriguez 
432203c4805SLuis R. Rodriguez int ath_startrecv(struct ath_softc *sc)
433203c4805SLuis R. Rodriguez {
434203c4805SLuis R. Rodriguez 	struct ath_hw *ah = sc->sc_ah;
435203c4805SLuis R. Rodriguez 	struct ath_buf *bf, *tbf;
436203c4805SLuis R. Rodriguez 
437b5c80475SFelix Fietkau 	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
438b5c80475SFelix Fietkau 		ath_edma_start_recv(sc);
439b5c80475SFelix Fietkau 		return 0;
440b5c80475SFelix Fietkau 	}
441b5c80475SFelix Fietkau 
442203c4805SLuis R. Rodriguez 	spin_lock_bh(&sc->rx.rxbuflock);
443203c4805SLuis R. Rodriguez 	if (list_empty(&sc->rx.rxbuf))
444203c4805SLuis R. Rodriguez 		goto start_recv;
445203c4805SLuis R. Rodriguez 
446203c4805SLuis R. Rodriguez 	sc->rx.rxlink = NULL;
447203c4805SLuis R. Rodriguez 	list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list) {
448203c4805SLuis R. Rodriguez 		ath_rx_buf_link(sc, bf);
449203c4805SLuis R. Rodriguez 	}
450203c4805SLuis R. Rodriguez 
451203c4805SLuis R. Rodriguez 	/* We could have deleted elements so the list may be empty now */
452203c4805SLuis R. Rodriguez 	if (list_empty(&sc->rx.rxbuf))
453203c4805SLuis R. Rodriguez 		goto start_recv;
454203c4805SLuis R. Rodriguez 
455203c4805SLuis R. Rodriguez 	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
456203c4805SLuis R. Rodriguez 	ath9k_hw_putrxbuf(ah, bf->bf_daddr);
457203c4805SLuis R. Rodriguez 	ath9k_hw_rxena(ah);
458203c4805SLuis R. Rodriguez 
459203c4805SLuis R. Rodriguez start_recv:
460203c4805SLuis R. Rodriguez 	ath_opmode_init(sc);
4614cb54fa3SSujith Manoharan 	ath9k_hw_startpcureceive(ah, !!(sc->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL));
462203c4805SLuis R. Rodriguez 
4637583c550SLuis R. Rodriguez 	spin_unlock_bh(&sc->rx.rxbuflock);
4647583c550SLuis R. Rodriguez 
465203c4805SLuis R. Rodriguez 	return 0;
466203c4805SLuis R. Rodriguez }
467203c4805SLuis R. Rodriguez 
468203c4805SLuis R. Rodriguez bool ath_stoprecv(struct ath_softc *sc)
469203c4805SLuis R. Rodriguez {
470203c4805SLuis R. Rodriguez 	struct ath_hw *ah = sc->sc_ah;
4715882da02SFelix Fietkau 	bool stopped, reset = false;
472203c4805SLuis R. Rodriguez 
4731e450285SLuis R. Rodriguez 	spin_lock_bh(&sc->rx.rxbuflock);
474d47844a0SFelix Fietkau 	ath9k_hw_abortpcurecv(ah);
475203c4805SLuis R. Rodriguez 	ath9k_hw_setrxfilter(ah, 0);
4765882da02SFelix Fietkau 	stopped = ath9k_hw_stopdmarecv(ah, &reset);
477b5c80475SFelix Fietkau 
478b5c80475SFelix Fietkau 	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
479b5c80475SFelix Fietkau 		ath_edma_stop_recv(sc);
480b5c80475SFelix Fietkau 	else
481203c4805SLuis R. Rodriguez 		sc->rx.rxlink = NULL;
4821e450285SLuis R. Rodriguez 	spin_unlock_bh(&sc->rx.rxbuflock);
483203c4805SLuis R. Rodriguez 
484d584747bSRajkumar Manoharan 	if (!(ah->ah_flags & AH_UNPLUGGED) &&
485d584747bSRajkumar Manoharan 	    unlikely(!stopped)) {
486d7fd1b50SBen Greear 		ath_err(ath9k_hw_common(sc->sc_ah),
487d7fd1b50SBen Greear 			"Could not stop RX, we could be "
48878a7685eSLuis R. Rodriguez 			"confusing the DMA engine when we start RX up\n");
489d7fd1b50SBen Greear 		ATH_DBG_WARN_ON_ONCE(!stopped);
490d7fd1b50SBen Greear 	}
4912232d31bSFelix Fietkau 	return stopped && !reset;
492203c4805SLuis R. Rodriguez }
493203c4805SLuis R. Rodriguez 
494203c4805SLuis R. Rodriguez void ath_flushrecv(struct ath_softc *sc)
495203c4805SLuis R. Rodriguez {
496781b14a3SSujith Manoharan 	set_bit(SC_OP_RXFLUSH, &sc->sc_flags);
497b5c80475SFelix Fietkau 	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
498b5c80475SFelix Fietkau 		ath_rx_tasklet(sc, 1, true);
499b5c80475SFelix Fietkau 	ath_rx_tasklet(sc, 1, false);
500781b14a3SSujith Manoharan 	clear_bit(SC_OP_RXFLUSH, &sc->sc_flags);
501203c4805SLuis R. Rodriguez }
502203c4805SLuis R. Rodriguez 
503cc65965cSJouni Malinen static bool ath_beacon_dtim_pending_cab(struct sk_buff *skb)
504cc65965cSJouni Malinen {
505cc65965cSJouni Malinen 	/* Check whether the Beacon frame has DTIM indicating buffered bc/mc */
506cc65965cSJouni Malinen 	struct ieee80211_mgmt *mgmt;
507cc65965cSJouni Malinen 	u8 *pos, *end, id, elen;
508cc65965cSJouni Malinen 	struct ieee80211_tim_ie *tim;
509cc65965cSJouni Malinen 
510cc65965cSJouni Malinen 	mgmt = (struct ieee80211_mgmt *)skb->data;
511cc65965cSJouni Malinen 	pos = mgmt->u.beacon.variable;
512cc65965cSJouni Malinen 	end = skb->data + skb->len;
513cc65965cSJouni Malinen 
514cc65965cSJouni Malinen 	while (pos + 2 < end) {
515cc65965cSJouni Malinen 		id = *pos++;
516cc65965cSJouni Malinen 		elen = *pos++;
517cc65965cSJouni Malinen 		if (pos + elen > end)
518cc65965cSJouni Malinen 			break;
519cc65965cSJouni Malinen 
520cc65965cSJouni Malinen 		if (id == WLAN_EID_TIM) {
521cc65965cSJouni Malinen 			if (elen < sizeof(*tim))
522cc65965cSJouni Malinen 				break;
523cc65965cSJouni Malinen 			tim = (struct ieee80211_tim_ie *) pos;
524cc65965cSJouni Malinen 			if (tim->dtim_count != 0)
525cc65965cSJouni Malinen 				break;
526cc65965cSJouni Malinen 			return tim->bitmap_ctrl & 0x01;
527cc65965cSJouni Malinen 		}
528cc65965cSJouni Malinen 
529cc65965cSJouni Malinen 		pos += elen;
530cc65965cSJouni Malinen 	}
531cc65965cSJouni Malinen 
532cc65965cSJouni Malinen 	return false;
533cc65965cSJouni Malinen }
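
/*
 * Note: the loop above is a plain information-element walk over the
 * beacon body.  The TIM element body it casts to struct ieee80211_tim_ie
 * is laid out as
 *
 *	u8 dtim_count;		0 means this beacon is a DTIM beacon
 *	u8 dtim_period;
 *	u8 bitmap_ctrl;		bit 0 flags buffered group-addressed traffic
 *	u8 virtual_map[];	partial virtual bitmap
 *
 * so true is returned only for a DTIM beacon whose bitmap control
 * advertises buffered broadcast/multicast frames.
 */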
534cc65965cSJouni Malinen 
535cc65965cSJouni Malinen static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
536cc65965cSJouni Malinen {
5371510718dSLuis R. Rodriguez 	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
538cc65965cSJouni Malinen 
539cc65965cSJouni Malinen 	if (skb->len < 24 + 8 + 2 + 2)
540cc65965cSJouni Malinen 		return;
541cc65965cSJouni Malinen 
5421b04b930SSujith 	sc->ps_flags &= ~PS_WAIT_FOR_BEACON;
543293dc5dfSGabor Juhos 
5441b04b930SSujith 	if (sc->ps_flags & PS_BEACON_SYNC) {
5451b04b930SSujith 		sc->ps_flags &= ~PS_BEACON_SYNC;
546d2182b69SJoe Perches 		ath_dbg(common, PS,
547226afe68SJoe Perches 			"Reconfigure Beacon timers based on timestamp from the AP\n");
548ef4ad633SSujith Manoharan 		ath9k_set_beacon(sc);
549ccdfeab6SJouni Malinen 	}
550ccdfeab6SJouni Malinen 
551cc65965cSJouni Malinen 	if (ath_beacon_dtim_pending_cab(skb)) {
552cc65965cSJouni Malinen 		/*
553cc65965cSJouni Malinen 		 * Remain awake waiting for buffered broadcast/multicast
55458f5fffdSGabor Juhos 		 * frames. If the last broadcast/multicast frame is not
55558f5fffdSGabor Juhos 		 * received properly, the next beacon frame will work as
55658f5fffdSGabor Juhos 		 * a backup trigger for returning to NETWORK SLEEP state,
55758f5fffdSGabor Juhos 		 * so we are waiting for it as well.
558cc65965cSJouni Malinen 		 */
559d2182b69SJoe Perches 		ath_dbg(common, PS,
560226afe68SJoe Perches 			"Received DTIM beacon indicating buffered broadcast/multicast frame(s)\n");
5611b04b930SSujith 		sc->ps_flags |= PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON;
562cc65965cSJouni Malinen 		return;
563cc65965cSJouni Malinen 	}
564cc65965cSJouni Malinen 
5651b04b930SSujith 	if (sc->ps_flags & PS_WAIT_FOR_CAB) {
566cc65965cSJouni Malinen 		/*
567cc65965cSJouni Malinen 		 * This can happen if a broadcast frame is dropped or the AP
568cc65965cSJouni Malinen 		 * fails to send a frame indicating that all CAB frames have
569cc65965cSJouni Malinen 		 * been delivered.
570cc65965cSJouni Malinen 		 */
5711b04b930SSujith 		sc->ps_flags &= ~PS_WAIT_FOR_CAB;
572d2182b69SJoe Perches 		ath_dbg(common, PS, "PS wait for CAB frames timed out\n");
573cc65965cSJouni Malinen 	}
574cc65965cSJouni Malinen }
575cc65965cSJouni Malinen 
576f73c604cSRajkumar Manoharan static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb, bool mybeacon)
577cc65965cSJouni Malinen {
578cc65965cSJouni Malinen 	struct ieee80211_hdr *hdr;
579c46917bbSLuis R. Rodriguez 	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
580cc65965cSJouni Malinen 
581cc65965cSJouni Malinen 	hdr = (struct ieee80211_hdr *)skb->data;
582cc65965cSJouni Malinen 
583cc65965cSJouni Malinen 	/* Process Beacon and CAB receive in PS state */
584ededf1f8SVasanthakumar Thiagarajan 	if (((sc->ps_flags & PS_WAIT_FOR_BEACON) || ath9k_check_auto_sleep(sc))
58507c15a3fSSujith Manoharan 	    && mybeacon) {
586cc65965cSJouni Malinen 		ath_rx_ps_beacon(sc, skb);
58707c15a3fSSujith Manoharan 	} else if ((sc->ps_flags & PS_WAIT_FOR_CAB) &&
588cc65965cSJouni Malinen 		   (ieee80211_is_data(hdr->frame_control) ||
589cc65965cSJouni Malinen 		    ieee80211_is_action(hdr->frame_control)) &&
590cc65965cSJouni Malinen 		   is_multicast_ether_addr(hdr->addr1) &&
591cc65965cSJouni Malinen 		   !ieee80211_has_moredata(hdr->frame_control)) {
592cc65965cSJouni Malinen 		/*
593cc65965cSJouni Malinen 		 * No more broadcast/multicast frames to be received at this
594cc65965cSJouni Malinen 		 * point.
595cc65965cSJouni Malinen 		 */
5963fac6dfdSSenthil Balasubramanian 		sc->ps_flags &= ~(PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON);
597d2182b69SJoe Perches 		ath_dbg(common, PS,
598c46917bbSLuis R. Rodriguez 			"All PS CAB frames received, back to sleep\n");
5991b04b930SSujith 	} else if ((sc->ps_flags & PS_WAIT_FOR_PSPOLL_DATA) &&
6009a23f9caSJouni Malinen 		   !is_multicast_ether_addr(hdr->addr1) &&
6019a23f9caSJouni Malinen 		   !ieee80211_has_morefrags(hdr->frame_control)) {
6021b04b930SSujith 		sc->ps_flags &= ~PS_WAIT_FOR_PSPOLL_DATA;
603d2182b69SJoe Perches 		ath_dbg(common, PS,
604226afe68SJoe Perches 			"Going back to sleep after having received PS-Poll data (0x%lx)\n",
6051b04b930SSujith 			sc->ps_flags & (PS_WAIT_FOR_BEACON |
6061b04b930SSujith 					PS_WAIT_FOR_CAB |
6071b04b930SSujith 					PS_WAIT_FOR_PSPOLL_DATA |
6081b04b930SSujith 					PS_WAIT_FOR_TX_ACK));
609cc65965cSJouni Malinen 	}
610cc65965cSJouni Malinen }
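
/*
 * Summary of the power-save bookkeeping above (derived from the code):
 *
 *	frame received					flag(s) cleared
 *	our beacon					PS_WAIT_FOR_BEACON
 *	bc/mc data/action without "more data" set	PS_WAIT_FOR_CAB,
 *							PS_WAIT_FOR_BEACON
 *	unicast, unfragmented frame			PS_WAIT_FOR_PSPOLL_DATA
 *
 * Once none of the PS_WAIT_* flags remain set, the power-save code is
 * free to put the chip back into network sleep.
 */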
611cc65965cSJouni Malinen 
612b5c80475SFelix Fietkau static bool ath_edma_get_buffers(struct ath_softc *sc,
6133a2923e8SFelix Fietkau 				 enum ath9k_rx_qtype qtype,
6143a2923e8SFelix Fietkau 				 struct ath_rx_status *rs,
6153a2923e8SFelix Fietkau 				 struct ath_buf **dest)
616203c4805SLuis R. Rodriguez {
617b5c80475SFelix Fietkau 	struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
618203c4805SLuis R. Rodriguez 	struct ath_hw *ah = sc->sc_ah;
61927c51f1aSLuis R. Rodriguez 	struct ath_common *common = ath9k_hw_common(ah);
620b5c80475SFelix Fietkau 	struct sk_buff *skb;
621b5c80475SFelix Fietkau 	struct ath_buf *bf;
622b5c80475SFelix Fietkau 	int ret;
623203c4805SLuis R. Rodriguez 
624b5c80475SFelix Fietkau 	skb = skb_peek(&rx_edma->rx_fifo);
625b5c80475SFelix Fietkau 	if (!skb)
626b5c80475SFelix Fietkau 		return false;
627203c4805SLuis R. Rodriguez 
628b5c80475SFelix Fietkau 	bf = SKB_CB_ATHBUF(skb);
629b5c80475SFelix Fietkau 	BUG_ON(!bf);
630b5c80475SFelix Fietkau 
631ce9426d1SMing Lei 	dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
632b5c80475SFelix Fietkau 				common->rx_bufsize, DMA_FROM_DEVICE);
633b5c80475SFelix Fietkau 
6343a2923e8SFelix Fietkau 	ret = ath9k_hw_process_rxdesc_edma(ah, rs, skb->data);
635ce9426d1SMing Lei 	if (ret == -EINPROGRESS) {
636ce9426d1SMing Lei 		/* let the device regain ownership of the buffer */
637ce9426d1SMing Lei 		dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
638ce9426d1SMing Lei 				common->rx_bufsize, DMA_FROM_DEVICE);
639b5c80475SFelix Fietkau 		return false;
640ce9426d1SMing Lei 	}
641b5c80475SFelix Fietkau 
642b5c80475SFelix Fietkau 	__skb_unlink(skb, &rx_edma->rx_fifo);
643b5c80475SFelix Fietkau 	if (ret == -EINVAL) {
644b5c80475SFelix Fietkau 		/* corrupt descriptor, skip this one and the following one */
645b5c80475SFelix Fietkau 		list_add_tail(&bf->list, &sc->rx.rxbuf);
646b5c80475SFelix Fietkau 		ath_rx_edma_buf_link(sc, qtype);
647b5c80475SFelix Fietkau 
6483a2923e8SFelix Fietkau 		skb = skb_peek(&rx_edma->rx_fifo);
6493a2923e8SFelix Fietkau 		if (skb) {
650b5c80475SFelix Fietkau 			bf = SKB_CB_ATHBUF(skb);
651b5c80475SFelix Fietkau 			BUG_ON(!bf);
652b5c80475SFelix Fietkau 
653b5c80475SFelix Fietkau 			__skb_unlink(skb, &rx_edma->rx_fifo);
654b5c80475SFelix Fietkau 			list_add_tail(&bf->list, &sc->rx.rxbuf);
655b5c80475SFelix Fietkau 			ath_rx_edma_buf_link(sc, qtype);
656b5c80475SFelix Fietkau 		}
6576bb51c70STom Hughes 
6586bb51c70STom Hughes 		bf = NULL;
6593a2923e8SFelix Fietkau 	}
660b5c80475SFelix Fietkau 
6613a2923e8SFelix Fietkau 	*dest = bf;
662b5c80475SFelix Fietkau 	return true;
663b5c80475SFelix Fietkau }
664b5c80475SFelix Fietkau 
665b5c80475SFelix Fietkau static struct ath_buf *ath_edma_get_next_rx_buf(struct ath_softc *sc,
666b5c80475SFelix Fietkau 						struct ath_rx_status *rs,
667b5c80475SFelix Fietkau 						enum ath9k_rx_qtype qtype)
668b5c80475SFelix Fietkau {
6693a2923e8SFelix Fietkau 	struct ath_buf *bf = NULL;
670b5c80475SFelix Fietkau 
6713a2923e8SFelix Fietkau 	while (ath_edma_get_buffers(sc, qtype, rs, &bf)) {
6723a2923e8SFelix Fietkau 		if (!bf)
6733a2923e8SFelix Fietkau 			continue;
674b5c80475SFelix Fietkau 
675b5c80475SFelix Fietkau 		return bf;
676b5c80475SFelix Fietkau 	}
6773a2923e8SFelix Fietkau 	return NULL;
6783a2923e8SFelix Fietkau }
679b5c80475SFelix Fietkau 
680b5c80475SFelix Fietkau static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
681b5c80475SFelix Fietkau 					   struct ath_rx_status *rs)
682b5c80475SFelix Fietkau {
683b5c80475SFelix Fietkau 	struct ath_hw *ah = sc->sc_ah;
684b5c80475SFelix Fietkau 	struct ath_common *common = ath9k_hw_common(ah);
685b5c80475SFelix Fietkau 	struct ath_desc *ds;
686b5c80475SFelix Fietkau 	struct ath_buf *bf;
687b5c80475SFelix Fietkau 	int ret;
688203c4805SLuis R. Rodriguez 
689203c4805SLuis R. Rodriguez 	if (list_empty(&sc->rx.rxbuf)) {
690203c4805SLuis R. Rodriguez 		sc->rx.rxlink = NULL;
691b5c80475SFelix Fietkau 		return NULL;
692203c4805SLuis R. Rodriguez 	}
693203c4805SLuis R. Rodriguez 
694203c4805SLuis R. Rodriguez 	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
695203c4805SLuis R. Rodriguez 	ds = bf->bf_desc;
696203c4805SLuis R. Rodriguez 
697203c4805SLuis R. Rodriguez 	/*
698203c4805SLuis R. Rodriguez 	 * Must provide the virtual address of the current
699203c4805SLuis R. Rodriguez 	 * descriptor, the physical address, and the virtual
700203c4805SLuis R. Rodriguez 	 * address of the next descriptor in the h/w chain.
701203c4805SLuis R. Rodriguez 	 * This allows the HAL to look ahead to see if the
702203c4805SLuis R. Rodriguez 	 * hardware is done with a descriptor by checking the
703203c4805SLuis R. Rodriguez 	 * done bit in the following descriptor and the address
704203c4805SLuis R. Rodriguez 	 * of the current descriptor the DMA engine is working
705203c4805SLuis R. Rodriguez 	 * on.  All this is necessary because of our use of
706203c4805SLuis R. Rodriguez 	 * a self-linked list to avoid rx overruns.
707203c4805SLuis R. Rodriguez 	 */
7083de21116SRajkumar Manoharan 	ret = ath9k_hw_rxprocdesc(ah, ds, rs);
709b5c80475SFelix Fietkau 	if (ret == -EINPROGRESS) {
71029bffa96SFelix Fietkau 		struct ath_rx_status trs;
711203c4805SLuis R. Rodriguez 		struct ath_buf *tbf;
712203c4805SLuis R. Rodriguez 		struct ath_desc *tds;
713203c4805SLuis R. Rodriguez 
71429bffa96SFelix Fietkau 		memset(&trs, 0, sizeof(trs));
715203c4805SLuis R. Rodriguez 		if (list_is_last(&bf->list, &sc->rx.rxbuf)) {
716203c4805SLuis R. Rodriguez 			sc->rx.rxlink = NULL;
717b5c80475SFelix Fietkau 			return NULL;
718203c4805SLuis R. Rodriguez 		}
719203c4805SLuis R. Rodriguez 
720203c4805SLuis R. Rodriguez 		tbf = list_entry(bf->list.next, struct ath_buf, list);
721203c4805SLuis R. Rodriguez 
722203c4805SLuis R. Rodriguez 		/*
723203c4805SLuis R. Rodriguez 		 * On some hardware the descriptor status words could
724203c4805SLuis R. Rodriguez 		 * get corrupted, including the done bit. Because of
725203c4805SLuis R. Rodriguez 		 * this, check if the next descriptor's done bit is
726203c4805SLuis R. Rodriguez 		 * set or not.
727203c4805SLuis R. Rodriguez 		 *
728203c4805SLuis R. Rodriguez 		 * If the next descriptor's done bit is set, the current
729203c4805SLuis R. Rodriguez 		 * descriptor has been corrupted. Force s/w to discard
730203c4805SLuis R. Rodriguez 		 * this descriptor and continue...
731203c4805SLuis R. Rodriguez 		 */
732203c4805SLuis R. Rodriguez 
733203c4805SLuis R. Rodriguez 		tds = tbf->bf_desc;
7343de21116SRajkumar Manoharan 		ret = ath9k_hw_rxprocdesc(ah, tds, &trs);
735b5c80475SFelix Fietkau 		if (ret == -EINPROGRESS)
736b5c80475SFelix Fietkau 			return NULL;
737203c4805SLuis R. Rodriguez 	}
738203c4805SLuis R. Rodriguez 
739b5c80475SFelix Fietkau 	if (!bf->bf_mpdu)
740b5c80475SFelix Fietkau 		return bf;
741203c4805SLuis R. Rodriguez 
742203c4805SLuis R. Rodriguez 	/*
743203c4805SLuis R. Rodriguez 	 * Synchronize the DMA transfer with CPU before
744203c4805SLuis R. Rodriguez 	 * 1. accessing the frame
745203c4805SLuis R. Rodriguez 	 * 2. requeueing the same buffer to h/w
746203c4805SLuis R. Rodriguez 	 */
747ce9426d1SMing Lei 	dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
748cc861f74SLuis R. Rodriguez 			common->rx_bufsize,
749203c4805SLuis R. Rodriguez 			DMA_FROM_DEVICE);
750203c4805SLuis R. Rodriguez 
751b5c80475SFelix Fietkau 	return bf;
752b5c80475SFelix Fietkau }
753b5c80475SFelix Fietkau 
754d435700fSSujith /* Assumes you've already done the endian to CPU conversion */
755d435700fSSujith static bool ath9k_rx_accept(struct ath_common *common,
7569f167f64SVasanthakumar Thiagarajan 			    struct ieee80211_hdr *hdr,
757d435700fSSujith 			    struct ieee80211_rx_status *rxs,
758d435700fSSujith 			    struct ath_rx_status *rx_stats,
759d435700fSSujith 			    bool *decrypt_error)
760d435700fSSujith {
761ec205999SFelix Fietkau 	struct ath_softc *sc = (struct ath_softc *) common->priv;
76266760eacSFelix Fietkau 	bool is_mc, is_valid_tkip, strip_mic, mic_error;
763d435700fSSujith 	struct ath_hw *ah = common->ah;
764d435700fSSujith 	__le16 fc;
765b7b1b512SVasanthakumar Thiagarajan 	u8 rx_status_len = ah->caps.rx_status_len;
766d435700fSSujith 
767d435700fSSujith 	fc = hdr->frame_control;
768d435700fSSujith 
76966760eacSFelix Fietkau 	is_mc = !!is_multicast_ether_addr(hdr->addr1);
77066760eacSFelix Fietkau 	is_valid_tkip = rx_stats->rs_keyix != ATH9K_RXKEYIX_INVALID &&
77166760eacSFelix Fietkau 		test_bit(rx_stats->rs_keyix, common->tkip_keymap);
772152e585dSBill Jordan 	strip_mic = is_valid_tkip && ieee80211_is_data(fc) &&
7732a5783b8SMichael Liang 		ieee80211_has_protected(fc) &&
774152e585dSBill Jordan 		!(rx_stats->rs_status &
775846d9363SFelix Fietkau 		(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_CRC | ATH9K_RXERR_MIC |
776846d9363SFelix Fietkau 		 ATH9K_RXERR_KEYMISS));
77766760eacSFelix Fietkau 
778f88373faSFelix Fietkau 	/*
779f88373faSFelix Fietkau 	 * Key miss events are only relevant for pairwise keys where the
780f88373faSFelix Fietkau 	 * descriptor does contain a valid key index. This has been observed
781f88373faSFelix Fietkau 	 * mostly with CCMP encryption.
782f88373faSFelix Fietkau 	 */
783bed3d9c0SFelix Fietkau 	if (rx_stats->rs_keyix == ATH9K_RXKEYIX_INVALID ||
784bed3d9c0SFelix Fietkau 	    !test_bit(rx_stats->rs_keyix, common->ccmp_keymap))
785f88373faSFelix Fietkau 		rx_stats->rs_status &= ~ATH9K_RXERR_KEYMISS;
786f88373faSFelix Fietkau 
78715072189SBen Greear 	if (!rx_stats->rs_datalen) {
78815072189SBen Greear 		RX_STAT_INC(rx_len_err);
789d435700fSSujith 		return false;
79015072189SBen Greear 	}
79115072189SBen Greear 
792d435700fSSujith 	/*
793d435700fSSujith 	 * rs_status follows rs_datalen, so an over-large rs_datalen is a
794d435700fSSujith 	 * hint that the hardware corrupted the descriptor; ignore such
795d435700fSSujith 	 * frames.
796d435700fSSujith 	 */
79715072189SBen Greear 	if (rx_stats->rs_datalen > (common->rx_bufsize - rx_status_len)) {
79815072189SBen Greear 		RX_STAT_INC(rx_len_err);
799d435700fSSujith 		return false;
80015072189SBen Greear 	}
801d435700fSSujith 
8020d95521eSFelix Fietkau 	/* Only use error bits from the last fragment */
803d435700fSSujith 	if (rx_stats->rs_more)
8040d95521eSFelix Fietkau 		return true;
805d435700fSSujith 
80666760eacSFelix Fietkau 	mic_error = is_valid_tkip && !ieee80211_is_ctl(fc) &&
80766760eacSFelix Fietkau 		!ieee80211_has_morefrags(fc) &&
80866760eacSFelix Fietkau 		!(le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG) &&
80966760eacSFelix Fietkau 		(rx_stats->rs_status & ATH9K_RXERR_MIC);
81066760eacSFelix Fietkau 
811d435700fSSujith 	/*
812d435700fSSujith 	 * The rx_stats->rs_status will not be set until the end of the
813d435700fSSujith 	 * chained descriptors so it can be ignored if rs_more is set. The
814d435700fSSujith 	 * rs_more will be false at the last element of the chained
815d435700fSSujith 	 * descriptors.
816d435700fSSujith 	 */
817d435700fSSujith 	if (rx_stats->rs_status != 0) {
818846d9363SFelix Fietkau 		u8 status_mask;
819846d9363SFelix Fietkau 
82066760eacSFelix Fietkau 		if (rx_stats->rs_status & ATH9K_RXERR_CRC) {
821d435700fSSujith 			rxs->flag |= RX_FLAG_FAILED_FCS_CRC;
82266760eacSFelix Fietkau 			mic_error = false;
82366760eacSFelix Fietkau 		}
824d435700fSSujith 		if (rx_stats->rs_status & ATH9K_RXERR_PHY)
825d435700fSSujith 			return false;
826d435700fSSujith 
827846d9363SFelix Fietkau 		if ((rx_stats->rs_status & ATH9K_RXERR_DECRYPT) ||
828846d9363SFelix Fietkau 		    (!is_mc && (rx_stats->rs_status & ATH9K_RXERR_KEYMISS))) {
829d435700fSSujith 			*decrypt_error = true;
83066760eacSFelix Fietkau 			mic_error = false;
831d435700fSSujith 		}
83266760eacSFelix Fietkau 
833d435700fSSujith 		/*
834d435700fSSujith 		 * Reject error frames with the exception of
835d435700fSSujith 		 * decryption and MIC failures. For monitor mode,
836d435700fSSujith 		 * we also ignore the CRC error.
837d435700fSSujith 		 */
838846d9363SFelix Fietkau 		status_mask = ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
839846d9363SFelix Fietkau 			      ATH9K_RXERR_KEYMISS;
840846d9363SFelix Fietkau 
841ec205999SFelix Fietkau 		if (ah->is_monitoring && (sc->rx.rxfilter & FIF_FCSFAIL))
842846d9363SFelix Fietkau 			status_mask |= ATH9K_RXERR_CRC;
843846d9363SFelix Fietkau 
844846d9363SFelix Fietkau 		if (rx_stats->rs_status & ~status_mask)
845d435700fSSujith 			return false;
846d435700fSSujith 	}
84766760eacSFelix Fietkau 
84866760eacSFelix Fietkau 	/*
84966760eacSFelix Fietkau 	 * For unicast frames the MIC error bit can have false positives,
85066760eacSFelix Fietkau 	 * so all MIC error reports need to be validated in software.
85166760eacSFelix Fietkau 	 * False negatives are not common, so skip software verification
85266760eacSFelix Fietkau 	 * if the hardware considers the MIC valid.
85366760eacSFelix Fietkau 	 */
85466760eacSFelix Fietkau 	if (strip_mic)
85566760eacSFelix Fietkau 		rxs->flag |= RX_FLAG_MMIC_STRIPPED;
85666760eacSFelix Fietkau 	else if (is_mc && mic_error)
85766760eacSFelix Fietkau 		rxs->flag |= RX_FLAG_MMIC_ERROR;
85866760eacSFelix Fietkau 
859d435700fSSujith 	return true;
860d435700fSSujith }
861d435700fSSujith 
862d435700fSSujith static int ath9k_process_rate(struct ath_common *common,
863d435700fSSujith 			      struct ieee80211_hw *hw,
864d435700fSSujith 			      struct ath_rx_status *rx_stats,
8659f167f64SVasanthakumar Thiagarajan 			      struct ieee80211_rx_status *rxs)
866d435700fSSujith {
867d435700fSSujith 	struct ieee80211_supported_band *sband;
868d435700fSSujith 	enum ieee80211_band band;
869d435700fSSujith 	unsigned int i = 0;
870990e08a0SBen Greear 	struct ath_softc __maybe_unused *sc = common->priv;
871d435700fSSujith 
872d435700fSSujith 	band = hw->conf.channel->band;
873d435700fSSujith 	sband = hw->wiphy->bands[band];
874d435700fSSujith 
875d435700fSSujith 	if (rx_stats->rs_rate & 0x80) {
876d435700fSSujith 		/* HT rate */
877d435700fSSujith 		rxs->flag |= RX_FLAG_HT;
878d435700fSSujith 		if (rx_stats->rs_flags & ATH9K_RX_2040)
879d435700fSSujith 			rxs->flag |= RX_FLAG_40MHZ;
880d435700fSSujith 		if (rx_stats->rs_flags & ATH9K_RX_GI)
881d435700fSSujith 			rxs->flag |= RX_FLAG_SHORT_GI;
882d435700fSSujith 		rxs->rate_idx = rx_stats->rs_rate & 0x7f;
883d435700fSSujith 		return 0;
884d435700fSSujith 	}
885d435700fSSujith 
886d435700fSSujith 	for (i = 0; i < sband->n_bitrates; i++) {
887d435700fSSujith 		if (sband->bitrates[i].hw_value == rx_stats->rs_rate) {
888d435700fSSujith 			rxs->rate_idx = i;
889d435700fSSujith 			return 0;
890d435700fSSujith 		}
891d435700fSSujith 		if (sband->bitrates[i].hw_value_short == rx_stats->rs_rate) {
892d435700fSSujith 			rxs->flag |= RX_FLAG_SHORTPRE;
893d435700fSSujith 			rxs->rate_idx = i;
894d435700fSSujith 			return 0;
895d435700fSSujith 		}
896d435700fSSujith 	}
897d435700fSSujith 
898d435700fSSujith 	/*
899d435700fSSujith 	 * No valid hardware bitrate found -- we should not get here
900d435700fSSujith 	 * because hardware has already validated this frame as OK.
901d435700fSSujith 	 */
902d2182b69SJoe Perches 	ath_dbg(common, ANY,
903226afe68SJoe Perches 		"unsupported hw bitrate detected 0x%02x using 1 Mbit\n",
904226afe68SJoe Perches 		rx_stats->rs_rate);
90515072189SBen Greear 	RX_STAT_INC(rx_rate_err);
906d435700fSSujith 	return -EINVAL;
907d435700fSSujith }
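
/*
 * Example (derived from the code above): HT rates are reported with bit 7
 * set and the MCS index in the low bits, so
 *
 *	rs_rate == 0x87  ->  RX_FLAG_HT set, rxs->rate_idx = 7 (MCS 7)
 *
 * while legacy rates are matched against the band's bitrate table via
 * hw_value / hw_value_short.
 */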
908d435700fSSujith 
909d435700fSSujith static void ath9k_process_rssi(struct ath_common *common,
910d435700fSSujith 			       struct ieee80211_hw *hw,
9119f167f64SVasanthakumar Thiagarajan 			       struct ieee80211_hdr *hdr,
912d435700fSSujith 			       struct ath_rx_status *rx_stats)
913d435700fSSujith {
9149ac58615SFelix Fietkau 	struct ath_softc *sc = hw->priv;
915d435700fSSujith 	struct ath_hw *ah = common->ah;
9169fa23e17SFelix Fietkau 	int last_rssi;
9172ef16755SFelix Fietkau 	int rssi = rx_stats->rs_rssi;
918d435700fSSujith 
919cf3af748SRajkumar Manoharan 	if (!rx_stats->is_mybeacon ||
920cf3af748SRajkumar Manoharan 	    ((ah->opmode != NL80211_IFTYPE_STATION) &&
921cf3af748SRajkumar Manoharan 	     (ah->opmode != NL80211_IFTYPE_ADHOC)))
9229fa23e17SFelix Fietkau 		return;
9239fa23e17SFelix Fietkau 
9249fa23e17SFelix Fietkau 	if (rx_stats->rs_rssi != ATH9K_RSSI_BAD && !rx_stats->rs_moreaggr)
9259ac58615SFelix Fietkau 		ATH_RSSI_LPF(sc->last_rssi, rx_stats->rs_rssi);
926686b9cb9SBen Greear 
9279ac58615SFelix Fietkau 	last_rssi = sc->last_rssi;
928d435700fSSujith 	if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
9292ef16755SFelix Fietkau 		rssi = ATH_EP_RND(last_rssi, ATH_RSSI_EP_MULTIPLIER);
9302ef16755SFelix Fietkau 	if (rssi < 0)
9312ef16755SFelix Fietkau 		rssi = 0;
932d435700fSSujith 
933d435700fSSujith 	/* Update Beacon RSSI, this is used by ANI. */
9342ef16755SFelix Fietkau 	ah->stats.avgbrssi = rssi;
935d435700fSSujith }
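
/*
 * Note (assumption about the helper macros): ATH_RSSI_LPF() appears to
 * keep an exponentially weighted average of the beacon RSSI, scaled by
 * ATH_RSSI_EP_MULTIPLIER, and ATH_EP_RND() rounds that scaled value back
 * to a plain dB figure before it is handed to ANI via ah->stats.avgbrssi.
 */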
936d435700fSSujith 
937d435700fSSujith /*
938d435700fSSujith  * For Decrypt or Demic errors, we only mark packet status here and always push
939d435700fSSujith  * up the frame up to let mac80211 handle the actual error case, be it no
940d435700fSSujith  * the frame up to let mac80211 handle the actual error case, be it no
941d435700fSSujith  */
942d435700fSSujith static int ath9k_rx_skb_preprocess(struct ath_common *common,
943d435700fSSujith 				   struct ieee80211_hw *hw,
9449f167f64SVasanthakumar Thiagarajan 				   struct ieee80211_hdr *hdr,
945d435700fSSujith 				   struct ath_rx_status *rx_stats,
946d435700fSSujith 				   struct ieee80211_rx_status *rx_status,
947d435700fSSujith 				   bool *decrypt_error)
948d435700fSSujith {
949f749b946SFelix Fietkau 	struct ath_hw *ah = common->ah;
950f749b946SFelix Fietkau 
951d435700fSSujith 	/*
952d435700fSSujith 	 * everything but the rate is checked here, the rate check is done
953d435700fSSujith 	 * separately to avoid doing two lookups for a rate for each frame.
954d435700fSSujith 	 */
9559f167f64SVasanthakumar Thiagarajan 	if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error))
956d435700fSSujith 		return -EINVAL;
957d435700fSSujith 
9580d95521eSFelix Fietkau 	/* Only use status info from the last fragment */
9590d95521eSFelix Fietkau 	if (rx_stats->rs_more)
9600d95521eSFelix Fietkau 		return 0;
9610d95521eSFelix Fietkau 
9629f167f64SVasanthakumar Thiagarajan 	ath9k_process_rssi(common, hw, hdr, rx_stats);
963d435700fSSujith 
9649f167f64SVasanthakumar Thiagarajan 	if (ath9k_process_rate(common, hw, rx_stats, rx_status))
965d435700fSSujith 		return -EINVAL;
966d435700fSSujith 
967d435700fSSujith 	rx_status->band = hw->conf.channel->band;
968d435700fSSujith 	rx_status->freq = hw->conf.channel->center_freq;
969f749b946SFelix Fietkau 	rx_status->signal = ah->noise + rx_stats->rs_rssi;
970d435700fSSujith 	rx_status->antenna = rx_stats->rs_antenna;
97196d21371SThomas Pedersen 	rx_status->flag |= RX_FLAG_MACTIME_END;
9722ef16755SFelix Fietkau 	if (rx_stats->rs_moreaggr)
9732ef16755SFelix Fietkau 		rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
974d435700fSSujith 
975d435700fSSujith 	return 0;
976d435700fSSujith }
977d435700fSSujith 
978d435700fSSujith static void ath9k_rx_skb_postprocess(struct ath_common *common,
979d435700fSSujith 				     struct sk_buff *skb,
980d435700fSSujith 				     struct ath_rx_status *rx_stats,
981d435700fSSujith 				     struct ieee80211_rx_status *rxs,
982d435700fSSujith 				     bool decrypt_error)
983d435700fSSujith {
984d435700fSSujith 	struct ath_hw *ah = common->ah;
985d435700fSSujith 	struct ieee80211_hdr *hdr;
986d435700fSSujith 	int hdrlen, padpos, padsize;
987d435700fSSujith 	u8 keyix;
988d435700fSSujith 	__le16 fc;
989d435700fSSujith 
990d435700fSSujith 	/* see if any padding is done by the hw and remove it */
991d435700fSSujith 	hdr = (struct ieee80211_hdr *) skb->data;
992d435700fSSujith 	hdrlen = ieee80211_get_hdrlen_from_skb(skb);
993d435700fSSujith 	fc = hdr->frame_control;
994d435700fSSujith 	padpos = ath9k_cmn_padpos(hdr->frame_control);
995d435700fSSujith 
996d435700fSSujith 	/* The MAC header is padded to a 32-bit boundary if the
997d435700fSSujith 	 * packet payload is non-zero. The general calculation for
998d435700fSSujith 	 * padsize would take into account odd header lengths:
999d435700fSSujith 	 * padsize = (4 - padpos % 4) % 4; However, since only
1000d435700fSSujith 	 * even-length headers are used, padding can only be 0 or 2
1001d435700fSSujith 	 * bytes and we can optimize this a bit. In addition, we must
1002d435700fSSujith 	 * not try to remove padding from short control frames that do
1003d435700fSSujith 	 * not have payload. */
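	/*
	 * Worked example (ath9k_cmn_padpos() behaviour assumed): a 3-address
	 * QoS data frame gives padpos = 26, hence padsize = 26 & 3 = 2, and
	 * the two pad bytes the hardware left after the header are dropped by
	 * the memmove()/skb_pull() pair below.
	 */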
1004d435700fSSujith 	padsize = padpos & 3;
1005d435700fSSujith 	if (padsize && skb->len >= padpos + padsize + FCS_LEN) {
1006d435700fSSujith 		memmove(skb->data + padsize, skb->data, padpos);
1007d435700fSSujith 		skb_pull(skb, padsize);
1008d435700fSSujith 	}
1009d435700fSSujith 
1010d435700fSSujith 	keyix = rx_stats->rs_keyix;
1011d435700fSSujith 
1012d435700fSSujith 	if (!(keyix == ATH9K_RXKEYIX_INVALID) && !decrypt_error &&
1013d435700fSSujith 	    ieee80211_has_protected(fc)) {
1014d435700fSSujith 		rxs->flag |= RX_FLAG_DECRYPTED;
1015d435700fSSujith 	} else if (ieee80211_has_protected(fc)
1016d435700fSSujith 		   && !decrypt_error && skb->len >= hdrlen + 4) {
1017d435700fSSujith 		keyix = skb->data[hdrlen + 3] >> 6;
1018d435700fSSujith 
1019d435700fSSujith 		if (test_bit(keyix, common->keymap))
1020d435700fSSujith 			rxs->flag |= RX_FLAG_DECRYPTED;
1021d435700fSSujith 	}
1022d435700fSSujith 	if (ah->sw_mgmt_crypto &&
1023d435700fSSujith 	    (rxs->flag & RX_FLAG_DECRYPTED) &&
1024d435700fSSujith 	    ieee80211_is_mgmt(fc))
1025d435700fSSujith 		/* Use software decrypt for management frames. */
1026d435700fSSujith 		rxs->flag &= ~RX_FLAG_DECRYPTED;
1027d435700fSSujith }
1028b5c80475SFelix Fietkau 
1029*e93d083fSSimon Wunderlich static s8 fix_rssi_inv_only(u8 rssi_val)
1030*e93d083fSSimon Wunderlich {
1031*e93d083fSSimon Wunderlich 	if (rssi_val == 128)
1032*e93d083fSSimon Wunderlich 		rssi_val = 0;
1033*e93d083fSSimon Wunderlich 	return (s8) rssi_val;
1034*e93d083fSSimon Wunderlich }
1035*e93d083fSSimon Wunderlich 
1036*e93d083fSSimon Wunderlich 
1037*e93d083fSSimon Wunderlich static void ath_process_fft(struct ath_softc *sc, struct ieee80211_hdr *hdr,
1038*e93d083fSSimon Wunderlich 			    struct ath_rx_status *rs, u64 tsf)
1039*e93d083fSSimon Wunderlich {
1040*e93d083fSSimon Wunderlich #ifdef CONFIG_ATH_DEBUG
1041*e93d083fSSimon Wunderlich 	struct ath_hw *ah = sc->sc_ah;
1042*e93d083fSSimon Wunderlich 	u8 bins[SPECTRAL_HT20_NUM_BINS];
1043*e93d083fSSimon Wunderlich 	u8 *vdata = (u8 *)hdr;
1044*e93d083fSSimon Wunderlich 	struct fft_sample_ht20 fft_sample;
1045*e93d083fSSimon Wunderlich 	struct ath_radar_info *radar_info;
1046*e93d083fSSimon Wunderlich 	struct ath_ht20_mag_info *mag_info;
1047*e93d083fSSimon Wunderlich 	int len = rs->rs_datalen;
1048*e93d083fSSimon Wunderlich 	int i, dc_pos;
1049*e93d083fSSimon Wunderlich 
1050*e93d083fSSimon Wunderlich 	/* AR9280 and before report via ATH9K_PHYERR_RADAR, AR93xx and newer
1051*e93d083fSSimon Wunderlich 	 * via ATH9K_PHYERR_SPECTRAL. Haven't seen ATH9K_PHYERR_FALSE_RADAR_EXT
1052*e93d083fSSimon Wunderlich 	 * yet, but this is supposed to be possible as well.
1053*e93d083fSSimon Wunderlich 	 */
1054*e93d083fSSimon Wunderlich 	if (rs->rs_phyerr != ATH9K_PHYERR_RADAR &&
1055*e93d083fSSimon Wunderlich 	    rs->rs_phyerr != ATH9K_PHYERR_FALSE_RADAR_EXT &&
1056*e93d083fSSimon Wunderlich 	    rs->rs_phyerr != ATH9K_PHYERR_SPECTRAL)
1057*e93d083fSSimon Wunderlich 		return;
1058*e93d083fSSimon Wunderlich 
1059*e93d083fSSimon Wunderlich 	/* Variation in the data length is possible and will be fixed later.
1060*e93d083fSSimon Wunderlich 	 * Note that we only support HT20 for now.
1061*e93d083fSSimon Wunderlich 	 *
1062*e93d083fSSimon Wunderlich 	 * TODO: add HT20_40 support as well.
1063*e93d083fSSimon Wunderlich 	 */
1064*e93d083fSSimon Wunderlich 	if ((len > SPECTRAL_HT20_TOTAL_DATA_LEN + 2) ||
1065*e93d083fSSimon Wunderlich 	    (len < SPECTRAL_HT20_TOTAL_DATA_LEN - 1))
1066*e93d083fSSimon Wunderlich 		return;
1067*e93d083fSSimon Wunderlich 
1068*e93d083fSSimon Wunderlich 	/* check if spectral scan bit is set. This does not have to be checked
1069*e93d083fSSimon Wunderlich 	 * if received through a SPECTRAL phy error, but shouldn't hurt.
1070*e93d083fSSimon Wunderlich 	 */
1071*e93d083fSSimon Wunderlich 	radar_info = ((struct ath_radar_info *)&vdata[len]) - 1;
1072*e93d083fSSimon Wunderlich 	if (!(radar_info->pulse_bw_info & SPECTRAL_SCAN_BITMASK))
1073*e93d083fSSimon Wunderlich 		return;
1074*e93d083fSSimon Wunderlich 
1075*e93d083fSSimon Wunderlich 	fft_sample.tlv.type = ATH_FFT_SAMPLE_HT20;
1076*e93d083fSSimon Wunderlich 	fft_sample.tlv.length = sizeof(fft_sample) - sizeof(fft_sample.tlv);
1077*e93d083fSSimon Wunderlich 
1078*e93d083fSSimon Wunderlich 	fft_sample.freq = ah->curchan->chan->center_freq;
1079*e93d083fSSimon Wunderlich 	fft_sample.rssi = fix_rssi_inv_only(rs->rs_rssi_ctl0);
1080*e93d083fSSimon Wunderlich 	fft_sample.noise = ah->noise;
1081*e93d083fSSimon Wunderlich 
1082*e93d083fSSimon Wunderlich 	switch (len - SPECTRAL_HT20_TOTAL_DATA_LEN) {
1083*e93d083fSSimon Wunderlich 	case 0:
1084*e93d083fSSimon Wunderlich 		/* length correct, nothing to do. */
1085*e93d083fSSimon Wunderlich 		memcpy(bins, vdata, SPECTRAL_HT20_NUM_BINS);
1086*e93d083fSSimon Wunderlich 		break;
1087*e93d083fSSimon Wunderlich 	case -1:
1088*e93d083fSSimon Wunderlich 		/* first byte missing, duplicate it. */
1089*e93d083fSSimon Wunderlich 		memcpy(&bins[1], vdata, SPECTRAL_HT20_NUM_BINS - 1);
1090*e93d083fSSimon Wunderlich 		bins[0] = vdata[0];
1091*e93d083fSSimon Wunderlich 		break;
1092*e93d083fSSimon Wunderlich 	case 2:
1093*e93d083fSSimon Wunderlich 		/* MAC added 2 extra bytes at bin 30 and 32, remove them. */
1094*e93d083fSSimon Wunderlich 		memcpy(bins, vdata, 30);
1095*e93d083fSSimon Wunderlich 		bins[30] = vdata[31];
1096*e93d083fSSimon Wunderlich 		memcpy(&bins[31], &vdata[33], SPECTRAL_HT20_NUM_BINS - 31);
1097*e93d083fSSimon Wunderlich 		break;
1098*e93d083fSSimon Wunderlich 	case 1:
1099*e93d083fSSimon Wunderlich 		/* MAC added 2 extra bytes AND first byte is missing. */
1100*e93d083fSSimon Wunderlich 		bins[0] = vdata[0];
1101*e93d083fSSimon Wunderlich 		memcpy(&bins[1], vdata, 30);
1102*e93d083fSSimon Wunderlich 		bins[31] = vdata[31];
1103*e93d083fSSimon Wunderlich 		memcpy(&bins[32], &vdata[33], SPECTRAL_HT20_NUM_BINS - 32);
1104*e93d083fSSimon Wunderlich 		break;
1105*e93d083fSSimon Wunderlich 	default:
1106*e93d083fSSimon Wunderlich 		return;
1107*e93d083fSSimon Wunderlich 	}
1108*e93d083fSSimon Wunderlich 
1109*e93d083fSSimon Wunderlich 	/* DC value (value in the middle) is the blind spot of the spectral
1110*e93d083fSSimon Wunderlich 	 * sample and invalid, interpolate it.
1111*e93d083fSSimon Wunderlich 	 */
1112*e93d083fSSimon Wunderlich 	dc_pos = SPECTRAL_HT20_NUM_BINS / 2;
1113*e93d083fSSimon Wunderlich 	bins[dc_pos] = (bins[dc_pos + 1] + bins[dc_pos - 1]) / 2;
1114*e93d083fSSimon Wunderlich 
1115*e93d083fSSimon Wunderlich 	/* mag data is at the end of the frame, in front of radar_info */
1116*e93d083fSSimon Wunderlich 	mag_info = ((struct ath_ht20_mag_info *)radar_info) - 1;
1117*e93d083fSSimon Wunderlich 
1118*e93d083fSSimon Wunderlich 	/* Apply exponent and grab further auxiliary information. */
1119*e93d083fSSimon Wunderlich 	for (i = 0; i < SPECTRAL_HT20_NUM_BINS; i++)
1120*e93d083fSSimon Wunderlich 		fft_sample.data[i] = bins[i] << mag_info->max_exp;
1121*e93d083fSSimon Wunderlich 
1122*e93d083fSSimon Wunderlich 	fft_sample.max_magnitude = spectral_max_magnitude(mag_info->all_bins);
1123*e93d083fSSimon Wunderlich 	fft_sample.max_index = spectral_max_index(mag_info->all_bins);
1124*e93d083fSSimon Wunderlich 	fft_sample.bitmap_weight = spectral_bitmap_weight(mag_info->all_bins);
1125*e93d083fSSimon Wunderlich 	fft_sample.tsf = tsf;
1126*e93d083fSSimon Wunderlich 
1127*e93d083fSSimon Wunderlich 	ath_debug_send_fft_sample(sc, &fft_sample.tlv);
1128*e93d083fSSimon Wunderlich #endif
1129*e93d083fSSimon Wunderlich }
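
/*
 * Editor's sketch (illustrative only, not driver code): a minimal,
 * self-contained rehearsal of the two post-processing steps performed in
 * ath_process_fft() above, the DC-bin interpolation and the max_exp
 * scaling, on a hypothetical 8-bin buffer.  All names below are invented
 * for the example.
 */
#if 0
static void example_ht20_postprocess(void)
{
	u8 bins[8] = { 10, 11, 12, 13, 0 /* DC bin, invalid */, 15, 16, 17 };
	u8 scaled[8];
	int dc_pos = 8 / 2;	/* index 4, the blind DC bin */
	int max_exp = 2;	/* as carried in ath_ht20_mag_info */
	int i;

	/* Interpolate the DC bin from its neighbours: (13 + 15) / 2 = 14. */
	bins[dc_pos] = (bins[dc_pos + 1] + bins[dc_pos - 1]) / 2;

	/* Apply the exponent, e.g. 14 << 2 = 56 for the DC bin. */
	for (i = 0; i < 8; i++)
		scaled[i] = bins[i] << max_exp;
}
#endif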
1130*e93d083fSSimon Wunderlich 
1131b5c80475SFelix Fietkau int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
1132b5c80475SFelix Fietkau {
1133b5c80475SFelix Fietkau 	struct ath_buf *bf;
11340d95521eSFelix Fietkau 	struct sk_buff *skb = NULL, *requeue_skb, *hdr_skb;
1135b5c80475SFelix Fietkau 	struct ieee80211_rx_status *rxs;
1136b5c80475SFelix Fietkau 	struct ath_hw *ah = sc->sc_ah;
1137b5c80475SFelix Fietkau 	struct ath_common *common = ath9k_hw_common(ah);
11387545daf4SFelix Fietkau 	struct ieee80211_hw *hw = sc->hw;
1139b5c80475SFelix Fietkau 	struct ieee80211_hdr *hdr;
1140b5c80475SFelix Fietkau 	int retval;
1141b5c80475SFelix Fietkau 	struct ath_rx_status rs;
1142b5c80475SFelix Fietkau 	enum ath9k_rx_qtype qtype;
1143b5c80475SFelix Fietkau 	bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
1144b5c80475SFelix Fietkau 	int dma_type;
11455c6dd921SVasanthakumar Thiagarajan 	u8 rx_status_len = ah->caps.rx_status_len;
1146a6d2055bSFelix Fietkau 	u64 tsf = 0;
1147a6d2055bSFelix Fietkau 	u32 tsf_lower = 0;
11488ab2cd09SLuis R. Rodriguez 	unsigned long flags;
1149b5c80475SFelix Fietkau 
1150b5c80475SFelix Fietkau 	if (edma)
1151b5c80475SFelix Fietkau 		dma_type = DMA_BIDIRECTIONAL;
115256824223SMing Lei 	else
115356824223SMing Lei 		dma_type = DMA_FROM_DEVICE;
1154b5c80475SFelix Fietkau 
1155b5c80475SFelix Fietkau 	qtype = hp ? ATH9K_RX_QUEUE_HP : ATH9K_RX_QUEUE_LP;
1156b5c80475SFelix Fietkau 	spin_lock_bh(&sc->rx.rxbuflock);
1157b5c80475SFelix Fietkau 
1158a6d2055bSFelix Fietkau 	tsf = ath9k_hw_gettsf64(ah);
1159a6d2055bSFelix Fietkau 	tsf_lower = tsf & 0xffffffff;
1160a6d2055bSFelix Fietkau 
1161b5c80475SFelix Fietkau 	do {
1162e1352fdeSLorenzo Bianconi 		bool decrypt_error = false;
1163b5c80475SFelix Fietkau 		/* If handling rx interrupt and flush is in progress => exit */
1164781b14a3SSujith Manoharan 		if (test_bit(SC_OP_RXFLUSH, &sc->sc_flags) && (flush == 0))
1165b5c80475SFelix Fietkau 			break;
1166b5c80475SFelix Fietkau 
1167b5c80475SFelix Fietkau 		memset(&rs, 0, sizeof(rs));
1168b5c80475SFelix Fietkau 		if (edma)
1169b5c80475SFelix Fietkau 			bf = ath_edma_get_next_rx_buf(sc, &rs, qtype);
1170b5c80475SFelix Fietkau 		else
1171b5c80475SFelix Fietkau 			bf = ath_get_next_rx_buf(sc, &rs);
1172b5c80475SFelix Fietkau 
1173b5c80475SFelix Fietkau 		if (!bf)
1174b5c80475SFelix Fietkau 			break;
1175b5c80475SFelix Fietkau 
1176b5c80475SFelix Fietkau 		skb = bf->bf_mpdu;
1177b5c80475SFelix Fietkau 		if (!skb)
1178b5c80475SFelix Fietkau 			continue;
1179b5c80475SFelix Fietkau 
11800d95521eSFelix Fietkau 		/*
11810d95521eSFelix Fietkau 		 * Take frame header from the first fragment and RX status from
11820d95521eSFelix Fietkau 		 * the last one.
11830d95521eSFelix Fietkau 		 */
11840d95521eSFelix Fietkau 		if (sc->rx.frag)
11850d95521eSFelix Fietkau 			hdr_skb = sc->rx.frag;
11860d95521eSFelix Fietkau 		else
11870d95521eSFelix Fietkau 			hdr_skb = skb;
11880d95521eSFelix Fietkau 
11890d95521eSFelix Fietkau 		hdr = (struct ieee80211_hdr *) (hdr_skb->data + rx_status_len);
11900d95521eSFelix Fietkau 		rxs = IEEE80211_SKB_RXCB(hdr_skb);
119115072189SBen Greear 		if (ieee80211_is_beacon(hdr->frame_control)) {
119215072189SBen Greear 			RX_STAT_INC(rx_beacons);
119315072189SBen Greear 			if (!is_zero_ether_addr(common->curbssid) &&
11942e42e474SJoe Perches 			    ether_addr_equal(hdr->addr3, common->curbssid))
1195cf3af748SRajkumar Manoharan 				rs.is_mybeacon = true;
1196cf3af748SRajkumar Manoharan 			else
1197cf3af748SRajkumar Manoharan 				rs.is_mybeacon = false;
119815072189SBen Greear 		} else {
119915072189SBen Greear 			rs.is_mybeacon = false;
120015072189SBen Greear 		}
12015ca42627SLuis R. Rodriguez 
1202be41b052SMohammed Shafi Shajakhan 		if (ieee80211_is_data_present(hdr->frame_control) &&
1203be41b052SMohammed Shafi Shajakhan 		    !ieee80211_is_qos_nullfunc(hdr->frame_control))
12046995fb80SRajkumar Manoharan 			sc->rx.num_pkts++;
1205be41b052SMohammed Shafi Shajakhan 
120629bffa96SFelix Fietkau 		ath_debug_stat_rx(sc, &rs);
12071395d3f0SSujith 
1208203c4805SLuis R. Rodriguez 		/*
1209203c4805SLuis R. Rodriguez 		 * If we're asked to flush receive queue, directly
1210203c4805SLuis R. Rodriguez 		 * chain it back at the queue without processing it.
1211203c4805SLuis R. Rodriguez 		 */
1212781b14a3SSujith Manoharan 		if (test_bit(SC_OP_RXFLUSH, &sc->sc_flags)) {
121315072189SBen Greear 			RX_STAT_INC(rx_drop_rxflush);
12140d95521eSFelix Fietkau 			goto requeue_drop_frag;
121515072189SBen Greear 		}
1216203c4805SLuis R. Rodriguez 
1217ffb1c56aSAshok Nagarajan 		memset(rxs, 0, sizeof(struct ieee80211_rx_status));
1218ffb1c56aSAshok Nagarajan 
1219a6d2055bSFelix Fietkau 		rxs->mactime = (tsf & ~0xffffffffULL) | rs.rs_tstamp;
1220a6d2055bSFelix Fietkau 		if (rs.rs_tstamp > tsf_lower &&
1221a6d2055bSFelix Fietkau 		    unlikely(rs.rs_tstamp - tsf_lower > 0x10000000))
1222a6d2055bSFelix Fietkau 			rxs->mactime -= 0x100000000ULL;
1223a6d2055bSFelix Fietkau 
1224a6d2055bSFelix Fietkau 		if (rs.rs_tstamp < tsf_lower &&
1225a6d2055bSFelix Fietkau 		    unlikely(tsf_lower - rs.rs_tstamp > 0x10000000))
1226a6d2055bSFelix Fietkau 			rxs->mactime += 0x100000000ULL;
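
		/*
		 * Editor's note (illustrative): rs_tstamp is only 32 bits
		 * wide, so it is spliced into the 64-bit TSF read above and
		 * the two wrap cases are patched up.  For example, with
		 * tsf = 0x1_00000010 (tsf_lower = 0x10) and
		 * rs_tstamp = 0xfffffff0, the frame arrived just before the
		 * low word wrapped, so 2^32 is subtracted and
		 * mactime = 0x0_fffffff0.
		 */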
1227a6d2055bSFelix Fietkau 
1228*e93d083fSSimon Wunderlich 		if (rs.rs_status & ATH9K_RXERR_PHY)
1229*e93d083fSSimon Wunderlich 			ath_process_fft(sc, hdr, &rs, rxs->mactime);
1230*e93d083fSSimon Wunderlich 
123183c76570SZefir Kurtisi 		retval = ath9k_rx_skb_preprocess(common, hw, hdr, &rs,
123283c76570SZefir Kurtisi 						 rxs, &decrypt_error);
123383c76570SZefir Kurtisi 		if (retval)
123483c76570SZefir Kurtisi 			goto requeue_drop_frag;
123583c76570SZefir Kurtisi 
123601e18918SRajkumar Manoharan 		if (rs.is_mybeacon) {
123701e18918SRajkumar Manoharan 			sc->hw_busy_count = 0;
123801e18918SRajkumar Manoharan 			ath_start_rx_poll(sc, 3);
123901e18918SRajkumar Manoharan 		}
1240203c4805SLuis R. Rodriguez 		/* Ensure we always have an skb to requeue once we are done
1241203c4805SLuis R. Rodriguez 		 * processing the current buffer's skb */
1242cc861f74SLuis R. Rodriguez 		requeue_skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_ATOMIC);
1243203c4805SLuis R. Rodriguez 
1244203c4805SLuis R. Rodriguez 		/* If there is no memory we ignore the current RX'd frame,
1245203c4805SLuis R. Rodriguez 		 * tell hardware it can give us a new frame using the old
1246203c4805SLuis R. Rodriguez 		 * skb and put it at the tail of the sc->rx.rxbuf list for
1247203c4805SLuis R. Rodriguez 		 * processing. */
124815072189SBen Greear 		if (!requeue_skb) {
124915072189SBen Greear 			RX_STAT_INC(rx_oom_err);
12500d95521eSFelix Fietkau 			goto requeue_drop_frag;
125115072189SBen Greear 		}
1252203c4805SLuis R. Rodriguez 
1253203c4805SLuis R. Rodriguez 		/* Unmap the frame */
1254203c4805SLuis R. Rodriguez 		dma_unmap_single(sc->dev, bf->bf_buf_addr,
1255cc861f74SLuis R. Rodriguez 				 common->rx_bufsize,
1256b5c80475SFelix Fietkau 				 dma_type);
1257203c4805SLuis R. Rodriguez 
1258b5c80475SFelix Fietkau 		skb_put(skb, rs.rs_datalen + ah->caps.rx_status_len);
1259b5c80475SFelix Fietkau 		if (ah->caps.rx_status_len)
1260b5c80475SFelix Fietkau 			skb_pull(skb, ah->caps.rx_status_len);
1261203c4805SLuis R. Rodriguez 
12620d95521eSFelix Fietkau 		if (!rs.rs_more)
12630d95521eSFelix Fietkau 			ath9k_rx_skb_postprocess(common, hdr_skb, &rs,
1264c9b14170SLuis R. Rodriguez 						 rxs, decrypt_error);
1265203c4805SLuis R. Rodriguez 
1266203c4805SLuis R. Rodriguez 		/* We will now give hardware our shiny new allocated skb */
1267203c4805SLuis R. Rodriguez 		bf->bf_mpdu = requeue_skb;
1268203c4805SLuis R. Rodriguez 		bf->bf_buf_addr = dma_map_single(sc->dev, requeue_skb->data,
1269cc861f74SLuis R. Rodriguez 						 common->rx_bufsize,
1270b5c80475SFelix Fietkau 						 dma_type);
1271203c4805SLuis R. Rodriguez 		if (unlikely(dma_mapping_error(sc->dev,
1272203c4805SLuis R. Rodriguez 			  bf->bf_buf_addr))) {
1273203c4805SLuis R. Rodriguez 			dev_kfree_skb_any(requeue_skb);
1274203c4805SLuis R. Rodriguez 			bf->bf_mpdu = NULL;
12756cf9e995SBen Greear 			bf->bf_buf_addr = 0;
12763800276aSJoe Perches 			ath_err(common, "dma_mapping_error() on RX\n");
12777545daf4SFelix Fietkau 			ieee80211_rx(hw, skb);
1278203c4805SLuis R. Rodriguez 			break;
1279203c4805SLuis R. Rodriguez 		}
1280203c4805SLuis R. Rodriguez 
12810d95521eSFelix Fietkau 		if (rs.rs_more) {
128215072189SBen Greear 			RX_STAT_INC(rx_frags);
12830d95521eSFelix Fietkau 			/*
12840d95521eSFelix Fietkau 			 * rs_more indicates chained descriptors which can be
12850d95521eSFelix Fietkau 			 * used to link buffers together for a sort of
12860d95521eSFelix Fietkau 			 * scatter-gather operation.
12870d95521eSFelix Fietkau 			 */
12880d95521eSFelix Fietkau 			if (sc->rx.frag) {
12890d95521eSFelix Fietkau 				/* too many fragments - cannot handle frame */
12900d95521eSFelix Fietkau 				dev_kfree_skb_any(sc->rx.frag);
12910d95521eSFelix Fietkau 				dev_kfree_skb_any(skb);
129215072189SBen Greear 				RX_STAT_INC(rx_too_many_frags_err);
12930d95521eSFelix Fietkau 				skb = NULL;
12940d95521eSFelix Fietkau 			}
12950d95521eSFelix Fietkau 			sc->rx.frag = skb;
12960d95521eSFelix Fietkau 			goto requeue;
12970d95521eSFelix Fietkau 		}
12980d95521eSFelix Fietkau 
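		/*
		 * Editor's note (illustrative): only one pending fragment is
		 * kept in sc->rx.frag, so a frame chained across more than
		 * two RX buffers is dropped above.  The merge below grows the
		 * first buffer's tailroom and appends the final buffer's data
		 * behind the header taken from the first.
		 */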
12990d95521eSFelix Fietkau 		if (sc->rx.frag) {
13000d95521eSFelix Fietkau 			int space = skb->len - skb_tailroom(hdr_skb);
13010d95521eSFelix Fietkau 
13020d95521eSFelix Fietkau 			if (pskb_expand_head(hdr_skb, 0, space, GFP_ATOMIC) < 0) {
13030d95521eSFelix Fietkau 				dev_kfree_skb(skb);
130415072189SBen Greear 				RX_STAT_INC(rx_oom_err);
13050d95521eSFelix Fietkau 				goto requeue_drop_frag;
13060d95521eSFelix Fietkau 			}
13070d95521eSFelix Fietkau 
1308b5447ff9SEric Dumazet 			sc->rx.frag = NULL;
1309b5447ff9SEric Dumazet 
13100d95521eSFelix Fietkau 			skb_copy_from_linear_data(skb, skb_put(hdr_skb, skb->len),
13110d95521eSFelix Fietkau 						  skb->len);
13120d95521eSFelix Fietkau 			dev_kfree_skb_any(skb);
13130d95521eSFelix Fietkau 			skb = hdr_skb;
13140d95521eSFelix Fietkau 		}
13150d95521eSFelix Fietkau 
1317eb840a80SMohammed Shafi Shajakhan 		if (ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) {
1319203c4805SLuis R. Rodriguez 			/*
1320eb840a80SMohammed Shafi Shajakhan 			 * change the default rx antenna if rx diversity
1321eb840a80SMohammed Shafi Shajakhan 			 * chooses the other antenna 3 times in a row.
1322203c4805SLuis R. Rodriguez 			 */
132329bffa96SFelix Fietkau 			if (sc->rx.defant != rs.rs_antenna) {
1324203c4805SLuis R. Rodriguez 				if (++sc->rx.rxotherant >= 3)
132529bffa96SFelix Fietkau 					ath_setdefantenna(sc, rs.rs_antenna);
1326203c4805SLuis R. Rodriguez 			} else {
1327203c4805SLuis R. Rodriguez 				sc->rx.rxotherant = 0;
1328203c4805SLuis R. Rodriguez 			}
1330eb840a80SMohammed Shafi Shajakhan 		}
1331eb840a80SMohammed Shafi Shajakhan 
133266760eacSFelix Fietkau 		if (rxs->flag & RX_FLAG_MMIC_STRIPPED)
133366760eacSFelix Fietkau 			skb_trim(skb, skb->len - 8);
133466760eacSFelix Fietkau 
13358ab2cd09SLuis R. Rodriguez 		spin_lock_irqsave(&sc->sc_pm_lock, flags);
1336aaef24b4SMohammed Shafi Shajakhan 		if ((sc->ps_flags & (PS_WAIT_FOR_BEACON |
13371b04b930SSujith 				     PS_WAIT_FOR_CAB |
1338aaef24b4SMohammed Shafi Shajakhan 				     PS_WAIT_FOR_PSPOLL_DATA)) ||
1339cedc7e3dSMohammed Shafi Shajakhan 		    ath9k_check_auto_sleep(sc))
1340f73c604cSRajkumar Manoharan 			ath_rx_ps(sc, skb, rs.is_mybeacon);
13418ab2cd09SLuis R. Rodriguez 		spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
1342cc65965cSJouni Malinen 
134343c35284SFelix Fietkau 		if ((ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) && sc->ant_rx == 3)
1344102885a5SVasanthakumar Thiagarajan 			ath_ant_comb_scan(sc, &rs);
1345102885a5SVasanthakumar Thiagarajan 
13467545daf4SFelix Fietkau 		ieee80211_rx(hw, skb);
1347cc65965cSJouni Malinen 
13480d95521eSFelix Fietkau requeue_drop_frag:
13490d95521eSFelix Fietkau 		if (sc->rx.frag) {
13500d95521eSFelix Fietkau 			dev_kfree_skb_any(sc->rx.frag);
13510d95521eSFelix Fietkau 			sc->rx.frag = NULL;
13520d95521eSFelix Fietkau 		}
1353203c4805SLuis R. Rodriguez requeue:
1354b5c80475SFelix Fietkau 		if (edma) {
1355b5c80475SFelix Fietkau 			list_add_tail(&bf->list, &sc->rx.rxbuf);
1356b5c80475SFelix Fietkau 			ath_rx_edma_buf_link(sc, qtype);
1357b5c80475SFelix Fietkau 		} else {
1358203c4805SLuis R. Rodriguez 			list_move_tail(&bf->list, &sc->rx.rxbuf);
1359203c4805SLuis R. Rodriguez 			ath_rx_buf_link(sc, bf);
13603483288cSFelix Fietkau 			if (!flush)
136195294973SFelix Fietkau 				ath9k_hw_rxena(ah);
1362b5c80475SFelix Fietkau 		}
1363203c4805SLuis R. Rodriguez 	} while (1);
1364203c4805SLuis R. Rodriguez 
1365203c4805SLuis R. Rodriguez 	spin_unlock_bh(&sc->rx.rxbuflock);
1366203c4805SLuis R. Rodriguez 
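	/*
	 * Editor's note (illustrative): RXEOL/RXORN are presumably masked in
	 * the interrupt handler when the RX ring runs dry to avoid an
	 * interrupt storm; with processed buffers now linked back to the
	 * hardware they can safely be re-enabled.
	 */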
136729ab0b36SRajkumar Manoharan 	if (!(ah->imask & ATH9K_INT_RXEOL)) {
136829ab0b36SRajkumar Manoharan 		ah->imask |= (ATH9K_INT_RXEOL | ATH9K_INT_RXORN);
136972d874c6SFelix Fietkau 		ath9k_hw_set_interrupts(ah);
137029ab0b36SRajkumar Manoharan 	}
137129ab0b36SRajkumar Manoharan 
1372203c4805SLuis R. Rodriguez 	return 0;
1373203c4805SLuis R. Rodriguez }
1374