/*
 * Copyright (c) 2010 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "htc.h"

/******/
/* TX */
/******/

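/*
 * Map a WME access category to the hardware queue subtype passed to
 * ath9k_hw_setuptxqueue() via ATH9K_HTC_INIT_TXQ().
 */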
static const int subtype_txq_to_hwq[] = {
	[WME_AC_BE] = ATH_TXQ_AC_BE,
	[WME_AC_BK] = ATH_TXQ_AC_BK,
	[WME_AC_VI] = ATH_TXQ_AC_VI,
	[WME_AC_VO] = ATH_TXQ_AC_VO,
};

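/*
 * Fill the ath9k_tx_queue_info 'qi' (expected to be in scope at the
 * call site) with the driver defaults for a data queue: default
 * AIFS/CWmin/CWmax and interrupts on both EOL and descriptor
 * completion.
 */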
#define ATH9K_HTC_INIT_TXQ(subtype) do {			\
		qi.tqi_subtype = subtype_txq_to_hwq[subtype];	\
		qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;		\
		qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;		\
		qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;		\
		qi.tqi_physCompBuf = 0;				\
		qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |	\
			TXQ_FLAG_TXDESCINT_ENABLE;		\
	} while (0)

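/*
 * mac80211 numbers its queues in priority order: 0 = VO, 1 = VI,
 * 2 = BE, 3 = BK. Translate that to the hardware queue number stored
 * in hwq_map, defaulting to BE for unknown queues.
 */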
int get_hw_qnum(u16 queue, int *hwq_map)
{
	switch (queue) {
	case 0:
		return hwq_map[WME_AC_VO];
	case 1:
		return hwq_map[WME_AC_VI];
	case 2:
		return hwq_map[WME_AC_BE];
	case 3:
		return hwq_map[WME_AC_BK];
	default:
		return hwq_map[WME_AC_BE];
	}
}

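/*
 * Update the EDCA parameters (AIFS, CWmin/CWmax, burst/ready time) of
 * an already allocated hardware queue and reset the queue so that the
 * new settings take effect.
 */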
int ath_htc_txq_update(struct ath9k_htc_priv *priv, int qnum,
		       struct ath9k_tx_queue_info *qinfo)
{
	struct ath_hw *ah = priv->ah;
	int error = 0;
	struct ath9k_tx_queue_info qi;

	ath9k_hw_get_txq_props(ah, qnum, &qi);

	qi.tqi_aifs = qinfo->tqi_aifs;
	qi.tqi_cwmin = qinfo->tqi_cwmin / 2; /* XXX */
	qi.tqi_cwmax = qinfo->tqi_cwmax;
	qi.tqi_burstTime = qinfo->tqi_burstTime;
	qi.tqi_readyTime = qinfo->tqi_readyTime;

	if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
		ath_err(ath9k_hw_common(ah),
			"Unable to update hardware queue %u!\n", qnum);
		error = -EIO;
	} else {
		ath9k_hw_resettxqueue(ah, qnum);
	}

	return error;
}

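/*
 * Prepend the firmware TX header to the frame (tx_frame_hdr for data,
 * tx_mgmt_hdr for everything else) and hand it to the HTC endpoint
 * that matches the frame's queue.
 */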
int ath9k_htc_tx_start(struct ath9k_htc_priv *priv, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_sta *sta = tx_info->control.sta;
	struct ath9k_htc_sta *ista;
	struct ath9k_htc_tx_ctl tx_ctl;
	enum htc_endpoint_id epid;
	u16 qnum;
	__le16 fc;
	u8 *tx_fhdr;
	u8 sta_idx, vif_idx;

	hdr = (struct ieee80211_hdr *) skb->data;
	fc = hdr->frame_control;

	if (tx_info->control.vif &&
			(struct ath9k_htc_vif *) tx_info->control.vif->drv_priv)
		vif_idx = ((struct ath9k_htc_vif *)
				tx_info->control.vif->drv_priv)->index;
	else
		vif_idx = priv->nvifs;

	if (sta) {
		ista = (struct ath9k_htc_sta *) sta->drv_priv;
		sta_idx = ista->index;
	} else {
		sta_idx = 0;
	}

	memset(&tx_ctl, 0, sizeof(struct ath9k_htc_tx_ctl));

	if (ieee80211_is_data(fc)) {
		struct tx_frame_hdr tx_hdr;
		u32 flags = 0;
		u8 *qc;

		memset(&tx_hdr, 0, sizeof(struct tx_frame_hdr));

		tx_hdr.node_idx = sta_idx;
		tx_hdr.vif_idx = vif_idx;

		if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
			tx_ctl.type = ATH9K_HTC_AMPDU;
			tx_hdr.data_type = ATH9K_HTC_AMPDU;
		} else {
			tx_ctl.type = ATH9K_HTC_NORMAL;
			tx_hdr.data_type = ATH9K_HTC_NORMAL;
		}

		if (ieee80211_is_data_qos(fc)) {
			qc = ieee80211_get_qos_ctl(hdr);
			tx_hdr.tidno = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
		}

		/* Check for RTS protection */
		if (priv->hw->wiphy->rts_threshold != (u32) -1)
			if (skb->len > priv->hw->wiphy->rts_threshold)
				flags |= ATH9K_HTC_TX_RTSCTS;

		/* CTS-to-self */
		if (!(flags & ATH9K_HTC_TX_RTSCTS) &&
		    (priv->op_flags & OP_PROTECT_ENABLE))
			flags |= ATH9K_HTC_TX_CTSONLY;

		tx_hdr.flags = cpu_to_be32(flags);
		tx_hdr.key_type = ath9k_cmn_get_hw_crypto_keytype(skb);
		if (tx_hdr.key_type == ATH9K_KEY_TYPE_CLEAR)
			tx_hdr.keyix = (u8) ATH9K_TXKEYIX_INVALID;
		else
			tx_hdr.keyix = tx_info->control.hw_key->hw_key_idx;

		tx_fhdr = skb_push(skb, sizeof(tx_hdr));
		memcpy(tx_fhdr, (u8 *) &tx_hdr, sizeof(tx_hdr));

		qnum = skb_get_queue_mapping(skb);

		switch (qnum) {
		case 0:
			TX_QSTAT_INC(WME_AC_VO);
			epid = priv->data_vo_ep;
			break;
		case 1:
			TX_QSTAT_INC(WME_AC_VI);
			epid = priv->data_vi_ep;
			break;
		case 2:
			TX_QSTAT_INC(WME_AC_BE);
			epid = priv->data_be_ep;
			break;
		case 3:
		default:
			TX_QSTAT_INC(WME_AC_BK);
			epid = priv->data_bk_ep;
			break;
		}
	} else {
		struct tx_mgmt_hdr mgmt_hdr;

		memset(&mgmt_hdr, 0, sizeof(struct tx_mgmt_hdr));

		tx_ctl.type = ATH9K_HTC_NORMAL;

		mgmt_hdr.node_idx = sta_idx;
		mgmt_hdr.vif_idx = vif_idx;
		mgmt_hdr.tidno = 0;
		mgmt_hdr.flags = 0;

		mgmt_hdr.key_type = ath9k_cmn_get_hw_crypto_keytype(skb);
		if (mgmt_hdr.key_type == ATH9K_KEY_TYPE_CLEAR)
			mgmt_hdr.keyix = (u8) ATH9K_TXKEYIX_INVALID;
		else
			mgmt_hdr.keyix = tx_info->control.hw_key->hw_key_idx;

		tx_fhdr = skb_push(skb, sizeof(mgmt_hdr));
		memcpy(tx_fhdr, (u8 *) &mgmt_hdr, sizeof(mgmt_hdr));
		epid = priv->mgmt_ep;
	}

	return htc_send(priv->htc, skb, epid, &tx_ctl);
}

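/*
 * Returns true if no BA session has been started yet for this
 * station/TID pair, i.e. the TID is still in the AGGR_STOP state.
 */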
static bool ath9k_htc_check_tx_aggr(struct ath9k_htc_priv *priv,
				    struct ath9k_htc_sta *ista, u8 tid)
{
	bool ret = false;

	spin_lock_bh(&priv->tx_lock);
	if ((tid < ATH9K_HTC_MAX_TID) && (ista->tid_state[tid] == AGGR_STOP))
		ret = true;
	spin_unlock_bh(&priv->tx_lock);

	return ret;
}

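/*
 * TX completion tasklet: report the status of completed frames to
 * mac80211, start a BA session for QoS data frames when one is not
 * running yet, and wake the TX queues if they had been stopped.
 */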
void ath9k_tx_tasklet(unsigned long data)
{
	struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *)data;
	struct ieee80211_sta *sta;
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *tx_info;
	struct sk_buff *skb = NULL;
	__le16 fc;

	while ((skb = skb_dequeue(&priv->tx_queue)) != NULL) {
		hdr = (struct ieee80211_hdr *) skb->data;
		fc = hdr->frame_control;
		tx_info = IEEE80211_SKB_CB(skb);

		memset(&tx_info->status, 0, sizeof(tx_info->status));

		rcu_read_lock();

		sta = ieee80211_find_sta(priv->vif, hdr->addr1);
		if (!sta) {
			rcu_read_unlock();
			ieee80211_tx_status(priv->hw, skb);
			continue;
		}

		/* Check if we need to start aggregation */
		if (conf_is_ht(&priv->hw->conf) &&
		    (skb->protocol != cpu_to_be16(ETH_P_PAE))) {
			if (ieee80211_is_data_qos(fc)) {
				u8 *qc, tid;
				struct ath9k_htc_sta *ista;

				qc = ieee80211_get_qos_ctl(hdr);
				tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
				ista = (struct ath9k_htc_sta *)sta->drv_priv;

				if (ath9k_htc_check_tx_aggr(priv, ista, tid)) {
					ieee80211_start_tx_ba_session(sta, tid, 0);
					spin_lock_bh(&priv->tx_lock);
					ista->tid_state[tid] = AGGR_PROGRESS;
					spin_unlock_bh(&priv->tx_lock);
				}
			}
		}

		rcu_read_unlock();

		/* Send status to mac80211 */
		ieee80211_tx_status(priv->hw, skb);
	}

	/* Wake TX queues if needed */
	spin_lock_bh(&priv->tx_lock);
	if (priv->tx_queues_stop) {
		priv->tx_queues_stop = false;
		spin_unlock_bh(&priv->tx_lock);
		ath_dbg(ath9k_hw_common(priv->ah), ATH_DBG_XMIT,
			"Waking up TX queues\n");
		ieee80211_wake_queues(priv->hw);
		return;
	}
	spin_unlock_bh(&priv->tx_lock);
}

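/*
 * TX completion callback from the HTC layer: strip the firmware header
 * that was prepended in ath9k_htc_tx_start(), record the ACK status
 * and defer the rest of the processing to the TX tasklet.
 */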
void ath9k_htc_txep(void *drv_priv, struct sk_buff *skb,
		    enum htc_endpoint_id ep_id, bool txok)
{
	struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) drv_priv;
	struct ath_common *common = ath9k_hw_common(priv->ah);
	struct ieee80211_tx_info *tx_info;

	if (!skb)
		return;

	if (ep_id == priv->mgmt_ep) {
		skb_pull(skb, sizeof(struct tx_mgmt_hdr));
	} else if ((ep_id == priv->data_bk_ep) ||
		   (ep_id == priv->data_be_ep) ||
		   (ep_id == priv->data_vi_ep) ||
		   (ep_id == priv->data_vo_ep)) {
		skb_pull(skb, sizeof(struct tx_frame_hdr));
	} else {
		ath_err(common, "Unsupported TX EPID: %d\n", ep_id);
		dev_kfree_skb_any(skb);
		return;
	}

	tx_info = IEEE80211_SKB_CB(skb);

	if (txok)
		tx_info->flags |= IEEE80211_TX_STAT_ACK;

	skb_queue_tail(&priv->tx_queue, skb);
	tasklet_schedule(&priv->tx_tasklet);
}

int ath9k_tx_init(struct ath9k_htc_priv *priv)
{
	skb_queue_head_init(&priv->tx_queue);
	return 0;
}

void ath9k_tx_cleanup(struct ath9k_htc_priv *priv)
{
}

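/*
 * Allocate a hardware data queue for the given WME access category and
 * record the mapping in priv->hwq_map.
 */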
bool ath9k_htc_txq_setup(struct ath9k_htc_priv *priv, int subtype)
{
	struct ath_hw *ah = priv->ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_tx_queue_info qi;
	int qnum;

	memset(&qi, 0, sizeof(qi));
	ATH9K_HTC_INIT_TXQ(subtype);

	qnum = ath9k_hw_setuptxqueue(priv->ah, ATH9K_TX_QUEUE_DATA, &qi);
	if (qnum == -1)
		return false;

	if (qnum >= ARRAY_SIZE(priv->hwq_map)) {
		ath_err(common, "qnum %u out of range, max %zu!\n",
			qnum, ARRAY_SIZE(priv->hwq_map));
		ath9k_hw_releasetxqueue(ah, qnum);
		return false;
	}

	priv->hwq_map[subtype] = qnum;
	return true;
}

int ath9k_htc_cabq_setup(struct ath9k_htc_priv *priv)
{
	struct ath9k_tx_queue_info qi;

	memset(&qi, 0, sizeof(qi));
	ATH9K_HTC_INIT_TXQ(0);

	return ath9k_hw_setuptxqueue(priv->ah, ATH9K_TX_QUEUE_CAB, &qi);
}

/******/
/* RX */
/******/

/*
 * Calculate the RX filter to be set in the HW.
 */
u32 ath9k_htc_calcrxfilter(struct ath9k_htc_priv *priv)
{
#define	RX_FILTER_PRESERVE (ATH9K_RX_FILTER_PHYERR | ATH9K_RX_FILTER_PHYRADAR)

	struct ath_hw *ah = priv->ah;
	u32 rfilt;

	rfilt = (ath9k_hw_getrxfilter(ah) & RX_FILTER_PRESERVE)
		| ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
		| ATH9K_RX_FILTER_MCAST;

	if (priv->rxfilter & FIF_PROBE_REQ)
		rfilt |= ATH9K_RX_FILTER_PROBEREQ;

	/*
	 * Set promiscuous mode when FIF_PROMISC_IN_BSS is enabled for station
	 * mode interface or when in monitor mode. AP mode does not need this
	 * since it receives all in-BSS frames anyway.
	 */
	if (((ah->opmode != NL80211_IFTYPE_AP) &&
	     (priv->rxfilter & FIF_PROMISC_IN_BSS)) ||
	    (ah->opmode == NL80211_IFTYPE_MONITOR))
		rfilt |= ATH9K_RX_FILTER_PROM;

	if (priv->rxfilter & FIF_CONTROL)
		rfilt |= ATH9K_RX_FILTER_CONTROL;

	if ((ah->opmode == NL80211_IFTYPE_STATION) &&
	    !(priv->rxfilter & FIF_BCN_PRBRESP_PROMISC))
		rfilt |= ATH9K_RX_FILTER_MYBEACON;
	else
		rfilt |= ATH9K_RX_FILTER_BEACON;

	if (conf_is_ht(&priv->hw->conf))
		rfilt |= ATH9K_RX_FILTER_COMP_BAR;

	return rfilt;

#undef RX_FILTER_PRESERVE
}

/*
 * Recv initialization for opmode change.
 */
static void ath9k_htc_opmode_init(struct ath9k_htc_priv *priv)
{
	struct ath_hw *ah = priv->ah;
	struct ath_common *common = ath9k_hw_common(ah);
	u32 rfilt, mfilt[2];

	/* configure rx filter */
	rfilt = ath9k_htc_calcrxfilter(priv);
	ath9k_hw_setrxfilter(ah, rfilt);

	/* configure bssid mask */
	ath_hw_setbssidmask(common);

	/* configure operational mode */
	ath9k_hw_setopmode(ah);

	/* calculate and install multicast filter */
	mfilt[0] = mfilt[1] = ~0;
	ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
}

void ath9k_host_rx_init(struct ath9k_htc_priv *priv)
{
	ath9k_hw_rxena(priv->ah);
	ath9k_htc_opmode_init(priv);
	ath9k_hw_startpcureceive(priv->ah, (priv->op_flags & OP_SCANNING));
	priv->rx.last_rssi = ATH_RSSI_DUMMY_MARKER;
}

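/*
 * Fill in the rate information of the RX status. Bit 7 of the hardware
 * rate code marks an HT (MCS) rate; legacy rates are looked up in the
 * bitrate table of the current band, checking the short-preamble value
 * as well.
 */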
static void ath9k_process_rate(struct ieee80211_hw *hw,
			       struct ieee80211_rx_status *rxs,
			       u8 rx_rate, u8 rs_flags)
{
	struct ieee80211_supported_band *sband;
	enum ieee80211_band band;
	unsigned int i = 0;

	if (rx_rate & 0x80) {
		/* HT rate */
		rxs->flag |= RX_FLAG_HT;
		if (rs_flags & ATH9K_RX_2040)
			rxs->flag |= RX_FLAG_40MHZ;
		if (rs_flags & ATH9K_RX_GI)
			rxs->flag |= RX_FLAG_SHORT_GI;
		rxs->rate_idx = rx_rate & 0x7f;
		return;
	}

	band = hw->conf.channel->band;
	sband = hw->wiphy->bands[band];

	for (i = 0; i < sband->n_bitrates; i++) {
		if (sband->bitrates[i].hw_value == rx_rate) {
			rxs->rate_idx = i;
			return;
		}
		if (sband->bitrates[i].hw_value_short == rx_rate) {
			rxs->rate_idx = i;
			rxs->flag |= RX_FLAG_SHORTPRE;
			return;
		}
	}
}

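/*
 * Turn a frame received from the target into something mac80211 can
 * use: validate and strip the HTC RX header, remove the hardware's
 * 4-byte header padding and fill in the ieee80211_rx_status. Returns
 * false if the frame should be dropped.
 */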
static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
			     struct ath9k_htc_rxbuf *rxbuf,
			     struct ieee80211_rx_status *rx_status)
{
	struct ieee80211_hdr *hdr;
	struct ieee80211_hw *hw = priv->hw;
	struct sk_buff *skb = rxbuf->skb;
	struct ath_common *common = ath9k_hw_common(priv->ah);
	struct ath_htc_rx_status *rxstatus;
	int hdrlen, padpos, padsize;
	int last_rssi = ATH_RSSI_DUMMY_MARKER;
	__le16 fc;

	if (skb->len <= HTC_RX_FRAME_HEADER_SIZE) {
		ath_err(common, "Corrupted RX frame, dropping\n");
		goto rx_next;
	}

	rxstatus = (struct ath_htc_rx_status *)skb->data;

	if (be16_to_cpu(rxstatus->rs_datalen) -
	    (skb->len - HTC_RX_FRAME_HEADER_SIZE) != 0) {
		ath_err(common,
			"Corrupted RX data len, dropping (dlen: %d, skblen: %d)\n",
			be16_to_cpu(rxstatus->rs_datalen), skb->len);
		goto rx_next;
	}

	/* Get the RX status information */
	memcpy(&rxbuf->rxstatus, rxstatus, HTC_RX_FRAME_HEADER_SIZE);
	skb_pull(skb, HTC_RX_FRAME_HEADER_SIZE);

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;
	hdrlen = ieee80211_get_hdrlen_from_skb(skb);

	padpos = ath9k_cmn_padpos(fc);

	padsize = padpos & 3;
	if (padsize && skb->len >= padpos + padsize + FCS_LEN) {
		memmove(skb->data + padsize, skb->data, padpos);
		skb_pull(skb, padsize);
	}

	memset(rx_status, 0, sizeof(struct ieee80211_rx_status));

	if (rxbuf->rxstatus.rs_status != 0) {
		if (rxbuf->rxstatus.rs_status & ATH9K_RXERR_CRC)
			rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
		if (rxbuf->rxstatus.rs_status & ATH9K_RXERR_PHY)
			goto rx_next;

		if (rxbuf->rxstatus.rs_status & ATH9K_RXERR_DECRYPT) {
			/* FIXME */
		} else if (rxbuf->rxstatus.rs_status & ATH9K_RXERR_MIC) {
			if (ieee80211_is_ctl(fc))
				/*
				 * Sometimes, we get invalid
				 * MIC failures on valid control frames.
				 * Remove these mic errors.
				 */
				rxbuf->rxstatus.rs_status &= ~ATH9K_RXERR_MIC;
			else
				rx_status->flag |= RX_FLAG_MMIC_ERROR;
		}

		/*
		 * Reject error frames with the exception of
		 * decryption and MIC failures. For monitor mode,
		 * we also ignore the CRC error.
		 */
		if (priv->ah->opmode == NL80211_IFTYPE_MONITOR) {
			if (rxbuf->rxstatus.rs_status &
			    ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
			      ATH9K_RXERR_CRC))
				goto rx_next;
		} else {
			if (rxbuf->rxstatus.rs_status &
			    ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC)) {
				goto rx_next;
			}
		}
	}

	if (!(rxbuf->rxstatus.rs_status & ATH9K_RXERR_DECRYPT)) {
		u8 keyix;
		keyix = rxbuf->rxstatus.rs_keyix;
		if (keyix != ATH9K_RXKEYIX_INVALID) {
			rx_status->flag |= RX_FLAG_DECRYPTED;
		} else if (ieee80211_has_protected(fc) &&
			   skb->len >= hdrlen + 4) {
			keyix = skb->data[hdrlen + 3] >> 6;
			if (test_bit(keyix, common->keymap))
				rx_status->flag |= RX_FLAG_DECRYPTED;
		}
	}

	ath9k_process_rate(hw, rx_status, rxbuf->rxstatus.rs_rate,
			   rxbuf->rxstatus.rs_flags);

	if (priv->op_flags & OP_ASSOCIATED) {
		if (rxbuf->rxstatus.rs_rssi != ATH9K_RSSI_BAD &&
		    !rxbuf->rxstatus.rs_moreaggr)
			ATH_RSSI_LPF(priv->rx.last_rssi,
				     rxbuf->rxstatus.rs_rssi);

		last_rssi = priv->rx.last_rssi;

		if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
			rxbuf->rxstatus.rs_rssi = ATH_EP_RND(last_rssi,
							     ATH_RSSI_EP_MULTIPLIER);

		if (rxbuf->rxstatus.rs_rssi < 0)
			rxbuf->rxstatus.rs_rssi = 0;

		if (ieee80211_is_beacon(fc))
			priv->ah->stats.avgbrssi = rxbuf->rxstatus.rs_rssi;
	}

	rx_status->mactime = be64_to_cpu(rxbuf->rxstatus.rs_tstamp);
	rx_status->band = hw->conf.channel->band;
	rx_status->freq = hw->conf.channel->center_freq;
	rx_status->signal = rxbuf->rxstatus.rs_rssi + ATH_DEFAULT_NOISE_FLOOR;
	rx_status->antenna = rxbuf->rxstatus.rs_antenna;
	rx_status->flag |= RX_FLAG_TSFT;

	return true;

rx_next:
	return false;
}

/*
 * FIXME: Handle FLUSH later on.
 */
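/*
 * RX tasklet: pick up the buffers queued by ath9k_htc_rxep(), prepare
 * each frame and pass it up to mac80211, then return the buffer to the
 * free pool.
 */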
void ath9k_rx_tasklet(unsigned long data)
{
	struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *)data;
	struct ath9k_htc_rxbuf *rxbuf = NULL, *tmp_buf = NULL;
	struct ieee80211_rx_status rx_status;
	struct sk_buff *skb;
	unsigned long flags;
	struct ieee80211_hdr *hdr;

	do {
		spin_lock_irqsave(&priv->rx.rxbuflock, flags);
		list_for_each_entry(tmp_buf, &priv->rx.rxbuf, list) {
			if (tmp_buf->in_process) {
				rxbuf = tmp_buf;
				break;
			}
		}

		if (rxbuf == NULL) {
			spin_unlock_irqrestore(&priv->rx.rxbuflock, flags);
			break;
		}

		if (!rxbuf->skb)
			goto requeue;

		if (!ath9k_rx_prepare(priv, rxbuf, &rx_status)) {
			dev_kfree_skb_any(rxbuf->skb);
			goto requeue;
		}

		memcpy(IEEE80211_SKB_RXCB(rxbuf->skb), &rx_status,
		       sizeof(struct ieee80211_rx_status));
		skb = rxbuf->skb;
		hdr = (struct ieee80211_hdr *) skb->data;

		if (ieee80211_is_beacon(hdr->frame_control) && priv->ps_enabled)
			ieee80211_queue_work(priv->hw, &priv->ps_work);

		spin_unlock_irqrestore(&priv->rx.rxbuflock, flags);

		ieee80211_rx(priv->hw, skb);

		spin_lock_irqsave(&priv->rx.rxbuflock, flags);
requeue:
		rxbuf->in_process = false;
		rxbuf->skb = NULL;
		list_move_tail(&rxbuf->list, &priv->rx.rxbuf);
		rxbuf = NULL;
		spin_unlock_irqrestore(&priv->rx.rxbuflock, flags);
	} while (1);
}

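/*
 * RX callback from the HTC layer: park the incoming skb in a free RX
 * buffer and leave the real work to the RX tasklet. The frame is
 * dropped if no buffer is available.
 */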
void ath9k_htc_rxep(void *drv_priv, struct sk_buff *skb,
		    enum htc_endpoint_id ep_id)
{
	struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *)drv_priv;
	struct ath_hw *ah = priv->ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_htc_rxbuf *rxbuf = NULL, *tmp_buf = NULL;

	spin_lock(&priv->rx.rxbuflock);
	list_for_each_entry(tmp_buf, &priv->rx.rxbuf, list) {
		if (!tmp_buf->in_process) {
			rxbuf = tmp_buf;
			break;
		}
	}
	spin_unlock(&priv->rx.rxbuflock);

	if (rxbuf == NULL) {
		ath_dbg(common, ATH_DBG_ANY, "No free RX buffer\n");
		goto err;
	}

	spin_lock(&priv->rx.rxbuflock);
	rxbuf->skb = skb;
	rxbuf->in_process = true;
	spin_unlock(&priv->rx.rxbuflock);

	tasklet_schedule(&priv->rx_tasklet);
	return;
err:
	dev_kfree_skb_any(skb);
}

/* FIXME: Locking for cleanup/init */

void ath9k_rx_cleanup(struct ath9k_htc_priv *priv)
{
	struct ath9k_htc_rxbuf *rxbuf, *tbuf;

	list_for_each_entry_safe(rxbuf, tbuf, &priv->rx.rxbuf, list) {
		list_del(&rxbuf->list);
		if (rxbuf->skb)
			dev_kfree_skb_any(rxbuf->skb);
		kfree(rxbuf);
	}
}

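/*
 * Allocate ATH9K_HTC_RXBUF buffer slots and put them on the RX buffer
 * list.
 */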
int ath9k_rx_init(struct ath9k_htc_priv *priv)
{
	struct ath_hw *ah = priv->ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_htc_rxbuf *rxbuf;
	int i = 0;

	INIT_LIST_HEAD(&priv->rx.rxbuf);
	spin_lock_init(&priv->rx.rxbuflock);

	for (i = 0; i < ATH9K_HTC_RXBUF; i++) {
		rxbuf = kzalloc(sizeof(struct ath9k_htc_rxbuf), GFP_KERNEL);
		if (rxbuf == NULL) {
			ath_err(common, "Unable to allocate RX buffers\n");
			goto err;
		}
		list_add_tail(&rxbuf->list, &priv->rx.rxbuf);
	}

	return 0;

err:
	ath9k_rx_cleanup(priv);
	return -ENOMEM;
}
741