/*
 * Copyright (c) 2010-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <linux/export.h>
#include "hw.h"
#include "ar9003_mac.h"
#include "ar9003_mci.h"

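/* Writing 0 to AR_CR clears the RX disable bit and (re)starts RX DMA. */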
static void ar9003_hw_rx_enable(struct ath_hw *hw)
{
	REG_WRITE(hw, AR_CR, 0);
}

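/*
 * Fill an AR9003 TX control descriptor from the generic ath_tx_info.
 * The descriptor ID/link/buffer-pointer/buffer-length words are summed
 * and folded into the 16-bit pointer checksum kept in ctl10.  The rate
 * series words (ctl13/ctl14) are programmed for the first and last
 * descriptor of a frame; the remaining control words are only filled
 * for the first descriptor, intermediate ones get a minimal setup.
 */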
static void
ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
{
	struct ar9003_txc *ads = ds;
	int checksum = 0;
	u32 val, ctl12, ctl17;
	u8 desc_len;

	desc_len = ((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0x18 : 0x17);

	val = (ATHEROS_VENDOR_ID << AR_DescId_S) |
	      (1 << AR_TxRxDesc_S) |
	      (1 << AR_CtrlStat_S) |
	      (i->qcu << AR_TxQcuNum_S) | desc_len;

	checksum += val;
	ACCESS_ONCE(ads->info) = val;

	checksum += i->link;
	ACCESS_ONCE(ads->link) = i->link;

	checksum += i->buf_addr[0];
	ACCESS_ONCE(ads->data0) = i->buf_addr[0];
	checksum += i->buf_addr[1];
	ACCESS_ONCE(ads->data1) = i->buf_addr[1];
	checksum += i->buf_addr[2];
	ACCESS_ONCE(ads->data2) = i->buf_addr[2];
	checksum += i->buf_addr[3];
	ACCESS_ONCE(ads->data3) = i->buf_addr[3];

	checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen);
	ACCESS_ONCE(ads->ctl3) = val;
	checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen);
	ACCESS_ONCE(ads->ctl5) = val;
	checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen);
	ACCESS_ONCE(ads->ctl7) = val;
	checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen);
	ACCESS_ONCE(ads->ctl9) = val;

	checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff);
	ACCESS_ONCE(ads->ctl10) = checksum;

	if (i->is_first || i->is_last) {
		ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0)
			| set11nTries(i->rates, 1)
			| set11nTries(i->rates, 2)
			| set11nTries(i->rates, 3)
			| (i->dur_update ? AR_DurUpdateEna : 0)
			| SM(0, AR_BurstDur);

		ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0)
			| set11nRate(i->rates, 1)
			| set11nRate(i->rates, 2)
			| set11nRate(i->rates, 3);
	} else {
		ACCESS_ONCE(ads->ctl13) = 0;
		ACCESS_ONCE(ads->ctl14) = 0;
	}

	ads->ctl20 = 0;
	ads->ctl21 = 0;
	ads->ctl22 = 0;
	ads->ctl23 = 0;

	ctl17 = SM(i->keytype, AR_EncrType);
	if (!i->is_first) {
		ACCESS_ONCE(ads->ctl11) = 0;
		ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
		ACCESS_ONCE(ads->ctl15) = 0;
		ACCESS_ONCE(ads->ctl16) = 0;
		ACCESS_ONCE(ads->ctl17) = ctl17;
		ACCESS_ONCE(ads->ctl18) = 0;
		ACCESS_ONCE(ads->ctl19) = 0;
		return;
	}

	ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
		| (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
		| SM(i->txpower[0], AR_XmitPower0)
		| (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
		| (i->keyix != ATH9K_TXKEYIX_INVALID ? AR_DestIdxValid : 0)
		| (i->flags & ATH9K_TXDESC_LOWRXCHAIN ? AR_LowRxChain : 0)
		| (i->flags & ATH9K_TXDESC_CLRDMASK ? AR_ClrDestMask : 0)
		| (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
		   (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));

	ctl12 = (i->keyix != ATH9K_TXKEYIX_INVALID ?
		 SM(i->keyix, AR_DestIdx) : 0)
		| SM(i->type, AR_FrameType)
		| (i->flags & ATH9K_TXDESC_NOACK ? AR_NoAck : 0)
		| (i->flags & ATH9K_TXDESC_EXT_ONLY ? AR_ExtOnly : 0)
		| (i->flags & ATH9K_TXDESC_EXT_AND_CTL ? AR_ExtAndCtl : 0);

	ctl17 |= (i->flags & ATH9K_TXDESC_LDPC ? AR_LDPC : 0);
	switch (i->aggr) {
	case AGGR_BUF_FIRST:
		ctl17 |= SM(i->aggr_len, AR_AggrLen);
		/* fall through */
	case AGGR_BUF_MIDDLE:
		ctl12 |= AR_IsAggr | AR_MoreAggr;
		ctl17 |= SM(i->ndelim, AR_PadDelim);
		break;
	case AGGR_BUF_LAST:
		ctl12 |= AR_IsAggr;
		break;
	case AGGR_BUF_NONE:
		break;
	}

	val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S;
	ctl12 |= SM(val, AR_PAPRDChainMask);

	ACCESS_ONCE(ads->ctl12) = ctl12;
	ACCESS_ONCE(ads->ctl17) = ctl17;

	ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
		| set11nPktDurRTSCTS(i->rates, 1);

	ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
		| set11nPktDurRTSCTS(i->rates, 3);

	ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0)
		| set11nRateFlags(i->rates, 1)
		| set11nRateFlags(i->rates, 2)
		| set11nRateFlags(i->rates, 3)
		| SM(i->rtscts_rate, AR_RTSCTSRate);

	ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;

	ACCESS_ONCE(ads->ctl20) = SM(i->txpower[1], AR_XmitPower1);
	ACCESS_ONCE(ads->ctl21) = SM(i->txpower[2], AR_XmitPower2);
	ACCESS_ONCE(ads->ctl22) = SM(i->txpower[3], AR_XmitPower3);
}

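/*
 * Recompute the descriptor pointer checksum over the info, link,
 * buffer pointer and buffer length words, folding the 32-bit sum
 * into the 16-bit AR_TxPtrChkSum field.
 */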
static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
{
	int checksum;

	checksum = ads->info + ads->link
		+ ads->data0 + ads->ctl3
		+ ads->data1 + ads->ctl5
		+ ads->data2 + ads->ctl7
		+ ads->data3 + ads->ctl9;

	return ((checksum & 0xffff) + (checksum >> 16)) & AR_TxPtrChkSum;
}

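/*
 * Patch the link pointer of an already built TX descriptor and refresh
 * the pointer checksum in ctl10 so it stays consistent with the new
 * link value.
 */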
static void ar9003_hw_set_desc_link(void *ds, u32 ds_link)
{
	struct ar9003_txc *ads = ds;

	ads->link = ds_link;
	ads->ctl10 &= ~AR_TxPtrChkSum;
	ads->ctl10 |= ar9003_calc_ptr_chksum(ads);
}

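/*
 * Read and decode the interrupt cause registers.  Async (MAC) causes
 * come from AR_ISR (or AR_ISR_RAC when read-and-clear is supported),
 * secondary beacon/timer causes from AR_ISR_S2/AR_ISR_S5, and sync
 * causes (host interface fatal errors and timeouts) from
 * AR_INTR_SYNC_CAUSE.  *masked is filled with ATH9K_INT_* bits and
 * true is returned if there is anything to handle.
 */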
static bool ar9003_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked,
			      u32 *sync_cause_p)
{
	u32 isr = 0;
	u32 mask2 = 0;
	struct ath9k_hw_capabilities *pCap = &ah->caps;
	struct ath_common *common = ath9k_hw_common(ah);
	u32 sync_cause = 0, async_cause, async_mask = AR_INTR_MAC_IRQ;
	bool fatal_int;

	if (ath9k_hw_mci_is_enabled(ah))
		async_mask |= AR_INTR_ASYNC_MASK_MCI;

	async_cause = REG_READ(ah, AR_INTR_ASYNC_CAUSE);

	if (async_cause & async_mask) {
		if ((REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M)
				== AR_RTC_STATUS_ON)
			isr = REG_READ(ah, AR_ISR);
	}

	sync_cause = REG_READ(ah, AR_INTR_SYNC_CAUSE) & AR_INTR_SYNC_DEFAULT;

	*masked = 0;

	if (!isr && !sync_cause && !async_cause)
		return false;

	if (isr) {
		if (isr & AR_ISR_BCNMISC) {
			u32 isr2;
			isr2 = REG_READ(ah, AR_ISR_S2);

			mask2 |= ((isr2 & AR_ISR_S2_TIM) >>
				  MAP_ISR_S2_TIM);
			mask2 |= ((isr2 & AR_ISR_S2_DTIM) >>
				  MAP_ISR_S2_DTIM);
			mask2 |= ((isr2 & AR_ISR_S2_DTIMSYNC) >>
				  MAP_ISR_S2_DTIMSYNC);
			mask2 |= ((isr2 & AR_ISR_S2_CABEND) >>
				  MAP_ISR_S2_CABEND);
			mask2 |= ((isr2 & AR_ISR_S2_GTT) <<
				  MAP_ISR_S2_GTT);
			mask2 |= ((isr2 & AR_ISR_S2_CST) <<
				  MAP_ISR_S2_CST);
			mask2 |= ((isr2 & AR_ISR_S2_TSFOOR) >>
				  MAP_ISR_S2_TSFOOR);
			mask2 |= ((isr2 & AR_ISR_S2_BB_WATCHDOG) >>
				  MAP_ISR_S2_BB_WATCHDOG);

			if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) {
				REG_WRITE(ah, AR_ISR_S2, isr2);
				isr &= ~AR_ISR_BCNMISC;
			}
		}

		if ((pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED))
			isr = REG_READ(ah, AR_ISR_RAC);

		if (isr == 0xffffffff) {
			*masked = 0;
			return false;
		}

		*masked = isr & ATH9K_INT_COMMON;

		if (ah->config.rx_intr_mitigation)
			if (isr & (AR_ISR_RXMINTR | AR_ISR_RXINTM))
				*masked |= ATH9K_INT_RXLP;

		if (ah->config.tx_intr_mitigation)
			if (isr & (AR_ISR_TXMINTR | AR_ISR_TXINTM))
				*masked |= ATH9K_INT_TX;

		if (isr & (AR_ISR_LP_RXOK | AR_ISR_RXERR))
			*masked |= ATH9K_INT_RXLP;

		if (isr & AR_ISR_HP_RXOK)
			*masked |= ATH9K_INT_RXHP;

		if (isr & (AR_ISR_TXOK | AR_ISR_TXERR | AR_ISR_TXEOL)) {
			*masked |= ATH9K_INT_TX;

			if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) {
				u32 s0, s1;
				s0 = REG_READ(ah, AR_ISR_S0);
				REG_WRITE(ah, AR_ISR_S0, s0);
				s1 = REG_READ(ah, AR_ISR_S1);
				REG_WRITE(ah, AR_ISR_S1, s1);

				isr &= ~(AR_ISR_TXOK | AR_ISR_TXERR |
					 AR_ISR_TXEOL);
			}
		}

		if (isr & AR_ISR_GENTMR) {
			u32 s5;

			if (pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)
				s5 = REG_READ(ah, AR_ISR_S5_S);
			else
				s5 = REG_READ(ah, AR_ISR_S5);

			ah->intr_gen_timer_trigger =
				MS(s5, AR_ISR_S5_GENTIMER_TRIG);

			ah->intr_gen_timer_thresh =
				MS(s5, AR_ISR_S5_GENTIMER_THRESH);

			if (ah->intr_gen_timer_trigger)
				*masked |= ATH9K_INT_GENTIMER;

			if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) {
				REG_WRITE(ah, AR_ISR_S5, s5);
				isr &= ~AR_ISR_GENTMR;
			}
		}

		*masked |= mask2;

		if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) {
			REG_WRITE(ah, AR_ISR, isr);

			(void) REG_READ(ah, AR_ISR);
		}

		if (*masked & ATH9K_INT_BB_WATCHDOG)
			ar9003_hw_bb_watchdog_read(ah);
	}

	if (async_cause & AR_INTR_ASYNC_MASK_MCI)
		ar9003_mci_get_isr(ah, masked);

	if (sync_cause) {
		if (sync_cause_p)
			*sync_cause_p = sync_cause;
		fatal_int =
			(sync_cause &
			 (AR_INTR_SYNC_HOST1_FATAL | AR_INTR_SYNC_HOST1_PERR))
			? true : false;

		if (fatal_int) {
			if (sync_cause & AR_INTR_SYNC_HOST1_FATAL) {
				ath_dbg(common, ANY,
					"received PCI FATAL interrupt\n");
			}
			if (sync_cause & AR_INTR_SYNC_HOST1_PERR) {
				ath_dbg(common, ANY,
					"received PCI PERR interrupt\n");
			}
			*masked |= ATH9K_INT_FATAL;
		}

		if (sync_cause & AR_INTR_SYNC_RADM_CPL_TIMEOUT) {
			REG_WRITE(ah, AR_RC, AR_RC_HOSTIF);
			REG_WRITE(ah, AR_RC, 0);
			*masked |= ATH9K_INT_FATAL;
		}

		if (sync_cause & AR_INTR_SYNC_LOCAL_TIMEOUT)
			ath_dbg(common, INTERRUPT,
				"AR_INTR_SYNC_LOCAL_TIMEOUT\n");

		REG_WRITE(ah, AR_INTR_SYNC_CAUSE_CLR, sync_cause);
		(void) REG_READ(ah, AR_INTR_SYNC_CAUSE_CLR);
	}
	return true;
}

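/*
 * Pull one completed entry off the TX status ring.  Returns
 * -EINPROGRESS while the tail entry has not been written back by the
 * hardware, -EIO when the entry does not look like a valid Atheros TX
 * status descriptor, otherwise fills *ts and clears the ring entry.
 */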
static int ar9003_hw_proc_txdesc(struct ath_hw *ah, void *ds,
				 struct ath_tx_status *ts)
{
	struct ar9003_txs *ads;
	u32 status;

	ads = &ah->ts_ring[ah->ts_tail];

	status = ACCESS_ONCE(ads->status8);
	if ((status & AR_TxDone) == 0)
		return -EINPROGRESS;

	ah->ts_tail = (ah->ts_tail + 1) % ah->ts_size;

	if ((MS(ads->ds_info, AR_DescId) != ATHEROS_VENDOR_ID) ||
	    (MS(ads->ds_info, AR_TxRxDesc) != 1)) {
		ath_dbg(ath9k_hw_common(ah), XMIT,
			"Tx Descriptor error %x\n", ads->ds_info);
		memset(ads, 0, sizeof(*ads));
		return -EIO;
	}

	ts->ts_rateindex = MS(status, AR_FinalTxIdx);
	ts->ts_seqnum = MS(status, AR_SeqNum);
	ts->tid = MS(status, AR_TxTid);

	ts->qid = MS(ads->ds_info, AR_TxQcuNum);
	ts->desc_id = MS(ads->status1, AR_TxDescId);
	ts->ts_tstamp = ads->status4;
	ts->ts_status = 0;
	ts->ts_flags  = 0;

	if (status & AR_TxOpExceeded)
		ts->ts_status |= ATH9K_TXERR_XTXOP;
	status = ACCESS_ONCE(ads->status2);
	ts->ts_rssi_ctl0 = MS(status, AR_TxRSSIAnt00);
	ts->ts_rssi_ctl1 = MS(status, AR_TxRSSIAnt01);
	ts->ts_rssi_ctl2 = MS(status, AR_TxRSSIAnt02);
	if (status & AR_TxBaStatus) {
		ts->ts_flags |= ATH9K_TX_BA;
		ts->ba_low = ads->status5;
		ts->ba_high = ads->status6;
	}

	status = ACCESS_ONCE(ads->status3);
	if (status & AR_ExcessiveRetries)
		ts->ts_status |= ATH9K_TXERR_XRETRY;
	if (status & AR_Filtered)
		ts->ts_status |= ATH9K_TXERR_FILT;
	if (status & AR_FIFOUnderrun) {
		ts->ts_status |= ATH9K_TXERR_FIFO;
		ath9k_hw_updatetxtriglevel(ah, true);
	}
	if (status & AR_TxTimerExpired)
		ts->ts_status |= ATH9K_TXERR_TIMER_EXPIRED;
	if (status & AR_DescCfgErr)
		ts->ts_flags |= ATH9K_TX_DESC_CFG_ERR;
	if (status & AR_TxDataUnderrun) {
		ts->ts_flags |= ATH9K_TX_DATA_UNDERRUN;
		ath9k_hw_updatetxtriglevel(ah, true);
	}
	if (status & AR_TxDelimUnderrun) {
		ts->ts_flags |= ATH9K_TX_DELIM_UNDERRUN;
		ath9k_hw_updatetxtriglevel(ah, true);
	}
	ts->ts_shortretry = MS(status, AR_RTSFailCnt);
	ts->ts_longretry = MS(status, AR_DataFailCnt);
	ts->ts_virtcol = MS(status, AR_VirtRetryCnt);

	status = ACCESS_ONCE(ads->status7);
	ts->ts_rssi = MS(status, AR_TxRSSICombined);
	ts->ts_rssi_ext0 = MS(status, AR_TxRSSIAnt10);
	ts->ts_rssi_ext1 = MS(status, AR_TxRSSIAnt11);
	ts->ts_rssi_ext2 = MS(status, AR_TxRSSIAnt12);

	memset(ads, 0, sizeof(*ads));

	return 0;
}

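/*
 * Return the PacketDur value programmed for rate series 'index' (0-3)
 * of a TX control descriptor.
 */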
static int ar9003_hw_get_duration(struct ath_hw *ah, const void *ds, int index)
{
	const struct ar9003_txc *adc = ds;

	switch (index) {
	case 0:
		return MS(ACCESS_ONCE(adc->ctl15), AR_PacketDur0);
	case 1:
		return MS(ACCESS_ONCE(adc->ctl15), AR_PacketDur1);
	case 2:
		return MS(ACCESS_ONCE(adc->ctl16), AR_PacketDur2);
	case 3:
		return MS(ACCESS_ONCE(adc->ctl16), AR_PacketDur3);
	default:
		return 0;
	}
}

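/*
 * Hook the AR9003-specific descriptor and interrupt handlers into the
 * ath_hw_ops callback table.
 */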
void ar9003_hw_attach_mac_ops(struct ath_hw *hw)
{
	struct ath_hw_ops *ops = ath9k_hw_ops(hw);

	ops->rx_enable = ar9003_hw_rx_enable;
	ops->set_desc_link = ar9003_hw_set_desc_link;
	ops->get_isr = ar9003_hw_get_isr;
	ops->set_txdesc = ar9003_set_txdesc;
	ops->proc_txdesc = ar9003_hw_proc_txdesc;
	ops->get_duration = ar9003_hw_get_duration;
}

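/*
 * Program the RX data buffer size register; this tells the hardware
 * how large each RX buffer handed to it is.
 */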
void ath9k_hw_set_rx_bufsize(struct ath_hw *ah, u16 buf_size)
{
	REG_WRITE(ah, AR_DATABUF_SIZE, buf_size & AR_DATABUF_SIZE_MASK);
}
EXPORT_SYMBOL(ath9k_hw_set_rx_bufsize);

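/*
 * Hand one RX buffer (by DMA address) to the high- or low-priority
 * RX queue.
 */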
void ath9k_hw_addrxbuf_edma(struct ath_hw *ah, u32 rxdp,
			    enum ath9k_rx_qtype qtype)
{
	if (qtype == ATH9K_RX_QUEUE_HP)
		REG_WRITE(ah, AR_HP_RXDP, rxdp);
	else
		REG_WRITE(ah, AR_LP_RXDP, rxdp);
}
EXPORT_SYMBOL(ath9k_hw_addrxbuf_edma);

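/*
 * Parse the EDMA RX status descriptor that the hardware places at the
 * start of the RX buffer.  Returns -EINPROGRESS while the descriptor
 * is not yet complete, -EINVAL if it does not carry the Atheros
 * descriptor ID, otherwise fills *rxs from the status words.
 */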
int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah, struct ath_rx_status *rxs,
				 void *buf_addr)
{
	struct ar9003_rxs *rxsp = (struct ar9003_rxs *) buf_addr;
	unsigned int phyerr;

	if ((rxsp->status11 & AR_RxDone) == 0)
		return -EINPROGRESS;

	if (MS(rxsp->ds_info, AR_DescId) != 0x168c)
		return -EINVAL;

	if ((rxsp->ds_info & (AR_TxRxDesc | AR_CtrlStat)) != 0)
		return -EINPROGRESS;

	rxs->rs_status = 0;
	rxs->rs_flags =  0;
	rxs->flag =  0;

	rxs->rs_datalen = rxsp->status2 & AR_DataLen;
	rxs->rs_tstamp =  rxsp->status3;

	/* XXX: Keycache */
	rxs->rs_rssi = MS(rxsp->status5, AR_RxRSSICombined);
	rxs->rs_rssi_ctl[0] = MS(rxsp->status1, AR_RxRSSIAnt00);
	rxs->rs_rssi_ctl[1] = MS(rxsp->status1, AR_RxRSSIAnt01);
	rxs->rs_rssi_ctl[2] = MS(rxsp->status1, AR_RxRSSIAnt02);
	rxs->rs_rssi_ext[0] = MS(rxsp->status5, AR_RxRSSIAnt10);
	rxs->rs_rssi_ext[1] = MS(rxsp->status5, AR_RxRSSIAnt11);
	rxs->rs_rssi_ext[2] = MS(rxsp->status5, AR_RxRSSIAnt12);

	if (rxsp->status11 & AR_RxKeyIdxValid)
		rxs->rs_keyix = MS(rxsp->status11, AR_KeyIdx);
	else
		rxs->rs_keyix = ATH9K_RXKEYIX_INVALID;

	rxs->rs_rate = MS(rxsp->status1, AR_RxRate);
	rxs->rs_more = (rxsp->status2 & AR_RxMore) ? 1 : 0;

	rxs->rs_firstaggr = (rxsp->status11 & AR_RxFirstAggr) ? 1 : 0;
	rxs->rs_isaggr = (rxsp->status11 & AR_RxAggr) ? 1 : 0;
	rxs->rs_moreaggr = (rxsp->status11 & AR_RxMoreAggr) ? 1 : 0;
	rxs->rs_antenna = (MS(rxsp->status4, AR_RxAntenna) & 0x7);
	rxs->flag  |= (rxsp->status4 & AR_GI) ? RX_FLAG_SHORT_GI : 0;
	rxs->flag  |= (rxsp->status4 & AR_2040) ? RX_FLAG_40MHZ : 0;

	rxs->evm0 = rxsp->status6;
	rxs->evm1 = rxsp->status7;
	rxs->evm2 = rxsp->status8;
	rxs->evm3 = rxsp->status9;
	rxs->evm4 = (rxsp->status10 & 0xffff);

	if (rxsp->status11 & AR_PreDelimCRCErr)
		rxs->rs_flags |= ATH9K_RX_DELIM_CRC_PRE;

	if (rxsp->status11 & AR_PostDelimCRCErr)
		rxs->rs_flags |= ATH9K_RX_DELIM_CRC_POST;

	if (rxsp->status11 & AR_DecryptBusyErr)
		rxs->rs_flags |= ATH9K_RX_DECRYPT_BUSY;

	if ((rxsp->status11 & AR_RxFrameOK) == 0) {
		/*
		 * AR_CRCErr will be set if we are on the last subframe
		 * and AR_PostDelimCRCErr is raised.  This also guarantees
		 * that when (!AR_CRCErr && AR_PostDelimCRCErr) holds we
		 * cannot be looking at the last subframe.  AR_CRCErr
		 * covers the CRC of the actual frame data.
		 */
		if (rxsp->status11 & AR_CRCErr)
			rxs->rs_status |= ATH9K_RXERR_CRC;
		else if (rxsp->status11 & AR_DecryptCRCErr)
			rxs->rs_status |= ATH9K_RXERR_DECRYPT;
		else if (rxsp->status11 & AR_MichaelErr)
			rxs->rs_status |= ATH9K_RXERR_MIC;
		if (rxsp->status11 & AR_PHYErr) {
			phyerr = MS(rxsp->status11, AR_PHYErrCode);
			/*
			 * If we reach this point with AR_PostDelimCRCErr set,
			 * it implies we are *not* on the last subframe.  In
			 * that case we already know the CRC of the frame was
			 * OK, and the MAC would send an ACK for that subframe,
			 * even if we did get a PHY error of type
			 * ATH9K_PHYERR_OFDM_RESTART.  This only applies to
			 * frames prior to the last subframe.
			 * AR_PostDelimCRCErr covers the CRC of the MPDU
			 * delimiter, which carries the 4 reserved bits, the
			 * 12-bit MPDU length and the 0x4E ('N' in ASCII)
			 * signature of an A-MPDU subframe delimiter.
			 */
			if ((phyerr == ATH9K_PHYERR_OFDM_RESTART) &&
			    (rxsp->status11 & AR_PostDelimCRCErr)) {
				rxs->rs_phyerr = 0;
			} else {
				rxs->rs_status |= ATH9K_RXERR_PHY;
				rxs->rs_phyerr = phyerr;
			}
		}
	}

	if (rxsp->status11 & AR_KeyMiss)
		rxs->rs_status |= ATH9K_RXERR_KEYMISS;

	return 0;
}
EXPORT_SYMBOL(ath9k_hw_process_rxdesc_edma);

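/*
 * Clear the TX status ring, rewind the tail pointer and program the
 * ring bounds into the QCU status ring registers.
 */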
void ath9k_hw_reset_txstatus_ring(struct ath_hw *ah)
{
	ah->ts_tail = 0;

	memset((void *) ah->ts_ring, 0,
		ah->ts_size * sizeof(struct ar9003_txs));

	ath_dbg(ath9k_hw_common(ah), XMIT,
		"TS Start 0x%x End 0x%x Virt %p, Size %d\n",
		ah->ts_paddr_start, ah->ts_paddr_end,
		ah->ts_ring, ah->ts_size);

	REG_WRITE(ah, AR_Q_STATUS_RING_START, ah->ts_paddr_start);
	REG_WRITE(ah, AR_Q_STATUS_RING_END, ah->ts_paddr_end);
}

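/*
 * Record the location and size of the TX status ring supplied by the
 * caller and reset it; ts_paddr_end points just past the last entry.
 */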
void ath9k_hw_setup_statusring(struct ath_hw *ah, void *ts_start,
			       u32 ts_paddr_start,
			       u16 size)
{
	ah->ts_paddr_start = ts_paddr_start;
	ah->ts_paddr_end = ts_paddr_start + (size * sizeof(struct ar9003_txs));
	ah->ts_size = size;
	ah->ts_ring = (struct ar9003_txs *) ts_start;

	ath9k_hw_reset_txstatus_ring(ah);
}
EXPORT_SYMBOL(ath9k_hw_setup_statusring);