1 /* 2 * Copyright (c) 2008-2009 Atheros Communications Inc. 3 * 4 * Permission to use, copy, modify, and/or distribute this software for any 5 * purpose with or without fee is hereby granted, provided that the above 6 * copyright notice and this permission notice appear in all copies. 7 * 8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 15 */ 16 17 #include "hw.h" 18 #include "hw-ops.h" 19 20 static void ath9k_hw_set_txq_interrupts(struct ath_hw *ah, 21 struct ath9k_tx_queue_info *qi) 22 { 23 ath_dbg(ath9k_hw_common(ah), ATH_DBG_INTERRUPT, 24 "tx ok 0x%x err 0x%x desc 0x%x eol 0x%x urn 0x%x\n", 25 ah->txok_interrupt_mask, ah->txerr_interrupt_mask, 26 ah->txdesc_interrupt_mask, ah->txeol_interrupt_mask, 27 ah->txurn_interrupt_mask); 28 29 ENABLE_REGWRITE_BUFFER(ah); 30 31 REG_WRITE(ah, AR_IMR_S0, 32 SM(ah->txok_interrupt_mask, AR_IMR_S0_QCU_TXOK) 33 | SM(ah->txdesc_interrupt_mask, AR_IMR_S0_QCU_TXDESC)); 34 REG_WRITE(ah, AR_IMR_S1, 35 SM(ah->txerr_interrupt_mask, AR_IMR_S1_QCU_TXERR) 36 | SM(ah->txeol_interrupt_mask, AR_IMR_S1_QCU_TXEOL)); 37 38 ah->imrs2_reg &= ~AR_IMR_S2_QCU_TXURN; 39 ah->imrs2_reg |= (ah->txurn_interrupt_mask & AR_IMR_S2_QCU_TXURN); 40 REG_WRITE(ah, AR_IMR_S2, ah->imrs2_reg); 41 42 REGWRITE_BUFFER_FLUSH(ah); 43 } 44 45 u32 ath9k_hw_gettxbuf(struct ath_hw *ah, u32 q) 46 { 47 return REG_READ(ah, AR_QTXDP(q)); 48 } 49 EXPORT_SYMBOL(ath9k_hw_gettxbuf); 50 51 void ath9k_hw_puttxbuf(struct ath_hw *ah, u32 q, u32 txdp) 52 { 53 REG_WRITE(ah, AR_QTXDP(q), txdp); 54 } 55 EXPORT_SYMBOL(ath9k_hw_puttxbuf); 56 57 void ath9k_hw_txstart(struct ath_hw *ah, u32 q) 58 { 59 ath_dbg(ath9k_hw_common(ah), ATH_DBG_QUEUE, 60 "Enable TXE on queue: %u\n", q); 61 REG_WRITE(ah, AR_Q_TXE, 1 << q); 62 } 63 EXPORT_SYMBOL(ath9k_hw_txstart); 64 65 void ath9k_hw_cleartxdesc(struct ath_hw *ah, void *ds) 66 { 67 struct ar5416_desc *ads = AR5416DESC(ds); 68 69 ads->ds_txstatus0 = ads->ds_txstatus1 = 0; 70 ads->ds_txstatus2 = ads->ds_txstatus3 = 0; 71 ads->ds_txstatus4 = ads->ds_txstatus5 = 0; 72 ads->ds_txstatus6 = ads->ds_txstatus7 = 0; 73 ads->ds_txstatus8 = ads->ds_txstatus9 = 0; 74 } 75 EXPORT_SYMBOL(ath9k_hw_cleartxdesc); 76 77 u32 ath9k_hw_numtxpending(struct ath_hw *ah, u32 q) 78 { 79 u32 npend; 80 81 npend = REG_READ(ah, AR_QSTS(q)) & AR_Q_STS_PEND_FR_CNT; 82 if (npend == 0) { 83 84 if (REG_READ(ah, AR_Q_TXE) & (1 << q)) 85 npend = 1; 86 } 87 88 return npend; 89 } 90 EXPORT_SYMBOL(ath9k_hw_numtxpending); 91 92 /** 93 * ath9k_hw_updatetxtriglevel - adjusts the frame trigger level 94 * 95 * @ah: atheros hardware struct 96 * @bIncTrigLevel: whether or not the frame trigger level should be updated 97 * 98 * The frame trigger level specifies the minimum number of bytes, 99 * in units of 64 bytes, that must be DMA'ed into the PCU TX FIFO 100 * before the PCU will initiate sending the frame on the air. This can 101 * mean we initiate transmit before a full frame is on the PCU TX FIFO. 102 * Resets to 0x1 (meaning 64 bytes or a full frame, whichever occurs 103 * first) 104 * 105 * Caution must be taken to ensure to set the frame trigger level based 106 * on the DMA request size. For example if the DMA request size is set to 107 * 128 bytes the trigger level cannot exceed 6 * 64 = 384. This is because 108 * there need to be enough space in the tx FIFO for the requested transfer 109 * size. Hence the tx FIFO will stop with 512 - 128 = 384 bytes. If we set 110 * the threshold to a value beyond 6, then the transmit will hang. 111 * 112 * Current dual stream devices have a PCU TX FIFO size of 8 KB. 113 * Current single stream devices have a PCU TX FIFO size of 4 KB, however, 114 * there is a hardware issue which forces us to use 2 KB instead so the 115 * frame trigger level must not exceed 2 KB for these chipsets. 116 */ 117 bool ath9k_hw_updatetxtriglevel(struct ath_hw *ah, bool bIncTrigLevel) 118 { 119 u32 txcfg, curLevel, newLevel; 120 121 if (ah->tx_trig_level >= ah->config.max_txtrig_level) 122 return false; 123 124 ath9k_hw_disable_interrupts(ah); 125 126 txcfg = REG_READ(ah, AR_TXCFG); 127 curLevel = MS(txcfg, AR_FTRIG); 128 newLevel = curLevel; 129 if (bIncTrigLevel) { 130 if (curLevel < ah->config.max_txtrig_level) 131 newLevel++; 132 } else if (curLevel > MIN_TX_FIFO_THRESHOLD) 133 newLevel--; 134 if (newLevel != curLevel) 135 REG_WRITE(ah, AR_TXCFG, 136 (txcfg & ~AR_FTRIG) | SM(newLevel, AR_FTRIG)); 137 138 ath9k_hw_enable_interrupts(ah); 139 140 ah->tx_trig_level = newLevel; 141 142 return newLevel != curLevel; 143 } 144 EXPORT_SYMBOL(ath9k_hw_updatetxtriglevel); 145 146 bool ath9k_hw_stoptxdma(struct ath_hw *ah, u32 q) 147 { 148 #define ATH9K_TX_STOP_DMA_TIMEOUT 4000 /* usec */ 149 #define ATH9K_TIME_QUANTUM 100 /* usec */ 150 struct ath_common *common = ath9k_hw_common(ah); 151 struct ath9k_hw_capabilities *pCap = &ah->caps; 152 struct ath9k_tx_queue_info *qi; 153 u32 tsfLow, j, wait; 154 u32 wait_time = ATH9K_TX_STOP_DMA_TIMEOUT / ATH9K_TIME_QUANTUM; 155 156 if (q >= pCap->total_queues) { 157 ath_dbg(common, ATH_DBG_QUEUE, 158 "Stopping TX DMA, invalid queue: %u\n", q); 159 return false; 160 } 161 162 qi = &ah->txq[q]; 163 if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) { 164 ath_dbg(common, ATH_DBG_QUEUE, 165 "Stopping TX DMA, inactive queue: %u\n", q); 166 return false; 167 } 168 169 REG_WRITE(ah, AR_Q_TXD, 1 << q); 170 171 for (wait = wait_time; wait != 0; wait--) { 172 if (ath9k_hw_numtxpending(ah, q) == 0) 173 break; 174 udelay(ATH9K_TIME_QUANTUM); 175 } 176 177 if (ath9k_hw_numtxpending(ah, q)) { 178 ath_dbg(common, ATH_DBG_QUEUE, 179 "%s: Num of pending TX Frames %d on Q %d\n", 180 __func__, ath9k_hw_numtxpending(ah, q), q); 181 182 for (j = 0; j < 2; j++) { 183 tsfLow = REG_READ(ah, AR_TSF_L32); 184 REG_WRITE(ah, AR_QUIET2, 185 SM(10, AR_QUIET2_QUIET_DUR)); 186 REG_WRITE(ah, AR_QUIET_PERIOD, 100); 187 REG_WRITE(ah, AR_NEXT_QUIET_TIMER, tsfLow >> 10); 188 REG_SET_BIT(ah, AR_TIMER_MODE, 189 AR_QUIET_TIMER_EN); 190 191 if ((REG_READ(ah, AR_TSF_L32) >> 10) == (tsfLow >> 10)) 192 break; 193 194 ath_dbg(common, ATH_DBG_QUEUE, 195 "TSF has moved while trying to set quiet time TSF: 0x%08x\n", 196 tsfLow); 197 } 198 199 REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH); 200 201 udelay(200); 202 REG_CLR_BIT(ah, AR_TIMER_MODE, AR_QUIET_TIMER_EN); 203 204 wait = wait_time; 205 while (ath9k_hw_numtxpending(ah, q)) { 206 if ((--wait) == 0) { 207 ath_err(common, 208 "Failed to stop TX DMA in 100 msec after killing last frame\n"); 209 break; 210 } 211 udelay(ATH9K_TIME_QUANTUM); 212 } 213 214 REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH); 215 } 216 217 REG_WRITE(ah, AR_Q_TXD, 0); 218 return wait != 0; 219 220 #undef ATH9K_TX_STOP_DMA_TIMEOUT 221 #undef ATH9K_TIME_QUANTUM 222 } 223 EXPORT_SYMBOL(ath9k_hw_stoptxdma); 224 225 void ath9k_hw_gettxintrtxqs(struct ath_hw *ah, u32 *txqs) 226 { 227 *txqs &= ah->intr_txqs; 228 ah->intr_txqs &= ~(*txqs); 229 } 230 EXPORT_SYMBOL(ath9k_hw_gettxintrtxqs); 231 232 bool ath9k_hw_set_txq_props(struct ath_hw *ah, int q, 233 const struct ath9k_tx_queue_info *qinfo) 234 { 235 u32 cw; 236 struct ath_common *common = ath9k_hw_common(ah); 237 struct ath9k_hw_capabilities *pCap = &ah->caps; 238 struct ath9k_tx_queue_info *qi; 239 240 if (q >= pCap->total_queues) { 241 ath_dbg(common, ATH_DBG_QUEUE, 242 "Set TXQ properties, invalid queue: %u\n", q); 243 return false; 244 } 245 246 qi = &ah->txq[q]; 247 if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) { 248 ath_dbg(common, ATH_DBG_QUEUE, 249 "Set TXQ properties, inactive queue: %u\n", q); 250 return false; 251 } 252 253 ath_dbg(common, ATH_DBG_QUEUE, "Set queue properties for: %u\n", q); 254 255 qi->tqi_ver = qinfo->tqi_ver; 256 qi->tqi_subtype = qinfo->tqi_subtype; 257 qi->tqi_qflags = qinfo->tqi_qflags; 258 qi->tqi_priority = qinfo->tqi_priority; 259 if (qinfo->tqi_aifs != ATH9K_TXQ_USEDEFAULT) 260 qi->tqi_aifs = min(qinfo->tqi_aifs, 255U); 261 else 262 qi->tqi_aifs = INIT_AIFS; 263 if (qinfo->tqi_cwmin != ATH9K_TXQ_USEDEFAULT) { 264 cw = min(qinfo->tqi_cwmin, 1024U); 265 qi->tqi_cwmin = 1; 266 while (qi->tqi_cwmin < cw) 267 qi->tqi_cwmin = (qi->tqi_cwmin << 1) | 1; 268 } else 269 qi->tqi_cwmin = qinfo->tqi_cwmin; 270 if (qinfo->tqi_cwmax != ATH9K_TXQ_USEDEFAULT) { 271 cw = min(qinfo->tqi_cwmax, 1024U); 272 qi->tqi_cwmax = 1; 273 while (qi->tqi_cwmax < cw) 274 qi->tqi_cwmax = (qi->tqi_cwmax << 1) | 1; 275 } else 276 qi->tqi_cwmax = INIT_CWMAX; 277 278 if (qinfo->tqi_shretry != 0) 279 qi->tqi_shretry = min((u32) qinfo->tqi_shretry, 15U); 280 else 281 qi->tqi_shretry = INIT_SH_RETRY; 282 if (qinfo->tqi_lgretry != 0) 283 qi->tqi_lgretry = min((u32) qinfo->tqi_lgretry, 15U); 284 else 285 qi->tqi_lgretry = INIT_LG_RETRY; 286 qi->tqi_cbrPeriod = qinfo->tqi_cbrPeriod; 287 qi->tqi_cbrOverflowLimit = qinfo->tqi_cbrOverflowLimit; 288 qi->tqi_burstTime = qinfo->tqi_burstTime; 289 qi->tqi_readyTime = qinfo->tqi_readyTime; 290 291 switch (qinfo->tqi_subtype) { 292 case ATH9K_WME_UPSD: 293 if (qi->tqi_type == ATH9K_TX_QUEUE_DATA) 294 qi->tqi_intFlags = ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS; 295 break; 296 default: 297 break; 298 } 299 300 return true; 301 } 302 EXPORT_SYMBOL(ath9k_hw_set_txq_props); 303 304 bool ath9k_hw_get_txq_props(struct ath_hw *ah, int q, 305 struct ath9k_tx_queue_info *qinfo) 306 { 307 struct ath_common *common = ath9k_hw_common(ah); 308 struct ath9k_hw_capabilities *pCap = &ah->caps; 309 struct ath9k_tx_queue_info *qi; 310 311 if (q >= pCap->total_queues) { 312 ath_dbg(common, ATH_DBG_QUEUE, 313 "Get TXQ properties, invalid queue: %u\n", q); 314 return false; 315 } 316 317 qi = &ah->txq[q]; 318 if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) { 319 ath_dbg(common, ATH_DBG_QUEUE, 320 "Get TXQ properties, inactive queue: %u\n", q); 321 return false; 322 } 323 324 qinfo->tqi_qflags = qi->tqi_qflags; 325 qinfo->tqi_ver = qi->tqi_ver; 326 qinfo->tqi_subtype = qi->tqi_subtype; 327 qinfo->tqi_qflags = qi->tqi_qflags; 328 qinfo->tqi_priority = qi->tqi_priority; 329 qinfo->tqi_aifs = qi->tqi_aifs; 330 qinfo->tqi_cwmin = qi->tqi_cwmin; 331 qinfo->tqi_cwmax = qi->tqi_cwmax; 332 qinfo->tqi_shretry = qi->tqi_shretry; 333 qinfo->tqi_lgretry = qi->tqi_lgretry; 334 qinfo->tqi_cbrPeriod = qi->tqi_cbrPeriod; 335 qinfo->tqi_cbrOverflowLimit = qi->tqi_cbrOverflowLimit; 336 qinfo->tqi_burstTime = qi->tqi_burstTime; 337 qinfo->tqi_readyTime = qi->tqi_readyTime; 338 339 return true; 340 } 341 EXPORT_SYMBOL(ath9k_hw_get_txq_props); 342 343 int ath9k_hw_setuptxqueue(struct ath_hw *ah, enum ath9k_tx_queue type, 344 const struct ath9k_tx_queue_info *qinfo) 345 { 346 struct ath_common *common = ath9k_hw_common(ah); 347 struct ath9k_tx_queue_info *qi; 348 struct ath9k_hw_capabilities *pCap = &ah->caps; 349 int q; 350 351 switch (type) { 352 case ATH9K_TX_QUEUE_BEACON: 353 q = pCap->total_queues - 1; 354 break; 355 case ATH9K_TX_QUEUE_CAB: 356 q = pCap->total_queues - 2; 357 break; 358 case ATH9K_TX_QUEUE_PSPOLL: 359 q = 1; 360 break; 361 case ATH9K_TX_QUEUE_UAPSD: 362 q = pCap->total_queues - 3; 363 break; 364 case ATH9K_TX_QUEUE_DATA: 365 for (q = 0; q < pCap->total_queues; q++) 366 if (ah->txq[q].tqi_type == 367 ATH9K_TX_QUEUE_INACTIVE) 368 break; 369 if (q == pCap->total_queues) { 370 ath_err(common, "No available TX queue\n"); 371 return -1; 372 } 373 break; 374 default: 375 ath_err(common, "Invalid TX queue type: %u\n", type); 376 return -1; 377 } 378 379 ath_dbg(common, ATH_DBG_QUEUE, "Setup TX queue: %u\n", q); 380 381 qi = &ah->txq[q]; 382 if (qi->tqi_type != ATH9K_TX_QUEUE_INACTIVE) { 383 ath_err(common, "TX queue: %u already active\n", q); 384 return -1; 385 } 386 memset(qi, 0, sizeof(struct ath9k_tx_queue_info)); 387 qi->tqi_type = type; 388 if (qinfo == NULL) { 389 qi->tqi_qflags = 390 TXQ_FLAG_TXOKINT_ENABLE 391 | TXQ_FLAG_TXERRINT_ENABLE 392 | TXQ_FLAG_TXDESCINT_ENABLE | TXQ_FLAG_TXURNINT_ENABLE; 393 qi->tqi_aifs = INIT_AIFS; 394 qi->tqi_cwmin = ATH9K_TXQ_USEDEFAULT; 395 qi->tqi_cwmax = INIT_CWMAX; 396 qi->tqi_shretry = INIT_SH_RETRY; 397 qi->tqi_lgretry = INIT_LG_RETRY; 398 qi->tqi_physCompBuf = 0; 399 } else { 400 qi->tqi_physCompBuf = qinfo->tqi_physCompBuf; 401 (void) ath9k_hw_set_txq_props(ah, q, qinfo); 402 } 403 404 return q; 405 } 406 EXPORT_SYMBOL(ath9k_hw_setuptxqueue); 407 408 bool ath9k_hw_releasetxqueue(struct ath_hw *ah, u32 q) 409 { 410 struct ath9k_hw_capabilities *pCap = &ah->caps; 411 struct ath_common *common = ath9k_hw_common(ah); 412 struct ath9k_tx_queue_info *qi; 413 414 if (q >= pCap->total_queues) { 415 ath_dbg(common, ATH_DBG_QUEUE, 416 "Release TXQ, invalid queue: %u\n", q); 417 return false; 418 } 419 qi = &ah->txq[q]; 420 if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) { 421 ath_dbg(common, ATH_DBG_QUEUE, 422 "Release TXQ, inactive queue: %u\n", q); 423 return false; 424 } 425 426 ath_dbg(common, ATH_DBG_QUEUE, "Release TX queue: %u\n", q); 427 428 qi->tqi_type = ATH9K_TX_QUEUE_INACTIVE; 429 ah->txok_interrupt_mask &= ~(1 << q); 430 ah->txerr_interrupt_mask &= ~(1 << q); 431 ah->txdesc_interrupt_mask &= ~(1 << q); 432 ah->txeol_interrupt_mask &= ~(1 << q); 433 ah->txurn_interrupt_mask &= ~(1 << q); 434 ath9k_hw_set_txq_interrupts(ah, qi); 435 436 return true; 437 } 438 EXPORT_SYMBOL(ath9k_hw_releasetxqueue); 439 440 bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q) 441 { 442 struct ath9k_hw_capabilities *pCap = &ah->caps; 443 struct ath_common *common = ath9k_hw_common(ah); 444 struct ath9k_channel *chan = ah->curchan; 445 struct ath9k_tx_queue_info *qi; 446 u32 cwMin, chanCwMin, value; 447 448 if (q >= pCap->total_queues) { 449 ath_dbg(common, ATH_DBG_QUEUE, 450 "Reset TXQ, invalid queue: %u\n", q); 451 return false; 452 } 453 454 qi = &ah->txq[q]; 455 if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) { 456 ath_dbg(common, ATH_DBG_QUEUE, 457 "Reset TXQ, inactive queue: %u\n", q); 458 return true; 459 } 460 461 ath_dbg(common, ATH_DBG_QUEUE, "Reset TX queue: %u\n", q); 462 463 if (qi->tqi_cwmin == ATH9K_TXQ_USEDEFAULT) { 464 if (chan && IS_CHAN_B(chan)) 465 chanCwMin = INIT_CWMIN_11B; 466 else 467 chanCwMin = INIT_CWMIN; 468 469 for (cwMin = 1; cwMin < chanCwMin; cwMin = (cwMin << 1) | 1); 470 } else 471 cwMin = qi->tqi_cwmin; 472 473 ENABLE_REGWRITE_BUFFER(ah); 474 475 REG_WRITE(ah, AR_DLCL_IFS(q), 476 SM(cwMin, AR_D_LCL_IFS_CWMIN) | 477 SM(qi->tqi_cwmax, AR_D_LCL_IFS_CWMAX) | 478 SM(qi->tqi_aifs, AR_D_LCL_IFS_AIFS)); 479 480 REG_WRITE(ah, AR_DRETRY_LIMIT(q), 481 SM(INIT_SSH_RETRY, AR_D_RETRY_LIMIT_STA_SH) | 482 SM(INIT_SLG_RETRY, AR_D_RETRY_LIMIT_STA_LG) | 483 SM(qi->tqi_shretry, AR_D_RETRY_LIMIT_FR_SH)); 484 485 REG_WRITE(ah, AR_QMISC(q), AR_Q_MISC_DCU_EARLY_TERM_REQ); 486 REG_WRITE(ah, AR_DMISC(q), 487 AR_D_MISC_CW_BKOFF_EN | AR_D_MISC_FRAG_WAIT_EN | 0x2); 488 489 if (qi->tqi_cbrPeriod) { 490 REG_WRITE(ah, AR_QCBRCFG(q), 491 SM(qi->tqi_cbrPeriod, AR_Q_CBRCFG_INTERVAL) | 492 SM(qi->tqi_cbrOverflowLimit, AR_Q_CBRCFG_OVF_THRESH)); 493 REG_WRITE(ah, AR_QMISC(q), 494 REG_READ(ah, AR_QMISC(q)) | AR_Q_MISC_FSP_CBR | 495 (qi->tqi_cbrOverflowLimit ? 496 AR_Q_MISC_CBR_EXP_CNTR_LIMIT_EN : 0)); 497 } 498 if (qi->tqi_readyTime && (qi->tqi_type != ATH9K_TX_QUEUE_CAB)) { 499 REG_WRITE(ah, AR_QRDYTIMECFG(q), 500 SM(qi->tqi_readyTime, AR_Q_RDYTIMECFG_DURATION) | 501 AR_Q_RDYTIMECFG_EN); 502 } 503 504 REG_WRITE(ah, AR_DCHNTIME(q), 505 SM(qi->tqi_burstTime, AR_D_CHNTIME_DUR) | 506 (qi->tqi_burstTime ? AR_D_CHNTIME_EN : 0)); 507 508 if (qi->tqi_burstTime 509 && (qi->tqi_qflags & TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE)) { 510 REG_WRITE(ah, AR_QMISC(q), 511 REG_READ(ah, AR_QMISC(q)) | 512 AR_Q_MISC_RDYTIME_EXP_POLICY); 513 514 } 515 516 if (qi->tqi_qflags & TXQ_FLAG_BACKOFF_DISABLE) { 517 REG_WRITE(ah, AR_DMISC(q), 518 REG_READ(ah, AR_DMISC(q)) | 519 AR_D_MISC_POST_FR_BKOFF_DIS); 520 } 521 522 REGWRITE_BUFFER_FLUSH(ah); 523 524 if (qi->tqi_qflags & TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE) { 525 REG_WRITE(ah, AR_DMISC(q), 526 REG_READ(ah, AR_DMISC(q)) | 527 AR_D_MISC_FRAG_BKOFF_EN); 528 } 529 switch (qi->tqi_type) { 530 case ATH9K_TX_QUEUE_BEACON: 531 ENABLE_REGWRITE_BUFFER(ah); 532 533 REG_WRITE(ah, AR_QMISC(q), REG_READ(ah, AR_QMISC(q)) 534 | AR_Q_MISC_FSP_DBA_GATED 535 | AR_Q_MISC_BEACON_USE 536 | AR_Q_MISC_CBR_INCR_DIS1); 537 538 REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q)) 539 | (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL << 540 AR_D_MISC_ARB_LOCKOUT_CNTRL_S) 541 | AR_D_MISC_BEACON_USE 542 | AR_D_MISC_POST_FR_BKOFF_DIS); 543 544 REGWRITE_BUFFER_FLUSH(ah); 545 546 /* 547 * cwmin and cwmax should be 0 for beacon queue 548 * but not for IBSS as we would create an imbalance 549 * on beaconing fairness for participating nodes. 550 */ 551 if (AR_SREV_9300_20_OR_LATER(ah) && 552 ah->opmode != NL80211_IFTYPE_ADHOC) { 553 REG_WRITE(ah, AR_DLCL_IFS(q), SM(0, AR_D_LCL_IFS_CWMIN) 554 | SM(0, AR_D_LCL_IFS_CWMAX) 555 | SM(qi->tqi_aifs, AR_D_LCL_IFS_AIFS)); 556 } 557 break; 558 case ATH9K_TX_QUEUE_CAB: 559 ENABLE_REGWRITE_BUFFER(ah); 560 561 REG_WRITE(ah, AR_QMISC(q), REG_READ(ah, AR_QMISC(q)) 562 | AR_Q_MISC_FSP_DBA_GATED 563 | AR_Q_MISC_CBR_INCR_DIS1 564 | AR_Q_MISC_CBR_INCR_DIS0); 565 value = (qi->tqi_readyTime - 566 (ah->config.sw_beacon_response_time - 567 ah->config.dma_beacon_response_time) - 568 ah->config.additional_swba_backoff) * 1024; 569 REG_WRITE(ah, AR_QRDYTIMECFG(q), 570 value | AR_Q_RDYTIMECFG_EN); 571 REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q)) 572 | (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL << 573 AR_D_MISC_ARB_LOCKOUT_CNTRL_S)); 574 575 REGWRITE_BUFFER_FLUSH(ah); 576 577 break; 578 case ATH9K_TX_QUEUE_PSPOLL: 579 REG_WRITE(ah, AR_QMISC(q), 580 REG_READ(ah, AR_QMISC(q)) | AR_Q_MISC_CBR_INCR_DIS1); 581 break; 582 case ATH9K_TX_QUEUE_UAPSD: 583 REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q)) | 584 AR_D_MISC_POST_FR_BKOFF_DIS); 585 break; 586 default: 587 break; 588 } 589 590 if (qi->tqi_intFlags & ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS) { 591 REG_WRITE(ah, AR_DMISC(q), 592 REG_READ(ah, AR_DMISC(q)) | 593 SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL, 594 AR_D_MISC_ARB_LOCKOUT_CNTRL) | 595 AR_D_MISC_POST_FR_BKOFF_DIS); 596 } 597 598 if (AR_SREV_9300_20_OR_LATER(ah)) 599 REG_WRITE(ah, AR_Q_DESC_CRCCHK, AR_Q_DESC_CRCCHK_EN); 600 601 if (qi->tqi_qflags & TXQ_FLAG_TXOKINT_ENABLE) 602 ah->txok_interrupt_mask |= 1 << q; 603 else 604 ah->txok_interrupt_mask &= ~(1 << q); 605 if (qi->tqi_qflags & TXQ_FLAG_TXERRINT_ENABLE) 606 ah->txerr_interrupt_mask |= 1 << q; 607 else 608 ah->txerr_interrupt_mask &= ~(1 << q); 609 if (qi->tqi_qflags & TXQ_FLAG_TXDESCINT_ENABLE) 610 ah->txdesc_interrupt_mask |= 1 << q; 611 else 612 ah->txdesc_interrupt_mask &= ~(1 << q); 613 if (qi->tqi_qflags & TXQ_FLAG_TXEOLINT_ENABLE) 614 ah->txeol_interrupt_mask |= 1 << q; 615 else 616 ah->txeol_interrupt_mask &= ~(1 << q); 617 if (qi->tqi_qflags & TXQ_FLAG_TXURNINT_ENABLE) 618 ah->txurn_interrupt_mask |= 1 << q; 619 else 620 ah->txurn_interrupt_mask &= ~(1 << q); 621 ath9k_hw_set_txq_interrupts(ah, qi); 622 623 return true; 624 } 625 EXPORT_SYMBOL(ath9k_hw_resettxqueue); 626 627 int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds, 628 struct ath_rx_status *rs, u64 tsf) 629 { 630 struct ar5416_desc ads; 631 struct ar5416_desc *adsp = AR5416DESC(ds); 632 u32 phyerr; 633 634 if ((adsp->ds_rxstatus8 & AR_RxDone) == 0) 635 return -EINPROGRESS; 636 637 ads.u.rx = adsp->u.rx; 638 639 rs->rs_status = 0; 640 rs->rs_flags = 0; 641 642 rs->rs_datalen = ads.ds_rxstatus1 & AR_DataLen; 643 rs->rs_tstamp = ads.AR_RcvTimestamp; 644 645 if (ads.ds_rxstatus8 & AR_PostDelimCRCErr) { 646 rs->rs_rssi = ATH9K_RSSI_BAD; 647 rs->rs_rssi_ctl0 = ATH9K_RSSI_BAD; 648 rs->rs_rssi_ctl1 = ATH9K_RSSI_BAD; 649 rs->rs_rssi_ctl2 = ATH9K_RSSI_BAD; 650 rs->rs_rssi_ext0 = ATH9K_RSSI_BAD; 651 rs->rs_rssi_ext1 = ATH9K_RSSI_BAD; 652 rs->rs_rssi_ext2 = ATH9K_RSSI_BAD; 653 } else { 654 rs->rs_rssi = MS(ads.ds_rxstatus4, AR_RxRSSICombined); 655 rs->rs_rssi_ctl0 = MS(ads.ds_rxstatus0, 656 AR_RxRSSIAnt00); 657 rs->rs_rssi_ctl1 = MS(ads.ds_rxstatus0, 658 AR_RxRSSIAnt01); 659 rs->rs_rssi_ctl2 = MS(ads.ds_rxstatus0, 660 AR_RxRSSIAnt02); 661 rs->rs_rssi_ext0 = MS(ads.ds_rxstatus4, 662 AR_RxRSSIAnt10); 663 rs->rs_rssi_ext1 = MS(ads.ds_rxstatus4, 664 AR_RxRSSIAnt11); 665 rs->rs_rssi_ext2 = MS(ads.ds_rxstatus4, 666 AR_RxRSSIAnt12); 667 } 668 if (ads.ds_rxstatus8 & AR_RxKeyIdxValid) 669 rs->rs_keyix = MS(ads.ds_rxstatus8, AR_KeyIdx); 670 else 671 rs->rs_keyix = ATH9K_RXKEYIX_INVALID; 672 673 rs->rs_rate = RXSTATUS_RATE(ah, (&ads)); 674 rs->rs_more = (ads.ds_rxstatus1 & AR_RxMore) ? 1 : 0; 675 676 rs->rs_isaggr = (ads.ds_rxstatus8 & AR_RxAggr) ? 1 : 0; 677 rs->rs_moreaggr = 678 (ads.ds_rxstatus8 & AR_RxMoreAggr) ? 1 : 0; 679 rs->rs_antenna = MS(ads.ds_rxstatus3, AR_RxAntenna); 680 rs->rs_flags = 681 (ads.ds_rxstatus3 & AR_GI) ? ATH9K_RX_GI : 0; 682 rs->rs_flags |= 683 (ads.ds_rxstatus3 & AR_2040) ? ATH9K_RX_2040 : 0; 684 685 if (ads.ds_rxstatus8 & AR_PreDelimCRCErr) 686 rs->rs_flags |= ATH9K_RX_DELIM_CRC_PRE; 687 if (ads.ds_rxstatus8 & AR_PostDelimCRCErr) 688 rs->rs_flags |= ATH9K_RX_DELIM_CRC_POST; 689 if (ads.ds_rxstatus8 & AR_DecryptBusyErr) 690 rs->rs_flags |= ATH9K_RX_DECRYPT_BUSY; 691 692 if ((ads.ds_rxstatus8 & AR_RxFrameOK) == 0) { 693 if (ads.ds_rxstatus8 & AR_CRCErr) 694 rs->rs_status |= ATH9K_RXERR_CRC; 695 if (ads.ds_rxstatus8 & AR_PHYErr) { 696 rs->rs_status |= ATH9K_RXERR_PHY; 697 phyerr = MS(ads.ds_rxstatus8, AR_PHYErrCode); 698 rs->rs_phyerr = phyerr; 699 } 700 if (ads.ds_rxstatus8 & AR_DecryptCRCErr) 701 rs->rs_status |= ATH9K_RXERR_DECRYPT; 702 if (ads.ds_rxstatus8 & AR_MichaelErr) 703 rs->rs_status |= ATH9K_RXERR_MIC; 704 if (ads.ds_rxstatus8 & AR_KeyMiss) 705 rs->rs_status |= ATH9K_RXERR_DECRYPT; 706 } 707 708 return 0; 709 } 710 EXPORT_SYMBOL(ath9k_hw_rxprocdesc); 711 712 /* 713 * This can stop or re-enables RX. 714 * 715 * If bool is set this will kill any frame which is currently being 716 * transferred between the MAC and baseband and also prevent any new 717 * frames from getting started. 718 */ 719 bool ath9k_hw_setrxabort(struct ath_hw *ah, bool set) 720 { 721 u32 reg; 722 723 if (set) { 724 REG_SET_BIT(ah, AR_DIAG_SW, 725 (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT)); 726 727 if (!ath9k_hw_wait(ah, AR_OBS_BUS_1, AR_OBS_BUS_1_RX_STATE, 728 0, AH_WAIT_TIMEOUT)) { 729 REG_CLR_BIT(ah, AR_DIAG_SW, 730 (AR_DIAG_RX_DIS | 731 AR_DIAG_RX_ABORT)); 732 733 reg = REG_READ(ah, AR_OBS_BUS_1); 734 ath_err(ath9k_hw_common(ah), 735 "RX failed to go idle in 10 ms RXSM=0x%x\n", 736 reg); 737 738 return false; 739 } 740 } else { 741 REG_CLR_BIT(ah, AR_DIAG_SW, 742 (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT)); 743 } 744 745 return true; 746 } 747 EXPORT_SYMBOL(ath9k_hw_setrxabort); 748 749 void ath9k_hw_putrxbuf(struct ath_hw *ah, u32 rxdp) 750 { 751 REG_WRITE(ah, AR_RXDP, rxdp); 752 } 753 EXPORT_SYMBOL(ath9k_hw_putrxbuf); 754 755 void ath9k_hw_startpcureceive(struct ath_hw *ah, bool is_scanning) 756 { 757 ath9k_enable_mib_counters(ah); 758 759 ath9k_ani_reset(ah, is_scanning); 760 761 REG_CLR_BIT(ah, AR_DIAG_SW, (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT)); 762 } 763 EXPORT_SYMBOL(ath9k_hw_startpcureceive); 764 765 void ath9k_hw_abortpcurecv(struct ath_hw *ah) 766 { 767 REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_ABORT | AR_DIAG_RX_DIS); 768 769 ath9k_hw_disable_mib_counters(ah); 770 } 771 EXPORT_SYMBOL(ath9k_hw_abortpcurecv); 772 773 bool ath9k_hw_stopdmarecv(struct ath_hw *ah) 774 { 775 #define AH_RX_STOP_DMA_TIMEOUT 10000 /* usec */ 776 #define AH_RX_TIME_QUANTUM 100 /* usec */ 777 struct ath_common *common = ath9k_hw_common(ah); 778 int i; 779 780 REG_WRITE(ah, AR_CR, AR_CR_RXD); 781 782 /* Wait for rx enable bit to go low */ 783 for (i = AH_RX_STOP_DMA_TIMEOUT / AH_TIME_QUANTUM; i != 0; i--) { 784 if ((REG_READ(ah, AR_CR) & AR_CR_RXE) == 0) 785 break; 786 udelay(AH_TIME_QUANTUM); 787 } 788 789 if (i == 0) { 790 ath_err(common, 791 "DMA failed to stop in %d ms AR_CR=0x%08x AR_DIAG_SW=0x%08x\n", 792 AH_RX_STOP_DMA_TIMEOUT / 1000, 793 REG_READ(ah, AR_CR), 794 REG_READ(ah, AR_DIAG_SW)); 795 return false; 796 } else { 797 return true; 798 } 799 800 #undef AH_RX_TIME_QUANTUM 801 #undef AH_RX_STOP_DMA_TIMEOUT 802 } 803 EXPORT_SYMBOL(ath9k_hw_stopdmarecv); 804 805 int ath9k_hw_beaconq_setup(struct ath_hw *ah) 806 { 807 struct ath9k_tx_queue_info qi; 808 809 memset(&qi, 0, sizeof(qi)); 810 qi.tqi_aifs = 1; 811 qi.tqi_cwmin = 0; 812 qi.tqi_cwmax = 0; 813 /* NB: don't enable any interrupts */ 814 return ath9k_hw_setuptxqueue(ah, ATH9K_TX_QUEUE_BEACON, &qi); 815 } 816 EXPORT_SYMBOL(ath9k_hw_beaconq_setup); 817 818 bool ath9k_hw_intrpend(struct ath_hw *ah) 819 { 820 u32 host_isr; 821 822 if (AR_SREV_9100(ah)) 823 return true; 824 825 host_isr = REG_READ(ah, AR_INTR_ASYNC_CAUSE); 826 if ((host_isr & AR_INTR_MAC_IRQ) && (host_isr != AR_INTR_SPURIOUS)) 827 return true; 828 829 host_isr = REG_READ(ah, AR_INTR_SYNC_CAUSE); 830 if ((host_isr & AR_INTR_SYNC_DEFAULT) 831 && (host_isr != AR_INTR_SPURIOUS)) 832 return true; 833 834 return false; 835 } 836 EXPORT_SYMBOL(ath9k_hw_intrpend); 837 838 void ath9k_hw_disable_interrupts(struct ath_hw *ah) 839 { 840 struct ath_common *common = ath9k_hw_common(ah); 841 842 ath_dbg(common, ATH_DBG_INTERRUPT, "disable IER\n"); 843 REG_WRITE(ah, AR_IER, AR_IER_DISABLE); 844 (void) REG_READ(ah, AR_IER); 845 if (!AR_SREV_9100(ah)) { 846 REG_WRITE(ah, AR_INTR_ASYNC_ENABLE, 0); 847 (void) REG_READ(ah, AR_INTR_ASYNC_ENABLE); 848 849 REG_WRITE(ah, AR_INTR_SYNC_ENABLE, 0); 850 (void) REG_READ(ah, AR_INTR_SYNC_ENABLE); 851 } 852 } 853 EXPORT_SYMBOL(ath9k_hw_disable_interrupts); 854 855 void ath9k_hw_enable_interrupts(struct ath_hw *ah) 856 { 857 struct ath_common *common = ath9k_hw_common(ah); 858 859 if (!(ah->imask & ATH9K_INT_GLOBAL)) 860 return; 861 862 ath_dbg(common, ATH_DBG_INTERRUPT, "enable IER\n"); 863 REG_WRITE(ah, AR_IER, AR_IER_ENABLE); 864 if (!AR_SREV_9100(ah)) { 865 REG_WRITE(ah, AR_INTR_ASYNC_ENABLE, 866 AR_INTR_MAC_IRQ); 867 REG_WRITE(ah, AR_INTR_ASYNC_MASK, AR_INTR_MAC_IRQ); 868 869 870 REG_WRITE(ah, AR_INTR_SYNC_ENABLE, 871 AR_INTR_SYNC_DEFAULT); 872 REG_WRITE(ah, AR_INTR_SYNC_MASK, 873 AR_INTR_SYNC_DEFAULT); 874 } 875 ath_dbg(common, ATH_DBG_INTERRUPT, "AR_IMR 0x%x IER 0x%x\n", 876 REG_READ(ah, AR_IMR), REG_READ(ah, AR_IER)); 877 } 878 EXPORT_SYMBOL(ath9k_hw_enable_interrupts); 879 880 void ath9k_hw_set_interrupts(struct ath_hw *ah, enum ath9k_int ints) 881 { 882 enum ath9k_int omask = ah->imask; 883 u32 mask, mask2; 884 struct ath9k_hw_capabilities *pCap = &ah->caps; 885 struct ath_common *common = ath9k_hw_common(ah); 886 887 if (!(ints & ATH9K_INT_GLOBAL)) 888 ath9k_hw_disable_interrupts(ah); 889 890 ath_dbg(common, ATH_DBG_INTERRUPT, "0x%x => 0x%x\n", omask, ints); 891 892 /* TODO: global int Ref count */ 893 mask = ints & ATH9K_INT_COMMON; 894 mask2 = 0; 895 896 if (ints & ATH9K_INT_TX) { 897 if (ah->config.tx_intr_mitigation) 898 mask |= AR_IMR_TXMINTR | AR_IMR_TXINTM; 899 else { 900 if (ah->txok_interrupt_mask) 901 mask |= AR_IMR_TXOK; 902 if (ah->txdesc_interrupt_mask) 903 mask |= AR_IMR_TXDESC; 904 } 905 if (ah->txerr_interrupt_mask) 906 mask |= AR_IMR_TXERR; 907 if (ah->txeol_interrupt_mask) 908 mask |= AR_IMR_TXEOL; 909 } 910 if (ints & ATH9K_INT_RX) { 911 if (AR_SREV_9300_20_OR_LATER(ah)) { 912 mask |= AR_IMR_RXERR | AR_IMR_RXOK_HP; 913 if (ah->config.rx_intr_mitigation) { 914 mask &= ~AR_IMR_RXOK_LP; 915 mask |= AR_IMR_RXMINTR | AR_IMR_RXINTM; 916 } else { 917 mask |= AR_IMR_RXOK_LP; 918 } 919 } else { 920 if (ah->config.rx_intr_mitigation) 921 mask |= AR_IMR_RXMINTR | AR_IMR_RXINTM; 922 else 923 mask |= AR_IMR_RXOK | AR_IMR_RXDESC; 924 } 925 if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) 926 mask |= AR_IMR_GENTMR; 927 } 928 929 if (ints & (ATH9K_INT_BMISC)) { 930 mask |= AR_IMR_BCNMISC; 931 if (ints & ATH9K_INT_TIM) 932 mask2 |= AR_IMR_S2_TIM; 933 if (ints & ATH9K_INT_DTIM) 934 mask2 |= AR_IMR_S2_DTIM; 935 if (ints & ATH9K_INT_DTIMSYNC) 936 mask2 |= AR_IMR_S2_DTIMSYNC; 937 if (ints & ATH9K_INT_CABEND) 938 mask2 |= AR_IMR_S2_CABEND; 939 if (ints & ATH9K_INT_TSFOOR) 940 mask2 |= AR_IMR_S2_TSFOOR; 941 } 942 943 if (ints & (ATH9K_INT_GTT | ATH9K_INT_CST)) { 944 mask |= AR_IMR_BCNMISC; 945 if (ints & ATH9K_INT_GTT) 946 mask2 |= AR_IMR_S2_GTT; 947 if (ints & ATH9K_INT_CST) 948 mask2 |= AR_IMR_S2_CST; 949 } 950 951 ath_dbg(common, ATH_DBG_INTERRUPT, "new IMR 0x%x\n", mask); 952 REG_WRITE(ah, AR_IMR, mask); 953 ah->imrs2_reg &= ~(AR_IMR_S2_TIM | AR_IMR_S2_DTIM | AR_IMR_S2_DTIMSYNC | 954 AR_IMR_S2_CABEND | AR_IMR_S2_CABTO | 955 AR_IMR_S2_TSFOOR | AR_IMR_S2_GTT | AR_IMR_S2_CST); 956 ah->imrs2_reg |= mask2; 957 REG_WRITE(ah, AR_IMR_S2, ah->imrs2_reg); 958 959 if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) { 960 if (ints & ATH9K_INT_TIM_TIMER) 961 REG_SET_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER); 962 else 963 REG_CLR_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER); 964 } 965 966 if (ints & ATH9K_INT_GLOBAL) 967 ath9k_hw_enable_interrupts(ah); 968 969 return; 970 } 971 EXPORT_SYMBOL(ath9k_hw_set_interrupts); 972