/*
 * Copyright (c) 2008-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "hw.h"
#include "hw-ops.h"

static void ath9k_hw_set_txq_interrupts(struct ath_hw *ah,
					struct ath9k_tx_queue_info *qi)
{
	ath_dbg(ath9k_hw_common(ah), ATH_DBG_INTERRUPT,
		"tx ok 0x%x err 0x%x desc 0x%x eol 0x%x urn 0x%x\n",
		ah->txok_interrupt_mask, ah->txerr_interrupt_mask,
		ah->txdesc_interrupt_mask, ah->txeol_interrupt_mask,
		ah->txurn_interrupt_mask);

	ENABLE_REGWRITE_BUFFER(ah);

	REG_WRITE(ah, AR_IMR_S0,
		  SM(ah->txok_interrupt_mask, AR_IMR_S0_QCU_TXOK)
		  | SM(ah->txdesc_interrupt_mask, AR_IMR_S0_QCU_TXDESC));
	REG_WRITE(ah, AR_IMR_S1,
		  SM(ah->txerr_interrupt_mask, AR_IMR_S1_QCU_TXERR)
		  | SM(ah->txeol_interrupt_mask, AR_IMR_S1_QCU_TXEOL));

	ah->imrs2_reg &= ~AR_IMR_S2_QCU_TXURN;
	ah->imrs2_reg |= (ah->txurn_interrupt_mask & AR_IMR_S2_QCU_TXURN);
	REG_WRITE(ah, AR_IMR_S2, ah->imrs2_reg);

	REGWRITE_BUFFER_FLUSH(ah);
}

u32 ath9k_hw_gettxbuf(struct ath_hw *ah, u32 q)
{
	return REG_READ(ah, AR_QTXDP(q));
}
EXPORT_SYMBOL(ath9k_hw_gettxbuf);

void ath9k_hw_puttxbuf(struct ath_hw *ah, u32 q, u32 txdp)
{
	REG_WRITE(ah, AR_QTXDP(q), txdp);
}
EXPORT_SYMBOL(ath9k_hw_puttxbuf);

void ath9k_hw_txstart(struct ath_hw *ah, u32 q)
{
	ath_dbg(ath9k_hw_common(ah), ATH_DBG_QUEUE,
		"Enable TXE on queue: %u\n", q);
	REG_WRITE(ah, AR_Q_TXE, 1 << q);
}
EXPORT_SYMBOL(ath9k_hw_txstart);

u32 ath9k_hw_numtxpending(struct ath_hw *ah, u32 q)
{
	u32 npend;

	npend = REG_READ(ah, AR_QSTS(q)) & AR_Q_STS_PEND_FR_CNT;
	if (npend == 0) {
		/* The queue may still be busy even with no pending frames */
		if (REG_READ(ah, AR_Q_TXE) & (1 << q))
			npend = 1;
	}

	return npend;
}
EXPORT_SYMBOL(ath9k_hw_numtxpending);

/**
 * ath9k_hw_updatetxtriglevel - adjusts the frame trigger level
 *
 * @ah: atheros hardware struct
 * @bIncTrigLevel: whether or not the frame trigger level should be updated
 *
 * The frame trigger level specifies the minimum number of bytes,
 * in units of 64 bytes, that must be DMA'ed into the PCU TX FIFO
 * before the PCU will initiate sending the frame on the air. This can
 * mean we initiate transmit before a full frame is on the PCU TX FIFO.
 * Resets to 0x1 (meaning 64 bytes or a full frame, whichever occurs
 * first).
 *
 * Caution must be taken to set the frame trigger level based
 * on the DMA request size. For example, if the DMA request size is set
 * to 128 bytes the trigger level cannot exceed 6 * 64 = 384. This is
 * because there needs to be enough space in the TX FIFO for the
 * requested transfer size. Hence the TX FIFO will stop with
 * 512 - 128 = 384 bytes. If we set the threshold to a value beyond 6,
 * then the transmit will hang.
 *
 * Current dual stream devices have a PCU TX FIFO size of 8 KB.
 * Current single stream devices have a PCU TX FIFO size of 4 KB; however,
 * there is a hardware issue which forces us to use 2 KB instead, so the
 * frame trigger level must not exceed 2 KB for these chipsets.
 */
bool ath9k_hw_updatetxtriglevel(struct ath_hw *ah, bool bIncTrigLevel)
{
	u32 txcfg, curLevel, newLevel;

	if (ah->tx_trig_level >= ah->config.max_txtrig_level)
		return false;

	ath9k_hw_disable_interrupts(ah);

	txcfg = REG_READ(ah, AR_TXCFG);
	curLevel = MS(txcfg, AR_FTRIG);
	newLevel = curLevel;
	if (bIncTrigLevel) {
		if (curLevel < ah->config.max_txtrig_level)
			newLevel++;
	} else if (curLevel > MIN_TX_FIFO_THRESHOLD)
		newLevel--;
	if (newLevel != curLevel)
		REG_WRITE(ah, AR_TXCFG,
			  (txcfg & ~AR_FTRIG) | SM(newLevel, AR_FTRIG));

	ath9k_hw_enable_interrupts(ah);

	ah->tx_trig_level = newLevel;

	return newLevel != curLevel;
}
EXPORT_SYMBOL(ath9k_hw_updatetxtriglevel);
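/*
 * Worked example of the bound described above (numbers taken from the
 * comment, not read from hardware): with a 512-byte FIFO window and a
 * DMA request size of 128 bytes, the FIFO may only fill up to
 * 512 - 128 = 384 bytes before the next burst would overflow it, so
 * the highest safe trigger level is 384 / 64 = 6 units.
 */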
void ath9k_hw_abort_tx_dma(struct ath_hw *ah)
{
	int i, q;

	REG_WRITE(ah, AR_Q_TXD, AR_Q_TXD_M);

	REG_SET_BIT(ah, AR_PCU_MISC, AR_PCU_FORCE_QUIET_COLL | AR_PCU_CLEAR_VMF);
	REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);
	REG_SET_BIT(ah, AR_D_GBL_IFS_MISC, AR_D_GBL_IFS_MISC_IGNORE_BACKOFF);

	for (q = 0; q < AR_NUM_QCU; q++) {
		for (i = 0; i < 1000; i++) {
			if (i)
				udelay(5);

			if (!ath9k_hw_numtxpending(ah, q))
				break;
		}
	}

	REG_CLR_BIT(ah, AR_PCU_MISC, AR_PCU_FORCE_QUIET_COLL | AR_PCU_CLEAR_VMF);
	REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);
	REG_CLR_BIT(ah, AR_D_GBL_IFS_MISC, AR_D_GBL_IFS_MISC_IGNORE_BACKOFF);

	REG_WRITE(ah, AR_Q_TXD, 0);
}
EXPORT_SYMBOL(ath9k_hw_abort_tx_dma);

bool ath9k_hw_stop_dma_queue(struct ath_hw *ah, u32 q)
{
#define ATH9K_TX_STOP_DMA_TIMEOUT	1000	/* usec */
#define ATH9K_TIME_QUANTUM		100	/* usec */
	int wait_time = ATH9K_TX_STOP_DMA_TIMEOUT / ATH9K_TIME_QUANTUM;
	int wait;

	REG_WRITE(ah, AR_Q_TXD, 1 << q);

	for (wait = wait_time; wait != 0; wait--) {
		if (wait != wait_time)
			udelay(ATH9K_TIME_QUANTUM);

		if (ath9k_hw_numtxpending(ah, q) == 0)
			break;
	}

	REG_WRITE(ah, AR_Q_TXD, 0);

	return wait != 0;

#undef ATH9K_TX_STOP_DMA_TIMEOUT
#undef ATH9K_TIME_QUANTUM
}
EXPORT_SYMBOL(ath9k_hw_stop_dma_queue);

void ath9k_hw_gettxintrtxqs(struct ath_hw *ah, u32 *txqs)
{
	*txqs &= ah->intr_txqs;
	ah->intr_txqs &= ~(*txqs);
}
EXPORT_SYMBOL(ath9k_hw_gettxintrtxqs);
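/*
 * Illustrative caller sketch (not part of this file): a drain sequence
 * might try the per-queue stop first and fall back to the global abort
 * if the queue refuses to go idle. The helper name and the fallback
 * policy are assumptions of this sketch, not driver API.
 */
#if 0
static void example_drain_txq(struct ath_hw *ah, u32 q)
{
	if (!ath9k_hw_stop_dma_queue(ah, q))
		ath9k_hw_abort_tx_dma(ah);	/* stop every QCU instead */
}
#endif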
bool ath9k_hw_set_txq_props(struct ath_hw *ah, int q,
			    const struct ath9k_tx_queue_info *qinfo)
{
	u32 cw;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_tx_queue_info *qi;

	qi = &ah->txq[q];
	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
		ath_dbg(common, ATH_DBG_QUEUE,
			"Set TXQ properties, inactive queue: %u\n", q);
		return false;
	}

	ath_dbg(common, ATH_DBG_QUEUE, "Set queue properties for: %u\n", q);

	qi->tqi_ver = qinfo->tqi_ver;
	qi->tqi_subtype = qinfo->tqi_subtype;
	qi->tqi_qflags = qinfo->tqi_qflags;
	qi->tqi_priority = qinfo->tqi_priority;
	if (qinfo->tqi_aifs != ATH9K_TXQ_USEDEFAULT)
		qi->tqi_aifs = min(qinfo->tqi_aifs, 255U);
	else
		qi->tqi_aifs = INIT_AIFS;
	if (qinfo->tqi_cwmin != ATH9K_TXQ_USEDEFAULT) {
		cw = min(qinfo->tqi_cwmin, 1024U);
		qi->tqi_cwmin = 1;
		while (qi->tqi_cwmin < cw)
			qi->tqi_cwmin = (qi->tqi_cwmin << 1) | 1;
	} else
		qi->tqi_cwmin = qinfo->tqi_cwmin;
	if (qinfo->tqi_cwmax != ATH9K_TXQ_USEDEFAULT) {
		cw = min(qinfo->tqi_cwmax, 1024U);
		qi->tqi_cwmax = 1;
		while (qi->tqi_cwmax < cw)
			qi->tqi_cwmax = (qi->tqi_cwmax << 1) | 1;
	} else
		qi->tqi_cwmax = INIT_CWMAX;

	if (qinfo->tqi_shretry != 0)
		qi->tqi_shretry = min((u32) qinfo->tqi_shretry, 15U);
	else
		qi->tqi_shretry = INIT_SH_RETRY;
	if (qinfo->tqi_lgretry != 0)
		qi->tqi_lgretry = min((u32) qinfo->tqi_lgretry, 15U);
	else
		qi->tqi_lgretry = INIT_LG_RETRY;
	qi->tqi_cbrPeriod = qinfo->tqi_cbrPeriod;
	qi->tqi_cbrOverflowLimit = qinfo->tqi_cbrOverflowLimit;
	qi->tqi_burstTime = qinfo->tqi_burstTime;
	qi->tqi_readyTime = qinfo->tqi_readyTime;

	switch (qinfo->tqi_subtype) {
	case ATH9K_WME_UPSD:
		if (qi->tqi_type == ATH9K_TX_QUEUE_DATA)
			qi->tqi_intFlags = ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS;
		break;
	default:
		break;
	}

	return true;
}
EXPORT_SYMBOL(ath9k_hw_set_txq_props);

bool ath9k_hw_get_txq_props(struct ath_hw *ah, int q,
			    struct ath9k_tx_queue_info *qinfo)
{
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_tx_queue_info *qi;

	qi = &ah->txq[q];
	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
		ath_dbg(common, ATH_DBG_QUEUE,
			"Get TXQ properties, inactive queue: %u\n", q);
		return false;
	}

	qinfo->tqi_ver = qi->tqi_ver;
	qinfo->tqi_subtype = qi->tqi_subtype;
	qinfo->tqi_qflags = qi->tqi_qflags;
	qinfo->tqi_priority = qi->tqi_priority;
	qinfo->tqi_aifs = qi->tqi_aifs;
	qinfo->tqi_cwmin = qi->tqi_cwmin;
	qinfo->tqi_cwmax = qi->tqi_cwmax;
	qinfo->tqi_shretry = qi->tqi_shretry;
	qinfo->tqi_lgretry = qi->tqi_lgretry;
	qinfo->tqi_cbrPeriod = qi->tqi_cbrPeriod;
	qinfo->tqi_cbrOverflowLimit = qi->tqi_cbrOverflowLimit;
	qinfo->tqi_burstTime = qi->tqi_burstTime;
	qinfo->tqi_readyTime = qi->tqi_readyTime;

	return true;
}
EXPORT_SYMBOL(ath9k_hw_get_txq_props);

int ath9k_hw_setuptxqueue(struct ath_hw *ah, enum ath9k_tx_queue type,
			  const struct ath9k_tx_queue_info *qinfo)
{
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_tx_queue_info *qi;
	int q;

	switch (type) {
	case ATH9K_TX_QUEUE_BEACON:
		q = ATH9K_NUM_TX_QUEUES - 1;
		break;
	case ATH9K_TX_QUEUE_CAB:
		q = ATH9K_NUM_TX_QUEUES - 2;
		break;
	case ATH9K_TX_QUEUE_PSPOLL:
		q = 1;
		break;
	case ATH9K_TX_QUEUE_UAPSD:
		q = ATH9K_NUM_TX_QUEUES - 3;
		break;
	case ATH9K_TX_QUEUE_DATA:
		for (q = 0; q < ATH9K_NUM_TX_QUEUES; q++)
			if (ah->txq[q].tqi_type == ATH9K_TX_QUEUE_INACTIVE)
				break;
		if (q == ATH9K_NUM_TX_QUEUES) {
			ath_err(common, "No available TX queue\n");
			return -1;
		}
		break;
	default:
		ath_err(common, "Invalid TX queue type: %u\n", type);
		return -1;
	}

	ath_dbg(common, ATH_DBG_QUEUE, "Setup TX queue: %u\n", q);

	qi = &ah->txq[q];
	if (qi->tqi_type != ATH9K_TX_QUEUE_INACTIVE) {
		ath_err(common, "TX queue: %u already active\n", q);
		return -1;
	}
	memset(qi, 0, sizeof(struct ath9k_tx_queue_info));
	qi->tqi_type = type;
	qi->tqi_physCompBuf = qinfo->tqi_physCompBuf;
	(void) ath9k_hw_set_txq_props(ah, q, qinfo);

	return q;
}
EXPORT_SYMBOL(ath9k_hw_setuptxqueue);
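/*
 * Illustrative sketch of queue allocation (hypothetical helper and
 * values): ath9k_hw_set_txq_props() rounds tqi_cwmin/tqi_cwmax up to
 * the next (2^n - 1), so a requested cwmin of 10 is stored as 15.
 */
#if 0
static int example_setup_data_queue(struct ath_hw *ah)
{
	struct ath9k_tx_queue_info qi;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_aifs = 2;
	qi.tqi_cwmin = 10;			/* stored as 15 */
	qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;	/* becomes INIT_CWMAX */
	return ath9k_hw_setuptxqueue(ah, ATH9K_TX_QUEUE_DATA, &qi);
}
#endif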
bool ath9k_hw_releasetxqueue(struct ath_hw *ah, u32 q)
{
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_tx_queue_info *qi;

	qi = &ah->txq[q];
	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
		ath_dbg(common, ATH_DBG_QUEUE,
			"Release TXQ, inactive queue: %u\n", q);
		return false;
	}

	ath_dbg(common, ATH_DBG_QUEUE, "Release TX queue: %u\n", q);

	qi->tqi_type = ATH9K_TX_QUEUE_INACTIVE;
	ah->txok_interrupt_mask &= ~(1 << q);
	ah->txerr_interrupt_mask &= ~(1 << q);
	ah->txdesc_interrupt_mask &= ~(1 << q);
	ah->txeol_interrupt_mask &= ~(1 << q);
	ah->txurn_interrupt_mask &= ~(1 << q);
	ath9k_hw_set_txq_interrupts(ah, qi);

	return true;
}
EXPORT_SYMBOL(ath9k_hw_releasetxqueue);

bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
{
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_channel *chan = ah->curchan;
	struct ath9k_tx_queue_info *qi;
	u32 cwMin, chanCwMin, value;

	qi = &ah->txq[q];
	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
		ath_dbg(common, ATH_DBG_QUEUE,
			"Reset TXQ, inactive queue: %u\n", q);
		return true;
	}

	ath_dbg(common, ATH_DBG_QUEUE, "Reset TX queue: %u\n", q);

	if (qi->tqi_cwmin == ATH9K_TXQ_USEDEFAULT) {
		if (chan && IS_CHAN_B(chan))
			chanCwMin = INIT_CWMIN_11B;
		else
			chanCwMin = INIT_CWMIN;

		for (cwMin = 1; cwMin < chanCwMin; cwMin = (cwMin << 1) | 1);
	} else
		cwMin = qi->tqi_cwmin;

	ENABLE_REGWRITE_BUFFER(ah);

	REG_WRITE(ah, AR_DLCL_IFS(q),
		  SM(cwMin, AR_D_LCL_IFS_CWMIN) |
		  SM(qi->tqi_cwmax, AR_D_LCL_IFS_CWMAX) |
		  SM(qi->tqi_aifs, AR_D_LCL_IFS_AIFS));

	REG_WRITE(ah, AR_DRETRY_LIMIT(q),
		  SM(INIT_SSH_RETRY, AR_D_RETRY_LIMIT_STA_SH) |
		  SM(INIT_SLG_RETRY, AR_D_RETRY_LIMIT_STA_LG) |
		  SM(qi->tqi_shretry, AR_D_RETRY_LIMIT_FR_SH));

	REG_WRITE(ah, AR_QMISC(q), AR_Q_MISC_DCU_EARLY_TERM_REQ);

	if (AR_SREV_9340(ah))
		REG_WRITE(ah, AR_DMISC(q),
			  AR_D_MISC_CW_BKOFF_EN | AR_D_MISC_FRAG_WAIT_EN | 0x1);
	else
		REG_WRITE(ah, AR_DMISC(q),
			  AR_D_MISC_CW_BKOFF_EN | AR_D_MISC_FRAG_WAIT_EN | 0x2);

	if (qi->tqi_cbrPeriod) {
		REG_WRITE(ah, AR_QCBRCFG(q),
			  SM(qi->tqi_cbrPeriod, AR_Q_CBRCFG_INTERVAL) |
			  SM(qi->tqi_cbrOverflowLimit, AR_Q_CBRCFG_OVF_THRESH));
		REG_SET_BIT(ah, AR_QMISC(q), AR_Q_MISC_FSP_CBR |
			    (qi->tqi_cbrOverflowLimit ?
			     AR_Q_MISC_CBR_EXP_CNTR_LIMIT_EN : 0));
	}
	if (qi->tqi_readyTime && (qi->tqi_type != ATH9K_TX_QUEUE_CAB)) {
		REG_WRITE(ah, AR_QRDYTIMECFG(q),
			  SM(qi->tqi_readyTime, AR_Q_RDYTIMECFG_DURATION) |
			  AR_Q_RDYTIMECFG_EN);
	}

	REG_WRITE(ah, AR_DCHNTIME(q),
		  SM(qi->tqi_burstTime, AR_D_CHNTIME_DUR) |
		  (qi->tqi_burstTime ? AR_D_CHNTIME_EN : 0));

	if (qi->tqi_burstTime
	    && (qi->tqi_qflags & TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE))
		REG_SET_BIT(ah, AR_QMISC(q), AR_Q_MISC_RDYTIME_EXP_POLICY);

	if (qi->tqi_qflags & TXQ_FLAG_BACKOFF_DISABLE)
		REG_SET_BIT(ah, AR_DMISC(q), AR_D_MISC_POST_FR_BKOFF_DIS);

	REGWRITE_BUFFER_FLUSH(ah);

	if (qi->tqi_qflags & TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE)
		REG_SET_BIT(ah, AR_DMISC(q), AR_D_MISC_FRAG_BKOFF_EN);

	switch (qi->tqi_type) {
	case ATH9K_TX_QUEUE_BEACON:
		ENABLE_REGWRITE_BUFFER(ah);

		REG_SET_BIT(ah, AR_QMISC(q),
			    AR_Q_MISC_FSP_DBA_GATED
			    | AR_Q_MISC_BEACON_USE
			    | AR_Q_MISC_CBR_INCR_DIS1);

		REG_SET_BIT(ah, AR_DMISC(q),
			    (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL <<
			     AR_D_MISC_ARB_LOCKOUT_CNTRL_S)
			    | AR_D_MISC_BEACON_USE
			    | AR_D_MISC_POST_FR_BKOFF_DIS);

		REGWRITE_BUFFER_FLUSH(ah);

		/*
		 * cwmin and cwmax should be 0 for the beacon queue,
		 * but not for IBSS, as that would create an imbalance
		 * in beaconing fairness for participating nodes.
		 */
		if (AR_SREV_9300_20_OR_LATER(ah) &&
		    ah->opmode != NL80211_IFTYPE_ADHOC) {
			REG_WRITE(ah, AR_DLCL_IFS(q), SM(0, AR_D_LCL_IFS_CWMIN)
				  | SM(0, AR_D_LCL_IFS_CWMAX)
				  | SM(qi->tqi_aifs, AR_D_LCL_IFS_AIFS));
		}
		break;
	case ATH9K_TX_QUEUE_CAB:
		ENABLE_REGWRITE_BUFFER(ah);

		REG_SET_BIT(ah, AR_QMISC(q),
			    AR_Q_MISC_FSP_DBA_GATED
			    | AR_Q_MISC_CBR_INCR_DIS1
			    | AR_Q_MISC_CBR_INCR_DIS0);
		value = (qi->tqi_readyTime -
			 (ah->config.sw_beacon_response_time -
			  ah->config.dma_beacon_response_time) -
			 ah->config.additional_swba_backoff) * 1024;
		REG_WRITE(ah, AR_QRDYTIMECFG(q),
			  value | AR_Q_RDYTIMECFG_EN);
		REG_SET_BIT(ah, AR_DMISC(q),
			    (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL <<
			     AR_D_MISC_ARB_LOCKOUT_CNTRL_S));

		REGWRITE_BUFFER_FLUSH(ah);

		break;
	case ATH9K_TX_QUEUE_PSPOLL:
		REG_SET_BIT(ah, AR_QMISC(q), AR_Q_MISC_CBR_INCR_DIS1);
		break;
	case ATH9K_TX_QUEUE_UAPSD:
		REG_SET_BIT(ah, AR_DMISC(q), AR_D_MISC_POST_FR_BKOFF_DIS);
		break;
	default:
		break;
	}

	if (qi->tqi_intFlags & ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS) {
		REG_SET_BIT(ah, AR_DMISC(q),
			    SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL,
			       AR_D_MISC_ARB_LOCKOUT_CNTRL) |
			    AR_D_MISC_POST_FR_BKOFF_DIS);
	}

	if (AR_SREV_9300_20_OR_LATER(ah))
		REG_WRITE(ah, AR_Q_DESC_CRCCHK, AR_Q_DESC_CRCCHK_EN);

	if (qi->tqi_qflags & TXQ_FLAG_TXOKINT_ENABLE)
		ah->txok_interrupt_mask |= 1 << q;
	else
		ah->txok_interrupt_mask &= ~(1 << q);
	if (qi->tqi_qflags & TXQ_FLAG_TXERRINT_ENABLE)
		ah->txerr_interrupt_mask |= 1 << q;
	else
		ah->txerr_interrupt_mask &= ~(1 << q);
	if (qi->tqi_qflags & TXQ_FLAG_TXDESCINT_ENABLE)
		ah->txdesc_interrupt_mask |= 1 << q;
	else
		ah->txdesc_interrupt_mask &= ~(1 << q);
	if (qi->tqi_qflags & TXQ_FLAG_TXEOLINT_ENABLE)
		ah->txeol_interrupt_mask |= 1 << q;
	else
		ah->txeol_interrupt_mask &= ~(1 << q);
	if (qi->tqi_qflags & TXQ_FLAG_TXURNINT_ENABLE)
		ah->txurn_interrupt_mask |= 1 << q;
	else
		ah->txurn_interrupt_mask &= ~(1 << q);
	ath9k_hw_set_txq_interrupts(ah, qi);

	return true;
}
EXPORT_SYMBOL(ath9k_hw_resettxqueue);
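/*
 * Worked example for the CAB ready-time computation above (hypothetical
 * ready time and config values): with tqi_readyTime = 25 TU,
 * sw_beacon_response_time = 10, dma_beacon_response_time = 2 and
 * additional_swba_backoff = 0, the gated window becomes
 * (25 - (10 - 2) - 0) * 1024 = 17408 usec; the multiply by 1024
 * converts TU to microseconds (1 TU = 1024 usec).
 */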
int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
			struct ath_rx_status *rs)
{
	struct ar5416_desc ads;
	struct ar5416_desc *adsp = AR5416DESC(ds);
	u32 phyerr;

	if ((adsp->ds_rxstatus8 & AR_RxDone) == 0)
		return -EINPROGRESS;

	ads.u.rx = adsp->u.rx;

	rs->rs_status = 0;
	rs->rs_flags = 0;

	rs->rs_datalen = ads.ds_rxstatus1 & AR_DataLen;
	rs->rs_tstamp = ads.AR_RcvTimestamp;

	if (ads.ds_rxstatus8 & AR_PostDelimCRCErr) {
		rs->rs_rssi = ATH9K_RSSI_BAD;
		rs->rs_rssi_ctl0 = ATH9K_RSSI_BAD;
		rs->rs_rssi_ctl1 = ATH9K_RSSI_BAD;
		rs->rs_rssi_ctl2 = ATH9K_RSSI_BAD;
		rs->rs_rssi_ext0 = ATH9K_RSSI_BAD;
		rs->rs_rssi_ext1 = ATH9K_RSSI_BAD;
		rs->rs_rssi_ext2 = ATH9K_RSSI_BAD;
	} else {
		rs->rs_rssi = MS(ads.ds_rxstatus4, AR_RxRSSICombined);
		rs->rs_rssi_ctl0 = MS(ads.ds_rxstatus0, AR_RxRSSIAnt00);
		rs->rs_rssi_ctl1 = MS(ads.ds_rxstatus0, AR_RxRSSIAnt01);
		rs->rs_rssi_ctl2 = MS(ads.ds_rxstatus0, AR_RxRSSIAnt02);
		rs->rs_rssi_ext0 = MS(ads.ds_rxstatus4, AR_RxRSSIAnt10);
		rs->rs_rssi_ext1 = MS(ads.ds_rxstatus4, AR_RxRSSIAnt11);
		rs->rs_rssi_ext2 = MS(ads.ds_rxstatus4, AR_RxRSSIAnt12);
	}
	if (ads.ds_rxstatus8 & AR_RxKeyIdxValid)
		rs->rs_keyix = MS(ads.ds_rxstatus8, AR_KeyIdx);
	else
		rs->rs_keyix = ATH9K_RXKEYIX_INVALID;

	rs->rs_rate = MS(ads.ds_rxstatus0, AR_RxRate);
	rs->rs_more = (ads.ds_rxstatus1 & AR_RxMore) ? 1 : 0;

	rs->rs_isaggr = (ads.ds_rxstatus8 & AR_RxAggr) ? 1 : 0;
	rs->rs_moreaggr = (ads.ds_rxstatus8 & AR_RxMoreAggr) ? 1 : 0;
	rs->rs_antenna = MS(ads.ds_rxstatus3, AR_RxAntenna);
	rs->rs_flags = (ads.ds_rxstatus3 & AR_GI) ? ATH9K_RX_GI : 0;
	rs->rs_flags |= (ads.ds_rxstatus3 & AR_2040) ? ATH9K_RX_2040 : 0;

	if (ads.ds_rxstatus8 & AR_PreDelimCRCErr)
		rs->rs_flags |= ATH9K_RX_DELIM_CRC_PRE;
	if (ads.ds_rxstatus8 & AR_PostDelimCRCErr)
		rs->rs_flags |= ATH9K_RX_DELIM_CRC_POST;
	if (ads.ds_rxstatus8 & AR_DecryptBusyErr)
		rs->rs_flags |= ATH9K_RX_DECRYPT_BUSY;

	if ((ads.ds_rxstatus8 & AR_RxFrameOK) == 0) {
		/*
		 * Treat these errors as mutually exclusive to avoid spurious
		 * extra error reports from the hardware. If a CRC error is
		 * reported, then decryption and MIC errors are irrelevant;
		 * the frame is going to be dropped either way.
		 */
		if (ads.ds_rxstatus8 & AR_CRCErr)
			rs->rs_status |= ATH9K_RXERR_CRC;
		else if (ads.ds_rxstatus8 & AR_PHYErr) {
			rs->rs_status |= ATH9K_RXERR_PHY;
			phyerr = MS(ads.ds_rxstatus8, AR_PHYErrCode);
			rs->rs_phyerr = phyerr;
		} else if (ads.ds_rxstatus8 & AR_DecryptCRCErr)
			rs->rs_status |= ATH9K_RXERR_DECRYPT;
		else if (ads.ds_rxstatus8 & AR_MichaelErr)
			rs->rs_status |= ATH9K_RXERR_MIC;
		else if (ads.ds_rxstatus8 & AR_KeyMiss)
			rs->rs_status |= ATH9K_RXERR_DECRYPT;
	}

	return 0;
}
EXPORT_SYMBOL(ath9k_hw_rxprocdesc);
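/*
 * Refresher on the field accessors used throughout the status parsing
 * above (this mirrors the hw.h definitions; shown here only for
 * readability):
 *
 *	MS(v, F) == ((v) & F) >> F##_S	extract field F from word v
 *	SM(v, F) == ((v) << F##_S) & F	place value v into field F
 */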
/*
 * This can stop or re-enable RX.
 *
 * If "set" is true, this will kill any frame which is currently being
 * transferred between the MAC and baseband and also prevent any new
 * frames from getting started.
 */
bool ath9k_hw_setrxabort(struct ath_hw *ah, bool set)
{
	u32 reg;

	if (set) {
		REG_SET_BIT(ah, AR_DIAG_SW,
			    (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));

		if (!ath9k_hw_wait(ah, AR_OBS_BUS_1, AR_OBS_BUS_1_RX_STATE,
				   0, AH_WAIT_TIMEOUT)) {
			REG_CLR_BIT(ah, AR_DIAG_SW,
				    (AR_DIAG_RX_DIS |
				     AR_DIAG_RX_ABORT));

			reg = REG_READ(ah, AR_OBS_BUS_1);
			ath_err(ath9k_hw_common(ah),
				"RX failed to go idle in 10 ms RXSM=0x%x\n",
				reg);

			return false;
		}
	} else {
		REG_CLR_BIT(ah, AR_DIAG_SW,
			    (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
	}

	return true;
}
EXPORT_SYMBOL(ath9k_hw_setrxabort);
void ath9k_hw_putrxbuf(struct ath_hw *ah, u32 rxdp)
{
	REG_WRITE(ah, AR_RXDP, rxdp);
}
EXPORT_SYMBOL(ath9k_hw_putrxbuf);

void ath9k_hw_startpcureceive(struct ath_hw *ah, bool is_scanning)
{
	ath9k_enable_mib_counters(ah);

	ath9k_ani_reset(ah, is_scanning);

	REG_CLR_BIT(ah, AR_DIAG_SW, (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
}
EXPORT_SYMBOL(ath9k_hw_startpcureceive);

void ath9k_hw_abortpcurecv(struct ath_hw *ah)
{
	REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_ABORT | AR_DIAG_RX_DIS);

	ath9k_hw_disable_mib_counters(ah);
}
EXPORT_SYMBOL(ath9k_hw_abortpcurecv);

bool ath9k_hw_stopdmarecv(struct ath_hw *ah, bool *reset)
{
#define AH_RX_STOP_DMA_TIMEOUT 10000   /* usec */
	struct ath_common *common = ath9k_hw_common(ah);
	u32 mac_status, last_mac_status = 0;
	int i;

	/* Enable access to the DMA observation bus */
	REG_WRITE(ah, AR_MACMISC,
		  ((AR_MACMISC_DMA_OBS_LINE_8 << AR_MACMISC_DMA_OBS_S) |
		   (AR_MACMISC_MISC_OBS_BUS_1 <<
		    AR_MACMISC_MISC_OBS_BUS_MSB_S)));

	REG_WRITE(ah, AR_CR, AR_CR_RXD);

	/* Wait for rx enable bit to go low */
	for (i = AH_RX_STOP_DMA_TIMEOUT / AH_TIME_QUANTUM; i != 0; i--) {
		if ((REG_READ(ah, AR_CR) & AR_CR_RXE) == 0)
			break;

		if (!AR_SREV_9300_20_OR_LATER(ah)) {
			mac_status = REG_READ(ah, AR_DMADBG_7) & 0x7f0;
			if (mac_status == 0x1c0 && mac_status == last_mac_status) {
				*reset = true;
				break;
			}

			last_mac_status = mac_status;
		}

		udelay(AH_TIME_QUANTUM);
	}

	if (i == 0) {
		ath_err(common,
			"DMA failed to stop in %d ms AR_CR=0x%08x AR_DIAG_SW=0x%08x DMADBG_7=0x%08x\n",
			AH_RX_STOP_DMA_TIMEOUT / 1000,
			REG_READ(ah, AR_CR),
			REG_READ(ah, AR_DIAG_SW),
			REG_READ(ah, AR_DMADBG_7));
		return false;
	} else {
		return true;
	}

#undef AH_RX_STOP_DMA_TIMEOUT
}
EXPORT_SYMBOL(ath9k_hw_stopdmarecv);

int ath9k_hw_beaconq_setup(struct ath_hw *ah)
{
	struct ath9k_tx_queue_info qi;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_aifs = 1;
	qi.tqi_cwmin = 0;
	qi.tqi_cwmax = 0;
	/* NB: don't enable any interrupts */
	return ath9k_hw_setuptxqueue(ah, ATH9K_TX_QUEUE_BEACON, &qi);
}
EXPORT_SYMBOL(ath9k_hw_beaconq_setup);

bool ath9k_hw_intrpend(struct ath_hw *ah)
{
	u32 host_isr;

	if (AR_SREV_9100(ah))
		return true;

	host_isr = REG_READ(ah, AR_INTR_ASYNC_CAUSE);
	if ((host_isr & AR_INTR_MAC_IRQ) && (host_isr != AR_INTR_SPURIOUS))
		return true;

	host_isr = REG_READ(ah, AR_INTR_SYNC_CAUSE);
	if ((host_isr & AR_INTR_SYNC_DEFAULT)
	    && (host_isr != AR_INTR_SPURIOUS))
		return true;

	return false;
}
EXPORT_SYMBOL(ath9k_hw_intrpend);
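/*
 * Illustrative RX teardown order (a sketch of how a recv path might
 * combine the helpers above; the helper name is made up here): stop
 * the PCU first, then halt RX DMA. A stuck pre-AR9300 DMA engine is
 * reported through the "reset" out-parameter.
 */
#if 0
static bool example_stop_rx(struct ath_hw *ah)
{
	bool reset = false;

	ath9k_hw_abortpcurecv(ah);		/* no new frames into the PCU */
	return ath9k_hw_stopdmarecv(ah, &reset);	/* then stop RX DMA */
}
#endif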
void ath9k_hw_disable_interrupts(struct ath_hw *ah)
{
	struct ath_common *common = ath9k_hw_common(ah);

	if (!(ah->imask & ATH9K_INT_GLOBAL))
		atomic_set(&ah->intr_ref_cnt, -1);
	else
		atomic_dec(&ah->intr_ref_cnt);

	ath_dbg(common, ATH_DBG_INTERRUPT, "disable IER\n");
	REG_WRITE(ah, AR_IER, AR_IER_DISABLE);
	(void) REG_READ(ah, AR_IER);
	if (!AR_SREV_9100(ah)) {
		REG_WRITE(ah, AR_INTR_ASYNC_ENABLE, 0);
		(void) REG_READ(ah, AR_INTR_ASYNC_ENABLE);

		REG_WRITE(ah, AR_INTR_SYNC_ENABLE, 0);
		(void) REG_READ(ah, AR_INTR_SYNC_ENABLE);
	}
}
EXPORT_SYMBOL(ath9k_hw_disable_interrupts);

void ath9k_hw_enable_interrupts(struct ath_hw *ah)
{
	struct ath_common *common = ath9k_hw_common(ah);
	u32 sync_default = AR_INTR_SYNC_DEFAULT;

	if (!(ah->imask & ATH9K_INT_GLOBAL))
		return;

	if (!atomic_inc_and_test(&ah->intr_ref_cnt)) {
		ath_dbg(common, ATH_DBG_INTERRUPT,
			"Do not enable IER ref count %d\n",
			atomic_read(&ah->intr_ref_cnt));
		return;
	}

	if (AR_SREV_9340(ah))
		sync_default &= ~AR_INTR_SYNC_HOST1_FATAL;

	ath_dbg(common, ATH_DBG_INTERRUPT, "enable IER\n");
	REG_WRITE(ah, AR_IER, AR_IER_ENABLE);
	if (!AR_SREV_9100(ah)) {
		REG_WRITE(ah, AR_INTR_ASYNC_ENABLE, AR_INTR_MAC_IRQ);
		REG_WRITE(ah, AR_INTR_ASYNC_MASK, AR_INTR_MAC_IRQ);

		REG_WRITE(ah, AR_INTR_SYNC_ENABLE, sync_default);
		REG_WRITE(ah, AR_INTR_SYNC_MASK, sync_default);
	}
	ath_dbg(common, ATH_DBG_INTERRUPT, "AR_IMR 0x%x IER 0x%x\n",
		REG_READ(ah, AR_IMR), REG_READ(ah, AR_IER));
}
EXPORT_SYMBOL(ath9k_hw_enable_interrupts);
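/*
 * The two functions above form a reference-counted pair: every
 * ath9k_hw_disable_interrupts() must be balanced by an
 * ath9k_hw_enable_interrupts(), and AR_IER is only re-enabled once
 * intr_ref_cnt climbs back to zero. ath9k_hw_updatetxtriglevel() above
 * uses exactly this pattern around its AR_TXCFG update; a minimal
 * sketch of the same idea (hypothetical critical section):
 */
#if 0
static void example_quiesced_update(struct ath_hw *ah)
{
	ath9k_hw_disable_interrupts(ah);
	/* ... poke registers with interrupts quiesced ... */
	ath9k_hw_enable_interrupts(ah);
}
#endif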
void ath9k_hw_set_interrupts(struct ath_hw *ah, enum ath9k_int ints)
{
	enum ath9k_int omask = ah->imask;
	u32 mask, mask2;
	struct ath9k_hw_capabilities *pCap = &ah->caps;
	struct ath_common *common = ath9k_hw_common(ah);

	if (!(ints & ATH9K_INT_GLOBAL))
		ath9k_hw_disable_interrupts(ah);

	ath_dbg(common, ATH_DBG_INTERRUPT, "0x%x => 0x%x\n", omask, ints);

	mask = ints & ATH9K_INT_COMMON;
	mask2 = 0;

	if (ints & ATH9K_INT_TX) {
		if (ah->config.tx_intr_mitigation)
			mask |= AR_IMR_TXMINTR | AR_IMR_TXINTM;
		else {
			if (ah->txok_interrupt_mask)
				mask |= AR_IMR_TXOK;
			if (ah->txdesc_interrupt_mask)
				mask |= AR_IMR_TXDESC;
		}
		if (ah->txerr_interrupt_mask)
			mask |= AR_IMR_TXERR;
		if (ah->txeol_interrupt_mask)
			mask |= AR_IMR_TXEOL;
	}
	if (ints & ATH9K_INT_RX) {
		if (AR_SREV_9300_20_OR_LATER(ah)) {
			mask |= AR_IMR_RXERR | AR_IMR_RXOK_HP;
			if (ah->config.rx_intr_mitigation) {
				mask &= ~AR_IMR_RXOK_LP;
				mask |= AR_IMR_RXMINTR | AR_IMR_RXINTM;
			} else {
				mask |= AR_IMR_RXOK_LP;
			}
		} else {
			if (ah->config.rx_intr_mitigation)
				mask |= AR_IMR_RXMINTR | AR_IMR_RXINTM;
			else
				mask |= AR_IMR_RXOK | AR_IMR_RXDESC;
		}
		if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
			mask |= AR_IMR_GENTMR;
	}

	if (ints & ATH9K_INT_GENTIMER)
		mask |= AR_IMR_GENTMR;

	if (ints & (ATH9K_INT_BMISC)) {
		mask |= AR_IMR_BCNMISC;
		if (ints & ATH9K_INT_TIM)
			mask2 |= AR_IMR_S2_TIM;
		if (ints & ATH9K_INT_DTIM)
			mask2 |= AR_IMR_S2_DTIM;
		if (ints & ATH9K_INT_DTIMSYNC)
			mask2 |= AR_IMR_S2_DTIMSYNC;
		if (ints & ATH9K_INT_CABEND)
			mask2 |= AR_IMR_S2_CABEND;
		if (ints & ATH9K_INT_TSFOOR)
			mask2 |= AR_IMR_S2_TSFOOR;
	}

	if (ints & (ATH9K_INT_GTT | ATH9K_INT_CST)) {
		mask |= AR_IMR_BCNMISC;
		if (ints & ATH9K_INT_GTT)
			mask2 |= AR_IMR_S2_GTT;
		if (ints & ATH9K_INT_CST)
			mask2 |= AR_IMR_S2_CST;
	}

	ath_dbg(common, ATH_DBG_INTERRUPT, "new IMR 0x%x\n", mask);
	REG_WRITE(ah, AR_IMR, mask);
	ah->imrs2_reg &= ~(AR_IMR_S2_TIM | AR_IMR_S2_DTIM | AR_IMR_S2_DTIMSYNC |
			   AR_IMR_S2_CABEND | AR_IMR_S2_CABTO |
			   AR_IMR_S2_TSFOOR | AR_IMR_S2_GTT | AR_IMR_S2_CST);
	ah->imrs2_reg |= mask2;
	REG_WRITE(ah, AR_IMR_S2, ah->imrs2_reg);

	if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
		if (ints & ATH9K_INT_TIM_TIMER)
			REG_SET_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER);
		else
			REG_CLR_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER);
	}
}
EXPORT_SYMBOL(ath9k_hw_set_interrupts);
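/*
 * Illustrative sketch of how the mask is typically driven (hypothetical
 * caller; in the driver, ah->imask is maintained by the mac80211 glue):
 * update the cached mask, push it to the hardware, then re-enable.
 */
#if 0
static void example_update_ints(struct ath_hw *ah)
{
	ah->imask |= ATH9K_INT_TX | ATH9K_INT_RX | ATH9K_INT_GLOBAL;
	ath9k_hw_set_interrupts(ah, ah->imask);
	ath9k_hw_enable_interrupts(ah);
}
#endif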