/*
 * Copyright (c) 2008-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "hw.h"
#include "hw-ops.h"
#include <linux/export.h>

static void ath9k_hw_set_txq_interrupts(struct ath_hw *ah,
					struct ath9k_tx_queue_info *qi)
{
	ath_dbg(ath9k_hw_common(ah), ATH_DBG_INTERRUPT,
		"tx ok 0x%x err 0x%x desc 0x%x eol 0x%x urn 0x%x\n",
		ah->txok_interrupt_mask, ah->txerr_interrupt_mask,
		ah->txdesc_interrupt_mask, ah->txeol_interrupt_mask,
		ah->txurn_interrupt_mask);

	ENABLE_REGWRITE_BUFFER(ah);

	REG_WRITE(ah, AR_IMR_S0,
		  SM(ah->txok_interrupt_mask, AR_IMR_S0_QCU_TXOK)
		  | SM(ah->txdesc_interrupt_mask, AR_IMR_S0_QCU_TXDESC));
	REG_WRITE(ah, AR_IMR_S1,
		  SM(ah->txerr_interrupt_mask, AR_IMR_S1_QCU_TXERR)
		  | SM(ah->txeol_interrupt_mask, AR_IMR_S1_QCU_TXEOL));

	ah->imrs2_reg &= ~AR_IMR_S2_QCU_TXURN;
	ah->imrs2_reg |= (ah->txurn_interrupt_mask & AR_IMR_S2_QCU_TXURN);
	REG_WRITE(ah, AR_IMR_S2, ah->imrs2_reg);

	REGWRITE_BUFFER_FLUSH(ah);
}

u32 ath9k_hw_gettxbuf(struct ath_hw *ah, u32 q)
{
	return REG_READ(ah, AR_QTXDP(q));
}
EXPORT_SYMBOL(ath9k_hw_gettxbuf);

void ath9k_hw_puttxbuf(struct ath_hw *ah, u32 q, u32 txdp)
{
	REG_WRITE(ah, AR_QTXDP(q), txdp);
}
EXPORT_SYMBOL(ath9k_hw_puttxbuf);

void ath9k_hw_txstart(struct ath_hw *ah, u32 q)
{
	ath_dbg(ath9k_hw_common(ah), ATH_DBG_QUEUE,
		"Enable TXE on queue: %u\n", q);
	REG_WRITE(ah, AR_Q_TXE, 1 << q);
}
EXPORT_SYMBOL(ath9k_hw_txstart);

u32 ath9k_hw_numtxpending(struct ath_hw *ah, u32 q)
{
	u32 npend;

	npend = REG_READ(ah, AR_QSTS(q)) & AR_Q_STS_PEND_FR_CNT;
	if (npend == 0) {
		/*
		 * The pending frame counter can read zero while TXE is
		 * still asserted for the queue, so report at least one
		 * pending frame in that case.
		 */
		if (REG_READ(ah, AR_Q_TXE) & (1 << q))
			npend = 1;
	}

	return npend;
}
EXPORT_SYMBOL(ath9k_hw_numtxpending);
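/*
 * Illustrative sketch (not part of the driver): a caller that needs a
 * queue to drain can poll ath9k_hw_numtxpending() with a bounded delay,
 * which is essentially what ath9k_hw_stop_dma_queue() below does after
 * disabling the queue. The retry budget and delay are made up for the
 * example:
 *
 *	int tries = 100;
 *
 *	while (ath9k_hw_numtxpending(ah, q) && --tries)
 *		udelay(10);
 */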
/**
 * ath9k_hw_updatetxtriglevel - adjusts the frame trigger level
 *
 * @ah: atheros hardware struct
 * @bIncTrigLevel: whether the frame trigger level should be increased
 *	(true) or decreased (false)
 *
 * The frame trigger level specifies the minimum number of bytes,
 * in units of 64 bytes, that must be DMA'ed into the PCU TX FIFO
 * before the PCU will initiate sending the frame on the air. This can
 * mean we initiate transmit before a full frame is on the PCU TX FIFO.
 * Resets to 0x1 (meaning 64 bytes or a full frame, whichever occurs
 * first).
 *
 * Caution must be taken to set the frame trigger level based on the
 * DMA request size. For example, if the DMA request size is set to
 * 128 bytes, the trigger level cannot exceed 6 * 64 = 384 bytes. This
 * is because there needs to be enough space left in the TX FIFO for
 * the requested transfer size, so the FIFO must stop filling at
 * 512 - 128 = 384 bytes. If we set the threshold to a value beyond 6,
 * the transmit will hang.
 *
 * Current dual stream devices have a PCU TX FIFO size of 8 KB.
 * Current single stream devices have a PCU TX FIFO size of 4 KB; however,
 * there is a hardware issue which forces us to use 2 KB instead, so the
 * frame trigger level must not exceed 2 KB for these chipsets.
 */
bool ath9k_hw_updatetxtriglevel(struct ath_hw *ah, bool bIncTrigLevel)
{
	u32 txcfg, curLevel, newLevel;

	if (ah->tx_trig_level >= ah->config.max_txtrig_level)
		return false;

	ath9k_hw_disable_interrupts(ah);

	txcfg = REG_READ(ah, AR_TXCFG);
	curLevel = MS(txcfg, AR_FTRIG);
	newLevel = curLevel;
	if (bIncTrigLevel) {
		if (curLevel < ah->config.max_txtrig_level)
			newLevel++;
	} else if (curLevel > MIN_TX_FIFO_THRESHOLD)
		newLevel--;
	if (newLevel != curLevel)
		REG_WRITE(ah, AR_TXCFG,
			  (txcfg & ~AR_FTRIG) | SM(newLevel, AR_FTRIG));

	ath9k_hw_enable_interrupts(ah);

	ah->tx_trig_level = newLevel;

	return newLevel != curLevel;
}
EXPORT_SYMBOL(ath9k_hw_updatetxtriglevel);
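/*
 * Worked example for the limits above (illustrative): with the 512-byte
 * FIFO slice from the comment and a DMA request size of 128 bytes, the
 * highest safe level is (512 - 128) / 64 = 6 trigger units. A typical
 * caller reacts to a TX underrun interrupt by nudging the level up;
 * 'saw_tx_underrun' is a placeholder condition for this sketch:
 *
 *	if (saw_tx_underrun)
 *		ath9k_hw_updatetxtriglevel(ah, true);
 */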
void ath9k_hw_abort_tx_dma(struct ath_hw *ah)
{
	int i, q;

	REG_WRITE(ah, AR_Q_TXD, AR_Q_TXD_M);

	REG_SET_BIT(ah, AR_PCU_MISC, AR_PCU_FORCE_QUIET_COLL | AR_PCU_CLEAR_VMF);
	REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);
	REG_SET_BIT(ah, AR_D_GBL_IFS_MISC, AR_D_GBL_IFS_MISC_IGNORE_BACKOFF);

	for (q = 0; q < AR_NUM_QCU; q++) {
		for (i = 0; i < 1000; i++) {
			if (i)
				udelay(5);

			if (!ath9k_hw_numtxpending(ah, q))
				break;
		}
	}

	REG_CLR_BIT(ah, AR_PCU_MISC, AR_PCU_FORCE_QUIET_COLL | AR_PCU_CLEAR_VMF);
	REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);
	REG_CLR_BIT(ah, AR_D_GBL_IFS_MISC, AR_D_GBL_IFS_MISC_IGNORE_BACKOFF);

	REG_WRITE(ah, AR_Q_TXD, 0);
}
EXPORT_SYMBOL(ath9k_hw_abort_tx_dma);

bool ath9k_hw_stop_dma_queue(struct ath_hw *ah, u32 q)
{
#define ATH9K_TX_STOP_DMA_TIMEOUT	1000	/* usec */
#define ATH9K_TIME_QUANTUM		100	/* usec */
	int wait_time = ATH9K_TX_STOP_DMA_TIMEOUT / ATH9K_TIME_QUANTUM;
	int wait;

	REG_WRITE(ah, AR_Q_TXD, 1 << q);

	for (wait = wait_time; wait != 0; wait--) {
		if (wait != wait_time)
			udelay(ATH9K_TIME_QUANTUM);

		if (ath9k_hw_numtxpending(ah, q) == 0)
			break;
	}

	REG_WRITE(ah, AR_Q_TXD, 0);

	return wait != 0;

#undef ATH9K_TX_STOP_DMA_TIMEOUT
#undef ATH9K_TIME_QUANTUM
}
EXPORT_SYMBOL(ath9k_hw_stop_dma_queue);

void ath9k_hw_gettxintrtxqs(struct ath_hw *ah, u32 *txqs)
{
	*txqs &= ah->intr_txqs;
	ah->intr_txqs &= ~(*txqs);
}
EXPORT_SYMBOL(ath9k_hw_gettxintrtxqs);

bool ath9k_hw_set_txq_props(struct ath_hw *ah, int q,
			    const struct ath9k_tx_queue_info *qinfo)
{
	u32 cw;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_tx_queue_info *qi;

	qi = &ah->txq[q];
	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
		ath_dbg(common, ATH_DBG_QUEUE,
			"Set TXQ properties, inactive queue: %u\n", q);
		return false;
	}

	ath_dbg(common, ATH_DBG_QUEUE, "Set queue properties for: %u\n", q);

	qi->tqi_ver = qinfo->tqi_ver;
	qi->tqi_subtype = qinfo->tqi_subtype;
	qi->tqi_qflags = qinfo->tqi_qflags;
	qi->tqi_priority = qinfo->tqi_priority;
	if (qinfo->tqi_aifs != ATH9K_TXQ_USEDEFAULT)
		qi->tqi_aifs = min(qinfo->tqi_aifs, 255U);
	else
		qi->tqi_aifs = INIT_AIFS;
	if (qinfo->tqi_cwmin != ATH9K_TXQ_USEDEFAULT) {
		cw = min(qinfo->tqi_cwmin, 1024U);
		qi->tqi_cwmin = 1;
		while (qi->tqi_cwmin < cw)
			qi->tqi_cwmin = (qi->tqi_cwmin << 1) | 1;
	} else {
		/* Keep USEDEFAULT; the channel default is applied at reset. */
		qi->tqi_cwmin = qinfo->tqi_cwmin;
	}
	if (qinfo->tqi_cwmax != ATH9K_TXQ_USEDEFAULT) {
		cw = min(qinfo->tqi_cwmax, 1024U);
		qi->tqi_cwmax = 1;
		while (qi->tqi_cwmax < cw)
			qi->tqi_cwmax = (qi->tqi_cwmax << 1) | 1;
	} else
		qi->tqi_cwmax = INIT_CWMAX;

	if (qinfo->tqi_shretry != 0)
		qi->tqi_shretry = min((u32) qinfo->tqi_shretry, 15U);
	else
		qi->tqi_shretry = INIT_SH_RETRY;
	if (qinfo->tqi_lgretry != 0)
		qi->tqi_lgretry = min((u32) qinfo->tqi_lgretry, 15U);
	else
		qi->tqi_lgretry = INIT_LG_RETRY;
	qi->tqi_cbrPeriod = qinfo->tqi_cbrPeriod;
	qi->tqi_cbrOverflowLimit = qinfo->tqi_cbrOverflowLimit;
	qi->tqi_burstTime = qinfo->tqi_burstTime;
	qi->tqi_readyTime = qinfo->tqi_readyTime;

	switch (qinfo->tqi_subtype) {
	case ATH9K_WME_UPSD:
		if (qi->tqi_type == ATH9K_TX_QUEUE_DATA)
			qi->tqi_intFlags = ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS;
		break;
	default:
		break;
	}

	return true;
}
EXPORT_SYMBOL(ath9k_hw_set_txq_props);
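/*
 * Note on the cwmin/cwmax rounding above (illustrative): the loop
 * 'while (cw < target) cw = (cw << 1) | 1' rounds the requested window
 * up to the next value of the form 2^n - 1, the only shapes an 802.11
 * contention window may take. For example, a requested cwmin of 20
 * becomes 31 (1 -> 3 -> 7 -> 15 -> 31).
 */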
bool ath9k_hw_get_txq_props(struct ath_hw *ah, int q,
			    struct ath9k_tx_queue_info *qinfo)
{
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_tx_queue_info *qi;

	qi = &ah->txq[q];
	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
		ath_dbg(common, ATH_DBG_QUEUE,
			"Get TXQ properties, inactive queue: %u\n", q);
		return false;
	}

	qinfo->tqi_qflags = qi->tqi_qflags;
	qinfo->tqi_ver = qi->tqi_ver;
	qinfo->tqi_subtype = qi->tqi_subtype;
	qinfo->tqi_priority = qi->tqi_priority;
	qinfo->tqi_aifs = qi->tqi_aifs;
	qinfo->tqi_cwmin = qi->tqi_cwmin;
	qinfo->tqi_cwmax = qi->tqi_cwmax;
	qinfo->tqi_shretry = qi->tqi_shretry;
	qinfo->tqi_lgretry = qi->tqi_lgretry;
	qinfo->tqi_cbrPeriod = qi->tqi_cbrPeriod;
	qinfo->tqi_cbrOverflowLimit = qi->tqi_cbrOverflowLimit;
	qinfo->tqi_burstTime = qi->tqi_burstTime;
	qinfo->tqi_readyTime = qi->tqi_readyTime;

	return true;
}
EXPORT_SYMBOL(ath9k_hw_get_txq_props);

int ath9k_hw_setuptxqueue(struct ath_hw *ah, enum ath9k_tx_queue type,
			  const struct ath9k_tx_queue_info *qinfo)
{
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_tx_queue_info *qi;
	int q;

	switch (type) {
	case ATH9K_TX_QUEUE_BEACON:
		q = ATH9K_NUM_TX_QUEUES - 1;
		break;
	case ATH9K_TX_QUEUE_CAB:
		q = ATH9K_NUM_TX_QUEUES - 2;
		break;
	case ATH9K_TX_QUEUE_PSPOLL:
		q = 1;
		break;
	case ATH9K_TX_QUEUE_UAPSD:
		q = ATH9K_NUM_TX_QUEUES - 3;
		break;
	case ATH9K_TX_QUEUE_DATA:
		for (q = 0; q < ATH9K_NUM_TX_QUEUES; q++)
			if (ah->txq[q].tqi_type == ATH9K_TX_QUEUE_INACTIVE)
				break;
		if (q == ATH9K_NUM_TX_QUEUES) {
			ath_err(common, "No available TX queue\n");
			return -1;
		}
		break;
	default:
		ath_err(common, "Invalid TX queue type: %u\n", type);
		return -1;
	}

	ath_dbg(common, ATH_DBG_QUEUE, "Setup TX queue: %u\n", q);

	qi = &ah->txq[q];
	if (qi->tqi_type != ATH9K_TX_QUEUE_INACTIVE) {
		ath_err(common, "TX queue: %u already active\n", q);
		return -1;
	}
	memset(qi, 0, sizeof(struct ath9k_tx_queue_info));
	qi->tqi_type = type;
	qi->tqi_physCompBuf = qinfo->tqi_physCompBuf;
	(void) ath9k_hw_set_txq_props(ah, q, qinfo);

	return q;
}
EXPORT_SYMBOL(ath9k_hw_setuptxqueue);

bool ath9k_hw_releasetxqueue(struct ath_hw *ah, u32 q)
{
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_tx_queue_info *qi;

	qi = &ah->txq[q];
	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
		ath_dbg(common, ATH_DBG_QUEUE,
			"Release TXQ, inactive queue: %u\n", q);
		return false;
	}

	ath_dbg(common, ATH_DBG_QUEUE, "Release TX queue: %u\n", q);

	qi->tqi_type = ATH9K_TX_QUEUE_INACTIVE;
	ah->txok_interrupt_mask &= ~(1 << q);
	ah->txerr_interrupt_mask &= ~(1 << q);
	ah->txdesc_interrupt_mask &= ~(1 << q);
	ah->txeol_interrupt_mask &= ~(1 << q);
	ah->txurn_interrupt_mask &= ~(1 << q);
	ath9k_hw_set_txq_interrupts(ah, qi);

	return true;
}
EXPORT_SYMBOL(ath9k_hw_releasetxqueue);
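/*
 * Illustrative queue lifecycle (assumed caller, not from this file):
 *
 *	struct ath9k_tx_queue_info qi = {
 *		.tqi_aifs  = ATH9K_TXQ_USEDEFAULT,
 *		.tqi_cwmin = ATH9K_TXQ_USEDEFAULT,
 *		.tqi_cwmax = ATH9K_TXQ_USEDEFAULT,
 *	};
 *	int q = ath9k_hw_setuptxqueue(ah, ATH9K_TX_QUEUE_DATA, &qi);
 *
 *	if (q >= 0) {
 *		ath9k_hw_resettxqueue(ah, q);	(push settings to hardware)
 *		...
 *		ath9k_hw_releasetxqueue(ah, q);
 *	}
 */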
bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
{
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_channel *chan = ah->curchan;
	struct ath9k_tx_queue_info *qi;
	u32 cwMin, chanCwMin, value;

	qi = &ah->txq[q];
	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
		ath_dbg(common, ATH_DBG_QUEUE,
			"Reset TXQ, inactive queue: %u\n", q);
		return true;
	}

	ath_dbg(common, ATH_DBG_QUEUE, "Reset TX queue: %u\n", q);

	if (qi->tqi_cwmin == ATH9K_TXQ_USEDEFAULT) {
		if (chan && IS_CHAN_B(chan))
			chanCwMin = INIT_CWMIN_11B;
		else
			chanCwMin = INIT_CWMIN;

		for (cwMin = 1; cwMin < chanCwMin; cwMin = (cwMin << 1) | 1);
	} else
		cwMin = qi->tqi_cwmin;

	ENABLE_REGWRITE_BUFFER(ah);

	REG_WRITE(ah, AR_DLCL_IFS(q),
		  SM(cwMin, AR_D_LCL_IFS_CWMIN) |
		  SM(qi->tqi_cwmax, AR_D_LCL_IFS_CWMAX) |
		  SM(qi->tqi_aifs, AR_D_LCL_IFS_AIFS));

	REG_WRITE(ah, AR_DRETRY_LIMIT(q),
		  SM(INIT_SSH_RETRY, AR_D_RETRY_LIMIT_STA_SH) |
		  SM(INIT_SLG_RETRY, AR_D_RETRY_LIMIT_STA_LG) |
		  SM(qi->tqi_shretry, AR_D_RETRY_LIMIT_FR_SH));

	REG_WRITE(ah, AR_QMISC(q), AR_Q_MISC_DCU_EARLY_TERM_REQ);

	if (AR_SREV_9340(ah))
		REG_WRITE(ah, AR_DMISC(q),
			  AR_D_MISC_CW_BKOFF_EN | AR_D_MISC_FRAG_WAIT_EN | 0x1);
	else
		REG_WRITE(ah, AR_DMISC(q),
			  AR_D_MISC_CW_BKOFF_EN | AR_D_MISC_FRAG_WAIT_EN | 0x2);

	if (qi->tqi_cbrPeriod) {
		REG_WRITE(ah, AR_QCBRCFG(q),
			  SM(qi->tqi_cbrPeriod, AR_Q_CBRCFG_INTERVAL) |
			  SM(qi->tqi_cbrOverflowLimit, AR_Q_CBRCFG_OVF_THRESH));
		REG_SET_BIT(ah, AR_QMISC(q), AR_Q_MISC_FSP_CBR |
			    (qi->tqi_cbrOverflowLimit ?
			     AR_Q_MISC_CBR_EXP_CNTR_LIMIT_EN : 0));
	}
	if (qi->tqi_readyTime && (qi->tqi_type != ATH9K_TX_QUEUE_CAB)) {
		REG_WRITE(ah, AR_QRDYTIMECFG(q),
			  SM(qi->tqi_readyTime, AR_Q_RDYTIMECFG_DURATION) |
			  AR_Q_RDYTIMECFG_EN);
	}

	REG_WRITE(ah, AR_DCHNTIME(q),
		  SM(qi->tqi_burstTime, AR_D_CHNTIME_DUR) |
		  (qi->tqi_burstTime ? AR_D_CHNTIME_EN : 0));

	if (qi->tqi_burstTime
	    && (qi->tqi_qflags & TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE))
		REG_SET_BIT(ah, AR_QMISC(q), AR_Q_MISC_RDYTIME_EXP_POLICY);

	if (qi->tqi_qflags & TXQ_FLAG_BACKOFF_DISABLE)
		REG_SET_BIT(ah, AR_DMISC(q), AR_D_MISC_POST_FR_BKOFF_DIS);

	REGWRITE_BUFFER_FLUSH(ah);

	if (qi->tqi_qflags & TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE)
		REG_SET_BIT(ah, AR_DMISC(q), AR_D_MISC_FRAG_BKOFF_EN);

	switch (qi->tqi_type) {
	case ATH9K_TX_QUEUE_BEACON:
		ENABLE_REGWRITE_BUFFER(ah);

		REG_SET_BIT(ah, AR_QMISC(q),
			    AR_Q_MISC_FSP_DBA_GATED
			    | AR_Q_MISC_BEACON_USE
			    | AR_Q_MISC_CBR_INCR_DIS1);

		REG_SET_BIT(ah, AR_DMISC(q),
			    (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL <<
			     AR_D_MISC_ARB_LOCKOUT_CNTRL_S)
			    | AR_D_MISC_BEACON_USE
			    | AR_D_MISC_POST_FR_BKOFF_DIS);

		REGWRITE_BUFFER_FLUSH(ah);

		/*
		 * cwmin and cwmax should be 0 for the beacon queue,
		 * but not for IBSS, as that would create an imbalance
		 * in beaconing fairness among participating nodes.
		 */
		if (AR_SREV_9300_20_OR_LATER(ah) &&
		    ah->opmode != NL80211_IFTYPE_ADHOC) {
			REG_WRITE(ah, AR_DLCL_IFS(q), SM(0, AR_D_LCL_IFS_CWMIN)
				  | SM(0, AR_D_LCL_IFS_CWMAX)
				  | SM(qi->tqi_aifs, AR_D_LCL_IFS_AIFS));
		}
		break;
	case ATH9K_TX_QUEUE_CAB:
		ENABLE_REGWRITE_BUFFER(ah);

		REG_SET_BIT(ah, AR_QMISC(q),
			    AR_Q_MISC_FSP_DBA_GATED
			    | AR_Q_MISC_CBR_INCR_DIS1
			    | AR_Q_MISC_CBR_INCR_DIS0);
		value = (qi->tqi_readyTime -
			 (ah->config.sw_beacon_response_time -
			  ah->config.dma_beacon_response_time) -
			 ah->config.additional_swba_backoff) * 1024;
		REG_WRITE(ah, AR_QRDYTIMECFG(q),
			  value | AR_Q_RDYTIMECFG_EN);
		REG_SET_BIT(ah, AR_DMISC(q),
			    (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL <<
			     AR_D_MISC_ARB_LOCKOUT_CNTRL_S));

		REGWRITE_BUFFER_FLUSH(ah);

		break;
	case ATH9K_TX_QUEUE_PSPOLL:
		REG_SET_BIT(ah, AR_QMISC(q), AR_Q_MISC_CBR_INCR_DIS1);
		break;
	case ATH9K_TX_QUEUE_UAPSD:
		REG_SET_BIT(ah, AR_DMISC(q), AR_D_MISC_POST_FR_BKOFF_DIS);
		break;
	default:
		break;
	}

	if (qi->tqi_intFlags & ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS) {
		REG_SET_BIT(ah, AR_DMISC(q),
			    SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL,
			       AR_D_MISC_ARB_LOCKOUT_CNTRL) |
			    AR_D_MISC_POST_FR_BKOFF_DIS);
	}

	if (AR_SREV_9300_20_OR_LATER(ah))
		REG_WRITE(ah, AR_Q_DESC_CRCCHK, AR_Q_DESC_CRCCHK_EN);

	if (qi->tqi_qflags & TXQ_FLAG_TXOKINT_ENABLE)
		ah->txok_interrupt_mask |= 1 << q;
	else
		ah->txok_interrupt_mask &= ~(1 << q);
	if (qi->tqi_qflags & TXQ_FLAG_TXERRINT_ENABLE)
		ah->txerr_interrupt_mask |= 1 << q;
	else
		ah->txerr_interrupt_mask &= ~(1 << q);
	if (qi->tqi_qflags & TXQ_FLAG_TXDESCINT_ENABLE)
		ah->txdesc_interrupt_mask |= 1 << q;
	else
		ah->txdesc_interrupt_mask &= ~(1 << q);
	if (qi->tqi_qflags & TXQ_FLAG_TXEOLINT_ENABLE)
		ah->txeol_interrupt_mask |= 1 << q;
	else
		ah->txeol_interrupt_mask &= ~(1 << q);
	if (qi->tqi_qflags & TXQ_FLAG_TXURNINT_ENABLE)
		ah->txurn_interrupt_mask |= 1 << q;
	else
		ah->txurn_interrupt_mask &= ~(1 << q);
	ath9k_hw_set_txq_interrupts(ah, qi);

	return true;
}
EXPORT_SYMBOL(ath9k_hw_resettxqueue);
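/*
 * Worked example for the CAB ready-time math above, with made-up
 * numbers: tqi_readyTime = 25 TU, sw_beacon_response_time = 10,
 * dma_beacon_response_time = 2 and additional_swba_backoff = 0 give
 * (25 - (10 - 2) - 0) * 1024 = 17408, i.e. the CAB queue stays ready
 * for whatever is left of the window once the software beacon response
 * slack is subtracted (the * 1024 converts TU to microseconds).
 */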
int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
			struct ath_rx_status *rs)
{
	struct ar5416_desc ads;
	struct ar5416_desc *adsp = AR5416DESC(ds);
	u32 phyerr;

	if ((adsp->ds_rxstatus8 & AR_RxDone) == 0)
		return -EINPROGRESS;

	ads.u.rx = adsp->u.rx;

	rs->rs_status = 0;
	rs->rs_flags = 0;

	rs->rs_datalen = ads.ds_rxstatus1 & AR_DataLen;
	rs->rs_tstamp = ads.AR_RcvTimestamp;

	if (ads.ds_rxstatus8 & AR_PostDelimCRCErr) {
		rs->rs_rssi = ATH9K_RSSI_BAD;
		rs->rs_rssi_ctl0 = ATH9K_RSSI_BAD;
		rs->rs_rssi_ctl1 = ATH9K_RSSI_BAD;
		rs->rs_rssi_ctl2 = ATH9K_RSSI_BAD;
		rs->rs_rssi_ext0 = ATH9K_RSSI_BAD;
		rs->rs_rssi_ext1 = ATH9K_RSSI_BAD;
		rs->rs_rssi_ext2 = ATH9K_RSSI_BAD;
	} else {
		rs->rs_rssi = MS(ads.ds_rxstatus4, AR_RxRSSICombined);
		rs->rs_rssi_ctl0 = MS(ads.ds_rxstatus0, AR_RxRSSIAnt00);
		rs->rs_rssi_ctl1 = MS(ads.ds_rxstatus0, AR_RxRSSIAnt01);
		rs->rs_rssi_ctl2 = MS(ads.ds_rxstatus0, AR_RxRSSIAnt02);
		rs->rs_rssi_ext0 = MS(ads.ds_rxstatus4, AR_RxRSSIAnt10);
		rs->rs_rssi_ext1 = MS(ads.ds_rxstatus4, AR_RxRSSIAnt11);
		rs->rs_rssi_ext2 = MS(ads.ds_rxstatus4, AR_RxRSSIAnt12);
	}
	if (ads.ds_rxstatus8 & AR_RxKeyIdxValid)
		rs->rs_keyix = MS(ads.ds_rxstatus8, AR_KeyIdx);
	else
		rs->rs_keyix = ATH9K_RXKEYIX_INVALID;

	rs->rs_rate = MS(ads.ds_rxstatus0, AR_RxRate);
	rs->rs_more = (ads.ds_rxstatus1 & AR_RxMore) ? 1 : 0;

	rs->rs_isaggr = (ads.ds_rxstatus8 & AR_RxAggr) ? 1 : 0;
	rs->rs_moreaggr = (ads.ds_rxstatus8 & AR_RxMoreAggr) ? 1 : 0;
	rs->rs_antenna = MS(ads.ds_rxstatus3, AR_RxAntenna);
	rs->rs_flags = (ads.ds_rxstatus3 & AR_GI) ? ATH9K_RX_GI : 0;
	rs->rs_flags |= (ads.ds_rxstatus3 & AR_2040) ? ATH9K_RX_2040 : 0;

	if (ads.ds_rxstatus8 & AR_PreDelimCRCErr)
		rs->rs_flags |= ATH9K_RX_DELIM_CRC_PRE;
	if (ads.ds_rxstatus8 & AR_PostDelimCRCErr)
		rs->rs_flags |= ATH9K_RX_DELIM_CRC_POST;
	if (ads.ds_rxstatus8 & AR_DecryptBusyErr)
		rs->rs_flags |= ATH9K_RX_DECRYPT_BUSY;

	if ((ads.ds_rxstatus8 & AR_RxFrameOK) == 0) {
		/*
		 * Treat these errors as mutually exclusive to avoid spurious
		 * extra error reports from the hardware. If a CRC error is
		 * reported, then decryption and MIC errors are irrelevant,
		 * the frame is going to be dropped either way.
		 */
		if (ads.ds_rxstatus8 & AR_CRCErr)
			rs->rs_status |= ATH9K_RXERR_CRC;
		else if (ads.ds_rxstatus8 & AR_PHYErr) {
			rs->rs_status |= ATH9K_RXERR_PHY;
			phyerr = MS(ads.ds_rxstatus8, AR_PHYErrCode);
			rs->rs_phyerr = phyerr;
		} else if (ads.ds_rxstatus8 & AR_DecryptCRCErr)
			rs->rs_status |= ATH9K_RXERR_DECRYPT;
		else if (ads.ds_rxstatus8 & AR_MichaelErr)
			rs->rs_status |= ATH9K_RXERR_MIC;
		if (ads.ds_rxstatus8 & AR_KeyMiss)
			rs->rs_status |= ATH9K_RXERR_KEYMISS;
	}

	return 0;
}
EXPORT_SYMBOL(ath9k_hw_rxprocdesc);
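/*
 * Illustrative caller sketch (the real one lives in the driver's RX
 * path): walk the descriptor chain, stop at the first descriptor the
 * hardware still owns, then inspect rs_status for errors. 'ds' is
 * assumed to point at the current RX descriptor:
 *
 *	struct ath_rx_status rs;
 *
 *	if (ath9k_hw_rxprocdesc(ah, ds, &rs) == -EINPROGRESS)
 *		return;
 *	if (rs.rs_status & ATH9K_RXERR_CRC)
 *		(count and drop the frame)
 */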
/*
 * This can stop or re-enable RX.
 *
 * If 'set' is true, this kills any frame currently being transferred
 * between the MAC and baseband and also prevents any new frames from
 * getting started.
 */
bool ath9k_hw_setrxabort(struct ath_hw *ah, bool set)
{
	u32 reg;

	if (set) {
		REG_SET_BIT(ah, AR_DIAG_SW,
			    (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));

		if (!ath9k_hw_wait(ah, AR_OBS_BUS_1, AR_OBS_BUS_1_RX_STATE,
				   0, AH_WAIT_TIMEOUT)) {
			REG_CLR_BIT(ah, AR_DIAG_SW,
				    (AR_DIAG_RX_DIS |
				     AR_DIAG_RX_ABORT));

			reg = REG_READ(ah, AR_OBS_BUS_1);
			ath_err(ath9k_hw_common(ah),
				"RX failed to go idle in 10 ms RXSM=0x%x\n",
				reg);

			return false;
		}
	} else {
		REG_CLR_BIT(ah, AR_DIAG_SW,
			    (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
	}

	return true;
}
EXPORT_SYMBOL(ath9k_hw_setrxabort);

void ath9k_hw_putrxbuf(struct ath_hw *ah, u32 rxdp)
{
	REG_WRITE(ah, AR_RXDP, rxdp);
}
EXPORT_SYMBOL(ath9k_hw_putrxbuf);

void ath9k_hw_startpcureceive(struct ath_hw *ah, bool is_scanning)
{
	ath9k_enable_mib_counters(ah);

	ath9k_ani_reset(ah, is_scanning);

	REG_CLR_BIT(ah, AR_DIAG_SW, (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
}
EXPORT_SYMBOL(ath9k_hw_startpcureceive);

void ath9k_hw_abortpcurecv(struct ath_hw *ah)
{
	REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_ABORT | AR_DIAG_RX_DIS);

	ath9k_hw_disable_mib_counters(ah);
}
EXPORT_SYMBOL(ath9k_hw_abortpcurecv);

bool ath9k_hw_stopdmarecv(struct ath_hw *ah, bool *reset)
{
#define AH_RX_STOP_DMA_TIMEOUT 10000   /* usec */
	struct ath_common *common = ath9k_hw_common(ah);
	u32 mac_status, last_mac_status = 0;
	int i;

	/* Enable access to the DMA observation bus */
	REG_WRITE(ah, AR_MACMISC,
		  ((AR_MACMISC_DMA_OBS_LINE_8 << AR_MACMISC_DMA_OBS_S) |
		   (AR_MACMISC_MISC_OBS_BUS_1 <<
		    AR_MACMISC_MISC_OBS_BUS_MSB_S)));

	REG_WRITE(ah, AR_CR, AR_CR_RXD);

	/* Wait for rx enable bit to go low */
	for (i = AH_RX_STOP_DMA_TIMEOUT / AH_TIME_QUANTUM; i != 0; i--) {
		if ((REG_READ(ah, AR_CR) & AR_CR_RXE) == 0)
			break;

		if (!AR_SREV_9300_20_OR_LATER(ah)) {
			mac_status = REG_READ(ah, AR_DMADBG_7) & 0x7f0;
			if (mac_status == 0x1c0 &&
			    mac_status == last_mac_status) {
				*reset = true;
				break;
			}

			last_mac_status = mac_status;
		}

		udelay(AH_TIME_QUANTUM);
	}

	if (i == 0) {
		ath_err(common,
			"DMA failed to stop in %d ms AR_CR=0x%08x AR_DIAG_SW=0x%08x DMADBG_7=0x%08x\n",
			AH_RX_STOP_DMA_TIMEOUT / 1000,
			REG_READ(ah, AR_CR),
			REG_READ(ah, AR_DIAG_SW),
			REG_READ(ah, AR_DMADBG_7));
		return false;
	} else {
		return true;
	}

#undef AH_RX_STOP_DMA_TIMEOUT
}
EXPORT_SYMBOL(ath9k_hw_stopdmarecv);
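/*
 * Illustrative RX teardown order (assumed caller): the PCU is aborted
 * first so no new frames are started, then RX DMA is drained; if the
 * engine refuses to stop, the caller is expected to schedule a chip
 * reset:
 *
 *	bool reset = false;
 *
 *	ath9k_hw_abortpcurecv(ah);
 *	if (!ath9k_hw_stopdmarecv(ah, &reset))
 *		reset = true;
 */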
int ath9k_hw_beaconq_setup(struct ath_hw *ah)
{
	struct ath9k_tx_queue_info qi;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_aifs = 1;
	qi.tqi_cwmin = 0;
	qi.tqi_cwmax = 0;
	/* NB: don't enable any interrupts */
	return ath9k_hw_setuptxqueue(ah, ATH9K_TX_QUEUE_BEACON, &qi);
}
EXPORT_SYMBOL(ath9k_hw_beaconq_setup);

bool ath9k_hw_intrpend(struct ath_hw *ah)
{
	u32 host_isr;

	if (AR_SREV_9100(ah))
		return true;

	host_isr = REG_READ(ah, AR_INTR_ASYNC_CAUSE);
	if ((host_isr & AR_INTR_MAC_IRQ) && (host_isr != AR_INTR_SPURIOUS))
		return true;

	host_isr = REG_READ(ah, AR_INTR_SYNC_CAUSE);
	if ((host_isr & AR_INTR_SYNC_DEFAULT)
	    && (host_isr != AR_INTR_SPURIOUS))
		return true;

	return false;
}
EXPORT_SYMBOL(ath9k_hw_intrpend);

void ath9k_hw_disable_interrupts(struct ath_hw *ah)
{
	struct ath_common *common = ath9k_hw_common(ah);

	if (!(ah->imask & ATH9K_INT_GLOBAL))
		atomic_set(&ah->intr_ref_cnt, -1);
	else
		atomic_dec(&ah->intr_ref_cnt);

	ath_dbg(common, ATH_DBG_INTERRUPT, "disable IER\n");
	REG_WRITE(ah, AR_IER, AR_IER_DISABLE);
	(void) REG_READ(ah, AR_IER);
	if (!AR_SREV_9100(ah)) {
		REG_WRITE(ah, AR_INTR_ASYNC_ENABLE, 0);
		(void) REG_READ(ah, AR_INTR_ASYNC_ENABLE);

		REG_WRITE(ah, AR_INTR_SYNC_ENABLE, 0);
		(void) REG_READ(ah, AR_INTR_SYNC_ENABLE);
	}
}
EXPORT_SYMBOL(ath9k_hw_disable_interrupts);

void ath9k_hw_enable_interrupts(struct ath_hw *ah)
{
	struct ath_common *common = ath9k_hw_common(ah);
	u32 sync_default = AR_INTR_SYNC_DEFAULT;

	if (!(ah->imask & ATH9K_INT_GLOBAL))
		return;

	if (!atomic_inc_and_test(&ah->intr_ref_cnt)) {
		ath_dbg(common, ATH_DBG_INTERRUPT,
			"Do not enable IER ref count %d\n",
			atomic_read(&ah->intr_ref_cnt));
		return;
	}

	if (AR_SREV_9340(ah))
		sync_default &= ~AR_INTR_SYNC_HOST1_FATAL;

	ath_dbg(common, ATH_DBG_INTERRUPT, "enable IER\n");
	REG_WRITE(ah, AR_IER, AR_IER_ENABLE);
	if (!AR_SREV_9100(ah)) {
		REG_WRITE(ah, AR_INTR_ASYNC_ENABLE, AR_INTR_MAC_IRQ);
		REG_WRITE(ah, AR_INTR_ASYNC_MASK, AR_INTR_MAC_IRQ);

		REG_WRITE(ah, AR_INTR_SYNC_ENABLE, sync_default);
		REG_WRITE(ah, AR_INTR_SYNC_MASK, sync_default);
	}
	ath_dbg(common, ATH_DBG_INTERRUPT, "AR_IMR 0x%x IER 0x%x\n",
		REG_READ(ah, AR_IMR), REG_READ(ah, AR_IER));
}
EXPORT_SYMBOL(ath9k_hw_enable_interrupts);
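/*
 * Illustrative pairing (assumed caller): disable/enable nest through
 * intr_ref_cnt, so a section that must not race the ISR looks like:
 *
 *	ath9k_hw_disable_interrupts(ah);
 *	(touch shared hardware state)
 *	ath9k_hw_enable_interrupts(ah);
 *
 * The counter sits at -1 while globally disabled; enable only writes
 * AR_IER_ENABLE once atomic_inc_and_test() brings it back to zero.
 */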
void ath9k_hw_set_interrupts(struct ath_hw *ah)
{
	enum ath9k_int ints = ah->imask;
	u32 mask, mask2;
	struct ath9k_hw_capabilities *pCap = &ah->caps;
	struct ath_common *common = ath9k_hw_common(ah);

	if (!(ints & ATH9K_INT_GLOBAL))
		ath9k_hw_disable_interrupts(ah);

	ath_dbg(common, ATH_DBG_INTERRUPT, "New interrupt mask 0x%x\n", ints);

	mask = ints & ATH9K_INT_COMMON;
	mask2 = 0;

	if (ints & ATH9K_INT_TX) {
		if (ah->config.tx_intr_mitigation)
			mask |= AR_IMR_TXMINTR | AR_IMR_TXINTM;
		else {
			if (ah->txok_interrupt_mask)
				mask |= AR_IMR_TXOK;
			if (ah->txdesc_interrupt_mask)
				mask |= AR_IMR_TXDESC;
		}
		if (ah->txerr_interrupt_mask)
			mask |= AR_IMR_TXERR;
		if (ah->txeol_interrupt_mask)
			mask |= AR_IMR_TXEOL;
	}
	if (ints & ATH9K_INT_RX) {
		if (AR_SREV_9300_20_OR_LATER(ah)) {
			mask |= AR_IMR_RXERR | AR_IMR_RXOK_HP;
			if (ah->config.rx_intr_mitigation) {
				mask &= ~AR_IMR_RXOK_LP;
				mask |= AR_IMR_RXMINTR | AR_IMR_RXINTM;
			} else {
				mask |= AR_IMR_RXOK_LP;
			}
		} else {
			if (ah->config.rx_intr_mitigation)
				mask |= AR_IMR_RXMINTR | AR_IMR_RXINTM;
			else
				mask |= AR_IMR_RXOK | AR_IMR_RXDESC;
		}
		if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
			mask |= AR_IMR_GENTMR;
	}

	if (ints & ATH9K_INT_GENTIMER)
		mask |= AR_IMR_GENTMR;

	if (ints & (ATH9K_INT_BMISC)) {
		mask |= AR_IMR_BCNMISC;
		if (ints & ATH9K_INT_TIM)
			mask2 |= AR_IMR_S2_TIM;
		if (ints & ATH9K_INT_DTIM)
			mask2 |= AR_IMR_S2_DTIM;
		if (ints & ATH9K_INT_DTIMSYNC)
			mask2 |= AR_IMR_S2_DTIMSYNC;
		if (ints & ATH9K_INT_CABEND)
			mask2 |= AR_IMR_S2_CABEND;
		if (ints & ATH9K_INT_TSFOOR)
			mask2 |= AR_IMR_S2_TSFOOR;
	}

	if (ints & (ATH9K_INT_GTT | ATH9K_INT_CST)) {
		mask |= AR_IMR_BCNMISC;
		if (ints & ATH9K_INT_GTT)
			mask2 |= AR_IMR_S2_GTT;
		if (ints & ATH9K_INT_CST)
			mask2 |= AR_IMR_S2_CST;
	}

	ath_dbg(common, ATH_DBG_INTERRUPT, "new IMR 0x%x\n", mask);
	REG_WRITE(ah, AR_IMR, mask);
	ah->imrs2_reg &= ~(AR_IMR_S2_TIM | AR_IMR_S2_DTIM | AR_IMR_S2_DTIMSYNC |
			   AR_IMR_S2_CABEND | AR_IMR_S2_CABTO |
			   AR_IMR_S2_TSFOOR | AR_IMR_S2_GTT | AR_IMR_S2_CST);
	ah->imrs2_reg |= mask2;
	REG_WRITE(ah, AR_IMR_S2, ah->imrs2_reg);

	if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
		if (ints & ATH9K_INT_TIM_TIMER)
			REG_SET_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER);
		else
			REG_CLR_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER);
	}
}
EXPORT_SYMBOL(ath9k_hw_set_interrupts);
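/*
 * Illustrative usage (assumed caller): ah->imask is edited first, then
 * pushed to the hardware and re-armed:
 *
 *	ah->imask |= ATH9K_INT_TX | ATH9K_INT_RX;
 *	ath9k_hw_set_interrupts(ah);
 *	ath9k_hw_enable_interrupts(ah);
 */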