/*
 * Copyright (c) 2008-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "hw.h"
#include "hw-ops.h"

static void ath9k_hw_set_txq_interrupts(struct ath_hw *ah,
                                        struct ath9k_tx_queue_info *qi)
{
        ath_dbg(ath9k_hw_common(ah), ATH_DBG_INTERRUPT,
                "tx ok 0x%x err 0x%x desc 0x%x eol 0x%x urn 0x%x\n",
                ah->txok_interrupt_mask, ah->txerr_interrupt_mask,
                ah->txdesc_interrupt_mask, ah->txeol_interrupt_mask,
                ah->txurn_interrupt_mask);

        ENABLE_REGWRITE_BUFFER(ah);

        REG_WRITE(ah, AR_IMR_S0,
                  SM(ah->txok_interrupt_mask, AR_IMR_S0_QCU_TXOK)
                  | SM(ah->txdesc_interrupt_mask, AR_IMR_S0_QCU_TXDESC));
        REG_WRITE(ah, AR_IMR_S1,
                  SM(ah->txerr_interrupt_mask, AR_IMR_S1_QCU_TXERR)
                  | SM(ah->txeol_interrupt_mask, AR_IMR_S1_QCU_TXEOL));

        ah->imrs2_reg &= ~AR_IMR_S2_QCU_TXURN;
        ah->imrs2_reg |= (ah->txurn_interrupt_mask & AR_IMR_S2_QCU_TXURN);
        REG_WRITE(ah, AR_IMR_S2, ah->imrs2_reg);

        REGWRITE_BUFFER_FLUSH(ah);
}

u32 ath9k_hw_gettxbuf(struct ath_hw *ah, u32 q)
{
        return REG_READ(ah, AR_QTXDP(q));
}
EXPORT_SYMBOL(ath9k_hw_gettxbuf);

void ath9k_hw_puttxbuf(struct ath_hw *ah, u32 q, u32 txdp)
{
        REG_WRITE(ah, AR_QTXDP(q), txdp);
}
EXPORT_SYMBOL(ath9k_hw_puttxbuf);

void ath9k_hw_txstart(struct ath_hw *ah, u32 q)
{
        ath_dbg(ath9k_hw_common(ah), ATH_DBG_QUEUE,
                "Enable TXE on queue: %u\n", q);
        REG_WRITE(ah, AR_Q_TXE, 1 << q);
}
EXPORT_SYMBOL(ath9k_hw_txstart);

void ath9k_hw_cleartxdesc(struct ath_hw *ah, void *ds)
{
        struct ar5416_desc *ads = AR5416DESC(ds);

        ads->ds_txstatus0 = ads->ds_txstatus1 = 0;
        ads->ds_txstatus2 = ads->ds_txstatus3 = 0;
        ads->ds_txstatus4 = ads->ds_txstatus5 = 0;
        ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
        ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
}
EXPORT_SYMBOL(ath9k_hw_cleartxdesc);

u32 ath9k_hw_numtxpending(struct ath_hw *ah, u32 q)
{
        u32 npend;

        npend = REG_READ(ah, AR_QSTS(q)) & AR_Q_STS_PEND_FR_CNT;
        if (npend == 0) {
                if (REG_READ(ah, AR_Q_TXE) & (1 << q))
                        npend = 1;
        }

        return npend;
}
EXPORT_SYMBOL(ath9k_hw_numtxpending);
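/*
 * Note on ath9k_hw_numtxpending(): the hardware pending-frame counter
 * in AR_QSTS can already read zero while TXE is still asserted for the
 * queue, so the function reports one pending frame in that case to
 * avoid a false idle indication. Callers polling for queue drain (see
 * the sketch after the DMA-stop helpers below) should therefore use
 * this helper rather than reading AR_QSTS directly.
 */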
/**
 * ath9k_hw_updatetxtriglevel - adjusts the frame trigger level
 *
 * @ah: atheros hardware struct
 * @bIncTrigLevel: whether or not the frame trigger level should be updated
 *
 * The frame trigger level specifies the minimum number of bytes,
 * in units of 64 bytes, that must be DMA'ed into the PCU TX FIFO
 * before the PCU will initiate sending the frame on the air. This can
 * mean we initiate transmit before a full frame is on the PCU TX FIFO.
 * Resets to 0x1 (meaning 64 bytes or a full frame, whichever occurs
 * first).
 *
 * Caution must be taken to set the frame trigger level based on the
 * DMA request size. For example, if the DMA request size is set to
 * 128 bytes, the trigger level cannot exceed 6 * 64 = 384 bytes. This
 * is because there must be enough space left in the TX FIFO for the
 * requested transfer size, so the FIFO fill level stops at
 * 512 - 128 = 384 bytes. If we set the threshold to a value beyond 6,
 * the transmit will hang.
 *
 * Current dual stream devices have a PCU TX FIFO size of 8 KB.
 * Current single stream devices have a PCU TX FIFO size of 4 KB; however,
 * there is a hardware issue which forces us to use 2 KB instead, so the
 * frame trigger level must not exceed 2 KB for these chipsets.
 */
bool ath9k_hw_updatetxtriglevel(struct ath_hw *ah, bool bIncTrigLevel)
{
        u32 txcfg, curLevel, newLevel;

        if (ah->tx_trig_level >= ah->config.max_txtrig_level)
                return false;

        ath9k_hw_disable_interrupts(ah);

        txcfg = REG_READ(ah, AR_TXCFG);
        curLevel = MS(txcfg, AR_FTRIG);
        newLevel = curLevel;
        if (bIncTrigLevel) {
                if (curLevel < ah->config.max_txtrig_level)
                        newLevel++;
        } else if (curLevel > MIN_TX_FIFO_THRESHOLD)
                newLevel--;
        if (newLevel != curLevel)
                REG_WRITE(ah, AR_TXCFG,
                          (txcfg & ~AR_FTRIG) | SM(newLevel, AR_FTRIG));

        ath9k_hw_enable_interrupts(ah);

        ah->tx_trig_level = newLevel;

        return newLevel != curLevel;
}
EXPORT_SYMBOL(ath9k_hw_updatetxtriglevel);

void ath9k_hw_abort_tx_dma(struct ath_hw *ah)
{
        int i, q;

        REG_WRITE(ah, AR_Q_TXD, AR_Q_TXD_M);

        REG_SET_BIT(ah, AR_PCU_MISC, AR_PCU_FORCE_QUIET_COLL | AR_PCU_CLEAR_VMF);
        REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);
        REG_SET_BIT(ah, AR_D_GBL_IFS_MISC, AR_D_GBL_IFS_MISC_IGNORE_BACKOFF);

        for (q = 0; q < AR_NUM_QCU; q++) {
                for (i = 0; i < 1000; i++) {
                        if (i)
                                udelay(5);

                        if (!ath9k_hw_numtxpending(ah, q))
                                break;
                }
        }

        REG_CLR_BIT(ah, AR_PCU_MISC, AR_PCU_FORCE_QUIET_COLL | AR_PCU_CLEAR_VMF);
        REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);
        REG_CLR_BIT(ah, AR_D_GBL_IFS_MISC, AR_D_GBL_IFS_MISC_IGNORE_BACKOFF);

        REG_WRITE(ah, AR_Q_TXD, 0);
}
EXPORT_SYMBOL(ath9k_hw_abort_tx_dma);

bool ath9k_hw_stop_dma_queue(struct ath_hw *ah, u32 q)
{
#define ATH9K_TX_STOP_DMA_TIMEOUT       1000    /* usec */
#define ATH9K_TIME_QUANTUM              100     /* usec */
        int wait_time = ATH9K_TX_STOP_DMA_TIMEOUT / ATH9K_TIME_QUANTUM;
        int wait;

        REG_WRITE(ah, AR_Q_TXD, 1 << q);

        for (wait = wait_time; wait != 0; wait--) {
                if (wait != wait_time)
                        udelay(ATH9K_TIME_QUANTUM);

                if (ath9k_hw_numtxpending(ah, q) == 0)
                        break;
        }

        REG_WRITE(ah, AR_Q_TXD, 0);

        return wait != 0;

#undef ATH9K_TX_STOP_DMA_TIMEOUT
#undef ATH9K_TIME_QUANTUM
}
EXPORT_SYMBOL(ath9k_hw_stop_dma_queue);

void ath9k_hw_gettxintrtxqs(struct ath_hw *ah, u32 *txqs)
{
        *txqs &= ah->intr_txqs;
        ah->intr_txqs &= ~(*txqs);
}
EXPORT_SYMBOL(ath9k_hw_gettxintrtxqs);
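/*
 * A minimal sketch of how the two DMA-stop helpers above compose on
 * the driver side (hypothetical caller, not part of this file): try
 * the per-queue stop first, and escalate to a full abort of every QCU
 * if the queue fails to drain within the timeout:
 *
 *      if (!ath9k_hw_stop_dma_queue(ah, q))
 *              ath9k_hw_abort_tx_dma(ah);
 */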
bool ath9k_hw_set_txq_props(struct ath_hw *ah, int q,
                            const struct ath9k_tx_queue_info *qinfo)
{
        u32 cw;
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath9k_tx_queue_info *qi;

        qi = &ah->txq[q];
        if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
                ath_dbg(common, ATH_DBG_QUEUE,
                        "Set TXQ properties, inactive queue: %u\n", q);
                return false;
        }

        ath_dbg(common, ATH_DBG_QUEUE, "Set queue properties for: %u\n", q);

        qi->tqi_ver = qinfo->tqi_ver;
        qi->tqi_subtype = qinfo->tqi_subtype;
        qi->tqi_qflags = qinfo->tqi_qflags;
        qi->tqi_priority = qinfo->tqi_priority;
        if (qinfo->tqi_aifs != ATH9K_TXQ_USEDEFAULT)
                qi->tqi_aifs = min(qinfo->tqi_aifs, 255U);
        else
                qi->tqi_aifs = INIT_AIFS;
        if (qinfo->tqi_cwmin != ATH9K_TXQ_USEDEFAULT) {
                cw = min(qinfo->tqi_cwmin, 1024U);
                qi->tqi_cwmin = 1;
                while (qi->tqi_cwmin < cw)
                        qi->tqi_cwmin = (qi->tqi_cwmin << 1) | 1;
        } else
                qi->tqi_cwmin = qinfo->tqi_cwmin;
        if (qinfo->tqi_cwmax != ATH9K_TXQ_USEDEFAULT) {
                cw = min(qinfo->tqi_cwmax, 1024U);
                qi->tqi_cwmax = 1;
                while (qi->tqi_cwmax < cw)
                        qi->tqi_cwmax = (qi->tqi_cwmax << 1) | 1;
        } else
                qi->tqi_cwmax = INIT_CWMAX;

        if (qinfo->tqi_shretry != 0)
                qi->tqi_shretry = min((u32) qinfo->tqi_shretry, 15U);
        else
                qi->tqi_shretry = INIT_SH_RETRY;
        if (qinfo->tqi_lgretry != 0)
                qi->tqi_lgretry = min((u32) qinfo->tqi_lgretry, 15U);
        else
                qi->tqi_lgretry = INIT_LG_RETRY;
        qi->tqi_cbrPeriod = qinfo->tqi_cbrPeriod;
        qi->tqi_cbrOverflowLimit = qinfo->tqi_cbrOverflowLimit;
        qi->tqi_burstTime = qinfo->tqi_burstTime;
        qi->tqi_readyTime = qinfo->tqi_readyTime;

        switch (qinfo->tqi_subtype) {
        case ATH9K_WME_UPSD:
                if (qi->tqi_type == ATH9K_TX_QUEUE_DATA)
                        qi->tqi_intFlags = ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS;
                break;
        default:
                break;
        }

        return true;
}
EXPORT_SYMBOL(ath9k_hw_set_txq_props);

bool ath9k_hw_get_txq_props(struct ath_hw *ah, int q,
                            struct ath9k_tx_queue_info *qinfo)
{
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath9k_tx_queue_info *qi;

        qi = &ah->txq[q];
        if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
                ath_dbg(common, ATH_DBG_QUEUE,
                        "Get TXQ properties, inactive queue: %u\n", q);
                return false;
        }

        qinfo->tqi_ver = qi->tqi_ver;
        qinfo->tqi_subtype = qi->tqi_subtype;
        qinfo->tqi_qflags = qi->tqi_qflags;
        qinfo->tqi_priority = qi->tqi_priority;
        qinfo->tqi_aifs = qi->tqi_aifs;
        qinfo->tqi_cwmin = qi->tqi_cwmin;
        qinfo->tqi_cwmax = qi->tqi_cwmax;
        qinfo->tqi_shretry = qi->tqi_shretry;
        qinfo->tqi_lgretry = qi->tqi_lgretry;
        qinfo->tqi_cbrPeriod = qi->tqi_cbrPeriod;
        qinfo->tqi_cbrOverflowLimit = qi->tqi_cbrOverflowLimit;
        qinfo->tqi_burstTime = qi->tqi_burstTime;
        qinfo->tqi_readyTime = qi->tqi_readyTime;

        return true;
}
EXPORT_SYMBOL(ath9k_hw_get_txq_props);
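/*
 * Note on the contention-window clamping in ath9k_hw_set_txq_props():
 * the hardware expects cwmin/cwmax in the form 2^n - 1, so a requested
 * value is first clamped to 1024 and then rounded up to the next such
 * value by the (cw << 1) | 1 loops. For example, a requested cwmin of
 * 100 walks 1, 3, 7, 15, 31, 63, 127 and is programmed as 127.
 */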
int ath9k_hw_setuptxqueue(struct ath_hw *ah, enum ath9k_tx_queue type,
                          const struct ath9k_tx_queue_info *qinfo)
{
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath9k_tx_queue_info *qi;
        int q;

        switch (type) {
        case ATH9K_TX_QUEUE_BEACON:
                q = ATH9K_NUM_TX_QUEUES - 1;
                break;
        case ATH9K_TX_QUEUE_CAB:
                q = ATH9K_NUM_TX_QUEUES - 2;
                break;
        case ATH9K_TX_QUEUE_PSPOLL:
                q = 1;
                break;
        case ATH9K_TX_QUEUE_UAPSD:
                q = ATH9K_NUM_TX_QUEUES - 3;
                break;
        case ATH9K_TX_QUEUE_DATA:
                for (q = 0; q < ATH9K_NUM_TX_QUEUES; q++)
                        if (ah->txq[q].tqi_type ==
                            ATH9K_TX_QUEUE_INACTIVE)
                                break;
                if (q == ATH9K_NUM_TX_QUEUES) {
                        ath_err(common, "No available TX queue\n");
                        return -1;
                }
                break;
        default:
                ath_err(common, "Invalid TX queue type: %u\n", type);
                return -1;
        }

        ath_dbg(common, ATH_DBG_QUEUE, "Setup TX queue: %u\n", q);

        qi = &ah->txq[q];
        if (qi->tqi_type != ATH9K_TX_QUEUE_INACTIVE) {
                ath_err(common, "TX queue: %u already active\n", q);
                return -1;
        }
        memset(qi, 0, sizeof(struct ath9k_tx_queue_info));
        qi->tqi_type = type;
        if (qinfo == NULL) {
                qi->tqi_qflags = TXQ_FLAG_TXOKINT_ENABLE
                        | TXQ_FLAG_TXERRINT_ENABLE
                        | TXQ_FLAG_TXDESCINT_ENABLE
                        | TXQ_FLAG_TXURNINT_ENABLE;
                qi->tqi_aifs = INIT_AIFS;
                qi->tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
                qi->tqi_cwmax = INIT_CWMAX;
                qi->tqi_shretry = INIT_SH_RETRY;
                qi->tqi_lgretry = INIT_LG_RETRY;
                qi->tqi_physCompBuf = 0;
        } else {
                qi->tqi_physCompBuf = qinfo->tqi_physCompBuf;
                (void) ath9k_hw_set_txq_props(ah, q, qinfo);
        }

        return q;
}
EXPORT_SYMBOL(ath9k_hw_setuptxqueue);

bool ath9k_hw_releasetxqueue(struct ath_hw *ah, u32 q)
{
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath9k_tx_queue_info *qi;

        qi = &ah->txq[q];
        if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
                ath_dbg(common, ATH_DBG_QUEUE,
                        "Release TXQ, inactive queue: %u\n", q);
                return false;
        }

        ath_dbg(common, ATH_DBG_QUEUE, "Release TX queue: %u\n", q);

        qi->tqi_type = ATH9K_TX_QUEUE_INACTIVE;
        ah->txok_interrupt_mask &= ~(1 << q);
        ah->txerr_interrupt_mask &= ~(1 << q);
        ah->txdesc_interrupt_mask &= ~(1 << q);
        ah->txeol_interrupt_mask &= ~(1 << q);
        ah->txurn_interrupt_mask &= ~(1 << q);
        ath9k_hw_set_txq_interrupts(ah, qi);

        return true;
}
EXPORT_SYMBOL(ath9k_hw_releasetxqueue);
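/*
 * A minimal sketch of driver-side usage of the queue API above
 * (hypothetical caller; the real callers live elsewhere in ath9k):
 *
 *      q = ath9k_hw_setuptxqueue(ah, ATH9K_TX_QUEUE_DATA, NULL);
 *      if (q == -1)
 *              return -ENOSPC;
 *      ath9k_hw_resettxqueue(ah, q);
 *      ...
 *      ath9k_hw_releasetxqueue(ah, q);
 */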
bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
{
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath9k_channel *chan = ah->curchan;
        struct ath9k_tx_queue_info *qi;
        u32 cwMin, chanCwMin, value;

        qi = &ah->txq[q];
        if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
                ath_dbg(common, ATH_DBG_QUEUE,
                        "Reset TXQ, inactive queue: %u\n", q);
                return true;
        }

        ath_dbg(common, ATH_DBG_QUEUE, "Reset TX queue: %u\n", q);

        if (qi->tqi_cwmin == ATH9K_TXQ_USEDEFAULT) {
                if (chan && IS_CHAN_B(chan))
                        chanCwMin = INIT_CWMIN_11B;
                else
                        chanCwMin = INIT_CWMIN;

                for (cwMin = 1; cwMin < chanCwMin; cwMin = (cwMin << 1) | 1);
        } else
                cwMin = qi->tqi_cwmin;

        ENABLE_REGWRITE_BUFFER(ah);

        REG_WRITE(ah, AR_DLCL_IFS(q),
                  SM(cwMin, AR_D_LCL_IFS_CWMIN) |
                  SM(qi->tqi_cwmax, AR_D_LCL_IFS_CWMAX) |
                  SM(qi->tqi_aifs, AR_D_LCL_IFS_AIFS));

        REG_WRITE(ah, AR_DRETRY_LIMIT(q),
                  SM(INIT_SSH_RETRY, AR_D_RETRY_LIMIT_STA_SH) |
                  SM(INIT_SLG_RETRY, AR_D_RETRY_LIMIT_STA_LG) |
                  SM(qi->tqi_shretry, AR_D_RETRY_LIMIT_FR_SH));

        REG_WRITE(ah, AR_QMISC(q), AR_Q_MISC_DCU_EARLY_TERM_REQ);

        if (AR_SREV_9340(ah))
                REG_WRITE(ah, AR_DMISC(q),
                          AR_D_MISC_CW_BKOFF_EN | AR_D_MISC_FRAG_WAIT_EN | 0x1);
        else
                REG_WRITE(ah, AR_DMISC(q),
                          AR_D_MISC_CW_BKOFF_EN | AR_D_MISC_FRAG_WAIT_EN | 0x2);

        if (qi->tqi_cbrPeriod) {
                REG_WRITE(ah, AR_QCBRCFG(q),
                          SM(qi->tqi_cbrPeriod, AR_Q_CBRCFG_INTERVAL) |
                          SM(qi->tqi_cbrOverflowLimit, AR_Q_CBRCFG_OVF_THRESH));
                REG_SET_BIT(ah, AR_QMISC(q), AR_Q_MISC_FSP_CBR |
                            (qi->tqi_cbrOverflowLimit ?
                             AR_Q_MISC_CBR_EXP_CNTR_LIMIT_EN : 0));
        }
        if (qi->tqi_readyTime && (qi->tqi_type != ATH9K_TX_QUEUE_CAB)) {
                REG_WRITE(ah, AR_QRDYTIMECFG(q),
                          SM(qi->tqi_readyTime, AR_Q_RDYTIMECFG_DURATION) |
                          AR_Q_RDYTIMECFG_EN);
        }

        REG_WRITE(ah, AR_DCHNTIME(q),
                  SM(qi->tqi_burstTime, AR_D_CHNTIME_DUR) |
                  (qi->tqi_burstTime ? AR_D_CHNTIME_EN : 0));

        if (qi->tqi_burstTime
            && (qi->tqi_qflags & TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE))
                REG_SET_BIT(ah, AR_QMISC(q), AR_Q_MISC_RDYTIME_EXP_POLICY);

        if (qi->tqi_qflags & TXQ_FLAG_BACKOFF_DISABLE)
                REG_SET_BIT(ah, AR_DMISC(q), AR_D_MISC_POST_FR_BKOFF_DIS);

        REGWRITE_BUFFER_FLUSH(ah);

        if (qi->tqi_qflags & TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE)
                REG_SET_BIT(ah, AR_DMISC(q), AR_D_MISC_FRAG_BKOFF_EN);

        switch (qi->tqi_type) {
        case ATH9K_TX_QUEUE_BEACON:
                ENABLE_REGWRITE_BUFFER(ah);

                REG_SET_BIT(ah, AR_QMISC(q),
                            AR_Q_MISC_FSP_DBA_GATED
                            | AR_Q_MISC_BEACON_USE
                            | AR_Q_MISC_CBR_INCR_DIS1);

                REG_SET_BIT(ah, AR_DMISC(q),
                            (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL <<
                             AR_D_MISC_ARB_LOCKOUT_CNTRL_S)
                            | AR_D_MISC_BEACON_USE
                            | AR_D_MISC_POST_FR_BKOFF_DIS);

                REGWRITE_BUFFER_FLUSH(ah);

                /*
                 * cwmin and cwmax should be 0 for the beacon queue,
                 * but not for IBSS, as that would create an imbalance
                 * in beaconing fairness between participating nodes.
                 */
                if (AR_SREV_9300_20_OR_LATER(ah) &&
                    ah->opmode != NL80211_IFTYPE_ADHOC) {
                        REG_WRITE(ah, AR_DLCL_IFS(q), SM(0, AR_D_LCL_IFS_CWMIN)
                                  | SM(0, AR_D_LCL_IFS_CWMAX)
                                  | SM(qi->tqi_aifs, AR_D_LCL_IFS_AIFS));
                }
                break;
        case ATH9K_TX_QUEUE_CAB:
                ENABLE_REGWRITE_BUFFER(ah);

                REG_SET_BIT(ah, AR_QMISC(q),
                            AR_Q_MISC_FSP_DBA_GATED
                            | AR_Q_MISC_CBR_INCR_DIS1
                            | AR_Q_MISC_CBR_INCR_DIS0);
                value = (qi->tqi_readyTime -
                         (ah->config.sw_beacon_response_time -
                          ah->config.dma_beacon_response_time) -
                         ah->config.additional_swba_backoff) * 1024;
                REG_WRITE(ah, AR_QRDYTIMECFG(q),
                          value | AR_Q_RDYTIMECFG_EN);
                REG_SET_BIT(ah, AR_DMISC(q),
                            (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL <<
                             AR_D_MISC_ARB_LOCKOUT_CNTRL_S));

                REGWRITE_BUFFER_FLUSH(ah);

                break;
        case ATH9K_TX_QUEUE_PSPOLL:
                REG_SET_BIT(ah, AR_QMISC(q), AR_Q_MISC_CBR_INCR_DIS1);
                break;
        case ATH9K_TX_QUEUE_UAPSD:
                REG_SET_BIT(ah, AR_DMISC(q), AR_D_MISC_POST_FR_BKOFF_DIS);
                break;
        default:
                break;
        }

        if (qi->tqi_intFlags & ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS) {
                REG_SET_BIT(ah, AR_DMISC(q),
                            SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL,
                               AR_D_MISC_ARB_LOCKOUT_CNTRL) |
                            AR_D_MISC_POST_FR_BKOFF_DIS);
        }

        if (AR_SREV_9300_20_OR_LATER(ah))
                REG_WRITE(ah, AR_Q_DESC_CRCCHK, AR_Q_DESC_CRCCHK_EN);

        if (qi->tqi_qflags & TXQ_FLAG_TXOKINT_ENABLE)
                ah->txok_interrupt_mask |= 1 << q;
        else
                ah->txok_interrupt_mask &= ~(1 << q);
        if (qi->tqi_qflags & TXQ_FLAG_TXERRINT_ENABLE)
                ah->txerr_interrupt_mask |= 1 << q;
        else
                ah->txerr_interrupt_mask &= ~(1 << q);
        if (qi->tqi_qflags & TXQ_FLAG_TXDESCINT_ENABLE)
                ah->txdesc_interrupt_mask |= 1 << q;
        else
                ah->txdesc_interrupt_mask &= ~(1 << q);
        if (qi->tqi_qflags & TXQ_FLAG_TXEOLINT_ENABLE)
                ah->txeol_interrupt_mask |= 1 << q;
        else
                ah->txeol_interrupt_mask &= ~(1 << q);
        if (qi->tqi_qflags & TXQ_FLAG_TXURNINT_ENABLE)
                ah->txurn_interrupt_mask |= 1 << q;
        else
                ah->txurn_interrupt_mask &= ~(1 << q);
        ath9k_hw_set_txq_interrupts(ah, qi);

        return true;
}
EXPORT_SYMBOL(ath9k_hw_resettxqueue);
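/*
 * The five tx*_interrupt_mask words updated at the end of
 * ath9k_hw_resettxqueue() keep one bit per QCU; they are only pushed
 * to the AR_IMR_S0/S1/S2 secondary interrupt mask registers by
 * ath9k_hw_set_txq_interrupts(), so mask bookkeeping stays cheap until
 * a queue is actually reset or released.
 */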
int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
                        struct ath_rx_status *rs, u64 tsf)
{
        struct ar5416_desc ads;
        struct ar5416_desc *adsp = AR5416DESC(ds);
        u32 phyerr;

        if ((adsp->ds_rxstatus8 & AR_RxDone) == 0)
                return -EINPROGRESS;

        ads.u.rx = adsp->u.rx;

        rs->rs_status = 0;
        rs->rs_flags = 0;

        rs->rs_datalen = ads.ds_rxstatus1 & AR_DataLen;
        rs->rs_tstamp = ads.AR_RcvTimestamp;

        if (ads.ds_rxstatus8 & AR_PostDelimCRCErr) {
                rs->rs_rssi = ATH9K_RSSI_BAD;
                rs->rs_rssi_ctl0 = ATH9K_RSSI_BAD;
                rs->rs_rssi_ctl1 = ATH9K_RSSI_BAD;
                rs->rs_rssi_ctl2 = ATH9K_RSSI_BAD;
                rs->rs_rssi_ext0 = ATH9K_RSSI_BAD;
                rs->rs_rssi_ext1 = ATH9K_RSSI_BAD;
                rs->rs_rssi_ext2 = ATH9K_RSSI_BAD;
        } else {
                rs->rs_rssi = MS(ads.ds_rxstatus4, AR_RxRSSICombined);
                rs->rs_rssi_ctl0 = MS(ads.ds_rxstatus0, AR_RxRSSIAnt00);
                rs->rs_rssi_ctl1 = MS(ads.ds_rxstatus0, AR_RxRSSIAnt01);
                rs->rs_rssi_ctl2 = MS(ads.ds_rxstatus0, AR_RxRSSIAnt02);
                rs->rs_rssi_ext0 = MS(ads.ds_rxstatus4, AR_RxRSSIAnt10);
                rs->rs_rssi_ext1 = MS(ads.ds_rxstatus4, AR_RxRSSIAnt11);
                rs->rs_rssi_ext2 = MS(ads.ds_rxstatus4, AR_RxRSSIAnt12);
        }
        if (ads.ds_rxstatus8 & AR_RxKeyIdxValid)
                rs->rs_keyix = MS(ads.ds_rxstatus8, AR_KeyIdx);
        else
                rs->rs_keyix = ATH9K_RXKEYIX_INVALID;

        rs->rs_rate = RXSTATUS_RATE(ah, (&ads));
        rs->rs_more = (ads.ds_rxstatus1 & AR_RxMore) ? 1 : 0;

        rs->rs_isaggr = (ads.ds_rxstatus8 & AR_RxAggr) ? 1 : 0;
        rs->rs_moreaggr = (ads.ds_rxstatus8 & AR_RxMoreAggr) ? 1 : 0;
        rs->rs_antenna = MS(ads.ds_rxstatus3, AR_RxAntenna);
        rs->rs_flags = (ads.ds_rxstatus3 & AR_GI) ? ATH9K_RX_GI : 0;
        rs->rs_flags |= (ads.ds_rxstatus3 & AR_2040) ? ATH9K_RX_2040 : 0;

        if (ads.ds_rxstatus8 & AR_PreDelimCRCErr)
                rs->rs_flags |= ATH9K_RX_DELIM_CRC_PRE;
        if (ads.ds_rxstatus8 & AR_PostDelimCRCErr)
                rs->rs_flags |= ATH9K_RX_DELIM_CRC_POST;
        if (ads.ds_rxstatus8 & AR_DecryptBusyErr)
                rs->rs_flags |= ATH9K_RX_DECRYPT_BUSY;

        if ((ads.ds_rxstatus8 & AR_RxFrameOK) == 0) {
                /*
                 * Treat these errors as mutually exclusive to avoid spurious
                 * extra error reports from the hardware. If a CRC error is
                 * reported, then decryption and MIC errors are irrelevant,
                 * the frame is going to be dropped either way.
                 */
                if (ads.ds_rxstatus8 & AR_CRCErr)
                        rs->rs_status |= ATH9K_RXERR_CRC;
                else if (ads.ds_rxstatus8 & AR_PHYErr) {
                        rs->rs_status |= ATH9K_RXERR_PHY;
                        phyerr = MS(ads.ds_rxstatus8, AR_PHYErrCode);
                        rs->rs_phyerr = phyerr;
                } else if (ads.ds_rxstatus8 & AR_DecryptCRCErr)
                        rs->rs_status |= ATH9K_RXERR_DECRYPT;
                else if (ads.ds_rxstatus8 & AR_MichaelErr)
                        rs->rs_status |= ATH9K_RXERR_MIC;

                if (ads.ds_rxstatus8 & AR_KeyMiss)
                        rs->rs_status |= ATH9K_RXERR_DECRYPT;
        }

        return 0;
}
EXPORT_SYMBOL(ath9k_hw_rxprocdesc);
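/*
 * Sketch of a (hypothetical) rx completion loop built on
 * ath9k_hw_rxprocdesc(); the real loop in the driver also unmaps and
 * requeues the DMA buffers:
 *
 *      struct ath_rx_status rs;
 *
 *      while (ath9k_hw_rxprocdesc(ah, ds, &rs, tsf) != -EINPROGRESS) {
 *              if (!rs.rs_status)
 *                      ...deliver a frame of rs.rs_datalen bytes...
 *              ...advance ds to the next descriptor...
 *      }
 */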
/*
 * This can stop or re-enable RX.
 *
 * If 'set' is true, this will kill any frame which is currently being
 * transferred between the MAC and baseband and also prevent any new
 * frames from getting started.
 */
bool ath9k_hw_setrxabort(struct ath_hw *ah, bool set)
{
        u32 reg;

        if (set) {
                REG_SET_BIT(ah, AR_DIAG_SW,
                            (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));

                if (!ath9k_hw_wait(ah, AR_OBS_BUS_1, AR_OBS_BUS_1_RX_STATE,
                                   0, AH_WAIT_TIMEOUT)) {
                        REG_CLR_BIT(ah, AR_DIAG_SW,
                                    (AR_DIAG_RX_DIS |
                                     AR_DIAG_RX_ABORT));

                        reg = REG_READ(ah, AR_OBS_BUS_1);
                        ath_err(ath9k_hw_common(ah),
                                "RX failed to go idle in 10 ms RXSM=0x%x\n",
                                reg);

                        return false;
                }
        } else {
                REG_CLR_BIT(ah, AR_DIAG_SW,
                            (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
        }

        return true;
}
EXPORT_SYMBOL(ath9k_hw_setrxabort);

void ath9k_hw_putrxbuf(struct ath_hw *ah, u32 rxdp)
{
        REG_WRITE(ah, AR_RXDP, rxdp);
}
EXPORT_SYMBOL(ath9k_hw_putrxbuf);

void ath9k_hw_startpcureceive(struct ath_hw *ah, bool is_scanning)
{
        ath9k_enable_mib_counters(ah);

        ath9k_ani_reset(ah, is_scanning);

        REG_CLR_BIT(ah, AR_DIAG_SW, (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
}
EXPORT_SYMBOL(ath9k_hw_startpcureceive);

void ath9k_hw_abortpcurecv(struct ath_hw *ah)
{
        REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_ABORT | AR_DIAG_RX_DIS);

        ath9k_hw_disable_mib_counters(ah);
}
EXPORT_SYMBOL(ath9k_hw_abortpcurecv);

bool ath9k_hw_stopdmarecv(struct ath_hw *ah, bool *reset)
{
#define AH_RX_STOP_DMA_TIMEOUT 10000   /* usec */
        struct ath_common *common = ath9k_hw_common(ah);
        u32 mac_status, last_mac_status = 0;
        int i;

        /* Enable access to the DMA observation bus */
        REG_WRITE(ah, AR_MACMISC,
                  ((AR_MACMISC_DMA_OBS_LINE_8 << AR_MACMISC_DMA_OBS_S) |
                   (AR_MACMISC_MISC_OBS_BUS_1 <<
                    AR_MACMISC_MISC_OBS_BUS_MSB_S)));

        REG_WRITE(ah, AR_CR, AR_CR_RXD);

        /* Wait for rx enable bit to go low */
        for (i = AH_RX_STOP_DMA_TIMEOUT / AH_TIME_QUANTUM; i != 0; i--) {
                if ((REG_READ(ah, AR_CR) & AR_CR_RXE) == 0)
                        break;

                if (!AR_SREV_9300_20_OR_LATER(ah)) {
                        mac_status = REG_READ(ah, AR_DMADBG_7) & 0x7f0;
                        if (mac_status == 0x1c0 && mac_status == last_mac_status) {
                                *reset = true;
                                break;
                        }

                        last_mac_status = mac_status;
                }

                udelay(AH_TIME_QUANTUM);
        }

        if (i == 0) {
                ath_err(common,
                        "DMA failed to stop in %d ms AR_CR=0x%08x AR_DIAG_SW=0x%08x DMADBG_7=0x%08x\n",
                        AH_RX_STOP_DMA_TIMEOUT / 1000,
                        REG_READ(ah, AR_CR),
                        REG_READ(ah, AR_DIAG_SW),
                        REG_READ(ah, AR_DMADBG_7));
                return false;
        } else {
                return true;
        }

#undef AH_RX_STOP_DMA_TIMEOUT
}
EXPORT_SYMBOL(ath9k_hw_stopdmarecv);

int ath9k_hw_beaconq_setup(struct ath_hw *ah)
{
        struct ath9k_tx_queue_info qi;

        memset(&qi, 0, sizeof(qi));
        qi.tqi_aifs = 1;
        qi.tqi_cwmin = 0;
        qi.tqi_cwmax = 0;
        /* NB: don't enable any interrupts */
        return ath9k_hw_setuptxqueue(ah, ATH9K_TX_QUEUE_BEACON, &qi);
}
EXPORT_SYMBOL(ath9k_hw_beaconq_setup);

bool ath9k_hw_intrpend(struct ath_hw *ah)
{
        u32 host_isr;

        if (AR_SREV_9100(ah))
                return true;

        host_isr = REG_READ(ah, AR_INTR_ASYNC_CAUSE);
        if ((host_isr & AR_INTR_MAC_IRQ) && (host_isr != AR_INTR_SPURIOUS))
                return true;

        host_isr = REG_READ(ah, AR_INTR_SYNC_CAUSE);
        if ((host_isr & AR_INTR_SYNC_DEFAULT)
            && (host_isr != AR_INTR_SPURIOUS))
                return true;

        return false;
}
EXPORT_SYMBOL(ath9k_hw_intrpend);
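/*
 * The interrupt helpers below follow the usual shared-IRQ pattern
 * (hypothetical handler skeleton; the real one lives in the driver
 * core): filter invocations that are not ours, mask, dispatch,
 * unmask:
 *
 *      if (!ath9k_hw_intrpend(ah))
 *              return IRQ_NONE;
 *      ath9k_hw_disable_interrupts(ah);
 *      ...read and dispatch the ISR...
 *      ath9k_hw_enable_interrupts(ah);
 */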
void ath9k_hw_disable_interrupts(struct ath_hw *ah)
{
        struct ath_common *common = ath9k_hw_common(ah);

        ath_dbg(common, ATH_DBG_INTERRUPT, "disable IER\n");
        REG_WRITE(ah, AR_IER, AR_IER_DISABLE);
        (void) REG_READ(ah, AR_IER);
        if (!AR_SREV_9100(ah)) {
                REG_WRITE(ah, AR_INTR_ASYNC_ENABLE, 0);
                (void) REG_READ(ah, AR_INTR_ASYNC_ENABLE);

                REG_WRITE(ah, AR_INTR_SYNC_ENABLE, 0);
                (void) REG_READ(ah, AR_INTR_SYNC_ENABLE);
        }
}
EXPORT_SYMBOL(ath9k_hw_disable_interrupts);

void ath9k_hw_enable_interrupts(struct ath_hw *ah)
{
        struct ath_common *common = ath9k_hw_common(ah);
        u32 sync_default = AR_INTR_SYNC_DEFAULT;

        if (!(ah->imask & ATH9K_INT_GLOBAL))
                return;

        if (AR_SREV_9340(ah))
                sync_default &= ~AR_INTR_SYNC_HOST1_FATAL;

        ath_dbg(common, ATH_DBG_INTERRUPT, "enable IER\n");
        REG_WRITE(ah, AR_IER, AR_IER_ENABLE);
        if (!AR_SREV_9100(ah)) {
                REG_WRITE(ah, AR_INTR_ASYNC_ENABLE,
                          AR_INTR_MAC_IRQ);
                REG_WRITE(ah, AR_INTR_ASYNC_MASK, AR_INTR_MAC_IRQ);

                REG_WRITE(ah, AR_INTR_SYNC_ENABLE, sync_default);
                REG_WRITE(ah, AR_INTR_SYNC_MASK, sync_default);
        }
        ath_dbg(common, ATH_DBG_INTERRUPT, "AR_IMR 0x%x IER 0x%x\n",
                REG_READ(ah, AR_IMR), REG_READ(ah, AR_IER));
}
EXPORT_SYMBOL(ath9k_hw_enable_interrupts);
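/*
 * Note on ath9k_hw_disable_interrupts() above: each register write is
 * followed by a discarded (void) REG_READ of the same register. The
 * readback flushes the posted write out to the chip, so by the time
 * the function returns the hardware can no longer raise a new
 * interrupt.
 */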
void ath9k_hw_set_interrupts(struct ath_hw *ah, enum ath9k_int ints)
{
        enum ath9k_int omask = ah->imask;
        u32 mask, mask2;
        struct ath9k_hw_capabilities *pCap = &ah->caps;
        struct ath_common *common = ath9k_hw_common(ah);

        if (!(ints & ATH9K_INT_GLOBAL))
                ath9k_hw_disable_interrupts(ah);

        ath_dbg(common, ATH_DBG_INTERRUPT, "0x%x => 0x%x\n", omask, ints);

        /* TODO: global int Ref count */
        mask = ints & ATH9K_INT_COMMON;
        mask2 = 0;

        if (ints & ATH9K_INT_TX) {
                if (ah->config.tx_intr_mitigation)
                        mask |= AR_IMR_TXMINTR | AR_IMR_TXINTM;
                else {
                        if (ah->txok_interrupt_mask)
                                mask |= AR_IMR_TXOK;
                        if (ah->txdesc_interrupt_mask)
                                mask |= AR_IMR_TXDESC;
                }
                if (ah->txerr_interrupt_mask)
                        mask |= AR_IMR_TXERR;
                if (ah->txeol_interrupt_mask)
                        mask |= AR_IMR_TXEOL;
        }
        if (ints & ATH9K_INT_RX) {
                if (AR_SREV_9300_20_OR_LATER(ah)) {
                        mask |= AR_IMR_RXERR | AR_IMR_RXOK_HP;
                        if (ah->config.rx_intr_mitigation) {
                                mask &= ~AR_IMR_RXOK_LP;
                                mask |= AR_IMR_RXMINTR | AR_IMR_RXINTM;
                        } else {
                                mask |= AR_IMR_RXOK_LP;
                        }
                } else {
                        if (ah->config.rx_intr_mitigation)
                                mask |= AR_IMR_RXMINTR | AR_IMR_RXINTM;
                        else
                                mask |= AR_IMR_RXOK | AR_IMR_RXDESC;
                }
                if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
                        mask |= AR_IMR_GENTMR;
        }

        if (ints & ATH9K_INT_GENTIMER)
                mask |= AR_IMR_GENTMR;

        if (ints & (ATH9K_INT_BMISC)) {
                mask |= AR_IMR_BCNMISC;
                if (ints & ATH9K_INT_TIM)
                        mask2 |= AR_IMR_S2_TIM;
                if (ints & ATH9K_INT_DTIM)
                        mask2 |= AR_IMR_S2_DTIM;
                if (ints & ATH9K_INT_DTIMSYNC)
                        mask2 |= AR_IMR_S2_DTIMSYNC;
                if (ints & ATH9K_INT_CABEND)
                        mask2 |= AR_IMR_S2_CABEND;
                if (ints & ATH9K_INT_TSFOOR)
                        mask2 |= AR_IMR_S2_TSFOOR;
        }

        if (ints & (ATH9K_INT_GTT | ATH9K_INT_CST)) {
                mask |= AR_IMR_BCNMISC;
                if (ints & ATH9K_INT_GTT)
                        mask2 |= AR_IMR_S2_GTT;
                if (ints & ATH9K_INT_CST)
                        mask2 |= AR_IMR_S2_CST;
        }

        ath_dbg(common, ATH_DBG_INTERRUPT, "new IMR 0x%x\n", mask);
        REG_WRITE(ah, AR_IMR, mask);
        ah->imrs2_reg &= ~(AR_IMR_S2_TIM | AR_IMR_S2_DTIM | AR_IMR_S2_DTIMSYNC |
                           AR_IMR_S2_CABEND | AR_IMR_S2_CABTO |
                           AR_IMR_S2_TSFOOR | AR_IMR_S2_GTT | AR_IMR_S2_CST);
        ah->imrs2_reg |= mask2;
        REG_WRITE(ah, AR_IMR_S2, ah->imrs2_reg);

        if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
                if (ints & ATH9K_INT_TIM_TIMER)
                        REG_SET_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER);
                else
                        REG_CLR_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER);
        }

        if (ints & ATH9K_INT_GLOBAL)
                ath9k_hw_enable_interrupts(ah);
}
EXPORT_SYMBOL(ath9k_hw_set_interrupts);