/*
 * Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org>
 * Copyright (c) 2006-2008 Nick Kossifidis <mickflemm@gmail.com>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 */

/********************************************\
Queue Control Unit, DFS Control Unit Functions
\********************************************/

#include "ath5k.h"
#include "reg.h"
#include "debug.h"
#include "base.h"

/*
 * Get properties for a transmit queue
 */
int ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue,
		struct ath5k_txq_info *queue_info)
{
	ATH5K_TRACE(ah->ah_sc);
	memcpy(queue_info, &ah->ah_txq[queue], sizeof(struct ath5k_txq_info));
	return 0;
}

/*
 * Set properties for a transmit queue
 */
int ath5k_hw_set_tx_queueprops(struct ath5k_hw *ah, int queue,
		const struct ath5k_txq_info *queue_info)
{
	ATH5K_TRACE(ah->ah_sc);
	AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);

	if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)
		return -EIO;

	memcpy(&ah->ah_txq[queue], queue_info, sizeof(struct ath5k_txq_info));

	/*XXX: Is this supported on 5210 ?*/
	if ((queue_info->tqi_type == AR5K_TX_QUEUE_DATA &&
			((queue_info->tqi_subtype == AR5K_WME_AC_VI) ||
			(queue_info->tqi_subtype == AR5K_WME_AC_VO))) ||
			queue_info->tqi_type == AR5K_TX_QUEUE_UAPSD)
		ah->ah_txq[queue].tqi_flags |= AR5K_TXQ_FLAG_POST_FR_BKOFF_DIS;

	return 0;
}

/*
 * Initialize a transmit queue
 */
int ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, enum ath5k_tx_queue queue_type,
		struct ath5k_txq_info *queue_info)
{
	unsigned int queue;
	int ret;

	ATH5K_TRACE(ah->ah_sc);

	/*
	 * Get queue by type
	 */
	/*5210 only has 2 queues*/
	if (ah->ah_version == AR5K_AR5210) {
		switch (queue_type) {
		case AR5K_TX_QUEUE_DATA:
			queue = AR5K_TX_QUEUE_ID_NOQCU_DATA;
			break;
		case AR5K_TX_QUEUE_BEACON:
		case AR5K_TX_QUEUE_CAB:
			queue = AR5K_TX_QUEUE_ID_NOQCU_BEACON;
			break;
		default:
			return -EINVAL;
		}
	} else {
		switch (queue_type) {
		case AR5K_TX_QUEUE_DATA:
			for (queue = AR5K_TX_QUEUE_ID_DATA_MIN;
				ah->ah_txq[queue].tqi_type !=
				AR5K_TX_QUEUE_INACTIVE; queue++) {

				if (queue > AR5K_TX_QUEUE_ID_DATA_MAX)
					return -EINVAL;
			}
			break;
		case AR5K_TX_QUEUE_UAPSD:
			queue = AR5K_TX_QUEUE_ID_UAPSD;
			break;
		case AR5K_TX_QUEUE_BEACON:
			queue = AR5K_TX_QUEUE_ID_BEACON;
			break;
		case AR5K_TX_QUEUE_CAB:
			queue = AR5K_TX_QUEUE_ID_CAB;
			break;
		case AR5K_TX_QUEUE_XR_DATA:
			if (ah->ah_version != AR5K_AR5212)
				ATH5K_ERR(ah->ah_sc,
					"XR data queues only supported in"
					" 5212!\n");
			queue = AR5K_TX_QUEUE_ID_XR_DATA;
			break;
		default:
			return -EINVAL;
		}
	}

	/*
	 * Setup internal queue structure
	 */
	memset(&ah->ah_txq[queue], 0, sizeof(struct ath5k_txq_info));
	ah->ah_txq[queue].tqi_type = queue_type;

	if (queue_info != NULL) {
		queue_info->tqi_type = queue_type;
		ret = ath5k_hw_set_tx_queueprops(ah, queue, queue_info);
		if (ret)
			return ret;
	}

	/*
	 * We use ah_txq_status to hold a temp value for
	 * the Secondary interrupt mask registers on 5211+
	 * check out ath5k_hw_reset_tx_queue
	 */
	AR5K_Q_ENABLE_BITS(ah->ah_txq_status, queue);

	return queue;
}
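
/*
 * Usage sketch (illustrative only; the real callers live elsewhere in the
 * driver, e.g. base.c, and may use different parameters): a typical flow
 * is to allocate a queue of a given type and, after a chip reset, program
 * its QCU/DCU registers:
 *
 *	struct ath5k_txq_info qi = { 0 };
 *	int qnum = ath5k_hw_setup_tx_queue(ah, AR5K_TX_QUEUE_DATA, &qi);
 *
 *	if (qnum < 0)
 *		return qnum;
 *	ath5k_hw_reset_tx_queue(ah, qnum);
 *
 * Leaving the tqi_cw_min/tqi_cw_max/tqi_aifs fields at zero keeps the
 * per-mode defaults computed in ath5k_hw_reset_tx_queue below; they are
 * treated as offsets on top of those defaults, not as absolute values.
 * Real callers also set tqi_flags to enable the per-queue interrupts.
 */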

/*
 * Get number of pending frames
 * for a specific queue [5211+]
 */
u32 ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue)
{
	u32 pending;
	ATH5K_TRACE(ah->ah_sc);
	AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);

	/* Return if queue is declared inactive */
	if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)
		return false;

	/* XXX: How about AR5K_CFG_TXCNT ? */
	if (ah->ah_version == AR5K_AR5210)
		return false;

	pending = ath5k_hw_reg_read(ah, AR5K_QUEUE_STATUS(queue));
	pending &= AR5K_QCU_STS_FRMPENDCNT;

	/* It's possible to have no frames pending even if TXE
	 * is set. To indicate that the queue has not stopped return
	 * true */
	if (!pending && AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue))
		return true;

	return pending;
}

/*
 * Set a transmit queue inactive
 */
void ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue)
{
	ATH5K_TRACE(ah->ah_sc);
	if (WARN_ON(queue >= ah->ah_capabilities.cap_queues.q_tx_num))
		return;

	/* This queue will be skipped in further operations */
	ah->ah_txq[queue].tqi_type = AR5K_TX_QUEUE_INACTIVE;
	/*For SIMR setup*/
	AR5K_Q_DISABLE_BITS(ah->ah_txq_status, queue);
}

/*
 * Set DFS properties for a transmit queue on DCU
 */
int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
{
	u32 cw_min, cw_max, retry_lg, retry_sh;
	struct ath5k_txq_info *tq = &ah->ah_txq[queue];

	ATH5K_TRACE(ah->ah_sc);
	AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);

	if (tq->tqi_type == AR5K_TX_QUEUE_INACTIVE)
		return 0;

	if (ah->ah_version == AR5K_AR5210) {
		/* Only handle data queues, others will be ignored */
		if (tq->tqi_type != AR5K_TX_QUEUE_DATA)
			return 0;

		/* Set Slot time */
		ath5k_hw_reg_write(ah, ah->ah_turbo ?
			AR5K_INIT_SLOT_TIME_TURBO : AR5K_INIT_SLOT_TIME,
			AR5K_SLOT_TIME);
		/* Set ACK_CTS timeout */
		ath5k_hw_reg_write(ah, ah->ah_turbo ?
			AR5K_INIT_ACK_CTS_TIMEOUT_TURBO :
			AR5K_INIT_ACK_CTS_TIMEOUT, AR5K_SLOT_TIME);
		/* Set Transmit Latency */
		ath5k_hw_reg_write(ah, ah->ah_turbo ?
			AR5K_INIT_TRANSMIT_LATENCY_TURBO :
			AR5K_INIT_TRANSMIT_LATENCY, AR5K_USEC_5210);

		/* Set IFS0 */
		if (ah->ah_turbo) {
			ath5k_hw_reg_write(ah, ((AR5K_INIT_SIFS_TURBO +
				(ah->ah_aifs + tq->tqi_aifs) *
				AR5K_INIT_SLOT_TIME_TURBO) <<
				AR5K_IFS0_DIFS_S) | AR5K_INIT_SIFS_TURBO,
				AR5K_IFS0);
		} else {
			ath5k_hw_reg_write(ah, ((AR5K_INIT_SIFS +
				(ah->ah_aifs + tq->tqi_aifs) *
				AR5K_INIT_SLOT_TIME) << AR5K_IFS0_DIFS_S) |
				AR5K_INIT_SIFS, AR5K_IFS0);
		}

		/* Set IFS1 */
		ath5k_hw_reg_write(ah, ah->ah_turbo ?
			AR5K_INIT_PROTO_TIME_CNTRL_TURBO :
			AR5K_INIT_PROTO_TIME_CNTRL, AR5K_IFS1);
		/* Set AR5K_PHY_SETTLING */
		ath5k_hw_reg_write(ah, ah->ah_turbo ?
			(ath5k_hw_reg_read(ah, AR5K_PHY_SETTLING) & ~0x7F)
			| 0x38 :
			(ath5k_hw_reg_read(ah, AR5K_PHY_SETTLING) & ~0x7F)
			| 0x1C,
			AR5K_PHY_SETTLING);
		/* Set Frame Control Register */
		ath5k_hw_reg_write(ah, ah->ah_turbo ?
			(AR5K_PHY_FRAME_CTL_INI | AR5K_PHY_TURBO_MODE |
			AR5K_PHY_TURBO_SHORT | 0x2020) :
			(AR5K_PHY_FRAME_CTL_INI | 0x1020),
			AR5K_PHY_FRAME_CTL_5210);
	}

	/*
	 * Calculate cwmin/max by channel mode
	 */
	cw_min = ah->ah_cw_min = AR5K_TUNE_CWMIN;
	cw_max = ah->ah_cw_max = AR5K_TUNE_CWMAX;
	ah->ah_aifs = AR5K_TUNE_AIFS;
	/*XR is only supported on 5212*/
	if (IS_CHAN_XR(ah->ah_current_channel) &&
			ah->ah_version == AR5K_AR5212) {
		cw_min = ah->ah_cw_min = AR5K_TUNE_CWMIN_XR;
		cw_max = ah->ah_cw_max = AR5K_TUNE_CWMAX_XR;
		ah->ah_aifs = AR5K_TUNE_AIFS_XR;
	/*B mode is not supported on 5210*/
	} else if (IS_CHAN_B(ah->ah_current_channel) &&
			ah->ah_version != AR5K_AR5210) {
		cw_min = ah->ah_cw_min = AR5K_TUNE_CWMIN_11B;
		cw_max = ah->ah_cw_max = AR5K_TUNE_CWMAX_11B;
		ah->ah_aifs = AR5K_TUNE_AIFS_11B;
	}

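	/*
	 * Map the defaults picked above to the actual CW values: cw_min is
	 * first rounded up to the next (2^n - 1) value, then tqi_cw_min and
	 * tqi_cw_max are applied as log2-style offsets (negative values
	 * shrink the window, positive values grow it while keeping the
	 * 2^n - 1 form). For example, a default cw_min of 15 with
	 * tqi_cw_min == 1 yields 31, while tqi_cw_min == -1 yields 7.
	 */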
	cw_min = 1;
	while (cw_min < ah->ah_cw_min)
		cw_min = (cw_min << 1) | 1;

	cw_min = tq->tqi_cw_min < 0 ? (cw_min >> (-tq->tqi_cw_min)) :
		((cw_min << tq->tqi_cw_min) + (1 << tq->tqi_cw_min) - 1);
	cw_max = tq->tqi_cw_max < 0 ? (cw_max >> (-tq->tqi_cw_max)) :
		((cw_max << tq->tqi_cw_max) + (1 << tq->tqi_cw_max) - 1);

	/*
	 * Calculate and set retry limits
	 */
	if (ah->ah_software_retry) {
		/* XXX Need to test this */
		retry_lg = ah->ah_limit_tx_retries;
		retry_sh = retry_lg = retry_lg > AR5K_DCU_RETRY_LMT_SH_RETRY ?
			AR5K_DCU_RETRY_LMT_SH_RETRY : retry_lg;
	} else {
		retry_lg = AR5K_INIT_LG_RETRY;
		retry_sh = AR5K_INIT_SH_RETRY;
	}

	/*No QCU/DCU [5210]*/
	if (ah->ah_version == AR5K_AR5210) {
		ath5k_hw_reg_write(ah,
			(cw_min << AR5K_NODCU_RETRY_LMT_CW_MIN_S)
			| AR5K_REG_SM(AR5K_INIT_SLG_RETRY,
				AR5K_NODCU_RETRY_LMT_SLG_RETRY)
			| AR5K_REG_SM(AR5K_INIT_SSH_RETRY,
				AR5K_NODCU_RETRY_LMT_SSH_RETRY)
			| AR5K_REG_SM(retry_lg, AR5K_NODCU_RETRY_LMT_LG_RETRY)
			| AR5K_REG_SM(retry_sh, AR5K_NODCU_RETRY_LMT_SH_RETRY),
			AR5K_NODCU_RETRY_LMT);
	} else {
		/*QCU/DCU [5211+]*/
		ath5k_hw_reg_write(ah,
			AR5K_REG_SM(AR5K_INIT_SLG_RETRY,
				AR5K_DCU_RETRY_LMT_SLG_RETRY) |
			AR5K_REG_SM(AR5K_INIT_SSH_RETRY,
				AR5K_DCU_RETRY_LMT_SSH_RETRY) |
			AR5K_REG_SM(retry_lg, AR5K_DCU_RETRY_LMT_LG_RETRY) |
			AR5K_REG_SM(retry_sh, AR5K_DCU_RETRY_LMT_SH_RETRY),
			AR5K_QUEUE_DFS_RETRY_LIMIT(queue));

		/*===Rest is also for QCU/DCU only [5211+]===*/

		/*
		 * Set initial contention window (cw_min/cw_max)
		 * and arbitrated interframe space (aifs)...
		 */
		ath5k_hw_reg_write(ah,
			AR5K_REG_SM(cw_min, AR5K_DCU_LCL_IFS_CW_MIN) |
			AR5K_REG_SM(cw_max, AR5K_DCU_LCL_IFS_CW_MAX) |
			AR5K_REG_SM(ah->ah_aifs + tq->tqi_aifs,
				AR5K_DCU_LCL_IFS_AIFS),
			AR5K_QUEUE_DFS_LOCAL_IFS(queue));

		/*
		 * Set misc registers
		 */
		/* Enable DCU early termination for this queue */
		AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
			AR5K_QCU_MISC_DCU_EARLY);

		/* Enable DCU to wait for next fragment from QCU */
		AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
			AR5K_DCU_MISC_FRAG_WAIT);

		/* On Maui and Spirit use the global seqnum on DCU */
		if (ah->ah_mac_version < AR5K_SREV_AR5211)
			AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
				AR5K_DCU_MISC_SEQNUM_CTL);

		if (tq->tqi_cbr_period) {
			ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_cbr_period,
				AR5K_QCU_CBRCFG_INTVAL) |
				AR5K_REG_SM(tq->tqi_cbr_overflow_limit,
				AR5K_QCU_CBRCFG_ORN_THRES),
				AR5K_QUEUE_CBRCFG(queue));
			AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
				AR5K_QCU_MISC_FRSHED_CBR);
			if (tq->tqi_cbr_overflow_limit)
				AR5K_REG_ENABLE_BITS(ah,
					AR5K_QUEUE_MISC(queue),
					AR5K_QCU_MISC_CBR_THRES_ENABLE);
		}

		if (tq->tqi_ready_time &&
				(tq->tqi_type != AR5K_TX_QUEUE_CAB))
			ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_ready_time,
				AR5K_QCU_RDYTIMECFG_INTVAL) |
				AR5K_QCU_RDYTIMECFG_ENABLE,
				AR5K_QUEUE_RDYTIMECFG(queue));

		if (tq->tqi_burst_time) {
			ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_burst_time,
				AR5K_DCU_CHAN_TIME_DUR) |
				AR5K_DCU_CHAN_TIME_ENABLE,
				AR5K_QUEUE_DFS_CHANNEL_TIME(queue));

			if (tq->tqi_flags
					& AR5K_TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE)
				AR5K_REG_ENABLE_BITS(ah,
					AR5K_QUEUE_MISC(queue),
					AR5K_QCU_MISC_RDY_VEOL_POLICY);
		}

		if (tq->tqi_flags & AR5K_TXQ_FLAG_BACKOFF_DISABLE)
			ath5k_hw_reg_write(ah, AR5K_DCU_MISC_POST_FR_BKOFF_DIS,
				AR5K_QUEUE_DFS_MISC(queue));

		if (tq->tqi_flags & AR5K_TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE)
			ath5k_hw_reg_write(ah, AR5K_DCU_MISC_BACKOFF_FRAG,
				AR5K_QUEUE_DFS_MISC(queue));

		/*
		 * Set registers by queue type
		 */
		switch (tq->tqi_type) {
		case AR5K_TX_QUEUE_BEACON:
			AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
				AR5K_QCU_MISC_FRSHED_DBA_GT |
				AR5K_QCU_MISC_CBREXP_BCN_DIS |
				AR5K_QCU_MISC_BCN_ENABLE);

			AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
				(AR5K_DCU_MISC_ARBLOCK_CTL_GLOBAL <<
				AR5K_DCU_MISC_ARBLOCK_CTL_S) |
				AR5K_DCU_MISC_ARBLOCK_IGNORE |
				AR5K_DCU_MISC_POST_FR_BKOFF_DIS |
				AR5K_DCU_MISC_BCN_ENABLE);
			break;

		case AR5K_TX_QUEUE_CAB:
			AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
				AR5K_QCU_MISC_FRSHED_BCN_SENT_GT |
				AR5K_QCU_MISC_CBREXP_DIS |
				AR5K_QCU_MISC_CBREXP_BCN_DIS);

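			/*
			 * Ready time for CAB traffic: roughly the beacon
			 * interval minus the software/DMA beacon response
			 * gap and the additional SWBA backoff. The
			 * AR5K_TUNE_* values appear to be in TU, so the
			 * multiplication by 1024 converts the result to
			 * microseconds.
			 */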
			ath5k_hw_reg_write(ah, ((AR5K_TUNE_BEACON_INTERVAL -
				(AR5K_TUNE_SW_BEACON_RESP -
				AR5K_TUNE_DMA_BEACON_RESP) -
				AR5K_TUNE_ADDITIONAL_SWBA_BACKOFF) * 1024) |
				AR5K_QCU_RDYTIMECFG_ENABLE,
				AR5K_QUEUE_RDYTIMECFG(queue));

			AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
				(AR5K_DCU_MISC_ARBLOCK_CTL_GLOBAL <<
				AR5K_DCU_MISC_ARBLOCK_CTL_S));
			break;

		case AR5K_TX_QUEUE_UAPSD:
			AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
				AR5K_QCU_MISC_CBREXP_DIS);
			break;

		case AR5K_TX_QUEUE_DATA:
		default:
			break;
		}

		/* TODO: Handle frame compression */

		/*
		 * Enable interrupts for this tx queue
		 * in the secondary interrupt mask registers
		 */
		if (tq->tqi_flags & AR5K_TXQ_FLAG_TXOKINT_ENABLE)
			AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txok, queue);

		if (tq->tqi_flags & AR5K_TXQ_FLAG_TXERRINT_ENABLE)
			AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txerr, queue);

		if (tq->tqi_flags & AR5K_TXQ_FLAG_TXURNINT_ENABLE)
			AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txurn, queue);

		if (tq->tqi_flags & AR5K_TXQ_FLAG_TXDESCINT_ENABLE)
			AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txdesc, queue);

		if (tq->tqi_flags & AR5K_TXQ_FLAG_TXEOLINT_ENABLE)
			AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txeol, queue);

		if (tq->tqi_flags & AR5K_TXQ_FLAG_CBRORNINT_ENABLE)
			AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_cbrorn, queue);

		if (tq->tqi_flags & AR5K_TXQ_FLAG_CBRURNINT_ENABLE)
			AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_cbrurn, queue);

		if (tq->tqi_flags & AR5K_TXQ_FLAG_QTRIGINT_ENABLE)
			AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_qtrig, queue);

		if (tq->tqi_flags & AR5K_TXQ_FLAG_TXNOFRMINT_ENABLE)
			AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_nofrm, queue);

		/* Update secondary interrupt mask registers */

		/* Filter out inactive queues */
		ah->ah_txq_imr_txok &= ah->ah_txq_status;
		ah->ah_txq_imr_txerr &= ah->ah_txq_status;
		ah->ah_txq_imr_txurn &= ah->ah_txq_status;
		ah->ah_txq_imr_txdesc &= ah->ah_txq_status;
		ah->ah_txq_imr_txeol &= ah->ah_txq_status;
		ah->ah_txq_imr_cbrorn &= ah->ah_txq_status;
		ah->ah_txq_imr_cbrurn &= ah->ah_txq_status;
		ah->ah_txq_imr_qtrig &= ah->ah_txq_status;
		ah->ah_txq_imr_nofrm &= ah->ah_txq_status;

		ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_txok,
			AR5K_SIMR0_QCU_TXOK) |
			AR5K_REG_SM(ah->ah_txq_imr_txdesc,
			AR5K_SIMR0_QCU_TXDESC), AR5K_SIMR0);
		ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_txerr,
			AR5K_SIMR1_QCU_TXERR) |
			AR5K_REG_SM(ah->ah_txq_imr_txeol,
			AR5K_SIMR1_QCU_TXEOL), AR5K_SIMR1);
		/* Update SIMR2 but don't overwrite the rest of its settings */
		AR5K_REG_DISABLE_BITS(ah, AR5K_SIMR2, AR5K_SIMR2_QCU_TXURN);
		AR5K_REG_ENABLE_BITS(ah, AR5K_SIMR2,
			AR5K_REG_SM(ah->ah_txq_imr_txurn,
			AR5K_SIMR2_QCU_TXURN));
		ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_cbrorn,
			AR5K_SIMR3_QCBRORN) |
			AR5K_REG_SM(ah->ah_txq_imr_cbrurn,
			AR5K_SIMR3_QCBRURN), AR5K_SIMR3);
		ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_qtrig,
			AR5K_SIMR4_QTRIG), AR5K_SIMR4);
		/* Set TXNOFRM_QCU for the queues with TXNOFRM enabled */
		ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_nofrm,
			AR5K_TXNOFRM_QCU), AR5K_TXNOFRM);
		/* If no queue has TXNOFRM enabled, disable the interrupt
		 * altogether by setting AR5K_TXNOFRM to zero */
		if (ah->ah_txq_imr_nofrm == 0)
			ath5k_hw_reg_write(ah, 0, AR5K_TXNOFRM);

		/* Set QCU mask for this DCU to save power */
		AR5K_REG_WRITE_Q(ah, AR5K_QUEUE_QCUMASK(queue), queue);
	}

	return 0;
}

/*
 * Get slot time from DCU
 */
unsigned int ath5k_hw_get_slot_time(struct ath5k_hw *ah)
{
	ATH5K_TRACE(ah->ah_sc);
	if (ah->ah_version == AR5K_AR5210)
		return ath5k_hw_clocktoh(ath5k_hw_reg_read(ah,
				AR5K_SLOT_TIME) & 0xffff, ah->ah_turbo);
	else
		return ath5k_hw_reg_read(ah, AR5K_DCU_GBL_IFS_SLOT) & 0xffff;
}

/*
 * Set slot time on DCU
 */
int ath5k_hw_set_slot_time(struct ath5k_hw *ah, unsigned int slot_time)
{
	ATH5K_TRACE(ah->ah_sc);
	if (slot_time < AR5K_SLOT_TIME_9 || slot_time > AR5K_SLOT_TIME_MAX)
		return -EINVAL;

	if (ah->ah_version == AR5K_AR5210)
		ath5k_hw_reg_write(ah, ath5k_hw_htoclock(slot_time,
				ah->ah_turbo), AR5K_SLOT_TIME);
	else
		ath5k_hw_reg_write(ah, slot_time, AR5K_DCU_GBL_IFS_SLOT);

	return 0;
}