/*
 * Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org>
 * Copyright (c) 2006-2008 Nick Kossifidis <mickflemm@gmail.com>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 */

/********************************************\
Queue Control Unit, DFS Control Unit Functions
\********************************************/

#include "ath5k.h"
#include "reg.h"
#include "debug.h"
#include "base.h"


/******************\
* Helper functions *
\******************/

/*
 * Get number of pending frames
 * for a specific queue [5211+]
 */
u32 ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue)
{
	u32 pending;
	AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);

	/* Return if queue is declared inactive */
	if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)
		return false;

	/* XXX: How about AR5K_CFG_TXCNT ? */
	if (ah->ah_version == AR5K_AR5210)
		return false;

	pending = ath5k_hw_reg_read(ah, AR5K_QUEUE_STATUS(queue));
	pending &= AR5K_QCU_STS_FRMPENDCNT;

	/* It's possible to have no frames pending even if TXE
	 * is set.
	 * To indicate that the queue has not stopped, return
	 * true */
	if (!pending && AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue))
		return true;

	return pending;
}

/*
 * Set a transmit queue inactive
 */
void ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue)
{
	if (WARN_ON(queue >= ah->ah_capabilities.cap_queues.q_tx_num))
		return;

	/* This queue will be skipped in further operations */
	ah->ah_txq[queue].tqi_type = AR5K_TX_QUEUE_INACTIVE;
	/* For SIMR setup */
	AR5K_Q_DISABLE_BITS(ah->ah_txq_status, queue);
}

/*
 * Make sure cw is a power of 2 minus 1 and smaller than 1024
 */
static u16 ath5k_cw_validate(u16 cw_req)
{
	u32 cw = 1;
	cw_req = min(cw_req, (u16)1023);

	while (cw < cw_req)
		cw = (cw << 1) | 1;

	return cw;
}

/*
 * Get properties for a transmit queue
 */
int ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue,
		struct ath5k_txq_info *queue_info)
{
	memcpy(queue_info, &ah->ah_txq[queue], sizeof(struct ath5k_txq_info));
	return 0;
}

/*
 * Set properties for a transmit queue
 */
int ath5k_hw_set_tx_queueprops(struct ath5k_hw *ah, int queue,
				const struct ath5k_txq_info *qinfo)
{
	struct ath5k_txq_info *qi;

	AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);

	qi = &ah->ah_txq[queue];

	if (qi->tqi_type == AR5K_TX_QUEUE_INACTIVE)
		return -EIO;

	/* copy and validate values */
	qi->tqi_type = qinfo->tqi_type;
	qi->tqi_subtype = qinfo->tqi_subtype;
	qi->tqi_flags = qinfo->tqi_flags;
	/*
	 * According to the docs: Although the AIFS field is 8 bit wide,
	 * the maximum supported value is 0xFC. Setting it higher than that
	 * will cause the DCU to hang.
	 */
	qi->tqi_aifs = min(qinfo->tqi_aifs, (u8)0xFC);
	qi->tqi_cw_min = ath5k_cw_validate(qinfo->tqi_cw_min);
	qi->tqi_cw_max = ath5k_cw_validate(qinfo->tqi_cw_max);
	qi->tqi_cbr_period = qinfo->tqi_cbr_period;
	qi->tqi_cbr_overflow_limit = qinfo->tqi_cbr_overflow_limit;
	qi->tqi_burst_time = qinfo->tqi_burst_time;
	qi->tqi_ready_time = qinfo->tqi_ready_time;

	/* XXX: Is this supported on 5210 ? */
	/* XXX: Is this correct for AR5K_WME_AC_VI,VO ??? */
	if ((qinfo->tqi_type == AR5K_TX_QUEUE_DATA &&
		((qinfo->tqi_subtype == AR5K_WME_AC_VI) ||
		 (qinfo->tqi_subtype == AR5K_WME_AC_VO))) ||
			qinfo->tqi_type == AR5K_TX_QUEUE_UAPSD)
		qi->tqi_flags |= AR5K_TXQ_FLAG_POST_FR_BKOFF_DIS;

	return 0;
}

/*
 * Initialize a transmit queue
 */
int ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, enum ath5k_tx_queue queue_type,
		struct ath5k_txq_info *queue_info)
{
	unsigned int queue;
	int ret;

	/*
	 * Get queue by type
	 */
	/* 5210 only has 2 queues */
	if (ah->ah_capabilities.cap_queues.q_tx_num == 2) {
		switch (queue_type) {
		case AR5K_TX_QUEUE_DATA:
			queue = AR5K_TX_QUEUE_ID_NOQCU_DATA;
			break;
		case AR5K_TX_QUEUE_BEACON:
		case AR5K_TX_QUEUE_CAB:
			queue = AR5K_TX_QUEUE_ID_NOQCU_BEACON;
			break;
		default:
			return -EINVAL;
		}
	} else {
		switch (queue_type) {
		case AR5K_TX_QUEUE_DATA:
			for (queue = AR5K_TX_QUEUE_ID_DATA_MIN;
				ah->ah_txq[queue].tqi_type !=
				AR5K_TX_QUEUE_INACTIVE; queue++) {

				if (queue > AR5K_TX_QUEUE_ID_DATA_MAX)
					return -EINVAL;
			}
			break;
		case AR5K_TX_QUEUE_UAPSD:
			queue = AR5K_TX_QUEUE_ID_UAPSD;
			break;
		case AR5K_TX_QUEUE_BEACON:
			queue = AR5K_TX_QUEUE_ID_BEACON;
			break;
		case AR5K_TX_QUEUE_CAB:
			queue = AR5K_TX_QUEUE_ID_CAB;
			break;
		case AR5K_TX_QUEUE_XR_DATA:
			if (ah->ah_version != AR5K_AR5212)
				ATH5K_ERR(ah->ah_sc,
					"XR data queues only supported in"
					" 5212!\n");
			queue = AR5K_TX_QUEUE_ID_XR_DATA;
			break;
		default:
			return -EINVAL;
		}
	}

	/*
	 * Setup internal queue structure
	 */
	memset(&ah->ah_txq[queue], 0, sizeof(struct ath5k_txq_info));
	ah->ah_txq[queue].tqi_type = queue_type;

	if (queue_info != NULL) {
		queue_info->tqi_type = queue_type;
		ret = ath5k_hw_set_tx_queueprops(ah, queue, queue_info);
		if (ret)
			return ret;
	}

	/*
	 * We use ah_txq_status to hold a temporary value for
	 * the secondary interrupt mask registers on 5211+;
	 * see ath5k_hw_reset_tx_queue.
	 */
	AR5K_Q_ENABLE_BITS(ah->ah_txq_status, queue);

	return queue;
}


/*******************************\
* Single QCU/DCU initialization *
\*******************************/

/*
 * Set tx retry limits on DCU
 */
void ath5k_hw_set_tx_retry_limits(struct ath5k_hw *ah,
				  unsigned int queue)
{
	/* Single data queue on AR5210 */
	if (ah->ah_version == AR5K_AR5210) {
		struct ath5k_txq_info *tq = &ah->ah_txq[queue];

		if (queue > 0)
			return;

		ath5k_hw_reg_write(ah,
			(tq->tqi_cw_min << AR5K_NODCU_RETRY_LMT_CW_MIN_S)
			| AR5K_REG_SM(ah->ah_retry_long,
				      AR5K_NODCU_RETRY_LMT_SLG_RETRY)
			| AR5K_REG_SM(ah->ah_retry_short,
				      AR5K_NODCU_RETRY_LMT_SSH_RETRY)
			| AR5K_REG_SM(ah->ah_retry_long,
				      AR5K_NODCU_RETRY_LMT_LG_RETRY)
			| AR5K_REG_SM(ah->ah_retry_short,
				      AR5K_NODCU_RETRY_LMT_SH_RETRY),
			AR5K_NODCU_RETRY_LMT);
	/* DCU on AR5211+ */
	} else {
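		/* Per-queue DCU retry limits: the long retry limit goes
		 * into both the RTS and station-RTS fields, while the
		 * station-data field gets the larger of the long/short
		 * limits (see the register write below). */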
		ath5k_hw_reg_write(ah,
			AR5K_REG_SM(ah->ah_retry_long,
				    AR5K_DCU_RETRY_LMT_RTS)
			| AR5K_REG_SM(ah->ah_retry_long,
				      AR5K_DCU_RETRY_LMT_STA_RTS)
			| AR5K_REG_SM(max(ah->ah_retry_long, ah->ah_retry_short),
				      AR5K_DCU_RETRY_LMT_STA_DATA),
			AR5K_QUEUE_DFS_RETRY_LIMIT(queue));
	}
}

/**
 * ath5k_hw_reset_tx_queue - Initialize a single hw queue
 *
 * @ah: The &struct ath5k_hw
 * @queue: The hw queue number
 *
 * Sets DFS properties for the given transmit queue on DCU
 * and configures all queue-specific parameters.
 */
int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
{
	struct ath5k_txq_info *tq;

	AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);

	tq = &ah->ah_txq[queue];

	/* Skip if queue inactive or if we are on AR5210
	 * that doesn't have QCU/DCU */
	if ((ah->ah_version == AR5K_AR5210) ||
	    (tq->tqi_type == AR5K_TX_QUEUE_INACTIVE))
		return 0;

	/*
	 * Set contention window (cw_min/cw_max)
	 * and arbitrated interframe space (aifs)...
	 */
	ath5k_hw_reg_write(ah,
		AR5K_REG_SM(tq->tqi_cw_min, AR5K_DCU_LCL_IFS_CW_MIN) |
		AR5K_REG_SM(tq->tqi_cw_max, AR5K_DCU_LCL_IFS_CW_MAX) |
		AR5K_REG_SM(tq->tqi_aifs, AR5K_DCU_LCL_IFS_AIFS),
		AR5K_QUEUE_DFS_LOCAL_IFS(queue));

	/*
	 * Set tx retry limits for this queue
	 */
	ath5k_hw_set_tx_retry_limits(ah, queue);


	/*
	 * Set misc registers
	 */

	/* Enable DCU to wait for next fragment from QCU */
	AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
				AR5K_DCU_MISC_FRAG_WAIT);

	/* On Maui and Spirit use the global seqnum on DCU */
	if (ah->ah_mac_version < AR5K_SREV_AR5211)
		AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
					AR5K_DCU_MISC_SEQNUM_CTL);

	/* Constant bit rate period */
	if (tq->tqi_cbr_period) {
		ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_cbr_period,
					AR5K_QCU_CBRCFG_INTVAL) |
					AR5K_REG_SM(tq->tqi_cbr_overflow_limit,
					AR5K_QCU_CBRCFG_ORN_THRES),
					AR5K_QUEUE_CBRCFG(queue));

		AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
					AR5K_QCU_MISC_FRSHED_CBR);

		if (tq->tqi_cbr_overflow_limit)
			AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
					AR5K_QCU_MISC_CBR_THRES_ENABLE);
	}

	/* Ready time interval */
	if (tq->tqi_ready_time && (tq->tqi_type != AR5K_TX_QUEUE_CAB))
		ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_ready_time,
					AR5K_QCU_RDYTIMECFG_INTVAL) |
					AR5K_QCU_RDYTIMECFG_ENABLE,
					AR5K_QUEUE_RDYTIMECFG(queue));

	if (tq->tqi_burst_time) {
		ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_burst_time,
					AR5K_DCU_CHAN_TIME_DUR) |
					AR5K_DCU_CHAN_TIME_ENABLE,
					AR5K_QUEUE_DFS_CHANNEL_TIME(queue));

		if (tq->tqi_flags & AR5K_TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE)
			AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
					AR5K_QCU_MISC_RDY_VEOL_POLICY);
	}

	/* Enable/disable Post frame backoff */
	if (tq->tqi_flags & AR5K_TXQ_FLAG_BACKOFF_DISABLE)
		ath5k_hw_reg_write(ah, AR5K_DCU_MISC_POST_FR_BKOFF_DIS,
					AR5K_QUEUE_DFS_MISC(queue));

	/* Enable/disable fragmentation burst backoff */
	if (tq->tqi_flags & AR5K_TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE)
		ath5k_hw_reg_write(ah, AR5K_DCU_MISC_BACKOFF_FRAG,
					AR5K_QUEUE_DFS_MISC(queue));

	/*
	 * Set registers by queue type
	 */
	switch (tq->tqi_type) {
	case AR5K_TX_QUEUE_BEACON:
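		/* Beacon queue: on the QCU side gate frame scheduling on
		 * the DMA beacon alert (DBA) timer and keep CBR expiry
		 * from blocking beacons; on the DCU side (further below)
		 * ignore arbitration/backoff so beacons go out on time. */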
		AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
				AR5K_QCU_MISC_FRSHED_DBA_GT |
				AR5K_QCU_MISC_CBREXP_BCN_DIS |
				AR5K_QCU_MISC_BCN_ENABLE);

		AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
				(AR5K_DCU_MISC_ARBLOCK_CTL_GLOBAL <<
				AR5K_DCU_MISC_ARBLOCK_CTL_S) |
				AR5K_DCU_MISC_ARBLOCK_IGNORE |
				AR5K_DCU_MISC_POST_FR_BKOFF_DIS |
				AR5K_DCU_MISC_BCN_ENABLE);
		break;

	case AR5K_TX_QUEUE_CAB:
		/* XXX: use BCN_SENT_GT, if we can figure out how */
		AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
				AR5K_QCU_MISC_FRSHED_DBA_GT |
				AR5K_QCU_MISC_CBREXP_DIS |
				AR5K_QCU_MISC_CBREXP_BCN_DIS);

		ath5k_hw_reg_write(ah, ((tq->tqi_ready_time -
				(AR5K_TUNE_SW_BEACON_RESP -
				AR5K_TUNE_DMA_BEACON_RESP) -
				AR5K_TUNE_ADDITIONAL_SWBA_BACKOFF) * 1024) |
				AR5K_QCU_RDYTIMECFG_ENABLE,
				AR5K_QUEUE_RDYTIMECFG(queue));

		AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
				(AR5K_DCU_MISC_ARBLOCK_CTL_GLOBAL <<
				AR5K_DCU_MISC_ARBLOCK_CTL_S));
		break;

	case AR5K_TX_QUEUE_UAPSD:
		AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
				AR5K_QCU_MISC_CBREXP_DIS);
		break;

	case AR5K_TX_QUEUE_DATA:
	default:
		break;
	}

	/* TODO: Handle frame compression */

	/*
	 * Enable interrupts for this tx queue
	 * in the secondary interrupt mask registers
	 */
	if (tq->tqi_flags & AR5K_TXQ_FLAG_TXOKINT_ENABLE)
		AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txok, queue);

	if (tq->tqi_flags & AR5K_TXQ_FLAG_TXERRINT_ENABLE)
		AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txerr, queue);

	if (tq->tqi_flags & AR5K_TXQ_FLAG_TXURNINT_ENABLE)
		AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txurn, queue);

	if (tq->tqi_flags & AR5K_TXQ_FLAG_TXDESCINT_ENABLE)
		AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txdesc, queue);

	if (tq->tqi_flags & AR5K_TXQ_FLAG_TXEOLINT_ENABLE)
		AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txeol, queue);

	if (tq->tqi_flags & AR5K_TXQ_FLAG_CBRORNINT_ENABLE)
		AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_cbrorn, queue);

	if (tq->tqi_flags & AR5K_TXQ_FLAG_CBRURNINT_ENABLE)
		AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_cbrurn, queue);

	if (tq->tqi_flags & AR5K_TXQ_FLAG_QTRIGINT_ENABLE)
		AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_qtrig, queue);

	if (tq->tqi_flags & AR5K_TXQ_FLAG_TXNOFRMINT_ENABLE)
		AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_nofrm, queue);

	/* Update secondary interrupt mask registers */

	/* Filter out inactive queues */
	ah->ah_txq_imr_txok &= ah->ah_txq_status;
	ah->ah_txq_imr_txerr &= ah->ah_txq_status;
	ah->ah_txq_imr_txurn &= ah->ah_txq_status;
	ah->ah_txq_imr_txdesc &= ah->ah_txq_status;
	ah->ah_txq_imr_txeol &= ah->ah_txq_status;
	ah->ah_txq_imr_cbrorn &= ah->ah_txq_status;
	ah->ah_txq_imr_cbrurn &= ah->ah_txq_status;
	ah->ah_txq_imr_qtrig &= ah->ah_txq_status;
	ah->ah_txq_imr_nofrm &= ah->ah_txq_status;

	ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_txok,
				AR5K_SIMR0_QCU_TXOK) |
				AR5K_REG_SM(ah->ah_txq_imr_txdesc,
				AR5K_SIMR0_QCU_TXDESC),
				AR5K_SIMR0);

	ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_txerr,
				AR5K_SIMR1_QCU_TXERR) |
				AR5K_REG_SM(ah->ah_txq_imr_txeol,
				AR5K_SIMR1_QCU_TXEOL),
				AR5K_SIMR1);

	/* Update SIMR2 but don't overwrite the rest of the SIMR2 settings */
	AR5K_REG_DISABLE_BITS(ah, AR5K_SIMR2, AR5K_SIMR2_QCU_TXURN);
	AR5K_REG_ENABLE_BITS(ah, AR5K_SIMR2,
				AR5K_REG_SM(ah->ah_txq_imr_txurn,
				AR5K_SIMR2_QCU_TXURN));

	ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_cbrorn,
				AR5K_SIMR3_QCBRORN) |
				AR5K_REG_SM(ah->ah_txq_imr_cbrurn,
				AR5K_SIMR3_QCBRURN),
				AR5K_SIMR3);

	ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_qtrig,
				AR5K_SIMR4_QTRIG), AR5K_SIMR4);

	/* Set TXNOFRM_QCU for the queues with TXNOFRM enabled */
	ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_nofrm,
				AR5K_TXNOFRM_QCU), AR5K_TXNOFRM);

	/* If no queue has TXNOFRM enabled, disable the interrupt
	 * by setting AR5K_TXNOFRM to zero */
	if (ah->ah_txq_imr_nofrm == 0)
		ath5k_hw_reg_write(ah, 0, AR5K_TXNOFRM);

	/* Set QCU mask for this DCU to save power */
	AR5K_REG_WRITE_Q(ah, AR5K_QUEUE_QCUMASK(queue), queue);

	return 0;
}


/**************************\
* Global QCU/DCU functions *
\**************************/

/**
 * ath5k_hw_set_ifs_intervals - Set global inter-frame spaces on DCU
 *
 * @ah: The &struct ath5k_hw
 * @slot_time: Slot time in us
 *
 * Sets the global IFS intervals on DCU (also works on AR5210) for
 * the given slot time and the current bwmode.
 */
int ath5k_hw_set_ifs_intervals(struct ath5k_hw *ah, unsigned int slot_time)
{
	struct ieee80211_channel *channel = ah->ah_current_channel;
	struct ath5k_softc *sc = ah->ah_sc;
	struct ieee80211_rate *rate;
	u32 ack_tx_time, eifs, eifs_clock, sifs, sifs_clock;
	u32 slot_time_clock = ath5k_hw_htoclock(ah, slot_time);

	if (slot_time < 6 || slot_time_clock > AR5K_SLOT_TIME_MAX)
		return -EINVAL;

	sifs = ath5k_hw_get_default_sifs(ah);
	sifs_clock = ath5k_hw_htoclock(ah, sifs);

	/* EIFS
	 * Txtime of ack at lowest rate + SIFS + DIFS
	 * (DIFS = SIFS + 2 * Slot time)
	 *
	 * Note: HAL has some predefined values for EIFS
	 * Turbo:   (37 + 2 * 6)
	 * Default: (74 + 2 * 9)
	 * Half:    (149 + 2 * 13)
	 * Quarter: (298 + 2 * 21)
	 *
	 * (74 + 2 * 6) for AR5210 default and turbo !
	 *
	 * According to the formula we have
	 * ack_tx_time = 25 for turbo and
	 * ack_tx_time = 42.5 * clock multiplier
	 * for default/half/quarter.
	 *
	 * This can't be right, 42 is what we would get
	 * from ath5k_hw_get_frame_dur_for_bwmode or
	 * ieee80211_generic_frame_duration for zero frame
	 * length and without SIFS !
	 *
	 * Also we have different lowest rate for 802.11a
	 */
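	/* Lowest rate is the first entry of the band's bitrate table
	 * (1 Mbit/s on 2.4 GHz, 6 Mbit/s on 5 GHz with the standard
	 * rate tables); 10 bytes is the length of an ACK frame
	 * without the FCS. */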
545 * 546 * Also we have different lowest rate for 802.11a 547 */ 548 if (channel->hw_value & CHANNEL_5GHZ) 549 rate = &sc->sbands[IEEE80211_BAND_5GHZ].bitrates[0]; 550 else 551 rate = &sc->sbands[IEEE80211_BAND_2GHZ].bitrates[0]; 552 553 ack_tx_time = ath5k_hw_get_frame_duration(ah, 10, rate); 554 555 /* ack_tx_time includes an SIFS already */ 556 eifs = ack_tx_time + sifs + 2 * slot_time; 557 eifs_clock = ath5k_hw_htoclock(ah, eifs); 558 559 /* Set IFS settings on AR5210 */ 560 if (ah->ah_version == AR5K_AR5210) { 561 u32 pifs, pifs_clock, difs, difs_clock; 562 563 /* Set slot time */ 564 ath5k_hw_reg_write(ah, slot_time_clock, AR5K_SLOT_TIME); 565 566 /* Set EIFS */ 567 eifs_clock = AR5K_REG_SM(eifs_clock, AR5K_IFS1_EIFS); 568 569 /* PIFS = Slot time + SIFS */ 570 pifs = slot_time + sifs; 571 pifs_clock = ath5k_hw_htoclock(ah, pifs); 572 pifs_clock = AR5K_REG_SM(pifs_clock, AR5K_IFS1_PIFS); 573 574 /* DIFS = SIFS + 2 * Slot time */ 575 difs = sifs + 2 * slot_time; 576 difs_clock = ath5k_hw_htoclock(ah, difs); 577 578 /* Set SIFS/DIFS */ 579 ath5k_hw_reg_write(ah, (difs_clock << 580 AR5K_IFS0_DIFS_S) | sifs_clock, 581 AR5K_IFS0); 582 583 /* Set PIFS/EIFS and preserve AR5K_INIT_CARR_SENSE_EN */ 584 ath5k_hw_reg_write(ah, pifs_clock | eifs_clock | 585 (AR5K_INIT_CARR_SENSE_EN << AR5K_IFS1_CS_EN_S), 586 AR5K_IFS1); 587 588 return 0; 589 } 590 591 /* Set IFS slot time */ 592 ath5k_hw_reg_write(ah, slot_time_clock, AR5K_DCU_GBL_IFS_SLOT); 593 594 /* Set EIFS interval */ 595 ath5k_hw_reg_write(ah, eifs_clock, AR5K_DCU_GBL_IFS_EIFS); 596 597 /* Set SIFS interval in usecs */ 598 AR5K_REG_WRITE_BITS(ah, AR5K_DCU_GBL_IFS_MISC, 599 AR5K_DCU_GBL_IFS_MISC_SIFS_DUR_USEC, 600 sifs); 601 602 /* Set SIFS interval in clock cycles */ 603 ath5k_hw_reg_write(ah, sifs_clock, AR5K_DCU_GBL_IFS_SIFS); 604 605 return 0; 606 } 607 608 609 int ath5k_hw_init_queues(struct ath5k_hw *ah) 610 { 611 int i, ret; 612 613 /* TODO: HW Compression support for data queues */ 614 /* TODO: Burst prefetch for data queues */ 615 616 /* 617 * Reset queues and start beacon timers at the end of the reset routine 618 * This also sets QCU mask on each DCU for 1:1 qcu to dcu mapping 619 * Note: If we want we can assign multiple qcus on one dcu. 620 */ 621 if (ah->ah_version != AR5K_AR5210) 622 for (i = 0; i < ah->ah_capabilities.cap_queues.q_tx_num; i++) { 623 ret = ath5k_hw_reset_tx_queue(ah, i); 624 if (ret) { 625 ATH5K_ERR(ah->ah_sc, 626 "failed to reset TX queue #%d\n", i); 627 return ret; 628 } 629 } 630 else 631 /* No QCU/DCU on AR5210, just set tx 632 * retry limits. We set IFS parameters 633 * on ath5k_hw_set_ifs_intervals */ 634 ath5k_hw_set_tx_retry_limits(ah, 0); 635 636 /* Set the turbo flag when operating on 40MHz */ 637 if (ah->ah_bwmode == AR5K_BWMODE_40MHZ) 638 AR5K_REG_ENABLE_BITS(ah, AR5K_DCU_GBL_IFS_MISC, 639 AR5K_DCU_GBL_IFS_MISC_TURBO_MODE); 640 641 /* If we didn't set IFS timings through 642 * ath5k_hw_set_coverage_class make sure 643 * we set them here */ 644 if (!ah->ah_coverage_class) { 645 unsigned int slot_time = ath5k_hw_get_default_slottime(ah); 646 ath5k_hw_set_ifs_intervals(ah, slot_time); 647 } 648 649 return 0; 650 } 651