/*
 * Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org>
 * Copyright (c) 2006-2008 Nick Kossifidis <mickflemm@gmail.com>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 */

/*************************************\
* DMA and interrupt masking functions *
\*************************************/

/**
 * DOC: DMA and interrupt masking functions
 *
 * Here we set up the descriptor pointers (RXDP/TXDP), start/stop the DMA
 * engine and handle queue setup for the 5210 chipset (the rest are handled
 * in qcu.c). We also set up the interrupt mask register (IMR) and read the
 * various interrupt status registers (ISR).
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "ath5k.h"
#include "reg.h"
#include "debug.h"


/*********\
* Receive *
\*********/

/**
 * ath5k_hw_start_rx_dma() - Start DMA receive
 * @ah: The &struct ath5k_hw
 */
void
ath5k_hw_start_rx_dma(struct ath5k_hw *ah)
{
        ath5k_hw_reg_write(ah, AR5K_CR_RXE, AR5K_CR);
        ath5k_hw_reg_read(ah, AR5K_CR);
}

/**
 * ath5k_hw_stop_rx_dma() - Stop DMA receive
 * @ah: The &struct ath5k_hw
 */
static int
ath5k_hw_stop_rx_dma(struct ath5k_hw *ah)
{
        unsigned int i;

        ath5k_hw_reg_write(ah, AR5K_CR_RXD, AR5K_CR);

        /*
         * It may take some time to disable the DMA receive unit
         */
        for (i = 1000; i > 0 &&
                        (ath5k_hw_reg_read(ah, AR5K_CR) & AR5K_CR_RXE) != 0;
                        i--)
                udelay(100);

        if (!i)
                ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
                                "failed to stop RX DMA !\n");

        return i ? 0 : -EBUSY;
}

/**
 * ath5k_hw_get_rxdp() - Get RX Descriptor's address
 * @ah: The &struct ath5k_hw
 */
u32
ath5k_hw_get_rxdp(struct ath5k_hw *ah)
{
        return ath5k_hw_reg_read(ah, AR5K_RXDP);
}

/**
 * ath5k_hw_set_rxdp() - Set RX Descriptor's address
 * @ah: The &struct ath5k_hw
 * @phys_addr: RX descriptor address
 *
 * Returns -EIO if rx is active
 */
int
ath5k_hw_set_rxdp(struct ath5k_hw *ah, u32 phys_addr)
{
        if (ath5k_hw_reg_read(ah, AR5K_CR) & AR5K_CR_RXE) {
                ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
                                "tried to set RXDP while rx was active !\n");
                return -EIO;
        }

        ath5k_hw_reg_write(ah, phys_addr, AR5K_RXDP);
        return 0;
}
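
/*
 * Example (illustrative sketch, not part of the driver): since
 * ath5k_hw_set_rxdp() refuses to move RXDP while the receive unit is
 * running, a caller re-arming reception follows a stop -> set -> start
 * order. "rxdp" here is a hypothetical dma_addr_t pointing to the head
 * of the driver's RX descriptor list.
 *
 *	if (ath5k_hw_stop_rx_dma(ah) == 0) {
 *		ath5k_hw_set_rxdp(ah, rxdp);
 *		ath5k_hw_start_rx_dma(ah);
 *	}
 */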

/**********\
* Transmit *
\**********/

/**
 * ath5k_hw_start_tx_dma() - Start DMA transmit for a specific queue
 * @ah: The &struct ath5k_hw
 * @queue: The hw queue number
 *
 * Start DMA transmit for a specific queue and, since the 5210 doesn't have
 * QCU/DCU, set up queue parameters for the 5210 here based on queue type
 * (one queue for normal data and one queue for beacons). For queue setup
 * on newer chips check out qcu.c. Returns -EINVAL if the queue number is
 * out of range or the queue is inactive, and -EIO if the queue is disabled.
 *
 * NOTE: Must be called after setting up the tx control descriptor for that
 * queue (see below).
 */
int
ath5k_hw_start_tx_dma(struct ath5k_hw *ah, unsigned int queue)
{
        u32 tx_queue;

        AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);

        /* Return if queue is declared inactive */
        if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)
                return -EINVAL;

        if (ah->ah_version == AR5K_AR5210) {
                tx_queue = ath5k_hw_reg_read(ah, AR5K_CR);

                /*
                 * Set the queue by type on 5210
                 */
                switch (ah->ah_txq[queue].tqi_type) {
                case AR5K_TX_QUEUE_DATA:
                        tx_queue |= AR5K_CR_TXE0 & ~AR5K_CR_TXD0;
                        break;
                case AR5K_TX_QUEUE_BEACON:
                        tx_queue |= AR5K_CR_TXE1 & ~AR5K_CR_TXD1;
                        ath5k_hw_reg_write(ah, AR5K_BCR_TQ1V | AR5K_BCR_BDMAE,
                                        AR5K_BSR);
                        break;
                case AR5K_TX_QUEUE_CAB:
                        tx_queue |= AR5K_CR_TXE1 & ~AR5K_CR_TXD1;
                        ath5k_hw_reg_write(ah, AR5K_BCR_TQ1FV | AR5K_BCR_TQ1V |
                                        AR5K_BCR_BDMAE, AR5K_BSR);
                        break;
                default:
                        return -EINVAL;
                }
                /* Start queue */
                ath5k_hw_reg_write(ah, tx_queue, AR5K_CR);
                ath5k_hw_reg_read(ah, AR5K_CR);
        } else {
                /* Return if queue is disabled */
                if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXD, queue))
                        return -EIO;

                /* Start queue */
                AR5K_REG_WRITE_Q(ah, AR5K_QCU_TXE, queue);
        }

        return 0;
}
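
/*
 * Example (illustrative sketch): per the NOTE above, TXDP must already
 * point to a valid tx control descriptor before the queue is started.
 * "txdp" is a hypothetical dma_addr_t for the queue's first descriptor.
 *
 *	ret = ath5k_hw_set_txdp(ah, queue, txdp);
 *	if (!ret)
 *		ret = ath5k_hw_start_tx_dma(ah, queue);
 */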

/**
 * ath5k_hw_stop_tx_dma() - Stop DMA transmit on a specific queue
 * @ah: The &struct ath5k_hw
 * @queue: The hw queue number
 *
 * Stop DMA transmit on a specific hw queue and drain the queue so we don't
 * have any pending frames. Returns -EBUSY if we still have pending frames,
 * -EINVAL if the queue number is out of range or the queue is inactive.
 */
static int
ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue)
{
        unsigned int i;
        u32 tx_queue, pending;

        AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);

        /* Return if queue is declared inactive */
        if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)
                return -EINVAL;

        if (ah->ah_version == AR5K_AR5210) {
                tx_queue = ath5k_hw_reg_read(ah, AR5K_CR);

                /*
                 * Set by queue type
                 */
                switch (ah->ah_txq[queue].tqi_type) {
                case AR5K_TX_QUEUE_DATA:
                        tx_queue |= AR5K_CR_TXD0 & ~AR5K_CR_TXE0;
                        break;
                case AR5K_TX_QUEUE_BEACON:
                case AR5K_TX_QUEUE_CAB:
                        /* XXX Fix me... */
                        tx_queue |= AR5K_CR_TXD1 & ~AR5K_CR_TXE1;
                        ath5k_hw_reg_write(ah, 0, AR5K_BSR);
                        break;
                default:
                        return -EINVAL;
                }

                /* Stop queue */
                ath5k_hw_reg_write(ah, tx_queue, AR5K_CR);
                ath5k_hw_reg_read(ah, AR5K_CR);
        } else {

                /*
                 * Enable DCU early termination to quickly
                 * flush any pending frames from QCU
                 */
                AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
                                AR5K_QCU_MISC_DCU_EARLY);

                /*
                 * Schedule TX disable and wait until queue is empty
                 */
                AR5K_REG_WRITE_Q(ah, AR5K_QCU_TXD, queue);

                /* Wait for queue to stop */
                for (i = 1000; i > 0 &&
                                (AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue) != 0);
                                i--)
                        udelay(100);

                if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue))
                        ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
                                        "queue %i didn't stop !\n", queue);

                /* Check for pending frames */
                i = 1000;
                do {
                        pending = ath5k_hw_reg_read(ah,
                                        AR5K_QUEUE_STATUS(queue)) &
                                        AR5K_QCU_STS_FRMPENDCNT;
                        udelay(100);
                } while (--i && pending);

                /* For 2413+ order PCU to drop packets using
                 * QUIET mechanism */
                if (ah->ah_mac_version >= (AR5K_SREV_AR2414 >> 4) &&
                                pending) {
                        /* Set periodicity and duration */
                        ath5k_hw_reg_write(ah,
                                AR5K_REG_SM(100, AR5K_QUIET_CTL2_QT_PER) |
                                AR5K_REG_SM(10, AR5K_QUIET_CTL2_QT_DUR),
                                AR5K_QUIET_CTL2);

                        /* Enable quiet period for current TSF */
                        ath5k_hw_reg_write(ah,
                                AR5K_QUIET_CTL1_QT_EN |
                                AR5K_REG_SM(ath5k_hw_reg_read(ah,
                                                AR5K_TSF_L32_5211) >> 10,
                                                AR5K_QUIET_CTL1_NEXT_QT_TSF),
                                AR5K_QUIET_CTL1);

                        /* Force channel idle high */
                        AR5K_REG_ENABLE_BITS(ah, AR5K_DIAG_SW_5211,
                                        AR5K_DIAG_SW_CHANNEL_IDLE_HIGH);

                        /* Wait a while and disable mechanism */
                        udelay(400);
                        AR5K_REG_DISABLE_BITS(ah, AR5K_QUIET_CTL1,
                                        AR5K_QUIET_CTL1_QT_EN);

                        /* Re-check for pending frames */
                        i = 100;
                        do {
                                pending = ath5k_hw_reg_read(ah,
                                                AR5K_QUEUE_STATUS(queue)) &
                                                AR5K_QCU_STS_FRMPENDCNT;
                                udelay(100);
                        } while (--i && pending);

                        AR5K_REG_DISABLE_BITS(ah, AR5K_DIAG_SW_5211,
                                        AR5K_DIAG_SW_CHANNEL_IDLE_HIGH);

                        if (pending)
                                ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
                                        "quiet mechanism didn't work q:%i !\n",
                                        queue);
                }

                /*
                 * Disable DCU early termination
                 */
                AR5K_REG_DISABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
                                AR5K_QCU_MISC_DCU_EARLY);

                /* Clear register */
                ath5k_hw_reg_write(ah, 0, AR5K_QCU_TXD);
                if (pending) {
                        ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
                                        "tx dma didn't stop (q:%i, frm:%i) !\n",
                                        queue, pending);
                        return -EBUSY;
                }
        }

        /* TODO: Check for success on 5210 else return error */
        return 0;
}

/**
 * ath5k_hw_stop_beacon_queue() - Stop beacon queue
 * @ah: The &struct ath5k_hw
 * @queue: The queue number
 *
 * Returns -EIO if the queue didn't stop
 */
int
ath5k_hw_stop_beacon_queue(struct ath5k_hw *ah, unsigned int queue)
{
        int ret;

        ret = ath5k_hw_stop_tx_dma(ah, queue);
        if (ret) {
                ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
                                "beacon queue didn't stop !\n");
                return -EIO;
        }
        return 0;
}
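
/*
 * Example (illustrative sketch): draining every hw queue, treating
 * -EINVAL (queue inactive) as harmless, the same way
 * ath5k_hw_dma_stop() at the bottom of this file does.
 *
 *	for (i = 0; i < ah->ah_capabilities.cap_queues.q_tx_num; i++) {
 *		ret = ath5k_hw_stop_tx_dma(ah, i);
 *		if (ret && ret != -EINVAL)
 *			return ret;
 *	}
 */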

/**
 * ath5k_hw_get_txdp() - Get TX Descriptor's address for a specific queue
 * @ah: The &struct ath5k_hw
 * @queue: The hw queue number
 *
 * Get the TX descriptor's address for a specific queue. On the 5210 we
 * ignore the queue number and use the tx queue type since we only have
 * 2 queues: TXDP0 for the normal data queue and TXDP1 for the beacon
 * queue. For newer chips with QCU/DCU we just read the corresponding
 * TXDP register.
 *
 * XXX: Is TXDP read and clear ?
 */
u32
ath5k_hw_get_txdp(struct ath5k_hw *ah, unsigned int queue)
{
        u16 tx_reg;

        AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);

        /*
         * Get the transmit queue descriptor pointer from the selected queue
         */
        /* 5210 doesn't have QCU */
        if (ah->ah_version == AR5K_AR5210) {
                switch (ah->ah_txq[queue].tqi_type) {
                case AR5K_TX_QUEUE_DATA:
                        tx_reg = AR5K_NOQCU_TXDP0;
                        break;
                case AR5K_TX_QUEUE_BEACON:
                case AR5K_TX_QUEUE_CAB:
                        tx_reg = AR5K_NOQCU_TXDP1;
                        break;
                default:
                        return 0xffffffff;
                }
        } else {
                tx_reg = AR5K_QUEUE_TXDP(queue);
        }

        return ath5k_hw_reg_read(ah, tx_reg);
}

/**
 * ath5k_hw_set_txdp() - Set TX Descriptor's address for a specific queue
 * @ah: The &struct ath5k_hw
 * @queue: The hw queue number
 * @phys_addr: The physical address
 *
 * Set the TX descriptor's address for a specific queue. On the 5210 we
 * ignore the queue number and use the tx queue type since we only have
 * 2 queues, so as above we use TXDP0 for the normal data queue and TXDP1
 * for the beacon queue. For newer chips with QCU/DCU we just set the
 * corresponding TXDP register. Returns -EINVAL if the queue type is
 * invalid for the 5210 and -EIO if the queue is still active.
 */
int
ath5k_hw_set_txdp(struct ath5k_hw *ah, unsigned int queue, u32 phys_addr)
{
        u16 tx_reg;

        AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);

        /*
         * Set the transmit queue descriptor pointer register by type
         * on 5210
         */
        if (ah->ah_version == AR5K_AR5210) {
                switch (ah->ah_txq[queue].tqi_type) {
                case AR5K_TX_QUEUE_DATA:
                        tx_reg = AR5K_NOQCU_TXDP0;
                        break;
                case AR5K_TX_QUEUE_BEACON:
                case AR5K_TX_QUEUE_CAB:
                        tx_reg = AR5K_NOQCU_TXDP1;
                        break;
                default:
                        return -EINVAL;
                }
        } else {
                /*
                 * Set the transmit queue descriptor pointer for
                 * the selected queue on QCU for 5211+
                 * (this won't work if the queue is still active)
                 */
                if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue))
                        return -EIO;

                tx_reg = AR5K_QUEUE_TXDP(queue);
        }

        /* Set descriptor pointer */
        ath5k_hw_reg_write(ah, phys_addr, tx_reg);

        return 0;
}
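
/*
 * Example (illustrative sketch): on 5211+ TXDP can't be moved while the
 * queue is active (see the -EIO case above), so a caller re-chaining
 * descriptors stops the queue first. "new_txdp" is a hypothetical
 * dma_addr_t.
 *
 *	if (ath5k_hw_stop_tx_dma(ah, queue) == 0)
 *		ath5k_hw_set_txdp(ah, queue, new_txdp);
 */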

/**
 * ath5k_hw_update_tx_triglevel() - Update tx trigger level
 * @ah: The &struct ath5k_hw
 * @increase: Flag to force increase of trigger level
 *
 * This function increases/decreases the tx trigger level for the tx fifo
 * buffer (aka FIFO threshold) that is used to indicate when the PCU flushes
 * the buffer and transmits its data. Lowering this value results in sending
 * small frames more quickly but can lead to tx underruns; raising it a lot
 * can result in other problems. Right now we start with the lowest possible
 * value (64 bytes) and if we get a tx underrun we increase it using the
 * increase flag. Returns -EIO if we have reached the maximum/minimum.
 *
 * XXX: Link this with tx DMA size ?
 * XXX2: Use it to save interrupts ?
 */
int
ath5k_hw_update_tx_triglevel(struct ath5k_hw *ah, bool increase)
{
        u32 trigger_level, imr;
        int ret = -EIO;

        /*
         * Disable interrupts by setting the mask
         */
        imr = ath5k_hw_set_imr(ah, ah->ah_imr & ~AR5K_INT_GLOBAL);

        trigger_level = AR5K_REG_MS(ath5k_hw_reg_read(ah, AR5K_TXCFG),
                        AR5K_TXCFG_TXFULL);

        if (!increase) {
                if (--trigger_level < AR5K_TUNE_MIN_TX_FIFO_THRES)
                        goto done;
        } else
                trigger_level +=
                        ((AR5K_TUNE_MAX_TX_FIFO_THRES - trigger_level) / 2);

        /*
         * Update trigger level on success
         */
        if (ah->ah_version == AR5K_AR5210)
                ath5k_hw_reg_write(ah, trigger_level, AR5K_TRIG_LVL);
        else
                AR5K_REG_WRITE_BITS(ah, AR5K_TXCFG,
                                AR5K_TXCFG_TXFULL, trigger_level);

        ret = 0;

done:
        /*
         * Restore interrupt mask
         */
        ath5k_hw_set_imr(ah, imr);

        return ret;
}
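
/*
 * Worked example of the adjustment above (threshold values assumed
 * purely for illustration): each increase halves the gap to the
 * maximum, so with an assumed max of 37 units and a current level of 8,
 * successive underruns move the level to 8 + (37 - 8) / 2 = 22, then to
 * 22 + (37 - 22) / 2 = 29, and so on, converging toward the maximum.
 * A decrease simply steps the level down by one until the minimum is
 * reached.
 */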


/*******************\
* Interrupt masking *
\*******************/

/**
 * ath5k_hw_is_intr_pending() - Check if we have pending interrupts
 * @ah: The &struct ath5k_hw
 *
 * Check if we have pending interrupts to process. Returns true if we
 * have pending interrupts and false if we haven't.
 */
bool
ath5k_hw_is_intr_pending(struct ath5k_hw *ah)
{
        return ath5k_hw_reg_read(ah, AR5K_INTPEND) == 1;
}

/**
 * ath5k_hw_get_isr() - Get interrupt status
 * @ah: The &struct ath5k_hw
 * @interrupt_mask: Driver's interrupt mask used to filter out
 *	interrupts in sw.
 *
 * This function is used inside our interrupt handler to determine the
 * reason for the interrupt by reading the Primary Interrupt Status
 * Register. Returns 0 on success or -ENODEV if the card is gone, and fills
 * @interrupt_mask with an abstract interrupt status mask which is mostly
 * ISR with some uncommon bits mapped to standard non hw-specific positions
 * (check out &ath5k_int).
 *
 * NOTE: We do write-to-clear, so the active PISR/SISR bits at the time this
 * function gets called are cleared on return.
 */
int
ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask)
{
        u32 data = 0;

        /*
         * Read interrupt status from the Primary Interrupt
         * Status Register.
         *
         * Note: PISR/SISR not available on 5210
         */
        if (ah->ah_version == AR5K_AR5210) {
                u32 isr;

                isr = ath5k_hw_reg_read(ah, AR5K_ISR);
                if (unlikely(isr == AR5K_INT_NOCARD)) {
                        *interrupt_mask = isr;
                        return -ENODEV;
                }

                /*
                 * Filter out the non-common bits from the interrupt
                 * status.
                 */
                *interrupt_mask = (isr & AR5K_INT_COMMON) & ah->ah_imr;

                /* Handle INT_FATAL */
                if (unlikely(isr & (AR5K_ISR_SSERR | AR5K_ISR_MCABT
                                                | AR5K_ISR_DPERR)))
                        *interrupt_mask |= AR5K_INT_FATAL;

                /*
                 * XXX: BMISS interrupts may occur after association.
                 * I found this on 5210 code but it needs testing. If this is
                 * true we should disable them before assoc and re-enable them
                 * after a successful assoc + some jiffies.
                interrupt_mask &= ~AR5K_INT_BMISS;
                 */

                data = isr;
        } else {
                u32 pisr, pisr_clear;
                u32 sisr0, sisr1, sisr2, sisr3, sisr4;

                /* Read PISR and SISRs... */
                pisr = ath5k_hw_reg_read(ah, AR5K_PISR);
                if (unlikely(pisr == AR5K_INT_NOCARD)) {
                        *interrupt_mask = pisr;
                        return -ENODEV;
                }

                sisr0 = ath5k_hw_reg_read(ah, AR5K_SISR0);
                sisr1 = ath5k_hw_reg_read(ah, AR5K_SISR1);
                sisr2 = ath5k_hw_reg_read(ah, AR5K_SISR2);
                sisr3 = ath5k_hw_reg_read(ah, AR5K_SISR3);
                sisr4 = ath5k_hw_reg_read(ah, AR5K_SISR4);

                /*
                 * PISR holds the logical OR of interrupt bits
                 * from the SISR registers:
                 *
                 * TXOK and TXDESC -> Logical OR of TXOK and TXDESC
                 *			per-queue bits on SISR0
                 *
                 * TXERR and TXEOL -> Logical OR of TXERR and TXEOL
                 *			per-queue bits on SISR1
                 *
                 * TXURN -> Logical OR of TXURN per-queue bits on SISR2
                 *
                 * HIUERR -> Logical OR of MCABT, SSERR and DPERR bits on
                 *		SISR2
                 *
                 * BCNMISC -> Logical OR of TIM, CAB_END, DTIM_SYNC,
                 *		BCN_TIMEOUT, CAB_TIMEOUT and DTIM
                 *		(and TSFOOR ?) bits on SISR2
                 *
                 * QCBRORN and QCBRURN -> Logical OR of QCBRORN and
                 *			QCBRURN per-queue bits on SISR3
                 *
                 * QTRIG -> Logical OR of QTRIG per-queue bits on SISR4
                 *
                 * If we clear these bits on PISR we'll also clear all the
                 * related bits from the SISRs, e.g. if we write the TXOK bit
                 * on PISR we'll clear all TXOK bits from SISR0; so if a new
                 * TXOK interrupt fired for another queue while we were
                 * reading the interrupt registers and we write back the TXOK
                 * bit on PISR we'll lose it. So make sure that we don't
                 * write back on PISR any bits that come from the SISRs.
                 * Clearing them from the SISRs will also clear PISR, so
                 * there's no need to worry here.
                 */

                /* XXX: There seems to be an issue on some cards
                 * with tx interrupt flags not being updated
                 * on PISR despite that all Tx interrupt bits
                 * are cleared on SISRs. Since we handle all
                 * Tx queues all together it shouldn't be an
                 * issue if we clear Tx interrupt flags also
                 * on PISR to avoid that.
                 */
                pisr_clear = (pisr & ~AR5K_ISR_BITS_FROM_SISRS) |
                                        (pisr & AR5K_INT_TX_ALL);

                /*
                 * Write to clear them...
                 * Note: This means that each bit we write back
                 * to the registers will get cleared, leaving the
                 * rest unaffected. So this won't affect new interrupts
                 * we didn't catch while reading/processing; we'll get
                 * them next time get_isr gets called.
                 */
                ath5k_hw_reg_write(ah, sisr0, AR5K_SISR0);
                ath5k_hw_reg_write(ah, sisr1, AR5K_SISR1);
                ath5k_hw_reg_write(ah, sisr2, AR5K_SISR2);
                ath5k_hw_reg_write(ah, sisr3, AR5K_SISR3);
                ath5k_hw_reg_write(ah, sisr4, AR5K_SISR4);
                ath5k_hw_reg_write(ah, pisr_clear, AR5K_PISR);
                /* Flush previous write */
                ath5k_hw_reg_read(ah, AR5K_PISR);

                /*
                 * Filter out the non-common bits from the interrupt
                 * status.
                 */
                *interrupt_mask = (pisr & AR5K_INT_COMMON) & ah->ah_imr;

                ah->ah_txq_isr_txok_all = 0;

                /* We treat TXOK, TXDESC, TXERR and TXEOL
                 * the same way (schedule the tx tasklet)
                 * so we track them all together per queue */
                if (pisr & AR5K_ISR_TXOK)
                        ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr0,
                                                AR5K_SISR0_QCU_TXOK);

                if (pisr & AR5K_ISR_TXDESC)
                        ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr0,
                                                AR5K_SISR0_QCU_TXDESC);

                if (pisr & AR5K_ISR_TXERR)
                        ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr1,
                                                AR5K_SISR1_QCU_TXERR);

                if (pisr & AR5K_ISR_TXEOL)
                        ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr1,
                                                AR5K_SISR1_QCU_TXEOL);

                /* Misc Beacon related interrupts */

                /* For AR5211 */
                if (pisr & AR5K_ISR_TIM)
                        *interrupt_mask |= AR5K_INT_TIM;

                /* For AR5212+ */
                if (pisr & AR5K_ISR_BCNMISC) {
                        if (sisr2 & AR5K_SISR2_TIM)
                                *interrupt_mask |= AR5K_INT_TIM;
                        if (sisr2 & AR5K_SISR2_DTIM)
                                *interrupt_mask |= AR5K_INT_DTIM;
                        if (sisr2 & AR5K_SISR2_DTIM_SYNC)
                                *interrupt_mask |= AR5K_INT_DTIM_SYNC;
                        if (sisr2 & AR5K_SISR2_BCN_TIMEOUT)
                                *interrupt_mask |= AR5K_INT_BCN_TIMEOUT;
                        if (sisr2 & AR5K_SISR2_CAB_TIMEOUT)
                                *interrupt_mask |= AR5K_INT_CAB_TIMEOUT;
                }

                /* The interrupts below are unlikely to happen */

                /* HIU = Host Interface Unit (PCI etc.);
                 * can be one of MCABT, SSERR, DPERR from SISR2 */
                if (unlikely(pisr & (AR5K_ISR_HIUERR)))
                        *interrupt_mask |= AR5K_INT_FATAL;

                /* Beacon Not Ready */
                if (unlikely(pisr & (AR5K_ISR_BNR)))
                        *interrupt_mask |= AR5K_INT_BNR;

                /* A queue got CBR overrun */
                if (unlikely(pisr & (AR5K_ISR_QCBRORN)))
                        *interrupt_mask |= AR5K_INT_QCBRORN;

                /* A queue got CBR underrun */
                if (unlikely(pisr & (AR5K_ISR_QCBRURN)))
                        *interrupt_mask |= AR5K_INT_QCBRURN;

                /* A queue got triggered */
                if (unlikely(pisr & (AR5K_ISR_QTRIG)))
                        *interrupt_mask |= AR5K_INT_QTRIG;

                data = pisr;
        }

        /*
         * In case we didn't handle anything,
         * print the register value.
         */
        if (unlikely(*interrupt_mask == 0 && net_ratelimit()))
                ATH5K_PRINTF("ISR: 0x%08x IMR: 0x%08x\n", data, ah->ah_imr);

        return 0;
}
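
/*
 * Example (illustrative sketch): a top-half interrupt handler built on
 * the two helpers above; ath5k_hw_get_isr() returns -ENODEV when the
 * card is gone. "ath5k_handle_ints" is a hypothetical stand-in for the
 * driver's actual per-bit processing.
 *
 *	enum ath5k_int status;
 *
 *	if (!ath5k_hw_is_intr_pending(ah))
 *		return IRQ_NONE;
 *
 *	if (ath5k_hw_get_isr(ah, &status))
 *		return IRQ_NONE;
 *
 *	ath5k_handle_ints(ah, status);
 *	return IRQ_HANDLED;
 */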

/**
 * ath5k_hw_set_imr() - Set interrupt mask
 * @ah: The &struct ath5k_hw
 * @new_mask: The new interrupt mask to be set
 *
 * Set the interrupt mask in hw to save interrupts. We do that by mapping
 * ath5k_int bits to hw-specific bits to remove abstraction and writing the
 * Interrupt Mask Register. Returns the old interrupt mask.
 */
enum ath5k_int
ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask)
{
        enum ath5k_int old_mask, int_mask;

        old_mask = ah->ah_imr;

        /*
         * Disable card interrupts to prevent any race conditions
         * (they will be re-enabled afterwards if AR5K_INT_GLOBAL
         * is set again on the new mask).
         */
        if (old_mask & AR5K_INT_GLOBAL) {
                ath5k_hw_reg_write(ah, AR5K_IER_DISABLE, AR5K_IER);
                ath5k_hw_reg_read(ah, AR5K_IER);
        }

        /*
         * Add additional, chipset-dependent interrupt mask flags
         * and write them to the IMR (interrupt mask register).
         */
        int_mask = new_mask & AR5K_INT_COMMON;

        if (ah->ah_version != AR5K_AR5210) {
                /* Preserve per queue TXURN interrupt mask */
                u32 simr2 = ath5k_hw_reg_read(ah, AR5K_SIMR2)
                                & AR5K_SIMR2_QCU_TXURN;

                /* Fatal interrupt abstraction for 5211+ */
                if (new_mask & AR5K_INT_FATAL) {
                        int_mask |= AR5K_IMR_HIUERR;
                        simr2 |= (AR5K_SIMR2_MCABT | AR5K_SIMR2_SSERR
                                | AR5K_SIMR2_DPERR);
                }

                /* Misc beacon related interrupts */
                if (new_mask & AR5K_INT_TIM)
                        int_mask |= AR5K_IMR_TIM;

                if (new_mask & AR5K_INT_TIM)
                        simr2 |= AR5K_SISR2_TIM;
                if (new_mask & AR5K_INT_DTIM)
                        simr2 |= AR5K_SISR2_DTIM;
                if (new_mask & AR5K_INT_DTIM_SYNC)
                        simr2 |= AR5K_SISR2_DTIM_SYNC;
                if (new_mask & AR5K_INT_BCN_TIMEOUT)
                        simr2 |= AR5K_SISR2_BCN_TIMEOUT;
                if (new_mask & AR5K_INT_CAB_TIMEOUT)
                        simr2 |= AR5K_SISR2_CAB_TIMEOUT;

                /* Beacon Not Ready */
                if (new_mask & AR5K_INT_BNR)
                        int_mask |= AR5K_INT_BNR;

                /* Note: Per queue interrupt masks
                 * are set via ath5k_hw_reset_tx_queue() (qcu.c) */
                ath5k_hw_reg_write(ah, int_mask, AR5K_PIMR);
                ath5k_hw_reg_write(ah, simr2, AR5K_SIMR2);

        } else {
                /* Fatal interrupt abstraction for 5210 */
                if (new_mask & AR5K_INT_FATAL)
                        int_mask |= (AR5K_IMR_SSERR | AR5K_IMR_MCABT
                                | AR5K_IMR_HIUERR | AR5K_IMR_DPERR);

                /* Only common interrupts left for 5210 (no SIMRs) */
                ath5k_hw_reg_write(ah, int_mask, AR5K_IMR);
        }

        /* If the RXNOFRM interrupt is masked, disable it
         * by setting AR5K_RXNOFRM to zero */
        if (!(new_mask & AR5K_INT_RXNOFRM))
                ath5k_hw_reg_write(ah, 0, AR5K_RXNOFRM);

        /* Store new interrupt mask */
        ah->ah_imr = new_mask;

        /* ..re-enable interrupts if AR5K_INT_GLOBAL is set */
        if (new_mask & AR5K_INT_GLOBAL) {
                ath5k_hw_reg_write(ah, AR5K_IER_ENABLE, AR5K_IER);
                ath5k_hw_reg_read(ah, AR5K_IER);
        }

        return old_mask;
}
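
/*
 * Example (illustrative sketch): since ath5k_hw_set_imr() returns the
 * old mask, it composes naturally into a save/modify/restore pattern
 * around code that must run with interrupts masked, exactly as
 * ath5k_hw_update_tx_triglevel() above does:
 *
 *	old = ath5k_hw_set_imr(ah, ah->ah_imr & ~AR5K_INT_GLOBAL);
 *	... touch registers with interrupts masked ...
 *	ath5k_hw_set_imr(ah, old);
 */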


/********************\
 Init/Stop functions
\********************/

/**
 * ath5k_hw_dma_init() - Initialize DMA unit
 * @ah: The &struct ath5k_hw
 *
 * Set DMA size and pre-enable interrupts
 * (the driver handles tx/rx buffer setup and
 * dma start/stop)
 *
 * XXX: Save/restore RXDP/TXDP registers ?
 */
void
ath5k_hw_dma_init(struct ath5k_hw *ah)
{
        /*
         * Set Rx/Tx DMA Configuration
         *
         * Set standard DMA size (128). Note that
         * a DMA size of 512 causes rx overruns and tx errors
         * on pci-e cards (tested on 5424 but since rx overruns
         * also occur on 5416/5418 with madwifi we set 128
         * for all PCI-E cards to be safe).
         *
         * XXX: need to check 5210 for this
         * TODO: Check out tx trigger level, it's always 64 on dumps but I
         * guess we can tweak it and see how it goes ;-)
         */
        if (ah->ah_version != AR5K_AR5210) {
                AR5K_REG_WRITE_BITS(ah, AR5K_TXCFG,
                        AR5K_TXCFG_SDMAMR, AR5K_DMASIZE_128B);
                AR5K_REG_WRITE_BITS(ah, AR5K_RXCFG,
                        AR5K_RXCFG_SDMAMW, AR5K_DMASIZE_128B);
        }

        /* Pre-enable interrupts on 5211/5212 */
        if (ah->ah_version != AR5K_AR5210)
                ath5k_hw_set_imr(ah, ah->ah_imr);
}
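
/*
 * Example (illustrative sketch): typical bring-up order as seen from
 * the driver side, assuming a chip reset has already run. "rxdp" is a
 * hypothetical dma_addr_t for the head of the RX descriptor list.
 *
 *	ath5k_hw_dma_init(ah);
 *	ath5k_hw_set_rxdp(ah, rxdp);
 *	ath5k_hw_start_rx_dma(ah);
 */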

/**
 * ath5k_hw_dma_stop() - Stop DMA unit
 * @ah: The &struct ath5k_hw
 *
 * Stop tx/rx DMA and interrupts. Returns
 * -EBUSY if tx or rx dma failed to stop.
 *
 * XXX: Sometimes the DMA unit hangs and we have
 * stuck frames on tx queues; only a reset
 * can fix that.
 */
int
ath5k_hw_dma_stop(struct ath5k_hw *ah)
{
        int i, qmax, err;

        /* Disable interrupts */
        ath5k_hw_set_imr(ah, 0);

        /* Stop rx dma */
        err = ath5k_hw_stop_rx_dma(ah);
        if (err)
                return err;

        /* Clear any pending interrupts
         * and disable tx dma */
        if (ah->ah_version != AR5K_AR5210) {
                ath5k_hw_reg_write(ah, 0xffffffff, AR5K_PISR);
                qmax = AR5K_NUM_TX_QUEUES;
        } else {
                /* PISR/SISR not available on 5210 */
                ath5k_hw_reg_read(ah, AR5K_ISR);
                qmax = AR5K_NUM_TX_QUEUES_NOQCU;
        }

        for (i = 0; i < qmax; i++) {
                err = ath5k_hw_stop_tx_dma(ah, i);
                /* -EINVAL -> queue inactive */
                if (err && err != -EINVAL)
                        return err;
        }

        return 0;
}
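
/*
 * Example (illustrative sketch): as the XXX above notes, a hung DMA
 * unit can only be recovered by a chip reset, so a caller would treat
 * -EBUSY as "reset required". "ath5k_queue_reset_work" is a
 * hypothetical driver helper, not an existing function.
 *
 *	if (ath5k_hw_dma_stop(ah) == -EBUSY)
 *		ath5k_queue_reset_work(ah);
 */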