/*
 * Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org>
 * Copyright (c) 2006-2008 Nick Kossifidis <mickflemm@gmail.com>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 */

/*************************************\
* DMA and interrupt masking functions *
\*************************************/

/**
 * DOC: DMA and interrupt masking functions
 *
 * Here we set up the descriptor pointers (rxdp/txdp), start/stop the DMA
 * engine and handle queue setup for the 5210 chipset (the rest is handled
 * in qcu.c). We also set up the interrupt mask register (IMR) and read the
 * various interrupt status registers (ISR).
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "ath5k.h"
#include "reg.h"
#include "debug.h"


/*********\
* Receive *
\*********/

/**
 * ath5k_hw_start_rx_dma() - Start DMA receive
 * @ah: The &struct ath5k_hw
 */
void
ath5k_hw_start_rx_dma(struct ath5k_hw *ah)
{
        ath5k_hw_reg_write(ah, AR5K_CR_RXE, AR5K_CR);
        ath5k_hw_reg_read(ah, AR5K_CR);
}

/**
 * ath5k_hw_stop_rx_dma() - Stop DMA receive
 * @ah: The &struct ath5k_hw
 */
static int
ath5k_hw_stop_rx_dma(struct ath5k_hw *ah)
{
        unsigned int i;

        ath5k_hw_reg_write(ah, AR5K_CR_RXD, AR5K_CR);

        /*
         * It may take some time to disable the DMA receive unit
         */
        for (i = 1000; i > 0 &&
                        (ath5k_hw_reg_read(ah, AR5K_CR) & AR5K_CR_RXE) != 0;
                        i--)
                udelay(100);

        if (!i)
                ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
                                "failed to stop RX DMA !\n");

        return i ? 0 : -EBUSY;
}

/**
 * ath5k_hw_get_rxdp() - Get RX Descriptor's address
 * @ah: The &struct ath5k_hw
 */
u32
ath5k_hw_get_rxdp(struct ath5k_hw *ah)
{
        return ath5k_hw_reg_read(ah, AR5K_RXDP);
}

/**
 * ath5k_hw_set_rxdp() - Set RX Descriptor's address
 * @ah: The &struct ath5k_hw
 * @phys_addr: RX descriptor address
 *
 * Returns -EIO if rx is active
 */
int
ath5k_hw_set_rxdp(struct ath5k_hw *ah, u32 phys_addr)
{
        if (ath5k_hw_reg_read(ah, AR5K_CR) & AR5K_CR_RXE) {
                ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
                                "tried to set RXDP while rx was active !\n");
                return -EIO;
        }

        ath5k_hw_reg_write(ah, phys_addr, AR5K_RXDP);
        return 0;
}
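
/*
 * Illustrative sketch, not part of the driver: the usual rx bring-up
 * order is to program RXDP while rx is stopped and only then enable
 * the receive unit, since ath5k_hw_set_rxdp() refuses to run while
 * AR5K_CR_RXE is set. "first_rxdesc_dma_addr" is a hypothetical name
 * for the DMA address of the first rx descriptor:
 *
 *      if (ath5k_hw_set_rxdp(ah, first_rxdesc_dma_addr) == 0)
 *              ath5k_hw_start_rx_dma(ah);
 */
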
/**********\
* Transmit *
\**********/

/**
 * ath5k_hw_start_tx_dma() - Start DMA transmit for a specific queue
 * @ah: The &struct ath5k_hw
 * @queue: The hw queue number
 *
 * Start DMA transmit for a specific queue and, since the 5210 doesn't have
 * a QCU/DCU, also set up queue parameters for the 5210 here based on queue
 * type (one queue for normal data and one queue for beacons). For queue
 * setup on newer chips check out qcu.c. Returns -EINVAL if the queue number
 * is out of range or the queue is inactive, and -EIO if the queue is
 * already disabled.
 *
 * NOTE: Must be called after setting up the tx control descriptor for that
 * queue (see below).
 */
int
ath5k_hw_start_tx_dma(struct ath5k_hw *ah, unsigned int queue)
{
        u32 tx_queue;

        AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);

        /* Return if queue is declared inactive */
        if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)
                return -EINVAL;

        if (ah->ah_version == AR5K_AR5210) {
                tx_queue = ath5k_hw_reg_read(ah, AR5K_CR);

                /*
                 * Set the queue by type on 5210
                 */
                switch (ah->ah_txq[queue].tqi_type) {
                case AR5K_TX_QUEUE_DATA:
                        tx_queue |= AR5K_CR_TXE0 & ~AR5K_CR_TXD0;
                        break;
                case AR5K_TX_QUEUE_BEACON:
                        tx_queue |= AR5K_CR_TXE1 & ~AR5K_CR_TXD1;
                        ath5k_hw_reg_write(ah, AR5K_BCR_TQ1V | AR5K_BCR_BDMAE,
                                        AR5K_BSR);
                        break;
                case AR5K_TX_QUEUE_CAB:
                        tx_queue |= AR5K_CR_TXE1 & ~AR5K_CR_TXD1;
                        ath5k_hw_reg_write(ah, AR5K_BCR_TQ1FV | AR5K_BCR_TQ1V |
                                        AR5K_BCR_BDMAE, AR5K_BSR);
                        break;
                default:
                        return -EINVAL;
                }
                /* Start queue */
                ath5k_hw_reg_write(ah, tx_queue, AR5K_CR);
                ath5k_hw_reg_read(ah, AR5K_CR);
        } else {
                /* Return if queue is disabled */
                if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXD, queue))
                        return -EIO;

                /* Start queue */
                AR5K_REG_WRITE_Q(ah, AR5K_QCU_TXE, queue);
        }

        return 0;
}
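
/*
 * Illustrative sketch, not part of the driver: per the NOTE above, the
 * tx descriptor pointer must be programmed before the queue is started.
 * "first_txdesc_dma_addr" is a hypothetical name for the DMA address of
 * the queue's first tx descriptor (see ath5k_hw_set_txdp() below):
 *
 *      ret = ath5k_hw_set_txdp(ah, queue, first_txdesc_dma_addr);
 *      if (!ret)
 *              ret = ath5k_hw_start_tx_dma(ah, queue);
 */
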
/**
 * ath5k_hw_stop_tx_dma() - Stop DMA transmit on a specific queue
 * @ah: The &struct ath5k_hw
 * @queue: The hw queue number
 *
 * Stop DMA transmit on a specific hw queue and drain the queue so we don't
 * have any pending frames. Returns -EBUSY if we still have pending frames,
 * -EINVAL if the queue number is out of range or the queue is inactive.
 */
static int
ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue)
{
        unsigned int i = 40;
        u32 tx_queue, pending;

        AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);

        /* Return if queue is declared inactive */
        if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)
                return -EINVAL;

        if (ah->ah_version == AR5K_AR5210) {
                tx_queue = ath5k_hw_reg_read(ah, AR5K_CR);

                /*
                 * Set by queue type
                 */
                switch (ah->ah_txq[queue].tqi_type) {
                case AR5K_TX_QUEUE_DATA:
                        tx_queue |= AR5K_CR_TXD0 & ~AR5K_CR_TXE0;
                        break;
                case AR5K_TX_QUEUE_BEACON:
                case AR5K_TX_QUEUE_CAB:
                        /* XXX Fix me... */
                        tx_queue |= AR5K_CR_TXD1 & ~AR5K_CR_TXE1;
                        ath5k_hw_reg_write(ah, 0, AR5K_BSR);
                        break;
                default:
                        return -EINVAL;
                }

                /* Stop queue */
                ath5k_hw_reg_write(ah, tx_queue, AR5K_CR);
                ath5k_hw_reg_read(ah, AR5K_CR);
        } else {

                /*
                 * Enable DCU early termination to quickly
                 * flush any pending frames from QCU
                 */
                AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
                                AR5K_QCU_MISC_DCU_EARLY);

                /*
                 * Schedule TX disable and wait until queue is empty
                 */
                AR5K_REG_WRITE_Q(ah, AR5K_QCU_TXD, queue);

                /* Wait for queue to stop */
                for (i = 1000; i > 0 &&
                                (AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue) != 0);
                                i--)
                        udelay(100);

                if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue))
                        ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
                                        "queue %i didn't stop !\n", queue);

                /* Check for pending frames */
                i = 1000;
                do {
                        pending = ath5k_hw_reg_read(ah,
                                        AR5K_QUEUE_STATUS(queue)) &
                                        AR5K_QCU_STS_FRMPENDCNT;
                        udelay(100);
                } while (--i && pending);

                /* For 2413+, order the PCU to drop packets using
                 * the QUIET mechanism */
                if (ah->ah_mac_version >= (AR5K_SREV_AR2414 >> 4) &&
                    pending) {
                        /* Set periodicity and duration */
                        ath5k_hw_reg_write(ah,
                                AR5K_REG_SM(100, AR5K_QUIET_CTL2_QT_PER) |
                                AR5K_REG_SM(10, AR5K_QUIET_CTL2_QT_DUR),
                                AR5K_QUIET_CTL2);

                        /* Enable quiet period for current TSF */
                        ath5k_hw_reg_write(ah,
                                AR5K_QUIET_CTL1_QT_EN |
                                AR5K_REG_SM(ath5k_hw_reg_read(ah,
                                        AR5K_TSF_L32_5211) >> 10,
                                        AR5K_QUIET_CTL1_NEXT_QT_TSF),
                                AR5K_QUIET_CTL1);

                        /* Force channel idle high */
                        AR5K_REG_ENABLE_BITS(ah, AR5K_DIAG_SW_5211,
                                AR5K_DIAG_SW_CHANNEL_IDLE_HIGH);

                        /* Wait a while and disable mechanism */
                        udelay(400);
                        AR5K_REG_DISABLE_BITS(ah, AR5K_QUIET_CTL1,
                                AR5K_QUIET_CTL1_QT_EN);

                        /* Re-check for pending frames */
                        i = 100;
                        do {
                                pending = ath5k_hw_reg_read(ah,
                                        AR5K_QUEUE_STATUS(queue)) &
                                        AR5K_QCU_STS_FRMPENDCNT;
                                udelay(100);
                        } while (--i && pending);

                        AR5K_REG_DISABLE_BITS(ah, AR5K_DIAG_SW_5211,
                                AR5K_DIAG_SW_CHANNEL_IDLE_HIGH);

                        if (pending)
                                ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
                                        "quiet mechanism didn't work q:%i !\n",
                                        queue);
                }

                /*
                 * Disable DCU early termination
                 */
                AR5K_REG_DISABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
                                AR5K_QCU_MISC_DCU_EARLY);

                /* Clear register */
                ath5k_hw_reg_write(ah, 0, AR5K_QCU_TXD);
                if (pending) {
                        ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
                                        "tx dma didn't stop (q:%i, frm:%i) !\n",
                                        queue, pending);
                        return -EBUSY;
                }
        }

        /* TODO: Check for success on 5210 else return error */
        return 0;
}

/**
 * ath5k_hw_stop_beacon_queue() - Stop beacon queue
 * @ah: The &struct ath5k_hw
 * @queue: The queue number
 *
 * Returns -EIO if queue didn't stop
 */
int
ath5k_hw_stop_beacon_queue(struct ath5k_hw *ah, unsigned int queue)
{
        int ret;
        ret = ath5k_hw_stop_tx_dma(ah, queue);
        if (ret) {
                ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
                                "beacon queue didn't stop !\n");
                return -EIO;
        }
        return 0;
}
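
/*
 * Illustrative sketch, not part of the driver: a beacon update typically
 * drains the beacon queue first so the old beacon frame isn't DMA'd while
 * it is being replaced. "bhalq" and "new_beacon_desc_dma_addr" are
 * hypothetical names for the hw queue number allocated for beacons and
 * the new beacon descriptor's DMA address:
 *
 *      ath5k_hw_stop_beacon_queue(ah, bhalq);
 *      ... swap in the new beacon descriptor ...
 *      ath5k_hw_set_txdp(ah, bhalq, new_beacon_desc_dma_addr);
 */
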
/**
 * ath5k_hw_get_txdp() - Get TX Descriptor's address for a specific queue
 * @ah: The &struct ath5k_hw
 * @queue: The hw queue number
 *
 * Get TX descriptor's address for a specific queue. For the 5210 we ignore
 * the queue number and use the tx queue type since we only have 2 queues:
 * TXDP0 for the normal data queue and TXDP1 for the beacon queue. For newer
 * chips with QCU/DCU we just read the corresponding TXDP register.
 *
 * XXX: Is TXDP read and clear ?
 */
u32
ath5k_hw_get_txdp(struct ath5k_hw *ah, unsigned int queue)
{
        u16 tx_reg;

        AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);

        /*
         * Get the transmit queue descriptor pointer from the selected queue
         */
        /* 5210 doesn't have QCU */
        if (ah->ah_version == AR5K_AR5210) {
                switch (ah->ah_txq[queue].tqi_type) {
                case AR5K_TX_QUEUE_DATA:
                        tx_reg = AR5K_NOQCU_TXDP0;
                        break;
                case AR5K_TX_QUEUE_BEACON:
                case AR5K_TX_QUEUE_CAB:
                        tx_reg = AR5K_NOQCU_TXDP1;
                        break;
                default:
                        return 0xffffffff;
                }
        } else {
                tx_reg = AR5K_QUEUE_TXDP(queue);
        }

        return ath5k_hw_reg_read(ah, tx_reg);
}

/**
 * ath5k_hw_set_txdp() - Set TX Descriptor's address for a specific queue
 * @ah: The &struct ath5k_hw
 * @queue: The hw queue number
 * @phys_addr: The physical address
 *
 * Set TX descriptor's address for a specific queue. For the 5210 we ignore
 * the queue number and use the tx queue type since we only have 2 queues:
 * as above, TXDP0 for the normal data queue and TXDP1 for the beacon queue.
 * For newer chips with QCU/DCU we just set the corresponding TXDP register.
 * Returns -EINVAL if the queue type is invalid for 5210 and -EIO if the
 * queue is still active.
 */
int
ath5k_hw_set_txdp(struct ath5k_hw *ah, unsigned int queue, u32 phys_addr)
{
        u16 tx_reg;

        AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);

        /*
         * Set the transmit queue descriptor pointer register by type
         * on 5210
         */
        if (ah->ah_version == AR5K_AR5210) {
                switch (ah->ah_txq[queue].tqi_type) {
                case AR5K_TX_QUEUE_DATA:
                        tx_reg = AR5K_NOQCU_TXDP0;
                        break;
                case AR5K_TX_QUEUE_BEACON:
                case AR5K_TX_QUEUE_CAB:
                        tx_reg = AR5K_NOQCU_TXDP1;
                        break;
                default:
                        return -EINVAL;
                }
        } else {
                /*
                 * Set the transmit queue descriptor pointer for
                 * the selected queue on QCU for 5211+
                 * (this won't work if the queue is still active)
                 */
                if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue))
                        return -EIO;

                tx_reg = AR5K_QUEUE_TXDP(queue);
        }

        /* Set descriptor pointer */
        ath5k_hw_reg_write(ah, phys_addr, tx_reg);

        return 0;
}
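
/*
 * Illustrative sketch, not part of the driver: on 5211+ TXDP can only be
 * (re)programmed while the queue is stopped, so a typical reprogramming
 * sequence stops the queue first and treats -EIO as "still active".
 * "new_txdesc_dma_addr" is a hypothetical descriptor address:
 *
 *      if (ath5k_hw_stop_tx_dma(ah, queue) == 0 &&
 *          ath5k_hw_set_txdp(ah, queue, new_txdesc_dma_addr) == 0)
 *              ath5k_hw_start_tx_dma(ah, queue);
 */
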
/**
 * ath5k_hw_update_tx_triglevel() - Update tx trigger level
 * @ah: The &struct ath5k_hw
 * @increase: Flag to force increase of trigger level
 *
 * This function increases/decreases the tx trigger level for the tx fifo
 * buffer (aka FIFO threshold) that is used to indicate when the PCU flushes
 * the buffer and transmits its data. Lowering this results in sending small
 * frames more quickly but can lead to tx underruns; raising it a lot can
 * cause other problems. Right now we start with the lowest possible value
 * (64 bytes) and increase it using the increase flag whenever we get a tx
 * underrun. Returns -EIO if we have reached the maximum/minimum.
 *
 * XXX: Link this with tx DMA size ?
 * XXX2: Use it to save interrupts ?
 */
int
ath5k_hw_update_tx_triglevel(struct ath5k_hw *ah, bool increase)
{
        u32 trigger_level, imr;
        int ret = -EIO;

        /*
         * Disable interrupts by clearing AR5K_INT_GLOBAL on the mask
         */
        imr = ath5k_hw_set_imr(ah, ah->ah_imr & ~AR5K_INT_GLOBAL);

        trigger_level = AR5K_REG_MS(ath5k_hw_reg_read(ah, AR5K_TXCFG),
                        AR5K_TXCFG_TXFULL);

        if (!increase) {
                if (--trigger_level < AR5K_TUNE_MIN_TX_FIFO_THRES)
                        goto done;
        } else
                trigger_level +=
                        ((AR5K_TUNE_MAX_TX_FIFO_THRES - trigger_level) / 2);

        /*
         * Update trigger level on success
         */
        if (ah->ah_version == AR5K_AR5210)
                ath5k_hw_reg_write(ah, trigger_level, AR5K_TRIG_LVL);
        else
                AR5K_REG_WRITE_BITS(ah, AR5K_TXCFG,
                                AR5K_TXCFG_TXFULL, trigger_level);

        ret = 0;

done:
        /*
         * Restore interrupt mask
         */
        ath5k_hw_set_imr(ah, imr);

        return ret;
}
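
/*
 * Illustrative sketch, not part of the driver: per the kernel-doc above,
 * the usual trigger for raising the threshold is a tx underrun reported
 * through the interrupt status, e.g. somewhere in the interrupt handling
 * path ("status" being the mask returned by ath5k_hw_get_isr() below):
 *
 *      if (status & AR5K_INT_TXURN)
 *              ath5k_hw_update_tx_triglevel(ah, true);
 */
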
/*******************\
* Interrupt masking *
\*******************/

/**
 * ath5k_hw_is_intr_pending() - Check if we have pending interrupts
 * @ah: The &struct ath5k_hw
 *
 * Check if we have pending interrupts to process. Returns true if we
 * have pending interrupts and false if we don't.
 */
bool
ath5k_hw_is_intr_pending(struct ath5k_hw *ah)
{
        return ath5k_hw_reg_read(ah, AR5K_INTPEND) == 1 ? 1 : 0;
}

/**
 * ath5k_hw_get_isr() - Get interrupt status
 * @ah: The &struct ath5k_hw
 * @interrupt_mask: Driver's interrupt mask used to filter out
 * interrupts in sw.
 *
 * This function is used inside our interrupt handler to determine the
 * reason for the interrupt by reading the Primary Interrupt Status
 * Register. Returns an abstract interrupt status mask which is mostly
 * ISR with some uncommon bits mapped to standard, non hw-specific
 * positions (check out &ath5k_int).
 *
 * NOTE: We do write-to-clear, so the active PISR/SISR bits at the time this
 * function gets called are cleared on return.
 */
int
ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask)
{
        u32 data = 0;

        /*
         * Read interrupt status from the Primary Interrupt
         * Status Register.
         *
         * Note: PISR/SISR Not available on 5210
         */
        if (ah->ah_version == AR5K_AR5210) {
                u32 isr = 0;
                isr = ath5k_hw_reg_read(ah, AR5K_ISR);
                if (unlikely(isr == AR5K_INT_NOCARD)) {
                        *interrupt_mask = isr;
                        return -ENODEV;
                }

                /*
                 * Filter out the non-common bits from the interrupt
                 * status.
                 */
                *interrupt_mask = (isr & AR5K_INT_COMMON) & ah->ah_imr;

                /* Handle INT_FATAL */
                if (unlikely(isr & (AR5K_ISR_SSERR | AR5K_ISR_MCABT
                                | AR5K_ISR_DPERR)))
                        *interrupt_mask |= AR5K_INT_FATAL;

                /*
                 * XXX: BMISS interrupts may occur after association.
                 * I found this on 5210 code but it needs testing. If this is
                 * true we should disable them before assoc and re-enable them
                 * after a successful assoc + some jiffies.
                interrupt_mask &= ~AR5K_INT_BMISS;
                 */

                data = isr;
        } else {
                u32 pisr = 0;
                u32 pisr_clear = 0;
                u32 sisr0 = 0;
                u32 sisr1 = 0;
                u32 sisr2 = 0;
                u32 sisr3 = 0;
                u32 sisr4 = 0;

                /* Read PISR and SISRs... */
                pisr = ath5k_hw_reg_read(ah, AR5K_PISR);
                if (unlikely(pisr == AR5K_INT_NOCARD)) {
                        *interrupt_mask = pisr;
                        return -ENODEV;
                }

                sisr0 = ath5k_hw_reg_read(ah, AR5K_SISR0);
                sisr1 = ath5k_hw_reg_read(ah, AR5K_SISR1);
                sisr2 = ath5k_hw_reg_read(ah, AR5K_SISR2);
                sisr3 = ath5k_hw_reg_read(ah, AR5K_SISR3);
                sisr4 = ath5k_hw_reg_read(ah, AR5K_SISR4);

                /*
                 * PISR holds the logical OR of interrupt bits
                 * from SISR registers:
                 *
                 * TXOK and TXDESC -> Logical OR of TXOK and TXDESC
                 *                      per-queue bits on SISR0
                 *
                 * TXERR and TXEOL -> Logical OR of TXERR and TXEOL
                 *                      per-queue bits on SISR1
                 *
                 * TXURN -> Logical OR of TXURN per-queue bits on SISR2
                 *
                 * HIUERR -> Logical OR of MCABT, SSERR and DPERR bits
                 *                      on SISR2
                 *
                 * BCNMISC -> Logical OR of TIM, CAB_END, DTIM_SYNC,
                 *                      BCN_TIMEOUT, CAB_TIMEOUT and DTIM
                 *                      (and TSFOOR ?) bits on SISR2
                 *
                 * QCBRORN and QCBRURN -> Logical OR of QCBRORN and
                 *                      QCBRURN per-queue bits on SISR3
                 *
                 * QTRIG -> Logical OR of QTRIG per-queue bits on SISR4
                 *
                 * If we clear these bits on PISR we'll also clear all
                 * related bits from the SISRs, e.g. if we write the TXOK
                 * bit to PISR we'll clear all TXOK bits from SISR0. So if
                 * a new TXOK interrupt fired for another queue while we
                 * were reading the interrupt registers and we write back
                 * the TXOK bit to PISR, we'll lose it. So make sure that
                 * we don't write back to PISR any bits that come from
                 * SISRs. Clearing them from the SISRs will also clear PISR,
                 * so there's no need to worry here.
                 */

                pisr_clear = pisr & ~AR5K_ISR_BITS_FROM_SISRS;

                /*
                 * Write to clear them...
                 * Note: This means that each bit we write back
                 * to the registers will get cleared, leaving the
                 * rest unaffected. So this won't affect new interrupts
                 * we didn't catch while reading/processing; we'll get
                 * them the next time get_isr gets called.
                 */
                ath5k_hw_reg_write(ah, sisr0, AR5K_SISR0);
                ath5k_hw_reg_write(ah, sisr1, AR5K_SISR1);
                ath5k_hw_reg_write(ah, sisr2, AR5K_SISR2);
                ath5k_hw_reg_write(ah, sisr3, AR5K_SISR3);
                ath5k_hw_reg_write(ah, sisr4, AR5K_SISR4);
                ath5k_hw_reg_write(ah, pisr_clear, AR5K_PISR);
                /* Flush previous write */
                ath5k_hw_reg_read(ah, AR5K_PISR);
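
                /*
                 * Worked example of the race described above (an added
                 * illustration, not new behavior): suppose sisr0 was read
                 * with TXOK set only for queue 0, and TXOK then fires for
                 * queue 1 before the write-back. Writing sisr0 back clears
                 * only the queue 0 bit we actually saw; queue 1's bit (and
                 * its reflection in PISR) stays latched and is picked up
                 * the next time ath5k_hw_get_isr() runs. Writing the
                 * summary TXOK bit to PISR instead would have discarded
                 * it unseen.
                 */
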
                /*
                 * Filter out the non-common bits from the interrupt
                 * status.
                 */
                *interrupt_mask = (pisr & AR5K_INT_COMMON) & ah->ah_imr;

                /* We treat TXOK, TXDESC, TXERR and TXEOL
                 * the same way (schedule the tx tasklet)
                 * so we track them all together per queue */
                if (pisr & AR5K_ISR_TXOK)
                        ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr0,
                                        AR5K_SISR0_QCU_TXOK);

                if (pisr & AR5K_ISR_TXDESC)
                        ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr0,
                                        AR5K_SISR0_QCU_TXDESC);

                if (pisr & AR5K_ISR_TXERR)
                        ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr1,
                                        AR5K_SISR1_QCU_TXERR);

                if (pisr & AR5K_ISR_TXEOL)
                        ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr1,
                                        AR5K_SISR1_QCU_TXEOL);

                /* Currently this is not very useful since we treat
                 * all queues the same way if we get a TXURN (update
                 * tx trigger level) but we might need it later on */
                if (pisr & AR5K_ISR_TXURN)
                        ah->ah_txq_isr_txurn |= AR5K_REG_MS(sisr2,
                                        AR5K_SISR2_QCU_TXURN);

                /* Misc Beacon related interrupts */

                /* For AR5211 */
                if (pisr & AR5K_ISR_TIM)
                        *interrupt_mask |= AR5K_INT_TIM;

                /* For AR5212+ */
                if (pisr & AR5K_ISR_BCNMISC) {
                        if (sisr2 & AR5K_SISR2_TIM)
                                *interrupt_mask |= AR5K_INT_TIM;
                        if (sisr2 & AR5K_SISR2_DTIM)
                                *interrupt_mask |= AR5K_INT_DTIM;
                        if (sisr2 & AR5K_SISR2_DTIM_SYNC)
                                *interrupt_mask |= AR5K_INT_DTIM_SYNC;
                        if (sisr2 & AR5K_SISR2_BCN_TIMEOUT)
                                *interrupt_mask |= AR5K_INT_BCN_TIMEOUT;
                        if (sisr2 & AR5K_SISR2_CAB_TIMEOUT)
                                *interrupt_mask |= AR5K_INT_CAB_TIMEOUT;
                }

                /* Below interrupts are unlikely to happen */

                /* HIU = Host Interface Unit (PCI etc.)
                 * Can be one of MCABT, SSERR, DPERR from SISR2 */
                if (unlikely(pisr & (AR5K_ISR_HIUERR)))
                        *interrupt_mask |= AR5K_INT_FATAL;

                /* Beacon Not Ready */
                if (unlikely(pisr & (AR5K_ISR_BNR)))
                        *interrupt_mask |= AR5K_INT_BNR;

                /* A queue got CBR overrun */
                if (unlikely(pisr & (AR5K_ISR_QCBRORN))) {
                        *interrupt_mask |= AR5K_INT_QCBRORN;
                        ah->ah_txq_isr_qcborn |= AR5K_REG_MS(sisr3,
                                        AR5K_SISR3_QCBRORN);
                }

                /* A queue got CBR underrun */
                if (unlikely(pisr & (AR5K_ISR_QCBRURN))) {
                        *interrupt_mask |= AR5K_INT_QCBRURN;
                        ah->ah_txq_isr_qcburn |= AR5K_REG_MS(sisr3,
                                        AR5K_SISR3_QCBRURN);
                }

                /* A queue got triggered */
                if (unlikely(pisr & (AR5K_ISR_QTRIG))) {
                        *interrupt_mask |= AR5K_INT_QTRIG;
                        ah->ah_txq_isr_qtrig |= AR5K_REG_MS(sisr4,
                                        AR5K_SISR4_QTRIG);
                }

                data = pisr;
        }

        /*
         * In case we didn't handle anything,
         * print the register value.
         */
        if (unlikely(*interrupt_mask == 0 && net_ratelimit()))
                ATH5K_PRINTF("ISR: 0x%08x IMR: 0x%08x\n", data, ah->ah_imr);

        return 0;
}
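
/*
 * Illustrative sketch, not part of the driver: a typical interrupt
 * handler drains all pending causes in a loop, since new PISR bits may
 * latch while earlier ones are being processed:
 *
 *      enum ath5k_int status;
 *
 *      do {
 *              if (ath5k_hw_get_isr(ah, &status))
 *                      break;  (card gone, -ENODEV)
 *              ... dispatch based on the AR5K_INT_* bits in status ...
 *      } while (ath5k_hw_is_intr_pending(ah));
 */
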
/**
 * ath5k_hw_set_imr() - Set interrupt mask
 * @ah: The &struct ath5k_hw
 * @new_mask: The new interrupt mask to be set
 *
 * Set the interrupt mask in hw so that we only get the interrupts we are
 * interested in. We do that by mapping &ath5k_int bits to hw-specific bits
 * to remove abstraction and writing the Interrupt Mask Register.
 */
enum ath5k_int
ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask)
{
        enum ath5k_int old_mask, int_mask;

        old_mask = ah->ah_imr;

        /*
         * Disable card interrupts to prevent any race conditions
         * (they will be re-enabled afterwards if AR5K_INT_GLOBAL
         * is set again on the new mask).
         */
        if (old_mask & AR5K_INT_GLOBAL) {
                ath5k_hw_reg_write(ah, AR5K_IER_DISABLE, AR5K_IER);
                ath5k_hw_reg_read(ah, AR5K_IER);
        }

        /*
         * Add additional, chipset-dependent interrupt mask flags
         * and write them to the IMR (interrupt mask register).
         */
        int_mask = new_mask & AR5K_INT_COMMON;

        if (ah->ah_version != AR5K_AR5210) {
                /* Preserve per queue TXURN interrupt mask */
                u32 simr2 = ath5k_hw_reg_read(ah, AR5K_SIMR2)
                                & AR5K_SIMR2_QCU_TXURN;

                /* Fatal interrupt abstraction for 5211+ */
                if (new_mask & AR5K_INT_FATAL) {
                        int_mask |= AR5K_IMR_HIUERR;
                        simr2 |= (AR5K_SIMR2_MCABT | AR5K_SIMR2_SSERR
                                | AR5K_SIMR2_DPERR);
                }

                /* Misc beacon related interrupts */
                if (new_mask & AR5K_INT_TIM)
                        int_mask |= AR5K_IMR_TIM;

                if (new_mask & AR5K_INT_TIM)
                        simr2 |= AR5K_SISR2_TIM;
                if (new_mask & AR5K_INT_DTIM)
                        simr2 |= AR5K_SISR2_DTIM;
                if (new_mask & AR5K_INT_DTIM_SYNC)
                        simr2 |= AR5K_SISR2_DTIM_SYNC;
                if (new_mask & AR5K_INT_BCN_TIMEOUT)
                        simr2 |= AR5K_SISR2_BCN_TIMEOUT;
                if (new_mask & AR5K_INT_CAB_TIMEOUT)
                        simr2 |= AR5K_SISR2_CAB_TIMEOUT;

                /* Beacon Not Ready */
                if (new_mask & AR5K_INT_BNR)
                        int_mask |= AR5K_INT_BNR;

                /* Note: Per queue interrupt masks
                 * are set via ath5k_hw_reset_tx_queue() (qcu.c) */
                ath5k_hw_reg_write(ah, int_mask, AR5K_PIMR);
                ath5k_hw_reg_write(ah, simr2, AR5K_SIMR2);

        } else {
                /* Fatal interrupt abstraction for 5210 */
                if (new_mask & AR5K_INT_FATAL)
                        int_mask |= (AR5K_IMR_SSERR | AR5K_IMR_MCABT
                                | AR5K_IMR_HIUERR | AR5K_IMR_DPERR);

                /* Only common interrupts left for 5210 (no SIMRs) */
                ath5k_hw_reg_write(ah, int_mask, AR5K_IMR);
        }

        /* If RXNOFRM interrupt is masked disable it
         * by setting AR5K_RXNOFRM to zero */
        if (!(new_mask & AR5K_INT_RXNOFRM))
                ath5k_hw_reg_write(ah, 0, AR5K_RXNOFRM);

        /* Store new interrupt mask */
        ah->ah_imr = new_mask;

        /* ..re-enable interrupts if AR5K_INT_GLOBAL is set */
        if (new_mask & AR5K_INT_GLOBAL) {
                ath5k_hw_reg_write(ah, AR5K_IER_ENABLE, AR5K_IER);
                ath5k_hw_reg_read(ah, AR5K_IER);
        }

        return old_mask;
}
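
/*
 * Illustrative sketch, not part of the driver: since the previous mask is
 * returned, callers can temporarily quiesce interrupts around a critical
 * section and restore the old state afterwards, exactly as
 * ath5k_hw_update_tx_triglevel() does above:
 *
 *      enum ath5k_int old_imr;
 *
 *      old_imr = ath5k_hw_set_imr(ah, ah->ah_imr & ~AR5K_INT_GLOBAL);
 *      ... touch registers that must not race the interrupt handler ...
 *      ath5k_hw_set_imr(ah, old_imr);
 */
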
/********************\
 Init/Stop functions
\********************/

/**
 * ath5k_hw_dma_init() - Initialize DMA unit
 * @ah: The &struct ath5k_hw
 *
 * Set DMA size and pre-enable interrupts
 * (driver handles tx/rx buffer setup and
 * dma start/stop)
 *
 * XXX: Save/restore RXDP/TXDP registers ?
 */
void
ath5k_hw_dma_init(struct ath5k_hw *ah)
{
        /*
         * Set Rx/Tx DMA Configuration
         *
         * Set standard DMA size (128). Note that
         * a DMA size of 512 causes rx overruns and tx errors
         * on pci-e cards (tested on 5424 but since rx overruns
         * also occur on 5416/5418 with madwifi we set 128
         * for all PCI-E cards to be safe).
         *
         * XXX: need to check 5210 for this
         * TODO: Check out tx trigger level, it's always 64 on dumps but I
         * guess we can tweak it and see how it goes ;-)
         */
        if (ah->ah_version != AR5K_AR5210) {
                AR5K_REG_WRITE_BITS(ah, AR5K_TXCFG,
                        AR5K_TXCFG_SDMAMR, AR5K_DMASIZE_128B);
                AR5K_REG_WRITE_BITS(ah, AR5K_RXCFG,
                        AR5K_RXCFG_SDMAMW, AR5K_DMASIZE_128B);
        }

        /* Pre-enable interrupts on 5211/5212 */
        if (ah->ah_version != AR5K_AR5210)
                ath5k_hw_set_imr(ah, ah->ah_imr);
}

/**
 * ath5k_hw_dma_stop() - stop DMA unit
 * @ah: The &struct ath5k_hw
 *
 * Stop tx/rx DMA and interrupts. Returns
 * -EBUSY if tx or rx dma failed to stop.
 *
 * XXX: Sometimes DMA unit hangs and we have
 * stuck frames on tx queues, only a reset
 * can fix that.
 */
int
ath5k_hw_dma_stop(struct ath5k_hw *ah)
{
        int i, qmax, err;

        /* Disable interrupts */
        ath5k_hw_set_imr(ah, 0);

        /* Stop rx dma */
        err = ath5k_hw_stop_rx_dma(ah);
        if (err)
                return err;

        /* Clear any pending interrupts
         * and disable tx dma */
        if (ah->ah_version != AR5K_AR5210) {
                ath5k_hw_reg_write(ah, 0xffffffff, AR5K_PISR);
                qmax = AR5K_NUM_TX_QUEUES;
        } else {
                /* PISR/SISR Not available on 5210 */
                ath5k_hw_reg_read(ah, AR5K_ISR);
                qmax = AR5K_NUM_TX_QUEUES_NOQCU;
        }

        for (i = 0; i < qmax; i++) {
                err = ath5k_hw_stop_tx_dma(ah, i);
                /* -EINVAL -> queue inactive */
                if (err && err != -EINVAL)
                        return err;
        }

        return 0;
}
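
/*
 * Illustrative sketch, not part of the driver: these two functions are
 * the bookends of the reset path; DMA is quiesced before the chip is
 * reset and the DMA unit is re-initialized afterwards:
 *
 *      ret = ath5k_hw_dma_stop(ah);
 *      if (ret)
 *              ... per the XXX above, only a chip reset recovers
 *                  stuck tx queues ...
 *      ... perform chip reset / channel change ...
 *      ath5k_hw_dma_init(ah);
 */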