1 /* Copyright (C) 2009 - 2010 Ivo van Doorn <IvDoorn@gmail.com> 2 * Copyright (C) 2009 Alban Browaeys <prahal@yahoo.com> 3 * Copyright (C) 2009 Felix Fietkau <nbd@openwrt.org> 4 * Copyright (C) 2009 Luis Correia <luis.f.correia@gmail.com> 5 * Copyright (C) 2009 Mattias Nissler <mattias.nissler@gmx.de> 6 * Copyright (C) 2009 Mark Asselstine <asselsm@gmail.com> 7 * Copyright (C) 2009 Xose Vazquez Perez <xose.vazquez@gmail.com> 8 * Copyright (C) 2009 Bart Zolnierkiewicz <bzolnier@gmail.com> 9 * <http://rt2x00.serialmonkey.com> 10 * 11 * This program is free software; you can redistribute it and/or modify 12 * it under the terms of the GNU General Public License as published by 13 * the Free Software Foundation; either version 2 of the License, or 14 * (at your option) any later version. 15 * 16 * This program is distributed in the hope that it will be useful, 17 * but WITHOUT ANY WARRANTY; without even the implied warranty of 18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 19 * GNU General Public License for more details. 20 * 21 * You should have received a copy of the GNU General Public License 22 * along with this program; if not, see <http://www.gnu.org/licenses/>. 23 */ 24 25 /* Module: rt2800mmio 26 * Abstract: rt2800 MMIO device routines. 
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/export.h>

#include "rt2x00.h"
#include "rt2x00mmio.h"
#include "rt2800.h"
#include "rt2800lib.h"
#include "rt2800mmio.h"

/*
 * TX descriptor initialization
 */

/* Return the TXWI, which sits at the very start of the skb data. */
__le32 *rt2800mmio_get_txwi(struct queue_entry *entry)
{
	return (__le32 *) entry->skb->data;
}
EXPORT_SYMBOL_GPL(rt2800mmio_get_txwi);

/*
 * Fill the hardware TX descriptor (four 32-bit words W0-W3) for @entry
 * according to @txdesc, describing the two DMA segments of the frame
 * and its queue assignment.
 */
void rt2800mmio_write_tx_desc(struct queue_entry *entry,
			      struct txentry_desc *txdesc)
{
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
	struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
	__le32 *txd = entry_priv->desc;
	u32 word;
	const unsigned int txwi_size = entry->queue->winfo_size;

	/*
	 * The buffers pointed by SD_PTR0/SD_LEN0 and SD_PTR1/SD_LEN1
	 * must contain a TXWI structure + 802.11 header + padding + 802.11
	 * data. We choose to have SD_PTR0/SD_LEN0 only contain the TXWI and
	 * SD_PTR1/SD_LEN1 contain the 802.11 header + padding + 802.11
	 * data. It means that LAST_SEC0 is always 0.
	 */

	/*
	 * Initialize TX descriptor
	 */
	word = 0;
	rt2x00_set_field32(&word, TXD_W0_SD_PTR0, skbdesc->skb_dma);
	rt2x00_desc_write(txd, 0, word);

	word = 0;
	rt2x00_set_field32(&word, TXD_W1_SD_LEN1, entry->skb->len);
	rt2x00_set_field32(&word, TXD_W1_LAST_SEC1,
			   !test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags));
	rt2x00_set_field32(&word, TXD_W1_BURST,
			   test_bit(ENTRY_TXD_BURST, &txdesc->flags));
	rt2x00_set_field32(&word, TXD_W1_SD_LEN0, txwi_size);
	rt2x00_set_field32(&word, TXD_W1_LAST_SEC0, 0);
	rt2x00_set_field32(&word, TXD_W1_DMA_DONE, 0);
	rt2x00_desc_write(txd, 1, word);

	/* Segment 1 starts right after the TXWI. */
	word = 0;
	rt2x00_set_field32(&word, TXD_W2_SD_PTR1,
			   skbdesc->skb_dma + txwi_size);
	rt2x00_desc_write(txd, 2, word);

	word = 0;
	rt2x00_set_field32(&word, TXD_W3_WIV,
			   !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc->flags));
	rt2x00_set_field32(&word, TXD_W3_QSEL, 2);
	rt2x00_desc_write(txd, 3, word);

	/*
	 * Register descriptor details in skb frame descriptor.
	 */
	skbdesc->desc = txd;
	skbdesc->desc_len = TXD_DESC_SIZE;
}
EXPORT_SYMBOL_GPL(rt2800mmio_write_tx_desc);

/*
 * RX control handlers
 */

/*
 * Translate RX descriptor word 3 of a completed frame into rt2x00
 * rxdone flags, then hand off RXWI processing to rt2800lib.
 */
void rt2800mmio_fill_rxdone(struct queue_entry *entry,
			    struct rxdone_entry_desc *rxdesc)
{
	struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
	__le32 *rxd = entry_priv->desc;
	u32 word;

	word = rt2x00_desc_read(rxd, 3);

	if (rt2x00_get_field32(word, RXD_W3_CRC_ERROR))
		rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC;

	/*
	 * Unfortunately we don't know the cipher type used during
	 * decryption. This prevents us from providing correct
	 * statistics through debugfs.
	 */
	rxdesc->cipher_status = rt2x00_get_field32(word, RXD_W3_CIPHER_ERROR);

	if (rt2x00_get_field32(word, RXD_W3_DECRYPTED)) {
		/*
		 * Hardware has stripped IV/EIV data from 802.11 frame during
		 * decryption. 
Unfortunately the descriptor doesn't contain
		 * any fields with the EIV/IV data either, so they can't
		 * be restored by rt2x00lib.
		 */
		rxdesc->flags |= RX_FLAG_IV_STRIPPED;

		/*
		 * The hardware has already checked the Michael Mic and has
		 * stripped it from the frame. Signal this to mac80211.
		 */
		rxdesc->flags |= RX_FLAG_MMIC_STRIPPED;

		if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS) {
			rxdesc->flags |= RX_FLAG_DECRYPTED;
		} else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC) {
			/*
			 * In order to check the Michael Mic, the packet must have
			 * been decrypted. Mac80211 doesn't check the MMIC failure
			 * flag to initiate MMIC countermeasures if the decoded flag
			 * has not been set.
			 */
			rxdesc->flags |= RX_FLAG_DECRYPTED;

			rxdesc->flags |= RX_FLAG_MMIC_ERROR;
		}
	}

	if (rt2x00_get_field32(word, RXD_W3_MY_BSS))
		rxdesc->dev_flags |= RXDONE_MY_BSS;

	if (rt2x00_get_field32(word, RXD_W3_L2PAD))
		rxdesc->dev_flags |= RXDONE_L2PAD;

	/*
	 * Process the RXWI structure that is at the start of the buffer.
	 */
	rt2800_process_rxwi(entry, rxdesc);
}
EXPORT_SYMBOL_GPL(rt2800mmio_fill_rxdone);

/*
 * Interrupt functions.
 */

/*
 * Re-run the powersave configuration after an AUTO_WAKEUP interrupt
 * so the device leaves its sleep state.
 */
static void rt2800mmio_wakeup(struct rt2x00_dev *rt2x00dev)
{
	struct ieee80211_conf conf = { .flags = 0 };
	struct rt2x00lib_conf libconf = { .conf = &conf };

	rt2800_config(rt2x00dev, &libconf, IEEE80211_CONF_CHANGE_PS);
}

static inline void rt2800mmio_enable_interrupt(struct rt2x00_dev *rt2x00dev,
					       struct rt2x00_field32 irq_field)
{
	u32 reg;

	/*
	 * Enable a single interrupt. The interrupt mask register
	 * access needs locking. 
	 */
	spin_lock_irq(&rt2x00dev->irqmask_lock);
	reg = rt2x00mmio_register_read(rt2x00dev, INT_MASK_CSR);
	rt2x00_set_field32(&reg, irq_field, 1);
	rt2x00mmio_register_write(rt2x00dev, INT_MASK_CSR, reg);
	spin_unlock_irq(&rt2x00dev->irqmask_lock);
}

/* Tasklet: handle a PRE_TBTT interrupt, then unmask it again. */
void rt2800mmio_pretbtt_tasklet(unsigned long data)
{
	struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;

	rt2x00lib_pretbtt(rt2x00dev);

	/* Only unmask the interrupt while the radio is still enabled. */
	if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
		rt2800mmio_enable_interrupt(rt2x00dev, INT_MASK_CSR_PRE_TBTT);
}
EXPORT_SYMBOL_GPL(rt2800mmio_pretbtt_tasklet);

/* Tasklet: handle a TBTT (beacon) interrupt, then unmask it again. */
void rt2800mmio_tbtt_tasklet(unsigned long data)
{
	struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
	struct rt2800_drv_data *drv_data = rt2x00dev->drv_data;
	u32 reg;

	rt2x00lib_beacondone(rt2x00dev);

	if (rt2x00dev->intf_ap_count) {
		/*
		 * The rt2800pci hardware tbtt timer is off by 1us per tbtt
		 * causing beacon skew and as a result causing problems with
		 * some powersaving clients over time. Shorten the beacon
		 * interval every 64 beacons by 64us to mitigate this effect. 
		 */
		if (drv_data->tbtt_tick == (BCN_TBTT_OFFSET - 2)) {
			reg = rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG);
			rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_INTERVAL,
					   (rt2x00dev->beacon_int * 16) - 1);
			rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg);
		} else if (drv_data->tbtt_tick == (BCN_TBTT_OFFSET - 1)) {
			reg = rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG);
			rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_INTERVAL,
					   (rt2x00dev->beacon_int * 16));
			rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg);
		}
		drv_data->tbtt_tick++;
		drv_data->tbtt_tick %= BCN_TBTT_OFFSET;
	}

	if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
		rt2800mmio_enable_interrupt(rt2x00dev, INT_MASK_CSR_TBTT);
}
EXPORT_SYMBOL_GPL(rt2800mmio_tbtt_tasklet);

/* Tasklet: drain completed RX frames; reschedule if more remain. */
void rt2800mmio_rxdone_tasklet(unsigned long data)
{
	struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;

	if (rt2x00mmio_rxdone(rt2x00dev))
		tasklet_schedule(&rt2x00dev->rxdone_tasklet);
	else if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
		rt2800mmio_enable_interrupt(rt2x00dev, INT_MASK_CSR_RX_DONE);
}
EXPORT_SYMBOL_GPL(rt2800mmio_rxdone_tasklet);

/* Tasklet: wake the device after an AUTO_WAKEUP interrupt. */
void rt2800mmio_autowake_tasklet(unsigned long data)
{
	struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;

	rt2800mmio_wakeup(rt2x00dev);

	if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
		rt2800mmio_enable_interrupt(rt2x00dev,
					    INT_MASK_CSR_AUTO_WAKEUP);
}
EXPORT_SYMBOL_GPL(rt2800mmio_autowake_tasklet);

/* Copy all valid TX status words from TX_STA_FIFO into the kernel FIFO. */
static void rt2800mmio_fetch_txstatus(struct rt2x00_dev *rt2x00dev)
{
	u32 status;
	unsigned long flags;

	/*
	 * The TX_FIFO_STATUS interrupt needs special care. We should
	 * read TX_STA_FIFO but we should do it immediately as otherwise
	 * the register can overflow and we would lose status reports. 
	 *
	 * Hence, read the TX_STA_FIFO register and copy all tx status
	 * reports into a kernel FIFO which is handled in the txstatus
	 * tasklet. We use a tasklet to process the tx status reports
	 * because we can schedule the tasklet multiple times (when the
	 * interrupt fires again during tx status processing).
	 *
	 * We also read statuses from tx status timeout timer, use
	 * lock to prevent concurrent writes to fifo.
	 */
	spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);

	while (!kfifo_is_full(&rt2x00dev->txstatus_fifo)) {
		status = rt2x00mmio_register_read(rt2x00dev, TX_STA_FIFO);
		/* Stop once the hardware FIFO hands back an invalid entry. */
		if (!rt2x00_get_field32(status, TX_STA_FIFO_VALID))
			break;

		kfifo_put(&rt2x00dev->txstatus_fifo, status);
	}

	spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
}

/* Tasklet: process up to 16 queued tx status reports per invocation. */
void rt2800mmio_txstatus_tasklet(unsigned long data)
{
	struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;

	rt2800_txdone(rt2x00dev, 16);

	/* Reschedule ourselves if reports are still pending. */
	if (!kfifo_is_empty(&rt2x00dev->txstatus_fifo))
		tasklet_schedule(&rt2x00dev->txstatus_tasklet);

}
EXPORT_SYMBOL_GPL(rt2800mmio_txstatus_tasklet);

/* Top-half interrupt handler: ack, mask and dispatch to tasklets. */
irqreturn_t rt2800mmio_interrupt(int irq, void *dev_instance)
{
	struct rt2x00_dev *rt2x00dev = dev_instance;
	u32 reg, mask;

	/* Read status and ACK all interrupts */
	reg = rt2x00mmio_register_read(rt2x00dev, INT_SOURCE_CSR);
	rt2x00mmio_register_write(rt2x00dev, INT_SOURCE_CSR, reg);

	if (!reg)
		return IRQ_NONE;

	if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
		return IRQ_HANDLED;

	/*
	 * Since INT_MASK_CSR and INT_SOURCE_CSR use the same bits
	 * for interrupts and interrupt masks we can just use the value of
	 * INT_SOURCE_CSR to create the interrupt mask. 
	 */
	mask = ~reg;

	if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TX_FIFO_STATUS)) {
		/* Keep TX_FIFO_STATUS unmasked; it is drained immediately. */
		rt2x00_set_field32(&mask, INT_MASK_CSR_TX_FIFO_STATUS, 1);
		rt2800mmio_fetch_txstatus(rt2x00dev);
		if (!kfifo_is_empty(&rt2x00dev->txstatus_fifo))
			tasklet_schedule(&rt2x00dev->txstatus_tasklet);
	}

	if (rt2x00_get_field32(reg, INT_SOURCE_CSR_PRE_TBTT))
		tasklet_hi_schedule(&rt2x00dev->pretbtt_tasklet);

	if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TBTT))
		tasklet_hi_schedule(&rt2x00dev->tbtt_tasklet);

	if (rt2x00_get_field32(reg, INT_SOURCE_CSR_RX_DONE))
		tasklet_schedule(&rt2x00dev->rxdone_tasklet);

	if (rt2x00_get_field32(reg, INT_SOURCE_CSR_AUTO_WAKEUP))
		tasklet_schedule(&rt2x00dev->autowake_tasklet);

	/*
	 * Disable all interrupts for which a tasklet was scheduled right now,
	 * the tasklet will reenable the appropriate interrupts.
	 */
	spin_lock(&rt2x00dev->irqmask_lock);
	reg = rt2x00mmio_register_read(rt2x00dev, INT_MASK_CSR);
	reg &= mask;
	rt2x00mmio_register_write(rt2x00dev, INT_MASK_CSR, reg);
	spin_unlock(&rt2x00dev->irqmask_lock);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(rt2800mmio_interrupt);

/*
 * Switch device interrupts on or off.
 *
 * STATE_RADIO_IRQ_ON unmasks all used interrupt sources;
 * STATE_RADIO_IRQ_OFF masks everything and waits for any running
 * tasklets to finish.
 */
void rt2800mmio_toggle_irq(struct rt2x00_dev *rt2x00dev,
			   enum dev_state state)
{
	u32 reg;
	unsigned long flags;

	/*
	 * When interrupts are being enabled, the interrupt registers
	 * should clear the register to assure a clean state. 
	 */
	if (state == STATE_RADIO_IRQ_ON) {
		reg = rt2x00mmio_register_read(rt2x00dev, INT_SOURCE_CSR);
		rt2x00mmio_register_write(rt2x00dev, INT_SOURCE_CSR, reg);
	}

	spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
	/* Writing 0 masks every interrupt source (the OFF case). */
	reg = 0;
	if (state == STATE_RADIO_IRQ_ON) {
		rt2x00_set_field32(&reg, INT_MASK_CSR_RX_DONE, 1);
		rt2x00_set_field32(&reg, INT_MASK_CSR_TBTT, 1);
		rt2x00_set_field32(&reg, INT_MASK_CSR_PRE_TBTT, 1);
		rt2x00_set_field32(&reg, INT_MASK_CSR_TX_FIFO_STATUS, 1);
		rt2x00_set_field32(&reg, INT_MASK_CSR_AUTO_WAKEUP, 1);
	}
	rt2x00mmio_register_write(rt2x00dev, INT_MASK_CSR, reg);
	spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);

	if (state == STATE_RADIO_IRQ_OFF) {
		/*
		 * Wait for possibly running tasklets to finish.
		 */
		tasklet_kill(&rt2x00dev->txstatus_tasklet);
		tasklet_kill(&rt2x00dev->rxdone_tasklet);
		tasklet_kill(&rt2x00dev->autowake_tasklet);
		tasklet_kill(&rt2x00dev->tbtt_tasklet);
		tasklet_kill(&rt2x00dev->pretbtt_tasklet);
	}
}
EXPORT_SYMBOL_GPL(rt2800mmio_toggle_irq);

/*
 * Queue handlers. 
 */

/* Start RX or beacon generation for @queue; TX queues need no start. */
void rt2800mmio_start_queue(struct data_queue *queue)
{
	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
	u32 reg;

	switch (queue->qid) {
	case QID_RX:
		reg = rt2x00mmio_register_read(rt2x00dev, MAC_SYS_CTRL);
		rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 1);
		rt2x00mmio_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
		break;
	case QID_BEACON:
		reg = rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG);
		rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1);
		rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 1);
		rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1);
		rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg);

		reg = rt2x00mmio_register_read(rt2x00dev, INT_TIMER_EN);
		rt2x00_set_field32(&reg, INT_TIMER_EN_PRE_TBTT_TIMER, 1);
		rt2x00mmio_register_write(rt2x00dev, INT_TIMER_EN, reg);
		break;
	default:
		break;
	}
}
EXPORT_SYMBOL_GPL(rt2800mmio_start_queue);

/* 200 ms */
#define TXSTATUS_TIMEOUT 200000000

/*
 * Kick the hardware to start DMA for the frames queued on @queue by
 * writing the current queue index to the context index register.
 */
void rt2800mmio_kick_queue(struct data_queue *queue)
{
	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
	struct queue_entry *entry;

	switch (queue->qid) {
	case QID_AC_VO:
	case QID_AC_VI:
	case QID_AC_BE:
	case QID_AC_BK:
		WARN_ON_ONCE(rt2x00queue_empty(queue));
		entry = rt2x00queue_get_entry(queue, Q_INDEX);
		rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX(queue->qid),
					  entry->entry_idx);
		/* Arm the tx status timeout timer for these frames. */
		hrtimer_start(&rt2x00dev->txstatus_timer,
			      TXSTATUS_TIMEOUT, HRTIMER_MODE_REL);
		break;
	case QID_MGMT:
		entry = rt2x00queue_get_entry(queue, Q_INDEX);
		rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX(5),
					  entry->entry_idx);
		break;
	default:
		break;
	}
}
EXPORT_SYMBOL_GPL(rt2800mmio_kick_queue);

/* Wait (up to 5 x 50ms) for @queue to drain; @drop is not used here. */
void rt2800mmio_flush_queue(struct data_queue *queue, bool drop)
{
	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
	bool tx_queue = false;
	unsigned int i;

	switch 
 (queue->qid) {
	case QID_AC_VO:
	case QID_AC_VI:
	case QID_AC_BE:
	case QID_AC_BK:
		tx_queue = true;
		break;
	case QID_RX:
		break;
	default:
		return;
	}

	for (i = 0; i < 5; i++) {
		/*
		 * Check if the driver is already done, otherwise we
		 * have to sleep a little while to give the driver/hw
		 * the opportunity to complete the interrupt process itself.
		 */
		if (rt2x00queue_empty(queue))
			break;

		/*
		 * For TX queues schedule completion tasklet to catch
		 * tx status timeouts, otherwise just wait.
		 */
		if (tx_queue)
			queue_work(rt2x00dev->workqueue, &rt2x00dev->txdone_work);

		/*
		 * Wait for a little while to give the driver
		 * the opportunity to recover itself.
		 */
		msleep(50);
	}
}
EXPORT_SYMBOL_GPL(rt2800mmio_flush_queue);

/* Stop RX or beaconing on @queue and quiesce the TBTT tasklets. */
void rt2800mmio_stop_queue(struct data_queue *queue)
{
	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
	u32 reg;

	switch (queue->qid) {
	case QID_RX:
		reg = rt2x00mmio_register_read(rt2x00dev, MAC_SYS_CTRL);
		rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 0);
		rt2x00mmio_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
		break;
	case QID_BEACON:
		reg = rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG);
		rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 0);
		rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 0);
		rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0);
		rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg);

		reg = rt2x00mmio_register_read(rt2x00dev, INT_TIMER_EN);
		rt2x00_set_field32(&reg, INT_TIMER_EN_PRE_TBTT_TIMER, 0);
		rt2x00mmio_register_write(rt2x00dev, INT_TIMER_EN, reg);

		/*
		 * Wait for current invocation to finish. The tasklet
		 * won't be scheduled anymore afterwards since we disabled
		 * the TBTT and PRE TBTT timer. 
		 */
		tasklet_kill(&rt2x00dev->tbtt_tasklet);
		tasklet_kill(&rt2x00dev->pretbtt_tasklet);

		break;
	default:
		break;
	}
}
EXPORT_SYMBOL_GPL(rt2800mmio_stop_queue);

/* Set per-queue limits, descriptor sizes and winfo sizes at init time. */
void rt2800mmio_queue_init(struct data_queue *queue)
{
	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
	unsigned short txwi_size, rxwi_size;

	rt2800_get_txwi_rxwi_size(rt2x00dev, &txwi_size, &rxwi_size);

	switch (queue->qid) {
	case QID_RX:
		queue->limit = 128;
		queue->data_size = AGGREGATION_SIZE;
		queue->desc_size = RXD_DESC_SIZE;
		queue->winfo_size = rxwi_size;
		queue->priv_size = sizeof(struct queue_entry_priv_mmio);
		break;

	case QID_AC_VO:
	case QID_AC_VI:
	case QID_AC_BE:
	case QID_AC_BK:
		queue->limit = 64;
		queue->data_size = AGGREGATION_SIZE;
		queue->desc_size = TXD_DESC_SIZE;
		queue->winfo_size = txwi_size;
		queue->priv_size = sizeof(struct queue_entry_priv_mmio);
		break;

	case QID_BEACON:
		queue->limit = 8;
		queue->data_size = 0; /* No DMA required for beacons */
		queue->desc_size = TXD_DESC_SIZE;
		queue->winfo_size = txwi_size;
		queue->priv_size = sizeof(struct queue_entry_priv_mmio);
		break;

	case QID_ATIM:
		/* fallthrough */
	default:
		BUG();
		break;
	}
}
EXPORT_SYMBOL_GPL(rt2800mmio_queue_init);

/*
 * Initialization functions. 
 */

/*
 * Return true while the hardware still owns @entry, i.e. the DMA_DONE
 * bit in descriptor word 1 has not been set yet.
 */
bool rt2800mmio_get_entry_state(struct queue_entry *entry)
{
	struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
	u32 word;

	if (entry->queue->qid == QID_RX) {
		word = rt2x00_desc_read(entry_priv->desc, 1);

		return (!rt2x00_get_field32(word, RXD_W1_DMA_DONE));
	} else {
		word = rt2x00_desc_read(entry_priv->desc, 1);

		return (!rt2x00_get_field32(word, TXD_W1_DMA_DONE));
	}
}
EXPORT_SYMBOL_GPL(rt2800mmio_get_entry_state);

/* Reset @entry's descriptor so it can be handed back for reuse. */
void rt2800mmio_clear_entry(struct queue_entry *entry)
{
	struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	u32 word;

	if (entry->queue->qid == QID_RX) {
		word = rt2x00_desc_read(entry_priv->desc, 0);
		rt2x00_set_field32(&word, RXD_W0_SDP0, skbdesc->skb_dma);
		rt2x00_desc_write(entry_priv->desc, 0, word);

		word = rt2x00_desc_read(entry_priv->desc, 1);
		rt2x00_set_field32(&word, RXD_W1_DMA_DONE, 0);
		rt2x00_desc_write(entry_priv->desc, 1, word);

		/*
		 * Set RX IDX in register to inform hardware that we have
		 * handled this entry and it is available for reuse again.
		 */
		rt2x00mmio_register_write(rt2x00dev, RX_CRX_IDX,
					  entry->entry_idx);
	} else {
		word = rt2x00_desc_read(entry_priv->desc, 1);
		rt2x00_set_field32(&word, TXD_W1_DMA_DONE, 1);
		rt2x00_desc_write(entry_priv->desc, 1, word);

		/* If last entry stop txstatus timer */
		if (entry->queue->length == 1)
			hrtimer_cancel(&rt2x00dev->txstatus_timer);
	}
}
EXPORT_SYMBOL_GPL(rt2800mmio_clear_entry);

/* Program the DMA ring base/size/index registers for all queues. */
int rt2800mmio_init_queues(struct rt2x00_dev *rt2x00dev)
{
	struct queue_entry_priv_mmio *entry_priv;

	/*
	 * Initialize registers. 
	 */
	entry_priv = rt2x00dev->tx[0].entries[0].priv_data;
	rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR0,
				  entry_priv->desc_dma);
	rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT0,
				  rt2x00dev->tx[0].limit);
	rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX0, 0);
	rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX0, 0);

	entry_priv = rt2x00dev->tx[1].entries[0].priv_data;
	rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR1,
				  entry_priv->desc_dma);
	rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT1,
				  rt2x00dev->tx[1].limit);
	rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX1, 0);
	rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX1, 0);

	entry_priv = rt2x00dev->tx[2].entries[0].priv_data;
	rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR2,
				  entry_priv->desc_dma);
	rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT2,
				  rt2x00dev->tx[2].limit);
	rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX2, 0);
	rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX2, 0);

	entry_priv = rt2x00dev->tx[3].entries[0].priv_data;
	rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR3,
				  entry_priv->desc_dma);
	rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT3,
				  rt2x00dev->tx[3].limit);
	rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX3, 0);
	rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX3, 0);

	/* TX rings 4 and 5 are unused here; clear their registers. */
	rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR4, 0);
	rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT4, 0);
	rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX4, 0);
	rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX4, 0);

	rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR5, 0);
	rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT5, 0);
	rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX5, 0);
	rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX5, 0);

	entry_priv = rt2x00dev->rx->entries[0].priv_data;
	rt2x00mmio_register_write(rt2x00dev, RX_BASE_PTR,
				  entry_priv->desc_dma);
	rt2x00mmio_register_write(rt2x00dev, RX_MAX_CNT,
				  rt2x00dev->rx[0].limit);
	rt2x00mmio_register_write(rt2x00dev, RX_CRX_IDX,
				  rt2x00dev->rx[0].limit - 1);
	rt2x00mmio_register_write(rt2x00dev, RX_DRX_IDX, 0);

	rt2800_disable_wpdma(rt2x00dev);

	rt2x00mmio_register_write(rt2x00dev, DELAY_INT_CFG, 0);

	return 0;
}
EXPORT_SYMBOL_GPL(rt2800mmio_init_queues);

/* Reset the WPDMA indexes and bring MAC/BBP into a known state. */
int rt2800mmio_init_registers(struct rt2x00_dev *rt2x00dev)
{
	u32 reg;

	/*
	 * Reset DMA indexes
	 */
	reg = rt2x00mmio_register_read(rt2x00dev, WPDMA_RST_IDX);
	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX0, 1);
	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX1, 1);
	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX2, 1);
	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX3, 1);
	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX4, 1);
	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX5, 1);
	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DRX_IDX0, 1);
	rt2x00mmio_register_write(rt2x00dev, WPDMA_RST_IDX, reg);

	rt2x00mmio_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e1f);
	rt2x00mmio_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00);

	if (rt2x00_is_pcie(rt2x00dev) &&
	    (rt2x00_rt(rt2x00dev, RT3090) ||
	     rt2x00_rt(rt2x00dev, RT3390) ||
	     rt2x00_rt(rt2x00dev, RT3572) ||
	     rt2x00_rt(rt2x00dev, RT3593) ||
	     rt2x00_rt(rt2x00dev, RT5390) ||
	     rt2x00_rt(rt2x00dev, RT5392) ||
	     rt2x00_rt(rt2x00dev, RT5592))) {
		reg = rt2x00mmio_register_read(rt2x00dev, AUX_CTRL);
		rt2x00_set_field32(&reg, AUX_CTRL_FORCE_PCIE_CLK, 1);
		rt2x00_set_field32(&reg, AUX_CTRL_WAKE_PCIE_EN, 1);
		rt2x00mmio_register_write(rt2x00dev, AUX_CTRL, reg);
	}

	rt2x00mmio_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000003);

	/* Reset the MAC CSR and BBP, then release the reset again. */
	reg = 0;
	rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_CSR, 1);
	rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_BBP, 1);
	rt2x00mmio_register_write(rt2x00dev, MAC_SYS_CTRL, reg);

	rt2x00mmio_register_write(rt2x00dev, 
 MAC_SYS_CTRL, 0x00000000);

	return 0;
}
EXPORT_SYMBOL_GPL(rt2800mmio_init_registers);

/*
 * Device state switch handlers.
 */

/* Wait for WPDMA, program the queue registers, then enable the radio. */
int rt2800mmio_enable_radio(struct rt2x00_dev *rt2x00dev)
{
	/* Wait for DMA, ignore error until we initialize queues. */
	rt2800_wait_wpdma_ready(rt2x00dev);

	if (unlikely(rt2800mmio_init_queues(rt2x00dev)))
		return -EIO;

	return rt2800_enable_radio(rt2x00dev);
}
EXPORT_SYMBOL_GPL(rt2800mmio_enable_radio);

/* Workqueue handler: drain pending and timed-out tx status reports. */
static void rt2800mmio_work_txdone(struct work_struct *work)
{
	struct rt2x00_dev *rt2x00dev =
		container_of(work, struct rt2x00_dev, txdone_work);

	if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
		return;

	while (!kfifo_is_empty(&rt2x00dev->txstatus_fifo) ||
	       rt2800_txstatus_timeout(rt2x00dev)) {

		/* Keep the txstatus tasklet from racing with us. */
		tasklet_disable(&rt2x00dev->txstatus_tasklet);
		rt2800_txdone(rt2x00dev, UINT_MAX);
		rt2800_txdone_nostatus(rt2x00dev);
		tasklet_enable(&rt2x00dev->txstatus_tasklet);
	}

	/* Re-arm the timeout timer while reports are still outstanding. */
	if (rt2800_txstatus_pending(rt2x00dev))
		hrtimer_start(&rt2x00dev->txstatus_timer,
			      TXSTATUS_TIMEOUT, HRTIMER_MODE_REL);
}

/* hrtimer callback: tx status timed out, try to recover the reports. */
static enum hrtimer_restart rt2800mmio_tx_sta_fifo_timeout(struct hrtimer *timer)
{
	struct rt2x00_dev *rt2x00dev =
		container_of(timer, struct rt2x00_dev, txstatus_timer);

	if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
		goto out;

	if (!rt2800_txstatus_pending(rt2x00dev))
		goto out;

	rt2800mmio_fetch_txstatus(rt2x00dev);
	if (!kfifo_is_empty(&rt2x00dev->txstatus_fifo))
		tasklet_schedule(&rt2x00dev->txstatus_tasklet);
	else
		queue_work(rt2x00dev->workqueue, &rt2x00dev->txdone_work);
out:
	return HRTIMER_NORESTART;
}

/* Probe the hardware and install the MMIO-specific tx done handlers. */
int rt2800mmio_probe_hw(struct rt2x00_dev *rt2x00dev)
{
	int retval;

	retval = rt2800_probe_hw(rt2x00dev);
	if (retval)
		return retval;

	/*
	 * Set txstatus timer function.
	 */
	rt2x00dev->txstatus_timer.function = rt2800mmio_tx_sta_fifo_timeout;

	/*
	 * Overwrite TX done handler
	 */
	INIT_WORK(&rt2x00dev->txdone_work, rt2800mmio_work_txdone);

	return 0;
}
EXPORT_SYMBOL_GPL(rt2800mmio_probe_hw);

MODULE_AUTHOR(DRV_PROJECT);
MODULE_VERSION(DRV_VERSION);
MODULE_DESCRIPTION("rt2800 MMIO library");
MODULE_LICENSE("GPL");