// SPDX-License-Identifier: GPL-2.0-or-later
/* Copyright (C) 2009 - 2010 Ivo van Doorn <IvDoorn@gmail.com>
 * Copyright (C) 2009 Alban Browaeys <prahal@yahoo.com>
 * Copyright (C) 2009 Felix Fietkau <nbd@openwrt.org>
 * Copyright (C) 2009 Luis Correia <luis.f.correia@gmail.com>
 * Copyright (C) 2009 Mattias Nissler <mattias.nissler@gmx.de>
 * Copyright (C) 2009 Mark Asselstine <asselsm@gmail.com>
 * Copyright (C) 2009 Xose Vazquez Perez <xose.vazquez@gmail.com>
 * Copyright (C) 2009 Bart Zolnierkiewicz <bzolnier@gmail.com>
 * <http://rt2x00.serialmonkey.com>
 */

/* Module: rt2800mmio
 * Abstract: rt2800 MMIO device routines.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/export.h>

#include "rt2x00.h"
#include "rt2x00mmio.h"
#include "rt2800.h"
#include "rt2800lib.h"
#include "rt2800mmio.h"

/*
 * TX descriptor initialization
 */
__le32 *rt2800mmio_get_txwi(struct queue_entry *entry)
{
	return (__le32 *) entry->skb->data;
}
EXPORT_SYMBOL_GPL(rt2800mmio_get_txwi);

void rt2800mmio_write_tx_desc(struct queue_entry *entry,
			      struct txentry_desc *txdesc)
{
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
	struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
	__le32 *txd = entry_priv->desc;
	u32 word;
	const unsigned int txwi_size = entry->queue->winfo_size;

	/*
	 * The buffers pointed to by SD_PTR0/SD_LEN0 and SD_PTR1/SD_LEN1
	 * must contain a TXWI structure + 802.11 header + padding + 802.11
	 * data. We choose to have SD_PTR0/SD_LEN0 only contain the TXWI,
	 * and SD_PTR1/SD_LEN1 contain the 802.11 header + padding + 802.11
	 * data. This means that LAST_SEC0 is always 0.
	 */

	/*
	 * Initialize TX descriptor
	 */
	word = 0;
	rt2x00_set_field32(&word, TXD_W0_SD_PTR0, skbdesc->skb_dma);
	rt2x00_desc_write(txd, 0, word);

	word = 0;
	rt2x00_set_field32(&word, TXD_W1_SD_LEN1, entry->skb->len);
	rt2x00_set_field32(&word, TXD_W1_LAST_SEC1,
			   !test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags));
	rt2x00_set_field32(&word, TXD_W1_BURST,
			   test_bit(ENTRY_TXD_BURST, &txdesc->flags));
	rt2x00_set_field32(&word, TXD_W1_SD_LEN0, txwi_size);
	rt2x00_set_field32(&word, TXD_W1_LAST_SEC0, 0);
	rt2x00_set_field32(&word, TXD_W1_DMA_DONE, 0);
	rt2x00_desc_write(txd, 1, word);

	word = 0;
	rt2x00_set_field32(&word, TXD_W2_SD_PTR1,
			   skbdesc->skb_dma + txwi_size);
	rt2x00_desc_write(txd, 2, word);

	word = 0;
	rt2x00_set_field32(&word, TXD_W3_WIV,
			   !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc->flags));
	rt2x00_set_field32(&word, TXD_W3_QSEL, 2);
	rt2x00_desc_write(txd, 3, word);

	/*
	 * Register descriptor details in skb frame descriptor.
	 */
	skbdesc->desc = txd;
	skbdesc->desc_len = TXD_DESC_SIZE;
}
EXPORT_SYMBOL_GPL(rt2800mmio_write_tx_desc);
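
/*
 * For reference, a sketch of the DMA layout described by the scatter
 * fields set up above (widths not to scale):
 *
 *	skb_dma
 *	|       skb_dma + txwi_size
 *	v       v
 *	+------+---------------+---------+-------------+
 *	| TXWI | 802.11 header | padding | 802.11 data |
 *	+------+---------------+---------+-------------+
 *	|<---->|<------------------------------------->|
 *	 SD_PTR0/SD_LEN0        SD_PTR1/SD_LEN1
 *
 * Since SD_LEN0 covers exactly the TXWI, the first section can never
 * be the final one, which is why LAST_SEC0 is hardcoded to 0.
 */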

/*
 * RX control handlers
 */
void rt2800mmio_fill_rxdone(struct queue_entry *entry,
			    struct rxdone_entry_desc *rxdesc)
{
	struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
	__le32 *rxd = entry_priv->desc;
	u32 word;

	word = rt2x00_desc_read(rxd, 3);

	if (rt2x00_get_field32(word, RXD_W3_CRC_ERROR))
		rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC;

	/*
	 * Unfortunately we don't know the cipher type used during
	 * decryption. This prevents us from providing correct
	 * statistics through debugfs.
	 */
	rxdesc->cipher_status = rt2x00_get_field32(word, RXD_W3_CIPHER_ERROR);

	if (rt2x00_get_field32(word, RXD_W3_DECRYPTED)) {
		/*
		 * Hardware has stripped IV/EIV data from 802.11 frame during
		 * decryption. Unfortunately the descriptor doesn't contain
		 * any fields with the EIV/IV data either, so they can't
		 * be restored by rt2x00lib.
		 */
		rxdesc->flags |= RX_FLAG_IV_STRIPPED;

		/*
		 * The hardware has already checked the Michael Mic and has
		 * stripped it from the frame. Signal this to mac80211.
		 */
		rxdesc->flags |= RX_FLAG_MMIC_STRIPPED;

		if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS) {
			rxdesc->flags |= RX_FLAG_DECRYPTED;
		} else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC) {
			/*
			 * In order to check the Michael Mic, the packet must
			 * have been decrypted. mac80211 doesn't check the
			 * MMIC failure flag to initiate MMIC countermeasures
			 * if the decrypted flag has not been set.
			 */
			rxdesc->flags |= RX_FLAG_DECRYPTED;

			rxdesc->flags |= RX_FLAG_MMIC_ERROR;
		}
	}

	if (rt2x00_get_field32(word, RXD_W3_MY_BSS))
		rxdesc->dev_flags |= RXDONE_MY_BSS;

	if (rt2x00_get_field32(word, RXD_W3_L2PAD))
		rxdesc->dev_flags |= RXDONE_L2PAD;

	/*
	 * Process the RXWI structure that is at the start of the buffer.
	 */
	rt2800_process_rxwi(entry, rxdesc);
}
EXPORT_SYMBOL_GPL(rt2800mmio_fill_rxdone);

/*
 * Interrupt functions.
 */
static void rt2800mmio_wakeup(struct rt2x00_dev *rt2x00dev)
{
	struct ieee80211_conf conf = { .flags = 0 };
	struct rt2x00lib_conf libconf = { .conf = &conf };

	rt2800_config(rt2x00dev, &libconf, IEEE80211_CONF_CHANGE_PS);
}

static inline void rt2800mmio_enable_interrupt(struct rt2x00_dev *rt2x00dev,
					       struct rt2x00_field32 irq_field)
{
	u32 reg;

	/*
	 * Enable a single interrupt. The interrupt mask register
	 * access needs locking.
	 */
	spin_lock_irq(&rt2x00dev->irqmask_lock);
	reg = rt2x00mmio_register_read(rt2x00dev, INT_MASK_CSR);
	rt2x00_set_field32(&reg, irq_field, 1);
	rt2x00mmio_register_write(rt2x00dev, INT_MASK_CSR, reg);
	spin_unlock_irq(&rt2x00dev->irqmask_lock);
}
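
/*
 * All tasklets below follow the same pattern, which pairs with
 * rt2800mmio_interrupt() further down: the hard-irq handler acks an
 * interrupt source, masks it and schedules the matching tasklet; the
 * tasklet performs the actual work and, provided the radio is still
 * enabled, re-enables its source through rt2800mmio_enable_interrupt().
 * Keeping the source masked prevents it from re-firing while the
 * bottom half runs.
 */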

void rt2800mmio_pretbtt_tasklet(unsigned long data)
{
	struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;

	rt2x00lib_pretbtt(rt2x00dev);
	if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
		rt2800mmio_enable_interrupt(rt2x00dev, INT_MASK_CSR_PRE_TBTT);
}
EXPORT_SYMBOL_GPL(rt2800mmio_pretbtt_tasklet);

void rt2800mmio_tbtt_tasklet(unsigned long data)
{
	struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
	struct rt2800_drv_data *drv_data = rt2x00dev->drv_data;
	u32 reg;

	rt2x00lib_beacondone(rt2x00dev);

	if (rt2x00dev->intf_ap_count) {
		/*
		 * The rt2800pci hardware tbtt timer is off by 1us per tbtt
		 * causing beacon skew and as a result causing problems with
		 * some powersaving clients over time. Shorten the beacon
		 * interval every 64 beacons by 64us to mitigate this effect.
		 */
		if (drv_data->tbtt_tick == (BCN_TBTT_OFFSET - 2)) {
			reg = rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG);
			rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_INTERVAL,
					   (rt2x00dev->beacon_int * 16) - 1);
			rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg);
		} else if (drv_data->tbtt_tick == (BCN_TBTT_OFFSET - 1)) {
			reg = rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG);
			rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_INTERVAL,
					   (rt2x00dev->beacon_int * 16));
			rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg);
		}
		drv_data->tbtt_tick++;
		drv_data->tbtt_tick %= BCN_TBTT_OFFSET;
	}

	if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
		rt2800mmio_enable_interrupt(rt2x00dev, INT_MASK_CSR_TBTT);
}
EXPORT_SYMBOL_GPL(rt2800mmio_tbtt_tasklet);

void rt2800mmio_rxdone_tasklet(unsigned long data)
{
	struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;

	if (rt2x00mmio_rxdone(rt2x00dev))
		tasklet_schedule(&rt2x00dev->rxdone_tasklet);
	else if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
		rt2800mmio_enable_interrupt(rt2x00dev, INT_MASK_CSR_RX_DONE);
}
EXPORT_SYMBOL_GPL(rt2800mmio_rxdone_tasklet);

void rt2800mmio_autowake_tasklet(unsigned long data)
{
	struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;

	rt2800mmio_wakeup(rt2x00dev);
	if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
		rt2800mmio_enable_interrupt(rt2x00dev,
					    INT_MASK_CSR_AUTO_WAKEUP);
}
EXPORT_SYMBOL_GPL(rt2800mmio_autowake_tasklet);

static void rt2800mmio_fetch_txstatus(struct rt2x00_dev *rt2x00dev)
{
	u32 status;
	unsigned long flags;

	/*
	 * The TX_FIFO_STATUS interrupt needs special care. We should
	 * read TX_STA_FIFO but we should do it immediately as otherwise
	 * the register can overflow and we would lose status reports.
	 *
	 * Hence, read the TX_STA_FIFO register and copy all tx status
	 * reports into a kernel FIFO which is handled in the txstatus
	 * tasklet. We use a tasklet to process the tx status reports
	 * because we can schedule the tasklet multiple times (when the
	 * interrupt fires again during tx status processing).
	 *
	 * We also read statuses from the tx status timeout timer; use
	 * the lock to prevent concurrent writes to the fifo.
	 */

	spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);

	while (!kfifo_is_full(&rt2x00dev->txstatus_fifo)) {
		status = rt2x00mmio_register_read(rt2x00dev, TX_STA_FIFO);
		if (!rt2x00_get_field32(status, TX_STA_FIFO_VALID))
			break;

		kfifo_put(&rt2x00dev->txstatus_fifo, status);
	}

	spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
}
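
/*
 * A note on the fifo above: a kfifo of u32 needs no extra locking with
 * a single concurrent producer and consumer, but here there are two
 * producers (the hard-irq handler and the tx status timeout timer),
 * hence the irqmask_lock around the fill loop. The consumer side in
 * rt2800_txdone() (rt2800lib) drains it roughly as follows (a sketch
 * only, see rt2800lib for the real loop):
 *
 *	while (kfifo_get(&rt2x00dev->txstatus_fifo, &status))
 *		...match status against the oldest pending tx entry...
 */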

void rt2800mmio_txstatus_tasklet(unsigned long data)
{
	struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;

	rt2800_txdone(rt2x00dev, 16);

	if (!kfifo_is_empty(&rt2x00dev->txstatus_fifo))
		tasklet_schedule(&rt2x00dev->txstatus_tasklet);
}
EXPORT_SYMBOL_GPL(rt2800mmio_txstatus_tasklet);

irqreturn_t rt2800mmio_interrupt(int irq, void *dev_instance)
{
	struct rt2x00_dev *rt2x00dev = dev_instance;
	u32 reg, mask;

	/* Read status and ACK all interrupts */
	reg = rt2x00mmio_register_read(rt2x00dev, INT_SOURCE_CSR);
	rt2x00mmio_register_write(rt2x00dev, INT_SOURCE_CSR, reg);

	if (!reg)
		return IRQ_NONE;

	if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
		return IRQ_HANDLED;

	/*
	 * Since INT_MASK_CSR and INT_SOURCE_CSR use the same bits
	 * for interrupts and interrupt masks we can just use the value of
	 * INT_SOURCE_CSR to create the interrupt mask.
	 */
	mask = ~reg;

	if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TX_FIFO_STATUS)) {
		rt2x00_set_field32(&mask, INT_MASK_CSR_TX_FIFO_STATUS, 1);
		rt2800mmio_fetch_txstatus(rt2x00dev);
		if (!kfifo_is_empty(&rt2x00dev->txstatus_fifo))
			tasklet_schedule(&rt2x00dev->txstatus_tasklet);
	}

	if (rt2x00_get_field32(reg, INT_SOURCE_CSR_PRE_TBTT))
		tasklet_hi_schedule(&rt2x00dev->pretbtt_tasklet);

	if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TBTT))
		tasklet_hi_schedule(&rt2x00dev->tbtt_tasklet);

	if (rt2x00_get_field32(reg, INT_SOURCE_CSR_RX_DONE))
		tasklet_schedule(&rt2x00dev->rxdone_tasklet);

	if (rt2x00_get_field32(reg, INT_SOURCE_CSR_AUTO_WAKEUP))
		tasklet_schedule(&rt2x00dev->autowake_tasklet);

	/*
	 * Disable all interrupts for which a tasklet was scheduled right
	 * now; the tasklets will re-enable the appropriate interrupts.
	 */
	spin_lock(&rt2x00dev->irqmask_lock);
	reg = rt2x00mmio_register_read(rt2x00dev, INT_MASK_CSR);
	reg &= mask;
	rt2x00mmio_register_write(rt2x00dev, INT_MASK_CSR, reg);
	spin_unlock(&rt2x00dev->irqmask_lock);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(rt2800mmio_interrupt);
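
/*
 * Note that rt2800mmio_interrupt() above deliberately forces the
 * TX_FIFO_STATUS bit back to 1 in the new mask: the status fifo is
 * drained right in hard-irq context by rt2800mmio_fetch_txstatus(),
 * so this source must stay enabled to avoid losing status reports,
 * while every other flagged source stays masked until its tasklet has
 * re-enabled it.
 */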

void rt2800mmio_toggle_irq(struct rt2x00_dev *rt2x00dev,
			   enum dev_state state)
{
	u32 reg;
	unsigned long flags;

	/*
	 * When interrupts are being enabled, the interrupt source
	 * register should be cleared first to assure a clean state.
	 */
	if (state == STATE_RADIO_IRQ_ON) {
		reg = rt2x00mmio_register_read(rt2x00dev, INT_SOURCE_CSR);
		rt2x00mmio_register_write(rt2x00dev, INT_SOURCE_CSR, reg);
	}

	spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
	reg = 0;
	if (state == STATE_RADIO_IRQ_ON) {
		rt2x00_set_field32(&reg, INT_MASK_CSR_RX_DONE, 1);
		rt2x00_set_field32(&reg, INT_MASK_CSR_TBTT, 1);
		rt2x00_set_field32(&reg, INT_MASK_CSR_PRE_TBTT, 1);
		rt2x00_set_field32(&reg, INT_MASK_CSR_TX_FIFO_STATUS, 1);
		rt2x00_set_field32(&reg, INT_MASK_CSR_AUTO_WAKEUP, 1);
	}
	rt2x00mmio_register_write(rt2x00dev, INT_MASK_CSR, reg);
	spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);

	if (state == STATE_RADIO_IRQ_OFF) {
		/*
		 * Wait for possibly running tasklets to finish.
		 */
		tasklet_kill(&rt2x00dev->txstatus_tasklet);
		tasklet_kill(&rt2x00dev->rxdone_tasklet);
		tasklet_kill(&rt2x00dev->autowake_tasklet);
		tasklet_kill(&rt2x00dev->tbtt_tasklet);
		tasklet_kill(&rt2x00dev->pretbtt_tasklet);
	}
}
EXPORT_SYMBOL_GPL(rt2800mmio_toggle_irq);

/*
 * Queue handlers.
 */
void rt2800mmio_start_queue(struct data_queue *queue)
{
	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
	u32 reg;

	switch (queue->qid) {
	case QID_RX:
		reg = rt2x00mmio_register_read(rt2x00dev, MAC_SYS_CTRL);
		rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 1);
		rt2x00mmio_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
		break;
	case QID_BEACON:
		reg = rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG);
		rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1);
		rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 1);
		rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1);
		rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg);

		reg = rt2x00mmio_register_read(rt2x00dev, INT_TIMER_EN);
		rt2x00_set_field32(&reg, INT_TIMER_EN_PRE_TBTT_TIMER, 1);
		rt2x00mmio_register_write(rt2x00dev, INT_TIMER_EN, reg);
		break;
	default:
		break;
	}
}
EXPORT_SYMBOL_GPL(rt2800mmio_start_queue);

/* 200 ms */
#define TXSTATUS_TIMEOUT 200000000
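
/*
 * TXSTATUS_TIMEOUT is a raw nanosecond count: hrtimer_start() takes a
 * ktime_t, which is a signed 64-bit number of nanoseconds, so the
 * value above arms the timer 200 ms (relative) after a queue has been
 * kicked.
 */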

void rt2800mmio_kick_queue(struct data_queue *queue)
{
	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
	struct queue_entry *entry;

	switch (queue->qid) {
	case QID_AC_VO:
	case QID_AC_VI:
	case QID_AC_BE:
	case QID_AC_BK:
		WARN_ON_ONCE(rt2x00queue_empty(queue));
		entry = rt2x00queue_get_entry(queue, Q_INDEX);
		rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX(queue->qid),
					  entry->entry_idx);
		hrtimer_start(&rt2x00dev->txstatus_timer,
			      TXSTATUS_TIMEOUT, HRTIMER_MODE_REL);
		break;
	case QID_MGMT:
		entry = rt2x00queue_get_entry(queue, Q_INDEX);
		rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX(5),
					  entry->entry_idx);
		break;
	default:
		break;
	}
}
EXPORT_SYMBOL_GPL(rt2800mmio_kick_queue);

void rt2800mmio_flush_queue(struct data_queue *queue, bool drop)
{
	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
	bool tx_queue = false;
	unsigned int i;

	switch (queue->qid) {
	case QID_AC_VO:
	case QID_AC_VI:
	case QID_AC_BE:
	case QID_AC_BK:
		tx_queue = true;
		break;
	case QID_RX:
		break;
	default:
		return;
	}

	for (i = 0; i < 5; i++) {
		/*
		 * Check if the driver is already done, otherwise we
		 * have to sleep a little while to give the driver/hw
		 * the opportunity to complete the interrupt processing
		 * itself.
		 */
		if (rt2x00queue_empty(queue))
			break;

		/*
		 * For TX queues schedule the txdone work item to catch
		 * tx status timeouts, otherwise just wait.
		 */
		if (tx_queue)
			queue_work(rt2x00dev->workqueue,
				   &rt2x00dev->txdone_work);

		/*
		 * Wait for a little while to give the driver
		 * the opportunity to recover itself.
		 */
		msleep(50);
	}
}
EXPORT_SYMBOL_GPL(rt2800mmio_flush_queue);
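
/*
 * The flush loop above is bounded: at most 5 iterations with a 50 ms
 * sleep each, so a flush waits roughly 250 ms per queue in the worst
 * case before giving up.
 */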

void rt2800mmio_stop_queue(struct data_queue *queue)
{
	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
	u32 reg;

	switch (queue->qid) {
	case QID_RX:
		reg = rt2x00mmio_register_read(rt2x00dev, MAC_SYS_CTRL);
		rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 0);
		rt2x00mmio_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
		break;
	case QID_BEACON:
		reg = rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG);
		rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 0);
		rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 0);
		rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0);
		rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg);

		reg = rt2x00mmio_register_read(rt2x00dev, INT_TIMER_EN);
		rt2x00_set_field32(&reg, INT_TIMER_EN_PRE_TBTT_TIMER, 0);
		rt2x00mmio_register_write(rt2x00dev, INT_TIMER_EN, reg);

		/*
		 * Wait for current invocation to finish. The tasklet
		 * won't be scheduled anymore afterwards since we disabled
		 * the TBTT and PRE TBTT timer.
		 */
		tasklet_kill(&rt2x00dev->tbtt_tasklet);
		tasklet_kill(&rt2x00dev->pretbtt_tasklet);

		break;
	default:
		break;
	}
}
EXPORT_SYMBOL_GPL(rt2800mmio_stop_queue);

void rt2800mmio_queue_init(struct data_queue *queue)
{
	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
	unsigned short txwi_size, rxwi_size;

	rt2800_get_txwi_rxwi_size(rt2x00dev, &txwi_size, &rxwi_size);

	switch (queue->qid) {
	case QID_RX:
		queue->limit = 128;
		queue->data_size = AGGREGATION_SIZE;
		queue->desc_size = RXD_DESC_SIZE;
		queue->winfo_size = rxwi_size;
		queue->priv_size = sizeof(struct queue_entry_priv_mmio);
		break;

	case QID_AC_VO:
	case QID_AC_VI:
	case QID_AC_BE:
	case QID_AC_BK:
		queue->limit = 64;
		queue->data_size = AGGREGATION_SIZE;
		queue->desc_size = TXD_DESC_SIZE;
		queue->winfo_size = txwi_size;
		queue->priv_size = sizeof(struct queue_entry_priv_mmio);
		break;

	case QID_BEACON:
		queue->limit = 8;
		queue->data_size = 0; /* No DMA required for beacons */
		queue->desc_size = TXD_DESC_SIZE;
		queue->winfo_size = txwi_size;
		queue->priv_size = sizeof(struct queue_entry_priv_mmio);
		break;

	case QID_ATIM:
		/* fallthrough */
	default:
		BUG();
		break;
	}
}
EXPORT_SYMBOL_GPL(rt2800mmio_queue_init);

/*
 * Initialization functions.
 */
bool rt2800mmio_get_entry_state(struct queue_entry *entry)
{
	struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
	u32 word;

	if (entry->queue->qid == QID_RX) {
		word = rt2x00_desc_read(entry_priv->desc, 1);

		return (!rt2x00_get_field32(word, RXD_W1_DMA_DONE));
	} else {
		word = rt2x00_desc_read(entry_priv->desc, 1);

		return (!rt2x00_get_field32(word, TXD_W1_DMA_DONE));
	}
}
EXPORT_SYMBOL_GPL(rt2800mmio_get_entry_state);

void rt2800mmio_clear_entry(struct queue_entry *entry)
{
	struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	u32 word;

	if (entry->queue->qid == QID_RX) {
		word = rt2x00_desc_read(entry_priv->desc, 0);
		rt2x00_set_field32(&word, RXD_W0_SDP0, skbdesc->skb_dma);
		rt2x00_desc_write(entry_priv->desc, 0, word);

		word = rt2x00_desc_read(entry_priv->desc, 1);
		rt2x00_set_field32(&word, RXD_W1_DMA_DONE, 0);
		rt2x00_desc_write(entry_priv->desc, 1, word);

		/*
		 * Set RX IDX in register to inform hardware that we have
		 * handled this entry and it is available for reuse again.
		 */
		rt2x00mmio_register_write(rt2x00dev, RX_CRX_IDX,
					  entry->entry_idx);
	} else {
		word = rt2x00_desc_read(entry_priv->desc, 1);
		rt2x00_set_field32(&word, TXD_W1_DMA_DONE, 1);
		rt2x00_desc_write(entry_priv->desc, 1, word);

		/* If this was the last pending entry, stop the txstatus timer */
		if (entry->queue->length == 1)
			hrtimer_cancel(&rt2x00dev->txstatus_timer);
	}
}
EXPORT_SYMBOL_GPL(rt2800mmio_clear_entry);
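
/*
 * Together the two helpers above implement the descriptor ownership
 * handshake: rt2800mmio_get_entry_state() inspects the DMA_DONE bit
 * to tell whether the hardware is still using an entry, while
 * rt2800mmio_clear_entry() resets the bit and, for RX entries, writes
 * RX_CRX_IDX to hand the descriptor back to the hardware for reuse.
 */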

int rt2800mmio_init_queues(struct rt2x00_dev *rt2x00dev)
{
	struct queue_entry_priv_mmio *entry_priv;

	/*
	 * Initialize registers.
	 */
	entry_priv = rt2x00dev->tx[0].entries[0].priv_data;
	rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR0,
				  entry_priv->desc_dma);
	rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT0,
				  rt2x00dev->tx[0].limit);
	rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX0, 0);
	rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX0, 0);

	entry_priv = rt2x00dev->tx[1].entries[0].priv_data;
	rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR1,
				  entry_priv->desc_dma);
	rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT1,
				  rt2x00dev->tx[1].limit);
	rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX1, 0);
	rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX1, 0);

	entry_priv = rt2x00dev->tx[2].entries[0].priv_data;
	rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR2,
				  entry_priv->desc_dma);
	rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT2,
				  rt2x00dev->tx[2].limit);
	rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX2, 0);
	rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX2, 0);

	entry_priv = rt2x00dev->tx[3].entries[0].priv_data;
	rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR3,
				  entry_priv->desc_dma);
	rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT3,
				  rt2x00dev->tx[3].limit);
	rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX3, 0);
	rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX3, 0);

	rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR4, 0);
	rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT4, 0);
	rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX4, 0);
	rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX4, 0);

	rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR5, 0);
	rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT5, 0);
	rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX5, 0);
	rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX5, 0);

	entry_priv = rt2x00dev->rx->entries[0].priv_data;
	rt2x00mmio_register_write(rt2x00dev, RX_BASE_PTR,
				  entry_priv->desc_dma);
	rt2x00mmio_register_write(rt2x00dev, RX_MAX_CNT,
				  rt2x00dev->rx[0].limit);
	rt2x00mmio_register_write(rt2x00dev, RX_CRX_IDX,
				  rt2x00dev->rx[0].limit - 1);
	rt2x00mmio_register_write(rt2x00dev, RX_DRX_IDX, 0);

	rt2800_disable_wpdma(rt2x00dev);

	rt2x00mmio_register_write(rt2x00dev, DELAY_INT_CFG, 0);

	return 0;
}
EXPORT_SYMBOL_GPL(rt2800mmio_init_queues);

int rt2800mmio_init_registers(struct rt2x00_dev *rt2x00dev)
{
	u32 reg;

	/*
	 * Reset DMA indexes
	 */
	reg = rt2x00mmio_register_read(rt2x00dev, WPDMA_RST_IDX);
	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX0, 1);
	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX1, 1);
	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX2, 1);
	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX3, 1);
	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX4, 1);
	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX5, 1);
	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DRX_IDX0, 1);
	rt2x00mmio_register_write(rt2x00dev, WPDMA_RST_IDX, reg);

	rt2x00mmio_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e1f);
	rt2x00mmio_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00);

	if (rt2x00_is_pcie(rt2x00dev) &&
	    (rt2x00_rt(rt2x00dev, RT3090) ||
	     rt2x00_rt(rt2x00dev, RT3390) ||
	     rt2x00_rt(rt2x00dev, RT3572) ||
	     rt2x00_rt(rt2x00dev, RT3593) ||
	     rt2x00_rt(rt2x00dev, RT5390) ||
	     rt2x00_rt(rt2x00dev, RT5392) ||
	     rt2x00_rt(rt2x00dev, RT5592))) {
		reg = rt2x00mmio_register_read(rt2x00dev, AUX_CTRL);
		rt2x00_set_field32(&reg, AUX_CTRL_FORCE_PCIE_CLK, 1);
		rt2x00_set_field32(&reg, AUX_CTRL_WAKE_PCIE_EN, 1);
		rt2x00mmio_register_write(rt2x00dev, AUX_CTRL, reg);
	}

	rt2x00mmio_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000003);

	reg = 0;
	rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_CSR, 1);
	rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_BBP, 1);
	rt2x00mmio_register_write(rt2x00dev, MAC_SYS_CTRL, reg);

	rt2x00mmio_register_write(rt2x00dev, MAC_SYS_CTRL, 0x00000000);

	return 0;
}
EXPORT_SYMBOL_GPL(rt2800mmio_init_registers);
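
/*
 * The final two MAC_SYS_CTRL writes above pulse the reset lines: the
 * first write asserts RESET_CSR and RESET_BBP, the second clears the
 * whole register again, taking the MAC and BBP back out of reset.
 */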

/*
 * Device state switch handlers.
 */
int rt2800mmio_enable_radio(struct rt2x00_dev *rt2x00dev)
{
	/* Wait for DMA, ignore error until we initialize queues. */
	rt2800_wait_wpdma_ready(rt2x00dev);

	if (unlikely(rt2800mmio_init_queues(rt2x00dev)))
		return -EIO;

	return rt2800_enable_radio(rt2x00dev);
}
EXPORT_SYMBOL_GPL(rt2800mmio_enable_radio);

static void rt2800mmio_work_txdone(struct work_struct *work)
{
	struct rt2x00_dev *rt2x00dev =
	    container_of(work, struct rt2x00_dev, txdone_work);

	if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
		return;

	while (!kfifo_is_empty(&rt2x00dev->txstatus_fifo) ||
	       rt2800_txstatus_timeout(rt2x00dev)) {
		tasklet_disable(&rt2x00dev->txstatus_tasklet);
		rt2800_txdone(rt2x00dev, UINT_MAX);
		rt2800_txdone_nostatus(rt2x00dev);
		tasklet_enable(&rt2x00dev->txstatus_tasklet);
	}

	if (rt2800_txstatus_pending(rt2x00dev))
		hrtimer_start(&rt2x00dev->txstatus_timer,
			      TXSTATUS_TIMEOUT, HRTIMER_MODE_REL);
}

static enum hrtimer_restart rt2800mmio_tx_sta_fifo_timeout(struct hrtimer *timer)
{
	struct rt2x00_dev *rt2x00dev =
	    container_of(timer, struct rt2x00_dev, txstatus_timer);

	if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
		goto out;

	if (!rt2800_txstatus_pending(rt2x00dev))
		goto out;

	rt2800mmio_fetch_txstatus(rt2x00dev);
	if (!kfifo_is_empty(&rt2x00dev->txstatus_fifo))
		tasklet_schedule(&rt2x00dev->txstatus_tasklet);
	else
		queue_work(rt2x00dev->workqueue, &rt2x00dev->txdone_work);

out:
	return HRTIMER_NORESTART;
}

int rt2800mmio_probe_hw(struct rt2x00_dev *rt2x00dev)
{
	int retval;

	retval = rt2800_probe_hw(rt2x00dev);
	if (retval)
		return retval;

	/*
	 * Set txstatus timer function.
	 */
	rt2x00dev->txstatus_timer.function = rt2800mmio_tx_sta_fifo_timeout;

	/*
	 * Overwrite TX done handler
	 */
	INIT_WORK(&rt2x00dev->txdone_work, rt2800mmio_work_txdone);

	return 0;
}
EXPORT_SYMBOL_GPL(rt2800mmio_probe_hw);

MODULE_AUTHOR(DRV_PROJECT);
MODULE_VERSION(DRV_VERSION);
MODULE_DESCRIPTION("rt2800 MMIO library");
MODULE_LICENSE("GPL");