/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/kernel.h>
#include <linux/irq.h>

#include "mt76x02.h"
#include "mt76x02_trace.h"

struct beacon_bc_data {
	struct mt76x02_dev *dev;
	struct sk_buff_head q;
	struct sk_buff *tail[8];
};

static void
mt76x02_update_beacon_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
	struct mt76x02_dev *dev = (struct mt76x02_dev *)priv;
	struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
	struct sk_buff *skb = NULL;

	if (!(dev->beacon_mask & BIT(mvif->idx)))
		return;

	skb = ieee80211_beacon_get(mt76_hw(dev), vif);
	if (!skb)
		return;

	mt76x02_mac_set_beacon(dev, mvif->idx, skb);
}

static void
mt76x02_add_buffered_bc(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
	struct beacon_bc_data *data = priv;
	struct mt76x02_dev *dev = data->dev;
	struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
	struct ieee80211_tx_info *info;
	struct sk_buff *skb;

	if (!(dev->beacon_mask & BIT(mvif->idx)))
		return;

	skb = ieee80211_get_buffered_bc(mt76_hw(dev), vif);
	if (!skb)
		return;

	info = IEEE80211_SKB_CB(skb);
	info->control.vif = vif;
	info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ;
	mt76_skb_set_moredata(skb, true);
	__skb_queue_tail(&data->q, skb);
	data->tail[mvif->idx] = skb;
}
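
/*
 * Note on the resync logic below: the interval register counts in
 * 1/16 TU (64us) units while, per the comment in the function, the
 * beacon timer gains about 1us per tick. Over 64 TBTTs that adds up
 * to roughly one unit, so shortening a single beacon interval by one
 * unit every 64 TBTTs should cancel the accumulated drift.
 */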
static void
mt76x02_resync_beacon_timer(struct mt76x02_dev *dev)
{
	u32 timer_val = dev->beacon_int << 4;

	dev->tbtt_count++;

	/*
	 * The beacon timer drifts by 1us every tick; the timer is
	 * configured in units of 1/16 TU (64us).
	 */
	if (dev->tbtt_count < 63)
		return;

	/*
	 * The updated beacon interval takes effect after two TBTTs, because
	 * at this point the original interval has already been loaded into
	 * the next TBTT_TIMER value.
	 */
	if (dev->tbtt_count == 63)
		timer_val -= 1;

	mt76_rmw_field(dev, MT_BEACON_TIME_CFG,
		       MT_BEACON_TIME_CFG_INTVAL, timer_val);

	if (dev->tbtt_count >= 64)
		dev->tbtt_count = 0;
}

static void mt76x02_pre_tbtt_tasklet(unsigned long arg)
{
	struct mt76x02_dev *dev = (struct mt76x02_dev *)arg;
	struct mt76_queue *q = &dev->mt76.q_tx[MT_TXQ_PSD];
	struct beacon_bc_data data = {};
	struct sk_buff *skb;
	int i, nframes;

	mt76x02_resync_beacon_timer(dev);

	data.dev = dev;
	__skb_queue_head_init(&data.q);

	ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
		IEEE80211_IFACE_ITER_RESUME_ALL,
		mt76x02_update_beacon_iter, dev);

	mt76_csa_check(&dev->mt76);

	if (dev->mt76.csa_complete)
		return;

	do {
		nframes = skb_queue_len(&data.q);
		ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
			IEEE80211_IFACE_ITER_RESUME_ALL,
			mt76x02_add_buffered_bc, &data);
	} while (nframes != skb_queue_len(&data.q) &&
		 skb_queue_len(&data.q) < 8);

	if (!skb_queue_len(&data.q))
		return;

	for (i = 0; i < ARRAY_SIZE(data.tail); i++) {
		if (!data.tail[i])
			continue;

		mt76_skb_set_moredata(data.tail[i], false);
	}

	spin_lock_bh(&q->lock);
	while ((skb = __skb_dequeue(&data.q)) != NULL) {
		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
		struct ieee80211_vif *vif = info->control.vif;
		struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;

		mt76_dma_tx_queue_skb(&dev->mt76, q, skb, &mvif->group_wcid,
				      NULL);
	}
	spin_unlock_bh(&q->lock);
}

static int
mt76x02_init_tx_queue(struct mt76x02_dev *dev, struct mt76_queue *q,
		      int idx, int n_desc)
{
	int ret;

	q->regs = dev->mt76.mmio.regs + MT_TX_RING_BASE + idx * MT_RING_SIZE;
	q->ndesc = n_desc;
	q->hw_idx = idx;

	ret = mt76_queue_alloc(dev, q);
	if (ret)
		return ret;

	mt76x02_irq_enable(dev, MT_INT_TX_DONE(idx));

	return 0;
}

static int
mt76x02_init_rx_queue(struct mt76x02_dev *dev, struct mt76_queue *q,
		      int idx, int n_desc, int bufsize)
{
	int ret;

	q->regs = dev->mt76.mmio.regs + MT_RX_RING_BASE + idx * MT_RING_SIZE;
	q->ndesc = n_desc;
	q->buf_size = bufsize;

	ret = mt76_queue_alloc(dev, q);
	if (ret)
		return ret;

	mt76x02_irq_enable(dev, MT_INT_RX_DONE(idx));

	return 0;
}

static void mt76x02_process_tx_status_fifo(struct mt76x02_dev *dev)
{
	struct mt76x02_tx_status stat;
	u8 update = 1;

	while (kfifo_get(&dev->txstatus_fifo, &stat))
		mt76x02_send_tx_status(dev, &stat, &update);
}

static void mt76x02_tx_tasklet(unsigned long data)
{
	struct mt76x02_dev *dev = (struct mt76x02_dev *)data;
	int i;

	mt76x02_process_tx_status_fifo(dev);

	for (i = MT_TXQ_MCU; i >= 0; i--)
		mt76_queue_tx_cleanup(dev, i, false);

	mt76x02_mac_poll_tx_status(dev, false);
	mt76x02_irq_enable(dev, MT_INT_TX_DONE_ALL);
}
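
/*
 * Allocate the TX status FIFO and the TX/pre-TBTT tasklets, then set up
 * one TX ring per access category plus the management (PSD) and MCU TX
 * rings and the MCU/main RX rings before handing the queues over to the
 * mt76 core.
 */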
int mt76x02_dma_init(struct mt76x02_dev *dev)
{
	struct mt76_txwi_cache __maybe_unused *t;
	int i, ret, fifo_size;
	struct mt76_queue *q;
	void *status_fifo;

	BUILD_BUG_ON(sizeof(t->txwi) < sizeof(struct mt76x02_txwi));
	BUILD_BUG_ON(sizeof(struct mt76x02_rxwi) > MT_RX_HEADROOM);

	fifo_size = roundup_pow_of_two(32 * sizeof(struct mt76x02_tx_status));
	status_fifo = devm_kzalloc(dev->mt76.dev, fifo_size, GFP_KERNEL);
	if (!status_fifo)
		return -ENOMEM;

	tasklet_init(&dev->tx_tasklet, mt76x02_tx_tasklet, (unsigned long)dev);
	tasklet_init(&dev->pre_tbtt_tasklet, mt76x02_pre_tbtt_tasklet,
		     (unsigned long)dev);

	kfifo_init(&dev->txstatus_fifo, status_fifo, fifo_size);

	mt76_dma_attach(&dev->mt76);

	mt76_wr(dev, MT_WPDMA_RST_IDX, ~0);

	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		ret = mt76x02_init_tx_queue(dev, &dev->mt76.q_tx[i],
					    mt76_ac_to_hwq(i),
					    MT_TX_RING_SIZE);
		if (ret)
			return ret;
	}

	ret = mt76x02_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_PSD],
				    MT_TX_HW_QUEUE_MGMT, MT_TX_RING_SIZE);
	if (ret)
		return ret;

	ret = mt76x02_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_MCU],
				    MT_TX_HW_QUEUE_MCU, MT_MCU_RING_SIZE);
	if (ret)
		return ret;

	ret = mt76x02_init_rx_queue(dev, &dev->mt76.q_rx[MT_RXQ_MCU], 1,
				    MT_MCU_RING_SIZE, MT_RX_BUF_SIZE);
	if (ret)
		return ret;

	q = &dev->mt76.q_rx[MT_RXQ_MAIN];
	q->buf_offset = MT_RX_HEADROOM - sizeof(struct mt76x02_rxwi);
	ret = mt76x02_init_rx_queue(dev, q, 0, MT76X02_RX_RING_SIZE,
				    MT_RX_BUF_SIZE);
	if (ret)
		return ret;

	return mt76_init_queues(dev);
}
EXPORT_SYMBOL_GPL(mt76x02_dma_init);

void mt76x02_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
{
	struct mt76x02_dev *dev;

	dev = container_of(mdev, struct mt76x02_dev, mt76);
	mt76x02_irq_enable(dev, MT_INT_RX_DONE(q));
}
EXPORT_SYMBOL_GPL(mt76x02_rx_poll_complete);

irqreturn_t mt76x02_irq_handler(int irq, void *dev_instance)
{
	struct mt76x02_dev *dev = dev_instance;
	u32 intr;

	intr = mt76_rr(dev, MT_INT_SOURCE_CSR);
	mt76_wr(dev, MT_INT_SOURCE_CSR, intr);

	if (!test_bit(MT76_STATE_INITIALIZED, &dev->mt76.state))
		return IRQ_NONE;

	trace_dev_irq(dev, intr, dev->mt76.mmio.irqmask);

	intr &= dev->mt76.mmio.irqmask;

	if (intr & MT_INT_TX_DONE_ALL) {
		mt76x02_irq_disable(dev, MT_INT_TX_DONE_ALL);
		tasklet_schedule(&dev->tx_tasklet);
	}

	if (intr & MT_INT_RX_DONE(0)) {
		mt76x02_irq_disable(dev, MT_INT_RX_DONE(0));
		napi_schedule(&dev->mt76.napi[0]);
	}

	if (intr & MT_INT_RX_DONE(1)) {
		mt76x02_irq_disable(dev, MT_INT_RX_DONE(1));
		napi_schedule(&dev->mt76.napi[1]);
	}

	if (intr & MT_INT_PRE_TBTT)
		tasklet_schedule(&dev->pre_tbtt_tasklet);

	/* send buffered multicast frames now */
	if (intr & MT_INT_TBTT) {
		if (dev->mt76.csa_complete)
			mt76_csa_finish(&dev->mt76);
		else
			mt76_queue_kick(dev, &dev->mt76.q_tx[MT_TXQ_PSD]);
	}

	if (intr & MT_INT_TX_STAT) {
		mt76x02_mac_poll_tx_status(dev, true);
		tasklet_schedule(&dev->tx_tasklet);
	}

	if (intr & MT_INT_GPTIMER) {
		mt76x02_irq_disable(dev, MT_INT_GPTIMER);
		tasklet_schedule(&dev->dfs_pd.dfs_tasklet);
	}

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(mt76x02_irq_handler);

void mt76x02_set_irq_mask(struct mt76x02_dev *dev, u32 clear, u32 set)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->mt76.mmio.irq_lock, flags);
	dev->mt76.mmio.irqmask &= ~clear;
	dev->mt76.mmio.irqmask |= set;
	mt76_wr(dev, MT_INT_MASK_CSR, dev->mt76.mmio.irqmask);
	spin_unlock_irqrestore(&dev->mt76.mmio.irq_lock, flags);
}
EXPORT_SYMBOL_GPL(mt76x02_set_irq_mask);
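
/*
 * Bring up the WPDMA engine: enable MAC TX, wait for the DMA engine to
 * become idle, then program the DMA burst size and set the TX/RX DMA
 * enable bits while clearing TX writeback-done.
 */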
static void mt76x02_dma_enable(struct mt76x02_dev *dev)
{
	u32 val;

	mt76_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX);
	mt76x02_wait_for_wpdma(&dev->mt76, 1000);
	usleep_range(50, 100);

	val = FIELD_PREP(MT_WPDMA_GLO_CFG_DMA_BURST_SIZE, 3) |
	      MT_WPDMA_GLO_CFG_TX_DMA_EN |
	      MT_WPDMA_GLO_CFG_RX_DMA_EN;
	mt76_set(dev, MT_WPDMA_GLO_CFG, val);
	mt76_clear(dev, MT_WPDMA_GLO_CFG,
		   MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE);
}

void mt76x02_dma_cleanup(struct mt76x02_dev *dev)
{
	tasklet_kill(&dev->tx_tasklet);
	mt76_dma_cleanup(&dev->mt76);
}
EXPORT_SYMBOL_GPL(mt76x02_dma_cleanup);

void mt76x02_dma_disable(struct mt76x02_dev *dev)
{
	u32 val = mt76_rr(dev, MT_WPDMA_GLO_CFG);

	val &= MT_WPDMA_GLO_CFG_DMA_BURST_SIZE |
	       MT_WPDMA_GLO_CFG_BIG_ENDIAN |
	       MT_WPDMA_GLO_CFG_HDR_SEG_LEN;
	val |= MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE;
	mt76_wr(dev, MT_WPDMA_GLO_CFG, val);
}
EXPORT_SYMBOL_GPL(mt76x02_dma_disable);

void mt76x02_mac_start(struct mt76x02_dev *dev)
{
	mt76x02_dma_enable(dev);
	mt76_wr(dev, MT_RX_FILTR_CFG, dev->mt76.rxfilter);
	mt76_wr(dev, MT_MAC_SYS_CTRL,
		MT_MAC_SYS_CTRL_ENABLE_TX |
		MT_MAC_SYS_CTRL_ENABLE_RX);
	mt76x02_irq_enable(dev,
			   MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL |
			   MT_INT_TX_STAT);
}
EXPORT_SYMBOL_GPL(mt76x02_mac_start);

static bool mt76x02_tx_hang(struct mt76x02_dev *dev)
{
	u32 dma_idx, prev_dma_idx;
	struct mt76_queue *q;
	int i;

	for (i = 0; i < 4; i++) {
		q = &dev->mt76.q_tx[i];

		if (!q->queued)
			continue;

		prev_dma_idx = dev->mt76.tx_dma_idx[i];
		dma_idx = ioread32(&q->regs->dma_idx);
		dev->mt76.tx_dma_idx[i] = dma_idx;

		if (prev_dma_idx == dma_idx)
			break;
	}

	return i < 4;
}

static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
{
	u32 mask = dev->mt76.mmio.irqmask;
	int i;

	ieee80211_stop_queues(dev->mt76.hw);
	set_bit(MT76_RESET, &dev->mt76.state);

	tasklet_disable(&dev->pre_tbtt_tasklet);
	tasklet_disable(&dev->tx_tasklet);

	for (i = 0; i < ARRAY_SIZE(dev->mt76.napi); i++)
		napi_disable(&dev->mt76.napi[i]);

	mutex_lock(&dev->mt76.mutex);

	if (dev->beacon_mask)
		mt76_clear(dev, MT_BEACON_TIME_CFG,
			   MT_BEACON_TIME_CFG_BEACON_TX |
			   MT_BEACON_TIME_CFG_TBTT_EN);

	mt76x02_irq_disable(dev, mask);

	/* perform device reset */
	mt76_clear(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN);
	mt76_wr(dev, MT_MAC_SYS_CTRL, 0);
	mt76_clear(dev, MT_WPDMA_GLO_CFG,
		   MT_WPDMA_GLO_CFG_TX_DMA_EN | MT_WPDMA_GLO_CFG_RX_DMA_EN);
	usleep_range(5000, 10000);
	mt76_wr(dev, MT_INT_SOURCE_CSR, 0xffffffff);

	/* let fw reset DMA */
	mt76_set(dev, 0x734, 0x3);

	for (i = 0; i < ARRAY_SIZE(dev->mt76.q_tx); i++)
		mt76_queue_tx_cleanup(dev, i, true);

	for (i = 0; i < ARRAY_SIZE(dev->mt76.q_rx); i++)
		mt76_queue_rx_reset(dev, i);

	mt76_wr(dev, MT_MAC_SYS_CTRL,
		MT_MAC_SYS_CTRL_ENABLE_TX | MT_MAC_SYS_CTRL_ENABLE_RX);
	mt76_set(dev, MT_WPDMA_GLO_CFG,
		 MT_WPDMA_GLO_CFG_TX_DMA_EN | MT_WPDMA_GLO_CFG_RX_DMA_EN);
	if (dev->ed_monitor)
		mt76_set(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN);

	if (dev->beacon_mask)
		mt76_set(dev, MT_BEACON_TIME_CFG,
			 MT_BEACON_TIME_CFG_BEACON_TX |
			 MT_BEACON_TIME_CFG_TBTT_EN);

	mt76x02_irq_enable(dev, mask);

	mutex_unlock(&dev->mt76.mutex);

	clear_bit(MT76_RESET, &dev->mt76.state);

	tasklet_enable(&dev->tx_tasklet);
	tasklet_schedule(&dev->tx_tasklet);

	tasklet_enable(&dev->pre_tbtt_tasklet);

	for (i = 0; i < ARRAY_SIZE(dev->mt76.napi); i++) {
		napi_enable(&dev->mt76.napi[i]);
		napi_schedule(&dev->mt76.napi[i]);
	}

	ieee80211_wake_queues(dev->mt76.hw);

	mt76_txq_schedule_all(&dev->mt76);
}
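
/*
 * Hang detection policy: a TX queue with pending frames whose hardware
 * DMA index has not advanced since the previous watchdog run counts as
 * a potential hang; after MT_TX_HANG_TH consecutive hits, or on an MCU
 * command timeout, a full watchdog reset is performed.
 */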
static void mt76x02_check_tx_hang(struct mt76x02_dev *dev)
{
	if (mt76x02_tx_hang(dev)) {
		if (++dev->tx_hang_check >= MT_TX_HANG_TH)
			goto restart;
	} else {
		dev->tx_hang_check = 0;
	}

	if (dev->mcu_timeout)
		goto restart;

	return;

restart:
	mt76x02_watchdog_reset(dev);

	mutex_lock(&dev->mt76.mmio.mcu.mutex);
	dev->mcu_timeout = 0;
	mutex_unlock(&dev->mt76.mmio.mcu.mutex);

	dev->tx_hang_reset++;
	dev->tx_hang_check = 0;
	memset(dev->mt76.tx_dma_idx, 0xff,
	       sizeof(dev->mt76.tx_dma_idx));
}

void mt76x02_wdt_work(struct work_struct *work)
{
	struct mt76x02_dev *dev = container_of(work, struct mt76x02_dev,
					       wdt_work.work);

	mt76x02_check_tx_hang(dev);

	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->wdt_work,
				     MT_WATCHDOG_TIME);
}