/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/kernel.h>
#include <linux/irq.h>

#include "mt76x02.h"
#include "mt76x02_trace.h"

struct beacon_bc_data {
	struct mt76x02_dev *dev;
	struct sk_buff_head q;
	struct sk_buff *tail[8];
};

static void
mt76x02_update_beacon_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
	struct mt76x02_dev *dev = (struct mt76x02_dev *)priv;
	struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
	struct sk_buff *skb = NULL;

	if (!(dev->beacon_mask & BIT(mvif->idx)))
		return;

	skb = ieee80211_beacon_get(mt76_hw(dev), vif);
	if (!skb)
		return;

	mt76x02_mac_set_beacon(dev, mvif->idx, skb);
}

static void
mt76x02_add_buffered_bc(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
	struct beacon_bc_data *data = priv;
	struct mt76x02_dev *dev = data->dev;
	struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
	struct ieee80211_tx_info *info;
	struct sk_buff *skb;

	if (!(dev->beacon_mask & BIT(mvif->idx)))
		return;

	skb = ieee80211_get_buffered_bc(mt76_hw(dev), vif);
	if (!skb)
		return;

	info = IEEE80211_SKB_CB(skb);
	info->control.vif = vif;
	info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ;
	mt76_skb_set_moredata(skb, true);
	__skb_queue_tail(&data->q, skb);
	data->tail[mvif->idx] = skb;
}

static void
mt76x02_resync_beacon_timer(struct mt76x02_dev *dev)
{
	u32 timer_val = dev->beacon_int << 4;

	dev->tbtt_count++;

	/*
	 * Beacon timer drifts by 1us every tick, the timer is configured
	 * in 1/16 TU (64us) units.
	 */
	if (dev->tbtt_count < 62)
		return;

	if (dev->tbtt_count >= 64) {
		dev->tbtt_count = 0;
		return;
	}

	/*
	 * The updated beacon interval takes effect after two TBTT, because
	 * at this point the original interval has already been loaded into
	 * the next TBTT_TIMER value
	 */
	if (dev->tbtt_count == 62)
		timer_val -= 1;

	mt76_rmw_field(dev, MT_BEACON_TIME_CFG,
		       MT_BEACON_TIME_CFG_INTVAL, timer_val);
}

static void mt76x02_pre_tbtt_tasklet(unsigned long arg)
{
	struct mt76x02_dev *dev = (struct mt76x02_dev *)arg;
	struct mt76_queue *q = &dev->mt76.q_tx[MT_TXQ_PSD];
	struct beacon_bc_data data = {};
	struct sk_buff *skb;
	int i, nframes;

	mt76x02_resync_beacon_timer(dev);

	data.dev = dev;
	__skb_queue_head_init(&data.q);

	ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
		IEEE80211_IFACE_ITER_RESUME_ALL,
		mt76x02_update_beacon_iter, dev);

	/*
	 * Keep iterating over all interfaces until no more buffered
	 * multicast/broadcast frames are returned.
	 */
	do {
		nframes = skb_queue_len(&data.q);
		ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
			IEEE80211_IFACE_ITER_RESUME_ALL,
			mt76x02_add_buffered_bc, &data);
	} while (nframes != skb_queue_len(&data.q));

	if (!nframes)
		return;

	for (i = 0; i < ARRAY_SIZE(data.tail); i++) {
		if (!data.tail[i])
			continue;

		mt76_skb_set_moredata(data.tail[i], false);
	}

	spin_lock_bh(&q->lock);
	while ((skb = __skb_dequeue(&data.q)) != NULL) {
		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
		struct ieee80211_vif *vif = info->control.vif;
		struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;

		mt76_dma_tx_queue_skb(&dev->mt76, q, skb, &mvif->group_wcid,
				      NULL);
	}
	spin_unlock_bh(&q->lock);
}

static int
mt76x02_init_tx_queue(struct mt76x02_dev *dev, struct mt76_queue *q,
		      int idx, int n_desc)
{
	int ret;

	q->regs = dev->mt76.mmio.regs + MT_TX_RING_BASE + idx * MT_RING_SIZE;
	q->ndesc = n_desc;
	q->hw_idx = idx;

	ret = mt76_queue_alloc(dev, q);
	if (ret)
		return ret;

	mt76x02_irq_enable(dev, MT_INT_TX_DONE(idx));

	return 0;
}

static int
mt76x02_init_rx_queue(struct mt76x02_dev *dev, struct mt76_queue *q,
		      int idx, int n_desc, int bufsize)
{
	int ret;

	q->regs = dev->mt76.mmio.regs + MT_RX_RING_BASE + idx * MT_RING_SIZE;
	q->ndesc = n_desc;
	q->buf_size = bufsize;

	ret = mt76_queue_alloc(dev, q);
	if (ret)
		return ret;

	mt76x02_irq_enable(dev, MT_INT_RX_DONE(idx));

	return 0;
}

static void mt76x02_process_tx_status_fifo(struct mt76x02_dev *dev)
{
	struct mt76x02_tx_status stat;
	u8 update = 1;

	while (kfifo_get(&dev->txstatus_fifo, &stat))
		mt76x02_send_tx_status(dev, &stat, &update);
}

static void mt76x02_tx_tasklet(unsigned long data)
{
	struct mt76x02_dev *dev = (struct mt76x02_dev *)data;
	int i;

	mt76x02_process_tx_status_fifo(dev);

	/* reap completed frames on all tx queues */
	for (i = MT_TXQ_MCU; i >= 0; i--)
		mt76_queue_tx_cleanup(dev, i, false);

	mt76x02_mac_poll_tx_status(dev, false);
	mt76x02_irq_enable(dev, MT_INT_TX_DONE_ALL);
}

int mt76x02_dma_init(struct mt76x02_dev *dev)
{
	struct mt76_txwi_cache __maybe_unused *t;
	int i, ret, fifo_size;
	struct mt76_queue *q;
	void *status_fifo;

	BUILD_BUG_ON(sizeof(t->txwi) < sizeof(struct mt76x02_txwi));
	BUILD_BUG_ON(sizeof(struct mt76x02_rxwi) > MT_RX_HEADROOM);

	fifo_size = roundup_pow_of_two(32 * sizeof(struct mt76x02_tx_status));
	status_fifo = devm_kzalloc(dev->mt76.dev, fifo_size, GFP_KERNEL);
	if (!status_fifo)
		return -ENOMEM;

	tasklet_init(&dev->tx_tasklet, mt76x02_tx_tasklet, (unsigned long)dev);
	tasklet_init(&dev->pre_tbtt_tasklet, mt76x02_pre_tbtt_tasklet,
		     (unsigned long)dev);

	kfifo_init(&dev->txstatus_fifo, status_fifo, fifo_size);

	mt76_dma_attach(&dev->mt76);

	mt76_wr(dev, MT_WPDMA_RST_IDX, ~0);

	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		ret = mt76x02_init_tx_queue(dev, &dev->mt76.q_tx[i],
					    mt76_ac_to_hwq(i),
					    MT_TX_RING_SIZE);
		if (ret)
			return ret;
	}

	ret = mt76x02_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_PSD],
				    MT_TX_HW_QUEUE_MGMT, MT_TX_RING_SIZE);
	if (ret)
		return ret;

	ret = mt76x02_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_MCU],
				    MT_TX_HW_QUEUE_MCU, MT_MCU_RING_SIZE);
	if (ret)
		return ret;

	ret = mt76x02_init_rx_queue(dev, &dev->mt76.q_rx[MT_RXQ_MCU], 1,
				    MT_MCU_RING_SIZE, MT_RX_BUF_SIZE);
	if (ret)
		return ret;

	q = &dev->mt76.q_rx[MT_RXQ_MAIN];
	q->buf_offset = MT_RX_HEADROOM - sizeof(struct mt76x02_rxwi);
	ret = mt76x02_init_rx_queue(dev, q, 0, MT76X02_RX_RING_SIZE,
				    MT_RX_BUF_SIZE);
	if (ret)
		return ret;

	return mt76_init_queues(dev);
}
EXPORT_SYMBOL_GPL(mt76x02_dma_init);

void mt76x02_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
{
	struct mt76x02_dev *dev;

	dev = container_of(mdev, struct mt76x02_dev, mt76);
	mt76x02_irq_enable(dev, MT_INT_RX_DONE(q));
}
EXPORT_SYMBOL_GPL(mt76x02_rx_poll_complete);

irqreturn_t mt76x02_irq_handler(int irq, void *dev_instance)
{
	struct mt76x02_dev *dev = dev_instance;
	u32 intr;

	intr = mt76_rr(dev, MT_INT_SOURCE_CSR);
	mt76_wr(dev, MT_INT_SOURCE_CSR, intr);

	if (!test_bit(MT76_STATE_INITIALIZED, &dev->mt76.state))
		return IRQ_NONE;

	trace_dev_irq(dev, intr, dev->mt76.mmio.irqmask);

	intr &= dev->mt76.mmio.irqmask;

	if (intr & MT_INT_TX_DONE_ALL) {
		mt76x02_irq_disable(dev, MT_INT_TX_DONE_ALL);
		tasklet_schedule(&dev->tx_tasklet);
	}

	if (intr & MT_INT_RX_DONE(0)) {
		mt76x02_irq_disable(dev, MT_INT_RX_DONE(0));
		napi_schedule(&dev->mt76.napi[0]);
	}

	if (intr & MT_INT_RX_DONE(1)) {
		mt76x02_irq_disable(dev, MT_INT_RX_DONE(1));
		napi_schedule(&dev->mt76.napi[1]);
	}

	if (intr & MT_INT_PRE_TBTT)
		tasklet_schedule(&dev->pre_tbtt_tasklet);

	/* send buffered multicast frames now */
	if (intr & MT_INT_TBTT)
		mt76_queue_kick(dev, &dev->mt76.q_tx[MT_TXQ_PSD]);

	if (intr & MT_INT_TX_STAT) {
		mt76x02_mac_poll_tx_status(dev, true);
		tasklet_schedule(&dev->tx_tasklet);
	}

	if (intr & MT_INT_GPTIMER) {
		mt76x02_irq_disable(dev, MT_INT_GPTIMER);
		tasklet_schedule(&dev->dfs_pd.dfs_tasklet);
	}

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(mt76x02_irq_handler);

void mt76x02_set_irq_mask(struct mt76x02_dev *dev, u32 clear, u32 set)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->mt76.mmio.irq_lock, flags);
	dev->mt76.mmio.irqmask &= ~clear;
	dev->mt76.mmio.irqmask |= set;
	mt76_wr(dev, MT_INT_MASK_CSR, dev->mt76.mmio.irqmask);
	spin_unlock_irqrestore(&dev->mt76.mmio.irq_lock, flags);
}
EXPORT_SYMBOL_GPL(mt76x02_set_irq_mask);

static void mt76x02_dma_enable(struct mt76x02_dev *dev)
{
	u32 val;

	mt76_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX);
	mt76x02_wait_for_wpdma(&dev->mt76, 1000);
	usleep_range(50, 100);

	val = FIELD_PREP(MT_WPDMA_GLO_CFG_DMA_BURST_SIZE, 3) |
	      MT_WPDMA_GLO_CFG_TX_DMA_EN |
	      MT_WPDMA_GLO_CFG_RX_DMA_EN;
	mt76_set(dev, MT_WPDMA_GLO_CFG, val);
	mt76_clear(dev, MT_WPDMA_GLO_CFG,
		   MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE);
}

void mt76x02_dma_cleanup(struct mt76x02_dev *dev)
{
	tasklet_kill(&dev->tx_tasklet);
	mt76_dma_cleanup(&dev->mt76);
}
EXPORT_SYMBOL_GPL(mt76x02_dma_cleanup);

void mt76x02_dma_disable(struct mt76x02_dev *dev)
{
	u32 val = mt76_rr(dev, MT_WPDMA_GLO_CFG);

	val &= MT_WPDMA_GLO_CFG_DMA_BURST_SIZE |
	       MT_WPDMA_GLO_CFG_BIG_ENDIAN |
	       MT_WPDMA_GLO_CFG_HDR_SEG_LEN;
	val |= MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE;
	mt76_wr(dev, MT_WPDMA_GLO_CFG, val);
}
EXPORT_SYMBOL_GPL(mt76x02_dma_disable);

void mt76x02_mac_start(struct mt76x02_dev *dev)
{
	mt76x02_dma_enable(dev);
	mt76_wr(dev, MT_RX_FILTR_CFG, dev->mt76.rxfilter);
	mt76_wr(dev, MT_MAC_SYS_CTRL,
		MT_MAC_SYS_CTRL_ENABLE_TX |
		MT_MAC_SYS_CTRL_ENABLE_RX);
	mt76x02_irq_enable(dev,
			   MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL |
			   MT_INT_TX_STAT);
}
EXPORT_SYMBOL_GPL(mt76x02_mac_start);
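
/*
 * Illustrative sketch only, not part of the upstream file: a minimal
 * example of how the exported helpers above fit together for a caller
 * that already knows the device's interrupt line. The function name and
 * its arguments are made up for the example; the real call sites live in
 * the bus-specific (PCI) probe and init code.
 */
static inline int
mt76x02_dma_attach_sketch(struct mt76x02_dev *dev, int irq)
{
	int ret;

	/* shared IRQ line, serviced by mt76x02_irq_handler() above */
	ret = devm_request_irq(dev->mt76.dev, irq, mt76x02_irq_handler,
			       IRQF_SHARED, KBUILD_MODNAME, dev);
	if (ret)
		return ret;

	/* allocate tx/rx rings and the tx-status FIFO, enable per-ring irqs */
	ret = mt76x02_dma_init(dev);
	if (ret)
		return ret;

	/* enable DMA and the MAC, unmask rx/tx-done/tx-status interrupts */
	mt76x02_mac_start(dev);

	return 0;
}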