/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/kernel.h>
#include <linux/irq.h>

#include "mt76x02.h"
#include "mt76x02_trace.h"

/*
 * Set up one hardware TX ring: program the per-ring register base for
 * hardware queue @idx, record the descriptor count and hw queue index,
 * allocate the ring, then enable the TX-done interrupt for that ring.
 *
 * Returns 0 on success or the negative error from mt76_queue_alloc().
 */
static int
mt76x02_init_tx_queue(struct mt76x02_dev *dev, struct mt76_queue *q,
		      int idx, int n_desc)
{
	int ret;

	/* per-ring register block: common base + idx * per-ring stride */
	q->regs = dev->mt76.mmio.regs + MT_TX_RING_BASE + idx * MT_RING_SIZE;
	q->ndesc = n_desc;
	q->hw_idx = idx;

	ret = mt76_queue_alloc(dev, q);
	if (ret)
		return ret;

	/* only unmask the interrupt once the ring is ready for use */
	mt76x02_irq_enable(dev, MT_INT_TX_DONE(idx));

	return 0;
}

/*
 * Set up one hardware RX ring: program the per-ring register base for
 * ring @idx, record descriptor count and RX buffer size, allocate the
 * ring, then enable the RX-done interrupt for that ring.
 *
 * Returns 0 on success or the negative error from mt76_queue_alloc().
 */
static int
mt76x02_init_rx_queue(struct mt76x02_dev *dev, struct mt76_queue *q,
		      int idx, int n_desc, int bufsize)
{
	int ret;

	q->regs = dev->mt76.mmio.regs + MT_RX_RING_BASE + idx * MT_RING_SIZE;
	q->ndesc = n_desc;
	q->buf_size = bufsize;

	ret = mt76_queue_alloc(dev, q);
	if (ret)
		return ret;

	mt76x02_irq_enable(dev, MT_INT_RX_DONE(idx));

	return 0;
}

/* Drain every buffered TX status event and report each one upward. */
static void mt76x02_process_tx_status_fifo(struct mt76x02_dev *dev)
{
	struct mt76x02_tx_status stat;
	u8 update = 1;

	while (kfifo_get(&dev->txstatus_fifo, &stat))
		mt76x02_send_tx_status(dev, &stat, &update);
}

/*
 * Deferred TX completion work (tasklet context): report queued status
 * events, reclaim completed frames from all TX queues (MCU queue down
 * to queue 0), poll for remaining status, then re-enable the TX-done
 * interrupts that the IRQ handler masked before scheduling us.
 */
static void mt76x02_tx_tasklet(unsigned long data)
{
	struct mt76x02_dev *dev = (struct mt76x02_dev *)data;
	int i;

	mt76x02_process_tx_status_fifo(dev);

	for (i = MT_TXQ_MCU; i >= 0; i--)
		mt76_queue_tx_cleanup(dev, i, false);

	mt76x02_mac_poll_tx_status(dev, false);
	mt76x02_irq_enable(dev, MT_INT_TX_DONE_ALL);
}

/*
 * Allocate and initialize all DMA rings used by mt76x02 devices: one
 * TX ring per WMM AC, a management (PSD) ring, an MCU command ring,
 * plus the MCU and main RX rings. Also sets up the TX status kfifo
 * and the TX completion tasklet.
 *
 * Returns 0 on success or a negative errno.
 */
int mt76x02_dma_init(struct mt76x02_dev *dev)
{
	struct mt76_txwi_cache __maybe_unused *t;
	int i, ret, fifo_size;
	struct mt76_queue *q;
	void *status_fifo;

	/* the generic txwi cache must be able to hold our TXWI, and the
	 * RXWI must fit in the headroom reserved in front of RX buffers
	 */
	BUILD_BUG_ON(sizeof(t->txwi) < sizeof(struct mt76x02_txwi));
	BUILD_BUG_ON(sizeof(struct mt76x02_rxwi) > MT_RX_HEADROOM);

	/* kfifo requires a power-of-two buffer size */
	fifo_size = roundup_pow_of_two(32 * sizeof(struct mt76x02_tx_status));
	status_fifo = devm_kzalloc(dev->mt76.dev, fifo_size, GFP_KERNEL);
	if (!status_fifo)
		return -ENOMEM;

	tasklet_init(&dev->tx_tasklet, mt76x02_tx_tasklet, (unsigned long) dev);
	kfifo_init(&dev->txstatus_fifo, status_fifo, fifo_size);

	mt76_dma_attach(&dev->mt76);

	/* reset all hardware ring indices before programming the rings */
	mt76_wr(dev, MT_WPDMA_RST_IDX, ~0);

	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		ret = mt76x02_init_tx_queue(dev, &dev->mt76.q_tx[i],
					    mt76_ac_to_hwq(i),
					    MT_TX_RING_SIZE);
		if (ret)
			return ret;
	}

	ret = mt76x02_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_PSD],
				    MT_TX_HW_QUEUE_MGMT, MT_TX_RING_SIZE);
	if (ret)
		return ret;

	ret = mt76x02_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_MCU],
				    MT_TX_HW_QUEUE_MCU, MT_MCU_RING_SIZE);
	if (ret)
		return ret;

	ret = mt76x02_init_rx_queue(dev, &dev->mt76.q_rx[MT_RXQ_MCU], 1,
				    MT_MCU_RING_SIZE, MT_RX_BUF_SIZE);
	if (ret)
		return ret;

	/* leave room in front of each main-ring RX buffer so the RXWI
	 * header can be stripped in place
	 */
	q = &dev->mt76.q_rx[MT_RXQ_MAIN];
	q->buf_offset = MT_RX_HEADROOM - sizeof(struct mt76x02_rxwi);
	ret = mt76x02_init_rx_queue(dev, q, 0, MT76X02_RX_RING_SIZE,
				    MT_RX_BUF_SIZE);
	if (ret)
		return ret;

	return mt76_init_queues(dev);
}
EXPORT_SYMBOL_GPL(mt76x02_dma_init); 140 141 void mt76x02_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q) 142 { 143 struct mt76x02_dev *dev; 144 145 dev = container_of(mdev, struct mt76x02_dev, mt76); 146 mt76x02_irq_enable(dev, MT_INT_RX_DONE(q)); 147 } 148 EXPORT_SYMBOL_GPL(mt76x02_rx_poll_complete); 149 150 irqreturn_t mt76x02_irq_handler(int irq, void *dev_instance) 151 { 152 struct mt76x02_dev *dev = dev_instance; 153 u32 intr; 154 155 intr = mt76_rr(dev, MT_INT_SOURCE_CSR); 156 mt76_wr(dev, MT_INT_SOURCE_CSR, intr); 157 158 if (!test_bit(MT76_STATE_INITIALIZED, &dev->mt76.state)) 159 return IRQ_NONE; 160 161 trace_dev_irq(dev, intr, dev->mt76.mmio.irqmask); 162 163 intr &= dev->mt76.mmio.irqmask; 164 165 if (intr & MT_INT_TX_DONE_ALL) { 166 mt76x02_irq_disable(dev, MT_INT_TX_DONE_ALL); 167 tasklet_schedule(&dev->tx_tasklet); 168 } 169 170 if (intr & MT_INT_RX_DONE(0)) { 171 mt76x02_irq_disable(dev, MT_INT_RX_DONE(0)); 172 napi_schedule(&dev->mt76.napi[0]); 173 } 174 175 if (intr & MT_INT_RX_DONE(1)) { 176 mt76x02_irq_disable(dev, MT_INT_RX_DONE(1)); 177 napi_schedule(&dev->mt76.napi[1]); 178 } 179 180 if (intr & MT_INT_PRE_TBTT) 181 tasklet_schedule(&dev->pre_tbtt_tasklet); 182 183 /* send buffered multicast frames now */ 184 if (intr & MT_INT_TBTT) 185 mt76_queue_kick(dev, &dev->mt76.q_tx[MT_TXQ_PSD]); 186 187 if (intr & MT_INT_TX_STAT) { 188 mt76x02_mac_poll_tx_status(dev, true); 189 tasklet_schedule(&dev->tx_tasklet); 190 } 191 192 if (intr & MT_INT_GPTIMER) { 193 mt76x02_irq_disable(dev, MT_INT_GPTIMER); 194 tasklet_schedule(&dev->dfs_pd.dfs_tasklet); 195 } 196 197 return IRQ_HANDLED; 198 } 199 EXPORT_SYMBOL_GPL(mt76x02_irq_handler); 200 201 void mt76x02_set_irq_mask(struct mt76x02_dev *dev, u32 clear, u32 set) 202 { 203 unsigned long flags; 204 205 spin_lock_irqsave(&dev->mt76.mmio.irq_lock, flags); 206 dev->mt76.mmio.irqmask &= ~clear; 207 dev->mt76.mmio.irqmask |= set; 208 mt76_wr(dev, MT_INT_MASK_CSR, dev->mt76.mmio.irqmask); 209 
spin_unlock_irqrestore(&dev->mt76.mmio.irq_lock, flags); 210 } 211 EXPORT_SYMBOL_GPL(mt76x02_set_irq_mask); 212 213 static void mt76x02_dma_enable(struct mt76x02_dev *dev) 214 { 215 u32 val; 216 217 mt76_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX); 218 mt76x02_wait_for_wpdma(&dev->mt76, 1000); 219 usleep_range(50, 100); 220 221 val = FIELD_PREP(MT_WPDMA_GLO_CFG_DMA_BURST_SIZE, 3) | 222 MT_WPDMA_GLO_CFG_TX_DMA_EN | 223 MT_WPDMA_GLO_CFG_RX_DMA_EN; 224 mt76_set(dev, MT_WPDMA_GLO_CFG, val); 225 mt76_clear(dev, MT_WPDMA_GLO_CFG, 226 MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE); 227 } 228 EXPORT_SYMBOL_GPL(mt76x02_dma_enable); 229 230 void mt76x02_dma_cleanup(struct mt76x02_dev *dev) 231 { 232 tasklet_kill(&dev->tx_tasklet); 233 mt76_dma_cleanup(&dev->mt76); 234 } 235 EXPORT_SYMBOL_GPL(mt76x02_dma_cleanup); 236 237 void mt76x02_dma_disable(struct mt76x02_dev *dev) 238 { 239 u32 val = mt76_rr(dev, MT_WPDMA_GLO_CFG); 240 241 val &= MT_WPDMA_GLO_CFG_DMA_BURST_SIZE | 242 MT_WPDMA_GLO_CFG_BIG_ENDIAN | 243 MT_WPDMA_GLO_CFG_HDR_SEG_LEN; 244 val |= MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE; 245 mt76_wr(dev, MT_WPDMA_GLO_CFG, val); 246 } 247 EXPORT_SYMBOL_GPL(mt76x02_dma_disable); 248 249 void mt76x02_mac_start(struct mt76x02_dev *dev) 250 { 251 mt76x02_dma_enable(dev); 252 mt76_wr(dev, MT_RX_FILTR_CFG, dev->mt76.rxfilter); 253 mt76_wr(dev, MT_MAC_SYS_CTRL, 254 MT_MAC_SYS_CTRL_ENABLE_TX | 255 MT_MAC_SYS_CTRL_ENABLE_RX); 256 mt76x02_irq_enable(dev, 257 MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL | 258 MT_INT_TX_STAT); 259 } 260 EXPORT_SYMBOL_GPL(mt76x02_mac_start); 261