// SPDX-License-Identifier: ISC
/* Copyright (C) 2020 MediaTek Inc. */

#include "mt7915.h"
#include "../dma.h"
#include "mac.h"

static int
mt7915_init_tx_queues(struct mt7915_phy *phy, int idx, int n_desc, int ring_base)
{
	struct mt7915_dev *dev = phy->dev;

	if (mtk_wed_device_active(&phy->dev->mt76.mmio.wed)) {
		if (is_mt7986(&dev->mt76))
			ring_base += MT_TXQ_ID(0) * MT_RING_SIZE;
		else
			ring_base = MT_WED_TX_RING_BASE;

		idx -= MT_TXQ_ID(0);
	}

	return mt76_connac_init_tx_queues(phy->mt76, idx, n_desc, ring_base,
					  MT_WED_Q_TX(idx));
}

static int mt7915_poll_tx(struct napi_struct *napi, int budget)
{
	struct mt7915_dev *dev;

	dev = container_of(napi, struct mt7915_dev, mt76.tx_napi);

	mt76_connac_tx_cleanup(&dev->mt76);
	if (napi_complete_done(napi, 0))
		mt7915_irq_enable(dev, MT_INT_TX_DONE_MCU);

	return 0;
}

static void mt7915_dma_config(struct mt7915_dev *dev)
{
#define Q_CONFIG(q, wfdma, int, id) do {		\
	if (wfdma)					\
		dev->wfdma_mask |= (1 << (q));		\
	dev->q_int_mask[(q)] = int;			\
	dev->q_id[(q)] = id;				\
} while (0)

#define MCUQ_CONFIG(q, wfdma, int, id)	Q_CONFIG(q, (wfdma), (int), (id))
#define RXQ_CONFIG(q, wfdma, int, id)	Q_CONFIG(__RXQ(q), (wfdma), (int), (id))
#define TXQ_CONFIG(q, wfdma, int, id)	Q_CONFIG(__TXQ(q), (wfdma), (int), (id))

	if (is_mt7915(&dev->mt76)) {
		RXQ_CONFIG(MT_RXQ_MAIN, WFDMA0, MT_INT_RX_DONE_BAND0,
			   MT7915_RXQ_BAND0);
		RXQ_CONFIG(MT_RXQ_MCU, WFDMA1, MT_INT_RX_DONE_WM,
			   MT7915_RXQ_MCU_WM);
		RXQ_CONFIG(MT_RXQ_MCU_WA, WFDMA1, MT_INT_RX_DONE_WA,
			   MT7915_RXQ_MCU_WA);
		RXQ_CONFIG(MT_RXQ_BAND1, WFDMA0, MT_INT_RX_DONE_BAND1,
			   MT7915_RXQ_BAND1);
		RXQ_CONFIG(MT_RXQ_BAND1_WA, WFDMA1, MT_INT_RX_DONE_WA_EXT,
			   MT7915_RXQ_MCU_WA_EXT);
		RXQ_CONFIG(MT_RXQ_MAIN_WA, WFDMA1, MT_INT_RX_DONE_WA_MAIN,
			   MT7915_RXQ_MCU_WA);
		TXQ_CONFIG(0, WFDMA1, MT_INT_TX_DONE_BAND0, MT7915_TXQ_BAND0);
		TXQ_CONFIG(1, WFDMA1, MT_INT_TX_DONE_BAND1, MT7915_TXQ_BAND1);
		MCUQ_CONFIG(MT_MCUQ_WM, WFDMA1, MT_INT_TX_DONE_MCU_WM,
			    MT7915_TXQ_MCU_WM);
		MCUQ_CONFIG(MT_MCUQ_WA, WFDMA1, MT_INT_TX_DONE_MCU_WA,
			    MT7915_TXQ_MCU_WA);
		MCUQ_CONFIG(MT_MCUQ_FWDL, WFDMA1, MT_INT_TX_DONE_FWDL,
			    MT7915_TXQ_FWDL);
	} else {
		RXQ_CONFIG(MT_RXQ_MCU, WFDMA0, MT_INT_RX_DONE_WM,
			   MT7916_RXQ_MCU_WM);
		RXQ_CONFIG(MT_RXQ_BAND1_WA, WFDMA0, MT_INT_RX_DONE_WA_EXT_MT7916,
			   MT7916_RXQ_MCU_WA_EXT);
		MCUQ_CONFIG(MT_MCUQ_WM, WFDMA0, MT_INT_TX_DONE_MCU_WM,
			    MT7915_TXQ_MCU_WM);
		MCUQ_CONFIG(MT_MCUQ_WA, WFDMA0, MT_INT_TX_DONE_MCU_WA_MT7916,
			    MT7915_TXQ_MCU_WA);
		MCUQ_CONFIG(MT_MCUQ_FWDL, WFDMA0, MT_INT_TX_DONE_FWDL,
			    MT7915_TXQ_FWDL);

		if (is_mt7916(&dev->mt76) && mtk_wed_device_active(&dev->mt76.mmio.wed)) {
			RXQ_CONFIG(MT_RXQ_MAIN, WFDMA0, MT_INT_WED_RX_DONE_BAND0_MT7916,
				   MT7916_RXQ_BAND0);
			RXQ_CONFIG(MT_RXQ_MCU_WA, WFDMA0, MT_INT_WED_RX_DONE_WA_MT7916,
				   MT7916_RXQ_MCU_WA);
			if (dev->hif2)
				RXQ_CONFIG(MT_RXQ_BAND1, WFDMA0,
					   MT_INT_RX_DONE_BAND1_MT7916,
					   MT7916_RXQ_BAND1);
			else
				RXQ_CONFIG(MT_RXQ_BAND1, WFDMA0,
					   MT_INT_WED_RX_DONE_BAND1_MT7916,
					   MT7916_RXQ_BAND1);
			RXQ_CONFIG(MT_RXQ_MAIN_WA, WFDMA0, MT_INT_WED_RX_DONE_WA_MAIN_MT7916,
				   MT7916_RXQ_MCU_WA_MAIN);
			TXQ_CONFIG(0, WFDMA0, MT_INT_WED_TX_DONE_BAND0,
				   MT7915_TXQ_BAND0);
			TXQ_CONFIG(1, WFDMA0, MT_INT_WED_TX_DONE_BAND1,
				   MT7915_TXQ_BAND1);
		} else {
			RXQ_CONFIG(MT_RXQ_MAIN, WFDMA0, MT_INT_RX_DONE_BAND0_MT7916,
				   MT7916_RXQ_BAND0);
			RXQ_CONFIG(MT_RXQ_MCU_WA, WFDMA0, MT_INT_RX_DONE_WA,
				   MT7916_RXQ_MCU_WA);
			RXQ_CONFIG(MT_RXQ_BAND1, WFDMA0, MT_INT_RX_DONE_BAND1_MT7916,
				   MT7916_RXQ_BAND1);
			RXQ_CONFIG(MT_RXQ_MAIN_WA, WFDMA0, MT_INT_RX_DONE_WA_MAIN_MT7916,
				   MT7916_RXQ_MCU_WA_MAIN);
			TXQ_CONFIG(0, WFDMA0, MT_INT_TX_DONE_BAND0,
				   MT7915_TXQ_BAND0);
			TXQ_CONFIG(1, WFDMA0, MT_INT_TX_DONE_BAND1,
				   MT7915_TXQ_BAND1);
		}
	}
}

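/* PREFETCH() packs an SRAM base address (upper 16 bits) and a fetch depth
 * (lower 16 bits) into one EXT_CTRL word; the bases below advance in 0x40
 * steps so the per-ring prefetch windows do not overlap.
 */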
static void __mt7915_dma_prefetch(struct mt7915_dev *dev, u32 ofs)
{
#define PREFETCH(_base, _depth)	((_base) << 16 | (_depth))
	u32 base = 0;

	/* prefetch SRAM wrapping boundary for tx/rx ring. */
	mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_FWDL) + ofs, PREFETCH(0x0, 0x4));
	mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_WM) + ofs, PREFETCH(0x40, 0x4));
	mt76_wr(dev, MT_TXQ_EXT_CTRL(0) + ofs, PREFETCH(0x80, 0x4));
	mt76_wr(dev, MT_TXQ_EXT_CTRL(1) + ofs, PREFETCH(0xc0, 0x4));
	mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_WA) + ofs, PREFETCH(0x100, 0x4));

	mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MCU) + ofs,
		PREFETCH(0x140, 0x4));
	mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MCU_WA) + ofs,
		PREFETCH(0x180, 0x4));
	if (!is_mt7915(&dev->mt76)) {
		mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MAIN_WA) + ofs,
			PREFETCH(0x1c0, 0x4));
		base = 0x40;
	}
	mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_BAND1_WA) + ofs,
		PREFETCH(0x1c0 + base, 0x4));
	mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MAIN) + ofs,
		PREFETCH(0x200 + base, 0x4));
	mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_BAND1) + ofs,
		PREFETCH(0x240 + base, 0x4));

	/* for mt7915, the ring next to the last used ring must also be
	 * initialized.
	 */
	if (is_mt7915(&dev->mt76)) {
		ofs += 0x4;
		mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_WA) + ofs,
			PREFETCH(0x140, 0x0));
		mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_BAND1_WA) + ofs,
			PREFETCH(0x200 + base, 0x0));
		mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_BAND1) + ofs,
			PREFETCH(0x280 + base, 0x0));
	}
}

void mt7915_dma_prefetch(struct mt7915_dev *dev)
{
	__mt7915_dma_prefetch(dev, 0);
	if (dev->hif2)
		__mt7915_dma_prefetch(dev, MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0));
}

static void mt7915_dma_disable(struct mt7915_dev *dev, bool rst)
{
	struct mt76_dev *mdev = &dev->mt76;
	u32 hif1_ofs = 0;

	if (dev->hif2)
		hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);

	/* reset */
	if (rst) {
		mt76_clear(dev, MT_WFDMA0_RST,
			   MT_WFDMA0_RST_DMASHDL_ALL_RST |
			   MT_WFDMA0_RST_LOGIC_RST);

		mt76_set(dev, MT_WFDMA0_RST,
			 MT_WFDMA0_RST_DMASHDL_ALL_RST |
			 MT_WFDMA0_RST_LOGIC_RST);

		if (is_mt7915(mdev)) {
			mt76_clear(dev, MT_WFDMA1_RST,
				   MT_WFDMA1_RST_DMASHDL_ALL_RST |
				   MT_WFDMA1_RST_LOGIC_RST);

			mt76_set(dev, MT_WFDMA1_RST,
				 MT_WFDMA1_RST_DMASHDL_ALL_RST |
				 MT_WFDMA1_RST_LOGIC_RST);
		}

		if (dev->hif2) {
			mt76_clear(dev, MT_WFDMA0_RST + hif1_ofs,
				   MT_WFDMA0_RST_DMASHDL_ALL_RST |
				   MT_WFDMA0_RST_LOGIC_RST);

			mt76_set(dev, MT_WFDMA0_RST + hif1_ofs,
				 MT_WFDMA0_RST_DMASHDL_ALL_RST |
				 MT_WFDMA0_RST_LOGIC_RST);

			if (is_mt7915(mdev)) {
				mt76_clear(dev, MT_WFDMA1_RST + hif1_ofs,
					   MT_WFDMA1_RST_DMASHDL_ALL_RST |
					   MT_WFDMA1_RST_LOGIC_RST);

				mt76_set(dev, MT_WFDMA1_RST + hif1_ofs,
					 MT_WFDMA1_RST_DMASHDL_ALL_RST |
					 MT_WFDMA1_RST_LOGIC_RST);
			}
		}
	}

	/* disable */
	mt76_clear(dev, MT_WFDMA0_GLO_CFG,
		   MT_WFDMA0_GLO_CFG_TX_DMA_EN |
		   MT_WFDMA0_GLO_CFG_RX_DMA_EN |
		   MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
		   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO |
		   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);

	if (is_mt7915(mdev))
		mt76_clear(dev, MT_WFDMA1_GLO_CFG,
			   MT_WFDMA1_GLO_CFG_TX_DMA_EN |
			   MT_WFDMA1_GLO_CFG_RX_DMA_EN |
			   MT_WFDMA1_GLO_CFG_OMIT_TX_INFO |
			   MT_WFDMA1_GLO_CFG_OMIT_RX_INFO |
			   MT_WFDMA1_GLO_CFG_OMIT_RX_INFO_PFET2);

	if (dev->hif2) {
		mt76_clear(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
			   MT_WFDMA0_GLO_CFG_TX_DMA_EN |
			   MT_WFDMA0_GLO_CFG_RX_DMA_EN |
			   MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
			   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO |
			   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);

		if (is_mt7915(mdev))
			mt76_clear(dev, MT_WFDMA1_GLO_CFG + hif1_ofs,
				   MT_WFDMA1_GLO_CFG_TX_DMA_EN |
				   MT_WFDMA1_GLO_CFG_RX_DMA_EN |
				   MT_WFDMA1_GLO_CFG_OMIT_TX_INFO |
				   MT_WFDMA1_GLO_CFG_OMIT_RX_INFO |
				   MT_WFDMA1_GLO_CFG_OMIT_RX_INFO_PFET2);
	}
}

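/* Re-arm WFDMA after a disable/reset: reset the DMA TX index pointers,
 * turn delay interrupts off, program the prefetch windows, wait for the
 * busy flags to drop, then re-enable TX/RX DMA and the per-ring interrupt
 * sources.
 */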
static int mt7915_dma_enable(struct mt7915_dev *dev)
{
	struct mt76_dev *mdev = &dev->mt76;
	u32 hif1_ofs = 0;
	u32 irq_mask;

	if (dev->hif2)
		hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);

	/* reset dma idx */
	mt76_wr(dev, MT_WFDMA0_RST_DTX_PTR, ~0);
	if (is_mt7915(mdev))
		mt76_wr(dev, MT_WFDMA1_RST_DTX_PTR, ~0);
	if (dev->hif2) {
		mt76_wr(dev, MT_WFDMA0_RST_DTX_PTR + hif1_ofs, ~0);
		if (is_mt7915(mdev))
			mt76_wr(dev, MT_WFDMA1_RST_DTX_PTR + hif1_ofs, ~0);
	}

	/* configure delay interrupt off */
	mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG0, 0);
	if (is_mt7915(mdev)) {
		mt76_wr(dev, MT_WFDMA1_PRI_DLY_INT_CFG0, 0);
	} else {
		mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG1, 0);
		mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG2, 0);
	}

	if (dev->hif2) {
		mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG0 + hif1_ofs, 0);
		if (is_mt7915(mdev)) {
			mt76_wr(dev, MT_WFDMA1_PRI_DLY_INT_CFG0 +
				hif1_ofs, 0);
		} else {
			mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG1 +
				hif1_ofs, 0);
			mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG2 +
				hif1_ofs, 0);
		}
	}

	/* configure prefetch settings */
	mt7915_dma_prefetch(dev);

	/* hif wait WFDMA idle */
	mt76_set(dev, MT_WFDMA0_BUSY_ENA,
		 MT_WFDMA0_BUSY_ENA_TX_FIFO0 |
		 MT_WFDMA0_BUSY_ENA_TX_FIFO1 |
		 MT_WFDMA0_BUSY_ENA_RX_FIFO);

	if (is_mt7915(mdev))
		mt76_set(dev, MT_WFDMA1_BUSY_ENA,
			 MT_WFDMA1_BUSY_ENA_TX_FIFO0 |
			 MT_WFDMA1_BUSY_ENA_TX_FIFO1 |
			 MT_WFDMA1_BUSY_ENA_RX_FIFO);

	if (dev->hif2) {
		mt76_set(dev, MT_WFDMA0_BUSY_ENA + hif1_ofs,
			 MT_WFDMA0_PCIE1_BUSY_ENA_TX_FIFO0 |
			 MT_WFDMA0_PCIE1_BUSY_ENA_TX_FIFO1 |
			 MT_WFDMA0_PCIE1_BUSY_ENA_RX_FIFO);

		if (is_mt7915(mdev))
			mt76_set(dev, MT_WFDMA1_BUSY_ENA + hif1_ofs,
				 MT_WFDMA1_PCIE1_BUSY_ENA_TX_FIFO0 |
				 MT_WFDMA1_PCIE1_BUSY_ENA_TX_FIFO1 |
				 MT_WFDMA1_PCIE1_BUSY_ENA_RX_FIFO);
	}

	mt76_poll(dev, MT_WFDMA_EXT_CSR_HIF_MISC,
		  MT_WFDMA_EXT_CSR_HIF_MISC_BUSY, 0, 1000);

	/* set WFDMA Tx/Rx */
	mt76_set(dev, MT_WFDMA0_GLO_CFG,
		 MT_WFDMA0_GLO_CFG_TX_DMA_EN |
		 MT_WFDMA0_GLO_CFG_RX_DMA_EN |
		 MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
		 MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);

	if (is_mt7915(mdev))
		mt76_set(dev, MT_WFDMA1_GLO_CFG,
			 MT_WFDMA1_GLO_CFG_TX_DMA_EN |
			 MT_WFDMA1_GLO_CFG_RX_DMA_EN |
			 MT_WFDMA1_GLO_CFG_OMIT_TX_INFO |
			 MT_WFDMA1_GLO_CFG_OMIT_RX_INFO);

	if (dev->hif2) {
		mt76_set(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
			 MT_WFDMA0_GLO_CFG_TX_DMA_EN |
			 MT_WFDMA0_GLO_CFG_RX_DMA_EN |
			 MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
			 MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);

		if (is_mt7915(mdev))
			mt76_set(dev, MT_WFDMA1_GLO_CFG + hif1_ofs,
				 MT_WFDMA1_GLO_CFG_TX_DMA_EN |
				 MT_WFDMA1_GLO_CFG_RX_DMA_EN |
				 MT_WFDMA1_GLO_CFG_OMIT_TX_INFO |
				 MT_WFDMA1_GLO_CFG_OMIT_RX_INFO);

		mt76_set(dev, MT_WFDMA_HOST_CONFIG,
			 MT_WFDMA_HOST_CONFIG_PDMA_BAND);
	}

	/* enable interrupts for TX/RX rings */
	irq_mask = MT_INT_RX_DONE_MCU |
		   MT_INT_TX_DONE_MCU |
		   MT_INT_MCU_CMD;

	if (!dev->phy.mt76->band_idx)
		irq_mask |= MT_INT_BAND0_RX_DONE;

	if (dev->dbdc_support || dev->phy.mt76->band_idx)
		irq_mask |= MT_INT_BAND1_RX_DONE;

	if (mtk_wed_device_active(&dev->mt76.mmio.wed)) {
		u32 wed_irq_mask = irq_mask;
		int ret;

		wed_irq_mask |= MT_INT_TX_DONE_BAND0 | MT_INT_TX_DONE_BAND1;
		if (!is_mt7986(&dev->mt76))
			mt76_wr(dev, MT_INT_WED_MASK_CSR, wed_irq_mask);
		else
			mt76_wr(dev, MT_INT_MASK_CSR, wed_irq_mask);

		ret = mt7915_mcu_wed_enable_rx_stats(dev);
		if (ret)
			return ret;

		mtk_wed_device_start(&dev->mt76.mmio.wed, wed_irq_mask);
	}

	mt7915_irq_enable(dev, irq_mask);

	return 0;
}

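/* One-time DMA bring-up: set up the per-chip queue/interrupt mapping,
 * optionally attach the WED rings, then allocate all TX, MCU and RX rings
 * before enabling WFDMA. phy2 is the second band on DBDC devices and may
 * be NULL.
 */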
int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2)
{
	struct mt76_dev *mdev = &dev->mt76;
	u32 wa_rx_base, wa_rx_idx;
	u32 hif1_ofs = 0;
	int ret;

	mt7915_dma_config(dev);

	mt76_dma_attach(&dev->mt76);

	if (dev->hif2)
		hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);

	mt7915_dma_disable(dev, true);

	if (mtk_wed_device_active(&mdev->mmio.wed)) {
		if (!is_mt7986(mdev)) {
			u8 wed_control_rx1 = is_mt7915(mdev) ? 1 : 2;

			mt76_set(dev, MT_WFDMA_HOST_CONFIG,
				 MT_WFDMA_HOST_CONFIG_WED);
			mt76_wr(dev, MT_WFDMA_WED_RING_CONTROL,
				FIELD_PREP(MT_WFDMA_WED_RING_CONTROL_TX0, 18) |
				FIELD_PREP(MT_WFDMA_WED_RING_CONTROL_TX1, 19) |
				FIELD_PREP(MT_WFDMA_WED_RING_CONTROL_RX1,
					   wed_control_rx1));
			if (is_mt7915(mdev))
				mt76_rmw(dev, MT_WFDMA0_EXT0_CFG, MT_WFDMA0_EXT0_RXWB_KEEP,
					 MT_WFDMA0_EXT0_RXWB_KEEP);
		}
	} else {
		mt76_clear(dev, MT_WFDMA_HOST_CONFIG, MT_WFDMA_HOST_CONFIG_WED);
	}

	/* init tx queue */
	ret = mt7915_init_tx_queues(&dev->phy,
				    MT_TXQ_ID(dev->phy.mt76->band_idx),
				    MT7915_TX_RING_SIZE,
				    MT_TXQ_RING_BASE(0));
	if (ret)
		return ret;

	if (phy2) {
		ret = mt7915_init_tx_queues(phy2,
					    MT_TXQ_ID(phy2->mt76->band_idx),
					    MT7915_TX_RING_SIZE,
					    MT_TXQ_RING_BASE(1));
		if (ret)
			return ret;
	}

	/* command to WM */
	ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WM,
				  MT_MCUQ_ID(MT_MCUQ_WM),
				  MT7915_TX_MCU_RING_SIZE,
				  MT_MCUQ_RING_BASE(MT_MCUQ_WM));
	if (ret)
		return ret;

	/* command to WA */
	ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WA,
				  MT_MCUQ_ID(MT_MCUQ_WA),
				  MT7915_TX_MCU_RING_SIZE,
				  MT_MCUQ_RING_BASE(MT_MCUQ_WA));
	if (ret)
		return ret;

	/* firmware download */
	ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_FWDL,
				  MT_MCUQ_ID(MT_MCUQ_FWDL),
				  MT7915_TX_FWDL_RING_SIZE,
				  MT_MCUQ_RING_BASE(MT_MCUQ_FWDL));
	if (ret)
		return ret;

	/* event from WM */
	ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU],
			       MT_RXQ_ID(MT_RXQ_MCU),
			       MT7915_RX_MCU_RING_SIZE,
			       MT_RX_BUF_SIZE,
			       MT_RXQ_RING_BASE(MT_RXQ_MCU));
	if (ret)
		return ret;

	/* event from WA */
	if (mtk_wed_device_active(&mdev->mmio.wed) && is_mt7915(mdev)) {
		wa_rx_base = MT_WED_RX_RING_BASE;
		wa_rx_idx = MT7915_RXQ_MCU_WA;
		dev->mt76.q_rx[MT_RXQ_MCU_WA].flags = MT_WED_Q_TXFREE;
	} else {
		wa_rx_base = MT_RXQ_RING_BASE(MT_RXQ_MCU_WA);
		wa_rx_idx = MT_RXQ_ID(MT_RXQ_MCU_WA);
	}
	ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU_WA],
			       wa_rx_idx, MT7915_RX_MCU_RING_SIZE,
			       MT_RX_BUF_SIZE, wa_rx_base);
	if (ret)
		return ret;

	/* rx data queue for band0 */
	if (!dev->phy.mt76->band_idx) {
		if (mtk_wed_device_active(&mdev->mmio.wed) &&
		    mtk_wed_get_rx_capa(&mdev->mmio.wed)) {
			dev->mt76.q_rx[MT_RXQ_MAIN].flags =
				MT_WED_Q_RX(MT7915_RXQ_BAND0);
			dev->mt76.rx_token_size += MT7915_RX_RING_SIZE;
		}

		ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN],
				       MT_RXQ_ID(MT_RXQ_MAIN),
				       MT7915_RX_RING_SIZE,
				       MT_RX_BUF_SIZE,
				       MT_RXQ_RING_BASE(MT_RXQ_MAIN));
		if (ret)
			return ret;
	}

	/* tx free notify event from WA for band0 */
	if (!is_mt7915(mdev)) {
		wa_rx_base = MT_RXQ_RING_BASE(MT_RXQ_MAIN_WA);
		wa_rx_idx = MT_RXQ_ID(MT_RXQ_MAIN_WA);

		if (mtk_wed_device_active(&mdev->mmio.wed)) {
			mdev->q_rx[MT_RXQ_MAIN_WA].flags = MT_WED_Q_TXFREE;
			if (is_mt7916(mdev)) {
				wa_rx_base = MT_WED_RX_RING_BASE;
				wa_rx_idx = MT7915_RXQ_MCU_WA;
			}
		}

		ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN_WA],
				       wa_rx_idx, MT7915_RX_MCU_RING_SIZE,
				       MT_RX_BUF_SIZE, wa_rx_base);
		if (ret)
			return ret;
	}

	if (dev->dbdc_support || dev->phy.mt76->band_idx) {
		if (mtk_wed_device_active(&mdev->mmio.wed) &&
		    mtk_wed_get_rx_capa(&mdev->mmio.wed)) {
			dev->mt76.q_rx[MT_RXQ_BAND1].flags =
				MT_WED_Q_RX(MT7915_RXQ_BAND1);
			dev->mt76.rx_token_size += MT7915_RX_RING_SIZE;
		}

		/* rx data queue for band1 */
		ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_BAND1],
				       MT_RXQ_ID(MT_RXQ_BAND1),
				       MT7915_RX_RING_SIZE,
				       MT_RX_BUF_SIZE,
				       MT_RXQ_RING_BASE(MT_RXQ_BAND1) + hif1_ofs);
		if (ret)
			return ret;

		/* tx free notify event from WA for band1 */
		ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_BAND1_WA],
				       MT_RXQ_ID(MT_RXQ_BAND1_WA),
				       MT7915_RX_MCU_RING_SIZE,
				       MT_RX_BUF_SIZE,
				       MT_RXQ_RING_BASE(MT_RXQ_BAND1_WA) + hif1_ofs);
		if (ret)
			return ret;
	}

	ret = mt76_init_queues(dev, mt76_dma_rx_poll);
	if (ret < 0)
		return ret;

	netif_napi_add_tx(&dev->mt76.tx_napi_dev, &dev->mt76.tx_napi,
			  mt7915_poll_tx);
	napi_enable(&dev->mt76.tx_napi);

	/* propagate a possible error from the WED rx-stats setup */
	return mt7915_dma_enable(dev);
}

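/* Synchronize with the WED reset path: signal that the host side is ready
 * and wait (up to 3s) for WED to finish its own DMA reset.
 */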
static void mt7915_dma_wed_reset(struct mt7915_dev *dev)
{
	struct mt76_dev *mdev = &dev->mt76;

	if (!test_bit(MT76_STATE_WED_RESET, &dev->mphy.state))
		return;

	complete(&mdev->mmio.wed_reset);

	if (!wait_for_completion_timeout(&dev->mt76.mmio.wed_reset_complete,
					 3 * HZ))
		dev_err(dev->mt76.dev, "wed reset complete timeout\n");
}

static void
mt7915_dma_reset_tx_queue(struct mt7915_dev *dev, struct mt76_queue *q)
{
	mt76_queue_reset(dev, q);
	if (mtk_wed_device_active(&dev->mt76.mmio.wed))
		mt76_dma_wed_setup(&dev->mt76, q, true);
}

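/* Full DMA restart used by the recovery path: drain all TX/MCU/RX queues,
 * optionally reset wfsys, disable WFDMA, then reset every ring (skipping
 * WED-owned TXFREE rings) and bring DMA back up.
 */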
int mt7915_dma_reset(struct mt7915_dev *dev, bool force)
{
	struct mt76_phy *mphy_ext = dev->mt76.phys[MT_BAND1];
	struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
	int i;

	/* clean up hw queues */
	for (i = 0; i < ARRAY_SIZE(dev->mt76.phy.q_tx); i++) {
		mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true);
		if (mphy_ext)
			mt76_queue_tx_cleanup(dev, mphy_ext->q_tx[i], true);
	}

	for (i = 0; i < ARRAY_SIZE(dev->mt76.q_mcu); i++)
		mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[i], true);

	mt76_for_each_q_rx(&dev->mt76, i)
		mt76_queue_rx_cleanup(dev, &dev->mt76.q_rx[i]);

	/* reset wfsys */
	if (force)
		mt7915_wfsys_reset(dev);

	if (mtk_wed_device_active(wed))
		mtk_wed_device_dma_reset(wed);

	mt7915_dma_disable(dev, force);
	mt7915_dma_wed_reset(dev);

	/* reset hw queues */
	for (i = 0; i < __MT_TXQ_MAX; i++) {
		mt7915_dma_reset_tx_queue(dev, dev->mphy.q_tx[i]);
		if (mphy_ext)
			mt7915_dma_reset_tx_queue(dev, mphy_ext->q_tx[i]);
	}

	for (i = 0; i < __MT_MCUQ_MAX; i++)
		mt76_queue_reset(dev, dev->mt76.q_mcu[i]);

	mt76_for_each_q_rx(&dev->mt76, i) {
		if (dev->mt76.q_rx[i].flags == MT_WED_Q_TXFREE)
			continue;

		mt76_queue_reset(dev, &dev->mt76.q_rx[i]);
	}

	mt76_tx_status_check(&dev->mt76, true);

	mt76_for_each_q_rx(&dev->mt76, i)
		mt76_queue_rx_reset(dev, i);

	if (mtk_wed_device_active(wed) && is_mt7915(&dev->mt76))
		mt76_rmw(dev, MT_WFDMA0_EXT0_CFG, MT_WFDMA0_EXT0_RXWB_KEEP,
			 MT_WFDMA0_EXT0_RXWB_KEEP);

	/* propagate a possible error from the WED rx-stats setup */
	return mt7915_dma_enable(dev);
}

void mt7915_dma_cleanup(struct mt7915_dev *dev)
{
	mt7915_dma_disable(dev, true);

	mt76_dma_cleanup(&dev->mt76);
}