// SPDX-License-Identifier: ISC
/* Copyright (C) 2020 MediaTek Inc. */

#include "mt7915.h"
#include "../dma.h"
#include "mac.h"

static int
mt7915_init_tx_queues(struct mt7915_phy *phy, int idx, int n_desc, int ring_base)
{
	struct mt7915_dev *dev = phy->dev;

	if (mtk_wed_device_active(&phy->dev->mt76.mmio.wed)) {
		if (is_mt798x(&dev->mt76))
			ring_base += MT_TXQ_ID(0) * MT_RING_SIZE;
		else
			ring_base = MT_WED_TX_RING_BASE;

		idx -= MT_TXQ_ID(0);
	}

	return mt76_connac_init_tx_queues(phy->mt76, idx, n_desc, ring_base,
					  MT_WED_Q_TX(idx));
}

static int mt7915_poll_tx(struct napi_struct *napi, int budget)
{
	struct mt7915_dev *dev;

	dev = container_of(napi, struct mt7915_dev, mt76.tx_napi);

	mt76_connac_tx_cleanup(&dev->mt76);
	if (napi_complete_done(napi, 0))
		mt7915_irq_enable(dev, MT_INT_TX_DONE_MCU);

	return 0;
}

static void mt7915_dma_config(struct mt7915_dev *dev)
{
#define Q_CONFIG(q, wfdma, int, id) do {		\
	if (wfdma)					\
		dev->wfdma_mask |= (1 << (q));		\
	dev->q_int_mask[(q)] = int;			\
	dev->q_id[(q)] = id;				\
} while (0)

#define MCUQ_CONFIG(q, wfdma, int, id)	Q_CONFIG(q, (wfdma), (int), (id))
#define RXQ_CONFIG(q, wfdma, int, id)	Q_CONFIG(__RXQ(q), (wfdma), (int), (id))
#define TXQ_CONFIG(q, wfdma, int, id)	Q_CONFIG(__TXQ(q), (wfdma), (int), (id))

	if (is_mt7915(&dev->mt76)) {
		RXQ_CONFIG(MT_RXQ_MAIN, WFDMA0, MT_INT_RX_DONE_BAND0,
			   MT7915_RXQ_BAND0);
		RXQ_CONFIG(MT_RXQ_MCU, WFDMA1, MT_INT_RX_DONE_WM,
			   MT7915_RXQ_MCU_WM);
		RXQ_CONFIG(MT_RXQ_MCU_WA, WFDMA1, MT_INT_RX_DONE_WA,
			   MT7915_RXQ_MCU_WA);
		RXQ_CONFIG(MT_RXQ_BAND1, WFDMA0, MT_INT_RX_DONE_BAND1,
			   MT7915_RXQ_BAND1);
		RXQ_CONFIG(MT_RXQ_BAND1_WA, WFDMA1, MT_INT_RX_DONE_WA_EXT,
			   MT7915_RXQ_MCU_WA_EXT);
		RXQ_CONFIG(MT_RXQ_MAIN_WA, WFDMA1, MT_INT_RX_DONE_WA_MAIN,
			   MT7915_RXQ_MCU_WA);
		TXQ_CONFIG(0, WFDMA1, MT_INT_TX_DONE_BAND0, MT7915_TXQ_BAND0);
		TXQ_CONFIG(1, WFDMA1, MT_INT_TX_DONE_BAND1, MT7915_TXQ_BAND1);
		MCUQ_CONFIG(MT_MCUQ_WM, WFDMA1, MT_INT_TX_DONE_MCU_WM,
			    MT7915_TXQ_MCU_WM);
		MCUQ_CONFIG(MT_MCUQ_WA, WFDMA1, MT_INT_TX_DONE_MCU_WA,
			    MT7915_TXQ_MCU_WA);
		MCUQ_CONFIG(MT_MCUQ_FWDL, WFDMA1, MT_INT_TX_DONE_FWDL,
			    MT7915_TXQ_FWDL);
	} else {
		RXQ_CONFIG(MT_RXQ_MCU, WFDMA0, MT_INT_RX_DONE_WM,
			   MT7916_RXQ_MCU_WM);
		RXQ_CONFIG(MT_RXQ_BAND1_WA, WFDMA0, MT_INT_RX_DONE_WA_EXT_MT7916,
			   MT7916_RXQ_MCU_WA_EXT);
		MCUQ_CONFIG(MT_MCUQ_WM, WFDMA0, MT_INT_TX_DONE_MCU_WM,
			    MT7915_TXQ_MCU_WM);
		MCUQ_CONFIG(MT_MCUQ_WA, WFDMA0, MT_INT_TX_DONE_MCU_WA_MT7916,
			    MT7915_TXQ_MCU_WA);
		MCUQ_CONFIG(MT_MCUQ_FWDL, WFDMA0, MT_INT_TX_DONE_FWDL,
			    MT7915_TXQ_FWDL);

		if (is_mt7916(&dev->mt76) && mtk_wed_device_active(&dev->mt76.mmio.wed)) {
			RXQ_CONFIG(MT_RXQ_MAIN, WFDMA0, MT_INT_WED_RX_DONE_BAND0_MT7916,
				   MT7916_RXQ_BAND0);
			RXQ_CONFIG(MT_RXQ_MCU_WA, WFDMA0, MT_INT_WED_RX_DONE_WA_MT7916,
				   MT7916_RXQ_MCU_WA);
			if (dev->hif2)
				RXQ_CONFIG(MT_RXQ_BAND1, WFDMA0,
					   MT_INT_RX_DONE_BAND1_MT7916,
					   MT7916_RXQ_BAND1);
			else
				RXQ_CONFIG(MT_RXQ_BAND1, WFDMA0,
					   MT_INT_WED_RX_DONE_BAND1_MT7916,
					   MT7916_RXQ_BAND1);
			RXQ_CONFIG(MT_RXQ_MAIN_WA, WFDMA0, MT_INT_WED_RX_DONE_WA_MAIN_MT7916,
				   MT7916_RXQ_MCU_WA_MAIN);
			TXQ_CONFIG(0, WFDMA0, MT_INT_WED_TX_DONE_BAND0,
				   MT7915_TXQ_BAND0);
			TXQ_CONFIG(1, WFDMA0, MT_INT_WED_TX_DONE_BAND1,
				   MT7915_TXQ_BAND1);
		} else {
			RXQ_CONFIG(MT_RXQ_MAIN, WFDMA0, MT_INT_RX_DONE_BAND0_MT7916,
				   MT7916_RXQ_BAND0);
			RXQ_CONFIG(MT_RXQ_MCU_WA, WFDMA0, MT_INT_RX_DONE_WA,
				   MT7916_RXQ_MCU_WA);
			RXQ_CONFIG(MT_RXQ_BAND1, WFDMA0, MT_INT_RX_DONE_BAND1_MT7916,
				   MT7916_RXQ_BAND1);
			RXQ_CONFIG(MT_RXQ_MAIN_WA, WFDMA0, MT_INT_RX_DONE_WA_MAIN_MT7916,
				   MT7916_RXQ_MCU_WA_MAIN);
			TXQ_CONFIG(0, WFDMA0, MT_INT_TX_DONE_BAND0,
				   MT7915_TXQ_BAND0);
			TXQ_CONFIG(1, WFDMA0, MT_INT_TX_DONE_BAND1,
				   MT7915_TXQ_BAND1);
		}
	}
}

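/* Program the per-queue SRAM prefetch windows. Each PREFETCH() value
 * packs the SRAM base offset into bits [31:16] and the prefetch depth
 * into bits [15:0] of the queue's EXT_CTRL register, per the
 * PREFETCH(_base, _depth) macro below.
 */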
static void __mt7915_dma_prefetch(struct mt7915_dev *dev, u32 ofs)
{
#define PREFETCH(_base, _depth)	((_base) << 16 | (_depth))
	u32 base = 0;

	/* prefetch SRAM wrapping boundary for tx/rx ring. */
	mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_FWDL) + ofs, PREFETCH(0x0, 0x4));
	mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_WM) + ofs, PREFETCH(0x40, 0x4));
	mt76_wr(dev, MT_TXQ_EXT_CTRL(0) + ofs, PREFETCH(0x80, 0x4));
	mt76_wr(dev, MT_TXQ_EXT_CTRL(1) + ofs, PREFETCH(0xc0, 0x4));
	mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_WA) + ofs, PREFETCH(0x100, 0x4));

	mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MCU) + ofs,
		PREFETCH(0x140, 0x4));
	mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MCU_WA) + ofs,
		PREFETCH(0x180, 0x4));
	if (!is_mt7915(&dev->mt76)) {
		mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MAIN_WA) + ofs,
			PREFETCH(0x1c0, 0x4));
		base = 0x40;
	}
	mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_BAND1_WA) + ofs,
		PREFETCH(0x1c0 + base, 0x4));
	mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MAIN) + ofs,
		PREFETCH(0x200 + base, 0x4));
	mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_BAND1) + ofs,
		PREFETCH(0x240 + base, 0x4));

	/* for mt7915, the ring adjacent to the last used ring
	 * must also be initialized.
	 */
	if (is_mt7915(&dev->mt76)) {
		ofs += 0x4;
		mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_WA) + ofs,
			PREFETCH(0x140, 0x0));
		mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_BAND1_WA) + ofs,
			PREFETCH(0x200 + base, 0x0));
		mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_BAND1) + ofs,
			PREFETCH(0x280 + base, 0x0));
	}
}

void mt7915_dma_prefetch(struct mt7915_dev *dev)
{
	__mt7915_dma_prefetch(dev, 0);
	if (dev->hif2)
		__mt7915_dma_prefetch(dev, MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0));
}

static void mt7915_dma_disable(struct mt7915_dev *dev, bool rst)
{
	struct mt76_dev *mdev = &dev->mt76;
	u32 hif1_ofs = 0;

	if (dev->hif2)
		hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);

	/* reset */
	if (rst) {
		mt76_clear(dev, MT_WFDMA0_RST,
			   MT_WFDMA0_RST_DMASHDL_ALL_RST |
			   MT_WFDMA0_RST_LOGIC_RST);

		mt76_set(dev, MT_WFDMA0_RST,
			 MT_WFDMA0_RST_DMASHDL_ALL_RST |
			 MT_WFDMA0_RST_LOGIC_RST);

		if (is_mt7915(mdev)) {
			mt76_clear(dev, MT_WFDMA1_RST,
				   MT_WFDMA1_RST_DMASHDL_ALL_RST |
				   MT_WFDMA1_RST_LOGIC_RST);

			mt76_set(dev, MT_WFDMA1_RST,
				 MT_WFDMA1_RST_DMASHDL_ALL_RST |
				 MT_WFDMA1_RST_LOGIC_RST);
		}

		if (dev->hif2) {
			mt76_clear(dev, MT_WFDMA0_RST + hif1_ofs,
				   MT_WFDMA0_RST_DMASHDL_ALL_RST |
				   MT_WFDMA0_RST_LOGIC_RST);

			mt76_set(dev, MT_WFDMA0_RST + hif1_ofs,
				 MT_WFDMA0_RST_DMASHDL_ALL_RST |
				 MT_WFDMA0_RST_LOGIC_RST);

			if (is_mt7915(mdev)) {
				mt76_clear(dev, MT_WFDMA1_RST + hif1_ofs,
					   MT_WFDMA1_RST_DMASHDL_ALL_RST |
					   MT_WFDMA1_RST_LOGIC_RST);

				mt76_set(dev, MT_WFDMA1_RST + hif1_ofs,
					 MT_WFDMA1_RST_DMASHDL_ALL_RST |
					 MT_WFDMA1_RST_LOGIC_RST);
			}
		}
	}

	/* disable */
	mt76_clear(dev, MT_WFDMA0_GLO_CFG,
		   MT_WFDMA0_GLO_CFG_TX_DMA_EN |
		   MT_WFDMA0_GLO_CFG_RX_DMA_EN |
		   MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
		   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO |
		   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);

	if (is_mt7915(mdev))
		mt76_clear(dev, MT_WFDMA1_GLO_CFG,
			   MT_WFDMA1_GLO_CFG_TX_DMA_EN |
			   MT_WFDMA1_GLO_CFG_RX_DMA_EN |
			   MT_WFDMA1_GLO_CFG_OMIT_TX_INFO |
			   MT_WFDMA1_GLO_CFG_OMIT_RX_INFO |
			   MT_WFDMA1_GLO_CFG_OMIT_RX_INFO_PFET2);

	if (dev->hif2) {
		mt76_clear(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
			   MT_WFDMA0_GLO_CFG_TX_DMA_EN |
			   MT_WFDMA0_GLO_CFG_RX_DMA_EN |
			   MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
			   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO |
			   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);

		if (is_mt7915(mdev))
			mt76_clear(dev, MT_WFDMA1_GLO_CFG + hif1_ofs,
				   MT_WFDMA1_GLO_CFG_TX_DMA_EN |
				   MT_WFDMA1_GLO_CFG_RX_DMA_EN |
				   MT_WFDMA1_GLO_CFG_OMIT_TX_INFO |
				   MT_WFDMA1_GLO_CFG_OMIT_RX_INFO |
				   MT_WFDMA1_GLO_CFG_OMIT_RX_INFO_PFET2);
	}
}

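/* Enable WPDMA and the per-ring interrupts. When a WED device is active
 * and wed_reset is set, the band0/band1 TX-done bits are added to the
 * mask written to the WED interrupt CSR and the WED device is started
 * with that combined mask.
 */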
int mt7915_dma_start(struct mt7915_dev *dev, bool reset, bool wed_reset)
{
	struct mt76_dev *mdev = &dev->mt76;
	u32 hif1_ofs = 0;
	u32 irq_mask;

	if (dev->hif2)
		hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);

	/* enable wpdma tx/rx */
	if (!reset) {
		mt76_set(dev, MT_WFDMA0_GLO_CFG,
			 MT_WFDMA0_GLO_CFG_TX_DMA_EN |
			 MT_WFDMA0_GLO_CFG_RX_DMA_EN |
			 MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
			 MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);

		if (is_mt7915(mdev))
			mt76_set(dev, MT_WFDMA1_GLO_CFG,
				 MT_WFDMA1_GLO_CFG_TX_DMA_EN |
				 MT_WFDMA1_GLO_CFG_RX_DMA_EN |
				 MT_WFDMA1_GLO_CFG_OMIT_TX_INFO |
				 MT_WFDMA1_GLO_CFG_OMIT_RX_INFO);

		if (dev->hif2) {
			mt76_set(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
				 MT_WFDMA0_GLO_CFG_TX_DMA_EN |
				 MT_WFDMA0_GLO_CFG_RX_DMA_EN |
				 MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
				 MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);

			if (is_mt7915(mdev))
				mt76_set(dev, MT_WFDMA1_GLO_CFG + hif1_ofs,
					 MT_WFDMA1_GLO_CFG_TX_DMA_EN |
					 MT_WFDMA1_GLO_CFG_RX_DMA_EN |
					 MT_WFDMA1_GLO_CFG_OMIT_TX_INFO |
					 MT_WFDMA1_GLO_CFG_OMIT_RX_INFO);

			mt76_set(dev, MT_WFDMA_HOST_CONFIG,
				 MT_WFDMA_HOST_CONFIG_PDMA_BAND);
		}
	}

	/* enable interrupts for TX/RX rings */
	irq_mask = MT_INT_RX_DONE_MCU |
		   MT_INT_TX_DONE_MCU |
		   MT_INT_MCU_CMD;

	if (!dev->phy.mt76->band_idx)
		irq_mask |= MT_INT_BAND0_RX_DONE;

	if (dev->dbdc_support || dev->phy.mt76->band_idx)
		irq_mask |= MT_INT_BAND1_RX_DONE;

	if (mtk_wed_device_active(&dev->mt76.mmio.wed) && wed_reset) {
		u32 wed_irq_mask = irq_mask;
		int ret;

		wed_irq_mask |= MT_INT_TX_DONE_BAND0 | MT_INT_TX_DONE_BAND1;
		if (!is_mt798x(&dev->mt76))
			mt76_wr(dev, MT_INT_WED_MASK_CSR, wed_irq_mask);
		else
			mt76_wr(dev, MT_INT_MASK_CSR, wed_irq_mask);

		ret = mt7915_mcu_wed_enable_rx_stats(dev);
		if (ret)
			return ret;

		mtk_wed_device_start(&dev->mt76.mmio.wed, wed_irq_mask);
	}

	irq_mask = reset ? MT_INT_MCU_CMD : irq_mask;

	mt7915_irq_enable(dev, irq_mask);
	mt7915_irq_disable(dev, 0);

	return 0;
}

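/* Full WFDMA bring-up: reset the DMA ring indices, turn off interrupt
 * delaying, program the prefetch windows, enable busy signalling for
 * each HIF, wait for the HIF to go idle, then start DMA via
 * mt7915_dma_start().
 */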
static int mt7915_dma_enable(struct mt7915_dev *dev, bool reset)
{
	struct mt76_dev *mdev = &dev->mt76;
	u32 hif1_ofs = 0;

	if (dev->hif2)
		hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);

	/* reset dma idx */
	mt76_wr(dev, MT_WFDMA0_RST_DTX_PTR, ~0);
	if (is_mt7915(mdev))
		mt76_wr(dev, MT_WFDMA1_RST_DTX_PTR, ~0);
	if (dev->hif2) {
		mt76_wr(dev, MT_WFDMA0_RST_DTX_PTR + hif1_ofs, ~0);
		if (is_mt7915(mdev))
			mt76_wr(dev, MT_WFDMA1_RST_DTX_PTR + hif1_ofs, ~0);
	}

	/* configure delay interrupt off */
	mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG0, 0);
	if (is_mt7915(mdev)) {
		mt76_wr(dev, MT_WFDMA1_PRI_DLY_INT_CFG0, 0);
	} else {
		mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG1, 0);
		mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG2, 0);
	}

	if (dev->hif2) {
		mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG0 + hif1_ofs, 0);
		if (is_mt7915(mdev)) {
			mt76_wr(dev, MT_WFDMA1_PRI_DLY_INT_CFG0 +
				hif1_ofs, 0);
		} else {
			mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG1 +
				hif1_ofs, 0);
			mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG2 +
				hif1_ofs, 0);
		}
	}

	/* configure prefetch settings */
	mt7915_dma_prefetch(dev);

	/* hif wait WFDMA idle */
	mt76_set(dev, MT_WFDMA0_BUSY_ENA,
		 MT_WFDMA0_BUSY_ENA_TX_FIFO0 |
		 MT_WFDMA0_BUSY_ENA_TX_FIFO1 |
		 MT_WFDMA0_BUSY_ENA_RX_FIFO);

	if (is_mt7915(mdev))
		mt76_set(dev, MT_WFDMA1_BUSY_ENA,
			 MT_WFDMA1_BUSY_ENA_TX_FIFO0 |
			 MT_WFDMA1_BUSY_ENA_TX_FIFO1 |
			 MT_WFDMA1_BUSY_ENA_RX_FIFO);

	if (dev->hif2) {
		mt76_set(dev, MT_WFDMA0_BUSY_ENA + hif1_ofs,
			 MT_WFDMA0_PCIE1_BUSY_ENA_TX_FIFO0 |
			 MT_WFDMA0_PCIE1_BUSY_ENA_TX_FIFO1 |
			 MT_WFDMA0_PCIE1_BUSY_ENA_RX_FIFO);

		if (is_mt7915(mdev))
			mt76_set(dev, MT_WFDMA1_BUSY_ENA + hif1_ofs,
				 MT_WFDMA1_PCIE1_BUSY_ENA_TX_FIFO0 |
				 MT_WFDMA1_PCIE1_BUSY_ENA_TX_FIFO1 |
				 MT_WFDMA1_PCIE1_BUSY_ENA_RX_FIFO);
	}

	mt76_poll(dev, MT_WFDMA_EXT_CSR_HIF_MISC,
		  MT_WFDMA_EXT_CSR_HIF_MISC_BUSY, 0, 1000);

	return mt7915_dma_start(dev, reset, true);
}

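/* One-time DMA setup: configure the queue/interrupt mapping, point the
 * WED ring control at the TX and RX1 rings when WED is in use, then
 * allocate every TX, MCU and RX ring before enabling DMA.
 */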
int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2)
{
	struct mt76_dev *mdev = &dev->mt76;
	u32 wa_rx_base, wa_rx_idx;
	u32 hif1_ofs = 0;
	int ret;

	mt7915_dma_config(dev);

	mt76_dma_attach(&dev->mt76);

	if (dev->hif2)
		hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);

	mt7915_dma_disable(dev, true);

	if (mtk_wed_device_active(&mdev->mmio.wed)) {
		if (!is_mt798x(mdev)) {
			u8 wed_control_rx1 = is_mt7915(mdev) ? 1 : 2;

			mt76_set(dev, MT_WFDMA_HOST_CONFIG,
				 MT_WFDMA_HOST_CONFIG_WED);
			mt76_wr(dev, MT_WFDMA_WED_RING_CONTROL,
				FIELD_PREP(MT_WFDMA_WED_RING_CONTROL_TX0, 18) |
				FIELD_PREP(MT_WFDMA_WED_RING_CONTROL_TX1, 19) |
				FIELD_PREP(MT_WFDMA_WED_RING_CONTROL_RX1,
					   wed_control_rx1));
			if (is_mt7915(mdev))
				mt76_rmw(dev, MT_WFDMA0_EXT0_CFG, MT_WFDMA0_EXT0_RXWB_KEEP,
					 MT_WFDMA0_EXT0_RXWB_KEEP);
		}
	} else {
		mt76_clear(dev, MT_WFDMA_HOST_CONFIG, MT_WFDMA_HOST_CONFIG_WED);
	}

	/* init tx queue */
	ret = mt7915_init_tx_queues(&dev->phy,
				    MT_TXQ_ID(dev->phy.mt76->band_idx),
				    MT7915_TX_RING_SIZE,
				    MT_TXQ_RING_BASE(0));
	if (ret)
		return ret;

	if (phy2) {
		ret = mt7915_init_tx_queues(phy2,
					    MT_TXQ_ID(phy2->mt76->band_idx),
					    MT7915_TX_RING_SIZE,
					    MT_TXQ_RING_BASE(1));
		if (ret)
			return ret;
	}

	/* command to WM */
	ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WM,
				  MT_MCUQ_ID(MT_MCUQ_WM),
				  MT7915_TX_MCU_RING_SIZE,
				  MT_MCUQ_RING_BASE(MT_MCUQ_WM));
	if (ret)
		return ret;

	/* command to WA */
	ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WA,
				  MT_MCUQ_ID(MT_MCUQ_WA),
				  MT7915_TX_MCU_RING_SIZE,
				  MT_MCUQ_RING_BASE(MT_MCUQ_WA));
	if (ret)
		return ret;

	/* firmware download */
	ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_FWDL,
				  MT_MCUQ_ID(MT_MCUQ_FWDL),
				  MT7915_TX_FWDL_RING_SIZE,
				  MT_MCUQ_RING_BASE(MT_MCUQ_FWDL));
	if (ret)
		return ret;

	/* event from WM */
	ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU],
			       MT_RXQ_ID(MT_RXQ_MCU),
			       MT7915_RX_MCU_RING_SIZE,
			       MT_RX_BUF_SIZE,
			       MT_RXQ_RING_BASE(MT_RXQ_MCU));
	if (ret)
		return ret;

	/* event from WA */
	if (mtk_wed_device_active(&mdev->mmio.wed) && is_mt7915(mdev)) {
		wa_rx_base = MT_WED_RX_RING_BASE;
		wa_rx_idx = MT7915_RXQ_MCU_WA;
		dev->mt76.q_rx[MT_RXQ_MCU_WA].flags = MT_WED_Q_TXFREE;
	} else {
		wa_rx_base = MT_RXQ_RING_BASE(MT_RXQ_MCU_WA);
		wa_rx_idx = MT_RXQ_ID(MT_RXQ_MCU_WA);
	}
	ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU_WA],
			       wa_rx_idx, MT7915_RX_MCU_RING_SIZE,
			       MT_RX_BUF_SIZE, wa_rx_base);
	if (ret)
		return ret;

	/* rx data queue for band0 */
	if (!dev->phy.mt76->band_idx) {
		if (mtk_wed_device_active(&mdev->mmio.wed) &&
		    mtk_wed_get_rx_capa(&mdev->mmio.wed)) {
			dev->mt76.q_rx[MT_RXQ_MAIN].flags =
				MT_WED_Q_RX(MT7915_RXQ_BAND0);
			dev->mt76.rx_token_size += MT7915_RX_RING_SIZE;
		}

		ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN],
				       MT_RXQ_ID(MT_RXQ_MAIN),
				       MT7915_RX_RING_SIZE,
				       MT_RX_BUF_SIZE,
				       MT_RXQ_RING_BASE(MT_RXQ_MAIN));
		if (ret)
			return ret;
	}

	/* tx free notify event from WA for band0 */
	if (!is_mt7915(mdev)) {
		wa_rx_base = MT_RXQ_RING_BASE(MT_RXQ_MAIN_WA);
		wa_rx_idx = MT_RXQ_ID(MT_RXQ_MAIN_WA);

		if (mtk_wed_device_active(&mdev->mmio.wed)) {
			mdev->q_rx[MT_RXQ_MAIN_WA].flags = MT_WED_Q_TXFREE;
			if (is_mt7916(mdev)) {
				wa_rx_base = MT_WED_RX_RING_BASE;
				wa_rx_idx = MT7915_RXQ_MCU_WA;
			}
		}

		ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN_WA],
				       wa_rx_idx, MT7915_RX_MCU_RING_SIZE,
				       MT_RX_BUF_SIZE, wa_rx_base);
		if (ret)
			return ret;
	}

	if (dev->dbdc_support || dev->phy.mt76->band_idx) {
		if (mtk_wed_device_active(&mdev->mmio.wed) &&
		    mtk_wed_get_rx_capa(&mdev->mmio.wed)) {
			dev->mt76.q_rx[MT_RXQ_BAND1].flags =
				MT_WED_Q_RX(MT7915_RXQ_BAND1);
			dev->mt76.rx_token_size += MT7915_RX_RING_SIZE;
		}

		/* rx data queue for band1 */
		ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_BAND1],
				       MT_RXQ_ID(MT_RXQ_BAND1),
				       MT7915_RX_RING_SIZE,
				       MT_RX_BUF_SIZE,
				       MT_RXQ_RING_BASE(MT_RXQ_BAND1) + hif1_ofs);
		if (ret)
			return ret;

		/* tx free notify event from WA for band1 */
		ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_BAND1_WA],
				       MT_RXQ_ID(MT_RXQ_BAND1_WA),
				       MT7915_RX_MCU_RING_SIZE,
				       MT_RX_BUF_SIZE,
				       MT_RXQ_RING_BASE(MT_RXQ_BAND1_WA) + hif1_ofs);
		if (ret)
			return ret;
	}

	ret = mt76_init_queues(dev, mt76_dma_rx_poll);
	if (ret < 0)
		return ret;

	netif_napi_add_tx(&dev->mt76.tx_napi_dev, &dev->mt76.tx_napi,
			  mt7915_poll_tx);
	napi_enable(&dev->mt76.tx_napi);

	mt7915_dma_enable(dev, false);

	return 0;
}

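/* Signal the pending WED reset (if MT76_STATE_WED_RESET is set) and wait
 * up to three seconds for the reset-complete acknowledgment; called from
 * mt7915_dma_reset() below while DMA is quiesced.
 */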
static void mt7915_dma_wed_reset(struct mt7915_dev *dev)
{
	struct mt76_dev *mdev = &dev->mt76;

	if (!test_bit(MT76_STATE_WED_RESET, &dev->mphy.state))
		return;

	complete(&mdev->mmio.wed_reset);

	if (!wait_for_completion_timeout(&dev->mt76.mmio.wed_reset_complete,
					 3 * HZ))
		dev_err(dev->mt76.dev, "wed reset complete timeout\n");
}

static void
mt7915_dma_reset_tx_queue(struct mt7915_dev *dev, struct mt76_queue *q)
{
	mt76_queue_reset(dev, q);
	if (mtk_wed_device_active(&dev->mt76.mmio.wed))
		mt76_dma_wed_setup(&dev->mt76, q, true);
}

int mt7915_dma_reset(struct mt7915_dev *dev, bool force)
{
	struct mt76_phy *mphy_ext = dev->mt76.phys[MT_BAND1];
	struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
	int i;

	/* clean up hw queues */
	for (i = 0; i < ARRAY_SIZE(dev->mt76.phy.q_tx); i++) {
		mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true);
		if (mphy_ext)
			mt76_queue_tx_cleanup(dev, mphy_ext->q_tx[i], true);
	}

	for (i = 0; i < ARRAY_SIZE(dev->mt76.q_mcu); i++)
		mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[i], true);

	mt76_for_each_q_rx(&dev->mt76, i)
		mt76_queue_rx_cleanup(dev, &dev->mt76.q_rx[i]);

	/* reset wfsys */
	if (force)
		mt7915_wfsys_reset(dev);

	if (mtk_wed_device_active(wed))
		mtk_wed_device_dma_reset(wed);

	mt7915_dma_disable(dev, force);
	mt7915_dma_wed_reset(dev);

	/* reset hw queues */
	for (i = 0; i < __MT_TXQ_MAX; i++) {
		mt7915_dma_reset_tx_queue(dev, dev->mphy.q_tx[i]);
		if (mphy_ext)
			mt7915_dma_reset_tx_queue(dev, mphy_ext->q_tx[i]);
	}

	for (i = 0; i < __MT_MCUQ_MAX; i++)
		mt76_queue_reset(dev, dev->mt76.q_mcu[i]);

	mt76_for_each_q_rx(&dev->mt76, i) {
		if (dev->mt76.q_rx[i].flags == MT_WED_Q_TXFREE)
			continue;

		mt76_queue_reset(dev, &dev->mt76.q_rx[i]);
	}

	mt76_tx_status_check(&dev->mt76, true);

	mt76_for_each_q_rx(&dev->mt76, i)
		mt76_queue_rx_reset(dev, i);

	if (mtk_wed_device_active(wed) && is_mt7915(&dev->mt76))
		mt76_rmw(dev, MT_WFDMA0_EXT0_CFG, MT_WFDMA0_EXT0_RXWB_KEEP,
			 MT_WFDMA0_EXT0_RXWB_KEEP);

	mt7915_dma_enable(dev, !force);

	return 0;
}

void mt7915_dma_cleanup(struct mt7915_dev *dev)
{
	mt7915_dma_disable(dev, true);

	mt76_dma_cleanup(&dev->mt76);
}