Lines Matching +full:prefetch +full:- +full:dma
1 // SPDX-License-Identifier: ISC
5 #include "../dma.h"
11 struct mt7915_dev *dev = phy->dev; in mt7915_init_tx_queues()
13 if (mtk_wed_device_active(&phy->dev->mt76.mmio.wed)) { in mt7915_init_tx_queues()
14 if (is_mt798x(&dev->mt76)) in mt7915_init_tx_queues()
19 idx -= MT_TXQ_ID(0); in mt7915_init_tx_queues()
22 return mt76_connac_init_tx_queues(phy->mt76, idx, n_desc, ring_base, in mt7915_init_tx_queues()
32 mt76_connac_tx_cleanup(&dev->mt76); in mt7915_poll_tx()
43 dev->wfdma_mask |= (1 << (q)); \ in mt7915_dma_config()
44 dev->q_int_mask[(q)] = int; \ in mt7915_dma_config()
45 dev->q_id[(q)] = id; \ in mt7915_dma_config()
52 if (is_mt7915(&dev->mt76)) { in mt7915_dma_config()
85 if (is_mt7916(&dev->mt76) && mtk_wed_device_active(&dev->mt76.mmio.wed)) { in mt7915_dma_config()
90 if (dev->hif2) in mt7915_dma_config()
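The three assignments matched at lines 43-45 form the body of the per-queue configuration macro used by mt7915_dma_config(): each logical queue gets a bit in the WFDMA ownership mask, an interrupt-mask value, and a hardware ring id. The standalone sketch below shows the same bookkeeping; the struct, the macro's argument order and the example values are illustrative assumptions, not the driver's source.

#include <stdio.h>

#define MAX_Q 8

/* cut-down stand-in for struct mt7915_dev */
struct demo_dev {
	unsigned int wfdma_mask;
	unsigned int q_int_mask[MAX_Q];
	unsigned int q_id[MAX_Q];
};

/* hypothetical reconstruction of the enclosing helper macro */
#define Q_CONFIG(dev, q, wfdma, irq, id) do {			\
		if (wfdma)					\
			(dev)->wfdma_mask |= (1 << (q));	\
		(dev)->q_int_mask[(q)] = (irq);			\
		(dev)->q_id[(q)] = (id);			\
	} while (0)

int main(void)
{
	struct demo_dev dev = { 0 };

	/* e.g. queue 0 lives on WFDMA0, uses interrupt bit 4, ring id 18 */
	Q_CONFIG(&dev, 0, 1, 1U << 4, 18);
	printf("wfdma_mask=%#x int_mask=%#x id=%u\n",
	       dev.wfdma_mask, dev.q_int_mask[0], dev.q_id[0]);
	return 0;
}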
123 #define PREFETCH(_base, _depth) ((_base) << 16 | (_depth)) in __mt7915_dma_prefetch() macro
126 /* prefetch SRAM wrapping boundary for tx/rx ring. */ in __mt7915_dma_prefetch()
127 mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_FWDL) + ofs, PREFETCH(0x0, 0x4)); in __mt7915_dma_prefetch()
128 mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_WM) + ofs, PREFETCH(0x40, 0x4)); in __mt7915_dma_prefetch()
129 mt76_wr(dev, MT_TXQ_EXT_CTRL(0) + ofs, PREFETCH(0x80, 0x4)); in __mt7915_dma_prefetch()
130 mt76_wr(dev, MT_TXQ_EXT_CTRL(1) + ofs, PREFETCH(0xc0, 0x4)); in __mt7915_dma_prefetch()
131 mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_WA) + ofs, PREFETCH(0x100, 0x4)); in __mt7915_dma_prefetch()
134 PREFETCH(0x140, 0x4)); in __mt7915_dma_prefetch()
136 PREFETCH(0x180, 0x4)); in __mt7915_dma_prefetch()
137 if (!is_mt7915(&dev->mt76)) { in __mt7915_dma_prefetch()
139 PREFETCH(0x1c0, 0x4)); in __mt7915_dma_prefetch()
143 PREFETCH(0x1c0 + base, 0x4)); in __mt7915_dma_prefetch()
145 PREFETCH(0x200 + base, 0x4)); in __mt7915_dma_prefetch()
147 PREFETCH(0x240 + base, 0x4)); in __mt7915_dma_prefetch()
152 if (is_mt7915(&dev->mt76)) { in __mt7915_dma_prefetch()
155 PREFETCH(0x140, 0x0)); in __mt7915_dma_prefetch()
157 PREFETCH(0x200 + base, 0x0)); in __mt7915_dma_prefetch()
159 PREFETCH(0x280 + base, 0x0)); in __mt7915_dma_prefetch()
166 if (dev->hif2) in mt7915_dma_prefetch()
167 __mt7915_dma_prefetch(dev, MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0)); in mt7915_dma_prefetch()
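PREFETCH(_base, _depth), defined at line 123, packs an SRAM base offset into the upper 16 bits and a prefetch depth into the lower bits of one control word; the writes above step the base in 0x40 increments (0x0, 0x40, 0x80, ...) so each ring's prefetch window is distinct, and at line 167 the same programming is replayed at the PCIE1 register offset when a second host interface is present. A minimal check of what the macro evaluates to, using values taken from the matched lines:

#include <stdio.h>

#define PREFETCH(_base, _depth)	((_base) << 16 | (_depth))

int main(void)
{
	/* MT_MCUQ_WM at line 128: base 0x40, depth 0x4 */
	printf("PREFETCH(0x40, 0x4)  = %#x\n", PREFETCH(0x40, 0x4));	/* 0x400004 */
	/* TX ring 1 at line 130: base 0xc0, depth 0x4 */
	printf("PREFETCH(0xc0, 0x4)  = %#x\n", PREFETCH(0xc0, 0x4));	/* 0xc00004 */
	/* line 155: base 0x140, depth 0x0 */
	printf("PREFETCH(0x140, 0x0) = %#x\n", PREFETCH(0x140, 0x0));	/* 0x1400000 */
	return 0;
}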
172 struct mt76_dev *mdev = &dev->mt76; in mt7915_dma_disable()
175 if (dev->hif2) in mt7915_dma_disable()
176 hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0); in mt7915_dma_disable()
198 if (dev->hif2) { in mt7915_dma_disable()
235 if (dev->hif2) { in mt7915_dma_disable()
255 struct mt76_dev *mdev = &dev->mt76; in mt7915_dma_start()
259 if (dev->hif2) in mt7915_dma_start()
260 hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0); in mt7915_dma_start()
277 if (dev->hif2) { in mt7915_dma_start()
301 if (!dev->phy.mt76->band_idx) in mt7915_dma_start()
304 if (dev->dbdc_support || dev->phy.mt76->band_idx) in mt7915_dma_start()
307 if (mtk_wed_device_active(&dev->mt76.mmio.wed) && wed_reset) { in mt7915_dma_start()
312 if (!is_mt798x(&dev->mt76)) in mt7915_dma_start()
321 mtk_wed_device_start(&dev->mt76.mmio.wed, wed_irq_mask); in mt7915_dma_start()
334 struct mt76_dev *mdev = &dev->mt76; in mt7915_dma_enable()
337 if (dev->hif2) in mt7915_dma_enable()
338 hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0); in mt7915_dma_enable()
340 /* reset dma idx */ in mt7915_dma_enable()
344 if (dev->hif2) { in mt7915_dma_enable()
359 if (dev->hif2) { in mt7915_dma_enable()
387 if (dev->hif2) { in mt7915_dma_enable()
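The recurring hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0) (lines 176, 260, 338, 418) is the fixed distance between the primary and the secondary (PCIe1) WFDMA register windows; whenever dev->hif2 is set, the same ring, prefetch and enable programming is repeated at reg + hif1_ofs. A toy illustration of that mirroring follows; the base addresses and mmio_wr() are assumptions, not the driver's register map.

#include <stdio.h>

#define WFDMA0_BASE		0xd4000u	/* assumed primary window */
#define WFDMA0_PCIE1_BASE	0xd8000u	/* assumed secondary window */

static void mmio_wr(unsigned int reg, unsigned int val)
{
	printf("write %#x -> reg %#x\n", val, reg);
}

static void setup_ring(unsigned int reg, unsigned int val, int hif2)
{
	unsigned int hif1_ofs = WFDMA0_PCIE1_BASE - WFDMA0_BASE;

	mmio_wr(reg, val);
	if (hif2)				/* mirror the setup on PCIe1 */
		mmio_wr(reg + hif1_ofs, val);
}

int main(void)
{
	setup_ring(WFDMA0_BASE + 0x600, 0x400004, 1);
	return 0;
}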
408 struct mt76_dev *mdev = &dev->mt76; in mt7915_dma_init()
415 mt76_dma_attach(&dev->mt76); in mt7915_dma_init()
417 if (dev->hif2) in mt7915_dma_init()
418 hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0); in mt7915_dma_init()
422 if (mtk_wed_device_active(&mdev->mmio.wed)) { in mt7915_dma_init()
442 ret = mt7915_init_tx_queues(&dev->phy, in mt7915_dma_init()
443 MT_TXQ_ID(dev->phy.mt76->band_idx), in mt7915_dma_init()
451 MT_TXQ_ID(phy2->mt76->band_idx), in mt7915_dma_init()
459 ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WM, in mt7915_dma_init()
467 ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WA, in mt7915_dma_init()
475 ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_FWDL, in mt7915_dma_init()
483 ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU], in mt7915_dma_init()
492 if (mtk_wed_device_active(&mdev->mmio.wed) && is_mt7915(mdev)) { in mt7915_dma_init()
495 dev->mt76.q_rx[MT_RXQ_MCU_WA].flags = MT_WED_Q_TXFREE; in mt7915_dma_init()
500 ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU_WA], in mt7915_dma_init()
507 if (!dev->phy.mt76->band_idx) { in mt7915_dma_init()
508 if (mtk_wed_device_active(&mdev->mmio.wed) && in mt7915_dma_init()
509 mtk_wed_get_rx_capa(&mdev->mmio.wed)) { in mt7915_dma_init()
510 dev->mt76.q_rx[MT_RXQ_MAIN].flags = in mt7915_dma_init()
512 dev->mt76.rx_token_size += MT7915_RX_RING_SIZE; in mt7915_dma_init()
515 ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN], in mt7915_dma_init()
529 if (mtk_wed_device_active(&mdev->mmio.wed)) { in mt7915_dma_init()
530 mdev->q_rx[MT_RXQ_MAIN_WA].flags = MT_WED_Q_TXFREE; in mt7915_dma_init()
537 ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN_WA], in mt7915_dma_init()
544 if (dev->dbdc_support || dev->phy.mt76->band_idx) { in mt7915_dma_init()
545 if (mtk_wed_device_active(&mdev->mmio.wed) && in mt7915_dma_init()
546 mtk_wed_get_rx_capa(&mdev->mmio.wed)) { in mt7915_dma_init()
547 dev->mt76.q_rx[MT_RXQ_BAND1].flags = in mt7915_dma_init()
549 dev->mt76.rx_token_size += MT7915_RX_RING_SIZE; in mt7915_dma_init()
553 ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_BAND1], in mt7915_dma_init()
562 ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_BAND1_WA], in mt7915_dma_init()
575 netif_napi_add_tx(&dev->mt76.tx_napi_dev, &dev->mt76.tx_napi, in mt7915_dma_init()
577 napi_enable(&dev->mt76.tx_napi); in mt7915_dma_init()
586 struct mt76_dev *mdev = &dev->mt76; in mt7915_dma_wed_reset()
588 if (!test_bit(MT76_STATE_WED_RESET, &dev->mphy.state)) in mt7915_dma_wed_reset()
591 complete(&mdev->mmio.wed_reset); in mt7915_dma_wed_reset()
593 if (!wait_for_completion_timeout(&dev->mt76.mmio.wed_reset_complete, in mt7915_dma_wed_reset()
595 dev_err(dev->mt76.dev, "wed reset complete timeout\n"); in mt7915_dma_wed_reset()
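mt7915_dma_wed_reset() (lines 586-595) is a completion handshake: the DMA reset path signals the WED side via mmio.wed_reset and then waits, with a timeout, on mmio.wed_reset_complete before carrying on. Below is a userspace analogue of that pattern using pthreads; it is illustrative only, the kernel driver uses complete()/wait_for_completion_timeout(), and the names and timeout here are made up.

#include <pthread.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int wed_reset;			/* stand-in for mmio.wed_reset */
static int wed_reset_complete;		/* stand-in for mmio.wed_reset_complete */

/* stands in for the WED side that performs the actual reset work */
static void *wed_side(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	while (!wed_reset)			/* wait for the DMA reset path */
		pthread_cond_wait(&cond, &lock);
	wed_reset_complete = 1;			/* report back that reset is done */
	pthread_cond_broadcast(&cond);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	struct timespec deadline;
	pthread_t thr;
	int err = 0;

	pthread_create(&thr, NULL, wed_side, NULL);

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += 3;			/* arbitrary stand-in timeout */

	pthread_mutex_lock(&lock);
	wed_reset = 1;				/* analogue of complete(&mmio.wed_reset) */
	pthread_cond_broadcast(&cond);
	/* analogue of wait_for_completion_timeout(&...wed_reset_complete, ...) */
	while (!wed_reset_complete && !err)
		err = pthread_cond_timedwait(&cond, &lock, &deadline);
	pthread_mutex_unlock(&lock);

	if (err)
		fprintf(stderr, "wed reset complete timeout\n");

	pthread_join(thr, NULL);
	return 0;
}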
602 if (mtk_wed_device_active(&dev->mt76.mmio.wed)) in mt7915_dma_reset_tx_queue()
603 mt76_dma_wed_setup(&dev->mt76, q, true); in mt7915_dma_reset_tx_queue()
608 struct mt76_phy *mphy_ext = dev->mt76.phys[MT_BAND1]; in mt7915_dma_reset()
609 struct mtk_wed_device *wed = &dev->mt76.mmio.wed; in mt7915_dma_reset()
613 for (i = 0; i < ARRAY_SIZE(dev->mt76.phy.q_tx); i++) { in mt7915_dma_reset()
614 mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true); in mt7915_dma_reset()
616 mt76_queue_tx_cleanup(dev, mphy_ext->q_tx[i], true); in mt7915_dma_reset()
619 for (i = 0; i < ARRAY_SIZE(dev->mt76.q_mcu); i++) in mt7915_dma_reset()
620 mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[i], true); in mt7915_dma_reset()
622 mt76_for_each_q_rx(&dev->mt76, i) in mt7915_dma_reset()
623 mt76_queue_rx_cleanup(dev, &dev->mt76.q_rx[i]); in mt7915_dma_reset()
637 mt7915_dma_reset_tx_queue(dev, dev->mphy.q_tx[i]); in mt7915_dma_reset()
639 mt7915_dma_reset_tx_queue(dev, mphy_ext->q_tx[i]); in mt7915_dma_reset()
643 mt76_queue_reset(dev, dev->mt76.q_mcu[i]); in mt7915_dma_reset()
645 mt76_for_each_q_rx(&dev->mt76, i) { in mt7915_dma_reset()
646 if (dev->mt76.q_rx[i].flags == MT_WED_Q_TXFREE) in mt7915_dma_reset()
649 mt76_queue_reset(dev, &dev->mt76.q_rx[i]); in mt7915_dma_reset()
652 mt76_tx_status_check(&dev->mt76, true); in mt7915_dma_reset()
654 mt76_for_each_q_rx(&dev->mt76, i) in mt7915_dma_reset()
657 if (mtk_wed_device_active(wed) && is_mt7915(&dev->mt76)) in mt7915_dma_reset()
670 mt76_dma_cleanup(&dev->mt76); in mt7915_dma_cleanup()