// SPDX-License-Identifier: ISC
/* Copyright (C) 2023 MediaTek Inc. */

#include <linux/module.h>
#include <linux/firmware.h>

#include "mt792x.h"
#include "dma.h"
#include "trace.h"

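/* Hard IRQ handler (top half): mask all host interrupts and defer the
 * actual event handling to the IRQ tasklet.  The bus glue is expected to
 * register both the handler and the tasklet; a rough sketch of that
 * wiring (probe-side variable names are illustrative only):
 *
 *	tasklet_init(&dev->mt76.irq_tasklet, mt792x_irq_tasklet,
 *		     (unsigned long)dev);
 *	err = devm_request_irq(mdev->dev, pdev->irq, mt792x_irq_handler,
 *			       IRQF_SHARED, KBUILD_MODNAME, dev);
 */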
irqreturn_t mt792x_irq_handler(int irq, void *dev_instance)
{
	struct mt792x_dev *dev = dev_instance;

	mt76_wr(dev, dev->irq_map->host_irq_enable, 0);

	if (!test_bit(MT76_STATE_INITIALIZED, &dev->mphy.state))
		return IRQ_NONE;

	tasklet_schedule(&dev->mt76.irq_tasklet);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(mt792x_irq_handler);

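/* IRQ tasklet (bottom half): read and ack the pending WFDMA interrupt
 * status, leave the sources being handed off to NAPI masked while
 * re-arming the rest, then schedule the matching TX/RX NAPI contexts.
 * An MT_INT_MCU_CMD event is acked through MT_MCU_CMD and, for
 * MT_MCU_CMD_WAKE_RX_PCIE, treated as RX data completion.  Each NAPI
 * poll routine re-enables its interrupt source once polling is done.
 */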
void mt792x_irq_tasklet(unsigned long data)
{
	struct mt792x_dev *dev = (struct mt792x_dev *)data;
	const struct mt792x_irq_map *irq_map = dev->irq_map;
	u32 intr, mask = 0;

	mt76_wr(dev, irq_map->host_irq_enable, 0);

	intr = mt76_rr(dev, MT_WFDMA0_HOST_INT_STA);
	intr &= dev->mt76.mmio.irqmask;
	mt76_wr(dev, MT_WFDMA0_HOST_INT_STA, intr);

	trace_dev_irq(&dev->mt76, intr, dev->mt76.mmio.irqmask);

	mask |= intr & (irq_map->rx.data_complete_mask |
			irq_map->rx.wm_complete_mask |
			irq_map->rx.wm2_complete_mask);
	if (intr & dev->irq_map->tx.mcu_complete_mask)
		mask |= dev->irq_map->tx.mcu_complete_mask;

	if (intr & MT_INT_MCU_CMD) {
		u32 intr_sw;

		intr_sw = mt76_rr(dev, MT_MCU_CMD);
		/* ack MCU2HOST_SW_INT_STA */
		mt76_wr(dev, MT_MCU_CMD, intr_sw);
		if (intr_sw & MT_MCU_CMD_WAKE_RX_PCIE) {
			mask |= irq_map->rx.data_complete_mask;
			intr |= irq_map->rx.data_complete_mask;
		}
	}

	mt76_set_irq_mask(&dev->mt76, irq_map->host_irq_enable, mask, 0);

	if (intr & dev->irq_map->tx.all_complete_mask)
		napi_schedule(&dev->mt76.tx_napi);

	if (intr & irq_map->rx.wm_complete_mask)
		napi_schedule(&dev->mt76.napi[MT_RXQ_MCU]);

	if (intr & irq_map->rx.wm2_complete_mask)
		napi_schedule(&dev->mt76.napi[MT_RXQ_MCU_WA]);

	if (intr & irq_map->rx.data_complete_mask)
		napi_schedule(&dev->mt76.napi[MT_RXQ_MAIN]);
}
EXPORT_SYMBOL_GPL(mt792x_irq_tasklet);

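/* Called by the mt76 core once an RX NAPI poll has completed: re-enable
 * the interrupt source that belongs to the queue which was just polled.
 */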
void mt792x_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
{
	struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76);
	const struct mt792x_irq_map *irq_map = dev->irq_map;

	if (q == MT_RXQ_MAIN)
		mt76_connac_irq_enable(mdev, irq_map->rx.data_complete_mask);
	else if (q == MT_RXQ_MCU_WA)
		mt76_connac_irq_enable(mdev, irq_map->rx.wm2_complete_mask);
	else
		mt76_connac_irq_enable(mdev, irq_map->rx.wm_complete_mask);
}
EXPORT_SYMBOL_GPL(mt792x_rx_poll_complete);

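/* Program the per-ring prefetch configuration of WFDMA0: PREFETCH()
 * packs the base offset into bits 31:16 and the prefetch depth into the
 * low bits of each *_EXT_CTRL register.
 */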
#define PREFETCH(base, depth)	((base) << 16 | (depth))
static void mt792x_dma_prefetch(struct mt792x_dev *dev)
{
	mt76_wr(dev, MT_WFDMA0_RX_RING0_EXT_CTRL, PREFETCH(0x0, 0x4));
	mt76_wr(dev, MT_WFDMA0_RX_RING2_EXT_CTRL, PREFETCH(0x40, 0x4));
	mt76_wr(dev, MT_WFDMA0_RX_RING3_EXT_CTRL, PREFETCH(0x80, 0x4));
	mt76_wr(dev, MT_WFDMA0_RX_RING4_EXT_CTRL, PREFETCH(0xc0, 0x4));
	mt76_wr(dev, MT_WFDMA0_RX_RING5_EXT_CTRL, PREFETCH(0x100, 0x4));

	mt76_wr(dev, MT_WFDMA0_TX_RING0_EXT_CTRL, PREFETCH(0x140, 0x4));
	mt76_wr(dev, MT_WFDMA0_TX_RING1_EXT_CTRL, PREFETCH(0x180, 0x4));
	mt76_wr(dev, MT_WFDMA0_TX_RING2_EXT_CTRL, PREFETCH(0x1c0, 0x4));
	mt76_wr(dev, MT_WFDMA0_TX_RING3_EXT_CTRL, PREFETCH(0x200, 0x4));
	mt76_wr(dev, MT_WFDMA0_TX_RING4_EXT_CTRL, PREFETCH(0x240, 0x4));
	mt76_wr(dev, MT_WFDMA0_TX_RING5_EXT_CTRL, PREFETCH(0x280, 0x4));
	mt76_wr(dev, MT_WFDMA0_TX_RING6_EXT_CTRL, PREFETCH(0x2c0, 0x4));
	mt76_wr(dev, MT_WFDMA0_TX_RING16_EXT_CTRL, PREFETCH(0x340, 0x4));
	mt76_wr(dev, MT_WFDMA0_TX_RING17_EXT_CTRL, PREFETCH(0x380, 0x4));
}

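/* Bring WFDMA0 up: program prefetch, reset the DMA indices, clear the
 * delay-interrupt configuration, enable the TX/RX DMA engines and unmask
 * the TX/RX/MCU completion interrupts.
 */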
int mt792x_dma_enable(struct mt792x_dev *dev)
{
	/* configure prefetch settings */
	mt792x_dma_prefetch(dev);

	/* reset dma idx */
	mt76_wr(dev, MT_WFDMA0_RST_DTX_PTR, ~0);

	/* configure delay interrupt */
	mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG0, 0);

	mt76_set(dev, MT_WFDMA0_GLO_CFG,
		 MT_WFDMA0_GLO_CFG_TX_WB_DDONE |
		 MT_WFDMA0_GLO_CFG_FIFO_LITTLE_ENDIAN |
		 MT_WFDMA0_GLO_CFG_CLK_GAT_DIS |
		 MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
		 MT_WFDMA0_GLO_CFG_CSR_DISP_BASE_PTR_CHAIN_EN |
		 MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);

	mt76_set(dev, MT_WFDMA0_GLO_CFG,
		 MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN);

	mt76_set(dev, MT_WFDMA_DUMMY_CR, MT_WFDMA_NEED_REINIT);

	/* enable interrupts for TX/RX rings */
	mt76_connac_irq_enable(&dev->mt76,
			       dev->irq_map->tx.all_complete_mask |
			       dev->irq_map->rx.data_complete_mask |
			       dev->irq_map->rx.wm2_complete_mask |
			       dev->irq_map->rx.wm_complete_mask |
			       MT_INT_MCU_CMD);
	mt76_set(dev, MT_MCU2HOST_SW_INT_ENA, MT_MCU_CMD_WAKE_RX_PCIE);

	return 0;
}
EXPORT_SYMBOL_GPL(mt792x_dma_enable);

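/* Disable the DMA engines, reset every TX/MCU/RX hardware queue and
 * flush pending TX status, then re-enable DMA via mt792x_dma_enable().
 */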
static int
mt792x_dma_reset(struct mt792x_dev *dev, bool force)
{
	int i, err;

	err = mt792x_dma_disable(dev, force);
	if (err)
		return err;

	/* reset hw queues */
	for (i = 0; i < __MT_TXQ_MAX; i++)
		mt76_queue_reset(dev, dev->mphy.q_tx[i]);

	for (i = 0; i < __MT_MCUQ_MAX; i++)
		mt76_queue_reset(dev, dev->mt76.q_mcu[i]);

	mt76_for_each_q_rx(&dev->mt76, i)
		mt76_queue_reset(dev, &dev->mt76.q_rx[i]);

	mt76_tx_status_check(&dev->mt76, true);

	return mt792x_dma_enable(dev);
}

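/* Full WPDMA recovery path: drain all TX/MCU/RX queues, optionally reset
 * the WiFi subsystem (force == true), reinitialize the DMA engine and
 * finally reset the RX queues so buffers are refilled.
 */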
int mt792x_wpdma_reset(struct mt792x_dev *dev, bool force)
{
	int i, err;

	/* clean up hw queues */
	for (i = 0; i < ARRAY_SIZE(dev->mt76.phy.q_tx); i++)
		mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true);

	for (i = 0; i < ARRAY_SIZE(dev->mt76.q_mcu); i++)
		mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[i], true);

	mt76_for_each_q_rx(&dev->mt76, i)
		mt76_queue_rx_cleanup(dev, &dev->mt76.q_rx[i]);

	if (force) {
		err = mt792x_wfsys_reset(dev);
		if (err)
			return err;
	}
	err = mt792x_dma_reset(dev, force);
	if (err)
		return err;

	mt76_for_each_q_rx(&dev->mt76, i)
		mt76_queue_rx_reset(dev, i);

	return 0;
}
EXPORT_SYMBOL_GPL(mt792x_wpdma_reset);

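/* Conditionally reinitialize WPDMA after a low-power wakeup: if
 * mt792x_dma_need_reinit() reports that the DMA state was lost, reset
 * WPDMA with host and PCIe MAC interrupts disabled and account the
 * wakeup in the runtime-PM statistics.
 */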
int mt792x_wpdma_reinit_cond(struct mt792x_dev *dev)
{
	struct mt76_connac_pm *pm = &dev->pm;
	int err;

	/* check if the wpdma must be reinitialized */
	if (mt792x_dma_need_reinit(dev)) {
		/* disable interrupts */
		mt76_wr(dev, dev->irq_map->host_irq_enable, 0);
		mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0x0);

		err = mt792x_wpdma_reset(dev, false);
		if (err) {
			dev_err(dev->mt76.dev, "wpdma reset failed\n");
			return err;
		}

		/* enable interrupts */
		mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
		pm->stats.lp_wake++;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mt792x_wpdma_reinit_cond);

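/* Stop the WFDMA0 engines and wait for them to go idle, bypass the DMA
 * scheduler (dmashdl), and optionally pulse the logic/dmashdl reset bits
 * when a forced reset is requested.
 */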
int mt792x_dma_disable(struct mt792x_dev *dev, bool force)
{
	/* disable WFDMA0 */
	mt76_clear(dev, MT_WFDMA0_GLO_CFG,
		   MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN |
		   MT_WFDMA0_GLO_CFG_CSR_DISP_BASE_PTR_CHAIN_EN |
		   MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
		   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO |
		   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);

	if (!mt76_poll_msec_tick(dev, MT_WFDMA0_GLO_CFG,
				 MT_WFDMA0_GLO_CFG_TX_DMA_BUSY |
				 MT_WFDMA0_GLO_CFG_RX_DMA_BUSY, 0, 100, 1))
		return -ETIMEDOUT;

	/* disable dmashdl */
	mt76_clear(dev, MT_WFDMA0_GLO_CFG_EXT0,
		   MT_WFDMA0_CSR_TX_DMASHDL_ENABLE);
	mt76_set(dev, MT_DMASHDL_SW_CONTROL, MT_DMASHDL_DMASHDL_BYPASS);

	if (force) {
		/* reset */
		mt76_clear(dev, MT_WFDMA0_RST,
			   MT_WFDMA0_RST_DMASHDL_ALL_RST |
			   MT_WFDMA0_RST_LOGIC_RST);

		mt76_set(dev, MT_WFDMA0_RST,
			 MT_WFDMA0_RST_DMASHDL_ALL_RST |
			 MT_WFDMA0_RST_LOGIC_RST);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mt792x_dma_disable);

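/* Teardown path: stop WFDMA0, reset the DMA logic and release all queue
 * memory through mt76_dma_cleanup().
 */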
void mt792x_dma_cleanup(struct mt792x_dev *dev)
{
	/* disable */
	mt76_clear(dev, MT_WFDMA0_GLO_CFG,
		   MT_WFDMA0_GLO_CFG_TX_DMA_EN |
		   MT_WFDMA0_GLO_CFG_RX_DMA_EN |
		   MT_WFDMA0_GLO_CFG_CSR_DISP_BASE_PTR_CHAIN_EN |
		   MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
		   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO |
		   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);

	mt76_poll_msec_tick(dev, MT_WFDMA0_GLO_CFG,
			    MT_WFDMA0_GLO_CFG_TX_DMA_BUSY |
			    MT_WFDMA0_GLO_CFG_RX_DMA_BUSY, 0, 100, 1);

	/* reset */
	mt76_clear(dev, MT_WFDMA0_RST,
		   MT_WFDMA0_RST_DMASHDL_ALL_RST |
		   MT_WFDMA0_RST_LOGIC_RST);

	mt76_set(dev, MT_WFDMA0_RST,
		 MT_WFDMA0_RST_DMASHDL_ALL_RST |
		 MT_WFDMA0_RST_LOGIC_RST);

	mt76_dma_cleanup(&dev->mt76);
}
EXPORT_SYMBOL_GPL(mt792x_dma_cleanup);

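/* NAPI TX poll: take a runtime-PM reference before touching the
 * hardware; if the device is asleep, schedule the wake work and bail
 * out.  Otherwise clean up completed TX descriptors and re-enable the
 * TX completion interrupts once NAPI is done.  Registration happens in
 * the bus-specific DMA init, roughly like this (sketch only, the exact
 * NAPI helper depends on the kernel version):
 *
 *	netif_napi_add_tx(&dev->mt76.tx_napi_dev, &dev->mt76.tx_napi,
 *			  mt792x_poll_tx);
 *	napi_enable(&dev->mt76.tx_napi);
 */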
int mt792x_poll_tx(struct napi_struct *napi, int budget)
{
	struct mt792x_dev *dev;

	dev = container_of(napi, struct mt792x_dev, mt76.tx_napi);

	if (!mt76_connac_pm_ref(&dev->mphy, &dev->pm)) {
		napi_complete(napi);
		queue_work(dev->mt76.wq, &dev->pm.wake_work);
		return 0;
	}

	mt76_connac_tx_cleanup(&dev->mt76);
	if (napi_complete(napi))
		mt76_connac_irq_enable(&dev->mt76,
				       dev->irq_map->tx.all_complete_mask);
	mt76_connac_pm_unref(&dev->mphy, &dev->pm);

	return 0;
}
EXPORT_SYMBOL_GPL(mt792x_poll_tx);

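/* NAPI RX poll: same runtime-PM handling as the TX path, with the actual
 * RX processing delegated to mt76_dma_rx_poll().
 */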
int mt792x_poll_rx(struct napi_struct *napi, int budget)
{
	struct mt792x_dev *dev;
	int done;

	dev = container_of(napi->dev, struct mt792x_dev, mt76.napi_dev);

	if (!mt76_connac_pm_ref(&dev->mphy, &dev->pm)) {
		napi_complete(napi);
		queue_work(dev->mt76.wq, &dev->pm.wake_work);
		return 0;
	}
	done = mt76_dma_rx_poll(napi, budget);
	mt76_connac_pm_unref(&dev->mphy, &dev->pm);

	return done;
}
EXPORT_SYMBOL_GPL(mt792x_poll_rx);

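/* Toggle the WFSYS software reset bit and wait up to 500ms for the
 * init-done flag.  The reset control register sits at a different
 * offset on MT7921 than on the other mt792x chips.
 */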
int mt792x_wfsys_reset(struct mt792x_dev *dev)
{
	u32 addr = is_mt7921(&dev->mt76) ? 0x18000140 : 0x7c000140;

	mt76_clear(dev, addr, WFSYS_SW_RST_B);
	msleep(50);
	mt76_set(dev, addr, WFSYS_SW_RST_B);

	if (!__mt76_poll_msec(&dev->mt76, addr, WFSYS_SW_INIT_DONE,
			      WFSYS_SW_INIT_DONE, 500))
		return -ETIMEDOUT;

	return 0;
}
EXPORT_SYMBOL_GPL(mt792x_wfsys_reset);