1 // SPDX-License-Identifier: ISC
2 /* Copyright (C) 2023 MediaTek Inc. */
3 
4 #include <linux/module.h>
5 #include <linux/firmware.h>
6 
7 #include "mt792x.h"
8 #include "dma.h"
9 #include "trace.h"
10 
/* Top-half interrupt handler shared by the mt792x PCIe drivers.
 * Masks all host interrupts and defers the real work to the irq tasklet.
 * Returns IRQ_NONE if the device is gone or not yet initialized.
 */
irqreturn_t mt792x_irq_handler(int irq, void *dev_instance)
{
	struct mt792x_dev *dev = dev_instance;

	/* device has been removed: do not touch the (dead) MMIO space */
	if (test_bit(MT76_REMOVED, &dev->mt76.phy.state))
		return IRQ_NONE;
	/* mask everything; the tasklet re-enables what it did not service */
	mt76_wr(dev, dev->irq_map->host_irq_enable, 0);

	if (!test_bit(MT76_STATE_INITIALIZED, &dev->mphy.state))
		return IRQ_NONE;

	/* bottom half does the status read/ack and NAPI scheduling */
	tasklet_schedule(&dev->mt76.irq_tasklet);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(mt792x_irq_handler);
27 
/* Bottom-half interrupt processing: read and acknowledge the pending
 * WFDMA interrupt sources, then schedule the matching NAPI instances.
 * Sources collected in @mask are kept disabled until their poll
 * handlers re-enable them (see mt792x_rx_poll_complete()).
 */
void mt792x_irq_tasklet(unsigned long data)
{
	struct mt792x_dev *dev = (struct mt792x_dev *)data;
	const struct mt792x_irq_map *irq_map = dev->irq_map;
	u32 intr, mask = 0;

	/* keep all sources masked while deciding what to service */
	mt76_wr(dev, irq_map->host_irq_enable, 0);

	/* read pending status, filter by the software irqmask, and ack */
	intr = mt76_rr(dev, MT_WFDMA0_HOST_INT_STA);
	intr &= dev->mt76.mmio.irqmask;
	mt76_wr(dev, MT_WFDMA0_HOST_INT_STA, intr);

	trace_dev_irq(&dev->mt76, intr, dev->mt76.mmio.irqmask);

	/* sources whose handlers run below stay disabled for now */
	mask |= intr & (irq_map->rx.data_complete_mask |
			irq_map->rx.wm_complete_mask |
			irq_map->rx.wm2_complete_mask);
	if (intr & dev->irq_map->tx.mcu_complete_mask)
		mask |= dev->irq_map->tx.mcu_complete_mask;

	if (intr & MT_INT_MCU_CMD) {
		u32 intr_sw;

		intr_sw = mt76_rr(dev, MT_MCU_CMD);
		/* ack MCU2HOST_SW_INT_STA */
		mt76_wr(dev, MT_MCU_CMD, intr_sw);
		if (intr_sw & MT_MCU_CMD_WAKE_RX_PCIE) {
			/* firmware requests a drain of the RX data ring;
			 * treat it as a pending data-complete interrupt
			 */
			mask |= irq_map->rx.data_complete_mask;
			intr |= irq_map->rx.data_complete_mask;
		}
	}

	/* clear @mask bits from the cached irqmask and rewrite the
	 * enable register, leaving the serviced sources off
	 */
	mt76_set_irq_mask(&dev->mt76, irq_map->host_irq_enable, mask, 0);

	if (intr & dev->irq_map->tx.all_complete_mask)
		napi_schedule(&dev->mt76.tx_napi);

	if (intr & irq_map->rx.wm_complete_mask)
		napi_schedule(&dev->mt76.napi[MT_RXQ_MCU]);

	if (intr & irq_map->rx.wm2_complete_mask)
		napi_schedule(&dev->mt76.napi[MT_RXQ_MCU_WA]);

	if (intr & irq_map->rx.data_complete_mask)
		napi_schedule(&dev->mt76.napi[MT_RXQ_MAIN]);
}
EXPORT_SYMBOL_GPL(mt792x_irq_tasklet);
75 
mt792x_rx_poll_complete(struct mt76_dev * mdev,enum mt76_rxq_id q)76 void mt792x_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
77 {
78 	struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76);
79 	const struct mt792x_irq_map *irq_map = dev->irq_map;
80 
81 	if (q == MT_RXQ_MAIN)
82 		mt76_connac_irq_enable(mdev, irq_map->rx.data_complete_mask);
83 	else if (q == MT_RXQ_MCU_WA)
84 		mt76_connac_irq_enable(mdev, irq_map->rx.wm2_complete_mask);
85 	else
86 		mt76_connac_irq_enable(mdev, irq_map->rx.wm_complete_mask);
87 }
88 EXPORT_SYMBOL_GPL(mt792x_rx_poll_complete);
89 
90 #define PREFETCH(base, depth)	((base) << 16 | (depth))
mt792x_dma_prefetch(struct mt792x_dev * dev)91 static void mt792x_dma_prefetch(struct mt792x_dev *dev)
92 {
93 	mt76_wr(dev, MT_WFDMA0_RX_RING0_EXT_CTRL, PREFETCH(0x0, 0x4));
94 	mt76_wr(dev, MT_WFDMA0_RX_RING2_EXT_CTRL, PREFETCH(0x40, 0x4));
95 	mt76_wr(dev, MT_WFDMA0_RX_RING3_EXT_CTRL, PREFETCH(0x80, 0x4));
96 	mt76_wr(dev, MT_WFDMA0_RX_RING4_EXT_CTRL, PREFETCH(0xc0, 0x4));
97 	mt76_wr(dev, MT_WFDMA0_RX_RING5_EXT_CTRL, PREFETCH(0x100, 0x4));
98 
99 	mt76_wr(dev, MT_WFDMA0_TX_RING0_EXT_CTRL, PREFETCH(0x140, 0x4));
100 	mt76_wr(dev, MT_WFDMA0_TX_RING1_EXT_CTRL, PREFETCH(0x180, 0x4));
101 	mt76_wr(dev, MT_WFDMA0_TX_RING2_EXT_CTRL, PREFETCH(0x1c0, 0x4));
102 	mt76_wr(dev, MT_WFDMA0_TX_RING3_EXT_CTRL, PREFETCH(0x200, 0x4));
103 	mt76_wr(dev, MT_WFDMA0_TX_RING4_EXT_CTRL, PREFETCH(0x240, 0x4));
104 	mt76_wr(dev, MT_WFDMA0_TX_RING5_EXT_CTRL, PREFETCH(0x280, 0x4));
105 	mt76_wr(dev, MT_WFDMA0_TX_RING6_EXT_CTRL, PREFETCH(0x2c0, 0x4));
106 	mt76_wr(dev, MT_WFDMA0_TX_RING16_EXT_CTRL, PREFETCH(0x340, 0x4));
107 	mt76_wr(dev, MT_WFDMA0_TX_RING17_EXT_CTRL, PREFETCH(0x380, 0x4));
108 }
109 
/* Bring the WFDMA0 engine up: configure prefetch, reset the descriptor
 * pointers, enable TX/RX DMA and unmask the completion/MCU interrupts.
 * Always returns 0.
 */
int mt792x_dma_enable(struct mt792x_dev *dev)
{
	/* configure prefetch settings */
	mt792x_dma_prefetch(dev);

	/* reset dma idx */
	mt76_wr(dev, MT_WFDMA0_RST_DTX_PTR, ~0);

	/* configure delay interrupt */
	mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG0, 0);

	/* engine configuration must be in place before DMA is switched on */
	mt76_set(dev, MT_WFDMA0_GLO_CFG,
		 MT_WFDMA0_GLO_CFG_TX_WB_DDONE |
		 MT_WFDMA0_GLO_CFG_FIFO_LITTLE_ENDIAN |
		 MT_WFDMA0_GLO_CFG_CLK_GAT_DIS |
		 MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
		 MT_WFDMA0_GLO_CFG_CSR_DISP_BASE_PTR_CHAIN_EN |
		 MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);

	mt76_set(dev, MT_WFDMA0_GLO_CFG,
		 MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN);

	/* flag checked by mt792x_dma_need_reinit() on wakeup paths */
	mt76_set(dev, MT_WFDMA_DUMMY_CR, MT_WFDMA_NEED_REINIT);

	/* enable interrupts for TX/RX rings */
	mt76_connac_irq_enable(&dev->mt76,
			       dev->irq_map->tx.all_complete_mask |
			       dev->irq_map->rx.data_complete_mask |
			       dev->irq_map->rx.wm2_complete_mask |
			       dev->irq_map->rx.wm_complete_mask |
			       MT_INT_MCU_CMD);
	/* allow firmware to wake us for RX via MCU2HOST SW interrupt */
	mt76_set(dev, MT_MCU2HOST_SW_INT_ENA, MT_MCU_CMD_WAKE_RX_PCIE);

	return 0;
}
EXPORT_SYMBOL_GPL(mt792x_dma_enable);
146 
147 static int
mt792x_dma_reset(struct mt792x_dev * dev,bool force)148 mt792x_dma_reset(struct mt792x_dev *dev, bool force)
149 {
150 	int i, err;
151 
152 	err = mt792x_dma_disable(dev, force);
153 	if (err)
154 		return err;
155 
156 	/* reset hw queues */
157 	for (i = 0; i < __MT_TXQ_MAX; i++)
158 		mt76_queue_reset(dev, dev->mphy.q_tx[i]);
159 
160 	for (i = 0; i < __MT_MCUQ_MAX; i++)
161 		mt76_queue_reset(dev, dev->mt76.q_mcu[i]);
162 
163 	mt76_for_each_q_rx(&dev->mt76, i)
164 		mt76_queue_reset(dev, &dev->mt76.q_rx[i]);
165 
166 	mt76_tx_status_check(&dev->mt76, true);
167 
168 	return mt792x_dma_enable(dev);
169 }
170 
/* Full WPDMA recovery: drop all queued TX/RX buffers, optionally reset
 * the WF subsystem, re-init DMA and then reset the RX queues.
 * @force: perform a WFSYS software reset before the DMA reset
 * Returns 0 on success or a negative error code.
 */
int mt792x_wpdma_reset(struct mt792x_dev *dev, bool force)
{
	int i, err;

	/* clean up hw queues */
	for (i = 0; i < ARRAY_SIZE(dev->mt76.phy.q_tx); i++)
		mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true);

	for (i = 0; i < ARRAY_SIZE(dev->mt76.q_mcu); i++)
		mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[i], true);

	mt76_for_each_q_rx(&dev->mt76, i)
		mt76_queue_rx_cleanup(dev, &dev->mt76.q_rx[i]);

	if (force) {
		err = mt792x_wfsys_reset(dev);
		if (err)
			return err;
	}
	err = mt792x_dma_reset(dev, force);
	if (err)
		return err;

	/* re-init the rx queues that were cleaned up above */
	mt76_for_each_q_rx(&dev->mt76, i)
		mt76_queue_rx_reset(dev, i);

	return 0;
}
EXPORT_SYMBOL_GPL(mt792x_wpdma_reset);
200 
mt792x_wpdma_reinit_cond(struct mt792x_dev * dev)201 int mt792x_wpdma_reinit_cond(struct mt792x_dev *dev)
202 {
203 	struct mt76_connac_pm *pm = &dev->pm;
204 	int err;
205 
206 	/* check if the wpdma must be reinitialized */
207 	if (mt792x_dma_need_reinit(dev)) {
208 		/* disable interrutpts */
209 		mt76_wr(dev, dev->irq_map->host_irq_enable, 0);
210 		mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0x0);
211 
212 		err = mt792x_wpdma_reset(dev, false);
213 		if (err) {
214 			dev_err(dev->mt76.dev, "wpdma reset failed\n");
215 			return err;
216 		}
217 
218 		/* enable interrutpts */
219 		mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
220 		pm->stats.lp_wake++;
221 	}
222 
223 	return 0;
224 }
225 EXPORT_SYMBOL_GPL(mt792x_wpdma_reinit_cond);
226 
/* Stop the WFDMA0 engine and wait for in-flight DMA to drain.
 * @force: additionally pulse the DMASHDL/logic reset bits
 * Returns 0 on success, -ETIMEDOUT if the engine is still busy after
 * the 100ms poll below.
 */
int mt792x_dma_disable(struct mt792x_dev *dev, bool force)
{
	/* disable WFDMA0 */
	mt76_clear(dev, MT_WFDMA0_GLO_CFG,
		   MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN |
		   MT_WFDMA0_GLO_CFG_CSR_DISP_BASE_PTR_CHAIN_EN |
		   MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
		   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO |
		   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);

	/* wait for any in-flight TX/RX DMA to finish */
	if (!mt76_poll_msec_tick(dev, MT_WFDMA0_GLO_CFG,
				 MT_WFDMA0_GLO_CFG_TX_DMA_BUSY |
				 MT_WFDMA0_GLO_CFG_RX_DMA_BUSY, 0, 100, 1))
		return -ETIMEDOUT;

	/* disable dmashdl */
	mt76_clear(dev, MT_WFDMA0_GLO_CFG_EXT0,
		   MT_WFDMA0_CSR_TX_DMASHDL_ENABLE);
	mt76_set(dev, MT_DMASHDL_SW_CONTROL, MT_DMASHDL_DMASHDL_BYPASS);

	if (force) {
		/* reset: clear then set to pulse the reset lines */
		mt76_clear(dev, MT_WFDMA0_RST,
			   MT_WFDMA0_RST_DMASHDL_ALL_RST |
			   MT_WFDMA0_RST_LOGIC_RST);

		mt76_set(dev, MT_WFDMA0_RST,
			 MT_WFDMA0_RST_DMASHDL_ALL_RST |
			 MT_WFDMA0_RST_LOGIC_RST);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mt792x_dma_disable);
261 
/* Final DMA teardown: stop WFDMA0, wait (best effort, no error path)
 * for it to drain, pulse the reset lines and release the core mt76 DMA
 * state via mt76_dma_cleanup().
 */
void mt792x_dma_cleanup(struct mt792x_dev *dev)
{
	/* disable */
	mt76_clear(dev, MT_WFDMA0_GLO_CFG,
		   MT_WFDMA0_GLO_CFG_TX_DMA_EN |
		   MT_WFDMA0_GLO_CFG_RX_DMA_EN |
		   MT_WFDMA0_GLO_CFG_CSR_DISP_BASE_PTR_CHAIN_EN |
		   MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
		   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO |
		   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);

	/* timeout is ignored here: we are tearing down regardless */
	mt76_poll_msec_tick(dev, MT_WFDMA0_GLO_CFG,
			    MT_WFDMA0_GLO_CFG_TX_DMA_BUSY |
			    MT_WFDMA0_GLO_CFG_RX_DMA_BUSY, 0, 100, 1);

	/* reset */
	mt76_clear(dev, MT_WFDMA0_RST,
		   MT_WFDMA0_RST_DMASHDL_ALL_RST |
		   MT_WFDMA0_RST_LOGIC_RST);

	mt76_set(dev, MT_WFDMA0_RST,
		 MT_WFDMA0_RST_DMASHDL_ALL_RST |
		 MT_WFDMA0_RST_LOGIC_RST);

	mt76_dma_cleanup(&dev->mt76);
}
EXPORT_SYMBOL_GPL(mt792x_dma_cleanup);
289 
/* NAPI poll handler for TX completions.  Always reports 0 work done;
 * TX cleanup is not budget-driven here.
 */
int mt792x_poll_tx(struct napi_struct *napi, int budget)
{
	struct mt792x_dev *dev;

	dev = container_of(napi, struct mt792x_dev, mt76.tx_napi);

	/* device is in runtime-pm sleep: we must not touch the hardware.
	 * Complete NAPI and let the pm wake worker take over.
	 */
	if (!mt76_connac_pm_ref(&dev->mphy, &dev->pm)) {
		napi_complete(napi);
		queue_work(dev->mt76.wq, &dev->pm.wake_work);
		return 0;
	}

	mt76_connac_tx_cleanup(&dev->mt76);
	/* re-arm tx interrupts only if napi_complete() accepted completion
	 * (i.e. no new poll was scheduled in the meantime)
	 */
	if (napi_complete(napi))
		mt76_connac_irq_enable(&dev->mt76,
				       dev->irq_map->tx.all_complete_mask);
	mt76_connac_pm_unref(&dev->mphy, &dev->pm);

	return 0;
}
EXPORT_SYMBOL_GPL(mt792x_poll_tx);
311 
/* NAPI poll handler for the RX queues: wraps the generic mt76 DMA RX
 * poll with a runtime-pm reference.  Returns the number of processed
 * frames (0 when the device was asleep and the work was deferred).
 */
int mt792x_poll_rx(struct napi_struct *napi, int budget)
{
	struct mt792x_dev *dev;
	int done;

	dev = container_of(napi->dev, struct mt792x_dev, mt76.napi_dev);

	/* device asleep: defer to the pm wake worker, do not touch hw */
	if (!mt76_connac_pm_ref(&dev->mphy, &dev->pm)) {
		napi_complete(napi);
		queue_work(dev->mt76.wq, &dev->pm.wake_work);
		return 0;
	}
	done = mt76_dma_rx_poll(napi, budget);
	mt76_connac_pm_unref(&dev->mphy, &dev->pm);

	return done;
}
EXPORT_SYMBOL_GPL(mt792x_poll_rx);
330 
mt792x_wfsys_reset(struct mt792x_dev * dev)331 int mt792x_wfsys_reset(struct mt792x_dev *dev)
332 {
333 	u32 addr = is_mt7921(&dev->mt76) ? 0x18000140 : 0x7c000140;
334 
335 	mt76_clear(dev, addr, WFSYS_SW_RST_B);
336 	msleep(50);
337 	mt76_set(dev, addr, WFSYS_SW_RST_B);
338 
339 	if (!__mt76_poll_msec(&dev->mt76, addr, WFSYS_SW_INIT_DONE,
340 			      WFSYS_SW_INIT_DONE, 500))
341 		return -ETIMEDOUT;
342 
343 	return 0;
344 }
345 EXPORT_SYMBOL_GPL(mt792x_wfsys_reset);
346 
347