xref: /openbmc/linux/drivers/net/ethernet/mediatek/mtk_wed.c (revision f2bb566f5c977ff010baaa9e5e14d9a75b06e5f2)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (C) 2021 Felix Fietkau <nbd@nbd.name> */
3 
4 #include <linux/kernel.h>
5 #include <linux/slab.h>
6 #include <linux/module.h>
7 #include <linux/bitfield.h>
8 #include <linux/dma-mapping.h>
9 #include <linux/skbuff.h>
10 #include <linux/of_platform.h>
11 #include <linux/of_address.h>
12 #include <linux/of_reserved_mem.h>
13 #include <linux/mfd/syscon.h>
14 #include <linux/debugfs.h>
15 #include <linux/soc/mediatek/mtk_wed.h>
16 #include "mtk_eth_soc.h"
17 #include "mtk_wed_regs.h"
18 #include "mtk_wed.h"
19 #include "mtk_ppe.h"
20 #include "mtk_wed_wo.h"
21 
22 #define MTK_PCIE_BASE(n)		(0x1a143000 + (n) * 0x2000)
23 
24 #define MTK_WED_PKT_SIZE		1900
25 #define MTK_WED_BUF_SIZE		2048
26 #define MTK_WED_BUF_PER_PAGE		(PAGE_SIZE / MTK_WED_BUF_SIZE)
27 #define MTK_WED_RX_RING_SIZE		1536
28 
29 #define MTK_WED_TX_RING_SIZE		2048
30 #define MTK_WED_WDMA_RING_SIZE		1024
31 #define MTK_WED_MAX_GROUP_SIZE		0x100
32 #define MTK_WED_VLD_GROUP_SIZE		0x40
33 #define MTK_WED_PER_GROUP_PKT		128
34 
35 #define MTK_WED_FBUF_SIZE		128
36 #define MTK_WED_MIOD_CNT		16
37 #define MTK_WED_FB_CMD_CNT		1024
38 #define MTK_WED_RRO_QUE_CNT		8192
39 #define MTK_WED_MIOD_ENTRY_CNT		128
40 
41 static struct mtk_wed_hw *hw_list[2];
42 static DEFINE_MUTEX(hw_lock);
43 
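/* Register access helpers: the wed_*() accessors below touch the WED core
 * block through the regmap in dev->hw->regs, the wdma_*() helpers go through
 * the wdma_r32()/wdma_w32() accessors for the WDMA block paired with this
 * WED instance, and wifi_r32()/wifi_w32() access the WLAN chip's WPDMA
 * registers via the dev->wlan.base mapping provided by the WLAN driver.
 */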
44 static void
45 wed_m32(struct mtk_wed_device *dev, u32 reg, u32 mask, u32 val)
46 {
47 	regmap_update_bits(dev->hw->regs, reg, mask | val, val);
48 }
49 
50 static void
51 wed_set(struct mtk_wed_device *dev, u32 reg, u32 mask)
52 {
53 	return wed_m32(dev, reg, 0, mask);
54 }
55 
56 static void
57 wed_clr(struct mtk_wed_device *dev, u32 reg, u32 mask)
58 {
59 	return wed_m32(dev, reg, mask, 0);
60 }
61 
62 static void
63 wdma_m32(struct mtk_wed_device *dev, u32 reg, u32 mask, u32 val)
64 {
65 	wdma_w32(dev, reg, (wdma_r32(dev, reg) & ~mask) | val);
66 }
67 
68 static void
69 wdma_set(struct mtk_wed_device *dev, u32 reg, u32 mask)
70 {
71 	wdma_m32(dev, reg, 0, mask);
72 }
73 
74 static void
75 wdma_clr(struct mtk_wed_device *dev, u32 reg, u32 mask)
76 {
77 	wdma_m32(dev, reg, mask, 0);
78 }
79 
80 static u32
81 wifi_r32(struct mtk_wed_device *dev, u32 reg)
82 {
83 	return readl(dev->wlan.base + reg);
84 }
85 
86 static void
87 wifi_w32(struct mtk_wed_device *dev, u32 reg, u32 val)
88 {
89 	writel(val, dev->wlan.base + reg);
90 }
91 
92 static u32
93 mtk_wed_read_reset(struct mtk_wed_device *dev)
94 {
95 	return wed_r32(dev, MTK_WED_RESET);
96 }
97 
98 static u32
99 mtk_wdma_read_reset(struct mtk_wed_device *dev)
100 {
101 	return wdma_r32(dev, MTK_WDMA_GLO_CFG);
102 }
103 
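/* Stop the WDMA RX engine: clear the RX DMA enable bit, poll until the
 * RX DMA busy flag drops (logging an error on timeout), pulse the RX reset
 * index and clear the CPU index of any WDMA RX ring that WED has not taken
 * over (i.e. rings without a descriptor area). The poll result is returned
 * so callers can tell whether the hardware was still busy.
 */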
104 static int
105 mtk_wdma_rx_reset(struct mtk_wed_device *dev)
106 {
107 	u32 status, mask = MTK_WDMA_GLO_CFG_RX_DMA_BUSY;
108 	int i, ret;
109 
110 	wdma_clr(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_RX_DMA_EN);
111 	ret = readx_poll_timeout(mtk_wdma_read_reset, dev, status,
112 				 !(status & mask), 0, 10000);
113 	if (ret)
114 		dev_err(dev->hw->dev, "rx reset failed\n");
115 
116 	wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX);
117 	wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);
118 
119 	for (i = 0; i < ARRAY_SIZE(dev->rx_wdma); i++) {
120 		if (dev->rx_wdma[i].desc)
121 			continue;
122 
123 		wdma_w32(dev,
124 			 MTK_WDMA_RING_RX(i) + MTK_WED_RING_OFS_CPU_IDX, 0);
125 	}
126 
127 	return ret;
128 }
129 
130 static void
131 mtk_wdma_tx_reset(struct mtk_wed_device *dev)
132 {
133 	u32 status, mask = MTK_WDMA_GLO_CFG_TX_DMA_BUSY;
134 	int i;
135 
136 	wdma_clr(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_TX_DMA_EN);
137 	if (readx_poll_timeout(mtk_wdma_read_reset, dev, status,
138 			       !(status & mask), 0, 10000))
139 		dev_err(dev->hw->dev, "tx reset failed\n");
140 
141 	wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_TX);
142 	wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);
143 
144 	for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++)
145 		wdma_w32(dev,
146 			 MTK_WDMA_RING_TX(i) + MTK_WED_RING_OFS_CPU_IDX, 0);
147 }
148 
149 static void
150 mtk_wed_reset(struct mtk_wed_device *dev, u32 mask)
151 {
152 	u32 status;
153 
154 	wed_w32(dev, MTK_WED_RESET, mask);
155 	if (readx_poll_timeout(mtk_wed_read_reset, dev, status,
156 			       !(status & mask), 0, 1000))
157 		WARN_ON_ONCE(1);
158 }
159 
160 static u32
161 mtk_wed_wo_read_status(struct mtk_wed_device *dev)
162 {
163 	return wed_r32(dev, MTK_WED_SCR0 + 4 * MTK_WED_DUMMY_CR_WO_STATUS);
164 }
165 
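/* Reset the WED offload (WO) MCU: ask the firmware to enter the DISABLE
 * state over the MCU message channel, wait for it to report
 * MTK_WED_WOIF_DISABLE_DONE in the dummy status CR, then toggle the
 * per-instance MCUSYS reset bit through a temporary mapping of
 * MTK_WED_WO_CPU_MCUSYS_RESET_ADDR.
 */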
166 static void
167 mtk_wed_wo_reset(struct mtk_wed_device *dev)
168 {
169 	struct mtk_wed_wo *wo = dev->hw->wed_wo;
170 	u8 state = MTK_WED_WO_STATE_DISABLE;
171 	void __iomem *reg;
172 	u32 val;
173 
174 	mtk_wdma_tx_reset(dev);
175 	mtk_wed_reset(dev, MTK_WED_RESET_WED);
176 
177 	mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO,
178 			     MTK_WED_WO_CMD_CHANGE_STATE, &state,
179 			     sizeof(state), false);
180 
181 	if (readx_poll_timeout(mtk_wed_wo_read_status, dev, val,
182 			       val == MTK_WED_WOIF_DISABLE_DONE,
183 			       100, MTK_WOCPU_TIMEOUT))
184 		dev_err(dev->hw->dev, "failed to disable wed-wo\n");
185 
186 	reg = ioremap(MTK_WED_WO_CPU_MCUSYS_RESET_ADDR, 4);
187 
188 	val = readl(reg);
189 	switch (dev->hw->index) {
190 	case 0:
191 		val |= MTK_WED_WO_CPU_WO0_MCUSYS_RESET_MASK;
192 		writel(val, reg);
193 		val &= ~MTK_WED_WO_CPU_WO0_MCUSYS_RESET_MASK;
194 		writel(val, reg);
195 		break;
196 	case 1:
197 		val |= MTK_WED_WO_CPU_WO1_MCUSYS_RESET_MASK;
198 		writel(val, reg);
199 		val &= ~MTK_WED_WO_CPU_WO1_MCUSYS_RESET_MASK;
200 		writel(val, reg);
201 		break;
202 	default:
203 		break;
204 	}
205 	iounmap(reg);
206 }
207 
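/* Pick a WED hardware unit for the attaching WLAN device. PCIe devices
 * first try the unit matching their PCI domain; if that unit is already
 * taken, version 1 hardware gives up, while MT7986 (version 2) falls
 * through and, like AXI devices, grabs any free unit from hw_list.
 */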
208 static struct mtk_wed_hw *
209 mtk_wed_assign(struct mtk_wed_device *dev)
210 {
211 	struct mtk_wed_hw *hw;
212 	int i;
213 
214 	if (dev->wlan.bus_type == MTK_WED_BUS_PCIE) {
215 		hw = hw_list[pci_domain_nr(dev->wlan.pci_dev->bus)];
216 		if (!hw)
217 			return NULL;
218 
219 		if (!hw->wed_dev)
220 			goto out;
221 
222 		if (hw->version == 1)
223 			return NULL;
224 
225 		/* MT7986 WED devices do not have any pcie slot restrictions */
226 	}
227 	/* MT7986 PCIE or AXI */
228 	for (i = 0; i < ARRAY_SIZE(hw_list); i++) {
229 		hw = hw_list[i];
230 		if (hw && !hw->wed_dev)
231 			goto out;
232 	}
233 
234 	return NULL;
235 
236 out:
237 	hw->wed_dev = dev;
238 	return hw;
239 }
240 
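/* Allocate the TX token buffer pool shared with the WLAN hardware: whole
 * pages are carved into MTK_WED_BUF_PER_PAGE buffers of MTK_WED_BUF_SIZE
 * bytes, and for each buffer a WDMA descriptor is filled with buf0 pointing
 * at the TX descriptor area initialized by dev->wlan.init_buf() and buf1 at
 * the remaining payload space.
 */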
241 static int
242 mtk_wed_tx_buffer_alloc(struct mtk_wed_device *dev)
243 {
244 	struct mtk_wdma_desc *desc;
245 	dma_addr_t desc_phys;
246 	void **page_list;
247 	int token = dev->wlan.token_start;
248 	int ring_size;
249 	int n_pages;
250 	int i, page_idx;
251 
252 	ring_size = dev->wlan.nbuf & ~(MTK_WED_BUF_PER_PAGE - 1);
253 	n_pages = ring_size / MTK_WED_BUF_PER_PAGE;
254 
255 	page_list = kcalloc(n_pages, sizeof(*page_list), GFP_KERNEL);
256 	if (!page_list)
257 		return -ENOMEM;
258 
259 	dev->tx_buf_ring.size = ring_size;
260 	dev->tx_buf_ring.pages = page_list;
261 
262 	desc = dma_alloc_coherent(dev->hw->dev, ring_size * sizeof(*desc),
263 				  &desc_phys, GFP_KERNEL);
264 	if (!desc)
265 		return -ENOMEM;
266 
267 	dev->tx_buf_ring.desc = desc;
268 	dev->tx_buf_ring.desc_phys = desc_phys;
269 
270 	for (i = 0, page_idx = 0; i < ring_size; i += MTK_WED_BUF_PER_PAGE) {
271 		dma_addr_t page_phys, buf_phys;
272 		struct page *page;
273 		void *buf;
274 		int s;
275 
276 		page = __dev_alloc_pages(GFP_KERNEL, 0);
277 		if (!page)
278 			return -ENOMEM;
279 
280 		page_phys = dma_map_page(dev->hw->dev, page, 0, PAGE_SIZE,
281 					 DMA_BIDIRECTIONAL);
282 		if (dma_mapping_error(dev->hw->dev, page_phys)) {
283 			__free_page(page);
284 			return -ENOMEM;
285 		}
286 
287 		page_list[page_idx++] = page;
288 		dma_sync_single_for_cpu(dev->hw->dev, page_phys, PAGE_SIZE,
289 					DMA_BIDIRECTIONAL);
290 
291 		buf = page_to_virt(page);
292 		buf_phys = page_phys;
293 
294 		for (s = 0; s < MTK_WED_BUF_PER_PAGE; s++) {
295 			u32 txd_size;
296 			u32 ctrl;
297 
298 			txd_size = dev->wlan.init_buf(buf, buf_phys, token++);
299 
300 			desc->buf0 = cpu_to_le32(buf_phys);
301 			desc->buf1 = cpu_to_le32(buf_phys + txd_size);
302 
303 			if (dev->hw->version == 1)
304 				ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0, txd_size) |
305 				       FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1,
306 						  MTK_WED_BUF_SIZE - txd_size) |
307 				       MTK_WDMA_DESC_CTRL_LAST_SEG1;
308 			else
309 				ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0, txd_size) |
310 				       FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1_V2,
311 						  MTK_WED_BUF_SIZE - txd_size) |
312 				       MTK_WDMA_DESC_CTRL_LAST_SEG0;
313 			desc->ctrl = cpu_to_le32(ctrl);
314 			desc->info = 0;
315 			desc++;
316 
317 			buf += MTK_WED_BUF_SIZE;
318 			buf_phys += MTK_WED_BUF_SIZE;
319 		}
320 
321 		dma_sync_single_for_device(dev->hw->dev, page_phys, PAGE_SIZE,
322 					   DMA_BIDIRECTIONAL);
323 	}
324 
325 	return 0;
326 }
327 
328 static void
329 mtk_wed_free_tx_buffer(struct mtk_wed_device *dev)
330 {
331 	struct mtk_wdma_desc *desc = dev->tx_buf_ring.desc;
332 	void **page_list = dev->tx_buf_ring.pages;
333 	int page_idx;
334 	int i;
335 
336 	if (!page_list)
337 		return;
338 
339 	if (!desc)
340 		goto free_pagelist;
341 
342 	for (i = 0, page_idx = 0; i < dev->tx_buf_ring.size;
343 	     i += MTK_WED_BUF_PER_PAGE) {
344 		void *page = page_list[page_idx++];
345 		dma_addr_t buf_addr;
346 
347 		if (!page)
348 			break;
349 
350 		buf_addr = le32_to_cpu(desc[i].buf0);
351 		dma_unmap_page(dev->hw->dev, buf_addr, PAGE_SIZE,
352 			       DMA_BIDIRECTIONAL);
353 		__free_page(page);
354 	}
355 
356 	dma_free_coherent(dev->hw->dev, dev->tx_buf_ring.size * sizeof(*desc),
357 			  desc, dev->tx_buf_ring.desc_phys);
358 
359 free_pagelist:
360 	kfree(page_list);
361 }
362 
363 static int
364 mtk_wed_rx_buffer_alloc(struct mtk_wed_device *dev)
365 {
366 	struct mtk_rxbm_desc *desc;
367 	dma_addr_t desc_phys;
368 
369 	dev->rx_buf_ring.size = dev->wlan.rx_nbuf;
370 	desc = dma_alloc_coherent(dev->hw->dev,
371 				  dev->wlan.rx_nbuf * sizeof(*desc),
372 				  &desc_phys, GFP_KERNEL);
373 	if (!desc)
374 		return -ENOMEM;
375 
376 	dev->rx_buf_ring.desc = desc;
377 	dev->rx_buf_ring.desc_phys = desc_phys;
378 	dev->wlan.init_rx_buf(dev, dev->wlan.rx_npkt);
379 
380 	return 0;
381 }
382 
383 static void
384 mtk_wed_free_rx_buffer(struct mtk_wed_device *dev)
385 {
386 	struct mtk_rxbm_desc *desc = dev->rx_buf_ring.desc;
387 
388 	if (!desc)
389 		return;
390 
391 	dev->wlan.release_rx_buf(dev);
392 	dma_free_coherent(dev->hw->dev, dev->rx_buf_ring.size * sizeof(*desc),
393 			  desc, dev->rx_buf_ring.desc_phys);
394 }
395 
396 static void
397 mtk_wed_rx_buffer_hw_init(struct mtk_wed_device *dev)
398 {
399 	wed_w32(dev, MTK_WED_RX_BM_RX_DMAD,
400 		FIELD_PREP(MTK_WED_RX_BM_RX_DMAD_SDL0, dev->wlan.rx_size));
401 	wed_w32(dev, MTK_WED_RX_BM_BASE, dev->rx_buf_ring.desc_phys);
402 	wed_w32(dev, MTK_WED_RX_BM_INIT_PTR, MTK_WED_RX_BM_INIT_SW_TAIL |
403 		FIELD_PREP(MTK_WED_RX_BM_SW_TAIL, dev->wlan.rx_npkt));
404 	wed_w32(dev, MTK_WED_RX_BM_DYN_ALLOC_TH,
405 		FIELD_PREP(MTK_WED_RX_BM_DYN_ALLOC_TH_H, 0xffff));
406 	wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_BM_EN);
407 }
408 
409 static void
410 mtk_wed_free_ring(struct mtk_wed_device *dev, struct mtk_wed_ring *ring)
411 {
412 	if (!ring->desc)
413 		return;
414 
415 	dma_free_coherent(dev->hw->dev, ring->size * ring->desc_size,
416 			  ring->desc, ring->desc_phys);
417 }
418 
419 static void
420 mtk_wed_free_rx_rings(struct mtk_wed_device *dev)
421 {
422 	mtk_wed_free_rx_buffer(dev);
423 	mtk_wed_free_ring(dev, &dev->rro.ring);
424 }
425 
426 static void
427 mtk_wed_free_tx_rings(struct mtk_wed_device *dev)
428 {
429 	int i;
430 
431 	for (i = 0; i < ARRAY_SIZE(dev->tx_ring); i++)
432 		mtk_wed_free_ring(dev, &dev->tx_ring[i]);
433 	for (i = 0; i < ARRAY_SIZE(dev->rx_wdma); i++)
434 		mtk_wed_free_ring(dev, &dev->rx_wdma[i]);
435 }
436 
437 static void
438 mtk_wed_set_ext_int(struct mtk_wed_device *dev, bool en)
439 {
440 	u32 mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK;
441 
442 	if (dev->hw->version == 1)
443 		mask |= MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR;
444 	else
445 		mask |= MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH |
446 			MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH |
447 			MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT |
448 			MTK_WED_EXT_INT_STATUS_TX_DMA_W_RESP_ERR;
449 
450 	if (!dev->hw->num_flows)
451 		mask &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD;
452 
453 	wed_w32(dev, MTK_WED_EXT_INT_MASK, en ? mask : 0);
454 	wed_r32(dev, MTK_WED_EXT_INT_MASK);
455 }
456 
457 static void
458 mtk_wed_set_512_support(struct mtk_wed_device *dev, bool enable)
459 {
460 	if (enable) {
461 		wed_w32(dev, MTK_WED_TXDP_CTRL, MTK_WED_TXDP_DW9_OVERWR);
462 		wed_w32(dev, MTK_WED_TXP_DW1,
463 			FIELD_PREP(MTK_WED_WPDMA_WRITE_TXP, 0x0103));
464 	} else {
465 		wed_w32(dev, MTK_WED_TXP_DW1,
466 			FIELD_PREP(MTK_WED_WPDMA_WRITE_TXP, 0x0100));
467 		wed_clr(dev, MTK_WED_TXDP_CTRL, MTK_WED_TXDP_DW9_OVERWR);
468 	}
469 }
470 
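/* Before turning on the WLAN-side RX DMA, wait (up to three 100-200ms
 * polls) for WED to fill the WPDMA RX data ring, i.e. for the ring CPU
 * index to reach ring size - 1; bail out early if the queue was never
 * configured by the WLAN driver.
 */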
471 #define MTK_WFMDA_RX_DMA_EN	BIT(2)
472 static void
473 mtk_wed_check_wfdma_rx_fill(struct mtk_wed_device *dev, int idx)
474 {
475 	u32 val;
476 	int i;
477 
478 	if (!(dev->rx_ring[idx].flags & MTK_WED_RING_CONFIGURED))
479 		return; /* queue is not configured by mt76 */
480 
481 	for (i = 0; i < 3; i++) {
482 		u32 cur_idx;
483 
484 		cur_idx = wed_r32(dev,
485 				  MTK_WED_WPDMA_RING_RX_DATA(idx) +
486 				  MTK_WED_RING_OFS_CPU_IDX);
487 		if (cur_idx == MTK_WED_RX_RING_SIZE - 1)
488 			break;
489 
490 		usleep_range(100000, 200000);
491 	}
492 
493 	if (i == 3) {
494 		dev_err(dev->hw->dev, "rx dma enable failed\n");
495 		return;
496 	}
497 
498 	val = wifi_r32(dev, dev->wlan.wpdma_rx_glo - dev->wlan.phy_base) |
499 	      MTK_WFMDA_RX_DMA_EN;
500 	wifi_w32(dev, dev->wlan.wpdma_rx_glo - dev->wlan.phy_base, val);
501 }
502 
503 static void
504 mtk_wed_dma_disable(struct mtk_wed_device *dev)
505 {
506 	wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
507 		MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
508 		MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN);
509 
510 	wed_clr(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);
511 
512 	wed_clr(dev, MTK_WED_GLO_CFG,
513 		MTK_WED_GLO_CFG_TX_DMA_EN |
514 		MTK_WED_GLO_CFG_RX_DMA_EN);
515 
516 	wdma_clr(dev, MTK_WDMA_GLO_CFG,
517 		 MTK_WDMA_GLO_CFG_TX_DMA_EN |
518 		 MTK_WDMA_GLO_CFG_RX_INFO1_PRERES |
519 		 MTK_WDMA_GLO_CFG_RX_INFO2_PRERES);
520 
521 	if (dev->hw->version == 1) {
522 		regmap_write(dev->hw->mirror, dev->hw->index * 4, 0);
523 		wdma_clr(dev, MTK_WDMA_GLO_CFG,
524 			 MTK_WDMA_GLO_CFG_RX_INFO3_PRERES);
525 	} else {
526 		wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
527 			MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC |
528 			MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC);
529 
530 		wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
531 			MTK_WED_WPDMA_RX_D_RX_DRV_EN);
532 		wed_clr(dev, MTK_WED_WDMA_GLO_CFG,
533 			MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK);
534 	}
535 
536 	mtk_wed_set_512_support(dev, false);
537 }
538 
539 static void
540 mtk_wed_stop(struct mtk_wed_device *dev)
541 {
542 	mtk_wed_set_ext_int(dev, false);
543 
544 	wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER, 0);
545 	wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, 0);
546 	wdma_w32(dev, MTK_WDMA_INT_MASK, 0);
547 	wdma_w32(dev, MTK_WDMA_INT_GRP2, 0);
548 	wed_w32(dev, MTK_WED_WPDMA_INT_MASK, 0);
549 
550 	if (dev->hw->version == 1)
551 		return;
552 
553 	wed_w32(dev, MTK_WED_EXT_INT_MASK1, 0);
554 	wed_w32(dev, MTK_WED_EXT_INT_MASK2, 0);
555 }
556 
557 static void
558 mtk_wed_deinit(struct mtk_wed_device *dev)
559 {
560 	mtk_wed_stop(dev);
561 	mtk_wed_dma_disable(dev);
562 
563 	wed_clr(dev, MTK_WED_CTRL,
564 		MTK_WED_CTRL_WDMA_INT_AGENT_EN |
565 		MTK_WED_CTRL_WPDMA_INT_AGENT_EN |
566 		MTK_WED_CTRL_WED_TX_BM_EN |
567 		MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
568 
569 	if (dev->hw->version == 1)
570 		return;
571 
572 	wed_clr(dev, MTK_WED_CTRL,
573 		MTK_WED_CTRL_RX_ROUTE_QM_EN |
574 		MTK_WED_CTRL_WED_RX_BM_EN |
575 		MTK_WED_CTRL_RX_RRO_QM_EN);
576 }
577 
578 static void
579 mtk_wed_detach(struct mtk_wed_device *dev)
580 {
581 	struct mtk_wed_hw *hw = dev->hw;
582 
583 	mutex_lock(&hw_lock);
584 
585 	mtk_wed_deinit(dev);
586 
587 	mtk_wdma_rx_reset(dev);
588 	mtk_wed_reset(dev, MTK_WED_RESET_WED);
589 	mtk_wed_free_tx_buffer(dev);
590 	mtk_wed_free_tx_rings(dev);
591 
592 	if (mtk_wed_get_rx_capa(dev)) {
593 		mtk_wed_wo_reset(dev);
594 		mtk_wed_free_rx_rings(dev);
595 		mtk_wed_wo_deinit(hw);
596 	}
597 
598 	if (dev->wlan.bus_type == MTK_WED_BUS_PCIE) {
599 		struct device_node *wlan_node;
600 
601 		wlan_node = dev->wlan.pci_dev->dev.of_node;
602 		if (of_dma_is_coherent(wlan_node) && hw->hifsys)
603 			regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP,
604 					   BIT(hw->index), BIT(hw->index));
605 	}
606 
607 	if ((!hw_list[!hw->index] || !hw_list[!hw->index]->wed_dev) &&
608 	    hw->eth->dma_dev != hw->eth->dev)
609 		mtk_eth_set_dma_device(hw->eth, hw->eth->dev);
610 
611 	memset(dev, 0, sizeof(*dev));
612 	module_put(THIS_MODULE);
613 
614 	hw->wed_dev = NULL;
615 	mutex_unlock(&hw_lock);
616 }
617 
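/* Bus-specific interrupt plumbing for version 2 hardware: for PCIe devices
 * the "mediatek,wed-pcie" syscon is poked and the WED PCIe interrupt
 * polarity, source and trigger registers are programmed; for AXI devices
 * only the WPDMA interrupt source selection is needed.
 */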
618 #define PCIE_BASE_ADDR0		0x11280000
619 static void
620 mtk_wed_bus_init(struct mtk_wed_device *dev)
621 {
622 	switch (dev->wlan.bus_type) {
623 	case MTK_WED_BUS_PCIE: {
624 		struct device_node *np = dev->hw->eth->dev->of_node;
625 		struct regmap *regs;
626 
627 		regs = syscon_regmap_lookup_by_phandle(np,
628 						       "mediatek,wed-pcie");
629 		if (IS_ERR(regs))
630 			break;
631 
632 		regmap_update_bits(regs, 0, BIT(0), BIT(0));
633 
634 		wed_w32(dev, MTK_WED_PCIE_INT_CTRL,
635 			FIELD_PREP(MTK_WED_PCIE_INT_CTRL_POLL_EN, 2));
636 
637 		/* pcie interrupt control: polarity/source selection */
638 		wed_set(dev, MTK_WED_PCIE_INT_CTRL,
639 			MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA |
640 			FIELD_PREP(MTK_WED_PCIE_INT_CTRL_SRC_SEL, 1));
641 		wed_r32(dev, MTK_WED_PCIE_INT_CTRL);
642 
643 		wed_w32(dev, MTK_WED_PCIE_CFG_INTM, PCIE_BASE_ADDR0 | 0x180);
644 		wed_w32(dev, MTK_WED_PCIE_CFG_BASE, PCIE_BASE_ADDR0 | 0x184);
645 
646 		/* pcie interrupt status trigger register */
647 		wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, BIT(24));
648 		wed_r32(dev, MTK_WED_PCIE_INT_TRIGGER);
649 
650 		/* interrupt polarity setting */
651 		wed_set(dev, MTK_WED_PCIE_INT_CTRL,
652 			MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA);
653 		break;
654 	}
655 	case MTK_WED_BUS_AXI:
656 		wed_set(dev, MTK_WED_WPDMA_INT_CTRL,
657 			MTK_WED_WPDMA_INT_CTRL_SIG_SRC |
658 			FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_SRC_SEL, 0));
659 		break;
660 	default:
661 		break;
662 	}
663 }
664 
665 static void
666 mtk_wed_set_wpdma(struct mtk_wed_device *dev)
667 {
668 	if (dev->hw->version == 1) {
669 		wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_phys);
670 	} else {
671 		mtk_wed_bus_init(dev);
672 
673 		wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_int);
674 		wed_w32(dev, MTK_WED_WPDMA_CFG_INT_MASK, dev->wlan.wpdma_mask);
675 		wed_w32(dev, MTK_WED_WPDMA_CFG_TX, dev->wlan.wpdma_tx);
676 		wed_w32(dev, MTK_WED_WPDMA_CFG_TX_FREE, dev->wlan.wpdma_txfree);
677 		wed_w32(dev, MTK_WED_WPDMA_RX_GLO_CFG, dev->wlan.wpdma_rx_glo);
678 		wed_w32(dev, MTK_WED_WPDMA_RX_RING, dev->wlan.wpdma_rx);
679 	}
680 }
681 
682 static void
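/* Early per-attach initialization: tear down any previous state, reset the
 * WED block, program the WPDMA addresses supplied by the WLAN driver and
 * tell WED where the WDMA rings and global configuration registers live.
 */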
683 mtk_wed_hw_init_early(struct mtk_wed_device *dev)
684 {
685 	u32 mask, set;
686 
687 	mtk_wed_deinit(dev);
688 	mtk_wed_reset(dev, MTK_WED_RESET_WED);
689 	mtk_wed_set_wpdma(dev);
690 
691 	mask = MTK_WED_WDMA_GLO_CFG_BT_SIZE |
692 	       MTK_WED_WDMA_GLO_CFG_DYNAMIC_DMAD_RECYCLE |
693 	       MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE;
694 	set = FIELD_PREP(MTK_WED_WDMA_GLO_CFG_BT_SIZE, 2) |
695 	      MTK_WED_WDMA_GLO_CFG_DYNAMIC_SKIP_DMAD_PREP |
696 	      MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY;
697 	wed_m32(dev, MTK_WED_WDMA_GLO_CFG, mask, set);
698 
699 	if (dev->hw->version == 1) {
700 		u32 offset = dev->hw->index ? 0x04000400 : 0;
701 
702 		wdma_set(dev, MTK_WDMA_GLO_CFG,
703 			 MTK_WDMA_GLO_CFG_RX_INFO1_PRERES |
704 			 MTK_WDMA_GLO_CFG_RX_INFO2_PRERES |
705 			 MTK_WDMA_GLO_CFG_RX_INFO3_PRERES);
706 
707 		wed_w32(dev, MTK_WED_WDMA_OFFSET0, 0x2a042a20 + offset);
708 		wed_w32(dev, MTK_WED_WDMA_OFFSET1, 0x29002800 + offset);
709 		wed_w32(dev, MTK_WED_PCIE_CFG_BASE,
710 			MTK_PCIE_BASE(dev->hw->index));
711 	} else {
712 		wed_w32(dev, MTK_WED_WDMA_CFG_BASE, dev->hw->wdma_phy);
713 		wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_ETH_DMAD_FMT);
714 		wed_w32(dev, MTK_WED_WDMA_OFFSET0,
715 			FIELD_PREP(MTK_WED_WDMA_OFST0_GLO_INTS,
716 				   MTK_WDMA_INT_STATUS) |
717 			FIELD_PREP(MTK_WED_WDMA_OFST0_GLO_CFG,
718 				   MTK_WDMA_GLO_CFG));
719 
720 		wed_w32(dev, MTK_WED_WDMA_OFFSET1,
721 			FIELD_PREP(MTK_WED_WDMA_OFST1_TX_CTRL,
722 				   MTK_WDMA_RING_TX(0)) |
723 			FIELD_PREP(MTK_WED_WDMA_OFST1_RX_CTRL,
724 				   MTK_WDMA_RING_RX(0)));
725 	}
726 }
727 
728 static int
729 mtk_wed_rro_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring,
730 		       int size)
731 {
732 	ring->desc = dma_alloc_coherent(dev->hw->dev,
733 					size * sizeof(*ring->desc),
734 					&ring->desc_phys, GFP_KERNEL);
735 	if (!ring->desc)
736 		return -ENOMEM;
737 
738 	ring->desc_size = sizeof(*ring->desc);
739 	ring->size = size;
740 	memset(ring->desc, 0, size * sizeof(*ring->desc));
741 
742 	return 0;
743 }
744 
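/* RX reordering (RRO) setup: the MIOD and feedback command rings live in
 * the "wo-dlm" reserved-memory region shared with the WO firmware, while
 * the RRO queue ring itself is allocated from coherent DMA memory.
 */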
745 #define MTK_WED_MIOD_COUNT	(MTK_WED_MIOD_ENTRY_CNT * MTK_WED_MIOD_CNT)
746 static int
747 mtk_wed_rro_alloc(struct mtk_wed_device *dev)
748 {
749 	struct reserved_mem *rmem;
750 	struct device_node *np;
751 	int index;
752 
753 	index = of_property_match_string(dev->hw->node, "memory-region-names",
754 					 "wo-dlm");
755 	if (index < 0)
756 		return index;
757 
758 	np = of_parse_phandle(dev->hw->node, "memory-region", index);
759 	if (!np)
760 		return -ENODEV;
761 
762 	rmem = of_reserved_mem_lookup(np);
763 	of_node_put(np);
764 
765 	if (!rmem)
766 		return -ENODEV;
767 
768 	dev->rro.miod_phys = rmem->base;
769 	dev->rro.fdbk_phys = MTK_WED_MIOD_COUNT + dev->rro.miod_phys;
770 
771 	return mtk_wed_rro_ring_alloc(dev, &dev->rro.ring,
772 				      MTK_WED_RRO_QUE_CNT);
773 }
774 
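/* Tell the WO firmware where the MIOD and feedback rings sit, expressed in
 * the WO CPU's own view of memory (MTK_WED_WOCPU_VIEW_MIOD_BASE).
 */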
775 static int
776 mtk_wed_rro_cfg(struct mtk_wed_device *dev)
777 {
778 	struct mtk_wed_wo *wo = dev->hw->wed_wo;
779 	struct {
780 		struct {
781 			__le32 base;
782 			__le32 cnt;
783 			__le32 unit;
784 		} ring[2];
785 		__le32 wed;
786 		u8 version;
787 	} req = {
788 		.ring[0] = {
789 			.base = cpu_to_le32(MTK_WED_WOCPU_VIEW_MIOD_BASE),
790 			.cnt = cpu_to_le32(MTK_WED_MIOD_CNT),
791 			.unit = cpu_to_le32(MTK_WED_MIOD_ENTRY_CNT),
792 		},
793 		.ring[1] = {
794 			.base = cpu_to_le32(MTK_WED_WOCPU_VIEW_MIOD_BASE +
795 					    MTK_WED_MIOD_COUNT),
796 			.cnt = cpu_to_le32(MTK_WED_FB_CMD_CNT),
797 			.unit = cpu_to_le32(4),
798 		},
799 	};
800 
801 	return mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO,
802 				    MTK_WED_WO_CMD_WED_CFG,
803 				    &req, sizeof(req), true);
804 }
805 
806 static void
807 mtk_wed_rro_hw_init(struct mtk_wed_device *dev)
808 {
809 	wed_w32(dev, MTK_WED_RROQM_MIOD_CFG,
810 		FIELD_PREP(MTK_WED_RROQM_MIOD_MID_DW, 0x70 >> 2) |
811 		FIELD_PREP(MTK_WED_RROQM_MIOD_MOD_DW, 0x10 >> 2) |
812 		FIELD_PREP(MTK_WED_RROQM_MIOD_ENTRY_DW,
813 			   MTK_WED_MIOD_ENTRY_CNT >> 2));
814 
815 	wed_w32(dev, MTK_WED_RROQM_MIOD_CTRL0, dev->rro.miod_phys);
816 	wed_w32(dev, MTK_WED_RROQM_MIOD_CTRL1,
817 		FIELD_PREP(MTK_WED_RROQM_MIOD_CNT, MTK_WED_MIOD_CNT));
818 	wed_w32(dev, MTK_WED_RROQM_FDBK_CTRL0, dev->rro.fdbk_phys);
819 	wed_w32(dev, MTK_WED_RROQM_FDBK_CTRL1,
820 		FIELD_PREP(MTK_WED_RROQM_FDBK_CNT, MTK_WED_FB_CMD_CNT));
821 	wed_w32(dev, MTK_WED_RROQM_FDBK_CTRL2, 0);
822 	wed_w32(dev, MTK_WED_RROQ_BASE_L, dev->rro.ring.desc_phys);
823 
824 	wed_set(dev, MTK_WED_RROQM_RST_IDX,
825 		MTK_WED_RROQM_RST_IDX_MIOD |
826 		MTK_WED_RROQM_RST_IDX_FDBK);
827 
828 	wed_w32(dev, MTK_WED_RROQM_RST_IDX, 0);
829 	wed_w32(dev, MTK_WED_RROQM_MIOD_CTRL2, MTK_WED_MIOD_CNT - 1);
830 	wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_RRO_QM_EN);
831 }
832 
833 static void
834 mtk_wed_route_qm_hw_init(struct mtk_wed_device *dev)
835 {
836 	wed_w32(dev, MTK_WED_RESET, MTK_WED_RESET_RX_ROUTE_QM);
837 
838 	for (;;) {
839 		usleep_range(100, 200);
840 		if (!(wed_r32(dev, MTK_WED_RESET) & MTK_WED_RESET_RX_ROUTE_QM))
841 			break;
842 	}
843 
844 	/* configure RX_ROUTE_QM */
845 	wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST);
846 	wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_TXDMAD_FPORT);
847 	wed_set(dev, MTK_WED_RTQM_GLO_CFG,
848 		FIELD_PREP(MTK_WED_RTQM_TXDMAD_FPORT, 0x3 + dev->hw->index));
849 	wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST);
850 	/* enable RX_ROUTE_QM */
851 	wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_ROUTE_QM_EN);
852 }
853 
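/* One-time hardware init on the first start: set up the TX buffer manager
 * (group counts derived from the buffer ring size, token id range taken
 * from the WLAN-provided token_start/nbuf) and, on version 2 hardware, the
 * token id table, RX buffer manager, RRO queue manager and route queue
 * manager as well.
 */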
854 static void
855 mtk_wed_hw_init(struct mtk_wed_device *dev)
856 {
857 	if (dev->init_done)
858 		return;
859 
860 	dev->init_done = true;
861 	mtk_wed_set_ext_int(dev, false);
862 	wed_w32(dev, MTK_WED_TX_BM_CTRL,
863 		MTK_WED_TX_BM_CTRL_PAUSE |
864 		FIELD_PREP(MTK_WED_TX_BM_CTRL_VLD_GRP_NUM,
865 			   dev->tx_buf_ring.size / 128) |
866 		FIELD_PREP(MTK_WED_TX_BM_CTRL_RSV_GRP_NUM,
867 			   MTK_WED_TX_RING_SIZE / 256));
868 
869 	wed_w32(dev, MTK_WED_TX_BM_BASE, dev->tx_buf_ring.desc_phys);
870 
871 	wed_w32(dev, MTK_WED_TX_BM_BUF_LEN, MTK_WED_PKT_SIZE);
872 
873 	if (dev->hw->version == 1) {
874 		wed_w32(dev, MTK_WED_TX_BM_TKID,
875 			FIELD_PREP(MTK_WED_TX_BM_TKID_START,
876 				   dev->wlan.token_start) |
877 			FIELD_PREP(MTK_WED_TX_BM_TKID_END,
878 				   dev->wlan.token_start +
879 				   dev->wlan.nbuf - 1));
880 		wed_w32(dev, MTK_WED_TX_BM_DYN_THR,
881 			FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO, 1) |
882 			MTK_WED_TX_BM_DYN_THR_HI);
883 	} else {
884 		wed_w32(dev, MTK_WED_TX_BM_TKID_V2,
885 			FIELD_PREP(MTK_WED_TX_BM_TKID_START,
886 				   dev->wlan.token_start) |
887 			FIELD_PREP(MTK_WED_TX_BM_TKID_END,
888 				   dev->wlan.token_start +
889 				   dev->wlan.nbuf - 1));
890 		wed_w32(dev, MTK_WED_TX_BM_DYN_THR,
891 			FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO_V2, 0) |
892 			MTK_WED_TX_BM_DYN_THR_HI_V2);
893 		wed_w32(dev, MTK_WED_TX_TKID_CTRL,
894 			MTK_WED_TX_TKID_CTRL_PAUSE |
895 			FIELD_PREP(MTK_WED_TX_TKID_CTRL_VLD_GRP_NUM,
896 				   dev->tx_buf_ring.size / 128) |
897 			FIELD_PREP(MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM,
898 				   dev->tx_buf_ring.size / 128));
899 		wed_w32(dev, MTK_WED_TX_TKID_DYN_THR,
900 			FIELD_PREP(MTK_WED_TX_TKID_DYN_THR_LO, 0) |
901 			MTK_WED_TX_TKID_DYN_THR_HI);
902 	}
903 
904 	mtk_wed_reset(dev, MTK_WED_RESET_TX_BM);
905 
906 	if (dev->hw->version == 1) {
907 		wed_set(dev, MTK_WED_CTRL,
908 			MTK_WED_CTRL_WED_TX_BM_EN |
909 			MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
910 	} else {
911 		wed_clr(dev, MTK_WED_TX_TKID_CTRL, MTK_WED_TX_TKID_CTRL_PAUSE);
912 		/* rx hw init */
913 		wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX,
914 			MTK_WED_WPDMA_RX_D_RST_CRX_IDX |
915 			MTK_WED_WPDMA_RX_D_RST_DRV_IDX);
916 		wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, 0);
917 
918 		mtk_wed_rx_buffer_hw_init(dev);
919 		mtk_wed_rro_hw_init(dev);
920 		mtk_wed_route_qm_hw_init(dev);
921 	}
922 
923 	wed_clr(dev, MTK_WED_TX_BM_CTRL, MTK_WED_TX_BM_CTRL_PAUSE);
924 }
925 
926 static void
927 mtk_wed_ring_reset(struct mtk_wed_ring *ring, int size, bool tx)
928 {
929 	void *head = (void *)ring->desc;
930 	int i;
931 
932 	for (i = 0; i < size; i++) {
933 		struct mtk_wdma_desc *desc;
934 
935 		desc = (struct mtk_wdma_desc *)(head + i * ring->desc_size);
936 		desc->buf0 = 0;
937 		if (tx)
938 			desc->ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE);
939 		else
940 			desc->ctrl = cpu_to_le32(MTK_WFDMA_DESC_CTRL_TO_HOST);
941 		desc->buf1 = 0;
942 		desc->info = 0;
943 	}
944 }
945 
946 static u32
947 mtk_wed_check_busy(struct mtk_wed_device *dev, u32 reg, u32 mask)
948 {
949 	return !!(wed_r32(dev, reg) & mask);
950 }
951 
952 static int
953 mtk_wed_poll_busy(struct mtk_wed_device *dev, u32 reg, u32 mask)
954 {
955 	int sleep = 15000;
956 	int timeout = 100 * sleep;
957 	u32 val;
958 
959 	return read_poll_timeout(mtk_wed_check_busy, val, !val, sleep,
960 				 timeout, false, dev, reg, mask);
961 }
962 
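/* Full RX-side reset: move the WO firmware into its SER_RESET state,
 * quiesce and reset the WPDMA RX driver, the RRO and route queue managers,
 * the WDMA TX path, the WED RX DMA engine and the RX buffer manager, switch
 * the firmware back to ENABLE, then clear the RX descriptor rings and
 * release the RX buffers.
 */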
963 static int
964 mtk_wed_rx_reset(struct mtk_wed_device *dev)
965 {
966 	struct mtk_wed_wo *wo = dev->hw->wed_wo;
967 	u8 val = MTK_WED_WO_STATE_SER_RESET;
968 	int i, ret;
969 
970 	ret = mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO,
971 				   MTK_WED_WO_CMD_CHANGE_STATE, &val,
972 				   sizeof(val), true);
973 	if (ret)
974 		return ret;
975 
976 	wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG, MTK_WED_WPDMA_RX_D_RX_DRV_EN);
977 	ret = mtk_wed_poll_busy(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
978 				MTK_WED_WPDMA_RX_D_RX_DRV_BUSY);
979 	if (ret) {
980 		mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_INT_AGENT);
981 		mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_RX_D_DRV);
982 	} else {
983 		wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX,
984 			MTK_WED_WPDMA_RX_D_RST_CRX_IDX |
985 			MTK_WED_WPDMA_RX_D_RST_DRV_IDX);
986 
987 		wed_set(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
988 			MTK_WED_WPDMA_RX_D_RST_INIT_COMPLETE |
989 			MTK_WED_WPDMA_RX_D_FSM_RETURN_IDLE);
990 		wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
991 			MTK_WED_WPDMA_RX_D_RST_INIT_COMPLETE |
992 			MTK_WED_WPDMA_RX_D_FSM_RETURN_IDLE);
993 
994 		wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, 0);
995 	}
996 
997 	/* reset rro qm */
998 	wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_RRO_QM_EN);
999 	ret = mtk_wed_poll_busy(dev, MTK_WED_CTRL,
1000 				MTK_WED_CTRL_RX_RRO_QM_BUSY);
1001 	if (ret) {
1002 		mtk_wed_reset(dev, MTK_WED_RESET_RX_RRO_QM);
1003 	} else {
1004 		wed_set(dev, MTK_WED_RROQM_RST_IDX,
1005 			MTK_WED_RROQM_RST_IDX_MIOD |
1006 			MTK_WED_RROQM_RST_IDX_FDBK);
1007 		wed_w32(dev, MTK_WED_RROQM_RST_IDX, 0);
1008 	}
1009 
1010 	/* reset route qm */
1011 	wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_ROUTE_QM_EN);
1012 	ret = mtk_wed_poll_busy(dev, MTK_WED_CTRL,
1013 				MTK_WED_CTRL_RX_ROUTE_QM_BUSY);
1014 	if (ret)
1015 		mtk_wed_reset(dev, MTK_WED_RESET_RX_ROUTE_QM);
1016 	else
1017 		wed_set(dev, MTK_WED_RTQM_GLO_CFG,
1018 			MTK_WED_RTQM_Q_RST);
1019 
1020 	/* reset tx wdma */
1021 	mtk_wdma_tx_reset(dev);
1022 
1023 	/* reset tx wdma drv */
1024 	wed_clr(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_TX_DRV_EN);
1025 	mtk_wed_poll_busy(dev, MTK_WED_CTRL,
1026 			  MTK_WED_CTRL_WDMA_INT_AGENT_BUSY);
1027 	mtk_wed_reset(dev, MTK_WED_RESET_WDMA_TX_DRV);
1028 
1029 	/* reset wed rx dma */
1030 	ret = mtk_wed_poll_busy(dev, MTK_WED_GLO_CFG,
1031 				MTK_WED_GLO_CFG_RX_DMA_BUSY);
1032 	wed_clr(dev, MTK_WED_GLO_CFG, MTK_WED_GLO_CFG_RX_DMA_EN);
1033 	if (ret) {
1034 		mtk_wed_reset(dev, MTK_WED_RESET_WED_RX_DMA);
1035 	} else {
1036 		struct mtk_eth *eth = dev->hw->eth;
1037 
1038 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
1039 			wed_set(dev, MTK_WED_RESET_IDX,
1040 				MTK_WED_RESET_IDX_RX_V2);
1041 		else
1042 			wed_set(dev, MTK_WED_RESET_IDX, MTK_WED_RESET_IDX_RX);
1043 		wed_w32(dev, MTK_WED_RESET_IDX, 0);
1044 	}
1045 
1046 	/* reset rx bm */
1047 	wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_BM_EN);
1048 	mtk_wed_poll_busy(dev, MTK_WED_CTRL,
1049 			  MTK_WED_CTRL_WED_RX_BM_BUSY);
1050 	mtk_wed_reset(dev, MTK_WED_RESET_RX_BM);
1051 
1052 	/* wo change to enable state */
1053 	val = MTK_WED_WO_STATE_ENABLE;
1054 	ret = mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO,
1055 				   MTK_WED_WO_CMD_CHANGE_STATE, &val,
1056 				   sizeof(val), true);
1057 	if (ret)
1058 		return ret;
1059 
1060 	/* reset the WED RX descriptor rings */
1061 	for (i = 0; i < ARRAY_SIZE(dev->rx_ring); i++) {
1062 		if (!dev->rx_ring[i].desc)
1063 			continue;
1064 
1065 		mtk_wed_ring_reset(&dev->rx_ring[i], MTK_WED_RX_RING_SIZE,
1066 				   false);
1067 	}
1068 	mtk_wed_free_rx_buffer(dev);
1069 
1070 	return 0;
1071 }
1072 
1073 static void
1074 mtk_wed_reset_dma(struct mtk_wed_device *dev)
1075 {
1076 	bool busy = false;
1077 	u32 val;
1078 	int i;
1079 
1080 	for (i = 0; i < ARRAY_SIZE(dev->tx_ring); i++) {
1081 		if (!dev->tx_ring[i].desc)
1082 			continue;
1083 
1084 		mtk_wed_ring_reset(&dev->tx_ring[i], MTK_WED_TX_RING_SIZE,
1085 				   true);
1086 	}
1087 
1088 	/* 1. reset WED tx DMA */
1089 	wed_clr(dev, MTK_WED_GLO_CFG, MTK_WED_GLO_CFG_TX_DMA_EN);
1090 	busy = mtk_wed_poll_busy(dev, MTK_WED_GLO_CFG,
1091 				 MTK_WED_GLO_CFG_TX_DMA_BUSY);
1092 	if (busy) {
1093 		mtk_wed_reset(dev, MTK_WED_RESET_WED_TX_DMA);
1094 	} else {
1095 		wed_w32(dev, MTK_WED_RESET_IDX, MTK_WED_RESET_IDX_TX);
1096 		wed_w32(dev, MTK_WED_RESET_IDX, 0);
1097 	}
1098 
1099 	/* 2. reset WDMA rx DMA */
1100 	busy = !!mtk_wdma_rx_reset(dev);
1101 	wed_clr(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);
1102 	if (!busy)
1103 		busy = mtk_wed_poll_busy(dev, MTK_WED_WDMA_GLO_CFG,
1104 					 MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY);
1105 
1106 	if (busy) {
1107 		mtk_wed_reset(dev, MTK_WED_RESET_WDMA_INT_AGENT);
1108 		mtk_wed_reset(dev, MTK_WED_RESET_WDMA_RX_DRV);
1109 	} else {
1110 		wed_w32(dev, MTK_WED_WDMA_RESET_IDX,
1111 			MTK_WED_WDMA_RESET_IDX_RX | MTK_WED_WDMA_RESET_IDX_DRV);
1112 		wed_w32(dev, MTK_WED_WDMA_RESET_IDX, 0);
1113 
1114 		wed_set(dev, MTK_WED_WDMA_GLO_CFG,
1115 			MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE);
1116 
1117 		wed_clr(dev, MTK_WED_WDMA_GLO_CFG,
1118 			MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE);
1119 	}
1120 
1121 	/* 3. reset WED TX free agent and TX buffer manager */
1122 	wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
1123 
1124 	for (i = 0; i < 100; i++) {
1125 		val = wed_r32(dev, MTK_WED_TX_BM_INTF);
1126 		if (FIELD_GET(MTK_WED_TX_BM_INTF_TKFIFO_FDEP, val) == 0x40)
1127 			break;
1128 	}
1129 
1130 	mtk_wed_reset(dev, MTK_WED_RESET_TX_FREE_AGENT);
1131 	wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_TX_BM_EN);
1132 	mtk_wed_reset(dev, MTK_WED_RESET_TX_BM);
1133 
1134 	/* 4. reset WED WPDMA tx */
1135 	busy = mtk_wed_poll_busy(dev, MTK_WED_WPDMA_GLO_CFG,
1136 				 MTK_WED_WPDMA_GLO_CFG_TX_DRV_BUSY);
1137 	wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
1138 		MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
1139 		MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN);
1140 	if (!busy)
1141 		busy = mtk_wed_poll_busy(dev, MTK_WED_WPDMA_GLO_CFG,
1142 					 MTK_WED_WPDMA_GLO_CFG_RX_DRV_BUSY);
1143 
1144 	if (busy) {
1145 		mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_INT_AGENT);
1146 		mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_TX_DRV);
1147 		mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_RX_DRV);
1148 	} else {
1149 		wed_w32(dev, MTK_WED_WPDMA_RESET_IDX,
1150 			MTK_WED_WPDMA_RESET_IDX_TX |
1151 			MTK_WED_WPDMA_RESET_IDX_RX);
1152 		wed_w32(dev, MTK_WED_WPDMA_RESET_IDX, 0);
1153 	}
1154 
1155 	dev->init_done = false;
1156 	if (dev->hw->version == 1)
1157 		return;
1158 
1159 	if (!busy) {
1160 		wed_w32(dev, MTK_WED_RESET_IDX, MTK_WED_RESET_WPDMA_IDX_RX);
1161 		wed_w32(dev, MTK_WED_RESET_IDX, 0);
1162 	}
1163 
1164 	mtk_wed_rx_reset(dev);
1165 }
1166 
1167 static int
1168 mtk_wed_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring,
1169 		   int size, u32 desc_size, bool tx)
1170 {
1171 	ring->desc = dma_alloc_coherent(dev->hw->dev, size * desc_size,
1172 					&ring->desc_phys, GFP_KERNEL);
1173 	if (!ring->desc)
1174 		return -ENOMEM;
1175 
1176 	ring->desc_size = desc_size;
1177 	ring->size = size;
1178 	mtk_wed_ring_reset(ring, size, tx);
1179 
1180 	return 0;
1181 }
1182 
1183 static int
1184 mtk_wed_wdma_rx_ring_setup(struct mtk_wed_device *dev, int idx, int size,
1185 			   bool reset)
1186 {
1187 	u32 desc_size = sizeof(struct mtk_wdma_desc) * dev->hw->version;
1188 	struct mtk_wed_ring *wdma;
1189 
1190 	if (idx >= ARRAY_SIZE(dev->rx_wdma))
1191 		return -EINVAL;
1192 
1193 	wdma = &dev->rx_wdma[idx];
1194 	if (!reset && mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE,
1195 					 desc_size, true))
1196 		return -ENOMEM;
1197 
1198 	wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE,
1199 		 wdma->desc_phys);
1200 	wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_COUNT,
1201 		 size);
1202 	wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0);
1203 
1204 	wed_w32(dev, MTK_WED_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE,
1205 		wdma->desc_phys);
1206 	wed_w32(dev, MTK_WED_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_COUNT,
1207 		size);
1208 
1209 	return 0;
1210 }
1211 
1212 static int
1213 mtk_wed_wdma_tx_ring_setup(struct mtk_wed_device *dev, int idx, int size)
1214 {
1215 	u32 desc_size = sizeof(struct mtk_wdma_desc) * dev->hw->version;
1216 	struct mtk_wed_ring *wdma;
1217 
1218 	if (idx >= ARRAY_SIZE(dev->tx_wdma))
1219 		return -EINVAL;
1220 
1221 	wdma = &dev->tx_wdma[idx];
1222 	if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE, desc_size,
1223 			       true))
1224 		return -ENOMEM;
1225 
1226 	wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_BASE,
1227 		 wdma->desc_phys);
1228 	wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_COUNT,
1229 		 size);
1230 	wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0);
1231 	wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_DMA_IDX, 0);
1232 
1233 	if (!idx) {
1234 		wed_w32(dev, MTK_WED_WDMA_RING_TX + MTK_WED_RING_OFS_BASE,
1235 			wdma->desc_phys);
1236 		wed_w32(dev, MTK_WED_WDMA_RING_TX + MTK_WED_RING_OFS_COUNT,
1237 			size);
1238 		wed_w32(dev, MTK_WED_WDMA_RING_TX + MTK_WED_RING_OFS_CPU_IDX,
1239 			0);
1240 		wed_w32(dev, MTK_WED_WDMA_RING_TX + MTK_WED_RING_OFS_DMA_IDX,
1241 			0);
1242 	}
1243 
1244 	return 0;
1245 }
1246 
1247 static void
1248 mtk_wed_ppe_check(struct mtk_wed_device *dev, struct sk_buff *skb,
1249 		  u32 reason, u32 hash)
1250 {
1251 	struct mtk_eth *eth = dev->hw->eth;
1252 	struct ethhdr *eh;
1253 
1254 	if (!skb)
1255 		return;
1256 
1257 	if (reason != MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
1258 		return;
1259 
1260 	skb_set_mac_header(skb, 0);
1261 	eh = eth_hdr(skb);
1262 	skb->protocol = eh->h_proto;
1263 	mtk_ppe_check_skb(eth->ppe[dev->hw->index], skb, hash);
1264 }
1265 
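/* Route interrupts between WDMA, WED and the WLAN chip: WDMA RX-done (and,
 * on version 2, TX-done) events are steered into WED, the per-ring trigger
 * bits supplied by the WLAN driver (tx_tbit/rx_tbit/txfree_tbit) are
 * programmed, and the requested irq_mask is mirrored into the WPDMA and WED
 * interrupt mask registers.
 */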
1266 static void
1267 mtk_wed_configure_irq(struct mtk_wed_device *dev, u32 irq_mask)
1268 {
1269 	u32 wdma_mask = FIELD_PREP(MTK_WDMA_INT_MASK_RX_DONE, GENMASK(1, 0));
1270 
1271 	/* WED control register (CR) setup */
1272 	wed_set(dev, MTK_WED_CTRL,
1273 		MTK_WED_CTRL_WDMA_INT_AGENT_EN |
1274 		MTK_WED_CTRL_WPDMA_INT_AGENT_EN |
1275 		MTK_WED_CTRL_WED_TX_BM_EN |
1276 		MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
1277 
1278 	if (dev->hw->version == 1) {
1279 		wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER,
1280 			MTK_WED_PCIE_INT_TRIGGER_STATUS);
1281 
1282 		wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER,
1283 			MTK_WED_WPDMA_INT_TRIGGER_RX_DONE |
1284 			MTK_WED_WPDMA_INT_TRIGGER_TX_DONE);
1285 
1286 		wed_clr(dev, MTK_WED_WDMA_INT_CTRL, wdma_mask);
1287 	} else {
1288 		wdma_mask |= FIELD_PREP(MTK_WDMA_INT_MASK_TX_DONE,
1289 					GENMASK(1, 0));
1290 		/* initial tx interrupt trigger */
1291 		wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX,
1292 			MTK_WED_WPDMA_INT_CTRL_TX0_DONE_EN |
1293 			MTK_WED_WPDMA_INT_CTRL_TX0_DONE_CLR |
1294 			MTK_WED_WPDMA_INT_CTRL_TX1_DONE_EN |
1295 			MTK_WED_WPDMA_INT_CTRL_TX1_DONE_CLR |
1296 			FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX0_DONE_TRIG,
1297 				   dev->wlan.tx_tbit[0]) |
1298 			FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX1_DONE_TRIG,
1299 				   dev->wlan.tx_tbit[1]));
1300 
1301 		/* initial txfree interrupt trigger */
1302 		wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX_FREE,
1303 			MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_EN |
1304 			MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_CLR |
1305 			FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_TRIG,
1306 				   dev->wlan.txfree_tbit));
1307 
1308 		wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RX,
1309 			MTK_WED_WPDMA_INT_CTRL_RX0_EN |
1310 			MTK_WED_WPDMA_INT_CTRL_RX0_CLR |
1311 			MTK_WED_WPDMA_INT_CTRL_RX1_EN |
1312 			MTK_WED_WPDMA_INT_CTRL_RX1_CLR |
1313 			FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX0_DONE_TRIG,
1314 				   dev->wlan.rx_tbit[0]) |
1315 			FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX1_DONE_TRIG,
1316 				   dev->wlan.rx_tbit[1]));
1317 
1318 		wed_w32(dev, MTK_WED_WDMA_INT_CLR, wdma_mask);
1319 		wed_set(dev, MTK_WED_WDMA_INT_CTRL,
1320 			FIELD_PREP(MTK_WED_WDMA_INT_CTRL_POLL_SRC_SEL,
1321 				   dev->wdma_idx));
1322 	}
1323 
1324 	wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, wdma_mask);
1325 
1326 	wdma_w32(dev, MTK_WDMA_INT_MASK, wdma_mask);
1327 	wdma_w32(dev, MTK_WDMA_INT_GRP2, wdma_mask);
1328 	wed_w32(dev, MTK_WED_WPDMA_INT_MASK, irq_mask);
1329 	wed_w32(dev, MTK_WED_INT_MASK, irq_mask);
1330 }
1331 
1332 static void
1333 mtk_wed_dma_enable(struct mtk_wed_device *dev)
1334 {
1335 	wed_set(dev, MTK_WED_WPDMA_INT_CTRL, MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV);
1336 
1337 	wed_set(dev, MTK_WED_GLO_CFG,
1338 		MTK_WED_GLO_CFG_TX_DMA_EN |
1339 		MTK_WED_GLO_CFG_RX_DMA_EN);
1340 	wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
1341 		MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
1342 		MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN);
1343 	wed_set(dev, MTK_WED_WDMA_GLO_CFG,
1344 		MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);
1345 
1346 	wdma_set(dev, MTK_WDMA_GLO_CFG,
1347 		 MTK_WDMA_GLO_CFG_TX_DMA_EN |
1348 		 MTK_WDMA_GLO_CFG_RX_INFO1_PRERES |
1349 		 MTK_WDMA_GLO_CFG_RX_INFO2_PRERES);
1350 
1351 	if (dev->hw->version == 1) {
1352 		wdma_set(dev, MTK_WDMA_GLO_CFG,
1353 			 MTK_WDMA_GLO_CFG_RX_INFO3_PRERES);
1354 	} else {
1355 		int i;
1356 
1357 		wed_set(dev, MTK_WED_WPDMA_CTRL,
1358 			MTK_WED_WPDMA_CTRL_SDL1_FIXED);
1359 
1360 		wed_set(dev, MTK_WED_WDMA_GLO_CFG,
1361 			MTK_WED_WDMA_GLO_CFG_TX_DRV_EN |
1362 			MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK);
1363 
1364 		wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
1365 			MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC |
1366 			MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC);
1367 
1368 		wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
1369 			MTK_WED_WPDMA_GLO_CFG_TX_TKID_KEEP |
1370 			MTK_WED_WPDMA_GLO_CFG_TX_DMAD_DW3_PREV);
1371 
1372 		wed_set(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
1373 			MTK_WED_WPDMA_RX_D_RX_DRV_EN |
1374 			FIELD_PREP(MTK_WED_WPDMA_RX_D_RXD_READ_LEN, 0x18) |
1375 			FIELD_PREP(MTK_WED_WPDMA_RX_D_INIT_PHASE_RXEN_SEL,
1376 				   0x2));
1377 
1378 		for (i = 0; i < MTK_WED_RX_QUEUES; i++)
1379 			mtk_wed_check_wfdma_rx_fill(dev, i);
1380 	}
1381 }
1382 
1383 static void
1384 mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask)
1385 {
1386 	int i;
1387 
1388 	if (mtk_wed_get_rx_capa(dev) && mtk_wed_rx_buffer_alloc(dev))
1389 		return;
1390 
1391 	for (i = 0; i < ARRAY_SIZE(dev->rx_wdma); i++)
1392 		if (!dev->rx_wdma[i].desc)
1393 			mtk_wed_wdma_rx_ring_setup(dev, i, 16, false);
1394 
1395 	mtk_wed_hw_init(dev);
1396 	mtk_wed_configure_irq(dev, irq_mask);
1397 
1398 	mtk_wed_set_ext_int(dev, true);
1399 
1400 	if (dev->hw->version == 1) {
1401 		u32 val = dev->wlan.wpdma_phys | MTK_PCIE_MIRROR_MAP_EN |
1402 			  FIELD_PREP(MTK_PCIE_MIRROR_MAP_WED_ID,
1403 				     dev->hw->index);
1404 
1405 		val |= BIT(0) | (BIT(1) * !!dev->hw->index);
1406 		regmap_write(dev->hw->mirror, dev->hw->index * 4, val);
1407 	} else {
1408 		/* the driver sets WPDMA_MID_RDY here, and only once */
1409 		wed_w32(dev, MTK_WED_EXT_INT_MASK1,
1410 			MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY);
1411 		wed_w32(dev, MTK_WED_EXT_INT_MASK2,
1412 			MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY);
1413 
1414 		wed_r32(dev, MTK_WED_EXT_INT_MASK1);
1415 		wed_r32(dev, MTK_WED_EXT_INT_MASK2);
1416 
1417 		if (mtk_wed_rro_cfg(dev))
1418 			return;
1419 
1420 	}
1421 
1422 	mtk_wed_set_512_support(dev, dev->wlan.wcid_512);
1423 
1424 	mtk_wed_dma_enable(dev);
1425 	dev->running = true;
1426 }
1427 
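/* Entry point used by the WLAN driver, normally reached through the
 * mtk_wed_device_attach() helper in include/linux/soc/mediatek/mtk_wed.h,
 * which dereferences mtk_soc_wed_ops under rcu_read_lock(); hence the
 * __releases(RCU) annotation and the rcu_read_unlock() below.
 */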
1428 static int
1429 mtk_wed_attach(struct mtk_wed_device *dev)
1430 	__releases(RCU)
1431 {
1432 	struct mtk_wed_hw *hw;
1433 	struct device *device;
1434 	int ret = 0;
1435 
1436 	RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
1437 			 "mtk_wed_attach without holding the RCU read lock");
1438 
1439 	if ((dev->wlan.bus_type == MTK_WED_BUS_PCIE &&
1440 	     pci_domain_nr(dev->wlan.pci_dev->bus) > 1) ||
1441 	    !try_module_get(THIS_MODULE))
1442 		ret = -ENODEV;
1443 
1444 	rcu_read_unlock();
1445 
1446 	if (ret)
1447 		return ret;
1448 
1449 	mutex_lock(&hw_lock);
1450 
1451 	hw = mtk_wed_assign(dev);
1452 	if (!hw) {
1453 		module_put(THIS_MODULE);
1454 		ret = -ENODEV;
1455 		goto unlock;
1456 	}
1457 
1458 	device = dev->wlan.bus_type == MTK_WED_BUS_PCIE
1459 		? &dev->wlan.pci_dev->dev
1460 		: &dev->wlan.platform_dev->dev;
1461 	dev_info(device, "attaching wed device %d version %d\n",
1462 		 hw->index, hw->version);
1463 
1464 	dev->hw = hw;
1465 	dev->dev = hw->dev;
1466 	dev->irq = hw->irq;
1467 	dev->wdma_idx = hw->index;
1468 	dev->version = hw->version;
1469 
1470 	if (hw->eth->dma_dev == hw->eth->dev &&
1471 	    of_dma_is_coherent(hw->eth->dev->of_node))
1472 		mtk_eth_set_dma_device(hw->eth, hw->dev);
1473 
1474 	ret = mtk_wed_tx_buffer_alloc(dev);
1475 	if (ret)
1476 		goto out;
1477 
1478 	if (mtk_wed_get_rx_capa(dev)) {
1479 		ret = mtk_wed_rro_alloc(dev);
1480 		if (ret)
1481 			goto out;
1482 	}
1483 
1484 	mtk_wed_hw_init_early(dev);
1485 	if (hw->version == 1) {
1486 		regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP,
1487 				   BIT(hw->index), 0);
1488 	} else {
1489 		dev->rev_id = wed_r32(dev, MTK_WED_REV_ID);
1490 		ret = mtk_wed_wo_init(hw);
1491 	}
1492 out:
1493 	if (ret)
1494 		mtk_wed_detach(dev);
1495 unlock:
1496 	mutex_unlock(&hw_lock);
1497 
1498 	return ret;
1499 }
1500 
1501 static int
1502 mtk_wed_tx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs,
1503 		      bool reset)
1504 {
1505 	struct mtk_wed_ring *ring = &dev->tx_ring[idx];
1506 
1507 	/*
1508 	 * Tx ring redirection:
1509 	 * Instead of configuring the WLAN PDMA TX ring directly, the DMA ring
1510 	 * allocated by the WLAN driver is configured into the WED
1511 	 * MTK_WED_RING_TX(n) registers.
1512 	 *
1513 	 * The WED driver posts its own DMA ring as the WLAN PDMA TX ring and
1514 	 * configures it into the MTK_WED_WPDMA_RING_TX(n) registers. That ring
1515 	 * is filled with packets picked up from the WED TX ring and from
1516 	 * WDMA RX.
1517 	 */
1518 
1519 	if (WARN_ON(idx >= ARRAY_SIZE(dev->tx_ring)))
1520 		return -EINVAL;
1521 
1522 	if (!reset && mtk_wed_ring_alloc(dev, ring, MTK_WED_TX_RING_SIZE,
1523 					 sizeof(*ring->desc), true))
1524 		return -ENOMEM;
1525 
1526 	if (mtk_wed_wdma_rx_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE,
1527 				       reset))
1528 		return -ENOMEM;
1529 
1530 	ring->reg_base = MTK_WED_RING_TX(idx);
1531 	ring->wpdma = regs;
1532 
1533 	/* WED -> WPDMA */
1534 	wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_BASE, ring->desc_phys);
1535 	wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_COUNT, MTK_WED_TX_RING_SIZE);
1536 	wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_CPU_IDX, 0);
1537 
1538 	wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_BASE,
1539 		ring->desc_phys);
1540 	wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_COUNT,
1541 		MTK_WED_TX_RING_SIZE);
1542 	wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0);
1543 
1544 	return 0;
1545 }
1546 
1547 static int
1548 mtk_wed_txfree_ring_setup(struct mtk_wed_device *dev, void __iomem *regs)
1549 {
1550 	struct mtk_wed_ring *ring = &dev->txfree_ring;
1551 	int i, index = dev->hw->version == 1;
1552 
1553 	/*
1554 	 * For txfree event handling, the same DMA ring is shared between WED
1555 	 * and WLAN. The WLAN driver accesses the ring index registers through
1556 	 * WED.
1557 	 */
1558 	ring->reg_base = MTK_WED_RING_RX(index);
1559 	ring->wpdma = regs;
1560 
1561 	for (i = 0; i < 12; i += 4) {
1562 		u32 val = readl(regs + i);
1563 
1564 		wed_w32(dev, MTK_WED_RING_RX(index) + i, val);
1565 		wed_w32(dev, MTK_WED_WPDMA_RING_RX(index) + i, val);
1566 	}
1567 
1568 	return 0;
1569 }
1570 
1571 static int
1572 mtk_wed_rx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs)
1573 {
1574 	struct mtk_wed_ring *ring = &dev->rx_ring[idx];
1575 
1576 	if (WARN_ON(idx >= ARRAY_SIZE(dev->rx_ring)))
1577 		return -EINVAL;
1578 
1579 	if (mtk_wed_ring_alloc(dev, ring, MTK_WED_RX_RING_SIZE,
1580 			       sizeof(*ring->desc), false))
1581 		return -ENOMEM;
1582 
1583 	if (mtk_wed_wdma_tx_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE))
1584 		return -ENOMEM;
1585 
1586 	ring->reg_base = MTK_WED_RING_RX_DATA(idx);
1587 	ring->wpdma = regs;
1588 	ring->flags |= MTK_WED_RING_CONFIGURED;
1589 
1590 	/* WPDMA ->  WED */
1591 	wpdma_rx_w32(dev, idx, MTK_WED_RING_OFS_BASE, ring->desc_phys);
1592 	wpdma_rx_w32(dev, idx, MTK_WED_RING_OFS_COUNT, MTK_WED_RX_RING_SIZE);
1593 
1594 	wed_w32(dev, MTK_WED_WPDMA_RING_RX_DATA(idx) + MTK_WED_RING_OFS_BASE,
1595 		ring->desc_phys);
1596 	wed_w32(dev, MTK_WED_WPDMA_RING_RX_DATA(idx) + MTK_WED_RING_OFS_COUNT,
1597 		MTK_WED_RX_RING_SIZE);
1598 
1599 	return 0;
1600 }
1601 
1602 static u32
1603 mtk_wed_irq_get(struct mtk_wed_device *dev, u32 mask)
1604 {
1605 	u32 val, ext_mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK;
1606 
1607 	if (dev->hw->version == 1)
1608 		ext_mask |= MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR;
1609 	else
1610 		ext_mask |= MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH |
1611 			    MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH |
1612 			    MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT |
1613 			    MTK_WED_EXT_INT_STATUS_TX_DMA_W_RESP_ERR;
1614 
1615 	val = wed_r32(dev, MTK_WED_EXT_INT_STATUS);
1616 	wed_w32(dev, MTK_WED_EXT_INT_STATUS, val);
1617 	val &= ext_mask;
1618 	if (!dev->hw->num_flows)
1619 		val &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD;
1620 	if (val && net_ratelimit())
1621 		pr_err("mtk_wed%d: error status=%08x\n", dev->hw->index, val);
1622 
1623 	val = wed_r32(dev, MTK_WED_INT_STATUS);
1624 	val &= mask;
1625 	wed_w32(dev, MTK_WED_INT_STATUS, val); /* ACK */
1626 
1627 	return val;
1628 }
1629 
1630 static void
1631 mtk_wed_irq_set_mask(struct mtk_wed_device *dev, u32 mask)
1632 {
1633 	if (!dev->running)
1634 		return;
1635 
1636 	mtk_wed_set_ext_int(dev, !!mask);
1637 	wed_w32(dev, MTK_WED_INT_MASK, mask);
1638 }
1639 
1640 int mtk_wed_flow_add(int index)
1641 {
1642 	struct mtk_wed_hw *hw = hw_list[index];
1643 	int ret;
1644 
1645 	if (!hw || !hw->wed_dev)
1646 		return -ENODEV;
1647 
1648 	if (hw->num_flows) {
1649 		hw->num_flows++;
1650 		return 0;
1651 	}
1652 
1653 	mutex_lock(&hw_lock);
1654 	if (!hw->wed_dev) {
1655 		ret = -ENODEV;
1656 		goto out;
1657 	}
1658 
1659 	ret = hw->wed_dev->wlan.offload_enable(hw->wed_dev);
1660 	if (!ret)
1661 		hw->num_flows++;
1662 	mtk_wed_set_ext_int(hw->wed_dev, true);
1663 
1664 out:
1665 	mutex_unlock(&hw_lock);
1666 
1667 	return ret;
1668 }
1669 
1670 void mtk_wed_flow_remove(int index)
1671 {
1672 	struct mtk_wed_hw *hw = hw_list[index];
1673 
1674 	if (!hw)
1675 		return;
1676 
1677 	if (--hw->num_flows)
1678 		return;
1679 
1680 	mutex_lock(&hw_lock);
1681 	if (!hw->wed_dev)
1682 		goto out;
1683 
1684 	hw->wed_dev->wlan.offload_disable(hw->wed_dev);
1685 	mtk_wed_set_ext_int(hw->wed_dev, true);
1686 
1687 out:
1688 	mutex_unlock(&hw_lock);
1689 }
1690 
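/* Called from the mtk_eth_soc probe path for each WED node: publishes the
 * mtk_soc_wed_ops table, records the new unit in hw_list[index] and, for
 * version 1 hardware, also looks up the pcie-mirror and hifsys syscons.
 */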
1691 void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
1692 		    void __iomem *wdma, phys_addr_t wdma_phy,
1693 		    int index)
1694 {
1695 	static const struct mtk_wed_ops wed_ops = {
1696 		.attach = mtk_wed_attach,
1697 		.tx_ring_setup = mtk_wed_tx_ring_setup,
1698 		.rx_ring_setup = mtk_wed_rx_ring_setup,
1699 		.txfree_ring_setup = mtk_wed_txfree_ring_setup,
1700 		.msg_update = mtk_wed_mcu_msg_update,
1701 		.start = mtk_wed_start,
1702 		.stop = mtk_wed_stop,
1703 		.reset_dma = mtk_wed_reset_dma,
1704 		.reg_read = wed_r32,
1705 		.reg_write = wed_w32,
1706 		.irq_get = mtk_wed_irq_get,
1707 		.irq_set_mask = mtk_wed_irq_set_mask,
1708 		.detach = mtk_wed_detach,
1709 		.ppe_check = mtk_wed_ppe_check,
1710 	};
1711 	struct device_node *eth_np = eth->dev->of_node;
1712 	struct platform_device *pdev;
1713 	struct mtk_wed_hw *hw;
1714 	struct regmap *regs;
1715 	int irq;
1716 
1717 	if (!np)
1718 		return;
1719 
1720 	pdev = of_find_device_by_node(np);
1721 	if (!pdev)
1722 		goto err_of_node_put;
1723 
1724 	get_device(&pdev->dev);
1725 	irq = platform_get_irq(pdev, 0);
1726 	if (irq < 0)
1727 		goto err_put_device;
1728 
1729 	regs = syscon_regmap_lookup_by_phandle(np, NULL);
1730 	if (IS_ERR(regs))
1731 		goto err_put_device;
1732 
1733 	rcu_assign_pointer(mtk_soc_wed_ops, &wed_ops);
1734 
1735 	mutex_lock(&hw_lock);
1736 
1737 	if (WARN_ON(hw_list[index]))
1738 		goto unlock;
1739 
1740 	hw = kzalloc(sizeof(*hw), GFP_KERNEL);
1741 	if (!hw)
1742 		goto unlock;
1743 
1744 	hw->node = np;
1745 	hw->regs = regs;
1746 	hw->eth = eth;
1747 	hw->dev = &pdev->dev;
1748 	hw->wdma_phy = wdma_phy;
1749 	hw->wdma = wdma;
1750 	hw->index = index;
1751 	hw->irq = irq;
1752 	hw->version = MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ? 2 : 1;
1753 
1754 	if (hw->version == 1) {
1755 		hw->mirror = syscon_regmap_lookup_by_phandle(eth_np,
1756 				"mediatek,pcie-mirror");
1757 		hw->hifsys = syscon_regmap_lookup_by_phandle(eth_np,
1758 				"mediatek,hifsys");
1759 		if (IS_ERR(hw->mirror) || IS_ERR(hw->hifsys)) {
1760 			kfree(hw);
1761 			goto unlock;
1762 		}
1763 
1764 		if (!index) {
1765 			regmap_write(hw->mirror, 0, 0);
1766 			regmap_write(hw->mirror, 4, 0);
1767 		}
1768 	}
1769 
1770 	mtk_wed_hw_add_debugfs(hw);
1771 
1772 	hw_list[index] = hw;
1773 
1774 	mutex_unlock(&hw_lock);
1775 
1776 	return;
1777 
1778 unlock:
1779 	mutex_unlock(&hw_lock);
1780 err_put_device:
1781 	put_device(&pdev->dev);
1782 err_of_node_put:
1783 	of_node_put(np);
1784 }
1785 
1786 void mtk_wed_exit(void)
1787 {
1788 	int i;
1789 
1790 	rcu_assign_pointer(mtk_soc_wed_ops, NULL);
1791 
1792 	synchronize_rcu();
1793 
1794 	for (i = 0; i < ARRAY_SIZE(hw_list); i++) {
1795 		struct mtk_wed_hw *hw;
1796 
1797 		hw = hw_list[i];
1798 		if (!hw)
1799 			continue;
1800 
1801 		hw_list[i] = NULL;
1802 		debugfs_remove(hw->debugfs_dir);
1803 		put_device(hw->dev);
1804 		of_node_put(hw->node);
1805 		kfree(hw);
1806 	}
1807 }
1808