// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2021 Felix Fietkau <nbd@nbd.name> */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bitfield.h>
#include <linux/dma-mapping.h>
#include <linux/skbuff.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>
#include <linux/mfd/syscon.h>
#include <linux/debugfs.h>
#include <linux/soc/mediatek/mtk_wed.h>
#include "mtk_eth_soc.h"
#include "mtk_wed_regs.h"
#include "mtk_wed.h"
#include "mtk_ppe.h"

#define MTK_PCIE_BASE(n)		(0x1a143000 + (n) * 0x2000)

#define MTK_WED_PKT_SIZE		1900
#define MTK_WED_BUF_SIZE		2048
#define MTK_WED_BUF_PER_PAGE		(PAGE_SIZE / MTK_WED_BUF_SIZE)

#define MTK_WED_TX_RING_SIZE		2048
#define MTK_WED_WDMA_RING_SIZE		1024

static struct mtk_wed_hw *hw_list[2];
static DEFINE_MUTEX(hw_lock);

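/*
 * Register access helpers: the wed_*() accessors go through the WED
 * instance's syscon regmap, while the wdma_*() accessors operate on the
 * WDMA register window handed over by the ethernet driver.
 */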
static void
wed_m32(struct mtk_wed_device *dev, u32 reg, u32 mask, u32 val)
{
	regmap_update_bits(dev->hw->regs, reg, mask | val, val);
}

static void
wed_set(struct mtk_wed_device *dev, u32 reg, u32 mask)
{
	wed_m32(dev, reg, 0, mask);
}

static void
wed_clr(struct mtk_wed_device *dev, u32 reg, u32 mask)
{
	wed_m32(dev, reg, mask, 0);
}

static void
wdma_m32(struct mtk_wed_device *dev, u32 reg, u32 mask, u32 val)
{
	wdma_w32(dev, reg, (wdma_r32(dev, reg) & ~mask) | val);
}

static void
wdma_set(struct mtk_wed_device *dev, u32 reg, u32 mask)
{
	wdma_m32(dev, reg, 0, mask);
}

static u32
mtk_wed_read_reset(struct mtk_wed_device *dev)
{
	return wed_r32(dev, MTK_WED_RESET);
}

static void
mtk_wed_reset(struct mtk_wed_device *dev, u32 mask)
{
	u32 status;

	wed_w32(dev, MTK_WED_RESET, mask);
	if (readx_poll_timeout(mtk_wed_read_reset, dev, status,
			       !(status & mask), 0, 1000))
		WARN_ON_ONCE(1);
}

static struct mtk_wed_hw *
mtk_wed_assign(struct mtk_wed_device *dev)
{
	struct mtk_wed_hw *hw;

	hw = hw_list[pci_domain_nr(dev->wlan.pci_dev->bus)];
	if (!hw || hw->wed_dev)
		return NULL;

	hw->wed_dev = dev;
	return hw;
}

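/*
 * Allocate the TX buffer pool handed to the WED hardware: a coherent ring
 * of WDMA descriptors plus page-backed buffers of MTK_WED_BUF_SIZE bytes.
 * Each buffer is pre-filled via the WLAN driver's init_buf() callback,
 * which returns the size of the TXD area at the start of the buffer.
 */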
static int
mtk_wed_buffer_alloc(struct mtk_wed_device *dev)
{
	struct mtk_wdma_desc *desc;
	dma_addr_t desc_phys;
	void **page_list;
	int token = dev->wlan.token_start;
	int ring_size;
	int n_pages;
	int i, page_idx;

	ring_size = dev->wlan.nbuf & ~(MTK_WED_BUF_PER_PAGE - 1);
	n_pages = ring_size / MTK_WED_BUF_PER_PAGE;

	page_list = kcalloc(n_pages, sizeof(*page_list), GFP_KERNEL);
	if (!page_list)
		return -ENOMEM;

	dev->buf_ring.size = ring_size;
	dev->buf_ring.pages = page_list;

	desc = dma_alloc_coherent(dev->hw->dev, ring_size * sizeof(*desc),
				  &desc_phys, GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	dev->buf_ring.desc = desc;
	dev->buf_ring.desc_phys = desc_phys;

	for (i = 0, page_idx = 0; i < ring_size; i += MTK_WED_BUF_PER_PAGE) {
		dma_addr_t page_phys, buf_phys;
		struct page *page;
		void *buf;
		int s;

		page = __dev_alloc_pages(GFP_KERNEL, 0);
		if (!page)
			return -ENOMEM;

		page_phys = dma_map_page(dev->hw->dev, page, 0, PAGE_SIZE,
					 DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev->hw->dev, page_phys)) {
			__free_page(page);
			return -ENOMEM;
		}

		page_list[page_idx++] = page;
		dma_sync_single_for_cpu(dev->hw->dev, page_phys, PAGE_SIZE,
					DMA_BIDIRECTIONAL);

		buf = page_to_virt(page);
		buf_phys = page_phys;

		for (s = 0; s < MTK_WED_BUF_PER_PAGE; s++) {
			u32 txd_size;
			u32 ctrl;

			txd_size = dev->wlan.init_buf(buf, buf_phys, token++);

			desc->buf0 = cpu_to_le32(buf_phys);
			desc->buf1 = cpu_to_le32(buf_phys + txd_size);
			ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0, txd_size) |
			       FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1,
					  MTK_WED_BUF_SIZE - txd_size) |
			       MTK_WDMA_DESC_CTRL_LAST_SEG1;
			desc->ctrl = cpu_to_le32(ctrl);
			desc->info = 0;
			desc++;

			buf += MTK_WED_BUF_SIZE;
			buf_phys += MTK_WED_BUF_SIZE;
		}

		dma_sync_single_for_device(dev->hw->dev, page_phys, PAGE_SIZE,
					   DMA_BIDIRECTIONAL);
	}

	return 0;
}

static void
mtk_wed_free_buffer(struct mtk_wed_device *dev)
{
	struct mtk_wdma_desc *desc = dev->buf_ring.desc;
	void **page_list = dev->buf_ring.pages;
	int page_idx;
	int i;

	if (!page_list)
		return;

	if (!desc)
		goto free_pagelist;

	for (i = 0, page_idx = 0; i < dev->buf_ring.size; i += MTK_WED_BUF_PER_PAGE) {
		void *page = page_list[page_idx++];
		dma_addr_t buf_addr;

		if (!page)
			break;

		buf_addr = le32_to_cpu(desc[i].buf0);
		dma_unmap_page(dev->hw->dev, buf_addr, PAGE_SIZE,
			       DMA_BIDIRECTIONAL);
		__free_page(page);
	}

	dma_free_coherent(dev->hw->dev, dev->buf_ring.size * sizeof(*desc),
			  desc, dev->buf_ring.desc_phys);

free_pagelist:
	kfree(page_list);
}

static void
mtk_wed_free_ring(struct mtk_wed_device *dev, struct mtk_wed_ring *ring)
{
	if (!ring->desc)
		return;

	dma_free_coherent(dev->hw->dev, ring->size * sizeof(*ring->desc),
			  ring->desc, ring->desc_phys);
}

static void
mtk_wed_free_tx_rings(struct mtk_wed_device *dev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(dev->tx_ring); i++)
		mtk_wed_free_ring(dev, &dev->tx_ring[i]);
	for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++)
		mtk_wed_free_ring(dev, &dev->tx_wdma[i]);
}

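/*
 * Enable or disable the extra error interrupt sources. The TKID_WO_PYLD
 * condition is only reported while hardware flows are installed
 * (hw->num_flows != 0).
 */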
static void
mtk_wed_set_ext_int(struct mtk_wed_device *dev, bool en)
{
	u32 mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK;

	if (!dev->hw->num_flows)
		mask &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD;

	wed_w32(dev, MTK_WED_EXT_INT_MASK, en ? mask : 0);
	wed_r32(dev, MTK_WED_EXT_INT_MASK);
}

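/*
 * Disable the PCIe mirror entry, mask all WED/WDMA interrupt sources and
 * stop the DMA engines driven by this WED instance.
 */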
static void
mtk_wed_stop(struct mtk_wed_device *dev)
{
	regmap_write(dev->hw->mirror, dev->hw->index * 4, 0);
	mtk_wed_set_ext_int(dev, false);

	wed_clr(dev, MTK_WED_CTRL,
		MTK_WED_CTRL_WDMA_INT_AGENT_EN |
		MTK_WED_CTRL_WPDMA_INT_AGENT_EN |
		MTK_WED_CTRL_WED_TX_BM_EN |
		MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
	wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER, 0);
	wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, 0);
	wdma_w32(dev, MTK_WDMA_INT_MASK, 0);
	wdma_w32(dev, MTK_WDMA_INT_GRP2, 0);
	wed_w32(dev, MTK_WED_WPDMA_INT_MASK, 0);

	wed_clr(dev, MTK_WED_GLO_CFG,
		MTK_WED_GLO_CFG_TX_DMA_EN |
		MTK_WED_GLO_CFG_RX_DMA_EN);
	wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
		MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
		MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN);
	wed_clr(dev, MTK_WED_WDMA_GLO_CFG,
		MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);
}

static void
mtk_wed_detach(struct mtk_wed_device *dev)
{
	struct device_node *wlan_node = dev->wlan.pci_dev->dev.of_node;
	struct mtk_wed_hw *hw = dev->hw;

	mutex_lock(&hw_lock);

	mtk_wed_stop(dev);

	wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX);
	wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);

	mtk_wed_reset(dev, MTK_WED_RESET_WED);

	mtk_wed_free_buffer(dev);
	mtk_wed_free_tx_rings(dev);

	if (of_dma_is_coherent(wlan_node))
		regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP,
				   BIT(hw->index), BIT(hw->index));

	if ((!hw_list[!hw->index] || !hw_list[!hw->index]->wed_dev) &&
	    hw->eth->dma_dev != hw->eth->dev)
		mtk_eth_set_dma_device(hw->eth, hw->eth->dev);

	memset(dev, 0, sizeof(*dev));
	module_put(THIS_MODULE);

	hw->wed_dev = NULL;
	mutex_unlock(&hw_lock);
}

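/*
 * Early hardware setup done at attach time: reset the WED block, set up
 * the WDMA global config and program the WDMA offset, PCIe and WPDMA base
 * registers. The OFFSET0/1 values are opaque, hardware-specific constants.
 */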
static void
mtk_wed_hw_init_early(struct mtk_wed_device *dev)
{
	u32 mask, set;
	u32 offset;

	mtk_wed_stop(dev);
	mtk_wed_reset(dev, MTK_WED_RESET_WED);

	mask = MTK_WED_WDMA_GLO_CFG_BT_SIZE |
	       MTK_WED_WDMA_GLO_CFG_DYNAMIC_DMAD_RECYCLE |
	       MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE;
	set = FIELD_PREP(MTK_WED_WDMA_GLO_CFG_BT_SIZE, 2) |
	      MTK_WED_WDMA_GLO_CFG_DYNAMIC_SKIP_DMAD_PREP |
	      MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY;
	wed_m32(dev, MTK_WED_WDMA_GLO_CFG, mask, set);

	wdma_set(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_RX_INFO_PRERES);

	offset = dev->hw->index ? 0x04000400 : 0;
	wed_w32(dev, MTK_WED_WDMA_OFFSET0, 0x2a042a20 + offset);
	wed_w32(dev, MTK_WED_WDMA_OFFSET1, 0x29002800 + offset);

	wed_w32(dev, MTK_WED_PCIE_CFG_BASE, MTK_PCIE_BASE(dev->hw->index));
	wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_phys);
}

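/*
 * One-time setup of the WED TX buffer manager: point it at the descriptor
 * ring from mtk_wed_buffer_alloc(), program the token id range and buffer
 * length, then release it from the paused state.
 */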
static void
mtk_wed_hw_init(struct mtk_wed_device *dev)
{
	if (dev->init_done)
		return;

	dev->init_done = true;
	mtk_wed_set_ext_int(dev, false);
	wed_w32(dev, MTK_WED_TX_BM_CTRL,
		MTK_WED_TX_BM_CTRL_PAUSE |
		FIELD_PREP(MTK_WED_TX_BM_CTRL_VLD_GRP_NUM,
			   dev->buf_ring.size / 128) |
		FIELD_PREP(MTK_WED_TX_BM_CTRL_RSV_GRP_NUM,
			   MTK_WED_TX_RING_SIZE / 256));

	wed_w32(dev, MTK_WED_TX_BM_BASE, dev->buf_ring.desc_phys);

	wed_w32(dev, MTK_WED_TX_BM_TKID,
		FIELD_PREP(MTK_WED_TX_BM_TKID_START,
			   dev->wlan.token_start) |
		FIELD_PREP(MTK_WED_TX_BM_TKID_END,
			   dev->wlan.token_start + dev->wlan.nbuf - 1));

	wed_w32(dev, MTK_WED_TX_BM_BUF_LEN, MTK_WED_PKT_SIZE);

	wed_w32(dev, MTK_WED_TX_BM_DYN_THR,
		FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO, 1) |
		MTK_WED_TX_BM_DYN_THR_HI);

	mtk_wed_reset(dev, MTK_WED_RESET_TX_BM);

	wed_set(dev, MTK_WED_CTRL,
		MTK_WED_CTRL_WED_TX_BM_EN |
		MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);

	wed_clr(dev, MTK_WED_TX_BM_CTRL, MTK_WED_TX_BM_CTRL_PAUSE);
}

static void
mtk_wed_ring_reset(struct mtk_wdma_desc *desc, int size)
{
	int i;

	for (i = 0; i < size; i++) {
		desc[i].buf0 = 0;
		desc[i].ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE);
		desc[i].buf1 = 0;
		desc[i].info = 0;
	}
}

static u32
mtk_wed_check_busy(struct mtk_wed_device *dev)
{
	if (wed_r32(dev, MTK_WED_GLO_CFG) & MTK_WED_GLO_CFG_TX_DMA_BUSY)
		return true;

	if (wed_r32(dev, MTK_WED_WPDMA_GLO_CFG) &
	    MTK_WED_WPDMA_GLO_CFG_TX_DRV_BUSY)
		return true;

	if (wed_r32(dev, MTK_WED_CTRL) & MTK_WED_CTRL_WDMA_INT_AGENT_BUSY)
		return true;

	if (wed_r32(dev, MTK_WED_WDMA_GLO_CFG) &
	    MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY)
		return true;

	if (wdma_r32(dev, MTK_WDMA_GLO_CFG) &
	    MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY)
		return true;

	if (wed_r32(dev, MTK_WED_CTRL) &
	    (MTK_WED_CTRL_WED_TX_BM_BUSY | MTK_WED_CTRL_WED_TX_FREE_AGENT_BUSY))
		return true;

	return false;
}

static int
mtk_wed_poll_busy(struct mtk_wed_device *dev)
{
	int sleep = 15000;
	int timeout = 100 * sleep;
	u32 val;

	return read_poll_timeout(mtk_wed_check_busy, val, !val, sleep,
				 timeout, false, dev);
}

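/*
 * Reset the DMA state on both the WED and WDMA side. If the engines are
 * still busy after polling, fall back to a full block reset instead of
 * only resetting the ring indices.
 */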
static void
mtk_wed_reset_dma(struct mtk_wed_device *dev)
{
	bool busy = false;
	u32 val;
	int i;

	for (i = 0; i < ARRAY_SIZE(dev->tx_ring); i++) {
		struct mtk_wdma_desc *desc = dev->tx_ring[i].desc;

		if (!desc)
			continue;

		mtk_wed_ring_reset(desc, MTK_WED_TX_RING_SIZE);
	}

	if (mtk_wed_poll_busy(dev))
		busy = mtk_wed_check_busy(dev);

	if (busy) {
		mtk_wed_reset(dev, MTK_WED_RESET_WED_TX_DMA);
	} else {
		wed_w32(dev, MTK_WED_RESET_IDX,
			MTK_WED_RESET_IDX_TX |
			MTK_WED_RESET_IDX_RX);
		wed_w32(dev, MTK_WED_RESET_IDX, 0);
	}

	wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX);
	wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);

	if (busy) {
		mtk_wed_reset(dev, MTK_WED_RESET_WDMA_INT_AGENT);
		mtk_wed_reset(dev, MTK_WED_RESET_WDMA_RX_DRV);
	} else {
		wed_w32(dev, MTK_WED_WDMA_RESET_IDX,
			MTK_WED_WDMA_RESET_IDX_RX | MTK_WED_WDMA_RESET_IDX_DRV);
		wed_w32(dev, MTK_WED_WDMA_RESET_IDX, 0);

		wed_set(dev, MTK_WED_WDMA_GLO_CFG,
			MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE);

		wed_clr(dev, MTK_WED_WDMA_GLO_CFG,
			MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE);
	}

	for (i = 0; i < 100; i++) {
		val = wed_r32(dev, MTK_WED_TX_BM_INTF);
		if (FIELD_GET(MTK_WED_TX_BM_INTF_TKFIFO_FDEP, val) == 0x40)
			break;
	}

	mtk_wed_reset(dev, MTK_WED_RESET_TX_FREE_AGENT);
	mtk_wed_reset(dev, MTK_WED_RESET_TX_BM);

	if (busy) {
		mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_INT_AGENT);
		mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_TX_DRV);
		mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_RX_DRV);
	} else {
		wed_w32(dev, MTK_WED_WPDMA_RESET_IDX,
			MTK_WED_WPDMA_RESET_IDX_TX |
			MTK_WED_WPDMA_RESET_IDX_RX);
		wed_w32(dev, MTK_WED_WPDMA_RESET_IDX, 0);
	}
}

static int
mtk_wed_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring,
		   int size)
{
	ring->desc = dma_alloc_coherent(dev->hw->dev,
					size * sizeof(*ring->desc),
					&ring->desc_phys, GFP_KERNEL);
	if (!ring->desc)
		return -ENOMEM;

	ring->size = size;
	mtk_wed_ring_reset(ring->desc, size);

	return 0;
}

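/*
 * Allocate a WED-owned ring for WDMA RX and program its base and size into
 * both the WDMA registers and the WED mirror of them, so the ring is
 * shared between the two blocks.
 */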
static int
mtk_wed_wdma_ring_setup(struct mtk_wed_device *dev, int idx, int size)
{
	struct mtk_wed_ring *wdma = &dev->tx_wdma[idx];

	if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE))
		return -ENOMEM;

	wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE,
		 wdma->desc_phys);
	wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_COUNT,
		 size);
	wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0);

	wed_w32(dev, MTK_WED_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE,
		wdma->desc_phys);
	wed_w32(dev, MTK_WED_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_COUNT,
		size);

	return 0;
}

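/*
 * Bring the WED instance up: make sure all WDMA rings exist, run the
 * deferred hardware init, unmask interrupts, enable the DMA engines and
 * point the PCIe interrupt mirror at this instance.
 */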
static void
mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask)
{
	u32 wdma_mask;
	u32 val;
	int i;

	for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++)
		if (!dev->tx_wdma[i].desc)
			mtk_wed_wdma_ring_setup(dev, i, 16);

	wdma_mask = FIELD_PREP(MTK_WDMA_INT_MASK_RX_DONE, GENMASK(1, 0));

	mtk_wed_hw_init(dev);

	wed_set(dev, MTK_WED_CTRL,
		MTK_WED_CTRL_WDMA_INT_AGENT_EN |
		MTK_WED_CTRL_WPDMA_INT_AGENT_EN |
		MTK_WED_CTRL_WED_TX_BM_EN |
		MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);

	wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, MTK_WED_PCIE_INT_TRIGGER_STATUS);

	wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER,
		MTK_WED_WPDMA_INT_TRIGGER_RX_DONE |
		MTK_WED_WPDMA_INT_TRIGGER_TX_DONE);

	wed_set(dev, MTK_WED_WPDMA_INT_CTRL,
		MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV);

	wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, wdma_mask);
	wed_clr(dev, MTK_WED_WDMA_INT_CTRL, wdma_mask);

	wdma_w32(dev, MTK_WDMA_INT_MASK, wdma_mask);
	wdma_w32(dev, MTK_WDMA_INT_GRP2, wdma_mask);

	wed_w32(dev, MTK_WED_WPDMA_INT_MASK, irq_mask);
	wed_w32(dev, MTK_WED_INT_MASK, irq_mask);

	wed_set(dev, MTK_WED_GLO_CFG,
		MTK_WED_GLO_CFG_TX_DMA_EN |
		MTK_WED_GLO_CFG_RX_DMA_EN);
	wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
		MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
		MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN);
	wed_set(dev, MTK_WED_WDMA_GLO_CFG,
		MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);

	mtk_wed_set_ext_int(dev, true);
	val = dev->wlan.wpdma_phys |
	      MTK_PCIE_MIRROR_MAP_EN |
	      FIELD_PREP(MTK_PCIE_MIRROR_MAP_WED_ID, dev->hw->index);

	if (dev->hw->index)
		val |= BIT(1);
	val |= BIT(0);
	regmap_write(dev->hw->mirror, dev->hw->index * 4, val);

	dev->running = true;
}

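/*
 * Called by the WLAN driver through the mtk_soc_wed_ops pointer to claim a
 * WED instance. Entered with the RCU read lock held and drops it, hence
 * the __releases(RCU) annotation.
 */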
static int
mtk_wed_attach(struct mtk_wed_device *dev)
	__releases(RCU)
{
	struct mtk_wed_hw *hw;
	int ret = 0;

	RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
			 "mtk_wed_attach without holding the RCU read lock");

	if (pci_domain_nr(dev->wlan.pci_dev->bus) > 1 ||
	    !try_module_get(THIS_MODULE))
		ret = -ENODEV;

	rcu_read_unlock();

	if (ret)
		return ret;

	mutex_lock(&hw_lock);

	hw = mtk_wed_assign(dev);
	if (!hw) {
		module_put(THIS_MODULE);
		ret = -ENODEV;
		goto out;
	}

	dev_info(&dev->wlan.pci_dev->dev, "attaching wed device %d\n", hw->index);

	dev->hw = hw;
	dev->dev = hw->dev;
	dev->irq = hw->irq;
	dev->wdma_idx = hw->index;

	if (hw->eth->dma_dev == hw->eth->dev &&
	    of_dma_is_coherent(hw->eth->dev->of_node))
		mtk_eth_set_dma_device(hw->eth, hw->dev);

	ret = mtk_wed_buffer_alloc(dev);
	if (ret) {
		mtk_wed_detach(dev);
		goto out;
	}

	mtk_wed_hw_init_early(dev);
	regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP, BIT(hw->index), 0);

out:
	mutex_unlock(&hw_lock);

	return ret;
}

static int
mtk_wed_tx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs)
{
	struct mtk_wed_ring *ring = &dev->tx_ring[idx];

	/*
	 * Tx ring redirection:
	 * Instead of configuring the WLAN PDMA TX ring directly, the WLAN
	 * driver allocated DMA ring gets configured into WED MTK_WED_RING_TX(n)
	 * registers.
	 *
	 * WED driver posts its own DMA ring as WLAN PDMA TX and configures it
	 * into MTK_WED_WPDMA_RING_TX(n) registers.
	 * It gets filled with packets picked up from WED TX ring and from
	 * WDMA RX.
	 */

	BUG_ON(idx >= ARRAY_SIZE(dev->tx_ring));

	if (mtk_wed_ring_alloc(dev, ring, MTK_WED_TX_RING_SIZE))
		return -ENOMEM;

	if (mtk_wed_wdma_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE))
		return -ENOMEM;

	ring->reg_base = MTK_WED_RING_TX(idx);
	ring->wpdma = regs;

	/* WED -> WPDMA */
	wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_BASE, ring->desc_phys);
	wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_COUNT, MTK_WED_TX_RING_SIZE);
	wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_CPU_IDX, 0);

	wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_BASE,
		ring->desc_phys);
	wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_COUNT,
		MTK_WED_TX_RING_SIZE);
	wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0);

	return 0;
}

static int
mtk_wed_txfree_ring_setup(struct mtk_wed_device *dev, void __iomem *regs)
{
	struct mtk_wed_ring *ring = &dev->txfree_ring;
	int i;

	/*
	 * For txfree event handling, the same DMA ring is shared between WED
	 * and WLAN. The WLAN driver accesses the ring index registers through
	 * WED
	 */
	ring->reg_base = MTK_WED_RING_RX(1);
	ring->wpdma = regs;

	for (i = 0; i < 12; i += 4) {
		u32 val = readl(regs + i);

		wed_w32(dev, MTK_WED_RING_RX(1) + i, val);
		wed_w32(dev, MTK_WED_WPDMA_RING_RX(1) + i, val);
	}

	return 0;
}

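/*
 * Read and acknowledge the pending WED interrupts. Error bits from the
 * extra interrupt status register are only logged (rate-limited); the
 * masked regular interrupt status is returned to the caller.
 */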
static u32
mtk_wed_irq_get(struct mtk_wed_device *dev, u32 mask)
{
	u32 val;

	val = wed_r32(dev, MTK_WED_EXT_INT_STATUS);
	wed_w32(dev, MTK_WED_EXT_INT_STATUS, val);
	val &= MTK_WED_EXT_INT_STATUS_ERROR_MASK;
	if (!dev->hw->num_flows)
		val &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD;
	if (val && net_ratelimit())
		pr_err("mtk_wed%d: error status=%08x\n", dev->hw->index, val);

	val = wed_r32(dev, MTK_WED_INT_STATUS);
	val &= mask;
	wed_w32(dev, MTK_WED_INT_STATUS, val); /* ACK */

	return val;
}

static void
mtk_wed_irq_set_mask(struct mtk_wed_device *dev, u32 mask)
{
	if (!dev->running)
		return;

	mtk_wed_set_ext_int(dev, !!mask);
	wed_w32(dev, MTK_WED_INT_MASK, mask);
}

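/*
 * Flow offload reference counting: the first flow installed on a WED
 * instance calls the WLAN driver's offload_enable() callback, removing the
 * last one calls offload_disable().
 */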
int mtk_wed_flow_add(int index)
{
	struct mtk_wed_hw *hw = hw_list[index];
	int ret;

	if (!hw || !hw->wed_dev)
		return -ENODEV;

	if (hw->num_flows) {
		hw->num_flows++;
		return 0;
	}

	mutex_lock(&hw_lock);
	if (!hw->wed_dev) {
		ret = -ENODEV;
		goto out;
	}

	ret = hw->wed_dev->wlan.offload_enable(hw->wed_dev);
	if (!ret)
		hw->num_flows++;
	mtk_wed_set_ext_int(hw->wed_dev, true);

out:
	mutex_unlock(&hw_lock);

	return ret;
}

void mtk_wed_flow_remove(int index)
{
	struct mtk_wed_hw *hw = hw_list[index];

	if (!hw)
		return;

	if (--hw->num_flows)
		return;

	mutex_lock(&hw_lock);
	if (!hw->wed_dev)
		goto out;

	hw->wed_dev->wlan.offload_disable(hw->wed_dev);
	mtk_wed_set_ext_int(hw->wed_dev, true);

out:
	mutex_unlock(&hw_lock);
}

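/*
 * Called by the ethernet driver for each WED node found in the device
 * tree: publish the WED ops and record the per-instance state in
 * hw_list[].
 */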
void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
		    void __iomem *wdma, int index)
{
	static const struct mtk_wed_ops wed_ops = {
		.attach = mtk_wed_attach,
		.tx_ring_setup = mtk_wed_tx_ring_setup,
		.txfree_ring_setup = mtk_wed_txfree_ring_setup,
		.start = mtk_wed_start,
		.stop = mtk_wed_stop,
		.reset_dma = mtk_wed_reset_dma,
		.reg_read = wed_r32,
		.reg_write = wed_w32,
		.irq_get = mtk_wed_irq_get,
		.irq_set_mask = mtk_wed_irq_set_mask,
		.detach = mtk_wed_detach,
	};
	struct device_node *eth_np = eth->dev->of_node;
	struct platform_device *pdev;
	struct mtk_wed_hw *hw;
	struct regmap *regs;
	int irq;

	if (!np)
		return;

	pdev = of_find_device_by_node(np);
	if (!pdev)
		return;

	get_device(&pdev->dev);
	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		goto err_put_device;

	regs = syscon_regmap_lookup_by_phandle(np, NULL);
	if (IS_ERR(regs))
		goto err_put_device;

	rcu_assign_pointer(mtk_soc_wed_ops, &wed_ops);

	mutex_lock(&hw_lock);

	if (WARN_ON(hw_list[index]))
		goto unlock;

	hw = kzalloc(sizeof(*hw), GFP_KERNEL);
	if (!hw)
		goto unlock;
	hw->node = np;
	hw->regs = regs;
	hw->eth = eth;
	hw->dev = &pdev->dev;
	hw->wdma = wdma;
	hw->index = index;
	hw->irq = irq;
	hw->mirror = syscon_regmap_lookup_by_phandle(eth_np,
						     "mediatek,pcie-mirror");
	hw->hifsys = syscon_regmap_lookup_by_phandle(eth_np,
						     "mediatek,hifsys");
	if (IS_ERR(hw->mirror) || IS_ERR(hw->hifsys)) {
		kfree(hw);
		goto unlock;
	}

	if (!index) {
		regmap_write(hw->mirror, 0, 0);
		regmap_write(hw->mirror, 4, 0);
	}
	mtk_wed_hw_add_debugfs(hw);

	hw_list[index] = hw;

	mutex_unlock(&hw_lock);
	return;

unlock:
	mutex_unlock(&hw_lock);
err_put_device:
	/* drop the reference taken by get_device() above on any failure */
	put_device(&pdev->dev);
}

void mtk_wed_exit(void)
{
	int i;

	rcu_assign_pointer(mtk_soc_wed_ops, NULL);

	synchronize_rcu();

	for (i = 0; i < ARRAY_SIZE(hw_list); i++) {
		struct mtk_wed_hw *hw;

		hw = hw_list[i];
		if (!hw)
			continue;

		hw_list[i] = NULL;
		debugfs_remove(hw->debugfs_dir);
		put_device(hw->dev);
		kfree(hw);
	}
}