xref: /openbmc/linux/drivers/net/ethernet/mediatek/mtk_wed_wo.c (revision c334ac6461d516c6d79dd10fd84cd69a00422966)
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2022 MediaTek Inc.
 *
 * Author: Lorenzo Bianconi <lorenzo@kernel.org>
 *	   Sujuan Chen <sujuan.chen@mediatek.com>
 */

#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include <linux/of_platform.h>
#include <linux/interrupt.h>
#include <linux/of_address.h>
#include <linux/mfd/syscon.h>
#include <linux/of_irq.h>
#include <linux/bitfield.h>

#include "mtk_wed.h"
#include "mtk_wed_regs.h"
#include "mtk_wed_wo.h"

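/* Register accessors for the WO CCIF region, backed by a syscon regmap.
 * A failed regmap read returns all-ones so callers see an obviously
 * invalid value instead of stale data.
 */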
static u32
mtk_wed_mmio_r32(struct mtk_wed_wo *wo, u32 reg)
{
	u32 val;

	if (regmap_read(wo->mmio.regs, reg, &val))
		val = ~0;

	return val;
}

static void
mtk_wed_mmio_w32(struct mtk_wed_wo *wo, u32 reg, u32 val)
{
	regmap_write(wo->mmio.regs, reg, val);
}

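/* Interrupt status/mask helpers: read the pending RX channel bits and
 * program the CCIF irq mask and ack registers.
 */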
static u32
mtk_wed_wo_get_isr(struct mtk_wed_wo *wo)
{
	u32 val = mtk_wed_mmio_r32(wo, MTK_WED_WO_CCIF_RCHNUM);

	return val & MTK_WED_WO_CCIF_RCHNUM_MASK;
}

static void
mtk_wed_wo_set_isr(struct mtk_wed_wo *wo, u32 mask)
{
	mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_IRQ0_MASK, mask);
}

static void
mtk_wed_wo_set_ack(struct mtk_wed_wo *wo, u32 mask)
{
	mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_ACK, mask);
}

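/* Update the cached interrupt mask under mmio.lock; if @set is true the
 * new mask is also written out to the hardware irq mask register.
 */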
static void
mtk_wed_wo_set_isr_mask(struct mtk_wed_wo *wo, u32 mask, u32 val, bool set)
{
	unsigned long flags;

	spin_lock_irqsave(&wo->mmio.lock, flags);
	wo->mmio.irq_mask &= ~mask;
	wo->mmio.irq_mask |= val;
	if (set)
		mtk_wed_wo_set_isr(wo, wo->mmio.irq_mask);
	spin_unlock_irqrestore(&wo->mmio.lock, flags);
}

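/* mtk_wed_wo_irq_enable() only updates the cached mask and kicks the irq
 * tasklet so the new mask gets written back to the hardware from tasklet
 * context; mtk_wed_wo_irq_disable() updates the hardware mask directly.
 */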
static void
mtk_wed_wo_irq_enable(struct mtk_wed_wo *wo, u32 mask)
{
	mtk_wed_wo_set_isr_mask(wo, 0, mask, false);
	tasklet_schedule(&wo->mmio.irq_tasklet);
}

static void
mtk_wed_wo_irq_disable(struct mtk_wed_wo *wo, u32 mask)
{
	mtk_wed_wo_set_isr_mask(wo, mask, 0, true);
}

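/* Signal the CCIF TX channel so the WO firmware fetches the newly queued
 * command descriptors.
 */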
static void
mtk_wed_wo_kickout(struct mtk_wed_wo *wo)
{
	mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_BUSY, 1 << MTK_WED_WO_TXCH_NUM);
	mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_TCHNUM, MTK_WED_WO_TXCH_NUM);
}

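/* Update the queue cpu index register to hand new descriptors over to the
 * device.
 */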
static void
mtk_wed_wo_queue_kick(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
		      u32 val)
{
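	/* make sure descriptor updates are visible before the index write */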
	wmb();
	mtk_wed_mmio_w32(wo, q->regs.cpu_idx, val);
}

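/* Pop the next completed buffer off @q, unmap it and return it to the
 * caller together with its DMA length. With @flush set the DMA_DONE bit
 * is forced so every remaining buffer is drained, e.g. on queue teardown.
 */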
static void *
mtk_wed_wo_dequeue(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q, u32 *len,
		   bool flush)
{
	int buf_len = SKB_WITH_OVERHEAD(q->buf_size);
	int index = (q->tail + 1) % q->n_desc;
	struct mtk_wed_wo_queue_entry *entry;
	struct mtk_wed_wo_queue_desc *desc;
	void *buf;

	if (!q->queued)
		return NULL;

	if (flush)
		q->desc[index].ctrl |= cpu_to_le32(MTK_WED_WO_CTL_DMA_DONE);
	else if (!(q->desc[index].ctrl & cpu_to_le32(MTK_WED_WO_CTL_DMA_DONE)))
		return NULL;

	q->tail = index;
	q->queued--;

	desc = &q->desc[index];
	entry = &q->entry[index];
	buf = entry->buf;
	if (len)
		*len = FIELD_GET(MTK_WED_WO_CTL_SD_LEN0,
				 le32_to_cpu(READ_ONCE(desc->ctrl)));
	if (buf)
		dma_unmap_single(wo->hw->dev, entry->addr, buf_len,
				 DMA_FROM_DEVICE);
	entry->buf = NULL;

	return buf;
}

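/* Fill all free slots of @q with freshly allocated page-fragment buffers
 * mapped for DMA; on RX queues the descriptors are also initialised so the
 * device can start writing into them. Returns the number of buffers added.
 */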
static int
mtk_wed_wo_queue_refill(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
			gfp_t gfp, bool rx)
{
	enum dma_data_direction dir = rx ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
	int n_buf = 0;

	spin_lock_bh(&q->lock);
	while (q->queued < q->n_desc) {
		void *buf = page_frag_alloc(&q->cache, q->buf_size, gfp);
		struct mtk_wed_wo_queue_entry *entry;
		dma_addr_t addr;

		if (!buf)
			break;

		addr = dma_map_single(wo->hw->dev, buf, q->buf_size, dir);
		if (unlikely(dma_mapping_error(wo->hw->dev, addr))) {
			skb_free_frag(buf);
			break;
		}

		q->head = (q->head + 1) % q->n_desc;
		entry = &q->entry[q->head];
		entry->addr = addr;
		entry->len = q->buf_size;
		q->entry[q->head].buf = buf;

		if (rx) {
			struct mtk_wed_wo_queue_desc *desc = &q->desc[q->head];
			u32 ctrl = MTK_WED_WO_CTL_LAST_SEC0 |
				   FIELD_PREP(MTK_WED_WO_CTL_SD_LEN0,
					      entry->len);

			WRITE_ONCE(desc->buf0, cpu_to_le32(addr));
			WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
		}
		q->queued++;
		n_buf++;
	}
	spin_unlock_bh(&q->lock);

	return n_buf;
}

static void
mtk_wed_wo_rx_complete(struct mtk_wed_wo *wo)
{
	mtk_wed_wo_set_ack(wo, MTK_WED_WO_RXCH_INT_MASK);
	mtk_wed_wo_irq_enable(wo, MTK_WED_WO_RXCH_INT_MASK);
}

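/* Drain all completed RX buffers from @q: wrap each one in an skb, hand
 * responses to the mcu event handler (or the unsolicited event handler)
 * and finally refill and kick the ring.
 */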
static void
mtk_wed_wo_rx_run_queue(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
{
	for (;;) {
		struct mtk_wed_mcu_hdr *hdr;
		struct sk_buff *skb;
		void *data;
		u32 len;

		data = mtk_wed_wo_dequeue(wo, q, &len, false);
		if (!data)
			break;

		skb = build_skb(data, q->buf_size);
		if (!skb) {
			skb_free_frag(data);
			continue;
		}

		__skb_put(skb, len);
		if (mtk_wed_mcu_check_msg(wo, skb)) {
			dev_kfree_skb(skb);
			continue;
		}

		hdr = (struct mtk_wed_mcu_hdr *)skb->data;
		if (hdr->flag & cpu_to_le16(MTK_WED_WARP_CMD_FLAG_RSP))
			mtk_wed_mcu_rx_event(wo, skb);
		else
			mtk_wed_mcu_rx_unsolicited_event(wo, skb);
	}

	if (mtk_wed_wo_queue_refill(wo, q, GFP_ATOMIC, true)) {
		u32 index = (q->head - 1) % q->n_desc;

		mtk_wed_wo_queue_kick(wo, q, index);
	}
}

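/* Hard irq handler: mask all CCIF interrupts and defer the actual work to
 * the irq tasklet.
 */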
static irqreturn_t
mtk_wed_wo_irq_handler(int irq, void *data)
{
	struct mtk_wed_wo *wo = data;

	mtk_wed_wo_set_isr(wo, 0);
	tasklet_schedule(&wo->mmio.irq_tasklet);

	return IRQ_HANDLED;
}

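/* Tasklet (bottom half): temporarily mask the pending interrupt sources,
 * process the RX queue and let mtk_wed_wo_rx_complete() ack and re-enable
 * the RX interrupt once the queue has been drained.
 */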
static void mtk_wed_wo_irq_tasklet(struct tasklet_struct *t)
{
	struct mtk_wed_wo *wo = from_tasklet(wo, t, mmio.irq_tasklet);
	u32 intr, mask;

	/* disable interrupts */
	mtk_wed_wo_set_isr(wo, 0);

	intr = mtk_wed_wo_get_isr(wo);
	intr &= wo->mmio.irq_mask;
	mask = intr & (MTK_WED_WO_RXCH_INT_MASK | MTK_WED_WO_EXCEPTION_INT_MASK);
	mtk_wed_wo_irq_disable(wo, mask);

	if (intr & MTK_WED_WO_RXCH_INT_MASK) {
		mtk_wed_wo_rx_run_queue(wo, &wo->q_rx);
		mtk_wed_wo_rx_complete(wo);
	}
}

/* mtk wed wo hw queues */

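/* Allocate the descriptor ring (coherent DMA memory) and the shadow entry
 * array for a WO queue and record its register layout.
 */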
static int
mtk_wed_wo_queue_alloc(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
		       int n_desc, int buf_size, int index,
		       struct mtk_wed_wo_queue_regs *regs)
{
	spin_lock_init(&q->lock);
	q->regs = *regs;
	q->n_desc = n_desc;
	q->buf_size = buf_size;

	q->desc = dmam_alloc_coherent(wo->hw->dev, n_desc * sizeof(*q->desc),
				      &q->desc_dma, GFP_KERNEL);
	if (!q->desc)
		return -ENOMEM;

	q->entry = devm_kzalloc(wo->hw->dev, n_desc * sizeof(*q->entry),
				GFP_KERNEL);
	if (!q->entry)
		return -ENOMEM;

	return 0;
}

static void
mtk_wed_wo_queue_free(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
{
	mtk_wed_mmio_w32(wo, q->regs.cpu_idx, 0);
	dma_free_coherent(wo->hw->dev, q->n_desc * sizeof(*q->desc), q->desc,
			  q->desc_dma);
}

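/* Unmap and free all buffers attached to @q and release the backing
 * page-fragment cache. The RX variant below drains the ring through
 * mtk_wed_wo_dequeue() with the flush flag set instead.
 */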
static void
mtk_wed_wo_queue_tx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
{
	struct page *page;
	int i;

	spin_lock_bh(&q->lock);
	for (i = 0; i < q->n_desc; i++) {
		struct mtk_wed_wo_queue_entry *entry = &q->entry[i];

		dma_unmap_single(wo->hw->dev, entry->addr, entry->len,
				 DMA_TO_DEVICE);
		skb_free_frag(entry->buf);
		entry->buf = NULL;
	}
	spin_unlock_bh(&q->lock);

	if (!q->cache.va)
		return;

	page = virt_to_page(q->cache.va);
	__page_frag_cache_drain(page, q->cache.pagecnt_bias);
	memset(&q->cache, 0, sizeof(q->cache));
}

static void
mtk_wed_wo_queue_rx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
{
	struct page *page;

	spin_lock_bh(&q->lock);
	for (;;) {
		void *buf = mtk_wed_wo_dequeue(wo, q, NULL, true);

		if (!buf)
			break;

		skb_free_frag(buf);
	}
	spin_unlock_bh(&q->lock);

	if (!q->cache.va)
		return;

	page = virt_to_page(q->cache.va);
	__page_frag_cache_drain(page, q->cache.pagecnt_bias);
	memset(&q->cache, 0, sizeof(q->cache));
}

static void
mtk_wed_wo_queue_reset(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
{
	mtk_wed_mmio_w32(wo, q->regs.cpu_idx, 0);
	mtk_wed_mmio_w32(wo, q->regs.desc_base, q->desc_dma);
	mtk_wed_mmio_w32(wo, q->regs.ring_size, q->n_desc);
}

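/* Copy @skb into the next free TX slot of @q, publish the descriptor and
 * kick both the queue and the CCIF TX channel. The skb is always consumed;
 * -ENOMEM is returned if the ring is full or the payload does not fit into
 * a preallocated buffer.
 */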
int mtk_wed_wo_queue_tx_skb(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
			    struct sk_buff *skb)
{
	struct mtk_wed_wo_queue_entry *entry;
	struct mtk_wed_wo_queue_desc *desc;
	int ret = 0, index;
	u32 ctrl;

	spin_lock_bh(&q->lock);

	q->tail = mtk_wed_mmio_r32(wo, q->regs.dma_idx);
	index = (q->head + 1) % q->n_desc;
	if (q->tail == index) {
		ret = -ENOMEM;
		goto out;
	}

	entry = &q->entry[index];
	if (skb->len > entry->len) {
		ret = -ENOMEM;
		goto out;
	}

	desc = &q->desc[index];
	q->head = index;

	dma_sync_single_for_cpu(wo->hw->dev, entry->addr, skb->len,
				DMA_TO_DEVICE);
	memcpy(entry->buf, skb->data, skb->len);
	dma_sync_single_for_device(wo->hw->dev, entry->addr, skb->len,
				   DMA_TO_DEVICE);

	ctrl = FIELD_PREP(MTK_WED_WO_CTL_SD_LEN0, skb->len) |
	       MTK_WED_WO_CTL_LAST_SEC0 | MTK_WED_WO_CTL_DMA_DONE;
	WRITE_ONCE(desc->buf0, cpu_to_le32(entry->addr));
	WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));

	mtk_wed_wo_queue_kick(wo, q, q->head);
	mtk_wed_wo_kickout(wo);
out:
	spin_unlock_bh(&q->lock);

	dev_kfree_skb(skb);

	return ret;
}

static int
mtk_wed_wo_exception_init(struct mtk_wed_wo *wo)
{
	return 0;
}

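/* Look up the CCIF regmap and interrupt from the "mediatek,wo-ccif" node,
 * register the irq handler/tasklet and bring up the TX and RX command
 * rings used to talk to the WO MCU.
 */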
static int
mtk_wed_wo_hardware_init(struct mtk_wed_wo *wo)
{
	struct mtk_wed_wo_queue_regs regs;
	struct device_node *np;
	int ret;

	np = of_parse_phandle(wo->hw->node, "mediatek,wo-ccif", 0);
	if (!np)
		return -ENODEV;

	wo->mmio.regs = syscon_regmap_lookup_by_phandle(np, NULL);
	if (IS_ERR_OR_NULL(wo->mmio.regs))
		return PTR_ERR(wo->mmio.regs);

	wo->mmio.irq = irq_of_parse_and_map(np, 0);
	wo->mmio.irq_mask = MTK_WED_WO_ALL_INT_MASK;
	spin_lock_init(&wo->mmio.lock);
	tasklet_setup(&wo->mmio.irq_tasklet, mtk_wed_wo_irq_tasklet);

	ret = devm_request_irq(wo->hw->dev, wo->mmio.irq,
			       mtk_wed_wo_irq_handler, IRQF_TRIGGER_HIGH,
			       KBUILD_MODNAME, wo);
	if (ret)
		goto error;

	regs.desc_base = MTK_WED_WO_CCIF_DUMMY1;
	regs.ring_size = MTK_WED_WO_CCIF_DUMMY2;
	regs.dma_idx = MTK_WED_WO_CCIF_SHADOW4;
	regs.cpu_idx = MTK_WED_WO_CCIF_DUMMY3;

	ret = mtk_wed_wo_queue_alloc(wo, &wo->q_tx, MTK_WED_WO_RING_SIZE,
				     MTK_WED_WO_CMD_LEN, MTK_WED_WO_TXCH_NUM,
				     &regs);
	if (ret)
		goto error;

	mtk_wed_wo_queue_refill(wo, &wo->q_tx, GFP_KERNEL, false);
	mtk_wed_wo_queue_reset(wo, &wo->q_tx);

	regs.desc_base = MTK_WED_WO_CCIF_DUMMY5;
	regs.ring_size = MTK_WED_WO_CCIF_DUMMY6;
	regs.dma_idx = MTK_WED_WO_CCIF_SHADOW8;
	regs.cpu_idx = MTK_WED_WO_CCIF_DUMMY7;

	ret = mtk_wed_wo_queue_alloc(wo, &wo->q_rx, MTK_WED_WO_RING_SIZE,
				     MTK_WED_WO_CMD_LEN, MTK_WED_WO_RXCH_NUM,
				     &regs);
	if (ret)
		goto error;

	mtk_wed_wo_queue_refill(wo, &wo->q_rx, GFP_KERNEL, true);
	mtk_wed_wo_queue_reset(wo, &wo->q_rx);

	/* rx queue irqmask */
	mtk_wed_wo_set_isr(wo, wo->mmio.irq_mask);

	return 0;

error:
	devm_free_irq(wo->hw->dev, wo->mmio.irq, wo);

	return ret;
}

static void
mtk_wed_wo_hw_deinit(struct mtk_wed_wo *wo)
{
	/* disable interrupts */
	mtk_wed_wo_set_isr(wo, 0);

	tasklet_disable(&wo->mmio.irq_tasklet);

	disable_irq(wo->mmio.irq);
	devm_free_irq(wo->hw->dev, wo->mmio.irq, wo);

	mtk_wed_wo_queue_tx_clean(wo, &wo->q_tx);
	mtk_wed_wo_queue_rx_clean(wo, &wo->q_rx);
	mtk_wed_wo_queue_free(wo, &wo->q_tx);
	mtk_wed_wo_queue_free(wo, &wo->q_rx);
}

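/* Allocate the mtk_wed_wo context for @hw, bring up the CCIF hardware and
 * initialise the WO MCU interface.
 */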
int mtk_wed_wo_init(struct mtk_wed_hw *hw)
{
	struct mtk_wed_wo *wo;
	int ret;

	wo = devm_kzalloc(hw->dev, sizeof(*wo), GFP_KERNEL);
	if (!wo)
		return -ENOMEM;

	hw->wed_wo = wo;
	wo->hw = hw;

	ret = mtk_wed_wo_hardware_init(wo);
	if (ret)
		return ret;

	ret = mtk_wed_mcu_init(wo);
	if (ret)
		return ret;

	return mtk_wed_wo_exception_init(wo);
}

void mtk_wed_wo_deinit(struct mtk_wed_hw *hw)
{
	struct mtk_wed_wo *wo = hw->wed_wo;

	mtk_wed_wo_hw_deinit(wo);
}