// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2022 MediaTek Inc.
 *
 * Author: Lorenzo Bianconi <lorenzo@kernel.org>
 *	   Sujuan Chen <sujuan.chen@mediatek.com>
 */

#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include <linux/of_platform.h>
#include <linux/interrupt.h>
#include <linux/of_address.h>
#include <linux/mfd/syscon.h>
#include <linux/of_irq.h>
#include <linux/bitfield.h>

#include "mtk_wed.h"
#include "mtk_wed_regs.h"
#include "mtk_wed_wo.h"

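/* mmio helpers: the WO CCIF register block is accessed through a syscon
 * regmap; reads return ~0 if the regmap access fails.
 */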
static u32
mtk_wed_mmio_r32(struct mtk_wed_wo *wo, u32 reg)
{
	u32 val;

	if (regmap_read(wo->mmio.regs, reg, &val))
		val = ~0;

	return val;
}

static void
mtk_wed_mmio_w32(struct mtk_wed_wo *wo, u32 reg, u32 val)
{
	regmap_write(wo->mmio.regs, reg, val);
}

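/* Interrupt status, mask and ack accessors for the WO CCIF registers. */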
static u32
mtk_wed_wo_get_isr(struct mtk_wed_wo *wo)
{
	u32 val = mtk_wed_mmio_r32(wo, MTK_WED_WO_CCIF_RCHNUM);

	return val & MTK_WED_WO_CCIF_RCHNUM_MASK;
}

static void
mtk_wed_wo_set_isr(struct mtk_wed_wo *wo, u32 mask)
{
	mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_IRQ0_MASK, mask);
}

static void
mtk_wed_wo_set_ack(struct mtk_wed_wo *wo, u32 mask)
{
	mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_ACK, mask);
}

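/* Update the software irq mask under mmio.lock: clear @mask, set @val and,
 * when @set is true, propagate the result to the hardware mask register.
 */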
static void
mtk_wed_wo_set_isr_mask(struct mtk_wed_wo *wo, u32 mask, u32 val, bool set)
{
	unsigned long flags;

	spin_lock_irqsave(&wo->mmio.lock, flags);
	wo->mmio.irq_mask &= ~mask;
	wo->mmio.irq_mask |= val;
	if (set)
		mtk_wed_wo_set_isr(wo, wo->mmio.irq_mask);
	spin_unlock_irqrestore(&wo->mmio.lock, flags);
}

static void
mtk_wed_wo_irq_enable(struct mtk_wed_wo *wo, u32 mask)
{
	mtk_wed_wo_set_isr_mask(wo, 0, mask, false);
	tasklet_schedule(&wo->mmio.irq_tasklet);
}

static void
mtk_wed_wo_irq_disable(struct mtk_wed_wo *wo, u32 mask)
{
	mtk_wed_wo_set_isr_mask(wo, mask, 0, true);
}

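/* Notify the WO that new tx data is pending: mark the tx channel busy and
 * trigger it through the CCIF tx channel register.
 */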
static void
mtk_wed_wo_kickout(struct mtk_wed_wo *wo)
{
	mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_BUSY, 1 << MTK_WED_WO_TXCH_NUM);
	mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_TCHNUM, MTK_WED_WO_TXCH_NUM);
}

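/* Publish a new cpu ring index; the write barrier orders the descriptor
 * updates before the index becomes visible to the WO.
 */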
static void
mtk_wed_wo_queue_kick(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
		      u32 val)
{
	wmb();
	mtk_wed_mmio_w32(wo, q->regs.cpu_idx, val);
}

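/* Pop one completed buffer from @q, unmapping it and returning its length
 * via @len. Returns NULL if the ring is empty or the next descriptor has
 * not been marked done; with @flush set, descriptors are reclaimed
 * unconditionally.
 */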
static void *
mtk_wed_wo_dequeue(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q, u32 *len,
		   bool flush)
{
	int buf_len = SKB_WITH_OVERHEAD(q->buf_size);
	int index = (q->tail + 1) % q->n_desc;
	struct mtk_wed_wo_queue_entry *entry;
	struct mtk_wed_wo_queue_desc *desc;
	void *buf;

	if (!q->queued)
		return NULL;

	if (flush)
		q->desc[index].ctrl |= cpu_to_le32(MTK_WED_WO_CTL_DMA_DONE);
	else if (!(q->desc[index].ctrl & cpu_to_le32(MTK_WED_WO_CTL_DMA_DONE)))
		return NULL;

	q->tail = index;
	q->queued--;

	desc = &q->desc[index];
	entry = &q->entry[index];
	buf = entry->buf;
	if (len)
		*len = FIELD_GET(MTK_WED_WO_CTL_SD_LEN0,
				 le32_to_cpu(READ_ONCE(desc->ctrl)));
	if (buf)
		dma_unmap_single(wo->hw->dev, entry->addr, buf_len,
				 DMA_FROM_DEVICE);
	entry->buf = NULL;

	return buf;
}

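/* Fill @q with DMA-mapped page-frag buffers. For rx queues the descriptors
 * are initialised as well, so the WO can start writing into the fresh
 * buffers. Returns the number of buffers added.
 */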
static int
mtk_wed_wo_queue_refill(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
			bool rx)
{
	enum dma_data_direction dir = rx ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
	int n_buf = 0;

	spin_lock_bh(&q->lock);
	while (q->queued < q->n_desc) {
		struct mtk_wed_wo_queue_entry *entry;
		dma_addr_t addr;
		void *buf;

		buf = page_frag_alloc(&q->cache, q->buf_size, GFP_ATOMIC);
		if (!buf)
			break;

		addr = dma_map_single(wo->hw->dev, buf, q->buf_size, dir);
		if (unlikely(dma_mapping_error(wo->hw->dev, addr))) {
			skb_free_frag(buf);
			break;
		}

		q->head = (q->head + 1) % q->n_desc;
		entry = &q->entry[q->head];
		entry->addr = addr;
		entry->len = q->buf_size;
		entry->buf = buf;

		if (rx) {
			struct mtk_wed_wo_queue_desc *desc = &q->desc[q->head];
			u32 ctrl = MTK_WED_WO_CTL_LAST_SEC0 |
				   FIELD_PREP(MTK_WED_WO_CTL_SD_LEN0,
					      entry->len);

			WRITE_ONCE(desc->buf0, cpu_to_le32(addr));
			WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
		}
		q->queued++;
		n_buf++;
	}
	spin_unlock_bh(&q->lock);

	return n_buf;
}

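/* Ack the rx channel interrupt and re-enable it once the queue has been
 * processed.
 */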
static void
mtk_wed_wo_rx_complete(struct mtk_wed_wo *wo)
{
	mtk_wed_wo_set_ack(wo, MTK_WED_WO_RXCH_INT_MASK);
	mtk_wed_wo_irq_enable(wo, MTK_WED_WO_RXCH_INT_MASK);
}

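/* Drain completed rx buffers, turn them into skbs and hand them to the mcu
 * layer (command responses vs unsolicited events), then refill and kick
 * the ring.
 */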
static void
mtk_wed_wo_rx_run_queue(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
{
	for (;;) {
		struct mtk_wed_mcu_hdr *hdr;
		struct sk_buff *skb;
		void *data;
		u32 len;

		data = mtk_wed_wo_dequeue(wo, q, &len, false);
		if (!data)
			break;

		skb = build_skb(data, q->buf_size);
		if (!skb) {
			skb_free_frag(data);
			continue;
		}

		__skb_put(skb, len);
		if (mtk_wed_mcu_check_msg(wo, skb)) {
			dev_kfree_skb(skb);
			continue;
		}

		hdr = (struct mtk_wed_mcu_hdr *)skb->data;
		if (hdr->flag & cpu_to_le16(MTK_WED_WARP_CMD_FLAG_RSP))
			mtk_wed_mcu_rx_event(wo, skb);
		else
			mtk_wed_mcu_rx_unsolicited_event(wo, skb);
	}

	if (mtk_wed_wo_queue_refill(wo, q, true)) {
		u32 index = (q->head - 1) % q->n_desc;

		mtk_wed_wo_queue_kick(wo, q, index);
	}
}

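/* Hard irq handler: mask all WO interrupts and defer the work to the
 * tasklet.
 */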
static irqreturn_t
mtk_wed_wo_irq_handler(int irq, void *data)
{
	struct mtk_wed_wo *wo = data;

	mtk_wed_wo_set_isr(wo, 0);
	tasklet_schedule(&wo->mmio.irq_tasklet);

	return IRQ_HANDLED;
}

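/* Tasklet: collect the pending interrupt sources and process the rx queue. */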
static void mtk_wed_wo_irq_tasklet(struct tasklet_struct *t)
{
	struct mtk_wed_wo *wo = from_tasklet(wo, t, mmio.irq_tasklet);
	u32 intr, mask;

	/* disable interrupts */
	mtk_wed_wo_set_isr(wo, 0);

	intr = mtk_wed_wo_get_isr(wo);
	intr &= wo->mmio.irq_mask;
	mask = intr & (MTK_WED_WO_RXCH_INT_MASK | MTK_WED_WO_EXCEPTION_INT_MASK);
	mtk_wed_wo_irq_disable(wo, mask);

	if (intr & MTK_WED_WO_RXCH_INT_MASK) {
		mtk_wed_wo_rx_run_queue(wo, &wo->q_rx);
		mtk_wed_wo_rx_complete(wo);
	}
}

/* mtk wed wo hw queues */

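/* Allocate the descriptor ring (coherent DMA memory) and the entry array
 * for @q; both allocations are device-managed.
 */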
static int
mtk_wed_wo_queue_alloc(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
		       int n_desc, int buf_size, int index,
		       struct mtk_wed_wo_queue_regs *regs)
{
	spin_lock_init(&q->lock);
	q->regs = *regs;
	q->n_desc = n_desc;
	q->buf_size = buf_size;

	q->desc = dmam_alloc_coherent(wo->hw->dev, n_desc * sizeof(*q->desc),
				      &q->desc_dma, GFP_KERNEL);
	if (!q->desc)
		return -ENOMEM;

	q->entry = devm_kzalloc(wo->hw->dev, n_desc * sizeof(*q->entry),
				GFP_KERNEL);
	if (!q->entry)
		return -ENOMEM;

	return 0;
}

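/* Stop the queue by clearing its cpu index and release the descriptor ring. */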
static void
mtk_wed_wo_queue_free(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
{
	mtk_wed_mmio_w32(wo, q->regs.cpu_idx, 0);
	dma_free_coherent(wo->hw->dev, q->n_desc * sizeof(*q->desc), q->desc,
			  q->desc_dma);
}

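/* Unmap and free every tx buffer still attached to @q, then drop the
 * page-frag cache.
 */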
static void
mtk_wed_wo_queue_tx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
{
	struct page *page;
	int i;

	spin_lock_bh(&q->lock);
	for (i = 0; i < q->n_desc; i++) {
		struct mtk_wed_wo_queue_entry *entry = &q->entry[i];

		/* skip slots that never got a buffer (e.g. refill failure) */
		if (!entry->buf)
			continue;

		dma_unmap_single(wo->hw->dev, entry->addr, entry->len,
				 DMA_TO_DEVICE);
		skb_free_frag(entry->buf);
		entry->buf = NULL;
	}
	spin_unlock_bh(&q->lock);

	if (!q->cache.va)
		return;

	page = virt_to_page(q->cache.va);
	__page_frag_cache_drain(page, q->cache.pagecnt_bias);
	memset(&q->cache, 0, sizeof(q->cache));
}

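/* Flush all rx descriptors, free their buffers and drop the page-frag
 * cache.
 */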
static void
mtk_wed_wo_queue_rx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
{
	struct page *page;

	spin_lock_bh(&q->lock);
	for (;;) {
		void *buf = mtk_wed_wo_dequeue(wo, q, NULL, true);

		if (!buf)
			break;

		skb_free_frag(buf);
	}
	spin_unlock_bh(&q->lock);

	if (!q->cache.va)
		return;

	page = virt_to_page(q->cache.va);
	__page_frag_cache_drain(page, q->cache.pagecnt_bias);
	memset(&q->cache, 0, sizeof(q->cache));
}

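/* Program the ring base, size and cpu index registers for @q. */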
static void
mtk_wed_wo_queue_reset(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
{
	mtk_wed_mmio_w32(wo, q->regs.cpu_idx, 0);
	mtk_wed_mmio_w32(wo, q->regs.desc_base, q->desc_dma);
	mtk_wed_mmio_w32(wo, q->regs.ring_size, q->n_desc);
}

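/* Copy @skb into the next free tx buffer and hand it to the WO. Returns
 * -ENOMEM if the ring is full or the message does not fit into a tx
 * buffer; the skb is consumed in all cases.
 */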
int mtk_wed_wo_queue_tx_skb(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
			    struct sk_buff *skb)
{
	struct mtk_wed_wo_queue_entry *entry;
	struct mtk_wed_wo_queue_desc *desc;
	int ret = 0, index;
	u32 ctrl;

	spin_lock_bh(&q->lock);

	q->tail = mtk_wed_mmio_r32(wo, q->regs.dma_idx);
	index = (q->head + 1) % q->n_desc;
	if (q->tail == index) {
		ret = -ENOMEM;
		goto out;
	}

	entry = &q->entry[index];
	if (skb->len > entry->len) {
		ret = -ENOMEM;
		goto out;
	}

	desc = &q->desc[index];
	q->head = index;

	dma_sync_single_for_cpu(wo->hw->dev, entry->addr, skb->len,
				DMA_TO_DEVICE);
	memcpy(entry->buf, skb->data, skb->len);
	dma_sync_single_for_device(wo->hw->dev, entry->addr, skb->len,
				   DMA_TO_DEVICE);

	ctrl = FIELD_PREP(MTK_WED_WO_CTL_SD_LEN0, skb->len) |
	       MTK_WED_WO_CTL_LAST_SEC0 | MTK_WED_WO_CTL_DMA_DONE;
	WRITE_ONCE(desc->buf0, cpu_to_le32(entry->addr));
	WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));

	mtk_wed_wo_queue_kick(wo, q, q->head);
	mtk_wed_wo_kickout(wo);
out:
	spin_unlock_bh(&q->lock);

	dev_kfree_skb(skb);

	return ret;
}

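/* WO exception (firmware coredump) handling is not implemented yet. */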
static int
mtk_wed_wo_exception_init(struct mtk_wed_wo *wo)
{
	return 0;
}

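/* Look up the CCIF regmap and irq through the "mediatek,wo-ccif" phandle,
 * install the irq handler and set up the tx/rx communication rings.
 */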
static int
mtk_wed_wo_hardware_init(struct mtk_wed_wo *wo)
{
	struct mtk_wed_wo_queue_regs regs;
	struct device_node *np;
	int ret;

	np = of_parse_phandle(wo->hw->node, "mediatek,wo-ccif", 0);
	if (!np)
		return -ENODEV;

	wo->mmio.regs = syscon_regmap_lookup_by_phandle(np, NULL);
	if (IS_ERR(wo->mmio.regs)) {
		ret = PTR_ERR(wo->mmio.regs);
		goto error_put;
	}

	wo->mmio.irq = irq_of_parse_and_map(np, 0);
	wo->mmio.irq_mask = MTK_WED_WO_ALL_INT_MASK;
	spin_lock_init(&wo->mmio.lock);
	tasklet_setup(&wo->mmio.irq_tasklet, mtk_wed_wo_irq_tasklet);

	ret = devm_request_irq(wo->hw->dev, wo->mmio.irq,
			       mtk_wed_wo_irq_handler, IRQF_TRIGGER_HIGH,
			       KBUILD_MODNAME, wo);
	if (ret)
		goto error;

	regs.desc_base = MTK_WED_WO_CCIF_DUMMY1;
	regs.ring_size = MTK_WED_WO_CCIF_DUMMY2;
	regs.dma_idx = MTK_WED_WO_CCIF_SHADOW4;
	regs.cpu_idx = MTK_WED_WO_CCIF_DUMMY3;

	ret = mtk_wed_wo_queue_alloc(wo, &wo->q_tx, MTK_WED_WO_RING_SIZE,
				     MTK_WED_WO_CMD_LEN, MTK_WED_WO_TXCH_NUM,
				     &regs);
	if (ret)
		goto error;

	mtk_wed_wo_queue_refill(wo, &wo->q_tx, false);
	mtk_wed_wo_queue_reset(wo, &wo->q_tx);

	regs.desc_base = MTK_WED_WO_CCIF_DUMMY5;
	regs.ring_size = MTK_WED_WO_CCIF_DUMMY6;
	regs.dma_idx = MTK_WED_WO_CCIF_SHADOW8;
	regs.cpu_idx = MTK_WED_WO_CCIF_DUMMY7;

	ret = mtk_wed_wo_queue_alloc(wo, &wo->q_rx, MTK_WED_WO_RING_SIZE,
				     MTK_WED_WO_CMD_LEN, MTK_WED_WO_RXCH_NUM,
				     &regs);
	if (ret)
		goto error;

	mtk_wed_wo_queue_refill(wo, &wo->q_rx, true);
	mtk_wed_wo_queue_reset(wo, &wo->q_rx);

	/* rx queue irqmask */
	mtk_wed_wo_set_isr(wo, wo->mmio.irq_mask);

	return 0;

error:
	devm_free_irq(wo->hw->dev, wo->mmio.irq, wo);
error_put:
	of_node_put(np);
	return ret;
}

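/* Disable and release the WO irq, then clean up and free both
 * communication rings.
 */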
static void
mtk_wed_wo_hw_deinit(struct mtk_wed_wo *wo)
{
	/* disable interrupts */
	mtk_wed_wo_set_isr(wo, 0);

	tasklet_disable(&wo->mmio.irq_tasklet);

	disable_irq(wo->mmio.irq);
	devm_free_irq(wo->hw->dev, wo->mmio.irq, wo);

	mtk_wed_wo_queue_tx_clean(wo, &wo->q_tx);
	mtk_wed_wo_queue_rx_clean(wo, &wo->q_rx);
	mtk_wed_wo_queue_free(wo, &wo->q_tx);
	mtk_wed_wo_queue_free(wo, &wo->q_rx);
}

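/* Allocate the WO context for @hw, bring up the CCIF rings and initialise
 * the WO mcu.
 */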
int mtk_wed_wo_init(struct mtk_wed_hw *hw)
{
	struct mtk_wed_wo *wo;
	int ret;

	wo = devm_kzalloc(hw->dev, sizeof(*wo), GFP_KERNEL);
	if (!wo)
		return -ENOMEM;

	hw->wed_wo = wo;
	wo->hw = hw;

	ret = mtk_wed_wo_hardware_init(wo);
	if (ret)
		return ret;

	ret = mtk_wed_mcu_init(wo);
	if (ret)
		return ret;

	return mtk_wed_wo_exception_init(wo);
}

void mtk_wed_wo_deinit(struct mtk_wed_hw *hw)
{
	struct mtk_wed_wo *wo = hw->wed_wo;

	mtk_wed_wo_hw_deinit(wo);
}