179968444SLorenzo Bianconi // SPDX-License-Identifier: GPL-2.0-only
279968444SLorenzo Bianconi /* Copyright (C) 2022 MediaTek Inc.
379968444SLorenzo Bianconi *
479968444SLorenzo Bianconi * Author: Lorenzo Bianconi <lorenzo@kernel.org>
579968444SLorenzo Bianconi * Sujuan Chen <sujuan.chen@mediatek.com>
679968444SLorenzo Bianconi */
779968444SLorenzo Bianconi
879968444SLorenzo Bianconi #include <linux/kernel.h>
979968444SLorenzo Bianconi #include <linux/dma-mapping.h>
1079968444SLorenzo Bianconi #include <linux/interrupt.h>
1179968444SLorenzo Bianconi #include <linux/mfd/syscon.h>
123d40aed8SRob Herring #include <linux/of.h>
1379968444SLorenzo Bianconi #include <linux/of_irq.h>
1479968444SLorenzo Bianconi #include <linux/bitfield.h>
1579968444SLorenzo Bianconi
1679968444SLorenzo Bianconi #include "mtk_wed.h"
1779968444SLorenzo Bianconi #include "mtk_wed_regs.h"
1879968444SLorenzo Bianconi #include "mtk_wed_wo.h"
1979968444SLorenzo Bianconi
2079968444SLorenzo Bianconi static u32
mtk_wed_mmio_r32(struct mtk_wed_wo * wo,u32 reg)2179968444SLorenzo Bianconi mtk_wed_mmio_r32(struct mtk_wed_wo *wo, u32 reg)
2279968444SLorenzo Bianconi {
2379968444SLorenzo Bianconi u32 val;
2479968444SLorenzo Bianconi
2579968444SLorenzo Bianconi if (regmap_read(wo->mmio.regs, reg, &val))
2679968444SLorenzo Bianconi val = ~0;
2779968444SLorenzo Bianconi
2879968444SLorenzo Bianconi return val;
2979968444SLorenzo Bianconi }
3079968444SLorenzo Bianconi
/* Write @val to the 32-bit WO mmio register @reg via the syscon regmap. */
static void
mtk_wed_mmio_w32(struct mtk_wed_wo *wo, u32 reg, u32 val)
{
	regmap_write(wo->mmio.regs, reg, val);
}
3679968444SLorenzo Bianconi
3779968444SLorenzo Bianconi static u32
mtk_wed_wo_get_isr(struct mtk_wed_wo * wo)3879968444SLorenzo Bianconi mtk_wed_wo_get_isr(struct mtk_wed_wo *wo)
3979968444SLorenzo Bianconi {
4079968444SLorenzo Bianconi u32 val = mtk_wed_mmio_r32(wo, MTK_WED_WO_CCIF_RCHNUM);
4179968444SLorenzo Bianconi
4279968444SLorenzo Bianconi return val & MTK_WED_WO_CCIF_RCHNUM_MASK;
4379968444SLorenzo Bianconi }
4479968444SLorenzo Bianconi
/* Program the hw interrupt mask register with @mask. */
static void
mtk_wed_wo_set_isr(struct mtk_wed_wo *wo, u32 mask)
{
	mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_IRQ0_MASK, mask);
}
5079968444SLorenzo Bianconi
/* Acknowledge the interrupts selected by @mask in the CCIF ack register. */
static void
mtk_wed_wo_set_ack(struct mtk_wed_wo *wo, u32 mask)
{
	mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_ACK, mask);
}
5679968444SLorenzo Bianconi
/* Update the cached irq mask under the mmio lock: clear the @mask bits,
 * set the @val bits and, when @set is true, also propagate the new mask
 * to the hw register.
 */
static void
mtk_wed_wo_set_isr_mask(struct mtk_wed_wo *wo, u32 mask, u32 val, bool set)
{
	unsigned long flags;

	spin_lock_irqsave(&wo->mmio.lock, flags);
	wo->mmio.irq_mask &= ~mask;
	wo->mmio.irq_mask |= val;
	if (set)
		mtk_wed_wo_set_isr(wo, wo->mmio.irq_mask);
	spin_unlock_irqrestore(&wo->mmio.lock, flags);
}
6979968444SLorenzo Bianconi
/* Unmask @mask interrupts in the cached mask (hw register is not written
 * here - set=false) and schedule the tasklet, which re-arms the hw mask
 * after processing any pending work.
 */
static void
mtk_wed_wo_irq_enable(struct mtk_wed_wo *wo, u32 mask)
{
	mtk_wed_wo_set_isr_mask(wo, 0, mask, false);
	tasklet_schedule(&wo->mmio.irq_tasklet);
}
7679968444SLorenzo Bianconi
/* Mask @mask interrupts in both the cached mask and the hw register. */
static void
mtk_wed_wo_irq_disable(struct mtk_wed_wo *wo, u32 mask)
{
	mtk_wed_wo_set_isr_mask(wo, mask, 0, true);
}
8279968444SLorenzo Bianconi
/* Ring the doorbell for the tx channel so the WO mcu picks up the newly
 * queued descriptors.
 */
static void
mtk_wed_wo_kickout(struct mtk_wed_wo *wo)
{
	mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_BUSY, 1 << MTK_WED_WO_TXCH_NUM);
	mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_TCHNUM, MTK_WED_WO_TXCH_NUM);
}
8979968444SLorenzo Bianconi
/* Publish a new cpu index @val for queue @q. */
static void
mtk_wed_wo_queue_kick(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
		      u32 val)
{
	/* make sure all prior descriptor writes are visible to the device
	 * before the index register update is observed
	 */
	wmb();
	mtk_wed_mmio_w32(wo, q->regs.cpu_idx, val);
}
9779968444SLorenzo Bianconi
/* Pop one completed buffer from @q.
 * Returns the buffer virtual address (already dma-unmapped) or NULL when
 * the queue is empty or the next descriptor has not been marked done by
 * the device. When @len is non-NULL it receives the length reported in the
 * descriptor ctrl word. With @flush set, DMA_DONE is forced so that every
 * queued buffer can be drained (used on queue teardown).
 */
static void *
mtk_wed_wo_dequeue(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q, u32 *len,
		   bool flush)
{
	int buf_len = SKB_WITH_OVERHEAD(q->buf_size);
	int index = (q->tail + 1) % q->n_desc;
	struct mtk_wed_wo_queue_entry *entry;
	struct mtk_wed_wo_queue_desc *desc;
	void *buf;

	if (!q->queued)
		return NULL;

	if (flush)
		q->desc[index].ctrl |= cpu_to_le32(MTK_WED_WO_CTL_DMA_DONE);
	else if (!(q->desc[index].ctrl & cpu_to_le32(MTK_WED_WO_CTL_DMA_DONE)))
		return NULL;

	/* consume the slot before touching its payload */
	q->tail = index;
	q->queued--;

	desc = &q->desc[index];
	entry = &q->entry[index];
	buf = entry->buf;
	if (len)
		*len = FIELD_GET(MTK_WED_WO_CTL_SD_LEN0,
				 le32_to_cpu(READ_ONCE(desc->ctrl)));
	if (buf)
		dma_unmap_single(wo->hw->dev, entry->addr, buf_len,
				 DMA_FROM_DEVICE);
	entry->buf = NULL;

	return buf;
}
13279968444SLorenzo Bianconi
/* Fill @q with fresh page-frag buffers until the ring is full.
 * Buffers are dma-mapped FROM_DEVICE for rx (@rx true) and TO_DEVICE for
 * tx; rx descriptors are additionally armed with buffer address/length.
 * Returns the number of buffers actually added (0 on allocation or
 * mapping failure before any progress).
 */
static int
mtk_wed_wo_queue_refill(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
			bool rx)
{
	enum dma_data_direction dir = rx ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
	int n_buf = 0;

	while (q->queued < q->n_desc) {
		struct mtk_wed_wo_queue_entry *entry;
		dma_addr_t addr;
		void *buf;

		buf = page_frag_alloc(&q->cache, q->buf_size, GFP_ATOMIC);
		if (!buf)
			break;

		addr = dma_map_single(wo->hw->dev, buf, q->buf_size, dir);
		if (unlikely(dma_mapping_error(wo->hw->dev, addr))) {
			skb_free_frag(buf);
			break;
		}

		q->head = (q->head + 1) % q->n_desc;
		entry = &q->entry[q->head];
		entry->addr = addr;
		entry->len = q->buf_size;
		q->entry[q->head].buf = buf;

		if (rx) {
			struct mtk_wed_wo_queue_desc *desc = &q->desc[q->head];
			u32 ctrl = MTK_WED_WO_CTL_LAST_SEC0 |
				   FIELD_PREP(MTK_WED_WO_CTL_SD_LEN0,
					      entry->len);

			WRITE_ONCE(desc->buf0, cpu_to_le32(addr));
			WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
		}
		q->queued++;
		n_buf++;
	}

	return n_buf;
}
17679968444SLorenzo Bianconi
/* Ack the rx channel interrupt and re-enable it after rx processing. */
static void
mtk_wed_wo_rx_complete(struct mtk_wed_wo *wo)
{
	mtk_wed_wo_set_ack(wo, MTK_WED_WO_RXCH_INT_MASK);
	mtk_wed_wo_irq_enable(wo, MTK_WED_WO_RXCH_INT_MASK);
}
18379968444SLorenzo Bianconi
/* Drain completed rx buffers from @q, wrap them into skbs and dispatch
 * them to the mcu layer - responses (WARP_CMD_FLAG_RSP set) vs
 * unsolicited events - then refill the ring and kick the new head index
 * to the device.
 */
static void
mtk_wed_wo_rx_run_queue(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
{
	for (;;) {
		struct mtk_wed_mcu_hdr *hdr;
		struct sk_buff *skb;
		void *data;
		u32 len;

		data = mtk_wed_wo_dequeue(wo, q, &len, false);
		if (!data)
			break;

		skb = build_skb(data, q->buf_size);
		if (!skb) {
			skb_free_frag(data);
			continue;
		}

		__skb_put(skb, len);
		if (mtk_wed_mcu_check_msg(wo, skb)) {
			dev_kfree_skb(skb);
			continue;
		}

		hdr = (struct mtk_wed_mcu_hdr *)skb->data;
		if (hdr->flag & cpu_to_le16(MTK_WED_WARP_CMD_FLAG_RSP))
			mtk_wed_mcu_rx_event(wo, skb);
		else
			mtk_wed_mcu_rx_unsolicited_event(wo, skb);
	}

	if (mtk_wed_wo_queue_refill(wo, q, true)) {
		/* NOTE(review): if q->head wrapped to 0 this computes the
		 * modulo of a negative value - verify q->head type/range.
		 */
		u32 index = (q->head - 1) % q->n_desc;

		mtk_wed_wo_queue_kick(wo, q, index);
	}
}
22279968444SLorenzo Bianconi
/* Hard irq handler: mask all WO interrupts and defer the actual work to
 * the tasklet.
 */
static irqreturn_t
mtk_wed_wo_irq_handler(int irq, void *data)
{
	struct mtk_wed_wo *wo = data;

	mtk_wed_wo_set_isr(wo, 0);
	tasklet_schedule(&wo->mmio.irq_tasklet);

	return IRQ_HANDLED;
}
23379968444SLorenzo Bianconi
mtk_wed_wo_irq_tasklet(struct tasklet_struct * t)23479968444SLorenzo Bianconi static void mtk_wed_wo_irq_tasklet(struct tasklet_struct *t)
23579968444SLorenzo Bianconi {
23679968444SLorenzo Bianconi struct mtk_wed_wo *wo = from_tasklet(wo, t, mmio.irq_tasklet);
23779968444SLorenzo Bianconi u32 intr, mask;
23879968444SLorenzo Bianconi
23979968444SLorenzo Bianconi /* disable interrupts */
24079968444SLorenzo Bianconi mtk_wed_wo_set_isr(wo, 0);
24179968444SLorenzo Bianconi
24279968444SLorenzo Bianconi intr = mtk_wed_wo_get_isr(wo);
24379968444SLorenzo Bianconi intr &= wo->mmio.irq_mask;
24479968444SLorenzo Bianconi mask = intr & (MTK_WED_WO_RXCH_INT_MASK | MTK_WED_WO_EXCEPTION_INT_MASK);
24579968444SLorenzo Bianconi mtk_wed_wo_irq_disable(wo, mask);
24679968444SLorenzo Bianconi
24779968444SLorenzo Bianconi if (intr & MTK_WED_WO_RXCH_INT_MASK) {
24879968444SLorenzo Bianconi mtk_wed_wo_rx_run_queue(wo, &wo->q_rx);
24979968444SLorenzo Bianconi mtk_wed_wo_rx_complete(wo);
25079968444SLorenzo Bianconi }
25179968444SLorenzo Bianconi }
25279968444SLorenzo Bianconi
25379968444SLorenzo Bianconi /* mtk wed wo hw queues */
25479968444SLorenzo Bianconi
/* Allocate the descriptor ring and the bookkeeping entry array for @q and
 * record the register layout @regs and geometry (@n_desc, @buf_size).
 * Both allocations are device-managed (dmam_/devm_), so the error paths
 * need no manual cleanup. @index is currently unused here - presumably
 * the channel number; confirm against callers. Returns 0 or -ENOMEM.
 */
static int
mtk_wed_wo_queue_alloc(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
		       int n_desc, int buf_size, int index,
		       struct mtk_wed_wo_queue_regs *regs)
{
	q->regs = *regs;
	q->n_desc = n_desc;
	q->buf_size = buf_size;

	q->desc = dmam_alloc_coherent(wo->hw->dev, n_desc * sizeof(*q->desc),
				      &q->desc_dma, GFP_KERNEL);
	if (!q->desc)
		return -ENOMEM;

	q->entry = devm_kzalloc(wo->hw->dev, n_desc * sizeof(*q->entry),
				GFP_KERNEL);
	if (!q->entry)
		return -ENOMEM;

	return 0;
}
27679968444SLorenzo Bianconi
27779968444SLorenzo Bianconi static void
mtk_wed_wo_queue_free(struct mtk_wed_wo * wo,struct mtk_wed_wo_queue * q)27879968444SLorenzo Bianconi mtk_wed_wo_queue_free(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
27979968444SLorenzo Bianconi {
28079968444SLorenzo Bianconi mtk_wed_mmio_w32(wo, q->regs.cpu_idx, 0);
28179968444SLorenzo Bianconi dma_free_coherent(wo->hw->dev, q->n_desc * sizeof(*q->desc), q->desc,
28279968444SLorenzo Bianconi q->desc_dma);
28379968444SLorenzo Bianconi }
28479968444SLorenzo Bianconi
/* Unmap and free every tx buffer still attached to @q, then drop the
 * page_frag cache backing the queue allocations.
 */
static void
mtk_wed_wo_queue_tx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
{
	struct page *page;
	int i;

	for (i = 0; i < q->n_desc; i++) {
		struct mtk_wed_wo_queue_entry *entry = &q->entry[i];

		/* slots may be empty if refill stopped early */
		if (!entry->buf)
			continue;

		dma_unmap_single(wo->hw->dev, entry->addr, entry->len,
				 DMA_TO_DEVICE);
		skb_free_frag(entry->buf);
		entry->buf = NULL;
	}

	if (!q->cache.va)
		return;

	page = virt_to_page(q->cache.va);
	__page_frag_cache_drain(page, q->cache.pagecnt_bias);
	memset(&q->cache, 0, sizeof(q->cache));
}
31079968444SLorenzo Bianconi
/* Drain every rx buffer from @q (flush mode forces DMA_DONE so all queued
 * buffers are returned), then drop the backing page_frag cache.
 */
static void
mtk_wed_wo_queue_rx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
{
	struct page *page;

	for (;;) {
		void *buf = mtk_wed_wo_dequeue(wo, q, NULL, true);

		if (!buf)
			break;

		skb_free_frag(buf);
	}

	if (!q->cache.va)
		return;

	page = virt_to_page(q->cache.va);
	__page_frag_cache_drain(page, q->cache.pagecnt_bias);
	memset(&q->cache, 0, sizeof(q->cache));
}
33279968444SLorenzo Bianconi
/* Program @q's hw registers: clear the cpu index and advertise the ring
 * base address and size to the device.
 */
static void
mtk_wed_wo_queue_reset(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
{
	mtk_wed_mmio_w32(wo, q->regs.cpu_idx, 0);
	mtk_wed_mmio_w32(wo, q->regs.desc_base, q->desc_dma);
	mtk_wed_mmio_w32(wo, q->regs.ring_size, q->n_desc);
}
34079968444SLorenzo Bianconi
mtk_wed_wo_queue_tx_skb(struct mtk_wed_wo * wo,struct mtk_wed_wo_queue * q,struct sk_buff * skb)34179968444SLorenzo Bianconi int mtk_wed_wo_queue_tx_skb(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
34279968444SLorenzo Bianconi struct sk_buff *skb)
34379968444SLorenzo Bianconi {
34479968444SLorenzo Bianconi struct mtk_wed_wo_queue_entry *entry;
34579968444SLorenzo Bianconi struct mtk_wed_wo_queue_desc *desc;
34679968444SLorenzo Bianconi int ret = 0, index;
34779968444SLorenzo Bianconi u32 ctrl;
34879968444SLorenzo Bianconi
34979968444SLorenzo Bianconi q->tail = mtk_wed_mmio_r32(wo, q->regs.dma_idx);
35079968444SLorenzo Bianconi index = (q->head + 1) % q->n_desc;
35179968444SLorenzo Bianconi if (q->tail == index) {
35279968444SLorenzo Bianconi ret = -ENOMEM;
35379968444SLorenzo Bianconi goto out;
35479968444SLorenzo Bianconi }
35579968444SLorenzo Bianconi
35679968444SLorenzo Bianconi entry = &q->entry[index];
35779968444SLorenzo Bianconi if (skb->len > entry->len) {
35879968444SLorenzo Bianconi ret = -ENOMEM;
35979968444SLorenzo Bianconi goto out;
36079968444SLorenzo Bianconi }
36179968444SLorenzo Bianconi
36279968444SLorenzo Bianconi desc = &q->desc[index];
36379968444SLorenzo Bianconi q->head = index;
36479968444SLorenzo Bianconi
36579968444SLorenzo Bianconi dma_sync_single_for_cpu(wo->hw->dev, entry->addr, skb->len,
36679968444SLorenzo Bianconi DMA_TO_DEVICE);
36779968444SLorenzo Bianconi memcpy(entry->buf, skb->data, skb->len);
36879968444SLorenzo Bianconi dma_sync_single_for_device(wo->hw->dev, entry->addr, skb->len,
36979968444SLorenzo Bianconi DMA_TO_DEVICE);
37079968444SLorenzo Bianconi
37179968444SLorenzo Bianconi ctrl = FIELD_PREP(MTK_WED_WO_CTL_SD_LEN0, skb->len) |
37279968444SLorenzo Bianconi MTK_WED_WO_CTL_LAST_SEC0 | MTK_WED_WO_CTL_DMA_DONE;
37379968444SLorenzo Bianconi WRITE_ONCE(desc->buf0, cpu_to_le32(entry->addr));
37479968444SLorenzo Bianconi WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
37579968444SLorenzo Bianconi
37679968444SLorenzo Bianconi mtk_wed_wo_queue_kick(wo, q, q->head);
37779968444SLorenzo Bianconi mtk_wed_wo_kickout(wo);
37879968444SLorenzo Bianconi out:
37979968444SLorenzo Bianconi dev_kfree_skb(skb);
38079968444SLorenzo Bianconi
38179968444SLorenzo Bianconi return ret;
38279968444SLorenzo Bianconi }
38379968444SLorenzo Bianconi
/* Placeholder for WO exception/coredump setup; currently a no-op. */
static int
mtk_wed_wo_exception_init(struct mtk_wed_wo *wo)
{
	return 0;
}
38979968444SLorenzo Bianconi
39079968444SLorenzo Bianconi static int
mtk_wed_wo_hardware_init(struct mtk_wed_wo * wo)39179968444SLorenzo Bianconi mtk_wed_wo_hardware_init(struct mtk_wed_wo *wo)
39279968444SLorenzo Bianconi {
39379968444SLorenzo Bianconi struct mtk_wed_wo_queue_regs regs;
39479968444SLorenzo Bianconi struct device_node *np;
39579968444SLorenzo Bianconi int ret;
39679968444SLorenzo Bianconi
39779968444SLorenzo Bianconi np = of_parse_phandle(wo->hw->node, "mediatek,wo-ccif", 0);
39879968444SLorenzo Bianconi if (!np)
39979968444SLorenzo Bianconi return -ENODEV;
40079968444SLorenzo Bianconi
40179968444SLorenzo Bianconi wo->mmio.regs = syscon_regmap_lookup_by_phandle(np, NULL);
402e22dcbc9SYuan Can if (IS_ERR(wo->mmio.regs)) {
403e22dcbc9SYuan Can ret = PTR_ERR(wo->mmio.regs);
404e22dcbc9SYuan Can goto error_put;
405e22dcbc9SYuan Can }
40679968444SLorenzo Bianconi
40779968444SLorenzo Bianconi wo->mmio.irq = irq_of_parse_and_map(np, 0);
40879968444SLorenzo Bianconi wo->mmio.irq_mask = MTK_WED_WO_ALL_INT_MASK;
40979968444SLorenzo Bianconi spin_lock_init(&wo->mmio.lock);
41079968444SLorenzo Bianconi tasklet_setup(&wo->mmio.irq_tasklet, mtk_wed_wo_irq_tasklet);
41179968444SLorenzo Bianconi
41279968444SLorenzo Bianconi ret = devm_request_irq(wo->hw->dev, wo->mmio.irq,
41379968444SLorenzo Bianconi mtk_wed_wo_irq_handler, IRQF_TRIGGER_HIGH,
41479968444SLorenzo Bianconi KBUILD_MODNAME, wo);
41579968444SLorenzo Bianconi if (ret)
41679968444SLorenzo Bianconi goto error;
41779968444SLorenzo Bianconi
41879968444SLorenzo Bianconi regs.desc_base = MTK_WED_WO_CCIF_DUMMY1;
41979968444SLorenzo Bianconi regs.ring_size = MTK_WED_WO_CCIF_DUMMY2;
42079968444SLorenzo Bianconi regs.dma_idx = MTK_WED_WO_CCIF_SHADOW4;
42179968444SLorenzo Bianconi regs.cpu_idx = MTK_WED_WO_CCIF_DUMMY3;
42279968444SLorenzo Bianconi
42379968444SLorenzo Bianconi ret = mtk_wed_wo_queue_alloc(wo, &wo->q_tx, MTK_WED_WO_RING_SIZE,
42479968444SLorenzo Bianconi MTK_WED_WO_CMD_LEN, MTK_WED_WO_TXCH_NUM,
42579968444SLorenzo Bianconi ®s);
42679968444SLorenzo Bianconi if (ret)
42779968444SLorenzo Bianconi goto error;
42879968444SLorenzo Bianconi
42965e6af6cSLorenzo Bianconi mtk_wed_wo_queue_refill(wo, &wo->q_tx, false);
43079968444SLorenzo Bianconi mtk_wed_wo_queue_reset(wo, &wo->q_tx);
43179968444SLorenzo Bianconi
43279968444SLorenzo Bianconi regs.desc_base = MTK_WED_WO_CCIF_DUMMY5;
43379968444SLorenzo Bianconi regs.ring_size = MTK_WED_WO_CCIF_DUMMY6;
43479968444SLorenzo Bianconi regs.dma_idx = MTK_WED_WO_CCIF_SHADOW8;
43579968444SLorenzo Bianconi regs.cpu_idx = MTK_WED_WO_CCIF_DUMMY7;
43679968444SLorenzo Bianconi
43779968444SLorenzo Bianconi ret = mtk_wed_wo_queue_alloc(wo, &wo->q_rx, MTK_WED_WO_RING_SIZE,
43879968444SLorenzo Bianconi MTK_WED_WO_CMD_LEN, MTK_WED_WO_RXCH_NUM,
43979968444SLorenzo Bianconi ®s);
44079968444SLorenzo Bianconi if (ret)
44179968444SLorenzo Bianconi goto error;
44279968444SLorenzo Bianconi
44365e6af6cSLorenzo Bianconi mtk_wed_wo_queue_refill(wo, &wo->q_rx, true);
44479968444SLorenzo Bianconi mtk_wed_wo_queue_reset(wo, &wo->q_rx);
44579968444SLorenzo Bianconi
44679968444SLorenzo Bianconi /* rx queue irqmask */
44779968444SLorenzo Bianconi mtk_wed_wo_set_isr(wo, wo->mmio.irq_mask);
44879968444SLorenzo Bianconi
44979968444SLorenzo Bianconi return 0;
45079968444SLorenzo Bianconi
45179968444SLorenzo Bianconi error:
45279968444SLorenzo Bianconi devm_free_irq(wo->hw->dev, wo->mmio.irq, wo);
453e22dcbc9SYuan Can error_put:
454e22dcbc9SYuan Can of_node_put(np);
45579968444SLorenzo Bianconi return ret;
45679968444SLorenzo Bianconi }
45779968444SLorenzo Bianconi
/* Tear down the WO channel: mask interrupts, stop the tasklet, release
 * the irq and clean/free both rings.
 */
static void
mtk_wed_wo_hw_deinit(struct mtk_wed_wo *wo)
{
	/* disable interrupts */
	mtk_wed_wo_set_isr(wo, 0);

	tasklet_disable(&wo->mmio.irq_tasklet);

	disable_irq(wo->mmio.irq);
	devm_free_irq(wo->hw->dev, wo->mmio.irq, wo);

	mtk_wed_wo_queue_tx_clean(wo, &wo->q_tx);
	mtk_wed_wo_queue_rx_clean(wo, &wo->q_rx);
	mtk_wed_wo_queue_free(wo, &wo->q_tx);
	mtk_wed_wo_queue_free(wo, &wo->q_rx);
}
47479968444SLorenzo Bianconi
mtk_wed_wo_init(struct mtk_wed_hw * hw)47579968444SLorenzo Bianconi int mtk_wed_wo_init(struct mtk_wed_hw *hw)
47679968444SLorenzo Bianconi {
47779968444SLorenzo Bianconi struct mtk_wed_wo *wo;
47879968444SLorenzo Bianconi int ret;
47979968444SLorenzo Bianconi
48079968444SLorenzo Bianconi wo = devm_kzalloc(hw->dev, sizeof(*wo), GFP_KERNEL);
48179968444SLorenzo Bianconi if (!wo)
48279968444SLorenzo Bianconi return -ENOMEM;
48379968444SLorenzo Bianconi
48479968444SLorenzo Bianconi hw->wed_wo = wo;
48579968444SLorenzo Bianconi wo->hw = hw;
48679968444SLorenzo Bianconi
48779968444SLorenzo Bianconi ret = mtk_wed_wo_hardware_init(wo);
48879968444SLorenzo Bianconi if (ret)
48979968444SLorenzo Bianconi return ret;
49079968444SLorenzo Bianconi
49179968444SLorenzo Bianconi ret = mtk_wed_mcu_init(wo);
49279968444SLorenzo Bianconi if (ret)
49379968444SLorenzo Bianconi return ret;
49479968444SLorenzo Bianconi
49579968444SLorenzo Bianconi return mtk_wed_wo_exception_init(wo);
49679968444SLorenzo Bianconi }
49779968444SLorenzo Bianconi
mtk_wed_wo_deinit(struct mtk_wed_hw * hw)49879968444SLorenzo Bianconi void mtk_wed_wo_deinit(struct mtk_wed_hw *hw)
49979968444SLorenzo Bianconi {
50079968444SLorenzo Bianconi struct mtk_wed_wo *wo = hw->wed_wo;
50179968444SLorenzo Bianconi
50279968444SLorenzo Bianconi mtk_wed_wo_hw_deinit(wo);
50379968444SLorenzo Bianconi }
504