Lines matching +full:wo +full:-ccif (drivers/net/ethernet/mediatek/mtk_wed_wo.c)

   1	// SPDX-License-Identifier: GPL-2.0-only
   9	#include <linux/dma-mapping.h>

  21	mtk_wed_mmio_r32(struct mtk_wed_wo *wo, u32 reg)
  25		if (regmap_read(wo->mmio.regs, reg, &val))

  32	mtk_wed_mmio_w32(struct mtk_wed_wo *wo, u32 reg, u32 val)
  34		regmap_write(wo->mmio.regs, reg, val);

  38	mtk_wed_wo_get_isr(struct mtk_wed_wo *wo)
  40		u32 val = mtk_wed_mmio_r32(wo, MTK_WED_WO_CCIF_RCHNUM);

  46	mtk_wed_wo_set_isr(struct mtk_wed_wo *wo, u32 mask)
  48		mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_IRQ0_MASK, mask);

  52	mtk_wed_wo_set_ack(struct mtk_wed_wo *wo, u32 mask)
  54		mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_ACK, mask);

  58	mtk_wed_wo_set_isr_mask(struct mtk_wed_wo *wo, u32 mask, u32 val, bool set)
  62		spin_lock_irqsave(&wo->mmio.lock, flags);
  63		wo->mmio.irq_mask &= ~mask;
  64		wo->mmio.irq_mask |= val;
  66			mtk_wed_wo_set_isr(wo, wo->mmio.irq_mask);
  67		spin_unlock_irqrestore(&wo->mmio.lock, flags);

  71	mtk_wed_wo_irq_enable(struct mtk_wed_wo *wo, u32 mask)
  73		mtk_wed_wo_set_isr_mask(wo, 0, mask, false);
  74		tasklet_schedule(&wo->mmio.irq_tasklet);

  78	mtk_wed_wo_irq_disable(struct mtk_wed_wo *wo, u32 mask)
  80		mtk_wed_wo_set_isr_mask(wo, mask, 0, true);

  84	mtk_wed_wo_kickout(struct mtk_wed_wo *wo)
  86		mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_BUSY, 1 << MTK_WED_WO_TXCH_NUM);
  87		mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_TCHNUM, MTK_WED_WO_TXCH_NUM);
  91	mtk_wed_wo_queue_kick(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
  95		mtk_wed_mmio_w32(wo, q->regs.cpu_idx, val);

  99	mtk_wed_wo_dequeue(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q, u32 *len,
 102		int buf_len = SKB_WITH_OVERHEAD(q->buf_size);
 103		int index = (q->tail + 1) % q->n_desc;
 108		if (!q->queued)
 112			q->desc[index].ctrl |= cpu_to_le32(MTK_WED_WO_CTL_DMA_DONE);
 113		else if (!(q->desc[index].ctrl & cpu_to_le32(MTK_WED_WO_CTL_DMA_DONE)))
 116		q->tail = index;
 117		q->queued--;
 119		desc = &q->desc[index];
 120		entry = &q->entry[index];
 121		buf = entry->buf;
 124			 le32_to_cpu(READ_ONCE(desc->ctrl)));
 126		dma_unmap_single(wo->hw->dev, entry->addr, buf_len,
 128		entry->buf = NULL;
 134	mtk_wed_wo_queue_refill(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
 140		while (q->queued < q->n_desc) {
 145			buf = page_frag_alloc(&q->cache, q->buf_size, GFP_ATOMIC);
 149			addr = dma_map_single(wo->hw->dev, buf, q->buf_size, dir);
 150			if (unlikely(dma_mapping_error(wo->hw->dev, addr))) {
 155			q->head = (q->head + 1) % q->n_desc;
 156			entry = &q->entry[q->head];
 157			entry->addr = addr;
 158			entry->len = q->buf_size;
 159			q->entry[q->head].buf = buf;
 162				struct mtk_wed_wo_queue_desc *desc = &q->desc[q->head];
 165					   entry->len);
 167				WRITE_ONCE(desc->buf0, cpu_to_le32(addr));
 168				WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
 170			q->queued++;
 178	mtk_wed_wo_rx_complete(struct mtk_wed_wo *wo)
 180		mtk_wed_wo_set_ack(wo, MTK_WED_WO_RXCH_INT_MASK);
 181		mtk_wed_wo_irq_enable(wo, MTK_WED_WO_RXCH_INT_MASK);

 185	mtk_wed_wo_rx_run_queue(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
 193			data = mtk_wed_wo_dequeue(wo, q, &len, false);
 197			skb = build_skb(data, q->buf_size);
 204			if (mtk_wed_mcu_check_msg(wo, skb)) {
 209			hdr = (struct mtk_wed_mcu_hdr *)skb->data;
 210			if (hdr->flag & cpu_to_le16(MTK_WED_WARP_CMD_FLAG_RSP))
 211				mtk_wed_mcu_rx_event(wo, skb);
 213				mtk_wed_mcu_rx_unsolicited_event(wo, skb);
 216		if (mtk_wed_wo_queue_refill(wo, q, true)) {
 217			u32 index = (q->head - 1) % q->n_desc;
 219			mtk_wed_wo_queue_kick(wo, q, index);
in mtk_wed_wo_irq_handler():
 226		struct mtk_wed_wo *wo = data;
 228		mtk_wed_wo_set_isr(wo, 0);
 229		tasklet_schedule(&wo->mmio.irq_tasklet);

in mtk_wed_wo_irq_tasklet():
 236		struct mtk_wed_wo *wo = from_tasklet(wo, t, mmio.irq_tasklet);
 240		mtk_wed_wo_set_isr(wo, 0);
 242		intr = mtk_wed_wo_get_isr(wo);
 243		intr &= wo->mmio.irq_mask;
 245		mtk_wed_wo_irq_disable(wo, mask);
 248			mtk_wed_wo_rx_run_queue(wo, &wo->q_rx);
 249			mtk_wed_wo_rx_complete(wo);
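Note: the match listing shows the interrupt path only in fragments. As a rough sketch of how these helpers fit together (reconstructed around the fragments above; the mask computation and the if-condition are assumptions, not verbatim driver code), the hard IRQ handler masks the CCIF interrupts and defers to the tasklet, which services the RX channel and then re-arms it:

/* Sketch only: approximate bottom-half flow for the WO CCIF interrupt. */
static void wo_irq_bottom_half_sketch(struct mtk_wed_wo *wo)
{
	u32 intr, mask;

	mtk_wed_wo_set_isr(wo, 0);		/* mask all CCIF interrupts */
	intr = mtk_wed_wo_get_isr(wo);		/* pending channel bits */
	intr &= wo->mmio.irq_mask;		/* honour the software mask */
	mask = intr & MTK_WED_WO_RXCH_INT_MASK;
	mtk_wed_wo_irq_disable(wo, mask);	/* keep serviced bits masked */

	if (intr & MTK_WED_WO_RXCH_INT_MASK) {
		mtk_wed_wo_rx_run_queue(wo, &wo->q_rx);
		mtk_wed_wo_rx_complete(wo);	/* ack + unmask the RX channel */
	}
}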
 253	/* mtk wed wo hw queues */

 256	mtk_wed_wo_queue_alloc(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
 260		q->regs = *regs;
 261		q->n_desc = n_desc;
 262		q->buf_size = buf_size;
 264		q->desc = dmam_alloc_coherent(wo->hw->dev, n_desc * sizeof(*q->desc),
 265					      &q->desc_dma, GFP_KERNEL);
 266		if (!q->desc)
 267			return -ENOMEM;
 269		q->entry = devm_kzalloc(wo->hw->dev, n_desc * sizeof(*q->entry),
 271		if (!q->entry)
 272			return -ENOMEM;

 278	mtk_wed_wo_queue_free(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
 280		mtk_wed_mmio_w32(wo, q->regs.cpu_idx, 0);
 281		dma_free_coherent(wo->hw->dev, q->n_desc * sizeof(*q->desc), q->desc,
 282				  q->desc_dma);
 286	mtk_wed_wo_queue_tx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
 291		for (i = 0; i < q->n_desc; i++) {
 292			struct mtk_wed_wo_queue_entry *entry = &q->entry[i];
 294			if (!entry->buf)
 297			dma_unmap_single(wo->hw->dev, entry->addr, entry->len,
 299			skb_free_frag(entry->buf);
 300			entry->buf = NULL;
 303		if (!q->cache.va)
 306		page = virt_to_page(q->cache.va);
 307		__page_frag_cache_drain(page, q->cache.pagecnt_bias);
 308		memset(&q->cache, 0, sizeof(q->cache));

 312	mtk_wed_wo_queue_rx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
 317			void *buf = mtk_wed_wo_dequeue(wo, q, NULL, true);
 325		if (!q->cache.va)
 328		page = virt_to_page(q->cache.va);
 329		__page_frag_cache_drain(page, q->cache.pagecnt_bias);
 330		memset(&q->cache, 0, sizeof(q->cache));

 334	mtk_wed_wo_queue_reset(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
 336		mtk_wed_mmio_w32(wo, q->regs.cpu_idx, 0);
 337		mtk_wed_mmio_w32(wo, q->regs.desc_base, q->desc_dma);
 338		mtk_wed_mmio_w32(wo, q->regs.ring_size, q->n_desc);
 341	int mtk_wed_wo_queue_tx_skb(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
 349		q->tail = mtk_wed_mmio_r32(wo, q->regs.dma_idx);
 350		index = (q->head + 1) % q->n_desc;
 351		if (q->tail == index) {
 352			ret = -ENOMEM;
 356		entry = &q->entry[index];
 357		if (skb->len > entry->len) {
 358			ret = -ENOMEM;
 362		desc = &q->desc[index];
 363		q->head = index;
 365		dma_sync_single_for_cpu(wo->hw->dev, entry->addr, skb->len,
 367		memcpy(entry->buf, skb->data, skb->len);
 368		dma_sync_single_for_device(wo->hw->dev, entry->addr, skb->len,
 371		ctrl = FIELD_PREP(MTK_WED_WO_CTL_SD_LEN0, skb->len) |
 373		WRITE_ONCE(desc->buf0, cpu_to_le32(entry->addr));
 374		WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
 376		mtk_wed_wo_queue_kick(wo, q, q->head);
 377		mtk_wed_wo_kickout(wo);
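For context, a minimal caller-side sketch of the TX path (the helper name and the flat command layout are illustrative; the driver's MCU layer also prepends a struct mtk_wed_mcu_hdr): a command is packed into an skb and handed to the WO TX ring, where mtk_wed_wo_queue_tx_skb() copies the payload into a pre-mapped ring buffer and rings the CCIF doorbell via mtk_wed_wo_kickout().

/* Illustrative only: sending a command buffer through the WO TX queue. */
static int wo_send_cmd_sketch(struct mtk_wed_wo *wo, const void *cmd, int len)
{
	struct sk_buff *skb;

	skb = alloc_skb(len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	skb_put_data(skb, cmd, len);	/* copy the command into the skb */
	/* the callee copies skb->data into the ring and consumes the skb */
	return mtk_wed_wo_queue_tx_skb(wo, &wo->q_tx, skb);
}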
 385	mtk_wed_wo_exception_init(struct mtk_wed_wo *wo)

 391	mtk_wed_wo_hardware_init(struct mtk_wed_wo *wo)
 397		np = of_parse_phandle(wo->hw->node, "mediatek,wo-ccif", 0);
 399			return -ENODEV;
 401		wo->mmio.regs = syscon_regmap_lookup_by_phandle(np, NULL);
 402		if (IS_ERR(wo->mmio.regs)) {
 403			ret = PTR_ERR(wo->mmio.regs);
 407		wo->mmio.irq = irq_of_parse_and_map(np, 0);
 408		wo->mmio.irq_mask = MTK_WED_WO_ALL_INT_MASK;
 409		spin_lock_init(&wo->mmio.lock);
 410		tasklet_setup(&wo->mmio.irq_tasklet, mtk_wed_wo_irq_tasklet);
 412		ret = devm_request_irq(wo->hw->dev, wo->mmio.irq,
 414				       KBUILD_MODNAME, wo);
 423		ret = mtk_wed_wo_queue_alloc(wo, &wo->q_tx, MTK_WED_WO_RING_SIZE,
 429		mtk_wed_wo_queue_refill(wo, &wo->q_tx, false);
 430		mtk_wed_wo_queue_reset(wo, &wo->q_tx);
 437		ret = mtk_wed_wo_queue_alloc(wo, &wo->q_rx, MTK_WED_WO_RING_SIZE,
 443		mtk_wed_wo_queue_refill(wo, &wo->q_rx, true);
 444		mtk_wed_wo_queue_reset(wo, &wo->q_rx);
 447		mtk_wed_wo_set_isr(wo, wo->mmio.irq_mask);
 452		devm_free_irq(wo->hw->dev, wo->mmio.irq, wo);
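The CCIF block is found through the "mediatek,wo-ccif" phandle in the WED node and used as a syscon regmap plus an interrupt line. A hedged sketch of that lookup with the error unwinding the match listing omits (label names and the exact of_node_put() placement are assumptions, not verbatim driver code):

/* Sketch of the "mediatek,wo-ccif" lookup; not a copy of the driver. */
static int wo_ccif_lookup_sketch(struct mtk_wed_wo *wo)
{
	struct device_node *np;
	int ret;

	np = of_parse_phandle(wo->hw->node, "mediatek,wo-ccif", 0);
	if (!np)
		return -ENODEV;

	/* the phandle target is a syscon node; NULL means "use np itself" */
	wo->mmio.regs = syscon_regmap_lookup_by_phandle(np, NULL);
	if (IS_ERR(wo->mmio.regs)) {
		ret = PTR_ERR(wo->mmio.regs);
		goto err_put;
	}

	wo->mmio.irq = irq_of_parse_and_map(np, 0);
	of_node_put(np);
	return 0;

err_put:
	of_node_put(np);
	return ret;
}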
 459	mtk_wed_wo_hw_deinit(struct mtk_wed_wo *wo)
 462		mtk_wed_wo_set_isr(wo, 0);
 464		tasklet_disable(&wo->mmio.irq_tasklet);
 466		disable_irq(wo->mmio.irq);
 467		devm_free_irq(wo->hw->dev, wo->mmio.irq, wo);
 469		mtk_wed_wo_queue_tx_clean(wo, &wo->q_tx);
 470		mtk_wed_wo_queue_rx_clean(wo, &wo->q_rx);
 471		mtk_wed_wo_queue_free(wo, &wo->q_tx);
 472		mtk_wed_wo_queue_free(wo, &wo->q_rx);

in mtk_wed_wo_init():
 477		struct mtk_wed_wo *wo;
 480		wo = devm_kzalloc(hw->dev, sizeof(*wo), GFP_KERNEL);
 481		if (!wo)
 482			return -ENOMEM;
 484		hw->wed_wo = wo;
 485		wo->hw = hw;
 487		ret = mtk_wed_wo_hardware_init(wo);
 491		ret = mtk_wed_mcu_init(wo);
 495		return mtk_wed_wo_exception_init(wo);

in mtk_wed_wo_deinit():
 500		struct mtk_wed_wo *wo = hw->wed_wo;
 502		mtk_wed_wo_hw_deinit(wo);