Lines matching references to identifier 'q' (cross-reference listing over the mtk_wed_wo queue management code)

91 mtk_wed_wo_queue_kick(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,  in mtk_wed_wo_queue_kick()  argument
95 mtk_wed_mmio_w32(wo, q->regs.cpu_idx, val); in mtk_wed_wo_queue_kick()
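The two fragments above form the doorbell helper: whatever index value the caller passes is written straight into the queue's cpu_idx register. A minimal sketch of how they fit together, assuming the third parameter is a u32 index and adding a write barrier before the MMIO write (neither detail is visible in the listing); all sketches below likewise assume the driver's internal headers (mtk_wed_wo.h and related definitions):

static void
mtk_wed_wo_queue_kick(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
                      u32 val)
{
        /* make descriptor updates visible before ringing the doorbell
         * (the barrier is an assumption, not shown in the listing)
         */
        wmb();
        mtk_wed_mmio_w32(wo, q->regs.cpu_idx, val);
}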
99 mtk_wed_wo_dequeue(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q, u32 *len, in mtk_wed_wo_dequeue() argument
102 int buf_len = SKB_WITH_OVERHEAD(q->buf_size); in mtk_wed_wo_dequeue()
103 int index = (q->tail + 1) % q->n_desc; in mtk_wed_wo_dequeue()
108 if (!q->queued) in mtk_wed_wo_dequeue()
112 q->desc[index].ctrl |= cpu_to_le32(MTK_WED_WO_CTL_DMA_DONE); in mtk_wed_wo_dequeue()
113 else if (!(q->desc[index].ctrl & cpu_to_le32(MTK_WED_WO_CTL_DMA_DONE))) in mtk_wed_wo_dequeue()
116 q->tail = index; in mtk_wed_wo_dequeue()
117 q->queued--; in mtk_wed_wo_dequeue()
119 desc = &q->desc[index]; in mtk_wed_wo_dequeue()
120 entry = &q->entry[index]; in mtk_wed_wo_dequeue()
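These fragments trace the consume path: advance the tail, require the DMA_DONE bit (or force it when flushing, which the fourth argument seen in the mtk_wed_wo_queue_rx_clean() call further down suggests), then hand back the buffer recorded in the shadow entry array. A sketch of one plausible assembly of the visible lines; the flush parameter name, the entry->buf/entry->addr fields, the MTK_WED_WO_CTL_SD_LEN0 field and the dma_unmap_single()/FIELD_GET() calls are assumptions filled in around what the listing shows:

static void *
mtk_wed_wo_dequeue(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q, u32 *len,
                   bool flush)
{
        int buf_len = SKB_WITH_OVERHEAD(q->buf_size);
        int index = (q->tail + 1) % q->n_desc;
        struct mtk_wed_wo_queue_entry *entry;
        struct mtk_wed_wo_queue_desc *desc;
        void *buf;

        if (!q->queued)
                return NULL;

        if (flush)
                /* force completion so every pending buffer can be reclaimed */
                q->desc[index].ctrl |= cpu_to_le32(MTK_WED_WO_CTL_DMA_DONE);
        else if (!(q->desc[index].ctrl & cpu_to_le32(MTK_WED_WO_CTL_DMA_DONE)))
                return NULL;

        q->tail = index;
        q->queued--;

        desc = &q->desc[index];
        entry = &q->entry[index];

        /* the remainder is assumed: report the received length, unmap the
         * DMA buffer and return it to the caller
         */
        buf = entry->buf;
        if (len)
                *len = FIELD_GET(MTK_WED_WO_CTL_SD_LEN0,
                                 le32_to_cpu(desc->ctrl));
        if (buf)
                dma_unmap_single(wo->hw->dev, entry->addr, buf_len,
                                 DMA_FROM_DEVICE);
        entry->buf = NULL;

        return buf;
}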
134 mtk_wed_wo_queue_refill(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q, in mtk_wed_wo_queue_refill() argument
140 while (q->queued < q->n_desc) { in mtk_wed_wo_queue_refill()
145 buf = page_frag_alloc(&q->cache, q->buf_size, GFP_ATOMIC); in mtk_wed_wo_queue_refill()
149 addr = dma_map_single(wo->hw->dev, buf, q->buf_size, dir); in mtk_wed_wo_queue_refill()
155 q->head = (q->head + 1) % q->n_desc; in mtk_wed_wo_queue_refill()
156 entry = &q->entry[q->head]; in mtk_wed_wo_queue_refill()
158 entry->len = q->buf_size; in mtk_wed_wo_queue_refill()
159 q->entry[q->head].buf = buf; in mtk_wed_wo_queue_refill()
162 struct mtk_wed_wo_queue_desc *desc = &q->desc[q->head]; in mtk_wed_wo_queue_refill()
170 q->queued++; in mtk_wed_wo_queue_refill()
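The refill fragments show the producer side: keep allocating page fragments and DMA-mapping them until the ring is full, advancing the head and recording each buffer in the shadow entry. The bool third argument is visible in the mtk_wed_wo_queue_refill(wo, q, true) call below, so a sketch under the assumption that it selects RX vs. TX direction; the error handling, the entry->addr field, the descriptor field names (buf0) and the return value are assumptions:

static int
mtk_wed_wo_queue_refill(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
                        bool rx)
{
        enum dma_data_direction dir = rx ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
        int n_buf = 0;

        while (q->queued < q->n_desc) {
                struct mtk_wed_wo_queue_entry *entry;
                dma_addr_t addr;
                void *buf;

                buf = page_frag_alloc(&q->cache, q->buf_size, GFP_ATOMIC);
                if (!buf)
                        break;

                addr = dma_map_single(wo->hw->dev, buf, q->buf_size, dir);
                if (dma_mapping_error(wo->hw->dev, addr)) {
                        skb_free_frag(buf);
                        break;
                }

                q->head = (q->head + 1) % q->n_desc;
                entry = &q->entry[q->head];
                entry->addr = addr;                /* assumed field */
                entry->len = q->buf_size;
                q->entry[q->head].buf = buf;

                if (rx) {
                        /* RX descriptors are assumed to be programmed with
                         * the buffer address and length so the firmware can
                         * DMA into them; exact control bits are not visible
                         */
                        struct mtk_wed_wo_queue_desc *desc = &q->desc[q->head];

                        desc->buf0 = cpu_to_le32(addr);
                        desc->ctrl = cpu_to_le32(FIELD_PREP(MTK_WED_WO_CTL_SD_LEN0,
                                                            entry->len));
                }
                q->queued++;
                n_buf++;
        }

        return n_buf;
}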
185 mtk_wed_wo_rx_run_queue(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q) in mtk_wed_wo_rx_run_queue() argument
193 data = mtk_wed_wo_dequeue(wo, q, &len, false); in mtk_wed_wo_rx_run_queue()
197 skb = build_skb(data, q->buf_size); in mtk_wed_wo_rx_run_queue()
216 if (mtk_wed_wo_queue_refill(wo, q, true)) { in mtk_wed_wo_rx_run_queue()
217 u32 index = (q->head - 1) % q->n_desc; in mtk_wed_wo_rx_run_queue()
219 mtk_wed_wo_queue_kick(wo, q, index); in mtk_wed_wo_rx_run_queue()
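The RX run-queue fragments show the poll loop: dequeue completed buffers, wrap each one in an skb with build_skb(), then refill the ring and ring the doorbell with the last written slot. A sketch under those assumptions; the loop structure, the skb trimming and the consumer that the skb is handed to are not in the listing and are placeholders:

static void
mtk_wed_wo_rx_run_queue(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
{
        for (;;) {
                struct sk_buff *skb;
                u32 len;
                void *data;

                data = mtk_wed_wo_dequeue(wo, q, &len, false);
                if (!data)
                        break;

                skb = build_skb(data, q->buf_size);
                if (!skb) {
                        skb_free_frag(data);
                        continue;
                }

                /* hand-off to the firmware event handler is assumed;
                 * mtk_wed_wo_rx_handler() is a hypothetical name
                 */
                skb_put(skb, len);
                mtk_wed_wo_rx_handler(wo, skb);
        }

        /* put fresh RX buffers back on the ring and kick the firmware with
         * the index of the last slot that was written
         */
        if (mtk_wed_wo_queue_refill(wo, q, true)) {
                u32 index = (q->head - 1) % q->n_desc;

                mtk_wed_wo_queue_kick(wo, q, index);
        }
}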
256 mtk_wed_wo_queue_alloc(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q, in mtk_wed_wo_queue_alloc() argument
260 q->regs = *regs; in mtk_wed_wo_queue_alloc()
261 q->n_desc = n_desc; in mtk_wed_wo_queue_alloc()
262 q->buf_size = buf_size; in mtk_wed_wo_queue_alloc()
264 q->desc = dmam_alloc_coherent(wo->hw->dev, n_desc * sizeof(*q->desc), in mtk_wed_wo_queue_alloc()
265 &q->desc_dma, GFP_KERNEL); in mtk_wed_wo_queue_alloc()
266 if (!q->desc) in mtk_wed_wo_queue_alloc()
269 q->entry = devm_kzalloc(wo->hw->dev, n_desc * sizeof(*q->entry), in mtk_wed_wo_queue_alloc()
271 if (!q->entry) in mtk_wed_wo_queue_alloc()
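The allocation fragments are nearly complete: the register block, ring size and buffer size are cached in the queue, the descriptor ring is allocated as coherent DMA memory and the CPU-side shadow entries as zeroed kernel memory, both through the managed (dmam_/devm_) helpers. The full parameter list is truncated in the listing, so n_desc, buf_size and regs are inferred from the assignments shown; the -ENOMEM returns are assumptions:

static int
mtk_wed_wo_queue_alloc(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
                       int n_desc, int buf_size,
                       struct mtk_wed_wo_queue_regs *regs)
{
        q->regs = *regs;
        q->n_desc = n_desc;
        q->buf_size = buf_size;

        /* descriptor ring shared with the firmware: coherent DMA memory,
         * lifetime tied to the device via devres
         */
        q->desc = dmam_alloc_coherent(wo->hw->dev, n_desc * sizeof(*q->desc),
                                      &q->desc_dma, GFP_KERNEL);
        if (!q->desc)
                return -ENOMEM;

        /* CPU-side shadow entries tracking buffer pointer, DMA address
         * and length for each slot
         */
        q->entry = devm_kzalloc(wo->hw->dev, n_desc * sizeof(*q->entry),
                                GFP_KERNEL);
        if (!q->entry)
                return -ENOMEM;

        return 0;
}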
278 mtk_wed_wo_queue_free(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q) in mtk_wed_wo_queue_free() argument
280 mtk_wed_mmio_w32(wo, q->regs.cpu_idx, 0); in mtk_wed_wo_queue_free()
281 dma_free_coherent(wo->hw->dev, q->n_desc * sizeof(*q->desc), q->desc, in mtk_wed_wo_queue_free()
282 q->desc_dma); in mtk_wed_wo_queue_free()
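The teardown fragments quiesce the queue by clearing the doorbell index and then release the descriptor ring. Note that the alloc fragment above obtains the ring with the managed dmam_alloc_coherent() while this path frees it with the unmanaged dma_free_coherent(); whether that pairing is intentional cannot be judged from the listing alone. A sketch, with only the comments added:

static void
mtk_wed_wo_queue_free(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
{
        /* stop the firmware from fetching further descriptors, then drop
         * the ring memory
         */
        mtk_wed_mmio_w32(wo, q->regs.cpu_idx, 0);
        dma_free_coherent(wo->hw->dev, q->n_desc * sizeof(*q->desc), q->desc,
                          q->desc_dma);
}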
286 mtk_wed_wo_queue_tx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q) in mtk_wed_wo_queue_tx_clean() argument
291 for (i = 0; i < q->n_desc; i++) { in mtk_wed_wo_queue_tx_clean()
292 struct mtk_wed_wo_queue_entry *entry = &q->entry[i]; in mtk_wed_wo_queue_tx_clean()
303 if (!q->cache.va) in mtk_wed_wo_queue_tx_clean()
306 page = virt_to_page(q->cache.va); in mtk_wed_wo_queue_tx_clean()
307 __page_frag_cache_drain(page, q->cache.pagecnt_bias); in mtk_wed_wo_queue_tx_clean()
308 memset(&q->cache, 0, sizeof(q->cache)); in mtk_wed_wo_queue_tx_clean()
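The TX clean fragments walk every slot's shadow entry and finally drain the page_frag cache that backed the buffers. The per-entry teardown in the middle of the loop is not in the listing; unmapping and freeing each pending buffer is an assumption in the sketch below:

static void
mtk_wed_wo_queue_tx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
{
        struct page *page;
        int i;

        for (i = 0; i < q->n_desc; i++) {
                struct mtk_wed_wo_queue_entry *entry = &q->entry[i];

                /* assumed: release whatever is still parked in this slot */
                if (!entry->buf)
                        continue;

                dma_unmap_single(wo->hw->dev, entry->addr, entry->len,
                                 DMA_TO_DEVICE);
                skb_free_frag(entry->buf);
                entry->buf = NULL;
        }

        /* drop the page_frag cache the buffers were carved out of */
        if (!q->cache.va)
                return;

        page = virt_to_page(q->cache.va);
        __page_frag_cache_drain(page, q->cache.pagecnt_bias);
        memset(&q->cache, 0, sizeof(q->cache));
}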
312 mtk_wed_wo_queue_rx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q) in mtk_wed_wo_queue_rx_clean() argument
317 void *buf = mtk_wed_wo_dequeue(wo, q, NULL, true); in mtk_wed_wo_queue_rx_clean()
325 if (!q->cache.va) in mtk_wed_wo_queue_rx_clean()
328 page = virt_to_page(q->cache.va); in mtk_wed_wo_queue_rx_clean()
329 __page_frag_cache_drain(page, q->cache.pagecnt_bias); in mtk_wed_wo_queue_rx_clean()
330 memset(&q->cache, 0, sizeof(q->cache)); in mtk_wed_wo_queue_rx_clean()
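The RX clean path reuses the dequeue helper with the flush argument set, so every queued buffer is returned even if the firmware never completed it, and then drains the page_frag cache exactly like the TX variant. The surrounding loop and skb_free_frag() call are assumptions:

static void
mtk_wed_wo_queue_rx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
{
        struct page *page;

        for (;;) {
                /* flush == true forces DMA_DONE in mtk_wed_wo_dequeue() */
                void *buf = mtk_wed_wo_dequeue(wo, q, NULL, true);

                if (!buf)
                        break;

                skb_free_frag(buf);
        }

        if (!q->cache.va)
                return;

        page = virt_to_page(q->cache.va);
        __page_frag_cache_drain(page, q->cache.pagecnt_bias);
        memset(&q->cache, 0, sizeof(q->cache));
}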
334 mtk_wed_wo_queue_reset(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q) in mtk_wed_wo_queue_reset() argument
336 mtk_wed_mmio_w32(wo, q->regs.cpu_idx, 0); in mtk_wed_wo_queue_reset()
337 mtk_wed_mmio_w32(wo, q->regs.desc_base, q->desc_dma); in mtk_wed_wo_queue_reset()
338 mtk_wed_mmio_w32(wo, q->regs.ring_size, q->n_desc); in mtk_wed_wo_queue_reset()
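The reset fragments are complete in themselves: clear the doorbell, then publish the descriptor ring's DMA base address and size to the hardware. Only the comments below are added; note that it reprograms the same cpu_idx register the kick helper writes:

static void
mtk_wed_wo_queue_reset(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
{
        /* reprogram the hardware view of the ring: zero the doorbell,
         * then set the descriptor base and ring size
         */
        mtk_wed_mmio_w32(wo, q->regs.cpu_idx, 0);
        mtk_wed_mmio_w32(wo, q->regs.desc_base, q->desc_dma);
        mtk_wed_mmio_w32(wo, q->regs.ring_size, q->n_desc);
}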
341 int mtk_wed_wo_queue_tx_skb(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q, in mtk_wed_wo_queue_tx_skb() argument
349 q->tail = mtk_wed_mmio_r32(wo, q->regs.dma_idx); in mtk_wed_wo_queue_tx_skb()
350 index = (q->head + 1) % q->n_desc; in mtk_wed_wo_queue_tx_skb()
351 if (q->tail == index) { in mtk_wed_wo_queue_tx_skb()
356 entry = &q->entry[index]; in mtk_wed_wo_queue_tx_skb()
362 desc = &q->desc[index]; in mtk_wed_wo_queue_tx_skb()
363 q->head = index; in mtk_wed_wo_queue_tx_skb()
376 mtk_wed_wo_queue_kick(wo, q, q->head); in mtk_wed_wo_queue_tx_skb()
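The TX fragments show the submit path: resync the software tail from the hardware consumer index (dma_idx), check for ring-full, fill the next slot's entry and descriptor, advance the head and ring the doorbell. A sketch assuming the payload comes in as an skb that is copied into the preallocated, premapped slot buffer; the length check, memcpy, DMA sync, control-bit names (SD_LEN0, LAST_SEC0), the fact that the skb is always consumed and the return codes are all assumptions around the visible bookkeeping lines:

int mtk_wed_wo_queue_tx_skb(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
                            struct sk_buff *skb)
{
        struct mtk_wed_wo_queue_entry *entry;
        struct mtk_wed_wo_queue_desc *desc;
        int ret = 0, index;
        u32 ctrl;

        /* resync the tail with the hardware consumer index before the
         * ring-full check
         */
        q->tail = mtk_wed_mmio_r32(wo, q->regs.dma_idx);
        index = (q->head + 1) % q->n_desc;
        if (q->tail == index) {
                ret = -ENOMEM;
                goto out;
        }

        entry = &q->entry[index];
        if (skb->len > entry->len) {
                ret = -ENOMEM;
                goto out;
        }

        /* assumed: copy the message into the slot's premapped buffer and
         * make it visible to the device
         */
        memcpy(entry->buf, skb->data, skb->len);
        dma_sync_single_for_device(wo->hw->dev, entry->addr, skb->len,
                                   DMA_TO_DEVICE);

        desc = &q->desc[index];
        q->head = index;

        ctrl = FIELD_PREP(MTK_WED_WO_CTL_SD_LEN0, skb->len) |
               MTK_WED_WO_CTL_LAST_SEC0 | MTK_WED_WO_CTL_DMA_DONE;
        desc->buf0 = cpu_to_le32(entry->addr);   /* assumed field names */
        desc->ctrl = cpu_to_le32(ctrl);

out:
        /* the skb is assumed to always be consumed here */
        dev_kfree_skb(skb);

        /* ring the doorbell with the new head so the firmware picks up
         * the message
         */
        mtk_wed_wo_queue_kick(wo, q, q->head);

        return ret;
}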