Lines matching refs: q — every reference to the struct mt76_queue pointer q in the mt76 driver's DMA code (dma.c). Each entry shows the source line number, the matching line, the enclosing function, and whether q is a function argument or a local variable there.

184 mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)  in mt76_dma_sync_idx()  argument
186 Q_WRITE(dev, q, desc_base, q->desc_dma); in mt76_dma_sync_idx()
187 Q_WRITE(dev, q, ring_size, q->ndesc); in mt76_dma_sync_idx()
188 q->head = Q_READ(dev, q, dma_idx); in mt76_dma_sync_idx()
189 q->tail = q->head; in mt76_dma_sync_idx()
193 mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q) in mt76_dma_queue_reset() argument
197 if (!q || !q->ndesc) in mt76_dma_queue_reset()
201 for (i = 0; i < q->ndesc; i++) in mt76_dma_queue_reset()
202 q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE); in mt76_dma_queue_reset()
204 Q_WRITE(dev, q, cpu_idx, 0); in mt76_dma_queue_reset()
205 Q_WRITE(dev, q, dma_idx, 0); in mt76_dma_queue_reset()
206 mt76_dma_sync_idx(dev, q); in mt76_dma_queue_reset()
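
mt76_dma_sync_idx() reprograms the descriptor base and ring size, then adopts the hardware's current DMA index as both head and tail; mt76_dma_queue_reset() additionally marks every descriptor as already completed and zeroes both index registers first. A compilable user-space sketch of that bookkeeping, with plain struct fields standing in for the Q_READ()/Q_WRITE() register accessors (names and types are simplified stand-ins, not the driver's):

#include <stdint.h>

#define DMA_CTL_DMA_DONE (1u << 31)   /* stand-in for MT_DMA_CTL_DMA_DONE */

struct desc { uint32_t buf0, ctrl, buf1, info; };

struct ring {
    struct desc *desc;
    unsigned int ndesc, head, tail, queued;
    unsigned int cpu_idx, dma_idx;    /* mock index "registers"; the driver
                                         uses Q_READ()/Q_WRITE() on MMIO */
};

/* Modeled on mt76_dma_sync_idx(): after (re)programming desc_base and
 * ring_size, adopt the hardware's current DMA index as both head and tail. */
static void ring_sync_idx(struct ring *q)
{
    q->head = q->dma_idx;
    q->tail = q->head;
}

/* Modeled on mt76_dma_queue_reset(): mark every descriptor as already
 * completed so stale slots are never processed, zero both index registers,
 * then resync the software view. (The driver drains the queue before
 * resetting, so queued is already 0 here.) */
static void ring_reset(struct ring *q)
{
    unsigned int i;

    if (!q->ndesc)
        return;

    for (i = 0; i < q->ndesc; i++)
        q->desc[i].ctrl = DMA_CTL_DMA_DONE;

    q->cpu_idx = 0;
    q->dma_idx = 0;
    ring_sync_idx(q);
}
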
210 mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q, in mt76_dma_add_rx_buf() argument
213 struct mt76_desc *desc = &q->desc[q->head]; in mt76_dma_add_rx_buf()
214 struct mt76_queue_entry *entry = &q->entry[q->head]; in mt76_dma_add_rx_buf()
217 int idx = q->head; in mt76_dma_add_rx_buf()
222 if (mt76_queue_is_wed_rx(q)) { in mt76_dma_add_rx_buf()
248 q->head = (q->head + 1) % q->ndesc; in mt76_dma_add_rx_buf()
249 q->queued++; in mt76_dma_add_rx_buf()
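
mt76_dma_add_rx_buf() writes one receive buffer into the descriptor at head (leaving DMA_DONE clear so the hardware owns the slot), remembers the CPU-side pointer in the shadow entry, then advances head modulo ndesc. A minimal user-space model of that bookkeeping; the WED rx-token branch is omitted and the ctrl bit packing is reduced to a plain length field:

#include <stdint.h>

struct desc  { uint32_t buf0, ctrl, buf1, info; };
struct entry { void *buf; uint32_t dma_addr, dma_len; };

struct ring {
    struct desc  *desc;
    struct entry *entry;
    unsigned int ndesc, head, tail, queued;
};

/* Hand one receive buffer to the ring: fill the descriptor at head, remember
 * the CPU-side pointer in the shadow entry, then advance head modulo ndesc.
 * Returns the slot index that was used, as the driver's function does. */
static int ring_add_rx_buf(struct ring *q, uint32_t dma_addr, uint32_t len,
                           void *cpu_buf)
{
    unsigned int idx = q->head;

    q->desc[idx].buf0 = dma_addr;
    q->desc[idx].ctrl = len;     /* driver: FIELD_PREP(SD_LEN0, len), DMA_DONE left clear */
    q->desc[idx].buf1 = 0;
    q->desc[idx].info = 0;

    q->entry[idx].buf      = cpu_buf;
    q->entry[idx].dma_addr = dma_addr;
    q->entry[idx].dma_len  = len;

    q->head = (q->head + 1) % q->ndesc;
    q->queued++;
    return (int)idx;
}
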
255 mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q, in mt76_dma_add_buf() argument
265 q->entry[q->head].txwi = DMA_DUMMY_DATA; in mt76_dma_add_buf()
266 q->entry[q->head].skip_buf0 = true; in mt76_dma_add_buf()
272 idx = q->head; in mt76_dma_add_buf()
273 next = (q->head + 1) % q->ndesc; in mt76_dma_add_buf()
275 desc = &q->desc[idx]; in mt76_dma_add_buf()
276 entry = &q->entry[idx]; in mt76_dma_add_buf()
305 q->head = next; in mt76_dma_add_buf()
306 q->queued++; in mt76_dma_add_buf()
309 q->entry[idx].txwi = txwi; in mt76_dma_add_buf()
310 q->entry[idx].skb = skb; in mt76_dma_add_buf()
311 q->entry[idx].wcid = 0xffff; in mt76_dma_add_buf()
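
mt76_dma_add_buf() places up to two scatter buffers into each TX descriptor (buf0/buf1) and records the skb, txwi and a wcid of 0xffff only in the entry of the last descriptor used, so the completion path frees them exactly once. A simplified sketch of that pairing; the skip_buf0/DMA_DUMMY_DATA special case and the ctrl-word flag packing are omitted, and the lengths are kept as separate fields rather than packed into ctrl:

#include <stdint.h>

struct qbuf  { uint32_t addr, len; };
struct desc  { uint32_t buf0, buf1, len0, len1; };  /* the driver packs both
                                                       lengths into one ctrl word */
struct entry { void *txwi, *skb; uint16_t wcid; };

struct ring {
    struct desc  *desc;
    struct entry *entry;
    unsigned int ndesc, head, queued;
};

/* Queue nbuf scatter buffers for transmit: each descriptor carries up to two
 * of them, and only the last descriptor's shadow entry records the skb/txwi.
 * Returns the last slot index used, or -1 if there was nothing to queue. */
static int ring_add_buf(struct ring *q, const struct qbuf *buf, int nbuf,
                        void *txwi, void *skb)
{
    int i, idx = -1;

    if (nbuf <= 0)
        return -1;

    for (i = 0; i < nbuf; i += 2) {
        idx = (int)q->head;

        q->desc[idx].buf0 = buf[i].addr;
        q->desc[idx].len0 = buf[i].len;
        if (i + 1 < nbuf) {
            q->desc[idx].buf1 = buf[i + 1].addr;
            q->desc[idx].len1 = buf[i + 1].len;
        }

        q->head = (q->head + 1) % q->ndesc;
        q->queued++;
    }

    q->entry[idx].txwi = txwi;
    q->entry[idx].skb  = skb;
    q->entry[idx].wcid = 0xffff;   /* "no station associated" marker */

    return idx;
}
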
317 mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx, in mt76_dma_tx_cleanup_idx() argument
320 struct mt76_queue_entry *e = &q->entry[idx]; in mt76_dma_tx_cleanup_idx()
338 mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q) in mt76_dma_kick_queue() argument
341 Q_WRITE(dev, q, cpu_idx, q->head); in mt76_dma_kick_queue()
345 mt76_dma_tx_cleanup(struct mt76_dev *dev, struct mt76_queue *q, bool flush) in mt76_dma_tx_cleanup() argument
350 if (!q || !q->ndesc) in mt76_dma_tx_cleanup()
353 spin_lock_bh(&q->cleanup_lock); in mt76_dma_tx_cleanup()
357 last = Q_READ(dev, q, dma_idx); in mt76_dma_tx_cleanup()
359 while (q->queued > 0 && q->tail != last) { in mt76_dma_tx_cleanup()
360 mt76_dma_tx_cleanup_idx(dev, q, q->tail, &entry); in mt76_dma_tx_cleanup()
361 mt76_queue_tx_complete(dev, q, &entry); in mt76_dma_tx_cleanup()
368 if (!flush && q->tail == last) in mt76_dma_tx_cleanup()
369 last = Q_READ(dev, q, dma_idx); in mt76_dma_tx_cleanup()
371 spin_unlock_bh(&q->cleanup_lock); in mt76_dma_tx_cleanup()
374 spin_lock_bh(&q->lock); in mt76_dma_tx_cleanup()
375 mt76_dma_sync_idx(dev, q); in mt76_dma_tx_cleanup()
376 mt76_dma_kick_queue(dev, q); in mt76_dma_tx_cleanup()
377 spin_unlock_bh(&q->lock); in mt76_dma_tx_cleanup()
380 if (!q->queued) in mt76_dma_tx_cleanup()
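
mt76_dma_tx_cleanup() reaps finished transmissions from tail up to the hardware's dma_idx, re-reading dma_idx when the tail catches up in case the hardware advanced meanwhile; with flush set it drains everything, then resyncs and kicks the ring. A simplified model of that loop (the per-entry unmap/free done by mt76_queue_tx_complete(), the locking and the TX-queue wake-up are left out):

#include <stdint.h>
#include <limits.h>

struct entry { void *txwi, *skb; };

struct ring {
    struct entry *entry;
    unsigned int ndesc, head, tail, queued;
    unsigned int dma_idx;      /* mock of the hardware read-pointer register  */
    unsigned int cpu_idx;      /* mock of the software write-pointer register */
};

/* Walk tail toward the hardware's dma_idx, releasing each completed slot.
 * With flush set, "last" can never match and the loop drains every slot. */
static void ring_tx_cleanup(struct ring *q, int flush)
{
    unsigned int last = flush ? UINT_MAX : q->dma_idx;

    while (q->queued > 0 && q->tail != last) {
        /* the driver calls mt76_queue_tx_complete() here to unmap and free */
        q->entry[q->tail].skb  = NULL;
        q->entry[q->tail].txwi = NULL;

        q->tail = (q->tail + 1) % q->ndesc;
        q->queued--;

        if (!flush && q->tail == last)
            last = q->dma_idx;             /* Q_READ(dma_idx) in the driver */
    }

    if (flush) {
        /* after a flush the driver resyncs head/tail from the hardware and
         * "kicks" the queue by publishing head into cpu_idx */
        q->head = q->dma_idx;
        q->tail = q->head;
        q->cpu_idx = q->head;
    }
}
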
385 mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx, in mt76_dma_get_buf() argument
388 struct mt76_queue_entry *e = &q->entry[idx]; in mt76_dma_get_buf()
389 struct mt76_desc *desc = &q->desc[idx]; in mt76_dma_get_buf()
401 if (mt76_queue_is_wed_rx(q)) { in mt76_dma_get_buf()
410 SKB_WITH_OVERHEAD(q->buf_size), in mt76_dma_get_buf()
411 page_pool_get_dma_dir(q->page_pool)); in mt76_dma_get_buf()
431 SKB_WITH_OVERHEAD(q->buf_size), in mt76_dma_get_buf()
432 page_pool_get_dma_dir(q->page_pool)); in mt76_dma_get_buf()
439 mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush, in mt76_dma_dequeue() argument
442 int idx = q->tail; in mt76_dma_dequeue()
445 if (!q->queued) in mt76_dma_dequeue()
449 q->desc[idx].ctrl |= cpu_to_le32(MT_DMA_CTL_DMA_DONE); in mt76_dma_dequeue()
450 else if (!(q->desc[idx].ctrl & cpu_to_le32(MT_DMA_CTL_DMA_DONE))) in mt76_dma_dequeue()
453 q->tail = (q->tail + 1) % q->ndesc; in mt76_dma_dequeue()
454 q->queued--; in mt76_dma_dequeue()
456 return mt76_dma_get_buf(dev, q, idx, len, info, more, drop); in mt76_dma_dequeue()
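
mt76_dma_dequeue() only hands back the buffer at tail once the hardware has set the DMA_DONE bit in that descriptor; with flush the bit is forced so teardown can reclaim every slot regardless. A user-space model of that check and the tail advance (the unmap/sync work done by mt76_dma_get_buf() is omitted):

#include <stdint.h>
#include <stddef.h>

#define DMA_CTL_DMA_DONE (1u << 31)   /* stand-in for MT_DMA_CTL_DMA_DONE */

struct desc  { uint32_t buf0, ctrl, buf1, info; };
struct entry { void *buf; };

struct ring {
    struct desc  *desc;
    struct entry *entry;
    unsigned int ndesc, head, tail, queued;
};

/* Pop the buffer at tail if the hardware has marked the slot complete.
 * Returns NULL when the ring is empty or the slot is still owned by DMA. */
static void *ring_dequeue(struct ring *q, int flush)
{
    unsigned int idx = q->tail;
    void *buf;

    if (!q->queued)
        return NULL;

    if (flush)
        q->desc[idx].ctrl |= DMA_CTL_DMA_DONE;
    else if (!(q->desc[idx].ctrl & DMA_CTL_DMA_DONE))
        return NULL;

    buf = q->entry[idx].buf;
    q->entry[idx].buf = NULL;

    q->tail = (q->tail + 1) % q->ndesc;
    q->queued--;
    return buf;
}
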
460 mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q, in mt76_dma_tx_queue_skb_raw() argument
469 if (q->queued + 1 >= q->ndesc - 1) in mt76_dma_tx_queue_skb_raw()
480 spin_lock_bh(&q->lock); in mt76_dma_tx_queue_skb_raw()
481 mt76_dma_add_buf(dev, q, &buf, 1, tx_info, skb, NULL); in mt76_dma_tx_queue_skb_raw()
482 mt76_dma_kick_queue(dev, q); in mt76_dma_tx_queue_skb_raw()
483 spin_unlock_bh(&q->lock); in mt76_dma_tx_queue_skb_raw()
493 mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q, in mt76_dma_tx_queue_skb() argument
547 if (q->queued + (tx_info.nbuf + 1) / 2 >= q->ndesc - 1) { in mt76_dma_tx_queue_skb()
560 return mt76_dma_add_buf(dev, q, tx_info.buf, tx_info.nbuf, in mt76_dma_tx_queue_skb()
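
Both transmit paths refuse to use up the ring completely: the raw path needs exactly one descriptor, the skb path needs one descriptor per two scatter buffers, hence the (nbuf + 1) / 2 rounding, and in each case the queue is reported busy while fewer than one spare slot would remain, so a full ring is never mistaken for an empty one at the index registers. A one-function sketch of that headroom check (names here are illustrative):

/* Room check mirroring the two "queued + ... >= ndesc - 1" tests above:
 * nbuf scatter buffers need (nbuf + 1) / 2 descriptors, and one descriptor
 * is always kept unused. Returns nonzero if the frame fits. */
static int ring_has_room(unsigned int queued, unsigned int ndesc, int nbuf)
{
    unsigned int needed = ((unsigned int)nbuf + 1) / 2;  /* 2 buffers per descriptor */

    return queued + needed < ndesc - 1;
}

For example, with ndesc = 512 and a 3-buffer frame, two descriptors are needed, so enqueueing is refused once 509 slots are already queued.
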
592 mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q, in mt76_dma_rx_fill() argument
595 int len = SKB_WITH_OVERHEAD(q->buf_size); in mt76_dma_rx_fill()
598 if (!q->ndesc) in mt76_dma_rx_fill()
601 spin_lock_bh(&q->lock); in mt76_dma_rx_fill()
603 while (q->queued < q->ndesc - 1) { in mt76_dma_rx_fill()
610 buf = mt76_get_page_pool_buf(q, &offset, q->buf_size); in mt76_dma_rx_fill()
615 dir = page_pool_get_dma_dir(q->page_pool); in mt76_dma_rx_fill()
618 qbuf.addr = addr + q->buf_offset; in mt76_dma_rx_fill()
619 qbuf.len = len - q->buf_offset; in mt76_dma_rx_fill()
621 if (mt76_dma_add_rx_buf(dev, q, &qbuf, buf) < 0) { in mt76_dma_rx_fill()
629 mt76_dma_kick_queue(dev, q); in mt76_dma_rx_fill()
631 spin_unlock_bh(&q->lock); in mt76_dma_rx_fill()
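
mt76_dma_rx_fill() keeps topping the ring up while fewer than ndesc - 1 slots are in flight, then kicks the queue. Each page-pool buffer is only partly handed to the hardware: SKB_WITH_OVERHEAD() trims the tail that napi_build_skb() will later need for skb_shared_info, and q->buf_offset bytes of headroom are kept at the front. A small, compilable illustration of that layout arithmetic; the overhead and offset values below are made up for the example:

#include <stdio.h>

#define SHARED_INFO_OVERHEAD 320u   /* stand-in for SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) */

int main(void)
{
    unsigned int buf_size   = 2048;  /* q->buf_size                           */
    unsigned int buf_offset = 64;    /* q->buf_offset: headroom for the stack */

    unsigned int len     = buf_size - SHARED_INFO_OVERHEAD;  /* SKB_WITH_OVERHEAD() */
    unsigned int dma_off = buf_offset;                        /* qbuf.addr = addr + buf_offset */
    unsigned int dma_len = len - buf_offset;                  /* qbuf.len  = len - buf_offset  */

    printf("buffer %u bytes: %u headroom, %u for DMA, %u tail overhead\n",
           buf_size, dma_off, dma_len, SHARED_INFO_OVERHEAD);
    return 0;
}

With these illustrative numbers the DMA engine is offered 1664 of the 2048 bytes; the rest is headroom and skb_shared_info space.
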
636 int mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset) in mt76_dma_wed_setup() argument
643 if (!q || !q->ndesc) in mt76_dma_wed_setup()
646 flags = q->flags; in mt76_dma_wed_setup()
648 q->flags &= ~MT_QFLAG_WED; in mt76_dma_wed_setup()
650 if (!(q->flags & MT_QFLAG_WED)) in mt76_dma_wed_setup()
653 type = FIELD_GET(MT_QFLAG_WED_TYPE, q->flags); in mt76_dma_wed_setup()
654 ring = FIELD_GET(MT_QFLAG_WED_RING, q->flags); in mt76_dma_wed_setup()
658 ret = mtk_wed_device_tx_ring_setup(wed, ring, q->regs, reset); in mt76_dma_wed_setup()
660 q->wed_regs = wed->tx_ring[ring].reg_base; in mt76_dma_wed_setup()
664 q->flags = 0; in mt76_dma_wed_setup()
665 mt76_dma_queue_reset(dev, q); in mt76_dma_wed_setup()
666 mt76_dma_rx_fill(dev, q, false); in mt76_dma_wed_setup()
667 q->flags = flags; in mt76_dma_wed_setup()
669 ret = mtk_wed_device_txfree_ring_setup(wed, q->regs); in mt76_dma_wed_setup()
671 q->wed_regs = wed->txfree_ring.reg_base; in mt76_dma_wed_setup()
674 ret = mtk_wed_device_rx_ring_setup(wed, ring, q->regs, reset); in mt76_dma_wed_setup()
676 q->wed_regs = wed->rx_ring[ring].reg_base; in mt76_dma_wed_setup()
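
mt76_dma_wed_setup() decides how to attach the queue to the WED (Wireless Ethernet Dispatch) block by decoding q->flags: one bitfield selects the ring index, another the ring type (TX, TXFREE or RX), and a single bit says whether the queue is WED-backed at all. A plain-C illustration of that FIELD_GET()-style decoding; the bit positions used below are assumptions for the example, not the driver's definitions:

#include <stdint.h>
#include <stdio.h>

#define QFLAG_WED_RING_MASK  0x3u     /* which WED ring index           */
#define QFLAG_WED_RING_SHIFT 0
#define QFLAG_WED_TYPE_MASK  0xcu     /* TX / TXFREE / RX ring type     */
#define QFLAG_WED_TYPE_SHIFT 2
#define QFLAG_WED            0x10u    /* queue is WED-attached at all   */

/* What FIELD_GET() does, minus the compile-time mask checking. */
static unsigned int field_get(uint32_t mask, unsigned int shift, uint32_t val)
{
    return (val & mask) >> shift;
}

int main(void)
{
    uint32_t flags = QFLAG_WED | (1u << QFLAG_WED_TYPE_SHIFT) | 2u;  /* type 1, ring 2 */

    if (!(flags & QFLAG_WED)) {
        puts("not a WED queue: nothing to set up");
        return 0;
    }
    printf("WED type %u, ring %u\n",
           field_get(QFLAG_WED_TYPE_MASK, QFLAG_WED_TYPE_SHIFT, flags),
           field_get(QFLAG_WED_RING_MASK, QFLAG_WED_RING_SHIFT, flags));
    return 0;
}
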
690 mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q, in mt76_dma_alloc_queue() argument
696 spin_lock_init(&q->lock); in mt76_dma_alloc_queue()
697 spin_lock_init(&q->cleanup_lock); in mt76_dma_alloc_queue()
699 q->regs = dev->mmio.regs + ring_base + idx * MT_RING_SIZE; in mt76_dma_alloc_queue()
700 q->ndesc = n_desc; in mt76_dma_alloc_queue()
701 q->buf_size = bufsize; in mt76_dma_alloc_queue()
702 q->hw_idx = idx; in mt76_dma_alloc_queue()
704 size = q->ndesc * sizeof(struct mt76_desc); in mt76_dma_alloc_queue()
705 q->desc = dmam_alloc_coherent(dev->dma_dev, size, &q->desc_dma, GFP_KERNEL); in mt76_dma_alloc_queue()
706 if (!q->desc) in mt76_dma_alloc_queue()
709 size = q->ndesc * sizeof(*q->entry); in mt76_dma_alloc_queue()
710 q->entry = devm_kzalloc(dev->dev, size, GFP_KERNEL); in mt76_dma_alloc_queue()
711 if (!q->entry) in mt76_dma_alloc_queue()
714 ret = mt76_create_page_pool(dev, q); in mt76_dma_alloc_queue()
718 ret = mt76_dma_wed_setup(dev, q, false); in mt76_dma_alloc_queue()
722 if (q->flags != MT_WED_Q_TXFREE) in mt76_dma_alloc_queue()
723 mt76_dma_queue_reset(dev, q); in mt76_dma_alloc_queue()
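
mt76_dma_alloc_queue() locates the ring's register block at ring_base + idx * MT_RING_SIZE inside the MMIO window, allocates ndesc hardware descriptors coherently plus a zeroed shadow-entry array of the same length, then creates the page pool, runs WED setup and resets the ring (unless it is the WED txfree ring). A user-space sketch of the sizing and layout; calloc() stands in for the managed kernel allocators, and the register stride is illustrative:

#include <stdint.h>
#include <stdlib.h>

#define RING_REG_STRIDE 0x10u   /* stand-in for MT_RING_SIZE: bytes of register
                                   space per hardware ring */

struct desc  { uint32_t buf0, ctrl, buf1, info; };
struct entry { void *buf, *skb, *txwi; };

struct ring {
    uint32_t     *regs;          /* this ring's slice of the register window */
    struct desc  *desc;          /* descriptor array shared with the device  */
    struct entry *entry;         /* software shadow, one per descriptor      */
    unsigned int  ndesc, buf_size, hw_idx;
};

/* Modeled on mt76_dma_alloc_queue(); page-pool and WED setup are omitted. */
static int ring_alloc(struct ring *q, uint8_t *mmio_base,
                      unsigned int ring_base, unsigned int idx,
                      unsigned int n_desc, unsigned int bufsize)
{
    q->regs     = (uint32_t *)(mmio_base + ring_base + idx * RING_REG_STRIDE);
    q->ndesc    = n_desc;
    q->buf_size = bufsize;
    q->hw_idx   = idx;

    q->desc  = calloc(q->ndesc, sizeof(*q->desc));   /* dmam_alloc_coherent() */
    q->entry = calloc(q->ndesc, sizeof(*q->entry));  /* devm_kzalloc()        */
    if (!q->desc || !q->entry) {
        free(q->desc);
        free(q->entry);
        return -1;               /* -ENOMEM in the driver */
    }
    return 0;
}
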
729 mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q) in mt76_dma_rx_cleanup() argument
734 if (!q->ndesc) in mt76_dma_rx_cleanup()
737 spin_lock_bh(&q->lock); in mt76_dma_rx_cleanup()
740 buf = mt76_dma_dequeue(dev, q, true, NULL, NULL, &more, NULL); in mt76_dma_rx_cleanup()
747 if (q->rx_head) { in mt76_dma_rx_cleanup()
748 dev_kfree_skb(q->rx_head); in mt76_dma_rx_cleanup()
749 q->rx_head = NULL; in mt76_dma_rx_cleanup()
752 spin_unlock_bh(&q->lock); in mt76_dma_rx_cleanup()
758 struct mt76_queue *q = &dev->q_rx[qid]; in mt76_dma_rx_reset() local
761 if (!q->ndesc) in mt76_dma_rx_reset()
764 for (i = 0; i < q->ndesc; i++) in mt76_dma_rx_reset()
765 q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE); in mt76_dma_rx_reset()
767 mt76_dma_rx_cleanup(dev, q); in mt76_dma_rx_reset()
770 mt76_dma_wed_setup(dev, q, true); in mt76_dma_rx_reset()
771 if (q->flags != MT_WED_Q_TXFREE) { in mt76_dma_rx_reset()
772 mt76_dma_sync_idx(dev, q); in mt76_dma_rx_reset()
773 mt76_dma_rx_fill(dev, q, false); in mt76_dma_rx_reset()
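
mt76_dma_rx_reset() restores an RX ring in a fixed order: force every descriptor to look completed, drain and free whatever the ring still holds (including a half-assembled q->rx_head), re-attach WED, and only then resync the indices and refill, skipping that last step for the WED txfree ring, which owns no driver buffers. A condensed sketch of that ordering (locking, WED and page-pool details left out; names are illustrative):

#include <stdint.h>
#include <stddef.h>

#define DMA_CTL_DMA_DONE (1u << 31)

struct desc  { uint32_t buf0, ctrl, buf1, info; };
struct entry { void *buf; };

struct ring {
    struct desc  *desc;
    struct entry *entry;
    unsigned int ndesc, head, tail, queued;
    unsigned int dma_idx;          /* mock hardware read pointer             */
    int is_txfree;                 /* MT_WED_Q_TXFREE queues own no buffers  */
};

static void ring_rx_reset(struct ring *q)
{
    unsigned int i;

    /* 1. make every slot look completed so the drain below reclaims it */
    for (i = 0; i < q->ndesc; i++)
        q->desc[i].ctrl = DMA_CTL_DMA_DONE;

    /* 2. drain: mt76_dma_rx_cleanup() frees each buffer via the page pool */
    while (q->queued) {
        q->entry[q->tail].buf = NULL;
        q->tail = (q->tail + 1) % q->ndesc;
        q->queued--;
    }

    /* 3. mt76_dma_wed_setup(dev, q, true) would re-attach WED here */

    /* 4. resync and refill, except for the txfree event ring */
    if (!q->is_txfree) {
        q->head = q->dma_idx;      /* mt76_dma_sync_idx()  */
        q->tail = q->head;
        /* mt76_dma_rx_fill() would now repopulate the ring */
    }
}
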
778 mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data, in mt76_add_fragment() argument
781 struct sk_buff *skb = q->rx_head; in mt76_add_fragment()
787 int offset = data - page_address(page) + q->buf_offset; in mt76_add_fragment()
789 skb_add_rx_frag(skb, nr_frags, page, offset, len, q->buf_size); in mt76_add_fragment()
797 q->rx_head = NULL; in mt76_add_fragment()
799 dev->drv->rx_skb(dev, q - dev->q_rx, skb, &info); in mt76_add_fragment()
805 mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget) in mt76_dma_rx_process() argument
811 bool allow_direct = !mt76_queue_is_wed_rx(q); in mt76_dma_rx_process()
815 q->flags == MT_WED_Q_TXFREE) { in mt76_dma_rx_process()
816 dma_idx = Q_READ(dev, q, dma_idx); in mt76_dma_rx_process()
825 if (q->tail == dma_idx) in mt76_dma_rx_process()
826 dma_idx = Q_READ(dev, q, dma_idx); in mt76_dma_rx_process()
828 if (q->tail == dma_idx) in mt76_dma_rx_process()
832 data = mt76_dma_dequeue(dev, q, false, &len, &info, &more, in mt76_dma_rx_process()
840 if (q->rx_head) in mt76_dma_rx_process()
841 data_len = q->buf_size; in mt76_dma_rx_process()
843 data_len = SKB_WITH_OVERHEAD(q->buf_size); in mt76_dma_rx_process()
845 if (data_len < len + q->buf_offset) { in mt76_dma_rx_process()
846 dev_kfree_skb(q->rx_head); in mt76_dma_rx_process()
847 q->rx_head = NULL; in mt76_dma_rx_process()
851 if (q->rx_head) { in mt76_dma_rx_process()
852 mt76_add_fragment(dev, q, data, len, more, info, in mt76_dma_rx_process()
861 skb = napi_build_skb(data, q->buf_size); in mt76_dma_rx_process()
865 skb_reserve(skb, q->buf_offset); in mt76_dma_rx_process()
874 q->rx_head = skb; in mt76_dma_rx_process()
878 dev->drv->rx_skb(dev, q - dev->q_rx, skb, &info); in mt76_dma_rx_process()
885 mt76_dma_rx_fill(dev, q, true); in mt76_dma_rx_process()
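
The receive loop keeps a partially reassembled frame in q->rx_head while the hardware signals "more": continuation buffers are attached as page fragments by mt76_add_fragment(), the whole pending frame is dropped if a fragment does not fit within the buffer budget, and the ring is refilled once the NAPI budget is spent. A self-contained model of that state machine, using heap buffers instead of skb frag pages (all names here are illustrative):

#include <stdlib.h>
#include <string.h>
#include <stdio.h>

struct rx_state {
    unsigned char *rx_head;     /* pending, partially reassembled frame */
    size_t rx_len, rx_cap;
};

static void rx_deliver(const unsigned char *frame, size_t len)
{
    printf("delivered %zu-byte frame\n", len);   /* dev->drv->rx_skb() in the driver */
}

/* Handle one received buffer: append it to the pending frame and deliver the
 * frame once the final fragment ("more" cleared) arrives. If a fragment does
 * not fit, the whole pending frame is dropped, as the data_len check does. */
static void rx_process_buf(struct rx_state *s, const unsigned char *data,
                           size_t len, int more, size_t max_frag)
{
    if (len > max_frag) {
        free(s->rx_head);                        /* drop the partial frame */
        s->rx_head = NULL;
        s->rx_len = s->rx_cap = 0;
        return;
    }

    if (s->rx_len + len > s->rx_cap) {
        unsigned char *p = realloc(s->rx_head, (s->rx_len + len) * 2);
        if (!p)
            return;                              /* keep the model simple on OOM */
        s->rx_head = p;
        s->rx_cap = (s->rx_len + len) * 2;
    }
    memcpy(s->rx_head + s->rx_len, data, len);
    s->rx_len += len;

    if (more)                                    /* wait for the next fragment */
        return;

    rx_deliver(s->rx_head, s->rx_len);
    free(s->rx_head);
    s->rx_head = NULL;
    s->rx_len = s->rx_cap = 0;
}
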
977 struct mt76_queue *q = &dev->q_rx[i]; in mt76_dma_cleanup() local
980 mt76_dma_rx_cleanup(dev, q); in mt76_dma_cleanup()
982 page_pool_destroy(q->page_pool); in mt76_dma_cleanup()