Lines Matching refs:dev

42 mt76_alloc_txwi(struct mt76_dev *dev) in mt76_alloc_txwi() argument
49 size = L1_CACHE_ALIGN(dev->drv->txwi_size + sizeof(*t)); in mt76_alloc_txwi()
54 addr = dma_map_single(dev->dma_dev, txwi, dev->drv->txwi_size, in mt76_alloc_txwi()
56 t = (struct mt76_txwi_cache *)(txwi + dev->drv->txwi_size); in mt76_alloc_txwi()
63 mt76_alloc_rxwi(struct mt76_dev *dev) in mt76_alloc_rxwi() argument
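
The entries at lines 42-56 outline mt76_alloc_txwi(): a single L1-cache-aligned allocation holds the hardware TXWI followed by the struct mt76_txwi_cache bookkeeping entry, and only the TXWI portion is DMA-mapped towards the device. A minimal reconstruction from those entries; the kzalloc() call and error handling are not shown above and are assumptions:

static struct mt76_txwi_cache *
mt76_alloc_txwi(struct mt76_dev *dev)
{
        struct mt76_txwi_cache *t;
        dma_addr_t addr;
        u8 *txwi;
        int size;

        /* TXWI descriptor followed by the cache entry, cache-line aligned */
        size = L1_CACHE_ALIGN(dev->drv->txwi_size + sizeof(*t));
        txwi = kzalloc(size, GFP_ATOMIC);
        if (!txwi)
                return NULL;

        /* only the TXWI itself is made visible to the device */
        addr = dma_map_single(dev->dma_dev, txwi, dev->drv->txwi_size,
                              DMA_TO_DEVICE);
        t = (struct mt76_txwi_cache *)(txwi + dev->drv->txwi_size);
        t->dma_addr = addr;

        return t;
}
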
76 __mt76_get_txwi(struct mt76_dev *dev) in __mt76_get_txwi() argument
80 spin_lock(&dev->lock); in __mt76_get_txwi()
81 if (!list_empty(&dev->txwi_cache)) { in __mt76_get_txwi()
82 t = list_first_entry(&dev->txwi_cache, struct mt76_txwi_cache, in __mt76_get_txwi()
86 spin_unlock(&dev->lock); in __mt76_get_txwi()
92 __mt76_get_rxwi(struct mt76_dev *dev) in __mt76_get_rxwi() argument
96 spin_lock_bh(&dev->wed_lock); in __mt76_get_rxwi()
97 if (!list_empty(&dev->rxwi_cache)) { in __mt76_get_rxwi()
98 t = list_first_entry(&dev->rxwi_cache, struct mt76_txwi_cache, in __mt76_get_rxwi()
102 spin_unlock_bh(&dev->wed_lock); in __mt76_get_rxwi()
108 mt76_get_txwi(struct mt76_dev *dev) in mt76_get_txwi() argument
110 struct mt76_txwi_cache *t = __mt76_get_txwi(dev); in mt76_get_txwi()
115 return mt76_alloc_txwi(dev); in mt76_get_txwi()
119 mt76_get_rxwi(struct mt76_dev *dev) in mt76_get_rxwi() argument
121 struct mt76_txwi_cache *t = __mt76_get_rxwi(dev); in mt76_get_rxwi()
126 return mt76_alloc_rxwi(dev); in mt76_get_rxwi()
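
The entries at lines 76-126 cover the descriptor-cache lookups: __mt76_get_txwi() pops an entry from dev->txwi_cache under dev->lock, __mt76_get_rxwi() does the same from dev->rxwi_cache under dev->wed_lock using the _bh lock variants, and the mt76_get_txwi()/mt76_get_rxwi() wrappers fall back to a fresh allocation when the cache is empty. A sketch of the txwi pair, reconstructed from those entries (locals and the list_del() are assumptions):

static struct mt76_txwi_cache *
__mt76_get_txwi(struct mt76_dev *dev)
{
        struct mt76_txwi_cache *t = NULL;

        spin_lock(&dev->lock);
        if (!list_empty(&dev->txwi_cache)) {
                t = list_first_entry(&dev->txwi_cache, struct mt76_txwi_cache,
                                     list);
                list_del(&t->list);
        }
        spin_unlock(&dev->lock);

        return t;
}

static struct mt76_txwi_cache *
mt76_get_txwi(struct mt76_dev *dev)
{
        struct mt76_txwi_cache *t = __mt76_get_txwi(dev);

        if (t)
                return t;

        /* cache empty: allocate and map a fresh TXWI */
        return mt76_alloc_txwi(dev);
}
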
131 mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t) in mt76_put_txwi() argument
136 spin_lock(&dev->lock); in mt76_put_txwi()
137 list_add(&t->list, &dev->txwi_cache); in mt76_put_txwi()
138 spin_unlock(&dev->lock); in mt76_put_txwi()
143 mt76_put_rxwi(struct mt76_dev *dev, struct mt76_txwi_cache *t) in mt76_put_rxwi() argument
148 spin_lock_bh(&dev->wed_lock); in mt76_put_rxwi()
149 list_add(&t->list, &dev->rxwi_cache); in mt76_put_rxwi()
150 spin_unlock_bh(&dev->wed_lock); in mt76_put_rxwi()
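
The put side (entries at lines 131-150) is the mirror image: a finished TXWI or RXWI is pushed back onto the corresponding list under the same lock instead of being freed. Reconstructed from those entries; the NULL checks are assumptions:

void
mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{
        if (!t)
                return;

        spin_lock(&dev->lock);
        list_add(&t->list, &dev->txwi_cache);
        spin_unlock(&dev->lock);
}

void
mt76_put_rxwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{
        if (!t)
                return;

        spin_lock_bh(&dev->wed_lock);
        list_add(&t->list, &dev->rxwi_cache);
        spin_unlock_bh(&dev->wed_lock);
}
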
155 mt76_free_pending_txwi(struct mt76_dev *dev) in mt76_free_pending_txwi() argument
160 while ((t = __mt76_get_txwi(dev)) != NULL) { in mt76_free_pending_txwi()
161 dma_unmap_single(dev->dma_dev, t->dma_addr, dev->drv->txwi_size, in mt76_free_pending_txwi()
163 kfree(mt76_get_txwi_ptr(dev, t)); in mt76_free_pending_txwi()
169 mt76_free_pending_rxwi(struct mt76_dev *dev) in mt76_free_pending_rxwi() argument
174 while ((t = __mt76_get_rxwi(dev)) != NULL) { in mt76_free_pending_rxwi()
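
The entries at lines 155-174 drain the caches at teardown: entries are popped until the list is empty, unmapped and freed. For the TX side, kfree() goes through mt76_get_txwi_ptr() to recover the start of the original allocation, since the cache entry sits at the tail of that buffer. A sketch of the TX variant (the local_bh_disable()/local_bh_enable() bracketing is an assumption):

static void
mt76_free_pending_txwi(struct mt76_dev *dev)
{
        struct mt76_txwi_cache *t;

        local_bh_disable();
        while ((t = __mt76_get_txwi(dev)) != NULL) {
                dma_unmap_single(dev->dma_dev, t->dma_addr, dev->drv->txwi_size,
                                 DMA_TO_DEVICE);
                /* t sits at the end of the allocation; free from its start */
                kfree(mt76_get_txwi_ptr(dev, t));
        }
        local_bh_enable();
}
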
184 mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q) in mt76_dma_sync_idx() argument
186 Q_WRITE(dev, q, desc_base, q->desc_dma); in mt76_dma_sync_idx()
187 Q_WRITE(dev, q, ring_size, q->ndesc); in mt76_dma_sync_idx()
188 q->head = Q_READ(dev, q, dma_idx); in mt76_dma_sync_idx()
193 mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q) in mt76_dma_queue_reset() argument
204 Q_WRITE(dev, q, cpu_idx, 0); in mt76_dma_queue_reset()
205 Q_WRITE(dev, q, dma_idx, 0); in mt76_dma_queue_reset()
206 mt76_dma_sync_idx(dev, q); in mt76_dma_queue_reset()
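
mt76_dma_sync_idx() and mt76_dma_queue_reset() (entries at lines 184-206) reprogram a ring through the Q_READ()/Q_WRITE() accessors: the descriptor base and ring size are rewritten, the CPU and DMA indices are cleared, and the software head is resynchronized from the hardware DMA index. A reconstruction of that sequence; the descriptor-invalidation loop and the tail update are assumptions:

static void
mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
{
        Q_WRITE(dev, q, desc_base, q->desc_dma);
        Q_WRITE(dev, q, ring_size, q->ndesc);
        q->head = Q_READ(dev, q, dma_idx);
        q->tail = q->head;
}

static void
mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q)
{
        int i;

        if (!q || !q->ndesc)
                return;

        /* invalidate all descriptors before handing the ring back to HW */
        for (i = 0; i < q->ndesc; i++)
                q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);

        Q_WRITE(dev, q, cpu_idx, 0);
        Q_WRITE(dev, q, dma_idx, 0);
        mt76_dma_sync_idx(dev, q);
}
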
210 mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q, in mt76_dma_add_rx_buf() argument
223 txwi = mt76_get_rxwi(dev); in mt76_dma_add_rx_buf()
227 rx_token = mt76_rx_token_consume(dev, data, txwi, buf->addr); in mt76_dma_add_rx_buf()
229 mt76_put_rxwi(dev, txwi); in mt76_dma_add_rx_buf()
255 mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q, in mt76_dma_add_buf() argument
317 mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx, in mt76_dma_tx_cleanup_idx() argument
323 dma_unmap_single(dev->dma_dev, e->dma_addr[0], e->dma_len[0], in mt76_dma_tx_cleanup_idx()
327 dma_unmap_single(dev->dma_dev, e->dma_addr[1], e->dma_len[1], in mt76_dma_tx_cleanup_idx()
338 mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q) in mt76_dma_kick_queue() argument
341 Q_WRITE(dev, q, cpu_idx, q->head); in mt76_dma_kick_queue()
345 mt76_dma_tx_cleanup(struct mt76_dev *dev, struct mt76_queue *q, bool flush) in mt76_dma_tx_cleanup() argument
357 last = Q_READ(dev, q, dma_idx); in mt76_dma_tx_cleanup()
360 mt76_dma_tx_cleanup_idx(dev, q, q->tail, &entry); in mt76_dma_tx_cleanup()
361 mt76_queue_tx_complete(dev, q, &entry); in mt76_dma_tx_cleanup()
364 if (!(dev->drv->drv_flags & MT_DRV_TXWI_NO_FREE)) in mt76_dma_tx_cleanup()
365 mt76_put_txwi(dev, entry.txwi); in mt76_dma_tx_cleanup()
369 last = Q_READ(dev, q, dma_idx); in mt76_dma_tx_cleanup()
375 mt76_dma_sync_idx(dev, q); in mt76_dma_tx_cleanup()
376 mt76_dma_kick_queue(dev, q); in mt76_dma_tx_cleanup()
381 wake_up(&dev->tx_wait); in mt76_dma_tx_cleanup()
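
mt76_dma_tx_cleanup() (entries at lines 345-381) reaps completed TX descriptors: it reads the hardware DMA index, walks the ring from q->tail up to that index, completes each entry, recycles the TXWI unless the driver sets MT_DRV_TXWI_NO_FREE, and on a flush resynchronizes and kicks the queue before waking tx_wait. A condensed sketch of that loop; the locking and the wake condition are assumptions:

static void
mt76_dma_tx_cleanup(struct mt76_dev *dev, struct mt76_queue *q, bool flush)
{
        struct mt76_queue_entry entry;
        int last;

        if (!q || !q->ndesc)
                return;

        spin_lock_bh(&q->cleanup_lock);
        if (flush)
                last = -1;
        else
                last = Q_READ(dev, q, dma_idx);

        while (q->queued > 0 && q->tail != last) {
                /* unmap the entry and complete its skb / TX status */
                mt76_dma_tx_cleanup_idx(dev, q, q->tail, &entry);
                mt76_queue_tx_complete(dev, q, &entry);

                /* recycle the TXWI unless the driver keeps it itself */
                if (entry.txwi &&
                    !(dev->drv->drv_flags & MT_DRV_TXWI_NO_FREE))
                        mt76_put_txwi(dev, entry.txwi);

                if (!flush && q->tail == last)
                        last = Q_READ(dev, q, dma_idx);
        }
        spin_unlock_bh(&q->cleanup_lock);

        if (flush) {
                spin_lock_bh(&q->lock);
                mt76_dma_sync_idx(dev, q);
                mt76_dma_kick_queue(dev, q);
                spin_unlock_bh(&q->lock);
        }

        if (!q->queued)
                wake_up(&dev->tx_wait);
}
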
385 mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx, in mt76_dma_get_buf() argument
404 struct mt76_txwi_cache *t = mt76_rx_token_release(dev, token); in mt76_dma_get_buf()
409 dma_sync_single_for_cpu(dev->dma_dev, t->dma_addr, in mt76_dma_get_buf()
417 mt76_put_rxwi(dev, t); in mt76_dma_get_buf()
430 dma_sync_single_for_cpu(dev->dma_dev, e->dma_addr[0], in mt76_dma_get_buf()
439 mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush, in mt76_dma_dequeue() argument
456 return mt76_dma_get_buf(dev, q, idx, len, info, more, drop); in mt76_dma_dequeue()
460 mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q, in mt76_dma_tx_queue_skb_raw() argument
466 if (test_bit(MT76_MCU_RESET, &dev->phy.state)) in mt76_dma_tx_queue_skb_raw()
472 addr = dma_map_single(dev->dma_dev, skb->data, skb->len, in mt76_dma_tx_queue_skb_raw()
474 if (unlikely(dma_mapping_error(dev->dma_dev, addr))) in mt76_dma_tx_queue_skb_raw()
481 mt76_dma_add_buf(dev, q, &buf, 1, tx_info, skb, NULL); in mt76_dma_tx_queue_skb_raw()
482 mt76_dma_kick_queue(dev, q); in mt76_dma_tx_queue_skb_raw()
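
mt76_dma_tx_queue_skb_raw() (entries at lines 460-482) is the simple path used for MCU frames: bail out while the MCU is resetting, map the whole skb as a single buffer, add it to the ring and kick the queue. A sketch from those entries; the ring-full check and the error path are assumptions:

static int
mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q,
                          struct sk_buff *skb, u32 tx_info)
{
        struct mt76_queue_buf buf = {};
        dma_addr_t addr;

        if (test_bit(MT76_MCU_RESET, &dev->phy.state) ||
            q->queued + 1 >= q->ndesc - 1)
                goto error;

        addr = dma_map_single(dev->dma_dev, skb->data, skb->len,
                              DMA_TO_DEVICE);
        if (unlikely(dma_mapping_error(dev->dma_dev, addr)))
                goto error;

        buf.addr = addr;
        buf.len = skb->len;

        spin_lock_bh(&q->lock);
        mt76_dma_add_buf(dev, q, &buf, 1, tx_info, skb, NULL);
        mt76_dma_kick_queue(dev, q);
        spin_unlock_bh(&q->lock);

        return 0;

error:
        dev_kfree_skb(skb);
        return -ENOMEM;
}
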
493 mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q, in mt76_dma_tx_queue_skb() argument
510 if (test_bit(MT76_RESET, &dev->phy.state)) in mt76_dma_tx_queue_skb()
513 t = mt76_get_txwi(dev); in mt76_dma_tx_queue_skb()
517 txwi = mt76_get_txwi_ptr(dev, t); in mt76_dma_tx_queue_skb()
520 if (dev->drv->drv_flags & MT_DRV_TX_ALIGNED4_SKBS) in mt76_dma_tx_queue_skb()
524 addr = dma_map_single(dev->dma_dev, skb->data, len, DMA_TO_DEVICE); in mt76_dma_tx_queue_skb()
525 if (unlikely(dma_mapping_error(dev->dma_dev, addr))) in mt76_dma_tx_queue_skb()
529 tx_info.buf[n++].len = dev->drv->txwi_size; in mt76_dma_tx_queue_skb()
537 addr = dma_map_single(dev->dma_dev, iter->data, iter->len, in mt76_dma_tx_queue_skb()
539 if (unlikely(dma_mapping_error(dev->dma_dev, addr))) in mt76_dma_tx_queue_skb()
552 dma_sync_single_for_cpu(dev->dma_dev, t->dma_addr, dev->drv->txwi_size, in mt76_dma_tx_queue_skb()
554 ret = dev->drv->tx_prepare_skb(dev, txwi, qid, wcid, sta, &tx_info); in mt76_dma_tx_queue_skb()
555 dma_sync_single_for_device(dev->dma_dev, t->dma_addr, dev->drv->txwi_size, in mt76_dma_tx_queue_skb()
560 return mt76_dma_add_buf(dev, q, tx_info.buf, tx_info.nbuf, in mt76_dma_tx_queue_skb()
565 dma_unmap_single(dev->dma_dev, tx_info.buf[n].addr, in mt76_dma_tx_queue_skb()
571 if (mt76_is_testmode_skb(dev, skb, &hw)) { in mt76_dma_tx_queue_skb()
579 mt76_put_txwi(dev, t); in mt76_dma_tx_queue_skb()
583 hw = mt76_tx_status_get_hw(dev, tx_info.skb); in mt76_dma_tx_queue_skb()
584 spin_lock_bh(&dev->rx_lock); in mt76_dma_tx_queue_skb()
586 spin_unlock_bh(&dev->rx_lock); in mt76_dma_tx_queue_skb()
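
Within mt76_dma_tx_queue_skb() (entries at lines 493-586), the detail worth calling out is the ownership handoff around the driver's tx_prepare_skb() hook: the cached TXWI stays DMA-mapped for its whole lifetime, so it is synced to the CPU, filled in by the driver callback, and synced back to the device before the buffers are queued. The fragment below reproduces just that tail of the function; the surrounding fragment-mapping loop and error unwinding are omitted, and the error branch shown is an assumption:

        /* tail of mt76_dma_tx_queue_skb(): hand the TXWI to the driver */
        dma_sync_single_for_cpu(dev->dma_dev, t->dma_addr, dev->drv->txwi_size,
                                DMA_TO_DEVICE);
        ret = dev->drv->tx_prepare_skb(dev, txwi, qid, wcid, sta, &tx_info);
        dma_sync_single_for_device(dev->dma_dev, t->dma_addr, dev->drv->txwi_size,
                                   DMA_TO_DEVICE);
        if (ret < 0)
                goto unmap;

        return mt76_dma_add_buf(dev, q, tx_info.buf, tx_info.nbuf,
                                tx_info.info, tx_info.skb, t);
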
592 mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q, in mt76_dma_rx_fill() argument
616 dma_sync_single_for_device(dev->dma_dev, addr, len, dir); in mt76_dma_rx_fill()
621 if (mt76_dma_add_rx_buf(dev, q, &qbuf, buf) < 0) { in mt76_dma_rx_fill()
629 mt76_dma_kick_queue(dev, q); in mt76_dma_rx_fill()
636 int mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset) in mt76_dma_wed_setup() argument
639 struct mtk_wed_device *wed = &dev->mmio.wed; in mt76_dma_wed_setup()
665 mt76_dma_queue_reset(dev, q); in mt76_dma_wed_setup()
666 mt76_dma_rx_fill(dev, q, false); in mt76_dma_wed_setup()
690 mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q, in mt76_dma_alloc_queue() argument
699 q->regs = dev->mmio.regs + ring_base + idx * MT_RING_SIZE; in mt76_dma_alloc_queue()
705 q->desc = dmam_alloc_coherent(dev->dma_dev, size, &q->desc_dma, GFP_KERNEL); in mt76_dma_alloc_queue()
710 q->entry = devm_kzalloc(dev->dev, size, GFP_KERNEL); in mt76_dma_alloc_queue()
714 ret = mt76_create_page_pool(dev, q); in mt76_dma_alloc_queue()
718 ret = mt76_dma_wed_setup(dev, q, false); in mt76_dma_alloc_queue()
723 mt76_dma_queue_reset(dev, q); in mt76_dma_alloc_queue()
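
mt76_dma_alloc_queue() (entries at lines 690-723) wires a ring up end to end: register block at ring_base + idx * MT_RING_SIZE, descriptor memory from dmam_alloc_coherent(), per-entry software state from devm_kzalloc(), a page pool for RX buffers, optional WED setup, and a final queue reset. A condensed sketch of that order; field initialization, sizes and the WED special cases are assumptions:

static int
mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
                     int idx, int n_desc, int bufsize, u32 ring_base)
{
        int ret, size;

        spin_lock_init(&q->lock);
        q->regs = dev->mmio.regs + ring_base + idx * MT_RING_SIZE;
        q->ndesc = n_desc;
        q->buf_size = bufsize;
        q->hw_idx = idx;

        /* descriptor ring, managed (released with the device) */
        size = q->ndesc * sizeof(struct mt76_desc);
        q->desc = dmam_alloc_coherent(dev->dma_dev, size, &q->desc_dma, GFP_KERNEL);
        if (!q->desc)
                return -ENOMEM;

        /* per-descriptor software state */
        size = q->ndesc * sizeof(*q->entry);
        q->entry = devm_kzalloc(dev->dev, size, GFP_KERNEL);
        if (!q->entry)
                return -ENOMEM;

        ret = mt76_create_page_pool(dev, q);
        if (ret)
                return ret;

        ret = mt76_dma_wed_setup(dev, q, false);
        if (ret)
                return ret;

        mt76_dma_queue_reset(dev, q);

        return 0;
}
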
729 mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q) in mt76_dma_rx_cleanup() argument
740 buf = mt76_dma_dequeue(dev, q, true, NULL, NULL, &more, NULL); in mt76_dma_rx_cleanup()
756 mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid) in mt76_dma_rx_reset() argument
758 struct mt76_queue *q = &dev->q_rx[qid]; in mt76_dma_rx_reset()
767 mt76_dma_rx_cleanup(dev, q); in mt76_dma_rx_reset()
770 mt76_dma_wed_setup(dev, q, true); in mt76_dma_rx_reset()
772 mt76_dma_sync_idx(dev, q); in mt76_dma_rx_reset()
773 mt76_dma_rx_fill(dev, q, false); in mt76_dma_rx_reset()
778 mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data, in mt76_add_fragment() argument
799 dev->drv->rx_skb(dev, q - dev->q_rx, skb, &info); in mt76_add_fragment()
805 mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget) in mt76_dma_rx_process() argument
816 dma_idx = Q_READ(dev, q, dma_idx); in mt76_dma_rx_process()
826 dma_idx = Q_READ(dev, q, dma_idx); in mt76_dma_rx_process()
832 data = mt76_dma_dequeue(dev, q, false, &len, &info, &more, in mt76_dma_rx_process()
852 mt76_add_fragment(dev, q, data, len, more, info, in mt76_dma_rx_process()
857 if (!more && dev->drv->rx_check && in mt76_dma_rx_process()
858 !(dev->drv->rx_check(dev, data, len))) in mt76_dma_rx_process()
878 dev->drv->rx_skb(dev, q - dev->q_rx, skb, &info); in mt76_dma_rx_process()
885 mt76_dma_rx_fill(dev, q, true); in mt76_dma_rx_process()
891 struct mt76_dev *dev; in mt76_dma_rx_poll() local
894 dev = container_of(napi->dev, struct mt76_dev, napi_dev); in mt76_dma_rx_poll()
895 qid = napi - dev->napi; in mt76_dma_rx_poll()
900 cur = mt76_dma_rx_process(dev, &dev->q_rx[qid], budget - done); in mt76_dma_rx_poll()
901 mt76_rx_poll_complete(dev, qid, napi); in mt76_dma_rx_poll()
908 dev->drv->rx_poll_complete(dev, qid); in mt76_dma_rx_poll()
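
mt76_dma_rx_poll() (entries at lines 891-908) recovers the mt76_dev via container_of() on the dummy NAPI netdev and derives the queue id from the napi pointer's offset into dev->napi[], then alternates mt76_dma_rx_process() and mt76_rx_poll_complete() until the budget is spent. A sketch from those entries; the RCU bracketing and the napi_complete() handling are assumptions:

static int
mt76_dma_rx_poll(struct napi_struct *napi, int budget)
{
        struct mt76_dev *dev;
        int qid, done = 0, cur;

        /* the NAPI contexts hang off a dummy netdev embedded in mt76_dev */
        dev = container_of(napi->dev, struct mt76_dev, napi_dev);
        qid = napi - dev->napi;

        rcu_read_lock();

        do {
                cur = mt76_dma_rx_process(dev, &dev->q_rx[qid], budget - done);
                mt76_rx_poll_complete(dev, qid, napi);
                done += cur;
        } while (cur && done < budget);

        rcu_read_unlock();

        if (done < budget && napi_complete(napi))
                dev->drv->rx_poll_complete(dev, qid);

        return done;
}
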
915 mt76_dma_init(struct mt76_dev *dev, in mt76_dma_init() argument
920 init_dummy_netdev(&dev->napi_dev); in mt76_dma_init()
921 init_dummy_netdev(&dev->tx_napi_dev); in mt76_dma_init()
922 snprintf(dev->napi_dev.name, sizeof(dev->napi_dev.name), "%s", in mt76_dma_init()
923 wiphy_name(dev->hw->wiphy)); in mt76_dma_init()
924 dev->napi_dev.threaded = 1; in mt76_dma_init()
925 init_completion(&dev->mmio.wed_reset); in mt76_dma_init()
926 init_completion(&dev->mmio.wed_reset_complete); in mt76_dma_init()
928 mt76_for_each_q_rx(dev, i) { in mt76_dma_init()
929 netif_napi_add(&dev->napi_dev, &dev->napi[i], poll); in mt76_dma_init()
930 mt76_dma_rx_fill(dev, &dev->q_rx[i], false); in mt76_dma_init()
931 napi_enable(&dev->napi[i]); in mt76_dma_init()
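
mt76_dma_init() (entries at lines 915-931) sets up the dummy NAPI netdevs, names the RX one after the wiphy, enables threaded NAPI, and then for every RX queue registers a NAPI context, pre-fills the ring and enables polling. A sketch built from the listed entries; the return value and anything not shown above are assumptions:

static int
mt76_dma_init(struct mt76_dev *dev,
              int (*poll)(struct napi_struct *napi, int budget))
{
        int i;

        init_dummy_netdev(&dev->napi_dev);
        init_dummy_netdev(&dev->tx_napi_dev);
        snprintf(dev->napi_dev.name, sizeof(dev->napi_dev.name), "%s",
                 wiphy_name(dev->hw->wiphy));
        dev->napi_dev.threaded = 1;
        init_completion(&dev->mmio.wed_reset);
        init_completion(&dev->mmio.wed_reset_complete);

        mt76_for_each_q_rx(dev, i) {
                netif_napi_add(&dev->napi_dev, &dev->napi[i], poll);
                mt76_dma_rx_fill(dev, &dev->q_rx[i], false);
                napi_enable(&dev->napi[i]);
        }

        return 0;
}
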
949 void mt76_dma_attach(struct mt76_dev *dev) in mt76_dma_attach() argument
951 dev->queue_ops = &mt76_dma_ops; in mt76_dma_attach()
955 void mt76_dma_cleanup(struct mt76_dev *dev) in mt76_dma_cleanup() argument
959 mt76_worker_disable(&dev->tx_worker); in mt76_dma_cleanup()
960 netif_napi_del(&dev->tx_napi); in mt76_dma_cleanup()
962 for (i = 0; i < ARRAY_SIZE(dev->phys); i++) { in mt76_dma_cleanup()
963 struct mt76_phy *phy = dev->phys[i]; in mt76_dma_cleanup()
970 mt76_dma_tx_cleanup(dev, phy->q_tx[j], true); in mt76_dma_cleanup()
973 for (i = 0; i < ARRAY_SIZE(dev->q_mcu); i++) in mt76_dma_cleanup()
974 mt76_dma_tx_cleanup(dev, dev->q_mcu[i], true); in mt76_dma_cleanup()
976 mt76_for_each_q_rx(dev, i) { in mt76_dma_cleanup()
977 struct mt76_queue *q = &dev->q_rx[i]; in mt76_dma_cleanup()
979 netif_napi_del(&dev->napi[i]); in mt76_dma_cleanup()
980 mt76_dma_rx_cleanup(dev, q); in mt76_dma_cleanup()
985 mt76_free_pending_txwi(dev); in mt76_dma_cleanup()
986 mt76_free_pending_rxwi(dev); in mt76_dma_cleanup()
988 if (mtk_wed_device_active(&dev->mmio.wed)) in mt76_dma_cleanup()
989 mtk_wed_device_detach(&dev->mmio.wed); in mt76_dma_cleanup()
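
mt76_dma_cleanup() (entries at lines 955-989) tears everything down in reverse order: stop the TX worker and TX NAPI, flush every per-phy and MCU TX queue, delete the RX NAPI contexts and drain the RX rings, release the pending TXWI/RXWI caches, and finally detach from WED if it was active. A condensed sketch of that ordering; the per-queue page-pool teardown and other details not listed above are simplified:

void mt76_dma_cleanup(struct mt76_dev *dev)
{
        int i;

        mt76_worker_disable(&dev->tx_worker);
        netif_napi_del(&dev->tx_napi);

        /* flush every per-phy TX queue, then the MCU queues */
        for (i = 0; i < ARRAY_SIZE(dev->phys); i++) {
                struct mt76_phy *phy = dev->phys[i];
                int j;

                if (!phy)
                        continue;

                for (j = 0; j < ARRAY_SIZE(phy->q_tx); j++)
                        mt76_dma_tx_cleanup(dev, phy->q_tx[j], true);
        }

        for (i = 0; i < ARRAY_SIZE(dev->q_mcu); i++)
                mt76_dma_tx_cleanup(dev, dev->q_mcu[i], true);

        /* drop RX NAPI contexts and drain the RX rings */
        mt76_for_each_q_rx(dev, i) {
                netif_napi_del(&dev->napi[i]);
                mt76_dma_rx_cleanup(dev, &dev->q_rx[i]);
        }

        /* release cached descriptors, then detach from WED if it was in use */
        mt76_free_pending_txwi(dev);
        mt76_free_pending_rxwi(dev);

        if (mtk_wed_device_active(&dev->mmio.wed))
                mtk_wed_device_detach(&dev->mmio.wed);
}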