Lines matching refs:dev in the mt7601u USB DMA code (dma.c):
11 static int mt7601u_submit_rx_buf(struct mt7601u_dev *dev,
28 mt7601u_rx_skb_from_seg(struct mt7601u_dev *dev, struct mt7601u_rxwi *rxwi, in mt7601u_rx_skb_from_seg() argument
38 true_len = mt76_mac_process_rx(dev, skb, data, rxwi); in mt7601u_rx_skb_from_seg()
70 dev_err_ratelimited(dev->dev, "Error: incorrect frame len:%u hdr:%u\n", in mt7601u_rx_skb_from_seg()
76 static void mt7601u_rx_process_seg(struct mt7601u_dev *dev, u8 *data, in mt7601u_rx_process_seg() argument
98 dev_err_once(dev->dev, "Error: RXWI zero fields are set\n"); in mt7601u_rx_process_seg()
100 dev_err_once(dev->dev, "Error: RX path seen a non-pkt urb\n"); in mt7601u_rx_process_seg()
102 trace_mt_rx(dev, rxwi, fce_info); in mt7601u_rx_process_seg()
104 skb = mt7601u_rx_skb_from_seg(dev, rxwi, data, seg_len, truesize, p); in mt7601u_rx_process_seg()
111 ieee80211_rx_list(dev->hw, NULL, skb, list); in mt7601u_rx_process_seg()
134 mt7601u_rx_process_entry(struct mt7601u_dev *dev, struct mt7601u_dma_buf_rx *e) in mt7601u_rx_process_entry() argument
142 if (!test_bit(MT7601U_STATE_INITIALIZED, &dev->state)) in mt7601u_rx_process_entry()
150 mt7601u_rx_process_seg(dev, data, seg_len, in mt7601u_rx_process_entry()
159 trace_mt_rx_dma_aggr(dev, cnt, !!new_p); in mt7601u_rx_process_entry()
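mt7601u_rx_process_seg() hands each completed frame to mac80211 with ieee80211_rx_list(), so the caller, mt7601u_rx_process_entry(), can deliver a whole USB transfer's worth of frames to the network stack in one batch. A minimal sketch of that batching pattern, assuming a hypothetical my_deliver_rx() wrapper around the mac80211/netdev calls:

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/mac80211.h>

/* Sketch: batch delivery of received frames. Must run with BHs disabled
 * (e.g. from a tasklet, as in this driver). @frames is a hypothetical
 * queue of fully processed skbs.
 */
static void my_deliver_rx(struct ieee80211_hw *hw, struct sk_buff_head *frames)
{
	struct sk_buff *skb;
	LIST_HEAD(list);

	while ((skb = __skb_dequeue(frames)))
		/* mac80211 validates the frame and appends it to @list */
		ieee80211_rx_list(hw, NULL, skb, &list);

	/* hand the whole batch to the network stack in one go */
	netif_receive_skb_list(&list);
}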
171 mt7601u_rx_get_pending_entry(struct mt7601u_dev *dev) in mt7601u_rx_get_pending_entry() argument
173 struct mt7601u_rx_queue *q = &dev->rx_q; in mt7601u_rx_get_pending_entry()
177 spin_lock_irqsave(&dev->rx_lock, flags); in mt7601u_rx_get_pending_entry()
186 spin_unlock_irqrestore(&dev->rx_lock, flags); in mt7601u_rx_get_pending_entry()
193 struct mt7601u_dev *dev = urb->context; in mt7601u_complete_rx() local
194 struct mt7601u_rx_queue *q = &dev->rx_q; in mt7601u_complete_rx()
207 dev_err_ratelimited(dev->dev, "rx urb failed: %d\n", in mt7601u_complete_rx()
214 spin_lock_irqsave(&dev->rx_lock, flags); in mt7601u_complete_rx()
220 tasklet_schedule(&dev->rx_tasklet); in mt7601u_complete_rx()
222 spin_unlock_irqrestore(&dev->rx_lock, flags); in mt7601u_complete_rx()
227 struct mt7601u_dev *dev = from_tasklet(dev, t, rx_tasklet); in mt7601u_rx_tasklet() local
230 while ((e = mt7601u_rx_get_pending_entry(dev))) { in mt7601u_rx_tasklet()
234 mt7601u_rx_process_entry(dev, e); in mt7601u_rx_tasklet()
235 mt7601u_submit_rx_buf(dev, e, GFP_ATOMIC); in mt7601u_rx_tasklet()
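mt7601u_complete_rx() and mt7601u_rx_tasklet() together form the usual USB RX loop: the URB completion, running in interrupt context, only marks the buffer as pending and schedules a tasklet; the tasklet then processes each pending entry in BH context and resubmits its URB with GFP_ATOMIC. A condensed sketch of that split (struct my_dev, struct my_rx_buf and the my_* helpers are placeholders, not the driver's real types):

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/usb.h>

struct my_rx_buf {
	struct urb *urb;
};

struct my_dev {
	spinlock_t rx_lock;
	struct tasklet_struct rx_tasklet;
	unsigned int rx_pending;
};

/* Placeholder helpers standing in for the driver's queue handling. */
struct my_rx_buf *my_get_pending_rx(struct my_dev *dev);
void my_process_rx(struct my_dev *dev, struct my_rx_buf *e);

/* URB completion: interrupt context, so do no real work here. */
static void my_complete_rx(struct urb *urb)
{
	struct my_dev *dev = urb->context;	/* set when the URB was filled */
	unsigned long flags;

	spin_lock_irqsave(&dev->rx_lock, flags);
	dev->rx_pending++;
	tasklet_schedule(&dev->rx_tasklet);
	spin_unlock_irqrestore(&dev->rx_lock, flags);
}

/* Tasklet: drain pending buffers and give each URB back to the USB core. */
static void my_rx_tasklet(struct tasklet_struct *t)
{
	struct my_dev *dev = from_tasklet(dev, t, rx_tasklet);
	struct my_rx_buf *e;

	while ((e = my_get_pending_rx(dev))) {
		my_process_rx(dev, e);
		usb_submit_urb(e->urb, GFP_ATOMIC);	/* error handling omitted */
	}
}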
242 struct mt7601u_dev *dev = q->dev; in mt7601u_complete_tx() local
253 dev_err_ratelimited(dev->dev, "tx urb failed: %d\n", in mt7601u_complete_tx()
260 spin_lock_irqsave(&dev->tx_lock, flags); in mt7601u_complete_tx()
266 trace_mt_tx_dma_done(dev, skb); in mt7601u_complete_tx()
268 __skb_queue_tail(&dev->tx_skb_done, skb); in mt7601u_complete_tx()
269 tasklet_schedule(&dev->tx_tasklet); in mt7601u_complete_tx()
272 ieee80211_wake_queue(dev->hw, skb_get_queue_mapping(skb)); in mt7601u_complete_tx()
277 spin_unlock_irqrestore(&dev->tx_lock, flags); in mt7601u_complete_tx()
282 struct mt7601u_dev *dev = from_tasklet(dev, t, tx_tasklet); in mt7601u_tx_tasklet() local
288 spin_lock_irqsave(&dev->tx_lock, flags); in mt7601u_tx_tasklet()
290 set_bit(MT7601U_STATE_MORE_STATS, &dev->state); in mt7601u_tx_tasklet()
291 if (!test_and_set_bit(MT7601U_STATE_READING_STATS, &dev->state)) in mt7601u_tx_tasklet()
292 queue_delayed_work(dev->stat_wq, &dev->stat_work, in mt7601u_tx_tasklet()
295 skb_queue_splice_init(&dev->tx_skb_done, &skbs); in mt7601u_tx_tasklet()
297 spin_unlock_irqrestore(&dev->tx_lock, flags); in mt7601u_tx_tasklet()
302 mt7601u_tx_status(dev, skb); in mt7601u_tx_tasklet()
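On the TX side, mt7601u_complete_tx() defers all real work the same way: it queues the finished skb on dev->tx_skb_done under tx_lock and schedules the TX tasklet, which splices the whole queue off in one locked operation and reports status with the lock dropped. A sketch of that splice pattern (struct my_dev and my_report_tx_status() are placeholders):

#include <linux/skbuff.h>
#include <linux/spinlock.h>

struct my_dev {
	spinlock_t tx_lock;
	struct sk_buff_head tx_skb_done;	/* filled from the URB completion */
};

void my_report_tx_status(struct my_dev *dev, struct sk_buff *skb);	/* placeholder */

/* BH context: move completed skbs off the shared queue in one locked
 * splice, then process them without holding the lock.
 */
static void my_tx_done_tasklet(struct my_dev *dev)
{
	struct sk_buff_head skbs;
	struct sk_buff *skb;
	unsigned long flags;

	__skb_queue_head_init(&skbs);

	spin_lock_irqsave(&dev->tx_lock, flags);
	skb_queue_splice_init(&dev->tx_skb_done, &skbs);
	spin_unlock_irqrestore(&dev->tx_lock, flags);

	while ((skb = __skb_dequeue(&skbs)))
		my_report_tx_status(dev, skb);
}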
306 static int mt7601u_dma_submit_tx(struct mt7601u_dev *dev, in mt7601u_dma_submit_tx() argument
309 struct usb_device *usb_dev = mt7601u_to_usb_dev(dev); in mt7601u_dma_submit_tx()
310 unsigned snd_pipe = usb_sndbulkpipe(usb_dev, dev->out_eps[ep]); in mt7601u_dma_submit_tx()
312 struct mt7601u_tx_queue *q = &dev->tx_q[ep]; in mt7601u_dma_submit_tx()
316 spin_lock_irqsave(&dev->tx_lock, flags); in mt7601u_dma_submit_tx()
332 set_bit(MT7601U_STATE_REMOVED, &dev->state); in mt7601u_dma_submit_tx()
334 dev_err(dev->dev, "Error: TX urb submit failed:%d\n", in mt7601u_dma_submit_tx()
344 ieee80211_stop_queue(dev->hw, skb_get_queue_mapping(skb)); in mt7601u_dma_submit_tx()
346 spin_unlock_irqrestore(&dev->tx_lock, flags); in mt7601u_dma_submit_tx()
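mt7601u_dma_submit_tx() is the standard bulk-OUT submit: build the URB with usb_fill_bulk_urb() against the endpoint's send pipe and submit it with GFP_ATOMIC, since the call happens under tx_lock. When the per-endpoint ring fills up, the driver also calls ieee80211_stop_queue() so mac80211 stops handing in frames until a completion frees a slot. A minimal sketch of the fill/submit step (everything outside the USB API is a placeholder):

#include <linux/skbuff.h>
#include <linux/usb.h>

/* Submit one skb on a bulk OUT endpoint; returns 0 or a negative errno. */
static int my_submit_tx(struct usb_device *usb_dev, struct urb *urb,
			u8 ep_addr, struct sk_buff *skb,
			usb_complete_t complete_fn, void *context)
{
	unsigned int pipe = usb_sndbulkpipe(usb_dev, ep_addr);

	usb_fill_bulk_urb(urb, usb_dev, pipe, skb->data, skb->len,
			  complete_fn, context);

	/* GFP_ATOMIC: typically called from the TX path under a spinlock */
	return usb_submit_urb(urb, GFP_ATOMIC);
}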
366 int mt7601u_dma_enqueue_tx(struct mt7601u_dev *dev, struct sk_buff *skb, in mt7601u_dma_enqueue_tx() argument
381 ret = mt7601u_dma_submit_tx(dev, skb, ep); in mt7601u_dma_enqueue_tx()
383 ieee80211_free_txskb(dev->hw, skb); in mt7601u_dma_enqueue_tx()
390 static void mt7601u_kill_rx(struct mt7601u_dev *dev) in mt7601u_kill_rx() argument
394 for (i = 0; i < dev->rx_q.entries; i++) in mt7601u_kill_rx()
395 usb_poison_urb(dev->rx_q.e[i].urb); in mt7601u_kill_rx()
398 static int mt7601u_submit_rx_buf(struct mt7601u_dev *dev, in mt7601u_submit_rx_buf() argument
401 struct usb_device *usb_dev = mt7601u_to_usb_dev(dev); in mt7601u_submit_rx_buf()
406 pipe = usb_rcvbulkpipe(usb_dev, dev->in_eps[MT_EP_IN_PKT_RX]); in mt7601u_submit_rx_buf()
409 mt7601u_complete_rx, dev); in mt7601u_submit_rx_buf()
411 trace_mt_submit_urb(dev, e->urb); in mt7601u_submit_rx_buf()
414 dev_err(dev->dev, "Error: submit RX URB failed:%d\n", ret); in mt7601u_submit_rx_buf()
419 static int mt7601u_submit_rx(struct mt7601u_dev *dev) in mt7601u_submit_rx() argument
423 for (i = 0; i < dev->rx_q.entries; i++) { in mt7601u_submit_rx()
424 ret = mt7601u_submit_rx_buf(dev, &dev->rx_q.e[i], GFP_KERNEL); in mt7601u_submit_rx()
432 static void mt7601u_free_rx(struct mt7601u_dev *dev) in mt7601u_free_rx() argument
436 for (i = 0; i < dev->rx_q.entries; i++) { in mt7601u_free_rx()
437 __free_pages(dev->rx_q.e[i].p, MT_RX_ORDER); in mt7601u_free_rx()
438 usb_free_urb(dev->rx_q.e[i].urb); in mt7601u_free_rx()
442 static int mt7601u_alloc_rx(struct mt7601u_dev *dev) in mt7601u_alloc_rx() argument
446 memset(&dev->rx_q, 0, sizeof(dev->rx_q)); in mt7601u_alloc_rx()
447 dev->rx_q.dev = dev; in mt7601u_alloc_rx()
448 dev->rx_q.entries = N_RX_ENTRIES; in mt7601u_alloc_rx()
451 dev->rx_q.e[i].urb = usb_alloc_urb(0, GFP_KERNEL); in mt7601u_alloc_rx()
452 dev->rx_q.e[i].p = dev_alloc_pages(MT_RX_ORDER); in mt7601u_alloc_rx()
454 if (!dev->rx_q.e[i].urb || !dev->rx_q.e[i].p) in mt7601u_alloc_rx()
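mt7601u_alloc_rx() pairs every RX queue entry with a URB and a high-order page that serves as the transfer buffer; if either allocation fails, the error path frees whatever was already allocated. A sketch of that allocation loop (MY_RX_ORDER and struct my_rx_entry are placeholders for MT_RX_ORDER and the driver's own queue types):

#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/usb.h>

#define MY_RX_ORDER	3	/* placeholder; the real order is MT_RX_ORDER */

struct my_rx_entry {
	struct urb *urb;
	struct page *p;
};

static int my_alloc_rx(struct my_rx_entry *e, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		e[i].urb = usb_alloc_urb(0, GFP_KERNEL);
		e[i].p = dev_alloc_pages(MY_RX_ORDER);
		if (!e[i].urb || !e[i].p)
			return -ENOMEM;	/* caller unwinds with usb_free_urb()/__free_pages() */
	}

	return 0;
}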
468 mt7601u_tx_status(q->dev, q->e[i].skb); in mt7601u_free_tx_queue()
473 static void mt7601u_free_tx(struct mt7601u_dev *dev) in mt7601u_free_tx() argument
477 if (!dev->tx_q) in mt7601u_free_tx()
481 mt7601u_free_tx_queue(&dev->tx_q[i]); in mt7601u_free_tx()
484 static int mt7601u_alloc_tx_queue(struct mt7601u_dev *dev, in mt7601u_alloc_tx_queue() argument
489 q->dev = dev; in mt7601u_alloc_tx_queue()
501 static int mt7601u_alloc_tx(struct mt7601u_dev *dev) in mt7601u_alloc_tx() argument
505 dev->tx_q = devm_kcalloc(dev->dev, __MT_EP_OUT_MAX, in mt7601u_alloc_tx()
506 sizeof(*dev->tx_q), GFP_KERNEL); in mt7601u_alloc_tx()
507 if (!dev->tx_q) in mt7601u_alloc_tx()
511 if (mt7601u_alloc_tx_queue(dev, &dev->tx_q[i])) in mt7601u_alloc_tx()
517 int mt7601u_dma_init(struct mt7601u_dev *dev) in mt7601u_dma_init() argument
521 tasklet_setup(&dev->tx_tasklet, mt7601u_tx_tasklet); in mt7601u_dma_init()
522 tasklet_setup(&dev->rx_tasklet, mt7601u_rx_tasklet); in mt7601u_dma_init()
524 ret = mt7601u_alloc_tx(dev); in mt7601u_dma_init()
527 ret = mt7601u_alloc_rx(dev); in mt7601u_dma_init()
531 ret = mt7601u_submit_rx(dev); in mt7601u_dma_init()
537 mt7601u_dma_cleanup(dev); in mt7601u_dma_init()
541 void mt7601u_dma_cleanup(struct mt7601u_dev *dev) in mt7601u_dma_cleanup() argument
543 mt7601u_kill_rx(dev); in mt7601u_dma_cleanup()
545 tasklet_kill(&dev->rx_tasklet); in mt7601u_dma_cleanup()
547 mt7601u_free_rx(dev); in mt7601u_dma_cleanup()
548 mt7601u_free_tx(dev); in mt7601u_dma_cleanup()
550 tasklet_kill(&dev->tx_tasklet); in mt7601u_dma_cleanup()
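mt7601u_dma_init() and mt7601u_dma_cleanup() mirror each other: set up the TX and RX tasklets, allocate TX then RX resources, submit the RX URBs, and unwind through cleanup on any failure. On teardown the RX URBs are poisoned first, so no completion can run or resubmit anything while the tasklets are killed and the buffers freed. A sketch of that ordering (struct my_dev and the my_* helpers are placeholders for the driver's own functions):

#include <linux/interrupt.h>

struct my_dev {
	struct tasklet_struct tx_tasklet;
	struct tasklet_struct rx_tasklet;
};

/* Placeholder prototypes for the driver's own helpers. */
void my_tx_tasklet(struct tasklet_struct *t);
void my_rx_tasklet(struct tasklet_struct *t);
int my_alloc_tx(struct my_dev *dev);
int my_alloc_rx(struct my_dev *dev);
int my_submit_rx(struct my_dev *dev);
void my_kill_rx(struct my_dev *dev);		/* usb_poison_urb() on every RX URB */
void my_free_rx(struct my_dev *dev);
void my_free_tx(struct my_dev *dev);

static void my_dma_cleanup(struct my_dev *dev)
{
	/* usb_poison_urb() waits for each in-flight URB and blocks any
	 * resubmission, so nothing can race with the teardown below.
	 */
	my_kill_rx(dev);
	tasklet_kill(&dev->rx_tasklet);

	my_free_rx(dev);
	my_free_tx(dev);

	tasklet_kill(&dev->tx_tasklet);
}

static int my_dma_init(struct my_dev *dev)
{
	int ret;

	tasklet_setup(&dev->tx_tasklet, my_tx_tasklet);
	tasklet_setup(&dev->rx_tasklet, my_rx_tasklet);

	ret = my_alloc_tx(dev);
	if (ret)
		goto err;
	ret = my_alloc_rx(dev);
	if (ret)
		goto err;
	ret = my_submit_rx(dev);
	if (ret)
		goto err;

	return 0;
err:
	my_dma_cleanup(dev);
	return ret;
}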