// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
 */

#include "mt7601u.h"
#include "dma.h"
#include "usb.h"
#include "trace.h"

static int mt7601u_submit_rx_buf(struct mt7601u_dev *dev,
				 struct mt7601u_dma_buf_rx *e, gfp_t gfp);

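/* Return the 802.11 header length for the frame in @data, or 0 if the
 * buffer is too short to hold the header it advertises.
 */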
static unsigned int ieee80211_get_hdrlen_from_buf(const u8 *data, unsigned len)
{
	const struct ieee80211_hdr *hdr = (const struct ieee80211_hdr *)data;
	unsigned int hdrlen;

	if (unlikely(len < 10))
		return 0;
	hdrlen = ieee80211_hdrlen(hdr->frame_control);
	if (unlikely(hdrlen > len))
		return 0;
	return hdrlen;
}

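/* Build an skb for one RX segment. For paged RX only the 802.11 header
 * (plus a few bytes) is copied and the remainder of the frame is attached
 * as a page fragment; otherwise the whole frame is copied into the skb.
 */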
static struct sk_buff *
mt7601u_rx_skb_from_seg(struct mt7601u_dev *dev, struct mt7601u_rxwi *rxwi,
			void *data, u32 seg_len, u32 truesize, struct page *p)
{
	struct sk_buff *skb;
	u32 true_len, hdr_len = 0, copy, frag;

	skb = alloc_skb(p ? 128 : seg_len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	true_len = mt76_mac_process_rx(dev, skb, data, rxwi);
	if (!true_len || true_len > seg_len)
		goto bad_frame;

	hdr_len = ieee80211_get_hdrlen_from_buf(data, true_len);
	if (!hdr_len)
		goto bad_frame;

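	/* The hardware inserts 2 bytes of padding after the 802.11 header
	 * (L2 pad); copy the header out separately and skip over the pad
	 * when copying the rest of the frame.
	 */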
	if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_L2PAD)) {
		skb_put_data(skb, data, hdr_len);

		data += hdr_len + 2;
		true_len -= hdr_len;
		hdr_len = 0;
	}

	/* If not doing paged RX, the allocated skb will always have
	 * enough tailroom for the whole frame.
	 */
	copy = (true_len <= skb_tailroom(skb)) ? true_len : hdr_len + 8;
	frag = true_len - copy;

	skb_put_data(skb, data, copy);
	data += copy;

	if (frag) {
		skb_add_rx_frag(skb, 0, p, data - page_address(p),
				frag, truesize);
		get_page(p);
	}

	return skb;

bad_frame:
	dev_err_ratelimited(dev->dev, "Error: incorrect frame len:%u hdr:%u\n",
			    true_len, hdr_len);
	dev_kfree_skb(skb);
	return NULL;
}

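/* Process one segment of an RX URB: strip the DMA header, RXWI and
 * trailing FCE info, build an skb and queue it on @list for mac80211.
 */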
static void mt7601u_rx_process_seg(struct mt7601u_dev *dev, u8 *data,
				   u32 seg_len, struct page *p,
				   struct list_head *list)
{
	struct sk_buff *skb;
	struct mt7601u_rxwi *rxwi;
	u32 fce_info, truesize = seg_len;

	/* The DMA_INFO field at the beginning of the segment contains only
	 * some of the information; the FCE descriptor has to be read from
	 * the end of the segment.
	 */
	fce_info = get_unaligned_le32(data + seg_len - MT_FCE_INFO_LEN);
	seg_len -= MT_FCE_INFO_LEN;

	data += MT_DMA_HDR_LEN;
	seg_len -= MT_DMA_HDR_LEN;

	rxwi = (struct mt7601u_rxwi *) data;
	data += sizeof(struct mt7601u_rxwi);
	seg_len -= sizeof(struct mt7601u_rxwi);

	if (unlikely(rxwi->zero[0] || rxwi->zero[1] || rxwi->zero[2]))
		dev_err_once(dev->dev, "Error: RXWI zero fields are set\n");
	if (unlikely(FIELD_GET(MT_RXD_INFO_TYPE, fce_info)))
		dev_err_once(dev->dev, "Error: RX path seen a non-pkt urb\n");

	trace_mt_rx(dev, rxwi, fce_info);

	skb = mt7601u_rx_skb_from_seg(dev, rxwi, data, seg_len, truesize, p);
	if (!skb)
		return;

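	/* mac80211 RX runs with BHs disabled and under the RCU read lock;
	 * frames are accumulated on @list and fed to
	 * netif_receive_skb_list() in one batch by the caller.
	 */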
	local_bh_disable();
	rcu_read_lock();

	ieee80211_rx_list(dev->hw, NULL, skb, list);

	rcu_read_unlock();
	local_bh_enable();
}

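/* Return the length of the next aggregated RX segment (including the
 * DMA headers), or 0 if the remaining data cannot hold a valid segment.
 */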
static u16 mt7601u_rx_next_seg_len(u8 *data, u32 data_len)
{
	u32 min_seg_len = MT_DMA_HDR_LEN + MT_RX_INFO_LEN +
		sizeof(struct mt7601u_rxwi) + MT_FCE_INFO_LEN;
	u16 dma_len = get_unaligned_le16(data);

	if (data_len < min_seg_len ||
	    WARN_ON_ONCE(!dma_len) ||
	    WARN_ON_ONCE(dma_len + MT_DMA_HDRS > data_len) ||
	    WARN_ON_ONCE(dma_len & 0x3) ||
	    WARN_ON_ONCE(dma_len < min_seg_len))
		return 0;

	return MT_DMA_HDRS + dma_len;
}

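/* Process a completed RX URB which may aggregate multiple segments.
 * For large buffers a replacement page is allocated up front so the
 * segments can be attached to skbs as fragments of the old page.
 */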
static void
mt7601u_rx_process_entry(struct mt7601u_dev *dev, struct mt7601u_dma_buf_rx *e)
{
	u32 seg_len, data_len = e->urb->actual_length;
	u8 *data = page_address(e->p);
	struct page *new_p = NULL;
	LIST_HEAD(list);
	int cnt = 0;

	if (!test_bit(MT7601U_STATE_INITIALIZED, &dev->state))
		return;

	/* Copy the data instead of doing paged RX if there is very little
	 * of it in the buffer.
	 */
	if (data_len > 512)
		new_p = dev_alloc_pages(MT_RX_ORDER);

	while ((seg_len = mt7601u_rx_next_seg_len(data, data_len))) {
		mt7601u_rx_process_seg(dev, data, seg_len,
				       new_p ? e->p : NULL, &list);

		data_len -= seg_len;
		data += seg_len;
		cnt++;
	}

	if (cnt > 1)
		trace_mt_rx_dma_aggr(dev, cnt, !!new_p);

	netif_receive_skb_list(&list);

	if (new_p) {
		/* we have one extra ref from the allocator */
		put_page(e->p);
		e->p = new_p;
	}
}

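/* Pop the oldest completed RX buffer off the ring, or return NULL if
 * none are pending. Protected by rx_lock.
 */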
static struct mt7601u_dma_buf_rx *
mt7601u_rx_get_pending_entry(struct mt7601u_dev *dev)
{
	struct mt7601u_rx_queue *q = &dev->rx_q;
	struct mt7601u_dma_buf_rx *buf = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dev->rx_lock, flags);

	if (!q->pending)
		goto out;

	buf = &q->e[q->start];
	q->pending--;
	q->start = (q->start + 1) % q->entries;
out:
	spin_unlock_irqrestore(&dev->rx_lock, flags);

	return buf;
}

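/* RX URB completion handler; may run in interrupt context, hence the
 * irqsave locking.
 */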
static void mt7601u_complete_rx(struct urb *urb)
{
	struct mt7601u_dev *dev = urb->context;
	struct mt7601u_rx_queue *q = &dev->rx_q;
	unsigned long flags;

	/* do not schedule rx tasklet if urb has been unlinked
	 * or the device has been removed
	 */
	switch (urb->status) {
	case -ECONNRESET:
	case -ESHUTDOWN:
	case -ENOENT:
	case -EPROTO:
		return;
	default:
		dev_err_ratelimited(dev->dev, "rx urb failed: %d\n",
				    urb->status);
		fallthrough;
	case 0:
		break;
	}

	spin_lock_irqsave(&dev->rx_lock, flags);
	if (WARN_ONCE(q->e[q->end].urb != urb, "RX urb mismatch"))
		goto out;

	q->end = (q->end + 1) % q->entries;
	q->pending++;
	tasklet_schedule(&dev->rx_tasklet);
out:
	spin_unlock_irqrestore(&dev->rx_lock, flags);
}

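/* Bottom half for RX: process each completed buffer and resubmit its
 * URB.
 */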
static void mt7601u_rx_tasklet(struct tasklet_struct *t)
{
	struct mt7601u_dev *dev = from_tasklet(dev, t, rx_tasklet);
	struct mt7601u_dma_buf_rx *e;

	while ((e = mt7601u_rx_get_pending_entry(dev))) {
		if (e->urb->status)
			continue;

		mt7601u_rx_process_entry(dev, e);
		mt7601u_submit_rx_buf(dev, e, GFP_ATOMIC);
	}
}

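/* TX URB completion handler. Status reporting is deferred to the TX
 * tasklet; the mac80211 queue is woken once the ring's fill level drops
 * to 7/8 of its capacity.
 */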
static void mt7601u_complete_tx(struct urb *urb)
{
	struct mt7601u_tx_queue *q = urb->context;
	struct mt7601u_dev *dev = q->dev;
	struct sk_buff *skb;
	unsigned long flags;

	switch (urb->status) {
	case -ECONNRESET:
	case -ESHUTDOWN:
	case -ENOENT:
	case -EPROTO:
		return;
	default:
		dev_err_ratelimited(dev->dev, "tx urb failed: %d\n",
				    urb->status);
		fallthrough;
	case 0:
		break;
	}

	spin_lock_irqsave(&dev->tx_lock, flags);
	if (WARN_ONCE(q->e[q->start].urb != urb, "TX urb mismatch"))
		goto out;

	skb = q->e[q->start].skb;
	q->e[q->start].skb = NULL;
	trace_mt_tx_dma_done(dev, skb);

	__skb_queue_tail(&dev->tx_skb_done, skb);
	tasklet_schedule(&dev->tx_tasklet);

	if (q->used == q->entries - q->entries / 8)
		ieee80211_wake_queue(dev->hw, skb_get_queue_mapping(skb));

	q->start = (q->start + 1) % q->entries;
	q->used--;
out:
	spin_unlock_irqrestore(&dev->tx_lock, flags);
}

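/* Bottom half for TX: kick off delayed stats collection and report
 * status for all completed skbs.
 */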
static void mt7601u_tx_tasklet(struct tasklet_struct *t)
{
	struct mt7601u_dev *dev = from_tasklet(dev, t, tx_tasklet);
	struct sk_buff_head skbs;
	unsigned long flags;

	__skb_queue_head_init(&skbs);

	spin_lock_irqsave(&dev->tx_lock, flags);

	set_bit(MT7601U_STATE_MORE_STATS, &dev->state);
	if (!test_and_set_bit(MT7601U_STATE_READING_STATS, &dev->state))
		queue_delayed_work(dev->stat_wq, &dev->stat_work,
				   msecs_to_jiffies(10));

	skb_queue_splice_init(&dev->tx_skb_done, &skbs);

	spin_unlock_irqrestore(&dev->tx_lock, flags);

	while (!skb_queue_empty(&skbs)) {
		struct sk_buff *skb = __skb_dequeue(&skbs);

		mt7601u_tx_status(dev, skb);
	}
}

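/* Submit an skb on the bulk OUT endpoint @ep. Stops the mac80211 queue
 * when the ring fills up; -ENODEV marks the device as removed.
 */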
static int mt7601u_dma_submit_tx(struct mt7601u_dev *dev,
				 struct sk_buff *skb, u8 ep)
{
	struct usb_device *usb_dev = mt7601u_to_usb_dev(dev);
	unsigned snd_pipe = usb_sndbulkpipe(usb_dev, dev->out_eps[ep]);
	struct mt7601u_dma_buf_tx *e;
	struct mt7601u_tx_queue *q = &dev->tx_q[ep];
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dev->tx_lock, flags);

	if (WARN_ON(q->entries <= q->used)) {
		ret = -ENOSPC;
		goto out;
	}

	e = &q->e[q->end];
	usb_fill_bulk_urb(e->urb, usb_dev, snd_pipe, skb->data, skb->len,
			  mt7601u_complete_tx, q);
	ret = usb_submit_urb(e->urb, GFP_ATOMIC);
	if (ret) {
		/* Special-handle ENODEV from TX urb submission because it will
		 * often be the first ENODEV we see after device is removed.
		 */
		if (ret == -ENODEV)
			set_bit(MT7601U_STATE_REMOVED, &dev->state);
		else
			dev_err(dev->dev, "Error: TX urb submit failed:%d\n",
				ret);
		goto out;
	}

	q->end = (q->end + 1) % q->entries;
	q->used++;
	e->skb = skb;

	if (q->used >= q->entries)
		ieee80211_stop_queue(dev->hw, skb_get_queue_mapping(skb));
out:
	spin_unlock_irqrestore(&dev->tx_lock, flags);

	return ret;
}

/* Map hardware Q to USB endpoint number */
static u8 q2ep(u8 qid)
{
	/* TODO: take management packets to queue 5 */
	return qid + 1;
}

/* Map USB endpoint number to Q id in the DMA engine */
static enum mt76_qsel ep2dmaq(u8 ep)
{
	if (ep == 5)
		return MT_QSEL_MGMT;
	return MT_QSEL_EDCA;
}

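/* Wrap an skb in the DMA packet descriptor and submit it to the USB
 * endpoint corresponding to @hw_q. Frees the skb on submission error.
 */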
int mt7601u_dma_enqueue_tx(struct mt7601u_dev *dev, struct sk_buff *skb,
			   struct mt76_wcid *wcid, int hw_q)
{
	u8 ep = q2ep(hw_q);
	u32 dma_flags;
	int ret;

	dma_flags = MT_TXD_PKT_INFO_80211;
	if (wcid->hw_key_idx == 0xff)
		dma_flags |= MT_TXD_PKT_INFO_WIV;

	ret = mt7601u_dma_skb_wrap_pkt(skb, ep2dmaq(ep), dma_flags);
	if (ret)
		return ret;

	ret = mt7601u_dma_submit_tx(dev, skb, ep);
	if (ret) {
		ieee80211_free_txskb(dev->hw, skb);
		return ret;
	}

	return 0;
}

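/* Poison all RX URBs so they cannot be resubmitted during teardown. */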
static void mt7601u_kill_rx(struct mt7601u_dev *dev)
{
	int i;

	for (i = 0; i < dev->rx_q.entries; i++)
		usb_poison_urb(dev->rx_q.e[i].urb);
}

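/* (Re)submit a single RX URB covering the entry's page buffer. */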
static int mt7601u_submit_rx_buf(struct mt7601u_dev *dev,
				 struct mt7601u_dma_buf_rx *e, gfp_t gfp)
{
	struct usb_device *usb_dev = mt7601u_to_usb_dev(dev);
	u8 *buf = page_address(e->p);
	unsigned pipe;
	int ret;

	pipe = usb_rcvbulkpipe(usb_dev, dev->in_eps[MT_EP_IN_PKT_RX]);

	usb_fill_bulk_urb(e->urb, usb_dev, pipe, buf, MT_RX_URB_SIZE,
			  mt7601u_complete_rx, dev);

	trace_mt_submit_urb(dev, e->urb);
	ret = usb_submit_urb(e->urb, gfp);
	if (ret)
		dev_err(dev->dev, "Error: submit RX URB failed:%d\n", ret);

	return ret;
}

static int mt7601u_submit_rx(struct mt7601u_dev *dev)
{
	int i, ret;

	for (i = 0; i < dev->rx_q.entries; i++) {
		ret = mt7601u_submit_rx_buf(dev, &dev->rx_q.e[i], GFP_KERNEL);
		if (ret)
			return ret;
	}

	return 0;
}

static void mt7601u_free_rx(struct mt7601u_dev *dev)
{
	int i;

	for (i = 0; i < dev->rx_q.entries; i++) {
		__free_pages(dev->rx_q.e[i].p, MT_RX_ORDER);
		usb_free_urb(dev->rx_q.e[i].urb);
	}
}

static int mt7601u_alloc_rx(struct mt7601u_dev *dev)
{
	int i;

	memset(&dev->rx_q, 0, sizeof(dev->rx_q));
	dev->rx_q.dev = dev;
	dev->rx_q.entries = N_RX_ENTRIES;

	for (i = 0; i < N_RX_ENTRIES; i++) {
		dev->rx_q.e[i].urb = usb_alloc_urb(0, GFP_KERNEL);
		dev->rx_q.e[i].p = dev_alloc_pages(MT_RX_ORDER);

		if (!dev->rx_q.e[i].urb || !dev->rx_q.e[i].p)
			return -ENOMEM;
	}

	return 0;
}

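/* Free a TX queue: poison each URB and report status for any skb still
 * in flight before releasing it.
 */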
static void mt7601u_free_tx_queue(struct mt7601u_tx_queue *q)
{
	int i;

	for (i = 0; i < q->entries; i++) {
		usb_poison_urb(q->e[i].urb);
		if (q->e[i].skb)
			mt7601u_tx_status(q->dev, q->e[i].skb);
		usb_free_urb(q->e[i].urb);
	}
}

static void mt7601u_free_tx(struct mt7601u_dev *dev)
{
	int i;

	if (!dev->tx_q)
		return;

	for (i = 0; i < __MT_EP_OUT_MAX; i++)
		mt7601u_free_tx_queue(&dev->tx_q[i]);
}

static int mt7601u_alloc_tx_queue(struct mt7601u_dev *dev,
				  struct mt7601u_tx_queue *q)
{
	int i;

	q->dev = dev;
	q->entries = N_TX_ENTRIES;

	for (i = 0; i < N_TX_ENTRIES; i++) {
		q->e[i].urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!q->e[i].urb)
			return -ENOMEM;
	}

	return 0;
}

static int mt7601u_alloc_tx(struct mt7601u_dev *dev)
{
	int i;

	dev->tx_q = devm_kcalloc(dev->dev, __MT_EP_OUT_MAX,
				 sizeof(*dev->tx_q), GFP_KERNEL);
	if (!dev->tx_q)
		return -ENOMEM;

	for (i = 0; i < __MT_EP_OUT_MAX; i++)
		if (mt7601u_alloc_tx_queue(dev, &dev->tx_q[i]))
			return -ENOMEM;

	return 0;
}

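/* Set up the TX/RX tasklets, allocate the queues and prime all RX URBs. */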
int mt7601u_dma_init(struct mt7601u_dev *dev)
{
	int ret;

	tasklet_setup(&dev->tx_tasklet, mt7601u_tx_tasklet);
	tasklet_setup(&dev->rx_tasklet, mt7601u_rx_tasklet);

	ret = mt7601u_alloc_tx(dev);
	if (ret)
		goto err;
	ret = mt7601u_alloc_rx(dev);
	if (ret)
		goto err;

	ret = mt7601u_submit_rx(dev);
	if (ret)
		goto err;

	return 0;
err:
	mt7601u_dma_cleanup(dev);
	return ret;
}

void mt7601u_dma_cleanup(struct mt7601u_dev *dev)
{
	mt7601u_kill_rx(dev);

	tasklet_kill(&dev->rx_tasklet);

	mt7601u_free_rx(dev);
	mt7601u_free_tx(dev);

	tasklet_kill(&dev->tx_tasklet);
}