/*
 * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/module.h>
#include "mt76.h"
#include "usb_trace.h"
#include "dma.h"

#define MT_VEND_REQ_MAX_RETRY	10
#define MT_VEND_REQ_TOUT_MS	300

/* should be called with usb_ctrl_mtx locked */
static int __mt76u_vendor_request(struct mt76_dev *dev, u8 req,
				  u8 req_type, u16 val, u16 offset,
				  void *buf, size_t len)
{
	struct usb_interface *intf = to_usb_interface(dev->dev);
	struct usb_device *udev = interface_to_usbdev(intf);
	unsigned int pipe;
	int i, ret;

	pipe = (req_type & USB_DIR_IN) ? usb_rcvctrlpipe(udev, 0)
				       : usb_sndctrlpipe(udev, 0);
	for (i = 0; i < MT_VEND_REQ_MAX_RETRY; i++) {
		if (test_bit(MT76_REMOVED, &dev->state))
			return -EIO;

		ret = usb_control_msg(udev, pipe, req, req_type, val,
				      offset, buf, len, MT_VEND_REQ_TOUT_MS);
		if (ret == -ENODEV)
			set_bit(MT76_REMOVED, &dev->state);
		if (ret >= 0 || ret == -ENODEV)
			return ret;
		usleep_range(5000, 10000);
	}

	dev_err(dev->dev, "vendor request req:%02x off:%04x failed:%d\n",
		req, offset, ret);
	return ret;
}

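/* lock-taking counterpart of __mt76u_vendor_request(): serializes all
 * vendor control transfers through usb_ctrl_mtx
 */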
int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
			 u8 req_type, u16 val, u16 offset,
			 void *buf, size_t len)
{
	int ret;

	mutex_lock(&dev->usb.usb_ctrl_mtx);
	ret = __mt76u_vendor_request(dev, req, req_type,
				     val, offset, buf, len);
	trace_usb_reg_wr(dev, offset, val);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);

	return ret;
}
EXPORT_SYMBOL_GPL(mt76u_vendor_request);

/* should be called with usb_ctrl_mtx locked */
static u32 __mt76u_rr(struct mt76_dev *dev, u32 addr)
{
	struct mt76_usb *usb = &dev->usb;
	u32 data = ~0;
	u16 offset;
	int ret;
	u8 req;

	switch (addr & MT_VEND_TYPE_MASK) {
	case MT_VEND_TYPE_EEPROM:
		req = MT_VEND_READ_EEPROM;
		break;
	case MT_VEND_TYPE_CFG:
		req = MT_VEND_READ_CFG;
		break;
	default:
		req = MT_VEND_MULTI_READ;
		break;
	}
	offset = addr & ~MT_VEND_TYPE_MASK;

	ret = __mt76u_vendor_request(dev, req,
				     USB_DIR_IN | USB_TYPE_VENDOR,
				     0, offset, usb->data, sizeof(__le32));
	if (ret == sizeof(__le32))
		data = get_unaligned_le32(usb->data);
	trace_usb_reg_rr(dev, addr, data);

	return data;
}

static u32 mt76u_rr(struct mt76_dev *dev, u32 addr)
{
	u32 ret;

	mutex_lock(&dev->usb.usb_ctrl_mtx);
	ret = __mt76u_rr(dev, addr);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);

	return ret;
}

/* should be called with usb_ctrl_mtx locked */
static void __mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
{
	struct mt76_usb *usb = &dev->usb;
	u16 offset;
	u8 req;

	switch (addr & MT_VEND_TYPE_MASK) {
	case MT_VEND_TYPE_CFG:
		req = MT_VEND_WRITE_CFG;
		break;
	default:
		req = MT_VEND_MULTI_WRITE;
		break;
	}
	offset = addr & ~MT_VEND_TYPE_MASK;

	put_unaligned_le32(val, usb->data);
	__mt76u_vendor_request(dev, req,
			       USB_DIR_OUT | USB_TYPE_VENDOR, 0,
			       offset, usb->data, sizeof(__le32));
	trace_usb_reg_wr(dev, addr, val);
}

static void mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
{
	mutex_lock(&dev->usb.usb_ctrl_mtx);
	__mt76u_wr(dev, addr, val);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);
}

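/* read-modify-write helper: keep the register bits outside @mask and OR in
 * @val, with the read and write performed under usb_ctrl_mtx
 */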
static u32 mt76u_rmw(struct mt76_dev *dev, u32 addr,
		     u32 mask, u32 val)
{
	mutex_lock(&dev->usb.usb_ctrl_mtx);
	val |= __mt76u_rr(dev, addr) & ~mask;
	__mt76u_wr(dev, addr, val);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);

	return val;
}

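/* copy a buffer to consecutive device registers in 32bit chunks; trailing
 * bytes (len not a multiple of 4) are not written
 */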
static void mt76u_copy(struct mt76_dev *dev, u32 offset,
		       const void *data, int len)
{
	struct mt76_usb *usb = &dev->usb;
	const u32 *val = data;
	int i, ret;

	mutex_lock(&usb->usb_ctrl_mtx);
	for (i = 0; i < (len / 4); i++) {
		put_unaligned_le32(val[i], usb->data);
		ret = __mt76u_vendor_request(dev, MT_VEND_MULTI_WRITE,
					     USB_DIR_OUT | USB_TYPE_VENDOR,
					     0, offset + i * 4, usb->data,
					     sizeof(__le32));
		if (ret < 0)
			break;
	}
	mutex_unlock(&usb->usb_ctrl_mtx);
}

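/* write a 32bit value as two 16bit halves carried in the wValue field of
 * consecutive vendor requests (no data stage)
 */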
void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
		     const u16 offset, const u32 val)
{
	mutex_lock(&dev->usb.usb_ctrl_mtx);
	__mt76u_vendor_request(dev, req,
			       USB_DIR_OUT | USB_TYPE_VENDOR,
			       val & 0xffff, offset, NULL, 0);
	__mt76u_vendor_request(dev, req,
			       USB_DIR_OUT | USB_TYPE_VENDOR,
			       val >> 16, offset + 2, NULL, 0);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);
}
EXPORT_SYMBOL_GPL(mt76u_single_wr);

static int
mt76u_req_wr_rp(struct mt76_dev *dev, u32 base,
		const struct mt76_reg_pair *data, int len)
{
	struct mt76_usb *usb = &dev->usb;

	mutex_lock(&usb->usb_ctrl_mtx);
	while (len > 0) {
		__mt76u_wr(dev, base + data->reg, data->value);
		len--;
		data++;
	}
	mutex_unlock(&usb->usb_ctrl_mtx);

	return 0;
}

static int
mt76u_wr_rp(struct mt76_dev *dev, u32 base,
	    const struct mt76_reg_pair *data, int n)
{
	if (test_bit(MT76_STATE_MCU_RUNNING, &dev->state))
		return dev->mcu_ops->mcu_wr_rp(dev, base, data, n);
	else
		return mt76u_req_wr_rp(dev, base, data, n);
}

static int
mt76u_req_rd_rp(struct mt76_dev *dev, u32 base, struct mt76_reg_pair *data,
		int len)
{
	struct mt76_usb *usb = &dev->usb;

	mutex_lock(&usb->usb_ctrl_mtx);
	while (len > 0) {
		data->value = __mt76u_rr(dev, base + data->reg);
		len--;
		data++;
	}
	mutex_unlock(&usb->usb_ctrl_mtx);

	return 0;
}

static int
mt76u_rd_rp(struct mt76_dev *dev, u32 base,
	    struct mt76_reg_pair *data, int n)
{
	if (test_bit(MT76_STATE_MCU_RUNNING, &dev->state))
		return dev->mcu_ops->mcu_rd_rp(dev, base, data, n);
	else
		return mt76u_req_rd_rp(dev, base, data, n);
}

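/* discover the bulk-in/bulk-out endpoints exported by the usb interface and
 * fail if the expected number of endpoints is not found
 */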
static int
mt76u_set_endpoints(struct usb_interface *intf,
		    struct mt76_usb *usb)
{
	struct usb_host_interface *intf_desc = intf->cur_altsetting;
	struct usb_endpoint_descriptor *ep_desc;
	int i, in_ep = 0, out_ep = 0;

	for (i = 0; i < intf_desc->desc.bNumEndpoints; i++) {
		ep_desc = &intf_desc->endpoint[i].desc;

		if (usb_endpoint_is_bulk_in(ep_desc) &&
		    in_ep < __MT_EP_IN_MAX) {
			usb->in_ep[in_ep] = usb_endpoint_num(ep_desc);
			usb->in_max_packet = usb_endpoint_maxp(ep_desc);
			in_ep++;
		} else if (usb_endpoint_is_bulk_out(ep_desc) &&
			   out_ep < __MT_EP_OUT_MAX) {
			usb->out_ep[out_ep] = usb_endpoint_num(ep_desc);
			usb->out_max_packet = usb_endpoint_maxp(ep_desc);
			out_ep++;
		}
	}

	if (in_ep != __MT_EP_IN_MAX || out_ep != __MT_EP_OUT_MAX)
		return -EINVAL;
	return 0;
}

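/* (re)fill the rx urb scatter-gather list with page fragments; returns the
 * number of freshly filled entries, or -ENOMEM if none could be allocated
 */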
static int
mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76u_buf *buf,
		 int nsgs, int len, int sglen)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	struct urb *urb = buf->urb;
	int i;

	spin_lock_bh(&q->rx_page_lock);
	for (i = 0; i < nsgs; i++) {
		struct page *page;
		void *data;
		int offset;

		data = page_frag_alloc(&q->rx_page, len, GFP_ATOMIC);
		if (!data)
			break;

		page = virt_to_head_page(data);
		offset = data - page_address(page);
		sg_set_page(&urb->sg[i], page, sglen, offset);
	}
	spin_unlock_bh(&q->rx_page_lock);

	if (i < nsgs) {
		int j;

		for (j = nsgs; j < urb->num_sgs; j++)
			skb_free_frag(sg_virt(&urb->sg[j]));
		urb->num_sgs = i;
	}

	urb->num_sgs = max_t(int, i, urb->num_sgs);
	buf->len = urb->num_sgs * sglen;
	sg_init_marker(urb->sg, urb->num_sgs);

	return i ? : -ENOMEM;
}

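/* allocate the urb and scatterlist backing an rx queue entry and populate
 * it with page fragments
 */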
int mt76u_buf_alloc(struct mt76_dev *dev, struct mt76u_buf *buf,
		    int nsgs, int len, int sglen, gfp_t gfp)
{
	buf->urb = usb_alloc_urb(0, gfp);
	if (!buf->urb)
		return -ENOMEM;

	buf->urb->sg = devm_kcalloc(dev->dev, nsgs, sizeof(*buf->urb->sg),
				    gfp);
	if (!buf->urb->sg)
		return -ENOMEM;

	sg_init_table(buf->urb->sg, nsgs);
	buf->dev = dev;

	return mt76u_fill_rx_sg(dev, buf, nsgs, len, sglen);
}
EXPORT_SYMBOL_GPL(mt76u_buf_alloc);

void mt76u_buf_free(struct mt76u_buf *buf)
{
	struct urb *urb = buf->urb;
	int i;

	for (i = 0; i < urb->num_sgs; i++)
		skb_free_frag(sg_virt(&urb->sg[i]));
	usb_free_urb(buf->urb);
}
EXPORT_SYMBOL_GPL(mt76u_buf_free);

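/* fill a bulk urb for the endpoint selected by @dir/@index and submit it;
 * the transfer uses the urb scatterlist, so transfer_buffer is left NULL
 */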
int mt76u_submit_buf(struct mt76_dev *dev, int dir, int index,
		     struct mt76u_buf *buf, gfp_t gfp,
		     usb_complete_t complete_fn, void *context)
{
	struct usb_interface *intf = to_usb_interface(dev->dev);
	struct usb_device *udev = interface_to_usbdev(intf);
	unsigned int pipe;

	if (dir == USB_DIR_IN)
		pipe = usb_rcvbulkpipe(udev, dev->usb.in_ep[index]);
	else
		pipe = usb_sndbulkpipe(udev, dev->usb.out_ep[index]);

	usb_fill_bulk_urb(buf->urb, udev, pipe, NULL, buf->len,
			  complete_fn, context);
	trace_submit_urb(dev, buf->urb);

	return usb_submit_urb(buf->urb, gfp);
}
EXPORT_SYMBOL_GPL(mt76u_submit_buf);

static inline struct mt76u_buf
*mt76u_get_next_rx_entry(struct mt76_queue *q)
{
	struct mt76u_buf *buf = NULL;
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	if (q->queued > 0) {
		buf = &q->entry[q->head].ubuf;
		q->head = (q->head + 1) % q->ndesc;
		q->queued--;
	}
	spin_unlock_irqrestore(&q->lock, flags);

	return buf;
}

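/* parse the length field of the usb rx DMA header and sanity check it
 * against the actual urb length; returns the payload length or -EINVAL
 */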
static int mt76u_get_rx_entry_len(u8 *data, u32 data_len)
{
	u16 dma_len, min_len;

	dma_len = get_unaligned_le16(data);
	min_len = MT_DMA_HDR_LEN + MT_RX_RXWI_LEN +
		  MT_FCE_INFO_LEN;

	if (data_len < min_len || !dma_len ||
	    dma_len + MT_DMA_HDR_LEN > data_len ||
	    (dma_len & 0x3))
		return -EINVAL;
	return dma_len;
}

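/* build an skb spanning the sg fragments of a completed rx urb and hand it
 * to the driver; returns the number of fragments consumed
 */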
static int
mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	u8 *data = sg_virt(&urb->sg[0]);
	int data_len, len, nsgs = 1;
	struct sk_buff *skb;

	if (!test_bit(MT76_STATE_INITIALIZED, &dev->state))
		return 0;

	len = mt76u_get_rx_entry_len(data, urb->actual_length);
	if (len < 0)
		return 0;

	skb = build_skb(data, q->buf_size);
	if (!skb)
		return 0;

	data_len = min_t(int, len, urb->sg[0].length - MT_DMA_HDR_LEN);
	skb_reserve(skb, MT_DMA_HDR_LEN);
	if (skb->tail + data_len > skb->end) {
		dev_kfree_skb(skb);
		return 1;
	}

	__skb_put(skb, data_len);
	len -= data_len;

	while (len > 0) {
		data_len = min_t(int, len, urb->sg[nsgs].length);
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
				sg_page(&urb->sg[nsgs]),
				urb->sg[nsgs].offset,
				data_len, q->buf_size);
		len -= data_len;
		nsgs++;
	}
	dev->drv->rx_skb(dev, MT_RXQ_MAIN, skb);

	return nsgs;
}

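/* rx urb completion handler (atomic context): account the completed entry
 * and defer processing to the rx tasklet
 */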
static void mt76u_complete_rx(struct urb *urb)
{
	struct mt76_dev *dev = urb->context;
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	unsigned long flags;

	trace_rx_urb(dev, urb);

	switch (urb->status) {
	case -ECONNRESET:
	case -ESHUTDOWN:
	case -ENOENT:
		return;
	default:
		dev_err(dev->dev, "rx urb failed: %d\n", urb->status);
		/* fall through */
	case 0:
		break;
	}

	spin_lock_irqsave(&q->lock, flags);
	if (WARN_ONCE(q->entry[q->tail].ubuf.urb != urb, "rx urb mismatch"))
		goto out;

	q->tail = (q->tail + 1) % q->ndesc;
	q->queued++;
	tasklet_schedule(&dev->usb.rx_tasklet);
out:
	spin_unlock_irqrestore(&q->lock, flags);
}

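/* process completed rx urbs: build skbs out of their fragments, refill the
 * consumed sg entries and resubmit the urbs
 */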
static void mt76u_rx_tasklet(unsigned long data)
{
	struct mt76_dev *dev = (struct mt76_dev *)data;
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	int err, nsgs, buf_len = q->buf_size;
	struct mt76u_buf *buf;

	rcu_read_lock();

	while (true) {
		buf = mt76u_get_next_rx_entry(q);
		if (!buf)
			break;

		nsgs = mt76u_process_rx_entry(dev, buf->urb);
		if (nsgs > 0) {
			err = mt76u_fill_rx_sg(dev, buf, nsgs,
					       buf_len,
					       SKB_WITH_OVERHEAD(buf_len));
			if (err < 0)
				break;
		}
		mt76u_submit_buf(dev, USB_DIR_IN, MT_EP_IN_PKT_RX,
				 buf, GFP_ATOMIC,
				 mt76u_complete_rx, dev);
	}
	mt76_rx_poll_complete(dev, MT_RXQ_MAIN, NULL);

	rcu_read_unlock();
}

int mt76u_submit_rx_buffers(struct mt76_dev *dev)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	unsigned long flags;
	int i, err = 0;

	spin_lock_irqsave(&q->lock, flags);
	for (i = 0; i < q->ndesc; i++) {
		err = mt76u_submit_buf(dev, USB_DIR_IN, MT_EP_IN_PKT_RX,
				       &q->entry[i].ubuf, GFP_ATOMIC,
				       mt76u_complete_rx, dev);
		if (err < 0)
			break;
	}
	q->head = q->tail = 0;
	q->queued = 0;
	spin_unlock_irqrestore(&q->lock, flags);

	return err;
}
EXPORT_SYMBOL_GPL(mt76u_submit_rx_buffers);

static int mt76u_alloc_rx(struct mt76_dev *dev)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	int i, err, nsgs;

	spin_lock_init(&q->rx_page_lock);
	spin_lock_init(&q->lock);
	q->entry = devm_kcalloc(dev->dev,
				MT_NUM_RX_ENTRIES, sizeof(*q->entry),
				GFP_KERNEL);
	if (!q->entry)
		return -ENOMEM;

	if (mt76u_check_sg(dev)) {
		q->buf_size = MT_RX_BUF_SIZE;
		nsgs = MT_SG_MAX_SIZE;
	} else {
		q->buf_size = PAGE_SIZE;
		nsgs = 1;
	}

	for (i = 0; i < MT_NUM_RX_ENTRIES; i++) {
		err = mt76u_buf_alloc(dev, &q->entry[i].ubuf,
				      nsgs, q->buf_size,
				      SKB_WITH_OVERHEAD(q->buf_size),
				      GFP_KERNEL);
		if (err < 0)
			return err;
	}
	q->ndesc = MT_NUM_RX_ENTRIES;

	return mt76u_submit_rx_buffers(dev);
}

static void mt76u_free_rx(struct mt76_dev *dev)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	struct page *page;
	int i;

	for (i = 0; i < q->ndesc; i++)
		mt76u_buf_free(&q->entry[i].ubuf);

	spin_lock_bh(&q->rx_page_lock);
	if (!q->rx_page.va)
		goto out;

	page = virt_to_page(q->rx_page.va);
	__page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
	memset(&q->rx_page, 0, sizeof(q->rx_page));
out:
	spin_unlock_bh(&q->rx_page_lock);
}

static void mt76u_stop_rx(struct mt76_dev *dev)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	int i;

	for (i = 0; i < q->ndesc; i++)
		usb_kill_urb(q->entry[i].ubuf.urb);
}

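/* reap completed tx urbs for every AC queue, schedule pending frames and
 * wake the corresponding mac80211 queue once enough room is available
 */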
static void mt76u_tx_tasklet(unsigned long data)
{
	struct mt76_dev *dev = (struct mt76_dev *)data;
	struct mt76u_buf *buf;
	struct mt76_queue *q;
	bool wake;
	int i;

	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		q = &dev->q_tx[i];

		spin_lock_bh(&q->lock);
		while (true) {
			buf = &q->entry[q->head].ubuf;
			if (!buf->done || !q->queued)
				break;

			dev->drv->tx_complete_skb(dev, q,
						  &q->entry[q->head],
						  false);

			if (q->entry[q->head].schedule) {
				q->entry[q->head].schedule = false;
				q->swq_queued--;
			}

			q->head = (q->head + 1) % q->ndesc;
			q->queued--;
		}
		mt76_txq_schedule(dev, q);
		wake = i < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8;
		if (!q->queued)
			wake_up(&dev->tx_wait);

		spin_unlock_bh(&q->lock);

		if (!test_and_set_bit(MT76_READING_STATS, &dev->state))
			ieee80211_queue_delayed_work(dev->hw,
						     &dev->usb.stat_work,
						     msecs_to_jiffies(10));

		if (wake)
			ieee80211_wake_queue(dev->hw, i);
	}
}

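/* delayed work polling tx status reports from the device; reschedules
 * itself as long as reports keep coming and the device is running
 */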
static void mt76u_tx_status_data(struct work_struct *work)
{
	struct mt76_usb *usb;
	struct mt76_dev *dev;
	u8 update = 1;
	u16 count = 0;

	usb = container_of(work, struct mt76_usb, stat_work.work);
	dev = container_of(usb, struct mt76_dev, usb);

	while (true) {
		if (test_bit(MT76_REMOVED, &dev->state))
			break;

		if (!dev->drv->tx_status_data(dev, &update))
			break;
		count++;
	}

	if (count && test_bit(MT76_STATE_RUNNING, &dev->state))
		ieee80211_queue_delayed_work(dev->hw, &usb->stat_work,
					     msecs_to_jiffies(10));
	else
		clear_bit(MT76_READING_STATS, &dev->state);
}

static void mt76u_complete_tx(struct urb *urb)
{
	struct mt76u_buf *buf = urb->context;
	struct mt76_dev *dev = buf->dev;

	if (mt76u_urb_error(urb))
		dev_err(dev->dev, "tx urb failed: %d\n", urb->status);
	buf->done = true;

	tasklet_schedule(&dev->usb.tx_tasklet);
}

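/* map an skb (linear data plus all fragments) onto the urb scatterlist,
 * clamped to MT_SG_MAX_SIZE entries
 */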
static int
mt76u_tx_build_sg(struct sk_buff *skb, struct urb *urb)
{
	int nsgs = 1 + skb_shinfo(skb)->nr_frags;
	struct sk_buff *iter;

	skb_walk_frags(skb, iter)
		nsgs += 1 + skb_shinfo(iter)->nr_frags;

	memset(urb->sg, 0, sizeof(*urb->sg) * MT_SG_MAX_SIZE);

	nsgs = min_t(int, MT_SG_MAX_SIZE, nsgs);
	sg_init_marker(urb->sg, nsgs);
	urb->num_sgs = nsgs;

	return skb_to_sgvec_nomark(skb, urb->sg, 0, skb->len);
}

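/* prepare a tx urb for @skb and add it to the queue ring; the urb is not
 * submitted here but by the following kick operation
 */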
static int
mt76u_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
		   struct sk_buff *skb, struct mt76_wcid *wcid,
		   struct ieee80211_sta *sta)
{
	struct usb_interface *intf = to_usb_interface(dev->dev);
	struct usb_device *udev = interface_to_usbdev(intf);
	u8 ep = q2ep(q->hw_idx);
	struct mt76u_buf *buf;
	u16 idx = q->tail;
	unsigned int pipe;
	int err;

	if (q->queued == q->ndesc)
		return -ENOSPC;

	skb->prev = skb->next = NULL;
	err = dev->drv->tx_prepare_skb(dev, NULL, skb, q, wcid, sta, NULL);
	if (err < 0)
		return err;

	buf = &q->entry[idx].ubuf;
	buf->done = false;

	err = mt76u_tx_build_sg(skb, buf->urb);
	if (err < 0)
		return err;

	pipe = usb_sndbulkpipe(udev, dev->usb.out_ep[ep]);
	usb_fill_bulk_urb(buf->urb, udev, pipe, NULL, skb->len,
			  mt76u_complete_tx, buf);

	q->tail = (q->tail + 1) % q->ndesc;
	q->entry[idx].skb = skb;
	q->queued++;

	return idx;
}

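/* submit every urb queued between q->first and q->tail; on -ENODEV the
 * device is marked as removed
 */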
static void mt76u_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
{
	struct mt76u_buf *buf;
	int err;

	while (q->first != q->tail) {
		buf = &q->entry[q->first].ubuf;

		trace_submit_urb(dev, buf->urb);
		err = usb_submit_urb(buf->urb, GFP_ATOMIC);
		if (err < 0) {
			if (err == -ENODEV)
				set_bit(MT76_REMOVED, &dev->state);
			else
				dev_err(dev->dev, "tx urb submit failed:%d\n",
					err);
			break;
		}
		q->first = (q->first + 1) % q->ndesc;
	}
}

static int mt76u_alloc_tx(struct mt76_dev *dev)
{
	struct mt76u_buf *buf;
	struct mt76_queue *q;
	size_t size;
	int i, j;

	size = MT_SG_MAX_SIZE * sizeof(struct scatterlist);
	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		q = &dev->q_tx[i];
		spin_lock_init(&q->lock);
		INIT_LIST_HEAD(&q->swq);
		q->hw_idx = mt76_ac_to_hwq(i);

		q->entry = devm_kcalloc(dev->dev,
					MT_NUM_TX_ENTRIES, sizeof(*q->entry),
					GFP_KERNEL);
		if (!q->entry)
			return -ENOMEM;

		q->ndesc = MT_NUM_TX_ENTRIES;
		for (j = 0; j < q->ndesc; j++) {
			buf = &q->entry[j].ubuf;
			buf->dev = dev;

			buf->urb = usb_alloc_urb(0, GFP_KERNEL);
			if (!buf->urb)
				return -ENOMEM;

			buf->urb->sg = devm_kzalloc(dev->dev, size, GFP_KERNEL);
			if (!buf->urb->sg)
				return -ENOMEM;
		}
	}
	return 0;
}

static void mt76u_free_tx(struct mt76_dev *dev)
{
	struct mt76_queue *q;
	int i, j;

	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		q = &dev->q_tx[i];
		for (j = 0; j < q->ndesc; j++)
			usb_free_urb(q->entry[j].ubuf.urb);
	}
}

static void mt76u_stop_tx(struct mt76_dev *dev)
{
	struct mt76_queue *q;
	int i, j;

	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		q = &dev->q_tx[i];
		for (j = 0; j < q->ndesc; j++)
			usb_kill_urb(q->entry[j].ubuf.urb);
	}
}

void mt76u_stop_queues(struct mt76_dev *dev)
{
	tasklet_disable(&dev->usb.rx_tasklet);
	tasklet_disable(&dev->usb.tx_tasklet);

	mt76u_stop_rx(dev);
	mt76u_stop_tx(dev);
}
EXPORT_SYMBOL_GPL(mt76u_stop_queues);

void mt76u_stop_stat_wk(struct mt76_dev *dev)
{
	cancel_delayed_work_sync(&dev->usb.stat_work);
	clear_bit(MT76_READING_STATS, &dev->state);
}
EXPORT_SYMBOL_GPL(mt76u_stop_stat_wk);

void mt76u_queues_deinit(struct mt76_dev *dev)
{
	mt76u_stop_queues(dev);

	mt76u_free_rx(dev);
	mt76u_free_tx(dev);
}
EXPORT_SYMBOL_GPL(mt76u_queues_deinit);

int mt76u_alloc_queues(struct mt76_dev *dev)
{
	int err;

	err = mt76u_alloc_rx(dev);
	if (err < 0)
		goto err;

	err = mt76u_alloc_tx(dev);
	if (err < 0)
		goto err;

	return 0;
err:
	mt76u_queues_deinit(dev);
	return err;
}
EXPORT_SYMBOL_GPL(mt76u_alloc_queues);

static const struct mt76_queue_ops usb_queue_ops = {
	.tx_queue_skb = mt76u_tx_queue_skb,
	.kick = mt76u_tx_kick,
};

int mt76u_init(struct mt76_dev *dev,
	       struct usb_interface *intf)
{
	static const struct mt76_bus_ops mt76u_ops = {
		.rr = mt76u_rr,
		.wr = mt76u_wr,
		.rmw = mt76u_rmw,
		.copy = mt76u_copy,
		.wr_rp = mt76u_wr_rp,
		.rd_rp = mt76u_rd_rp,
		.type = MT76_BUS_USB,
	};
	struct mt76_usb *usb = &dev->usb;

	tasklet_init(&usb->rx_tasklet, mt76u_rx_tasklet, (unsigned long)dev);
	tasklet_init(&usb->tx_tasklet, mt76u_tx_tasklet, (unsigned long)dev);
	INIT_DELAYED_WORK(&usb->stat_work, mt76u_tx_status_data);
	skb_queue_head_init(&dev->rx_skb[MT_RXQ_MAIN]);

	init_completion(&usb->mcu.cmpl);
	mutex_init(&usb->mcu.mutex);

	mutex_init(&usb->usb_ctrl_mtx);
	dev->bus = &mt76u_ops;
	dev->queue_ops = &usb_queue_ops;

	return mt76u_set_endpoints(intf, usb);
}
EXPORT_SYMBOL_GPL(mt76u_init);

MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>");
MODULE_LICENSE("Dual BSD/GPL");