/*
 * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/module.h>
#include "mt76.h"
#include "usb_trace.h"
#include "dma.h"

#define MT_VEND_REQ_MAX_RETRY	10
#define MT_VEND_REQ_TOUT_MS	300

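/*
 * Low-level helper to issue a vendor control transfer on endpoint 0.
 * Transient failures are retried up to MT_VEND_REQ_MAX_RETRY times with a
 * short delay in between; -ENODEV marks the device as removed via
 * MT76_REMOVED and aborts the retry loop immediately.
 */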
/* must be called with usb_ctrl_mtx held */
static int __mt76u_vendor_request(struct mt76_dev *dev, u8 req,
				  u8 req_type, u16 val, u16 offset,
				  void *buf, size_t len)
{
	struct usb_interface *intf = to_usb_interface(dev->dev);
	struct usb_device *udev = interface_to_usbdev(intf);
	unsigned int pipe;
	int i, ret;

	pipe = (req_type & USB_DIR_IN) ? usb_rcvctrlpipe(udev, 0)
				       : usb_sndctrlpipe(udev, 0);
	for (i = 0; i < MT_VEND_REQ_MAX_RETRY; i++) {
		if (test_bit(MT76_REMOVED, &dev->state))
			return -EIO;

		ret = usb_control_msg(udev, pipe, req, req_type, val,
				      offset, buf, len, MT_VEND_REQ_TOUT_MS);
		if (ret == -ENODEV)
			set_bit(MT76_REMOVED, &dev->state);
		if (ret >= 0 || ret == -ENODEV)
			return ret;
		usleep_range(5000, 10000);
	}

	dev_err(dev->dev, "vendor request req:%02x off:%04x failed:%d\n",
		req, offset, ret);
	return ret;
}

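/*
 * Locked wrapper around __mt76u_vendor_request() that also records a
 * trace event for the access. Illustrative call, mirroring how this file
 * reads registers through the DMA-safe usb->data bounce buffer:
 *
 *	mt76u_vendor_request(dev, MT_VEND_READ_CFG,
 *			     USB_DIR_IN | USB_TYPE_VENDOR,
 *			     0, offset, usb->data, sizeof(__le32));
 */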
int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
			 u8 req_type, u16 val, u16 offset,
			 void *buf, size_t len)
{
	int ret;

	mutex_lock(&dev->usb.usb_ctrl_mtx);
	ret = __mt76u_vendor_request(dev, req, req_type,
				     val, offset, buf, len);
	trace_usb_reg_wr(dev, offset, val);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);

	return ret;
}
EXPORT_SYMBOL_GPL(mt76u_vendor_request);

/* must be called with usb_ctrl_mtx held */
static u32 __mt76u_rr(struct mt76_dev *dev, u32 addr)
{
	struct mt76_usb *usb = &dev->usb;
	u32 data = ~0;
	u16 offset;
	int ret;
	u8 req;

	switch (addr & MT_VEND_TYPE_MASK) {
	case MT_VEND_TYPE_EEPROM:
		req = MT_VEND_READ_EEPROM;
		break;
	case MT_VEND_TYPE_CFG:
		req = MT_VEND_READ_CFG;
		break;
	default:
		req = MT_VEND_MULTI_READ;
		break;
	}
	offset = addr & ~MT_VEND_TYPE_MASK;

	ret = __mt76u_vendor_request(dev, req,
				     USB_DIR_IN | USB_TYPE_VENDOR,
				     0, offset, usb->data, sizeof(__le32));
	if (ret == sizeof(__le32))
		data = get_unaligned_le32(usb->data);
	trace_usb_reg_rr(dev, addr, data);

	return data;
}

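/*
 * Locked 32-bit register read. The address encodes the vendor request in
 * MT_VEND_TYPE_MASK (EEPROM, CFG or plain multi-read) and the register
 * offset in the remaining bits; ~0 is returned if the transfer fails.
 */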
u32 mt76u_rr(struct mt76_dev *dev, u32 addr)
{
	u32 ret;

	mutex_lock(&dev->usb.usb_ctrl_mtx);
	ret = __mt76u_rr(dev, addr);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);

	return ret;
}
EXPORT_SYMBOL_GPL(mt76u_rr);

/* must be called with usb_ctrl_mtx held */
static void __mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
{
	struct mt76_usb *usb = &dev->usb;
	u16 offset;
	u8 req;

	switch (addr & MT_VEND_TYPE_MASK) {
	case MT_VEND_TYPE_CFG:
		req = MT_VEND_WRITE_CFG;
		break;
	default:
		req = MT_VEND_MULTI_WRITE;
		break;
	}
	offset = addr & ~MT_VEND_TYPE_MASK;

	put_unaligned_le32(val, usb->data);
	__mt76u_vendor_request(dev, req,
			       USB_DIR_OUT | USB_TYPE_VENDOR, 0,
			       offset, usb->data, sizeof(__le32));
	trace_usb_reg_wr(dev, addr, val);
}

void mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
{
	mutex_lock(&dev->usb.usb_ctrl_mtx);
	__mt76u_wr(dev, addr, val);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);
}
EXPORT_SYMBOL_GPL(mt76u_wr);

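/*
 * Read-modify-write helper: the read and the write happen under a single
 * usb_ctrl_mtx hold, so no other register access can interleave.
 */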
static u32 mt76u_rmw(struct mt76_dev *dev, u32 addr,
		     u32 mask, u32 val)
{
	mutex_lock(&dev->usb.usb_ctrl_mtx);
	val |= __mt76u_rr(dev, addr) & ~mask;
	__mt76u_wr(dev, addr, val);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);

	return val;
}

static void mt76u_copy(struct mt76_dev *dev, u32 offset,
		       const void *data, int len)
{
	struct mt76_usb *usb = &dev->usb;
	const u32 *val = data;
	int i, ret;

	mutex_lock(&usb->usb_ctrl_mtx);
	for (i = 0; i < (len / 4); i++) {
		put_unaligned_le32(val[i], usb->data);
		ret = __mt76u_vendor_request(dev, MT_VEND_MULTI_WRITE,
					     USB_DIR_OUT | USB_TYPE_VENDOR,
					     0, offset + i * 4, usb->data,
					     sizeof(__le32));
		if (ret < 0)
			break;
	}
	mutex_unlock(&usb->usb_ctrl_mtx);
}

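/*
 * Write a 32-bit value as two 16-bit vendor requests without a data
 * stage: the low half goes to offset, the high half to offset + 2, both
 * carried in the control request's value field.
 */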
void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
		     const u16 offset, const u32 val)
{
	mutex_lock(&dev->usb.usb_ctrl_mtx);
	__mt76u_vendor_request(dev, req,
			       USB_DIR_OUT | USB_TYPE_VENDOR,
			       val & 0xffff, offset, NULL, 0);
	__mt76u_vendor_request(dev, req,
			       USB_DIR_OUT | USB_TYPE_VENDOR,
			       val >> 16, offset + 2, NULL, 0);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);
}
EXPORT_SYMBOL_GPL(mt76u_single_wr);

static int
mt76u_req_wr_rp(struct mt76_dev *dev, u32 base,
		const struct mt76_reg_pair *data, int len)
{
	struct mt76_usb *usb = &dev->usb;

	mutex_lock(&usb->usb_ctrl_mtx);
	while (len > 0) {
		__mt76u_wr(dev, base + data->reg, data->value);
		len--;
		data++;
	}
	mutex_unlock(&usb->usb_ctrl_mtx);

	return 0;
}

static int
mt76u_wr_rp(struct mt76_dev *dev, u32 base,
	    const struct mt76_reg_pair *data, int n)
{
	if (test_bit(MT76_STATE_MCU_RUNNING, &dev->state))
		return dev->mcu_ops->mcu_wr_rp(dev, base, data, n);
	else
		return mt76u_req_wr_rp(dev, base, data, n);
}

static int
mt76u_req_rd_rp(struct mt76_dev *dev, u32 base, struct mt76_reg_pair *data,
		int len)
{
	struct mt76_usb *usb = &dev->usb;

	mutex_lock(&usb->usb_ctrl_mtx);
	while (len > 0) {
		data->value = __mt76u_rr(dev, base + data->reg);
		len--;
		data++;
	}
	mutex_unlock(&usb->usb_ctrl_mtx);

	return 0;
}

static int
mt76u_rd_rp(struct mt76_dev *dev, u32 base,
	    struct mt76_reg_pair *data, int n)
{
	if (test_bit(MT76_STATE_MCU_RUNNING, &dev->state))
		return dev->mcu_ops->mcu_rd_rp(dev, base, data, n);
	else
		return mt76u_req_rd_rp(dev, base, data, n);
}

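/*
 * Scan the current altsetting and record the bulk endpoint numbers and
 * max packet sizes. The device is expected to expose exactly
 * __MT_EP_IN_MAX bulk IN and __MT_EP_OUT_MAX bulk OUT endpoints;
 * anything else is rejected with -EINVAL.
 */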
static int
mt76u_set_endpoints(struct usb_interface *intf,
		    struct mt76_usb *usb)
{
	struct usb_host_interface *intf_desc = intf->cur_altsetting;
	struct usb_endpoint_descriptor *ep_desc;
	int i, in_ep = 0, out_ep = 0;

	for (i = 0; i < intf_desc->desc.bNumEndpoints; i++) {
		ep_desc = &intf_desc->endpoint[i].desc;

		if (usb_endpoint_is_bulk_in(ep_desc) &&
		    in_ep < __MT_EP_IN_MAX) {
			usb->in_ep[in_ep] = usb_endpoint_num(ep_desc);
			usb->in_max_packet = usb_endpoint_maxp(ep_desc);
			in_ep++;
		} else if (usb_endpoint_is_bulk_out(ep_desc) &&
			   out_ep < __MT_EP_OUT_MAX) {
			usb->out_ep[out_ep] = usb_endpoint_num(ep_desc);
			usb->out_max_packet = usb_endpoint_maxp(ep_desc);
			out_ep++;
		}
	}

	if (in_ep != __MT_EP_IN_MAX || out_ep != __MT_EP_OUT_MAX)
		return -EINVAL;
	return 0;
}

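/*
 * Populate up to nsgs scatterlist entries of an rx URB with fragments
 * from the queue's page_frag cache. Returns the number of entries that
 * could be filled, or -ENOMEM if none could be allocated.
 */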
static int
mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76u_buf *buf,
		 int nsgs, int len, int sglen)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	struct urb *urb = buf->urb;
	int i;

	spin_lock_bh(&q->rx_page_lock);
	for (i = 0; i < nsgs; i++) {
		struct page *page;
		void *data;
		int offset;

		data = page_frag_alloc(&q->rx_page, len, GFP_ATOMIC);
		if (!data)
			break;

		page = virt_to_head_page(data);
		offset = data - page_address(page);
		sg_set_page(&urb->sg[i], page, sglen, offset);
	}
	spin_unlock_bh(&q->rx_page_lock);

	if (i < nsgs) {
		int j;

		for (j = nsgs; j < urb->num_sgs; j++)
			skb_free_frag(sg_virt(&urb->sg[j]));
		urb->num_sgs = i;
	}

	urb->num_sgs = max_t(int, i, urb->num_sgs);
	buf->len = urb->num_sgs * sglen;
	sg_init_marker(urb->sg, urb->num_sgs);

	return i ? : -ENOMEM;
}

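/*
 * Allocate an rx URB together with a device-managed nsgs-entry
 * scatterlist and fill it with rx fragments.
 */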
int mt76u_buf_alloc(struct mt76_dev *dev, struct mt76u_buf *buf,
		    int nsgs, int len, int sglen, gfp_t gfp)
{
	buf->urb = usb_alloc_urb(0, gfp);
	if (!buf->urb)
		return -ENOMEM;

	buf->urb->sg = devm_kcalloc(dev->dev, nsgs, sizeof(*buf->urb->sg),
				    gfp);
	if (!buf->urb->sg)
		return -ENOMEM;

	sg_init_table(buf->urb->sg, nsgs);
	buf->dev = dev;

	return mt76u_fill_rx_sg(dev, buf, nsgs, len, sglen);
}
EXPORT_SYMBOL_GPL(mt76u_buf_alloc);

void mt76u_buf_free(struct mt76u_buf *buf)
{
	struct urb *urb = buf->urb;
	int i;

	for (i = 0; i < urb->num_sgs; i++)
		skb_free_frag(sg_virt(&urb->sg[i]));
	usb_free_urb(buf->urb);
}
EXPORT_SYMBOL_GPL(mt76u_buf_free);

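/*
 * Fill and submit a bulk URB on the IN/OUT endpoint selected by index.
 * transfer_buffer stays NULL because the payload lives in the
 * scatterlist attached to the URB.
 */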
int mt76u_submit_buf(struct mt76_dev *dev, int dir, int index,
		     struct mt76u_buf *buf, gfp_t gfp,
		     usb_complete_t complete_fn, void *context)
{
	struct usb_interface *intf = to_usb_interface(dev->dev);
	struct usb_device *udev = interface_to_usbdev(intf);
	unsigned int pipe;

	if (dir == USB_DIR_IN)
		pipe = usb_rcvbulkpipe(udev, dev->usb.in_ep[index]);
	else
		pipe = usb_sndbulkpipe(udev, dev->usb.out_ep[index]);

	usb_fill_bulk_urb(buf->urb, udev, pipe, NULL, buf->len,
			  complete_fn, context);

	return usb_submit_urb(buf->urb, gfp);
}
EXPORT_SYMBOL_GPL(mt76u_submit_buf);

static inline struct mt76u_buf
*mt76u_get_next_rx_entry(struct mt76_queue *q)
{
	struct mt76u_buf *buf = NULL;
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	if (q->queued > 0) {
		buf = &q->entry[q->head].ubuf;
		q->head = (q->head + 1) % q->ndesc;
		q->queued--;
	}
	spin_unlock_irqrestore(&q->lock, flags);

	return buf;
}

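/*
 * Extract and validate the DMA length found at the start of an rx
 * buffer: it must be non-zero, 4-byte aligned and, together with the DMA
 * header, fit in the received data.
 */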
static int mt76u_get_rx_entry_len(u8 *data, u32 data_len)
{
	u16 dma_len, min_len;

	dma_len = get_unaligned_le16(data);
	min_len = MT_DMA_HDR_LEN + MT_RX_RXWI_LEN +
		  MT_FCE_INFO_LEN;

	if (data_len < min_len || !dma_len ||
	    dma_len + MT_DMA_HDR_LEN > data_len ||
	    (dma_len & 0x3))
		return -EINVAL;
	return dma_len;
}

static int
mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	u8 *data = sg_virt(&urb->sg[0]);
	int data_len, len, nsgs = 1;
	struct sk_buff *skb;

	if (!test_bit(MT76_STATE_INITIALIZED, &dev->state))
		return 0;

	len = mt76u_get_rx_entry_len(data, urb->actual_length);
	if (len < 0)
		return 0;

	skb = build_skb(data, q->buf_size);
	if (!skb)
		return 0;

	data_len = min_t(int, len, urb->sg[0].length - MT_DMA_HDR_LEN);
	skb_reserve(skb, MT_DMA_HDR_LEN);
	if (skb->tail + data_len > skb->end) {
		dev_kfree_skb(skb);
		return 1;
	}

	__skb_put(skb, data_len);
	len -= data_len;

	while (len > 0) {
		data_len = min_t(int, len, urb->sg[nsgs].length);
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
				sg_page(&urb->sg[nsgs]),
				urb->sg[nsgs].offset,
				data_len, q->buf_size);
		len -= data_len;
		nsgs++;
	}
	dev->drv->rx_skb(dev, MT_RXQ_MAIN, skb);

	return nsgs;
}

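/*
 * rx URB completion handler. It runs in interrupt context, so it only
 * accounts the completed buffer in the rx queue and defers processing
 * and resubmission to the rx tasklet; cancellation-type errors simply
 * drop the URB.
 */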
static void mt76u_complete_rx(struct urb *urb)
{
	struct mt76_dev *dev = urb->context;
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	unsigned long flags;

	switch (urb->status) {
	case -ECONNRESET:
	case -ESHUTDOWN:
	case -ENOENT:
		return;
	default:
		dev_err(dev->dev, "rx urb failed: %d\n", urb->status);
		/* fall through */
	case 0:
		break;
	}

	spin_lock_irqsave(&q->lock, flags);
	if (WARN_ONCE(q->entry[q->tail].ubuf.urb != urb, "rx urb mismatch"))
		goto out;

	q->tail = (q->tail + 1) % q->ndesc;
	q->queued++;
	tasklet_schedule(&dev->usb.rx_tasklet);
out:
	spin_unlock_irqrestore(&q->lock, flags);
}

static void mt76u_rx_tasklet(unsigned long data)
{
	struct mt76_dev *dev = (struct mt76_dev *)data;
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	int err, nsgs, buf_len = q->buf_size;
	struct mt76u_buf *buf;

	rcu_read_lock();

	while (true) {
		buf = mt76u_get_next_rx_entry(q);
		if (!buf)
			break;

		nsgs = mt76u_process_rx_entry(dev, buf->urb);
		if (nsgs > 0) {
			err = mt76u_fill_rx_sg(dev, buf, nsgs,
					       buf_len,
					       SKB_WITH_OVERHEAD(buf_len));
			if (err < 0)
				break;
		}
		mt76u_submit_buf(dev, USB_DIR_IN, MT_EP_IN_PKT_RX,
				 buf, GFP_ATOMIC,
				 mt76u_complete_rx, dev);
	}
	mt76_rx_poll_complete(dev, MT_RXQ_MAIN, NULL);

	rcu_read_unlock();
}

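/*
 * Submit every preallocated rx buffer and reset the queue indices.
 * Called from mt76u_alloc_rx() and exported for drivers that need to
 * rearm the rx queue themselves.
 */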
int mt76u_submit_rx_buffers(struct mt76_dev *dev)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	unsigned long flags;
	int i, err = 0;

	spin_lock_irqsave(&q->lock, flags);
	for (i = 0; i < q->ndesc; i++) {
		err = mt76u_submit_buf(dev, USB_DIR_IN, MT_EP_IN_PKT_RX,
				       &q->entry[i].ubuf, GFP_ATOMIC,
				       mt76u_complete_rx, dev);
		if (err < 0)
			break;
	}
	q->head = q->tail = 0;
	q->queued = 0;
	spin_unlock_irqrestore(&q->lock, flags);

	return err;
}
EXPORT_SYMBOL_GPL(mt76u_submit_rx_buffers);

static int mt76u_alloc_rx(struct mt76_dev *dev)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	int i, err, nsgs;

	spin_lock_init(&q->rx_page_lock);
	spin_lock_init(&q->lock);
	q->entry = devm_kcalloc(dev->dev,
				MT_NUM_RX_ENTRIES, sizeof(*q->entry),
				GFP_KERNEL);
	if (!q->entry)
		return -ENOMEM;

	if (mt76u_check_sg(dev)) {
		q->buf_size = MT_RX_BUF_SIZE;
		nsgs = MT_SG_MAX_SIZE;
	} else {
		q->buf_size = PAGE_SIZE;
		nsgs = 1;
	}

	for (i = 0; i < MT_NUM_RX_ENTRIES; i++) {
		err = mt76u_buf_alloc(dev, &q->entry[i].ubuf,
				      nsgs, q->buf_size,
				      SKB_WITH_OVERHEAD(q->buf_size),
				      GFP_KERNEL);
		if (err < 0)
			return err;
	}
	q->ndesc = MT_NUM_RX_ENTRIES;

	return mt76u_submit_rx_buffers(dev);
}

static void mt76u_free_rx(struct mt76_dev *dev)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	struct page *page;
	int i;

	for (i = 0; i < q->ndesc; i++)
		mt76u_buf_free(&q->entry[i].ubuf);

	spin_lock_bh(&q->rx_page_lock);
	if (!q->rx_page.va)
		goto out;

	page = virt_to_page(q->rx_page.va);
	__page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
	memset(&q->rx_page, 0, sizeof(q->rx_page));
out:
	spin_unlock_bh(&q->rx_page_lock);
}

static void mt76u_stop_rx(struct mt76_dev *dev)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	int i;

	for (i = 0; i < q->ndesc; i++)
		usb_kill_urb(q->entry[i].ubuf.urb);
}

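/*
 * tx tasklet: reap completed buffers on each AC queue, schedule more
 * frames from the software queues, kick the tx status work and wake
 * mac80211 queues that have drained below the refill threshold.
 */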
static void mt76u_tx_tasklet(unsigned long data)
{
	struct mt76_dev *dev = (struct mt76_dev *)data;
	struct mt76u_buf *buf;
	struct mt76_queue *q;
	bool wake;
	int i;

	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		q = &dev->q_tx[i];

		spin_lock_bh(&q->lock);
		while (true) {
			buf = &q->entry[q->head].ubuf;
			if (!buf->done || !q->queued)
				break;

			dev->drv->tx_complete_skb(dev, q,
						  &q->entry[q->head],
						  false);

			if (q->entry[q->head].schedule) {
				q->entry[q->head].schedule = false;
				q->swq_queued--;
			}

			q->head = (q->head + 1) % q->ndesc;
			q->queued--;
		}
		mt76_txq_schedule(dev, q);
		wake = i < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8;
		if (!q->queued)
			wake_up(&dev->tx_wait);

		spin_unlock_bh(&q->lock);

		if (!test_and_set_bit(MT76_READING_STATS, &dev->state))
			ieee80211_queue_delayed_work(dev->hw,
						     &dev->usb.stat_work,
						     msecs_to_jiffies(10));

		if (wake)
			ieee80211_wake_queue(dev->hw, i);
	}
}

static void mt76u_tx_status_data(struct work_struct *work)
{
	struct mt76_usb *usb;
	struct mt76_dev *dev;
	u8 update = 1;
	u16 count = 0;

	usb = container_of(work, struct mt76_usb, stat_work.work);
	dev = container_of(usb, struct mt76_dev, usb);

	while (true) {
		if (test_bit(MT76_REMOVED, &dev->state))
			break;

		if (!dev->drv->tx_status_data(dev, &update))
			break;
		count++;
	}

	if (count && test_bit(MT76_STATE_RUNNING, &dev->state))
		ieee80211_queue_delayed_work(dev->hw, &usb->stat_work,
					     msecs_to_jiffies(10));
	else
		clear_bit(MT76_READING_STATS, &dev->state);
}

static void mt76u_complete_tx(struct urb *urb)
{
	struct mt76u_buf *buf = urb->context;
	struct mt76_dev *dev = buf->dev;

	if (mt76u_urb_error(urb))
		dev_err(dev->dev, "tx urb failed: %d\n", urb->status);
	buf->done = true;

	tasklet_schedule(&dev->usb.tx_tasklet);
}

static int
mt76u_tx_build_sg(struct sk_buff *skb, struct urb *urb)
{
	int nsgs = 1 + skb_shinfo(skb)->nr_frags;
	struct sk_buff *iter;

	skb_walk_frags(skb, iter)
		nsgs += 1 + skb_shinfo(iter)->nr_frags;

	memset(urb->sg, 0, sizeof(*urb->sg) * MT_SG_MAX_SIZE);

	nsgs = min_t(int, MT_SG_MAX_SIZE, nsgs);
	sg_init_marker(urb->sg, nsgs);
	urb->num_sgs = nsgs;

	return skb_to_sgvec_nomark(skb, urb->sg, 0, skb->len);
}

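/*
 * Queue an skb for transmission: the driver callback builds the tx
 * descriptor, the skb (including its fragments) is mapped into the URB
 * scatterlist and the entry is ringed into the queue. The URB itself is
 * only submitted later by mt76u_tx_kick().
 */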
static int
mt76u_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
		   struct sk_buff *skb, struct mt76_wcid *wcid,
		   struct ieee80211_sta *sta)
{
	struct usb_interface *intf = to_usb_interface(dev->dev);
	struct usb_device *udev = interface_to_usbdev(intf);
	u8 ep = q2ep(q->hw_idx);
	struct mt76u_buf *buf;
	u16 idx = q->tail;
	unsigned int pipe;
	int err;

	if (q->queued == q->ndesc)
		return -ENOSPC;

	err = dev->drv->tx_prepare_skb(dev, NULL, skb, q, wcid, sta, NULL);
	if (err < 0)
		return err;

	buf = &q->entry[idx].ubuf;
	buf->done = false;

	err = mt76u_tx_build_sg(skb, buf->urb);
	if (err < 0)
		return err;

	pipe = usb_sndbulkpipe(udev, dev->usb.out_ep[ep]);
	usb_fill_bulk_urb(buf->urb, udev, pipe, NULL, skb->len,
			  mt76u_complete_tx, buf);

	q->tail = (q->tail + 1) % q->ndesc;
	q->entry[idx].skb = skb;
	q->queued++;

	return idx;
}

static void mt76u_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
{
	struct mt76u_buf *buf;
	int err;

	while (q->first != q->tail) {
		buf = &q->entry[q->first].ubuf;
		err = usb_submit_urb(buf->urb, GFP_ATOMIC);
		if (err < 0) {
			if (err == -ENODEV)
				set_bit(MT76_REMOVED, &dev->state);
			else
				dev_err(dev->dev, "tx urb submit failed:%d\n",
					err);
			break;
		}
		q->first = (q->first + 1) % q->ndesc;
	}
}

static int mt76u_alloc_tx(struct mt76_dev *dev)
{
	struct mt76u_buf *buf;
	struct mt76_queue *q;
	size_t size;
	int i, j;

	size = MT_SG_MAX_SIZE * sizeof(struct scatterlist);
	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		q = &dev->q_tx[i];
		spin_lock_init(&q->lock);
		INIT_LIST_HEAD(&q->swq);
		q->hw_idx = mt76_ac_to_hwq(i);

		q->entry = devm_kcalloc(dev->dev,
					MT_NUM_TX_ENTRIES, sizeof(*q->entry),
					GFP_KERNEL);
		if (!q->entry)
			return -ENOMEM;

		q->ndesc = MT_NUM_TX_ENTRIES;
		for (j = 0; j < q->ndesc; j++) {
			buf = &q->entry[j].ubuf;
			buf->dev = dev;

			buf->urb = usb_alloc_urb(0, GFP_KERNEL);
			if (!buf->urb)
				return -ENOMEM;

			buf->urb->sg = devm_kzalloc(dev->dev, size, GFP_KERNEL);
			if (!buf->urb->sg)
				return -ENOMEM;
		}
	}
	return 0;
}

static void mt76u_free_tx(struct mt76_dev *dev)
{
	struct mt76_queue *q;
	int i, j;

	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		q = &dev->q_tx[i];
		for (j = 0; j < q->ndesc; j++)
			usb_free_urb(q->entry[j].ubuf.urb);
	}
}

static void mt76u_stop_tx(struct mt76_dev *dev)
{
	struct mt76_queue *q;
	int i, j;

	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		q = &dev->q_tx[i];
		for (j = 0; j < q->ndesc; j++)
			usb_kill_urb(q->entry[j].ubuf.urb);
	}
}

void mt76u_stop_queues(struct mt76_dev *dev)
{
	tasklet_disable(&dev->usb.rx_tasklet);
	tasklet_disable(&dev->usb.tx_tasklet);

	mt76u_stop_rx(dev);
	mt76u_stop_tx(dev);
}
EXPORT_SYMBOL_GPL(mt76u_stop_queues);

void mt76u_stop_stat_wk(struct mt76_dev *dev)
{
	cancel_delayed_work_sync(&dev->usb.stat_work);
	clear_bit(MT76_READING_STATS, &dev->state);
}
EXPORT_SYMBOL_GPL(mt76u_stop_stat_wk);

void mt76u_queues_deinit(struct mt76_dev *dev)
{
	mt76u_stop_queues(dev);

	mt76u_free_rx(dev);
	mt76u_free_tx(dev);
}
EXPORT_SYMBOL_GPL(mt76u_queues_deinit);

int mt76u_alloc_queues(struct mt76_dev *dev)
{
	int err;

	err = mt76u_alloc_rx(dev);
	if (err < 0)
		goto err;

	err = mt76u_alloc_tx(dev);
	if (err < 0)
		goto err;

	return 0;
err:
	mt76u_queues_deinit(dev);
	return err;
}
EXPORT_SYMBOL_GPL(mt76u_alloc_queues);

static const struct mt76_queue_ops usb_queue_ops = {
	.tx_queue_skb = mt76u_tx_queue_skb,
	.kick = mt76u_tx_kick,
};

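/*
 * Hook up the USB bus and queue ops, initialize the tasklets, tx status
 * work and control/MCU locks, then discover the bulk endpoints. A rough
 * probe-time sequence would look like the sketch below (illustrative
 * only; the dev->mt76 embedding and driver-specific glue are assumed):
 *
 *	err = mt76u_init(&dev->mt76, intf);
 *	if (err < 0)
 *		goto error;
 *	err = mt76u_alloc_queues(&dev->mt76);
 *	if (err < 0)
 *		goto error;
 */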
int mt76u_init(struct mt76_dev *dev,
	       struct usb_interface *intf)
{
	static const struct mt76_bus_ops mt76u_ops = {
		.rr = mt76u_rr,
		.wr = mt76u_wr,
		.rmw = mt76u_rmw,
		.copy = mt76u_copy,
		.wr_rp = mt76u_wr_rp,
		.rd_rp = mt76u_rd_rp,
		.type = MT76_BUS_USB,
	};
	struct mt76_usb *usb = &dev->usb;

	tasklet_init(&usb->rx_tasklet, mt76u_rx_tasklet, (unsigned long)dev);
	tasklet_init(&usb->tx_tasklet, mt76u_tx_tasklet, (unsigned long)dev);
	INIT_DELAYED_WORK(&usb->stat_work, mt76u_tx_status_data);
	skb_queue_head_init(&dev->rx_skb[MT_RXQ_MAIN]);

	init_completion(&usb->mcu.cmpl);
	mutex_init(&usb->mcu.mutex);

	mutex_init(&usb->usb_ctrl_mtx);
	dev->bus = &mt76u_ops;
	dev->queue_ops = &usb_queue_ops;

	return mt76u_set_endpoints(intf, usb);
}
EXPORT_SYMBOL_GPL(mt76u_init);

MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>");
MODULE_LICENSE("Dual BSD/GPL");