/*
 * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/module.h>
#include "mt76.h"
#include "usb_trace.h"
#include "dma.h"

#define MT_VEND_REQ_MAX_RETRY	10
#define MT_VEND_REQ_TOUT_MS	300

static bool disable_usb_sg;
module_param_named(disable_usb_sg, disable_usb_sg, bool, 0644);
MODULE_PARM_DESC(disable_usb_sg, "Disable usb scatter-gather support");

/* should be called with usb_ctrl_mtx locked */
static int __mt76u_vendor_request(struct mt76_dev *dev, u8 req,
				  u8 req_type, u16 val, u16 offset,
				  void *buf, size_t len)
{
	struct usb_device *udev = to_usb_device(dev->dev);
	unsigned int pipe;
	int i, ret;

	pipe = (req_type & USB_DIR_IN) ? usb_rcvctrlpipe(udev, 0)
				       : usb_sndctrlpipe(udev, 0);
	for (i = 0; i < MT_VEND_REQ_MAX_RETRY; i++) {
		if (test_bit(MT76_REMOVED, &dev->state))
			return -EIO;

		ret = usb_control_msg(udev, pipe, req, req_type, val,
				      offset, buf, len, MT_VEND_REQ_TOUT_MS);
		if (ret == -ENODEV)
			set_bit(MT76_REMOVED, &dev->state);
		if (ret >= 0 || ret == -ENODEV)
			return ret;
		usleep_range(5000, 10000);
	}

	dev_err(dev->dev, "vendor request req:%02x off:%04x failed:%d\n",
		req, offset, ret);
	return ret;
}

int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
			 u8 req_type, u16 val, u16 offset,
			 void *buf, size_t len)
{
	int ret;

	mutex_lock(&dev->usb.usb_ctrl_mtx);
	ret = __mt76u_vendor_request(dev, req, req_type,
				     val, offset, buf, len);
	trace_usb_reg_wr(dev, offset, val);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);

	return ret;
}
EXPORT_SYMBOL_GPL(mt76u_vendor_request);

/* should be called with usb_ctrl_mtx locked */
static u32 __mt76u_rr(struct mt76_dev *dev, u32 addr)
{
	struct mt76_usb *usb = &dev->usb;
	u32 data = ~0;
	u16 offset;
	int ret;
	u8 req;

	switch (addr & MT_VEND_TYPE_MASK) {
	case MT_VEND_TYPE_EEPROM:
		req = MT_VEND_READ_EEPROM;
		break;
	case MT_VEND_TYPE_CFG:
		req = MT_VEND_READ_CFG;
		break;
	default:
		req = MT_VEND_MULTI_READ;
		break;
	}
	offset = addr & ~MT_VEND_TYPE_MASK;

	ret = __mt76u_vendor_request(dev, req,
				     USB_DIR_IN | USB_TYPE_VENDOR,
				     0, offset, usb->data, sizeof(__le32));
	if (ret == sizeof(__le32))
		data = get_unaligned_le32(usb->data);
	trace_usb_reg_rr(dev, addr, data);

	return data;
}

static u32 mt76u_rr(struct mt76_dev *dev, u32 addr)
{
	u32 ret;

	mutex_lock(&dev->usb.usb_ctrl_mtx);
	ret = __mt76u_rr(dev, addr);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);

	return ret;
}

/* should be called with usb_ctrl_mtx locked */
static void __mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
{
	struct mt76_usb *usb = &dev->usb;
	u16 offset;
	u8 req;

	switch (addr & MT_VEND_TYPE_MASK) {
	case MT_VEND_TYPE_CFG:
		req = MT_VEND_WRITE_CFG;
		break;
	default:
		req = MT_VEND_MULTI_WRITE;
		break;
	}
	offset = addr & ~MT_VEND_TYPE_MASK;

	put_unaligned_le32(val, usb->data);
	__mt76u_vendor_request(dev, req,
			       USB_DIR_OUT | USB_TYPE_VENDOR, 0,
			       offset, usb->data, sizeof(__le32));
	trace_usb_reg_wr(dev, addr, val);
}

static void mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
{
	mutex_lock(&dev->usb.usb_ctrl_mtx);
	__mt76u_wr(dev, addr, val);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);
}

static u32 mt76u_rmw(struct mt76_dev *dev, u32 addr,
		     u32 mask, u32 val)
{
	mutex_lock(&dev->usb.usb_ctrl_mtx);
	val |= __mt76u_rr(dev, addr) & ~mask;
	__mt76u_wr(dev, addr, val);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);

	return val;
}

static void mt76u_copy(struct mt76_dev *dev, u32 offset,
		       const void *data, int len)
{
	struct mt76_usb *usb = &dev->usb;
	const u32 *val = data;
	int i, ret;

	mutex_lock(&usb->usb_ctrl_mtx);
	for (i = 0; i < (len / 4); i++) {
		put_unaligned_le32(val[i], usb->data);
		ret = __mt76u_vendor_request(dev, MT_VEND_MULTI_WRITE,
					     USB_DIR_OUT | USB_TYPE_VENDOR,
					     0, offset + i * 4, usb->data,
					     sizeof(__le32));
		if (ret < 0)
			break;
	}
	mutex_unlock(&usb->usb_ctrl_mtx);
}

void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
		     const u16 offset, const u32 val)
{
	mutex_lock(&dev->usb.usb_ctrl_mtx);
	__mt76u_vendor_request(dev, req,
			       USB_DIR_OUT | USB_TYPE_VENDOR,
			       val & 0xffff, offset, NULL, 0);
	__mt76u_vendor_request(dev, req,
			       USB_DIR_OUT | USB_TYPE_VENDOR,
			       val >> 16, offset + 2, NULL, 0);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);
}
EXPORT_SYMBOL_GPL(mt76u_single_wr);

static int
mt76u_req_wr_rp(struct mt76_dev *dev, u32 base,
		const struct mt76_reg_pair *data, int len)
{
	struct mt76_usb *usb = &dev->usb;

	mutex_lock(&usb->usb_ctrl_mtx);
	while (len > 0) {
		__mt76u_wr(dev, base + data->reg, data->value);
		len--;
		data++;
	}
	mutex_unlock(&usb->usb_ctrl_mtx);

	return 0;
}

static int
mt76u_wr_rp(struct mt76_dev *dev, u32 base,
	    const struct mt76_reg_pair *data, int n)
{
	if (test_bit(MT76_STATE_MCU_RUNNING, &dev->state))
		return dev->mcu_ops->mcu_wr_rp(dev, base, data, n);
	else
		return mt76u_req_wr_rp(dev, base, data, n);
}

static int
mt76u_req_rd_rp(struct mt76_dev *dev, u32 base, struct mt76_reg_pair *data,
		int len)
{
	struct mt76_usb *usb = &dev->usb;

	mutex_lock(&usb->usb_ctrl_mtx);
	while (len > 0) {
		data->value = __mt76u_rr(dev, base + data->reg);
		len--;
		data++;
	}
	mutex_unlock(&usb->usb_ctrl_mtx);

	return 0;
}

static int
mt76u_rd_rp(struct mt76_dev *dev, u32 base,
	    struct mt76_reg_pair *data, int n)
{
	if (test_bit(MT76_STATE_MCU_RUNNING, &dev->state))
		return dev->mcu_ops->mcu_rd_rp(dev, base, data, n);
	else
		return mt76u_req_rd_rp(dev, base, data, n);
}

static bool mt76u_check_sg(struct mt76_dev *dev)
{
	struct usb_device *udev = to_usb_device(dev->dev);

	return (!disable_usb_sg && udev->bus->sg_tablesize > 0 &&
		(udev->bus->no_sg_constraint ||
		 udev->speed == USB_SPEED_WIRELESS));
}

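/* Map the interface's bulk endpoints onto the driver's in_ep/out_ep tables;
 * setup fails with -EINVAL unless every expected endpoint is present.
 */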
static int
mt76u_set_endpoints(struct usb_interface *intf,
		    struct mt76_usb *usb)
{
	struct usb_host_interface *intf_desc = intf->cur_altsetting;
	struct usb_endpoint_descriptor *ep_desc;
	int i, in_ep = 0, out_ep = 0;

	for (i = 0; i < intf_desc->desc.bNumEndpoints; i++) {
		ep_desc = &intf_desc->endpoint[i].desc;

		if (usb_endpoint_is_bulk_in(ep_desc) &&
		    in_ep < __MT_EP_IN_MAX) {
			usb->in_ep[in_ep] = usb_endpoint_num(ep_desc);
			in_ep++;
		} else if (usb_endpoint_is_bulk_out(ep_desc) &&
			   out_ep < __MT_EP_OUT_MAX) {
			usb->out_ep[out_ep] = usb_endpoint_num(ep_desc);
			out_ep++;
		}
	}

	if (in_ep != __MT_EP_IN_MAX || out_ep != __MT_EP_OUT_MAX)
		return -EINVAL;
	return 0;
}

static int
mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76_queue *q, struct urb *urb,
		 int nsgs, gfp_t gfp)
{
	int i;

	for (i = 0; i < nsgs; i++) {
		struct page *page;
		void *data;
		int offset;

		data = page_frag_alloc(&q->rx_page, q->buf_size, gfp);
		if (!data)
			break;

		page = virt_to_head_page(data);
		offset = data - page_address(page);
		sg_set_page(&urb->sg[i], page, q->buf_size, offset);
	}

	if (i < nsgs) {
		int j;

		for (j = nsgs; j < urb->num_sgs; j++)
			skb_free_frag(sg_virt(&urb->sg[j]));
		urb->num_sgs = i;
	}

	urb->num_sgs = max_t(int, i, urb->num_sgs);
	urb->transfer_buffer_length = urb->num_sgs * q->buf_size;
	sg_init_marker(urb->sg, urb->num_sgs);

	return i ? : -ENOMEM;
}

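/* Re-arm an rx urb with fresh page-fragment buffers: scatterlist entries
 * when scatter-gather is enabled, otherwise a single linear buffer.
 */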
static int
mt76u_refill_rx(struct mt76_dev *dev, struct urb *urb, int nsgs, gfp_t gfp)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];

	if (dev->usb.sg_en) {
		return mt76u_fill_rx_sg(dev, q, urb, nsgs, gfp);
	} else {
		urb->transfer_buffer_length = q->buf_size;
		urb->transfer_buffer = page_frag_alloc(&q->rx_page,
						       q->buf_size, gfp);
		return urb->transfer_buffer ? 0 : -ENOMEM;
	}
}

static int
mt76u_urb_alloc(struct mt76_dev *dev, struct mt76_queue_entry *e,
		int sg_max_size)
{
	unsigned int size = sizeof(struct urb);

	if (dev->usb.sg_en)
		size += sg_max_size * sizeof(struct scatterlist);

	e->urb = kzalloc(size, GFP_KERNEL);
	if (!e->urb)
		return -ENOMEM;

	usb_init_urb(e->urb);

	if (dev->usb.sg_en)
		e->urb->sg = (struct scatterlist *)(e->urb + 1);

	return 0;
}

static int
mt76u_rx_urb_alloc(struct mt76_dev *dev, struct mt76_queue_entry *e)
{
	int err;

	err = mt76u_urb_alloc(dev, e, MT_RX_SG_MAX_SIZE);
	if (err)
		return err;

	return mt76u_refill_rx(dev, e->urb, MT_RX_SG_MAX_SIZE,
			       GFP_KERNEL);
}

static void mt76u_urb_free(struct urb *urb)
{
	int i;

	for (i = 0; i < urb->num_sgs; i++)
		skb_free_frag(sg_virt(&urb->sg[i]));

	if (urb->transfer_buffer)
		skb_free_frag(urb->transfer_buffer);

	usb_free_urb(urb);
}

static void
mt76u_fill_bulk_urb(struct mt76_dev *dev, int dir, int index,
		    struct urb *urb, usb_complete_t complete_fn,
		    void *context)
{
	struct usb_device *udev = to_usb_device(dev->dev);
	unsigned int pipe;

	if (dir == USB_DIR_IN)
		pipe = usb_rcvbulkpipe(udev, dev->usb.in_ep[index]);
	else
		pipe = usb_sndbulkpipe(udev, dev->usb.out_ep[index]);

	urb->dev = udev;
	urb->pipe = pipe;
	urb->complete = complete_fn;
	urb->context = context;
}

static inline struct urb *
mt76u_get_next_rx_entry(struct mt76_dev *dev)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	struct urb *urb = NULL;
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	if (q->queued > 0) {
		urb = q->entry[q->head].urb;
		q->head = (q->head + 1) % q->ndesc;
		q->queued--;
	}
	spin_unlock_irqrestore(&q->lock, flags);

	return urb;
}

static int mt76u_get_rx_entry_len(u8 *data, u32 data_len)
{
	u16 dma_len, min_len;

	dma_len = get_unaligned_le16(data);
	min_len = MT_DMA_HDR_LEN + MT_RX_RXWI_LEN +
		  MT_FCE_INFO_LEN;

	if (data_len < min_len || !dma_len ||
	    dma_len + MT_DMA_HDR_LEN > data_len ||
	    (dma_len & 0x3))
		return -EINVAL;
	return dma_len;
}

static struct sk_buff *
mt76u_build_rx_skb(void *data, int len, int buf_size)
{
	struct sk_buff *skb;

	if (SKB_WITH_OVERHEAD(buf_size) < MT_DMA_HDR_LEN + len) {
		struct page *page;

		/* slow path, not enough space for data and
		 * skb_shared_info
		 */
		skb = alloc_skb(MT_SKB_HEAD_LEN, GFP_ATOMIC);
		if (!skb)
			return NULL;

		skb_put_data(skb, data + MT_DMA_HDR_LEN, MT_SKB_HEAD_LEN);
		data += (MT_DMA_HDR_LEN + MT_SKB_HEAD_LEN);
		page = virt_to_head_page(data);
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
				page, data - page_address(page),
				len - MT_SKB_HEAD_LEN, buf_size);

		return skb;
	}

	/* fast path */
	skb = build_skb(data, buf_size);
	if (!skb)
		return NULL;

	skb_reserve(skb, MT_DMA_HDR_LEN);
	__skb_put(skb, len);

	return skb;
}

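/* Validate the DMA header, build an skb around the first rx buffer and
 * attach any remaining scatterlist segments as paged fragments before
 * handing the frame to the driver's rx_skb() hook. Returns the number of
 * consumed segments (buffers to refill), or 0 if the urb was dropped.
 */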
static int
mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	u8 *data = urb->num_sgs ? sg_virt(&urb->sg[0]) : urb->transfer_buffer;
	int data_len = urb->num_sgs ? urb->sg[0].length : urb->actual_length;
	int len, nsgs = 1;
	struct sk_buff *skb;

	if (!test_bit(MT76_STATE_INITIALIZED, &dev->state))
		return 0;

	len = mt76u_get_rx_entry_len(data, urb->actual_length);
	if (len < 0)
		return 0;

	data_len = min_t(int, len, data_len - MT_DMA_HDR_LEN);
	skb = mt76u_build_rx_skb(data, data_len, q->buf_size);
	if (!skb)
		return 0;

	len -= data_len;
	while (len > 0 && nsgs < urb->num_sgs) {
		data_len = min_t(int, len, urb->sg[nsgs].length);
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
				sg_page(&urb->sg[nsgs]),
				urb->sg[nsgs].offset,
				data_len, q->buf_size);
		len -= data_len;
		nsgs++;
	}
	dev->drv->rx_skb(dev, MT_RXQ_MAIN, skb);

	return nsgs;
}

static void mt76u_complete_rx(struct urb *urb)
{
	struct mt76_dev *dev = urb->context;
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	unsigned long flags;

	trace_rx_urb(dev, urb);

	switch (urb->status) {
	case -ECONNRESET:
	case -ESHUTDOWN:
	case -ENOENT:
		return;
	default:
		dev_err_ratelimited(dev->dev, "rx urb failed: %d\n",
				    urb->status);
		/* fall through */
	case 0:
		break;
	}

	spin_lock_irqsave(&q->lock, flags);
	if (WARN_ONCE(q->entry[q->tail].urb != urb, "rx urb mismatch"))
		goto out;

	q->tail = (q->tail + 1) % q->ndesc;
	q->queued++;
	tasklet_schedule(&dev->usb.rx_tasklet);
out:
	spin_unlock_irqrestore(&q->lock, flags);
}

static int
mt76u_submit_rx_buf(struct mt76_dev *dev, struct urb *urb)
{
	mt76u_fill_bulk_urb(dev, USB_DIR_IN, MT_EP_IN_PKT_RX, urb,
			    mt76u_complete_rx, dev);
	trace_submit_urb(dev, urb);

	return usb_submit_urb(urb, GFP_ATOMIC);
}

static void mt76u_rx_tasklet(unsigned long data)
{
	struct mt76_dev *dev = (struct mt76_dev *)data;
	struct urb *urb;
	int err, count;

	rcu_read_lock();

	while (true) {
		urb = mt76u_get_next_rx_entry(dev);
		if (!urb)
			break;

		count = mt76u_process_rx_entry(dev, urb);
		if (count > 0) {
			err = mt76u_refill_rx(dev, urb, count, GFP_ATOMIC);
			if (err < 0)
				break;
		}
		mt76u_submit_rx_buf(dev, urb);
	}
	mt76_rx_poll_complete(dev, MT_RXQ_MAIN, NULL);

	rcu_read_unlock();
}

static int mt76u_submit_rx_buffers(struct mt76_dev *dev)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	unsigned long flags;
	int i, err = 0;

	spin_lock_irqsave(&q->lock, flags);
	for (i = 0; i < q->ndesc; i++) {
		err = mt76u_submit_rx_buf(dev, q->entry[i].urb);
		if (err < 0)
			break;
	}
	q->head = q->tail = 0;
	q->queued = 0;
	spin_unlock_irqrestore(&q->lock, flags);

	return err;
}

static int mt76u_alloc_rx(struct mt76_dev *dev)
{
	struct mt76_usb *usb = &dev->usb;
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	int i, err;

	usb->mcu.data = devm_kmalloc(dev->dev, MCU_RESP_URB_SIZE, GFP_KERNEL);
	if (!usb->mcu.data)
		return -ENOMEM;

	spin_lock_init(&q->lock);
	q->entry = devm_kcalloc(dev->dev,
				MT_NUM_RX_ENTRIES, sizeof(*q->entry),
				GFP_KERNEL);
	if (!q->entry)
		return -ENOMEM;

	q->ndesc = MT_NUM_RX_ENTRIES;
	q->buf_size = PAGE_SIZE;

	for (i = 0; i < q->ndesc; i++) {
		err = mt76u_rx_urb_alloc(dev, &q->entry[i]);
		if (err < 0)
			return err;
	}

	return mt76u_submit_rx_buffers(dev);
}

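/* Release all rx urbs and their buffers and drain the page_frag cache used
 * by the rx queue.
 */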
static void mt76u_free_rx(struct mt76_dev *dev)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	struct page *page;
	int i;

	for (i = 0; i < q->ndesc; i++)
		mt76u_urb_free(q->entry[i].urb);

	if (!q->rx_page.va)
		return;

	page = virt_to_page(q->rx_page.va);
	__page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
	memset(&q->rx_page, 0, sizeof(q->rx_page));
}

void mt76u_stop_rx(struct mt76_dev *dev)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	int i;

	for (i = 0; i < q->ndesc; i++)
		usb_poison_urb(q->entry[i].urb);

	tasklet_kill(&dev->usb.rx_tasklet);
}
EXPORT_SYMBOL_GPL(mt76u_stop_rx);

int mt76u_resume_rx(struct mt76_dev *dev)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	int i;

	for (i = 0; i < q->ndesc; i++)
		usb_unpoison_urb(q->entry[i].urb);

	return mt76u_submit_rx_buffers(dev);
}
EXPORT_SYMBOL_GPL(mt76u_resume_rx);

static void mt76u_tx_tasklet(unsigned long data)
{
	struct mt76_dev *dev = (struct mt76_dev *)data;
	struct mt76_queue_entry entry;
	struct mt76_sw_queue *sq;
	struct mt76_queue *q;
	bool wake;
	int i;

	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		u32 n_dequeued = 0, n_sw_dequeued = 0;

		sq = &dev->q_tx[i];
		q = sq->q;

		while (q->queued > n_dequeued) {
			if (!q->entry[q->head].done)
				break;

			if (q->entry[q->head].schedule) {
				q->entry[q->head].schedule = false;
				n_sw_dequeued++;
			}

			entry = q->entry[q->head];
			q->entry[q->head].done = false;
			q->head = (q->head + 1) % q->ndesc;
			n_dequeued++;

			dev->drv->tx_complete_skb(dev, i, &entry);
		}

		spin_lock_bh(&q->lock);

		sq->swq_queued -= n_sw_dequeued;
		q->queued -= n_dequeued;

		wake = q->stopped && q->queued < q->ndesc - 8;
		if (wake)
			q->stopped = false;

		if (!q->queued)
			wake_up(&dev->tx_wait);

		spin_unlock_bh(&q->lock);

		mt76_txq_schedule(dev, i);

		if (!test_and_set_bit(MT76_READING_STATS, &dev->state))
			ieee80211_queue_delayed_work(dev->hw,
						     &dev->usb.stat_work,
						     msecs_to_jiffies(10));

		if (wake)
			ieee80211_wake_queue(dev->hw, i);
	}
}

static void mt76u_tx_status_data(struct work_struct *work)
{
	struct mt76_usb *usb;
	struct mt76_dev *dev;
	u8 update = 1;
	u16 count = 0;

	usb = container_of(work, struct mt76_usb, stat_work.work);
	dev = container_of(usb, struct mt76_dev, usb);

	while (true) {
		if (test_bit(MT76_REMOVED, &dev->state))
			break;

		if (!dev->drv->tx_status_data(dev, &update))
			break;
		count++;
	}

	if (count && test_bit(MT76_STATE_RUNNING, &dev->state))
		ieee80211_queue_delayed_work(dev->hw, &usb->stat_work,
					     msecs_to_jiffies(10));
	else
		clear_bit(MT76_READING_STATS, &dev->state);
}

static void mt76u_complete_tx(struct urb *urb)
{
	struct mt76_dev *dev = dev_get_drvdata(&urb->dev->dev);
	struct mt76_queue_entry *e = urb->context;

	if (mt76u_urb_error(urb))
		dev_err(dev->dev, "tx urb failed: %d\n", urb->status);
	e->done = true;

	tasklet_schedule(&dev->tx_tasklet);
}

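/* Attach the skb payload to the tx urb: map it through skb_to_sgvec() when
 * scatter-gather is enabled, otherwise point transfer_buffer straight at
 * skb->data.
 */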
static int
mt76u_tx_setup_buffers(struct mt76_dev *dev, struct sk_buff *skb,
		       struct urb *urb)
{
	urb->transfer_buffer_length = skb->len;

	if (!dev->usb.sg_en) {
		urb->transfer_buffer = skb->data;
		return 0;
	} else {
		sg_init_table(urb->sg, MT_TX_SG_MAX_SIZE);
		urb->num_sgs = skb_to_sgvec(skb, urb->sg, 0, skb->len);
		if (urb->num_sgs == 0)
			return -ENOMEM;
		return urb->num_sgs;
	}
}

static int
mt76u_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
		   struct sk_buff *skb, struct mt76_wcid *wcid,
		   struct ieee80211_sta *sta)
{
	struct mt76_queue *q = dev->q_tx[qid].q;
	struct mt76_tx_info tx_info = {
		.skb = skb,
	};
	u16 idx = q->tail;
	int err;

	if (q->queued == q->ndesc)
		return -ENOSPC;

	skb->prev = skb->next = NULL;
	err = dev->drv->tx_prepare_skb(dev, NULL, qid, wcid, sta, &tx_info);
	if (err < 0)
		return err;

	err = mt76u_tx_setup_buffers(dev, tx_info.skb, q->entry[idx].urb);
	if (err < 0)
		return err;

	mt76u_fill_bulk_urb(dev, USB_DIR_OUT, q2ep(q->hw_idx),
			    q->entry[idx].urb, mt76u_complete_tx,
			    &q->entry[idx]);

	q->tail = (q->tail + 1) % q->ndesc;
	q->entry[idx].skb = tx_info.skb;
	q->queued++;

	return idx;
}

static void mt76u_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
{
	struct urb *urb;
	int err;

	while (q->first != q->tail) {
		urb = q->entry[q->first].urb;

		trace_submit_urb(dev, urb);
		err = usb_submit_urb(urb, GFP_ATOMIC);
		if (err < 0) {
			if (err == -ENODEV)
				set_bit(MT76_REMOVED, &dev->state);
			else
				dev_err(dev->dev, "tx urb submit failed:%d\n",
					err);
			break;
		}
		q->first = (q->first + 1) % q->ndesc;
	}
}

static int mt76u_alloc_tx(struct mt76_dev *dev)
{
	struct mt76_queue *q;
	int i, j, err;

	for (i = 0; i <= MT_TXQ_PSD; i++) {
		INIT_LIST_HEAD(&dev->q_tx[i].swq);

		if (i >= IEEE80211_NUM_ACS) {
			dev->q_tx[i].q = dev->q_tx[0].q;
			continue;
		}

		q = devm_kzalloc(dev->dev, sizeof(*q), GFP_KERNEL);
		if (!q)
			return -ENOMEM;

		spin_lock_init(&q->lock);
		q->hw_idx = mt76_ac_to_hwq(i);
		dev->q_tx[i].q = q;

		q->entry = devm_kcalloc(dev->dev,
					MT_NUM_TX_ENTRIES, sizeof(*q->entry),
					GFP_KERNEL);
		if (!q->entry)
			return -ENOMEM;

		q->ndesc = MT_NUM_TX_ENTRIES;
		for (j = 0; j < q->ndesc; j++) {
			err = mt76u_urb_alloc(dev, &q->entry[j],
					      MT_TX_SG_MAX_SIZE);
			if (err < 0)
				return err;
		}
	}
	return 0;
}

static void mt76u_free_tx(struct mt76_dev *dev)
{
	struct mt76_queue *q;
	int i, j;

	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		q = dev->q_tx[i].q;
		for (j = 0; j < q->ndesc; j++)
			usb_free_urb(q->entry[j].urb);
	}
}

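/* Wait briefly for pending frames to drain; on timeout kill the queued urbs
 * and the tx tasklet and complete any leftover entries by hand before
 * stopping the tx status work.
 */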
void mt76u_stop_tx(struct mt76_dev *dev)
{
	struct mt76_queue_entry entry;
	struct mt76_queue *q;
	int i, j, ret;

	ret = wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(dev), HZ/5);
	if (!ret) {
		dev_err(dev->dev, "timed out waiting for pending tx\n");

		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
			q = dev->q_tx[i].q;
			for (j = 0; j < q->ndesc; j++)
				usb_kill_urb(q->entry[j].urb);
		}

		tasklet_kill(&dev->tx_tasklet);

		/* On device removal we might queue skb's, but mt76u_tx_kick()
		 * will fail to submit urb, cleanup those skb's manually.
		 */
		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
			q = dev->q_tx[i].q;

			/* Assure we are in sync with killed tasklet. */
			spin_lock_bh(&q->lock);
			while (q->queued) {
				entry = q->entry[q->head];
				q->head = (q->head + 1) % q->ndesc;
				q->queued--;

				dev->drv->tx_complete_skb(dev, i, &entry);
			}
			spin_unlock_bh(&q->lock);
		}
	}

	cancel_delayed_work_sync(&dev->usb.stat_work);
	clear_bit(MT76_READING_STATS, &dev->state);

	mt76_tx_status_check(dev, NULL, true);
}
EXPORT_SYMBOL_GPL(mt76u_stop_tx);

void mt76u_queues_deinit(struct mt76_dev *dev)
{
	mt76u_stop_rx(dev);
	mt76u_stop_tx(dev);

	mt76u_free_rx(dev);
	mt76u_free_tx(dev);
}
EXPORT_SYMBOL_GPL(mt76u_queues_deinit);

int mt76u_alloc_queues(struct mt76_dev *dev)
{
	int err;

	err = mt76u_alloc_rx(dev);
	if (err < 0)
		return err;

	return mt76u_alloc_tx(dev);
}
EXPORT_SYMBOL_GPL(mt76u_alloc_queues);

static const struct mt76_queue_ops usb_queue_ops = {
	.tx_queue_skb = mt76u_tx_queue_skb,
	.kick = mt76u_tx_kick,
};

int mt76u_init(struct mt76_dev *dev,
	       struct usb_interface *intf)
{
	static const struct mt76_bus_ops mt76u_ops = {
		.rr = mt76u_rr,
		.wr = mt76u_wr,
		.rmw = mt76u_rmw,
		.copy = mt76u_copy,
		.wr_rp = mt76u_wr_rp,
		.rd_rp = mt76u_rd_rp,
		.type = MT76_BUS_USB,
	};
	struct mt76_usb *usb = &dev->usb;

	tasklet_init(&usb->rx_tasklet, mt76u_rx_tasklet, (unsigned long)dev);
	tasklet_init(&dev->tx_tasklet, mt76u_tx_tasklet, (unsigned long)dev);
	INIT_DELAYED_WORK(&usb->stat_work, mt76u_tx_status_data);
	skb_queue_head_init(&dev->rx_skb[MT_RXQ_MAIN]);

	mutex_init(&usb->mcu.mutex);

	mutex_init(&usb->usb_ctrl_mtx);
	dev->bus = &mt76u_ops;
	dev->queue_ops = &usb_queue_ops;

	usb->sg_en = mt76u_check_sg(dev);

	return mt76u_set_endpoints(intf, usb);
}
EXPORT_SYMBOL_GPL(mt76u_init);

MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>");
MODULE_LICENSE("Dual BSD/GPL");