// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
 */

#include <linux/module.h>
#include "mt76.h"
#include "usb_trace.h"
#include "dma.h"

#define MT_VEND_REQ_MAX_RETRY	10
#define MT_VEND_REQ_TOUT_MS	300

static bool disable_usb_sg;
module_param_named(disable_usb_sg, disable_usb_sg, bool, 0644);
MODULE_PARM_DESC(disable_usb_sg, "Disable usb scatter-gather support");

static int __mt76u_vendor_request(struct mt76_dev *dev, u8 req,
				  u8 req_type, u16 val, u16 offset,
				  void *buf, size_t len)
{
	struct usb_interface *uintf = to_usb_interface(dev->dev);
	struct usb_device *udev = interface_to_usbdev(uintf);
	unsigned int pipe;
	int i, ret;

	lockdep_assert_held(&dev->usb.usb_ctrl_mtx);

	pipe = (req_type & USB_DIR_IN) ? usb_rcvctrlpipe(udev, 0)
				       : usb_sndctrlpipe(udev, 0);
	for (i = 0; i < MT_VEND_REQ_MAX_RETRY; i++) {
		if (test_bit(MT76_REMOVED, &dev->phy.state))
			return -EIO;

		ret = usb_control_msg(udev, pipe, req, req_type, val,
				      offset, buf, len, MT_VEND_REQ_TOUT_MS);
		if (ret == -ENODEV)
			set_bit(MT76_REMOVED, &dev->phy.state);
		if (ret >= 0 || ret == -ENODEV)
			return ret;
		usleep_range(5000, 10000);
	}

	dev_err(dev->dev, "vendor request req:%02x off:%04x failed:%d\n",
		req, offset, ret);
	return ret;
}

int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
			 u8 req_type, u16 val, u16 offset,
			 void *buf, size_t len)
{
	int ret;

	mutex_lock(&dev->usb.usb_ctrl_mtx);
	ret = __mt76u_vendor_request(dev, req, req_type,
				     val, offset, buf, len);
	trace_usb_reg_wr(dev, offset, val);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);

	return ret;
}
EXPORT_SYMBOL_GPL(mt76u_vendor_request);

static u32 ___mt76u_rr(struct mt76_dev *dev, u8 req, u32 addr)
{
	struct mt76_usb *usb = &dev->usb;
	u32 data = ~0;
	int ret;

	ret = __mt76u_vendor_request(dev, req,
				     USB_DIR_IN | USB_TYPE_VENDOR,
				     addr >> 16, addr, usb->data,
				     sizeof(__le32));
	if (ret == sizeof(__le32))
		data = get_unaligned_le32(usb->data);
	trace_usb_reg_rr(dev, addr, data);

	return data;
}

static u32 __mt76u_rr(struct mt76_dev *dev, u32 addr)
{
	u8 req;

	switch (addr & MT_VEND_TYPE_MASK) {
	case MT_VEND_TYPE_EEPROM:
		req = MT_VEND_READ_EEPROM;
		break;
	case MT_VEND_TYPE_CFG:
		req = MT_VEND_READ_CFG;
		break;
	default:
		req = MT_VEND_MULTI_READ;
		break;
	}

	return ___mt76u_rr(dev, req, addr & ~MT_VEND_TYPE_MASK);
}

static u32 mt76u_rr(struct mt76_dev *dev, u32 addr)
{
	u32 ret;

	mutex_lock(&dev->usb.usb_ctrl_mtx);
	ret = __mt76u_rr(dev, addr);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);

	return ret;
}

static u32 mt76u_rr_ext(struct mt76_dev *dev, u32 addr)
{
	u32 ret;

	mutex_lock(&dev->usb.usb_ctrl_mtx);
	ret = ___mt76u_rr(dev, MT_VEND_READ_EXT, addr);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);

	return ret;
}

static void ___mt76u_wr(struct mt76_dev *dev, u8 req,
			u32 addr, u32 val)
{
	struct mt76_usb *usb = &dev->usb;

	put_unaligned_le32(val, usb->data);
	__mt76u_vendor_request(dev, req,
			       USB_DIR_OUT | USB_TYPE_VENDOR,
			       addr >> 16, addr, usb->data,
			       sizeof(__le32));
	trace_usb_reg_wr(dev, addr, val);
}

static void __mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
{
	u8 req;

	switch (addr & MT_VEND_TYPE_MASK) {
	case MT_VEND_TYPE_CFG:
		req = MT_VEND_WRITE_CFG;
		break;
	default:
		req = MT_VEND_MULTI_WRITE;
		break;
	}
	___mt76u_wr(dev, req, addr & ~MT_VEND_TYPE_MASK, val);
}

static void mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
{
	mutex_lock(&dev->usb.usb_ctrl_mtx);
	__mt76u_wr(dev, addr, val);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);
}

static void mt76u_wr_ext(struct mt76_dev *dev, u32 addr, u32 val)
{
	mutex_lock(&dev->usb.usb_ctrl_mtx);
	___mt76u_wr(dev, MT_VEND_WRITE_EXT, addr, val);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);
}

static u32 mt76u_rmw(struct mt76_dev *dev, u32 addr,
		     u32 mask, u32 val)
{
	mutex_lock(&dev->usb.usb_ctrl_mtx);
	val |= __mt76u_rr(dev, addr) & ~mask;
	__mt76u_wr(dev, addr, val);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);

	return val;
}

static u32 mt76u_rmw_ext(struct mt76_dev *dev, u32 addr,
			 u32 mask, u32 val)
{
	mutex_lock(&dev->usb.usb_ctrl_mtx);
	val |= ___mt76u_rr(dev, MT_VEND_READ_EXT, addr) & ~mask;
	___mt76u_wr(dev, MT_VEND_WRITE_EXT, addr, val);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);

	return val;
}

static void mt76u_copy(struct mt76_dev *dev, u32 offset,
		       const void *data, int len)
{
	struct mt76_usb *usb = &dev->usb;
	const u8 *val = data;
	int ret;
	int current_batch_size;
	int i = 0;

	/* Ensure that a multiple of 4 bytes is always copied,
	 * otherwise beacons can be corrupted.
	 * See: "mt76: round up length on mt76_wr_copy"
	 * Commit 850e8f6fbd5d0003b0
	 */
	len = round_up(len, 4);

	mutex_lock(&usb->usb_ctrl_mtx);
	while (i < len) {
		current_batch_size = min_t(int, usb->data_len, len - i);
		memcpy(usb->data, val + i, current_batch_size);
		ret = __mt76u_vendor_request(dev, MT_VEND_MULTI_WRITE,
					     USB_DIR_OUT | USB_TYPE_VENDOR,
					     0, offset + i, usb->data,
					     current_batch_size);
		if (ret < 0)
			break;

		i += current_batch_size;
	}
	mutex_unlock(&usb->usb_ctrl_mtx);
}

static void mt76u_copy_ext(struct mt76_dev *dev, u32 offset,
			   const void *data, int len)
{
	struct mt76_usb *usb = &dev->usb;
	int ret, i = 0, batch_len;
	const u8 *val = data;

	len = round_up(len, 4);
	mutex_lock(&usb->usb_ctrl_mtx);
	while (i < len) {
		batch_len = min_t(int, usb->data_len, len - i);
		memcpy(usb->data, val + i, batch_len);
		ret = __mt76u_vendor_request(dev, MT_VEND_WRITE_EXT,
					     USB_DIR_OUT | USB_TYPE_VENDOR,
					     (offset + i) >> 16, offset + i,
					     usb->data, batch_len);
		if (ret < 0)
			break;

		i += batch_len;
	}
	mutex_unlock(&usb->usb_ctrl_mtx);
}

static void
mt76u_read_copy_ext(struct mt76_dev *dev, u32 offset,
		    void *data, int len)
{
	struct mt76_usb *usb = &dev->usb;
	int i = 0, batch_len, ret;
	u8 *val = data;

	len = round_up(len, 4);
	mutex_lock(&usb->usb_ctrl_mtx);
	while (i < len) {
		batch_len = min_t(int, usb->data_len, len - i);
		ret = __mt76u_vendor_request(dev, MT_VEND_READ_EXT,
					     USB_DIR_IN | USB_TYPE_VENDOR,
					     (offset + i) >> 16, offset + i,
					     usb->data, batch_len);
		if (ret < 0)
			break;

		memcpy(val + i, usb->data, batch_len);
		i += batch_len;
	}
	mutex_unlock(&usb->usb_ctrl_mtx);
}

void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
		     const u16 offset, const u32 val)
{
	mutex_lock(&dev->usb.usb_ctrl_mtx);
	__mt76u_vendor_request(dev, req,
			       USB_DIR_OUT | USB_TYPE_VENDOR,
			       val & 0xffff, offset, NULL, 0);
	__mt76u_vendor_request(dev, req,
			       USB_DIR_OUT | USB_TYPE_VENDOR,
			       val >> 16, offset + 2, NULL, 0);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);
}
EXPORT_SYMBOL_GPL(mt76u_single_wr);

static int
mt76u_req_wr_rp(struct mt76_dev *dev, u32 base,
		const struct mt76_reg_pair *data, int len)
{
	struct mt76_usb *usb = &dev->usb;

	mutex_lock(&usb->usb_ctrl_mtx);
	while (len > 0) {
		__mt76u_wr(dev, base + data->reg, data->value);
		len--;
		data++;
	}
	mutex_unlock(&usb->usb_ctrl_mtx);

	return 0;
}

static int
mt76u_wr_rp(struct mt76_dev *dev, u32 base,
	    const struct mt76_reg_pair *data, int n)
{
	if (test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state))
		return dev->mcu_ops->mcu_wr_rp(dev, base, data, n);
	else
		return mt76u_req_wr_rp(dev, base, data, n);
}

static int
mt76u_req_rd_rp(struct mt76_dev *dev, u32 base, struct mt76_reg_pair *data,
		int len)
{
	struct mt76_usb *usb = &dev->usb;

	mutex_lock(&usb->usb_ctrl_mtx);
	while (len > 0) {
		data->value = __mt76u_rr(dev, base + data->reg);
		len--;
		data++;
	}
	mutex_unlock(&usb->usb_ctrl_mtx);

	return 0;
}

static int
mt76u_rd_rp(struct mt76_dev *dev, u32 base,
	    struct mt76_reg_pair *data, int n)
{
	if (test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state))
		return dev->mcu_ops->mcu_rd_rp(dev, base, data, n);
	else
		return mt76u_req_rd_rp(dev, base, data, n);
}

static bool mt76u_check_sg(struct mt76_dev *dev)
{
	struct usb_interface *uintf = to_usb_interface(dev->dev);
	struct usb_device *udev = interface_to_usbdev(uintf);

	return (!disable_usb_sg && udev->bus->sg_tablesize > 0 &&
		(udev->bus->no_sg_constraint ||
		 udev->speed == USB_SPEED_WIRELESS));
}

static int
mt76u_set_endpoints(struct usb_interface *intf,
		    struct mt76_usb *usb)
{
	struct usb_host_interface *intf_desc = intf->cur_altsetting;
	struct usb_endpoint_descriptor *ep_desc;
	int i, in_ep = 0, out_ep = 0;

	for (i = 0; i < intf_desc->desc.bNumEndpoints; i++) {
		ep_desc = &intf_desc->endpoint[i].desc;

		if (usb_endpoint_is_bulk_in(ep_desc) &&
		    in_ep < __MT_EP_IN_MAX) {
			usb->in_ep[in_ep] = usb_endpoint_num(ep_desc);
			in_ep++;
		} else if (usb_endpoint_is_bulk_out(ep_desc) &&
			   out_ep < __MT_EP_OUT_MAX) {
			usb->out_ep[out_ep] = usb_endpoint_num(ep_desc);
			out_ep++;
		}
	}

	if (in_ep != __MT_EP_IN_MAX || out_ep != __MT_EP_OUT_MAX)
		return -EINVAL;
	return 0;
}

static int
mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76_queue *q, struct urb *urb,
		 int nsgs, gfp_t gfp)
{
	int i;

	for (i = 0; i < nsgs; i++) {
		struct page *page;
		void *data;
		int offset;

		data = page_frag_alloc(&q->rx_page, q->buf_size, gfp);
		if (!data)
			break;

		page = virt_to_head_page(data);
		offset = data - page_address(page);
		sg_set_page(&urb->sg[i], page, q->buf_size, offset);
	}

	if (i < nsgs) {
		int j;

		for (j = nsgs; j < urb->num_sgs; j++)
			skb_free_frag(sg_virt(&urb->sg[j]));
		urb->num_sgs = i;
	}

	urb->num_sgs = max_t(int, i, urb->num_sgs);
	urb->transfer_buffer_length = urb->num_sgs * q->buf_size;
	sg_init_marker(urb->sg, urb->num_sgs);

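	/* descriptive note (added): return the number of segments actually
	 * filled; fail only if no buffer at all could be allocated
	 */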
	return i ? : -ENOMEM;
}

static int
mt76u_refill_rx(struct mt76_dev *dev, struct mt76_queue *q,
		struct urb *urb, int nsgs, gfp_t gfp)
{
	enum mt76_rxq_id qid = q - &dev->q_rx[MT_RXQ_MAIN];

	if (qid == MT_RXQ_MAIN && dev->usb.sg_en)
		return mt76u_fill_rx_sg(dev, q, urb, nsgs, gfp);

	urb->transfer_buffer_length = q->buf_size;
	urb->transfer_buffer = page_frag_alloc(&q->rx_page, q->buf_size, gfp);

	return urb->transfer_buffer ? 0 : -ENOMEM;
}

static int
mt76u_urb_alloc(struct mt76_dev *dev, struct mt76_queue_entry *e,
		int sg_max_size)
{
	unsigned int size = sizeof(struct urb);

	if (dev->usb.sg_en)
		size += sg_max_size * sizeof(struct scatterlist);

	e->urb = kzalloc(size, GFP_KERNEL);
	if (!e->urb)
		return -ENOMEM;

	usb_init_urb(e->urb);

	if (dev->usb.sg_en && sg_max_size > 0)
		e->urb->sg = (struct scatterlist *)(e->urb + 1);

	return 0;
}

static int
mt76u_rx_urb_alloc(struct mt76_dev *dev, struct mt76_queue *q,
		   struct mt76_queue_entry *e)
{
	enum mt76_rxq_id qid = q - &dev->q_rx[MT_RXQ_MAIN];
	int err, sg_size;

	sg_size = qid == MT_RXQ_MAIN ? MT_RX_SG_MAX_SIZE : 0;
	err = mt76u_urb_alloc(dev, e, sg_size);
	if (err)
		return err;

	return mt76u_refill_rx(dev, q, e->urb, sg_size, GFP_KERNEL);
}

static void mt76u_urb_free(struct urb *urb)
{
	int i;

	for (i = 0; i < urb->num_sgs; i++)
		skb_free_frag(sg_virt(&urb->sg[i]));

	if (urb->transfer_buffer)
		skb_free_frag(urb->transfer_buffer);

	usb_free_urb(urb);
}

static void
mt76u_fill_bulk_urb(struct mt76_dev *dev, int dir, int index,
		    struct urb *urb, usb_complete_t complete_fn,
		    void *context)
{
	struct usb_interface *uintf = to_usb_interface(dev->dev);
	struct usb_device *udev = interface_to_usbdev(uintf);
	unsigned int pipe;

	if (dir == USB_DIR_IN)
		pipe = usb_rcvbulkpipe(udev, dev->usb.in_ep[index]);
	else
		pipe = usb_sndbulkpipe(udev, dev->usb.out_ep[index]);

	urb->dev = udev;
	urb->pipe = pipe;
	urb->complete = complete_fn;
	urb->context = context;
}

static struct urb *
mt76u_get_next_rx_entry(struct mt76_queue *q)
{
	struct urb *urb = NULL;
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	if (q->queued > 0) {
		urb = q->entry[q->head].urb;
		q->head = (q->head + 1) % q->ndesc;
		q->queued--;
	}
	spin_unlock_irqrestore(&q->lock, flags);

	return urb;
}

static int
mt76u_get_rx_entry_len(struct mt76_dev *dev, u8 *data,
		       u32 data_len)
{
	u16 dma_len, min_len;

	dma_len = get_unaligned_le16(data);
	if (dev->drv->drv_flags & MT_DRV_RX_DMA_HDR)
		return dma_len;

	min_len = MT_DMA_HDR_LEN + MT_RX_RXWI_LEN + MT_FCE_INFO_LEN;
	if (data_len < min_len || !dma_len ||
	    dma_len + MT_DMA_HDR_LEN > data_len ||
	    (dma_len & 0x3))
		return -EINVAL;
	return dma_len;
}

static struct sk_buff *
mt76u_build_rx_skb(struct mt76_dev *dev, void *data,
		   int len, int buf_size)
{
	int head_room, drv_flags = dev->drv->drv_flags;
	struct sk_buff *skb;

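	/* descriptive note (added): strip the USB DMA header from the skb
	 * unless the driver parses it itself (MT_DRV_RX_DMA_HDR)
	 */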
	head_room = drv_flags & MT_DRV_RX_DMA_HDR ? 0 : MT_DMA_HDR_LEN;
	if (SKB_WITH_OVERHEAD(buf_size) < head_room + len) {
		struct page *page;

		/* slow path, not enough space for data and
		 * skb_shared_info
		 */
		skb = alloc_skb(MT_SKB_HEAD_LEN, GFP_ATOMIC);
		if (!skb)
			return NULL;

		skb_put_data(skb, data + head_room, MT_SKB_HEAD_LEN);
		data += head_room + MT_SKB_HEAD_LEN;
		page = virt_to_head_page(data);
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
				page, data - page_address(page),
				len - MT_SKB_HEAD_LEN, buf_size);

		return skb;
	}

	/* fast path */
	skb = build_skb(data, buf_size);
	if (!skb)
		return NULL;

	skb_reserve(skb, head_room);
	__skb_put(skb, len);

	return skb;
}

static int
mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb,
		       int buf_size)
{
	u8 *data = urb->num_sgs ? sg_virt(&urb->sg[0]) : urb->transfer_buffer;
	int data_len = urb->num_sgs ? urb->sg[0].length : urb->actual_length;
	int len, nsgs = 1, head_room, drv_flags = dev->drv->drv_flags;
	struct sk_buff *skb;

	if (!test_bit(MT76_STATE_INITIALIZED, &dev->phy.state))
		return 0;

	len = mt76u_get_rx_entry_len(dev, data, urb->actual_length);
	if (len < 0)
		return 0;

	head_room = drv_flags & MT_DRV_RX_DMA_HDR ? 0 : MT_DMA_HDR_LEN;
	data_len = min_t(int, len, data_len - head_room);
	skb = mt76u_build_rx_skb(dev, data, data_len, buf_size);
	if (!skb)
		return 0;

	len -= data_len;
	while (len > 0 && nsgs < urb->num_sgs) {
		data_len = min_t(int, len, urb->sg[nsgs].length);
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
				sg_page(&urb->sg[nsgs]),
				urb->sg[nsgs].offset, data_len,
				buf_size);
		len -= data_len;
		nsgs++;
	}
	dev->drv->rx_skb(dev, MT_RXQ_MAIN, skb);

	return nsgs;
}

static void mt76u_complete_rx(struct urb *urb)
{
	struct mt76_dev *dev = dev_get_drvdata(&urb->dev->dev);
	struct mt76_queue *q = urb->context;
	unsigned long flags;

	trace_rx_urb(dev, urb);

	switch (urb->status) {
	case -ECONNRESET:
	case -ESHUTDOWN:
	case -ENOENT:
		return;
	default:
		dev_err_ratelimited(dev->dev, "rx urb failed: %d\n",
				    urb->status);
		/* fall through */
	case 0:
		break;
	}

	spin_lock_irqsave(&q->lock, flags);
	if (WARN_ONCE(q->entry[q->tail].urb != urb, "rx urb mismatch"))
		goto out;

	q->tail = (q->tail + 1) % q->ndesc;
	q->queued++;
	tasklet_schedule(&dev->usb.rx_tasklet);
out:
	spin_unlock_irqrestore(&q->lock, flags);
}

static int
mt76u_submit_rx_buf(struct mt76_dev *dev, enum mt76_rxq_id qid,
		    struct urb *urb)
{
	int ep = qid == MT_RXQ_MAIN ? MT_EP_IN_PKT_RX : MT_EP_IN_CMD_RESP;

	mt76u_fill_bulk_urb(dev, USB_DIR_IN, ep, urb,
			    mt76u_complete_rx, &dev->q_rx[qid]);
	trace_submit_urb(dev, urb);

	return usb_submit_urb(urb, GFP_ATOMIC);
}

static void
mt76u_process_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
	int qid = q - &dev->q_rx[MT_RXQ_MAIN];
	struct urb *urb;
	int err, count;

	while (true) {
		urb = mt76u_get_next_rx_entry(q);
		if (!urb)
			break;

		count = mt76u_process_rx_entry(dev, urb, q->buf_size);
		if (count > 0) {
			err = mt76u_refill_rx(dev, q, urb, count, GFP_ATOMIC);
			if (err < 0)
				break;
		}
		mt76u_submit_rx_buf(dev, qid, urb);
	}
	if (qid == MT_RXQ_MAIN)
		mt76_rx_poll_complete(dev, MT_RXQ_MAIN, NULL);
}

static void mt76u_rx_tasklet(unsigned long data)
{
	struct mt76_dev *dev = (struct mt76_dev *)data;
	struct mt76_queue *q;
	int i;

	rcu_read_lock();
	for (i = 0; i < __MT_RXQ_MAX; i++) {
		q = &dev->q_rx[i];
		if (!q->ndesc)
			continue;

		mt76u_process_rx_queue(dev, q);
	}
	rcu_read_unlock();
}

static int
mt76u_submit_rx_buffers(struct mt76_dev *dev, enum mt76_rxq_id qid)
{
	struct mt76_queue *q = &dev->q_rx[qid];
	unsigned long flags;
	int i, err = 0;

	spin_lock_irqsave(&q->lock, flags);
	for (i = 0; i < q->ndesc; i++) {
		err = mt76u_submit_rx_buf(dev, qid, q->entry[i].urb);
		if (err < 0)
			break;
	}
	q->head = q->tail = 0;
	q->queued = 0;
	spin_unlock_irqrestore(&q->lock, flags);

	return err;
}

static int
mt76u_alloc_rx_queue(struct mt76_dev *dev, enum mt76_rxq_id qid)
{
	struct mt76_queue *q = &dev->q_rx[qid];
	int i, err;

	spin_lock_init(&q->lock);
	q->entry = devm_kcalloc(dev->dev,
				MT_NUM_RX_ENTRIES, sizeof(*q->entry),
				GFP_KERNEL);
	if (!q->entry)
		return -ENOMEM;

	q->ndesc = MT_NUM_RX_ENTRIES;
	q->buf_size = PAGE_SIZE;

	for (i = 0; i < q->ndesc; i++) {
		err = mt76u_rx_urb_alloc(dev, q, &q->entry[i]);
		if (err < 0)
			return err;
	}

	return mt76u_submit_rx_buffers(dev, qid);
}

int mt76u_alloc_mcu_queue(struct mt76_dev *dev)
{
	return mt76u_alloc_rx_queue(dev, MT_RXQ_MCU);
}
EXPORT_SYMBOL_GPL(mt76u_alloc_mcu_queue);

static void
mt76u_free_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
	struct page *page;
	int i;

	for (i = 0; i < q->ndesc; i++)
		mt76u_urb_free(q->entry[i].urb);

	if (!q->rx_page.va)
		return;

	page = virt_to_page(q->rx_page.va);
	__page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
	memset(&q->rx_page, 0, sizeof(q->rx_page));
}

static void mt76u_free_rx(struct mt76_dev *dev)
{
	struct mt76_queue *q;
	int i;

	for (i = 0; i < __MT_RXQ_MAX; i++) {
		q = &dev->q_rx[i];
		if (!q->ndesc)
			continue;

		mt76u_free_rx_queue(dev, q);
	}
}

void mt76u_stop_rx(struct mt76_dev *dev)
{
	struct mt76_queue *q;
	int i, j;

	for (i = 0; i < __MT_RXQ_MAX; i++) {
		q = &dev->q_rx[i];
		if (!q->ndesc)
			continue;

		for (j = 0; j < q->ndesc; j++)
			usb_poison_urb(q->entry[j].urb);
	}

	tasklet_kill(&dev->usb.rx_tasklet);
}
EXPORT_SYMBOL_GPL(mt76u_stop_rx);

int mt76u_resume_rx(struct mt76_dev *dev)
{
	struct mt76_queue *q;
	int i, j, err;

	for (i = 0; i < __MT_RXQ_MAX; i++) {
		q = &dev->q_rx[i];

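		/* descriptive note (added): skip queues that were never allocated */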
		if (!q->ndesc)
			continue;

		for (j = 0; j < q->ndesc; j++)
			usb_unpoison_urb(q->entry[j].urb);

		err = mt76u_submit_rx_buffers(dev, i);
		if (err < 0)
			return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mt76u_resume_rx);

static void mt76u_tx_tasklet(unsigned long data)
{
	struct mt76_dev *dev = (struct mt76_dev *)data;
	struct mt76_queue_entry entry;
	struct mt76_sw_queue *sq;
	struct mt76_queue *q;
	bool wake;
	int i;

	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		u32 n_dequeued = 0, n_sw_dequeued = 0;

		sq = &dev->q_tx[i];
		q = sq->q;

		while (q->queued > n_dequeued) {
			if (!q->entry[q->head].done)
				break;

			if (q->entry[q->head].schedule) {
				q->entry[q->head].schedule = false;
				n_sw_dequeued++;
			}

			entry = q->entry[q->head];
			q->entry[q->head].done = false;
			q->head = (q->head + 1) % q->ndesc;
			n_dequeued++;

			dev->drv->tx_complete_skb(dev, i, &entry);
		}

		spin_lock_bh(&q->lock);

		sq->swq_queued -= n_sw_dequeued;
		q->queued -= n_dequeued;

		wake = q->stopped && q->queued < q->ndesc - 8;
		if (wake)
			q->stopped = false;

		if (!q->queued)
			wake_up(&dev->tx_wait);

		spin_unlock_bh(&q->lock);

		mt76_txq_schedule(&dev->phy, i);

		if (dev->drv->tx_status_data &&
		    !test_and_set_bit(MT76_READING_STATS, &dev->phy.state))
			queue_work(dev->usb.wq, &dev->usb.stat_work);
		if (wake)
			ieee80211_wake_queue(dev->hw, i);
	}
}

static void mt76u_tx_status_data(struct work_struct *work)
{
	struct mt76_usb *usb;
	struct mt76_dev *dev;
	u8 update = 1;
	u16 count = 0;

	usb = container_of(work, struct mt76_usb, stat_work);
	dev = container_of(usb, struct mt76_dev, usb);

	while (true) {
		if (test_bit(MT76_REMOVED, &dev->phy.state))
			break;

		if (!dev->drv->tx_status_data(dev, &update))
			break;
		count++;
	}

	if (count && test_bit(MT76_STATE_RUNNING, &dev->phy.state))
		queue_work(usb->wq, &usb->stat_work);
	else
		clear_bit(MT76_READING_STATS, &dev->phy.state);
}

static void mt76u_complete_tx(struct urb *urb)
{
	struct mt76_dev *dev = dev_get_drvdata(&urb->dev->dev);
	struct mt76_queue_entry *e = urb->context;

	if (mt76u_urb_error(urb))
		dev_err(dev->dev, "tx urb failed: %d\n", urb->status);
	e->done = true;

	tasklet_schedule(&dev->tx_tasklet);
}

static int
mt76u_tx_setup_buffers(struct mt76_dev *dev, struct sk_buff *skb,
		       struct urb *urb)
{
	urb->transfer_buffer_length = skb->len;

	if (!dev->usb.sg_en) {
		urb->transfer_buffer = skb->data;
		return 0;
	}

	sg_init_table(urb->sg, MT_TX_SG_MAX_SIZE);
	urb->num_sgs = skb_to_sgvec(skb, urb->sg, 0, skb->len);
	if (!urb->num_sgs)
		return -ENOMEM;

	return urb->num_sgs;
}

int mt76u_skb_dma_info(struct sk_buff *skb, u32 info)
{
	struct sk_buff *iter, *last = skb;
	u32 pad;

	put_unaligned_le32(info, skb_push(skb, sizeof(info)));
	/* Add zero pad of 4 - 7 bytes */
	pad = round_up(skb->len, 4) + 4 - skb->len;

	/* First packet of an A-MSDU burst keeps track of the whole burst
	 * length, need to update length of it and the last packet.
	 */
	skb_walk_frags(skb, iter) {
		last = iter;
		if (!iter->next) {
			skb->data_len += pad;
			skb->len += pad;
			break;
		}
	}

	if (skb_pad(last, pad))
		return -ENOMEM;
	__skb_put(last, pad);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76u_skb_dma_info);

static int
mt76u_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
		   struct sk_buff *skb, struct mt76_wcid *wcid,
		   struct ieee80211_sta *sta)
{
	struct mt76_queue *q = dev->q_tx[qid].q;
	struct mt76_tx_info tx_info = {
		.skb = skb,
	};
	u16 idx = q->tail;
	int err;

	if (q->queued == q->ndesc)
		return -ENOSPC;

	skb->prev = skb->next = NULL;
	err = dev->drv->tx_prepare_skb(dev, NULL, qid, wcid, sta, &tx_info);
	if (err < 0)
		return err;

	err = mt76u_tx_setup_buffers(dev, tx_info.skb, q->entry[idx].urb);
	if (err < 0)
		return err;

	mt76u_fill_bulk_urb(dev, USB_DIR_OUT, q2ep(q->hw_idx),
			    q->entry[idx].urb, mt76u_complete_tx,
			    &q->entry[idx]);

	q->tail = (q->tail + 1) % q->ndesc;
	q->entry[idx].skb = tx_info.skb;
	q->queued++;

	return idx;
}

static void mt76u_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
{
	struct urb *urb;
	int err;

	while (q->first != q->tail) {
		urb = q->entry[q->first].urb;

		trace_submit_urb(dev, urb);
		err = usb_submit_urb(urb, GFP_ATOMIC);
		if (err < 0) {
			if (err == -ENODEV)
				set_bit(MT76_REMOVED, &dev->phy.state);
			else
				dev_err(dev->dev, "tx urb submit failed:%d\n",
					err);
			break;
		}
		q->first = (q->first + 1) % q->ndesc;
	}
}

static u8 mt76u_ac_to_hwq(struct mt76_dev *dev, u8 ac)
{
	if (mt76_chip(dev) == 0x7663) {
		static const u8 wmm_queue_map[] = {
			[IEEE80211_AC_VO] = 0,
			[IEEE80211_AC_VI] = 1,
			[IEEE80211_AC_BE] = 2,
			[IEEE80211_AC_BK] = 4,
		};

		if (WARN_ON(ac >= ARRAY_SIZE(wmm_queue_map)))
			return 2; /* BE */

		return wmm_queue_map[ac];
	}

	return mt76_ac_to_hwq(ac);
}

static int mt76u_alloc_tx(struct mt76_dev *dev)
{
	struct mt76_queue *q;
	int i, j, err;

	for (i = 0; i <= MT_TXQ_PSD; i++) {
		INIT_LIST_HEAD(&dev->q_tx[i].swq);

		if (i >= IEEE80211_NUM_ACS) {
			dev->q_tx[i].q = dev->q_tx[0].q;
			continue;
		}

		q = devm_kzalloc(dev->dev, sizeof(*q), GFP_KERNEL);
		if (!q)
			return -ENOMEM;

		spin_lock_init(&q->lock);
		q->hw_idx = mt76u_ac_to_hwq(dev, i);
		dev->q_tx[i].q = q;

		q->entry = devm_kcalloc(dev->dev,
					MT_NUM_TX_ENTRIES, sizeof(*q->entry),
					GFP_KERNEL);
		if (!q->entry)
			return -ENOMEM;

		q->ndesc = MT_NUM_TX_ENTRIES;
		for (j = 0; j < q->ndesc; j++) {
			err = mt76u_urb_alloc(dev, &q->entry[j],
					      MT_TX_SG_MAX_SIZE);
			if (err < 0)
				return err;
		}
	}
	return 0;
}

static void mt76u_free_tx(struct mt76_dev *dev)
{
	struct mt76_queue *q;
	int i, j;

	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		q = dev->q_tx[i].q;
		for (j = 0; j < q->ndesc; j++)
			usb_free_urb(q->entry[j].urb);
	}
}

void mt76u_stop_tx(struct mt76_dev *dev)
{
	struct mt76_queue_entry entry;
	struct mt76_queue *q;
	int i, j, ret;

	ret = wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(&dev->phy),
				 HZ / 5);
	if (!ret) {
		dev_err(dev->dev, "timed out waiting for pending tx\n");
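
		/* descriptive note (added): kill any tx URBs still in flight
		 * before reclaiming the queued skbs below
		 */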
		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
			q = dev->q_tx[i].q;
			for (j = 0; j < q->ndesc; j++)
				usb_kill_urb(q->entry[j].urb);
		}

		tasklet_kill(&dev->tx_tasklet);

		/* On device removal we might queue skb's, but mt76u_tx_kick()
		 * will fail to submit urb, cleanup those skb's manually.
		 */
		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
			q = dev->q_tx[i].q;

			/* Assure we are in sync with killed tasklet. */
			spin_lock_bh(&q->lock);
			while (q->queued) {
				entry = q->entry[q->head];
				q->head = (q->head + 1) % q->ndesc;
				q->queued--;

				dev->drv->tx_complete_skb(dev, i, &entry);
			}
			spin_unlock_bh(&q->lock);
		}
	}

	cancel_work_sync(&dev->usb.stat_work);
	clear_bit(MT76_READING_STATS, &dev->phy.state);

	mt76_tx_status_check(dev, NULL, true);
}
EXPORT_SYMBOL_GPL(mt76u_stop_tx);

void mt76u_queues_deinit(struct mt76_dev *dev)
{
	mt76u_stop_rx(dev);
	mt76u_stop_tx(dev);

	mt76u_free_rx(dev);
	mt76u_free_tx(dev);
}
EXPORT_SYMBOL_GPL(mt76u_queues_deinit);

int mt76u_alloc_queues(struct mt76_dev *dev)
{
	int err;

	err = mt76u_alloc_rx_queue(dev, MT_RXQ_MAIN);
	if (err < 0)
		return err;

	return mt76u_alloc_tx(dev);
}
EXPORT_SYMBOL_GPL(mt76u_alloc_queues);

static const struct mt76_queue_ops usb_queue_ops = {
	.tx_queue_skb = mt76u_tx_queue_skb,
	.kick = mt76u_tx_kick,
};

void mt76u_deinit(struct mt76_dev *dev)
{
	if (dev->usb.wq) {
		destroy_workqueue(dev->usb.wq);
		dev->usb.wq = NULL;
	}
}
EXPORT_SYMBOL_GPL(mt76u_deinit);

int mt76u_init(struct mt76_dev *dev,
	       struct usb_interface *intf, bool ext)
{
	static struct mt76_bus_ops mt76u_ops = {
		.read_copy = mt76u_read_copy_ext,
		.wr_rp = mt76u_wr_rp,
		.rd_rp = mt76u_rd_rp,
		.type = MT76_BUS_USB,
	};
	struct usb_device *udev = interface_to_usbdev(intf);
	struct mt76_usb *usb = &dev->usb;
	int err = -ENOMEM;

	mt76u_ops.rr = ext ? mt76u_rr_ext : mt76u_rr;
	mt76u_ops.wr = ext ? mt76u_wr_ext : mt76u_wr;
	mt76u_ops.rmw = ext ? mt76u_rmw_ext : mt76u_rmw;
	mt76u_ops.write_copy = ext ? mt76u_copy_ext : mt76u_copy;

	tasklet_init(&usb->rx_tasklet, mt76u_rx_tasklet, (unsigned long)dev);
	tasklet_init(&dev->tx_tasklet, mt76u_tx_tasklet, (unsigned long)dev);
	INIT_WORK(&usb->stat_work, mt76u_tx_status_data);

	usb->wq = alloc_workqueue("mt76u", WQ_UNBOUND, 0);
	if (!usb->wq)
		return -ENOMEM;

	usb->data_len = usb_maxpacket(udev, usb_sndctrlpipe(udev, 0), 1);
	if (usb->data_len < 32)
		usb->data_len = 32;

	usb->data = devm_kmalloc(dev->dev, usb->data_len, GFP_KERNEL);
	if (!usb->data)
		goto error;

	mutex_init(&usb->usb_ctrl_mtx);
	dev->bus = &mt76u_ops;
	dev->queue_ops = &usb_queue_ops;

	dev_set_drvdata(&udev->dev, dev);

	usb->sg_en = mt76u_check_sg(dev);

	err = mt76u_set_endpoints(intf, usb);
	if (err < 0)
		goto error;

	return 0;

error:
	mt76u_deinit(dev);
	return err;
}
EXPORT_SYMBOL_GPL(mt76u_init);

MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>");
MODULE_LICENSE("Dual BSD/GPL");