/*
	Copyright (C) 2010 Willow Garage <http://www.willowgarage.com>
	Copyright (C) 2004 - 2010 Ivo van Doorn <IvDoorn@gmail.com>
	<http://rt2x00.serialmonkey.com>

	This program is free software; you can redistribute it and/or modify
	it under the terms of the GNU General Public License as published by
	the Free Software Foundation; either version 2 of the License, or
	(at your option) any later version.

	This program is distributed in the hope that it will be useful,
	but WITHOUT ANY WARRANTY; without even the implied warranty of
	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
	GNU General Public License for more details.

	You should have received a copy of the GNU General Public License
	along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

/*
	Module: rt2x00usb
	Abstract: rt2x00 generic usb device routines.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/bug.h>

#include "rt2x00.h"
#include "rt2x00usb.h"

static bool rt2x00usb_check_usb_error(struct rt2x00_dev *rt2x00dev, int status)
{
	if (status == -ENODEV || status == -ENOENT)
		return true;

	if (status == -EPROTO || status == -ETIMEDOUT)
		rt2x00dev->num_proto_errs++;
	else
		rt2x00dev->num_proto_errs = 0;

	if (rt2x00dev->num_proto_errs > 3)
		return true;

	return false;
}

/*
 * Interfacing with the HW.
 */
int rt2x00usb_vendor_request(struct rt2x00_dev *rt2x00dev,
			     const u8 request, const u8 requesttype,
			     const u16 offset, const u16 value,
			     void *buffer, const u16 buffer_length,
			     const int timeout)
{
	struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev);
	int status;
	unsigned int pipe =
	    (requesttype == USB_VENDOR_REQUEST_IN) ?
	    usb_rcvctrlpipe(usb_dev, 0) : usb_sndctrlpipe(usb_dev, 0);
	unsigned long expire = jiffies + msecs_to_jiffies(timeout);

	if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
		return -ENODEV;

	do {
		status = usb_control_msg(usb_dev, pipe, request, requesttype,
					 value, offset, buffer, buffer_length,
					 timeout / 2);
		if (status >= 0)
			return 0;

		if (rt2x00usb_check_usb_error(rt2x00dev, status)) {
			/* Device has disappeared. */
			clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
			break;
		}
	} while (time_before(jiffies, expire));

	rt2x00_err(rt2x00dev,
		   "Vendor Request 0x%02x failed for offset 0x%04x with error %d\n",
		   request, offset, status);

	return status;
}
EXPORT_SYMBOL_GPL(rt2x00usb_vendor_request);
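/*
 * Illustrative sketch, not a call site in this file: given a device
 * handle and a register offset, a single 32-bit register read through
 * the vendor request path could look like the snippet below. All
 * identifiers (USB_MULTI_READ, USB_VENDOR_REQUEST_IN, REGISTER_TIMEOUT)
 * are the ones used elsewhere in this file; the caller converts the
 * little-endian result itself:
 *
 *	__le32 reg;
 *	u32 value = 0;
 *	int ret;
 *
 *	ret = rt2x00usb_vendor_request(rt2x00dev, USB_MULTI_READ,
 *				       USB_VENDOR_REQUEST_IN, offset, 0,
 *				       &reg, sizeof(reg), REGISTER_TIMEOUT);
 *	if (!ret)
 *		value = le32_to_cpu(reg);
 */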
int rt2x00usb_vendor_req_buff_lock(struct rt2x00_dev *rt2x00dev,
				   const u8 request, const u8 requesttype,
				   const u16 offset, void *buffer,
				   const u16 buffer_length, const int timeout)
{
	int status;

	BUG_ON(!mutex_is_locked(&rt2x00dev->csr_mutex));

	/*
	 * Check for cache availability.
	 */
	if (unlikely(!rt2x00dev->csr.cache || buffer_length > CSR_CACHE_SIZE)) {
		rt2x00_err(rt2x00dev, "CSR cache not available\n");
		return -ENOMEM;
	}

	if (requesttype == USB_VENDOR_REQUEST_OUT)
		memcpy(rt2x00dev->csr.cache, buffer, buffer_length);

	status = rt2x00usb_vendor_request(rt2x00dev, request, requesttype,
					  offset, 0, rt2x00dev->csr.cache,
					  buffer_length, timeout);

	if (!status && requesttype == USB_VENDOR_REQUEST_IN)
		memcpy(buffer, rt2x00dev->csr.cache, buffer_length);

	return status;
}
EXPORT_SYMBOL_GPL(rt2x00usb_vendor_req_buff_lock);

int rt2x00usb_vendor_request_buff(struct rt2x00_dev *rt2x00dev,
				  const u8 request, const u8 requesttype,
				  const u16 offset, void *buffer,
				  const u16 buffer_length)
{
	int status = 0;
	unsigned char *tb;
	u16 off, len, bsize;

	mutex_lock(&rt2x00dev->csr_mutex);

	tb = (char *)buffer;
	off = offset;
	len = buffer_length;
	while (len && !status) {
		bsize = min_t(u16, CSR_CACHE_SIZE, len);
		status = rt2x00usb_vendor_req_buff_lock(rt2x00dev, request,
							requesttype, off, tb,
							bsize, REGISTER_TIMEOUT);

		tb += bsize;
		len -= bsize;
		off += bsize;
	}

	mutex_unlock(&rt2x00dev->csr_mutex);

	return status;
}
EXPORT_SYMBOL_GPL(rt2x00usb_vendor_request_buff);

int rt2x00usb_regbusy_read(struct rt2x00_dev *rt2x00dev,
			   const unsigned int offset,
			   const struct rt2x00_field32 field,
			   u32 *reg)
{
	unsigned int i;

	if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
		return -ENODEV;

	for (i = 0; i < REGISTER_USB_BUSY_COUNT; i++) {
		*reg = rt2x00usb_register_read_lock(rt2x00dev, offset);
		if (!rt2x00_get_field32(*reg, field))
			return 1;
		udelay(REGISTER_BUSY_DELAY);
	}

	rt2x00_err(rt2x00dev, "Indirect register access failed: offset=0x%.08x, value=0x%.08x\n",
		   offset, *reg);
	*reg = ~0;

	return 0;
}
EXPORT_SYMBOL_GPL(rt2x00usb_regbusy_read);

struct rt2x00_async_read_data {
	__le32 reg;
	struct usb_ctrlrequest cr;
	struct rt2x00_dev *rt2x00dev;
	bool (*callback)(struct rt2x00_dev *, int, u32);
};

static void rt2x00usb_register_read_async_cb(struct urb *urb)
{
	struct rt2x00_async_read_data *rd = urb->context;

	if (rd->callback(rd->rt2x00dev, urb->status, le32_to_cpu(rd->reg))) {
		usb_anchor_urb(urb, rd->rt2x00dev->anchor);
		if (usb_submit_urb(urb, GFP_ATOMIC) < 0) {
			usb_unanchor_urb(urb);
			kfree(rd);
		}
	} else
		kfree(rd);
}

void rt2x00usb_register_read_async(struct rt2x00_dev *rt2x00dev,
				   const unsigned int offset,
				   bool (*callback)(struct rt2x00_dev *, int, u32))
{
	struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev);
	struct urb *urb;
	struct rt2x00_async_read_data *rd;

	rd = kmalloc(sizeof(*rd), GFP_ATOMIC);
	if (!rd)
		return;

	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb) {
		kfree(rd);
		return;
	}

	rd->rt2x00dev = rt2x00dev;
	rd->callback = callback;
	rd->cr.bRequestType = USB_VENDOR_REQUEST_IN;
	rd->cr.bRequest = USB_MULTI_READ;
	rd->cr.wValue = 0;
	rd->cr.wIndex = cpu_to_le16(offset);
	rd->cr.wLength = cpu_to_le16(sizeof(u32));

	usb_fill_control_urb(urb, usb_dev, usb_rcvctrlpipe(usb_dev, 0),
			     (unsigned char *)(&rd->cr), &rd->reg, sizeof(rd->reg),
			     rt2x00usb_register_read_async_cb, rd);
	usb_anchor_urb(urb, rt2x00dev->anchor);
	if (usb_submit_urb(urb, GFP_ATOMIC) < 0) {
		usb_unanchor_urb(urb);
		kfree(rd);
	}
	usb_free_urb(urb);
}
EXPORT_SYMBOL_GPL(rt2x00usb_register_read_async);
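/*
 * Illustrative sketch of the rt2x00usb_register_read_async() contract
 * (hypothetical names, not part of this driver): the callback runs in
 * urb completion context, and returning true re-submits the same urb,
 * so a register can be polled without blocking:
 *
 *	static bool example_poll_cb(struct rt2x00_dev *rt2x00dev,
 *				    int urb_status, u32 reg)
 *	{
 *		if (urb_status)
 *			return false;
 *		return rt2x00_get_field32(reg, EXAMPLE_CSR_BUSY);
 *	}
 *
 *	rt2x00usb_register_read_async(rt2x00dev, EXAMPLE_CSR_OFFSET,
 *				      example_poll_cb);
 */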
/*
 * TX data handlers.
 */
static void rt2x00usb_work_txdone_entry(struct queue_entry *entry)
{
	/*
	 * If the transfer to hardware succeeded, it does not mean the
	 * frame was sent out correctly. It only means the frame
	 * was successfully pushed to the hardware; we have no
	 * way to determine the transmission status right now.
	 * (Only indirectly by looking at the failed TX counters
	 * in the register).
	 */
	if (test_bit(ENTRY_DATA_IO_FAILED, &entry->flags))
		rt2x00lib_txdone_noinfo(entry, TXDONE_FAILURE);
	else
		rt2x00lib_txdone_noinfo(entry, TXDONE_UNKNOWN);
}

static void rt2x00usb_work_txdone(struct work_struct *work)
{
	struct rt2x00_dev *rt2x00dev =
	    container_of(work, struct rt2x00_dev, txdone_work);
	struct data_queue *queue;
	struct queue_entry *entry;

	tx_queue_for_each(rt2x00dev, queue) {
		while (!rt2x00queue_empty(queue)) {
			entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE);

			if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags) ||
			    !test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags))
				break;

			rt2x00usb_work_txdone_entry(entry);
		}
	}
}

static void rt2x00usb_interrupt_txdone(struct urb *urb)
{
	struct queue_entry *entry = (struct queue_entry *)urb->context;
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;

	if (!test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
		return;

	/*
	 * Check if the frame was correctly uploaded.
	 */
	if (urb->status)
		set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);

	/*
	 * Report the frame as DMA done.
	 */
	rt2x00lib_dmadone(entry);

	if (rt2x00dev->ops->lib->tx_dma_done)
		rt2x00dev->ops->lib->tx_dma_done(entry);

	/*
	 * Schedule the delayed work for reading the TX status
	 * from the device.
	 */
	if (!rt2x00_has_cap_flag(rt2x00dev, REQUIRE_TXSTATUS_FIFO) ||
	    !kfifo_is_empty(&rt2x00dev->txstatus_fifo))
		queue_work(rt2x00dev->workqueue, &rt2x00dev->txdone_work);
}

static bool rt2x00usb_kick_tx_entry(struct queue_entry *entry, void *data)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev);
	struct queue_entry_priv_usb *entry_priv = entry->priv_data;
	u32 length;
	int status;

	if (!test_and_clear_bit(ENTRY_DATA_PENDING, &entry->flags) ||
	    test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags))
		return false;

	/*
	 * USB devices require certain padding at the end of each frame
	 * and urb. Those paddings are not included in skbs. Pass entry
	 * to the driver to determine what the overall length should be.
	 */
	length = rt2x00dev->ops->lib->get_tx_data_len(entry);

	status = skb_padto(entry->skb, length);
	if (unlikely(status)) {
		/* TODO: report something more appropriate than IO_FAILED. */
		rt2x00_warn(rt2x00dev, "TX SKB padding error, out of memory\n");
		set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
		rt2x00lib_dmadone(entry);

		return false;
	}

	usb_fill_bulk_urb(entry_priv->urb, usb_dev,
			  usb_sndbulkpipe(usb_dev, entry->queue->usb_endpoint),
			  entry->skb->data, length,
			  rt2x00usb_interrupt_txdone, entry);

	status = usb_submit_urb(entry_priv->urb, GFP_ATOMIC);
	if (status) {
		if (rt2x00usb_check_usb_error(rt2x00dev, status))
			clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
		set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
		rt2x00lib_dmadone(entry);
	}

	return false;
}
/*
 * RX data handlers.
 */
static void rt2x00usb_work_rxdone(struct work_struct *work)
{
	struct rt2x00_dev *rt2x00dev =
	    container_of(work, struct rt2x00_dev, rxdone_work);
	struct queue_entry *entry;
	struct skb_frame_desc *skbdesc;
	u8 rxd[32];

	while (!rt2x00queue_empty(rt2x00dev->rx)) {
		entry = rt2x00queue_get_entry(rt2x00dev->rx, Q_INDEX_DONE);

		if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags) ||
		    !test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags))
			break;

		/*
		 * Fill in desc fields of the skb descriptor.
		 */
		skbdesc = get_skb_frame_desc(entry->skb);
		skbdesc->desc = rxd;
		skbdesc->desc_len = entry->queue->desc_size;

		/*
		 * Send the frame to rt2x00lib for further processing.
		 */
		rt2x00lib_rxdone(entry, GFP_KERNEL);
	}
}

static void rt2x00usb_interrupt_rxdone(struct urb *urb)
{
	struct queue_entry *entry = (struct queue_entry *)urb->context;
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;

	if (!test_and_clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
		return;

	/*
	 * Report the frame as DMA done.
	 */
	rt2x00lib_dmadone(entry);

	/*
	 * Check if the received data is simply too small
	 * to be actually valid, or if the urb is signaling
	 * a problem.
	 */
	if (urb->actual_length < entry->queue->desc_size || urb->status)
		set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);

	/*
	 * Schedule the delayed work for reading the RX status
	 * from the device.
	 */
	queue_work(rt2x00dev->workqueue, &rt2x00dev->rxdone_work);
}
static bool rt2x00usb_kick_rx_entry(struct queue_entry *entry, void *data)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev);
	struct queue_entry_priv_usb *entry_priv = entry->priv_data;
	int status;

	if (test_and_set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags) ||
	    test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags))
		return false;

	rt2x00lib_dmastart(entry);

	usb_fill_bulk_urb(entry_priv->urb, usb_dev,
			  usb_rcvbulkpipe(usb_dev, entry->queue->usb_endpoint),
			  entry->skb->data, entry->skb->len,
			  rt2x00usb_interrupt_rxdone, entry);

	status = usb_submit_urb(entry_priv->urb, GFP_ATOMIC);
	if (status) {
		if (rt2x00usb_check_usb_error(rt2x00dev, status))
			clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
		set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
		rt2x00lib_dmadone(entry);
	}

	return false;
}

void rt2x00usb_kick_queue(struct data_queue *queue)
{
	switch (queue->qid) {
	case QID_AC_VO:
	case QID_AC_VI:
	case QID_AC_BE:
	case QID_AC_BK:
		if (!rt2x00queue_empty(queue))
			rt2x00queue_for_each_entry(queue,
						   Q_INDEX_DONE,
						   Q_INDEX,
						   NULL,
						   rt2x00usb_kick_tx_entry);
		break;
	case QID_RX:
		if (!rt2x00queue_full(queue))
			rt2x00queue_for_each_entry(queue,
						   Q_INDEX,
						   Q_INDEX_DONE,
						   NULL,
						   rt2x00usb_kick_rx_entry);
		break;
	default:
		break;
	}
}
EXPORT_SYMBOL_GPL(rt2x00usb_kick_queue);

static bool rt2x00usb_flush_entry(struct queue_entry *entry, void *data)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct queue_entry_priv_usb *entry_priv = entry->priv_data;
	struct queue_entry_priv_usb_bcn *bcn_priv = entry->priv_data;

	if (!test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
		return false;

	usb_kill_urb(entry_priv->urb);

	/*
	 * Kill guardian urb (if required by driver).
	 */
	if ((entry->queue->qid == QID_BEACON) &&
	    (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_BEACON_GUARD)))
		usb_kill_urb(bcn_priv->guardian_urb);

	return false;
}

void rt2x00usb_flush_queue(struct data_queue *queue, bool drop)
{
	struct work_struct *completion;
	unsigned int i;

	if (drop)
		rt2x00queue_for_each_entry(queue, Q_INDEX_DONE, Q_INDEX, NULL,
					   rt2x00usb_flush_entry);

	/*
	 * Obtain the queue completion handler.
	 */
	switch (queue->qid) {
	case QID_AC_VO:
	case QID_AC_VI:
	case QID_AC_BE:
	case QID_AC_BK:
		completion = &queue->rt2x00dev->txdone_work;
		break;
	case QID_RX:
		completion = &queue->rt2x00dev->rxdone_work;
		break;
	default:
		return;
	}

	for (i = 0; i < 10; i++) {
		/*
		 * Check if the driver is already done, otherwise we
		 * have to sleep a little while to give the driver/hw
		 * the opportunity to complete the interrupt processing itself.
		 */
		if (rt2x00queue_empty(queue))
			break;

		/*
		 * Schedule the completion handler manually; when this
		 * worker function runs, it should clean up the queue.
		 */
		queue_work(queue->rt2x00dev->workqueue, completion);

		/*
		 * Wait for a little while to give the driver
		 * the opportunity to recover itself.
		 */
		msleep(50);
	}
}
EXPORT_SYMBOL_GPL(rt2x00usb_flush_queue);
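/*
 * A minimal sketch, assuming the usual rt2x00lib_ops wiring in
 * rt2x00.h (field names are an assumption, not shown in this file):
 * chipset drivers typically plug these exported helpers straight into
 * their ops table rather than calling them directly, e.g.
 *
 *	.kick_queue	= rt2x00usb_kick_queue,
 *	.flush_queue	= rt2x00usb_flush_queue,
 */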
static void rt2x00usb_watchdog_tx_dma(struct data_queue *queue)
{
	rt2x00_warn(queue->rt2x00dev, "TX queue %d DMA timed out, invoke forced reset\n",
		    queue->qid);

	rt2x00queue_stop_queue(queue);
	rt2x00queue_flush_queue(queue, true);
	rt2x00queue_start_queue(queue);
}

static int rt2x00usb_dma_timeout(struct data_queue *queue)
{
	struct queue_entry *entry;

	entry = rt2x00queue_get_entry(queue, Q_INDEX_DMA_DONE);
	return rt2x00queue_dma_timeout(entry);
}

void rt2x00usb_watchdog(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	tx_queue_for_each(rt2x00dev, queue) {
		if (!rt2x00queue_empty(queue)) {
			if (rt2x00usb_dma_timeout(queue))
				rt2x00usb_watchdog_tx_dma(queue);
		}
	}
}
EXPORT_SYMBOL_GPL(rt2x00usb_watchdog);

/*
 * Radio handlers.
 */
void rt2x00usb_disable_radio(struct rt2x00_dev *rt2x00dev)
{
	rt2x00usb_vendor_request_sw(rt2x00dev, USB_RX_CONTROL, 0, 0,
				    REGISTER_TIMEOUT);
}
EXPORT_SYMBOL_GPL(rt2x00usb_disable_radio);

/*
 * Device initialization handlers.
 */
void rt2x00usb_clear_entry(struct queue_entry *entry)
{
	entry->flags = 0;

	if (entry->queue->qid == QID_RX)
		rt2x00usb_kick_rx_entry(entry, NULL);
}
EXPORT_SYMBOL_GPL(rt2x00usb_clear_entry);

static void rt2x00usb_assign_endpoint(struct data_queue *queue,
				      struct usb_endpoint_descriptor *ep_desc)
{
	struct usb_device *usb_dev = to_usb_device_intf(queue->rt2x00dev->dev);
	int pipe;

	queue->usb_endpoint = usb_endpoint_num(ep_desc);

	if (queue->qid == QID_RX) {
		pipe = usb_rcvbulkpipe(usb_dev, queue->usb_endpoint);
		queue->usb_maxpacket = usb_maxpacket(usb_dev, pipe, 0);
	} else {
		pipe = usb_sndbulkpipe(usb_dev, queue->usb_endpoint);
		queue->usb_maxpacket = usb_maxpacket(usb_dev, pipe, 1);
	}

	if (!queue->usb_maxpacket)
		queue->usb_maxpacket = 1;
}

static int rt2x00usb_find_endpoints(struct rt2x00_dev *rt2x00dev)
{
	struct usb_interface *intf = to_usb_interface(rt2x00dev->dev);
	struct usb_host_interface *intf_desc = intf->cur_altsetting;
	struct usb_endpoint_descriptor *ep_desc;
	struct data_queue *queue = rt2x00dev->tx;
	struct usb_endpoint_descriptor *tx_ep_desc = NULL;
	unsigned int i;

	/*
	 * Walk through all available endpoints to search for "bulk in"
	 * and "bulk out" endpoints. When we find such endpoints, collect
	 * the information we need from the descriptor and assign it
	 * to the queue.
	 */
	for (i = 0; i < intf_desc->desc.bNumEndpoints; i++) {
		ep_desc = &intf_desc->endpoint[i].desc;

		if (usb_endpoint_is_bulk_in(ep_desc)) {
			rt2x00usb_assign_endpoint(rt2x00dev->rx, ep_desc);
		} else if (usb_endpoint_is_bulk_out(ep_desc) &&
			   (queue != queue_end(rt2x00dev))) {
			rt2x00usb_assign_endpoint(queue, ep_desc);
			queue = queue_next(queue);

			tx_ep_desc = ep_desc;
		}
	}

	/*
	 * At least 1 endpoint for RX and 1 endpoint for TX must be available.
	 */
	if (!rt2x00dev->rx->usb_endpoint || !rt2x00dev->tx->usb_endpoint) {
		rt2x00_err(rt2x00dev, "Bulk-in/Bulk-out endpoints not found\n");
		return -EPIPE;
	}
	/*
	 * It is possible that not all queues have a dedicated endpoint.
	 * Loop through all TX queues and copy the endpoint information
	 * which we have gathered from already assigned endpoints.
	 */
	txall_queue_for_each(rt2x00dev, queue) {
		if (!queue->usb_endpoint)
			rt2x00usb_assign_endpoint(queue, tx_ep_desc);
	}

	return 0;
}

static int rt2x00usb_alloc_entries(struct data_queue *queue)
{
	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
	struct queue_entry_priv_usb *entry_priv;
	struct queue_entry_priv_usb_bcn *bcn_priv;
	unsigned int i;

	for (i = 0; i < queue->limit; i++) {
		entry_priv = queue->entries[i].priv_data;
		entry_priv->urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!entry_priv->urb)
			return -ENOMEM;
	}

	/*
	 * If this is not the beacon queue or
	 * no guardian byte was required for the beacon,
	 * then we are done.
	 */
	if (queue->qid != QID_BEACON ||
	    !rt2x00_has_cap_flag(rt2x00dev, REQUIRE_BEACON_GUARD))
		return 0;

	for (i = 0; i < queue->limit; i++) {
		bcn_priv = queue->entries[i].priv_data;
		bcn_priv->guardian_urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!bcn_priv->guardian_urb)
			return -ENOMEM;
	}

	return 0;
}

static void rt2x00usb_free_entries(struct data_queue *queue)
{
	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
	struct queue_entry_priv_usb *entry_priv;
	struct queue_entry_priv_usb_bcn *bcn_priv;
	unsigned int i;

	if (!queue->entries)
		return;

	for (i = 0; i < queue->limit; i++) {
		entry_priv = queue->entries[i].priv_data;
		usb_kill_urb(entry_priv->urb);
		usb_free_urb(entry_priv->urb);
	}

	/*
	 * If this is not the beacon queue or
	 * no guardian byte was required for the beacon,
	 * then we are done.
	 */
	if (queue->qid != QID_BEACON ||
	    !rt2x00_has_cap_flag(rt2x00dev, REQUIRE_BEACON_GUARD))
		return;

	for (i = 0; i < queue->limit; i++) {
		bcn_priv = queue->entries[i].priv_data;
		usb_kill_urb(bcn_priv->guardian_urb);
		usb_free_urb(bcn_priv->guardian_urb);
	}
}

int rt2x00usb_initialize(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	int status;

	/*
	 * Find endpoints for each queue.
	 */
	status = rt2x00usb_find_endpoints(rt2x00dev);
	if (status)
		goto exit;

	/*
	 * Allocate DMA.
	 */
	queue_for_each(rt2x00dev, queue) {
		status = rt2x00usb_alloc_entries(queue);
		if (status)
			goto exit;
	}

	return 0;

exit:
	rt2x00usb_uninitialize(rt2x00dev);

	return status;
}
EXPORT_SYMBOL_GPL(rt2x00usb_initialize);

void rt2x00usb_uninitialize(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	usb_kill_anchored_urbs(rt2x00dev->anchor);
	hrtimer_cancel(&rt2x00dev->txstatus_timer);
	cancel_work_sync(&rt2x00dev->rxdone_work);
	cancel_work_sync(&rt2x00dev->txdone_work);

	queue_for_each(rt2x00dev, queue)
		rt2x00usb_free_entries(queue);
}
EXPORT_SYMBOL_GPL(rt2x00usb_uninitialize);
/*
 * USB driver handlers.
 */
static void rt2x00usb_free_reg(struct rt2x00_dev *rt2x00dev)
{
	kfree(rt2x00dev->rf);
	rt2x00dev->rf = NULL;

	kfree(rt2x00dev->eeprom);
	rt2x00dev->eeprom = NULL;

	kfree(rt2x00dev->csr.cache);
	rt2x00dev->csr.cache = NULL;
}

static int rt2x00usb_alloc_reg(struct rt2x00_dev *rt2x00dev)
{
	rt2x00dev->csr.cache = kzalloc(CSR_CACHE_SIZE, GFP_KERNEL);
	if (!rt2x00dev->csr.cache)
		goto exit;

	rt2x00dev->eeprom = kzalloc(rt2x00dev->ops->eeprom_size, GFP_KERNEL);
	if (!rt2x00dev->eeprom)
		goto exit;

	rt2x00dev->rf = kzalloc(rt2x00dev->ops->rf_size, GFP_KERNEL);
	if (!rt2x00dev->rf)
		goto exit;

	return 0;

exit:
	rt2x00_probe_err("Failed to allocate registers\n");

	rt2x00usb_free_reg(rt2x00dev);

	return -ENOMEM;
}

int rt2x00usb_probe(struct usb_interface *usb_intf,
		    const struct rt2x00_ops *ops)
{
	struct usb_device *usb_dev = interface_to_usbdev(usb_intf);
	struct ieee80211_hw *hw;
	struct rt2x00_dev *rt2x00dev;
	int retval;

	usb_dev = usb_get_dev(usb_dev);
	usb_reset_device(usb_dev);

	hw = ieee80211_alloc_hw(sizeof(struct rt2x00_dev), ops->hw);
	if (!hw) {
		rt2x00_probe_err("Failed to allocate hardware\n");
		retval = -ENOMEM;
		goto exit_put_device;
	}

	usb_set_intfdata(usb_intf, hw);

	rt2x00dev = hw->priv;
	rt2x00dev->dev = &usb_intf->dev;
	rt2x00dev->ops = ops;
	rt2x00dev->hw = hw;

	rt2x00_set_chip_intf(rt2x00dev, RT2X00_CHIP_INTF_USB);

	INIT_WORK(&rt2x00dev->rxdone_work, rt2x00usb_work_rxdone);
	INIT_WORK(&rt2x00dev->txdone_work, rt2x00usb_work_txdone);
	hrtimer_init(&rt2x00dev->txstatus_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL);

	retval = rt2x00usb_alloc_reg(rt2x00dev);
	if (retval)
		goto exit_free_device;

	rt2x00dev->anchor = devm_kmalloc(&usb_dev->dev,
					 sizeof(struct usb_anchor),
					 GFP_KERNEL);
	if (!rt2x00dev->anchor) {
		retval = -ENOMEM;
		goto exit_free_reg;
	}
	init_usb_anchor(rt2x00dev->anchor);

	retval = rt2x00lib_probe_dev(rt2x00dev);
	if (retval)
		goto exit_free_anchor;

	return 0;

exit_free_anchor:
	usb_kill_anchored_urbs(rt2x00dev->anchor);

exit_free_reg:
	rt2x00usb_free_reg(rt2x00dev);

exit_free_device:
	ieee80211_free_hw(hw);

exit_put_device:
	usb_put_dev(usb_dev);

	usb_set_intfdata(usb_intf, NULL);

	return retval;
}
EXPORT_SYMBOL_GPL(rt2x00usb_probe);
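/*
 * Illustrative sketch with hypothetical names, assuming the usual way
 * chipset drivers glue into this library: rt2x00usb_probe() is not
 * registered with the USB core directly; a per-chipset driver wraps it
 * with its own rt2x00_ops and fills a struct usb_driver:
 *
 *	static int example_usb_probe(struct usb_interface *usb_intf,
 *				     const struct usb_device_id *id)
 *	{
 *		return rt2x00usb_probe(usb_intf, &example_ops);
 *	}
 *
 *	static struct usb_driver example_usb_driver = {
 *		.name		= KBUILD_MODNAME,
 *		.id_table	= example_device_table,
 *		.probe		= example_usb_probe,
 *		.disconnect	= rt2x00usb_disconnect,
 *		.suspend	= rt2x00usb_suspend,
 *		.resume		= rt2x00usb_resume,
 *	};
 */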
void rt2x00usb_disconnect(struct usb_interface *usb_intf)
{
	struct ieee80211_hw *hw = usb_get_intfdata(usb_intf);
	struct rt2x00_dev *rt2x00dev = hw->priv;

	/*
	 * Free all allocated data.
	 */
	rt2x00lib_remove_dev(rt2x00dev);
	rt2x00usb_free_reg(rt2x00dev);
	ieee80211_free_hw(hw);

	/*
	 * Free the USB device data.
	 */
	usb_set_intfdata(usb_intf, NULL);
	usb_put_dev(interface_to_usbdev(usb_intf));
}
EXPORT_SYMBOL_GPL(rt2x00usb_disconnect);

#ifdef CONFIG_PM
int rt2x00usb_suspend(struct usb_interface *usb_intf, pm_message_t state)
{
	struct ieee80211_hw *hw = usb_get_intfdata(usb_intf);
	struct rt2x00_dev *rt2x00dev = hw->priv;

	return rt2x00lib_suspend(rt2x00dev, state);
}
EXPORT_SYMBOL_GPL(rt2x00usb_suspend);

int rt2x00usb_resume(struct usb_interface *usb_intf)
{
	struct ieee80211_hw *hw = usb_get_intfdata(usb_intf);
	struct rt2x00_dev *rt2x00dev = hw->priv;

	return rt2x00lib_resume(rt2x00dev);
}
EXPORT_SYMBOL_GPL(rt2x00usb_resume);
#endif /* CONFIG_PM */

/*
 * rt2x00usb module information.
 */
MODULE_AUTHOR(DRV_PROJECT);
MODULE_VERSION(DRV_VERSION);
MODULE_DESCRIPTION("rt2x00 usb library");
MODULE_LICENSE("GPL");