/*
 * USB Network driver infrastructure
 * Copyright (C) 2000-2005 by David Brownell
 * Copyright (C) 2003-2005 David Hollis <dhollis@davehollis.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/*
 * This is a generic "USB networking" framework that works with several
 * kinds of full and high speed networking devices: host-to-host cables,
 * smart usb peripherals, and actual Ethernet adapters.
 *
 * These devices usually differ in terms of control protocols (if they
 * even have one!) and sometimes they define new framing to wrap or batch
 * Ethernet packets.  Otherwise, they talk to USB pretty much the same,
 * so interface (un)binding, endpoint I/O queues, fault handling, and other
 * issues can usefully be addressed by this framework.
 */

// #define	DEBUG			// error path messages, extra info
// #define	VERBOSE			// more; success messages

#include <linux/module.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ctype.h>
#include <linux/ethtool.h>
#include <linux/workqueue.h>
#include <linux/mii.h>
#include <linux/usb.h>
#include <linux/usb/usbnet.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/pm_runtime.h>

#define DRIVER_VERSION		"22-Aug-2005"


/*-------------------------------------------------------------------------*/

/*
 * Nineteen USB 1.1 max size bulk transactions per frame (ms), max.
 * Several dozen bytes of IPv4 data can fit in two such transactions.
 * One maximum size Ethernet packet takes twenty four of them.
 * For high speed, each frame comfortably fits almost 36 max size
 * Ethernet packets (so queues should be bigger).
 *
 * REVISIT qlens should be members of 'struct usbnet'; the goal is to
 * let the USB host controller be busy for 5msec or more before an irq
 * is required, under load.  Jumbograms change the equation.
 */
#define	RX_MAX_QUEUE_MEMORY (60 * 1518)
#define	RX_QLEN(dev) (((dev)->udev->speed == USB_SPEED_HIGH) ? \
			(RX_MAX_QUEUE_MEMORY/(dev)->rx_urb_size) : 4)
#define	TX_QLEN(dev) (((dev)->udev->speed == USB_SPEED_HIGH) ? \
			(RX_MAX_QUEUE_MEMORY/(dev)->hard_mtu) : 4)
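
/*
 * For example (assuming the default Ethernet sizing, where rx_urb_size
 * equals hard_mtu = 1500-byte MTU + 14-byte header = 1514 bytes):
 * RX_MAX_QUEUE_MEMORY is 60 * 1518 = 91080 bytes, so RX_QLEN() allows
 * 91080 / 1514 = 60 rx URBs in flight on a high speed link, and TX_QLEN()
 * likewise 60; both drop to just 4 URBs on a full speed link.
 */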

// reawaken network queue this soon after stopping; else watchdog barks
#define TX_TIMEOUT_JIFFIES	(5*HZ)

// throttle rx/tx briefly after some faults, so khubd might disconnect()
// us (it polls at HZ/4 usually) before we report too many false errors.
#define THROTTLE_JIFFIES	(HZ/8)

// between wakeups
#define UNLINK_TIMEOUT_MS	3

/*-------------------------------------------------------------------------*/

// randomly generated ethernet address
static u8	node_id [ETH_ALEN];

static const char driver_name [] = "usbnet";

/* use ethtool to change the level for any given device */
static int msg_level = -1;
module_param (msg_level, int, 0);
MODULE_PARM_DESC (msg_level, "Override default message level");

/*-------------------------------------------------------------------------*/

/* handles CDC Ethernet and many other network "bulk data" interfaces */
int usbnet_get_endpoints(struct usbnet *dev, struct usb_interface *intf)
{
	int				tmp;
	struct usb_host_interface	*alt = NULL;
	struct usb_host_endpoint	*in = NULL, *out = NULL;
	struct usb_host_endpoint	*status = NULL;

	for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
		unsigned	ep;

		in = out = status = NULL;
		alt = intf->altsetting + tmp;

		/* take the first altsetting with in-bulk + out-bulk;
		 * remember any status endpoint, just in case;
		 * ignore other endpoints and altsettings.
		 */
		for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
			struct usb_host_endpoint	*e;
			int				intr = 0;

			e = alt->endpoint + ep;
			switch (e->desc.bmAttributes) {
			case USB_ENDPOINT_XFER_INT:
				if (!usb_endpoint_dir_in(&e->desc))
					continue;
				intr = 1;
				/* FALLTHROUGH */
			case USB_ENDPOINT_XFER_BULK:
				break;
			default:
				continue;
			}
			if (usb_endpoint_dir_in(&e->desc)) {
				if (!intr && !in)
					in = e;
				else if (intr && !status)
					status = e;
			} else {
				if (!out)
					out = e;
			}
		}
		if (in && out)
			break;
	}
	if (!alt || !in || !out)
		return -EINVAL;

	if (alt->desc.bAlternateSetting != 0 ||
	    !(dev->driver_info->flags & FLAG_NO_SETINT)) {
		tmp = usb_set_interface (dev->udev, alt->desc.bInterfaceNumber,
				alt->desc.bAlternateSetting);
		if (tmp < 0)
			return tmp;
	}

	dev->in = usb_rcvbulkpipe (dev->udev,
			in->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
	dev->out = usb_sndbulkpipe (dev->udev,
			out->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
	dev->status = status;
	return 0;
}
EXPORT_SYMBOL_GPL(usbnet_get_endpoints);

int usbnet_get_ethernet_addr(struct usbnet *dev, int iMACAddress)
{
	int 		tmp, i;
	unsigned char	buf [13];

	tmp = usb_string(dev->udev, iMACAddress, buf, sizeof buf);
	if (tmp != 12) {
		dev_dbg(&dev->udev->dev,
			"bad MAC string %d fetch, %d\n", iMACAddress, tmp);
		if (tmp >= 0)
			tmp = -EINVAL;
		return tmp;
	}
	for (i = tmp = 0; i < 6; i++, tmp += 2)
		dev->net->dev_addr [i] =
			(hex_to_bin(buf[tmp]) << 4) + hex_to_bin(buf[tmp + 1]);
	return 0;
}
EXPORT_SYMBOL_GPL(usbnet_get_ethernet_addr);
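
/*
 * For illustration: a device whose iMACAddress string descriptor holds the
 * twelve hex digits "001A2B3C4D5E" ends up with dev_addr 00:1a:2b:3c:4d:5e;
 * anything other than exactly 12 characters is rejected with -EINVAL by
 * usbnet_get_ethernet_addr() above.
 */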

static void intr_complete (struct urb *urb)
{
	struct usbnet	*dev = urb->context;
	int		status = urb->status;

	switch (status) {
	/* success */
	case 0:
		dev->driver_info->status(dev, urb);
		break;

	/* software-driven interface shutdown */
	case -ENOENT:		/* urb killed */
	case -ESHUTDOWN:	/* hardware gone */
		netif_dbg(dev, ifdown, dev->net,
			  "intr shutdown, code %d\n", status);
		return;

	/* NOTE:  not throttling like RX/TX, since this endpoint
	 * already polls infrequently
	 */
	default:
		netdev_dbg(dev->net, "intr status %d\n", status);
		break;
	}

	if (!netif_running (dev->net))
		return;

	status = usb_submit_urb (urb, GFP_ATOMIC);
	if (status != 0)
		netif_err(dev, timer, dev->net,
			  "intr resubmit --> %d\n", status);
}

static int init_status (struct usbnet *dev, struct usb_interface *intf)
{
	char		*buf = NULL;
	unsigned	pipe = 0;
	unsigned	maxp;
	unsigned	period;

	if (!dev->driver_info->status)
		return 0;

	pipe = usb_rcvintpipe (dev->udev,
			dev->status->desc.bEndpointAddress
				& USB_ENDPOINT_NUMBER_MASK);
	maxp = usb_maxpacket (dev->udev, pipe, 0);

	/* avoid 1 msec chatter:  min 8 msec poll rate */
	period = max ((int) dev->status->desc.bInterval,
		(dev->udev->speed == USB_SPEED_HIGH) ? 7 : 3);

	buf = kmalloc (maxp, GFP_KERNEL);
	if (buf) {
		dev->interrupt = usb_alloc_urb (0, GFP_KERNEL);
		if (!dev->interrupt) {
			kfree (buf);
			return -ENOMEM;
		} else {
			usb_fill_int_urb(dev->interrupt, dev->udev, pipe,
				buf, maxp, intr_complete, dev, period);
			dev->interrupt->transfer_flags |= URB_FREE_BUFFER;
			dev_dbg(&intf->dev,
				"status ep%din, %d bytes period %d\n",
				usb_pipeendpoint(pipe), maxp, period);
		}
	}
	return 0;
}

/* Passes this packet up the stack, updating its accounting.
 * Some link protocols batch packets, so their rx_fixup paths
 * can return clones as well as just modify the original skb.
 */
void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb)
{
	int	status;

	if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
		skb_queue_tail(&dev->rxq_pause, skb);
		return;
	}

	skb->protocol = eth_type_trans (skb, dev->net);
	dev->net->stats.rx_packets++;
	dev->net->stats.rx_bytes += skb->len;

	netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
		  skb->len + sizeof (struct ethhdr), skb->protocol);
	memset (skb->cb, 0, sizeof (struct skb_data));

	if (skb_defer_rx_timestamp(skb))
		return;

	status = netif_rx (skb);
	if (status != NET_RX_SUCCESS)
		netif_dbg(dev, rx_err, dev->net,
			  "netif_rx status %d\n", status);
}
EXPORT_SYMBOL_GPL(usbnet_skb_return);


/*-------------------------------------------------------------------------
 *
 * Network Device Driver (peer link to "Host Device", from USB host)
 *
 *-------------------------------------------------------------------------*/

int usbnet_change_mtu (struct net_device *net, int new_mtu)
{
	struct usbnet	*dev = netdev_priv(net);
	int		ll_mtu = new_mtu + net->hard_header_len;
	int		old_hard_mtu = dev->hard_mtu;
	int		old_rx_urb_size = dev->rx_urb_size;

	if (new_mtu <= 0)
		return -EINVAL;
	// no second zero-length packet read wanted after mtu-sized packets
	if ((ll_mtu % dev->maxpacket) == 0)
		return -EDOM;
	net->mtu = new_mtu;

	dev->hard_mtu = net->mtu + net->hard_header_len;
	if (dev->rx_urb_size == old_hard_mtu) {
		dev->rx_urb_size = dev->hard_mtu;
		if (dev->rx_urb_size > old_rx_urb_size)
			usbnet_unlink_rx_urbs(dev);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(usbnet_change_mtu);

/* The caller must hold list->lock */
static void __usbnet_queue_skb(struct sk_buff_head *list,
			struct sk_buff *newsk, enum skb_state state)
{
	struct skb_data *entry = (struct skb_data *) newsk->cb;

	__skb_queue_tail(list, newsk);
	entry->state = state;
}

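/* Bookkeeping overview: each rx and tx skb handled here carries a
 * struct skb_data in skb->cb (see the BUILD_BUG_ON in usbnet_init)
 * recording the owning device, its urb, and its length.  entry->state
 * tracks the skb through its life: rx_start/tx_start while the urb is
 * in flight on dev->rxq/dev->txq, rx_done/tx_done once the completion
 * handler has run, unlink_start while unlink_urbs() is cancelling it,
 * and rx_cleanup when only freeing remains.  defer_bh() below moves
 * completed skbs onto dev->done, where the usbnet_bh() tasklet
 * disposes of them.
 */
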
/*-------------------------------------------------------------------------*/

/* some LK 2.4 HCDs oopsed if we freed or resubmitted urbs from
 * completion callbacks.  2.5 should have fixed those bugs...
 */

static enum skb_state defer_bh(struct usbnet *dev, struct sk_buff *skb,
		struct sk_buff_head *list, enum skb_state state)
{
	unsigned long		flags;
	enum skb_state		old_state;
	struct skb_data *entry = (struct skb_data *) skb->cb;

	spin_lock_irqsave(&list->lock, flags);
	old_state = entry->state;
	entry->state = state;
	__skb_unlink(skb, list);
	spin_unlock(&list->lock);
	spin_lock(&dev->done.lock);
	__skb_queue_tail(&dev->done, skb);
	if (dev->done.qlen == 1)
		tasklet_schedule(&dev->bh);
	spin_unlock_irqrestore(&dev->done.lock, flags);
	return old_state;
}

/* some work can't be done in tasklets, so we use keventd
 *
 * NOTE:  annoying asymmetry:  if it's active, schedule_work() fails,
 * but tasklet_schedule() doesn't.  hope the failure is rare.
 */
void usbnet_defer_kevent (struct usbnet *dev, int work)
{
	set_bit (work, &dev->flags);
	if (!schedule_work (&dev->kevent))
		netdev_err(dev->net, "kevent %d may have been dropped\n", work);
	else
		netdev_dbg(dev->net, "kevent %d scheduled\n", work);
}
EXPORT_SYMBOL_GPL(usbnet_defer_kevent);

/*-------------------------------------------------------------------------*/

static void rx_complete (struct urb *urb);

static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
{
	struct sk_buff		*skb;
	struct skb_data		*entry;
	int			retval = 0;
	unsigned long		lockflags;
	size_t			size = dev->rx_urb_size;

	skb = __netdev_alloc_skb_ip_align(dev->net, size, flags);
	if (!skb) {
		netif_dbg(dev, rx_err, dev->net, "no rx skb\n");
		usbnet_defer_kevent (dev, EVENT_RX_MEMORY);
		usb_free_urb (urb);
		return -ENOMEM;
	}

	entry = (struct skb_data *) skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->length = 0;

	usb_fill_bulk_urb (urb, dev->udev, dev->in,
		skb->data, size, rx_complete, skb);

	spin_lock_irqsave (&dev->rxq.lock, lockflags);

	if (netif_running (dev->net) &&
	    netif_device_present (dev->net) &&
	    !test_bit (EVENT_RX_HALT, &dev->flags) &&
	    !test_bit (EVENT_DEV_ASLEEP, &dev->flags)) {
		switch (retval = usb_submit_urb (urb, GFP_ATOMIC)) {
		case -EPIPE:
			usbnet_defer_kevent (dev, EVENT_RX_HALT);
			break;
		case -ENOMEM:
			usbnet_defer_kevent (dev, EVENT_RX_MEMORY);
			break;
		case -ENODEV:
			netif_dbg(dev, ifdown, dev->net, "device gone\n");
			netif_device_detach (dev->net);
			break;
		case -EHOSTUNREACH:
			retval = -ENOLINK;
			break;
		default:
			netif_dbg(dev, rx_err, dev->net,
				  "rx submit, %d\n", retval);
			tasklet_schedule (&dev->bh);
			break;
		case 0:
			__usbnet_queue_skb(&dev->rxq, skb, rx_start);
		}
	} else {
		netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
		retval = -ENOLINK;
	}
	spin_unlock_irqrestore (&dev->rxq.lock, lockflags);
	if (retval) {
		dev_kfree_skb_any (skb);
		usb_free_urb (urb);
	}
	return retval;
}


/*-------------------------------------------------------------------------*/

static inline void rx_process (struct usbnet *dev, struct sk_buff *skb)
{
	if (dev->driver_info->rx_fixup &&
	    !dev->driver_info->rx_fixup (dev, skb)) {
		/* With RX_ASSEMBLE, rx_fixup() must update counters */
		if (!(dev->driver_info->flags & FLAG_RX_ASSEMBLE))
			dev->net->stats.rx_errors++;
		goto done;
	}
	// else network stack removes extra byte if we forced a short packet

	if (skb->len) {
		/* all data was already cloned from skb inside the driver */
		if (dev->driver_info->flags & FLAG_MULTI_PACKET)
			dev_kfree_skb_any(skb);
		else
			usbnet_skb_return(dev, skb);
		return;
	}

	netif_dbg(dev, rx_err, dev->net, "drop\n");
	dev->net->stats.rx_errors++;
done:
	skb_queue_tail(&dev->done, skb);
}

/*-------------------------------------------------------------------------*/

static void rx_complete (struct urb *urb)
{
	struct sk_buff		*skb = (struct sk_buff *) urb->context;
	struct skb_data		*entry = (struct skb_data *) skb->cb;
	struct usbnet		*dev = entry->dev;
	int			urb_status = urb->status;
	enum skb_state		state;

	skb_put (skb, urb->actual_length);
	state = rx_done;
	entry->urb = NULL;

	switch (urb_status) {
	/* success */
	case 0:
		if (skb->len < dev->net->hard_header_len) {
			state = rx_cleanup;
			dev->net->stats.rx_errors++;
			dev->net->stats.rx_length_errors++;
			netif_dbg(dev, rx_err, dev->net,
				  "rx length %d\n", skb->len);
		}
		break;

	/* stalls need manual reset. this is rare ... except that
	 * when going through USB 2.0 TTs, unplug appears this way.
	 * we avoid the highspeed version of the ETIMEDOUT/EILSEQ
	 * storm, recovering as needed.
	 */
	case -EPIPE:
		dev->net->stats.rx_errors++;
		usbnet_defer_kevent (dev, EVENT_RX_HALT);
		// FALLTHROUGH

	/* software-driven interface shutdown */
	case -ECONNRESET:		/* async unlink */
	case -ESHUTDOWN:		/* hardware gone */
		netif_dbg(dev, ifdown, dev->net,
			  "rx shutdown, code %d\n", urb_status);
		goto block;

	/* we get controller i/o faults during khubd disconnect() delays.
	 * throttle down resubmits, to avoid log floods; just temporarily,
	 * so we still recover when the fault isn't a khubd delay.
	 */
	case -EPROTO:
	case -ETIME:
	case -EILSEQ:
		dev->net->stats.rx_errors++;
		if (!timer_pending (&dev->delay)) {
			mod_timer (&dev->delay, jiffies + THROTTLE_JIFFIES);
			netif_dbg(dev, link, dev->net,
				  "rx throttle %d\n", urb_status);
		}
block:
		state = rx_cleanup;
		entry->urb = urb;
		urb = NULL;
		break;

	/* data overrun ... flush fifo? */
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		// FALLTHROUGH

	default:
		state = rx_cleanup;
		dev->net->stats.rx_errors++;
		netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
		break;
	}

	state = defer_bh(dev, skb, &dev->rxq, state);

	if (urb) {
		if (netif_running (dev->net) &&
		    !test_bit (EVENT_RX_HALT, &dev->flags) &&
		    state != unlink_start) {
			rx_submit (dev, urb, GFP_ATOMIC);
			usb_mark_last_busy(dev->udev);
			return;
		}
		usb_free_urb (urb);
	}
	netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
}

/*-------------------------------------------------------------------------*/
void usbnet_pause_rx(struct usbnet *dev)
{
	set_bit(EVENT_RX_PAUSED, &dev->flags);

	netif_dbg(dev, rx_status, dev->net, "paused rx queue enabled\n");
}
EXPORT_SYMBOL_GPL(usbnet_pause_rx);

void usbnet_resume_rx(struct usbnet *dev)
{
	struct sk_buff *skb;
	int num = 0;

	clear_bit(EVENT_RX_PAUSED, &dev->flags);

	while ((skb = skb_dequeue(&dev->rxq_pause)) != NULL) {
		usbnet_skb_return(dev, skb);
		num++;
	}

	tasklet_schedule(&dev->bh);

	netif_dbg(dev, rx_status, dev->net,
		  "paused rx queue disabled, %d skbs requeued\n", num);
}
EXPORT_SYMBOL_GPL(usbnet_resume_rx);

void usbnet_purge_paused_rxq(struct usbnet *dev)
{
	skb_queue_purge(&dev->rxq_pause);
}
EXPORT_SYMBOL_GPL(usbnet_purge_paused_rxq);

/*-------------------------------------------------------------------------*/

// unlink pending rx/tx; completion handlers do all other cleanup

static int unlink_urbs (struct usbnet *dev, struct sk_buff_head *q)
{
	unsigned long		flags;
	struct sk_buff		*skb;
	int			count = 0;

	spin_lock_irqsave (&q->lock, flags);
	while (!skb_queue_empty(q)) {
		struct skb_data		*entry;
		struct urb		*urb;
		int			retval;

		skb_queue_walk(q, skb) {
			entry = (struct skb_data *) skb->cb;
			if (entry->state != unlink_start)
				goto found;
		}
		break;
found:
		entry->state = unlink_start;
		urb = entry->urb;

		/*
		 * Take a reference on the URB to keep it from being
		 * freed during usb_unlink_urb, which might otherwise
		 * trigger a use-after-free inside usb_unlink_urb since
		 * usb_unlink_urb always races with the .complete
		 * handler (including defer_bh).
		 */
		usb_get_urb(urb);
		spin_unlock_irqrestore(&q->lock, flags);
		// during some PM-driven resume scenarios,
		// these (async) unlinks complete immediately
		retval = usb_unlink_urb (urb);
		if (retval != -EINPROGRESS && retval != 0)
			netdev_dbg(dev->net, "unlink urb err, %d\n", retval);
		else
			count++;
		usb_put_urb(urb);
		spin_lock_irqsave(&q->lock, flags);
	}
	spin_unlock_irqrestore (&q->lock, flags);
	return count;
}

// Flush all pending rx urbs
// minidrivers may need to do this when the MTU changes

void usbnet_unlink_rx_urbs(struct usbnet *dev)
{
	if (netif_running(dev->net)) {
		(void) unlink_urbs (dev, &dev->rxq);
		tasklet_schedule(&dev->bh);
	}
}
EXPORT_SYMBOL_GPL(usbnet_unlink_rx_urbs);

/*-------------------------------------------------------------------------*/

// precondition: never called in_interrupt
static void usbnet_terminate_urbs(struct usbnet *dev)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
	DECLARE_WAITQUEUE(wait, current);
	int temp;

	/* ensure there are no more active urbs */
	add_wait_queue(&unlink_wakeup, &wait);
	set_current_state(TASK_UNINTERRUPTIBLE);
	dev->wait = &unlink_wakeup;
	temp = unlink_urbs(dev, &dev->txq) +
		unlink_urbs(dev, &dev->rxq);

	/* maybe wait for deletions to finish. */
	while (!skb_queue_empty(&dev->rxq)
		&& !skb_queue_empty(&dev->txq)
		&& !skb_queue_empty(&dev->done)) {
		schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
		set_current_state(TASK_UNINTERRUPTIBLE);
		netif_dbg(dev, ifdown, dev->net,
			  "waited for %d urb completions\n", temp);
	}
	set_current_state(TASK_RUNNING);
	dev->wait = NULL;
	remove_wait_queue(&unlink_wakeup, &wait);
}

int usbnet_stop (struct net_device *net)
{
	struct usbnet		*dev = netdev_priv(net);
	struct driver_info	*info = dev->driver_info;
	int			retval;

	clear_bit(EVENT_DEV_OPEN, &dev->flags);
	netif_stop_queue (net);

	netif_info(dev, ifdown, dev->net,
		   "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
		   net->stats.rx_packets, net->stats.tx_packets,
		   net->stats.rx_errors, net->stats.tx_errors);

	/* allow minidriver to stop correctly (wireless devices to turn off
	 * radio etc) */
	if (info->stop) {
		retval = info->stop(dev);
		if (retval < 0)
			netif_info(dev, ifdown, dev->net,
				   "stop fail (%d) usbnet usb-%s-%s, %s\n",
				   retval,
				   dev->udev->bus->bus_name, dev->udev->devpath,
				   info->description);
	}

	if (!(info->flags & FLAG_AVOID_UNLINK_URBS))
		usbnet_terminate_urbs(dev);

	usb_kill_urb(dev->interrupt);

	usbnet_purge_paused_rxq(dev);

	/* deferred work (task, timer, softirq) must also stop.
	 * can't flush_scheduled_work() until we drop rtnl (later),
	 * else workers could deadlock; so make workers a NOP.
	 */
	dev->flags = 0;
	del_timer_sync (&dev->delay);
	tasklet_kill (&dev->bh);
	if (info->manage_power)
		info->manage_power(dev, 0);
	else
		usb_autopm_put_interface(dev->intf);

	return 0;
}
EXPORT_SYMBOL_GPL(usbnet_stop);

/*-------------------------------------------------------------------------*/

// posts reads, and enables write queuing

// precondition: never called in_interrupt

int usbnet_open (struct net_device *net)
{
	struct usbnet		*dev = netdev_priv(net);
	int			retval;
	struct driver_info	*info = dev->driver_info;

	if ((retval = usb_autopm_get_interface(dev->intf)) < 0) {
		netif_info(dev, ifup, dev->net,
			   "resumption fail (%d) usbnet usb-%s-%s, %s\n",
			   retval,
			   dev->udev->bus->bus_name,
			   dev->udev->devpath,
			   info->description);
		goto done_nopm;
	}

	// put into "known safe" state
	if (info->reset && (retval = info->reset (dev)) < 0) {
		netif_info(dev, ifup, dev->net,
			   "open reset fail (%d) usbnet usb-%s-%s, %s\n",
			   retval,
			   dev->udev->bus->bus_name,
			   dev->udev->devpath,
			   info->description);
		goto done;
	}

	// insist peer be connected
	if (info->check_connect && (retval = info->check_connect (dev)) < 0) {
		netif_dbg(dev, ifup, dev->net, "can't open; %d\n", retval);
		goto done;
	}

	/* start any status interrupt transfer */
	if (dev->interrupt) {
		retval = usb_submit_urb (dev->interrupt, GFP_KERNEL);
		if (retval < 0) {
			netif_err(dev, ifup, dev->net,
				  "intr submit %d\n", retval);
			goto done;
		}
	}

	set_bit(EVENT_DEV_OPEN, &dev->flags);
	netif_start_queue (net);
	netif_info(dev, ifup, dev->net,
		   "open: enable queueing (rx %d, tx %d) mtu %d %s framing\n",
		   (int)RX_QLEN(dev), (int)TX_QLEN(dev),
		   dev->net->mtu,
		   (dev->driver_info->flags & FLAG_FRAMING_NC) ? "NetChip" :
		   (dev->driver_info->flags & FLAG_FRAMING_GL) ? "GeneSys" :
		   (dev->driver_info->flags & FLAG_FRAMING_Z) ? "Zaurus" :
		   (dev->driver_info->flags & FLAG_FRAMING_RN) ? "RNDIS" :
		   (dev->driver_info->flags & FLAG_FRAMING_AX) ? "ASIX" :
		   "simple");

	// delay posting reads until we're fully open
	tasklet_schedule (&dev->bh);
	if (info->manage_power) {
		retval = info->manage_power(dev, 1);
		if (retval < 0)
			goto done_manage_power_error;
		usb_autopm_put_interface(dev->intf);
	}
	return retval;

done_manage_power_error:
	clear_bit(EVENT_DEV_OPEN, &dev->flags);
done:
	usb_autopm_put_interface(dev->intf);
done_nopm:
	return retval;
}
EXPORT_SYMBOL_GPL(usbnet_open);

/*-------------------------------------------------------------------------*/

/* ethtool methods; minidrivers may need to add some more, but
 * they'll probably want to use this base set.
 */

int usbnet_get_settings (struct net_device *net, struct ethtool_cmd *cmd)
{
	struct usbnet *dev = netdev_priv(net);

	if (!dev->mii.mdio_read)
		return -EOPNOTSUPP;

	return mii_ethtool_gset(&dev->mii, cmd);
}
EXPORT_SYMBOL_GPL(usbnet_get_settings);

int usbnet_set_settings (struct net_device *net, struct ethtool_cmd *cmd)
{
	struct usbnet *dev = netdev_priv(net);
	int retval;

	if (!dev->mii.mdio_write)
		return -EOPNOTSUPP;

	retval = mii_ethtool_sset(&dev->mii, cmd);

	/* link speed/duplex might have changed */
	if (dev->driver_info->link_reset)
		dev->driver_info->link_reset(dev);

	return retval;

}
EXPORT_SYMBOL_GPL(usbnet_set_settings);

u32 usbnet_get_link (struct net_device *net)
{
	struct usbnet *dev = netdev_priv(net);

	/* If a check_connect is defined, return its result */
	if (dev->driver_info->check_connect)
		return dev->driver_info->check_connect (dev) == 0;

	/* if the device has mii operations, use those */
	if (dev->mii.mdio_read)
		return mii_link_ok(&dev->mii);

	/* Otherwise, dtrt for drivers calling netif_carrier_{on,off} */
	return ethtool_op_get_link(net);
}
EXPORT_SYMBOL_GPL(usbnet_get_link);

int usbnet_nway_reset(struct net_device *net)
{
	struct usbnet *dev = netdev_priv(net);

	if (!dev->mii.mdio_write)
		return -EOPNOTSUPP;

	return mii_nway_restart(&dev->mii);
}
EXPORT_SYMBOL_GPL(usbnet_nway_reset);

void usbnet_get_drvinfo (struct net_device *net, struct ethtool_drvinfo *info)
{
	struct usbnet *dev = netdev_priv(net);

	strlcpy (info->driver, dev->driver_name, sizeof info->driver);
	strlcpy (info->version, DRIVER_VERSION, sizeof info->version);
	strlcpy (info->fw_version, dev->driver_info->description,
		sizeof info->fw_version);
	usb_make_path (dev->udev, info->bus_info, sizeof info->bus_info);
}
EXPORT_SYMBOL_GPL(usbnet_get_drvinfo);

u32 usbnet_get_msglevel (struct net_device *net)
{
	struct usbnet *dev = netdev_priv(net);

	return dev->msg_enable;
}
EXPORT_SYMBOL_GPL(usbnet_get_msglevel);

void usbnet_set_msglevel (struct net_device *net, u32 level)
{
	struct usbnet *dev = netdev_priv(net);

	dev->msg_enable = level;
}
EXPORT_SYMBOL_GPL(usbnet_set_msglevel);

/* drivers may override default ethtool_ops in their bind() routine */
static const struct ethtool_ops usbnet_ethtool_ops = {
	.get_settings		= usbnet_get_settings,
	.set_settings		= usbnet_set_settings,
	.get_link		= usbnet_get_link,
	.nway_reset		= usbnet_nway_reset,
	.get_drvinfo		= usbnet_get_drvinfo,
	.get_msglevel		= usbnet_get_msglevel,
	.set_msglevel		= usbnet_set_msglevel,
	.get_ts_info		= ethtool_op_get_ts_info,
};
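
/*
 * A minidriver that wants different ethtool behavior can install its own
 * table from bind(); a hypothetical example (the my_* identifiers below
 * are not part of this file) might keep most of the helpers above and
 * only override one or two entries:
 *
 *	static const struct ethtool_ops my_ethtool_ops = {
 *		.get_drvinfo	= my_get_drvinfo,
 *		.get_link	= usbnet_get_link,
 *		.get_msglevel	= usbnet_get_msglevel,
 *		.set_msglevel	= usbnet_set_msglevel,
 *	};
 *
 *	// in the minidriver's bind():
 *	dev->net->ethtool_ops = &my_ethtool_ops;
 */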

/*-------------------------------------------------------------------------*/

/* work that cannot be done in interrupt context uses keventd.
 *
 * NOTE:  with 2.5 we could do more of this using completion callbacks,
 * especially now that control transfers can be queued.
 */
static void
kevent (struct work_struct *work)
{
	struct usbnet		*dev =
		container_of(work, struct usbnet, kevent);
	int			status;

	/* usb_clear_halt() needs a thread context */
	if (test_bit (EVENT_TX_HALT, &dev->flags)) {
		unlink_urbs (dev, &dev->txq);
		status = usb_autopm_get_interface(dev->intf);
		if (status < 0)
			goto fail_pipe;
		status = usb_clear_halt (dev->udev, dev->out);
		usb_autopm_put_interface(dev->intf);
		if (status < 0 &&
		    status != -EPIPE &&
		    status != -ESHUTDOWN) {
			if (netif_msg_tx_err (dev))
fail_pipe:
				netdev_err(dev->net, "can't clear tx halt, status %d\n",
					   status);
		} else {
			clear_bit (EVENT_TX_HALT, &dev->flags);
			if (status != -ESHUTDOWN)
				netif_wake_queue (dev->net);
		}
	}
	if (test_bit (EVENT_RX_HALT, &dev->flags)) {
		unlink_urbs (dev, &dev->rxq);
		status = usb_autopm_get_interface(dev->intf);
		if (status < 0)
			goto fail_halt;
		status = usb_clear_halt (dev->udev, dev->in);
		usb_autopm_put_interface(dev->intf);
		if (status < 0 &&
		    status != -EPIPE &&
		    status != -ESHUTDOWN) {
			if (netif_msg_rx_err (dev))
fail_halt:
				netdev_err(dev->net, "can't clear rx halt, status %d\n",
					   status);
		} else {
			clear_bit (EVENT_RX_HALT, &dev->flags);
			tasklet_schedule (&dev->bh);
		}
	}

	/* tasklet could resubmit itself forever if memory is tight */
	if (test_bit (EVENT_RX_MEMORY, &dev->flags)) {
		struct urb	*urb = NULL;
		int resched = 1;

		if (netif_running (dev->net))
			urb = usb_alloc_urb (0, GFP_KERNEL);
		else
			clear_bit (EVENT_RX_MEMORY, &dev->flags);
		if (urb != NULL) {
			clear_bit (EVENT_RX_MEMORY, &dev->flags);
			status = usb_autopm_get_interface(dev->intf);
			if (status < 0) {
				usb_free_urb(urb);
				goto fail_lowmem;
			}
			if (rx_submit (dev, urb, GFP_KERNEL) == -ENOLINK)
				resched = 0;
			usb_autopm_put_interface(dev->intf);
fail_lowmem:
			if (resched)
				tasklet_schedule (&dev->bh);
		}
	}

	if (test_bit (EVENT_LINK_RESET, &dev->flags)) {
		struct driver_info	*info = dev->driver_info;
		int			retval = 0;

		clear_bit (EVENT_LINK_RESET, &dev->flags);
		status = usb_autopm_get_interface(dev->intf);
		if (status < 0)
			goto skip_reset;
		if(info->link_reset && (retval = info->link_reset(dev)) < 0) {
			usb_autopm_put_interface(dev->intf);
skip_reset:
			netdev_info(dev->net, "link reset failed (%d) usbnet usb-%s-%s, %s\n",
				    retval,
				    dev->udev->bus->bus_name,
				    dev->udev->devpath,
				    info->description);
		} else {
			usb_autopm_put_interface(dev->intf);
		}
	}

	if (dev->flags)
		netdev_dbg(dev->net, "kevent done, flags = 0x%lx\n", dev->flags);
}

/*-------------------------------------------------------------------------*/

static void tx_complete (struct urb *urb)
{
	struct sk_buff		*skb = (struct sk_buff *) urb->context;
	struct skb_data		*entry = (struct skb_data *) skb->cb;
	struct usbnet		*dev = entry->dev;

	if (urb->status == 0) {
		if (!(dev->driver_info->flags & FLAG_MULTI_PACKET))
			dev->net->stats.tx_packets++;
		dev->net->stats.tx_bytes += entry->length;
	} else {
		dev->net->stats.tx_errors++;

		switch (urb->status) {
		case -EPIPE:
			usbnet_defer_kevent (dev, EVENT_TX_HALT);
			break;

		/* software-driven interface shutdown */
		case -ECONNRESET:		// async unlink
		case -ESHUTDOWN:		// hardware gone
			break;

		// like rx, tx gets controller i/o faults during khubd delays
		// and so it uses the same throttling mechanism.
		case -EPROTO:
		case -ETIME:
		case -EILSEQ:
			usb_mark_last_busy(dev->udev);
			if (!timer_pending (&dev->delay)) {
				mod_timer (&dev->delay,
					jiffies + THROTTLE_JIFFIES);
				netif_dbg(dev, link, dev->net,
					  "tx throttle %d\n", urb->status);
			}
			netif_stop_queue (dev->net);
			break;
		default:
			netif_dbg(dev, tx_err, dev->net,
				  "tx err %d\n", entry->urb->status);
			break;
		}
	}

	usb_autopm_put_interface_async(dev->intf);
	(void) defer_bh(dev, skb, &dev->txq, tx_done);
}

/*-------------------------------------------------------------------------*/

void usbnet_tx_timeout (struct net_device *net)
{
	struct usbnet		*dev = netdev_priv(net);

	unlink_urbs (dev, &dev->txq);
	tasklet_schedule (&dev->bh);

	// FIXME: device recovery -- reset?
}
EXPORT_SYMBOL_GPL(usbnet_tx_timeout);

/*-------------------------------------------------------------------------*/

netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
				     struct net_device *net)
{
	struct usbnet		*dev = netdev_priv(net);
	int			length;
	struct urb		*urb = NULL;
	struct skb_data		*entry;
	struct driver_info	*info = dev->driver_info;
	unsigned long		flags;
	int retval;

	if (skb)
		skb_tx_timestamp(skb);

	// some devices want funky USB-level framing, for
	// win32 driver (usually) and/or hardware quirks
	if (info->tx_fixup) {
		skb = info->tx_fixup (dev, skb, GFP_ATOMIC);
		if (!skb) {
			if (netif_msg_tx_err(dev)) {
				netif_dbg(dev, tx_err, dev->net, "can't tx_fixup skb\n");
				goto drop;
			} else {
				/* cdc_ncm collected packet; waits for more */
				goto not_drop;
			}
		}
	}
	length = skb->len;

	if (!(urb = usb_alloc_urb (0, GFP_ATOMIC))) {
		netif_dbg(dev, tx_err, dev->net, "no urb\n");
		goto drop;
	}

	entry = (struct skb_data *) skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->length = length;

	usb_fill_bulk_urb (urb, dev->udev, dev->out,
			skb->data, skb->len, tx_complete, skb);

	/* don't assume the hardware handles USB_ZERO_PACKET
	 * NOTE:  strictly conforming cdc-ether devices should expect
	 * the ZLP here, but ignore the one-byte packet.
	 * NOTE2: CDC NCM specification is different from CDC ECM when
	 * handling ZLP/short packets, so cdc_ncm driver will make short
	 * packet itself if needed.
	 */
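	/* Worked example (assuming a typical high speed bulk maxpacket of
	 * 512 bytes): a 1024-byte frame would otherwise end exactly on a
	 * packet boundary, so the device could not tell where the transfer
	 * ends.  With FLAG_SEND_ZLP the URB is flagged URB_ZERO_PACKET and
	 * the host controller appends a zero-length packet; without it,
	 * the buffer below is grown by one byte so the final USB packet is
	 * short and terminates the transfer.
	 */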
	if (length % dev->maxpacket == 0) {
		if (!(info->flags & FLAG_SEND_ZLP)) {
			if (!(info->flags & FLAG_MULTI_PACKET)) {
				urb->transfer_buffer_length++;
				if (skb_tailroom(skb)) {
					skb->data[skb->len] = 0;
					__skb_put(skb, 1);
				}
			}
		} else
			urb->transfer_flags |= URB_ZERO_PACKET;
	}

	spin_lock_irqsave(&dev->txq.lock, flags);
	retval = usb_autopm_get_interface_async(dev->intf);
	if (retval < 0) {
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		goto drop;
	}

#ifdef CONFIG_PM
	/* if this triggers the device is still asleep */
	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		/* transmission will be done in resume */
		usb_anchor_urb(urb, &dev->deferred);
		/* no use to process more packets */
		netif_stop_queue(net);
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		netdev_dbg(dev->net, "Delaying transmission for resumption\n");
		goto deferred;
	}
#endif

	switch ((retval = usb_submit_urb (urb, GFP_ATOMIC))) {
	case -EPIPE:
		netif_stop_queue (net);
		usbnet_defer_kevent (dev, EVENT_TX_HALT);
		usb_autopm_put_interface_async(dev->intf);
		break;
	default:
		usb_autopm_put_interface_async(dev->intf);
		netif_dbg(dev, tx_err, dev->net,
			  "tx: submit urb err %d\n", retval);
		break;
	case 0:
		net->trans_start = jiffies;
		__usbnet_queue_skb(&dev->txq, skb, tx_start);
		if (dev->txq.qlen >= TX_QLEN (dev))
			netif_stop_queue (net);
	}
	spin_unlock_irqrestore (&dev->txq.lock, flags);

	if (retval) {
		netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", retval);
drop:
		dev->net->stats.tx_dropped++;
not_drop:
		if (skb)
			dev_kfree_skb_any (skb);
		usb_free_urb (urb);
	} else
		netif_dbg(dev, tx_queued, dev->net,
			  "> tx, len %d, type 0x%x\n", length, skb->protocol);
#ifdef CONFIG_PM
deferred:
#endif
	return NETDEV_TX_OK;
}
EXPORT_SYMBOL_GPL(usbnet_start_xmit);

static void rx_alloc_submit(struct usbnet *dev, gfp_t flags)
{
	struct urb	*urb;
	int		i;

	/* don't refill the queue all at once */
	for (i = 0; i < 10 && dev->rxq.qlen < RX_QLEN(dev); i++) {
		urb = usb_alloc_urb(0, flags);
		if (urb != NULL) {
			if (rx_submit(dev, urb, flags) == -ENOLINK)
				return;
		}
	}
}

/*-------------------------------------------------------------------------*/

// tasklet (work deferred from completions, in_irq) or timer

static void usbnet_bh (unsigned long param)
{
	struct usbnet		*dev = (struct usbnet *) param;
	struct sk_buff		*skb;
	struct skb_data		*entry;

	while ((skb = skb_dequeue (&dev->done))) {
		entry = (struct skb_data *) skb->cb;
		switch (entry->state) {
		case rx_done:
			entry->state = rx_cleanup;
			rx_process (dev, skb);
			continue;
		case tx_done:
		case rx_cleanup:
			usb_free_urb (entry->urb);
			dev_kfree_skb (skb);
			continue;
		default:
			netdev_dbg(dev->net, "bogus skb state %d\n", entry->state);
		}
	}

	// waiting for all pending urbs to complete?
	if (dev->wait) {
		if ((dev->txq.qlen + dev->rxq.qlen + dev->done.qlen) == 0) {
			wake_up (dev->wait);
		}

	// or are we maybe short a few urbs?
	} else if (netif_running (dev->net) &&
		   netif_device_present (dev->net) &&
		   !timer_pending (&dev->delay) &&
		   !test_bit (EVENT_RX_HALT, &dev->flags)) {
		int	temp = dev->rxq.qlen;

		if (temp < RX_QLEN(dev)) {
			rx_alloc_submit(dev, GFP_ATOMIC);
			if (temp != dev->rxq.qlen)
				netif_dbg(dev, link, dev->net,
					  "rxqlen %d --> %d\n",
					  temp, dev->rxq.qlen);
			if (dev->rxq.qlen < RX_QLEN(dev))
				tasklet_schedule (&dev->bh);
		}
		if (dev->txq.qlen < TX_QLEN (dev))
			netif_wake_queue (dev->net);
	}
}


/*-------------------------------------------------------------------------
 *
 * USB Device Driver support
 *
 *-------------------------------------------------------------------------*/

// precondition: never called in_interrupt

void usbnet_disconnect (struct usb_interface *intf)
{
	struct usbnet		*dev;
	struct usb_device	*xdev;
	struct net_device	*net;

	dev = usb_get_intfdata(intf);
	usb_set_intfdata(intf, NULL);
	if (!dev)
		return;

	xdev = interface_to_usbdev (intf);

	netif_info(dev, probe, dev->net, "unregister '%s' usb-%s-%s, %s\n",
		   intf->dev.driver->name,
		   xdev->bus->bus_name, xdev->devpath,
		   dev->driver_info->description);

	net = dev->net;
	unregister_netdev (net);

	cancel_work_sync(&dev->kevent);

	if (dev->driver_info->unbind)
		dev->driver_info->unbind (dev, intf);

	usb_kill_urb(dev->interrupt);
	usb_free_urb(dev->interrupt);

	free_netdev(net);
}
EXPORT_SYMBOL_GPL(usbnet_disconnect);

static const struct net_device_ops usbnet_netdev_ops = {
	.ndo_open		= usbnet_open,
	.ndo_stop		= usbnet_stop,
	.ndo_start_xmit		= usbnet_start_xmit,
	.ndo_tx_timeout		= usbnet_tx_timeout,
	.ndo_change_mtu		= usbnet_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

/*-------------------------------------------------------------------------*/

// precondition: never called in_interrupt

static struct device_type wlan_type = {
	.name	= "wlan",
};

static struct device_type wwan_type = {
	.name	= "wwan",
};

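/*
 * For orientation, a hypothetical minidriver (none of the my_* identifiers
 * below exist in this file) would plug into the probe/disconnect and
 * suspend/resume entry points exported here roughly like so:
 *
 *	static const struct driver_info my_info = {
 *		.description	= "My USB Ethernet adapter",
 *		.flags		= FLAG_ETHER,
 *		.bind		= my_bind,	// typically calls usbnet_get_endpoints()
 *	};
 *
 *	static const struct usb_device_id my_ids[] = {
 *		{ USB_DEVICE(0x1234, 0x5678),
 *		  .driver_info = (unsigned long) &my_info, },
 *		{ },
 *	};
 *	MODULE_DEVICE_TABLE(usb, my_ids);
 *
 *	static struct usb_driver my_driver = {
 *		.name		= "my_usbnet_minidriver",
 *		.id_table	= my_ids,
 *		.probe		= usbnet_probe,
 *		.disconnect	= usbnet_disconnect,
 *		.suspend	= usbnet_suspend,
 *		.resume		= usbnet_resume,
 *	};
 *	module_usb_driver(my_driver);
 */
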
int
usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
{
	struct usbnet			*dev;
	struct net_device		*net;
	struct usb_host_interface	*interface;
	struct driver_info		*info;
	struct usb_device		*xdev;
	int				status;
	const char			*name;
	struct usb_driver		*driver = to_usb_driver(udev->dev.driver);

	/* usbnet already took usb runtime pm, so have to enable the feature
	 * for usb interface, otherwise usb_autopm_get_interface may return
	 * failure if USB_SUSPEND(RUNTIME_PM) is enabled.
	 */
	if (!driver->supports_autosuspend) {
		driver->supports_autosuspend = 1;
		pm_runtime_enable(&udev->dev);
	}

	name = udev->dev.driver->name;
	info = (struct driver_info *) prod->driver_info;
	if (!info) {
		dev_dbg (&udev->dev, "blacklisted by %s\n", name);
		return -ENODEV;
	}
	xdev = interface_to_usbdev (udev);
	interface = udev->cur_altsetting;

	status = -ENOMEM;

	// set up our own records
	net = alloc_etherdev(sizeof(*dev));
	if (!net)
		goto out;

	/* netdev_printk() needs this so do it as early as possible */
	SET_NETDEV_DEV(net, &udev->dev);

	dev = netdev_priv(net);
	dev->udev = xdev;
	dev->intf = udev;
	dev->driver_info = info;
	dev->driver_name = name;
	dev->msg_enable = netif_msg_init (msg_level, NETIF_MSG_DRV
				| NETIF_MSG_PROBE | NETIF_MSG_LINK);
	skb_queue_head_init (&dev->rxq);
	skb_queue_head_init (&dev->txq);
	skb_queue_head_init (&dev->done);
	skb_queue_head_init(&dev->rxq_pause);
	dev->bh.func = usbnet_bh;
	dev->bh.data = (unsigned long) dev;
	INIT_WORK (&dev->kevent, kevent);
	init_usb_anchor(&dev->deferred);
	dev->delay.function = usbnet_bh;
	dev->delay.data = (unsigned long) dev;
	init_timer (&dev->delay);
	mutex_init (&dev->phy_mutex);

	dev->net = net;
	strcpy (net->name, "usb%d");
	memcpy (net->dev_addr, node_id, sizeof node_id);

	/* rx and tx sides can use different message sizes;
	 * bind() should set rx_urb_size in that case.
	 */
	dev->hard_mtu = net->mtu + net->hard_header_len;
#if 0
// dma_supported() is deeply broken on almost all architectures
	// possible with some EHCI controllers
	if (dma_supported (&udev->dev, DMA_BIT_MASK(64)))
		net->features |= NETIF_F_HIGHDMA;
#endif

	net->netdev_ops = &usbnet_netdev_ops;
	net->watchdog_timeo = TX_TIMEOUT_JIFFIES;
	net->ethtool_ops = &usbnet_ethtool_ops;

	// allow device-specific bind/init procedures
	// NOTE net->name still not usable ...
	if (info->bind) {
		status = info->bind (dev, udev);
		if (status < 0)
			goto out1;

		// heuristic:  "usb%d" for links we know are two-host,
		// else "eth%d" when there's reasonable doubt.  userspace
		// can rename the link if it knows better.
		if ((dev->driver_info->flags & FLAG_ETHER) != 0 &&
		    ((dev->driver_info->flags & FLAG_POINTTOPOINT) == 0 ||
		     (net->dev_addr [0] & 0x02) == 0))
			strcpy (net->name, "eth%d");
		/* WLAN devices should always be named "wlan%d" */
		if ((dev->driver_info->flags & FLAG_WLAN) != 0)
			strcpy(net->name, "wlan%d");
		/* WWAN devices should always be named "wwan%d" */
		if ((dev->driver_info->flags & FLAG_WWAN) != 0)
			strcpy(net->name, "wwan%d");

		/* maybe the remote can't receive an Ethernet MTU */
		if (net->mtu > (dev->hard_mtu - net->hard_header_len))
			net->mtu = dev->hard_mtu - net->hard_header_len;
	} else if (!info->in || !info->out)
		status = usbnet_get_endpoints (dev, udev);
	else {
		dev->in = usb_rcvbulkpipe (xdev, info->in);
		dev->out = usb_sndbulkpipe (xdev, info->out);
		if (!(info->flags & FLAG_NO_SETINT))
			status = usb_set_interface (xdev,
				interface->desc.bInterfaceNumber,
				interface->desc.bAlternateSetting);
		else
			status = 0;

	}
	if (status >= 0 && dev->status)
		status = init_status (dev, udev);
	if (status < 0)
		goto out3;

	if (!dev->rx_urb_size)
		dev->rx_urb_size = dev->hard_mtu;
	dev->maxpacket = usb_maxpacket (dev->udev, dev->out, 1);

	if ((dev->driver_info->flags & FLAG_WLAN) != 0)
		SET_NETDEV_DEVTYPE(net, &wlan_type);
	if ((dev->driver_info->flags & FLAG_WWAN) != 0)
		SET_NETDEV_DEVTYPE(net, &wwan_type);

	status = register_netdev (net);
	if (status)
		goto out4;
	netif_info(dev, probe, dev->net,
		   "register '%s' at usb-%s-%s, %s, %pM\n",
		   udev->dev.driver->name,
		   xdev->bus->bus_name, xdev->devpath,
		   dev->driver_info->description,
		   net->dev_addr);

	// ok, it's ready to go.
	usb_set_intfdata (udev, dev);

	netif_device_attach (net);

	if (dev->driver_info->flags & FLAG_LINK_INTR)
		netif_carrier_off(net);

	return 0;

out4:
	usb_free_urb(dev->interrupt);
out3:
	if (info->unbind)
		info->unbind (dev, udev);
out1:
	free_netdev(net);
out:
	return status;
}
EXPORT_SYMBOL_GPL(usbnet_probe);

/*-------------------------------------------------------------------------*/

/*
 * suspend the whole driver as soon as the first interface is suspended
 * resume only when the last interface is resumed
 */

int usbnet_suspend (struct usb_interface *intf, pm_message_t message)
{
	struct usbnet		*dev = usb_get_intfdata(intf);

	if (!dev->suspend_count++) {
		spin_lock_irq(&dev->txq.lock);
		/* don't autosuspend while transmitting */
		if (dev->txq.qlen && PMSG_IS_AUTO(message)) {
			dev->suspend_count--;
			spin_unlock_irq(&dev->txq.lock);
			return -EBUSY;
		} else {
			set_bit(EVENT_DEV_ASLEEP, &dev->flags);
			spin_unlock_irq(&dev->txq.lock);
		}
		/*
		 * accelerate emptying of the rx and tx queues, to avoid
		 * having everything error out.
		 */
		netif_device_detach (dev->net);
		usbnet_terminate_urbs(dev);
		usb_kill_urb(dev->interrupt);

		/*
		 * reattach so runtime management can use and
		 * wake the device
		 */
		netif_device_attach (dev->net);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(usbnet_suspend);

int usbnet_resume (struct usb_interface *intf)
{
	struct usbnet		*dev = usb_get_intfdata(intf);
	struct sk_buff		*skb;
	struct urb		*res;
	int			retval;

	if (!--dev->suspend_count) {
		/* resume interrupt URBs */
		if (dev->interrupt && test_bit(EVENT_DEV_OPEN, &dev->flags))
			usb_submit_urb(dev->interrupt, GFP_NOIO);

		spin_lock_irq(&dev->txq.lock);
		while ((res = usb_get_from_anchor(&dev->deferred))) {

			skb = (struct sk_buff *)res->context;
			retval = usb_submit_urb(res, GFP_ATOMIC);
			if (retval < 0) {
				dev_kfree_skb_any(skb);
				usb_free_urb(res);
				usb_autopm_put_interface_async(dev->intf);
			} else {
				dev->net->trans_start = jiffies;
				__skb_queue_tail(&dev->txq, skb);
			}
		}

		smp_mb();
		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
		spin_unlock_irq(&dev->txq.lock);

		if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
			/* handle remote wakeup ASAP */
			if (!dev->wait &&
				netif_device_present(dev->net) &&
				!timer_pending(&dev->delay) &&
				!test_bit(EVENT_RX_HALT, &dev->flags))
					rx_alloc_submit(dev, GFP_NOIO);

			if (!(dev->txq.qlen >= TX_QLEN(dev)))
				netif_tx_wake_all_queues(dev->net);
			tasklet_schedule (&dev->bh);
		}
	}
	return 0;
}
EXPORT_SYMBOL_GPL(usbnet_resume);


/*-------------------------------------------------------------------------*/

static int __init usbnet_init(void)
{
	/* Compiler should optimize this out. */
	BUILD_BUG_ON(
		FIELD_SIZEOF(struct sk_buff, cb) < sizeof(struct skb_data));

	eth_random_addr(node_id);
	return 0;
}
module_init(usbnet_init);

static void __exit usbnet_exit(void)
{
}
module_exit(usbnet_exit);

MODULE_AUTHOR("David Brownell");
MODULE_DESCRIPTION("USB network driver framework");
MODULE_LICENSE("GPL");