/*
 * USB Network driver infrastructure
 * Copyright (C) 2000-2005 by David Brownell
 * Copyright (C) 2003-2005 David Hollis <dhollis@davehollis.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/*
 * This is a generic "USB networking" framework that works with several
 * kinds of full and high speed networking devices: host-to-host cables,
 * smart usb peripherals, and actual Ethernet adapters.
 *
 * These devices usually differ in terms of control protocols (if they
 * even have one!) and sometimes they define new framing to wrap or batch
 * Ethernet packets.  Otherwise, they talk to USB pretty much the same,
 * so interface (un)binding, endpoint I/O queues, fault handling, and other
 * issues can usefully be addressed by this framework.
 */

// #define	DEBUG			// error path messages, extra info
// #define	VERBOSE			// more; success messages

#include <linux/module.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ctype.h>
#include <linux/ethtool.h>
#include <linux/workqueue.h>
#include <linux/mii.h>
#include <linux/usb.h>
#include <linux/usb/usbnet.h>

#define DRIVER_VERSION		"22-Aug-2005"


/*-------------------------------------------------------------------------*/

/*
 * Nineteen USB 1.1 max size bulk transactions per frame (ms), max.
 * Several dozen bytes of IPv4 data can fit in two such transactions.
 * One maximum size Ethernet packet takes twenty four of them.
 * For high speed, each frame comfortably fits almost 36 max size
 * Ethernet packets (so queues should be bigger).
 *
 * REVISIT qlens should be members of 'struct usbnet'; the goal is to
 * let the USB host controller be busy for 5msec or more before an irq
 * is required, under load.  Jumbograms change the equation.
 */
#define RX_MAX_QUEUE_MEMORY	(60 * 1518)
#define RX_QLEN(dev)	(((dev)->udev->speed == USB_SPEED_HIGH) ? \
			(RX_MAX_QUEUE_MEMORY / (dev)->rx_urb_size) : 4)
#define TX_QLEN(dev)	(((dev)->udev->speed == USB_SPEED_HIGH) ? \
			(RX_MAX_QUEUE_MEMORY / (dev)->hard_mtu) : 4)

// reawaken network queue this soon after stopping; else watchdog barks
#define TX_TIMEOUT_JIFFIES	(5 * HZ)
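/* Worked example for the queue depth macros above (illustrative numbers
 * only): if rx_urb_size is 1518 bytes (one full Ethernet frame), a high
 * speed device gets RX_QLEN = (60 * 1518) / 1518 = 60 queued rx URBs,
 * while full speed devices fall back to a fixed depth of 4.  If a
 * minidriver's bind() were to raise rx_urb_size to, say, 16384 for
 * jumbograms, the same memory budget would yield only
 * (60 * 1518) / 16384 = 5 rx URBs.
 */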
// throttle rx/tx briefly after some faults, so khubd might disconnect()
// us (it polls at HZ/4 usually) before we report too many false errors.
#define THROTTLE_JIFFIES	(HZ / 8)

// between wakeups
#define UNLINK_TIMEOUT_MS	3

/*-------------------------------------------------------------------------*/

// randomly generated ethernet address
static u8 node_id[ETH_ALEN];

static const char driver_name[] = "usbnet";

/* use ethtool to change the level for any given device */
static int msg_level = -1;
module_param(msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Override default message level");

/*-------------------------------------------------------------------------*/

/* handles CDC Ethernet and many other network "bulk data" interfaces */
int usbnet_get_endpoints(struct usbnet *dev, struct usb_interface *intf)
{
	int				tmp;
	struct usb_host_interface	*alt = NULL;
	struct usb_host_endpoint	*in = NULL, *out = NULL;
	struct usb_host_endpoint	*status = NULL;

	for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
		unsigned	ep;

		in = out = status = NULL;
		alt = intf->altsetting + tmp;

		/* take the first altsetting with in-bulk + out-bulk;
		 * remember any status endpoint, just in case;
		 * ignore other endpoints and altsettings.
		 */
		for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
			struct usb_host_endpoint	*e;
			int				intr = 0;

			e = alt->endpoint + ep;
			switch (e->desc.bmAttributes) {
			case USB_ENDPOINT_XFER_INT:
				if (!usb_endpoint_dir_in(&e->desc))
					continue;
				intr = 1;
				/* FALLTHROUGH */
			case USB_ENDPOINT_XFER_BULK:
				break;
			default:
				continue;
			}
			if (usb_endpoint_dir_in(&e->desc)) {
				if (!intr && !in)
					in = e;
				else if (intr && !status)
					status = e;
			} else {
				if (!out)
					out = e;
			}
		}
		if (in && out)
			break;
	}
	if (!alt || !in || !out)
		return -EINVAL;

	if (alt->desc.bAlternateSetting != 0
			|| !(dev->driver_info->flags & FLAG_NO_SETINT)) {
		tmp = usb_set_interface(dev->udev, alt->desc.bInterfaceNumber,
				alt->desc.bAlternateSetting);
		if (tmp < 0)
			return tmp;
	}

	dev->in = usb_rcvbulkpipe(dev->udev,
			in->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
	dev->out = usb_sndbulkpipe(dev->udev,
			out->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
	dev->status = status;
	return 0;
}
EXPORT_SYMBOL_GPL(usbnet_get_endpoints);

static u8 nibble(unsigned char c)
{
	if (likely(isdigit(c)))
		return c - '0';
	c = toupper(c);
	if (likely(isxdigit(c)))
		return 10 + c - 'A';
	return 0;
}

int usbnet_get_ethernet_addr(struct usbnet *dev, int iMACAddress)
{
	int		tmp, i;
	unsigned char	buf[13];

	tmp = usb_string(dev->udev, iMACAddress, buf, sizeof buf);
	if (tmp != 12) {
		dev_dbg(&dev->udev->dev,
			"bad MAC string %d fetch, %d\n", iMACAddress, tmp);
		if (tmp >= 0)
			tmp = -EINVAL;
		return tmp;
	}
	for (i = tmp = 0; i < 6; i++, tmp += 2)
		dev->net->dev_addr[i] =
			(nibble(buf[tmp]) << 4) + nibble(buf[tmp + 1]);
	return 0;
}
EXPORT_SYMBOL_GPL(usbnet_get_ethernet_addr);
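/* Hedged usage sketch (not part of this file's API surface): a typical
 * minidriver bind() routine wires up the endpoints and MAC address with
 * the two helpers above.  "example_bind" and "EXAMPLE_IMAC_STRING_ID"
 * are hypothetical placeholders.
 *
 *	static int example_bind(struct usbnet *dev, struct usb_interface *intf)
 *	{
 *		int status;
 *
 *		// pick the bulk in/out (and optional status) endpoints
 *		status = usbnet_get_endpoints(dev, intf);
 *		if (status < 0)
 *			return status;
 *
 *		// read the MAC address from a string descriptor, if the
 *		// device reports one (common for CDC Ethernet style firmware)
 *		return usbnet_get_ethernet_addr(dev, EXAMPLE_IMAC_STRING_ID);
 *	}
 */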
static void intr_complete(struct urb *urb);

static int init_status(struct usbnet *dev, struct usb_interface *intf)
{
	char		*buf = NULL;
	unsigned	pipe = 0;
	unsigned	maxp;
	unsigned	period;

	if (!dev->driver_info->status)
		return 0;

	pipe = usb_rcvintpipe(dev->udev,
			dev->status->desc.bEndpointAddress
				& USB_ENDPOINT_NUMBER_MASK);
	maxp = usb_maxpacket(dev->udev, pipe, 0);

	/* avoid 1 msec chatter:  min 8 msec poll rate */
	period = max((int) dev->status->desc.bInterval,
			(dev->udev->speed == USB_SPEED_HIGH) ? 7 : 3);

	buf = kmalloc(maxp, GFP_KERNEL);
	if (buf) {
		dev->interrupt = usb_alloc_urb(0, GFP_KERNEL);
		if (!dev->interrupt) {
			kfree(buf);
			return -ENOMEM;
		} else {
			usb_fill_int_urb(dev->interrupt, dev->udev, pipe,
				buf, maxp, intr_complete, dev, period);
			dev_dbg(&intf->dev,
				"status ep%din, %d bytes period %d\n",
				usb_pipeendpoint(pipe), maxp, period);
		}
	}
	return 0;
}

/* Passes this packet up the stack, updating its accounting.
 * Some link protocols batch packets, so their rx_fixup paths
 * can return clones as well as just modify the original skb.
 */
void usbnet_skb_return(struct usbnet *dev, struct sk_buff *skb)
{
	int	status;

	skb->protocol = eth_type_trans(skb, dev->net);
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += skb->len;

	if (netif_msg_rx_status(dev))
		devdbg(dev, "< rx, len %zu, type 0x%x",
			skb->len + sizeof(struct ethhdr), skb->protocol);
	memset(skb->cb, 0, sizeof(struct skb_data));
	status = netif_rx(skb);
	if (status != NET_RX_SUCCESS && netif_msg_rx_err(dev))
		devdbg(dev, "netif_rx status %d", status);
}
EXPORT_SYMBOL_GPL(usbnet_skb_return);


/*-------------------------------------------------------------------------
 *
 * Network Device Driver (peer link to "Host Device", from USB host)
 *
 *-------------------------------------------------------------------------*/

int usbnet_change_mtu(struct net_device *net, int new_mtu)
{
	struct usbnet	*dev = netdev_priv(net);
	int		ll_mtu = new_mtu + net->hard_header_len;
	int		old_hard_mtu = dev->hard_mtu;
	int		old_rx_urb_size = dev->rx_urb_size;

	if (new_mtu <= 0)
		return -EINVAL;
	// no second zero-length packet read wanted after mtu-sized packets
	if ((ll_mtu % dev->maxpacket) == 0)
		return -EDOM;
	net->mtu = new_mtu;

	dev->hard_mtu = net->mtu + net->hard_header_len;
	if (dev->rx_urb_size == old_hard_mtu) {
		dev->rx_urb_size = dev->hard_mtu;
		if (dev->rx_urb_size > old_rx_urb_size)
			usbnet_unlink_rx_urbs(dev);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(usbnet_change_mtu);

/*-------------------------------------------------------------------------*/

/* some LK 2.4 HCDs oopsed if we freed or resubmitted urbs from
 * completion callbacks.  2.5 should have fixed those bugs...
 */

static void defer_bh(struct usbnet *dev, struct sk_buff *skb, struct sk_buff_head *list)
{
	unsigned long	flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_unlink(skb, list);
	spin_unlock(&list->lock);
	spin_lock(&dev->done.lock);
	__skb_queue_tail(&dev->done, skb);
	if (dev->done.qlen == 1)
		tasklet_schedule(&dev->bh);
	spin_unlock_irqrestore(&dev->done.lock, flags);
}
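/* Hedged sketch (assumptions: a hypothetical link protocol that prefixes
 * each Ethernet frame with a 2 byte little-endian length word).  It shows
 * the kind of rx_fixup hook that usbnet_skb_return() above is written
 * for: the minidriver unwraps its framing, hands each inner frame up via
 * usbnet_skb_return(), and returns nonzero on success.
 *
 *	static int example_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
 *	{
 *		while (skb->len > 2) {
 *			u16 len = skb->data[0] | (skb->data[1] << 8);
 *
 *			if (len + 2 > skb->len)
 *				return 0;	// malformed batch; drop it
 *			if (len + 2 == skb->len) {
 *				skb_pull(skb, 2);
 *				return 1;	// last frame: caller passes it up
 *			} else {
 *				struct sk_buff *frame;
 *
 *				frame = skb_clone(skb, GFP_ATOMIC);
 *				if (!frame)
 *					return 0;
 *				skb_pull(frame, 2);
 *				skb_trim(frame, len);
 *				usbnet_skb_return(dev, frame);
 *				skb_pull(skb, 2 + len);
 *			}
 *		}
 *		return 0;
 *	}
 */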
/* some work can't be done in tasklets, so we use keventd
 *
 * NOTE:  annoying asymmetry:  if it's active, schedule_work() fails,
 * but tasklet_schedule() doesn't.  hope the failure is rare.
 */
void usbnet_defer_kevent(struct usbnet *dev, int work)
{
	set_bit(work, &dev->flags);
	if (!schedule_work(&dev->kevent))
		deverr(dev, "kevent %d may have been dropped", work);
	else
		devdbg(dev, "kevent %d scheduled", work);
}
EXPORT_SYMBOL_GPL(usbnet_defer_kevent);

/*-------------------------------------------------------------------------*/

static void rx_complete(struct urb *urb);

static void rx_submit(struct usbnet *dev, struct urb *urb, gfp_t flags)
{
	struct sk_buff		*skb;
	struct skb_data		*entry;
	int			retval = 0;
	unsigned long		lockflags;
	size_t			size = dev->rx_urb_size;

	if ((skb = alloc_skb(size + NET_IP_ALIGN, flags)) == NULL) {
		if (netif_msg_rx_err(dev))
			devdbg(dev, "no rx skb");
		usbnet_defer_kevent(dev, EVENT_RX_MEMORY);
		usb_free_urb(urb);
		return;
	}
	skb_reserve(skb, NET_IP_ALIGN);

	entry = (struct skb_data *) skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->state = rx_start;
	entry->length = 0;

	usb_fill_bulk_urb(urb, dev->udev, dev->in,
		skb->data, size, rx_complete, skb);

	spin_lock_irqsave(&dev->rxq.lock, lockflags);

	if (netif_running(dev->net)
			&& netif_device_present(dev->net)
			&& !test_bit(EVENT_RX_HALT, &dev->flags)) {
		switch (retval = usb_submit_urb(urb, GFP_ATOMIC)) {
		case -EPIPE:
			usbnet_defer_kevent(dev, EVENT_RX_HALT);
			break;
		case -ENOMEM:
			usbnet_defer_kevent(dev, EVENT_RX_MEMORY);
			break;
		case -ENODEV:
			if (netif_msg_ifdown(dev))
				devdbg(dev, "device gone");
			netif_device_detach(dev->net);
			break;
		default:
			if (netif_msg_rx_err(dev))
				devdbg(dev, "rx submit, %d", retval);
			tasklet_schedule(&dev->bh);
			break;
		case 0:
			__skb_queue_tail(&dev->rxq, skb);
		}
	} else {
		if (netif_msg_ifdown(dev))
			devdbg(dev, "rx: stopped");
		retval = -ENOLINK;
	}
	spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
	if (retval) {
		dev_kfree_skb_any(skb);
		usb_free_urb(urb);
	}
}


/*-------------------------------------------------------------------------*/

static inline void rx_process(struct usbnet *dev, struct sk_buff *skb)
{
	if (dev->driver_info->rx_fixup
			&& !dev->driver_info->rx_fixup(dev, skb))
		goto error;
	// else network stack removes extra byte if we forced a short packet

	if (skb->len)
		usbnet_skb_return(dev, skb);
	else {
		if (netif_msg_rx_err(dev))
			devdbg(dev, "drop");
error:
		dev->stats.rx_errors++;
		skb_queue_tail(&dev->done, skb);
	}
}

/*-------------------------------------------------------------------------*/
static void rx_complete(struct urb *urb)
{
	struct sk_buff		*skb = (struct sk_buff *) urb->context;
	struct skb_data		*entry = (struct skb_data *) skb->cb;
	struct usbnet		*dev = entry->dev;
	int			urb_status = urb->status;

	skb_put(skb, urb->actual_length);
	entry->state = rx_done;
	entry->urb = NULL;

	switch (urb_status) {
	/* success */
	case 0:
		if (skb->len < dev->net->hard_header_len) {
			entry->state = rx_cleanup;
			dev->stats.rx_errors++;
			dev->stats.rx_length_errors++;
			if (netif_msg_rx_err(dev))
				devdbg(dev, "rx length %d", skb->len);
		}
		break;

	/* stalls need manual reset.  this is rare ... except that
	 * when going through USB 2.0 TTs, unplug appears this way.
	 * we avoid the highspeed version of the ETIMEOUT/EILSEQ
	 * storm, recovering as needed.
	 */
	case -EPIPE:
		dev->stats.rx_errors++;
		usbnet_defer_kevent(dev, EVENT_RX_HALT);
		// FALLTHROUGH

	/* software-driven interface shutdown */
	case -ECONNRESET:		/* async unlink */
	case -ESHUTDOWN:		/* hardware gone */
		if (netif_msg_ifdown(dev))
			devdbg(dev, "rx shutdown, code %d", urb_status);
		goto block;

	/* we get controller i/o faults during khubd disconnect() delays.
	 * throttle down resubmits, to avoid log floods; just temporarily,
	 * so we still recover when the fault isn't a khubd delay.
	 */
	case -EPROTO:
	case -ETIME:
	case -EILSEQ:
		dev->stats.rx_errors++;
		if (!timer_pending(&dev->delay)) {
			mod_timer(&dev->delay, jiffies + THROTTLE_JIFFIES);
			if (netif_msg_link(dev))
				devdbg(dev, "rx throttle %d", urb_status);
		}
block:
		entry->state = rx_cleanup;
		entry->urb = urb;
		urb = NULL;
		break;

	/* data overrun ... flush fifo? */
	case -EOVERFLOW:
		dev->stats.rx_over_errors++;
		// FALLTHROUGH

	default:
		entry->state = rx_cleanup;
		dev->stats.rx_errors++;
		if (netif_msg_rx_err(dev))
			devdbg(dev, "rx status %d", urb_status);
		break;
	}

	defer_bh(dev, skb, &dev->rxq);

	if (urb) {
		if (netif_running(dev->net)
				&& !test_bit(EVENT_RX_HALT, &dev->flags)) {
			rx_submit(dev, urb, GFP_ATOMIC);
			return;
		}
		usb_free_urb(urb);
	}
	if (netif_msg_rx_err(dev))
		devdbg(dev, "no read resubmitted");
}

static void intr_complete(struct urb *urb)
{
	struct usbnet	*dev = urb->context;
	int		status = urb->status;

	switch (status) {
	/* success */
	case 0:
		dev->driver_info->status(dev, urb);
		break;

	/* software-driven interface shutdown */
	case -ENOENT:		/* urb killed */
	case -ESHUTDOWN:	/* hardware gone */
		if (netif_msg_ifdown(dev))
			devdbg(dev, "intr shutdown, code %d", status);
		return;

	/* NOTE:  not throttling like RX/TX, since this endpoint
	 * already polls infrequently
	 */
	default:
		devdbg(dev, "intr status %d", status);
		break;
	}

	if (!netif_running(dev->net))
		return;

	memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
	status = usb_submit_urb(urb, GFP_ATOMIC);
	if (status != 0 && netif_msg_timer(dev))
		deverr(dev, "intr resubmit --> %d", status);
}

/*-------------------------------------------------------------------------*/

// unlink pending rx/tx; completion handlers do all other cleanup

static int unlink_urbs(struct usbnet *dev, struct sk_buff_head *q)
{
	unsigned long		flags;
	struct sk_buff		*skb, *skbnext;
	int			count = 0;

	spin_lock_irqsave(&q->lock, flags);
	skb_queue_walk_safe(q, skb, skbnext) {
		struct skb_data		*entry;
		struct urb		*urb;
		int			retval;

		entry = (struct skb_data *) skb->cb;
		urb = entry->urb;

		// during some PM-driven resume scenarios,
		// these (async) unlinks complete immediately
		retval = usb_unlink_urb(urb);
		if (retval != -EINPROGRESS && retval != 0)
			devdbg(dev, "unlink urb err, %d", retval);
		else
			count++;
	}
	spin_unlock_irqrestore(&q->lock, flags);
	return count;
}
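/* Hedged sketch (hypothetical minidriver, not part of this file): the
 * ->status() hook that intr_complete() above invokes runs in interrupt
 * context, so a minidriver typically just inspects the notification and
 * defers heavier work.  "example_status" and its 8 byte notification
 * layout are illustrative assumptions only.
 *
 *	static void example_status(struct usbnet *dev, struct urb *urb)
 *	{
 *		u8 *msg = urb->transfer_buffer;
 *
 *		if (urb->actual_length < 8)
 *			return;
 *		if (msg[1] == 0x00)	// hypothetical "link change" code
 *			usbnet_defer_kevent(dev, EVENT_LINK_RESET);
 *	}
 */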
// Flush all pending rx urbs
// minidrivers may need to do this when the MTU changes

void usbnet_unlink_rx_urbs(struct usbnet *dev)
{
	if (netif_running(dev->net)) {
		(void) unlink_urbs(dev, &dev->rxq);
		tasklet_schedule(&dev->bh);
	}
}
EXPORT_SYMBOL_GPL(usbnet_unlink_rx_urbs);

/*-------------------------------------------------------------------------*/

// precondition: never called in_interrupt

int usbnet_stop(struct net_device *net)
{
	struct usbnet		*dev = netdev_priv(net);
	int			temp;
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
	DECLARE_WAITQUEUE(wait, current);

	netif_stop_queue(net);

	if (netif_msg_ifdown(dev))
		devinfo(dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld",
			dev->stats.rx_packets, dev->stats.tx_packets,
			dev->stats.rx_errors, dev->stats.tx_errors
			);

	// ensure there are no more active urbs
	add_wait_queue(&unlink_wakeup, &wait);
	dev->wait = &unlink_wakeup;
	temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);

	// maybe wait for deletions to finish; the bh only wakes us up
	// once rxq, txq, and done have all drained
	while (!skb_queue_empty(&dev->rxq)
			|| !skb_queue_empty(&dev->txq)
			|| !skb_queue_empty(&dev->done)) {
		msleep(UNLINK_TIMEOUT_MS);
		if (netif_msg_ifdown(dev))
			devdbg(dev, "waited for %d urb completions", temp);
	}
	dev->wait = NULL;
	remove_wait_queue(&unlink_wakeup, &wait);

	usb_kill_urb(dev->interrupt);

	/* deferred work (task, timer, softirq) must also stop.
	 * can't flush_scheduled_work() until we drop rtnl (later),
	 * else workers could deadlock; so make workers a NOP.
	 */
	dev->flags = 0;
	del_timer_sync(&dev->delay);
	tasklet_kill(&dev->bh);
	usb_autopm_put_interface(dev->intf);

	return 0;
}
EXPORT_SYMBOL_GPL(usbnet_stop);
/*-------------------------------------------------------------------------*/

// posts reads, and enables write queuing

// precondition: never called in_interrupt

int usbnet_open(struct net_device *net)
{
	struct usbnet		*dev = netdev_priv(net);
	int			retval;
	struct driver_info	*info = dev->driver_info;

	if ((retval = usb_autopm_get_interface(dev->intf)) < 0) {
		if (netif_msg_ifup(dev))
			devinfo(dev,
				"resumption fail (%d) usbnet usb-%s-%s, %s",
				retval,
				dev->udev->bus->bus_name, dev->udev->devpath,
				info->description);
		goto done_nopm;
	}

	// put into "known safe" state
	if (info->reset && (retval = info->reset(dev)) < 0) {
		if (netif_msg_ifup(dev))
			devinfo(dev,
				"open reset fail (%d) usbnet usb-%s-%s, %s",
				retval,
				dev->udev->bus->bus_name, dev->udev->devpath,
				info->description);
		goto done;
	}

	// insist peer be connected
	if (info->check_connect && (retval = info->check_connect(dev)) < 0) {
		if (netif_msg_ifup(dev))
			devdbg(dev, "can't open; %d", retval);
		goto done;
	}

	/* start any status interrupt transfer */
	if (dev->interrupt) {
		retval = usb_submit_urb(dev->interrupt, GFP_KERNEL);
		if (retval < 0) {
			if (netif_msg_ifup(dev))
				deverr(dev, "intr submit %d", retval);
			goto done;
		}
	}

	netif_start_queue(net);
	if (netif_msg_ifup(dev)) {
		char	*framing;

		if (dev->driver_info->flags & FLAG_FRAMING_NC)
			framing = "NetChip";
		else if (dev->driver_info->flags & FLAG_FRAMING_GL)
			framing = "GeneSys";
		else if (dev->driver_info->flags & FLAG_FRAMING_Z)
			framing = "Zaurus";
		else if (dev->driver_info->flags & FLAG_FRAMING_RN)
			framing = "RNDIS";
		else if (dev->driver_info->flags & FLAG_FRAMING_AX)
			framing = "ASIX";
		else
			framing = "simple";

		devinfo(dev, "open: enable queueing "
				"(rx %d, tx %d) mtu %d %s framing",
			(int) RX_QLEN(dev), (int) TX_QLEN(dev), dev->net->mtu,
			framing);
	}

	// delay posting reads until we're fully open
	tasklet_schedule(&dev->bh);
	return retval;
done:
	usb_autopm_put_interface(dev->intf);
done_nopm:
	return retval;
}
EXPORT_SYMBOL_GPL(usbnet_open);

/*-------------------------------------------------------------------------*/

/* ethtool methods; minidrivers may need to add some more, but
 * they'll probably want to use this base set.
 */

int usbnet_get_settings(struct net_device *net, struct ethtool_cmd *cmd)
{
	struct usbnet *dev = netdev_priv(net);

	if (!dev->mii.mdio_read)
		return -EOPNOTSUPP;

	return mii_ethtool_gset(&dev->mii, cmd);
}
EXPORT_SYMBOL_GPL(usbnet_get_settings);

int usbnet_set_settings(struct net_device *net, struct ethtool_cmd *cmd)
{
	struct usbnet *dev = netdev_priv(net);
	int retval;

	if (!dev->mii.mdio_write)
		return -EOPNOTSUPP;

	retval = mii_ethtool_sset(&dev->mii, cmd);

	/* link speed/duplex might have changed */
	if (dev->driver_info->link_reset)
		dev->driver_info->link_reset(dev);

	return retval;
}
EXPORT_SYMBOL_GPL(usbnet_set_settings);

u32 usbnet_get_link(struct net_device *net)
{
	struct usbnet *dev = netdev_priv(net);

	/* If a check_connect is defined, return its result */
	if (dev->driver_info->check_connect)
		return dev->driver_info->check_connect(dev) == 0;

	/* if the device has mii operations, use those */
	if (dev->mii.mdio_read)
		return mii_link_ok(&dev->mii);

	/* Otherwise, dtrt for drivers calling netif_carrier_{on,off} */
	return ethtool_op_get_link(net);
}
EXPORT_SYMBOL_GPL(usbnet_get_link);

int usbnet_nway_reset(struct net_device *net)
{
	struct usbnet *dev = netdev_priv(net);

	if (!dev->mii.mdio_write)
		return -EOPNOTSUPP;

	return mii_nway_restart(&dev->mii);
}
EXPORT_SYMBOL_GPL(usbnet_nway_reset);

void usbnet_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info)
{
	struct usbnet *dev = netdev_priv(net);

	strncpy(info->driver, dev->driver_name, sizeof info->driver);
	strncpy(info->version, DRIVER_VERSION, sizeof info->version);
	strncpy(info->fw_version, dev->driver_info->description,
		sizeof info->fw_version);
	usb_make_path(dev->udev, info->bus_info, sizeof info->bus_info);
}
EXPORT_SYMBOL_GPL(usbnet_get_drvinfo);

u32 usbnet_get_msglevel(struct net_device *net)
{
	struct usbnet *dev = netdev_priv(net);

	return dev->msg_enable;
}
EXPORT_SYMBOL_GPL(usbnet_get_msglevel);

void usbnet_set_msglevel(struct net_device *net, u32 level)
{
	struct usbnet *dev = netdev_priv(net);

	dev->msg_enable = level;
}
EXPORT_SYMBOL_GPL(usbnet_set_msglevel);

/* drivers may override default ethtool_ops in their bind() routine */
static struct ethtool_ops usbnet_ethtool_ops = {
	.get_settings		= usbnet_get_settings,
	.set_settings		= usbnet_set_settings,
	.get_link		= usbnet_get_link,
	.nway_reset		= usbnet_nway_reset,
	.get_drvinfo		= usbnet_get_drvinfo,
	.get_msglevel		= usbnet_get_msglevel,
	.set_msglevel		= usbnet_set_msglevel,
};
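/* Hedged sketch (hypothetical minidriver): a bind() routine that needs
 * extra ethtool hooks usually starts from the defaults above and keeps
 * the generic helpers where they fit.  "example_ethtool_ops" and
 * "example_get_regs_len" are illustrative names only.
 *
 *	static struct ethtool_ops example_ethtool_ops = {
 *		.get_settings	= usbnet_get_settings,
 *		.set_settings	= usbnet_set_settings,
 *		.get_link	= usbnet_get_link,
 *		.nway_reset	= usbnet_nway_reset,
 *		.get_drvinfo	= usbnet_get_drvinfo,
 *		.get_msglevel	= usbnet_get_msglevel,
 *		.set_msglevel	= usbnet_set_msglevel,
 *		.get_regs_len	= example_get_regs_len,	// device specific extra
 *	};
 *
 *	// ... then, inside that minidriver's bind():
 *	//	dev->net->ethtool_ops = &example_ethtool_ops;
 */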
/*-------------------------------------------------------------------------*/

/* work that cannot be done in interrupt context uses keventd.
 *
 * NOTE:  with 2.5 we could do more of this using completion callbacks,
 * especially now that control transfers can be queued.
 */
static void
kevent(struct work_struct *work)
{
	struct usbnet		*dev =
		container_of(work, struct usbnet, kevent);
	int			status;

	/* usb_clear_halt() needs a thread context */
	if (test_bit(EVENT_TX_HALT, &dev->flags)) {
		unlink_urbs(dev, &dev->txq);
		status = usb_clear_halt(dev->udev, dev->out);
		if (status < 0
				&& status != -EPIPE
				&& status != -ESHUTDOWN) {
			if (netif_msg_tx_err(dev))
				deverr(dev, "can't clear tx halt, status %d",
					status);
		} else {
			clear_bit(EVENT_TX_HALT, &dev->flags);
			if (status != -ESHUTDOWN)
				netif_wake_queue(dev->net);
		}
	}
	if (test_bit(EVENT_RX_HALT, &dev->flags)) {
		unlink_urbs(dev, &dev->rxq);
		status = usb_clear_halt(dev->udev, dev->in);
		if (status < 0
				&& status != -EPIPE
				&& status != -ESHUTDOWN) {
			if (netif_msg_rx_err(dev))
				deverr(dev, "can't clear rx halt, status %d",
					status);
		} else {
			clear_bit(EVENT_RX_HALT, &dev->flags);
			tasklet_schedule(&dev->bh);
		}
	}

	/* tasklet could resubmit itself forever if memory is tight */
	if (test_bit(EVENT_RX_MEMORY, &dev->flags)) {
		struct urb	*urb = NULL;

		if (netif_running(dev->net))
			urb = usb_alloc_urb(0, GFP_KERNEL);
		else
			clear_bit(EVENT_RX_MEMORY, &dev->flags);
		if (urb != NULL) {
			clear_bit(EVENT_RX_MEMORY, &dev->flags);
			rx_submit(dev, urb, GFP_KERNEL);
			tasklet_schedule(&dev->bh);
		}
	}

	if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
		struct driver_info	*info = dev->driver_info;
		int			retval = 0;

		clear_bit(EVENT_LINK_RESET, &dev->flags);
		if (info->link_reset && (retval = info->link_reset(dev)) < 0) {
			devinfo(dev, "link reset failed (%d) usbnet usb-%s-%s, %s",
				retval,
				dev->udev->bus->bus_name, dev->udev->devpath,
				info->description);
		}
	}

	if (dev->flags)
		devdbg(dev, "kevent done, flags = 0x%lx",
			dev->flags);
}

/*-------------------------------------------------------------------------*/
static void tx_complete(struct urb *urb)
{
	struct sk_buff		*skb = (struct sk_buff *) urb->context;
	struct skb_data		*entry = (struct skb_data *) skb->cb;
	struct usbnet		*dev = entry->dev;

	if (urb->status == 0) {
		dev->stats.tx_packets++;
		dev->stats.tx_bytes += entry->length;
	} else {
		dev->stats.tx_errors++;

		switch (urb->status) {
		case -EPIPE:
			usbnet_defer_kevent(dev, EVENT_TX_HALT);
			break;

		/* software-driven interface shutdown */
		case -ECONNRESET:		// async unlink
		case -ESHUTDOWN:		// hardware gone
			break;

		// like rx, tx gets controller i/o faults during khubd delays
		// and so it uses the same throttling mechanism.
		case -EPROTO:
		case -ETIME:
		case -EILSEQ:
			if (!timer_pending(&dev->delay)) {
				mod_timer(&dev->delay,
					jiffies + THROTTLE_JIFFIES);
				if (netif_msg_link(dev))
					devdbg(dev, "tx throttle %d",
						urb->status);
			}
			netif_stop_queue(dev->net);
			break;
		default:
			if (netif_msg_tx_err(dev))
				devdbg(dev, "tx err %d", entry->urb->status);
			break;
		}
	}

	urb->dev = NULL;
	entry->state = tx_done;
	defer_bh(dev, skb, &dev->txq);
}

/*-------------------------------------------------------------------------*/

void usbnet_tx_timeout(struct net_device *net)
{
	struct usbnet		*dev = netdev_priv(net);

	unlink_urbs(dev, &dev->txq);
	tasklet_schedule(&dev->bh);

	// FIXME: device recovery -- reset?
}
EXPORT_SYMBOL_GPL(usbnet_tx_timeout);

/*-------------------------------------------------------------------------*/
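/* Hedged sketch (hypothetical framing, mirroring the rx sketch earlier):
 * usbnet_start_xmit() below hands each skb to ->tx_fixup so a minidriver
 * can wrap it in device framing before the bulk-out URB is built.  The
 * 2 byte little-endian length prefix is an illustrative assumption.
 *
 *	static struct sk_buff *example_tx_fixup(struct usbnet *dev,
 *			struct sk_buff *skb, gfp_t flags)
 *	{
 *		u16 len = skb->len;
 *
 *		if (skb_headroom(skb) < 2) {
 *			struct sk_buff *skb2;
 *
 *			skb2 = skb_copy_expand(skb, 2, 0, flags);
 *			dev_kfree_skb_any(skb);
 *			skb = skb2;
 *			if (!skb)
 *				return NULL;	// caller drops the packet
 *		}
 *		skb_push(skb, 2);
 *		skb->data[0] = len;
 *		skb->data[1] = len >> 8;
 *		return skb;
 *	}
 */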
int usbnet_start_xmit(struct sk_buff *skb, struct net_device *net)
{
	struct usbnet		*dev = netdev_priv(net);
	int			length;
	int			retval = NET_XMIT_SUCCESS;
	struct urb		*urb = NULL;
	struct skb_data		*entry;
	struct driver_info	*info = dev->driver_info;
	unsigned long		flags;

	// some devices want funky USB-level framing, for
	// win32 driver (usually) and/or hardware quirks
	if (info->tx_fixup) {
		skb = info->tx_fixup(dev, skb, GFP_ATOMIC);
		if (!skb) {
			if (netif_msg_tx_err(dev))
				devdbg(dev, "can't tx_fixup skb");
			goto drop;
		}
	}
	length = skb->len;

	if (!(urb = usb_alloc_urb(0, GFP_ATOMIC))) {
		if (netif_msg_tx_err(dev))
			devdbg(dev, "no urb");
		goto drop;
	}

	entry = (struct skb_data *) skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->state = tx_start;
	entry->length = length;

	usb_fill_bulk_urb(urb, dev->udev, dev->out,
			skb->data, skb->len, tx_complete, skb);

	/* don't assume the hardware handles USB_ZERO_PACKET
	 * NOTE:  strictly conforming cdc-ether devices should expect
	 * the ZLP here, but ignore the one-byte packet.
	 */
	if ((length % dev->maxpacket) == 0) {
		urb->transfer_buffer_length++;
		if (skb_tailroom(skb)) {
			skb->data[skb->len] = 0;
			__skb_put(skb, 1);
		}
	}

	spin_lock_irqsave(&dev->txq.lock, flags);

	switch ((retval = usb_submit_urb(urb, GFP_ATOMIC))) {
	case -EPIPE:
		netif_stop_queue(net);
		usbnet_defer_kevent(dev, EVENT_TX_HALT);
		break;
	default:
		if (netif_msg_tx_err(dev))
			devdbg(dev, "tx: submit urb err %d", retval);
		break;
	case 0:
		net->trans_start = jiffies;
		__skb_queue_tail(&dev->txq, skb);
		if (dev->txq.qlen >= TX_QLEN(dev))
			netif_stop_queue(net);
	}
	spin_unlock_irqrestore(&dev->txq.lock, flags);

	if (retval) {
		if (netif_msg_tx_err(dev))
			devdbg(dev, "drop, code %d", retval);
drop:
		retval = NET_XMIT_SUCCESS;
		dev->stats.tx_dropped++;
		if (skb)
			dev_kfree_skb_any(skb);
		usb_free_urb(urb);
	} else if (netif_msg_tx_queued(dev)) {
		devdbg(dev, "> tx, len %d, type 0x%x",
			length, skb->protocol);
	}
	return retval;
}
EXPORT_SYMBOL_GPL(usbnet_start_xmit);

/*-------------------------------------------------------------------------*/
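/* Worked example for the zero-length-packet padding above (illustrative
 * numbers): with a high speed bulk endpoint, dev->maxpacket is 512.  A
 * 1024 byte frame ends exactly on a packet boundary, so the transfer
 * would otherwise have to be terminated by a ZLP that not every device
 * handles; instead the URB length becomes 1025 and, when the skb has
 * tailroom, one zero byte of padding is actually sent.  A 1025 byte
 * frame needs no such treatment, since 1025 % 512 != 0.
 */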
// tasklet (work deferred from completions, in_irq) or timer

static void usbnet_bh(unsigned long param)
{
	struct usbnet		*dev = (struct usbnet *) param;
	struct sk_buff		*skb;
	struct skb_data		*entry;

	while ((skb = skb_dequeue(&dev->done))) {
		entry = (struct skb_data *) skb->cb;
		switch (entry->state) {
		case rx_done:
			entry->state = rx_cleanup;
			rx_process(dev, skb);
			continue;
		case tx_done:
		case rx_cleanup:
			usb_free_urb(entry->urb);
			dev_kfree_skb(skb);
			continue;
		default:
			devdbg(dev, "bogus skb state %d", entry->state);
		}
	}

	// waiting for all pending urbs to complete?
	if (dev->wait) {
		if ((dev->txq.qlen + dev->rxq.qlen + dev->done.qlen) == 0) {
			wake_up(dev->wait);
		}

	// or are we maybe short a few urbs?
	} else if (netif_running(dev->net)
			&& netif_device_present(dev->net)
			&& !timer_pending(&dev->delay)
			&& !test_bit(EVENT_RX_HALT, &dev->flags)) {
		int	temp = dev->rxq.qlen;
		int	qlen = RX_QLEN(dev);

		if (temp < qlen) {
			struct urb	*urb;
			int		i;

			// don't refill the queue all at once
			for (i = 0; i < 10 && dev->rxq.qlen < qlen; i++) {
				urb = usb_alloc_urb(0, GFP_ATOMIC);
				if (urb != NULL)
					rx_submit(dev, urb, GFP_ATOMIC);
			}
			if (temp != dev->rxq.qlen && netif_msg_link(dev))
				devdbg(dev, "rxqlen %d --> %d",
						temp, dev->rxq.qlen);
			if (dev->rxq.qlen < qlen)
				tasklet_schedule(&dev->bh);
		}
		if (dev->txq.qlen < TX_QLEN(dev))
			netif_wake_queue(dev->net);
	}
}



/*-------------------------------------------------------------------------
 *
 * USB Device Driver support
 *
 *-------------------------------------------------------------------------*/

// precondition: never called in_interrupt

void usbnet_disconnect(struct usb_interface *intf)
{
	struct usbnet		*dev;
	struct usb_device	*xdev;
	struct net_device	*net;

	dev = usb_get_intfdata(intf);
	usb_set_intfdata(intf, NULL);
	if (!dev)
		return;

	xdev = interface_to_usbdev(intf);

	if (netif_msg_probe(dev))
		devinfo(dev, "unregister '%s' usb-%s-%s, %s",
			intf->dev.driver->name,
			xdev->bus->bus_name, xdev->devpath,
			dev->driver_info->description);

	net = dev->net;
	unregister_netdev(net);

	/* we don't hold rtnl here ... */
	flush_scheduled_work();

	if (dev->driver_info->unbind)
		dev->driver_info->unbind(dev, intf);

	free_netdev(net);
	usb_put_dev(xdev);
}
EXPORT_SYMBOL_GPL(usbnet_disconnect);

static const struct net_device_ops usbnet_netdev_ops = {
	.ndo_open		= usbnet_open,
	.ndo_stop		= usbnet_stop,
	.ndo_start_xmit		= usbnet_start_xmit,
	.ndo_tx_timeout		= usbnet_tx_timeout,
	.ndo_change_mtu		= usbnet_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};
/*-------------------------------------------------------------------------*/

// precondition: never called in_interrupt

int
usbnet_probe(struct usb_interface *udev, const struct usb_device_id *prod)
{
	struct usbnet			*dev;
	struct net_device		*net;
	struct usb_host_interface	*interface;
	struct driver_info		*info;
	struct usb_device		*xdev;
	int				status;
	const char			*name;

	name = udev->dev.driver->name;
	info = (struct driver_info *) prod->driver_info;
	if (!info) {
		dev_dbg(&udev->dev, "blacklisted by %s\n", name);
		return -ENODEV;
	}
	xdev = interface_to_usbdev(udev);
	interface = udev->cur_altsetting;

	usb_get_dev(xdev);

	status = -ENOMEM;

	// set up our own records
	net = alloc_etherdev(sizeof(*dev));
	if (!net) {
		dbg("can't kmalloc dev");
		goto out;
	}

	dev = netdev_priv(net);
	dev->udev = xdev;
	dev->intf = udev;
	dev->driver_info = info;
	dev->driver_name = name;
	dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
				| NETIF_MSG_PROBE | NETIF_MSG_LINK);
	skb_queue_head_init(&dev->rxq);
	skb_queue_head_init(&dev->txq);
	skb_queue_head_init(&dev->done);
	dev->bh.func = usbnet_bh;
	dev->bh.data = (unsigned long) dev;
	INIT_WORK(&dev->kevent, kevent);
	dev->delay.function = usbnet_bh;
	dev->delay.data = (unsigned long) dev;
	init_timer(&dev->delay);
	mutex_init(&dev->phy_mutex);

	dev->net = net;
	strcpy(net->name, "usb%d");
	memcpy(net->dev_addr, node_id, sizeof node_id);

	/* rx and tx sides can use different message sizes;
	 * bind() should set rx_urb_size in that case.
	 */
	dev->hard_mtu = net->mtu + net->hard_header_len;
#if 0
	// dma_supported() is deeply broken on almost all architectures
	// possible with some EHCI controllers
	if (dma_supported(&udev->dev, DMA_BIT_MASK(64)))
		net->features |= NETIF_F_HIGHDMA;
#endif

	net->netdev_ops = &usbnet_netdev_ops;
	net->watchdog_timeo = TX_TIMEOUT_JIFFIES;
	net->ethtool_ops = &usbnet_ethtool_ops;

	// allow device-specific bind/init procedures
	// NOTE net->name still not usable ...
	if (info->bind) {
		status = info->bind(dev, udev);
		if (status < 0)
			goto out1;

		// heuristic:  "usb%d" for links we know are two-host,
		// else "eth%d" when there's reasonable doubt.  userspace
		// can rename the link if it knows better.
		if ((dev->driver_info->flags & FLAG_ETHER) != 0
				&& (net->dev_addr[0] & 0x02) == 0)
			strcpy(net->name, "eth%d");
		/* WLAN devices should always be named "wlan%d" */
		if ((dev->driver_info->flags & FLAG_WLAN) != 0)
			strcpy(net->name, "wlan%d");

		/* maybe the remote can't receive an Ethernet MTU */
		if (net->mtu > (dev->hard_mtu - net->hard_header_len))
			net->mtu = dev->hard_mtu - net->hard_header_len;
	} else if (!info->in || !info->out)
		status = usbnet_get_endpoints(dev, udev);
	else {
		dev->in = usb_rcvbulkpipe(xdev, info->in);
		dev->out = usb_sndbulkpipe(xdev, info->out);
		if (!(info->flags & FLAG_NO_SETINT))
			status = usb_set_interface(xdev,
				interface->desc.bInterfaceNumber,
				interface->desc.bAlternateSetting);
		else
			status = 0;
	}
	if (status >= 0 && dev->status)
		status = init_status(dev, udev);
	if (status < 0)
		goto out3;

	if (!dev->rx_urb_size)
		dev->rx_urb_size = dev->hard_mtu;
	dev->maxpacket = usb_maxpacket(dev->udev, dev->out, 1);

	SET_NETDEV_DEV(net, &udev->dev);
	status = register_netdev(net);
	if (status)
		goto out3;
	if (netif_msg_probe(dev))
		devinfo(dev, "register '%s' at usb-%s-%s, %s, %pM",
			udev->dev.driver->name,
			xdev->bus->bus_name, xdev->devpath,
			dev->driver_info->description,
			net->dev_addr);

	// ok, it's ready to go.
	usb_set_intfdata(udev, dev);

	// start as if the link is up
	netif_device_attach(net);

	return 0;

out3:
	if (info->unbind)
		info->unbind(dev, udev);
out1:
	free_netdev(net);
out:
	usb_put_dev(xdev);
	return status;
}
EXPORT_SYMBOL_GPL(usbnet_probe);

/*-------------------------------------------------------------------------*/

/*
 * suspend the whole driver as soon as the first interface is suspended
 * resume only when the last interface is resumed
 */

int usbnet_suspend(struct usb_interface *intf, pm_message_t message)
{
	struct usbnet		*dev = usb_get_intfdata(intf);

	if (!dev->suspend_count++) {
		/*
		 * accelerate emptying of the rx and tx queues, to avoid
		 * having everything error out.
		 */
		netif_device_detach(dev->net);
		(void) unlink_urbs(dev, &dev->rxq);
		(void) unlink_urbs(dev, &dev->txq);
		/*
		 * reattach so runtime management can use and
		 * wake the device
		 */
		netif_device_attach(dev->net);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(usbnet_suspend);

int usbnet_resume(struct usb_interface *intf)
{
	struct usbnet		*dev = usb_get_intfdata(intf);

	if (!--dev->suspend_count)
		tasklet_schedule(&dev->bh);

	return 0;
}
EXPORT_SYMBOL_GPL(usbnet_resume);


/*-------------------------------------------------------------------------*/

static int __init usbnet_init(void)
{
	/* compiler should optimize this out */
	BUILD_BUG_ON(sizeof(((struct sk_buff *)0)->cb)
			< sizeof(struct skb_data));

	random_ether_addr(node_id);
	return 0;
}
module_init(usbnet_init);

static void __exit usbnet_exit(void)
{
}
module_exit(usbnet_exit);

MODULE_AUTHOR("David Brownell");
MODULE_DESCRIPTION("USB network driver framework");
MODULE_LICENSE("GPL");
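/* Hedged end-to-end sketch (hypothetical minidriver, illustrative names
 * and IDs only): the usual way to consume this framework is to fill in a
 * driver_info, point a usb_device_id table at it, and register a
 * usb_driver whose probe/disconnect/PM hooks are the usbnet_* entry
 * points exported above.
 *
 *	static const struct driver_info example_info = {
 *		.description	= "Example USB Ethernet",
 *		.flags		= FLAG_ETHER,
 *		.bind		= example_bind,		// see sketch near usbnet_get_endpoints()
 *		.status		= example_status,	// see sketch near intr_complete()
 *		.rx_fixup	= example_rx_fixup,
 *		.tx_fixup	= example_tx_fixup,
 *	};
 *
 *	static const struct usb_device_id example_ids[] = {
 *		{ USB_DEVICE(0x1234, 0x5678),		// hypothetical VID/PID
 *		  .driver_info = (unsigned long) &example_info, },
 *		{ },
 *	};
 *	MODULE_DEVICE_TABLE(usb, example_ids);
 *
 *	static struct usb_driver example_driver = {
 *		.name		= "example_usbnet",
 *		.id_table	= example_ids,
 *		.probe		= usbnet_probe,
 *		.disconnect	= usbnet_disconnect,
 *		.suspend	= usbnet_suspend,
 *		.resume		= usbnet_resume,
 *	};
 */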