/*
 * USB Network driver infrastructure
 * Copyright (C) 2000-2005 by David Brownell
 * Copyright (C) 2003-2005 David Hollis <dhollis@davehollis.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/*
 * This is a generic "USB networking" framework that works with several
 * kinds of full and high speed networking devices:  host-to-host cables,
 * smart usb peripherals, and actual Ethernet adapters.
 *
 * These devices usually differ in terms of control protocols (if they
 * even have one!) and sometimes they define new framing to wrap or batch
 * Ethernet packets.  Otherwise, they talk to USB pretty much the same,
 * so interface (un)binding, endpoint I/O queues, fault handling, and other
 * issues can usefully be addressed by this framework.
 */

// #define	DEBUG			// error path messages, extra info
// #define	VERBOSE			// more; success messages

#include <linux/module.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/workqueue.h>
#include <linux/mii.h>
#include <linux/usb.h>
#include <linux/usb/usbnet.h>

#define DRIVER_VERSION		"22-Aug-2005"


/*-------------------------------------------------------------------------*/

/*
 * Nineteen USB 1.1 max size bulk transactions per frame (ms), max.
 * Several dozen bytes of IPv4 data can fit in two such transactions.
 * One maximum size Ethernet packet takes twenty four of them.
 * For high speed, each frame comfortably fits almost 36 max size
 * Ethernet packets (so queues should be bigger).
 *
 * REVISIT qlens should be members of 'struct usbnet'; the goal is to
 * let the USB host controller be busy for 5msec or more before an irq
 * is required, under load.  Jumbograms change the equation.
 */
#define RX_MAX_QUEUE_MEMORY	(60 * 1518)
#define RX_QLEN(dev)		(((dev)->udev->speed == USB_SPEED_HIGH) ? \
				(RX_MAX_QUEUE_MEMORY/(dev)->rx_urb_size) : 4)
#define TX_QLEN(dev)		(((dev)->udev->speed == USB_SPEED_HIGH) ? \
				(RX_MAX_QUEUE_MEMORY/(dev)->hard_mtu) : 4)
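/* A worked example of the sizing above (illustrative numbers, not from
 * any particular device): with the default rx_urb_size of 1514 bytes
 * (1500 byte MTU plus the 14 byte Ethernet header), a high speed link
 * gets RX_QLEN() = (60 * 1518) / 1514 = 60 rx urbs in flight, while any
 * other speed gets the fixed depth of 4.
 */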

// reawaken network queue this soon after stopping; else watchdog barks
#define TX_TIMEOUT_JIFFIES	(5*HZ)

// throttle rx/tx briefly after some faults, so khubd might disconnect()
// us (it polls at HZ/4 usually) before we report too many false errors.
#define THROTTLE_JIFFIES	(HZ/8)

// between wakeups
#define UNLINK_TIMEOUT_MS	3

/*-------------------------------------------------------------------------*/

// randomly generated ethernet address
static u8	node_id [ETH_ALEN];

static const char driver_name [] = "usbnet";

/* use ethtool to change the level for any given device */
static int msg_level = -1;
module_param (msg_level, int, 0);
MODULE_PARM_DESC (msg_level, "Override default message level");

/*-------------------------------------------------------------------------*/

/* handles CDC Ethernet and many other network "bulk data" interfaces */
int usbnet_get_endpoints(struct usbnet *dev, struct usb_interface *intf)
{
	int				tmp;
	struct usb_host_interface	*alt = NULL;
	struct usb_host_endpoint	*in = NULL, *out = NULL;
	struct usb_host_endpoint	*status = NULL;

	for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
		unsigned	ep;

		in = out = status = NULL;
		alt = intf->altsetting + tmp;

		/* take the first altsetting with in-bulk + out-bulk;
		 * remember any status endpoint, just in case;
		 * ignore other endpoints and altsettings.
		 */
		for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
			struct usb_host_endpoint	*e;
			int				intr = 0;

			e = alt->endpoint + ep;
			switch (e->desc.bmAttributes) {
			case USB_ENDPOINT_XFER_INT:
				if (!usb_endpoint_dir_in(&e->desc))
					continue;
				intr = 1;
				/* FALLTHROUGH */
			case USB_ENDPOINT_XFER_BULK:
				break;
			default:
				continue;
			}
			if (usb_endpoint_dir_in(&e->desc)) {
				if (!intr && !in)
					in = e;
				else if (intr && !status)
					status = e;
			} else {
				if (!out)
					out = e;
			}
		}
		if (in && out)
			break;
	}
	if (!alt || !in || !out)
		return -EINVAL;

	if (alt->desc.bAlternateSetting != 0
			|| !(dev->driver_info->flags & FLAG_NO_SETINT)) {
		tmp = usb_set_interface (dev->udev, alt->desc.bInterfaceNumber,
				alt->desc.bAlternateSetting);
		if (tmp < 0)
			return tmp;
	}

	dev->in = usb_rcvbulkpipe (dev->udev,
			in->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
	dev->out = usb_sndbulkpipe (dev->udev,
			out->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
	dev->status = status;
	return 0;
}
EXPORT_SYMBOL_GPL(usbnet_get_endpoints);

static void intr_complete (struct urb *urb);

static int init_status (struct usbnet *dev, struct usb_interface *intf)
{
	char		*buf = NULL;
	unsigned	pipe = 0;
	unsigned	maxp;
	unsigned	period;

	if (!dev->driver_info->status)
		return 0;

	pipe = usb_rcvintpipe (dev->udev,
			dev->status->desc.bEndpointAddress
				& USB_ENDPOINT_NUMBER_MASK);
	maxp = usb_maxpacket (dev->udev, pipe, 0);

	/* avoid 1 msec chatter:  min 8 msec poll rate */
	period = max ((int) dev->status->desc.bInterval,
		(dev->udev->speed == USB_SPEED_HIGH) ? 7 : 3);

	buf = kmalloc (maxp, GFP_KERNEL);
	if (buf) {
		dev->interrupt = usb_alloc_urb (0, GFP_KERNEL);
		if (!dev->interrupt) {
			kfree (buf);
			return -ENOMEM;
		} else {
			usb_fill_int_urb(dev->interrupt, dev->udev, pipe,
				buf, maxp, intr_complete, dev, period);
			dev_dbg(&intf->dev,
				"status ep%din, %d bytes period %d\n",
				usb_pipeendpoint(pipe), maxp, period);
		}
	}
	return 0;
}
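/* A minimal sketch (hypothetical, not a real minidriver) of how bind()
 * code typically uses usbnet_get_endpoints(); the "example_" names are
 * placeholders, and a real driver_info would usually set more fields.
 */
#if 0
static int example_bind (struct usbnet *dev, struct usb_interface *intf)
{
	/* fills dev->in, dev->out and (when present) dev->status */
	return usbnet_get_endpoints (dev, intf);
}

static const struct driver_info example_info = {
	.description	= "example usbnet minidriver",
	.flags		= FLAG_ETHER,
	.bind		= example_bind,
};
#endif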

/* Passes this packet up the stack, updating its accounting.
 * Some link protocols batch packets, so their rx_fixup paths
 * can return clones as well as just modify the original skb.
 */
void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb)
{
	int	status;

	skb->protocol = eth_type_trans (skb, dev->net);
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += skb->len;

	if (netif_msg_rx_status (dev))
		devdbg (dev, "< rx, len %zu, type 0x%x",
			skb->len + sizeof (struct ethhdr), skb->protocol);
	memset (skb->cb, 0, sizeof (struct skb_data));
	status = netif_rx (skb);
	if (status != NET_RX_SUCCESS && netif_msg_rx_err (dev))
		devdbg (dev, "netif_rx status %d", status);
}
EXPORT_SYMBOL_GPL(usbnet_skb_return);


/*-------------------------------------------------------------------------
 *
 * Network Device Driver (peer link to "Host Device", from USB host)
 *
 *-------------------------------------------------------------------------*/

static int usbnet_change_mtu (struct net_device *net, int new_mtu)
{
	struct usbnet	*dev = netdev_priv(net);
	int		ll_mtu = new_mtu + net->hard_header_len;
	int		old_hard_mtu = dev->hard_mtu;
	int		old_rx_urb_size = dev->rx_urb_size;

	if (new_mtu <= 0)
		return -EINVAL;
	// no second zero-length packet read wanted after mtu-sized packets
	if ((ll_mtu % dev->maxpacket) == 0)
		return -EDOM;
	net->mtu = new_mtu;

	dev->hard_mtu = net->mtu + net->hard_header_len;
	if (dev->rx_urb_size == old_hard_mtu) {
		dev->rx_urb_size = dev->hard_mtu;
		if (dev->rx_urb_size > old_rx_urb_size)
			usbnet_unlink_rx_urbs(dev);
	}

	return 0;
}
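/* For example (illustrative numbers): with a 512 byte bulk maxpacket and
 * the usual 14 byte Ethernet header, asking for an MTU of 1522 would make
 * the link-level MTU 1536, an exact multiple of 512, so every mtu-sized
 * frame would need a trailing zero length packet; that request is refused
 * with -EDOM, while 1521 or 1523 would be accepted.
 */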

/*-------------------------------------------------------------------------*/

static struct net_device_stats *usbnet_get_stats (struct net_device *net)
{
	struct usbnet	*dev = netdev_priv(net);
	return &dev->stats;
}

/*-------------------------------------------------------------------------*/

/* some LK 2.4 HCDs oopsed if we freed or resubmitted urbs from
 * completion callbacks.  2.5 should have fixed those bugs...
 */

static void defer_bh(struct usbnet *dev, struct sk_buff *skb, struct sk_buff_head *list)
{
	unsigned long		flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_unlink(skb, list);
	spin_unlock(&list->lock);
	spin_lock(&dev->done.lock);
	__skb_queue_tail(&dev->done, skb);
	if (dev->done.qlen == 1)
		tasklet_schedule(&dev->bh);
	spin_unlock_irqrestore(&dev->done.lock, flags);
}

/* some work can't be done in tasklets, so we use keventd
 *
 * NOTE:  annoying asymmetry:  if it's active, schedule_work() fails,
 * but tasklet_schedule() doesn't.  hope the failure is rare.
 */
void usbnet_defer_kevent (struct usbnet *dev, int work)
{
	set_bit (work, &dev->flags);
	if (!schedule_work (&dev->kevent))
		deverr (dev, "kevent %d may have been dropped", work);
	else
		devdbg (dev, "kevent %d scheduled", work);
}
EXPORT_SYMBOL_GPL(usbnet_defer_kevent);

/*-------------------------------------------------------------------------*/

static void rx_complete (struct urb *urb);

static void rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
{
	struct sk_buff		*skb;
	struct skb_data		*entry;
	int			retval = 0;
	unsigned long		lockflags;
	size_t			size = dev->rx_urb_size;

	if ((skb = alloc_skb (size + NET_IP_ALIGN, flags)) == NULL) {
		if (netif_msg_rx_err (dev))
			devdbg (dev, "no rx skb");
		usbnet_defer_kevent (dev, EVENT_RX_MEMORY);
		usb_free_urb (urb);
		return;
	}
	skb_reserve (skb, NET_IP_ALIGN);

	entry = (struct skb_data *) skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->state = rx_start;
	entry->length = 0;

	usb_fill_bulk_urb (urb, dev->udev, dev->in,
		skb->data, size, rx_complete, skb);

	spin_lock_irqsave (&dev->rxq.lock, lockflags);

	if (netif_running (dev->net)
			&& netif_device_present (dev->net)
			&& !test_bit (EVENT_RX_HALT, &dev->flags)) {
		switch (retval = usb_submit_urb (urb, GFP_ATOMIC)) {
		case -EPIPE:
			usbnet_defer_kevent (dev, EVENT_RX_HALT);
			break;
		case -ENOMEM:
			usbnet_defer_kevent (dev, EVENT_RX_MEMORY);
			break;
		case -ENODEV:
			if (netif_msg_ifdown (dev))
				devdbg (dev, "device gone");
			netif_device_detach (dev->net);
			break;
		default:
			if (netif_msg_rx_err (dev))
				devdbg (dev, "rx submit, %d", retval);
			tasklet_schedule (&dev->bh);
			break;
		case 0:
			__skb_queue_tail (&dev->rxq, skb);
		}
	} else {
		if (netif_msg_ifdown (dev))
			devdbg (dev, "rx: stopped");
		retval = -ENOLINK;
	}
	spin_unlock_irqrestore (&dev->rxq.lock, lockflags);
	if (retval) {
		dev_kfree_skb_any (skb);
		usb_free_urb (urb);
	}
}


/*-------------------------------------------------------------------------*/

static inline void rx_process (struct usbnet *dev, struct sk_buff *skb)
{
	if (dev->driver_info->rx_fixup
			&& !dev->driver_info->rx_fixup (dev, skb))
		goto error;
	// else network stack removes extra byte if we forced a short packet

	if (skb->len)
		usbnet_skb_return (dev, skb);
	else {
		if (netif_msg_rx_err (dev))
			devdbg (dev, "drop");
error:
		dev->stats.rx_errors++;
		skb_queue_tail (&dev->done, skb);
	}
}
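/* A minimal sketch (hypothetical, illustrative only) of the rx_fixup()
 * contract used above: return nonzero when the skb is ready for
 * usbnet_skb_return()/the network stack, or 0 to have it counted as an
 * rx error and dropped.  This imaginary device appends a 4 byte trailer.
 */
#if 0
static int example_rx_fixup (struct usbnet *dev, struct sk_buff *skb)
{
	if (skb->len < 4)
		return 0;
	skb_trim (skb, skb->len - 4);
	return 1;
}
#endif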

/*-------------------------------------------------------------------------*/

static void rx_complete (struct urb *urb)
{
	struct sk_buff		*skb = (struct sk_buff *) urb->context;
	struct skb_data		*entry = (struct skb_data *) skb->cb;
	struct usbnet		*dev = entry->dev;
	int			urb_status = urb->status;

	skb_put (skb, urb->actual_length);
	entry->state = rx_done;
	entry->urb = NULL;

	switch (urb_status) {
	/* success */
	case 0:
		if (skb->len < dev->net->hard_header_len) {
			entry->state = rx_cleanup;
			dev->stats.rx_errors++;
			dev->stats.rx_length_errors++;
			if (netif_msg_rx_err (dev))
				devdbg (dev, "rx length %d", skb->len);
		}
		break;

	/* stalls need manual reset. this is rare ... except that
	 * when going through USB 2.0 TTs, unplug appears this way.
	 * we avoid the highspeed version of the ETIMEDOUT/EILSEQ
	 * storm, recovering as needed.
	 */
	case -EPIPE:
		dev->stats.rx_errors++;
		usbnet_defer_kevent (dev, EVENT_RX_HALT);
		// FALLTHROUGH

	/* software-driven interface shutdown */
	case -ECONNRESET:		/* async unlink */
	case -ESHUTDOWN:		/* hardware gone */
		if (netif_msg_ifdown (dev))
			devdbg (dev, "rx shutdown, code %d", urb_status);
		goto block;

	/* we get controller i/o faults during khubd disconnect() delays.
	 * throttle down resubmits, to avoid log floods; just temporarily,
	 * so we still recover when the fault isn't a khubd delay.
	 */
	case -EPROTO:
	case -ETIME:
	case -EILSEQ:
		dev->stats.rx_errors++;
		if (!timer_pending (&dev->delay)) {
			mod_timer (&dev->delay, jiffies + THROTTLE_JIFFIES);
			if (netif_msg_link (dev))
				devdbg (dev, "rx throttle %d", urb_status);
		}
block:
		entry->state = rx_cleanup;
		entry->urb = urb;
		urb = NULL;
		break;

	/* data overrun ... flush fifo? */
	case -EOVERFLOW:
		dev->stats.rx_over_errors++;
		// FALLTHROUGH

	default:
		entry->state = rx_cleanup;
		dev->stats.rx_errors++;
		if (netif_msg_rx_err (dev))
			devdbg (dev, "rx status %d", urb_status);
		break;
	}

	defer_bh(dev, skb, &dev->rxq);

	if (urb) {
		if (netif_running (dev->net)
				&& !test_bit (EVENT_RX_HALT, &dev->flags)) {
			rx_submit (dev, urb, GFP_ATOMIC);
			return;
		}
		usb_free_urb (urb);
	}
	if (netif_msg_rx_err (dev))
		devdbg (dev, "no read resubmitted");
}

static void intr_complete (struct urb *urb)
{
	struct usbnet	*dev = urb->context;
	int		status = urb->status;

	switch (status) {
	/* success */
	case 0:
		dev->driver_info->status(dev, urb);
		break;

	/* software-driven interface shutdown */
	case -ENOENT:		/* urb killed */
	case -ESHUTDOWN:	/* hardware gone */
		if (netif_msg_ifdown (dev))
			devdbg (dev, "intr shutdown, code %d", status);
		return;

	/* NOTE:  not throttling like RX/TX, since this endpoint
	 * already polls infrequently
	 */
	default:
		devdbg (dev, "intr status %d", status);
		break;
	}

	if (!netif_running (dev->net))
		return;

	memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
	status = usb_submit_urb (urb, GFP_ATOMIC);
	if (status != 0 && netif_msg_timer (dev))
		deverr(dev, "intr resubmit --> %d", status);
}

/*-------------------------------------------------------------------------*/

// unlink pending rx/tx; completion handlers do all other cleanup

static int unlink_urbs (struct usbnet *dev, struct sk_buff_head *q)
{
	unsigned long		flags;
	struct sk_buff		*skb, *skbnext;
	int			count = 0;

	spin_lock_irqsave (&q->lock, flags);
	skb_queue_walk_safe(q, skb, skbnext) {
		struct skb_data		*entry;
		struct urb		*urb;
		int			retval;

		entry = (struct skb_data *) skb->cb;
		urb = entry->urb;

		// during some PM-driven resume scenarios,
		// these (async) unlinks complete immediately
		retval = usb_unlink_urb (urb);
		if (retval != -EINPROGRESS && retval != 0)
			devdbg (dev, "unlink urb err, %d", retval);
		else
			count++;
	}
	spin_unlock_irqrestore (&q->lock, flags);
	return count;
}

// Flush all pending rx urbs
// minidrivers may need to do this when the MTU changes

void usbnet_unlink_rx_urbs(struct usbnet *dev)
{
	if (netif_running(dev->net)) {
		(void) unlink_urbs (dev, &dev->rxq);
		tasklet_schedule(&dev->bh);
	}
}
EXPORT_SYMBOL_GPL(usbnet_unlink_rx_urbs);
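/* For instance, a minidriver that negotiates jumbo frames with its
 * hardware might grow dev->rx_urb_size and then call
 * usbnet_unlink_rx_urbs(dev), so the read queue gets refilled with
 * buffers of the new size (usbnet_change_mtu() above does the same).
 */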

/*-------------------------------------------------------------------------*/

// precondition: never called in_interrupt

static int usbnet_stop (struct net_device *net)
{
	struct usbnet		*dev = netdev_priv(net);
	int			temp;
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK (unlink_wakeup);
	DECLARE_WAITQUEUE (wait, current);

	netif_stop_queue (net);

	if (netif_msg_ifdown (dev))
		devinfo (dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld",
			dev->stats.rx_packets, dev->stats.tx_packets,
			dev->stats.rx_errors, dev->stats.tx_errors
			);

	// ensure there are no more active urbs
	add_wait_queue (&unlink_wakeup, &wait);
	dev->wait = &unlink_wakeup;
	temp = unlink_urbs (dev, &dev->txq) + unlink_urbs (dev, &dev->rxq);

	// maybe wait for deletions to finish.
	while (!skb_queue_empty(&dev->rxq)
			&& !skb_queue_empty(&dev->txq)
			&& !skb_queue_empty(&dev->done)) {
		msleep(UNLINK_TIMEOUT_MS);
		if (netif_msg_ifdown (dev))
			devdbg (dev, "waited for %d urb completions", temp);
	}
	dev->wait = NULL;
	remove_wait_queue (&unlink_wakeup, &wait);

	usb_kill_urb(dev->interrupt);

	/* deferred work (task, timer, softirq) must also stop.
	 * can't flush_scheduled_work() until we drop rtnl (later),
	 * else workers could deadlock; so make workers a NOP.
	 */
	dev->flags = 0;
	del_timer_sync (&dev->delay);
	tasklet_kill (&dev->bh);
	usb_autopm_put_interface(dev->intf);

	return 0;
}
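/* The unlink handshake above depends on usbnet_bh(): usbnet_stop()
 * publishes the on-stack wait queue through dev->wait, and the tasklet
 * wakes it once the rxq, txq and done queues have all drained, so the
 * msleep() loop normally ends after a few UNLINK_TIMEOUT_MS periods.
 */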

/*-------------------------------------------------------------------------*/

// posts reads, and enables write queuing

// precondition: never called in_interrupt

static int usbnet_open (struct net_device *net)
{
	struct usbnet		*dev = netdev_priv(net);
	int			retval;
	struct driver_info	*info = dev->driver_info;

	if ((retval = usb_autopm_get_interface(dev->intf)) < 0) {
		if (netif_msg_ifup (dev))
			devinfo (dev,
				"resumption fail (%d) usbnet usb-%s-%s, %s",
				retval,
				dev->udev->bus->bus_name, dev->udev->devpath,
				info->description);
		goto done_nopm;
	}

	// put into "known safe" state
	if (info->reset && (retval = info->reset (dev)) < 0) {
		if (netif_msg_ifup (dev))
			devinfo (dev,
				"open reset fail (%d) usbnet usb-%s-%s, %s",
				retval,
				dev->udev->bus->bus_name, dev->udev->devpath,
				info->description);
		goto done;
	}

	// insist peer be connected
	if (info->check_connect && (retval = info->check_connect (dev)) < 0) {
		if (netif_msg_ifup (dev))
			devdbg (dev, "can't open; %d", retval);
		goto done;
	}

	/* start any status interrupt transfer */
	if (dev->interrupt) {
		retval = usb_submit_urb (dev->interrupt, GFP_KERNEL);
		if (retval < 0) {
			if (netif_msg_ifup (dev))
				deverr (dev, "intr submit %d", retval);
			goto done;
		}
	}

	netif_start_queue (net);
	if (netif_msg_ifup (dev)) {
		char	*framing;

		if (dev->driver_info->flags & FLAG_FRAMING_NC)
			framing = "NetChip";
		else if (dev->driver_info->flags & FLAG_FRAMING_GL)
			framing = "GeneSys";
		else if (dev->driver_info->flags & FLAG_FRAMING_Z)
			framing = "Zaurus";
		else if (dev->driver_info->flags & FLAG_FRAMING_RN)
			framing = "RNDIS";
		else if (dev->driver_info->flags & FLAG_FRAMING_AX)
			framing = "ASIX";
		else
			framing = "simple";

		devinfo (dev, "open: enable queueing "
				"(rx %d, tx %d) mtu %d %s framing",
			(int)RX_QLEN (dev), (int)TX_QLEN (dev), dev->net->mtu,
			framing);
	}

	// delay posting reads until we're fully open
	tasklet_schedule (&dev->bh);
	return retval;
done:
	usb_autopm_put_interface(dev->intf);
done_nopm:
	return retval;
}

/*-------------------------------------------------------------------------*/

/* ethtool methods; minidrivers may need to add some more, but
 * they'll probably want to use this base set.
 */

int usbnet_get_settings (struct net_device *net, struct ethtool_cmd *cmd)
{
	struct usbnet *dev = netdev_priv(net);

	if (!dev->mii.mdio_read)
		return -EOPNOTSUPP;

	return mii_ethtool_gset(&dev->mii, cmd);
}
EXPORT_SYMBOL_GPL(usbnet_get_settings);

int usbnet_set_settings (struct net_device *net, struct ethtool_cmd *cmd)
{
	struct usbnet *dev = netdev_priv(net);
	int retval;

	if (!dev->mii.mdio_write)
		return -EOPNOTSUPP;

	retval = mii_ethtool_sset(&dev->mii, cmd);

	/* link speed/duplex might have changed */
	if (dev->driver_info->link_reset)
		dev->driver_info->link_reset(dev);

	return retval;
}
EXPORT_SYMBOL_GPL(usbnet_set_settings);

u32 usbnet_get_link (struct net_device *net)
{
	struct usbnet *dev = netdev_priv(net);

	/* If a check_connect is defined, return its result */
	if (dev->driver_info->check_connect)
		return dev->driver_info->check_connect (dev) == 0;

	/* if the device has mii operations, use those */
	if (dev->mii.mdio_read)
		return mii_link_ok(&dev->mii);

	/* Otherwise, say we're up (to avoid breaking scripts) */
	return 1;
}
EXPORT_SYMBOL_GPL(usbnet_get_link);

int usbnet_nway_reset(struct net_device *net)
{
	struct usbnet *dev = netdev_priv(net);

	if (!dev->mii.mdio_write)
		return -EOPNOTSUPP;

	return mii_nway_restart(&dev->mii);
}
EXPORT_SYMBOL_GPL(usbnet_nway_reset);

void usbnet_get_drvinfo (struct net_device *net, struct ethtool_drvinfo *info)
{
	struct usbnet *dev = netdev_priv(net);

	strncpy (info->driver, dev->driver_name, sizeof info->driver);
	strncpy (info->version, DRIVER_VERSION, sizeof info->version);
	strncpy (info->fw_version, dev->driver_info->description,
		sizeof info->fw_version);
	usb_make_path (dev->udev, info->bus_info, sizeof info->bus_info);
}
EXPORT_SYMBOL_GPL(usbnet_get_drvinfo);

u32 usbnet_get_msglevel (struct net_device *net)
{
	struct usbnet *dev = netdev_priv(net);

	return dev->msg_enable;
}
EXPORT_SYMBOL_GPL(usbnet_get_msglevel);

void usbnet_set_msglevel (struct net_device *net, u32 level)
{
	struct usbnet *dev = netdev_priv(net);

	dev->msg_enable = level;
}
EXPORT_SYMBOL_GPL(usbnet_set_msglevel);

/* drivers may override default ethtool_ops in their bind() routine */
static struct ethtool_ops usbnet_ethtool_ops = {
	.get_settings		= usbnet_get_settings,
	.set_settings		= usbnet_set_settings,
	.get_link		= usbnet_get_link,
	.nway_reset		= usbnet_nway_reset,
	.get_drvinfo		= usbnet_get_drvinfo,
	.get_msglevel		= usbnet_get_msglevel,
	.set_msglevel		= usbnet_set_msglevel,
};
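/* Minidrivers needing extra hooks (EEPROM access, checksum offload
 * switches, and so on) typically declare their own ethtool_ops built on
 * the exported helpers above and install it from bind(), for example
 * (illustrative name only):
 *
 *	dev->net->ethtool_ops = &example_ethtool_ops;
 */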

/*-------------------------------------------------------------------------*/

/* work that cannot be done in interrupt context uses keventd.
 *
 * NOTE:  with 2.5 we could do more of this using completion callbacks,
 * especially now that control transfers can be queued.
 */
static void
kevent (struct work_struct *work)
{
	struct usbnet		*dev =
		container_of(work, struct usbnet, kevent);
	int			status;

	/* usb_clear_halt() needs a thread context */
	if (test_bit (EVENT_TX_HALT, &dev->flags)) {
		unlink_urbs (dev, &dev->txq);
		status = usb_clear_halt (dev->udev, dev->out);
		if (status < 0
				&& status != -EPIPE
				&& status != -ESHUTDOWN) {
			if (netif_msg_tx_err (dev))
				deverr (dev, "can't clear tx halt, status %d",
					status);
		} else {
			clear_bit (EVENT_TX_HALT, &dev->flags);
			if (status != -ESHUTDOWN)
				netif_wake_queue (dev->net);
		}
	}
	if (test_bit (EVENT_RX_HALT, &dev->flags)) {
		unlink_urbs (dev, &dev->rxq);
		status = usb_clear_halt (dev->udev, dev->in);
		if (status < 0
				&& status != -EPIPE
				&& status != -ESHUTDOWN) {
			if (netif_msg_rx_err (dev))
				deverr (dev, "can't clear rx halt, status %d",
					status);
		} else {
			clear_bit (EVENT_RX_HALT, &dev->flags);
			tasklet_schedule (&dev->bh);
		}
	}

	/* tasklet could resubmit itself forever if memory is tight */
	if (test_bit (EVENT_RX_MEMORY, &dev->flags)) {
		struct urb	*urb = NULL;

		if (netif_running (dev->net))
			urb = usb_alloc_urb (0, GFP_KERNEL);
		else
			clear_bit (EVENT_RX_MEMORY, &dev->flags);
		if (urb != NULL) {
			clear_bit (EVENT_RX_MEMORY, &dev->flags);
			rx_submit (dev, urb, GFP_KERNEL);
			tasklet_schedule (&dev->bh);
		}
	}

	if (test_bit (EVENT_LINK_RESET, &dev->flags)) {
		struct driver_info	*info = dev->driver_info;
		int			retval = 0;

		clear_bit (EVENT_LINK_RESET, &dev->flags);
		if (info->link_reset && (retval = info->link_reset(dev)) < 0) {
			devinfo(dev, "link reset failed (%d) usbnet usb-%s-%s, %s",
				retval,
				dev->udev->bus->bus_name, dev->udev->devpath,
				info->description);
		}
	}

	if (dev->flags)
		devdbg (dev, "kevent done, flags = 0x%lx",
			dev->flags);
}
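/* A minimal sketch (hypothetical) of how a minidriver's status() handler
 * can feed the EVENT_LINK_RESET path above; the "example_" name and the
 * interrupt buffer layout are made up for illustration.
 */
#if 0
static void example_status (struct usbnet *dev, struct urb *urb)
{
	u8	*buf = urb->transfer_buffer;

	/* bit 0 of the first byte flags a link change on this
	 * imaginary device; let keventd run link_reset() for us
	 */
	if (urb->actual_length >= 1 && (buf[0] & 0x01))
		usbnet_defer_kevent (dev, EVENT_LINK_RESET);
}
#endif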

/*-------------------------------------------------------------------------*/

static void tx_complete (struct urb *urb)
{
	struct sk_buff		*skb = (struct sk_buff *) urb->context;
	struct skb_data		*entry = (struct skb_data *) skb->cb;
	struct usbnet		*dev = entry->dev;

	if (urb->status == 0) {
		dev->stats.tx_packets++;
		dev->stats.tx_bytes += entry->length;
	} else {
		dev->stats.tx_errors++;

		switch (urb->status) {
		case -EPIPE:
			usbnet_defer_kevent (dev, EVENT_TX_HALT);
			break;

		/* software-driven interface shutdown */
		case -ECONNRESET:		// async unlink
		case -ESHUTDOWN:		// hardware gone
			break;

		// like rx, tx gets controller i/o faults during khubd delays
		// and so it uses the same throttling mechanism.
		case -EPROTO:
		case -ETIME:
		case -EILSEQ:
			if (!timer_pending (&dev->delay)) {
				mod_timer (&dev->delay,
					jiffies + THROTTLE_JIFFIES);
				if (netif_msg_link (dev))
					devdbg (dev, "tx throttle %d",
						urb->status);
			}
			netif_stop_queue (dev->net);
			break;
		default:
			if (netif_msg_tx_err (dev))
				devdbg (dev, "tx err %d", entry->urb->status);
			break;
		}
	}

	urb->dev = NULL;
	entry->state = tx_done;
	defer_bh(dev, skb, &dev->txq);
}

/*-------------------------------------------------------------------------*/

static void usbnet_tx_timeout (struct net_device *net)
{
	struct usbnet		*dev = netdev_priv(net);

	unlink_urbs (dev, &dev->txq);
	tasklet_schedule (&dev->bh);

	// FIXME: device recovery -- reset?
}

/*-------------------------------------------------------------------------*/

static int usbnet_start_xmit (struct sk_buff *skb, struct net_device *net)
{
	struct usbnet		*dev = netdev_priv(net);
	int			length;
	int			retval = NET_XMIT_SUCCESS;
	struct urb		*urb = NULL;
	struct skb_data		*entry;
	struct driver_info	*info = dev->driver_info;
	unsigned long		flags;

	// some devices want funky USB-level framing, for
	// win32 driver (usually) and/or hardware quirks
	if (info->tx_fixup) {
		skb = info->tx_fixup (dev, skb, GFP_ATOMIC);
		if (!skb) {
			if (netif_msg_tx_err (dev))
				devdbg (dev, "can't tx_fixup skb");
			goto drop;
		}
	}
	length = skb->len;

	if (!(urb = usb_alloc_urb (0, GFP_ATOMIC))) {
		if (netif_msg_tx_err (dev))
			devdbg (dev, "no urb");
		goto drop;
	}

	entry = (struct skb_data *) skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->state = tx_start;
	entry->length = length;

	usb_fill_bulk_urb (urb, dev->udev, dev->out,
			skb->data, skb->len, tx_complete, skb);

	/* don't assume the hardware handles USB_ZERO_PACKET
	 * NOTE:  strictly conforming cdc-ether devices should expect
	 * the ZLP here, but ignore the one-byte packet.
	 */
	if ((length % dev->maxpacket) == 0) {
		urb->transfer_buffer_length++;
		if (skb_tailroom(skb)) {
			skb->data[skb->len] = 0;
			__skb_put(skb, 1);
		}
	}
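	/* For example (illustrative numbers): with a 512 byte bulk
	 * maxpacket, a 1024 byte frame would otherwise end exactly on a
	 * packet boundary; the pad byte makes the last bulk packet one
	 * byte long, so the peer sees a short packet and no separate
	 * zero length packet is needed to finish the transfer.
	 */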

	spin_lock_irqsave (&dev->txq.lock, flags);

	switch ((retval = usb_submit_urb (urb, GFP_ATOMIC))) {
	case -EPIPE:
		netif_stop_queue (net);
		usbnet_defer_kevent (dev, EVENT_TX_HALT);
		break;
	default:
		if (netif_msg_tx_err (dev))
			devdbg (dev, "tx: submit urb err %d", retval);
		break;
	case 0:
		net->trans_start = jiffies;
		__skb_queue_tail (&dev->txq, skb);
		if (dev->txq.qlen >= TX_QLEN (dev))
			netif_stop_queue (net);
	}
	spin_unlock_irqrestore (&dev->txq.lock, flags);

	if (retval) {
		if (netif_msg_tx_err (dev))
			devdbg (dev, "drop, code %d", retval);
drop:
		retval = NET_XMIT_SUCCESS;
		dev->stats.tx_dropped++;
		if (skb)
			dev_kfree_skb_any (skb);
		usb_free_urb (urb);
	} else if (netif_msg_tx_queued (dev)) {
		devdbg (dev, "> tx, len %d, type 0x%x",
			length, skb->protocol);
	}
	return retval;
}
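/* A minimal sketch (hypothetical) of the tx_fixup() contract used above:
 * return a (possibly reallocated) skb framed the way the hardware wants,
 * or NULL to drop the packet.  This imaginary device wants a 2 byte
 * little-endian payload length prepended; the "example_" name and the
 * framing are illustrative only.
 */
#if 0
static struct sk_buff *example_tx_fixup (struct usbnet *dev,
		struct sk_buff *skb, gfp_t flags)
{
	if (skb_headroom (skb) < 2) {
		struct sk_buff	*skb2;

		skb2 = skb_copy_expand (skb, 2, 0, flags);
		dev_kfree_skb_any (skb);
		skb = skb2;
		if (!skb)
			return NULL;
	}
	skb_push (skb, 2);
	skb->data[0] = (skb->len - 2) & 0xff;
	skb->data[1] = ((skb->len - 2) >> 8) & 0xff;
	return skb;
}
#endif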


/*-------------------------------------------------------------------------*/

// tasklet (work deferred from completions, in_irq) or timer

static void usbnet_bh (unsigned long param)
{
	struct usbnet		*dev = (struct usbnet *) param;
	struct sk_buff		*skb;
	struct skb_data		*entry;

	while ((skb = skb_dequeue (&dev->done))) {
		entry = (struct skb_data *) skb->cb;
		switch (entry->state) {
		case rx_done:
			entry->state = rx_cleanup;
			rx_process (dev, skb);
			continue;
		case tx_done:
		case rx_cleanup:
			usb_free_urb (entry->urb);
			dev_kfree_skb (skb);
			continue;
		default:
			devdbg (dev, "bogus skb state %d", entry->state);
		}
	}

	// waiting for all pending urbs to complete?
	if (dev->wait) {
		if ((dev->txq.qlen + dev->rxq.qlen + dev->done.qlen) == 0) {
			wake_up (dev->wait);
		}

	// or are we maybe short a few urbs?
	} else if (netif_running (dev->net)
			&& netif_device_present (dev->net)
			&& !timer_pending (&dev->delay)
			&& !test_bit (EVENT_RX_HALT, &dev->flags)) {
		int	temp = dev->rxq.qlen;
		int	qlen = RX_QLEN (dev);

		if (temp < qlen) {
			struct urb	*urb;
			int		i;

			// don't refill the queue all at once
			for (i = 0; i < 10 && dev->rxq.qlen < qlen; i++) {
				urb = usb_alloc_urb (0, GFP_ATOMIC);
				if (urb != NULL)
					rx_submit (dev, urb, GFP_ATOMIC);
			}
			if (temp != dev->rxq.qlen && netif_msg_link (dev))
				devdbg (dev, "rxqlen %d --> %d",
						temp, dev->rxq.qlen);
			if (dev->rxq.qlen < qlen)
				tasklet_schedule (&dev->bh);
		}
		if (dev->txq.qlen < TX_QLEN (dev))
			netif_wake_queue (dev->net);
	}
}
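/* usbnet_bh() is installed both as the tasklet handler (dev->bh) and as
 * the dev->delay timer callback in usbnet_probe(), so after a throttled
 * fault the same logic simply reruns once THROTTLE_JIFFIES have passed;
 * while that timer is pending, the !timer_pending() check above keeps
 * the rx queue from being refilled.
 */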


/*-------------------------------------------------------------------------
 *
 * USB Device Driver support
 *
 *-------------------------------------------------------------------------*/

// precondition: never called in_interrupt

void usbnet_disconnect (struct usb_interface *intf)
{
	struct usbnet		*dev;
	struct usb_device	*xdev;
	struct net_device	*net;

	dev = usb_get_intfdata(intf);
	usb_set_intfdata(intf, NULL);
	if (!dev)
		return;

	xdev = interface_to_usbdev (intf);

	if (netif_msg_probe (dev))
		devinfo (dev, "unregister '%s' usb-%s-%s, %s",
			intf->dev.driver->name,
			xdev->bus->bus_name, xdev->devpath,
			dev->driver_info->description);

	net = dev->net;
	unregister_netdev (net);

	/* we don't hold rtnl here ... */
	flush_scheduled_work ();

	if (dev->driver_info->unbind)
		dev->driver_info->unbind (dev, intf);

	free_netdev(net);
	usb_put_dev (xdev);
}
EXPORT_SYMBOL_GPL(usbnet_disconnect);


/*-------------------------------------------------------------------------*/

// precondition: never called in_interrupt

int
usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
{
	struct usbnet			*dev;
	struct net_device		*net;
	struct usb_host_interface	*interface;
	struct driver_info		*info;
	struct usb_device		*xdev;
	int				status;
	const char			*name;
	DECLARE_MAC_BUF(mac);

	name = udev->dev.driver->name;
	info = (struct driver_info *) prod->driver_info;
	if (!info) {
		dev_dbg (&udev->dev, "blacklisted by %s\n", name);
		return -ENODEV;
	}
	xdev = interface_to_usbdev (udev);
	interface = udev->cur_altsetting;

	usb_get_dev (xdev);

	status = -ENOMEM;

	// set up our own records
	net = alloc_etherdev(sizeof(*dev));
	if (!net) {
		dbg ("can't kmalloc dev");
		goto out;
	}

	dev = netdev_priv(net);
	dev->udev = xdev;
	dev->intf = udev;
	dev->driver_info = info;
	dev->driver_name = name;
	dev->msg_enable = netif_msg_init (msg_level, NETIF_MSG_DRV
				| NETIF_MSG_PROBE | NETIF_MSG_LINK);
	skb_queue_head_init (&dev->rxq);
	skb_queue_head_init (&dev->txq);
	skb_queue_head_init (&dev->done);
	dev->bh.func = usbnet_bh;
	dev->bh.data = (unsigned long) dev;
	INIT_WORK (&dev->kevent, kevent);
	dev->delay.function = usbnet_bh;
	dev->delay.data = (unsigned long) dev;
	init_timer (&dev->delay);
	mutex_init (&dev->phy_mutex);

	dev->net = net;
	strcpy (net->name, "usb%d");
	memcpy (net->dev_addr, node_id, sizeof node_id);

	/* rx and tx sides can use different message sizes;
	 * bind() should set rx_urb_size in that case.
	 */
	dev->hard_mtu = net->mtu + net->hard_header_len;
#if 0
// dma_supported() is deeply broken on almost all architectures
	// possible with some EHCI controllers
	if (dma_supported (&udev->dev, DMA_64BIT_MASK))
		net->features |= NETIF_F_HIGHDMA;
#endif

	net->change_mtu = usbnet_change_mtu;
	net->get_stats = usbnet_get_stats;
	net->hard_start_xmit = usbnet_start_xmit;
	net->open = usbnet_open;
	net->stop = usbnet_stop;
	net->watchdog_timeo = TX_TIMEOUT_JIFFIES;
	net->tx_timeout = usbnet_tx_timeout;
	net->ethtool_ops = &usbnet_ethtool_ops;

	// allow device-specific bind/init procedures
	// NOTE net->name still not usable ...
	if (info->bind) {
		status = info->bind (dev, udev);
		if (status < 0)
			goto out1;

		// heuristic:  "usb%d" for links we know are two-host,
		// else "eth%d" when there's reasonable doubt.  userspace
		// can rename the link if it knows better.
		if ((dev->driver_info->flags & FLAG_ETHER) != 0
				&& (net->dev_addr [0] & 0x02) == 0)
			strcpy (net->name, "eth%d");
		/* WLAN devices should always be named "wlan%d" */
		if ((dev->driver_info->flags & FLAG_WLAN) != 0)
			strcpy(net->name, "wlan%d");

		/* maybe the remote can't receive an Ethernet MTU */
		if (net->mtu > (dev->hard_mtu - net->hard_header_len))
			net->mtu = dev->hard_mtu - net->hard_header_len;
	} else if (!info->in || !info->out)
		status = usbnet_get_endpoints (dev, udev);
	else {
		dev->in = usb_rcvbulkpipe (xdev, info->in);
		dev->out = usb_sndbulkpipe (xdev, info->out);
		if (!(info->flags & FLAG_NO_SETINT))
			status = usb_set_interface (xdev,
				interface->desc.bInterfaceNumber,
				interface->desc.bAlternateSetting);
		else
			status = 0;
	}
	if (status >= 0 && dev->status)
		status = init_status (dev, udev);
	if (status < 0)
		goto out3;

	if (!dev->rx_urb_size)
		dev->rx_urb_size = dev->hard_mtu;
	dev->maxpacket = usb_maxpacket (dev->udev, dev->out, 1);

	SET_NETDEV_DEV(net, &udev->dev);
	status = register_netdev (net);
	if (status)
		goto out3;
	if (netif_msg_probe (dev))
		devinfo (dev, "register '%s' at usb-%s-%s, %s, %s",
			udev->dev.driver->name,
			xdev->bus->bus_name, xdev->devpath,
			dev->driver_info->description,
			print_mac(mac, net->dev_addr));

	// ok, it's ready to go.
	usb_set_intfdata (udev, dev);

	// start as if the link is up
	netif_device_attach (net);

	return 0;

out3:
	if (info->unbind)
		info->unbind (dev, udev);
out1:
	free_netdev(net);
out:
	usb_put_dev(xdev);
	return status;
}
EXPORT_SYMBOL_GPL(usbnet_probe);
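/* Putting it together, a hypothetical minidriver registers a usb_driver
 * that simply points at the framework entry points (usbnet_suspend and
 * usbnet_resume are defined just below); the "example_" names and the
 * zero vendor/product IDs are placeholders.
 */
#if 0
static const struct usb_device_id example_ids [] = {
	{
		USB_DEVICE (0x0000, 0x0000),	/* placeholder IDs */
		.driver_info = (unsigned long) &example_info,
	},
	{ },		// END
};
MODULE_DEVICE_TABLE(usb, example_ids);

static struct usb_driver example_driver = {
	.name =		"example",
	.id_table =	example_ids,
	.probe =	usbnet_probe,
	.disconnect =	usbnet_disconnect,
	.suspend =	usbnet_suspend,
	.resume =	usbnet_resume,
};
#endif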

/*-------------------------------------------------------------------------*/

/*
 * suspend the whole driver as soon as the first interface is suspended
 * resume only when the last interface is resumed
 */

int usbnet_suspend (struct usb_interface *intf, pm_message_t message)
{
	struct usbnet		*dev = usb_get_intfdata(intf);

	if (!dev->suspend_count++) {
		/*
		 * accelerate emptying of the rx and tx queues, to avoid
		 * having everything error out.
		 */
		netif_device_detach (dev->net);
		(void) unlink_urbs (dev, &dev->rxq);
		(void) unlink_urbs (dev, &dev->txq);
		/*
		 * reattach so runtime management can use and
		 * wake the device
		 */
		netif_device_attach (dev->net);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(usbnet_suspend);

int usbnet_resume (struct usb_interface *intf)
{
	struct usbnet		*dev = usb_get_intfdata(intf);

	if (!--dev->suspend_count)
		tasklet_schedule (&dev->bh);

	return 0;
}
EXPORT_SYMBOL_GPL(usbnet_resume);


/*-------------------------------------------------------------------------*/

static int __init usbnet_init(void)
{
	/* compiler should optimize this out */
	BUILD_BUG_ON (sizeof (((struct sk_buff *)0)->cb)
			< sizeof (struct skb_data));

	random_ether_addr(node_id);
	return 0;
}
module_init(usbnet_init);

static void __exit usbnet_exit(void)
{
}
module_exit(usbnet_exit);

MODULE_AUTHOR("David Brownell");
MODULE_DESCRIPTION("USB network driver framework");
MODULE_LICENSE("GPL");