1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * USB Network driver infrastructure 4 * Copyright (C) 2000-2005 by David Brownell 5 * Copyright (C) 2003-2005 David Hollis <dhollis@davehollis.com> 6 */ 7 8 /* 9 * This is a generic "USB networking" framework that works with several 10 * kinds of full and high speed networking devices: host-to-host cables, 11 * smart usb peripherals, and actual Ethernet adapters. 12 * 13 * These devices usually differ in terms of control protocols (if they 14 * even have one!) and sometimes they define new framing to wrap or batch 15 * Ethernet packets. Otherwise, they talk to USB pretty much the same, 16 * so interface (un)binding, endpoint I/O queues, fault handling, and other 17 * issues can usefully be addressed by this framework. 18 */ 19 20 // #define DEBUG // error path messages, extra info 21 // #define VERBOSE // more; success messages 22 23 #include <linux/module.h> 24 #include <linux/init.h> 25 #include <linux/netdevice.h> 26 #include <linux/etherdevice.h> 27 #include <linux/ctype.h> 28 #include <linux/ethtool.h> 29 #include <linux/workqueue.h> 30 #include <linux/mii.h> 31 #include <linux/usb.h> 32 #include <linux/usb/usbnet.h> 33 #include <linux/slab.h> 34 #include <linux/kernel.h> 35 #include <linux/pm_runtime.h> 36 37 #define DRIVER_VERSION "22-Aug-2005" 38 39 40 /*-------------------------------------------------------------------------*/ 41 42 /* 43 * Nineteen USB 1.1 max size bulk transactions per frame (ms), max. 44 * Several dozen bytes of IPv4 data can fit in two such transactions. 45 * One maximum size Ethernet packet takes twenty four of them. 46 * For high speed, each frame comfortably fits almost 36 max size 47 * Ethernet packets (so queues should be bigger). 48 * 49 * The goal is to let the USB host controller be busy for 5msec or 50 * more before an irq is required, under load. Jumbograms change 51 * the equation. 52 */ 53 #define MAX_QUEUE_MEMORY (60 * 1518) 54 #define RX_QLEN(dev) ((dev)->rx_qlen) 55 #define TX_QLEN(dev) ((dev)->tx_qlen) 56 57 // reawaken network queue this soon after stopping; else watchdog barks 58 #define TX_TIMEOUT_JIFFIES (5*HZ) 59 60 /* throttle rx/tx briefly after some faults, so hub_wq might disconnect() 61 * us (it polls at HZ/4 usually) before we report too many false errors. 62 */ 63 #define THROTTLE_JIFFIES (HZ/8) 64 65 // between wakeups 66 #define UNLINK_TIMEOUT_MS 3 67 68 /*-------------------------------------------------------------------------*/ 69 70 // randomly generated ethernet address 71 static u8 node_id [ETH_ALEN]; 72 73 /* use ethtool to change the level for any given device */ 74 static int msg_level = -1; 75 module_param (msg_level, int, 0); 76 MODULE_PARM_DESC (msg_level, "Override default message level"); 77 78 /*-------------------------------------------------------------------------*/ 79 80 /* handles CDC Ethernet and many other network "bulk data" interfaces */ 81 int usbnet_get_endpoints(struct usbnet *dev, struct usb_interface *intf) 82 { 83 int tmp; 84 struct usb_host_interface *alt = NULL; 85 struct usb_host_endpoint *in = NULL, *out = NULL; 86 struct usb_host_endpoint *status = NULL; 87 88 for (tmp = 0; tmp < intf->num_altsetting; tmp++) { 89 unsigned ep; 90 91 in = out = status = NULL; 92 alt = intf->altsetting + tmp; 93 94 /* take the first altsetting with in-bulk + out-bulk; 95 * remember any status endpoint, just in case; 96 * ignore other endpoints and altsettings. 97 */ 98 for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) { 99 struct usb_host_endpoint *e; 100 int intr = 0; 101 102 e = alt->endpoint + ep; 103 switch (e->desc.bmAttributes) { 104 case USB_ENDPOINT_XFER_INT: 105 if (!usb_endpoint_dir_in(&e->desc)) 106 continue; 107 intr = 1; 108 /* FALLTHROUGH */ 109 case USB_ENDPOINT_XFER_BULK: 110 break; 111 default: 112 continue; 113 } 114 if (usb_endpoint_dir_in(&e->desc)) { 115 if (!intr && !in) 116 in = e; 117 else if (intr && !status) 118 status = e; 119 } else { 120 if (!out) 121 out = e; 122 } 123 } 124 if (in && out) 125 break; 126 } 127 if (!alt || !in || !out) 128 return -EINVAL; 129 130 if (alt->desc.bAlternateSetting != 0 || 131 !(dev->driver_info->flags & FLAG_NO_SETINT)) { 132 tmp = usb_set_interface (dev->udev, alt->desc.bInterfaceNumber, 133 alt->desc.bAlternateSetting); 134 if (tmp < 0) 135 return tmp; 136 } 137 138 dev->in = usb_rcvbulkpipe (dev->udev, 139 in->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK); 140 dev->out = usb_sndbulkpipe (dev->udev, 141 out->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK); 142 dev->status = status; 143 return 0; 144 } 145 EXPORT_SYMBOL_GPL(usbnet_get_endpoints); 146 147 int usbnet_get_ethernet_addr(struct usbnet *dev, int iMACAddress) 148 { 149 int tmp = -1, ret; 150 unsigned char buf [13]; 151 152 ret = usb_string(dev->udev, iMACAddress, buf, sizeof buf); 153 if (ret == 12) 154 tmp = hex2bin(dev->net->dev_addr, buf, 6); 155 if (tmp < 0) { 156 dev_dbg(&dev->udev->dev, 157 "bad MAC string %d fetch, %d\n", iMACAddress, tmp); 158 if (ret >= 0) 159 ret = -EINVAL; 160 return ret; 161 } 162 return 0; 163 } 164 EXPORT_SYMBOL_GPL(usbnet_get_ethernet_addr); 165 166 static void intr_complete (struct urb *urb) 167 { 168 struct usbnet *dev = urb->context; 169 int status = urb->status; 170 171 switch (status) { 172 /* success */ 173 case 0: 174 dev->driver_info->status(dev, urb); 175 break; 176 177 /* software-driven interface shutdown */ 178 case -ENOENT: /* urb killed */ 179 case -ESHUTDOWN: /* hardware gone */ 180 netif_dbg(dev, ifdown, dev->net, 181 "intr shutdown, code %d\n", status); 182 return; 183 184 /* NOTE: not throttling like RX/TX, since this endpoint 185 * already polls infrequently 186 */ 187 default: 188 netdev_dbg(dev->net, "intr status %d\n", status); 189 break; 190 } 191 192 status = usb_submit_urb (urb, GFP_ATOMIC); 193 if (status != 0) 194 netif_err(dev, timer, dev->net, 195 "intr resubmit --> %d\n", status); 196 } 197 198 static int init_status (struct usbnet *dev, struct usb_interface *intf) 199 { 200 char *buf = NULL; 201 unsigned pipe = 0; 202 unsigned maxp; 203 unsigned period; 204 205 if (!dev->driver_info->status) 206 return 0; 207 208 pipe = usb_rcvintpipe (dev->udev, 209 dev->status->desc.bEndpointAddress 210 & USB_ENDPOINT_NUMBER_MASK); 211 maxp = usb_maxpacket (dev->udev, pipe, 0); 212 213 /* avoid 1 msec chatter: min 8 msec poll rate */ 214 period = max ((int) dev->status->desc.bInterval, 215 (dev->udev->speed == USB_SPEED_HIGH) ? 7 : 3); 216 217 buf = kmalloc (maxp, GFP_KERNEL); 218 if (buf) { 219 dev->interrupt = usb_alloc_urb (0, GFP_KERNEL); 220 if (!dev->interrupt) { 221 kfree (buf); 222 return -ENOMEM; 223 } else { 224 usb_fill_int_urb(dev->interrupt, dev->udev, pipe, 225 buf, maxp, intr_complete, dev, period); 226 dev->interrupt->transfer_flags |= URB_FREE_BUFFER; 227 dev_dbg(&intf->dev, 228 "status ep%din, %d bytes period %d\n", 229 usb_pipeendpoint(pipe), maxp, period); 230 } 231 } 232 return 0; 233 } 234 235 /* Submit the interrupt URB if not previously submitted, increasing refcount */ 236 int usbnet_status_start(struct usbnet *dev, gfp_t mem_flags) 237 { 238 int ret = 0; 239 240 WARN_ON_ONCE(dev->interrupt == NULL); 241 if (dev->interrupt) { 242 mutex_lock(&dev->interrupt_mutex); 243 244 if (++dev->interrupt_count == 1) 245 ret = usb_submit_urb(dev->interrupt, mem_flags); 246 247 dev_dbg(&dev->udev->dev, "incremented interrupt URB count to %d\n", 248 dev->interrupt_count); 249 mutex_unlock(&dev->interrupt_mutex); 250 } 251 return ret; 252 } 253 EXPORT_SYMBOL_GPL(usbnet_status_start); 254 255 /* For resume; submit interrupt URB if previously submitted */ 256 static int __usbnet_status_start_force(struct usbnet *dev, gfp_t mem_flags) 257 { 258 int ret = 0; 259 260 mutex_lock(&dev->interrupt_mutex); 261 if (dev->interrupt_count) { 262 ret = usb_submit_urb(dev->interrupt, mem_flags); 263 dev_dbg(&dev->udev->dev, 264 "submitted interrupt URB for resume\n"); 265 } 266 mutex_unlock(&dev->interrupt_mutex); 267 return ret; 268 } 269 270 /* Kill the interrupt URB if all submitters want it killed */ 271 void usbnet_status_stop(struct usbnet *dev) 272 { 273 if (dev->interrupt) { 274 mutex_lock(&dev->interrupt_mutex); 275 WARN_ON(dev->interrupt_count == 0); 276 277 if (dev->interrupt_count && --dev->interrupt_count == 0) 278 usb_kill_urb(dev->interrupt); 279 280 dev_dbg(&dev->udev->dev, 281 "decremented interrupt URB count to %d\n", 282 dev->interrupt_count); 283 mutex_unlock(&dev->interrupt_mutex); 284 } 285 } 286 EXPORT_SYMBOL_GPL(usbnet_status_stop); 287 288 /* For suspend; always kill interrupt URB */ 289 static void __usbnet_status_stop_force(struct usbnet *dev) 290 { 291 if (dev->interrupt) { 292 mutex_lock(&dev->interrupt_mutex); 293 usb_kill_urb(dev->interrupt); 294 dev_dbg(&dev->udev->dev, "killed interrupt URB for suspend\n"); 295 mutex_unlock(&dev->interrupt_mutex); 296 } 297 } 298 299 /* Passes this packet up the stack, updating its accounting. 300 * Some link protocols batch packets, so their rx_fixup paths 301 * can return clones as well as just modify the original skb. 302 */ 303 void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb) 304 { 305 struct pcpu_sw_netstats *stats64 = this_cpu_ptr(dev->stats64); 306 unsigned long flags; 307 int status; 308 309 if (test_bit(EVENT_RX_PAUSED, &dev->flags)) { 310 skb_queue_tail(&dev->rxq_pause, skb); 311 return; 312 } 313 314 /* only update if unset to allow minidriver rx_fixup override */ 315 if (skb->protocol == 0) 316 skb->protocol = eth_type_trans (skb, dev->net); 317 318 flags = u64_stats_update_begin_irqsave(&stats64->syncp); 319 stats64->rx_packets++; 320 stats64->rx_bytes += skb->len; 321 u64_stats_update_end_irqrestore(&stats64->syncp, flags); 322 323 netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n", 324 skb->len + sizeof (struct ethhdr), skb->protocol); 325 memset (skb->cb, 0, sizeof (struct skb_data)); 326 327 if (skb_defer_rx_timestamp(skb)) 328 return; 329 330 status = netif_rx (skb); 331 if (status != NET_RX_SUCCESS) 332 netif_dbg(dev, rx_err, dev->net, 333 "netif_rx status %d\n", status); 334 } 335 EXPORT_SYMBOL_GPL(usbnet_skb_return); 336 337 /* must be called if hard_mtu or rx_urb_size changed */ 338 void usbnet_update_max_qlen(struct usbnet *dev) 339 { 340 enum usb_device_speed speed = dev->udev->speed; 341 342 switch (speed) { 343 case USB_SPEED_HIGH: 344 dev->rx_qlen = MAX_QUEUE_MEMORY / dev->rx_urb_size; 345 dev->tx_qlen = MAX_QUEUE_MEMORY / dev->hard_mtu; 346 break; 347 case USB_SPEED_SUPER: 348 case USB_SPEED_SUPER_PLUS: 349 /* 350 * Not take default 5ms qlen for super speed HC to 351 * save memory, and iperf tests show 2.5ms qlen can 352 * work well 353 */ 354 dev->rx_qlen = 5 * MAX_QUEUE_MEMORY / dev->rx_urb_size; 355 dev->tx_qlen = 5 * MAX_QUEUE_MEMORY / dev->hard_mtu; 356 break; 357 default: 358 dev->rx_qlen = dev->tx_qlen = 4; 359 } 360 } 361 EXPORT_SYMBOL_GPL(usbnet_update_max_qlen); 362 363 364 /*------------------------------------------------------------------------- 365 * 366 * Network Device Driver (peer link to "Host Device", from USB host) 367 * 368 *-------------------------------------------------------------------------*/ 369 370 int usbnet_change_mtu (struct net_device *net, int new_mtu) 371 { 372 struct usbnet *dev = netdev_priv(net); 373 int ll_mtu = new_mtu + net->hard_header_len; 374 int old_hard_mtu = dev->hard_mtu; 375 int old_rx_urb_size = dev->rx_urb_size; 376 377 // no second zero-length packet read wanted after mtu-sized packets 378 if ((ll_mtu % dev->maxpacket) == 0) 379 return -EDOM; 380 net->mtu = new_mtu; 381 382 dev->hard_mtu = net->mtu + net->hard_header_len; 383 if (dev->rx_urb_size == old_hard_mtu) { 384 dev->rx_urb_size = dev->hard_mtu; 385 if (dev->rx_urb_size > old_rx_urb_size) { 386 usbnet_pause_rx(dev); 387 usbnet_unlink_rx_urbs(dev); 388 usbnet_resume_rx(dev); 389 } 390 } 391 392 /* max qlen depend on hard_mtu and rx_urb_size */ 393 usbnet_update_max_qlen(dev); 394 395 return 0; 396 } 397 EXPORT_SYMBOL_GPL(usbnet_change_mtu); 398 399 /* The caller must hold list->lock */ 400 static void __usbnet_queue_skb(struct sk_buff_head *list, 401 struct sk_buff *newsk, enum skb_state state) 402 { 403 struct skb_data *entry = (struct skb_data *) newsk->cb; 404 405 __skb_queue_tail(list, newsk); 406 entry->state = state; 407 } 408 409 /*-------------------------------------------------------------------------*/ 410 411 /* some LK 2.4 HCDs oopsed if we freed or resubmitted urbs from 412 * completion callbacks. 2.5 should have fixed those bugs... 413 */ 414 415 static enum skb_state defer_bh(struct usbnet *dev, struct sk_buff *skb, 416 struct sk_buff_head *list, enum skb_state state) 417 { 418 unsigned long flags; 419 enum skb_state old_state; 420 struct skb_data *entry = (struct skb_data *) skb->cb; 421 422 spin_lock_irqsave(&list->lock, flags); 423 old_state = entry->state; 424 entry->state = state; 425 __skb_unlink(skb, list); 426 427 /* defer_bh() is never called with list == &dev->done. 428 * spin_lock_nested() tells lockdep that it is OK to take 429 * dev->done.lock here with list->lock held. 430 */ 431 spin_lock_nested(&dev->done.lock, SINGLE_DEPTH_NESTING); 432 433 __skb_queue_tail(&dev->done, skb); 434 if (dev->done.qlen == 1) 435 tasklet_schedule(&dev->bh); 436 spin_unlock(&dev->done.lock); 437 spin_unlock_irqrestore(&list->lock, flags); 438 return old_state; 439 } 440 441 /* some work can't be done in tasklets, so we use keventd 442 * 443 * NOTE: annoying asymmetry: if it's active, schedule_work() fails, 444 * but tasklet_schedule() doesn't. hope the failure is rare. 445 */ 446 void usbnet_defer_kevent (struct usbnet *dev, int work) 447 { 448 set_bit (work, &dev->flags); 449 if (!schedule_work (&dev->kevent)) 450 netdev_dbg(dev->net, "kevent %d may have been dropped\n", work); 451 else 452 netdev_dbg(dev->net, "kevent %d scheduled\n", work); 453 } 454 EXPORT_SYMBOL_GPL(usbnet_defer_kevent); 455 456 /*-------------------------------------------------------------------------*/ 457 458 static void rx_complete (struct urb *urb); 459 460 static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags) 461 { 462 struct sk_buff *skb; 463 struct skb_data *entry; 464 int retval = 0; 465 unsigned long lockflags; 466 size_t size = dev->rx_urb_size; 467 468 /* prevent rx skb allocation when error ratio is high */ 469 if (test_bit(EVENT_RX_KILL, &dev->flags)) { 470 usb_free_urb(urb); 471 return -ENOLINK; 472 } 473 474 if (test_bit(EVENT_NO_IP_ALIGN, &dev->flags)) 475 skb = __netdev_alloc_skb(dev->net, size, flags); 476 else 477 skb = __netdev_alloc_skb_ip_align(dev->net, size, flags); 478 if (!skb) { 479 netif_dbg(dev, rx_err, dev->net, "no rx skb\n"); 480 usbnet_defer_kevent (dev, EVENT_RX_MEMORY); 481 usb_free_urb (urb); 482 return -ENOMEM; 483 } 484 485 entry = (struct skb_data *) skb->cb; 486 entry->urb = urb; 487 entry->dev = dev; 488 entry->length = 0; 489 490 usb_fill_bulk_urb (urb, dev->udev, dev->in, 491 skb->data, size, rx_complete, skb); 492 493 spin_lock_irqsave (&dev->rxq.lock, lockflags); 494 495 if (netif_running (dev->net) && 496 netif_device_present (dev->net) && 497 !test_bit (EVENT_RX_HALT, &dev->flags) && 498 !test_bit (EVENT_DEV_ASLEEP, &dev->flags)) { 499 switch (retval = usb_submit_urb (urb, GFP_ATOMIC)) { 500 case -EPIPE: 501 usbnet_defer_kevent (dev, EVENT_RX_HALT); 502 break; 503 case -ENOMEM: 504 usbnet_defer_kevent (dev, EVENT_RX_MEMORY); 505 break; 506 case -ENODEV: 507 netif_dbg(dev, ifdown, dev->net, "device gone\n"); 508 netif_device_detach (dev->net); 509 break; 510 case -EHOSTUNREACH: 511 retval = -ENOLINK; 512 break; 513 default: 514 netif_dbg(dev, rx_err, dev->net, 515 "rx submit, %d\n", retval); 516 tasklet_schedule (&dev->bh); 517 break; 518 case 0: 519 __usbnet_queue_skb(&dev->rxq, skb, rx_start); 520 } 521 } else { 522 netif_dbg(dev, ifdown, dev->net, "rx: stopped\n"); 523 retval = -ENOLINK; 524 } 525 spin_unlock_irqrestore (&dev->rxq.lock, lockflags); 526 if (retval) { 527 dev_kfree_skb_any (skb); 528 usb_free_urb (urb); 529 } 530 return retval; 531 } 532 533 534 /*-------------------------------------------------------------------------*/ 535 536 static inline void rx_process (struct usbnet *dev, struct sk_buff *skb) 537 { 538 if (dev->driver_info->rx_fixup && 539 !dev->driver_info->rx_fixup (dev, skb)) { 540 /* With RX_ASSEMBLE, rx_fixup() must update counters */ 541 if (!(dev->driver_info->flags & FLAG_RX_ASSEMBLE)) 542 dev->net->stats.rx_errors++; 543 goto done; 544 } 545 // else network stack removes extra byte if we forced a short packet 546 547 /* all data was already cloned from skb inside the driver */ 548 if (dev->driver_info->flags & FLAG_MULTI_PACKET) 549 goto done; 550 551 if (skb->len < ETH_HLEN) { 552 dev->net->stats.rx_errors++; 553 dev->net->stats.rx_length_errors++; 554 netif_dbg(dev, rx_err, dev->net, "rx length %d\n", skb->len); 555 } else { 556 usbnet_skb_return(dev, skb); 557 return; 558 } 559 560 done: 561 skb_queue_tail(&dev->done, skb); 562 } 563 564 /*-------------------------------------------------------------------------*/ 565 566 static void rx_complete (struct urb *urb) 567 { 568 struct sk_buff *skb = (struct sk_buff *) urb->context; 569 struct skb_data *entry = (struct skb_data *) skb->cb; 570 struct usbnet *dev = entry->dev; 571 int urb_status = urb->status; 572 enum skb_state state; 573 574 skb_put (skb, urb->actual_length); 575 state = rx_done; 576 entry->urb = NULL; 577 578 switch (urb_status) { 579 /* success */ 580 case 0: 581 break; 582 583 /* stalls need manual reset. this is rare ... except that 584 * when going through USB 2.0 TTs, unplug appears this way. 585 * we avoid the highspeed version of the ETIMEDOUT/EILSEQ 586 * storm, recovering as needed. 587 */ 588 case -EPIPE: 589 dev->net->stats.rx_errors++; 590 usbnet_defer_kevent (dev, EVENT_RX_HALT); 591 // FALLTHROUGH 592 593 /* software-driven interface shutdown */ 594 case -ECONNRESET: /* async unlink */ 595 case -ESHUTDOWN: /* hardware gone */ 596 netif_dbg(dev, ifdown, dev->net, 597 "rx shutdown, code %d\n", urb_status); 598 goto block; 599 600 /* we get controller i/o faults during hub_wq disconnect() delays. 601 * throttle down resubmits, to avoid log floods; just temporarily, 602 * so we still recover when the fault isn't a hub_wq delay. 603 */ 604 case -EPROTO: 605 case -ETIME: 606 case -EILSEQ: 607 dev->net->stats.rx_errors++; 608 if (!timer_pending (&dev->delay)) { 609 mod_timer (&dev->delay, jiffies + THROTTLE_JIFFIES); 610 netif_dbg(dev, link, dev->net, 611 "rx throttle %d\n", urb_status); 612 } 613 block: 614 state = rx_cleanup; 615 entry->urb = urb; 616 urb = NULL; 617 break; 618 619 /* data overrun ... flush fifo? */ 620 case -EOVERFLOW: 621 dev->net->stats.rx_over_errors++; 622 // FALLTHROUGH 623 624 default: 625 state = rx_cleanup; 626 dev->net->stats.rx_errors++; 627 netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status); 628 break; 629 } 630 631 /* stop rx if packet error rate is high */ 632 if (++dev->pkt_cnt > 30) { 633 dev->pkt_cnt = 0; 634 dev->pkt_err = 0; 635 } else { 636 if (state == rx_cleanup) 637 dev->pkt_err++; 638 if (dev->pkt_err > 20) 639 set_bit(EVENT_RX_KILL, &dev->flags); 640 } 641 642 state = defer_bh(dev, skb, &dev->rxq, state); 643 644 if (urb) { 645 if (netif_running (dev->net) && 646 !test_bit (EVENT_RX_HALT, &dev->flags) && 647 state != unlink_start) { 648 rx_submit (dev, urb, GFP_ATOMIC); 649 usb_mark_last_busy(dev->udev); 650 return; 651 } 652 usb_free_urb (urb); 653 } 654 netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n"); 655 } 656 657 /*-------------------------------------------------------------------------*/ 658 void usbnet_pause_rx(struct usbnet *dev) 659 { 660 set_bit(EVENT_RX_PAUSED, &dev->flags); 661 662 netif_dbg(dev, rx_status, dev->net, "paused rx queue enabled\n"); 663 } 664 EXPORT_SYMBOL_GPL(usbnet_pause_rx); 665 666 void usbnet_resume_rx(struct usbnet *dev) 667 { 668 struct sk_buff *skb; 669 int num = 0; 670 671 clear_bit(EVENT_RX_PAUSED, &dev->flags); 672 673 while ((skb = skb_dequeue(&dev->rxq_pause)) != NULL) { 674 usbnet_skb_return(dev, skb); 675 num++; 676 } 677 678 tasklet_schedule(&dev->bh); 679 680 netif_dbg(dev, rx_status, dev->net, 681 "paused rx queue disabled, %d skbs requeued\n", num); 682 } 683 EXPORT_SYMBOL_GPL(usbnet_resume_rx); 684 685 void usbnet_purge_paused_rxq(struct usbnet *dev) 686 { 687 skb_queue_purge(&dev->rxq_pause); 688 } 689 EXPORT_SYMBOL_GPL(usbnet_purge_paused_rxq); 690 691 /*-------------------------------------------------------------------------*/ 692 693 // unlink pending rx/tx; completion handlers do all other cleanup 694 695 static int unlink_urbs (struct usbnet *dev, struct sk_buff_head *q) 696 { 697 unsigned long flags; 698 struct sk_buff *skb; 699 int count = 0; 700 701 spin_lock_irqsave (&q->lock, flags); 702 while (!skb_queue_empty(q)) { 703 struct skb_data *entry; 704 struct urb *urb; 705 int retval; 706 707 skb_queue_walk(q, skb) { 708 entry = (struct skb_data *) skb->cb; 709 if (entry->state != unlink_start) 710 goto found; 711 } 712 break; 713 found: 714 entry->state = unlink_start; 715 urb = entry->urb; 716 717 /* 718 * Get reference count of the URB to avoid it to be 719 * freed during usb_unlink_urb, which may trigger 720 * use-after-free problem inside usb_unlink_urb since 721 * usb_unlink_urb is always racing with .complete 722 * handler(include defer_bh). 723 */ 724 usb_get_urb(urb); 725 spin_unlock_irqrestore(&q->lock, flags); 726 // during some PM-driven resume scenarios, 727 // these (async) unlinks complete immediately 728 retval = usb_unlink_urb (urb); 729 if (retval != -EINPROGRESS && retval != 0) 730 netdev_dbg(dev->net, "unlink urb err, %d\n", retval); 731 else 732 count++; 733 usb_put_urb(urb); 734 spin_lock_irqsave(&q->lock, flags); 735 } 736 spin_unlock_irqrestore (&q->lock, flags); 737 return count; 738 } 739 740 // Flush all pending rx urbs 741 // minidrivers may need to do this when the MTU changes 742 743 void usbnet_unlink_rx_urbs(struct usbnet *dev) 744 { 745 if (netif_running(dev->net)) { 746 (void) unlink_urbs (dev, &dev->rxq); 747 tasklet_schedule(&dev->bh); 748 } 749 } 750 EXPORT_SYMBOL_GPL(usbnet_unlink_rx_urbs); 751 752 /*-------------------------------------------------------------------------*/ 753 754 static void wait_skb_queue_empty(struct sk_buff_head *q) 755 { 756 unsigned long flags; 757 758 spin_lock_irqsave(&q->lock, flags); 759 while (!skb_queue_empty(q)) { 760 spin_unlock_irqrestore(&q->lock, flags); 761 schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS)); 762 set_current_state(TASK_UNINTERRUPTIBLE); 763 spin_lock_irqsave(&q->lock, flags); 764 } 765 spin_unlock_irqrestore(&q->lock, flags); 766 } 767 768 // precondition: never called in_interrupt 769 static void usbnet_terminate_urbs(struct usbnet *dev) 770 { 771 DECLARE_WAITQUEUE(wait, current); 772 int temp; 773 774 /* ensure there are no more active urbs */ 775 add_wait_queue(&dev->wait, &wait); 776 set_current_state(TASK_UNINTERRUPTIBLE); 777 temp = unlink_urbs(dev, &dev->txq) + 778 unlink_urbs(dev, &dev->rxq); 779 780 /* maybe wait for deletions to finish. */ 781 wait_skb_queue_empty(&dev->rxq); 782 wait_skb_queue_empty(&dev->txq); 783 wait_skb_queue_empty(&dev->done); 784 netif_dbg(dev, ifdown, dev->net, 785 "waited for %d urb completions\n", temp); 786 set_current_state(TASK_RUNNING); 787 remove_wait_queue(&dev->wait, &wait); 788 } 789 790 int usbnet_stop (struct net_device *net) 791 { 792 struct usbnet *dev = netdev_priv(net); 793 const struct driver_info *info = dev->driver_info; 794 int retval, pm, mpn; 795 796 clear_bit(EVENT_DEV_OPEN, &dev->flags); 797 netif_stop_queue (net); 798 799 netif_info(dev, ifdown, dev->net, 800 "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n", 801 net->stats.rx_packets, net->stats.tx_packets, 802 net->stats.rx_errors, net->stats.tx_errors); 803 804 /* to not race resume */ 805 pm = usb_autopm_get_interface(dev->intf); 806 /* allow minidriver to stop correctly (wireless devices to turn off 807 * radio etc) */ 808 if (info->stop) { 809 retval = info->stop(dev); 810 if (retval < 0) 811 netif_info(dev, ifdown, dev->net, 812 "stop fail (%d) usbnet usb-%s-%s, %s\n", 813 retval, 814 dev->udev->bus->bus_name, dev->udev->devpath, 815 info->description); 816 } 817 818 if (!(info->flags & FLAG_AVOID_UNLINK_URBS)) 819 usbnet_terminate_urbs(dev); 820 821 usbnet_status_stop(dev); 822 823 usbnet_purge_paused_rxq(dev); 824 825 mpn = !test_and_clear_bit(EVENT_NO_RUNTIME_PM, &dev->flags); 826 827 /* deferred work (task, timer, softirq) must also stop. 828 * can't flush_scheduled_work() until we drop rtnl (later), 829 * else workers could deadlock; so make workers a NOP. 830 */ 831 dev->flags = 0; 832 del_timer_sync (&dev->delay); 833 tasklet_kill (&dev->bh); 834 if (!pm) 835 usb_autopm_put_interface(dev->intf); 836 837 if (info->manage_power && mpn) 838 info->manage_power(dev, 0); 839 else 840 usb_autopm_put_interface(dev->intf); 841 842 return 0; 843 } 844 EXPORT_SYMBOL_GPL(usbnet_stop); 845 846 /*-------------------------------------------------------------------------*/ 847 848 // posts reads, and enables write queuing 849 850 // precondition: never called in_interrupt 851 852 int usbnet_open (struct net_device *net) 853 { 854 struct usbnet *dev = netdev_priv(net); 855 int retval; 856 const struct driver_info *info = dev->driver_info; 857 858 if ((retval = usb_autopm_get_interface(dev->intf)) < 0) { 859 netif_info(dev, ifup, dev->net, 860 "resumption fail (%d) usbnet usb-%s-%s, %s\n", 861 retval, 862 dev->udev->bus->bus_name, 863 dev->udev->devpath, 864 info->description); 865 goto done_nopm; 866 } 867 868 // put into "known safe" state 869 if (info->reset && (retval = info->reset (dev)) < 0) { 870 netif_info(dev, ifup, dev->net, 871 "open reset fail (%d) usbnet usb-%s-%s, %s\n", 872 retval, 873 dev->udev->bus->bus_name, 874 dev->udev->devpath, 875 info->description); 876 goto done; 877 } 878 879 /* hard_mtu or rx_urb_size may change in reset() */ 880 usbnet_update_max_qlen(dev); 881 882 // insist peer be connected 883 if (info->check_connect && (retval = info->check_connect (dev)) < 0) { 884 netif_dbg(dev, ifup, dev->net, "can't open; %d\n", retval); 885 goto done; 886 } 887 888 /* start any status interrupt transfer */ 889 if (dev->interrupt) { 890 retval = usbnet_status_start(dev, GFP_KERNEL); 891 if (retval < 0) { 892 netif_err(dev, ifup, dev->net, 893 "intr submit %d\n", retval); 894 goto done; 895 } 896 } 897 898 set_bit(EVENT_DEV_OPEN, &dev->flags); 899 netif_start_queue (net); 900 netif_info(dev, ifup, dev->net, 901 "open: enable queueing (rx %d, tx %d) mtu %d %s framing\n", 902 (int)RX_QLEN(dev), (int)TX_QLEN(dev), 903 dev->net->mtu, 904 (dev->driver_info->flags & FLAG_FRAMING_NC) ? "NetChip" : 905 (dev->driver_info->flags & FLAG_FRAMING_GL) ? "GeneSys" : 906 (dev->driver_info->flags & FLAG_FRAMING_Z) ? "Zaurus" : 907 (dev->driver_info->flags & FLAG_FRAMING_RN) ? "RNDIS" : 908 (dev->driver_info->flags & FLAG_FRAMING_AX) ? "ASIX" : 909 "simple"); 910 911 /* reset rx error state */ 912 dev->pkt_cnt = 0; 913 dev->pkt_err = 0; 914 clear_bit(EVENT_RX_KILL, &dev->flags); 915 916 // delay posting reads until we're fully open 917 tasklet_schedule (&dev->bh); 918 if (info->manage_power) { 919 retval = info->manage_power(dev, 1); 920 if (retval < 0) { 921 retval = 0; 922 set_bit(EVENT_NO_RUNTIME_PM, &dev->flags); 923 } else { 924 usb_autopm_put_interface(dev->intf); 925 } 926 } 927 return retval; 928 done: 929 usb_autopm_put_interface(dev->intf); 930 done_nopm: 931 return retval; 932 } 933 EXPORT_SYMBOL_GPL(usbnet_open); 934 935 /*-------------------------------------------------------------------------*/ 936 937 /* ethtool methods; minidrivers may need to add some more, but 938 * they'll probably want to use this base set. 939 */ 940 941 int usbnet_get_link_ksettings(struct net_device *net, 942 struct ethtool_link_ksettings *cmd) 943 { 944 struct usbnet *dev = netdev_priv(net); 945 946 if (!dev->mii.mdio_read) 947 return -EOPNOTSUPP; 948 949 mii_ethtool_get_link_ksettings(&dev->mii, cmd); 950 951 return 0; 952 } 953 EXPORT_SYMBOL_GPL(usbnet_get_link_ksettings); 954 955 int usbnet_set_link_ksettings(struct net_device *net, 956 const struct ethtool_link_ksettings *cmd) 957 { 958 struct usbnet *dev = netdev_priv(net); 959 int retval; 960 961 if (!dev->mii.mdio_write) 962 return -EOPNOTSUPP; 963 964 retval = mii_ethtool_set_link_ksettings(&dev->mii, cmd); 965 966 /* link speed/duplex might have changed */ 967 if (dev->driver_info->link_reset) 968 dev->driver_info->link_reset(dev); 969 970 /* hard_mtu or rx_urb_size may change in link_reset() */ 971 usbnet_update_max_qlen(dev); 972 973 return retval; 974 } 975 EXPORT_SYMBOL_GPL(usbnet_set_link_ksettings); 976 977 void usbnet_get_stats64(struct net_device *net, struct rtnl_link_stats64 *stats) 978 { 979 struct usbnet *dev = netdev_priv(net); 980 unsigned int start; 981 int cpu; 982 983 netdev_stats_to_stats64(stats, &net->stats); 984 985 for_each_possible_cpu(cpu) { 986 struct pcpu_sw_netstats *stats64; 987 u64 rx_packets, rx_bytes; 988 u64 tx_packets, tx_bytes; 989 990 stats64 = per_cpu_ptr(dev->stats64, cpu); 991 992 do { 993 start = u64_stats_fetch_begin_irq(&stats64->syncp); 994 rx_packets = stats64->rx_packets; 995 rx_bytes = stats64->rx_bytes; 996 tx_packets = stats64->tx_packets; 997 tx_bytes = stats64->tx_bytes; 998 } while (u64_stats_fetch_retry_irq(&stats64->syncp, start)); 999 1000 stats->rx_packets += rx_packets; 1001 stats->rx_bytes += rx_bytes; 1002 stats->tx_packets += tx_packets; 1003 stats->tx_bytes += tx_bytes; 1004 } 1005 } 1006 EXPORT_SYMBOL_GPL(usbnet_get_stats64); 1007 1008 u32 usbnet_get_link (struct net_device *net) 1009 { 1010 struct usbnet *dev = netdev_priv(net); 1011 1012 /* If a check_connect is defined, return its result */ 1013 if (dev->driver_info->check_connect) 1014 return dev->driver_info->check_connect (dev) == 0; 1015 1016 /* if the device has mii operations, use those */ 1017 if (dev->mii.mdio_read) 1018 return mii_link_ok(&dev->mii); 1019 1020 /* Otherwise, dtrt for drivers calling netif_carrier_{on,off} */ 1021 return ethtool_op_get_link(net); 1022 } 1023 EXPORT_SYMBOL_GPL(usbnet_get_link); 1024 1025 int usbnet_nway_reset(struct net_device *net) 1026 { 1027 struct usbnet *dev = netdev_priv(net); 1028 1029 if (!dev->mii.mdio_write) 1030 return -EOPNOTSUPP; 1031 1032 return mii_nway_restart(&dev->mii); 1033 } 1034 EXPORT_SYMBOL_GPL(usbnet_nway_reset); 1035 1036 void usbnet_get_drvinfo (struct net_device *net, struct ethtool_drvinfo *info) 1037 { 1038 struct usbnet *dev = netdev_priv(net); 1039 1040 strlcpy (info->driver, dev->driver_name, sizeof info->driver); 1041 strlcpy (info->version, DRIVER_VERSION, sizeof info->version); 1042 strlcpy (info->fw_version, dev->driver_info->description, 1043 sizeof info->fw_version); 1044 usb_make_path (dev->udev, info->bus_info, sizeof info->bus_info); 1045 } 1046 EXPORT_SYMBOL_GPL(usbnet_get_drvinfo); 1047 1048 u32 usbnet_get_msglevel (struct net_device *net) 1049 { 1050 struct usbnet *dev = netdev_priv(net); 1051 1052 return dev->msg_enable; 1053 } 1054 EXPORT_SYMBOL_GPL(usbnet_get_msglevel); 1055 1056 void usbnet_set_msglevel (struct net_device *net, u32 level) 1057 { 1058 struct usbnet *dev = netdev_priv(net); 1059 1060 dev->msg_enable = level; 1061 } 1062 EXPORT_SYMBOL_GPL(usbnet_set_msglevel); 1063 1064 /* drivers may override default ethtool_ops in their bind() routine */ 1065 static const struct ethtool_ops usbnet_ethtool_ops = { 1066 .get_link = usbnet_get_link, 1067 .nway_reset = usbnet_nway_reset, 1068 .get_drvinfo = usbnet_get_drvinfo, 1069 .get_msglevel = usbnet_get_msglevel, 1070 .set_msglevel = usbnet_set_msglevel, 1071 .get_ts_info = ethtool_op_get_ts_info, 1072 .get_link_ksettings = usbnet_get_link_ksettings, 1073 .set_link_ksettings = usbnet_set_link_ksettings, 1074 }; 1075 1076 /*-------------------------------------------------------------------------*/ 1077 1078 static void __handle_link_change(struct usbnet *dev) 1079 { 1080 if (!test_bit(EVENT_DEV_OPEN, &dev->flags)) 1081 return; 1082 1083 if (!netif_carrier_ok(dev->net)) { 1084 /* kill URBs for reading packets to save bus bandwidth */ 1085 unlink_urbs(dev, &dev->rxq); 1086 1087 /* 1088 * tx_timeout will unlink URBs for sending packets and 1089 * tx queue is stopped by netcore after link becomes off 1090 */ 1091 } else { 1092 /* submitting URBs for reading packets */ 1093 tasklet_schedule(&dev->bh); 1094 } 1095 1096 /* hard_mtu or rx_urb_size may change during link change */ 1097 usbnet_update_max_qlen(dev); 1098 1099 clear_bit(EVENT_LINK_CHANGE, &dev->flags); 1100 } 1101 1102 static void usbnet_set_rx_mode(struct net_device *net) 1103 { 1104 struct usbnet *dev = netdev_priv(net); 1105 1106 usbnet_defer_kevent(dev, EVENT_SET_RX_MODE); 1107 } 1108 1109 static void __handle_set_rx_mode(struct usbnet *dev) 1110 { 1111 if (dev->driver_info->set_rx_mode) 1112 (dev->driver_info->set_rx_mode)(dev); 1113 1114 clear_bit(EVENT_SET_RX_MODE, &dev->flags); 1115 } 1116 1117 /* work that cannot be done in interrupt context uses keventd. 1118 * 1119 * NOTE: with 2.5 we could do more of this using completion callbacks, 1120 * especially now that control transfers can be queued. 1121 */ 1122 static void 1123 usbnet_deferred_kevent (struct work_struct *work) 1124 { 1125 struct usbnet *dev = 1126 container_of(work, struct usbnet, kevent); 1127 int status; 1128 1129 /* usb_clear_halt() needs a thread context */ 1130 if (test_bit (EVENT_TX_HALT, &dev->flags)) { 1131 unlink_urbs (dev, &dev->txq); 1132 status = usb_autopm_get_interface(dev->intf); 1133 if (status < 0) 1134 goto fail_pipe; 1135 status = usb_clear_halt (dev->udev, dev->out); 1136 usb_autopm_put_interface(dev->intf); 1137 if (status < 0 && 1138 status != -EPIPE && 1139 status != -ESHUTDOWN) { 1140 if (netif_msg_tx_err (dev)) 1141 fail_pipe: 1142 netdev_err(dev->net, "can't clear tx halt, status %d\n", 1143 status); 1144 } else { 1145 clear_bit (EVENT_TX_HALT, &dev->flags); 1146 if (status != -ESHUTDOWN) 1147 netif_wake_queue (dev->net); 1148 } 1149 } 1150 if (test_bit (EVENT_RX_HALT, &dev->flags)) { 1151 unlink_urbs (dev, &dev->rxq); 1152 status = usb_autopm_get_interface(dev->intf); 1153 if (status < 0) 1154 goto fail_halt; 1155 status = usb_clear_halt (dev->udev, dev->in); 1156 usb_autopm_put_interface(dev->intf); 1157 if (status < 0 && 1158 status != -EPIPE && 1159 status != -ESHUTDOWN) { 1160 if (netif_msg_rx_err (dev)) 1161 fail_halt: 1162 netdev_err(dev->net, "can't clear rx halt, status %d\n", 1163 status); 1164 } else { 1165 clear_bit (EVENT_RX_HALT, &dev->flags); 1166 tasklet_schedule (&dev->bh); 1167 } 1168 } 1169 1170 /* tasklet could resubmit itself forever if memory is tight */ 1171 if (test_bit (EVENT_RX_MEMORY, &dev->flags)) { 1172 struct urb *urb = NULL; 1173 int resched = 1; 1174 1175 if (netif_running (dev->net)) 1176 urb = usb_alloc_urb (0, GFP_KERNEL); 1177 else 1178 clear_bit (EVENT_RX_MEMORY, &dev->flags); 1179 if (urb != NULL) { 1180 clear_bit (EVENT_RX_MEMORY, &dev->flags); 1181 status = usb_autopm_get_interface(dev->intf); 1182 if (status < 0) { 1183 usb_free_urb(urb); 1184 goto fail_lowmem; 1185 } 1186 if (rx_submit (dev, urb, GFP_KERNEL) == -ENOLINK) 1187 resched = 0; 1188 usb_autopm_put_interface(dev->intf); 1189 fail_lowmem: 1190 if (resched) 1191 tasklet_schedule (&dev->bh); 1192 } 1193 } 1194 1195 if (test_bit (EVENT_LINK_RESET, &dev->flags)) { 1196 const struct driver_info *info = dev->driver_info; 1197 int retval = 0; 1198 1199 clear_bit (EVENT_LINK_RESET, &dev->flags); 1200 status = usb_autopm_get_interface(dev->intf); 1201 if (status < 0) 1202 goto skip_reset; 1203 if(info->link_reset && (retval = info->link_reset(dev)) < 0) { 1204 usb_autopm_put_interface(dev->intf); 1205 skip_reset: 1206 netdev_info(dev->net, "link reset failed (%d) usbnet usb-%s-%s, %s\n", 1207 retval, 1208 dev->udev->bus->bus_name, 1209 dev->udev->devpath, 1210 info->description); 1211 } else { 1212 usb_autopm_put_interface(dev->intf); 1213 } 1214 1215 /* handle link change from link resetting */ 1216 __handle_link_change(dev); 1217 } 1218 1219 if (test_bit (EVENT_LINK_CHANGE, &dev->flags)) 1220 __handle_link_change(dev); 1221 1222 if (test_bit (EVENT_SET_RX_MODE, &dev->flags)) 1223 __handle_set_rx_mode(dev); 1224 1225 1226 if (dev->flags) 1227 netdev_dbg(dev->net, "kevent done, flags = 0x%lx\n", dev->flags); 1228 } 1229 1230 /*-------------------------------------------------------------------------*/ 1231 1232 static void tx_complete (struct urb *urb) 1233 { 1234 struct sk_buff *skb = (struct sk_buff *) urb->context; 1235 struct skb_data *entry = (struct skb_data *) skb->cb; 1236 struct usbnet *dev = entry->dev; 1237 1238 if (urb->status == 0) { 1239 struct pcpu_sw_netstats *stats64 = this_cpu_ptr(dev->stats64); 1240 unsigned long flags; 1241 1242 flags = u64_stats_update_begin_irqsave(&stats64->syncp); 1243 stats64->tx_packets += entry->packets; 1244 stats64->tx_bytes += entry->length; 1245 u64_stats_update_end_irqrestore(&stats64->syncp, flags); 1246 } else { 1247 dev->net->stats.tx_errors++; 1248 1249 switch (urb->status) { 1250 case -EPIPE: 1251 usbnet_defer_kevent (dev, EVENT_TX_HALT); 1252 break; 1253 1254 /* software-driven interface shutdown */ 1255 case -ECONNRESET: // async unlink 1256 case -ESHUTDOWN: // hardware gone 1257 break; 1258 1259 /* like rx, tx gets controller i/o faults during hub_wq 1260 * delays and so it uses the same throttling mechanism. 1261 */ 1262 case -EPROTO: 1263 case -ETIME: 1264 case -EILSEQ: 1265 usb_mark_last_busy(dev->udev); 1266 if (!timer_pending (&dev->delay)) { 1267 mod_timer (&dev->delay, 1268 jiffies + THROTTLE_JIFFIES); 1269 netif_dbg(dev, link, dev->net, 1270 "tx throttle %d\n", urb->status); 1271 } 1272 netif_stop_queue (dev->net); 1273 break; 1274 default: 1275 netif_dbg(dev, tx_err, dev->net, 1276 "tx err %d\n", entry->urb->status); 1277 break; 1278 } 1279 } 1280 1281 usb_autopm_put_interface_async(dev->intf); 1282 (void) defer_bh(dev, skb, &dev->txq, tx_done); 1283 } 1284 1285 /*-------------------------------------------------------------------------*/ 1286 1287 void usbnet_tx_timeout (struct net_device *net) 1288 { 1289 struct usbnet *dev = netdev_priv(net); 1290 1291 unlink_urbs (dev, &dev->txq); 1292 tasklet_schedule (&dev->bh); 1293 /* this needs to be handled individually because the generic layer 1294 * doesn't know what is sufficient and could not restore private 1295 * information if a remedy of an unconditional reset were used. 1296 */ 1297 if (dev->driver_info->recover) 1298 (dev->driver_info->recover)(dev); 1299 } 1300 EXPORT_SYMBOL_GPL(usbnet_tx_timeout); 1301 1302 /*-------------------------------------------------------------------------*/ 1303 1304 static int build_dma_sg(const struct sk_buff *skb, struct urb *urb) 1305 { 1306 unsigned num_sgs, total_len = 0; 1307 int i, s = 0; 1308 1309 num_sgs = skb_shinfo(skb)->nr_frags + 1; 1310 if (num_sgs == 1) 1311 return 0; 1312 1313 /* reserve one for zero packet */ 1314 urb->sg = kmalloc_array(num_sgs + 1, sizeof(struct scatterlist), 1315 GFP_ATOMIC); 1316 if (!urb->sg) 1317 return -ENOMEM; 1318 1319 urb->num_sgs = num_sgs; 1320 sg_init_table(urb->sg, urb->num_sgs + 1); 1321 1322 sg_set_buf(&urb->sg[s++], skb->data, skb_headlen(skb)); 1323 total_len += skb_headlen(skb); 1324 1325 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 1326 struct skb_frag_struct *f = &skb_shinfo(skb)->frags[i]; 1327 1328 total_len += skb_frag_size(f); 1329 sg_set_page(&urb->sg[i + s], f->page.p, f->size, 1330 f->page_offset); 1331 } 1332 urb->transfer_buffer_length = total_len; 1333 1334 return 1; 1335 } 1336 1337 netdev_tx_t usbnet_start_xmit (struct sk_buff *skb, 1338 struct net_device *net) 1339 { 1340 struct usbnet *dev = netdev_priv(net); 1341 unsigned int length; 1342 struct urb *urb = NULL; 1343 struct skb_data *entry; 1344 const struct driver_info *info = dev->driver_info; 1345 unsigned long flags; 1346 int retval; 1347 1348 if (skb) 1349 skb_tx_timestamp(skb); 1350 1351 // some devices want funky USB-level framing, for 1352 // win32 driver (usually) and/or hardware quirks 1353 if (info->tx_fixup) { 1354 skb = info->tx_fixup (dev, skb, GFP_ATOMIC); 1355 if (!skb) { 1356 /* packet collected; minidriver waiting for more */ 1357 if (info->flags & FLAG_MULTI_PACKET) 1358 goto not_drop; 1359 netif_dbg(dev, tx_err, dev->net, "can't tx_fixup skb\n"); 1360 goto drop; 1361 } 1362 } 1363 1364 if (!(urb = usb_alloc_urb (0, GFP_ATOMIC))) { 1365 netif_dbg(dev, tx_err, dev->net, "no urb\n"); 1366 goto drop; 1367 } 1368 1369 entry = (struct skb_data *) skb->cb; 1370 entry->urb = urb; 1371 entry->dev = dev; 1372 1373 usb_fill_bulk_urb (urb, dev->udev, dev->out, 1374 skb->data, skb->len, tx_complete, skb); 1375 if (dev->can_dma_sg) { 1376 if (build_dma_sg(skb, urb) < 0) 1377 goto drop; 1378 } 1379 length = urb->transfer_buffer_length; 1380 1381 /* don't assume the hardware handles USB_ZERO_PACKET 1382 * NOTE: strictly conforming cdc-ether devices should expect 1383 * the ZLP here, but ignore the one-byte packet. 1384 * NOTE2: CDC NCM specification is different from CDC ECM when 1385 * handling ZLP/short packets, so cdc_ncm driver will make short 1386 * packet itself if needed. 1387 */ 1388 if (length % dev->maxpacket == 0) { 1389 if (!(info->flags & FLAG_SEND_ZLP)) { 1390 if (!(info->flags & FLAG_MULTI_PACKET)) { 1391 length++; 1392 if (skb_tailroom(skb) && !urb->num_sgs) { 1393 skb->data[skb->len] = 0; 1394 __skb_put(skb, 1); 1395 } else if (urb->num_sgs) 1396 sg_set_buf(&urb->sg[urb->num_sgs++], 1397 dev->padding_pkt, 1); 1398 } 1399 } else 1400 urb->transfer_flags |= URB_ZERO_PACKET; 1401 } 1402 urb->transfer_buffer_length = length; 1403 1404 if (info->flags & FLAG_MULTI_PACKET) { 1405 /* Driver has set number of packets and a length delta. 1406 * Calculate the complete length and ensure that it's 1407 * positive. 1408 */ 1409 entry->length += length; 1410 if (WARN_ON_ONCE(entry->length <= 0)) 1411 entry->length = length; 1412 } else { 1413 usbnet_set_skb_tx_stats(skb, 1, length); 1414 } 1415 1416 spin_lock_irqsave(&dev->txq.lock, flags); 1417 retval = usb_autopm_get_interface_async(dev->intf); 1418 if (retval < 0) { 1419 spin_unlock_irqrestore(&dev->txq.lock, flags); 1420 goto drop; 1421 } 1422 1423 #ifdef CONFIG_PM 1424 /* if this triggers the device is still a sleep */ 1425 if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) { 1426 /* transmission will be done in resume */ 1427 usb_anchor_urb(urb, &dev->deferred); 1428 /* no use to process more packets */ 1429 netif_stop_queue(net); 1430 usb_put_urb(urb); 1431 spin_unlock_irqrestore(&dev->txq.lock, flags); 1432 netdev_dbg(dev->net, "Delaying transmission for resumption\n"); 1433 goto deferred; 1434 } 1435 #endif 1436 1437 switch ((retval = usb_submit_urb (urb, GFP_ATOMIC))) { 1438 case -EPIPE: 1439 netif_stop_queue (net); 1440 usbnet_defer_kevent (dev, EVENT_TX_HALT); 1441 usb_autopm_put_interface_async(dev->intf); 1442 break; 1443 default: 1444 usb_autopm_put_interface_async(dev->intf); 1445 netif_dbg(dev, tx_err, dev->net, 1446 "tx: submit urb err %d\n", retval); 1447 break; 1448 case 0: 1449 netif_trans_update(net); 1450 __usbnet_queue_skb(&dev->txq, skb, tx_start); 1451 if (dev->txq.qlen >= TX_QLEN (dev)) 1452 netif_stop_queue (net); 1453 } 1454 spin_unlock_irqrestore (&dev->txq.lock, flags); 1455 1456 if (retval) { 1457 netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", retval); 1458 drop: 1459 dev->net->stats.tx_dropped++; 1460 not_drop: 1461 if (skb) 1462 dev_kfree_skb_any (skb); 1463 if (urb) { 1464 kfree(urb->sg); 1465 usb_free_urb(urb); 1466 } 1467 } else 1468 netif_dbg(dev, tx_queued, dev->net, 1469 "> tx, len %u, type 0x%x\n", length, skb->protocol); 1470 #ifdef CONFIG_PM 1471 deferred: 1472 #endif 1473 return NETDEV_TX_OK; 1474 } 1475 EXPORT_SYMBOL_GPL(usbnet_start_xmit); 1476 1477 static int rx_alloc_submit(struct usbnet *dev, gfp_t flags) 1478 { 1479 struct urb *urb; 1480 int i; 1481 int ret = 0; 1482 1483 /* don't refill the queue all at once */ 1484 for (i = 0; i < 10 && dev->rxq.qlen < RX_QLEN(dev); i++) { 1485 urb = usb_alloc_urb(0, flags); 1486 if (urb != NULL) { 1487 ret = rx_submit(dev, urb, flags); 1488 if (ret) 1489 goto err; 1490 } else { 1491 ret = -ENOMEM; 1492 goto err; 1493 } 1494 } 1495 err: 1496 return ret; 1497 } 1498 1499 /*-------------------------------------------------------------------------*/ 1500 1501 // tasklet (work deferred from completions, in_irq) or timer 1502 1503 static void usbnet_bh (struct timer_list *t) 1504 { 1505 struct usbnet *dev = from_timer(dev, t, delay); 1506 struct sk_buff *skb; 1507 struct skb_data *entry; 1508 1509 while ((skb = skb_dequeue (&dev->done))) { 1510 entry = (struct skb_data *) skb->cb; 1511 switch (entry->state) { 1512 case rx_done: 1513 entry->state = rx_cleanup; 1514 rx_process (dev, skb); 1515 continue; 1516 case tx_done: 1517 kfree(entry->urb->sg); 1518 /* fall through */ 1519 case rx_cleanup: 1520 usb_free_urb (entry->urb); 1521 dev_kfree_skb (skb); 1522 continue; 1523 default: 1524 netdev_dbg(dev->net, "bogus skb state %d\n", entry->state); 1525 } 1526 } 1527 1528 /* restart RX again after disabling due to high error rate */ 1529 clear_bit(EVENT_RX_KILL, &dev->flags); 1530 1531 /* waiting for all pending urbs to complete? 1532 * only then can we forgo submitting anew 1533 */ 1534 if (waitqueue_active(&dev->wait)) { 1535 if (dev->txq.qlen + dev->rxq.qlen + dev->done.qlen == 0) 1536 wake_up_all(&dev->wait); 1537 1538 // or are we maybe short a few urbs? 1539 } else if (netif_running (dev->net) && 1540 netif_device_present (dev->net) && 1541 netif_carrier_ok(dev->net) && 1542 !timer_pending(&dev->delay) && 1543 !test_bit(EVENT_RX_PAUSED, &dev->flags) && 1544 !test_bit(EVENT_RX_HALT, &dev->flags)) { 1545 int temp = dev->rxq.qlen; 1546 1547 if (temp < RX_QLEN(dev)) { 1548 if (rx_alloc_submit(dev, GFP_ATOMIC) == -ENOLINK) 1549 return; 1550 if (temp != dev->rxq.qlen) 1551 netif_dbg(dev, link, dev->net, 1552 "rxqlen %d --> %d\n", 1553 temp, dev->rxq.qlen); 1554 if (dev->rxq.qlen < RX_QLEN(dev)) 1555 tasklet_schedule (&dev->bh); 1556 } 1557 if (dev->txq.qlen < TX_QLEN (dev)) 1558 netif_wake_queue (dev->net); 1559 } 1560 } 1561 1562 1563 /*------------------------------------------------------------------------- 1564 * 1565 * USB Device Driver support 1566 * 1567 *-------------------------------------------------------------------------*/ 1568 1569 // precondition: never called in_interrupt 1570 1571 void usbnet_disconnect (struct usb_interface *intf) 1572 { 1573 struct usbnet *dev; 1574 struct usb_device *xdev; 1575 struct net_device *net; 1576 1577 dev = usb_get_intfdata(intf); 1578 usb_set_intfdata(intf, NULL); 1579 if (!dev) 1580 return; 1581 1582 xdev = interface_to_usbdev (intf); 1583 1584 netif_info(dev, probe, dev->net, "unregister '%s' usb-%s-%s, %s\n", 1585 intf->dev.driver->name, 1586 xdev->bus->bus_name, xdev->devpath, 1587 dev->driver_info->description); 1588 1589 net = dev->net; 1590 unregister_netdev (net); 1591 1592 cancel_work_sync(&dev->kevent); 1593 1594 usb_scuttle_anchored_urbs(&dev->deferred); 1595 1596 if (dev->driver_info->unbind) 1597 dev->driver_info->unbind (dev, intf); 1598 1599 usb_kill_urb(dev->interrupt); 1600 usb_free_urb(dev->interrupt); 1601 kfree(dev->padding_pkt); 1602 1603 free_percpu(dev->stats64); 1604 free_netdev(net); 1605 } 1606 EXPORT_SYMBOL_GPL(usbnet_disconnect); 1607 1608 static const struct net_device_ops usbnet_netdev_ops = { 1609 .ndo_open = usbnet_open, 1610 .ndo_stop = usbnet_stop, 1611 .ndo_start_xmit = usbnet_start_xmit, 1612 .ndo_tx_timeout = usbnet_tx_timeout, 1613 .ndo_set_rx_mode = usbnet_set_rx_mode, 1614 .ndo_change_mtu = usbnet_change_mtu, 1615 .ndo_get_stats64 = usbnet_get_stats64, 1616 .ndo_set_mac_address = eth_mac_addr, 1617 .ndo_validate_addr = eth_validate_addr, 1618 }; 1619 1620 /*-------------------------------------------------------------------------*/ 1621 1622 // precondition: never called in_interrupt 1623 1624 static struct device_type wlan_type = { 1625 .name = "wlan", 1626 }; 1627 1628 static struct device_type wwan_type = { 1629 .name = "wwan", 1630 }; 1631 1632 int 1633 usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod) 1634 { 1635 struct usbnet *dev; 1636 struct net_device *net; 1637 struct usb_host_interface *interface; 1638 const struct driver_info *info; 1639 struct usb_device *xdev; 1640 int status; 1641 const char *name; 1642 struct usb_driver *driver = to_usb_driver(udev->dev.driver); 1643 1644 /* usbnet already took usb runtime pm, so have to enable the feature 1645 * for usb interface, otherwise usb_autopm_get_interface may return 1646 * failure if RUNTIME_PM is enabled. 1647 */ 1648 if (!driver->supports_autosuspend) { 1649 driver->supports_autosuspend = 1; 1650 pm_runtime_enable(&udev->dev); 1651 } 1652 1653 name = udev->dev.driver->name; 1654 info = (const struct driver_info *) prod->driver_info; 1655 if (!info) { 1656 dev_dbg (&udev->dev, "blacklisted by %s\n", name); 1657 return -ENODEV; 1658 } 1659 xdev = interface_to_usbdev (udev); 1660 interface = udev->cur_altsetting; 1661 1662 status = -ENOMEM; 1663 1664 // set up our own records 1665 net = alloc_etherdev(sizeof(*dev)); 1666 if (!net) 1667 goto out; 1668 1669 /* netdev_printk() needs this so do it as early as possible */ 1670 SET_NETDEV_DEV(net, &udev->dev); 1671 1672 dev = netdev_priv(net); 1673 dev->udev = xdev; 1674 dev->intf = udev; 1675 dev->driver_info = info; 1676 dev->driver_name = name; 1677 1678 dev->stats64 = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); 1679 if (!dev->stats64) 1680 goto out0; 1681 1682 dev->msg_enable = netif_msg_init (msg_level, NETIF_MSG_DRV 1683 | NETIF_MSG_PROBE | NETIF_MSG_LINK); 1684 init_waitqueue_head(&dev->wait); 1685 skb_queue_head_init (&dev->rxq); 1686 skb_queue_head_init (&dev->txq); 1687 skb_queue_head_init (&dev->done); 1688 skb_queue_head_init(&dev->rxq_pause); 1689 dev->bh.func = (void (*)(unsigned long))usbnet_bh; 1690 dev->bh.data = (unsigned long)&dev->delay; 1691 INIT_WORK (&dev->kevent, usbnet_deferred_kevent); 1692 init_usb_anchor(&dev->deferred); 1693 timer_setup(&dev->delay, usbnet_bh, 0); 1694 mutex_init (&dev->phy_mutex); 1695 mutex_init(&dev->interrupt_mutex); 1696 dev->interrupt_count = 0; 1697 1698 dev->net = net; 1699 strcpy (net->name, "usb%d"); 1700 memcpy (net->dev_addr, node_id, sizeof node_id); 1701 1702 /* rx and tx sides can use different message sizes; 1703 * bind() should set rx_urb_size in that case. 1704 */ 1705 dev->hard_mtu = net->mtu + net->hard_header_len; 1706 net->min_mtu = 0; 1707 net->max_mtu = ETH_MAX_MTU; 1708 1709 net->netdev_ops = &usbnet_netdev_ops; 1710 net->watchdog_timeo = TX_TIMEOUT_JIFFIES; 1711 net->ethtool_ops = &usbnet_ethtool_ops; 1712 1713 // allow device-specific bind/init procedures 1714 // NOTE net->name still not usable ... 1715 if (info->bind) { 1716 status = info->bind (dev, udev); 1717 if (status < 0) 1718 goto out1; 1719 1720 // heuristic: "usb%d" for links we know are two-host, 1721 // else "eth%d" when there's reasonable doubt. userspace 1722 // can rename the link if it knows better. 1723 if ((dev->driver_info->flags & FLAG_ETHER) != 0 && 1724 ((dev->driver_info->flags & FLAG_POINTTOPOINT) == 0 || 1725 (net->dev_addr [0] & 0x02) == 0)) 1726 strcpy (net->name, "eth%d"); 1727 /* WLAN devices should always be named "wlan%d" */ 1728 if ((dev->driver_info->flags & FLAG_WLAN) != 0) 1729 strcpy(net->name, "wlan%d"); 1730 /* WWAN devices should always be named "wwan%d" */ 1731 if ((dev->driver_info->flags & FLAG_WWAN) != 0) 1732 strcpy(net->name, "wwan%d"); 1733 1734 /* devices that cannot do ARP */ 1735 if ((dev->driver_info->flags & FLAG_NOARP) != 0) 1736 net->flags |= IFF_NOARP; 1737 1738 /* maybe the remote can't receive an Ethernet MTU */ 1739 if (net->mtu > (dev->hard_mtu - net->hard_header_len)) 1740 net->mtu = dev->hard_mtu - net->hard_header_len; 1741 } else if (!info->in || !info->out) 1742 status = usbnet_get_endpoints (dev, udev); 1743 else { 1744 dev->in = usb_rcvbulkpipe (xdev, info->in); 1745 dev->out = usb_sndbulkpipe (xdev, info->out); 1746 if (!(info->flags & FLAG_NO_SETINT)) 1747 status = usb_set_interface (xdev, 1748 interface->desc.bInterfaceNumber, 1749 interface->desc.bAlternateSetting); 1750 else 1751 status = 0; 1752 1753 } 1754 if (status >= 0 && dev->status) 1755 status = init_status (dev, udev); 1756 if (status < 0) 1757 goto out3; 1758 1759 if (!dev->rx_urb_size) 1760 dev->rx_urb_size = dev->hard_mtu; 1761 dev->maxpacket = usb_maxpacket (dev->udev, dev->out, 1); 1762 1763 /* let userspace know we have a random address */ 1764 if (ether_addr_equal(net->dev_addr, node_id)) 1765 net->addr_assign_type = NET_ADDR_RANDOM; 1766 1767 if ((dev->driver_info->flags & FLAG_WLAN) != 0) 1768 SET_NETDEV_DEVTYPE(net, &wlan_type); 1769 if ((dev->driver_info->flags & FLAG_WWAN) != 0) 1770 SET_NETDEV_DEVTYPE(net, &wwan_type); 1771 1772 /* initialize max rx_qlen and tx_qlen */ 1773 usbnet_update_max_qlen(dev); 1774 1775 if (dev->can_dma_sg && !(info->flags & FLAG_SEND_ZLP) && 1776 !(info->flags & FLAG_MULTI_PACKET)) { 1777 dev->padding_pkt = kzalloc(1, GFP_KERNEL); 1778 if (!dev->padding_pkt) { 1779 status = -ENOMEM; 1780 goto out4; 1781 } 1782 } 1783 1784 status = register_netdev (net); 1785 if (status) 1786 goto out5; 1787 netif_info(dev, probe, dev->net, 1788 "register '%s' at usb-%s-%s, %s, %pM\n", 1789 udev->dev.driver->name, 1790 xdev->bus->bus_name, xdev->devpath, 1791 dev->driver_info->description, 1792 net->dev_addr); 1793 1794 // ok, it's ready to go. 1795 usb_set_intfdata (udev, dev); 1796 1797 netif_device_attach (net); 1798 1799 if (dev->driver_info->flags & FLAG_LINK_INTR) 1800 usbnet_link_change(dev, 0, 0); 1801 1802 return 0; 1803 1804 out5: 1805 kfree(dev->padding_pkt); 1806 out4: 1807 usb_free_urb(dev->interrupt); 1808 out3: 1809 if (info->unbind) 1810 info->unbind (dev, udev); 1811 out1: 1812 /* subdrivers must undo all they did in bind() if they 1813 * fail it, but we may fail later and a deferred kevent 1814 * may trigger an error resubmitting itself and, worse, 1815 * schedule a timer. So we kill it all just in case. 1816 */ 1817 cancel_work_sync(&dev->kevent); 1818 del_timer_sync(&dev->delay); 1819 free_percpu(dev->stats64); 1820 out0: 1821 free_netdev(net); 1822 out: 1823 return status; 1824 } 1825 EXPORT_SYMBOL_GPL(usbnet_probe); 1826 1827 /*-------------------------------------------------------------------------*/ 1828 1829 /* 1830 * suspend the whole driver as soon as the first interface is suspended 1831 * resume only when the last interface is resumed 1832 */ 1833 1834 int usbnet_suspend (struct usb_interface *intf, pm_message_t message) 1835 { 1836 struct usbnet *dev = usb_get_intfdata(intf); 1837 1838 if (!dev->suspend_count++) { 1839 spin_lock_irq(&dev->txq.lock); 1840 /* don't autosuspend while transmitting */ 1841 if (dev->txq.qlen && PMSG_IS_AUTO(message)) { 1842 dev->suspend_count--; 1843 spin_unlock_irq(&dev->txq.lock); 1844 return -EBUSY; 1845 } else { 1846 set_bit(EVENT_DEV_ASLEEP, &dev->flags); 1847 spin_unlock_irq(&dev->txq.lock); 1848 } 1849 /* 1850 * accelerate emptying of the rx and queues, to avoid 1851 * having everything error out. 1852 */ 1853 netif_device_detach (dev->net); 1854 usbnet_terminate_urbs(dev); 1855 __usbnet_status_stop_force(dev); 1856 1857 /* 1858 * reattach so runtime management can use and 1859 * wake the device 1860 */ 1861 netif_device_attach (dev->net); 1862 } 1863 return 0; 1864 } 1865 EXPORT_SYMBOL_GPL(usbnet_suspend); 1866 1867 int usbnet_resume (struct usb_interface *intf) 1868 { 1869 struct usbnet *dev = usb_get_intfdata(intf); 1870 struct sk_buff *skb; 1871 struct urb *res; 1872 int retval; 1873 1874 if (!--dev->suspend_count) { 1875 /* resume interrupt URB if it was previously submitted */ 1876 __usbnet_status_start_force(dev, GFP_NOIO); 1877 1878 spin_lock_irq(&dev->txq.lock); 1879 while ((res = usb_get_from_anchor(&dev->deferred))) { 1880 1881 skb = (struct sk_buff *)res->context; 1882 retval = usb_submit_urb(res, GFP_ATOMIC); 1883 if (retval < 0) { 1884 dev_kfree_skb_any(skb); 1885 kfree(res->sg); 1886 usb_free_urb(res); 1887 usb_autopm_put_interface_async(dev->intf); 1888 } else { 1889 netif_trans_update(dev->net); 1890 __skb_queue_tail(&dev->txq, skb); 1891 } 1892 } 1893 1894 smp_mb(); 1895 clear_bit(EVENT_DEV_ASLEEP, &dev->flags); 1896 spin_unlock_irq(&dev->txq.lock); 1897 1898 if (test_bit(EVENT_DEV_OPEN, &dev->flags)) { 1899 /* handle remote wakeup ASAP 1900 * we cannot race against stop 1901 */ 1902 if (netif_device_present(dev->net) && 1903 !timer_pending(&dev->delay) && 1904 !test_bit(EVENT_RX_HALT, &dev->flags)) 1905 rx_alloc_submit(dev, GFP_NOIO); 1906 1907 if (!(dev->txq.qlen >= TX_QLEN(dev))) 1908 netif_tx_wake_all_queues(dev->net); 1909 tasklet_schedule (&dev->bh); 1910 } 1911 } 1912 1913 if (test_and_clear_bit(EVENT_DEVICE_REPORT_IDLE, &dev->flags)) 1914 usb_autopm_get_interface_no_resume(intf); 1915 1916 return 0; 1917 } 1918 EXPORT_SYMBOL_GPL(usbnet_resume); 1919 1920 /* 1921 * Either a subdriver implements manage_power, then it is assumed to always 1922 * be ready to be suspended or it reports the readiness to be suspended 1923 * explicitly 1924 */ 1925 void usbnet_device_suggests_idle(struct usbnet *dev) 1926 { 1927 if (!test_and_set_bit(EVENT_DEVICE_REPORT_IDLE, &dev->flags)) { 1928 dev->intf->needs_remote_wakeup = 1; 1929 usb_autopm_put_interface_async(dev->intf); 1930 } 1931 } 1932 EXPORT_SYMBOL(usbnet_device_suggests_idle); 1933 1934 /* 1935 * For devices that can do without special commands 1936 */ 1937 int usbnet_manage_power(struct usbnet *dev, int on) 1938 { 1939 dev->intf->needs_remote_wakeup = on; 1940 return 0; 1941 } 1942 EXPORT_SYMBOL(usbnet_manage_power); 1943 1944 void usbnet_link_change(struct usbnet *dev, bool link, bool need_reset) 1945 { 1946 /* update link after link is reseted */ 1947 if (link && !need_reset) 1948 netif_carrier_on(dev->net); 1949 else 1950 netif_carrier_off(dev->net); 1951 1952 if (need_reset && link) 1953 usbnet_defer_kevent(dev, EVENT_LINK_RESET); 1954 else 1955 usbnet_defer_kevent(dev, EVENT_LINK_CHANGE); 1956 } 1957 EXPORT_SYMBOL(usbnet_link_change); 1958 1959 /*-------------------------------------------------------------------------*/ 1960 static int __usbnet_read_cmd(struct usbnet *dev, u8 cmd, u8 reqtype, 1961 u16 value, u16 index, void *data, u16 size) 1962 { 1963 void *buf = NULL; 1964 int err = -ENOMEM; 1965 1966 netdev_dbg(dev->net, "usbnet_read_cmd cmd=0x%02x reqtype=%02x" 1967 " value=0x%04x index=0x%04x size=%d\n", 1968 cmd, reqtype, value, index, size); 1969 1970 if (size) { 1971 buf = kmalloc(size, GFP_KERNEL); 1972 if (!buf) 1973 goto out; 1974 } 1975 1976 err = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0), 1977 cmd, reqtype, value, index, buf, size, 1978 USB_CTRL_GET_TIMEOUT); 1979 if (err > 0 && err <= size) { 1980 if (data) 1981 memcpy(data, buf, err); 1982 else 1983 netdev_dbg(dev->net, 1984 "Huh? Data requested but thrown away.\n"); 1985 } 1986 kfree(buf); 1987 out: 1988 return err; 1989 } 1990 1991 static int __usbnet_write_cmd(struct usbnet *dev, u8 cmd, u8 reqtype, 1992 u16 value, u16 index, const void *data, 1993 u16 size) 1994 { 1995 void *buf = NULL; 1996 int err = -ENOMEM; 1997 1998 netdev_dbg(dev->net, "usbnet_write_cmd cmd=0x%02x reqtype=%02x" 1999 " value=0x%04x index=0x%04x size=%d\n", 2000 cmd, reqtype, value, index, size); 2001 2002 if (data) { 2003 buf = kmemdup(data, size, GFP_KERNEL); 2004 if (!buf) 2005 goto out; 2006 } else { 2007 if (size) { 2008 WARN_ON_ONCE(1); 2009 err = -EINVAL; 2010 goto out; 2011 } 2012 } 2013 2014 err = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), 2015 cmd, reqtype, value, index, buf, size, 2016 USB_CTRL_SET_TIMEOUT); 2017 kfree(buf); 2018 2019 out: 2020 return err; 2021 } 2022 2023 /* 2024 * The function can't be called inside suspend/resume callback, 2025 * otherwise deadlock will be caused. 2026 */ 2027 int usbnet_read_cmd(struct usbnet *dev, u8 cmd, u8 reqtype, 2028 u16 value, u16 index, void *data, u16 size) 2029 { 2030 int ret; 2031 2032 if (usb_autopm_get_interface(dev->intf) < 0) 2033 return -ENODEV; 2034 ret = __usbnet_read_cmd(dev, cmd, reqtype, value, index, 2035 data, size); 2036 usb_autopm_put_interface(dev->intf); 2037 return ret; 2038 } 2039 EXPORT_SYMBOL_GPL(usbnet_read_cmd); 2040 2041 /* 2042 * The function can't be called inside suspend/resume callback, 2043 * otherwise deadlock will be caused. 2044 */ 2045 int usbnet_write_cmd(struct usbnet *dev, u8 cmd, u8 reqtype, 2046 u16 value, u16 index, const void *data, u16 size) 2047 { 2048 int ret; 2049 2050 if (usb_autopm_get_interface(dev->intf) < 0) 2051 return -ENODEV; 2052 ret = __usbnet_write_cmd(dev, cmd, reqtype, value, index, 2053 data, size); 2054 usb_autopm_put_interface(dev->intf); 2055 return ret; 2056 } 2057 EXPORT_SYMBOL_GPL(usbnet_write_cmd); 2058 2059 /* 2060 * The function can be called inside suspend/resume callback safely 2061 * and should only be called by suspend/resume callback generally. 2062 */ 2063 int usbnet_read_cmd_nopm(struct usbnet *dev, u8 cmd, u8 reqtype, 2064 u16 value, u16 index, void *data, u16 size) 2065 { 2066 return __usbnet_read_cmd(dev, cmd, reqtype, value, index, 2067 data, size); 2068 } 2069 EXPORT_SYMBOL_GPL(usbnet_read_cmd_nopm); 2070 2071 /* 2072 * The function can be called inside suspend/resume callback safely 2073 * and should only be called by suspend/resume callback generally. 2074 */ 2075 int usbnet_write_cmd_nopm(struct usbnet *dev, u8 cmd, u8 reqtype, 2076 u16 value, u16 index, const void *data, 2077 u16 size) 2078 { 2079 return __usbnet_write_cmd(dev, cmd, reqtype, value, index, 2080 data, size); 2081 } 2082 EXPORT_SYMBOL_GPL(usbnet_write_cmd_nopm); 2083 2084 static void usbnet_async_cmd_cb(struct urb *urb) 2085 { 2086 struct usb_ctrlrequest *req = (struct usb_ctrlrequest *)urb->context; 2087 int status = urb->status; 2088 2089 if (status < 0) 2090 dev_dbg(&urb->dev->dev, "%s failed with %d", 2091 __func__, status); 2092 2093 kfree(req); 2094 usb_free_urb(urb); 2095 } 2096 2097 /* 2098 * The caller must make sure that device can't be put into suspend 2099 * state until the control URB completes. 2100 */ 2101 int usbnet_write_cmd_async(struct usbnet *dev, u8 cmd, u8 reqtype, 2102 u16 value, u16 index, const void *data, u16 size) 2103 { 2104 struct usb_ctrlrequest *req = NULL; 2105 struct urb *urb; 2106 int err = -ENOMEM; 2107 void *buf = NULL; 2108 2109 netdev_dbg(dev->net, "usbnet_write_cmd cmd=0x%02x reqtype=%02x" 2110 " value=0x%04x index=0x%04x size=%d\n", 2111 cmd, reqtype, value, index, size); 2112 2113 urb = usb_alloc_urb(0, GFP_ATOMIC); 2114 if (!urb) 2115 goto fail; 2116 2117 if (data) { 2118 buf = kmemdup(data, size, GFP_ATOMIC); 2119 if (!buf) { 2120 netdev_err(dev->net, "Error allocating buffer" 2121 " in %s!\n", __func__); 2122 goto fail_free; 2123 } 2124 } 2125 2126 req = kmalloc(sizeof(struct usb_ctrlrequest), GFP_ATOMIC); 2127 if (!req) 2128 goto fail_free_buf; 2129 2130 req->bRequestType = reqtype; 2131 req->bRequest = cmd; 2132 req->wValue = cpu_to_le16(value); 2133 req->wIndex = cpu_to_le16(index); 2134 req->wLength = cpu_to_le16(size); 2135 2136 usb_fill_control_urb(urb, dev->udev, 2137 usb_sndctrlpipe(dev->udev, 0), 2138 (void *)req, buf, size, 2139 usbnet_async_cmd_cb, req); 2140 urb->transfer_flags |= URB_FREE_BUFFER; 2141 2142 err = usb_submit_urb(urb, GFP_ATOMIC); 2143 if (err < 0) { 2144 netdev_err(dev->net, "Error submitting the control" 2145 " message: status=%d\n", err); 2146 goto fail_free; 2147 } 2148 return 0; 2149 2150 fail_free_buf: 2151 kfree(buf); 2152 fail_free: 2153 kfree(req); 2154 usb_free_urb(urb); 2155 fail: 2156 return err; 2157 2158 } 2159 EXPORT_SYMBOL_GPL(usbnet_write_cmd_async); 2160 /*-------------------------------------------------------------------------*/ 2161 2162 static int __init usbnet_init(void) 2163 { 2164 /* Compiler should optimize this out. */ 2165 BUILD_BUG_ON( 2166 FIELD_SIZEOF(struct sk_buff, cb) < sizeof(struct skb_data)); 2167 2168 eth_random_addr(node_id); 2169 return 0; 2170 } 2171 module_init(usbnet_init); 2172 2173 static void __exit usbnet_exit(void) 2174 { 2175 } 2176 module_exit(usbnet_exit); 2177 2178 MODULE_AUTHOR("David Brownell"); 2179 MODULE_DESCRIPTION("USB network driver framework"); 2180 MODULE_LICENSE("GPL"); 2181