// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * USB Network driver infrastructure
 * Copyright (C) 2000-2005 by David Brownell
 * Copyright (C) 2003-2005 David Hollis <dhollis@davehollis.com>
 */

/*
 * This is a generic "USB networking" framework that works with several
 * kinds of full and high speed networking devices: host-to-host cables,
 * smart usb peripherals, and actual Ethernet adapters.
 *
 * These devices usually differ in terms of control protocols (if they
 * even have one!) and sometimes they define new framing to wrap or batch
 * Ethernet packets. Otherwise, they talk to USB pretty much the same,
 * so interface (un)binding, endpoint I/O queues, fault handling, and other
 * issues can usefully be addressed by this framework.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ctype.h>
#include <linux/ethtool.h>
#include <linux/workqueue.h>
#include <linux/mii.h>
#include <linux/usb.h>
#include <linux/usb/usbnet.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/pm_runtime.h>

/*-------------------------------------------------------------------------*/

/*
 * Nineteen USB 1.1 max size bulk transactions per frame (ms), max.
 * Several dozen bytes of IPv4 data can fit in two such transactions.
 * One maximum size Ethernet packet takes twenty four of them.
 * For high speed, each frame comfortably fits almost 36 max size
 * Ethernet packets (so queues should be bigger).
 *
 * The goal is to let the USB host controller be busy for 5msec or
 * more before an irq is required, under load.  Jumbograms change
 * the equation.
 */
#define	MAX_QUEUE_MEMORY	(60 * 1518)
#define	RX_QLEN(dev)		((dev)->rx_qlen)
#define	TX_QLEN(dev)		((dev)->tx_qlen)
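
/*
 * For illustration: with the default 1518 byte rx_urb_size this budget
 * is 60 * 1518 = 91080 bytes, so usbnet_update_max_qlen() computes
 * rx_qlen = 91080 / 1518 = 60 URBs at high speed; super speed gets
 * five times the budget.
 */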

// reawaken network queue this soon after stopping; else watchdog barks
#define TX_TIMEOUT_JIFFIES	(5*HZ)

/* throttle rx/tx briefly after some faults, so hub_wq might disconnect()
 * us (it polls at HZ/4 usually) before we report too many false errors.
 */
#define THROTTLE_JIFFIES	(HZ/8)

// between wakeups
#define UNLINK_TIMEOUT_MS	3

/*-------------------------------------------------------------------------*/

/* use ethtool to change the level for any given device */
static int msg_level = -1;
module_param (msg_level, int, 0);
MODULE_PARM_DESC (msg_level, "Override default message level");

/*-------------------------------------------------------------------------*/

static const char * const usbnet_event_names[] = {
	[EVENT_TX_HALT]		   = "EVENT_TX_HALT",
	[EVENT_RX_HALT]		   = "EVENT_RX_HALT",
	[EVENT_RX_MEMORY]	   = "EVENT_RX_MEMORY",
	[EVENT_STS_SPLIT]	   = "EVENT_STS_SPLIT",
	[EVENT_LINK_RESET]	   = "EVENT_LINK_RESET",
	[EVENT_RX_PAUSED]	   = "EVENT_RX_PAUSED",
	[EVENT_DEV_ASLEEP]	   = "EVENT_DEV_ASLEEP",
	[EVENT_DEV_OPEN]	   = "EVENT_DEV_OPEN",
	[EVENT_DEVICE_REPORT_IDLE] = "EVENT_DEVICE_REPORT_IDLE",
	[EVENT_NO_RUNTIME_PM]	   = "EVENT_NO_RUNTIME_PM",
	[EVENT_RX_KILL]		   = "EVENT_RX_KILL",
	[EVENT_LINK_CHANGE]	   = "EVENT_LINK_CHANGE",
	[EVENT_SET_RX_MODE]	   = "EVENT_SET_RX_MODE",
	[EVENT_NO_IP_ALIGN]	   = "EVENT_NO_IP_ALIGN",
};

/* handles CDC Ethernet and many other network "bulk data" interfaces */
int usbnet_get_endpoints(struct usbnet *dev, struct usb_interface *intf)
{
	int				tmp;
	struct usb_host_interface	*alt = NULL;
	struct usb_host_endpoint	*in = NULL, *out = NULL;
	struct usb_host_endpoint	*status = NULL;

	for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
		unsigned	ep;

		in = out = status = NULL;
		alt = intf->altsetting + tmp;

		/* take the first altsetting with in-bulk + out-bulk;
		 * remember any status endpoint, just in case;
		 * ignore other endpoints and altsettings.
		 */
		for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
			struct usb_host_endpoint	*e;
			int				intr = 0;

			e = alt->endpoint + ep;

			/* ignore endpoints which cannot transfer data */
			if (!usb_endpoint_maxp(&e->desc))
				continue;

			switch (e->desc.bmAttributes) {
			case USB_ENDPOINT_XFER_INT:
				if (!usb_endpoint_dir_in(&e->desc))
					continue;
				intr = 1;
				fallthrough;
			case USB_ENDPOINT_XFER_BULK:
				break;
			default:
				continue;
			}
			if (usb_endpoint_dir_in(&e->desc)) {
				if (!intr && !in)
					in = e;
				else if (intr && !status)
					status = e;
			} else {
				if (!out)
					out = e;
			}
		}
		if (in && out)
			break;
	}
	if (!alt || !in || !out)
		return -EINVAL;

	if (alt->desc.bAlternateSetting != 0 ||
	    !(dev->driver_info->flags & FLAG_NO_SETINT)) {
		tmp = usb_set_interface (dev->udev, alt->desc.bInterfaceNumber,
				alt->desc.bAlternateSetting);
		if (tmp < 0)
			return tmp;
	}

	dev->in = usb_rcvbulkpipe (dev->udev,
			in->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
	dev->out = usb_sndbulkpipe (dev->udev,
			out->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
	dev->status = status;
	return 0;
}
EXPORT_SYMBOL_GPL(usbnet_get_endpoints);

int usbnet_get_ethernet_addr(struct usbnet *dev, int iMACAddress)
{
	u8	addr[ETH_ALEN];
	int	tmp = -1, ret;
	unsigned char buf [13];

	ret = usb_string(dev->udev, iMACAddress, buf, sizeof buf);
	if (ret == 12)
		tmp = hex2bin(addr, buf, 6);
	if (tmp < 0) {
		dev_dbg(&dev->udev->dev,
			"bad MAC string %d fetch, %d\n", iMACAddress, tmp);
		if (ret >= 0)
			ret = -EINVAL;
		return ret;
	}
	eth_hw_addr_set(dev->net, addr);
	return 0;
}
EXPORT_SYMBOL_GPL(usbnet_get_ethernet_addr);
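
/*
 * For illustration: iMACAddress names a string descriptor holding
 * twelve hex digits, e.g. "00A0C914C8D2", which hex2bin() above turns
 * into the six byte hardware address 00:a0:c9:14:c8:d2.
 */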

static void intr_complete (struct urb *urb)
{
	struct usbnet	*dev = urb->context;
	int		status = urb->status;

	switch (status) {
	/* success */
	case 0:
		dev->driver_info->status(dev, urb);
		break;

	/* software-driven interface shutdown */
	case -ENOENT:		/* urb killed */
	case -ESHUTDOWN:	/* hardware gone */
		netif_dbg(dev, ifdown, dev->net,
			  "intr shutdown, code %d\n", status);
		return;

	/* NOTE:  not throttling like RX/TX, since this endpoint
	 * already polls infrequently
	 */
	default:
		netdev_dbg(dev->net, "intr status %d\n", status);
		break;
	}

	status = usb_submit_urb (urb, GFP_ATOMIC);
	if (status != 0)
		netif_err(dev, timer, dev->net,
			  "intr resubmit --> %d\n", status);
}

static int init_status (struct usbnet *dev, struct usb_interface *intf)
{
	char		*buf = NULL;
	unsigned	pipe = 0;
	unsigned	maxp;
	unsigned	period;

	if (!dev->driver_info->status)
		return 0;

	pipe = usb_rcvintpipe (dev->udev,
			dev->status->desc.bEndpointAddress
				& USB_ENDPOINT_NUMBER_MASK);
	maxp = usb_maxpacket(dev->udev, pipe);

	/* avoid 1 msec chatter:  min 8 msec poll rate */
	period = max ((int) dev->status->desc.bInterval,
		(dev->udev->speed == USB_SPEED_HIGH) ? 7 : 3);

	buf = kmalloc (maxp, GFP_KERNEL);
	if (buf) {
		dev->interrupt = usb_alloc_urb (0, GFP_KERNEL);
		if (!dev->interrupt) {
			kfree (buf);
			return -ENOMEM;
		} else {
			usb_fill_int_urb(dev->interrupt, dev->udev, pipe,
				buf, maxp, intr_complete, dev, period);
			dev->interrupt->transfer_flags |= URB_FREE_BUFFER;
			dev_dbg(&intf->dev,
				"status ep%din, %d bytes period %d\n",
				usb_pipeendpoint(pipe), maxp, period);
		}
	}
	return 0;
}

/* Submit the interrupt URB if not previously submitted, increasing refcount */
int usbnet_status_start(struct usbnet *dev, gfp_t mem_flags)
{
	int ret = 0;

	WARN_ON_ONCE(dev->interrupt == NULL);
	if (dev->interrupt) {
		mutex_lock(&dev->interrupt_mutex);

		if (++dev->interrupt_count == 1)
			ret = usb_submit_urb(dev->interrupt, mem_flags);

		dev_dbg(&dev->udev->dev, "incremented interrupt URB count to %d\n",
			dev->interrupt_count);
		mutex_unlock(&dev->interrupt_mutex);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(usbnet_status_start);

/* For resume; submit interrupt URB if previously submitted */
static int __usbnet_status_start_force(struct usbnet *dev, gfp_t mem_flags)
{
	int ret = 0;

	mutex_lock(&dev->interrupt_mutex);
	if (dev->interrupt_count) {
		ret = usb_submit_urb(dev->interrupt, mem_flags);
		dev_dbg(&dev->udev->dev,
			"submitted interrupt URB for resume\n");
	}
	mutex_unlock(&dev->interrupt_mutex);
	return ret;
}

/* Kill the interrupt URB if all submitters want it killed */
void usbnet_status_stop(struct usbnet *dev)
{
	if (dev->interrupt) {
		mutex_lock(&dev->interrupt_mutex);
		WARN_ON(dev->interrupt_count == 0);

		if (dev->interrupt_count && --dev->interrupt_count == 0)
			usb_kill_urb(dev->interrupt);

		dev_dbg(&dev->udev->dev,
			"decremented interrupt URB count to %d\n",
			dev->interrupt_count);
		mutex_unlock(&dev->interrupt_mutex);
	}
}
EXPORT_SYMBOL_GPL(usbnet_status_stop);
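
/*
 * For illustration: a minidriver that needs the status endpoint running
 * around some slow operation brackets it with the refcounted pair:
 *
 *	ret = usbnet_status_start(dev, GFP_KERNEL);
 *	...
 *	usbnet_status_stop(dev);
 *
 * Only the first start submits the interrupt URB, and only the last
 * stop kills it; the _force variants above bypass the refcount for
 * suspend/resume.
 */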

/* For suspend; always kill interrupt URB */
static void __usbnet_status_stop_force(struct usbnet *dev)
{
	if (dev->interrupt) {
		mutex_lock(&dev->interrupt_mutex);
		usb_kill_urb(dev->interrupt);
		dev_dbg(&dev->udev->dev, "killed interrupt URB for suspend\n");
		mutex_unlock(&dev->interrupt_mutex);
	}
}

/* Passes this packet up the stack, updating its accounting.
 * Some link protocols batch packets, so their rx_fixup paths
 * can return clones as well as just modify the original skb.
 */
void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb)
{
	struct pcpu_sw_netstats *stats64 = this_cpu_ptr(dev->net->tstats);
	unsigned long flags;
	int	status;

	if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
		skb_queue_tail(&dev->rxq_pause, skb);
		return;
	}

	/* only update if unset to allow minidriver rx_fixup override */
	if (skb->protocol == 0)
		skb->protocol = eth_type_trans (skb, dev->net);

	flags = u64_stats_update_begin_irqsave(&stats64->syncp);
	u64_stats_inc(&stats64->rx_packets);
	u64_stats_add(&stats64->rx_bytes, skb->len);
	u64_stats_update_end_irqrestore(&stats64->syncp, flags);

	netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
		  skb->len + sizeof (struct ethhdr), skb->protocol);
	memset (skb->cb, 0, sizeof (struct skb_data));

	if (skb_defer_rx_timestamp(skb))
		return;

	status = netif_rx (skb);
	if (status != NET_RX_SUCCESS)
		netif_dbg(dev, rx_err, dev->net,
			  "netif_rx status %d\n", status);
}
EXPORT_SYMBOL_GPL(usbnet_skb_return);

/* must be called if hard_mtu or rx_urb_size changed */
void usbnet_update_max_qlen(struct usbnet *dev)
{
	enum usb_device_speed speed = dev->udev->speed;

	if (!dev->rx_urb_size || !dev->hard_mtu)
		goto insanity;
	switch (speed) {
	case USB_SPEED_HIGH:
		dev->rx_qlen = MAX_QUEUE_MEMORY / dev->rx_urb_size;
		dev->tx_qlen = MAX_QUEUE_MEMORY / dev->hard_mtu;
		break;
	case USB_SPEED_SUPER:
	case USB_SPEED_SUPER_PLUS:
		/*
		 * Don't take the default 5ms qlen for super speed HCs,
		 * to save memory; iperf tests show a 2.5ms qlen can
		 * work well
		 */
		dev->rx_qlen = 5 * MAX_QUEUE_MEMORY / dev->rx_urb_size;
		dev->tx_qlen = 5 * MAX_QUEUE_MEMORY / dev->hard_mtu;
		break;
	default:
insanity:
		dev->rx_qlen = dev->tx_qlen = 4;
	}
}
EXPORT_SYMBOL_GPL(usbnet_update_max_qlen);


/*-------------------------------------------------------------------------
 *
 * Network Device Driver (peer link to "Host Device", from USB host)
 *
 *-------------------------------------------------------------------------*/

int usbnet_change_mtu (struct net_device *net, int new_mtu)
{
	struct usbnet	*dev = netdev_priv(net);
	int		ll_mtu = new_mtu + net->hard_header_len;
	int		old_hard_mtu = dev->hard_mtu;
	int		old_rx_urb_size = dev->rx_urb_size;

	// no second zero-length packet read wanted after mtu-sized packets
	if ((ll_mtu % dev->maxpacket) == 0)
		return -EDOM;
	net->mtu = new_mtu;

	dev->hard_mtu = net->mtu + net->hard_header_len;
	if (dev->rx_urb_size == old_hard_mtu) {
		dev->rx_urb_size = dev->hard_mtu;
		if (dev->rx_urb_size > old_rx_urb_size) {
			usbnet_pause_rx(dev);
			usbnet_unlink_rx_urbs(dev);
			usbnet_resume_rx(dev);
		}
	}

	/* max qlen depends on hard_mtu and rx_urb_size */
	usbnet_update_max_qlen(dev);

	return 0;
}
EXPORT_SYMBOL_GPL(usbnet_change_mtu);
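
/*
 * For illustration: with 512 byte high speed bulk packets and a 14 byte
 * Ethernet header, an MTU of 1522 gives ll_mtu = 1536 = 3 * 512, which
 * is rejected with -EDOM above: every such frame would end exactly on a
 * packet boundary and force an extra zero-length packet read.
 */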

/* The caller must hold list->lock */
static void
__usbnet_queue_skb(struct sk_buff_head *list,
		struct sk_buff *newsk, enum skb_state state)
{
	struct skb_data *entry = (struct skb_data *) newsk->cb;

	__skb_queue_tail(list, newsk);
	entry->state = state;
}

/*-------------------------------------------------------------------------*/

/* some LK 2.4 HCDs oopsed if we freed or resubmitted urbs from
 * completion callbacks.  2.5 should have fixed those bugs...
 */

static enum skb_state defer_bh(struct usbnet *dev, struct sk_buff *skb,
		struct sk_buff_head *list, enum skb_state state)
{
	unsigned long		flags;
	enum skb_state		old_state;
	struct skb_data *entry = (struct skb_data *) skb->cb;

	spin_lock_irqsave(&list->lock, flags);
	old_state = entry->state;
	entry->state = state;
	__skb_unlink(skb, list);

	/* defer_bh() is never called with list == &dev->done.
	 * spin_lock_nested() tells lockdep that it is OK to take
	 * dev->done.lock here with list->lock held.
	 */
	spin_lock_nested(&dev->done.lock, SINGLE_DEPTH_NESTING);

	__skb_queue_tail(&dev->done, skb);
	if (dev->done.qlen == 1)
		tasklet_schedule(&dev->bh);
	spin_unlock(&dev->done.lock);
	spin_unlock_irqrestore(&list->lock, flags);
	return old_state;
}

/* some work can't be done in tasklets, so we use keventd
 *
 * NOTE:  annoying asymmetry:  if it's active, schedule_work() fails,
 * but tasklet_schedule() doesn't.  hope the failure is rare.
 */
void usbnet_defer_kevent (struct usbnet *dev, int work)
{
	set_bit (work, &dev->flags);
	if (!usbnet_going_away(dev)) {
		if (!schedule_work(&dev->kevent))
			netdev_dbg(dev->net,
				   "kevent %s may have been dropped\n",
				   usbnet_event_names[work]);
		else
			netdev_dbg(dev->net,
				   "kevent %s scheduled\n", usbnet_event_names[work]);
	}
}
EXPORT_SYMBOL_GPL(usbnet_defer_kevent);

/*-------------------------------------------------------------------------*/

static void rx_complete (struct urb *urb);

static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
{
	struct sk_buff		*skb;
	struct skb_data		*entry;
	int			retval = 0;
	unsigned long		lockflags;
	size_t			size = dev->rx_urb_size;

	/* prevent rx skb allocation when error ratio is high */
	if (test_bit(EVENT_RX_KILL, &dev->flags)) {
		usb_free_urb(urb);
		return -ENOLINK;
	}

	if (test_bit(EVENT_NO_IP_ALIGN, &dev->flags))
		skb = __netdev_alloc_skb(dev->net, size, flags);
	else
		skb = __netdev_alloc_skb_ip_align(dev->net, size, flags);
	if (!skb) {
		netif_dbg(dev, rx_err, dev->net, "no rx skb\n");
		usbnet_defer_kevent (dev, EVENT_RX_MEMORY);
		usb_free_urb (urb);
		return -ENOMEM;
	}

	entry = (struct skb_data *) skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->length = 0;

	usb_fill_bulk_urb (urb, dev->udev, dev->in,
		skb->data, size, rx_complete, skb);

	spin_lock_irqsave (&dev->rxq.lock, lockflags);

	if (netif_running (dev->net) &&
	    netif_device_present (dev->net) &&
	    test_bit(EVENT_DEV_OPEN, &dev->flags) &&
	    !test_bit (EVENT_RX_HALT, &dev->flags) &&
	    !test_bit (EVENT_DEV_ASLEEP, &dev->flags)) {
		switch (retval = usb_submit_urb (urb, GFP_ATOMIC)) {
		case -EPIPE:
			usbnet_defer_kevent (dev, EVENT_RX_HALT);
			break;
		case -ENOMEM:
			usbnet_defer_kevent (dev, EVENT_RX_MEMORY);
			break;
		case -ENODEV:
			netif_dbg(dev, ifdown, dev->net, "device gone\n");
gone\n"); 532 netif_device_detach (dev->net); 533 break; 534 case -EHOSTUNREACH: 535 retval = -ENOLINK; 536 break; 537 default: 538 netif_dbg(dev, rx_err, dev->net, 539 "rx submit, %d\n", retval); 540 tasklet_schedule (&dev->bh); 541 break; 542 case 0: 543 if (!usbnet_going_away(dev)) 544 __usbnet_queue_skb(&dev->rxq, skb, rx_start); 545 } 546 } else { 547 netif_dbg(dev, ifdown, dev->net, "rx: stopped\n"); 548 retval = -ENOLINK; 549 } 550 spin_unlock_irqrestore (&dev->rxq.lock, lockflags); 551 if (retval) { 552 dev_kfree_skb_any (skb); 553 usb_free_urb (urb); 554 } 555 return retval; 556 } 557 558 559 /*-------------------------------------------------------------------------*/ 560 561 static inline int rx_process(struct usbnet *dev, struct sk_buff *skb) 562 { 563 if (dev->driver_info->rx_fixup && 564 !dev->driver_info->rx_fixup (dev, skb)) { 565 /* With RX_ASSEMBLE, rx_fixup() must update counters */ 566 if (!(dev->driver_info->flags & FLAG_RX_ASSEMBLE)) 567 dev->net->stats.rx_errors++; 568 return -EPROTO; 569 } 570 // else network stack removes extra byte if we forced a short packet 571 572 /* all data was already cloned from skb inside the driver */ 573 if (dev->driver_info->flags & FLAG_MULTI_PACKET) 574 return -EALREADY; 575 576 if (skb->len < ETH_HLEN) { 577 dev->net->stats.rx_errors++; 578 dev->net->stats.rx_length_errors++; 579 netif_dbg(dev, rx_err, dev->net, "rx length %d\n", skb->len); 580 return -EPROTO; 581 } 582 583 usbnet_skb_return(dev, skb); 584 return 0; 585 } 586 587 /*-------------------------------------------------------------------------*/ 588 589 static void rx_complete (struct urb *urb) 590 { 591 struct sk_buff *skb = (struct sk_buff *) urb->context; 592 struct skb_data *entry = (struct skb_data *) skb->cb; 593 struct usbnet *dev = entry->dev; 594 int urb_status = urb->status; 595 enum skb_state state; 596 597 skb_put (skb, urb->actual_length); 598 state = rx_done; 599 entry->urb = NULL; 600 601 switch (urb_status) { 602 /* success */ 603 case 0: 604 break; 605 606 /* stalls need manual reset. this is rare ... except that 607 * when going through USB 2.0 TTs, unplug appears this way. 608 * we avoid the highspeed version of the ETIMEDOUT/EILSEQ 609 * storm, recovering as needed. 610 */ 611 case -EPIPE: 612 dev->net->stats.rx_errors++; 613 usbnet_defer_kevent (dev, EVENT_RX_HALT); 614 fallthrough; 615 616 /* software-driven interface shutdown */ 617 case -ECONNRESET: /* async unlink */ 618 case -ESHUTDOWN: /* hardware gone */ 619 netif_dbg(dev, ifdown, dev->net, 620 "rx shutdown, code %d\n", urb_status); 621 goto block; 622 623 /* we get controller i/o faults during hub_wq disconnect() delays. 624 * throttle down resubmits, to avoid log floods; just temporarily, 625 * so we still recover when the fault isn't a hub_wq delay. 626 */ 627 case -EPROTO: 628 case -ETIME: 629 case -EILSEQ: 630 dev->net->stats.rx_errors++; 631 if (!timer_pending (&dev->delay)) { 632 mod_timer (&dev->delay, jiffies + THROTTLE_JIFFIES); 633 netif_dbg(dev, link, dev->net, 634 "rx throttle %d\n", urb_status); 635 } 636 block: 637 state = rx_cleanup; 638 entry->urb = urb; 639 urb = NULL; 640 break; 641 642 /* data overrun ... flush fifo? 
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		fallthrough;

	default:
		state = rx_cleanup;
		dev->net->stats.rx_errors++;
		netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
		break;
	}

	/* stop rx if packet error rate is high */
	if (++dev->pkt_cnt > 30) {
		dev->pkt_cnt = 0;
		dev->pkt_err = 0;
	} else {
		if (state == rx_cleanup)
			dev->pkt_err++;
		if (dev->pkt_err > 20)
			set_bit(EVENT_RX_KILL, &dev->flags);
	}

	state = defer_bh(dev, skb, &dev->rxq, state);

	if (urb) {
		if (netif_running (dev->net) &&
		    !test_bit (EVENT_RX_HALT, &dev->flags) &&
		    state != unlink_start) {
			rx_submit (dev, urb, GFP_ATOMIC);
			usb_mark_last_busy(dev->udev);
			return;
		}
		usb_free_urb (urb);
	}
	netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
}
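
/*
 * For illustration: the error-rate check above works in windows of 30
 * completions.  If more than 20 URBs of a window end in rx_cleanup,
 * EVENT_RX_KILL makes rx_submit() refuse new buffers; usbnet_bh()
 * clears the bit again, so reception restarts on the next round.
 */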

/*-------------------------------------------------------------------------*/
void usbnet_pause_rx(struct usbnet *dev)
{
	set_bit(EVENT_RX_PAUSED, &dev->flags);

	netif_dbg(dev, rx_status, dev->net, "paused rx queue enabled\n");
}
EXPORT_SYMBOL_GPL(usbnet_pause_rx);

void usbnet_resume_rx(struct usbnet *dev)
{
	struct sk_buff *skb;
	int num = 0;

	clear_bit(EVENT_RX_PAUSED, &dev->flags);

	while ((skb = skb_dequeue(&dev->rxq_pause)) != NULL) {
		usbnet_skb_return(dev, skb);
		num++;
	}

	tasklet_schedule(&dev->bh);

	netif_dbg(dev, rx_status, dev->net,
		  "paused rx queue disabled, %d skbs requeued\n", num);
}
EXPORT_SYMBOL_GPL(usbnet_resume_rx);

void usbnet_purge_paused_rxq(struct usbnet *dev)
{
	skb_queue_purge(&dev->rxq_pause);
}
EXPORT_SYMBOL_GPL(usbnet_purge_paused_rxq);

/*-------------------------------------------------------------------------*/

// unlink pending rx/tx; completion handlers do all other cleanup

static int unlink_urbs (struct usbnet *dev, struct sk_buff_head *q)
{
	unsigned long		flags;
	struct sk_buff		*skb;
	int			count = 0;

	spin_lock_irqsave (&q->lock, flags);
	while (!skb_queue_empty(q)) {
		struct skb_data		*entry;
		struct urb		*urb;
		int			retval;

		skb_queue_walk(q, skb) {
			entry = (struct skb_data *) skb->cb;
			if (entry->state != unlink_start)
				goto found;
		}
		break;
found:
		entry->state = unlink_start;
		urb = entry->urb;

		/*
		 * Take a reference on the URB so it can't be freed
		 * while usb_unlink_urb() runs; usb_unlink_urb() always
		 * races with the completion handler (including
		 * defer_bh), which could otherwise trigger a
		 * use-after-free inside usb_unlink_urb().
		 */
		usb_get_urb(urb);
		spin_unlock_irqrestore(&q->lock, flags);
		// during some PM-driven resume scenarios,
		// these (async) unlinks complete immediately
		retval = usb_unlink_urb (urb);
		if (retval != -EINPROGRESS && retval != 0)
			netdev_dbg(dev->net, "unlink urb err, %d\n", retval);
		else
			count++;
		usb_put_urb(urb);
		spin_lock_irqsave(&q->lock, flags);
	}
	spin_unlock_irqrestore (&q->lock, flags);
	return count;
}

// Flush all pending rx urbs
// minidrivers may need to do this when the MTU changes

void usbnet_unlink_rx_urbs(struct usbnet *dev)
{
	if (netif_running(dev->net)) {
		(void) unlink_urbs (dev, &dev->rxq);
		tasklet_schedule(&dev->bh);
	}
}
EXPORT_SYMBOL_GPL(usbnet_unlink_rx_urbs);

/*-------------------------------------------------------------------------*/

static void wait_skb_queue_empty(struct sk_buff_head *q)
{
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	while (!skb_queue_empty(q)) {
		spin_unlock_irqrestore(&q->lock, flags);
		schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
		set_current_state(TASK_UNINTERRUPTIBLE);
		spin_lock_irqsave(&q->lock, flags);
	}
	spin_unlock_irqrestore(&q->lock, flags);
}

// precondition: never called in_interrupt
static void usbnet_terminate_urbs(struct usbnet *dev)
{
	DECLARE_WAITQUEUE(wait, current);
	int temp;

	/* ensure there are no more active urbs */
	add_wait_queue(&dev->wait, &wait);
	set_current_state(TASK_UNINTERRUPTIBLE);
	temp = unlink_urbs(dev, &dev->txq) +
		unlink_urbs(dev, &dev->rxq);

	/* maybe wait for deletions to finish. */
	wait_skb_queue_empty(&dev->rxq);
	wait_skb_queue_empty(&dev->txq);
	wait_skb_queue_empty(&dev->done);
	netif_dbg(dev, ifdown, dev->net,
		  "waited for %d urb completions\n", temp);
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&dev->wait, &wait);
}

int usbnet_stop (struct net_device *net)
{
	struct usbnet		*dev = netdev_priv(net);
	const struct driver_info *info = dev->driver_info;
	int			retval, pm, mpn;

	clear_bit(EVENT_DEV_OPEN, &dev->flags);
	netif_stop_queue (net);

	netif_info(dev, ifdown, dev->net,
		   "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
		   net->stats.rx_packets, net->stats.tx_packets,
		   net->stats.rx_errors, net->stats.tx_errors);

	/* to not race resume */
	pm = usb_autopm_get_interface(dev->intf);
	/* allow minidriver to stop correctly (wireless devices to turn off
	 * radio etc) */
	if (info->stop) {
		retval = info->stop(dev);
		if (retval < 0)
			netif_info(dev, ifdown, dev->net,
				   "stop fail (%d) usbnet usb-%s-%s, %s\n",
				   retval,
				   dev->udev->bus->bus_name, dev->udev->devpath,
				   info->description);
	}

	if (!(info->flags & FLAG_AVOID_UNLINK_URBS))
		usbnet_terminate_urbs(dev);

	usbnet_status_stop(dev);

	usbnet_purge_paused_rxq(dev);

	mpn = !test_and_clear_bit(EVENT_NO_RUNTIME_PM, &dev->flags);

	/* deferred work (timer, softirq, task) must also stop */
	dev->flags = 0;
	del_timer_sync(&dev->delay);
	tasklet_kill(&dev->bh);
	cancel_work_sync(&dev->kevent);

	/* We have cyclic dependencies. Those calls are needed to
	 * break the cycle; the flag keeps us from falling into
	 * the gaps between them.
	 */
	tasklet_kill(&dev->bh);
	del_timer_sync(&dev->delay);
	cancel_work_sync(&dev->kevent);

	if (!pm)
		usb_autopm_put_interface(dev->intf);

	if (info->manage_power && mpn)
		info->manage_power(dev, 0);
	else
		usb_autopm_put_interface(dev->intf);

	return 0;
}
EXPORT_SYMBOL_GPL(usbnet_stop);

/*-------------------------------------------------------------------------*/

// posts reads, and enables write queuing

// precondition: never called in_interrupt

int usbnet_open (struct net_device *net)
{
	struct usbnet		*dev = netdev_priv(net);
	int			retval;
	const struct driver_info *info = dev->driver_info;

	if ((retval = usb_autopm_get_interface(dev->intf)) < 0) {
		netif_info(dev, ifup, dev->net,
			   "resumption fail (%d) usbnet usb-%s-%s, %s\n",
			   retval,
			   dev->udev->bus->bus_name,
			   dev->udev->devpath,
			   info->description);
		goto done_nopm;
	}

	// put into "known safe" state
	if (info->reset && (retval = info->reset (dev)) < 0) {
		netif_info(dev, ifup, dev->net,
			   "open reset fail (%d) usbnet usb-%s-%s, %s\n",
			   retval,
			   dev->udev->bus->bus_name,
			   dev->udev->devpath,
			   info->description);
		goto done;
	}

	/* hard_mtu or rx_urb_size may change in reset() */
	usbnet_update_max_qlen(dev);

	// insist peer be connected
	if (info->check_connect && (retval = info->check_connect (dev)) < 0) {
		netif_err(dev, ifup, dev->net, "can't open; %d\n", retval);
		goto done;
	}

	/* start any status interrupt transfer */
	if (dev->interrupt) {
		retval = usbnet_status_start(dev, GFP_KERNEL);
		if (retval < 0) {
			netif_err(dev, ifup, dev->net,
				  "intr submit %d\n", retval);
			goto done;
		}
	}

	set_bit(EVENT_DEV_OPEN, &dev->flags);
	netif_start_queue (net);
	netif_info(dev, ifup, dev->net,
		   "open: enable queueing (rx %d, tx %d) mtu %d %s framing\n",
		   (int)RX_QLEN(dev), (int)TX_QLEN(dev),
		   dev->net->mtu,
		   (dev->driver_info->flags & FLAG_FRAMING_NC) ? "NetChip" :
		   (dev->driver_info->flags & FLAG_FRAMING_GL) ? "GeneSys" :
		   (dev->driver_info->flags & FLAG_FRAMING_Z) ? "Zaurus" :
		   (dev->driver_info->flags & FLAG_FRAMING_RN) ? "RNDIS" :
		   (dev->driver_info->flags & FLAG_FRAMING_AX) ? "ASIX" :
		   "simple");

	/* reset rx error state */
	dev->pkt_cnt = 0;
	dev->pkt_err = 0;
	clear_bit(EVENT_RX_KILL, &dev->flags);

	// delay posting reads until we're fully open
	tasklet_schedule (&dev->bh);
	if (info->manage_power) {
		retval = info->manage_power(dev, 1);
		if (retval < 0) {
			retval = 0;
			set_bit(EVENT_NO_RUNTIME_PM, &dev->flags);
		} else {
			usb_autopm_put_interface(dev->intf);
		}
	}
	return retval;
done:
	usb_autopm_put_interface(dev->intf);
done_nopm:
	return retval;
}
EXPORT_SYMBOL_GPL(usbnet_open);

/*-------------------------------------------------------------------------*/

/* ethtool methods; minidrivers may need to add some more, but
 * they'll probably want to use this base set.
 */

/* These methods are written on the assumption that the device
 * uses MII
 */
int usbnet_get_link_ksettings_mii(struct net_device *net,
			      struct ethtool_link_ksettings *cmd)
{
	struct usbnet *dev = netdev_priv(net);

	if (!dev->mii.mdio_read)
		return -EOPNOTSUPP;

	mii_ethtool_get_link_ksettings(&dev->mii, cmd);

	return 0;
}
EXPORT_SYMBOL_GPL(usbnet_get_link_ksettings_mii);

int usbnet_get_link_ksettings_internal(struct net_device *net,
					struct ethtool_link_ksettings *cmd)
{
	struct usbnet *dev = netdev_priv(net);

	/* the assumption that speed is equal on tx and rx
	 * is deeply engrained into the networking layer.
	 * For wireless stuff it is not true.
	 * We assume that rx_speed matters more.
	 */
	if (dev->rx_speed != SPEED_UNSET)
		cmd->base.speed = dev->rx_speed / 1000000;
	else if (dev->tx_speed != SPEED_UNSET)
		cmd->base.speed = dev->tx_speed / 1000000;
	else
		cmd->base.speed = SPEED_UNKNOWN;

	return 0;
}
EXPORT_SYMBOL_GPL(usbnet_get_link_ksettings_internal);

int usbnet_set_link_ksettings_mii(struct net_device *net,
			      const struct ethtool_link_ksettings *cmd)
{
	struct usbnet *dev = netdev_priv(net);
	int retval;

	if (!dev->mii.mdio_write)
		return -EOPNOTSUPP;

	retval = mii_ethtool_set_link_ksettings(&dev->mii, cmd);

	/* link speed/duplex might have changed */
	if (dev->driver_info->link_reset)
		dev->driver_info->link_reset(dev);

	/* hard_mtu or rx_urb_size may change in link_reset() */
	usbnet_update_max_qlen(dev);

	return retval;
}
EXPORT_SYMBOL_GPL(usbnet_set_link_ksettings_mii);

u32 usbnet_get_link (struct net_device *net)
{
	struct usbnet *dev = netdev_priv(net);

	/* If a check_connect is defined, return its result */
	if (dev->driver_info->check_connect)
		return dev->driver_info->check_connect (dev) == 0;

	/* if the device has mii operations, use those */
	if (dev->mii.mdio_read)
		return mii_link_ok(&dev->mii);

	/* Otherwise, dtrt for drivers calling netif_carrier_{on,off} */
	return ethtool_op_get_link(net);
}
EXPORT_SYMBOL_GPL(usbnet_get_link);

int usbnet_nway_reset(struct net_device *net)
{
	struct usbnet *dev = netdev_priv(net);

	if (!dev->mii.mdio_write)
		return -EOPNOTSUPP;

	return mii_nway_restart(&dev->mii);
}
EXPORT_SYMBOL_GPL(usbnet_nway_reset);

void usbnet_get_drvinfo (struct net_device *net, struct ethtool_drvinfo *info)
{
	struct usbnet *dev = netdev_priv(net);

	strscpy(info->driver, dev->driver_name, sizeof(info->driver));
	strscpy(info->fw_version, dev->driver_info->description,
		sizeof(info->fw_version));
	usb_make_path (dev->udev, info->bus_info, sizeof info->bus_info);
}
EXPORT_SYMBOL_GPL(usbnet_get_drvinfo);

u32 usbnet_get_msglevel (struct net_device *net)
{
	struct usbnet *dev = netdev_priv(net);

	return dev->msg_enable;
}
EXPORT_SYMBOL_GPL(usbnet_get_msglevel);

void usbnet_set_msglevel (struct net_device *net, u32 level)
{
	struct usbnet *dev = netdev_priv(net);

	dev->msg_enable = level;
}
EXPORT_SYMBOL_GPL(usbnet_set_msglevel);

/* drivers may override default ethtool_ops in their bind() routine */
static const struct ethtool_ops usbnet_ethtool_ops = {
	.get_link		= usbnet_get_link,
	.nway_reset		= usbnet_nway_reset,
	.get_drvinfo		= usbnet_get_drvinfo,
	.get_msglevel		= usbnet_get_msglevel,
	.set_msglevel		= usbnet_set_msglevel,
	.get_ts_info		= ethtool_op_get_ts_info,
	.get_link_ksettings	= usbnet_get_link_ksettings_mii,
	.set_link_ksettings	= usbnet_set_link_ksettings_mii,
};
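
/*
 * For illustration: a minidriver that tracks link speed itself (no MII)
 * might supply its own table from bind() and swap in the internal
 * helpers, e.g. (example_ethtool_ops is hypothetical):
 *
 *	static const struct ethtool_ops example_ethtool_ops = {
 *		.get_link		= usbnet_get_link,
 *		.get_link_ksettings	= usbnet_get_link_ksettings_internal,
 *	};
 *	...
 *	dev->net->ethtool_ops = &example_ethtool_ops;
 */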

/*-------------------------------------------------------------------------*/

static void __handle_link_change(struct usbnet *dev)
{
	if (!test_bit(EVENT_DEV_OPEN, &dev->flags))
		return;

	if (!netif_carrier_ok(dev->net)) {
		/* kill URBs for reading packets to save bus bandwidth */
		unlink_urbs(dev, &dev->rxq);

		/*
		 * tx_timeout will unlink URBs for sending packets and
		 * tx queue is stopped by netcore after link becomes off
		 */
	} else {
		/* submitting URBs for reading packets */
		tasklet_schedule(&dev->bh);
	}

	/* hard_mtu or rx_urb_size may change during link change */
	usbnet_update_max_qlen(dev);

	clear_bit(EVENT_LINK_CHANGE, &dev->flags);
}

void usbnet_set_rx_mode(struct net_device *net)
{
	struct usbnet		*dev = netdev_priv(net);

	usbnet_defer_kevent(dev, EVENT_SET_RX_MODE);
}
EXPORT_SYMBOL_GPL(usbnet_set_rx_mode);

static void __handle_set_rx_mode(struct usbnet *dev)
{
	if (dev->driver_info->set_rx_mode)
		(dev->driver_info->set_rx_mode)(dev);

	clear_bit(EVENT_SET_RX_MODE, &dev->flags);
}

/* work that cannot be done in interrupt context uses keventd.
 *
 * NOTE:  with 2.5 we could do more of this using completion callbacks,
 * especially now that control transfers can be queued.
 */
static void
usbnet_deferred_kevent (struct work_struct *work)
{
	struct usbnet		*dev =
		container_of(work, struct usbnet, kevent);
	int			status;

	/* usb_clear_halt() needs a thread context */
	if (test_bit (EVENT_TX_HALT, &dev->flags)) {
		unlink_urbs (dev, &dev->txq);
		status = usb_autopm_get_interface(dev->intf);
		if (status < 0)
			goto fail_pipe;
		status = usb_clear_halt (dev->udev, dev->out);
		usb_autopm_put_interface(dev->intf);
		if (status < 0 &&
		    status != -EPIPE &&
		    status != -ESHUTDOWN) {
			if (netif_msg_tx_err (dev))
fail_pipe:
				netdev_err(dev->net, "can't clear tx halt, status %d\n",
					   status);
		} else {
			clear_bit (EVENT_TX_HALT, &dev->flags);
			if (status != -ESHUTDOWN)
				netif_wake_queue (dev->net);
		}
	}
	if (test_bit (EVENT_RX_HALT, &dev->flags)) {
		unlink_urbs (dev, &dev->rxq);
		status = usb_autopm_get_interface(dev->intf);
		if (status < 0)
			goto fail_halt;
		status = usb_clear_halt (dev->udev, dev->in);
		usb_autopm_put_interface(dev->intf);
		if (status < 0 &&
		    status != -EPIPE &&
		    status != -ESHUTDOWN) {
			if (netif_msg_rx_err (dev))
fail_halt:
				netdev_err(dev->net, "can't clear rx halt, status %d\n",
					   status);
		} else {
			clear_bit (EVENT_RX_HALT, &dev->flags);
			if (!usbnet_going_away(dev))
				tasklet_schedule(&dev->bh);
		}
	}

	/* tasklet could resubmit itself forever if memory is tight */
	if (test_bit (EVENT_RX_MEMORY, &dev->flags)) {
		struct urb	*urb = NULL;
		int resched = 1;

		if (netif_running (dev->net))
			urb = usb_alloc_urb (0, GFP_KERNEL);
		else
			clear_bit (EVENT_RX_MEMORY, &dev->flags);
		if (urb != NULL) {
			clear_bit (EVENT_RX_MEMORY, &dev->flags);
			status = usb_autopm_get_interface(dev->intf);
			if (status < 0) {
				usb_free_urb(urb);
				goto fail_lowmem;
			}
			if (rx_submit (dev, urb, GFP_KERNEL) == -ENOLINK)
				resched = 0;
			usb_autopm_put_interface(dev->intf);
fail_lowmem:
			if (resched)
				if (!usbnet_going_away(dev))
					tasklet_schedule(&dev->bh);
		}
	}

	if (test_bit (EVENT_LINK_RESET, &dev->flags)) {
		const struct driver_info *info = dev->driver_info;
		int			retval = 0;

		clear_bit (EVENT_LINK_RESET, &dev->flags);
		status = usb_autopm_get_interface(dev->intf);
		if (status < 0)
			goto skip_reset;
		if (info->link_reset && (retval = info->link_reset(dev)) < 0) {
			usb_autopm_put_interface(dev->intf);
skip_reset:
			netdev_info(dev->net, "link reset failed (%d) usbnet usb-%s-%s, %s\n",
				    retval,
				    dev->udev->bus->bus_name,
				    dev->udev->devpath,
				    info->description);
		} else {
			usb_autopm_put_interface(dev->intf);
		}

		/* handle link change from link resetting */
		__handle_link_change(dev);
	}

	if (test_bit (EVENT_LINK_CHANGE, &dev->flags))
		__handle_link_change(dev);

	if (test_bit (EVENT_SET_RX_MODE, &dev->flags))
		__handle_set_rx_mode(dev);


	if (dev->flags)
		netdev_dbg(dev->net, "kevent done, flags = 0x%lx\n", dev->flags);
}

/*-------------------------------------------------------------------------*/

static void tx_complete (struct urb *urb)
{
	struct sk_buff		*skb = (struct sk_buff *) urb->context;
	struct skb_data		*entry = (struct skb_data *) skb->cb;
	struct usbnet		*dev = entry->dev;

	if (urb->status == 0) {
		struct pcpu_sw_netstats *stats64 = this_cpu_ptr(dev->net->tstats);
		unsigned long flags;

		flags = u64_stats_update_begin_irqsave(&stats64->syncp);
		u64_stats_add(&stats64->tx_packets, entry->packets);
		u64_stats_add(&stats64->tx_bytes, entry->length);
		u64_stats_update_end_irqrestore(&stats64->syncp, flags);
	} else {
		dev->net->stats.tx_errors++;

		switch (urb->status) {
		case -EPIPE:
			usbnet_defer_kevent (dev, EVENT_TX_HALT);
			break;

		/* software-driven interface shutdown */
		case -ECONNRESET:		// async unlink
		case -ESHUTDOWN:		// hardware gone
			break;

		/* like rx, tx gets controller i/o faults during hub_wq
		 * delays and so it uses the same throttling mechanism.
		 */
		case -EPROTO:
		case -ETIME:
		case -EILSEQ:
			usb_mark_last_busy(dev->udev);
			if (!timer_pending (&dev->delay)) {
				mod_timer (&dev->delay,
					   jiffies + THROTTLE_JIFFIES);
				netif_dbg(dev, link, dev->net,
					  "tx throttle %d\n", urb->status);
			}
			netif_stop_queue (dev->net);
			break;
		default:
			netif_dbg(dev, tx_err, dev->net,
				  "tx err %d\n", entry->urb->status);
			break;
		}
	}

	usb_autopm_put_interface_async(dev->intf);
	(void) defer_bh(dev, skb, &dev->txq, tx_done);
}

/*-------------------------------------------------------------------------*/

void usbnet_tx_timeout (struct net_device *net, unsigned int txqueue)
{
	struct usbnet		*dev = netdev_priv(net);

	unlink_urbs (dev, &dev->txq);
	tasklet_schedule (&dev->bh);
	/* this needs to be handled individually because the generic layer
	 * doesn't know what is sufficient, and an unconditional reset
	 * could not restore the device's private information.
	 */
	if (dev->driver_info->recover)
		(dev->driver_info->recover)(dev);
}
EXPORT_SYMBOL_GPL(usbnet_tx_timeout);

/*-------------------------------------------------------------------------*/

static int build_dma_sg(const struct sk_buff *skb, struct urb *urb)
{
	unsigned num_sgs, total_len = 0;
	int i, s = 0;

	num_sgs = skb_shinfo(skb)->nr_frags + 1;
	if (num_sgs == 1)
		return 0;

	/* reserve one for zero packet */
	urb->sg = kmalloc_array(num_sgs + 1, sizeof(struct scatterlist),
				GFP_ATOMIC);
	if (!urb->sg)
		return -ENOMEM;

	urb->num_sgs = num_sgs;
	sg_init_table(urb->sg, urb->num_sgs + 1);

	sg_set_buf(&urb->sg[s++], skb->data, skb_headlen(skb));
	total_len += skb_headlen(skb);

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *f = &skb_shinfo(skb)->frags[i];

		total_len += skb_frag_size(f);
		sg_set_page(&urb->sg[i + s], skb_frag_page(f), skb_frag_size(f),
			    skb_frag_off(f));
	}
	urb->transfer_buffer_length = total_len;

	return 1;
}

netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
				     struct net_device *net)
{
	struct usbnet		*dev = netdev_priv(net);
	unsigned int		length;
	struct urb		*urb = NULL;
	struct skb_data		*entry;
	const struct driver_info *info = dev->driver_info;
	unsigned long		flags;
	int retval;

	if (skb)
		skb_tx_timestamp(skb);

	// some devices want funky USB-level framing, for
	// win32 driver (usually) and/or hardware quirks
	if (info->tx_fixup) {
		skb = info->tx_fixup (dev, skb, GFP_ATOMIC);
		if (!skb) {
			/* packet collected; minidriver waiting for more */
			if (info->flags & FLAG_MULTI_PACKET)
				goto not_drop;
			netif_dbg(dev, tx_err, dev->net, "can't tx_fixup skb\n");
			goto drop;
		}
	}

	if (!(urb = usb_alloc_urb (0, GFP_ATOMIC))) {
		netif_dbg(dev, tx_err, dev->net, "no urb\n");
		goto drop;
	}

	entry = (struct skb_data *) skb->cb;
	entry->urb = urb;
	entry->dev = dev;

	usb_fill_bulk_urb (urb, dev->udev, dev->out,
			skb->data, skb->len, tx_complete, skb);
	if (dev->can_dma_sg) {
		if (build_dma_sg(skb, urb) < 0)
			goto drop;
	}
	length = urb->transfer_buffer_length;

	/* don't assume the hardware handles USB_ZERO_PACKET
	 * NOTE:  strictly conforming cdc-ether devices should expect
	 * the ZLP here, but ignore the one-byte packet.
	 * NOTE2: CDC NCM specification is different from CDC ECM when
	 * handling ZLP/short packets, so cdc_ncm driver will make short
	 * packet itself if needed.
	 */
	if (length % dev->maxpacket == 0) {
		if (!(info->flags & FLAG_SEND_ZLP)) {
			if (!(info->flags & FLAG_MULTI_PACKET)) {
				length++;
				if (skb_tailroom(skb) && !urb->num_sgs) {
					skb->data[skb->len] = 0;
					__skb_put(skb, 1);
				} else if (urb->num_sgs)
					sg_set_buf(&urb->sg[urb->num_sgs++],
							dev->padding_pkt, 1);
			}
		} else
			urb->transfer_flags |= URB_ZERO_PACKET;
	}
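	/* For illustration: with 512 byte high speed bulk packets, a
	 * 1024 byte frame hits the branch above; without FLAG_SEND_ZLP
	 * it grows to 1025 bytes (one pad byte from the tailroom, or
	 * from padding_pkt via SG), while FLAG_SEND_ZLP devices keep
	 * 1024 bytes and set URB_ZERO_PACKET instead.
	 */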
	urb->transfer_buffer_length = length;

	if (info->flags & FLAG_MULTI_PACKET) {
		/* Driver has set number of packets and a length delta.
		 * Calculate the complete length and ensure that it's
		 * positive.
		 */
		entry->length += length;
		if (WARN_ON_ONCE(entry->length <= 0))
			entry->length = length;
	} else {
		usbnet_set_skb_tx_stats(skb, 1, length);
	}

	spin_lock_irqsave(&dev->txq.lock, flags);
	retval = usb_autopm_get_interface_async(dev->intf);
	if (retval < 0) {
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		goto drop;
	}
	if (netif_queue_stopped(net)) {
		usb_autopm_put_interface_async(dev->intf);
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		goto drop;
	}

#ifdef CONFIG_PM
	/* if this triggers the device is still asleep */
	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		/* transmission will be done in resume */
		usb_anchor_urb(urb, &dev->deferred);
		/* no use to process more packets */
		netif_stop_queue(net);
		usb_put_urb(urb);
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		netdev_dbg(dev->net, "Delaying transmission for resumption\n");
		goto deferred;
	}
#endif

	switch ((retval = usb_submit_urb (urb, GFP_ATOMIC))) {
	case -EPIPE:
		netif_stop_queue (net);
		usbnet_defer_kevent (dev, EVENT_TX_HALT);
		usb_autopm_put_interface_async(dev->intf);
		break;
	default:
		usb_autopm_put_interface_async(dev->intf);
		netif_dbg(dev, tx_err, dev->net,
			  "tx: submit urb err %d\n", retval);
		break;
	case 0:
		netif_trans_update(net);
		__usbnet_queue_skb(&dev->txq, skb, tx_start);
		if (dev->txq.qlen >= TX_QLEN (dev))
			netif_stop_queue (net);
	}
	spin_unlock_irqrestore (&dev->txq.lock, flags);

	if (retval) {
		netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", retval);
drop:
		dev->net->stats.tx_dropped++;
not_drop:
		if (skb)
			dev_kfree_skb_any (skb);
		if (urb) {
			kfree(urb->sg);
			usb_free_urb(urb);
		}
	} else
		netif_dbg(dev, tx_queued, dev->net,
			  "> tx, len %u, type 0x%x\n", length, skb->protocol);
#ifdef CONFIG_PM
deferred:
#endif
	return NETDEV_TX_OK;
}
EXPORT_SYMBOL_GPL(usbnet_start_xmit);

static int rx_alloc_submit(struct usbnet *dev, gfp_t flags)
{
	struct urb	*urb;
	int		i;
	int		ret = 0;

	/* don't refill the queue all at once */
	for (i = 0; i < 10 && dev->rxq.qlen < RX_QLEN(dev); i++) {
		urb = usb_alloc_urb(0, flags);
		if (urb != NULL) {
			ret = rx_submit(dev, urb, flags);
			if (ret)
				goto err;
		} else {
			ret = -ENOMEM;
			goto err;
		}
	}
err:
	return ret;
}

static inline void usb_free_skb(struct sk_buff *skb)
{
	struct skb_data *entry = (struct skb_data *)skb->cb;

	usb_free_urb(entry->urb);
	dev_kfree_skb(skb);
}
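
/*
 * For illustration: every skb in flight carries a struct skb_data in
 * skb->cb.  Its state walks rx_start -> rx_done (or tx_start -> tx_done)
 * through the completion handlers, may be diverted to unlink_start by
 * unlink_urbs(), and becomes rx_cleanup when the buffer must simply be
 * freed; usbnet_bh() below dispatches on that state.
 */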

/*-------------------------------------------------------------------------*/

// tasklet (work deferred from completions, in_irq) or timer

static void usbnet_bh (struct timer_list *t)
{
	struct usbnet		*dev = from_timer(dev, t, delay);
	struct sk_buff		*skb;
	struct skb_data		*entry;

	while ((skb = skb_dequeue (&dev->done))) {
		entry = (struct skb_data *) skb->cb;
		switch (entry->state) {
		case rx_done:
			if (rx_process(dev, skb))
				usb_free_skb(skb);
			continue;
		case tx_done:
			kfree(entry->urb->sg);
			fallthrough;
		case rx_cleanup:
			usb_free_skb(skb);
			continue;
		default:
			netdev_dbg(dev->net, "bogus skb state %d\n", entry->state);
		}
	}

	/* restart RX again after disabling due to high error rate */
	clear_bit(EVENT_RX_KILL, &dev->flags);

	/* waiting for all pending urbs to complete?
	 * only then can we forgo submitting anew
	 */
	if (waitqueue_active(&dev->wait)) {
		if (dev->txq.qlen + dev->rxq.qlen + dev->done.qlen == 0)
			wake_up_all(&dev->wait);

	// or are we maybe short a few urbs?
	} else if (netif_running (dev->net) &&
		   netif_device_present (dev->net) &&
		   netif_carrier_ok(dev->net) &&
		   !usbnet_going_away(dev) &&
		   !timer_pending(&dev->delay) &&
		   !test_bit(EVENT_RX_PAUSED, &dev->flags) &&
		   !test_bit(EVENT_RX_HALT, &dev->flags)) {
		int	temp = dev->rxq.qlen;

		if (temp < RX_QLEN(dev)) {
			if (rx_alloc_submit(dev, GFP_ATOMIC) == -ENOLINK)
				return;
			if (temp != dev->rxq.qlen)
				netif_dbg(dev, link, dev->net,
					  "rxqlen %d --> %d\n",
					  temp, dev->rxq.qlen);
			if (dev->rxq.qlen < RX_QLEN(dev))
				tasklet_schedule (&dev->bh);
		}
		if (dev->txq.qlen < TX_QLEN (dev))
			netif_wake_queue (dev->net);
	}
}

static void usbnet_bh_tasklet(struct tasklet_struct *t)
{
	struct usbnet *dev = from_tasklet(dev, t, bh);

	usbnet_bh(&dev->delay);
}


/*-------------------------------------------------------------------------
 *
 * USB Device Driver support
 *
 *-------------------------------------------------------------------------*/

// precondition: never called in_interrupt

void usbnet_disconnect (struct usb_interface *intf)
{
	struct usbnet		*dev;
	struct usb_device	*xdev;
	struct net_device	*net;
	struct urb		*urb;

	dev = usb_get_intfdata(intf);
	usb_set_intfdata(intf, NULL);
	if (!dev)
		return;
	usbnet_mark_going_away(dev);

	xdev = interface_to_usbdev (intf);

	netif_info(dev, probe, dev->net, "unregister '%s' usb-%s-%s, %s\n",
		   intf->dev.driver->name,
		   xdev->bus->bus_name, xdev->devpath,
		   dev->driver_info->description);

	net = dev->net;
	unregister_netdev (net);

	while ((urb = usb_get_from_anchor(&dev->deferred))) {
		dev_kfree_skb(urb->context);
		kfree(urb->sg);
		usb_free_urb(urb);
	}

	if (dev->driver_info->unbind)
		dev->driver_info->unbind(dev, intf);

	usb_kill_urb(dev->interrupt);
	usb_free_urb(dev->interrupt);
	kfree(dev->padding_pkt);

	free_percpu(net->tstats);
	free_netdev(net);
}
EXPORT_SYMBOL_GPL(usbnet_disconnect);
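
/*
 * For illustration, a minimal minidriver built on this core (all names
 * hypothetical): it fills in a driver_info, points its id_table entries
 * at it, and delegates the usb_driver methods to the helpers exported
 * here.
 *
 *	static const struct driver_info example_info = {
 *		.description	= "Example USB Ethernet",
 *		.flags		= FLAG_ETHER,
 *		.bind		= example_bind,	// calls usbnet_get_endpoints()
 *	};
 *
 *	static struct usb_driver example_driver = {
 *		.name		= "example",
 *		.id_table	= example_ids,	// .driver_info = &example_info
 *		.probe		= usbnet_probe,
 *		.disconnect	= usbnet_disconnect,
 *		.suspend	= usbnet_suspend,
 *		.resume		= usbnet_resume,
 *	};
 */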

static const struct net_device_ops usbnet_netdev_ops = {
	.ndo_open		= usbnet_open,
	.ndo_stop		= usbnet_stop,
	.ndo_start_xmit		= usbnet_start_xmit,
	.ndo_tx_timeout		= usbnet_tx_timeout,
	.ndo_set_rx_mode	= usbnet_set_rx_mode,
	.ndo_change_mtu		= usbnet_change_mtu,
	.ndo_get_stats64	= dev_get_tstats64,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

/*-------------------------------------------------------------------------*/

// precondition: never called in_interrupt

static struct device_type wlan_type = {
	.name	= "wlan",
};

static struct device_type wwan_type = {
	.name	= "wwan",
};

int
usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
{
	struct usbnet			*dev;
	struct net_device		*net;
	struct usb_host_interface	*interface;
	const struct driver_info	*info;
	struct usb_device		*xdev;
	int				status;
	const char			*name;
	struct usb_driver		*driver = to_usb_driver(udev->dev.driver);

	/* usbnet already took usb runtime pm, so have to enable the feature
	 * for usb interface, otherwise usb_autopm_get_interface may return
	 * failure if RUNTIME_PM is enabled.
	 */
	if (!driver->supports_autosuspend) {
		driver->supports_autosuspend = 1;
		pm_runtime_enable(&udev->dev);
	}

	name = udev->dev.driver->name;
	info = (const struct driver_info *) prod->driver_info;
	if (!info) {
		dev_dbg (&udev->dev, "blacklisted by %s\n", name);
		return -ENODEV;
	}
	xdev = interface_to_usbdev (udev);
	interface = udev->cur_altsetting;

	status = -ENOMEM;

	// set up our own records
	net = alloc_etherdev(sizeof(*dev));
	if (!net)
		goto out;

	/* netdev_printk() needs this so do it as early as possible */
	SET_NETDEV_DEV(net, &udev->dev);

	dev = netdev_priv(net);
	dev->udev = xdev;
	dev->intf = udev;
	dev->driver_info = info;
	dev->driver_name = name;
	dev->rx_speed = SPEED_UNSET;
	dev->tx_speed = SPEED_UNSET;

	net->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!net->tstats)
		goto out0;

	dev->msg_enable = netif_msg_init (msg_level, NETIF_MSG_DRV
				| NETIF_MSG_PROBE | NETIF_MSG_LINK);
	init_waitqueue_head(&dev->wait);
	skb_queue_head_init (&dev->rxq);
	skb_queue_head_init (&dev->txq);
	skb_queue_head_init (&dev->done);
	skb_queue_head_init(&dev->rxq_pause);
	tasklet_setup(&dev->bh, usbnet_bh_tasklet);
	INIT_WORK (&dev->kevent, usbnet_deferred_kevent);
	init_usb_anchor(&dev->deferred);
	timer_setup(&dev->delay, usbnet_bh, 0);
	mutex_init (&dev->phy_mutex);
	mutex_init(&dev->interrupt_mutex);
	dev->interrupt_count = 0;

	dev->net = net;
	strscpy(net->name, "usb%d", sizeof(net->name));

	/* rx and tx sides can use different message sizes;
	 * bind() should set rx_urb_size in that case.
	 */
	dev->hard_mtu = net->mtu + net->hard_header_len;
	net->min_mtu = 0;
	net->max_mtu = ETH_MAX_MTU;

	net->netdev_ops = &usbnet_netdev_ops;
	net->watchdog_timeo = TX_TIMEOUT_JIFFIES;
	net->ethtool_ops = &usbnet_ethtool_ops;

	// allow device-specific bind/init procedures
	// NOTE net->name still not usable ...
	if (info->bind) {
		status = info->bind (dev, udev);
		if (status < 0)
			goto out1;

		// heuristic:  "usb%d" for links we know are two-host,
		// else "eth%d" when there's reasonable doubt.  userspace
		// can rename the link if it knows better.
		if ((dev->driver_info->flags & FLAG_ETHER) != 0 &&
		    ((dev->driver_info->flags & FLAG_POINTTOPOINT) == 0 ||
		     /* somebody touched it */
		     !is_zero_ether_addr(net->dev_addr)))
			strscpy(net->name, "eth%d", sizeof(net->name));
		/* WLAN devices should always be named "wlan%d" */
		if ((dev->driver_info->flags & FLAG_WLAN) != 0)
			strscpy(net->name, "wlan%d", sizeof(net->name));
		/* WWAN devices should always be named "wwan%d" */
		if ((dev->driver_info->flags & FLAG_WWAN) != 0)
			strscpy(net->name, "wwan%d", sizeof(net->name));

		/* devices that cannot do ARP */
		if ((dev->driver_info->flags & FLAG_NOARP) != 0)
			net->flags |= IFF_NOARP;

		/* maybe the remote can't receive an Ethernet MTU */
		if (net->mtu > (dev->hard_mtu - net->hard_header_len))
			net->mtu = dev->hard_mtu - net->hard_header_len;
	} else if (!info->in || !info->out)
		status = usbnet_get_endpoints (dev, udev);
	else {
		u8	ep_addrs[3] = {
			info->in + USB_DIR_IN, info->out + USB_DIR_OUT, 0
		};

		dev->in = usb_rcvbulkpipe (xdev, info->in);
		dev->out = usb_sndbulkpipe (xdev, info->out);
		if (!(info->flags & FLAG_NO_SETINT))
			status = usb_set_interface (xdev,
				interface->desc.bInterfaceNumber,
				interface->desc.bAlternateSetting);
		else
			status = 0;

		if (status == 0 && !usb_check_bulk_endpoints(udev, ep_addrs))
			status = -EINVAL;
	}
	if (status >= 0 && dev->status)
		status = init_status (dev, udev);
	if (status < 0)
		goto out3;

	if (!dev->rx_urb_size)
		dev->rx_urb_size = dev->hard_mtu;
	dev->maxpacket = usb_maxpacket(dev->udev, dev->out);
	if (dev->maxpacket == 0) {
		/* that is a broken device */
		status = -ENODEV;
		goto out4;
	}

	/* this flags the device for user space */
	if (!is_valid_ether_addr(net->dev_addr))
		eth_hw_addr_random(net);

	if ((dev->driver_info->flags & FLAG_WLAN) != 0)
		SET_NETDEV_DEVTYPE(net, &wlan_type);
	if ((dev->driver_info->flags & FLAG_WWAN) != 0)
		SET_NETDEV_DEVTYPE(net, &wwan_type);

	/* initialize max rx_qlen and tx_qlen */
	usbnet_update_max_qlen(dev);

	if (dev->can_dma_sg && !(info->flags & FLAG_SEND_ZLP) &&
	    !(info->flags & FLAG_MULTI_PACKET)) {
		dev->padding_pkt = kzalloc(1, GFP_KERNEL);
		if (!dev->padding_pkt) {
			status = -ENOMEM;
			goto out4;
		}
	}

	status = register_netdev (net);
	if (status)
		goto out5;
	netif_info(dev, probe, dev->net,
		   "register '%s' at usb-%s-%s, %s, %pM\n",
		   udev->dev.driver->name,
		   xdev->bus->bus_name, xdev->devpath,
		   dev->driver_info->description,
		   net->dev_addr);

	// ok, it's ready to go.
	usb_set_intfdata (udev, dev);

	netif_device_attach (net);

	if (dev->driver_info->flags & FLAG_LINK_INTR)
		usbnet_link_change(dev, 0, 0);

	return 0;

out5:
	kfree(dev->padding_pkt);
out4:
	usb_free_urb(dev->interrupt);
out3:
	if (info->unbind)
		info->unbind (dev, udev);
out1:
	/* subdrivers must undo all they did in bind() if they
	 * fail it, but we may fail later and a deferred kevent
	 * may trigger an error resubmitting itself and, worse,
	 * schedule a timer. So we kill it all just in case.
	 */
	usbnet_mark_going_away(dev);
	cancel_work_sync(&dev->kevent);
	del_timer_sync(&dev->delay);
	free_percpu(net->tstats);
out0:
	free_netdev(net);
out:
	return status;
}
EXPORT_SYMBOL_GPL(usbnet_probe);

/*-------------------------------------------------------------------------*/

/*
 * suspend the whole driver as soon as the first interface is suspended
 * resume only when the last interface is resumed
 */

int usbnet_suspend (struct usb_interface *intf, pm_message_t message)
{
	struct usbnet		*dev = usb_get_intfdata(intf);

	if (!dev->suspend_count++) {
		spin_lock_irq(&dev->txq.lock);
		/* don't autosuspend while transmitting */
		if (dev->txq.qlen && PMSG_IS_AUTO(message)) {
			dev->suspend_count--;
			spin_unlock_irq(&dev->txq.lock);
			return -EBUSY;
		} else {
			set_bit(EVENT_DEV_ASLEEP, &dev->flags);
			spin_unlock_irq(&dev->txq.lock);
		}
		/*
		 * accelerate emptying of the rx and tx queues, to avoid
		 * having everything error out.
		 */
		netif_device_detach (dev->net);
		usbnet_terminate_urbs(dev);
		__usbnet_status_stop_force(dev);

		/*
		 * reattach so runtime management can use and
		 * wake the device
		 */
		netif_device_attach (dev->net);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(usbnet_suspend);

int usbnet_resume (struct usb_interface *intf)
{
	struct usbnet		*dev = usb_get_intfdata(intf);
	struct sk_buff		*skb;
	struct urb		*res;
	int			retval;

	if (!--dev->suspend_count) {
		/* resume interrupt URB if it was previously submitted */
		__usbnet_status_start_force(dev, GFP_NOIO);

		spin_lock_irq(&dev->txq.lock);
		while ((res = usb_get_from_anchor(&dev->deferred))) {

			skb = (struct sk_buff *)res->context;
			retval = usb_submit_urb(res, GFP_ATOMIC);
			if (retval < 0) {
				dev_kfree_skb_any(skb);
				kfree(res->sg);
				usb_free_urb(res);
				usb_autopm_put_interface_async(dev->intf);
			} else {
				netif_trans_update(dev->net);
				__skb_queue_tail(&dev->txq, skb);
			}
		}

		smp_mb();
		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
		spin_unlock_irq(&dev->txq.lock);

		if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
			/* handle remote wakeup ASAP
			 * we cannot race against stop
			 */
			if (netif_device_present(dev->net) &&
			    !timer_pending(&dev->delay) &&
			    !test_bit(EVENT_RX_HALT, &dev->flags))
				rx_alloc_submit(dev, GFP_NOIO);

			if (!(dev->txq.qlen >= TX_QLEN(dev)))
				netif_tx_wake_all_queues(dev->net);
			tasklet_schedule (&dev->bh);
		}
	}

	if (test_and_clear_bit(EVENT_DEVICE_REPORT_IDLE, &dev->flags))
		usb_autopm_get_interface_no_resume(intf);

	return 0;
}
EXPORT_SYMBOL_GPL(usbnet_resume);
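
/*
 * For illustration: a device exposing two usbnet-managed interfaces
 * suspends on the first usbnet_suspend() call (suspend_count 0 -> 1;
 * the second call only bumps the counter) and restarts its queues on
 * the usbnet_resume() call that brings suspend_count back to zero.
 */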
/*
 * If a subdriver implements manage_power, it is assumed to always be
 * ready to be suspended; otherwise it must report its readiness to be
 * suspended explicitly.
 */
void usbnet_device_suggests_idle(struct usbnet *dev)
{
	if (!test_and_set_bit(EVENT_DEVICE_REPORT_IDLE, &dev->flags)) {
		dev->intf->needs_remote_wakeup = 1;
		usb_autopm_put_interface_async(dev->intf);
	}
}
EXPORT_SYMBOL(usbnet_device_suggests_idle);

/*
 * For devices that can do without special commands
 */
int usbnet_manage_power(struct usbnet *dev, int on)
{
	dev->intf->needs_remote_wakeup = on;
	return 0;
}
EXPORT_SYMBOL(usbnet_manage_power);

void usbnet_link_change(struct usbnet *dev, bool link, bool need_reset)
{
	/* update link only after the link has been reset */
	if (link && !need_reset)
		netif_carrier_on(dev->net);
	else
		netif_carrier_off(dev->net);

	if (need_reset && link)
		usbnet_defer_kevent(dev, EVENT_LINK_RESET);
	else
		usbnet_defer_kevent(dev, EVENT_LINK_CHANGE);
}
EXPORT_SYMBOL(usbnet_link_change);

/*-------------------------------------------------------------------------*/
static int __usbnet_read_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
			     u16 value, u16 index, void *data, u16 size)
{
	void *buf = NULL;
	int err = -ENOMEM;

	netdev_dbg(dev->net, "usbnet_read_cmd cmd=0x%02x reqtype=%02x"
		   " value=0x%04x index=0x%04x size=%d\n",
		   cmd, reqtype, value, index, size);

	if (size) {
		buf = kmalloc(size, GFP_NOIO);
		if (!buf)
			goto out;
	}

	err = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
			      cmd, reqtype, value, index, buf, size,
			      USB_CTRL_GET_TIMEOUT);
	if (err > 0 && err <= size) {
		if (data)
			memcpy(data, buf, err);
		else
			netdev_dbg(dev->net,
				   "Huh? Data requested but thrown away.\n");
	}
	kfree(buf);
out:
	return err;
}

static int __usbnet_write_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
			      u16 value, u16 index, const void *data,
			      u16 size)
{
	void *buf = NULL;
	int err = -ENOMEM;

	netdev_dbg(dev->net, "usbnet_write_cmd cmd=0x%02x reqtype=%02x"
		   " value=0x%04x index=0x%04x size=%d\n",
		   cmd, reqtype, value, index, size);

	if (data) {
		buf = kmemdup(data, size, GFP_NOIO);
		if (!buf)
			goto out;
	} else {
		if (size) {
			WARN_ON_ONCE(1);
			err = -EINVAL;
			goto out;
		}
	}

	err = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
			      cmd, reqtype, value, index, buf, size,
			      USB_CTRL_SET_TIMEOUT);
	kfree(buf);

out:
	return err;
}

/*
 * This function must not be called from a suspend/resume callback;
 * doing so would deadlock.
 */
int usbnet_read_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
		    u16 value, u16 index, void *data, u16 size)
{
	int ret;

	if (usb_autopm_get_interface(dev->intf) < 0)
		return -ENODEV;
	ret = __usbnet_read_cmd(dev, cmd, reqtype, value, index,
				data, size);
	usb_autopm_put_interface(dev->intf);
	return ret;
}
EXPORT_SYMBOL_GPL(usbnet_read_cmd);
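/*
 * Illustrative sketch, not part of this driver: a hypothetical helper
 * reading a 16-bit little-endian vendor register via usbnet_read_cmd().
 * XYZ_REQ_READ_REG and the helper are made up; the request-type flags
 * are the standard ch9 definitions.  Because the wrapper above bounces
 * the transfer through a heap buffer, reading into a stack variable
 * here is safe.
 */
#define XYZ_REQ_READ_REG	0x05	/* hypothetical vendor request */

static int __maybe_unused xyz_read_reg(struct usbnet *dev, u16 reg, u16 *val)
{
	__le16 res;
	int ret;

	/* may sleep and takes an autopm reference: process context only */
	ret = usbnet_read_cmd(dev, XYZ_REQ_READ_REG,
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0, reg, &res, sizeof(res));
	if (ret < 0)
		return ret;
	if (ret < sizeof(res))
		return -EIO;

	*val = le16_to_cpu(res);
	return 0;
}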
/*
 * This function must not be called from a suspend/resume callback;
 * doing so would deadlock.
 */
int usbnet_write_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
		     u16 value, u16 index, const void *data, u16 size)
{
	int ret;

	if (usb_autopm_get_interface(dev->intf) < 0)
		return -ENODEV;
	ret = __usbnet_write_cmd(dev, cmd, reqtype, value, index,
				 data, size);
	usb_autopm_put_interface(dev->intf);
	return ret;
}
EXPORT_SYMBOL_GPL(usbnet_write_cmd);

/*
 * This function is safe to call from a suspend/resume callback and,
 * as a rule, should only be called from one.
 */
int usbnet_read_cmd_nopm(struct usbnet *dev, u8 cmd, u8 reqtype,
			 u16 value, u16 index, void *data, u16 size)
{
	return __usbnet_read_cmd(dev, cmd, reqtype, value, index,
				 data, size);
}
EXPORT_SYMBOL_GPL(usbnet_read_cmd_nopm);

/*
 * This function is safe to call from a suspend/resume callback and,
 * as a rule, should only be called from one.
 */
int usbnet_write_cmd_nopm(struct usbnet *dev, u8 cmd, u8 reqtype,
			  u16 value, u16 index, const void *data,
			  u16 size)
{
	return __usbnet_write_cmd(dev, cmd, reqtype, value, index,
				  data, size);
}
EXPORT_SYMBOL_GPL(usbnet_write_cmd_nopm);

static void usbnet_async_cmd_cb(struct urb *urb)
{
	struct usb_ctrlrequest *req = (struct usb_ctrlrequest *)urb->context;
	int status = urb->status;

	if (status < 0)
		dev_dbg(&urb->dev->dev, "%s failed with %d",
			__func__, status);

	kfree(req);
	usb_free_urb(urb);
}

/*
 * The caller must make sure that the device can't be put into a
 * suspended state until the control URB completes.
 */
int usbnet_write_cmd_async(struct usbnet *dev, u8 cmd, u8 reqtype,
			   u16 value, u16 index, const void *data, u16 size)
{
	struct usb_ctrlrequest *req;
	struct urb *urb;
	int err = -ENOMEM;
	void *buf = NULL;

	netdev_dbg(dev->net, "usbnet_write_cmd_async cmd=0x%02x reqtype=%02x"
		   " value=0x%04x index=0x%04x size=%d\n",
		   cmd, reqtype, value, index, size);

	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb)
		goto fail;

	if (data) {
		buf = kmemdup(data, size, GFP_ATOMIC);
		if (!buf) {
			netdev_err(dev->net, "Error allocating buffer"
				   " in %s!\n", __func__);
			goto fail_free_urb;
		}
	}

	req = kmalloc(sizeof(struct usb_ctrlrequest), GFP_ATOMIC);
	if (!req)
		goto fail_free_buf;

	req->bRequestType = reqtype;
	req->bRequest = cmd;
	req->wValue = cpu_to_le16(value);
	req->wIndex = cpu_to_le16(index);
	req->wLength = cpu_to_le16(size);

	usb_fill_control_urb(urb, dev->udev,
			     usb_sndctrlpipe(dev->udev, 0),
			     (void *)req, buf, size,
			     usbnet_async_cmd_cb, req);
	urb->transfer_flags |= URB_FREE_BUFFER;

	err = usb_submit_urb(urb, GFP_ATOMIC);
	if (err < 0) {
		netdev_err(dev->net, "Error submitting the control"
			   " message: status=%d\n", err);
		goto fail_free_all;
	}
	return 0;

fail_free_all:
	kfree(req);
fail_free_buf:
	kfree(buf);
	/*
	 * Clear URB_FREE_BUFFER to avoid a double free; by this point
	 * the flag may already have been set, since it can only be set
	 * after filling the URB.
	 */
	urb->transfer_flags = 0;
fail_free_urb:
	usb_free_urb(urb);
fail:
	return err;
}
EXPORT_SYMBOL_GPL(usbnet_write_cmd_async);
/*-------------------------------------------------------------------------*/
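/*
 * Illustrative sketch, not part of this driver: issuing a vendor
 * command from a context that cannot sleep, e.g. a status-URB
 * completion handler.  usbnet_write_cmd_async() allocates with
 * GFP_ATOMIC and cleans up in its completion callback, so the caller
 * never blocks; per the comment above, it must only keep the device
 * from suspending until the URB completes.  XYZ_REQ_SET_LED and the
 * helper are made up.
 */
#define XYZ_REQ_SET_LED	0x09	/* hypothetical vendor request */

static void __maybe_unused xyz_set_led_atomic(struct usbnet *dev, u16 on)
{
	/* no data stage: the value travels in wValue, the buffer is NULL */
	usbnet_write_cmd_async(dev, XYZ_REQ_SET_LED,
			       USB_DIR_OUT | USB_TYPE_VENDOR |
			       USB_RECIP_DEVICE,
			       on, 0, NULL, 0);
}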
static int __init usbnet_init(void)
{
	/* Compiler should optimize this out. */
	BUILD_BUG_ON(
		sizeof_field(struct sk_buff, cb) < sizeof(struct skb_data));

	return 0;
}
module_init(usbnet_init);

static void __exit usbnet_exit(void)
{
}
module_exit(usbnet_exit);

MODULE_AUTHOR("David Brownell");
MODULE_DESCRIPTION("USB network driver framework");
MODULE_LICENSE("GPL");
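/*
 * Illustrative sketch, not part of this driver: the BUILD_BUG_ON in
 * usbnet_init() guards the idiom, used throughout this file, of
 * parking per-URB bookkeeping (struct skb_data) in the skb->cb
 * scratch area instead of allocating it separately:
 */
static inline struct skb_data *example_skb_entry(struct sk_buff *skb)
{
	/* valid only while this layer owns the skb, and only because
	 * sizeof(struct skb_data) <= sizeof_field(struct sk_buff, cb)
	 */
	return (struct skb_data *)skb->cb;
}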