// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * USB Network driver infrastructure
 * Copyright (C) 2000-2005 by David Brownell
 * Copyright (C) 2003-2005 David Hollis <dhollis@davehollis.com>
 */

/*
 * This is a generic "USB networking" framework that works with several
 * kinds of full and high speed networking devices: host-to-host cables,
 * smart usb peripherals, and actual Ethernet adapters.
 *
 * These devices usually differ in terms of control protocols (if they
 * even have one!) and sometimes they define new framing to wrap or batch
 * Ethernet packets. Otherwise, they talk to USB pretty much the same,
 * so interface (un)binding, endpoint I/O queues, fault handling, and other
 * issues can usefully be addressed by this framework.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ctype.h>
#include <linux/ethtool.h>
#include <linux/workqueue.h>
#include <linux/mii.h>
#include <linux/usb.h>
#include <linux/usb/usbnet.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/pm_runtime.h>

/*-------------------------------------------------------------------------*/

/*
 * Nineteen USB 1.1 max size bulk transactions per frame (ms), max.
 * Several dozen bytes of IPv4 data can fit in two such transactions.
 * One maximum size Ethernet packet takes twenty four of them.
 * For high speed, each frame comfortably fits almost 36 max size
 * Ethernet packets (so queues should be bigger).
 *
 * The goal is to let the USB host controller be busy for 5msec or
 * more before an irq is required, under load. Jumbograms change
 * the equation.
 */
#define MAX_QUEUE_MEMORY	(60 * 1518)
#define RX_QLEN(dev)		((dev)->rx_qlen)
#define TX_QLEN(dev)		((dev)->tx_qlen)
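/*
 * Worked example (editor's illustration, not from the original source):
 * with the usual 1518 byte max Ethernet frame, MAX_QUEUE_MEMORY is
 * 60 * 1518 = 91080 bytes. At high speed with rx_urb_size == 1518 that
 * yields rx_qlen = tx_qlen = 60 URBs; a 16 KB rx_urb_size would yield
 * only 5 rx URBs from the same memory budget.
 */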
// reawaken network queue this soon after stopping; else watchdog barks
#define TX_TIMEOUT_JIFFIES	(5*HZ)

/* throttle rx/tx briefly after some faults, so hub_wq might disconnect()
 * us (it polls at HZ/4 usually) before we report too many false errors.
 */
#define THROTTLE_JIFFIES	(HZ/8)

// between wakeups
#define UNLINK_TIMEOUT_MS	3

/*-------------------------------------------------------------------------*/

/* use ethtool to change the level for any given device */
static int msg_level = -1;
module_param (msg_level, int, 0);
MODULE_PARM_DESC (msg_level, "Override default message level");

/*-------------------------------------------------------------------------*/

static const char * const usbnet_event_names[] = {
	[EVENT_TX_HALT]		   = "EVENT_TX_HALT",
	[EVENT_RX_HALT]		   = "EVENT_RX_HALT",
	[EVENT_RX_MEMORY]	   = "EVENT_RX_MEMORY",
	[EVENT_STS_SPLIT]	   = "EVENT_STS_SPLIT",
	[EVENT_LINK_RESET]	   = "EVENT_LINK_RESET",
	[EVENT_RX_PAUSED]	   = "EVENT_RX_PAUSED",
	[EVENT_DEV_ASLEEP]	   = "EVENT_DEV_ASLEEP",
	[EVENT_DEV_OPEN]	   = "EVENT_DEV_OPEN",
	[EVENT_DEVICE_REPORT_IDLE] = "EVENT_DEVICE_REPORT_IDLE",
	[EVENT_NO_RUNTIME_PM]	   = "EVENT_NO_RUNTIME_PM",
	[EVENT_RX_KILL]		   = "EVENT_RX_KILL",
	[EVENT_LINK_CHANGE]	   = "EVENT_LINK_CHANGE",
	[EVENT_SET_RX_MODE]	   = "EVENT_SET_RX_MODE",
	[EVENT_NO_IP_ALIGN]	   = "EVENT_NO_IP_ALIGN",
};

/* handles CDC Ethernet and many other network "bulk data" interfaces */
int usbnet_get_endpoints(struct usbnet *dev, struct usb_interface *intf)
{
	int				tmp;
	struct usb_host_interface	*alt = NULL;
	struct usb_host_endpoint	*in = NULL, *out = NULL;
	struct usb_host_endpoint	*status = NULL;

	for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
		unsigned	ep;

		in = out = status = NULL;
		alt = intf->altsetting + tmp;

		/* take the first altsetting with in-bulk + out-bulk;
		 * remember any status endpoint, just in case;
		 * ignore other endpoints and altsettings.
		 */
		for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
			struct usb_host_endpoint	*e;
			int				intr = 0;

			e = alt->endpoint + ep;

			/* ignore endpoints which cannot transfer data */
			if (!usb_endpoint_maxp(&e->desc))
				continue;

			switch (e->desc.bmAttributes) {
			case USB_ENDPOINT_XFER_INT:
				if (!usb_endpoint_dir_in(&e->desc))
					continue;
				intr = 1;
				fallthrough;
			case USB_ENDPOINT_XFER_BULK:
				break;
			default:
				continue;
			}
			if (usb_endpoint_dir_in(&e->desc)) {
				if (!intr && !in)
					in = e;
				else if (intr && !status)
					status = e;
			} else {
				if (!out)
					out = e;
			}
		}
		if (in && out)
			break;
	}
	if (!alt || !in || !out)
		return -EINVAL;

	if (alt->desc.bAlternateSetting != 0 ||
	    !(dev->driver_info->flags & FLAG_NO_SETINT)) {
		tmp = usb_set_interface (dev->udev, alt->desc.bInterfaceNumber,
				alt->desc.bAlternateSetting);
		if (tmp < 0)
			return tmp;
	}

	dev->in = usb_rcvbulkpipe (dev->udev,
			in->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
	dev->out = usb_sndbulkpipe (dev->udev,
			out->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
	dev->status = status;
	return 0;
}
EXPORT_SYMBOL_GPL(usbnet_get_endpoints);

int usbnet_get_ethernet_addr(struct usbnet *dev, int iMACAddress)
{
	u8		addr[ETH_ALEN];
	int 		tmp = -1, ret;
	unsigned char	buf [13];

	ret = usb_string(dev->udev, iMACAddress, buf, sizeof buf);
	if (ret == 12)
		tmp = hex2bin(addr, buf, 6);
	if (tmp < 0) {
		dev_dbg(&dev->udev->dev,
			"bad MAC string %d fetch, %d\n", iMACAddress, tmp);
		if (ret >= 0)
			ret = -EINVAL;
		return ret;
	}
	eth_hw_addr_set(dev->net, addr);
	return 0;
}
EXPORT_SYMBOL_GPL(usbnet_get_ethernet_addr);
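/*
 * Usage sketch (editor's illustration; example_bind() and
 * EXAMPLE_IMAC_INDEX are hypothetical, not part of this file). A simple
 * CDC-style bind() usually just locates the bulk endpoints and reads
 * the MAC string:
 *
 *	static int example_bind(struct usbnet *dev, struct usb_interface *intf)
 *	{
 *		int status = usbnet_get_endpoints(dev, intf);
 *
 *		if (status < 0)
 *			return status;
 *		return usbnet_get_ethernet_addr(dev, EXAMPLE_IMAC_INDEX);
 *	}
 */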
static void intr_complete (struct urb *urb)
{
	struct usbnet	*dev = urb->context;
	int		status = urb->status;

	switch (status) {
	/* success */
	case 0:
		dev->driver_info->status(dev, urb);
		break;

	/* software-driven interface shutdown */
	case -ENOENT:		/* urb killed */
	case -ESHUTDOWN:	/* hardware gone */
		netif_dbg(dev, ifdown, dev->net,
			  "intr shutdown, code %d\n", status);
		return;

	/* NOTE:  not throttling like RX/TX, since this endpoint
	 * already polls infrequently
	 */
	default:
		netdev_dbg(dev->net, "intr status %d\n", status);
		break;
	}

	status = usb_submit_urb (urb, GFP_ATOMIC);
	if (status != 0)
		netif_err(dev, timer, dev->net,
			  "intr resubmit --> %d\n", status);
}

static int init_status (struct usbnet *dev, struct usb_interface *intf)
{
	char		*buf = NULL;
	unsigned	pipe = 0;
	unsigned	maxp;
	unsigned	period;

	if (!dev->driver_info->status)
		return 0;

	pipe = usb_rcvintpipe (dev->udev,
			dev->status->desc.bEndpointAddress
				& USB_ENDPOINT_NUMBER_MASK);
	maxp = usb_maxpacket(dev->udev, pipe);

	/* avoid 1 msec chatter:  min 8 msec poll rate */
	period = max ((int) dev->status->desc.bInterval,
		(dev->udev->speed == USB_SPEED_HIGH) ? 7 : 3);

	buf = kmalloc (maxp, GFP_KERNEL);
	if (buf) {
		dev->interrupt = usb_alloc_urb (0, GFP_KERNEL);
		if (!dev->interrupt) {
			kfree (buf);
			return -ENOMEM;
		} else {
			usb_fill_int_urb(dev->interrupt, dev->udev, pipe,
				buf, maxp, intr_complete, dev, period);
			dev->interrupt->transfer_flags |= URB_FREE_BUFFER;
			dev_dbg(&intf->dev,
				"status ep%din, %d bytes period %d\n",
				usb_pipeendpoint(pipe), maxp, period);
		}
	}
	return 0;
}

/* Submit the interrupt URB if not previously submitted, increasing refcount */
int usbnet_status_start(struct usbnet *dev, gfp_t mem_flags)
{
	int ret = 0;

	WARN_ON_ONCE(dev->interrupt == NULL);
	if (dev->interrupt) {
		mutex_lock(&dev->interrupt_mutex);

		if (++dev->interrupt_count == 1)
			ret = usb_submit_urb(dev->interrupt, mem_flags);

		dev_dbg(&dev->udev->dev, "incremented interrupt URB count to %d\n",
			dev->interrupt_count);
		mutex_unlock(&dev->interrupt_mutex);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(usbnet_status_start);

/* For resume; submit interrupt URB if previously submitted */
static int __usbnet_status_start_force(struct usbnet *dev, gfp_t mem_flags)
{
	int ret = 0;

	mutex_lock(&dev->interrupt_mutex);
	if (dev->interrupt_count) {
		ret = usb_submit_urb(dev->interrupt, mem_flags);
		dev_dbg(&dev->udev->dev,
			"submitted interrupt URB for resume\n");
	}
	mutex_unlock(&dev->interrupt_mutex);
	return ret;
}

/* Kill the interrupt URB if all submitters want it killed */
void usbnet_status_stop(struct usbnet *dev)
{
	if (dev->interrupt) {
		mutex_lock(&dev->interrupt_mutex);
		WARN_ON(dev->interrupt_count == 0);

		if (dev->interrupt_count && --dev->interrupt_count == 0)
			usb_kill_urb(dev->interrupt);

		dev_dbg(&dev->udev->dev,
			"decremented interrupt URB count to %d\n",
			dev->interrupt_count);
		mutex_unlock(&dev->interrupt_mutex);
	}
}
EXPORT_SYMBOL_GPL(usbnet_status_stop);

/* For suspend; always kill interrupt URB */
static void __usbnet_status_stop_force(struct usbnet *dev)
{
	if (dev->interrupt) {
		mutex_lock(&dev->interrupt_mutex);
		usb_kill_urb(dev->interrupt);
		dev_dbg(&dev->udev->dev, "killed interrupt URB for suspend\n");
		mutex_unlock(&dev->interrupt_mutex);
	}
}
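/*
 * Pairing sketch (editor's illustration): usbnet_status_start() and
 * usbnet_status_stop() are refcounted, so a minidriver that needs the
 * status endpoint only around some operation can bracket it:
 *
 *	ret = usbnet_status_start(dev, GFP_KERNEL);
 *	if (!ret) {
 *		... poke the device, wait for a notification ...
 *		usbnet_status_stop(dev);
 *	}
 */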
/* Passes this packet up the stack, updating its accounting.
 * Some link protocols batch packets, so their rx_fixup paths
 * can return clones as well as just modify the original skb.
 */
void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb)
{
	struct pcpu_sw_netstats *stats64 = this_cpu_ptr(dev->net->tstats);
	unsigned long flags;
	int	status;

	if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
		skb_queue_tail(&dev->rxq_pause, skb);
		return;
	}

	/* only update if unset to allow minidriver rx_fixup override */
	if (skb->protocol == 0)
		skb->protocol = eth_type_trans (skb, dev->net);

	flags = u64_stats_update_begin_irqsave(&stats64->syncp);
	u64_stats_inc(&stats64->rx_packets);
	u64_stats_add(&stats64->rx_bytes, skb->len);
	u64_stats_update_end_irqrestore(&stats64->syncp, flags);

	netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
		  skb->len + sizeof (struct ethhdr), skb->protocol);
	memset (skb->cb, 0, sizeof (struct skb_data));

	if (skb_defer_rx_timestamp(skb))
		return;

	status = netif_rx (skb);
	if (status != NET_RX_SUCCESS)
		netif_dbg(dev, rx_err, dev->net,
			  "netif_rx status %d\n", status);
}
EXPORT_SYMBOL_GPL(usbnet_skb_return);

/* must be called if hard_mtu or rx_urb_size changed */
void usbnet_update_max_qlen(struct usbnet *dev)
{
	enum usb_device_speed speed = dev->udev->speed;

	if (!dev->rx_urb_size || !dev->hard_mtu)
		goto insanity;
	switch (speed) {
	case USB_SPEED_HIGH:
		dev->rx_qlen = MAX_QUEUE_MEMORY / dev->rx_urb_size;
		dev->tx_qlen = MAX_QUEUE_MEMORY / dev->hard_mtu;
		break;
	case USB_SPEED_SUPER:
	case USB_SPEED_SUPER_PLUS:
		/*
		 * Don't take the default 5 ms queue length for super
		 * speed HCs, to save memory; iperf tests show that a
		 * 2.5 ms queue length works well
		 */
		dev->rx_qlen = 5 * MAX_QUEUE_MEMORY / dev->rx_urb_size;
		dev->tx_qlen = 5 * MAX_QUEUE_MEMORY / dev->hard_mtu;
		break;
	default:
insanity:
		dev->rx_qlen = dev->tx_qlen = 4;
	}
}
EXPORT_SYMBOL_GPL(usbnet_update_max_qlen);


/*-------------------------------------------------------------------------
 *
 * Network Device Driver (peer link to "Host Device", from USB host)
 *
 *-------------------------------------------------------------------------*/

int usbnet_change_mtu (struct net_device *net, int new_mtu)
{
	struct usbnet	*dev = netdev_priv(net);
	int		ll_mtu = new_mtu + net->hard_header_len;
	int		old_hard_mtu = dev->hard_mtu;
	int		old_rx_urb_size = dev->rx_urb_size;

	// no second zero-length packet read wanted after mtu-sized packets
	if ((ll_mtu % dev->maxpacket) == 0)
		return -EDOM;
	net->mtu = new_mtu;

	dev->hard_mtu = net->mtu + net->hard_header_len;
	if (dev->rx_urb_size == old_hard_mtu) {
		dev->rx_urb_size = dev->hard_mtu;
		if (dev->rx_urb_size > old_rx_urb_size) {
			usbnet_pause_rx(dev);
			usbnet_unlink_rx_urbs(dev);
			usbnet_resume_rx(dev);
		}
	}

	/* max qlen depends on hard_mtu and rx_urb_size */
	usbnet_update_max_qlen(dev);

	return 0;
}
EXPORT_SYMBOL_GPL(usbnet_change_mtu);
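/*
 * Worked example for the -EDOM check above (editor's illustration):
 * with a 512 byte bulk maxpacket and a 14 byte Ethernet header, an MTU
 * of 498 would make ll_mtu = 512, an exact multiple of maxpacket; every
 * MTU-sized frame would then need a trailing zero-length packet, so
 * such MTUs are rejected.
 */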
/* The caller must hold list->lock */
static void __usbnet_queue_skb(struct sk_buff_head *list,
			struct sk_buff *newsk, enum skb_state state)
{
	struct skb_data *entry = (struct skb_data *) newsk->cb;

	__skb_queue_tail(list, newsk);
	entry->state = state;
}

/*-------------------------------------------------------------------------*/

/* some LK 2.4 HCDs oopsed if we freed or resubmitted urbs from
 * completion callbacks.  2.5 should have fixed those bugs...
 */

static enum skb_state defer_bh(struct usbnet *dev, struct sk_buff *skb,
		struct sk_buff_head *list, enum skb_state state)
{
	unsigned long		flags;
	enum skb_state		old_state;
	struct skb_data *entry = (struct skb_data *) skb->cb;

	spin_lock_irqsave(&list->lock, flags);
	old_state = entry->state;
	entry->state = state;
	__skb_unlink(skb, list);

	/* defer_bh() is never called with list == &dev->done.
	 * spin_lock_nested() tells lockdep that it is OK to take
	 * dev->done.lock here with list->lock held.
	 */
	spin_lock_nested(&dev->done.lock, SINGLE_DEPTH_NESTING);

	__skb_queue_tail(&dev->done, skb);
	if (dev->done.qlen == 1)
		tasklet_schedule(&dev->bh);
	spin_unlock(&dev->done.lock);
	spin_unlock_irqrestore(&list->lock, flags);
	return old_state;
}

/* some work can't be done in tasklets, so we use keventd
 *
 * NOTE:  annoying asymmetry:  if it's active, schedule_work() fails,
 * but tasklet_schedule() doesn't.  hope the failure is rare.
 */
void usbnet_defer_kevent (struct usbnet *dev, int work)
{
	set_bit (work, &dev->flags);
	if (!usbnet_going_away(dev)) {
		if (!schedule_work(&dev->kevent))
			netdev_dbg(dev->net,
				   "kevent %s may have been dropped\n",
				   usbnet_event_names[work]);
		else
			netdev_dbg(dev->net,
				   "kevent %s scheduled\n", usbnet_event_names[work]);
	}
}
EXPORT_SYMBOL_GPL(usbnet_defer_kevent);

/*-------------------------------------------------------------------------*/

static void rx_complete (struct urb *urb);

static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
{
	struct sk_buff		*skb;
	struct skb_data		*entry;
	int			retval = 0;
	unsigned long		lockflags;
	size_t			size = dev->rx_urb_size;

	/* prevent rx skb allocation when error ratio is high */
	if (test_bit(EVENT_RX_KILL, &dev->flags)) {
		usb_free_urb(urb);
		return -ENOLINK;
	}

	if (test_bit(EVENT_NO_IP_ALIGN, &dev->flags))
		skb = __netdev_alloc_skb(dev->net, size, flags);
	else
		skb = __netdev_alloc_skb_ip_align(dev->net, size, flags);
	if (!skb) {
		netif_dbg(dev, rx_err, dev->net, "no rx skb\n");
		usbnet_defer_kevent (dev, EVENT_RX_MEMORY);
		usb_free_urb (urb);
		return -ENOMEM;
	}

	entry = (struct skb_data *) skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->length = 0;

	usb_fill_bulk_urb (urb, dev->udev, dev->in,
		skb->data, size, rx_complete, skb);

	spin_lock_irqsave (&dev->rxq.lock, lockflags);

	if (netif_running (dev->net) &&
	    netif_device_present (dev->net) &&
	    test_bit(EVENT_DEV_OPEN, &dev->flags) &&
	    !test_bit (EVENT_RX_HALT, &dev->flags) &&
	    !test_bit (EVENT_DEV_ASLEEP, &dev->flags)) {
		switch (retval = usb_submit_urb (urb, GFP_ATOMIC)) {
		case -EPIPE:
			usbnet_defer_kevent (dev, EVENT_RX_HALT);
			break;
		case -ENOMEM:
			usbnet_defer_kevent (dev, EVENT_RX_MEMORY);
			break;
		case -ENODEV:
			netif_dbg(dev, ifdown, dev->net, "device gone\n");
			netif_device_detach (dev->net);
			break;
		case -EHOSTUNREACH:
			retval = -ENOLINK;
			break;
		default:
			netif_dbg(dev, rx_err, dev->net,
				  "rx submit, %d\n", retval);
			tasklet_schedule (&dev->bh);
			break;
		case 0:
			if (!usbnet_going_away(dev))
				__usbnet_queue_skb(&dev->rxq, skb, rx_start);
		}
	} else {
		netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
		retval = -ENOLINK;
	}
	spin_unlock_irqrestore (&dev->rxq.lock, lockflags);
	if (retval) {
		dev_kfree_skb_any (skb);
		usb_free_urb (urb);
	}
	return retval;
}
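/*
 * Lifecycle note (editor's addition): each in-flight skb carries a
 * struct skb_data in skb->cb and moves rx_start -> rx_done/rx_cleanup
 * (or tx_start -> tx_done) as its URB completes; defer_bh() then shifts
 * it from rxq/txq onto the done list, which usbnet_bh() drains.
 */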
gone\n"); 532 netif_device_detach (dev->net); 533 break; 534 case -EHOSTUNREACH: 535 retval = -ENOLINK; 536 break; 537 default: 538 netif_dbg(dev, rx_err, dev->net, 539 "rx submit, %d\n", retval); 540 tasklet_schedule (&dev->bh); 541 break; 542 case 0: 543 if (!usbnet_going_away(dev)) 544 __usbnet_queue_skb(&dev->rxq, skb, rx_start); 545 } 546 } else { 547 netif_dbg(dev, ifdown, dev->net, "rx: stopped\n"); 548 retval = -ENOLINK; 549 } 550 spin_unlock_irqrestore (&dev->rxq.lock, lockflags); 551 if (retval) { 552 dev_kfree_skb_any (skb); 553 usb_free_urb (urb); 554 } 555 return retval; 556 } 557 558 559 /*-------------------------------------------------------------------------*/ 560 561 static inline int rx_process(struct usbnet *dev, struct sk_buff *skb) 562 { 563 if (dev->driver_info->rx_fixup && 564 !dev->driver_info->rx_fixup (dev, skb)) { 565 /* With RX_ASSEMBLE, rx_fixup() must update counters */ 566 if (!(dev->driver_info->flags & FLAG_RX_ASSEMBLE)) 567 dev->net->stats.rx_errors++; 568 return -EPROTO; 569 } 570 // else network stack removes extra byte if we forced a short packet 571 572 /* all data was already cloned from skb inside the driver */ 573 if (dev->driver_info->flags & FLAG_MULTI_PACKET) 574 return -EALREADY; 575 576 if (skb->len < ETH_HLEN) { 577 dev->net->stats.rx_errors++; 578 dev->net->stats.rx_length_errors++; 579 netif_dbg(dev, rx_err, dev->net, "rx length %d\n", skb->len); 580 return -EPROTO; 581 } 582 583 usbnet_skb_return(dev, skb); 584 return 0; 585 } 586 587 /*-------------------------------------------------------------------------*/ 588 589 static void rx_complete (struct urb *urb) 590 { 591 struct sk_buff *skb = (struct sk_buff *) urb->context; 592 struct skb_data *entry = (struct skb_data *) skb->cb; 593 struct usbnet *dev = entry->dev; 594 int urb_status = urb->status; 595 enum skb_state state; 596 597 skb_put (skb, urb->actual_length); 598 state = rx_done; 599 entry->urb = NULL; 600 601 switch (urb_status) { 602 /* success */ 603 case 0: 604 break; 605 606 /* stalls need manual reset. this is rare ... except that 607 * when going through USB 2.0 TTs, unplug appears this way. 608 * we avoid the highspeed version of the ETIMEDOUT/EILSEQ 609 * storm, recovering as needed. 610 */ 611 case -EPIPE: 612 dev->net->stats.rx_errors++; 613 usbnet_defer_kevent (dev, EVENT_RX_HALT); 614 fallthrough; 615 616 /* software-driven interface shutdown */ 617 case -ECONNRESET: /* async unlink */ 618 case -ESHUTDOWN: /* hardware gone */ 619 netif_dbg(dev, ifdown, dev->net, 620 "rx shutdown, code %d\n", urb_status); 621 goto block; 622 623 /* we get controller i/o faults during hub_wq disconnect() delays. 624 * throttle down resubmits, to avoid log floods; just temporarily, 625 * so we still recover when the fault isn't a hub_wq delay. 626 */ 627 case -EPROTO: 628 case -ETIME: 629 case -EILSEQ: 630 dev->net->stats.rx_errors++; 631 if (!timer_pending (&dev->delay)) { 632 mod_timer (&dev->delay, jiffies + THROTTLE_JIFFIES); 633 netif_dbg(dev, link, dev->net, 634 "rx throttle %d\n", urb_status); 635 } 636 block: 637 state = rx_cleanup; 638 entry->urb = urb; 639 urb = NULL; 640 break; 641 642 /* data overrun ... flush fifo? 
/*-------------------------------------------------------------------------*/
void usbnet_pause_rx(struct usbnet *dev)
{
	set_bit(EVENT_RX_PAUSED, &dev->flags);

	netif_dbg(dev, rx_status, dev->net, "paused rx queue enabled\n");
}
EXPORT_SYMBOL_GPL(usbnet_pause_rx);

void usbnet_resume_rx(struct usbnet *dev)
{
	struct sk_buff *skb;
	int num = 0;

	clear_bit(EVENT_RX_PAUSED, &dev->flags);

	while ((skb = skb_dequeue(&dev->rxq_pause)) != NULL) {
		usbnet_skb_return(dev, skb);
		num++;
	}

	tasklet_schedule(&dev->bh);

	netif_dbg(dev, rx_status, dev->net,
		  "paused rx queue disabled, %d skbs requeued\n", num);
}
EXPORT_SYMBOL_GPL(usbnet_resume_rx);

void usbnet_purge_paused_rxq(struct usbnet *dev)
{
	skb_queue_purge(&dev->rxq_pause);
}
EXPORT_SYMBOL_GPL(usbnet_purge_paused_rxq);
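/*
 * Usage sketch (editor's illustration): minidrivers that change rx
 * framing or buffer sizes can bracket the change the same way
 * usbnet_change_mtu() does above:
 *
 *	usbnet_pause_rx(dev);
 *	usbnet_unlink_rx_urbs(dev);
 *	... update dev->rx_urb_size ...
 *	usbnet_resume_rx(dev);
 */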
/*-------------------------------------------------------------------------*/

// unlink pending rx/tx; completion handlers do all other cleanup

static int unlink_urbs (struct usbnet *dev, struct sk_buff_head *q)
{
	unsigned long		flags;
	struct sk_buff		*skb;
	int			count = 0;

	spin_lock_irqsave (&q->lock, flags);
	while (!skb_queue_empty(q)) {
		struct skb_data		*entry;
		struct urb		*urb;
		int			retval;

		skb_queue_walk(q, skb) {
			entry = (struct skb_data *) skb->cb;
			if (entry->state != unlink_start)
				goto found;
		}
		break;
found:
		entry->state = unlink_start;
		urb = entry->urb;

		/*
		 * Take a reference on the URB so it cannot be freed
		 * while usb_unlink_urb() runs; usb_unlink_urb() always
		 * races with the completion handler (including
		 * defer_bh), which could otherwise trigger a
		 * use-after-free.
		 */
		usb_get_urb(urb);
		spin_unlock_irqrestore(&q->lock, flags);
		// during some PM-driven resume scenarios,
		// these (async) unlinks complete immediately
		retval = usb_unlink_urb (urb);
		if (retval != -EINPROGRESS && retval != 0)
			netdev_dbg(dev->net, "unlink urb err, %d\n", retval);
		else
			count++;
		usb_put_urb(urb);
		spin_lock_irqsave(&q->lock, flags);
	}
	spin_unlock_irqrestore (&q->lock, flags);
	return count;
}

// Flush all pending rx urbs
// minidrivers may need to do this when the MTU changes

void usbnet_unlink_rx_urbs(struct usbnet *dev)
{
	if (netif_running(dev->net)) {
		(void) unlink_urbs (dev, &dev->rxq);
		tasklet_schedule(&dev->bh);
	}
}
EXPORT_SYMBOL_GPL(usbnet_unlink_rx_urbs);

/*-------------------------------------------------------------------------*/

static void wait_skb_queue_empty(struct sk_buff_head *q)
{
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	while (!skb_queue_empty(q)) {
		spin_unlock_irqrestore(&q->lock, flags);
		schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
		set_current_state(TASK_UNINTERRUPTIBLE);
		spin_lock_irqsave(&q->lock, flags);
	}
	spin_unlock_irqrestore(&q->lock, flags);
}

// precondition: never called in_interrupt
static void usbnet_terminate_urbs(struct usbnet *dev)
{
	DECLARE_WAITQUEUE(wait, current);
	int temp;

	/* ensure there are no more active urbs */
	add_wait_queue(&dev->wait, &wait);
	set_current_state(TASK_UNINTERRUPTIBLE);
	temp = unlink_urbs(dev, &dev->txq) +
		unlink_urbs(dev, &dev->rxq);

	/* maybe wait for deletions to finish. */
	wait_skb_queue_empty(&dev->rxq);
	wait_skb_queue_empty(&dev->txq);
	wait_skb_queue_empty(&dev->done);
	netif_dbg(dev, ifdown, dev->net,
		  "waited for %d urb completions\n", temp);
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&dev->wait, &wait);
}

int usbnet_stop (struct net_device *net)
{
	struct usbnet		*dev = netdev_priv(net);
	const struct driver_info *info = dev->driver_info;
	int			retval, pm, mpn;

	clear_bit(EVENT_DEV_OPEN, &dev->flags);
	netif_stop_queue (net);

	netif_info(dev, ifdown, dev->net,
		   "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
		   net->stats.rx_packets, net->stats.tx_packets,
		   net->stats.rx_errors, net->stats.tx_errors);

	/* to not race resume */
	pm = usb_autopm_get_interface(dev->intf);
	/* allow minidriver to stop correctly (wireless devices to turn off
	 * radio etc) */
	if (info->stop) {
		retval = info->stop(dev);
		if (retval < 0)
			netif_info(dev, ifdown, dev->net,
				   "stop fail (%d) usbnet usb-%s-%s, %s\n",
				   retval,
				   dev->udev->bus->bus_name, dev->udev->devpath,
				   info->description);
	}

	if (!(info->flags & FLAG_AVOID_UNLINK_URBS))
		usbnet_terminate_urbs(dev);

	usbnet_status_stop(dev);

	usbnet_purge_paused_rxq(dev);

	mpn = !test_and_clear_bit(EVENT_NO_RUNTIME_PM, &dev->flags);

	/* deferred work (timer, softirq, task) must also stop */
	dev->flags = 0;
	del_timer_sync(&dev->delay);
	tasklet_kill(&dev->bh);
	cancel_work_sync(&dev->kevent);

	/* We have cyclic dependencies, so these calls are repeated to
	 * break the cycle. We cannot fall into the gaps between them
	 * because dev->flags is already zero.
	 */
	tasklet_kill(&dev->bh);
	del_timer_sync(&dev->delay);
	cancel_work_sync(&dev->kevent);

	if (!pm)
		usb_autopm_put_interface(dev->intf);

	if (info->manage_power && mpn)
		info->manage_power(dev, 0);
	else
		usb_autopm_put_interface(dev->intf);

	return 0;
}
EXPORT_SYMBOL_GPL(usbnet_stop);
/*-------------------------------------------------------------------------*/

// posts reads, and enables write queuing

// precondition: never called in_interrupt

int usbnet_open (struct net_device *net)
{
	struct usbnet		*dev = netdev_priv(net);
	int			retval;
	const struct driver_info *info = dev->driver_info;

	if ((retval = usb_autopm_get_interface(dev->intf)) < 0) {
		netif_info(dev, ifup, dev->net,
			   "resumption fail (%d) usbnet usb-%s-%s, %s\n",
			   retval,
			   dev->udev->bus->bus_name,
			   dev->udev->devpath,
			   info->description);
		goto done_nopm;
	}

	// put into "known safe" state
	if (info->reset && (retval = info->reset (dev)) < 0) {
		netif_info(dev, ifup, dev->net,
			   "open reset fail (%d) usbnet usb-%s-%s, %s\n",
			   retval,
			   dev->udev->bus->bus_name,
			   dev->udev->devpath,
			   info->description);
		goto done;
	}

	/* hard_mtu or rx_urb_size may change in reset() */
	usbnet_update_max_qlen(dev);

	// insist peer be connected
	if (info->check_connect && (retval = info->check_connect (dev)) < 0) {
		netif_err(dev, ifup, dev->net, "can't open; %d\n", retval);
		goto done;
	}

	/* start any status interrupt transfer */
	if (dev->interrupt) {
		retval = usbnet_status_start(dev, GFP_KERNEL);
		if (retval < 0) {
			netif_err(dev, ifup, dev->net,
				  "intr submit %d\n", retval);
			goto done;
		}
	}

	set_bit(EVENT_DEV_OPEN, &dev->flags);
	netif_start_queue (net);
	netif_info(dev, ifup, dev->net,
		   "open: enable queueing (rx %d, tx %d) mtu %d %s framing\n",
		   (int)RX_QLEN(dev), (int)TX_QLEN(dev),
		   dev->net->mtu,
		   (dev->driver_info->flags & FLAG_FRAMING_NC) ? "NetChip" :
		   (dev->driver_info->flags & FLAG_FRAMING_GL) ? "GeneSys" :
		   (dev->driver_info->flags & FLAG_FRAMING_Z) ? "Zaurus" :
		   (dev->driver_info->flags & FLAG_FRAMING_RN) ? "RNDIS" :
		   (dev->driver_info->flags & FLAG_FRAMING_AX) ? "ASIX" :
		   "simple");

	/* reset rx error state */
	dev->pkt_cnt = 0;
	dev->pkt_err = 0;
	clear_bit(EVENT_RX_KILL, &dev->flags);

	// delay posting reads until we're fully open
	tasklet_schedule (&dev->bh);
	if (info->manage_power) {
		retval = info->manage_power(dev, 1);
		if (retval < 0) {
			retval = 0;
			set_bit(EVENT_NO_RUNTIME_PM, &dev->flags);
		} else {
			usb_autopm_put_interface(dev->intf);
		}
	}
	return retval;
done:
	usb_autopm_put_interface(dev->intf);
done_nopm:
	return retval;
}
EXPORT_SYMBOL_GPL(usbnet_open);
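/*
 * Contract note (editor's addition): if info->manage_power() fails in
 * usbnet_open(), the autopm reference taken at the top is deliberately
 * kept and EVENT_NO_RUNTIME_PM is set, so the device stays powered;
 * usbnet_stop() checks the same bit to decide whether to drop it.
 */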
/*-------------------------------------------------------------------------*/

/* ethtool methods; minidrivers may need to add some more, but
 * they'll probably want to use this base set.
 */

/* These methods are written on the assumption that the device
 * uses MII
 */
int usbnet_get_link_ksettings_mii(struct net_device *net,
			      struct ethtool_link_ksettings *cmd)
{
	struct usbnet *dev = netdev_priv(net);

	if (!dev->mii.mdio_read)
		return -EOPNOTSUPP;

	mii_ethtool_get_link_ksettings(&dev->mii, cmd);

	return 0;
}
EXPORT_SYMBOL_GPL(usbnet_get_link_ksettings_mii);

int usbnet_get_link_ksettings_internal(struct net_device *net,
					struct ethtool_link_ksettings *cmd)
{
	struct usbnet *dev = netdev_priv(net);

	/* the assumption that speed is equal on tx and rx
	 * is deeply ingrained into the networking layer.
	 * For wireless stuff it is not true.
	 * We assume that rx_speed matters more.
	 */
	if (dev->rx_speed != SPEED_UNSET)
		cmd->base.speed = dev->rx_speed / 1000000;
	else if (dev->tx_speed != SPEED_UNSET)
		cmd->base.speed = dev->tx_speed / 1000000;
	else
		cmd->base.speed = SPEED_UNKNOWN;

	return 0;
}
EXPORT_SYMBOL_GPL(usbnet_get_link_ksettings_internal);

int usbnet_set_link_ksettings_mii(struct net_device *net,
			      const struct ethtool_link_ksettings *cmd)
{
	struct usbnet *dev = netdev_priv(net);
	int retval;

	if (!dev->mii.mdio_write)
		return -EOPNOTSUPP;

	retval = mii_ethtool_set_link_ksettings(&dev->mii, cmd);

	/* link speed/duplex might have changed */
	if (dev->driver_info->link_reset)
		dev->driver_info->link_reset(dev);

	/* hard_mtu or rx_urb_size may change in link_reset() */
	usbnet_update_max_qlen(dev);

	return retval;
}
EXPORT_SYMBOL_GPL(usbnet_set_link_ksettings_mii);

u32 usbnet_get_link (struct net_device *net)
{
	struct usbnet *dev = netdev_priv(net);

	/* If a check_connect is defined, return its result */
	if (dev->driver_info->check_connect)
		return dev->driver_info->check_connect (dev) == 0;

	/* if the device has mii operations, use those */
	if (dev->mii.mdio_read)
		return mii_link_ok(&dev->mii);

	/* Otherwise, dtrt for drivers calling netif_carrier_{on,off} */
	return ethtool_op_get_link(net);
}
EXPORT_SYMBOL_GPL(usbnet_get_link);

int usbnet_nway_reset(struct net_device *net)
{
	struct usbnet *dev = netdev_priv(net);

	if (!dev->mii.mdio_write)
		return -EOPNOTSUPP;

	return mii_nway_restart(&dev->mii);
}
EXPORT_SYMBOL_GPL(usbnet_nway_reset);

void usbnet_get_drvinfo (struct net_device *net, struct ethtool_drvinfo *info)
{
	struct usbnet *dev = netdev_priv(net);

	strscpy(info->driver, dev->driver_name, sizeof(info->driver));
	strscpy(info->fw_version, dev->driver_info->description,
		sizeof(info->fw_version));
	usb_make_path (dev->udev, info->bus_info, sizeof info->bus_info);
}
EXPORT_SYMBOL_GPL(usbnet_get_drvinfo);

u32 usbnet_get_msglevel (struct net_device *net)
{
	struct usbnet *dev = netdev_priv(net);

	return dev->msg_enable;
}
EXPORT_SYMBOL_GPL(usbnet_get_msglevel);

void usbnet_set_msglevel (struct net_device *net, u32 level)
{
	struct usbnet *dev = netdev_priv(net);

	dev->msg_enable = level;
}
EXPORT_SYMBOL_GPL(usbnet_set_msglevel);

/* drivers may override default ethtool_ops in their bind() routine */
static const struct ethtool_ops usbnet_ethtool_ops = {
	.get_link		= usbnet_get_link,
	.nway_reset		= usbnet_nway_reset,
	.get_drvinfo		= usbnet_get_drvinfo,
	.get_msglevel		= usbnet_get_msglevel,
	.set_msglevel		= usbnet_set_msglevel,
	.get_ts_info		= ethtool_op_get_ts_info,
	.get_link_ksettings	= usbnet_get_link_ksettings_mii,
	.set_link_ksettings	= usbnet_set_link_ksettings_mii,
};
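/*
 * Override sketch (editor's illustration; example_ethtool_ops is a
 * hypothetical minidriver structure): drivers that don't use MII
 * typically install their own table from bind(), reusing the exported
 * helpers above where they fit:
 *
 *	static const struct ethtool_ops example_ethtool_ops = {
 *		.get_link	= usbnet_get_link,
 *		.get_msglevel	= usbnet_get_msglevel,
 *		.set_msglevel	= usbnet_set_msglevel,
 *		.get_link_ksettings = usbnet_get_link_ksettings_internal,
 *	};
 *	...
 *	dev->net->ethtool_ops = &example_ethtool_ops;
 */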
/*-------------------------------------------------------------------------*/

static void __handle_link_change(struct usbnet *dev)
{
	if (!test_bit(EVENT_DEV_OPEN, &dev->flags))
		return;

	if (!netif_carrier_ok(dev->net)) {
		/* kill URBs for reading packets to save bus bandwidth */
		unlink_urbs(dev, &dev->rxq);

		/*
		 * tx_timeout will unlink URBs for sending packets and
		 * the tx queue is stopped by the net core after the
		 * link goes off
		 */
	} else {
		/* submitting URBs for reading packets */
		tasklet_schedule(&dev->bh);
	}

	/* hard_mtu or rx_urb_size may change during link change */
	usbnet_update_max_qlen(dev);

	clear_bit(EVENT_LINK_CHANGE, &dev->flags);
}

void usbnet_set_rx_mode(struct net_device *net)
{
	struct usbnet		*dev = netdev_priv(net);

	usbnet_defer_kevent(dev, EVENT_SET_RX_MODE);
}
EXPORT_SYMBOL_GPL(usbnet_set_rx_mode);

static void __handle_set_rx_mode(struct usbnet *dev)
{
	if (dev->driver_info->set_rx_mode)
		(dev->driver_info->set_rx_mode)(dev);

	clear_bit(EVENT_SET_RX_MODE, &dev->flags);
}
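/*
 * Deferral note (editor's addition): .ndo_set_rx_mode runs in atomic
 * context, but programming rx filters usually needs USB control
 * transfers, so usbnet_set_rx_mode() only sets EVENT_SET_RX_MODE and
 * the kevent worker below calls the minidriver's set_rx_mode() hook
 * from process context, where it can sleep.
 */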
/* work that cannot be done in interrupt context uses keventd.
 *
 * NOTE:  with 2.5 we could do more of this using completion callbacks,
 * especially now that control transfers can be queued.
 */
static void
usbnet_deferred_kevent (struct work_struct *work)
{
	struct usbnet		*dev =
		container_of(work, struct usbnet, kevent);
	int			status;

	/* usb_clear_halt() needs a thread context */
	if (test_bit (EVENT_TX_HALT, &dev->flags)) {
		unlink_urbs (dev, &dev->txq);
		status = usb_autopm_get_interface(dev->intf);
		if (status < 0)
			goto fail_pipe;
		status = usb_clear_halt (dev->udev, dev->out);
		usb_autopm_put_interface(dev->intf);
		if (status < 0 &&
		    status != -EPIPE &&
		    status != -ESHUTDOWN) {
			if (netif_msg_tx_err (dev))
fail_pipe:
				netdev_err(dev->net, "can't clear tx halt, status %d\n",
					   status);
		} else {
			clear_bit (EVENT_TX_HALT, &dev->flags);
			if (status != -ESHUTDOWN)
				netif_wake_queue (dev->net);
		}
	}
	if (test_bit (EVENT_RX_HALT, &dev->flags)) {
		unlink_urbs (dev, &dev->rxq);
		status = usb_autopm_get_interface(dev->intf);
		if (status < 0)
			goto fail_halt;
		status = usb_clear_halt (dev->udev, dev->in);
		usb_autopm_put_interface(dev->intf);
		if (status < 0 &&
		    status != -EPIPE &&
		    status != -ESHUTDOWN) {
			if (netif_msg_rx_err (dev))
fail_halt:
				netdev_err(dev->net, "can't clear rx halt, status %d\n",
					   status);
		} else {
			clear_bit (EVENT_RX_HALT, &dev->flags);
			if (!usbnet_going_away(dev))
				tasklet_schedule(&dev->bh);
		}
	}

	/* tasklet could resubmit itself forever if memory is tight */
	if (test_bit (EVENT_RX_MEMORY, &dev->flags)) {
		struct urb	*urb = NULL;
		int resched = 1;

		if (netif_running (dev->net))
			urb = usb_alloc_urb (0, GFP_KERNEL);
		else
			clear_bit (EVENT_RX_MEMORY, &dev->flags);
		if (urb != NULL) {
			clear_bit (EVENT_RX_MEMORY, &dev->flags);
			status = usb_autopm_get_interface(dev->intf);
			if (status < 0) {
				usb_free_urb(urb);
				goto fail_lowmem;
			}
			if (rx_submit (dev, urb, GFP_KERNEL) == -ENOLINK)
				resched = 0;
			usb_autopm_put_interface(dev->intf);
fail_lowmem:
			if (resched)
				if (!usbnet_going_away(dev))
					tasklet_schedule(&dev->bh);
		}
	}

	if (test_bit (EVENT_LINK_RESET, &dev->flags)) {
		const struct driver_info *info = dev->driver_info;
		int			retval = 0;

		clear_bit (EVENT_LINK_RESET, &dev->flags);
		status = usb_autopm_get_interface(dev->intf);
		if (status < 0)
			goto skip_reset;
		if (info->link_reset && (retval = info->link_reset(dev)) < 0) {
			usb_autopm_put_interface(dev->intf);
skip_reset:
			netdev_info(dev->net, "link reset failed (%d) usbnet usb-%s-%s, %s\n",
				    retval,
				    dev->udev->bus->bus_name,
				    dev->udev->devpath,
				    info->description);
		} else {
			usb_autopm_put_interface(dev->intf);
		}

		/* handle link change from link resetting */
		__handle_link_change(dev);
	}

	if (test_bit (EVENT_LINK_CHANGE, &dev->flags))
		__handle_link_change(dev);

	if (test_bit (EVENT_SET_RX_MODE, &dev->flags))
		__handle_set_rx_mode(dev);

	if (dev->flags)
		netdev_dbg(dev->net, "kevent done, flags = 0x%lx\n", dev->flags);
}

/*-------------------------------------------------------------------------*/
static void tx_complete (struct urb *urb)
{
	struct sk_buff		*skb = (struct sk_buff *) urb->context;
	struct skb_data		*entry = (struct skb_data *) skb->cb;
	struct usbnet		*dev = entry->dev;

	if (urb->status == 0) {
		struct pcpu_sw_netstats *stats64 = this_cpu_ptr(dev->net->tstats);
		unsigned long flags;

		flags = u64_stats_update_begin_irqsave(&stats64->syncp);
		u64_stats_add(&stats64->tx_packets, entry->packets);
		u64_stats_add(&stats64->tx_bytes, entry->length);
		u64_stats_update_end_irqrestore(&stats64->syncp, flags);
	} else {
		dev->net->stats.tx_errors++;

		switch (urb->status) {
		case -EPIPE:
			usbnet_defer_kevent (dev, EVENT_TX_HALT);
			break;

		/* software-driven interface shutdown */
		case -ECONNRESET:		// async unlink
		case -ESHUTDOWN:		// hardware gone
			break;

		/* like rx, tx gets controller i/o faults during hub_wq
		 * delays and so it uses the same throttling mechanism.
		 */
		case -EPROTO:
		case -ETIME:
		case -EILSEQ:
			usb_mark_last_busy(dev->udev);
			if (!timer_pending (&dev->delay)) {
				mod_timer (&dev->delay,
					jiffies + THROTTLE_JIFFIES);
				netif_dbg(dev, link, dev->net,
					  "tx throttle %d\n", urb->status);
			}
			netif_stop_queue (dev->net);
			break;
		default:
			netif_dbg(dev, tx_err, dev->net,
				  "tx err %d\n", entry->urb->status);
			break;
		}
	}

	usb_autopm_put_interface_async(dev->intf);
	(void) defer_bh(dev, skb, &dev->txq, tx_done);
}

/*-------------------------------------------------------------------------*/

void usbnet_tx_timeout (struct net_device *net, unsigned int txqueue)
{
	struct usbnet		*dev = netdev_priv(net);

	unlink_urbs (dev, &dev->txq);
	tasklet_schedule (&dev->bh);
	/* this needs to be handled individually because the generic layer
	 * doesn't know what recovery is sufficient, and an unconditional
	 * reset could not restore the device's private information.
	 */
	if (dev->driver_info->recover)
		(dev->driver_info->recover)(dev);
}
EXPORT_SYMBOL_GPL(usbnet_tx_timeout);

/*-------------------------------------------------------------------------*/

static int build_dma_sg(const struct sk_buff *skb, struct urb *urb)
{
	unsigned num_sgs, total_len = 0;
	int i, s = 0;

	num_sgs = skb_shinfo(skb)->nr_frags + 1;
	if (num_sgs == 1)
		return 0;

	/* reserve one for zero packet */
	urb->sg = kmalloc_array(num_sgs + 1, sizeof(struct scatterlist),
				GFP_ATOMIC);
	if (!urb->sg)
		return -ENOMEM;

	urb->num_sgs = num_sgs;
	sg_init_table(urb->sg, urb->num_sgs + 1);

	sg_set_buf(&urb->sg[s++], skb->data, skb_headlen(skb));
	total_len += skb_headlen(skb);

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *f = &skb_shinfo(skb)->frags[i];

		total_len += skb_frag_size(f);
		sg_set_page(&urb->sg[i + s], skb_frag_page(f), skb_frag_size(f),
			    skb_frag_off(f));
	}
	urb->transfer_buffer_length = total_len;

	return 1;
}
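/*
 * Worked example for build_dma_sg() (editor's illustration): an skb
 * with a linear head plus two page fragments gives num_sgs = 3; one
 * extra scatterlist slot is allocated so usbnet_start_xmit() below can
 * append the one-byte padding packet without reallocating the table.
 */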
netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
				     struct net_device *net)
{
	struct usbnet		*dev = netdev_priv(net);
	unsigned int			length;
	struct urb		*urb = NULL;
	struct skb_data		*entry;
	const struct driver_info *info = dev->driver_info;
	unsigned long		flags;
	int retval;

	if (skb)
		skb_tx_timestamp(skb);

	// some devices want funky USB-level framing, for
	// win32 driver (usually) and/or hardware quirks
	if (info->tx_fixup) {
		skb = info->tx_fixup (dev, skb, GFP_ATOMIC);
		if (!skb) {
			/* packet collected; minidriver waiting for more */
			if (info->flags & FLAG_MULTI_PACKET)
				goto not_drop;
			netif_dbg(dev, tx_err, dev->net, "can't tx_fixup skb\n");
			goto drop;
		}
	}

	if (!(urb = usb_alloc_urb (0, GFP_ATOMIC))) {
		netif_dbg(dev, tx_err, dev->net, "no urb\n");
		goto drop;
	}

	entry = (struct skb_data *) skb->cb;
	entry->urb = urb;
	entry->dev = dev;

	usb_fill_bulk_urb (urb, dev->udev, dev->out,
			skb->data, skb->len, tx_complete, skb);
	if (dev->can_dma_sg) {
		if (build_dma_sg(skb, urb) < 0)
			goto drop;
	}
	length = urb->transfer_buffer_length;

	/* don't assume the hardware handles USB_ZERO_PACKET
	 * NOTE:  strictly conforming cdc-ether devices should expect
	 * the ZLP here, but ignore the one-byte packet.
	 * NOTE2: CDC NCM specification is different from CDC ECM when
	 * handling ZLP/short packets, so cdc_ncm driver will make short
	 * packet itself if needed.
	 */
	if (length % dev->maxpacket == 0) {
		if (!(info->flags & FLAG_SEND_ZLP)) {
			if (!(info->flags & FLAG_MULTI_PACKET)) {
				length++;
				if (skb_tailroom(skb) && !urb->num_sgs) {
					skb->data[skb->len] = 0;
					__skb_put(skb, 1);
				} else if (urb->num_sgs)
					sg_set_buf(&urb->sg[urb->num_sgs++],
							dev->padding_pkt, 1);
			}
		} else
			urb->transfer_flags |= URB_ZERO_PACKET;
	}
	urb->transfer_buffer_length = length;

	if (info->flags & FLAG_MULTI_PACKET) {
		/* Driver has set number of packets and a length delta.
		 * Calculate the complete length and ensure that it's
		 * positive.
		 */
		entry->length += length;
		if (WARN_ON_ONCE(entry->length <= 0))
			entry->length = length;
	} else {
		usbnet_set_skb_tx_stats(skb, 1, length);
	}

	spin_lock_irqsave(&dev->txq.lock, flags);
	retval = usb_autopm_get_interface_async(dev->intf);
	if (retval < 0) {
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		goto drop;
	}
	if (netif_queue_stopped(net)) {
		usb_autopm_put_interface_async(dev->intf);
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		goto drop;
	}

#ifdef CONFIG_PM
	/* if this triggers the device is still asleep */
	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		/* transmission will be done in resume */
		usb_anchor_urb(urb, &dev->deferred);
		/* no use to process more packets */
		netif_stop_queue(net);
		usb_put_urb(urb);
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		netdev_dbg(dev->net, "Delaying transmission for resumption\n");
		goto deferred;
	}
#endif

	switch ((retval = usb_submit_urb (urb, GFP_ATOMIC))) {
	case -EPIPE:
		netif_stop_queue (net);
		usbnet_defer_kevent (dev, EVENT_TX_HALT);
		usb_autopm_put_interface_async(dev->intf);
		break;
	default:
		usb_autopm_put_interface_async(dev->intf);
		netif_dbg(dev, tx_err, dev->net,
			  "tx: submit urb err %d\n", retval);
		break;
	case 0:
		netif_trans_update(net);
		__usbnet_queue_skb(&dev->txq, skb, tx_start);
		if (dev->txq.qlen >= TX_QLEN (dev))
			netif_stop_queue (net);
	}
	spin_unlock_irqrestore (&dev->txq.lock, flags);

	if (retval) {
		netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", retval);
drop:
		dev->net->stats.tx_dropped++;
not_drop:
		if (skb)
			dev_kfree_skb_any (skb);
		if (urb) {
			kfree(urb->sg);
			usb_free_urb(urb);
		}
	} else
		netif_dbg(dev, tx_queued, dev->net,
			  "> tx, len %u, type 0x%x\n", length, skb->protocol);
#ifdef CONFIG_PM
deferred:
#endif
	return NETDEV_TX_OK;
}
EXPORT_SYMBOL_GPL(usbnet_start_xmit);

static int rx_alloc_submit(struct usbnet *dev, gfp_t flags)
{
	struct urb	*urb;
	int		i;
	int		ret = 0;

	/* don't refill the queue all at once */
	for (i = 0; i < 10 && dev->rxq.qlen < RX_QLEN(dev); i++) {
		urb = usb_alloc_urb(0, flags);
		if (urb != NULL) {
			ret = rx_submit(dev, urb, flags);
			if (ret)
				goto err;
		} else {
			ret = -ENOMEM;
			goto err;
		}
	}
err:
	return ret;
}

static inline void usb_free_skb(struct sk_buff *skb)
{
	struct skb_data *entry = (struct skb_data *)skb->cb;

	usb_free_urb(entry->urb);
	dev_kfree_skb(skb);
}
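/*
 * Worked example for the ZLP handling in usbnet_start_xmit() above
 * (editor's illustration): with a 512 byte bulk maxpacket, a 1024 byte
 * transfer ends exactly on a packet boundary. Devices without
 * FLAG_SEND_ZLP get the buffer padded to 1025 bytes (linear skbs get a
 * zero byte appended; sg transfers append the shared padding_pkt),
 * while FLAG_SEND_ZLP devices get URB_ZERO_PACKET instead.
 */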
/*-------------------------------------------------------------------------*/

// tasklet (work deferred from completions, in_irq) or timer

static void usbnet_bh (struct timer_list *t)
{
	struct usbnet		*dev = from_timer(dev, t, delay);
	struct sk_buff		*skb;
	struct skb_data		*entry;

	while ((skb = skb_dequeue (&dev->done))) {
		entry = (struct skb_data *) skb->cb;
		switch (entry->state) {
		case rx_done:
			if (rx_process(dev, skb))
				usb_free_skb(skb);
			continue;
		case tx_done:
			kfree(entry->urb->sg);
			fallthrough;
		case rx_cleanup:
			usb_free_skb(skb);
			continue;
		default:
			netdev_dbg(dev->net, "bogus skb state %d\n", entry->state);
		}
	}

	/* restart RX again after disabling due to high error rate */
	clear_bit(EVENT_RX_KILL, &dev->flags);

	/* waiting for all pending urbs to complete?
	 * only then can we forgo submitting anew
	 */
	if (waitqueue_active(&dev->wait)) {
		if (dev->txq.qlen + dev->rxq.qlen + dev->done.qlen == 0)
			wake_up_all(&dev->wait);

	// or are we maybe short a few urbs?
	} else if (netif_running (dev->net) &&
		   netif_device_present (dev->net) &&
		   netif_carrier_ok(dev->net) &&
		   !usbnet_going_away(dev) &&
		   !timer_pending(&dev->delay) &&
		   !test_bit(EVENT_RX_PAUSED, &dev->flags) &&
		   !test_bit(EVENT_RX_HALT, &dev->flags)) {
		int	temp = dev->rxq.qlen;

		if (temp < RX_QLEN(dev)) {
			if (rx_alloc_submit(dev, GFP_ATOMIC) == -ENOLINK)
				return;
			if (temp != dev->rxq.qlen)
				netif_dbg(dev, link, dev->net,
					  "rxqlen %d --> %d\n",
					  temp, dev->rxq.qlen);
			if (dev->rxq.qlen < RX_QLEN(dev))
				tasklet_schedule (&dev->bh);
		}
		if (dev->txq.qlen < TX_QLEN (dev))
			netif_wake_queue (dev->net);
	}
}

static void usbnet_bh_tasklet(struct tasklet_struct *t)
{
	struct usbnet *dev = from_tasklet(dev, t, bh);

	usbnet_bh(&dev->delay);
}


/*-------------------------------------------------------------------------
 *
 * USB Device Driver support
 *
 *-------------------------------------------------------------------------*/

// precondition: never called in_interrupt

void usbnet_disconnect (struct usb_interface *intf)
{
	struct usbnet		*dev;
	struct usb_device	*xdev;
	struct net_device	*net;
	struct urb		*urb;

	dev = usb_get_intfdata(intf);
	usb_set_intfdata(intf, NULL);
	if (!dev)
		return;
	usbnet_mark_going_away(dev);

	xdev = interface_to_usbdev (intf);

	netif_info(dev, probe, dev->net, "unregister '%s' usb-%s-%s, %s\n",
		   intf->dev.driver->name,
		   xdev->bus->bus_name, xdev->devpath,
		   dev->driver_info->description);

	net = dev->net;
	unregister_netdev (net);

	while ((urb = usb_get_from_anchor(&dev->deferred))) {
		dev_kfree_skb(urb->context);
		kfree(urb->sg);
		usb_free_urb(urb);
	}

	if (dev->driver_info->unbind)
		dev->driver_info->unbind(dev, intf);

	usb_kill_urb(dev->interrupt);
	usb_free_urb(dev->interrupt);
	kfree(dev->padding_pkt);

	free_percpu(net->tstats);
	free_netdev(net);
}
EXPORT_SYMBOL_GPL(usbnet_disconnect);

static const struct net_device_ops usbnet_netdev_ops = {
	.ndo_open		= usbnet_open,
	.ndo_stop		= usbnet_stop,
	.ndo_start_xmit		= usbnet_start_xmit,
	.ndo_tx_timeout		= usbnet_tx_timeout,
	.ndo_set_rx_mode	= usbnet_set_rx_mode,
	.ndo_change_mtu		= usbnet_change_mtu,
	.ndo_get_stats64	= dev_get_tstats64,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};
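/*
 * Wiring sketch (editor's illustration; all "example_*" names are
 * hypothetical): a minidriver typically pairs a driver_info with a
 * usb_device_id table whose driver_info field points at it, which is
 * how usbnet_probe() below finds its per-device operations:
 *
 *	static const struct driver_info example_info = {
 *		.description	= "Example USB Ethernet",
 *		.flags		= FLAG_ETHER,
 *		.bind		= example_bind,
 *	};
 *
 *	static const struct usb_device_id example_ids[] = {
 *		{ USB_DEVICE(0x1234, 0x5678),
 *		  .driver_info = (unsigned long) &example_info, },
 *		{ },
 *	};
 */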
/*-------------------------------------------------------------------------*/

// precondition: never called in_interrupt

static struct device_type wlan_type = {
	.name	= "wlan",
};

static struct device_type wwan_type = {
	.name	= "wwan",
};

int
usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
{
	struct usbnet			*dev;
	struct net_device		*net;
	struct usb_host_interface	*interface;
	const struct driver_info	*info;
	struct usb_device		*xdev;
	int				status;
	const char			*name;
	struct usb_driver	*driver = to_usb_driver(udev->dev.driver);

	/* usbnet already took usb runtime pm, so we have to enable the
	 * feature for the usb interface; otherwise usb_autopm_get_interface
	 * may fail if RUNTIME_PM is enabled.
	 */
	if (!driver->supports_autosuspend) {
		driver->supports_autosuspend = 1;
		pm_runtime_enable(&udev->dev);
	}

	name = udev->dev.driver->name;
	info = (const struct driver_info *) prod->driver_info;
	if (!info) {
		dev_dbg (&udev->dev, "blacklisted by %s\n", name);
		return -ENODEV;
	}
	xdev = interface_to_usbdev (udev);
	interface = udev->cur_altsetting;

	status = -ENOMEM;

	// set up our own records
	net = alloc_etherdev(sizeof(*dev));
	if (!net)
		goto out;

	/* netdev_printk() needs this so do it as early as possible */
	SET_NETDEV_DEV(net, &udev->dev);

	dev = netdev_priv(net);
	dev->udev = xdev;
	dev->intf = udev;
	dev->driver_info = info;
	dev->driver_name = name;
	dev->rx_speed = SPEED_UNSET;
	dev->tx_speed = SPEED_UNSET;

	net->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!net->tstats)
		goto out0;

	dev->msg_enable = netif_msg_init (msg_level, NETIF_MSG_DRV
				| NETIF_MSG_PROBE | NETIF_MSG_LINK);
	init_waitqueue_head(&dev->wait);
	skb_queue_head_init (&dev->rxq);
	skb_queue_head_init (&dev->txq);
	skb_queue_head_init (&dev->done);
	skb_queue_head_init(&dev->rxq_pause);
	tasklet_setup(&dev->bh, usbnet_bh_tasklet);
	INIT_WORK (&dev->kevent, usbnet_deferred_kevent);
	init_usb_anchor(&dev->deferred);
	timer_setup(&dev->delay, usbnet_bh, 0);
	mutex_init (&dev->phy_mutex);
	mutex_init(&dev->interrupt_mutex);
	dev->interrupt_count = 0;

	dev->net = net;
	strscpy(net->name, "usb%d", sizeof(net->name));

	/* rx and tx sides can use different message sizes;
	 * bind() should set rx_urb_size in that case.
	 */
	dev->hard_mtu = net->mtu + net->hard_header_len;
	net->min_mtu = 0;
	net->max_mtu = ETH_MAX_MTU;

	net->netdev_ops = &usbnet_netdev_ops;
	net->watchdog_timeo = TX_TIMEOUT_JIFFIES;
	net->ethtool_ops = &usbnet_ethtool_ops;

	// allow device-specific bind/init procedures
	// NOTE net->name still not usable ...
	if (info->bind) {
		status = info->bind (dev, udev);
		if (status < 0)
			goto out1;

		// heuristic:  "usb%d" for links we know are two-host,
		// else "eth%d" when there's reasonable doubt.  userspace
		// can rename the link if it knows better.
		if ((dev->driver_info->flags & FLAG_ETHER) != 0 &&
		    ((dev->driver_info->flags & FLAG_POINTTOPOINT) == 0 ||
		     (net->dev_addr [0] & 0x02) == 0))
			strscpy(net->name, "eth%d", sizeof(net->name));
		/* WLAN devices should always be named "wlan%d" */
		if ((dev->driver_info->flags & FLAG_WLAN) != 0)
			strscpy(net->name, "wlan%d", sizeof(net->name));
		/* WWAN devices should always be named "wwan%d" */
		if ((dev->driver_info->flags & FLAG_WWAN) != 0)
			strscpy(net->name, "wwan%d", sizeof(net->name));

		/* devices that cannot do ARP */
		if ((dev->driver_info->flags & FLAG_NOARP) != 0)
			net->flags |= IFF_NOARP;

		/* maybe the remote can't receive an Ethernet MTU */
		if (net->mtu > (dev->hard_mtu - net->hard_header_len))
			net->mtu = dev->hard_mtu - net->hard_header_len;
	} else if (!info->in || !info->out)
		status = usbnet_get_endpoints (dev, udev);
	else {
		u8	ep_addrs[3] = {
			info->in + USB_DIR_IN, info->out + USB_DIR_OUT, 0
		};

		dev->in = usb_rcvbulkpipe (xdev, info->in);
		dev->out = usb_sndbulkpipe (xdev, info->out);
		if (!(info->flags & FLAG_NO_SETINT))
			status = usb_set_interface (xdev,
				interface->desc.bInterfaceNumber,
				interface->desc.bAlternateSetting);
		else
			status = 0;

		if (status == 0 && !usb_check_bulk_endpoints(udev, ep_addrs))
			status = -EINVAL;
	}
	if (status >= 0 && dev->status)
		status = init_status (dev, udev);
	if (status < 0)
		goto out3;

	if (!dev->rx_urb_size)
		dev->rx_urb_size = dev->hard_mtu;
	dev->maxpacket = usb_maxpacket(dev->udev, dev->out);
	if (dev->maxpacket == 0) {
		/* that is a broken device */
		status = -ENODEV;
		goto out4;
	}

	/* this flags the device for user space */
	if (!is_valid_ether_addr(net->dev_addr))
		eth_hw_addr_random(net);

	if ((dev->driver_info->flags & FLAG_WLAN) != 0)
		SET_NETDEV_DEVTYPE(net, &wlan_type);
	if ((dev->driver_info->flags & FLAG_WWAN) != 0)
		SET_NETDEV_DEVTYPE(net, &wwan_type);

	/* initialize max rx_qlen and tx_qlen */
	usbnet_update_max_qlen(dev);

	if (dev->can_dma_sg && !(info->flags & FLAG_SEND_ZLP) &&
	    !(info->flags & FLAG_MULTI_PACKET)) {
		dev->padding_pkt = kzalloc(1, GFP_KERNEL);
		if (!dev->padding_pkt) {
			status = -ENOMEM;
			goto out4;
		}
	}

	status = register_netdev (net);
	if (status)
		goto out5;
	netif_info(dev, probe, dev->net,
		   "register '%s' at usb-%s-%s, %s, %pM\n",
		   udev->dev.driver->name,
		   xdev->bus->bus_name, xdev->devpath,
		   dev->driver_info->description,
		   net->dev_addr);

	// ok, it's ready to go.
	usb_set_intfdata (udev, dev);

	netif_device_attach (net);

	if (dev->driver_info->flags & FLAG_LINK_INTR)
		usbnet_link_change(dev, 0, 0);

	return 0;

out5:
	kfree(dev->padding_pkt);
out4:
	usb_free_urb(dev->interrupt);
out3:
	if (info->unbind)
		info->unbind (dev, udev);
out1:
	/* subdrivers must undo all they did in bind() if they
	 * fail it, but we may fail later and a deferred kevent
	 * may trigger an error resubmitting itself and, worse,
	 * schedule a timer. So we kill it all just in case.
	 */
	cancel_work_sync(&dev->kevent);
	del_timer_sync(&dev->delay);
	free_percpu(net->tstats);
out0:
	free_netdev(net);
out:
	return status;
}
EXPORT_SYMBOL_GPL(usbnet_probe);

/*-------------------------------------------------------------------------*/

/*
 * suspend the whole driver as soon as the first interface is suspended
 * resume only when the last interface is resumed
 */

int usbnet_suspend (struct usb_interface *intf, pm_message_t message)
{
	struct usbnet		*dev = usb_get_intfdata(intf);

	if (!dev->suspend_count++) {
		spin_lock_irq(&dev->txq.lock);
		/* don't autosuspend while transmitting */
		if (dev->txq.qlen && PMSG_IS_AUTO(message)) {
			dev->suspend_count--;
			spin_unlock_irq(&dev->txq.lock);
			return -EBUSY;
		} else {
			set_bit(EVENT_DEV_ASLEEP, &dev->flags);
			spin_unlock_irq(&dev->txq.lock);
		}
		/*
		 * accelerate emptying of the rx and tx queues, to avoid
		 * having everything error out.
		 */
		netif_device_detach (dev->net);
		usbnet_terminate_urbs(dev);
		__usbnet_status_stop_force(dev);

		/*
		 * reattach so runtime management can use and
		 * wake the device
		 */
		netif_device_attach (dev->net);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(usbnet_suspend);

int usbnet_resume (struct usb_interface *intf)
{
	struct usbnet		*dev = usb_get_intfdata(intf);
	struct sk_buff		*skb;
	struct urb		*res;
	int			retval;

	if (!--dev->suspend_count) {
		/* resume interrupt URB if it was previously submitted */
		__usbnet_status_start_force(dev, GFP_NOIO);

		spin_lock_irq(&dev->txq.lock);
		while ((res = usb_get_from_anchor(&dev->deferred))) {

			skb = (struct sk_buff *)res->context;
			retval = usb_submit_urb(res, GFP_ATOMIC);
			if (retval < 0) {
				dev_kfree_skb_any(skb);
				kfree(res->sg);
				usb_free_urb(res);
				usb_autopm_put_interface_async(dev->intf);
			} else {
				netif_trans_update(dev->net);
				__skb_queue_tail(&dev->txq, skb);
			}
		}

		smp_mb();
		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
		spin_unlock_irq(&dev->txq.lock);

		if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
			/* handle remote wakeup ASAP
			 * we cannot race against stop
			 */
			if (netif_device_present(dev->net) &&
			    !timer_pending(&dev->delay) &&
			    !test_bit(EVENT_RX_HALT, &dev->flags))
				rx_alloc_submit(dev, GFP_NOIO);

			if (!(dev->txq.qlen >= TX_QLEN(dev)))
				netif_tx_wake_all_queues(dev->net);
			tasklet_schedule (&dev->bh);
		}
	}

	if (test_and_clear_bit(EVENT_DEVICE_REPORT_IDLE, &dev->flags))
		usb_autopm_get_interface_no_resume(intf);

	return 0;
}
EXPORT_SYMBOL_GPL(usbnet_resume);
/*
 * Either a subdriver implements manage_power, in which case it is assumed
 * to always be ready to be suspended, or it reports its readiness to be
 * suspended explicitly.
 */
void usbnet_device_suggests_idle(struct usbnet *dev)
{
	if (!test_and_set_bit(EVENT_DEVICE_REPORT_IDLE, &dev->flags)) {
		dev->intf->needs_remote_wakeup = 1;
		usb_autopm_put_interface_async(dev->intf);
	}
}
EXPORT_SYMBOL(usbnet_device_suggests_idle);

/*
 * For devices that can do without special commands
 */
int usbnet_manage_power(struct usbnet *dev, int on)
{
	dev->intf->needs_remote_wakeup = on;
	return 0;
}
EXPORT_SYMBOL(usbnet_manage_power);

void usbnet_link_change(struct usbnet *dev, bool link, bool need_reset)
{
	/* update link after link is reset */
	if (link && !need_reset)
		netif_carrier_on(dev->net);
	else
		netif_carrier_off(dev->net);

	if (need_reset && link)
		usbnet_defer_kevent(dev, EVENT_LINK_RESET);
	else
		usbnet_defer_kevent(dev, EVENT_LINK_CHANGE);
}
EXPORT_SYMBOL(usbnet_link_change);

/*-------------------------------------------------------------------------*/
static int __usbnet_read_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
			     u16 value, u16 index, void *data, u16 size)
{
	void *buf = NULL;
	int err = -ENOMEM;

	netdev_dbg(dev->net, "usbnet_read_cmd cmd=0x%02x reqtype=%02x value=0x%04x index=0x%04x size=%d\n",
		   cmd, reqtype, value, index, size);

	if (size) {
		buf = kmalloc(size, GFP_NOIO);
		if (!buf)
			goto out;
	}

	err = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
			      cmd, reqtype, value, index, buf, size,
			      USB_CTRL_GET_TIMEOUT);
	if (err > 0 && err <= size) {
		if (data)
			memcpy(data, buf, err);
		else
			netdev_dbg(dev->net,
				   "Huh? Data requested but thrown away.\n");
	}
	kfree(buf);
out:
	return err;
}

static int __usbnet_write_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
			      u16 value, u16 index, const void *data,
			      u16 size)
{
	void *buf = NULL;
	int err = -ENOMEM;

	netdev_dbg(dev->net, "usbnet_write_cmd cmd=0x%02x reqtype=%02x value=0x%04x index=0x%04x size=%d\n",
		   cmd, reqtype, value, index, size);

	if (data) {
		buf = kmemdup(data, size, GFP_NOIO);
		if (!buf)
			goto out;
	} else {
		if (size) {
			WARN_ON_ONCE(1);
			err = -EINVAL;
			goto out;
		}
	}

	err = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
			      cmd, reqtype, value, index, buf, size,
			      USB_CTRL_SET_TIMEOUT);
	kfree(buf);

out:
	return err;
}

/*
 * This function must not be called from inside a suspend/resume callback;
 * the autopm calls it makes would deadlock there.
 */
int usbnet_read_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
		    u16 value, u16 index, void *data, u16 size)
{
	int ret;

	if (usb_autopm_get_interface(dev->intf) < 0)
		return -ENODEV;
	ret = __usbnet_read_cmd(dev, cmd, reqtype, value, index,
				data, size);
	usb_autopm_put_interface(dev->intf);
	return ret;
}
EXPORT_SYMBOL_GPL(usbnet_read_cmd);
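/*
 * Example (illustrative sketch, not part of this framework): reading a
 * 16-bit vendor-defined register over the control pipe. The request
 * EXAMPLE_REQ_READ_REG and the register layout are hypothetical; a real
 * subdriver must use whatever requests its hardware defines.
 *
 *	static int example_read_reg(struct usbnet *dev, u16 reg, u16 *val)
 *	{
 *		__le16 tmp;
 *		int ret;
 *
 *		ret = usbnet_read_cmd(dev, EXAMPLE_REQ_READ_REG,
 *				      USB_DIR_IN | USB_TYPE_VENDOR |
 *				      USB_RECIP_DEVICE,
 *				      0, reg, &tmp, sizeof(tmp));
 *		if (ret < 0)
 *			return ret;
 *		if (ret != sizeof(tmp))
 *			return -EIO;
 *		*val = le16_to_cpu(tmp);
 *		return 0;
 *	}
 *
 * Note that this path takes an autopm reference and may sleep; from a
 * suspend/resume callback the *_nopm variants below must be used instead.
 */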
/*
 * This function must not be called from inside a suspend/resume callback;
 * the autopm calls it makes would deadlock there.
 */
int usbnet_write_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
		     u16 value, u16 index, const void *data, u16 size)
{
	int ret;

	if (usb_autopm_get_interface(dev->intf) < 0)
		return -ENODEV;
	ret = __usbnet_write_cmd(dev, cmd, reqtype, value, index,
				 data, size);
	usb_autopm_put_interface(dev->intf);
	return ret;
}
EXPORT_SYMBOL_GPL(usbnet_write_cmd);

/*
 * This function may safely be called from inside a suspend/resume callback,
 * and in general should only be called from one.
 */
int usbnet_read_cmd_nopm(struct usbnet *dev, u8 cmd, u8 reqtype,
			 u16 value, u16 index, void *data, u16 size)
{
	return __usbnet_read_cmd(dev, cmd, reqtype, value, index,
				 data, size);
}
EXPORT_SYMBOL_GPL(usbnet_read_cmd_nopm);

/*
 * This function may safely be called from inside a suspend/resume callback,
 * and in general should only be called from one.
 */
int usbnet_write_cmd_nopm(struct usbnet *dev, u8 cmd, u8 reqtype,
			  u16 value, u16 index, const void *data,
			  u16 size)
{
	return __usbnet_write_cmd(dev, cmd, reqtype, value, index,
				  data, size);
}
EXPORT_SYMBOL_GPL(usbnet_write_cmd_nopm);

static void usbnet_async_cmd_cb(struct urb *urb)
{
	struct usb_ctrlrequest *req = (struct usb_ctrlrequest *)urb->context;
	int status = urb->status;

	if (status < 0)
		dev_dbg(&urb->dev->dev, "%s failed with %d\n",
			__func__, status);

	kfree(req);
	usb_free_urb(urb);
}

/*
 * The caller must make sure that the device can't be put into suspend
 * state until the control URB completes.
 */
int usbnet_write_cmd_async(struct usbnet *dev, u8 cmd, u8 reqtype,
			   u16 value, u16 index, const void *data, u16 size)
{
	struct usb_ctrlrequest *req;
	struct urb *urb;
	int err = -ENOMEM;
	void *buf = NULL;

	netdev_dbg(dev->net, "usbnet_write_cmd_async cmd=0x%02x reqtype=%02x value=0x%04x index=0x%04x size=%d\n",
		   cmd, reqtype, value, index, size);

	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb)
		goto fail;

	if (data) {
		buf = kmemdup(data, size, GFP_ATOMIC);
		if (!buf) {
			netdev_err(dev->net, "Error allocating buffer in %s!\n",
				   __func__);
			goto fail_free_urb;
		}
	}

	req = kmalloc(sizeof(struct usb_ctrlrequest), GFP_ATOMIC);
	if (!req)
		goto fail_free_buf;

	req->bRequestType = reqtype;
	req->bRequest = cmd;
	req->wValue = cpu_to_le16(value);
	req->wIndex = cpu_to_le16(index);
	req->wLength = cpu_to_le16(size);

	usb_fill_control_urb(urb, dev->udev,
			     usb_sndctrlpipe(dev->udev, 0),
			     (void *)req, buf, size,
			     usbnet_async_cmd_cb, req);
	urb->transfer_flags |= URB_FREE_BUFFER;

	err = usb_submit_urb(urb, GFP_ATOMIC);
	if (err < 0) {
		netdev_err(dev->net, "Error submitting the control message: status=%d\n",
			   err);
		goto fail_free_all;
	}
	return 0;

fail_free_all:
	kfree(req);
fail_free_buf:
	kfree(buf);
	/*
	 * Clear URB_FREE_BUFFER to avoid a double free: buf was just
	 * freed, and the flag could only be set after filling the URB.
	 */
	urb->transfer_flags = 0;
fail_free_urb:
	usb_free_urb(urb);
fail:
	return err;
}
EXPORT_SYMBOL_GPL(usbnet_write_cmd_async);
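/*
 * Example (illustrative sketch, not part of this framework): issuing a
 * register write from a context that cannot sleep, e.g. a status URB
 * completion handler. EXAMPLE_REQ_WRITE_REG and the encoding of the
 * value in wValue are hypothetical. The caller's data is copied before
 * submission, so it need not outlive this call.
 *
 *	static void example_kick_device(struct usbnet *dev, u16 reg, u16 val)
 *	{
 *		int ret;
 *
 *		ret = usbnet_write_cmd_async(dev, EXAMPLE_REQ_WRITE_REG,
 *					     USB_DIR_OUT | USB_TYPE_VENDOR |
 *					     USB_RECIP_DEVICE,
 *					     val, reg, NULL, 0);
 *		if (ret < 0)
 *			netdev_dbg(dev->net, "async write failed: %d\n", ret);
 *	}
 */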
/*-------------------------------------------------------------------------*/

static int __init usbnet_init(void)
{
	/* Compiler should optimize this out: usbnet keeps its per-skb
	 * state (struct skb_data) in the skb control buffer, so the cb
	 * area must be large enough to hold it.
	 */
	BUILD_BUG_ON(
		sizeof_field(struct sk_buff, cb) < sizeof(struct skb_data));

	return 0;
}
module_init(usbnet_init);

static void __exit usbnet_exit(void)
{
}
module_exit(usbnet_exit);

MODULE_AUTHOR("David Brownell");
MODULE_DESCRIPTION("USB network driver framework");
MODULE_LICENSE("GPL");
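/*
 * Example (illustrative sketch, not part of this framework): a device
 * that needs no special commands around suspend can reuse
 * usbnet_manage_power() as its manage_power hook and report idleness
 * via usbnet_device_suggests_idle(); "example_info" and "example_bind"
 * are hypothetical.
 *
 *	static const struct driver_info example_info = {
 *		.description	= "example usbnet device",
 *		.bind		= example_bind,
 *		.manage_power	= usbnet_manage_power,
 *	};
 */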