1 // SPDX-License-Identifier: GPL-2.0+ 2 /* 3 * u_ether.c -- Ethernet-over-USB link layer utilities for Gadget stack 4 * 5 * Copyright (C) 2003-2005,2008 David Brownell 6 * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger 7 * Copyright (C) 2008 Nokia Corporation 8 */ 9 10 /* #define VERBOSE_DEBUG */ 11 12 #include <linux/kernel.h> 13 #include <linux/module.h> 14 #include <linux/gfp.h> 15 #include <linux/device.h> 16 #include <linux/ctype.h> 17 #include <linux/etherdevice.h> 18 #include <linux/ethtool.h> 19 #include <linux/if_vlan.h> 20 #include <linux/string_helpers.h> 21 #include <linux/usb/composite.h> 22 23 #include "u_ether.h" 24 25 26 /* 27 * This component encapsulates the Ethernet link glue needed to provide 28 * one (!) network link through the USB gadget stack, normally "usb0". 29 * 30 * The control and data models are handled by the function driver which 31 * connects to this code; such as CDC Ethernet (ECM or EEM), 32 * "CDC Subset", or RNDIS. That includes all descriptor and endpoint 33 * management. 34 * 35 * Link level addressing is handled by this component using module 36 * parameters; if no such parameters are provided, random link level 37 * addresses are used. Each end of the link uses one address. The 38 * host end address is exported in various ways, and is often recorded 39 * in configuration databases. 40 * 41 * The driver which assembles each configuration using such a link is 42 * responsible for ensuring that each configuration includes at most one 43 * instance of is network link. (The network layer provides ways for 44 * this single "physical" link to be used by multiple virtual links.) 45 */ 46 47 #define UETH__VERSION "29-May-2008" 48 49 /* Experiments show that both Linux and Windows hosts allow up to 16k 50 * frame sizes. Set the max MTU size to 15k+52 to prevent allocating 32k 51 * blocks and still have efficient handling. */ 52 #define GETHER_MAX_MTU_SIZE 15412 53 #define GETHER_MAX_ETH_FRAME_LEN (GETHER_MAX_MTU_SIZE + ETH_HLEN) 54 55 struct eth_dev { 56 /* lock is held while accessing port_usb 57 */ 58 spinlock_t lock; 59 struct gether *port_usb; 60 61 struct net_device *net; 62 struct usb_gadget *gadget; 63 64 spinlock_t req_lock; /* guard {rx,tx}_reqs */ 65 struct list_head tx_reqs, rx_reqs; 66 atomic_t tx_qlen; 67 68 struct sk_buff_head rx_frames; 69 70 unsigned qmult; 71 72 unsigned header_len; 73 struct sk_buff *(*wrap)(struct gether *, struct sk_buff *skb); 74 int (*unwrap)(struct gether *, 75 struct sk_buff *skb, 76 struct sk_buff_head *list); 77 78 struct work_struct work; 79 80 unsigned long todo; 81 #define WORK_RX_MEMORY 0 82 83 bool zlp; 84 bool no_skb_reserve; 85 bool ifname_set; 86 u8 host_mac[ETH_ALEN]; 87 u8 dev_mac[ETH_ALEN]; 88 }; 89 90 /*-------------------------------------------------------------------------*/ 91 92 #define RX_EXTRA 20 /* bytes guarding against rx overflows */ 93 94 #define DEFAULT_QLEN 2 /* double buffering by default */ 95 96 /* for dual-speed hardware, use deeper queues at high/super speed */ 97 static inline int qlen(struct usb_gadget *gadget, unsigned qmult) 98 { 99 if (gadget_is_dualspeed(gadget) && (gadget->speed == USB_SPEED_HIGH || 100 gadget->speed >= USB_SPEED_SUPER)) 101 return qmult * DEFAULT_QLEN; 102 else 103 return DEFAULT_QLEN; 104 } 105 106 /*-------------------------------------------------------------------------*/ 107 108 /* NETWORK DRIVER HOOKUP (to the layer above this driver) */ 109 110 static void eth_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *p) 111 { 112 struct eth_dev *dev = netdev_priv(net); 113 114 strscpy(p->driver, "g_ether", sizeof(p->driver)); 115 strscpy(p->version, UETH__VERSION, sizeof(p->version)); 116 strscpy(p->fw_version, dev->gadget->name, sizeof(p->fw_version)); 117 strscpy(p->bus_info, dev_name(&dev->gadget->dev), sizeof(p->bus_info)); 118 } 119 120 /* REVISIT can also support: 121 * - WOL (by tracking suspends and issuing remote wakeup) 122 * - msglevel (implies updated messaging) 123 * - ... probably more ethtool ops 124 */ 125 126 static const struct ethtool_ops ops = { 127 .get_drvinfo = eth_get_drvinfo, 128 .get_link = ethtool_op_get_link, 129 }; 130 131 static void defer_kevent(struct eth_dev *dev, int flag) 132 { 133 if (test_and_set_bit(flag, &dev->todo)) 134 return; 135 if (!schedule_work(&dev->work)) 136 ERROR(dev, "kevent %d may have been dropped\n", flag); 137 else 138 DBG(dev, "kevent %d scheduled\n", flag); 139 } 140 141 static void rx_complete(struct usb_ep *ep, struct usb_request *req); 142 143 static int 144 rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags) 145 { 146 struct usb_gadget *g = dev->gadget; 147 struct sk_buff *skb; 148 int retval = -ENOMEM; 149 size_t size = 0; 150 struct usb_ep *out; 151 unsigned long flags; 152 153 spin_lock_irqsave(&dev->lock, flags); 154 if (dev->port_usb) 155 out = dev->port_usb->out_ep; 156 else 157 out = NULL; 158 159 if (!out) 160 { 161 spin_unlock_irqrestore(&dev->lock, flags); 162 return -ENOTCONN; 163 } 164 165 /* Padding up to RX_EXTRA handles minor disagreements with host. 166 * Normally we use the USB "terminate on short read" convention; 167 * so allow up to (N*maxpacket), since that memory is normally 168 * already allocated. Some hardware doesn't deal well with short 169 * reads (e.g. DMA must be N*maxpacket), so for now don't trim a 170 * byte off the end (to force hardware errors on overflow). 171 * 172 * RNDIS uses internal framing, and explicitly allows senders to 173 * pad to end-of-packet. That's potentially nice for speed, but 174 * means receivers can't recover lost synch on their own (because 175 * new packets don't only start after a short RX). 176 */ 177 size += sizeof(struct ethhdr) + dev->net->mtu + RX_EXTRA; 178 size += dev->port_usb->header_len; 179 180 if (g->quirk_ep_out_aligned_size) { 181 size += out->maxpacket - 1; 182 size -= size % out->maxpacket; 183 } 184 185 if (dev->port_usb->is_fixed) 186 size = max_t(size_t, size, dev->port_usb->fixed_out_len); 187 spin_unlock_irqrestore(&dev->lock, flags); 188 189 skb = __netdev_alloc_skb(dev->net, size + NET_IP_ALIGN, gfp_flags); 190 if (skb == NULL) { 191 DBG(dev, "no rx skb\n"); 192 goto enomem; 193 } 194 195 /* Some platforms perform better when IP packets are aligned, 196 * but on at least one, checksumming fails otherwise. Note: 197 * RNDIS headers involve variable numbers of LE32 values. 198 */ 199 if (likely(!dev->no_skb_reserve)) 200 skb_reserve(skb, NET_IP_ALIGN); 201 202 req->buf = skb->data; 203 req->length = size; 204 req->complete = rx_complete; 205 req->context = skb; 206 207 retval = usb_ep_queue(out, req, gfp_flags); 208 if (retval == -ENOMEM) 209 enomem: 210 defer_kevent(dev, WORK_RX_MEMORY); 211 if (retval) { 212 DBG(dev, "rx submit --> %d\n", retval); 213 if (skb) 214 dev_kfree_skb_any(skb); 215 spin_lock_irqsave(&dev->req_lock, flags); 216 list_add(&req->list, &dev->rx_reqs); 217 spin_unlock_irqrestore(&dev->req_lock, flags); 218 } 219 return retval; 220 } 221 222 static void rx_complete(struct usb_ep *ep, struct usb_request *req) 223 { 224 struct sk_buff *skb = req->context, *skb2; 225 struct eth_dev *dev = ep->driver_data; 226 int status = req->status; 227 228 switch (status) { 229 230 /* normal completion */ 231 case 0: 232 skb_put(skb, req->actual); 233 234 if (dev->unwrap) { 235 unsigned long flags; 236 237 spin_lock_irqsave(&dev->lock, flags); 238 if (dev->port_usb) { 239 status = dev->unwrap(dev->port_usb, 240 skb, 241 &dev->rx_frames); 242 } else { 243 dev_kfree_skb_any(skb); 244 status = -ENOTCONN; 245 } 246 spin_unlock_irqrestore(&dev->lock, flags); 247 } else { 248 skb_queue_tail(&dev->rx_frames, skb); 249 } 250 skb = NULL; 251 252 skb2 = skb_dequeue(&dev->rx_frames); 253 while (skb2) { 254 if (status < 0 255 || ETH_HLEN > skb2->len 256 || skb2->len > GETHER_MAX_ETH_FRAME_LEN) { 257 dev->net->stats.rx_errors++; 258 dev->net->stats.rx_length_errors++; 259 DBG(dev, "rx length %d\n", skb2->len); 260 dev_kfree_skb_any(skb2); 261 goto next_frame; 262 } 263 skb2->protocol = eth_type_trans(skb2, dev->net); 264 dev->net->stats.rx_packets++; 265 dev->net->stats.rx_bytes += skb2->len; 266 267 /* no buffer copies needed, unless hardware can't 268 * use skb buffers. 269 */ 270 status = netif_rx(skb2); 271 next_frame: 272 skb2 = skb_dequeue(&dev->rx_frames); 273 } 274 break; 275 276 /* software-driven interface shutdown */ 277 case -ECONNRESET: /* unlink */ 278 case -ESHUTDOWN: /* disconnect etc */ 279 VDBG(dev, "rx shutdown, code %d\n", status); 280 goto quiesce; 281 282 /* for hardware automagic (such as pxa) */ 283 case -ECONNABORTED: /* endpoint reset */ 284 DBG(dev, "rx %s reset\n", ep->name); 285 defer_kevent(dev, WORK_RX_MEMORY); 286 quiesce: 287 dev_kfree_skb_any(skb); 288 goto clean; 289 290 /* data overrun */ 291 case -EOVERFLOW: 292 dev->net->stats.rx_over_errors++; 293 fallthrough; 294 295 default: 296 dev->net->stats.rx_errors++; 297 DBG(dev, "rx status %d\n", status); 298 break; 299 } 300 301 if (skb) 302 dev_kfree_skb_any(skb); 303 if (!netif_running(dev->net)) { 304 clean: 305 spin_lock(&dev->req_lock); 306 list_add(&req->list, &dev->rx_reqs); 307 spin_unlock(&dev->req_lock); 308 req = NULL; 309 } 310 if (req) 311 rx_submit(dev, req, GFP_ATOMIC); 312 } 313 314 static int prealloc(struct list_head *list, struct usb_ep *ep, unsigned n) 315 { 316 unsigned i; 317 struct usb_request *req; 318 319 if (!n) 320 return -ENOMEM; 321 322 /* queue/recycle up to N requests */ 323 i = n; 324 list_for_each_entry(req, list, list) { 325 if (i-- == 0) 326 goto extra; 327 } 328 while (i--) { 329 req = usb_ep_alloc_request(ep, GFP_ATOMIC); 330 if (!req) 331 return list_empty(list) ? -ENOMEM : 0; 332 list_add(&req->list, list); 333 } 334 return 0; 335 336 extra: 337 /* free extras */ 338 for (;;) { 339 struct list_head *next; 340 341 next = req->list.next; 342 list_del(&req->list); 343 usb_ep_free_request(ep, req); 344 345 if (next == list) 346 break; 347 348 req = container_of(next, struct usb_request, list); 349 } 350 return 0; 351 } 352 353 static int alloc_requests(struct eth_dev *dev, struct gether *link, unsigned n) 354 { 355 int status; 356 357 spin_lock(&dev->req_lock); 358 status = prealloc(&dev->tx_reqs, link->in_ep, n); 359 if (status < 0) 360 goto fail; 361 status = prealloc(&dev->rx_reqs, link->out_ep, n); 362 if (status < 0) 363 goto fail; 364 goto done; 365 fail: 366 DBG(dev, "can't alloc requests\n"); 367 done: 368 spin_unlock(&dev->req_lock); 369 return status; 370 } 371 372 static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags) 373 { 374 struct usb_request *req; 375 unsigned long flags; 376 377 /* fill unused rxq slots with some skb */ 378 spin_lock_irqsave(&dev->req_lock, flags); 379 while (!list_empty(&dev->rx_reqs)) { 380 req = list_first_entry(&dev->rx_reqs, struct usb_request, list); 381 list_del_init(&req->list); 382 spin_unlock_irqrestore(&dev->req_lock, flags); 383 384 if (rx_submit(dev, req, gfp_flags) < 0) { 385 defer_kevent(dev, WORK_RX_MEMORY); 386 return; 387 } 388 389 spin_lock_irqsave(&dev->req_lock, flags); 390 } 391 spin_unlock_irqrestore(&dev->req_lock, flags); 392 } 393 394 static void eth_work(struct work_struct *work) 395 { 396 struct eth_dev *dev = container_of(work, struct eth_dev, work); 397 398 if (test_and_clear_bit(WORK_RX_MEMORY, &dev->todo)) { 399 if (netif_running(dev->net)) 400 rx_fill(dev, GFP_KERNEL); 401 } 402 403 if (dev->todo) 404 DBG(dev, "work done, flags = 0x%lx\n", dev->todo); 405 } 406 407 static void tx_complete(struct usb_ep *ep, struct usb_request *req) 408 { 409 struct sk_buff *skb = req->context; 410 struct eth_dev *dev = ep->driver_data; 411 412 switch (req->status) { 413 default: 414 dev->net->stats.tx_errors++; 415 VDBG(dev, "tx err %d\n", req->status); 416 fallthrough; 417 case -ECONNRESET: /* unlink */ 418 case -ESHUTDOWN: /* disconnect etc */ 419 dev_kfree_skb_any(skb); 420 break; 421 case 0: 422 dev->net->stats.tx_bytes += skb->len; 423 dev_consume_skb_any(skb); 424 } 425 dev->net->stats.tx_packets++; 426 427 spin_lock(&dev->req_lock); 428 list_add(&req->list, &dev->tx_reqs); 429 spin_unlock(&dev->req_lock); 430 431 atomic_dec(&dev->tx_qlen); 432 if (netif_carrier_ok(dev->net)) 433 netif_wake_queue(dev->net); 434 } 435 436 static inline int is_promisc(u16 cdc_filter) 437 { 438 return cdc_filter & USB_CDC_PACKET_TYPE_PROMISCUOUS; 439 } 440 441 static int ether_wakeup_host(struct gether *port) 442 { 443 int ret; 444 struct usb_function *func = &port->func; 445 struct usb_gadget *gadget = func->config->cdev->gadget; 446 447 if (func->func_suspended) 448 ret = usb_func_wakeup(func); 449 else 450 ret = usb_gadget_wakeup(gadget); 451 452 return ret; 453 } 454 455 static netdev_tx_t eth_start_xmit(struct sk_buff *skb, 456 struct net_device *net) 457 { 458 struct eth_dev *dev = netdev_priv(net); 459 int length = 0; 460 int retval; 461 struct usb_request *req = NULL; 462 unsigned long flags; 463 struct usb_ep *in; 464 u16 cdc_filter; 465 466 spin_lock_irqsave(&dev->lock, flags); 467 if (dev->port_usb) { 468 in = dev->port_usb->in_ep; 469 cdc_filter = dev->port_usb->cdc_filter; 470 } else { 471 in = NULL; 472 cdc_filter = 0; 473 } 474 475 if (dev->port_usb && dev->port_usb->is_suspend) { 476 DBG(dev, "Port suspended. Triggering wakeup\n"); 477 netif_stop_queue(net); 478 spin_unlock_irqrestore(&dev->lock, flags); 479 ether_wakeup_host(dev->port_usb); 480 return NETDEV_TX_BUSY; 481 } 482 483 spin_unlock_irqrestore(&dev->lock, flags); 484 485 if (!in) { 486 if (skb) 487 dev_kfree_skb_any(skb); 488 return NETDEV_TX_OK; 489 } 490 491 /* apply outgoing CDC or RNDIS filters */ 492 if (skb && !is_promisc(cdc_filter)) { 493 u8 *dest = skb->data; 494 495 if (is_multicast_ether_addr(dest)) { 496 u16 type; 497 498 /* ignores USB_CDC_PACKET_TYPE_MULTICAST and host 499 * SET_ETHERNET_MULTICAST_FILTERS requests 500 */ 501 if (is_broadcast_ether_addr(dest)) 502 type = USB_CDC_PACKET_TYPE_BROADCAST; 503 else 504 type = USB_CDC_PACKET_TYPE_ALL_MULTICAST; 505 if (!(cdc_filter & type)) { 506 dev_kfree_skb_any(skb); 507 return NETDEV_TX_OK; 508 } 509 } 510 /* ignores USB_CDC_PACKET_TYPE_DIRECTED */ 511 } 512 513 spin_lock_irqsave(&dev->req_lock, flags); 514 /* 515 * this freelist can be empty if an interrupt triggered disconnect() 516 * and reconfigured the gadget (shutting down this queue) after the 517 * network stack decided to xmit but before we got the spinlock. 518 */ 519 if (list_empty(&dev->tx_reqs)) { 520 spin_unlock_irqrestore(&dev->req_lock, flags); 521 return NETDEV_TX_BUSY; 522 } 523 524 req = list_first_entry(&dev->tx_reqs, struct usb_request, list); 525 list_del(&req->list); 526 527 /* temporarily stop TX queue when the freelist empties */ 528 if (list_empty(&dev->tx_reqs)) 529 netif_stop_queue(net); 530 spin_unlock_irqrestore(&dev->req_lock, flags); 531 532 /* no buffer copies needed, unless the network stack did it 533 * or the hardware can't use skb buffers. 534 * or there's not enough space for extra headers we need 535 */ 536 if (dev->wrap) { 537 unsigned long flags; 538 539 spin_lock_irqsave(&dev->lock, flags); 540 if (dev->port_usb) 541 skb = dev->wrap(dev->port_usb, skb); 542 spin_unlock_irqrestore(&dev->lock, flags); 543 if (!skb) { 544 /* Multi frame CDC protocols may store the frame for 545 * later which is not a dropped frame. 546 */ 547 if (dev->port_usb && 548 dev->port_usb->supports_multi_frame) 549 goto multiframe; 550 goto drop; 551 } 552 } 553 554 length = skb->len; 555 req->buf = skb->data; 556 req->context = skb; 557 req->complete = tx_complete; 558 559 /* NCM requires no zlp if transfer is dwNtbInMaxSize */ 560 if (dev->port_usb && 561 dev->port_usb->is_fixed && 562 length == dev->port_usb->fixed_in_len && 563 (length % in->maxpacket) == 0) 564 req->zero = 0; 565 else 566 req->zero = 1; 567 568 /* use zlp framing on tx for strict CDC-Ether conformance, 569 * though any robust network rx path ignores extra padding. 570 * and some hardware doesn't like to write zlps. 571 */ 572 if (req->zero && !dev->zlp && (length % in->maxpacket) == 0) 573 length++; 574 575 req->length = length; 576 577 retval = usb_ep_queue(in, req, GFP_ATOMIC); 578 switch (retval) { 579 default: 580 DBG(dev, "tx queue err %d\n", retval); 581 break; 582 case 0: 583 netif_trans_update(net); 584 atomic_inc(&dev->tx_qlen); 585 } 586 587 if (retval) { 588 dev_kfree_skb_any(skb); 589 drop: 590 dev->net->stats.tx_dropped++; 591 multiframe: 592 spin_lock_irqsave(&dev->req_lock, flags); 593 if (list_empty(&dev->tx_reqs)) 594 netif_start_queue(net); 595 list_add(&req->list, &dev->tx_reqs); 596 spin_unlock_irqrestore(&dev->req_lock, flags); 597 } 598 return NETDEV_TX_OK; 599 } 600 601 /*-------------------------------------------------------------------------*/ 602 603 static void eth_start(struct eth_dev *dev, gfp_t gfp_flags) 604 { 605 DBG(dev, "%s\n", __func__); 606 607 /* fill the rx queue */ 608 rx_fill(dev, gfp_flags); 609 610 /* and open the tx floodgates */ 611 atomic_set(&dev->tx_qlen, 0); 612 netif_wake_queue(dev->net); 613 } 614 615 static int eth_open(struct net_device *net) 616 { 617 struct eth_dev *dev = netdev_priv(net); 618 struct gether *link; 619 620 DBG(dev, "%s\n", __func__); 621 if (netif_carrier_ok(dev->net)) 622 eth_start(dev, GFP_KERNEL); 623 624 spin_lock_irq(&dev->lock); 625 link = dev->port_usb; 626 if (link && link->open) 627 link->open(link); 628 spin_unlock_irq(&dev->lock); 629 630 return 0; 631 } 632 633 static int eth_stop(struct net_device *net) 634 { 635 struct eth_dev *dev = netdev_priv(net); 636 unsigned long flags; 637 638 VDBG(dev, "%s\n", __func__); 639 netif_stop_queue(net); 640 641 DBG(dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld\n", 642 dev->net->stats.rx_packets, dev->net->stats.tx_packets, 643 dev->net->stats.rx_errors, dev->net->stats.tx_errors 644 ); 645 646 /* ensure there are no more active requests */ 647 spin_lock_irqsave(&dev->lock, flags); 648 if (dev->port_usb) { 649 struct gether *link = dev->port_usb; 650 const struct usb_endpoint_descriptor *in; 651 const struct usb_endpoint_descriptor *out; 652 653 if (link->close) 654 link->close(link); 655 656 /* NOTE: we have no abort-queue primitive we could use 657 * to cancel all pending I/O. Instead, we disable then 658 * reenable the endpoints ... this idiom may leave toggle 659 * wrong, but that's a self-correcting error. 660 * 661 * REVISIT: we *COULD* just let the transfers complete at 662 * their own pace; the network stack can handle old packets. 663 * For the moment we leave this here, since it works. 664 */ 665 in = link->in_ep->desc; 666 out = link->out_ep->desc; 667 usb_ep_disable(link->in_ep); 668 usb_ep_disable(link->out_ep); 669 if (netif_carrier_ok(net)) { 670 DBG(dev, "host still using in/out endpoints\n"); 671 link->in_ep->desc = in; 672 link->out_ep->desc = out; 673 usb_ep_enable(link->in_ep); 674 usb_ep_enable(link->out_ep); 675 } 676 } 677 spin_unlock_irqrestore(&dev->lock, flags); 678 679 return 0; 680 } 681 682 /*-------------------------------------------------------------------------*/ 683 684 static int get_ether_addr(const char *str, u8 *dev_addr) 685 { 686 if (str) { 687 unsigned i; 688 689 for (i = 0; i < 6; i++) { 690 unsigned char num; 691 692 if ((*str == '.') || (*str == ':')) 693 str++; 694 num = hex_to_bin(*str++) << 4; 695 num |= hex_to_bin(*str++); 696 dev_addr [i] = num; 697 } 698 if (is_valid_ether_addr(dev_addr)) 699 return 0; 700 } 701 eth_random_addr(dev_addr); 702 return 1; 703 } 704 705 static int get_ether_addr_str(u8 dev_addr[ETH_ALEN], char *str, int len) 706 { 707 if (len < 18) 708 return -EINVAL; 709 710 snprintf(str, len, "%pM", dev_addr); 711 return 18; 712 } 713 714 static const struct net_device_ops eth_netdev_ops = { 715 .ndo_open = eth_open, 716 .ndo_stop = eth_stop, 717 .ndo_start_xmit = eth_start_xmit, 718 .ndo_set_mac_address = eth_mac_addr, 719 .ndo_validate_addr = eth_validate_addr, 720 }; 721 722 static struct device_type gadget_type = { 723 .name = "gadget", 724 }; 725 726 /* 727 * gether_setup_name - initialize one ethernet-over-usb link 728 * @g: gadget to associated with these links 729 * @ethaddr: NULL, or a buffer in which the ethernet address of the 730 * host side of the link is recorded 731 * @netname: name for network device (for example, "usb") 732 * Context: may sleep 733 * 734 * This sets up the single network link that may be exported by a 735 * gadget driver using this framework. The link layer addresses are 736 * set up using module parameters. 737 * 738 * Returns an eth_dev pointer on success, or an ERR_PTR on failure. 739 */ 740 struct eth_dev *gether_setup_name(struct usb_gadget *g, 741 const char *dev_addr, const char *host_addr, 742 u8 ethaddr[ETH_ALEN], unsigned qmult, const char *netname) 743 { 744 struct eth_dev *dev; 745 struct net_device *net; 746 int status; 747 u8 addr[ETH_ALEN]; 748 749 net = alloc_etherdev(sizeof *dev); 750 if (!net) 751 return ERR_PTR(-ENOMEM); 752 753 dev = netdev_priv(net); 754 spin_lock_init(&dev->lock); 755 spin_lock_init(&dev->req_lock); 756 INIT_WORK(&dev->work, eth_work); 757 INIT_LIST_HEAD(&dev->tx_reqs); 758 INIT_LIST_HEAD(&dev->rx_reqs); 759 760 skb_queue_head_init(&dev->rx_frames); 761 762 /* network device setup */ 763 dev->net = net; 764 dev->qmult = qmult; 765 snprintf(net->name, sizeof(net->name), "%s%%d", netname); 766 767 if (get_ether_addr(dev_addr, addr)) { 768 net->addr_assign_type = NET_ADDR_RANDOM; 769 dev_warn(&g->dev, 770 "using random %s ethernet address\n", "self"); 771 } else { 772 net->addr_assign_type = NET_ADDR_SET; 773 } 774 eth_hw_addr_set(net, addr); 775 if (get_ether_addr(host_addr, dev->host_mac)) 776 dev_warn(&g->dev, 777 "using random %s ethernet address\n", "host"); 778 779 if (ethaddr) 780 memcpy(ethaddr, dev->host_mac, ETH_ALEN); 781 782 net->netdev_ops = ð_netdev_ops; 783 784 net->ethtool_ops = &ops; 785 786 /* MTU range: 14 - 15412 */ 787 net->min_mtu = ETH_HLEN; 788 net->max_mtu = GETHER_MAX_MTU_SIZE; 789 790 dev->gadget = g; 791 SET_NETDEV_DEV(net, &g->dev); 792 SET_NETDEV_DEVTYPE(net, &gadget_type); 793 794 status = register_netdev(net); 795 if (status < 0) { 796 dev_dbg(&g->dev, "register_netdev failed, %d\n", status); 797 free_netdev(net); 798 dev = ERR_PTR(status); 799 } else { 800 INFO(dev, "MAC %pM\n", net->dev_addr); 801 INFO(dev, "HOST MAC %pM\n", dev->host_mac); 802 803 /* 804 * two kinds of host-initiated state changes: 805 * - iff DATA transfer is active, carrier is "on" 806 * - tx queueing enabled if open *and* carrier is "on" 807 */ 808 netif_carrier_off(net); 809 } 810 811 return dev; 812 } 813 EXPORT_SYMBOL_GPL(gether_setup_name); 814 815 struct net_device *gether_setup_name_default(const char *netname) 816 { 817 struct net_device *net; 818 struct eth_dev *dev; 819 820 net = alloc_etherdev(sizeof(*dev)); 821 if (!net) 822 return ERR_PTR(-ENOMEM); 823 824 dev = netdev_priv(net); 825 spin_lock_init(&dev->lock); 826 spin_lock_init(&dev->req_lock); 827 INIT_WORK(&dev->work, eth_work); 828 INIT_LIST_HEAD(&dev->tx_reqs); 829 INIT_LIST_HEAD(&dev->rx_reqs); 830 831 skb_queue_head_init(&dev->rx_frames); 832 833 /* network device setup */ 834 dev->net = net; 835 dev->qmult = QMULT_DEFAULT; 836 snprintf(net->name, sizeof(net->name), "%s%%d", netname); 837 838 eth_random_addr(dev->dev_mac); 839 840 /* by default we always have a random MAC address */ 841 net->addr_assign_type = NET_ADDR_RANDOM; 842 843 eth_random_addr(dev->host_mac); 844 845 net->netdev_ops = ð_netdev_ops; 846 847 net->ethtool_ops = &ops; 848 SET_NETDEV_DEVTYPE(net, &gadget_type); 849 850 /* MTU range: 14 - 15412 */ 851 net->min_mtu = ETH_HLEN; 852 net->max_mtu = GETHER_MAX_MTU_SIZE; 853 854 return net; 855 } 856 EXPORT_SYMBOL_GPL(gether_setup_name_default); 857 858 int gether_register_netdev(struct net_device *net) 859 { 860 struct eth_dev *dev; 861 struct usb_gadget *g; 862 int status; 863 864 if (!net->dev.parent) 865 return -EINVAL; 866 dev = netdev_priv(net); 867 g = dev->gadget; 868 869 eth_hw_addr_set(net, dev->dev_mac); 870 871 status = register_netdev(net); 872 if (status < 0) { 873 dev_dbg(&g->dev, "register_netdev failed, %d\n", status); 874 return status; 875 } else { 876 INFO(dev, "HOST MAC %pM\n", dev->host_mac); 877 INFO(dev, "MAC %pM\n", dev->dev_mac); 878 879 /* two kinds of host-initiated state changes: 880 * - iff DATA transfer is active, carrier is "on" 881 * - tx queueing enabled if open *and* carrier is "on" 882 */ 883 netif_carrier_off(net); 884 } 885 886 return status; 887 } 888 EXPORT_SYMBOL_GPL(gether_register_netdev); 889 890 void gether_set_gadget(struct net_device *net, struct usb_gadget *g) 891 { 892 struct eth_dev *dev; 893 894 dev = netdev_priv(net); 895 dev->gadget = g; 896 SET_NETDEV_DEV(net, &g->dev); 897 } 898 EXPORT_SYMBOL_GPL(gether_set_gadget); 899 900 int gether_set_dev_addr(struct net_device *net, const char *dev_addr) 901 { 902 struct eth_dev *dev; 903 u8 new_addr[ETH_ALEN]; 904 905 dev = netdev_priv(net); 906 if (get_ether_addr(dev_addr, new_addr)) 907 return -EINVAL; 908 memcpy(dev->dev_mac, new_addr, ETH_ALEN); 909 net->addr_assign_type = NET_ADDR_SET; 910 return 0; 911 } 912 EXPORT_SYMBOL_GPL(gether_set_dev_addr); 913 914 int gether_get_dev_addr(struct net_device *net, char *dev_addr, int len) 915 { 916 struct eth_dev *dev; 917 int ret; 918 919 dev = netdev_priv(net); 920 ret = get_ether_addr_str(dev->dev_mac, dev_addr, len); 921 if (ret + 1 < len) { 922 dev_addr[ret++] = '\n'; 923 dev_addr[ret] = '\0'; 924 } 925 926 return ret; 927 } 928 EXPORT_SYMBOL_GPL(gether_get_dev_addr); 929 930 int gether_set_host_addr(struct net_device *net, const char *host_addr) 931 { 932 struct eth_dev *dev; 933 u8 new_addr[ETH_ALEN]; 934 935 dev = netdev_priv(net); 936 if (get_ether_addr(host_addr, new_addr)) 937 return -EINVAL; 938 memcpy(dev->host_mac, new_addr, ETH_ALEN); 939 return 0; 940 } 941 EXPORT_SYMBOL_GPL(gether_set_host_addr); 942 943 int gether_get_host_addr(struct net_device *net, char *host_addr, int len) 944 { 945 struct eth_dev *dev; 946 int ret; 947 948 dev = netdev_priv(net); 949 ret = get_ether_addr_str(dev->host_mac, host_addr, len); 950 if (ret + 1 < len) { 951 host_addr[ret++] = '\n'; 952 host_addr[ret] = '\0'; 953 } 954 955 return ret; 956 } 957 EXPORT_SYMBOL_GPL(gether_get_host_addr); 958 959 int gether_get_host_addr_cdc(struct net_device *net, char *host_addr, int len) 960 { 961 struct eth_dev *dev; 962 963 if (len < 13) 964 return -EINVAL; 965 966 dev = netdev_priv(net); 967 snprintf(host_addr, len, "%pm", dev->host_mac); 968 969 string_upper(host_addr, host_addr); 970 971 return strlen(host_addr); 972 } 973 EXPORT_SYMBOL_GPL(gether_get_host_addr_cdc); 974 975 void gether_get_host_addr_u8(struct net_device *net, u8 host_mac[ETH_ALEN]) 976 { 977 struct eth_dev *dev; 978 979 dev = netdev_priv(net); 980 memcpy(host_mac, dev->host_mac, ETH_ALEN); 981 } 982 EXPORT_SYMBOL_GPL(gether_get_host_addr_u8); 983 984 void gether_set_qmult(struct net_device *net, unsigned qmult) 985 { 986 struct eth_dev *dev; 987 988 dev = netdev_priv(net); 989 dev->qmult = qmult; 990 } 991 EXPORT_SYMBOL_GPL(gether_set_qmult); 992 993 unsigned gether_get_qmult(struct net_device *net) 994 { 995 struct eth_dev *dev; 996 997 dev = netdev_priv(net); 998 return dev->qmult; 999 } 1000 EXPORT_SYMBOL_GPL(gether_get_qmult); 1001 1002 int gether_get_ifname(struct net_device *net, char *name, int len) 1003 { 1004 struct eth_dev *dev = netdev_priv(net); 1005 int ret; 1006 1007 rtnl_lock(); 1008 ret = scnprintf(name, len, "%s\n", 1009 dev->ifname_set ? net->name : netdev_name(net)); 1010 rtnl_unlock(); 1011 return ret; 1012 } 1013 EXPORT_SYMBOL_GPL(gether_get_ifname); 1014 1015 int gether_set_ifname(struct net_device *net, const char *name, int len) 1016 { 1017 struct eth_dev *dev = netdev_priv(net); 1018 char tmp[IFNAMSIZ]; 1019 const char *p; 1020 1021 if (name[len - 1] == '\n') 1022 len--; 1023 1024 if (len >= sizeof(tmp)) 1025 return -E2BIG; 1026 1027 strscpy(tmp, name, len + 1); 1028 if (!dev_valid_name(tmp)) 1029 return -EINVAL; 1030 1031 /* Require exactly one %d, so binding will not fail with EEXIST. */ 1032 p = strchr(name, '%'); 1033 if (!p || p[1] != 'd' || strchr(p + 2, '%')) 1034 return -EINVAL; 1035 1036 strncpy(net->name, tmp, sizeof(net->name)); 1037 dev->ifname_set = true; 1038 1039 return 0; 1040 } 1041 EXPORT_SYMBOL_GPL(gether_set_ifname); 1042 1043 void gether_suspend(struct gether *link) 1044 { 1045 struct eth_dev *dev = link->ioport; 1046 unsigned long flags; 1047 1048 if (!dev) 1049 return; 1050 1051 if (atomic_read(&dev->tx_qlen)) { 1052 /* 1053 * There is a transfer in progress. So we trigger a remote 1054 * wakeup to inform the host. 1055 */ 1056 ether_wakeup_host(dev->port_usb); 1057 return; 1058 } 1059 spin_lock_irqsave(&dev->lock, flags); 1060 link->is_suspend = true; 1061 spin_unlock_irqrestore(&dev->lock, flags); 1062 } 1063 EXPORT_SYMBOL_GPL(gether_suspend); 1064 1065 void gether_resume(struct gether *link) 1066 { 1067 struct eth_dev *dev = link->ioport; 1068 unsigned long flags; 1069 1070 if (!dev) 1071 return; 1072 1073 if (netif_queue_stopped(dev->net)) 1074 netif_start_queue(dev->net); 1075 1076 spin_lock_irqsave(&dev->lock, flags); 1077 link->is_suspend = false; 1078 spin_unlock_irqrestore(&dev->lock, flags); 1079 } 1080 EXPORT_SYMBOL_GPL(gether_resume); 1081 1082 /* 1083 * gether_cleanup - remove Ethernet-over-USB device 1084 * Context: may sleep 1085 * 1086 * This is called to free all resources allocated by @gether_setup(). 1087 */ 1088 void gether_cleanup(struct eth_dev *dev) 1089 { 1090 if (!dev) 1091 return; 1092 1093 unregister_netdev(dev->net); 1094 flush_work(&dev->work); 1095 free_netdev(dev->net); 1096 } 1097 EXPORT_SYMBOL_GPL(gether_cleanup); 1098 1099 /** 1100 * gether_connect - notify network layer that USB link is active 1101 * @link: the USB link, set up with endpoints, descriptors matching 1102 * current device speed, and any framing wrapper(s) set up. 1103 * Context: irqs blocked 1104 * 1105 * This is called to activate endpoints and let the network layer know 1106 * the connection is active ("carrier detect"). It may cause the I/O 1107 * queues to open and start letting network packets flow, but will in 1108 * any case activate the endpoints so that they respond properly to the 1109 * USB host. 1110 * 1111 * Verify net_device pointer returned using IS_ERR(). If it doesn't 1112 * indicate some error code (negative errno), ep->driver_data values 1113 * have been overwritten. 1114 */ 1115 struct net_device *gether_connect(struct gether *link) 1116 { 1117 struct eth_dev *dev = link->ioport; 1118 int result = 0; 1119 1120 if (!dev) 1121 return ERR_PTR(-EINVAL); 1122 1123 link->in_ep->driver_data = dev; 1124 result = usb_ep_enable(link->in_ep); 1125 if (result != 0) { 1126 DBG(dev, "enable %s --> %d\n", 1127 link->in_ep->name, result); 1128 goto fail0; 1129 } 1130 1131 link->out_ep->driver_data = dev; 1132 result = usb_ep_enable(link->out_ep); 1133 if (result != 0) { 1134 DBG(dev, "enable %s --> %d\n", 1135 link->out_ep->name, result); 1136 goto fail1; 1137 } 1138 1139 if (result == 0) 1140 result = alloc_requests(dev, link, qlen(dev->gadget, 1141 dev->qmult)); 1142 1143 if (result == 0) { 1144 dev->zlp = link->is_zlp_ok; 1145 dev->no_skb_reserve = gadget_avoids_skb_reserve(dev->gadget); 1146 DBG(dev, "qlen %d\n", qlen(dev->gadget, dev->qmult)); 1147 1148 dev->header_len = link->header_len; 1149 dev->unwrap = link->unwrap; 1150 dev->wrap = link->wrap; 1151 1152 spin_lock(&dev->lock); 1153 dev->port_usb = link; 1154 if (netif_running(dev->net)) { 1155 if (link->open) 1156 link->open(link); 1157 } else { 1158 if (link->close) 1159 link->close(link); 1160 } 1161 spin_unlock(&dev->lock); 1162 1163 netif_carrier_on(dev->net); 1164 if (netif_running(dev->net)) 1165 eth_start(dev, GFP_ATOMIC); 1166 1167 /* on error, disable any endpoints */ 1168 } else { 1169 (void) usb_ep_disable(link->out_ep); 1170 fail1: 1171 (void) usb_ep_disable(link->in_ep); 1172 } 1173 fail0: 1174 /* caller is responsible for cleanup on error */ 1175 if (result < 0) 1176 return ERR_PTR(result); 1177 return dev->net; 1178 } 1179 EXPORT_SYMBOL_GPL(gether_connect); 1180 1181 /** 1182 * gether_disconnect - notify network layer that USB link is inactive 1183 * @link: the USB link, on which gether_connect() was called 1184 * Context: irqs blocked 1185 * 1186 * This is called to deactivate endpoints and let the network layer know 1187 * the connection went inactive ("no carrier"). 1188 * 1189 * On return, the state is as if gether_connect() had never been called. 1190 * The endpoints are inactive, and accordingly without active USB I/O. 1191 * Pointers to endpoint descriptors and endpoint private data are nulled. 1192 */ 1193 void gether_disconnect(struct gether *link) 1194 { 1195 struct eth_dev *dev = link->ioport; 1196 struct usb_request *req; 1197 1198 WARN_ON(!dev); 1199 if (!dev) 1200 return; 1201 1202 DBG(dev, "%s\n", __func__); 1203 1204 netif_stop_queue(dev->net); 1205 netif_carrier_off(dev->net); 1206 1207 /* disable endpoints, forcing (synchronous) completion 1208 * of all pending i/o. then free the request objects 1209 * and forget about the endpoints. 1210 */ 1211 usb_ep_disable(link->in_ep); 1212 spin_lock(&dev->req_lock); 1213 while (!list_empty(&dev->tx_reqs)) { 1214 req = list_first_entry(&dev->tx_reqs, struct usb_request, list); 1215 list_del(&req->list); 1216 1217 spin_unlock(&dev->req_lock); 1218 usb_ep_free_request(link->in_ep, req); 1219 spin_lock(&dev->req_lock); 1220 } 1221 spin_unlock(&dev->req_lock); 1222 link->in_ep->desc = NULL; 1223 1224 usb_ep_disable(link->out_ep); 1225 spin_lock(&dev->req_lock); 1226 while (!list_empty(&dev->rx_reqs)) { 1227 req = list_first_entry(&dev->rx_reqs, struct usb_request, list); 1228 list_del(&req->list); 1229 1230 spin_unlock(&dev->req_lock); 1231 usb_ep_free_request(link->out_ep, req); 1232 spin_lock(&dev->req_lock); 1233 } 1234 spin_unlock(&dev->req_lock); 1235 link->out_ep->desc = NULL; 1236 1237 /* finish forgetting about this USB link episode */ 1238 dev->header_len = 0; 1239 dev->unwrap = NULL; 1240 dev->wrap = NULL; 1241 1242 spin_lock(&dev->lock); 1243 dev->port_usb = NULL; 1244 link->is_suspend = false; 1245 spin_unlock(&dev->lock); 1246 } 1247 EXPORT_SYMBOL_GPL(gether_disconnect); 1248 1249 MODULE_LICENSE("GPL"); 1250 MODULE_AUTHOR("David Brownell"); 1251