// SPDX-License-Identifier: GPL-2.0+
/*
 * u_ether.c -- Ethernet-over-USB link layer utilities for Gadget stack
 *
 * Copyright (C) 2003-2005,2008 David Brownell
 * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
 * Copyright (C) 2008 Nokia Corporation
 */

/* #define VERBOSE_DEBUG */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/device.h>
#include <linux/ctype.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/usb/composite.h>

#include "u_ether.h"


/*
 * This component encapsulates the Ethernet link glue needed to provide
 * one (!) network link through the USB gadget stack, normally "usb0".
 *
 * The control and data models are handled by the function driver which
 * connects to this code; such as CDC Ethernet (ECM or EEM),
 * "CDC Subset", or RNDIS.  That includes all descriptor and endpoint
 * management.
 *
 * Link level addressing is handled by this component using module
 * parameters; if no such parameters are provided, random link level
 * addresses are used.  Each end of the link uses one address.  The
 * host end address is exported in various ways, and is often recorded
 * in configuration databases.
 *
 * The driver which assembles each configuration using such a link is
 * responsible for ensuring that each configuration includes at most one
 * instance of this network link.  (The network layer provides ways for
 * this single "physical" link to be used by multiple virtual links.)
 */

#define UETH__VERSION	"29-May-2008"

/* Experiments show that both Linux and Windows hosts allow up to 16k
 * frame sizes. Set the max MTU size to 15k+52 to prevent allocating 32k
 * blocks and still have efficient handling. */
#define GETHER_MAX_MTU_SIZE 15412
#define GETHER_MAX_ETH_FRAME_LEN (GETHER_MAX_MTU_SIZE + ETH_HLEN)
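
/* Spelling out the arithmetic behind the limits above: 15k + 52 is
 * 15 * 1024 + 52 = 15412 bytes of MTU, so a maximal Ethernet frame is
 * GETHER_MAX_MTU_SIZE + ETH_HLEN (14) = 15426 bytes, comfortably inside
 * the 16k frame size that hosts have been observed to accept.
 */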

struct eth_dev {
	/* lock is held while accessing port_usb
	 */
	spinlock_t		lock;
	struct gether		*port_usb;

	struct net_device	*net;
	struct usb_gadget	*gadget;

	spinlock_t		req_lock;	/* guard {rx,tx}_reqs */
	struct list_head	tx_reqs, rx_reqs;
	atomic_t		tx_qlen;

	struct sk_buff_head	rx_frames;

	unsigned		qmult;

	unsigned		header_len;
	struct sk_buff		*(*wrap)(struct gether *, struct sk_buff *skb);
	int			(*unwrap)(struct gether *,
						struct sk_buff *skb,
						struct sk_buff_head *list);

	struct work_struct	work;

	unsigned long		todo;
#define	WORK_RX_MEMORY		0

	bool			zlp;
	bool			no_skb_reserve;
	bool			ifname_set;
	u8			host_mac[ETH_ALEN];
	u8			dev_mac[ETH_ALEN];
};

/*-------------------------------------------------------------------------*/

#define RX_EXTRA	20	/* bytes guarding against rx overflows */

#define DEFAULT_QLEN	2	/* double buffering by default */

/* for dual-speed hardware, use deeper queues at high/super speed */
static inline int qlen(struct usb_gadget *gadget, unsigned qmult)
{
	if (gadget_is_dualspeed(gadget) && (gadget->speed == USB_SPEED_HIGH ||
					    gadget->speed >= USB_SPEED_SUPER))
		return qmult * DEFAULT_QLEN;
	else
		return DEFAULT_QLEN;
}

/*-------------------------------------------------------------------------*/

/* NETWORK DRIVER HOOKUP (to the layer above this driver) */

static void eth_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *p)
{
	struct eth_dev *dev = netdev_priv(net);

	strscpy(p->driver, "g_ether", sizeof(p->driver));
	strscpy(p->version, UETH__VERSION, sizeof(p->version));
	strscpy(p->fw_version, dev->gadget->name, sizeof(p->fw_version));
	strscpy(p->bus_info, dev_name(&dev->gadget->dev), sizeof(p->bus_info));
}

/* REVISIT can also support:
 *   - WOL (by tracking suspends and issuing remote wakeup)
 *   - msglevel (implies updated messaging)
 *   - ... probably more ethtool ops
 */

static const struct ethtool_ops ops = {
	.get_drvinfo = eth_get_drvinfo,
	.get_link = ethtool_op_get_link,
};

static void defer_kevent(struct eth_dev *dev, int flag)
{
	if (test_and_set_bit(flag, &dev->todo))
		return;
	if (!schedule_work(&dev->work))
		ERROR(dev, "kevent %d may have been dropped\n", flag);
	else
		DBG(dev, "kevent %d scheduled\n", flag);
}

static void rx_complete(struct usb_ep *ep, struct usb_request *req);

static int
rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags)
{
	struct usb_gadget *g = dev->gadget;
	struct sk_buff	*skb;
	int		retval = -ENOMEM;
	size_t		size = 0;
	struct usb_ep	*out;
	unsigned long	flags;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb)
		out = dev->port_usb->out_ep;
	else
		out = NULL;

	if (!out) {
		spin_unlock_irqrestore(&dev->lock, flags);
		return -ENOTCONN;
	}

	/* Padding up to RX_EXTRA handles minor disagreements with host.
	 * Normally we use the USB "terminate on short read" convention;
	 * so allow up to (N*maxpacket), since that memory is normally
	 * already allocated.  Some hardware doesn't deal well with short
	 * reads (e.g. DMA must be N*maxpacket), so for now don't trim a
	 * byte off the end (to force hardware errors on overflow).
	 *
	 * RNDIS uses internal framing, and explicitly allows senders to
	 * pad to end-of-packet.  That's potentially nice for speed, but
	 * means receivers can't recover lost synch on their own (because
	 * new packets don't only start after a short RX).
	 */
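	/* A worked example of the sizing below (illustrative, not exhaustive):
	 * with the default 1500-byte MTU and no function-driver header this
	 * asks for 14 + 1500 + 20 = 1534 bytes; on a UDC that sets
	 * quirk_ep_out_aligned_size with a 512-byte bulk maxpacket, that is
	 * rounded up to 1536.
	 */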
	size += sizeof(struct ethhdr) + dev->net->mtu + RX_EXTRA;
	size += dev->port_usb->header_len;

	if (g->quirk_ep_out_aligned_size) {
		size += out->maxpacket - 1;
		size -= size % out->maxpacket;
	}

	if (dev->port_usb->is_fixed)
		size = max_t(size_t, size, dev->port_usb->fixed_out_len);
	spin_unlock_irqrestore(&dev->lock, flags);

	skb = __netdev_alloc_skb(dev->net, size + NET_IP_ALIGN, gfp_flags);
	if (skb == NULL) {
		DBG(dev, "no rx skb\n");
		goto enomem;
	}

	/* Some platforms perform better when IP packets are aligned,
	 * but on at least one, checksumming fails otherwise.  Note:
	 * RNDIS headers involve variable numbers of LE32 values.
	 */
	if (likely(!dev->no_skb_reserve))
		skb_reserve(skb, NET_IP_ALIGN);

	req->buf = skb->data;
	req->length = size;
	req->complete = rx_complete;
	req->context = skb;

	retval = usb_ep_queue(out, req, gfp_flags);
	if (retval == -ENOMEM)
enomem:
		defer_kevent(dev, WORK_RX_MEMORY);
	if (retval) {
		DBG(dev, "rx submit --> %d\n", retval);
		if (skb)
			dev_kfree_skb_any(skb);
		spin_lock_irqsave(&dev->req_lock, flags);
		list_add(&req->list, &dev->rx_reqs);
		spin_unlock_irqrestore(&dev->req_lock, flags);
	}
	return retval;
}

static void rx_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct sk_buff	*skb = req->context, *skb2;
	struct eth_dev	*dev = ep->driver_data;
	int		status = req->status;

	switch (status) {

	/* normal completion */
	case 0:
		skb_put(skb, req->actual);

		if (dev->unwrap) {
			unsigned long	flags;

			spin_lock_irqsave(&dev->lock, flags);
			if (dev->port_usb) {
				status = dev->unwrap(dev->port_usb,
							skb,
							&dev->rx_frames);
			} else {
				dev_kfree_skb_any(skb);
				status = -ENOTCONN;
			}
			spin_unlock_irqrestore(&dev->lock, flags);
		} else {
			skb_queue_tail(&dev->rx_frames, skb);
		}
		skb = NULL;

		skb2 = skb_dequeue(&dev->rx_frames);
		while (skb2) {
			if (status < 0
					|| ETH_HLEN > skb2->len
					|| skb2->len > GETHER_MAX_ETH_FRAME_LEN) {
				dev->net->stats.rx_errors++;
				dev->net->stats.rx_length_errors++;
				DBG(dev, "rx length %d\n", skb2->len);
				dev_kfree_skb_any(skb2);
				goto next_frame;
			}
			skb2->protocol = eth_type_trans(skb2, dev->net);
			dev->net->stats.rx_packets++;
			dev->net->stats.rx_bytes += skb2->len;

			/* no buffer copies needed, unless hardware can't
			 * use skb buffers.
			 */
268 */ 269 status = netif_rx(skb2); 270 next_frame: 271 skb2 = skb_dequeue(&dev->rx_frames); 272 } 273 break; 274 275 /* software-driven interface shutdown */ 276 case -ECONNRESET: /* unlink */ 277 case -ESHUTDOWN: /* disconnect etc */ 278 VDBG(dev, "rx shutdown, code %d\n", status); 279 goto quiesce; 280 281 /* for hardware automagic (such as pxa) */ 282 case -ECONNABORTED: /* endpoint reset */ 283 DBG(dev, "rx %s reset\n", ep->name); 284 defer_kevent(dev, WORK_RX_MEMORY); 285 quiesce: 286 dev_kfree_skb_any(skb); 287 goto clean; 288 289 /* data overrun */ 290 case -EOVERFLOW: 291 dev->net->stats.rx_over_errors++; 292 fallthrough; 293 294 default: 295 dev->net->stats.rx_errors++; 296 DBG(dev, "rx status %d\n", status); 297 break; 298 } 299 300 if (skb) 301 dev_kfree_skb_any(skb); 302 if (!netif_running(dev->net)) { 303 clean: 304 spin_lock(&dev->req_lock); 305 list_add(&req->list, &dev->rx_reqs); 306 spin_unlock(&dev->req_lock); 307 req = NULL; 308 } 309 if (req) 310 rx_submit(dev, req, GFP_ATOMIC); 311 } 312 313 static int prealloc(struct list_head *list, struct usb_ep *ep, unsigned n) 314 { 315 unsigned i; 316 struct usb_request *req; 317 318 if (!n) 319 return -ENOMEM; 320 321 /* queue/recycle up to N requests */ 322 i = n; 323 list_for_each_entry(req, list, list) { 324 if (i-- == 0) 325 goto extra; 326 } 327 while (i--) { 328 req = usb_ep_alloc_request(ep, GFP_ATOMIC); 329 if (!req) 330 return list_empty(list) ? -ENOMEM : 0; 331 list_add(&req->list, list); 332 } 333 return 0; 334 335 extra: 336 /* free extras */ 337 for (;;) { 338 struct list_head *next; 339 340 next = req->list.next; 341 list_del(&req->list); 342 usb_ep_free_request(ep, req); 343 344 if (next == list) 345 break; 346 347 req = container_of(next, struct usb_request, list); 348 } 349 return 0; 350 } 351 352 static int alloc_requests(struct eth_dev *dev, struct gether *link, unsigned n) 353 { 354 int status; 355 356 spin_lock(&dev->req_lock); 357 status = prealloc(&dev->tx_reqs, link->in_ep, n); 358 if (status < 0) 359 goto fail; 360 status = prealloc(&dev->rx_reqs, link->out_ep, n); 361 if (status < 0) 362 goto fail; 363 goto done; 364 fail: 365 DBG(dev, "can't alloc requests\n"); 366 done: 367 spin_unlock(&dev->req_lock); 368 return status; 369 } 370 371 static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags) 372 { 373 struct usb_request *req; 374 unsigned long flags; 375 376 /* fill unused rxq slots with some skb */ 377 spin_lock_irqsave(&dev->req_lock, flags); 378 while (!list_empty(&dev->rx_reqs)) { 379 req = list_first_entry(&dev->rx_reqs, struct usb_request, list); 380 list_del_init(&req->list); 381 spin_unlock_irqrestore(&dev->req_lock, flags); 382 383 if (rx_submit(dev, req, gfp_flags) < 0) { 384 defer_kevent(dev, WORK_RX_MEMORY); 385 return; 386 } 387 388 spin_lock_irqsave(&dev->req_lock, flags); 389 } 390 spin_unlock_irqrestore(&dev->req_lock, flags); 391 } 392 393 static void eth_work(struct work_struct *work) 394 { 395 struct eth_dev *dev = container_of(work, struct eth_dev, work); 396 397 if (test_and_clear_bit(WORK_RX_MEMORY, &dev->todo)) { 398 if (netif_running(dev->net)) 399 rx_fill(dev, GFP_KERNEL); 400 } 401 402 if (dev->todo) 403 DBG(dev, "work done, flags = 0x%lx\n", dev->todo); 404 } 405 406 static void tx_complete(struct usb_ep *ep, struct usb_request *req) 407 { 408 struct sk_buff *skb = req->context; 409 struct eth_dev *dev = ep->driver_data; 410 411 switch (req->status) { 412 default: 413 dev->net->stats.tx_errors++; 414 VDBG(dev, "tx err %d\n", req->status); 415 fallthrough; 416 case 
	case -ECONNRESET:		/* unlink */
	case -ESHUTDOWN:		/* disconnect etc */
		dev_kfree_skb_any(skb);
		break;
	case 0:
		dev->net->stats.tx_bytes += skb->len;
		dev_consume_skb_any(skb);
	}
	dev->net->stats.tx_packets++;

	spin_lock(&dev->req_lock);
	list_add(&req->list, &dev->tx_reqs);
	spin_unlock(&dev->req_lock);

	atomic_dec(&dev->tx_qlen);
	if (netif_carrier_ok(dev->net))
		netif_wake_queue(dev->net);
}

static inline int is_promisc(u16 cdc_filter)
{
	return cdc_filter & USB_CDC_PACKET_TYPE_PROMISCUOUS;
}

static int ether_wakeup_host(struct gether *port)
{
	int			ret;
	struct usb_function	*func = &port->func;
	struct usb_gadget	*gadget = func->config->cdev->gadget;

	if (func->func_suspended)
		ret = usb_func_wakeup(func);
	else
		ret = usb_gadget_wakeup(gadget);

	return ret;
}

static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
					struct net_device *net)
{
	struct eth_dev		*dev = netdev_priv(net);
	int			length = 0;
	int			retval;
	struct usb_request	*req = NULL;
	unsigned long		flags;
	struct usb_ep		*in;
	u16			cdc_filter;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb) {
		in = dev->port_usb->in_ep;
		cdc_filter = dev->port_usb->cdc_filter;
	} else {
		in = NULL;
		cdc_filter = 0;
	}

	if (dev->port_usb && dev->port_usb->is_suspend) {
		DBG(dev, "Port suspended. Triggering wakeup\n");
		netif_stop_queue(net);
		spin_unlock_irqrestore(&dev->lock, flags);
		ether_wakeup_host(dev->port_usb);
		return NETDEV_TX_BUSY;
	}

	spin_unlock_irqrestore(&dev->lock, flags);

	if (!in) {
		if (skb)
			dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* apply outgoing CDC or RNDIS filters */
	if (skb && !is_promisc(cdc_filter)) {
		u8		*dest = skb->data;

		if (is_multicast_ether_addr(dest)) {
			u16	type;

			/* ignores USB_CDC_PACKET_TYPE_MULTICAST and host
			 * SET_ETHERNET_MULTICAST_FILTERS requests
			 */
			if (is_broadcast_ether_addr(dest))
				type = USB_CDC_PACKET_TYPE_BROADCAST;
			else
				type = USB_CDC_PACKET_TYPE_ALL_MULTICAST;
			if (!(cdc_filter & type)) {
				dev_kfree_skb_any(skb);
				return NETDEV_TX_OK;
			}
		}
		/* ignores USB_CDC_PACKET_TYPE_DIRECTED */
	}

	spin_lock_irqsave(&dev->req_lock, flags);
	/*
	 * this freelist can be empty if an interrupt triggered disconnect()
	 * and reconfigured the gadget (shutting down this queue) after the
	 * network stack decided to xmit but before we got the spinlock.
	 */
	if (list_empty(&dev->tx_reqs)) {
		spin_unlock_irqrestore(&dev->req_lock, flags);
		return NETDEV_TX_BUSY;
	}

	req = list_first_entry(&dev->tx_reqs, struct usb_request, list);
	list_del(&req->list);

	/* temporarily stop TX queue when the freelist empties */
	if (list_empty(&dev->tx_reqs))
		netif_stop_queue(net);
	spin_unlock_irqrestore(&dev->req_lock, flags);

	/* no buffer copies needed, unless the network stack did it
	 * or the hardware can't use skb buffers.
	 * or there's not enough space for extra headers we need
	 */
	if (dev->wrap) {
		unsigned long	flags;

		spin_lock_irqsave(&dev->lock, flags);
		if (dev->port_usb)
			skb = dev->wrap(dev->port_usb, skb);
		spin_unlock_irqrestore(&dev->lock, flags);
		if (!skb) {
			/* Multi frame CDC protocols may store the frame for
			 * later which is not a dropped frame.
			 */
			if (dev->port_usb &&
					dev->port_usb->supports_multi_frame)
				goto multiframe;
			goto drop;
		}
	}

	length = skb->len;
	req->buf = skb->data;
	req->context = skb;
	req->complete = tx_complete;

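	/* Example of the short-packet bookkeeping below: with a 512-byte
	 * bulk-in maxpacket and a 1024-byte transfer, req->zero is normally
	 * set so the UDC terminates the transfer with a zero-length packet;
	 * if the link can't use ZLPs (!dev->zlp), the length is bumped to
	 * 1025 instead so the final packet is short on its own.
	 */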
	/* NCM requires no zlp if transfer is dwNtbInMaxSize */
	if (dev->port_usb &&
	    dev->port_usb->is_fixed &&
	    length == dev->port_usb->fixed_in_len &&
	    (length % in->maxpacket) == 0)
		req->zero = 0;
	else
		req->zero = 1;

	/* Use zlp framing on tx for strict CDC-Ether conformance,
	 * though any robust network rx path ignores extra padding.
	 * And some hardware doesn't like to write zlps.
	 */
	if (req->zero && !dev->zlp && (length % in->maxpacket) == 0)
		length++;

	req->length = length;

	retval = usb_ep_queue(in, req, GFP_ATOMIC);
	switch (retval) {
	default:
		DBG(dev, "tx queue err %d\n", retval);
		break;
	case 0:
		netif_trans_update(net);
		atomic_inc(&dev->tx_qlen);
	}

	if (retval) {
		dev_kfree_skb_any(skb);
drop:
		dev->net->stats.tx_dropped++;
multiframe:
		spin_lock_irqsave(&dev->req_lock, flags);
		if (list_empty(&dev->tx_reqs))
			netif_start_queue(net);
		list_add(&req->list, &dev->tx_reqs);
		spin_unlock_irqrestore(&dev->req_lock, flags);
	}
	return NETDEV_TX_OK;
}

/*-------------------------------------------------------------------------*/

static void eth_start(struct eth_dev *dev, gfp_t gfp_flags)
{
	DBG(dev, "%s\n", __func__);

	/* fill the rx queue */
	rx_fill(dev, gfp_flags);

	/* and open the tx floodgates */
	atomic_set(&dev->tx_qlen, 0);
	netif_wake_queue(dev->net);
}

static int eth_open(struct net_device *net)
{
	struct eth_dev	*dev = netdev_priv(net);
	struct gether	*link;

	DBG(dev, "%s\n", __func__);
	if (netif_carrier_ok(dev->net))
		eth_start(dev, GFP_KERNEL);

	spin_lock_irq(&dev->lock);
	link = dev->port_usb;
	if (link && link->open)
		link->open(link);
	spin_unlock_irq(&dev->lock);

	return 0;
}

static int eth_stop(struct net_device *net)
{
	struct eth_dev	*dev = netdev_priv(net);
	unsigned long	flags;

	VDBG(dev, "%s\n", __func__);
	netif_stop_queue(net);

	DBG(dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld\n",
		dev->net->stats.rx_packets, dev->net->stats.tx_packets,
		dev->net->stats.rx_errors, dev->net->stats.tx_errors
		);

	/* ensure there are no more active requests */
	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb) {
		struct gether	*link = dev->port_usb;
		const struct usb_endpoint_descriptor *in;
		const struct usb_endpoint_descriptor *out;

		if (link->close)
			link->close(link);

		/* NOTE:  we have no abort-queue primitive we could use
		 * to cancel all pending I/O.  Instead, we disable then
		 * reenable the endpoints ... this idiom may leave toggle
		 * wrong, but that's a self-correcting error.
		 *
		 * REVISIT:  we *COULD* just let the transfers complete at
		 * their own pace; the network stack can handle old packets.
		 * For the moment we leave this here, since it works.
		 */
		in = link->in_ep->desc;
		out = link->out_ep->desc;
		usb_ep_disable(link->in_ep);
		usb_ep_disable(link->out_ep);
		if (netif_carrier_ok(net)) {
			DBG(dev, "host still using in/out endpoints\n");
			link->in_ep->desc = in;
			link->out_ep->desc = out;
			usb_ep_enable(link->in_ep);
			usb_ep_enable(link->out_ep);
		}
	}
	spin_unlock_irqrestore(&dev->lock, flags);

	return 0;
}

/*-------------------------------------------------------------------------*/

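/* get_ether_addr() expects twelve hex digits, optionally separated by ':'
 * or '.', e.g. "3a:c3:9e:95:dc:f1" or "3ac39e95dcf1".  When no string is
 * given, or the result is not a valid unicast address, a random address is
 * used instead and the nonzero return value reports that fallback.
 */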
static int get_ether_addr(const char *str, u8 *dev_addr)
{
	if (str) {
		unsigned	i;

		for (i = 0; i < 6; i++) {
			unsigned char num;

			if ((*str == '.') || (*str == ':'))
				str++;
			num = hex_to_bin(*str++) << 4;
			num |= hex_to_bin(*str++);
			dev_addr[i] = num;
		}
		if (is_valid_ether_addr(dev_addr))
			return 0;
	}
	eth_random_addr(dev_addr);
	return 1;
}

static int get_ether_addr_str(u8 dev_addr[ETH_ALEN], char *str, int len)
{
	if (len < 18)
		return -EINVAL;

	snprintf(str, len, "%pM", dev_addr);
	return 18;
}

static const struct net_device_ops eth_netdev_ops = {
	.ndo_open		= eth_open,
	.ndo_stop		= eth_stop,
	.ndo_start_xmit		= eth_start_xmit,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

static struct device_type gadget_type = {
	.name	= "gadget",
};

/*
 * gether_setup_name - initialize one ethernet-over-usb link
 * @g: gadget to associate with these links
 * @ethaddr: NULL, or a buffer in which the ethernet address of the
 *	host side of the link is recorded
 * @netname: name for network device (for example, "usb")
 * Context: may sleep
 *
 * This sets up the single network link that may be exported by a
 * gadget driver using this framework.  The link layer addresses are
 * set up using module parameters.
 *
 * Returns an eth_dev pointer on success, or an ERR_PTR on failure.
 */
struct eth_dev *gether_setup_name(struct usb_gadget *g,
		const char *dev_addr, const char *host_addr,
		u8 ethaddr[ETH_ALEN], unsigned qmult, const char *netname)
{
	struct eth_dev		*dev;
	struct net_device	*net;
	int			status;
	u8			addr[ETH_ALEN];

	net = alloc_etherdev(sizeof *dev);
	if (!net)
		return ERR_PTR(-ENOMEM);

	dev = netdev_priv(net);
	spin_lock_init(&dev->lock);
	spin_lock_init(&dev->req_lock);
	INIT_WORK(&dev->work, eth_work);
	INIT_LIST_HEAD(&dev->tx_reqs);
	INIT_LIST_HEAD(&dev->rx_reqs);

	skb_queue_head_init(&dev->rx_frames);

	/* network device setup */
	dev->net = net;
	dev->qmult = qmult;
	snprintf(net->name, sizeof(net->name), "%s%%d", netname);

	if (get_ether_addr(dev_addr, addr)) {
		net->addr_assign_type = NET_ADDR_RANDOM;
		dev_warn(&g->dev,
			"using random %s ethernet address\n", "self");
	} else {
		net->addr_assign_type = NET_ADDR_SET;
	}
	eth_hw_addr_set(net, addr);
	if (get_ether_addr(host_addr, dev->host_mac))
		dev_warn(&g->dev,
			"using random %s ethernet address\n", "host");

	if (ethaddr)
		memcpy(ethaddr, dev->host_mac, ETH_ALEN);

	net->netdev_ops = &eth_netdev_ops;

	net->ethtool_ops = &ops;

	/* MTU range: 14 - 15412 */
	net->min_mtu = ETH_HLEN;
	net->max_mtu = GETHER_MAX_MTU_SIZE;

	dev->gadget = g;
	SET_NETDEV_DEV(net, &g->dev);
	SET_NETDEV_DEVTYPE(net, &gadget_type);

	status = register_netdev(net);
	if (status < 0) {
		dev_dbg(&g->dev, "register_netdev failed, %d\n", status);
		free_netdev(net);
		dev = ERR_PTR(status);
	} else {
		INFO(dev, "MAC %pM\n", net->dev_addr);
		INFO(dev, "HOST MAC %pM\n", dev->host_mac);

		/*
		 * two kinds of host-initiated state changes:
		 *  - iff DATA transfer is active, carrier is "on"
		 *  - tx queueing enabled if open *and* carrier is "on"
		 */
		netif_carrier_off(net);
	}

	return dev;
}
EXPORT_SYMBOL_GPL(gether_setup_name);

struct net_device *gether_setup_name_default(const char *netname)
{
	struct net_device	*net;
	struct eth_dev		*dev;

	net = alloc_etherdev(sizeof(*dev));
	if (!net)
		return ERR_PTR(-ENOMEM);

	dev = netdev_priv(net);
	spin_lock_init(&dev->lock);
	spin_lock_init(&dev->req_lock);
	INIT_WORK(&dev->work, eth_work);
	INIT_LIST_HEAD(&dev->tx_reqs);
	INIT_LIST_HEAD(&dev->rx_reqs);

	skb_queue_head_init(&dev->rx_frames);

	/* network device setup */
	dev->net = net;
	dev->qmult = QMULT_DEFAULT;
	snprintf(net->name, sizeof(net->name), "%s%%d", netname);

	eth_random_addr(dev->dev_mac);

	/* by default we always have a random MAC address */
	net->addr_assign_type = NET_ADDR_RANDOM;

	eth_random_addr(dev->host_mac);

	net->netdev_ops = &eth_netdev_ops;

	net->ethtool_ops = &ops;
	SET_NETDEV_DEVTYPE(net, &gadget_type);

	/* MTU range: 14 - 15412 */
	net->min_mtu = ETH_HLEN;
	net->max_mtu = GETHER_MAX_MTU_SIZE;

	return net;
}
EXPORT_SYMBOL_GPL(gether_setup_name_default);

int gether_register_netdev(struct net_device *net)
{
	struct eth_dev *dev;
	struct usb_gadget *g;
	int status;

	if (!net->dev.parent)
		return -EINVAL;
	dev = netdev_priv(net);
	g = dev->gadget;

	eth_hw_addr_set(net, dev->dev_mac);

	status = register_netdev(net);
	if (status < 0) {
		dev_dbg(&g->dev, "register_netdev failed, %d\n", status);
		return status;
	} else {
		INFO(dev, "HOST MAC %pM\n", dev->host_mac);
		INFO(dev, "MAC %pM\n", dev->dev_mac);

		/* two kinds of host-initiated state changes:
		 *  - iff DATA transfer is active, carrier is "on"
		 *  - tx queueing enabled if open *and* carrier is "on"
		 */
		netif_carrier_off(net);
	}

	return status;
}
EXPORT_SYMBOL_GPL(gether_register_netdev);
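
/* A minimal sketch (not part of this driver) of how a function driver's
 * configfs instance typically uses the default setup path; the names and
 * error handling shown are illustrative only:
 *
 *	struct net_device *net = gether_setup_name_default("usb");
 *
 *	if (IS_ERR(net))
 *		return PTR_ERR(net);
 *	...
 *	// once the function is bound to a gadget:
 *	gether_set_gadget(net, cdev->gadget);
 *	status = gether_register_netdev(net);
 *	...
 *	// teardown, after the netdev was registered:
 *	gether_cleanup(netdev_priv(net));
 */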

void gether_set_gadget(struct net_device *net, struct usb_gadget *g)
{
	struct eth_dev *dev;

	dev = netdev_priv(net);
	dev->gadget = g;
	SET_NETDEV_DEV(net, &g->dev);
}
EXPORT_SYMBOL_GPL(gether_set_gadget);

int gether_set_dev_addr(struct net_device *net, const char *dev_addr)
{
	struct eth_dev *dev;
	u8 new_addr[ETH_ALEN];

	dev = netdev_priv(net);
	if (get_ether_addr(dev_addr, new_addr))
		return -EINVAL;
	memcpy(dev->dev_mac, new_addr, ETH_ALEN);
	net->addr_assign_type = NET_ADDR_SET;
	return 0;
}
EXPORT_SYMBOL_GPL(gether_set_dev_addr);

int gether_get_dev_addr(struct net_device *net, char *dev_addr, int len)
{
	struct eth_dev *dev;
	int ret;

	dev = netdev_priv(net);
	ret = get_ether_addr_str(dev->dev_mac, dev_addr, len);
	if (ret + 1 < len) {
		dev_addr[ret++] = '\n';
		dev_addr[ret] = '\0';
	}

	return ret;
}
EXPORT_SYMBOL_GPL(gether_get_dev_addr);

int gether_set_host_addr(struct net_device *net, const char *host_addr)
{
	struct eth_dev *dev;
	u8 new_addr[ETH_ALEN];

	dev = netdev_priv(net);
	if (get_ether_addr(host_addr, new_addr))
		return -EINVAL;
	memcpy(dev->host_mac, new_addr, ETH_ALEN);
	return 0;
}
EXPORT_SYMBOL_GPL(gether_set_host_addr);

int gether_get_host_addr(struct net_device *net, char *host_addr, int len)
{
	struct eth_dev *dev;
	int ret;

	dev = netdev_priv(net);
	ret = get_ether_addr_str(dev->host_mac, host_addr, len);
	if (ret + 1 < len) {
		host_addr[ret++] = '\n';
		host_addr[ret] = '\0';
	}

	return ret;
}
EXPORT_SYMBOL_GPL(gether_get_host_addr);
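
/* Unlike gether_get_host_addr() above, the _cdc variant below formats the
 * address as bare hex digits ("%pm", e.g. "3ac39e95dcf1"); that is the form
 * the CDC Ethernet function exposes to the host in its MAC address string
 * descriptor.
 */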
int gether_get_host_addr_cdc(struct net_device *net, char *host_addr, int len)
{
	struct eth_dev *dev;

	if (len < 13)
		return -EINVAL;

	dev = netdev_priv(net);
	snprintf(host_addr, len, "%pm", dev->host_mac);

	return strlen(host_addr);
}
EXPORT_SYMBOL_GPL(gether_get_host_addr_cdc);

void gether_get_host_addr_u8(struct net_device *net, u8 host_mac[ETH_ALEN])
{
	struct eth_dev *dev;

	dev = netdev_priv(net);
	memcpy(host_mac, dev->host_mac, ETH_ALEN);
}
EXPORT_SYMBOL_GPL(gether_get_host_addr_u8);

void gether_set_qmult(struct net_device *net, unsigned qmult)
{
	struct eth_dev *dev;

	dev = netdev_priv(net);
	dev->qmult = qmult;
}
EXPORT_SYMBOL_GPL(gether_set_qmult);

unsigned gether_get_qmult(struct net_device *net)
{
	struct eth_dev *dev;

	dev = netdev_priv(net);
	return dev->qmult;
}
EXPORT_SYMBOL_GPL(gether_get_qmult);

int gether_get_ifname(struct net_device *net, char *name, int len)
{
	struct eth_dev *dev = netdev_priv(net);
	int ret;

	rtnl_lock();
	ret = scnprintf(name, len, "%s\n",
			dev->ifname_set ? net->name : netdev_name(net));
	rtnl_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(gether_get_ifname);

int gether_set_ifname(struct net_device *net, const char *name, int len)
{
	struct eth_dev *dev = netdev_priv(net);
	char tmp[IFNAMSIZ];
	const char *p;

	if (name[len - 1] == '\n')
		len--;

	if (len >= sizeof(tmp))
		return -E2BIG;

	strscpy(tmp, name, len + 1);
	if (!dev_valid_name(tmp))
		return -EINVAL;

	/* Require exactly one %d, so binding will not fail with EEXIST. */
	p = strchr(name, '%');
	if (!p || p[1] != 'd' || strchr(p + 2, '%'))
		return -EINVAL;

	strncpy(net->name, tmp, sizeof(net->name));
	dev->ifname_set = true;

	return 0;
}
EXPORT_SYMBOL_GPL(gether_set_ifname);

void gether_suspend(struct gether *link)
{
	struct eth_dev *dev = link->ioport;
	unsigned long flags;

	if (!dev)
		return;

	if (atomic_read(&dev->tx_qlen)) {
		/*
		 * There is a transfer in progress. So we trigger a remote
		 * wakeup to inform the host.
		 */
		ether_wakeup_host(dev->port_usb);
		return;
	}
	spin_lock_irqsave(&dev->lock, flags);
	link->is_suspend = true;
	spin_unlock_irqrestore(&dev->lock, flags);
}
EXPORT_SYMBOL_GPL(gether_suspend);

void gether_resume(struct gether *link)
{
	struct eth_dev *dev = link->ioport;
	unsigned long flags;

	if (!dev)
		return;

	if (netif_queue_stopped(dev->net))
		netif_start_queue(dev->net);

	spin_lock_irqsave(&dev->lock, flags);
	link->is_suspend = false;
	spin_unlock_irqrestore(&dev->lock, flags);
}
EXPORT_SYMBOL_GPL(gether_resume);

/*
 * gether_cleanup - remove Ethernet-over-USB device
 * Context: may sleep
 *
 * This is called to free all resources allocated by @gether_setup().
 */
void gether_cleanup(struct eth_dev *dev)
{
	if (!dev)
		return;

	unregister_netdev(dev->net);
	flush_work(&dev->work);
	free_netdev(dev->net);
}
EXPORT_SYMBOL_GPL(gether_cleanup);
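
/* A minimal sketch (not part of this driver) of how a function driver wires
 * gether_connect()/gether_disconnect() into its alt-setting handling; the
 * "my_" names are illustrative and error handling is elided:
 *
 *	static int my_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
 *	{
 *		struct f_my_eth *my = func_to_my_eth(f);
 *		struct net_device *net;
 *
 *		// endpoint descriptors for the current speed already selected
 *		net = gether_connect(&my->port);
 *		return IS_ERR(net) ? PTR_ERR(net) : 0;
 *	}
 *
 *	static void my_disable(struct usb_function *f)
 *	{
 *		gether_disconnect(&func_to_my_eth(f)->port);
 *	}
 */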

/**
 * gether_connect - notify network layer that USB link is active
 * @link: the USB link, set up with endpoints, descriptors matching
 *	current device speed, and any framing wrapper(s) set up.
 * Context: irqs blocked
 *
 * This is called to activate endpoints and let the network layer know
 * the connection is active ("carrier detect").  It may cause the I/O
 * queues to open and start letting network packets flow, but will in
 * any case activate the endpoints so that they respond properly to the
 * USB host.
 *
 * Verify net_device pointer returned using IS_ERR().  If it doesn't
 * indicate some error code (negative errno), ep->driver_data values
 * have been overwritten.
 */
struct net_device *gether_connect(struct gether *link)
{
	struct eth_dev		*dev = link->ioport;
	int			result = 0;

	if (!dev)
		return ERR_PTR(-EINVAL);

	link->in_ep->driver_data = dev;
	result = usb_ep_enable(link->in_ep);
	if (result != 0) {
		DBG(dev, "enable %s --> %d\n",
			link->in_ep->name, result);
		goto fail0;
	}

	link->out_ep->driver_data = dev;
	result = usb_ep_enable(link->out_ep);
	if (result != 0) {
		DBG(dev, "enable %s --> %d\n",
			link->out_ep->name, result);
		goto fail1;
	}

	if (result == 0)
		result = alloc_requests(dev, link, qlen(dev->gadget,
					dev->qmult));

	if (result == 0) {
		dev->zlp = link->is_zlp_ok;
		dev->no_skb_reserve = gadget_avoids_skb_reserve(dev->gadget);
		DBG(dev, "qlen %d\n", qlen(dev->gadget, dev->qmult));

		dev->header_len = link->header_len;
		dev->unwrap = link->unwrap;
		dev->wrap = link->wrap;

		spin_lock(&dev->lock);
		dev->port_usb = link;
		if (netif_running(dev->net)) {
			if (link->open)
				link->open(link);
		} else {
			if (link->close)
				link->close(link);
		}
		spin_unlock(&dev->lock);

		netif_carrier_on(dev->net);
		if (netif_running(dev->net))
			eth_start(dev, GFP_ATOMIC);

	/* on error, disable any endpoints  */
	} else {
		(void) usb_ep_disable(link->out_ep);
fail1:
		(void) usb_ep_disable(link->in_ep);
	}
fail0:
	/* caller is responsible for cleanup on error */
	if (result < 0)
		return ERR_PTR(result);
	return dev->net;
}
EXPORT_SYMBOL_GPL(gether_connect);

/**
 * gether_disconnect - notify network layer that USB link is inactive
 * @link: the USB link, on which gether_connect() was called
 * Context: irqs blocked
 *
 * This is called to deactivate endpoints and let the network layer know
 * the connection went inactive ("no carrier").
 *
 * On return, the state is as if gether_connect() had never been called.
 * The endpoints are inactive, and accordingly without active USB I/O.
 * Pointers to endpoint descriptors and endpoint private data are nulled.
 */
void gether_disconnect(struct gether *link)
{
	struct eth_dev		*dev = link->ioport;
	struct usb_request	*req;

	WARN_ON(!dev);
	if (!dev)
		return;

	DBG(dev, "%s\n", __func__);

	netif_stop_queue(dev->net);
	netif_carrier_off(dev->net);

	/* disable endpoints, forcing (synchronous) completion
	 * of all pending i/o.  then free the request objects
	 * and forget about the endpoints.
	 */
	usb_ep_disable(link->in_ep);
	spin_lock(&dev->req_lock);
	while (!list_empty(&dev->tx_reqs)) {
		req = list_first_entry(&dev->tx_reqs, struct usb_request, list);
		list_del(&req->list);

		spin_unlock(&dev->req_lock);
		usb_ep_free_request(link->in_ep, req);
		spin_lock(&dev->req_lock);
	}
	spin_unlock(&dev->req_lock);
	link->in_ep->desc = NULL;

	usb_ep_disable(link->out_ep);
	spin_lock(&dev->req_lock);
	while (!list_empty(&dev->rx_reqs)) {
		req = list_first_entry(&dev->rx_reqs, struct usb_request, list);
		list_del(&req->list);

		spin_unlock(&dev->req_lock);
		usb_ep_free_request(link->out_ep, req);
		spin_lock(&dev->req_lock);
	}
	spin_unlock(&dev->req_lock);
	link->out_ep->desc = NULL;

	/* finish forgetting about this USB link episode */
	dev->header_len = 0;
	dev->unwrap = NULL;
	dev->wrap = NULL;

	spin_lock(&dev->lock);
	dev->port_usb = NULL;
	link->is_suspend = false;
	spin_unlock(&dev->lock);
}
EXPORT_SYMBOL_GPL(gether_disconnect);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Brownell");