// SPDX-License-Identifier: GPL-2.0+
/*
 * u_ether.c -- Ethernet-over-USB link layer utilities for Gadget stack
 *
 * Copyright (C) 2003-2005,2008 David Brownell
 * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
 * Copyright (C) 2008 Nokia Corporation
 */

/* #define VERBOSE_DEBUG */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/device.h>
#include <linux/ctype.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>

#include "u_ether.h"


/*
 * This component encapsulates the Ethernet link glue needed to provide
 * one (!) network link through the USB gadget stack, normally "usb0".
 *
 * The control and data models are handled by the function driver which
 * connects to this code, such as CDC Ethernet (ECM or EEM),
 * "CDC Subset", or RNDIS.  That includes all descriptor and endpoint
 * management.
 *
 * Link level addressing is handled by this component using module
 * parameters; if no such parameters are provided, random link level
 * addresses are used.  Each end of the link uses one address.  The
 * host end address is exported in various ways, and is often recorded
 * in configuration databases.
 *
 * The driver which assembles each configuration using such a link is
 * responsible for ensuring that each configuration includes at most one
 * instance of this network link.  (The network layer provides ways for
 * this single "physical" link to be used by multiple virtual links.)
 */

#define UETH__VERSION	"29-May-2008"

/* Experiments show that both Linux and Windows hosts allow up to 16k
 * frame sizes.  Set the max MTU size to 15k+52 to prevent allocating 32k
 * blocks and still have efficient handling. */
#define GETHER_MAX_MTU_SIZE 15412
#define GETHER_MAX_ETH_FRAME_LEN (GETHER_MAX_MTU_SIZE + ETH_HLEN)

struct eth_dev {
	/* lock is held while accessing port_usb
	 */
	spinlock_t		lock;
	struct gether		*port_usb;

	struct net_device	*net;
	struct usb_gadget	*gadget;

	spinlock_t		req_lock;	/* guard {rx,tx}_reqs */
	struct list_head	tx_reqs, rx_reqs;
	atomic_t		tx_qlen;

	struct sk_buff_head	rx_frames;

	unsigned		qmult;

	unsigned		header_len;
	struct sk_buff		*(*wrap)(struct gether *, struct sk_buff *skb);
	int			(*unwrap)(struct gether *,
						struct sk_buff *skb,
						struct sk_buff_head *list);

	struct work_struct	work;

	unsigned long		todo;
#define	WORK_RX_MEMORY		0

	bool			zlp;
	bool			no_skb_reserve;
	u8			host_mac[ETH_ALEN];
	u8			dev_mac[ETH_ALEN];
};

/*-------------------------------------------------------------------------*/

#define RX_EXTRA	20	/* bytes guarding against rx overflows */

#define DEFAULT_QLEN	2	/* double buffering by default */

/* for dual-speed hardware, use deeper queues at high/super speed */
static inline int qlen(struct usb_gadget *gadget, unsigned qmult)
{
	if (gadget_is_dualspeed(gadget) && (gadget->speed == USB_SPEED_HIGH ||
					    gadget->speed >= USB_SPEED_SUPER))
		return qmult * DEFAULT_QLEN;
	else
		return DEFAULT_QLEN;
}
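/*
 * Worked example (illustrative only, assuming the QMULT_DEFAULT of 5
 * from u_ether.h): a dual-speed gadget enumerated at high or super
 * speed gets qlen() == 5 * DEFAULT_QLEN == 10 requests per direction,
 * while the same gadget enumerated at full speed falls back to plain
 * double buffering:
 *
 *	unsigned depth = qlen(dev->gadget, dev->qmult);
 *	// 10 at high/super speed, 2 at full speed
 */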
/*-------------------------------------------------------------------------*/

/* REVISIT there must be a better way than having two sets
 * of debug calls ...
 */

#undef DBG
#undef VDBG
#undef ERROR
#undef INFO

#define xprintk(d, level, fmt, args...) \
	printk(level "%s: " fmt , (d)->net->name , ## args)

#ifdef DEBUG
#undef DEBUG
#define DBG(dev, fmt, args...) \
	xprintk(dev , KERN_DEBUG , fmt , ## args)
#else
#define DBG(dev, fmt, args...) \
	do { } while (0)
#endif /* DEBUG */

#ifdef VERBOSE_DEBUG
#define VDBG	DBG
#else
#define VDBG(dev, fmt, args...) \
	do { } while (0)
#endif /* VERBOSE_DEBUG */

#define ERROR(dev, fmt, args...) \
	xprintk(dev , KERN_ERR , fmt , ## args)
#define INFO(dev, fmt, args...) \
	xprintk(dev , KERN_INFO , fmt , ## args)

/*-------------------------------------------------------------------------*/

/* NETWORK DRIVER HOOKUP (to the layer above this driver) */

static void eth_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *p)
{
	struct eth_dev *dev = netdev_priv(net);

	strlcpy(p->driver, "g_ether", sizeof(p->driver));
	strlcpy(p->version, UETH__VERSION, sizeof(p->version));
	strlcpy(p->fw_version, dev->gadget->name, sizeof(p->fw_version));
	strlcpy(p->bus_info, dev_name(&dev->gadget->dev), sizeof(p->bus_info));
}

/* REVISIT can also support:
 *   - WOL (by tracking suspends and issuing remote wakeup)
 *   - msglevel (implies updated messaging)
 *   - ... probably more ethtool ops
 */

static const struct ethtool_ops ops = {
	.get_drvinfo = eth_get_drvinfo,
	.get_link = ethtool_op_get_link,
};

static void defer_kevent(struct eth_dev *dev, int flag)
{
	if (test_and_set_bit(flag, &dev->todo))
		return;
	if (!schedule_work(&dev->work))
		ERROR(dev, "kevent %d may have been dropped\n", flag);
	else
		DBG(dev, "kevent %d scheduled\n", flag);
}

static void rx_complete(struct usb_ep *ep, struct usb_request *req);

static int
rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags)
{
	struct usb_gadget *g = dev->gadget;
	struct sk_buff	*skb;
	int		retval = -ENOMEM;
	size_t		size = 0;
	struct usb_ep	*out;
	unsigned long	flags;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb)
		out = dev->port_usb->out_ep;
	else
		out = NULL;

	if (!out) {
		spin_unlock_irqrestore(&dev->lock, flags);
		return -ENOTCONN;
	}

	/* Padding up to RX_EXTRA handles minor disagreements with host.
	 * Normally we use the USB "terminate on short read" convention;
	 * so allow up to (N*maxpacket), since that memory is normally
	 * already allocated.  Some hardware doesn't deal well with short
	 * reads (e.g. DMA must be N*maxpacket), so for now don't trim a
	 * byte off the end (to force hardware errors on overflow).
	 *
	 * RNDIS uses internal framing, and explicitly allows senders to
	 * pad to end-of-packet.  That's potentially nice for speed, but
	 * means receivers can't recover lost synch on their own (because
	 * new packets don't only start after a short RX).
	 */
	size += sizeof(struct ethhdr) + dev->net->mtu + RX_EXTRA;
	size += dev->port_usb->header_len;

	if (g->quirk_ep_out_aligned_size) {
		size += out->maxpacket - 1;
		size -= size % out->maxpacket;
	}

	if (dev->port_usb->is_fixed)
		size = max_t(size_t, size, dev->port_usb->fixed_out_len);
	spin_unlock_irqrestore(&dev->lock, flags);

	skb = __netdev_alloc_skb(dev->net, size + NET_IP_ALIGN, gfp_flags);
	if (skb == NULL) {
		DBG(dev, "no rx skb\n");
		goto enomem;
	}

	/* Some platforms perform better when IP packets are aligned,
	 * but on at least one, checksumming fails otherwise.  Note:
	 * RNDIS headers involve variable numbers of LE32 values.
	 */
	if (likely(!dev->no_skb_reserve))
		skb_reserve(skb, NET_IP_ALIGN);

	req->buf = skb->data;
	req->length = size;
	req->complete = rx_complete;
	req->context = skb;

	retval = usb_ep_queue(out, req, gfp_flags);
	if (retval == -ENOMEM)
enomem:
		defer_kevent(dev, WORK_RX_MEMORY);
	if (retval) {
		DBG(dev, "rx submit --> %d\n", retval);
		if (skb)
			dev_kfree_skb_any(skb);
		spin_lock_irqsave(&dev->req_lock, flags);
		list_add(&req->list, &dev->rx_reqs);
		spin_unlock_irqrestore(&dev->req_lock, flags);
	}
	return retval;
}
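/*
 * Worked example of the sizing above (illustrative only): a plain CDC
 * ECM link with the default 1500-byte MTU and no extra header
 * (header_len == 0) targets 14 (ethhdr) + 1500 + 20 (RX_EXTRA) = 1534
 * bytes.  A UDC that sets quirk_ep_out_aligned_size and runs at high
 * speed (512-byte bulk maxpacket) rounds that up to 3 * 512 = 1536
 * bytes, so a short packet still terminates the transfer cleanly.
 */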
static void rx_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct sk_buff	*skb = req->context, *skb2;
	struct eth_dev	*dev = ep->driver_data;
	int		status = req->status;

	switch (status) {

	/* normal completion */
	case 0:
		skb_put(skb, req->actual);

		if (dev->unwrap) {
			unsigned long	flags;

			spin_lock_irqsave(&dev->lock, flags);
			if (dev->port_usb) {
				status = dev->unwrap(dev->port_usb,
							skb,
							&dev->rx_frames);
			} else {
				dev_kfree_skb_any(skb);
				status = -ENOTCONN;
			}
			spin_unlock_irqrestore(&dev->lock, flags);
		} else {
			skb_queue_tail(&dev->rx_frames, skb);
		}
		skb = NULL;

		skb2 = skb_dequeue(&dev->rx_frames);
		while (skb2) {
			if (status < 0
					|| ETH_HLEN > skb2->len
					|| skb2->len > GETHER_MAX_ETH_FRAME_LEN) {
				dev->net->stats.rx_errors++;
				dev->net->stats.rx_length_errors++;
				DBG(dev, "rx length %d\n", skb2->len);
				dev_kfree_skb_any(skb2);
				goto next_frame;
			}
			skb2->protocol = eth_type_trans(skb2, dev->net);
			dev->net->stats.rx_packets++;
			dev->net->stats.rx_bytes += skb2->len;

			/* no buffer copies needed, unless hardware can't
			 * use skb buffers.
			 */
			status = netif_rx(skb2);
next_frame:
			skb2 = skb_dequeue(&dev->rx_frames);
		}
		break;

	/* software-driven interface shutdown */
	case -ECONNRESET:		/* unlink */
	case -ESHUTDOWN:		/* disconnect etc */
		VDBG(dev, "rx shutdown, code %d\n", status);
		goto quiesce;

	/* for hardware automagic (such as pxa) */
	case -ECONNABORTED:		/* endpoint reset */
		DBG(dev, "rx %s reset\n", ep->name);
		defer_kevent(dev, WORK_RX_MEMORY);
quiesce:
		dev_kfree_skb_any(skb);
		goto clean;

	/* data overrun */
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		fallthrough;

	default:
		dev->net->stats.rx_errors++;
		DBG(dev, "rx status %d\n", status);
		break;
	}

	if (skb)
		dev_kfree_skb_any(skb);
	if (!netif_running(dev->net)) {
clean:
		spin_lock(&dev->req_lock);
		list_add(&req->list, &dev->rx_reqs);
		spin_unlock(&dev->req_lock);
		req = NULL;
	}
	if (req)
		rx_submit(dev, req, GFP_ATOMIC);
}
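/*
 * Sketch of the unwrap() contract used above (illustrative only, not
 * part of this file): a hypothetical framing scheme whose header is a
 * single little-endian 16-bit frame length.  The callback runs under
 * dev->lock, consumes the raw USB skb, and queues any complete
 * Ethernet frames onto 'list'; a negative return is treated as an rx
 * error for every queued frame.
 *
 *	static int demo_unwrap(struct gether *port, struct sk_buff *skb,
 *			       struct sk_buff_head *list)
 *	{
 *		u16 len;
 *
 *		if (skb->len < sizeof(__le16))
 *			goto err;
 *		len = get_unaligned_le16(skb->data);
 *		skb_pull(skb, sizeof(__le16));
 *		if (len > skb->len)
 *			goto err;
 *		skb_trim(skb, len);
 *		skb_queue_tail(list, skb);
 *		return 0;
 *	err:
 *		dev_kfree_skb_any(skb);
 *		return -EINVAL;
 *	}
 */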
static int prealloc(struct list_head *list, struct usb_ep *ep, unsigned n)
{
	unsigned		i;
	struct usb_request	*req;

	if (!n)
		return -ENOMEM;

	/* queue/recycle up to N requests */
	i = n;
	list_for_each_entry(req, list, list) {
		if (i-- == 0)
			goto extra;
	}
	while (i--) {
		req = usb_ep_alloc_request(ep, GFP_ATOMIC);
		if (!req)
			return list_empty(list) ? -ENOMEM : 0;
		list_add(&req->list, list);
	}
	return 0;

extra:
	/* free extras */
	for (;;) {
		struct list_head	*next;

		next = req->list.next;
		list_del(&req->list);
		usb_ep_free_request(ep, req);

		if (next == list)
			break;

		req = container_of(next, struct usb_request, list);
	}
	return 0;
}

static int alloc_requests(struct eth_dev *dev, struct gether *link, unsigned n)
{
	int	status;

	spin_lock(&dev->req_lock);
	status = prealloc(&dev->tx_reqs, link->in_ep, n);
	if (status < 0)
		goto fail;
	status = prealloc(&dev->rx_reqs, link->out_ep, n);
	if (status < 0)
		goto fail;
	goto done;
fail:
	DBG(dev, "can't alloc requests\n");
done:
	spin_unlock(&dev->req_lock);
	return status;
}
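/*
 * Behavior note (illustrative): prealloc() sizes the list idempotently.
 * Calling it with n == 10 on an empty list allocates ten requests;
 * calling it again with n == 2 after a reconnect at full speed walks
 * the first two entries and frees the remaining eight, so repeated
 * connect/disconnect cycles never accumulate or leak requests.
 */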
static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags)
{
	struct usb_request	*req;
	unsigned long		flags;

	/* fill unused rxq slots with some skb */
	spin_lock_irqsave(&dev->req_lock, flags);
	while (!list_empty(&dev->rx_reqs)) {
		req = list_first_entry(&dev->rx_reqs, struct usb_request, list);
		list_del_init(&req->list);
		spin_unlock_irqrestore(&dev->req_lock, flags);

		if (rx_submit(dev, req, gfp_flags) < 0) {
			defer_kevent(dev, WORK_RX_MEMORY);
			return;
		}

		spin_lock_irqsave(&dev->req_lock, flags);
	}
	spin_unlock_irqrestore(&dev->req_lock, flags);
}

static void eth_work(struct work_struct *work)
{
	struct eth_dev	*dev = container_of(work, struct eth_dev, work);

	if (test_and_clear_bit(WORK_RX_MEMORY, &dev->todo)) {
		if (netif_running(dev->net))
			rx_fill(dev, GFP_KERNEL);
	}

	if (dev->todo)
		DBG(dev, "work done, flags = 0x%lx\n", dev->todo);
}

static void tx_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct sk_buff	*skb = req->context;
	struct eth_dev	*dev = ep->driver_data;

	switch (req->status) {
	default:
		dev->net->stats.tx_errors++;
		VDBG(dev, "tx err %d\n", req->status);
		fallthrough;
	case -ECONNRESET:		/* unlink */
	case -ESHUTDOWN:		/* disconnect etc */
		dev_kfree_skb_any(skb);
		break;
	case 0:
		dev->net->stats.tx_bytes += skb->len;
		dev_consume_skb_any(skb);
	}
	dev->net->stats.tx_packets++;

	spin_lock(&dev->req_lock);
	list_add(&req->list, &dev->tx_reqs);
	spin_unlock(&dev->req_lock);

	atomic_dec(&dev->tx_qlen);
	if (netif_carrier_ok(dev->net))
		netif_wake_queue(dev->net);
}

static inline int is_promisc(u16 cdc_filter)
{
	return cdc_filter & USB_CDC_PACKET_TYPE_PROMISCUOUS;
}

static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
					struct net_device *net)
{
	struct eth_dev		*dev = netdev_priv(net);
	int			length = 0;
	int			retval;
	struct usb_request	*req = NULL;
	unsigned long		flags;
	struct usb_ep		*in;
	u16			cdc_filter;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb) {
		in = dev->port_usb->in_ep;
		cdc_filter = dev->port_usb->cdc_filter;
	} else {
		in = NULL;
		cdc_filter = 0;
	}
	spin_unlock_irqrestore(&dev->lock, flags);

	if (skb && !in) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* apply outgoing CDC or RNDIS filters */
	if (skb && !is_promisc(cdc_filter)) {
		u8		*dest = skb->data;

		if (is_multicast_ether_addr(dest)) {
			u16	type;

			/* ignores USB_CDC_PACKET_TYPE_MULTICAST and host
			 * SET_ETHERNET_MULTICAST_FILTERS requests
			 */
			if (is_broadcast_ether_addr(dest))
				type = USB_CDC_PACKET_TYPE_BROADCAST;
			else
				type = USB_CDC_PACKET_TYPE_ALL_MULTICAST;
			if (!(cdc_filter & type)) {
				dev_kfree_skb_any(skb);
				return NETDEV_TX_OK;
			}
		}
		/* ignores USB_CDC_PACKET_TYPE_DIRECTED */
	}

	spin_lock_irqsave(&dev->req_lock, flags);
	/*
	 * this freelist can be empty if an interrupt triggered disconnect()
	 * and reconfigured the gadget (shutting down this queue) after the
	 * network stack decided to xmit but before we got the spinlock.
	 */
	if (list_empty(&dev->tx_reqs)) {
		spin_unlock_irqrestore(&dev->req_lock, flags);
		return NETDEV_TX_BUSY;
	}

	req = list_first_entry(&dev->tx_reqs, struct usb_request, list);
	list_del(&req->list);

	/* temporarily stop TX queue when the freelist empties */
	if (list_empty(&dev->tx_reqs))
		netif_stop_queue(net);
	spin_unlock_irqrestore(&dev->req_lock, flags);

	/* no buffer copies needed, unless the network stack did it
	 * or the hardware can't use skb buffers,
	 * or there's not enough space for extra headers we need
	 */
	if (dev->wrap) {
		unsigned long	flags;

		spin_lock_irqsave(&dev->lock, flags);
		if (dev->port_usb)
			skb = dev->wrap(dev->port_usb, skb);
		spin_unlock_irqrestore(&dev->lock, flags);
		if (!skb) {
			/* Multi frame CDC protocols may store the frame for
			 * later which is not a dropped frame.
			 */
			if (dev->port_usb &&
					dev->port_usb->supports_multi_frame)
				goto multiframe;
			goto drop;
		}
	}

	length = skb->len;
	req->buf = skb->data;
	req->context = skb;
	req->complete = tx_complete;

	/* NCM requires no zlp if transfer is dwNtbInMaxSize */
	if (dev->port_usb &&
	    dev->port_usb->is_fixed &&
	    length == dev->port_usb->fixed_in_len &&
	    (length % in->maxpacket) == 0)
		req->zero = 0;
	else
		req->zero = 1;

	/* use zlp framing on tx for strict CDC-Ether conformance,
	 * though any robust network rx path ignores extra padding,
	 * and some hardware doesn't like to write zlps.
	 */
	if (req->zero && !dev->zlp && (length % in->maxpacket) == 0)
		length++;

	req->length = length;

	retval = usb_ep_queue(in, req, GFP_ATOMIC);
	switch (retval) {
	default:
		DBG(dev, "tx queue err %d\n", retval);
		break;
	case 0:
		netif_trans_update(net);
		atomic_inc(&dev->tx_qlen);
	}

	if (retval) {
		dev_kfree_skb_any(skb);
drop:
		dev->net->stats.tx_dropped++;
multiframe:
		spin_lock_irqsave(&dev->req_lock, flags);
		if (list_empty(&dev->tx_reqs))
			netif_start_queue(net);
		list_add(&req->list, &dev->tx_reqs);
		spin_unlock_irqrestore(&dev->req_lock, flags);
	}
	return NETDEV_TX_OK;
}
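/*
 * Worked example for the zlp logic above (illustrative only): on a
 * high-speed bulk endpoint (maxpacket == 512), a 1024-byte frame ends
 * exactly on a packet boundary.  A controller that can send zlps
 * (dev->zlp set) queues 1024 bytes with req->zero = 1, and the UDC
 * appends a zero length packet as the short-transfer terminator.  A
 * controller that can't write zlps instead sends 1025 bytes: the extra
 * pad byte makes the final packet short, and a robust host rx path
 * drops the padding when it parses the Ethernet frame length.
 */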
/*-------------------------------------------------------------------------*/

static void eth_start(struct eth_dev *dev, gfp_t gfp_flags)
{
	DBG(dev, "%s\n", __func__);

	/* fill the rx queue */
	rx_fill(dev, gfp_flags);

	/* and open the tx floodgates */
	atomic_set(&dev->tx_qlen, 0);
	netif_wake_queue(dev->net);
}

static int eth_open(struct net_device *net)
{
	struct eth_dev	*dev = netdev_priv(net);
	struct gether	*link;

	DBG(dev, "%s\n", __func__);
	if (netif_carrier_ok(dev->net))
		eth_start(dev, GFP_KERNEL);

	spin_lock_irq(&dev->lock);
	link = dev->port_usb;
	if (link && link->open)
		link->open(link);
	spin_unlock_irq(&dev->lock);

	return 0;
}

static int eth_stop(struct net_device *net)
{
	struct eth_dev	*dev = netdev_priv(net);
	unsigned long	flags;

	VDBG(dev, "%s\n", __func__);
	netif_stop_queue(net);

	DBG(dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld\n",
		dev->net->stats.rx_packets, dev->net->stats.tx_packets,
		dev->net->stats.rx_errors, dev->net->stats.tx_errors
		);

	/* ensure there are no more active requests */
	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb) {
		struct gether	*link = dev->port_usb;
		const struct usb_endpoint_descriptor *in;
		const struct usb_endpoint_descriptor *out;

		if (link->close)
			link->close(link);

		/* NOTE:  we have no abort-queue primitive we could use
		 * to cancel all pending I/O.  Instead, we disable then
		 * reenable the endpoints ... this idiom may leave toggle
		 * wrong, but that's a self-correcting error.
		 *
		 * REVISIT:  we *COULD* just let the transfers complete at
		 * their own pace; the network stack can handle old packets.
		 * For the moment we leave this here, since it works.
		 */
		in = link->in_ep->desc;
		out = link->out_ep->desc;
		usb_ep_disable(link->in_ep);
		usb_ep_disable(link->out_ep);
		if (netif_carrier_ok(net)) {
			DBG(dev, "host still using in/out endpoints\n");
			link->in_ep->desc = in;
			link->out_ep->desc = out;
			usb_ep_enable(link->in_ep);
			usb_ep_enable(link->out_ep);
		}
	}
	spin_unlock_irqrestore(&dev->lock, flags);

	return 0;
}
/*-------------------------------------------------------------------------*/

static int get_ether_addr(const char *str, u8 *dev_addr)
{
	if (str) {
		unsigned	i;

		for (i = 0; i < 6; i++) {
			unsigned char num;

			if ((*str == '.') || (*str == ':'))
				str++;
			num = hex_to_bin(*str++) << 4;
			num |= hex_to_bin(*str++);
			dev_addr[i] = num;
		}
		if (is_valid_ether_addr(dev_addr))
			return 0;
	}
	eth_random_addr(dev_addr);
	return 1;
}

static int get_ether_addr_str(u8 dev_addr[ETH_ALEN], char *str, int len)
{
	if (len < 18)
		return -EINVAL;

	snprintf(str, len, "%pM", dev_addr);
	return 18;
}
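/*
 * Example inputs (illustrative only): get_ether_addr() accepts twelve
 * hex digits with optional '.' or ':' separators, so both
 * "01:23:45:67:89:ab" and "0123456789ab" parse to
 * {0x01, 0x23, 0x45, 0x67, 0x89, 0xab} and return 0.  A NULL string, or
 * one that parses to a multicast or all-zero address (which
 * is_valid_ether_addr() rejects), falls back to eth_random_addr() and
 * returns 1 so callers can warn that a random address is in use.
 */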
static const struct net_device_ops eth_netdev_ops = {
	.ndo_open		= eth_open,
	.ndo_stop		= eth_stop,
	.ndo_start_xmit		= eth_start_xmit,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

static struct device_type gadget_type = {
	.name	= "gadget",
};

/*
 * gether_setup_name - initialize one ethernet-over-usb link
 * @g: gadget to associate with this link
 * @dev_addr: NULL, or an Ethernet address string for the device (self)
 *	end of the link
 * @host_addr: NULL, or an Ethernet address string for the host end
 *	of the link
 * @ethaddr: NULL, or a buffer in which the ethernet address of the
 *	host side of the link is recorded
 * @qmult: queue length multiplier at high/super speed
 * @netname: name for network device (for example, "usb")
 * Context: may sleep
 *
 * This sets up the single network link that may be exported by a
 * gadget driver using this framework.  The link layer addresses are
 * set up using module parameters.
 *
 * Returns an eth_dev pointer on success, or an ERR_PTR on failure.
 */
struct eth_dev *gether_setup_name(struct usb_gadget *g,
		const char *dev_addr, const char *host_addr,
		u8 ethaddr[ETH_ALEN], unsigned qmult, const char *netname)
{
	struct eth_dev		*dev;
	struct net_device	*net;
	int			status;

	net = alloc_etherdev(sizeof *dev);
	if (!net)
		return ERR_PTR(-ENOMEM);

	dev = netdev_priv(net);
	spin_lock_init(&dev->lock);
	spin_lock_init(&dev->req_lock);
	INIT_WORK(&dev->work, eth_work);
	INIT_LIST_HEAD(&dev->tx_reqs);
	INIT_LIST_HEAD(&dev->rx_reqs);

	skb_queue_head_init(&dev->rx_frames);

	/* network device setup */
	dev->net = net;
	dev->qmult = qmult;
	snprintf(net->name, sizeof(net->name), "%s%%d", netname);

	if (get_ether_addr(dev_addr, net->dev_addr))
		dev_warn(&g->dev,
			"using random %s ethernet address\n", "self");
	if (get_ether_addr(host_addr, dev->host_mac))
		dev_warn(&g->dev,
			"using random %s ethernet address\n", "host");

	if (ethaddr)
		memcpy(ethaddr, dev->host_mac, ETH_ALEN);

	net->netdev_ops = &eth_netdev_ops;

	net->ethtool_ops = &ops;

	/* MTU range: 14 - 15412 */
	net->min_mtu = ETH_HLEN;
	net->max_mtu = GETHER_MAX_MTU_SIZE;

	dev->gadget = g;
	SET_NETDEV_DEV(net, &g->dev);
	SET_NETDEV_DEVTYPE(net, &gadget_type);

	status = register_netdev(net);
	if (status < 0) {
		dev_dbg(&g->dev, "register_netdev failed, %d\n", status);
		free_netdev(net);
		dev = ERR_PTR(status);
	} else {
		INFO(dev, "MAC %pM\n", net->dev_addr);
		INFO(dev, "HOST MAC %pM\n", dev->host_mac);

		/*
		 * two kinds of host-initiated state changes:
		 *  - iff DATA transfer is active, carrier is "on"
		 *  - tx queueing enabled if open *and* carrier is "on"
		 */
		netif_carrier_off(net);
	}

	return dev;
}
EXPORT_SYMBOL_GPL(gether_setup_name);
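/*
 * Usage sketch (illustrative only; legacy "g_ether"-style gadget
 * drivers do roughly this from their bind() path, with dev_addr and
 * host_addr coming from module parameters):
 *
 *	struct eth_dev *the_dev;
 *	u8 host_mac[ETH_ALEN];
 *
 *	the_dev = gether_setup_name(gadget, dev_addr, host_addr,
 *				    host_mac, QMULT_DEFAULT, "usb");
 *	if (IS_ERR(the_dev))
 *		return PTR_ERR(the_dev);
 *	// host_mac now holds the host-side address the function driver
 *	// should advertise to the USB host
 */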
"on" 878 * - tx queueing enabled if open *and* carrier is "on" 879 */ 880 netif_carrier_off(net); 881 } 882 sa.sa_family = net->type; 883 memcpy(sa.sa_data, dev->dev_mac, ETH_ALEN); 884 rtnl_lock(); 885 status = dev_set_mac_address(net, &sa, NULL); 886 rtnl_unlock(); 887 if (status) 888 pr_warn("cannot set self ethernet address: %d\n", status); 889 else 890 INFO(dev, "MAC %pM\n", dev->dev_mac); 891 892 return status; 893 } 894 EXPORT_SYMBOL_GPL(gether_register_netdev); 895 896 void gether_set_gadget(struct net_device *net, struct usb_gadget *g) 897 { 898 struct eth_dev *dev; 899 900 dev = netdev_priv(net); 901 dev->gadget = g; 902 SET_NETDEV_DEV(net, &g->dev); 903 } 904 EXPORT_SYMBOL_GPL(gether_set_gadget); 905 906 int gether_set_dev_addr(struct net_device *net, const char *dev_addr) 907 { 908 struct eth_dev *dev; 909 u8 new_addr[ETH_ALEN]; 910 911 dev = netdev_priv(net); 912 if (get_ether_addr(dev_addr, new_addr)) 913 return -EINVAL; 914 memcpy(dev->dev_mac, new_addr, ETH_ALEN); 915 return 0; 916 } 917 EXPORT_SYMBOL_GPL(gether_set_dev_addr); 918 919 int gether_get_dev_addr(struct net_device *net, char *dev_addr, int len) 920 { 921 struct eth_dev *dev; 922 int ret; 923 924 dev = netdev_priv(net); 925 ret = get_ether_addr_str(dev->dev_mac, dev_addr, len); 926 if (ret + 1 < len) { 927 dev_addr[ret++] = '\n'; 928 dev_addr[ret] = '\0'; 929 } 930 931 return ret; 932 } 933 EXPORT_SYMBOL_GPL(gether_get_dev_addr); 934 935 int gether_set_host_addr(struct net_device *net, const char *host_addr) 936 { 937 struct eth_dev *dev; 938 u8 new_addr[ETH_ALEN]; 939 940 dev = netdev_priv(net); 941 if (get_ether_addr(host_addr, new_addr)) 942 return -EINVAL; 943 memcpy(dev->host_mac, new_addr, ETH_ALEN); 944 return 0; 945 } 946 EXPORT_SYMBOL_GPL(gether_set_host_addr); 947 948 int gether_get_host_addr(struct net_device *net, char *host_addr, int len) 949 { 950 struct eth_dev *dev; 951 int ret; 952 953 dev = netdev_priv(net); 954 ret = get_ether_addr_str(dev->host_mac, host_addr, len); 955 if (ret + 1 < len) { 956 host_addr[ret++] = '\n'; 957 host_addr[ret] = '\0'; 958 } 959 960 return ret; 961 } 962 EXPORT_SYMBOL_GPL(gether_get_host_addr); 963 964 int gether_get_host_addr_cdc(struct net_device *net, char *host_addr, int len) 965 { 966 struct eth_dev *dev; 967 968 if (len < 13) 969 return -EINVAL; 970 971 dev = netdev_priv(net); 972 snprintf(host_addr, len, "%pm", dev->host_mac); 973 974 return strlen(host_addr); 975 } 976 EXPORT_SYMBOL_GPL(gether_get_host_addr_cdc); 977 978 void gether_get_host_addr_u8(struct net_device *net, u8 host_mac[ETH_ALEN]) 979 { 980 struct eth_dev *dev; 981 982 dev = netdev_priv(net); 983 memcpy(host_mac, dev->host_mac, ETH_ALEN); 984 } 985 EXPORT_SYMBOL_GPL(gether_get_host_addr_u8); 986 987 void gether_set_qmult(struct net_device *net, unsigned qmult) 988 { 989 struct eth_dev *dev; 990 991 dev = netdev_priv(net); 992 dev->qmult = qmult; 993 } 994 EXPORT_SYMBOL_GPL(gether_set_qmult); 995 996 unsigned gether_get_qmult(struct net_device *net) 997 { 998 struct eth_dev *dev; 999 1000 dev = netdev_priv(net); 1001 return dev->qmult; 1002 } 1003 EXPORT_SYMBOL_GPL(gether_get_qmult); 1004 1005 int gether_get_ifname(struct net_device *net, char *name, int len) 1006 { 1007 int ret; 1008 1009 rtnl_lock(); 1010 ret = scnprintf(name, len, "%s\n", netdev_name(net)); 1011 rtnl_unlock(); 1012 return ret; 1013 } 1014 EXPORT_SYMBOL_GPL(gether_get_ifname); 1015 1016 /* 1017 * gether_cleanup - remove Ethernet-over-USB device 1018 * Context: may sleep 1019 * 1020 * This is called to free all resources 
void gether_set_gadget(struct net_device *net, struct usb_gadget *g)
{
	struct eth_dev *dev;

	dev = netdev_priv(net);
	dev->gadget = g;
	SET_NETDEV_DEV(net, &g->dev);
}
EXPORT_SYMBOL_GPL(gether_set_gadget);

int gether_set_dev_addr(struct net_device *net, const char *dev_addr)
{
	struct eth_dev *dev;
	u8 new_addr[ETH_ALEN];

	dev = netdev_priv(net);
	if (get_ether_addr(dev_addr, new_addr))
		return -EINVAL;
	memcpy(dev->dev_mac, new_addr, ETH_ALEN);
	return 0;
}
EXPORT_SYMBOL_GPL(gether_set_dev_addr);

int gether_get_dev_addr(struct net_device *net, char *dev_addr, int len)
{
	struct eth_dev *dev;
	int ret;

	dev = netdev_priv(net);
	ret = get_ether_addr_str(dev->dev_mac, dev_addr, len);
	if (ret + 1 < len) {
		dev_addr[ret++] = '\n';
		dev_addr[ret] = '\0';
	}

	return ret;
}
EXPORT_SYMBOL_GPL(gether_get_dev_addr);

int gether_set_host_addr(struct net_device *net, const char *host_addr)
{
	struct eth_dev *dev;
	u8 new_addr[ETH_ALEN];

	dev = netdev_priv(net);
	if (get_ether_addr(host_addr, new_addr))
		return -EINVAL;
	memcpy(dev->host_mac, new_addr, ETH_ALEN);
	return 0;
}
EXPORT_SYMBOL_GPL(gether_set_host_addr);

int gether_get_host_addr(struct net_device *net, char *host_addr, int len)
{
	struct eth_dev *dev;
	int ret;

	dev = netdev_priv(net);
	ret = get_ether_addr_str(dev->host_mac, host_addr, len);
	if (ret + 1 < len) {
		host_addr[ret++] = '\n';
		host_addr[ret] = '\0';
	}

	return ret;
}
EXPORT_SYMBOL_GPL(gether_get_host_addr);

int gether_get_host_addr_cdc(struct net_device *net, char *host_addr, int len)
{
	struct eth_dev *dev;

	if (len < 13)
		return -EINVAL;

	dev = netdev_priv(net);
	snprintf(host_addr, len, "%pm", dev->host_mac);

	return strlen(host_addr);
}
EXPORT_SYMBOL_GPL(gether_get_host_addr_cdc);

void gether_get_host_addr_u8(struct net_device *net, u8 host_mac[ETH_ALEN])
{
	struct eth_dev *dev;

	dev = netdev_priv(net);
	memcpy(host_mac, dev->host_mac, ETH_ALEN);
}
EXPORT_SYMBOL_GPL(gether_get_host_addr_u8);

void gether_set_qmult(struct net_device *net, unsigned qmult)
{
	struct eth_dev *dev;

	dev = netdev_priv(net);
	dev->qmult = qmult;
}
EXPORT_SYMBOL_GPL(gether_set_qmult);

unsigned gether_get_qmult(struct net_device *net)
{
	struct eth_dev *dev;

	dev = netdev_priv(net);
	return dev->qmult;
}
EXPORT_SYMBOL_GPL(gether_get_qmult);

int gether_get_ifname(struct net_device *net, char *name, int len)
{
	int ret;

	rtnl_lock();
	ret = scnprintf(name, len, "%s\n", netdev_name(net));
	rtnl_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(gether_get_ifname);

/*
 * gether_cleanup - remove Ethernet-over-USB device
 * Context: may sleep
 *
 * This is called to free all resources allocated by gether_setup().
 */
void gether_cleanup(struct eth_dev *dev)
{
	if (!dev)
		return;

	unregister_netdev(dev->net);
	flush_work(&dev->work);
	free_netdev(dev->net);
}
EXPORT_SYMBOL_GPL(gether_cleanup);

/**
 * gether_connect - notify network layer that USB link is active
 * @link: the USB link, set up with endpoints, descriptors matching
 *	current device speed, and any framing wrapper(s) set up.
 * Context: irqs blocked
 *
 * This is called to activate endpoints and let the network layer know
 * the connection is active ("carrier detect").  It may cause the I/O
 * queues to open and start letting network packets flow, but will in
 * any case activate the endpoints so that they respond properly to the
 * USB host.
 *
 * Verify net_device pointer returned using IS_ERR().  If it doesn't
 * indicate some error code (negative errno), ep->driver_data values
 * have been overwritten.
 */
struct net_device *gether_connect(struct gether *link)
{
	struct eth_dev		*dev = link->ioport;
	int			result = 0;

	if (!dev)
		return ERR_PTR(-EINVAL);

	link->in_ep->driver_data = dev;
	result = usb_ep_enable(link->in_ep);
	if (result != 0) {
		DBG(dev, "enable %s --> %d\n",
			link->in_ep->name, result);
		goto fail0;
	}

	link->out_ep->driver_data = dev;
	result = usb_ep_enable(link->out_ep);
	if (result != 0) {
		DBG(dev, "enable %s --> %d\n",
			link->out_ep->name, result);
		goto fail1;
	}

	if (result == 0)
		result = alloc_requests(dev, link, qlen(dev->gadget,
					dev->qmult));

	if (result == 0) {
		dev->zlp = link->is_zlp_ok;
		dev->no_skb_reserve = gadget_avoids_skb_reserve(dev->gadget);
		DBG(dev, "qlen %d\n", qlen(dev->gadget, dev->qmult));

		dev->header_len = link->header_len;
		dev->unwrap = link->unwrap;
		dev->wrap = link->wrap;

		spin_lock(&dev->lock);
		dev->port_usb = link;
		if (netif_running(dev->net)) {
			if (link->open)
				link->open(link);
		} else {
			if (link->close)
				link->close(link);
		}
		spin_unlock(&dev->lock);

		netif_carrier_on(dev->net);
		if (netif_running(dev->net))
			eth_start(dev, GFP_ATOMIC);

	/* on error, disable any endpoints  */
	} else {
		(void) usb_ep_disable(link->out_ep);
fail1:
		(void) usb_ep_disable(link->in_ep);
	}
fail0:
	/* caller is responsible for cleanup on error */
	if (result < 0)
		return ERR_PTR(result);
	return dev->net;
}
EXPORT_SYMBOL_GPL(gether_connect);
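/*
 * Usage sketch (illustrative only): a function driver typically calls
 * gether_connect() from its set_alt() callback once the host selects
 * the data interface's altsetting, after configuring the endpoints for
 * the current speed, and calls gether_disconnect() (below) from its
 * disable() callback or when altsetting 0 is selected.
 *
 *	if (config_ep_by_speed(cdev->gadget, f, link->in_ep) ||
 *	    config_ep_by_speed(cdev->gadget, f, link->out_ep))
 *		return -EINVAL;
 *	net = gether_connect(link);
 *	if (IS_ERR(net))
 *		return PTR_ERR(net);
 */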
/**
 * gether_disconnect - notify network layer that USB link is inactive
 * @link: the USB link, on which gether_connect() was called
 * Context: irqs blocked
 *
 * This is called to deactivate endpoints and let the network layer know
 * the connection went inactive ("no carrier").
 *
 * On return, the state is as if gether_connect() had never been called.
 * The endpoints are inactive, and accordingly without active USB I/O.
 * Pointers to endpoint descriptors and endpoint private data are nulled.
 */
void gether_disconnect(struct gether *link)
{
	struct eth_dev		*dev = link->ioport;
	struct usb_request	*req;

	WARN_ON(!dev);
	if (!dev)
		return;

	DBG(dev, "%s\n", __func__);

	netif_stop_queue(dev->net);
	netif_carrier_off(dev->net);

	/* disable endpoints, forcing (synchronous) completion
	 * of all pending i/o.  then free the request objects
	 * and forget about the endpoints.
	 */
	usb_ep_disable(link->in_ep);
	spin_lock(&dev->req_lock);
	while (!list_empty(&dev->tx_reqs)) {
		req = list_first_entry(&dev->tx_reqs, struct usb_request, list);
		list_del(&req->list);

		spin_unlock(&dev->req_lock);
		usb_ep_free_request(link->in_ep, req);
		spin_lock(&dev->req_lock);
	}
	spin_unlock(&dev->req_lock);
	link->in_ep->desc = NULL;

	usb_ep_disable(link->out_ep);
	spin_lock(&dev->req_lock);
	while (!list_empty(&dev->rx_reqs)) {
		req = list_first_entry(&dev->rx_reqs, struct usb_request, list);
		list_del(&req->list);

		spin_unlock(&dev->req_lock);
		usb_ep_free_request(link->out_ep, req);
		spin_lock(&dev->req_lock);
	}
	spin_unlock(&dev->req_lock);
	link->out_ep->desc = NULL;

	/* finish forgetting about this USB link episode */
	dev->header_len = 0;
	dev->unwrap = NULL;
	dev->wrap = NULL;

	spin_lock(&dev->lock);
	dev->port_usb = NULL;
	spin_unlock(&dev->lock);
}
EXPORT_SYMBOL_GPL(gether_disconnect);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Brownell");