/*
 * u_ether.c -- Ethernet-over-USB link layer utilities for Gadget stack
 *
 * Copyright (C) 2003-2005,2008 David Brownell
 * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
 * Copyright (C) 2008 Nokia Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

/* #define VERBOSE_DEBUG */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/device.h>
#include <linux/ctype.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>

#include "u_ether.h"


/*
 * This component encapsulates the Ethernet link glue needed to provide
 * one (!) network link through the USB gadget stack, normally "usb0".
 *
 * The control and data models are handled by the function driver which
 * connects to this code; such as CDC Ethernet (ECM or EEM),
 * "CDC Subset", or RNDIS.  That includes all descriptor and endpoint
 * management.
 *
 * Link level addressing is handled by this component using module
 * parameters; if no such parameters are provided, random link level
 * addresses are used.  Each end of the link uses one address.  The
 * host end address is exported in various ways, and is often recorded
 * in configuration databases.
 *
 * The driver which assembles each configuration using such a link is
 * responsible for ensuring that each configuration includes at most one
 * instance of this network link.  (The network layer provides ways for
 * this single "physical" link to be used by multiple virtual links.)
 */
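
/*
 * For illustration only: a function driver typically embeds a struct
 * gether, fills in the endpoints and framing hooks during bind, and
 * later hands the link to this layer.  A minimal sketch (hypothetical
 * "f_demo" driver; the names below are not part of this file):
 *
 *	struct f_demo {
 *		struct gether	port;
 *	};
 *
 *	static int demo_bind(struct usb_configuration *c,
 *			struct usb_function *f)
 *	{
 *		struct f_demo *demo = func_to_demo(f);
 *
 *		demo->port.in_ep = usb_ep_autoconfig(gadget, in_desc);
 *		demo->port.out_ep = usb_ep_autoconfig(gadget, out_desc);
 *		demo->port.header_len = 0;
 *		demo->port.wrap = NULL;
 *		demo->port.unwrap = NULL;
 *		return 0;
 *	}
 *
 * gether_connect() and gether_disconnect() are then called from the
 * function's set_alt()/disable() entry points; see the sketches below.
 */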

#define UETH__VERSION	"29-May-2008"

/* Experiments show that both Linux and Windows hosts allow up to 16k
 * frame sizes.  Set the max size to 15k+52 to prevent allocating 32k
 * blocks and still have efficient handling. */
#define GETHER_MAX_ETH_FRAME_LEN 15412

struct eth_dev {
	/* lock is held while accessing port_usb
	 */
	spinlock_t		lock;
	struct gether		*port_usb;

	struct net_device	*net;
	struct usb_gadget	*gadget;

	spinlock_t		req_lock;	/* guard {rx,tx}_reqs */
	struct list_head	tx_reqs, rx_reqs;
	atomic_t		tx_qlen;

	struct sk_buff_head	rx_frames;

	unsigned		qmult;

	unsigned		header_len;
	struct sk_buff		*(*wrap)(struct gether *, struct sk_buff *skb);
	int			(*unwrap)(struct gether *,
						struct sk_buff *skb,
						struct sk_buff_head *list);

	struct work_struct	work;

	unsigned long		todo;
#define	WORK_RX_MEMORY		0

	bool			zlp;
	u8			host_mac[ETH_ALEN];
	u8			dev_mac[ETH_ALEN];
};

/*-------------------------------------------------------------------------*/

#define RX_EXTRA	20	/* bytes guarding against rx overflows */

#define DEFAULT_QLEN	2	/* double buffering by default */

/* for dual-speed hardware, use deeper queues at high/super speed */
static inline int qlen(struct usb_gadget *gadget, unsigned qmult)
{
	if (gadget_is_dualspeed(gadget) && (gadget->speed == USB_SPEED_HIGH ||
					    gadget->speed == USB_SPEED_SUPER))
		return qmult * DEFAULT_QLEN;
	else
		return DEFAULT_QLEN;
}
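
/*
 * Worked example: with a queue multiplier of 5 (QMULT_DEFAULT in
 * u_ether.h of this era; an assumption, the principle holds for any
 * qmult), a high- or super-speed link queues 5 * DEFAULT_QLEN = 10
 * requests per direction, while a full-speed link stays at the
 * double-buffered 2.
 */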

/*-------------------------------------------------------------------------*/

/* REVISIT there must be a better way than having two sets
 * of debug calls ...
 */

#undef DBG
#undef VDBG
#undef ERROR
#undef INFO

#define xprintk(d, level, fmt, args...) \
	printk(level "%s: " fmt , (d)->net->name , ## args)

#ifdef DEBUG
#undef DEBUG
#define DBG(dev, fmt, args...) \
	xprintk(dev , KERN_DEBUG , fmt , ## args)
#else
#define DBG(dev, fmt, args...) \
	do { } while (0)
#endif /* DEBUG */

#ifdef VERBOSE_DEBUG
#define VDBG	DBG
#else
#define VDBG(dev, fmt, args...) \
	do { } while (0)
#endif /* VERBOSE_DEBUG */

#define ERROR(dev, fmt, args...) \
	xprintk(dev , KERN_ERR , fmt , ## args)
#define INFO(dev, fmt, args...) \
	xprintk(dev , KERN_INFO , fmt , ## args)

/*-------------------------------------------------------------------------*/

/* NETWORK DRIVER HOOKUP (to the layer above this driver) */

static int ueth_change_mtu(struct net_device *net, int new_mtu)
{
	if (new_mtu <= ETH_HLEN || new_mtu > GETHER_MAX_ETH_FRAME_LEN)
		return -ERANGE;
	net->mtu = new_mtu;

	return 0;
}

static void eth_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *p)
{
	struct eth_dev *dev = netdev_priv(net);

	strlcpy(p->driver, "g_ether", sizeof(p->driver));
	strlcpy(p->version, UETH__VERSION, sizeof(p->version));
	strlcpy(p->fw_version, dev->gadget->name, sizeof(p->fw_version));
	strlcpy(p->bus_info, dev_name(&dev->gadget->dev), sizeof(p->bus_info));
}

/* REVISIT can also support:
 *   - WOL (by tracking suspends and issuing remote wakeup)
 *   - msglevel (implies updated messaging)
 *   - ... probably more ethtool ops
 */

static const struct ethtool_ops ops = {
	.get_drvinfo = eth_get_drvinfo,
	.get_link = ethtool_op_get_link,
};

static void defer_kevent(struct eth_dev *dev, int flag)
{
	if (test_and_set_bit(flag, &dev->todo))
		return;
	if (!schedule_work(&dev->work))
		ERROR(dev, "kevent %d may have been dropped\n", flag);
	else
		DBG(dev, "kevent %d scheduled\n", flag);
}

static void rx_complete(struct usb_ep *ep, struct usb_request *req);

static int
rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags)
{
	struct sk_buff	*skb;
	int		retval = -ENOMEM;
	size_t		size = 0;
	struct usb_ep	*out;
	unsigned long	flags;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb)
		out = dev->port_usb->out_ep;
	else
		out = NULL;
	spin_unlock_irqrestore(&dev->lock, flags);

	if (!out)
		return -ENOTCONN;


	/* Padding up to RX_EXTRA handles minor disagreements with host.
	 * Normally we use the USB "terminate on short read" convention;
	 * so allow up to (N*maxpacket), since that memory is normally
	 * already allocated.  Some hardware doesn't deal well with short
	 * reads (e.g. DMA must be N*maxpacket), so for now don't trim a
	 * byte off the end (to force hardware errors on overflow).
	 *
	 * RNDIS uses internal framing, and explicitly allows senders to
	 * pad to end-of-packet.  That's potentially nice for speed, but
	 * means receivers can't recover lost synch on their own (because
	 * new packets don't only start after a short RX).
	 */
	size += sizeof(struct ethhdr) + dev->net->mtu + RX_EXTRA;
	size += dev->port_usb->header_len;
	size += out->maxpacket - 1;
	size -= size % out->maxpacket;

	if (dev->port_usb->is_fixed)
		size = max_t(size_t, size, dev->port_usb->fixed_out_len);
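
	/* Worked example (assuming a high-speed link with maxpacket 512,
	 * the default 1500-byte MTU, and no wrapper header):
	 *   14 (ethhdr) + 1500 (mtu) + 20 (RX_EXTRA) = 1534 bytes,
	 * rounded up to a multiple of maxpacket:
	 *   1534 + 511 = 2045; 2045 - (2045 % 512) = 1536 = 3 * 512.
	 */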

	skb = alloc_skb(size + NET_IP_ALIGN, gfp_flags);
	if (skb == NULL) {
		DBG(dev, "no rx skb\n");
		goto enomem;
	}

	/* Some platforms perform better when IP packets are aligned,
	 * but on at least one, checksumming fails otherwise.  Note:
	 * RNDIS headers involve variable numbers of LE32 values.
	 */
	skb_reserve(skb, NET_IP_ALIGN);

	req->buf = skb->data;
	req->length = size;
	req->complete = rx_complete;
	req->context = skb;

	retval = usb_ep_queue(out, req, gfp_flags);
	if (retval == -ENOMEM)
enomem:
		defer_kevent(dev, WORK_RX_MEMORY);
	if (retval) {
		DBG(dev, "rx submit --> %d\n", retval);
		if (skb)
			dev_kfree_skb_any(skb);
		spin_lock_irqsave(&dev->req_lock, flags);
		list_add(&req->list, &dev->rx_reqs);
		spin_unlock_irqrestore(&dev->req_lock, flags);
	}
	return retval;
}

static void rx_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct sk_buff	*skb = req->context, *skb2;
	struct eth_dev	*dev = ep->driver_data;
	int		status = req->status;

	switch (status) {

	/* normal completion */
	case 0:
		skb_put(skb, req->actual);

		if (dev->unwrap) {
			unsigned long	flags;

			spin_lock_irqsave(&dev->lock, flags);
			if (dev->port_usb) {
				status = dev->unwrap(dev->port_usb,
							skb,
							&dev->rx_frames);
			} else {
				dev_kfree_skb_any(skb);
				status = -ENOTCONN;
			}
			spin_unlock_irqrestore(&dev->lock, flags);
		} else {
			skb_queue_tail(&dev->rx_frames, skb);
		}
		skb = NULL;

		skb2 = skb_dequeue(&dev->rx_frames);
		while (skb2) {
			if (status < 0
					|| ETH_HLEN > skb2->len
					|| skb2->len > GETHER_MAX_ETH_FRAME_LEN) {
				dev->net->stats.rx_errors++;
				dev->net->stats.rx_length_errors++;
				DBG(dev, "rx length %d\n", skb2->len);
				dev_kfree_skb_any(skb2);
				goto next_frame;
			}
			skb2->protocol = eth_type_trans(skb2, dev->net);
			dev->net->stats.rx_packets++;
			dev->net->stats.rx_bytes += skb2->len;

			/* no buffer copies needed, unless hardware can't
			 * use skb buffers.
			 */
			status = netif_rx(skb2);
next_frame:
			skb2 = skb_dequeue(&dev->rx_frames);
		}
		break;

	/* software-driven interface shutdown */
	case -ECONNRESET:		/* unlink */
	case -ESHUTDOWN:		/* disconnect etc */
		VDBG(dev, "rx shutdown, code %d\n", status);
		goto quiesce;

	/* for hardware automagic (such as pxa) */
	case -ECONNABORTED:		/* endpoint reset */
		DBG(dev, "rx %s reset\n", ep->name);
		defer_kevent(dev, WORK_RX_MEMORY);
quiesce:
		dev_kfree_skb_any(skb);
		goto clean;

	/* data overrun */
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		/* FALLTHROUGH */

	default:
		dev->net->stats.rx_errors++;
		DBG(dev, "rx status %d\n", status);
		break;
	}

	if (skb)
		dev_kfree_skb_any(skb);
	if (!netif_running(dev->net)) {
clean:
		spin_lock(&dev->req_lock);
		list_add(&req->list, &dev->rx_reqs);
		spin_unlock(&dev->req_lock);
		req = NULL;
	}
	if (req)
		rx_submit(dev, req, GFP_ATOMIC);
}
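
/*
 * The unwrap() hook used above is supplied by the function driver.  Its
 * contract, as rx_complete() exercises it: consume (or reuse) the skb
 * holding one USB transfer, append zero or more Ethernet frames to the
 * given list, and return a status.  A minimal sketch for a framing that
 * strips a hypothetical 4-byte header (illustration only, not a
 * protocol implemented in this file):
 *
 *	static int demo_unwrap(struct gether *port, struct sk_buff *skb,
 *			struct sk_buff_head *list)
 *	{
 *		if (skb->len < 4) {
 *			dev_kfree_skb_any(skb);
 *			return -EINVAL;
 *		}
 *		skb_pull(skb, 4);
 *		skb_queue_tail(list, skb);
 *		return 0;
 *	}
 */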

static int prealloc(struct list_head *list, struct usb_ep *ep, unsigned n)
{
	unsigned		i;
	struct usb_request	*req;

	if (!n)
		return -ENOMEM;

	/* queue/recycle up to N requests */
	i = n;
	list_for_each_entry(req, list, list) {
		if (i-- == 0)
			goto extra;
	}
	while (i--) {
		req = usb_ep_alloc_request(ep, GFP_ATOMIC);
		if (!req)
			return list_empty(list) ? -ENOMEM : 0;
		list_add(&req->list, list);
	}
	return 0;

extra:
	/* free extras */
	for (;;) {
		struct list_head	*next;

		next = req->list.next;
		list_del(&req->list);
		usb_ep_free_request(ep, req);

		if (next == list)
			break;

		req = container_of(next, struct usb_request, list);
	}
	return 0;
}

static int alloc_requests(struct eth_dev *dev, struct gether *link, unsigned n)
{
	int	status;

	spin_lock(&dev->req_lock);
	status = prealloc(&dev->tx_reqs, link->in_ep, n);
	if (status < 0)
		goto fail;
	status = prealloc(&dev->rx_reqs, link->out_ep, n);
	if (status < 0)
		goto fail;
	goto done;
fail:
	DBG(dev, "can't alloc requests\n");
done:
	spin_unlock(&dev->req_lock);
	return status;
}

static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags)
{
	struct usb_request	*req;
	unsigned long		flags;

	/* fill unused rxq slots with some skb */
	spin_lock_irqsave(&dev->req_lock, flags);
	while (!list_empty(&dev->rx_reqs)) {
		req = container_of(dev->rx_reqs.next,
				struct usb_request, list);
		list_del_init(&req->list);
		spin_unlock_irqrestore(&dev->req_lock, flags);

		if (rx_submit(dev, req, gfp_flags) < 0) {
			defer_kevent(dev, WORK_RX_MEMORY);
			return;
		}

		spin_lock_irqsave(&dev->req_lock, flags);
	}
	spin_unlock_irqrestore(&dev->req_lock, flags);
}

static void eth_work(struct work_struct *work)
{
	struct eth_dev	*dev = container_of(work, struct eth_dev, work);

	if (test_and_clear_bit(WORK_RX_MEMORY, &dev->todo)) {
		if (netif_running(dev->net))
			rx_fill(dev, GFP_KERNEL);
	}

	if (dev->todo)
		DBG(dev, "work done, flags = 0x%lx\n", dev->todo);
}

static void tx_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct sk_buff	*skb = req->context;
	struct eth_dev	*dev = ep->driver_data;

	switch (req->status) {
	default:
		dev->net->stats.tx_errors++;
		VDBG(dev, "tx err %d\n", req->status);
		/* FALLTHROUGH */
	case -ECONNRESET:		/* unlink */
	case -ESHUTDOWN:		/* disconnect etc */
		break;
	case 0:
		dev->net->stats.tx_bytes += skb->len;
	}
	dev->net->stats.tx_packets++;

	spin_lock(&dev->req_lock);
	list_add(&req->list, &dev->tx_reqs);
	spin_unlock(&dev->req_lock);
	dev_kfree_skb_any(skb);

	atomic_dec(&dev->tx_qlen);
	if (netif_carrier_ok(dev->net))
		netif_wake_queue(dev->net);
}

static inline int is_promisc(u16 cdc_filter)
{
	return cdc_filter & USB_CDC_PACKET_TYPE_PROMISCUOUS;
}

static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
					struct net_device *net)
{
	struct eth_dev		*dev = netdev_priv(net);
	int			length = 0;
	int			retval;
	struct usb_request	*req = NULL;
	unsigned long		flags;
	struct usb_ep		*in;
	u16			cdc_filter;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb) {
		in = dev->port_usb->in_ep;
		cdc_filter = dev->port_usb->cdc_filter;
	} else {
		in = NULL;
		cdc_filter = 0;
	}
	spin_unlock_irqrestore(&dev->lock, flags);

	if (skb && !in) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* apply outgoing CDC or RNDIS filters */
	if (skb && !is_promisc(cdc_filter)) {
		u8		*dest = skb->data;

		if (is_multicast_ether_addr(dest)) {
			u16	type;

			/* ignores USB_CDC_PACKET_TYPE_MULTICAST and host
			 * SET_ETHERNET_MULTICAST_FILTERS requests
			 */
			if (is_broadcast_ether_addr(dest))
				type = USB_CDC_PACKET_TYPE_BROADCAST;
			else
				type = USB_CDC_PACKET_TYPE_ALL_MULTICAST;
			if (!(cdc_filter & type)) {
				dev_kfree_skb_any(skb);
				return NETDEV_TX_OK;
			}
		}
		/* ignores USB_CDC_PACKET_TYPE_DIRECTED */
	}

	spin_lock_irqsave(&dev->req_lock, flags);
	/*
	 * this freelist can be empty if an interrupt triggered disconnect()
	 * and reconfigured the gadget (shutting down this queue) after the
	 * network stack decided to xmit but before we got the spinlock.
	 */
	if (list_empty(&dev->tx_reqs)) {
		spin_unlock_irqrestore(&dev->req_lock, flags);
		return NETDEV_TX_BUSY;
	}

	req = container_of(dev->tx_reqs.next, struct usb_request, list);
	list_del(&req->list);

	/* temporarily stop TX queue when the freelist empties */
	if (list_empty(&dev->tx_reqs))
		netif_stop_queue(net);
	spin_unlock_irqrestore(&dev->req_lock, flags);

	/* no buffer copies needed, unless the network stack did it,
	 * the hardware can't use skb buffers, or there's not enough
	 * space for the extra headers we need
	 */
	if (dev->wrap) {
		unsigned long	flags;

		spin_lock_irqsave(&dev->lock, flags);
		if (dev->port_usb)
			skb = dev->wrap(dev->port_usb, skb);
		spin_unlock_irqrestore(&dev->lock, flags);
		if (!skb) {
			/* Multi frame CDC protocols may store the frame for
			 * later which is not a dropped frame.
			 */
			if (dev->port_usb &&
					dev->port_usb->supports_multi_frame)
				goto multiframe;
			goto drop;
		}
	}

	length = skb->len;
	req->buf = skb->data;
	req->context = skb;
	req->complete = tx_complete;

	/* NCM requires no zlp if transfer is dwNtbInMaxSize */
	if (dev->port_usb->is_fixed &&
	    length == dev->port_usb->fixed_in_len &&
	    (length % in->maxpacket) == 0)
		req->zero = 0;
	else
		req->zero = 1;

	/* use zlp framing on tx for strict CDC-Ether conformance,
	 * though any robust network rx path ignores extra padding.
	 * and some hardware doesn't like to write zlps.
	 */
	if (req->zero && !dev->zlp && (length % in->maxpacket) == 0)
		length++;
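
	/* Worked example: with maxpacket 512, dev->zlp unset, and a
	 * 1024-byte wrapped frame, length becomes 1025, so the final
	 * packet is a short one carrying 1 byte of padding; the host's
	 * "terminate on short read" logic sees end-of-transfer without
	 * the controller having to write a zero length packet.
	 */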

	req->length = length;

	/* throttle high/super speed IRQ rate back slightly */
	if (gadget_is_dualspeed(dev->gadget))
		req->no_interrupt = (dev->gadget->speed == USB_SPEED_HIGH ||
				     dev->gadget->speed == USB_SPEED_SUPER)
			? ((atomic_read(&dev->tx_qlen) % dev->qmult) != 0)
			: 0;

	retval = usb_ep_queue(in, req, GFP_ATOMIC);
	switch (retval) {
	default:
		DBG(dev, "tx queue err %d\n", retval);
		break;
	case 0:
		netif_trans_update(net);
		atomic_inc(&dev->tx_qlen);
	}

	if (retval) {
		dev_kfree_skb_any(skb);
drop:
		dev->net->stats.tx_dropped++;
multiframe:
		spin_lock_irqsave(&dev->req_lock, flags);
		if (list_empty(&dev->tx_reqs))
			netif_start_queue(net);
		list_add(&req->list, &dev->tx_reqs);
		spin_unlock_irqrestore(&dev->req_lock, flags);
	}
	return NETDEV_TX_OK;
}

/*-------------------------------------------------------------------------*/

static void eth_start(struct eth_dev *dev, gfp_t gfp_flags)
{
	DBG(dev, "%s\n", __func__);

	/* fill the rx queue */
	rx_fill(dev, gfp_flags);

	/* and open the tx floodgates */
	atomic_set(&dev->tx_qlen, 0);
	netif_wake_queue(dev->net);
}

static int eth_open(struct net_device *net)
{
	struct eth_dev	*dev = netdev_priv(net);
	struct gether	*link;

	DBG(dev, "%s\n", __func__);
	if (netif_carrier_ok(dev->net))
		eth_start(dev, GFP_KERNEL);

	spin_lock_irq(&dev->lock);
	link = dev->port_usb;
	if (link && link->open)
		link->open(link);
	spin_unlock_irq(&dev->lock);

	return 0;
}

static int eth_stop(struct net_device *net)
{
	struct eth_dev	*dev = netdev_priv(net);
	unsigned long	flags;

	VDBG(dev, "%s\n", __func__);
	netif_stop_queue(net);

	DBG(dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld\n",
		dev->net->stats.rx_packets, dev->net->stats.tx_packets,
		dev->net->stats.rx_errors, dev->net->stats.tx_errors
		);

	/* ensure there are no more active requests */
	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb) {
		struct gether	*link = dev->port_usb;
		const struct usb_endpoint_descriptor *in;
		const struct usb_endpoint_descriptor *out;

		if (link->close)
			link->close(link);

		/* NOTE:  we have no abort-queue primitive we could use
		 * to cancel all pending I/O.  Instead, we disable then
		 * reenable the endpoints ... this idiom may leave toggle
		 * wrong, but that's a self-correcting error.
		 *
		 * REVISIT:  we *COULD* just let the transfers complete at
		 * their own pace; the network stack can handle old packets.
		 * For the moment we leave this here, since it works.
		 */
		in = link->in_ep->desc;
		out = link->out_ep->desc;
		usb_ep_disable(link->in_ep);
		usb_ep_disable(link->out_ep);
		if (netif_carrier_ok(net)) {
			DBG(dev, "host still using in/out endpoints\n");
			link->in_ep->desc = in;
			link->out_ep->desc = out;
			usb_ep_enable(link->in_ep);
			usb_ep_enable(link->out_ep);
		}
	}
	spin_unlock_irqrestore(&dev->lock, flags);

	return 0;
}

/*-------------------------------------------------------------------------*/

static int get_ether_addr(const char *str, u8 *dev_addr)
{
	if (str) {
		unsigned	i;

		for (i = 0; i < 6; i++) {
			unsigned char num;

			if ((*str == '.') || (*str == ':'))
				str++;
			num = hex_to_bin(*str++) << 4;
			num |= hex_to_bin(*str++);
			dev_addr[i] = num;
		}
		if (is_valid_ether_addr(dev_addr))
			return 0;
	}
	eth_random_addr(dev_addr);
	return 1;
}

static int get_ether_addr_str(u8 dev_addr[ETH_ALEN], char *str, int len)
{
	if (len < 18)
		return -EINVAL;

	snprintf(str, len, "%pM", dev_addr);
	return 18;
}
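
/*
 * Example: get_ether_addr() accepts twelve hex digits with optional
 * '.' or ':' separators, e.g. "01:23:45:67:89:ab" or "0123456789ab";
 * NULL or unparseable input falls back to a random address and the
 * function returns 1 so callers can warn or reject.  Conversely,
 * get_ether_addr_str() produces the canonical colon-separated form,
 * which needs 17 characters plus a NUL terminator, hence the len < 18
 * check.
 */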

static const struct net_device_ops eth_netdev_ops = {
	.ndo_open		= eth_open,
	.ndo_stop		= eth_stop,
	.ndo_start_xmit		= eth_start_xmit,
	.ndo_change_mtu		= ueth_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

static struct device_type gadget_type = {
	.name	= "gadget",
};

/**
 * gether_setup_name - initialize one ethernet-over-usb link
 * @g: gadget to associate with this link
 * @dev_addr: NULL, or string with the device's ("self") link-level
 *	ethernet address
 * @host_addr: NULL, or string with the host's link-level ethernet address
 * @ethaddr: NULL, or a buffer in which the ethernet address of the
 *	host side of the link is recorded
 * @qmult: queue length multiplier at high/super speed
 * @netname: name for network device (for example, "usb")
 * Context: may sleep
 *
 * This sets up the single network link that may be exported by a
 * gadget driver using this framework.  The link layer addresses are
 * set up from @dev_addr and @host_addr (typically module parameters);
 * random addresses are used when those are absent or invalid.
 *
 * Returns an eth_dev pointer on success, or an ERR_PTR on failure.
 */
struct eth_dev *gether_setup_name(struct usb_gadget *g,
		const char *dev_addr, const char *host_addr,
		u8 ethaddr[ETH_ALEN], unsigned qmult, const char *netname)
{
	struct eth_dev		*dev;
	struct net_device	*net;
	int			status;

	net = alloc_etherdev(sizeof *dev);
	if (!net)
		return ERR_PTR(-ENOMEM);

	dev = netdev_priv(net);
	spin_lock_init(&dev->lock);
	spin_lock_init(&dev->req_lock);
	INIT_WORK(&dev->work, eth_work);
	INIT_LIST_HEAD(&dev->tx_reqs);
	INIT_LIST_HEAD(&dev->rx_reqs);

	skb_queue_head_init(&dev->rx_frames);

	/* network device setup */
	dev->net = net;
	dev->qmult = qmult;
	snprintf(net->name, sizeof(net->name), "%s%%d", netname);

	if (get_ether_addr(dev_addr, net->dev_addr))
		dev_warn(&g->dev,
			"using random %s ethernet address\n", "self");
	if (get_ether_addr(host_addr, dev->host_mac))
		dev_warn(&g->dev,
			"using random %s ethernet address\n", "host");

	if (ethaddr)
		memcpy(ethaddr, dev->host_mac, ETH_ALEN);

	net->netdev_ops = &eth_netdev_ops;

	net->ethtool_ops = &ops;

	dev->gadget = g;
	SET_NETDEV_DEV(net, &g->dev);
	SET_NETDEV_DEVTYPE(net, &gadget_type);

	status = register_netdev(net);
	if (status < 0) {
		dev_dbg(&g->dev, "register_netdev failed, %d\n", status);
		free_netdev(net);
		dev = ERR_PTR(status);
	} else {
		INFO(dev, "MAC %pM\n", net->dev_addr);
		INFO(dev, "HOST MAC %pM\n", dev->host_mac);

		/*
		 * two kinds of host-initiated state changes:
		 *  - iff DATA transfer is active, carrier is "on"
		 *  - tx queueing enabled if open *and* carrier is "on"
		 */
		netif_carrier_off(net);
	}

	return dev;
}
EXPORT_SYMBOL_GPL(gether_setup_name);
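
/*
 * Illustrative use from a legacy (module-parameter based) gadget
 * driver; the variable names here are hypothetical:
 *
 *	dev = gether_setup_name(gadget, dev_addr_param, host_addr_param,
 *				host_mac, qmult_param, "usb");
 *	if (IS_ERR(dev))
 *		return PTR_ERR(dev);
 *
 * The host-side MAC copied into host_mac is what CDC function drivers
 * typically expose through their iMACAddress string descriptor.
 */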

struct net_device *gether_setup_name_default(const char *netname)
{
	struct net_device	*net;
	struct eth_dev		*dev;

	net = alloc_etherdev(sizeof(*dev));
	if (!net)
		return ERR_PTR(-ENOMEM);

	dev = netdev_priv(net);
	spin_lock_init(&dev->lock);
	spin_lock_init(&dev->req_lock);
	INIT_WORK(&dev->work, eth_work);
	INIT_LIST_HEAD(&dev->tx_reqs);
	INIT_LIST_HEAD(&dev->rx_reqs);

	skb_queue_head_init(&dev->rx_frames);

	/* network device setup */
	dev->net = net;
	dev->qmult = QMULT_DEFAULT;
	snprintf(net->name, sizeof(net->name), "%s%%d", netname);

	eth_random_addr(dev->dev_mac);
	pr_warn("using random %s ethernet address\n", "self");
	eth_random_addr(dev->host_mac);
	pr_warn("using random %s ethernet address\n", "host");

	net->netdev_ops = &eth_netdev_ops;

	net->ethtool_ops = &ops;
	SET_NETDEV_DEVTYPE(net, &gadget_type);

	return net;
}
EXPORT_SYMBOL_GPL(gether_setup_name_default);

int gether_register_netdev(struct net_device *net)
{
	struct eth_dev *dev;
	struct usb_gadget *g;
	struct sockaddr sa;
	int status;

	if (!net->dev.parent)
		return -EINVAL;
	dev = netdev_priv(net);
	g = dev->gadget;
	status = register_netdev(net);
	if (status < 0) {
		dev_dbg(&g->dev, "register_netdev failed, %d\n", status);
		return status;
	} else {
		INFO(dev, "HOST MAC %pM\n", dev->host_mac);

		/* two kinds of host-initiated state changes:
		 *  - iff DATA transfer is active, carrier is "on"
		 *  - tx queueing enabled if open *and* carrier is "on"
		 */
		netif_carrier_off(net);
	}
	sa.sa_family = net->type;
	memcpy(sa.sa_data, dev->dev_mac, ETH_ALEN);
	rtnl_lock();
	status = dev_set_mac_address(net, &sa);
	rtnl_unlock();
	if (status)
		pr_warn("cannot set self ethernet address: %d\n", status);
	else
		INFO(dev, "MAC %pM\n", dev->dev_mac);

	return status;
}
EXPORT_SYMBOL_GPL(gether_register_netdev);
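
/*
 * Illustrative use of the two-step (configfs-style) bring-up, where
 * the gadget is not yet known at allocation time (hypothetical caller):
 *
 *	net = gether_setup_name_default("usb");
 *	if (IS_ERR(net))
 *		return PTR_ERR(net);
 *	...later, once the gadget exists...
 *	gether_set_gadget(net, gadget);
 *	err = gether_register_netdev(net);
 *
 * Unlike gether_setup_name(), this path defers register_netdev() until
 * a parent device is available, and applies dev_mac via
 * dev_set_mac_address() so the random address can first be overridden
 * through gether_set_dev_addr().
 */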

void gether_set_gadget(struct net_device *net, struct usb_gadget *g)
{
	struct eth_dev *dev;

	dev = netdev_priv(net);
	dev->gadget = g;
	SET_NETDEV_DEV(net, &g->dev);
}
EXPORT_SYMBOL_GPL(gether_set_gadget);

int gether_set_dev_addr(struct net_device *net, const char *dev_addr)
{
	struct eth_dev *dev;
	u8 new_addr[ETH_ALEN];

	dev = netdev_priv(net);
	if (get_ether_addr(dev_addr, new_addr))
		return -EINVAL;
	memcpy(dev->dev_mac, new_addr, ETH_ALEN);
	return 0;
}
EXPORT_SYMBOL_GPL(gether_set_dev_addr);

int gether_get_dev_addr(struct net_device *net, char *dev_addr, int len)
{
	struct eth_dev *dev;

	dev = netdev_priv(net);
	return get_ether_addr_str(dev->dev_mac, dev_addr, len);
}
EXPORT_SYMBOL_GPL(gether_get_dev_addr);

int gether_set_host_addr(struct net_device *net, const char *host_addr)
{
	struct eth_dev *dev;
	u8 new_addr[ETH_ALEN];

	dev = netdev_priv(net);
	if (get_ether_addr(host_addr, new_addr))
		return -EINVAL;
	memcpy(dev->host_mac, new_addr, ETH_ALEN);
	return 0;
}
EXPORT_SYMBOL_GPL(gether_set_host_addr);

int gether_get_host_addr(struct net_device *net, char *host_addr, int len)
{
	struct eth_dev *dev;

	dev = netdev_priv(net);
	return get_ether_addr_str(dev->host_mac, host_addr, len);
}
EXPORT_SYMBOL_GPL(gether_get_host_addr);

int gether_get_host_addr_cdc(struct net_device *net, char *host_addr, int len)
{
	struct eth_dev *dev;

	if (len < 13)
		return -EINVAL;

	dev = netdev_priv(net);
	snprintf(host_addr, len, "%pm", dev->host_mac);

	return strlen(host_addr);
}
EXPORT_SYMBOL_GPL(gether_get_host_addr_cdc);
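
/*
 * Note the lowercase "%pm" above: unlike "%pM", it prints the six MAC
 * bytes as twelve contiguous hex digits with no separators (hence the
 * len < 13 check, 12 digits plus NUL), which is the format the CDC
 * iMACAddress string descriptor expects.
 */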

void gether_get_host_addr_u8(struct net_device *net, u8 host_mac[ETH_ALEN])
{
	struct eth_dev *dev;

	dev = netdev_priv(net);
	memcpy(host_mac, dev->host_mac, ETH_ALEN);
}
EXPORT_SYMBOL_GPL(gether_get_host_addr_u8);

void gether_set_qmult(struct net_device *net, unsigned qmult)
{
	struct eth_dev *dev;

	dev = netdev_priv(net);
	dev->qmult = qmult;
}
EXPORT_SYMBOL_GPL(gether_set_qmult);

unsigned gether_get_qmult(struct net_device *net)
{
	struct eth_dev *dev;

	dev = netdev_priv(net);
	return dev->qmult;
}
EXPORT_SYMBOL_GPL(gether_get_qmult);

int gether_get_ifname(struct net_device *net, char *name, int len)
{
	rtnl_lock();
	strlcpy(name, netdev_name(net), len);
	rtnl_unlock();
	return strlen(name);
}
EXPORT_SYMBOL_GPL(gether_get_ifname);

/**
 * gether_cleanup - remove Ethernet-over-USB device
 * @dev: device created by gether_setup() or a sibling
 * Context: may sleep
 *
 * This is called to free all resources allocated by gether_setup().
 */
void gether_cleanup(struct eth_dev *dev)
{
	if (!dev)
		return;

	unregister_netdev(dev->net);
	flush_work(&dev->work);
	free_netdev(dev->net);
}
EXPORT_SYMBOL_GPL(gether_cleanup);

/**
 * gether_connect - notify network layer that USB link is active
 * @link: the USB link, set up with endpoints, descriptors matching
 *	current device speed, and any framing wrapper(s) set up.
 * Context: irqs blocked
 *
 * This is called to activate endpoints and let the network layer know
 * the connection is active ("carrier detect").  It may cause the I/O
 * queues to open and start letting network packets flow, but will in
 * any case activate the endpoints so that they respond properly to the
 * USB host.
 *
 * Verify the returned net_device pointer using IS_ERR().  Unless it
 * indicates some error code (negative errno), the ep->driver_data
 * values have been overwritten.
 */
struct net_device *gether_connect(struct gether *link)
{
	struct eth_dev		*dev = link->ioport;
	int			result = 0;

	if (!dev)
		return ERR_PTR(-EINVAL);

	link->in_ep->driver_data = dev;
	result = usb_ep_enable(link->in_ep);
	if (result != 0) {
		DBG(dev, "enable %s --> %d\n",
			link->in_ep->name, result);
		goto fail0;
	}

	link->out_ep->driver_data = dev;
	result = usb_ep_enable(link->out_ep);
	if (result != 0) {
		DBG(dev, "enable %s --> %d\n",
			link->out_ep->name, result);
		goto fail1;
	}

	if (result == 0)
		result = alloc_requests(dev, link, qlen(dev->gadget,
					dev->qmult));

	if (result == 0) {
		dev->zlp = link->is_zlp_ok;
		DBG(dev, "qlen %d\n", qlen(dev->gadget, dev->qmult));

		dev->header_len = link->header_len;
		dev->unwrap = link->unwrap;
		dev->wrap = link->wrap;

		spin_lock(&dev->lock);
		dev->port_usb = link;
		if (netif_running(dev->net)) {
			if (link->open)
				link->open(link);
		} else {
			if (link->close)
				link->close(link);
		}
		spin_unlock(&dev->lock);

		netif_carrier_on(dev->net);
		if (netif_running(dev->net))
			eth_start(dev, GFP_ATOMIC);

	/* on error, disable any endpoints  */
	} else {
		(void) usb_ep_disable(link->out_ep);
fail1:
		(void) usb_ep_disable(link->in_ep);
	}
fail0:
	/* caller is responsible for cleanup on error */
	if (result < 0)
		return ERR_PTR(result);
	return dev->net;
}
EXPORT_SYMBOL_GPL(gether_connect);

/**
 * gether_disconnect - notify network layer that USB link is inactive
 * @link: the USB link, on which gether_connect() was called
 * Context: irqs blocked
 *
 * This is called to deactivate endpoints and let the network layer know
 * the connection went inactive ("no carrier").
 *
 * On return, the state is as if gether_connect() had never been called.
 * The endpoints are inactive, and accordingly without active USB I/O.
 * Pointers to endpoint descriptors and endpoint private data are nulled.
 */
void gether_disconnect(struct gether *link)
{
	struct eth_dev		*dev = link->ioport;
	struct usb_request	*req;

	WARN_ON(!dev);
	if (!dev)
		return;

	DBG(dev, "%s\n", __func__);

	netif_stop_queue(dev->net);
	netif_carrier_off(dev->net);

	/* disable endpoints, forcing (synchronous) completion
	 * of all pending i/o.  then free the request objects
	 * and forget about the endpoints.
	 */
	usb_ep_disable(link->in_ep);
	spin_lock(&dev->req_lock);
	while (!list_empty(&dev->tx_reqs)) {
		req = container_of(dev->tx_reqs.next,
					struct usb_request, list);
		list_del(&req->list);

		spin_unlock(&dev->req_lock);
		usb_ep_free_request(link->in_ep, req);
		spin_lock(&dev->req_lock);
	}
	spin_unlock(&dev->req_lock);
	link->in_ep->desc = NULL;

	usb_ep_disable(link->out_ep);
	spin_lock(&dev->req_lock);
	while (!list_empty(&dev->rx_reqs)) {
		req = container_of(dev->rx_reqs.next,
					struct usb_request, list);
		list_del(&req->list);

		spin_unlock(&dev->req_lock);
		usb_ep_free_request(link->out_ep, req);
		spin_lock(&dev->req_lock);
	}
	spin_unlock(&dev->req_lock);
	link->out_ep->desc = NULL;

	/* finish forgetting about this USB link episode */
	dev->header_len = 0;
	dev->unwrap = NULL;
	dev->wrap = NULL;

	spin_lock(&dev->lock);
	dev->port_usb = NULL;
	spin_unlock(&dev->lock);
}
EXPORT_SYMBOL_GPL(gether_disconnect);
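
/*
 * Taken together, a function driver's alternate-setting code typically
 * looks something like this sketch (hypothetical names, error handling
 * trimmed):
 *
 *	static int demo_set_alt(struct usb_function *f,
 *			unsigned intf, unsigned alt)
 *	{
 *		struct net_device *net;
 *
 *		...choose descriptors for the current speed...
 *		net = gether_connect(&demo->port);
 *		return IS_ERR(net) ? PTR_ERR(net) : 0;
 *	}
 *
 *	static void demo_disable(struct usb_function *f)
 *	{
 *		gether_disconnect(&demo->port);
 *	}
 */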

MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Brownell");