/*
 * u_ether.c -- Ethernet-over-USB link layer utilities for Gadget stack
 *
 * Copyright (C) 2003-2005,2008 David Brownell
 * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
 * Copyright (C) 2008 Nokia Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

/* #define VERBOSE_DEBUG */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/device.h>
#include <linux/ctype.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>

#include "u_ether.h"


/*
 * This component encapsulates the Ethernet link glue needed to provide
 * one (!) network link through the USB gadget stack, normally "usb0".
 *
 * The control and data models are handled by the function driver which
 * connects to this code; such as CDC Ethernet (ECM or EEM),
 * "CDC Subset", or RNDIS.  That includes all descriptor and endpoint
 * management.
 *
 * Link level addressing is handled by this component using module
 * parameters; if no such parameters are provided, random link level
 * addresses are used.  Each end of the link uses one address.  The
 * host end address is exported in various ways, and is often recorded
 * in configuration databases.
 *
 * The driver which assembles each configuration using such a link is
 * responsible for ensuring that each configuration includes at most one
 * instance of this network link.  (The network layer provides ways for
 * this single "physical" link to be used by multiple virtual links.)
 */
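
/* For illustration only: a minimal sketch of the call flow a function
 * driver is expected to use (the "link" variable and the qmult value 5
 * are example assumptions; real users include the ECM, EEM, NCM, and
 * RNDIS functions).  Error handling is elided.
 *
 *	struct eth_dev *edev;
 *	struct net_device *net;
 *	u8 host_mac[ETH_ALEN];
 *
 *	edev = gether_setup_name(gadget, NULL, NULL, host_mac, 5, "usb");
 *	...
 *	net = gether_connect(link);	(endpoints enabled; carrier goes on)
 *	...
 *	gether_disconnect(link);	(carrier off, pending I/O completes)
 *	gether_cleanup(edev);		(at unbind time)
 */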

#define UETH__VERSION	"29-May-2008"

/* Experiments show that both Linux and Windows hosts allow up to 16k
 * frame sizes. Set the max size to 15k+52 to prevent allocating 32k
 * blocks and still have efficient handling. */
#define GETHER_MAX_ETH_FRAME_LEN 15412

struct eth_dev {
	/* lock is held while accessing port_usb
	 */
	spinlock_t		lock;
	struct gether		*port_usb;

	struct net_device	*net;
	struct usb_gadget	*gadget;

	spinlock_t		req_lock;	/* guard {rx,tx}_reqs */
	struct list_head	tx_reqs, rx_reqs;
	atomic_t		tx_qlen;

	struct sk_buff_head	rx_frames;

	unsigned		qmult;

	unsigned		header_len;
	struct sk_buff		*(*wrap)(struct gether *, struct sk_buff *skb);
	int			(*unwrap)(struct gether *,
						struct sk_buff *skb,
						struct sk_buff_head *list);

	struct work_struct	work;

	unsigned long		todo;
#define	WORK_RX_MEMORY		0

	bool			zlp;
	u8			host_mac[ETH_ALEN];
	u8			dev_mac[ETH_ALEN];
};

/*-------------------------------------------------------------------------*/

#define RX_EXTRA	20	/* bytes guarding against rx overflows */

#define DEFAULT_QLEN	2	/* double buffering by default */

/* for dual-speed hardware, use deeper queues at high/super speed */
static inline int qlen(struct usb_gadget *gadget, unsigned qmult)
{
	if (gadget_is_dualspeed(gadget) && (gadget->speed == USB_SPEED_HIGH ||
					    gadget->speed == USB_SPEED_SUPER))
		return qmult * DEFAULT_QLEN;
	else
		return DEFAULT_QLEN;
}
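
/* e.g. with the example qmult of 5 above: a high or super speed link
 * keeps up to 5 * DEFAULT_QLEN = 10 requests in flight per direction,
 * while a full speed link stays at the double-buffered minimum of 2.
 */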

/*-------------------------------------------------------------------------*/

/* REVISIT there must be a better way than having two sets
 * of debug calls ...
 */

#undef DBG
#undef VDBG
#undef ERROR
#undef INFO

#define xprintk(d, level, fmt, args...) \
	printk(level "%s: " fmt , (d)->net->name , ## args)

#ifdef DEBUG
#undef DEBUG
#define DBG(dev, fmt, args...) \
	xprintk(dev , KERN_DEBUG , fmt , ## args)
#else
#define DBG(dev, fmt, args...) \
	do { } while (0)
#endif /* DEBUG */

#ifdef VERBOSE_DEBUG
#define VDBG	DBG
#else
#define VDBG(dev, fmt, args...) \
	do { } while (0)
#endif /* VERBOSE_DEBUG */

#define ERROR(dev, fmt, args...) \
	xprintk(dev , KERN_ERR , fmt , ## args)
#define INFO(dev, fmt, args...) \
	xprintk(dev , KERN_INFO , fmt , ## args)

/*-------------------------------------------------------------------------*/

/* NETWORK DRIVER HOOKUP (to the layer above this driver) */

static int ueth_change_mtu(struct net_device *net, int new_mtu)
{
	struct eth_dev	*dev = netdev_priv(net);
	unsigned long	flags;
	int		status = 0;

	/* don't change MTU on "live" link (peer won't know) */
	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb)
		status = -EBUSY;
	else if (new_mtu <= ETH_HLEN || new_mtu > GETHER_MAX_ETH_FRAME_LEN)
		status = -ERANGE;
	else
		net->mtu = new_mtu;
	spin_unlock_irqrestore(&dev->lock, flags);

	return status;
}

static void eth_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *p)
{
	struct eth_dev *dev = netdev_priv(net);

	strlcpy(p->driver, "g_ether", sizeof(p->driver));
	strlcpy(p->version, UETH__VERSION, sizeof(p->version));
	strlcpy(p->fw_version, dev->gadget->name, sizeof(p->fw_version));
	strlcpy(p->bus_info, dev_name(&dev->gadget->dev), sizeof(p->bus_info));
}

/* REVISIT can also support:
 *   - WOL (by tracking suspends and issuing remote wakeup)
 *   - msglevel (implies updated messaging)
 *   - ... probably more ethtool ops
 */

static const struct ethtool_ops ops = {
	.get_drvinfo = eth_get_drvinfo,
	.get_link = ethtool_op_get_link,
};

static void defer_kevent(struct eth_dev *dev, int flag)
{
	if (test_and_set_bit(flag, &dev->todo))
		return;
	if (!schedule_work(&dev->work))
		ERROR(dev, "kevent %d may have been dropped\n", flag);
	else
		DBG(dev, "kevent %d scheduled\n", flag);
}

static void rx_complete(struct usb_ep *ep, struct usb_request *req);

static int
rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags)
{
	struct sk_buff	*skb;
	int		retval = -ENOMEM;
	size_t		size = 0;
	struct usb_ep	*out;
	unsigned long	flags;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb)
		out = dev->port_usb->out_ep;
	else
		out = NULL;
	spin_unlock_irqrestore(&dev->lock, flags);

	if (!out)
		return -ENOTCONN;

	/* Padding up to RX_EXTRA handles minor disagreements with host.
	 * Normally we use the USB "terminate on short read" convention;
	 * so allow up to (N*maxpacket), since that memory is normally
	 * already allocated.  Some hardware doesn't deal well with short
	 * reads (e.g. DMA must be N*maxpacket), so for now don't trim a
	 * byte off the end (to force hardware errors on overflow).
	 *
	 * RNDIS uses internal framing, and explicitly allows senders to
	 * pad to end-of-packet.  That's potentially nice for speed, but
	 * means receivers can't recover lost synch on their own (because
	 * new packets don't only start after a short RX).
	 */
	size += sizeof(struct ethhdr) + dev->net->mtu + RX_EXTRA;
	size += dev->port_usb->header_len;
	size += out->maxpacket - 1;
	size -= size % out->maxpacket;

	if (dev->port_usb->is_fixed)
		size = max_t(size_t, size, dev->port_usb->fixed_out_len);
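	/* Worked example (illustrative numbers): a 1500 byte MTU with no
	 * function header and a 512 byte high speed bulk maxpacket gives
	 * 14 (ethhdr) + 1500 + 20 (RX_EXTRA) = 1534, rounded up to the
	 * next maxpacket multiple: a 1536 byte rx buffer.
	 */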

	skb = alloc_skb(size + NET_IP_ALIGN, gfp_flags);
	if (skb == NULL) {
		DBG(dev, "no rx skb\n");
		goto enomem;
	}

	/* Some platforms perform better when IP packets are aligned,
	 * but on at least one, checksumming fails otherwise.  Note:
	 * RNDIS headers involve variable numbers of LE32 values.
	 */
	skb_reserve(skb, NET_IP_ALIGN);

	req->buf = skb->data;
	req->length = size;
	req->complete = rx_complete;
	req->context = skb;

	retval = usb_ep_queue(out, req, gfp_flags);
	if (retval == -ENOMEM)
enomem:
		defer_kevent(dev, WORK_RX_MEMORY);
	if (retval) {
		DBG(dev, "rx submit --> %d\n", retval);
		if (skb)
			dev_kfree_skb_any(skb);
		spin_lock_irqsave(&dev->req_lock, flags);
		list_add(&req->list, &dev->rx_reqs);
		spin_unlock_irqrestore(&dev->req_lock, flags);
	}
	return retval;
}

static void rx_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct sk_buff	*skb = req->context, *skb2;
	struct eth_dev	*dev = ep->driver_data;
	int		status = req->status;

	switch (status) {

	/* normal completion */
	case 0:
		skb_put(skb, req->actual);

		if (dev->unwrap) {
			unsigned long	flags;

			spin_lock_irqsave(&dev->lock, flags);
			if (dev->port_usb) {
				status = dev->unwrap(dev->port_usb,
							skb,
							&dev->rx_frames);
			} else {
				dev_kfree_skb_any(skb);
				status = -ENOTCONN;
			}
			spin_unlock_irqrestore(&dev->lock, flags);
		} else {
			skb_queue_tail(&dev->rx_frames, skb);
		}
		skb = NULL;

		skb2 = skb_dequeue(&dev->rx_frames);
		while (skb2) {
			if (status < 0
					|| ETH_HLEN > skb2->len
					|| skb2->len > GETHER_MAX_ETH_FRAME_LEN) {
				dev->net->stats.rx_errors++;
				dev->net->stats.rx_length_errors++;
				DBG(dev, "rx length %d\n", skb2->len);
				dev_kfree_skb_any(skb2);
				goto next_frame;
			}
			skb2->protocol = eth_type_trans(skb2, dev->net);
			dev->net->stats.rx_packets++;
			dev->net->stats.rx_bytes += skb2->len;

			/* no buffer copies needed, unless hardware can't
			 * use skb buffers.
			 */
			status = netif_rx(skb2);
next_frame:
			skb2 = skb_dequeue(&dev->rx_frames);
		}
		break;

	/* software-driven interface shutdown */
	case -ECONNRESET:		/* unlink */
	case -ESHUTDOWN:		/* disconnect etc */
		VDBG(dev, "rx shutdown, code %d\n", status);
		goto quiesce;

	/* for hardware automagic (such as pxa) */
	case -ECONNABORTED:		/* endpoint reset */
		DBG(dev, "rx %s reset\n", ep->name);
		defer_kevent(dev, WORK_RX_MEMORY);
quiesce:
		dev_kfree_skb_any(skb);
		goto clean;

	/* data overrun */
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		/* FALLTHROUGH */

	default:
		dev->net->stats.rx_errors++;
		DBG(dev, "rx status %d\n", status);
		break;
	}

	if (skb)
		dev_kfree_skb_any(skb);
	if (!netif_running(dev->net)) {
clean:
		spin_lock(&dev->req_lock);
		list_add(&req->list, &dev->rx_reqs);
		spin_unlock(&dev->req_lock);
		req = NULL;
	}
	if (req)
		rx_submit(dev, req, GFP_ATOMIC);
}

static int prealloc(struct list_head *list, struct usb_ep *ep, unsigned n)
{
	unsigned		i;
	struct usb_request	*req;

	if (!n)
		return -ENOMEM;

	/* queue/recycle up to N requests */
	i = n;
	list_for_each_entry(req, list, list) {
		if (i-- == 0)
			goto extra;
	}
	while (i--) {
		req = usb_ep_alloc_request(ep, GFP_ATOMIC);
		if (!req)
			return list_empty(list) ? -ENOMEM : 0;
		list_add(&req->list, list);
	}
	return 0;

extra:
	/* free extras */
	for (;;) {
		struct list_head	*next;

		next = req->list.next;
		list_del(&req->list);
		usb_ep_free_request(ep, req);

		if (next == list)
			break;

		req = container_of(next, struct usb_request, list);
	}
	return 0;
}
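
/* Note: prealloc() converges the list on exactly n requests, topping up
 * a short list with fresh allocations and trimming any surplus, so that
 * repeated (re)connects at different speeds end up with the right
 * queue depth.
 */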

static int alloc_requests(struct eth_dev *dev, struct gether *link, unsigned n)
{
	int	status;

	spin_lock(&dev->req_lock);
	status = prealloc(&dev->tx_reqs, link->in_ep, n);
	if (status < 0)
		goto fail;
	status = prealloc(&dev->rx_reqs, link->out_ep, n);
	if (status < 0)
		goto fail;
	goto done;
fail:
	DBG(dev, "can't alloc requests\n");
done:
	spin_unlock(&dev->req_lock);
	return status;
}

static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags)
{
	struct usb_request	*req;
	unsigned long		flags;

	/* fill unused rxq slots with some skb */
	spin_lock_irqsave(&dev->req_lock, flags);
	while (!list_empty(&dev->rx_reqs)) {
		req = container_of(dev->rx_reqs.next,
				struct usb_request, list);
		list_del_init(&req->list);
		spin_unlock_irqrestore(&dev->req_lock, flags);

		if (rx_submit(dev, req, gfp_flags) < 0) {
			defer_kevent(dev, WORK_RX_MEMORY);
			return;
		}

		spin_lock_irqsave(&dev->req_lock, flags);
	}
	spin_unlock_irqrestore(&dev->req_lock, flags);
}

static void eth_work(struct work_struct *work)
{
	struct eth_dev	*dev = container_of(work, struct eth_dev, work);

	if (test_and_clear_bit(WORK_RX_MEMORY, &dev->todo)) {
		if (netif_running(dev->net))
			rx_fill(dev, GFP_KERNEL);
	}

	if (dev->todo)
		DBG(dev, "work done, flags = 0x%lx\n", dev->todo);
}

static void tx_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct sk_buff	*skb = req->context;
	struct eth_dev	*dev = ep->driver_data;

	switch (req->status) {
	default:
		dev->net->stats.tx_errors++;
		VDBG(dev, "tx err %d\n", req->status);
		/* FALLTHROUGH */
	case -ECONNRESET:		/* unlink */
	case -ESHUTDOWN:		/* disconnect etc */
		break;
	case 0:
		dev->net->stats.tx_bytes += skb->len;
	}
	dev->net->stats.tx_packets++;

	spin_lock(&dev->req_lock);
	list_add(&req->list, &dev->tx_reqs);
	spin_unlock(&dev->req_lock);
	dev_kfree_skb_any(skb);

	atomic_dec(&dev->tx_qlen);
	if (netif_carrier_ok(dev->net))
		netif_wake_queue(dev->net);
}

static inline int is_promisc(u16 cdc_filter)
{
	return cdc_filter & USB_CDC_PACKET_TYPE_PROMISCUOUS;
}

static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
					struct net_device *net)
{
	struct eth_dev		*dev = netdev_priv(net);
	int			length = 0;
	int			retval;
	struct usb_request	*req = NULL;
	unsigned long		flags;
	struct usb_ep		*in;
	u16			cdc_filter;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb) {
		in = dev->port_usb->in_ep;
		cdc_filter = dev->port_usb->cdc_filter;
	} else {
		in = NULL;
		cdc_filter = 0;
	}
	spin_unlock_irqrestore(&dev->lock, flags);

	if (skb && !in) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* apply outgoing CDC or RNDIS filters */
	if (skb && !is_promisc(cdc_filter)) {
		u8		*dest = skb->data;

		if (is_multicast_ether_addr(dest)) {
			u16	type;

			/* ignores USB_CDC_PACKET_TYPE_MULTICAST and host
			 * SET_ETHERNET_MULTICAST_FILTERS requests
			 */
			if (is_broadcast_ether_addr(dest))
				type = USB_CDC_PACKET_TYPE_BROADCAST;
			else
				type = USB_CDC_PACKET_TYPE_ALL_MULTICAST;
			if (!(cdc_filter & type)) {
				dev_kfree_skb_any(skb);
				return NETDEV_TX_OK;
			}
		}
		/* ignores USB_CDC_PACKET_TYPE_DIRECTED */
	}

	spin_lock_irqsave(&dev->req_lock, flags);
	/*
	 * this freelist can be empty if an interrupt triggered disconnect()
	 * and reconfigured the gadget (shutting down this queue) after the
	 * network stack decided to xmit but before we got the spinlock.
	 */
	if (list_empty(&dev->tx_reqs)) {
		spin_unlock_irqrestore(&dev->req_lock, flags);
		return NETDEV_TX_BUSY;
	}

	req = container_of(dev->tx_reqs.next, struct usb_request, list);
	list_del(&req->list);

	/* temporarily stop TX queue when the freelist empties */
	if (list_empty(&dev->tx_reqs))
		netif_stop_queue(net);
	spin_unlock_irqrestore(&dev->req_lock, flags);

	/* no buffer copies needed, unless the network stack did it, or the
	 * hardware can't use skb buffers, or there's not enough space for
	 * any extra headers we need
	 */
	if (dev->wrap) {
		unsigned long	flags;

		spin_lock_irqsave(&dev->lock, flags);
		if (dev->port_usb)
			skb = dev->wrap(dev->port_usb, skb);
		spin_unlock_irqrestore(&dev->lock, flags);
		if (!skb) {
			/* Multi-frame CDC protocols may store the frame for
			 * later, in which case it has not been dropped.
			 */
			if (dev->port_usb->supports_multi_frame)
				goto multiframe;
			goto drop;
		}
	}

	length = skb->len;
	req->buf = skb->data;
	req->context = skb;
	req->complete = tx_complete;

	/* NCM requires no zlp if transfer is dwNtbInMaxSize */
	if (dev->port_usb->is_fixed &&
	    length == dev->port_usb->fixed_in_len &&
	    (length % in->maxpacket) == 0)
		req->zero = 0;
	else
		req->zero = 1;

	/* use zlp framing on tx for strict CDC-Ether conformance,
	 * though any robust network rx path ignores extra padding.
	 * Some hardware doesn't like to write zlps, so instead pad
	 * such a transfer out by one byte; either way the host sees
	 * a short packet ending the transfer.
	 */
	if (req->zero && !dev->zlp && (length % in->maxpacket) == 0)
		length++;
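	/* e.g. a 1024 byte frame on a 512 byte bulk-in endpoint, with a
	 * UDC that can't send zlps: queue 1025 bytes, so the final USB
	 * packet is one byte long instead of a zero length packet.
	 */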

	req->length = length;

	/* throttle high/super speed IRQ rate back slightly */
	if (gadget_is_dualspeed(dev->gadget))
		req->no_interrupt = (dev->gadget->speed == USB_SPEED_HIGH ||
				     dev->gadget->speed == USB_SPEED_SUPER)
			? ((atomic_read(&dev->tx_qlen) % dev->qmult) != 0)
			: 0;

	retval = usb_ep_queue(in, req, GFP_ATOMIC);
	switch (retval) {
	default:
		DBG(dev, "tx queue err %d\n", retval);
		break;
	case 0:
		net->trans_start = jiffies;
		atomic_inc(&dev->tx_qlen);
	}

	if (retval) {
		dev_kfree_skb_any(skb);
drop:
		dev->net->stats.tx_dropped++;
multiframe:
		spin_lock_irqsave(&dev->req_lock, flags);
		if (list_empty(&dev->tx_reqs))
			netif_start_queue(net);
		list_add(&req->list, &dev->tx_reqs);
		spin_unlock_irqrestore(&dev->req_lock, flags);
	}
	return NETDEV_TX_OK;
}

/*-------------------------------------------------------------------------*/

static void eth_start(struct eth_dev *dev, gfp_t gfp_flags)
{
	DBG(dev, "%s\n", __func__);

	/* fill the rx queue */
	rx_fill(dev, gfp_flags);

	/* and open the tx floodgates */
	atomic_set(&dev->tx_qlen, 0);
	netif_wake_queue(dev->net);
}

static int eth_open(struct net_device *net)
{
	struct eth_dev	*dev = netdev_priv(net);
	struct gether	*link;

	DBG(dev, "%s\n", __func__);
	if (netif_carrier_ok(dev->net))
		eth_start(dev, GFP_KERNEL);

	spin_lock_irq(&dev->lock);
	link = dev->port_usb;
	if (link && link->open)
		link->open(link);
	spin_unlock_irq(&dev->lock);

	return 0;
}

static int eth_stop(struct net_device *net)
{
	struct eth_dev	*dev = netdev_priv(net);
	unsigned long	flags;

	VDBG(dev, "%s\n", __func__);
	netif_stop_queue(net);

	DBG(dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld\n",
		dev->net->stats.rx_packets, dev->net->stats.tx_packets,
		dev->net->stats.rx_errors, dev->net->stats.tx_errors
		);

	/* ensure there are no more active requests */
	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb) {
		struct gether	*link = dev->port_usb;
		const struct usb_endpoint_descriptor *in;
		const struct usb_endpoint_descriptor *out;

		if (link->close)
			link->close(link);

		/* NOTE:  we have no abort-queue primitive we could use
		 * to cancel all pending I/O.  Instead, we disable then
		 * reenable the endpoints ... this idiom may leave toggle
		 * wrong, but that's a self-correcting error.
		 *
		 * REVISIT:  we *COULD* just let the transfers complete at
		 * their own pace; the network stack can handle old packets.
		 * For the moment we leave this here, since it works.
		 */
		in = link->in_ep->desc;
		out = link->out_ep->desc;
		usb_ep_disable(link->in_ep);
		usb_ep_disable(link->out_ep);
		if (netif_carrier_ok(net)) {
			DBG(dev, "host still using in/out endpoints\n");
			link->in_ep->desc = in;
			link->out_ep->desc = out;
			usb_ep_enable(link->in_ep);
			usb_ep_enable(link->out_ep);
		}
	}
	spin_unlock_irqrestore(&dev->lock, flags);

	return 0;
}

/*-------------------------------------------------------------------------*/

static int get_ether_addr(const char *str, u8 *dev_addr)
{
	if (str) {
		unsigned	i;

		for (i = 0; i < 6; i++) {
			unsigned char num;

			if ((*str == '.') || (*str == ':'))
				str++;
			num = hex_to_bin(*str++) << 4;
			num |= hex_to_bin(*str++);
			dev_addr[i] = num;
		}
		if (is_valid_ether_addr(dev_addr))
			return 0;
	}
	eth_random_addr(dev_addr);
	return 1;
}
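
/* Accepted address formats for the string above: "01:23:45:67:89:ab",
 * "01.23.45.67.89.ab", or bare "0123456789ab".  A NULL string, or one
 * that parses to an invalid (zero or multicast) address, falls back to
 * a random address, and the function returns 1 to report that.
 */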

static int get_ether_addr_str(u8 dev_addr[ETH_ALEN], char *str, int len)
{
	if (len < 18)
		return -EINVAL;

	snprintf(str, len, "%pM", dev_addr);
	return 18;
}
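
/* The 18 bytes above are the 17 character "xx:xx:xx:xx:xx:xx" form that
 * "%pM" prints, plus its terminating NUL.
 */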

static const struct net_device_ops eth_netdev_ops = {
	.ndo_open		= eth_open,
	.ndo_stop		= eth_stop,
	.ndo_start_xmit		= eth_start_xmit,
	.ndo_change_mtu		= ueth_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

static struct device_type gadget_type = {
	.name	= "gadget",
};

/**
 * gether_setup_name - initialize one ethernet-over-usb link
 * @g: gadget to associate with this link
 * @dev_addr: NULL, or a string giving the link-level address of the
 *	device side of the link
 * @host_addr: NULL, or a string giving the link-level address of the
 *	host side of the link
 * @ethaddr: NULL, or a buffer in which the ethernet address of the
 *	host side of the link is recorded
 * @qmult: queue length multiplier for high/super speed links
 * @netname: name for network device (for example, "usb")
 * Context: may sleep
 *
 * This sets up the single network link that may be exported by a
 * gadget driver using this framework.  The link layer addresses are
 * set up using module parameters.
 *
 * Returns an eth_dev pointer on success, or an ERR_PTR on failure.
 */
struct eth_dev *gether_setup_name(struct usb_gadget *g,
		const char *dev_addr, const char *host_addr,
		u8 ethaddr[ETH_ALEN], unsigned qmult, const char *netname)
{
	struct eth_dev		*dev;
	struct net_device	*net;
	int			status;

	net = alloc_etherdev(sizeof *dev);
	if (!net)
		return ERR_PTR(-ENOMEM);

	dev = netdev_priv(net);
	spin_lock_init(&dev->lock);
	spin_lock_init(&dev->req_lock);
	INIT_WORK(&dev->work, eth_work);
	INIT_LIST_HEAD(&dev->tx_reqs);
	INIT_LIST_HEAD(&dev->rx_reqs);

	skb_queue_head_init(&dev->rx_frames);

	/* network device setup */
	dev->net = net;
	dev->qmult = qmult;
	snprintf(net->name, sizeof(net->name), "%s%%d", netname);

	if (get_ether_addr(dev_addr, net->dev_addr))
		dev_warn(&g->dev,
			"using random %s ethernet address\n", "self");
	if (get_ether_addr(host_addr, dev->host_mac))
		dev_warn(&g->dev,
			"using random %s ethernet address\n", "host");

	if (ethaddr)
		memcpy(ethaddr, dev->host_mac, ETH_ALEN);

	net->netdev_ops = &eth_netdev_ops;

	net->ethtool_ops = &ops;

	dev->gadget = g;
	SET_NETDEV_DEV(net, &g->dev);
	SET_NETDEV_DEVTYPE(net, &gadget_type);

	status = register_netdev(net);
	if (status < 0) {
		dev_dbg(&g->dev, "register_netdev failed, %d\n", status);
		free_netdev(net);
		dev = ERR_PTR(status);
	} else {
		INFO(dev, "MAC %pM\n", net->dev_addr);
		INFO(dev, "HOST MAC %pM\n", dev->host_mac);

		/*
		 * two kinds of host-initiated state changes:
		 *  - iff DATA transfer is active, carrier is "on"
		 *  - tx queueing enabled if open *and* carrier is "on"
		 */
		netif_carrier_off(net);
	}

	return dev;
}
EXPORT_SYMBOL_GPL(gether_setup_name);

struct net_device *gether_setup_name_default(const char *netname)
{
	struct net_device	*net;
	struct eth_dev		*dev;

	net = alloc_etherdev(sizeof(*dev));
	if (!net)
		return ERR_PTR(-ENOMEM);

	dev = netdev_priv(net);
	spin_lock_init(&dev->lock);
	spin_lock_init(&dev->req_lock);
	INIT_WORK(&dev->work, eth_work);
	INIT_LIST_HEAD(&dev->tx_reqs);
	INIT_LIST_HEAD(&dev->rx_reqs);

	skb_queue_head_init(&dev->rx_frames);

	/* network device setup */
	dev->net = net;
	dev->qmult = QMULT_DEFAULT;
	snprintf(net->name, sizeof(net->name), "%s%%d", netname);

	eth_random_addr(dev->dev_mac);
	pr_warn("using random %s ethernet address\n", "self");
	eth_random_addr(dev->host_mac);
	pr_warn("using random %s ethernet address\n", "host");

	net->netdev_ops = &eth_netdev_ops;

	net->ethtool_ops = &ops;
	SET_NETDEV_DEVTYPE(net, &gadget_type);

	return net;
}
EXPORT_SYMBOL_GPL(gether_setup_name_default);
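
/* For illustration: the two-step flow gether_setup_name_default()
 * supports, as used by configfs-style gadgets (error handling elided):
 *
 *	net = gether_setup_name_default("usb");
 *	gether_set_gadget(net, gadget);
 *	... optionally gether_set_dev_addr()/gether_set_host_addr() ...
 *	status = gether_register_netdev(net);
 */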

int gether_register_netdev(struct net_device *net)
{
	struct eth_dev *dev;
	struct usb_gadget *g;
	struct sockaddr sa;
	int status;

	if (!net->dev.parent)
		return -EINVAL;
	dev = netdev_priv(net);
	g = dev->gadget;
	status = register_netdev(net);
	if (status < 0) {
		dev_dbg(&g->dev, "register_netdev failed, %d\n", status);
		return status;
	} else {
		INFO(dev, "HOST MAC %pM\n", dev->host_mac);

		/* two kinds of host-initiated state changes:
		 *  - iff DATA transfer is active, carrier is "on"
		 *  - tx queueing enabled if open *and* carrier is "on"
		 */
		netif_carrier_off(net);
	}
	sa.sa_family = net->type;
	memcpy(sa.sa_data, dev->dev_mac, ETH_ALEN);
	rtnl_lock();
	status = dev_set_mac_address(net, &sa);
	rtnl_unlock();
	if (status)
		pr_warn("cannot set self ethernet address: %d\n", status);
	else
		INFO(dev, "MAC %pM\n", dev->dev_mac);

	return status;
}
EXPORT_SYMBOL_GPL(gether_register_netdev);

void gether_set_gadget(struct net_device *net, struct usb_gadget *g)
{
	struct eth_dev *dev;

	dev = netdev_priv(net);
	dev->gadget = g;
	SET_NETDEV_DEV(net, &g->dev);
}
EXPORT_SYMBOL_GPL(gether_set_gadget);

int gether_set_dev_addr(struct net_device *net, const char *dev_addr)
{
	struct eth_dev *dev;
	u8 new_addr[ETH_ALEN];

	dev = netdev_priv(net);
	if (get_ether_addr(dev_addr, new_addr))
		return -EINVAL;
	memcpy(dev->dev_mac, new_addr, ETH_ALEN);
	return 0;
}
EXPORT_SYMBOL_GPL(gether_set_dev_addr);

int gether_get_dev_addr(struct net_device *net, char *dev_addr, int len)
{
	struct eth_dev *dev;

	dev = netdev_priv(net);
	return get_ether_addr_str(dev->dev_mac, dev_addr, len);
}
EXPORT_SYMBOL_GPL(gether_get_dev_addr);

int gether_set_host_addr(struct net_device *net, const char *host_addr)
{
	struct eth_dev *dev;
	u8 new_addr[ETH_ALEN];

	dev = netdev_priv(net);
	if (get_ether_addr(host_addr, new_addr))
		return -EINVAL;
	memcpy(dev->host_mac, new_addr, ETH_ALEN);
	return 0;
}
EXPORT_SYMBOL_GPL(gether_set_host_addr);

int gether_get_host_addr(struct net_device *net, char *host_addr, int len)
{
	struct eth_dev *dev;

	dev = netdev_priv(net);
	return get_ether_addr_str(dev->host_mac, host_addr, len);
}
EXPORT_SYMBOL_GPL(gether_get_host_addr);

int gether_get_host_addr_cdc(struct net_device *net, char *host_addr, int len)
{
	struct eth_dev *dev;

	if (len < 13)
		return -EINVAL;

	dev = netdev_priv(net);
	/* "%pm" prints the MAC as 12 hex digits without separators,
	 * matching the CDC iMACAddress string format (12 chars + NUL)
	 */
	snprintf(host_addr, len, "%pm", dev->host_mac);

	return strlen(host_addr);
}
EXPORT_SYMBOL_GPL(gether_get_host_addr_cdc);

void gether_get_host_addr_u8(struct net_device *net, u8 host_mac[ETH_ALEN])
{
	struct eth_dev *dev;

	dev = netdev_priv(net);
	memcpy(host_mac, dev->host_mac, ETH_ALEN);
}
EXPORT_SYMBOL_GPL(gether_get_host_addr_u8);

void gether_set_qmult(struct net_device *net, unsigned qmult)
{
	struct eth_dev *dev;

	dev = netdev_priv(net);
	dev->qmult = qmult;
}
EXPORT_SYMBOL_GPL(gether_set_qmult);

unsigned gether_get_qmult(struct net_device *net)
{
	struct eth_dev *dev;

	dev = netdev_priv(net);
	return dev->qmult;
}
EXPORT_SYMBOL_GPL(gether_get_qmult);

int gether_get_ifname(struct net_device *net, char *name, int len)
{
	rtnl_lock();
	strlcpy(name, netdev_name(net), len);
	rtnl_unlock();
	return strlen(name);
}
EXPORT_SYMBOL_GPL(gether_get_ifname);

/**
 * gether_cleanup - remove Ethernet-over-USB device
 * @dev: the eth_dev to remove, as returned by gether_setup_name()
 * Context: may sleep
 *
 * This is called to free all resources allocated by gether_setup_name().
 */
void gether_cleanup(struct eth_dev *dev)
{
	if (!dev)
		return;

	unregister_netdev(dev->net);
	flush_work(&dev->work);
	free_netdev(dev->net);
}
EXPORT_SYMBOL_GPL(gether_cleanup);

/**
 * gether_connect - notify network layer that USB link is active
 * @link: the USB link, set up with endpoints, descriptors matching
 *	current device speed, and any framing wrapper(s) set up.
 * Context: irqs blocked
 *
 * This is called to activate endpoints and let the network layer know
 * the connection is active ("carrier detect").  It may cause the I/O
 * queues to open and start letting network packets flow, but will in
 * any case activate the endpoints so that they respond properly to the
 * USB host.
 *
 * Check the returned net_device pointer with IS_ERR().  Unless it
 * holds an error code (a negative errno), the link is active and the
 * endpoints' driver_data values have been overwritten.
 */
struct net_device *gether_connect(struct gether *link)
{
	struct eth_dev		*dev = link->ioport;
	int			result = 0;

	if (!dev)
		return ERR_PTR(-EINVAL);

	link->in_ep->driver_data = dev;
	result = usb_ep_enable(link->in_ep);
	if (result != 0) {
		DBG(dev, "enable %s --> %d\n",
			link->in_ep->name, result);
		goto fail0;
	}

	link->out_ep->driver_data = dev;
	result = usb_ep_enable(link->out_ep);
	if (result != 0) {
		DBG(dev, "enable %s --> %d\n",
			link->out_ep->name, result);
		goto fail1;
	}

	if (result == 0)
		result = alloc_requests(dev, link, qlen(dev->gadget,
					dev->qmult));

	if (result == 0) {
		dev->zlp = link->is_zlp_ok;
		DBG(dev, "qlen %d\n", qlen(dev->gadget, dev->qmult));

		dev->header_len = link->header_len;
		dev->unwrap = link->unwrap;
		dev->wrap = link->wrap;

		spin_lock(&dev->lock);
		dev->port_usb = link;
		if (netif_running(dev->net)) {
			if (link->open)
				link->open(link);
		} else {
			if (link->close)
				link->close(link);
		}
		spin_unlock(&dev->lock);

		netif_carrier_on(dev->net);
		if (netif_running(dev->net))
			eth_start(dev, GFP_ATOMIC);

	/* on error, disable any endpoints */
	} else {
		(void) usb_ep_disable(link->out_ep);
fail1:
		(void) usb_ep_disable(link->in_ep);
	}
fail0:
	/* caller is responsible for cleanup on error */
	if (result < 0)
		return ERR_PTR(result);
	return dev->net;
}
EXPORT_SYMBOL_GPL(gether_connect);

/**
 * gether_disconnect - notify network layer that USB link is inactive
 * @link: the USB link, on which gether_connect() was called
 * Context: irqs blocked
 *
 * This is called to deactivate endpoints and let the network layer know
 * the connection went inactive ("no carrier").
 *
 * On return, the state is as if gether_connect() had never been called.
 * The endpoints are inactive, and accordingly without active USB I/O.
 * Pointers to endpoint descriptors and endpoint private data are nulled.
 */
void gether_disconnect(struct gether *link)
{
	struct eth_dev		*dev = link->ioport;
	struct usb_request	*req;

	WARN_ON(!dev);
	if (!dev)
		return;

	DBG(dev, "%s\n", __func__);

	netif_stop_queue(dev->net);
	netif_carrier_off(dev->net);

	/* disable endpoints, forcing (synchronous) completion
	 * of all pending i/o.  then free the request objects
	 * and forget about the endpoints.
	 */
	usb_ep_disable(link->in_ep);
	spin_lock(&dev->req_lock);
	while (!list_empty(&dev->tx_reqs)) {
		req = container_of(dev->tx_reqs.next,
					struct usb_request, list);
		list_del(&req->list);

		spin_unlock(&dev->req_lock);
		usb_ep_free_request(link->in_ep, req);
		spin_lock(&dev->req_lock);
	}
	spin_unlock(&dev->req_lock);
	link->in_ep->desc = NULL;

	usb_ep_disable(link->out_ep);
	spin_lock(&dev->req_lock);
	while (!list_empty(&dev->rx_reqs)) {
		req = container_of(dev->rx_reqs.next,
					struct usb_request, list);
		list_del(&req->list);

		spin_unlock(&dev->req_lock);
		usb_ep_free_request(link->out_ep, req);
		spin_lock(&dev->req_lock);
	}
	spin_unlock(&dev->req_lock);
	link->out_ep->desc = NULL;

	/* finish forgetting about this USB link episode */
	dev->header_len = 0;
	dev->unwrap = NULL;
	dev->wrap = NULL;

	spin_lock(&dev->lock);
	dev->port_usb = NULL;
	spin_unlock(&dev->lock);
}
EXPORT_SYMBOL_GPL(gether_disconnect);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Brownell");