1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * u_ether.c -- Ethernet-over-USB link layer utilities for Gadget stack
4  *
5  * Copyright (C) 2003-2005,2008 David Brownell
6  * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
7  * Copyright (C) 2008 Nokia Corporation
8  */
9 
10 /* #define VERBOSE_DEBUG */
11 
12 #include <linux/kernel.h>
13 #include <linux/module.h>
14 #include <linux/gfp.h>
15 #include <linux/device.h>
16 #include <linux/ctype.h>
17 #include <linux/etherdevice.h>
18 #include <linux/ethtool.h>
19 #include <linux/if_vlan.h>
20 #include <linux/usb/composite.h>
21 
22 #include "u_ether.h"
23 
24 
25 /*
26  * This component encapsulates the Ethernet link glue needed to provide
27  * one (!) network link through the USB gadget stack, normally "usb0".
28  *
29  * The control and data models are handled by the function driver which
30  * connects to this code; such as CDC Ethernet (ECM or EEM),
31  * "CDC Subset", or RNDIS.  That includes all descriptor and endpoint
32  * management.
33  *
34  * Link level addressing is handled by this component using module
35  * parameters; if no such parameters are provided, random link level
36  * addresses are used.  Each end of the link uses one address.  The
37  * host end address is exported in various ways, and is often recorded
38  * in configuration databases.
39  *
40  * The driver which assembles each configuration using such a link is
41  * responsible for ensuring that each configuration includes at most one
 * instance of this network link.  (The network layer provides ways for
43  * this single "physical" link to be used by multiple virtual links.)
44  */
45 
46 #define UETH__VERSION	"29-May-2008"
47 
48 /* Experiments show that both Linux and Windows hosts allow up to 16k
49  * frame sizes. Set the max MTU size to 15k+52 to prevent allocating 32k
50  * blocks and still have efficient handling. */
51 #define GETHER_MAX_MTU_SIZE 15412
52 #define GETHER_MAX_ETH_FRAME_LEN (GETHER_MAX_MTU_SIZE + ETH_HLEN)
53 
/* Per-link state shared between the network interface and the USB function. */
struct eth_dev {
	/* lock is held while accessing port_usb
	 */
	spinlock_t		lock;
	struct gether		*port_usb;	/* active USB link, or NULL */

	struct net_device	*net;		/* the exported interface */
	struct usb_gadget	*gadget;

	spinlock_t		req_lock;	/* guard {rx,tx}_reqs */
	struct list_head	tx_reqs, rx_reqs;	/* free usb_request pools */
	atomic_t		tx_qlen;	/* tx requests currently queued */

	struct sk_buff_head	rx_frames;	/* frames awaiting netif_rx() */

	unsigned		qmult;		/* queue depth multiplier, see qlen() */

	unsigned		header_len;	/* per-frame framing overhead, from gether */
	/* optional protocol (un)framing hooks, copied from the gether link */
	struct sk_buff		*(*wrap)(struct gether *, struct sk_buff *skb);
	int			(*unwrap)(struct gether *,
						struct sk_buff *skb,
						struct sk_buff_head *list);

	struct work_struct	work;		/* runs eth_work() */

	unsigned long		todo;		/* deferred-event bits for eth_work() */
#define	WORK_RX_MEMORY		0		/* retry rx_fill() when memory allows */

	bool			zlp;		/* link->is_zlp_ok at connect time */
	bool			no_skb_reserve;	/* skip the NET_IP_ALIGN reserve */
	bool			ifname_set;	/* name set via gether_set_ifname() */
	u8			host_mac[ETH_ALEN];	/* host-side link address */
	u8			dev_mac[ETH_ALEN];	/* device-side link address */
};
88 
89 /*-------------------------------------------------------------------------*/
90 
91 #define RX_EXTRA	20	/* bytes guarding against rx overflows */
92 
93 #define DEFAULT_QLEN	2	/* double buffering by default */
94 
95 /* for dual-speed hardware, use deeper queues at high/super speed */
96 static inline int qlen(struct usb_gadget *gadget, unsigned qmult)
97 {
98 	if (gadget_is_dualspeed(gadget) && (gadget->speed == USB_SPEED_HIGH ||
99 					    gadget->speed >= USB_SPEED_SUPER))
100 		return qmult * DEFAULT_QLEN;
101 	else
102 		return DEFAULT_QLEN;
103 }
104 
105 /*-------------------------------------------------------------------------*/
106 
107 /* NETWORK DRIVER HOOKUP (to the layer above this driver) */
108 
/* ethtool .get_drvinfo: report driver name/version, the gadget controller
 * name (in the "firmware version" slot), and the gadget device name.
 */
static void eth_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *p)
{
	struct eth_dev *dev = netdev_priv(net);

	strscpy(p->driver, "g_ether", sizeof(p->driver));
	strscpy(p->version, UETH__VERSION, sizeof(p->version));
	strscpy(p->fw_version, dev->gadget->name, sizeof(p->fw_version));
	strscpy(p->bus_info, dev_name(&dev->gadget->dev), sizeof(p->bus_info));
}
118 
119 /* REVISIT can also support:
120  *   - WOL (by tracking suspends and issuing remote wakeup)
121  *   - msglevel (implies updated messaging)
122  *   - ... probably more ethtool ops
123  */
124 
/* minimal ethtool support: driver info and link state only */
static const struct ethtool_ops ops = {
	.get_drvinfo = eth_get_drvinfo,
	.get_link = ethtool_op_get_link,
};
129 
/* Record @flag in dev->todo and schedule eth_work() to handle it; a flag
 * that is already pending is not scheduled again.
 */
static void defer_kevent(struct eth_dev *dev, int flag)
{
	if (test_and_set_bit(flag, &dev->todo))
		return;
	if (!schedule_work(&dev->work))
		ERROR(dev, "kevent %d may have been dropped\n", flag);
	else
		DBG(dev, "kevent %d scheduled\n", flag);
}
139 
140 static void rx_complete(struct usb_ep *ep, struct usb_request *req);
141 
/* Allocate a receive skb sized for one OUT transfer and queue @req on the
 * OUT endpoint.  On skb allocation failure the refill is deferred to
 * eth_work(); on any failure the request goes back onto dev->rx_reqs.
 *
 * Returns 0 on success, -ENOTCONN when the link is gone, or a negative
 * errno from allocation/queueing.
 */
static int
rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags)
{
	struct usb_gadget *g = dev->gadget;
	struct sk_buff	*skb;
	int		retval = -ENOMEM;
	size_t		size = 0;
	struct usb_ep	*out;
	unsigned long	flags;

	/* dev->lock guards port_usb; bail out if the link was torn down */
	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb)
		out = dev->port_usb->out_ep;
	else
		out = NULL;

	if (!out)
	{
		spin_unlock_irqrestore(&dev->lock, flags);
		return -ENOTCONN;
	}

	/* Padding up to RX_EXTRA handles minor disagreements with host.
	 * Normally we use the USB "terminate on short read" convention;
	 * so allow up to (N*maxpacket), since that memory is normally
	 * already allocated.  Some hardware doesn't deal well with short
	 * reads (e.g. DMA must be N*maxpacket), so for now don't trim a
	 * byte off the end (to force hardware errors on overflow).
	 *
	 * RNDIS uses internal framing, and explicitly allows senders to
	 * pad to end-of-packet.  That's potentially nice for speed, but
	 * means receivers can't recover lost synch on their own (because
	 * new packets don't only start after a short RX).
	 */
	size += sizeof(struct ethhdr) + dev->net->mtu + RX_EXTRA;
	size += dev->port_usb->header_len;

	/* some UDCs need the buffer rounded up to a maxpacket multiple */
	if (g->quirk_ep_out_aligned_size) {
		size += out->maxpacket - 1;
		size -= size % out->maxpacket;
	}

	/* fixed-size OUT transfers (e.g. NCM NTBs) need the full length */
	if (dev->port_usb->is_fixed)
		size = max_t(size_t, size, dev->port_usb->fixed_out_len);
	spin_unlock_irqrestore(&dev->lock, flags);

	skb = __netdev_alloc_skb(dev->net, size + NET_IP_ALIGN, gfp_flags);
	if (skb == NULL) {
		DBG(dev, "no rx skb\n");
		goto enomem;
	}

	/* Some platforms perform better when IP packets are aligned,
	 * but on at least one, checksumming fails otherwise.  Note:
	 * RNDIS headers involve variable numbers of LE32 values.
	 */
	if (likely(!dev->no_skb_reserve))
		skb_reserve(skb, NET_IP_ALIGN);

	req->buf = skb->data;
	req->length = size;
	req->complete = rx_complete;
	req->context = skb;

	retval = usb_ep_queue(out, req, gfp_flags);
	if (retval == -ENOMEM)
/* skb is NULL when we arrive here from the failed allocation above */
enomem:
		defer_kevent(dev, WORK_RX_MEMORY);
	if (retval) {
		DBG(dev, "rx submit --> %d\n", retval);
		if (skb)
			dev_kfree_skb_any(skb);
		spin_lock_irqsave(&dev->req_lock, flags);
		list_add(&req->list, &dev->rx_reqs);
		spin_unlock_irqrestore(&dev->req_lock, flags);
	}
	return retval;
}
220 
/* Completion handler for OUT (rx) transfers: deliver received frames to
 * the network stack and resubmit the request, or park the request on
 * dev->rx_reqs when the interface is down or the transfer was aborted.
 */
static void rx_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct sk_buff	*skb = req->context, *skb2;
	struct eth_dev	*dev = ep->driver_data;
	int		status = req->status;

	switch (status) {

	/* normal completion */
	case 0:
		skb_put(skb, req->actual);

		/* protocols with their own framing (e.g. RNDIS) may unwrap
		 * zero, one, or several ethernet frames from one transfer
		 */
		if (dev->unwrap) {
			unsigned long	flags;

			spin_lock_irqsave(&dev->lock, flags);
			if (dev->port_usb) {
				status = dev->unwrap(dev->port_usb,
							skb,
							&dev->rx_frames);
			} else {
				dev_kfree_skb_any(skb);
				status = -ENOTCONN;
			}
			spin_unlock_irqrestore(&dev->lock, flags);
		} else {
			skb_queue_tail(&dev->rx_frames, skb);
		}
		/* ownership passed to rx_frames (or already freed above) */
		skb = NULL;

		skb2 = skb_dequeue(&dev->rx_frames);
		while (skb2) {
			/* drop frames with impossible ethernet lengths */
			if (status < 0
					|| ETH_HLEN > skb2->len
					|| skb2->len > GETHER_MAX_ETH_FRAME_LEN) {
				dev->net->stats.rx_errors++;
				dev->net->stats.rx_length_errors++;
				DBG(dev, "rx length %d\n", skb2->len);
				dev_kfree_skb_any(skb2);
				goto next_frame;
			}
			skb2->protocol = eth_type_trans(skb2, dev->net);
			dev->net->stats.rx_packets++;
			dev->net->stats.rx_bytes += skb2->len;

			/* no buffer copies needed, unless hardware can't
			 * use skb buffers.
			 */
			status = netif_rx(skb2);
next_frame:
			skb2 = skb_dequeue(&dev->rx_frames);
		}
		break;

	/* software-driven interface shutdown */
	case -ECONNRESET:		/* unlink */
	case -ESHUTDOWN:		/* disconnect etc */
		VDBG(dev, "rx shutdown, code %d\n", status);
		goto quiesce;

	/* for hardware automagic (such as pxa) */
	case -ECONNABORTED:		/* endpoint reset */
		DBG(dev, "rx %s reset\n", ep->name);
		defer_kevent(dev, WORK_RX_MEMORY);
quiesce:
		dev_kfree_skb_any(skb);
		goto clean;

	/* data overrun */
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		fallthrough;

	default:
		dev->net->stats.rx_errors++;
		DBG(dev, "rx status %d\n", status);
		break;
	}

	if (skb)
		dev_kfree_skb_any(skb);
	/* interface down (or aborted transfer): keep the request parked */
	if (!netif_running(dev->net)) {
clean:
		spin_lock(&dev->req_lock);
		list_add(&req->list, &dev->rx_reqs);
		spin_unlock(&dev->req_lock);
		req = NULL;
	}
	/* otherwise rearm it right away */
	if (req)
		rx_submit(dev, req, GFP_ATOMIC);
}
312 
/* Adjust @list to hold @n free usb_requests for @ep: allocate the
 * shortfall, or free the excess left over from a previously deeper
 * queue.  Called with dev->req_lock held (see alloc_requests()).
 *
 * Returns 0 on success; a partial allocation still counts as success
 * as long as the list is not empty.  Returns -ENOMEM when @n is zero
 * or nothing at all could be allocated.
 */
static int prealloc(struct list_head *list, struct usb_ep *ep, unsigned n)
{
	unsigned		i;
	struct usb_request	*req;

	if (!n)
		return -ENOMEM;

	/* queue/recycle up to N requests */
	i = n;
	list_for_each_entry(req, list, list) {
		if (i-- == 0)
			goto extra;
	}
	/* i is now the shortfall; allocate that many more */
	while (i--) {
		req = usb_ep_alloc_request(ep, GFP_ATOMIC);
		if (!req)
			return list_empty(list) ? -ENOMEM : 0;
		list_add(&req->list, list);
	}
	return 0;

extra:
	/* free extras: @req and everything after it on the list */
	for (;;) {
		struct list_head	*next;

		next = req->list.next;
		list_del(&req->list);
		usb_ep_free_request(ep, req);

		if (next == list)
			break;

		req = container_of(next, struct usb_request, list);
	}
	return 0;
}
351 
352 static int alloc_requests(struct eth_dev *dev, struct gether *link, unsigned n)
353 {
354 	int	status;
355 
356 	spin_lock(&dev->req_lock);
357 	status = prealloc(&dev->tx_reqs, link->in_ep, n);
358 	if (status < 0)
359 		goto fail;
360 	status = prealloc(&dev->rx_reqs, link->out_ep, n);
361 	if (status < 0)
362 		goto fail;
363 	goto done;
364 fail:
365 	DBG(dev, "can't alloc requests\n");
366 done:
367 	spin_unlock(&dev->req_lock);
368 	return status;
369 }
370 
/* Submit an rx transfer for every free request on dev->rx_reqs.  The
 * req_lock is dropped around rx_submit(), which allocates an skb and
 * queues the request; on failure a retry is deferred to eth_work().
 */
static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags)
{
	struct usb_request	*req;
	unsigned long		flags;

	/* fill unused rxq slots with some skb */
	spin_lock_irqsave(&dev->req_lock, flags);
	while (!list_empty(&dev->rx_reqs)) {
		req = list_first_entry(&dev->rx_reqs, struct usb_request, list);
		list_del_init(&req->list);
		spin_unlock_irqrestore(&dev->req_lock, flags);

		if (rx_submit(dev, req, gfp_flags) < 0) {
			defer_kevent(dev, WORK_RX_MEMORY);
			return;
		}

		spin_lock_irqsave(&dev->req_lock, flags);
	}
	spin_unlock_irqrestore(&dev->req_lock, flags);
}
392 
393 static void eth_work(struct work_struct *work)
394 {
395 	struct eth_dev	*dev = container_of(work, struct eth_dev, work);
396 
397 	if (test_and_clear_bit(WORK_RX_MEMORY, &dev->todo)) {
398 		if (netif_running(dev->net))
399 			rx_fill(dev, GFP_KERNEL);
400 	}
401 
402 	if (dev->todo)
403 		DBG(dev, "work done, flags = 0x%lx\n", dev->todo);
404 }
405 
/* Completion handler for IN (tx) transfers: account the result, free the
 * skb, return the request to the free pool, and reopen the tx queue.
 */
static void tx_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct sk_buff	*skb = req->context;
	struct eth_dev	*dev = ep->driver_data;

	switch (req->status) {
	default:
		dev->net->stats.tx_errors++;
		VDBG(dev, "tx err %d\n", req->status);
		fallthrough;
	case -ECONNRESET:		/* unlink */
	case -ESHUTDOWN:		/* disconnect etc */
		dev_kfree_skb_any(skb);
		break;
	case 0:
		dev->net->stats.tx_bytes += skb->len;
		dev_consume_skb_any(skb);
	}
	/* counted for every completion, successful or not */
	dev->net->stats.tx_packets++;

	spin_lock(&dev->req_lock);
	list_add(&req->list, &dev->tx_reqs);
	spin_unlock(&dev->req_lock);

	atomic_dec(&dev->tx_qlen);
	if (netif_carrier_ok(dev->net))
		netif_wake_queue(dev->net);
}
434 
/* nonzero when the host enabled the CDC promiscuous packet filter */
static inline int is_promisc(u16 cdc_filter)
{
	return cdc_filter & USB_CDC_PACKET_TYPE_PROMISCUOUS;
}
439 
/* ndo_start_xmit: apply the host's CDC/RNDIS packet filters, optionally
 * wrap the frame in protocol framing, and queue it on the IN endpoint.
 * Consumes @skb in every case except NETDEV_TX_BUSY (no free request).
 */
static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
					struct net_device *net)
{
	struct eth_dev		*dev = netdev_priv(net);
	int			length = 0;
	int			retval;
	struct usb_request	*req = NULL;
	unsigned long		flags;
	struct usb_ep		*in;
	u16			cdc_filter;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb) {
		in = dev->port_usb->in_ep;
		cdc_filter = dev->port_usb->cdc_filter;
	} else {
		in = NULL;
		cdc_filter = 0;
	}
	spin_unlock_irqrestore(&dev->lock, flags);

	/* no link: silently drop */
	if (!in) {
		if (skb)
			dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* apply outgoing CDC or RNDIS filters */
	if (skb && !is_promisc(cdc_filter)) {
		u8		*dest = skb->data;

		if (is_multicast_ether_addr(dest)) {
			u16	type;

			/* ignores USB_CDC_PACKET_TYPE_MULTICAST and host
			 * SET_ETHERNET_MULTICAST_FILTERS requests
			 */
			if (is_broadcast_ether_addr(dest))
				type = USB_CDC_PACKET_TYPE_BROADCAST;
			else
				type = USB_CDC_PACKET_TYPE_ALL_MULTICAST;
			if (!(cdc_filter & type)) {
				dev_kfree_skb_any(skb);
				return NETDEV_TX_OK;
			}
		}
		/* ignores USB_CDC_PACKET_TYPE_DIRECTED */
	}

	spin_lock_irqsave(&dev->req_lock, flags);
	/*
	 * this freelist can be empty if an interrupt triggered disconnect()
	 * and reconfigured the gadget (shutting down this queue) after the
	 * network stack decided to xmit but before we got the spinlock.
	 */
	if (list_empty(&dev->tx_reqs)) {
		spin_unlock_irqrestore(&dev->req_lock, flags);
		return NETDEV_TX_BUSY;
	}

	req = list_first_entry(&dev->tx_reqs, struct usb_request, list);
	list_del(&req->list);

	/* temporarily stop TX queue when the freelist empties */
	if (list_empty(&dev->tx_reqs))
		netif_stop_queue(net);
	spin_unlock_irqrestore(&dev->req_lock, flags);

	/* no buffer copies needed, unless the network stack did it
	 * or the hardware can't use skb buffers.
	 * or there's not enough space for extra headers we need
	 */
	if (dev->wrap) {
		unsigned long	flags;

		spin_lock_irqsave(&dev->lock, flags);
		if (dev->port_usb)
			skb = dev->wrap(dev->port_usb, skb);
		spin_unlock_irqrestore(&dev->lock, flags);
		if (!skb) {
			/* Multi frame CDC protocols may store the frame for
			 * later which is not a dropped frame.
			 */
			if (dev->port_usb &&
					dev->port_usb->supports_multi_frame)
				goto multiframe;
			goto drop;
		}
	}

	length = skb->len;
	req->buf = skb->data;
	req->context = skb;
	req->complete = tx_complete;

	/* NCM requires no zlp if transfer is dwNtbInMaxSize */
	if (dev->port_usb &&
	    dev->port_usb->is_fixed &&
	    length == dev->port_usb->fixed_in_len &&
	    (length % in->maxpacket) == 0)
		req->zero = 0;
	else
		req->zero = 1;

	/* use zlp framing on tx for strict CDC-Ether conformance,
	 * though any robust network rx path ignores extra padding.
	 * and some hardware doesn't like to write zlps.
	 */
	if (req->zero && !dev->zlp && (length % in->maxpacket) == 0)
		length++;	/* one pad byte instead of a zlp */

	req->length = length;

	retval = usb_ep_queue(in, req, GFP_ATOMIC);
	switch (retval) {
	default:
		DBG(dev, "tx queue err %d\n", retval);
		break;
	case 0:
		netif_trans_update(net);
		atomic_inc(&dev->tx_qlen);
	}

	/* on failure, free the skb and recycle the request */
	if (retval) {
		dev_kfree_skb_any(skb);
drop:
		dev->net->stats.tx_dropped++;
multiframe:
		spin_lock_irqsave(&dev->req_lock, flags);
		if (list_empty(&dev->tx_reqs))
			netif_start_queue(net);
		list_add(&req->list, &dev->tx_reqs);
		spin_unlock_irqrestore(&dev->req_lock, flags);
	}
	return NETDEV_TX_OK;
}
576 
577 /*-------------------------------------------------------------------------*/
578 
/* Kick off I/O on an up-and-connected link: prime the rx queue and
 * let the network stack start transmitting.
 */
static void eth_start(struct eth_dev *dev, gfp_t gfp_flags)
{
	DBG(dev, "%s\n", __func__);

	/* fill the rx queue */
	rx_fill(dev, gfp_flags);

	/* and open the tx floodgates */
	atomic_set(&dev->tx_qlen, 0);
	netif_wake_queue(dev->net);
}
590 
/* ndo_open: start I/O if the USB link is already active (carrier on),
 * and notify the function driver through its open callback.
 */
static int eth_open(struct net_device *net)
{
	struct eth_dev	*dev = netdev_priv(net);
	struct gether	*link;

	DBG(dev, "%s\n", __func__);
	if (netif_carrier_ok(dev->net))
		eth_start(dev, GFP_KERNEL);

	spin_lock_irq(&dev->lock);
	link = dev->port_usb;
	if (link && link->open)
		link->open(link);
	spin_unlock_irq(&dev->lock);

	return 0;
}
608 
/* ndo_stop: stop the tx queue and force completion of all in-flight
 * requests by bouncing the endpoints (disable, then re-enable if the
 * host is still using the link).
 */
static int eth_stop(struct net_device *net)
{
	struct eth_dev	*dev = netdev_priv(net);
	unsigned long	flags;

	VDBG(dev, "%s\n", __func__);
	netif_stop_queue(net);

	DBG(dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld\n",
		dev->net->stats.rx_packets, dev->net->stats.tx_packets,
		dev->net->stats.rx_errors, dev->net->stats.tx_errors
		);

	/* ensure there are no more active requests */
	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb) {
		struct gether	*link = dev->port_usb;
		const struct usb_endpoint_descriptor *in;
		const struct usb_endpoint_descriptor *out;

		if (link->close)
			link->close(link);

		/* NOTE:  we have no abort-queue primitive we could use
		 * to cancel all pending I/O.  Instead, we disable then
		 * reenable the endpoints ... this idiom may leave toggle
		 * wrong, but that's a self-correcting error.
		 *
		 * REVISIT:  we *COULD* just let the transfers complete at
		 * their own pace; the network stack can handle old packets.
		 * For the moment we leave this here, since it works.
		 */
		in = link->in_ep->desc;
		out = link->out_ep->desc;
		usb_ep_disable(link->in_ep);
		usb_ep_disable(link->out_ep);
		if (netif_carrier_ok(net)) {
			DBG(dev, "host still using in/out endpoints\n");
			/* usb_ep_disable() clears ep->desc; restore before
			 * re-enabling
			 */
			link->in_ep->desc = in;
			link->out_ep->desc = out;
			usb_ep_enable(link->in_ep);
			usb_ep_enable(link->out_ep);
		}
	}
	spin_unlock_irqrestore(&dev->lock, flags);

	return 0;
}
657 
658 /*-------------------------------------------------------------------------*/
659 
660 static int get_ether_addr(const char *str, u8 *dev_addr)
661 {
662 	if (str) {
663 		unsigned	i;
664 
665 		for (i = 0; i < 6; i++) {
666 			unsigned char num;
667 
668 			if ((*str == '.') || (*str == ':'))
669 				str++;
670 			num = hex_to_bin(*str++) << 4;
671 			num |= hex_to_bin(*str++);
672 			dev_addr [i] = num;
673 		}
674 		if (is_valid_ether_addr(dev_addr))
675 			return 0;
676 	}
677 	eth_random_addr(dev_addr);
678 	return 1;
679 }
680 
/* Format @dev_addr into @str as "xx:xx:xx:xx:xx:xx" ("%pM" is the
 * kernel's colon-separated MAC format).  @len must allow for the 17
 * characters plus NUL; returns 18 on success or -EINVAL when @len is
 * too small.
 */
static int get_ether_addr_str(u8 dev_addr[ETH_ALEN], char *str, int len)
{
	if (len < 18)
		return -EINVAL;

	snprintf(str, len, "%pM", dev_addr);
	return 18;
}
689 
/* L2 address set/validate come from the generic etherdevice helpers */
static const struct net_device_ops eth_netdev_ops = {
	.ndo_open		= eth_open,
	.ndo_stop		= eth_stop,
	.ndo_start_xmit		= eth_start_xmit,
	.ndo_set_mac_address 	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};
697 
/* device type exposed through sysfs for this interface */
static struct device_type gadget_type = {
	.name	= "gadget",
};
701 
702 /*
703  * gether_setup_name - initialize one ethernet-over-usb link
704  * @g: gadget to associated with these links
705  * @ethaddr: NULL, or a buffer in which the ethernet address of the
706  *	host side of the link is recorded
707  * @netname: name for network device (for example, "usb")
708  * Context: may sleep
709  *
710  * This sets up the single network link that may be exported by a
711  * gadget driver using this framework.  The link layer addresses are
712  * set up using module parameters.
713  *
714  * Returns an eth_dev pointer on success, or an ERR_PTR on failure.
715  */
716 struct eth_dev *gether_setup_name(struct usb_gadget *g,
717 		const char *dev_addr, const char *host_addr,
718 		u8 ethaddr[ETH_ALEN], unsigned qmult, const char *netname)
719 {
720 	struct eth_dev		*dev;
721 	struct net_device	*net;
722 	int			status;
723 	u8			addr[ETH_ALEN];
724 
725 	net = alloc_etherdev(sizeof *dev);
726 	if (!net)
727 		return ERR_PTR(-ENOMEM);
728 
729 	dev = netdev_priv(net);
730 	spin_lock_init(&dev->lock);
731 	spin_lock_init(&dev->req_lock);
732 	INIT_WORK(&dev->work, eth_work);
733 	INIT_LIST_HEAD(&dev->tx_reqs);
734 	INIT_LIST_HEAD(&dev->rx_reqs);
735 
736 	skb_queue_head_init(&dev->rx_frames);
737 
738 	/* network device setup */
739 	dev->net = net;
740 	dev->qmult = qmult;
741 	snprintf(net->name, sizeof(net->name), "%s%%d", netname);
742 
743 	if (get_ether_addr(dev_addr, addr)) {
744 		net->addr_assign_type = NET_ADDR_RANDOM;
745 		dev_warn(&g->dev,
746 			"using random %s ethernet address\n", "self");
747 	} else {
748 		net->addr_assign_type = NET_ADDR_SET;
749 	}
750 	eth_hw_addr_set(net, addr);
751 	if (get_ether_addr(host_addr, dev->host_mac))
752 		dev_warn(&g->dev,
753 			"using random %s ethernet address\n", "host");
754 
755 	if (ethaddr)
756 		memcpy(ethaddr, dev->host_mac, ETH_ALEN);
757 
758 	net->netdev_ops = &eth_netdev_ops;
759 
760 	net->ethtool_ops = &ops;
761 
762 	/* MTU range: 14 - 15412 */
763 	net->min_mtu = ETH_HLEN;
764 	net->max_mtu = GETHER_MAX_MTU_SIZE;
765 
766 	dev->gadget = g;
767 	SET_NETDEV_DEV(net, &g->dev);
768 	SET_NETDEV_DEVTYPE(net, &gadget_type);
769 
770 	status = register_netdev(net);
771 	if (status < 0) {
772 		dev_dbg(&g->dev, "register_netdev failed, %d\n", status);
773 		free_netdev(net);
774 		dev = ERR_PTR(status);
775 	} else {
776 		INFO(dev, "MAC %pM\n", net->dev_addr);
777 		INFO(dev, "HOST MAC %pM\n", dev->host_mac);
778 
779 		/*
780 		 * two kinds of host-initiated state changes:
781 		 *  - iff DATA transfer is active, carrier is "on"
782 		 *  - tx queueing enabled if open *and* carrier is "on"
783 		 */
784 		netif_carrier_off(net);
785 	}
786 
787 	return dev;
788 }
789 EXPORT_SYMBOL_GPL(gether_setup_name);
790 
/* Allocate and initialize the net_device with random MAC addresses and
 * default queueing, but do not register it: callers adjust attributes
 * via the gether_set_*() helpers (including gether_set_gadget(), which
 * must run first) and then call gether_register_netdev().
 *
 * Returns the net_device on success, or an ERR_PTR on failure.
 */
struct net_device *gether_setup_name_default(const char *netname)
{
	struct net_device	*net;
	struct eth_dev		*dev;

	net = alloc_etherdev(sizeof(*dev));
	if (!net)
		return ERR_PTR(-ENOMEM);

	dev = netdev_priv(net);
	spin_lock_init(&dev->lock);
	spin_lock_init(&dev->req_lock);
	INIT_WORK(&dev->work, eth_work);
	INIT_LIST_HEAD(&dev->tx_reqs);
	INIT_LIST_HEAD(&dev->rx_reqs);

	skb_queue_head_init(&dev->rx_frames);

	/* network device setup */
	dev->net = net;
	dev->qmult = QMULT_DEFAULT;
	snprintf(net->name, sizeof(net->name), "%s%%d", netname);

	eth_random_addr(dev->dev_mac);

	/* by default we always have a random MAC address */
	net->addr_assign_type = NET_ADDR_RANDOM;

	eth_random_addr(dev->host_mac);

	net->netdev_ops = &eth_netdev_ops;

	net->ethtool_ops = &ops;
	SET_NETDEV_DEVTYPE(net, &gadget_type);

	/* MTU range: 14 - 15412 */
	net->min_mtu = ETH_HLEN;
	net->max_mtu = GETHER_MAX_MTU_SIZE;

	return net;
}
833 
834 int gether_register_netdev(struct net_device *net)
835 {
836 	struct eth_dev *dev;
837 	struct usb_gadget *g;
838 	int status;
839 
840 	if (!net->dev.parent)
841 		return -EINVAL;
842 	dev = netdev_priv(net);
843 	g = dev->gadget;
844 
845 	eth_hw_addr_set(net, dev->dev_mac);
846 
847 	status = register_netdev(net);
848 	if (status < 0) {
849 		dev_dbg(&g->dev, "register_netdev failed, %d\n", status);
850 		return status;
851 	} else {
852 		INFO(dev, "HOST MAC %pM\n", dev->host_mac);
853 		INFO(dev, "MAC %pM\n", dev->dev_mac);
854 
855 		/* two kinds of host-initiated state changes:
856 		 *  - iff DATA transfer is active, carrier is "on"
857 		 *  - tx queueing enabled if open *and* carrier is "on"
858 		 */
859 		netif_carrier_off(net);
860 	}
861 
862 	return status;
863 }
864 EXPORT_SYMBOL_GPL(gether_register_netdev);
865 
/* Attach gadget @g to @net and make it the net_device's parent; must
 * happen before gether_register_netdev().
 */
void gether_set_gadget(struct net_device *net, struct usb_gadget *g)
{
	struct eth_dev *dev;

	dev = netdev_priv(net);
	dev->gadget = g;
	SET_NETDEV_DEV(net, &g->dev);
}
875 
876 int gether_set_dev_addr(struct net_device *net, const char *dev_addr)
877 {
878 	struct eth_dev *dev;
879 	u8 new_addr[ETH_ALEN];
880 
881 	dev = netdev_priv(net);
882 	if (get_ether_addr(dev_addr, new_addr))
883 		return -EINVAL;
884 	memcpy(dev->dev_mac, new_addr, ETH_ALEN);
885 	net->addr_assign_type = NET_ADDR_SET;
886 	return 0;
887 }
888 EXPORT_SYMBOL_GPL(gether_set_dev_addr);
889 
890 int gether_get_dev_addr(struct net_device *net, char *dev_addr, int len)
891 {
892 	struct eth_dev *dev;
893 	int ret;
894 
895 	dev = netdev_priv(net);
896 	ret = get_ether_addr_str(dev->dev_mac, dev_addr, len);
897 	if (ret + 1 < len) {
898 		dev_addr[ret++] = '\n';
899 		dev_addr[ret] = '\0';
900 	}
901 
902 	return ret;
903 }
904 EXPORT_SYMBOL_GPL(gether_get_dev_addr);
905 
906 int gether_set_host_addr(struct net_device *net, const char *host_addr)
907 {
908 	struct eth_dev *dev;
909 	u8 new_addr[ETH_ALEN];
910 
911 	dev = netdev_priv(net);
912 	if (get_ether_addr(host_addr, new_addr))
913 		return -EINVAL;
914 	memcpy(dev->host_mac, new_addr, ETH_ALEN);
915 	return 0;
916 }
917 EXPORT_SYMBOL_GPL(gether_set_host_addr);
918 
919 int gether_get_host_addr(struct net_device *net, char *host_addr, int len)
920 {
921 	struct eth_dev *dev;
922 	int ret;
923 
924 	dev = netdev_priv(net);
925 	ret = get_ether_addr_str(dev->host_mac, host_addr, len);
926 	if (ret + 1 < len) {
927 		host_addr[ret++] = '\n';
928 		host_addr[ret] = '\0';
929 	}
930 
931 	return ret;
932 }
933 EXPORT_SYMBOL_GPL(gether_get_host_addr);
934 
/* Write the host-side MAC as twelve hex digits with no separators
 * ("%pm" is the kernel's separator-less MAC format), the form used by
 * the CDC iMACAddress string.  @len must allow 12 characters plus NUL.
 *
 * Returns the resulting string length, or -EINVAL when @len is too small.
 */
int gether_get_host_addr_cdc(struct net_device *net, char *host_addr, int len)
{
	struct eth_dev *dev;

	if (len < 13)
		return -EINVAL;

	dev = netdev_priv(net);
	snprintf(host_addr, len, "%pm", dev->host_mac);

	return strlen(host_addr);
}
948 
/* Copy the 6-byte binary host-side MAC into @host_mac. */
void gether_get_host_addr_u8(struct net_device *net, u8 host_mac[ETH_ALEN])
{
	struct eth_dev *dev;

	dev = netdev_priv(net);
	memcpy(host_mac, dev->host_mac, ETH_ALEN);
}
957 
958 void gether_set_qmult(struct net_device *net, unsigned qmult)
959 {
960 	struct eth_dev *dev;
961 
962 	dev = netdev_priv(net);
963 	dev->qmult = qmult;
964 }
965 EXPORT_SYMBOL_GPL(gether_set_qmult);
966 
967 unsigned gether_get_qmult(struct net_device *net)
968 {
969 	struct eth_dev *dev;
970 
971 	dev = netdev_priv(net);
972 	return dev->qmult;
973 }
974 EXPORT_SYMBOL_GPL(gether_get_qmult);
975 
/* Copy the interface name, newline-terminated, into @name.  When a name
 * template was installed via gether_set_ifname(), net->name is reported
 * directly; otherwise netdev_name() is used.  rtnl_lock keeps the name
 * stable against concurrent renames.  Returns the bytes written.
 */
int gether_get_ifname(struct net_device *net, char *name, int len)
{
	struct eth_dev *dev = netdev_priv(net);
	int ret;

	rtnl_lock();
	ret = scnprintf(name, len, "%s\n",
			dev->ifname_set ? net->name : netdev_name(net));
	rtnl_unlock();
	return ret;
}
988 
989 int gether_set_ifname(struct net_device *net, const char *name, int len)
990 {
991 	struct eth_dev *dev = netdev_priv(net);
992 	char tmp[IFNAMSIZ];
993 	const char *p;
994 
995 	if (name[len - 1] == '\n')
996 		len--;
997 
998 	if (len >= sizeof(tmp))
999 		return -E2BIG;
1000 
1001 	strscpy(tmp, name, len + 1);
1002 	if (!dev_valid_name(tmp))
1003 		return -EINVAL;
1004 
1005 	/* Require exactly one %d, so binding will not fail with EEXIST. */
1006 	p = strchr(name, '%');
1007 	if (!p || p[1] != 'd' || strchr(p + 2, '%'))
1008 		return -EINVAL;
1009 
1010 	strncpy(net->name, tmp, sizeof(net->name));
1011 	dev->ifname_set = true;
1012 
1013 	return 0;
1014 }
1015 EXPORT_SYMBOL_GPL(gether_set_ifname);
1016 
1017 /*
1018  * gether_cleanup - remove Ethernet-over-USB device
1019  * Context: may sleep
1020  *
1021  * This is called to free all resources allocated by @gether_setup().
1022  */
1023 void gether_cleanup(struct eth_dev *dev)
1024 {
1025 	if (!dev)
1026 		return;
1027 
1028 	unregister_netdev(dev->net);
1029 	flush_work(&dev->work);
1030 	free_netdev(dev->net);
1031 }
1032 EXPORT_SYMBOL_GPL(gether_cleanup);
1033 
1034 /**
1035  * gether_connect - notify network layer that USB link is active
1036  * @link: the USB link, set up with endpoints, descriptors matching
1037  *	current device speed, and any framing wrapper(s) set up.
1038  * Context: irqs blocked
1039  *
1040  * This is called to activate endpoints and let the network layer know
1041  * the connection is active ("carrier detect").  It may cause the I/O
1042  * queues to open and start letting network packets flow, but will in
1043  * any case activate the endpoints so that they respond properly to the
1044  * USB host.
1045  *
1046  * Verify net_device pointer returned using IS_ERR().  If it doesn't
1047  * indicate some error code (negative errno), ep->driver_data values
1048  * have been overwritten.
1049  */
1050 struct net_device *gether_connect(struct gether *link)
1051 {
1052 	struct eth_dev		*dev = link->ioport;
1053 	int			result = 0;
1054 
1055 	if (!dev)
1056 		return ERR_PTR(-EINVAL);
1057 
1058 	link->in_ep->driver_data = dev;
1059 	result = usb_ep_enable(link->in_ep);
1060 	if (result != 0) {
1061 		DBG(dev, "enable %s --> %d\n",
1062 			link->in_ep->name, result);
1063 		goto fail0;
1064 	}
1065 
1066 	link->out_ep->driver_data = dev;
1067 	result = usb_ep_enable(link->out_ep);
1068 	if (result != 0) {
1069 		DBG(dev, "enable %s --> %d\n",
1070 			link->out_ep->name, result);
1071 		goto fail1;
1072 	}
1073 
1074 	if (result == 0)
1075 		result = alloc_requests(dev, link, qlen(dev->gadget,
1076 					dev->qmult));
1077 
1078 	if (result == 0) {
1079 		dev->zlp = link->is_zlp_ok;
1080 		dev->no_skb_reserve = gadget_avoids_skb_reserve(dev->gadget);
1081 		DBG(dev, "qlen %d\n", qlen(dev->gadget, dev->qmult));
1082 
1083 		dev->header_len = link->header_len;
1084 		dev->unwrap = link->unwrap;
1085 		dev->wrap = link->wrap;
1086 
1087 		spin_lock(&dev->lock);
1088 		dev->port_usb = link;
1089 		if (netif_running(dev->net)) {
1090 			if (link->open)
1091 				link->open(link);
1092 		} else {
1093 			if (link->close)
1094 				link->close(link);
1095 		}
1096 		spin_unlock(&dev->lock);
1097 
1098 		netif_carrier_on(dev->net);
1099 		if (netif_running(dev->net))
1100 			eth_start(dev, GFP_ATOMIC);
1101 
1102 	/* on error, disable any endpoints  */
1103 	} else {
1104 		(void) usb_ep_disable(link->out_ep);
1105 fail1:
1106 		(void) usb_ep_disable(link->in_ep);
1107 	}
1108 fail0:
1109 	/* caller is responsible for cleanup on error */
1110 	if (result < 0)
1111 		return ERR_PTR(result);
1112 	return dev->net;
1113 }
1114 EXPORT_SYMBOL_GPL(gether_connect);
1115 
1116 /**
1117  * gether_disconnect - notify network layer that USB link is inactive
1118  * @link: the USB link, on which gether_connect() was called
1119  * Context: irqs blocked
1120  *
1121  * This is called to deactivate endpoints and let the network layer know
1122  * the connection went inactive ("no carrier").
1123  *
1124  * On return, the state is as if gether_connect() had never been called.
1125  * The endpoints are inactive, and accordingly without active USB I/O.
1126  * Pointers to endpoint descriptors and endpoint private data are nulled.
1127  */
1128 void gether_disconnect(struct gether *link)
1129 {
1130 	struct eth_dev		*dev = link->ioport;
1131 	struct usb_request	*req;
1132 
1133 	WARN_ON(!dev);
1134 	if (!dev)
1135 		return;
1136 
1137 	DBG(dev, "%s\n", __func__);
1138 
1139 	netif_stop_queue(dev->net);
1140 	netif_carrier_off(dev->net);
1141 
1142 	/* disable endpoints, forcing (synchronous) completion
1143 	 * of all pending i/o.  then free the request objects
1144 	 * and forget about the endpoints.
1145 	 */
1146 	usb_ep_disable(link->in_ep);
1147 	spin_lock(&dev->req_lock);
1148 	while (!list_empty(&dev->tx_reqs)) {
1149 		req = list_first_entry(&dev->tx_reqs, struct usb_request, list);
1150 		list_del(&req->list);
1151 
1152 		spin_unlock(&dev->req_lock);
1153 		usb_ep_free_request(link->in_ep, req);
1154 		spin_lock(&dev->req_lock);
1155 	}
1156 	spin_unlock(&dev->req_lock);
1157 	link->in_ep->desc = NULL;
1158 
1159 	usb_ep_disable(link->out_ep);
1160 	spin_lock(&dev->req_lock);
1161 	while (!list_empty(&dev->rx_reqs)) {
1162 		req = list_first_entry(&dev->rx_reqs, struct usb_request, list);
1163 		list_del(&req->list);
1164 
1165 		spin_unlock(&dev->req_lock);
1166 		usb_ep_free_request(link->out_ep, req);
1167 		spin_lock(&dev->req_lock);
1168 	}
1169 	spin_unlock(&dev->req_lock);
1170 	link->out_ep->desc = NULL;
1171 
1172 	/* finish forgetting about this USB link episode */
1173 	dev->header_len = 0;
1174 	dev->unwrap = NULL;
1175 	dev->wrap = NULL;
1176 
1177 	spin_lock(&dev->lock);
1178 	dev->port_usb = NULL;
1179 	spin_unlock(&dev->lock);
1180 }
1181 EXPORT_SYMBOL_GPL(gether_disconnect);
1182 
1183 MODULE_LICENSE("GPL");
1184 MODULE_AUTHOR("David Brownell");
1185