xref: /openbmc/linux/drivers/net/usb/usbnet.c (revision 5bc65793cbf8da0d35f19ef025dda22887e79e80)
1 /*
2  * USB Network driver infrastructure
3  * Copyright (C) 2000-2005 by David Brownell
4  * Copyright (C) 2003-2005 David Hollis <dhollis@davehollis.com>
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation; either version 2 of the License, or
9  * (at your option) any later version.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  * GNU General Public License for more details.
15  *
16  * You should have received a copy of the GNU General Public License
17  * along with this program; if not, write to the Free Software
18  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
19  */
20 
21 /*
22  * This is a generic "USB networking" framework that works with several
23  * kinds of full and high speed networking devices:  host-to-host cables,
24  * smart usb peripherals, and actual Ethernet adapters.
25  *
26  * These devices usually differ in terms of control protocols (if they
27  * even have one!) and sometimes they define new framing to wrap or batch
28  * Ethernet packets.  Otherwise, they talk to USB pretty much the same,
29  * so interface (un)binding, endpoint I/O queues, fault handling, and other
30  * issues can usefully be addressed by this framework.
31  */
32 
33 // #define	DEBUG			// error path messages, extra info
34 // #define	VERBOSE			// more; success messages
35 
36 #include <linux/module.h>
37 #include <linux/init.h>
38 #include <linux/netdevice.h>
39 #include <linux/etherdevice.h>
40 #include <linux/ethtool.h>
41 #include <linux/workqueue.h>
42 #include <linux/mii.h>
43 #include <linux/usb.h>
44 
45 #include "usbnet.h"
46 
47 #define DRIVER_VERSION		"22-Aug-2005"
48 
49 
50 /*-------------------------------------------------------------------------*/
51 
52 /*
53  * Nineteen USB 1.1 max size bulk transactions per frame (ms), max.
54  * Several dozen bytes of IPv4 data can fit in two such transactions.
55  * One maximum size Ethernet packet takes twenty four of them.
56  * For high speed, each frame comfortably fits almost 36 max size
57  * Ethernet packets (so queues should be bigger).
58  *
59  * REVISIT qlens should be members of 'struct usbnet'; the goal is to
60  * let the USB host controller be busy for 5msec or more before an irq
61  * is required, under load.  Jumbograms change the equation.
62  */
63 #define RX_MAX_QUEUE_MEMORY (60 * 1518)
64 #define	RX_QLEN(dev) (((dev)->udev->speed == USB_SPEED_HIGH) ? \
65 			(RX_MAX_QUEUE_MEMORY/(dev)->rx_urb_size) : 4)
66 #define	TX_QLEN(dev) (((dev)->udev->speed == USB_SPEED_HIGH) ? \
67 			(RX_MAX_QUEUE_MEMORY/(dev)->hard_mtu) : 4)
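/* For example: at high speed with the default 1518 byte rx_urb_size,
 * RX_QLEN() works out to 91080 / 1518 = 60 urbs in flight, while at
 * full speed both queues fall back to a fixed depth of 4.
 */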
68 
69 // reawaken network queue this soon after stopping; else watchdog barks
70 #define TX_TIMEOUT_JIFFIES	(5*HZ)
71 
72 // throttle rx/tx briefly after some faults, so khubd might disconnect()
73 // us (it polls at HZ/4 usually) before we report too many false errors.
74 #define THROTTLE_JIFFIES	(HZ/8)
75 
76 // between wakeups
77 #define UNLINK_TIMEOUT_MS	3
78 
79 /*-------------------------------------------------------------------------*/
80 
81 // randomly generated ethernet address
82 static u8	node_id [ETH_ALEN];
83 
84 static const char driver_name [] = "usbnet";
85 
86 /* use ethtool to change the level for any given device */
87 static int msg_level = -1;
88 module_param (msg_level, int, 0);
89 MODULE_PARM_DESC (msg_level, "Override default message level");
90 
91 /*-------------------------------------------------------------------------*/
92 
93 /* handles CDC Ethernet and many other network "bulk data" interfaces */
94 int usbnet_get_endpoints(struct usbnet *dev, struct usb_interface *intf)
95 {
96 	int				tmp;
97 	struct usb_host_interface	*alt = NULL;
98 	struct usb_host_endpoint	*in = NULL, *out = NULL;
99 	struct usb_host_endpoint	*status = NULL;
100 
101 	for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
102 		unsigned	ep;
103 
104 		in = out = status = NULL;
105 		alt = intf->altsetting + tmp;
106 
107 		/* take the first altsetting with in-bulk + out-bulk;
108 		 * remember any status endpoint, just in case;
109 		 * ignore other endpoints and altsettings.
110 		 */
111 		for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
112 			struct usb_host_endpoint	*e;
113 			int				intr = 0;
114 
115 			e = alt->endpoint + ep;
116 			switch (e->desc.bmAttributes) {
117 			case USB_ENDPOINT_XFER_INT:
118 				if (!usb_endpoint_dir_in(&e->desc))
119 					continue;
120 				intr = 1;
121 				/* FALLTHROUGH */
122 			case USB_ENDPOINT_XFER_BULK:
123 				break;
124 			default:
125 				continue;
126 			}
127 			if (usb_endpoint_dir_in(&e->desc)) {
128 				if (!intr && !in)
129 					in = e;
130 				else if (intr && !status)
131 					status = e;
132 			} else {
133 				if (!out)
134 					out = e;
135 			}
136 		}
137 		if (in && out)
138 			break;
139 	}
140 	if (!alt || !in || !out)
141 		return -EINVAL;
142 
143 	if (alt->desc.bAlternateSetting != 0
144 			|| !(dev->driver_info->flags & FLAG_NO_SETINT)) {
145 		tmp = usb_set_interface (dev->udev, alt->desc.bInterfaceNumber,
146 				alt->desc.bAlternateSetting);
147 		if (tmp < 0)
148 			return tmp;
149 	}
150 
151 	dev->in = usb_rcvbulkpipe (dev->udev,
152 			in->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
153 	dev->out = usb_sndbulkpipe (dev->udev,
154 			out->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
155 	dev->status = status;
156 	return 0;
157 }
158 EXPORT_SYMBOL_GPL(usbnet_get_endpoints);
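/* Illustration (not part of this file): a minidriver's bind() usually
 * just calls usbnet_get_endpoints() and then does device-specific setup.
 * The names below ("foo_bind" and the setup step) are hypothetical.
 *
 *	static int foo_bind(struct usbnet *dev, struct usb_interface *intf)
 *	{
 *		int status = usbnet_get_endpoints(dev, intf);
 *
 *		if (status < 0)
 *			return status;
 *		// read the MAC address, adjust dev->rx_urb_size, etc.
 *		return 0;
 *	}
 */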
159 
160 static void intr_complete (struct urb *urb);
161 
162 static int init_status (struct usbnet *dev, struct usb_interface *intf)
163 {
164 	char		*buf = NULL;
165 	unsigned	pipe = 0;
166 	unsigned	maxp;
167 	unsigned	period;
168 
169 	if (!dev->driver_info->status)
170 		return 0;
171 
172 	pipe = usb_rcvintpipe (dev->udev,
173 			dev->status->desc.bEndpointAddress
174 				& USB_ENDPOINT_NUMBER_MASK);
175 	maxp = usb_maxpacket (dev->udev, pipe, 0);
176 
177 	/* avoid 1 msec chatter:  min 8 msec poll rate */
178 	period = max ((int) dev->status->desc.bInterval,
179 		(dev->udev->speed == USB_SPEED_HIGH) ? 7 : 3);
180 
181 	buf = kmalloc (maxp, GFP_KERNEL);
182 	if (buf) {
183 		dev->interrupt = usb_alloc_urb (0, GFP_KERNEL);
184 		if (!dev->interrupt) {
185 			kfree (buf);
186 			return -ENOMEM;
187 		} else {
188 			usb_fill_int_urb(dev->interrupt, dev->udev, pipe,
189 				buf, maxp, intr_complete, dev, period);
190 			dev_dbg(&intf->dev,
191 				"status ep%din, %d bytes period %d\n",
192 				usb_pipeendpoint(pipe), maxp, period);
193 		}
194 	}
195 	return  0;
196 }
197 
198 /* Passes this packet up the stack, updating its accounting.
199  * Some link protocols batch packets, so their rx_fixup paths
200  * can return clones as well as just modify the original skb.
201  */
202 void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb)
203 {
204 	int	status;
205 
206 	skb->protocol = eth_type_trans (skb, dev->net);
207 	dev->stats.rx_packets++;
208 	dev->stats.rx_bytes += skb->len;
209 
210 	if (netif_msg_rx_status (dev))
211 		devdbg (dev, "< rx, len %zu, type 0x%x",
212 			skb->len + sizeof (struct ethhdr), skb->protocol);
213 	memset (skb->cb, 0, sizeof (struct skb_data));
214 	status = netif_rx (skb);
215 	if (status != NET_RX_SUCCESS && netif_msg_rx_err (dev))
216 		devdbg (dev, "netif_rx status %d", status);
217 }
218 EXPORT_SYMBOL_GPL(usbnet_skb_return);
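/* Rough sketch of how a batching minidriver's rx_fixup() feeds this
 * routine: every frame but the last is cloned and handed back here, and
 * the last one is left in the original skb for the caller.  The framing
 * helpers below are hypothetical placeholders for whatever headers the
 * hardware uses.
 *
 *	while (frames_remaining(skb) > 1) {
 *		unsigned len = frame_length(skb);          // device framing
 *		struct sk_buff *frame = skb_clone(skb, GFP_ATOMIC);
 *
 *		if (!frame)
 *			return 0;                          // counted as rx error
 *		skb_trim(frame, len);
 *		usbnet_skb_return(dev, frame);
 *		skb_pull(skb, padded_frame_size(len));
 *	}
 *	return 1;                                          // keep the last frame
 */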
219 
220 
221 /*-------------------------------------------------------------------------
222  *
223  * Network Device Driver (peer link to "Host Device", from USB host)
224  *
225  *-------------------------------------------------------------------------*/
226 
227 static int usbnet_change_mtu (struct net_device *net, int new_mtu)
228 {
229 	struct usbnet	*dev = netdev_priv(net);
230 	int		ll_mtu = new_mtu + net->hard_header_len;
231 	int		old_hard_mtu = dev->hard_mtu;
232 	int		old_rx_urb_size = dev->rx_urb_size;
233 
234 	if (new_mtu <= 0)
235 		return -EINVAL;
236 	// no second zero-length packet read wanted after mtu-sized packets
237 	if ((ll_mtu % dev->maxpacket) == 0)
238 		return -EDOM;
239 	net->mtu = new_mtu;
240 
241 	dev->hard_mtu = net->mtu + net->hard_header_len;
242 	if (dev->rx_urb_size == old_hard_mtu) {
243 		dev->rx_urb_size = dev->hard_mtu;
244 		if (dev->rx_urb_size > old_rx_urb_size)
245 			usbnet_unlink_rx_urbs(dev);
246 	}
247 
248 	return 0;
249 }
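/* Worked example of the -EDOM check above: at full speed the bulk
 * maxpacket is 64 bytes, so asking for an MTU of 1522 gives
 * ll_mtu = 1522 + 14 = 1536 = 24 * 64, an exact multiple, and the
 * request is refused rather than risk a trailing zero-length packet
 * after each mtu-sized read.
 */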
250 
251 /*-------------------------------------------------------------------------*/
252 
253 static struct net_device_stats *usbnet_get_stats (struct net_device *net)
254 {
255 	struct usbnet	*dev = netdev_priv(net);
256 	return &dev->stats;
257 }
258 
259 /*-------------------------------------------------------------------------*/
260 
261 /* some LK 2.4 HCDs oopsed if we freed or resubmitted urbs from
262  * completion callbacks.  2.5 should have fixed those bugs...
263  */
264 
265 static void defer_bh(struct usbnet *dev, struct sk_buff *skb, struct sk_buff_head *list)
266 {
267 	unsigned long		flags;
268 
269 	spin_lock_irqsave(&list->lock, flags);
270 	__skb_unlink(skb, list);
271 	spin_unlock(&list->lock);
272 	spin_lock(&dev->done.lock);
273 	__skb_queue_tail(&dev->done, skb);
274 	if (dev->done.qlen == 1)
275 		tasklet_schedule(&dev->bh);
276 	spin_unlock_irqrestore(&dev->done.lock, flags);
277 }
278 
279 /* some work can't be done in tasklets, so we use keventd
280  *
281  * NOTE:  annoying asymmetry:  if it's active, schedule_work() fails,
282  * but tasklet_schedule() doesn't.  hope the failure is rare.
283  */
284 void usbnet_defer_kevent (struct usbnet *dev, int work)
285 {
286 	set_bit (work, &dev->flags);
287 	if (!schedule_work (&dev->kevent))
288 		deverr (dev, "kevent %d may have been dropped", work);
289 	else
290 		devdbg (dev, "kevent %d scheduled", work);
291 }
292 EXPORT_SYMBOL_GPL(usbnet_defer_kevent);
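/* Example: rx_complete() below runs in interrupt context, where
 * usb_clear_halt() is not allowed, so on -EPIPE it simply does
 *
 *	usbnet_defer_kevent(dev, EVENT_RX_HALT);
 *
 * and kevent() later clears the stall from process context.
 */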
293 
294 /*-------------------------------------------------------------------------*/
295 
296 static void rx_complete (struct urb *urb);
297 
298 static void rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
299 {
300 	struct sk_buff		*skb;
301 	struct skb_data		*entry;
302 	int			retval = 0;
303 	unsigned long		lockflags;
304 	size_t			size = dev->rx_urb_size;
305 
306 	if ((skb = alloc_skb (size + NET_IP_ALIGN, flags)) == NULL) {
307 		if (netif_msg_rx_err (dev))
308 			devdbg (dev, "no rx skb");
309 		usbnet_defer_kevent (dev, EVENT_RX_MEMORY);
310 		usb_free_urb (urb);
311 		return;
312 	}
313 	skb_reserve (skb, NET_IP_ALIGN);
314 
315 	entry = (struct skb_data *) skb->cb;
316 	entry->urb = urb;
317 	entry->dev = dev;
318 	entry->state = rx_start;
319 	entry->length = 0;
320 
321 	usb_fill_bulk_urb (urb, dev->udev, dev->in,
322 		skb->data, size, rx_complete, skb);
323 
324 	spin_lock_irqsave (&dev->rxq.lock, lockflags);
325 
326 	if (netif_running (dev->net)
327 			&& netif_device_present (dev->net)
328 			&& !test_bit (EVENT_RX_HALT, &dev->flags)) {
329 		switch (retval = usb_submit_urb (urb, GFP_ATOMIC)){
330 		case -EPIPE:
331 			usbnet_defer_kevent (dev, EVENT_RX_HALT);
332 			break;
333 		case -ENOMEM:
334 			usbnet_defer_kevent (dev, EVENT_RX_MEMORY);
335 			break;
336 		case -ENODEV:
337 			if (netif_msg_ifdown (dev))
338 				devdbg (dev, "device gone");
339 			netif_device_detach (dev->net);
340 			break;
341 		default:
342 			if (netif_msg_rx_err (dev))
343 				devdbg (dev, "rx submit, %d", retval);
344 			tasklet_schedule (&dev->bh);
345 			break;
346 		case 0:
347 			__skb_queue_tail (&dev->rxq, skb);
348 		}
349 	} else {
350 		if (netif_msg_ifdown (dev))
351 			devdbg (dev, "rx: stopped");
352 		retval = -ENOLINK;
353 	}
354 	spin_unlock_irqrestore (&dev->rxq.lock, lockflags);
355 	if (retval) {
356 		dev_kfree_skb_any (skb);
357 		usb_free_urb (urb);
358 	}
359 }
360 
361 
362 /*-------------------------------------------------------------------------*/
363 
364 static inline void rx_process (struct usbnet *dev, struct sk_buff *skb)
365 {
366 	if (dev->driver_info->rx_fixup
367 			&& !dev->driver_info->rx_fixup (dev, skb))
368 		goto error;
369 	// else network stack removes extra byte if we forced a short packet
370 
371 	if (skb->len)
372 		usbnet_skb_return (dev, skb);
373 	else {
374 		if (netif_msg_rx_err (dev))
375 			devdbg (dev, "drop");
376 error:
377 		dev->stats.rx_errors++;
378 		skb_queue_tail (&dev->done, skb);
379 	}
380 }
381 
382 /*-------------------------------------------------------------------------*/
383 
384 static void rx_complete (struct urb *urb)
385 {
386 	struct sk_buff		*skb = (struct sk_buff *) urb->context;
387 	struct skb_data		*entry = (struct skb_data *) skb->cb;
388 	struct usbnet		*dev = entry->dev;
389 	int			urb_status = urb->status;
390 
391 	skb_put (skb, urb->actual_length);
392 	entry->state = rx_done;
393 	entry->urb = NULL;
394 
395 	switch (urb_status) {
396 	    // success
397 	    case 0:
398 		if (skb->len < dev->net->hard_header_len) {
399 			entry->state = rx_cleanup;
400 			dev->stats.rx_errors++;
401 			dev->stats.rx_length_errors++;
402 			if (netif_msg_rx_err (dev))
403 				devdbg (dev, "rx length %d", skb->len);
404 		}
405 		break;
406 
407 	    // stalls need manual reset. this is rare ... except that
408 	    // when going through USB 2.0 TTs, unplug appears this way.
409 	    // we avoid the highspeed version of the ETIMEDOUT/EILSEQ
410 	    // storm, recovering as needed.
411 	    case -EPIPE:
412 		dev->stats.rx_errors++;
413 		usbnet_defer_kevent (dev, EVENT_RX_HALT);
414 		// FALLTHROUGH
415 
416 	    // software-driven interface shutdown
417 	    case -ECONNRESET:		// async unlink
418 	    case -ESHUTDOWN:		// hardware gone
419 		if (netif_msg_ifdown (dev))
420 			devdbg (dev, "rx shutdown, code %d", urb_status);
421 		goto block;
422 
423 	    // we get controller i/o faults during khubd disconnect() delays.
424 	    // throttle down resubmits, to avoid log floods; just temporarily,
425 	    // so we still recover when the fault isn't a khubd delay.
426 	    case -EPROTO:
427 	    case -ETIME:
428 	    case -EILSEQ:
429 		dev->stats.rx_errors++;
430 		if (!timer_pending (&dev->delay)) {
431 			mod_timer (&dev->delay, jiffies + THROTTLE_JIFFIES);
432 			if (netif_msg_link (dev))
433 				devdbg (dev, "rx throttle %d", urb_status);
434 		}
435 block:
436 		entry->state = rx_cleanup;
437 		entry->urb = urb;
438 		urb = NULL;
439 		break;
440 
441 	    // data overrun ... flush fifo?
442 	    case -EOVERFLOW:
443 		dev->stats.rx_over_errors++;
444 		// FALLTHROUGH
445 
446 	    default:
447 		entry->state = rx_cleanup;
448 		dev->stats.rx_errors++;
449 		if (netif_msg_rx_err (dev))
450 			devdbg (dev, "rx status %d", urb_status);
451 		break;
452 	}
453 
454 	defer_bh(dev, skb, &dev->rxq);
455 
456 	if (urb) {
457 		if (netif_running (dev->net)
458 				&& !test_bit (EVENT_RX_HALT, &dev->flags)) {
459 			rx_submit (dev, urb, GFP_ATOMIC);
460 			return;
461 		}
462 		usb_free_urb (urb);
463 	}
464 	if (netif_msg_rx_err (dev))
465 		devdbg (dev, "no read resubmitted");
466 }
467 
468 static void intr_complete (struct urb *urb)
469 {
470 	struct usbnet	*dev = urb->context;
471 	int		status = urb->status;
472 
473 	switch (status) {
474 	    /* success */
475 	    case 0:
476 		dev->driver_info->status(dev, urb);
477 		break;
478 
479 	    /* software-driven interface shutdown */
480 	    case -ENOENT:		// urb killed
481 	    case -ESHUTDOWN:		// hardware gone
482 		if (netif_msg_ifdown (dev))
483 			devdbg (dev, "intr shutdown, code %d", status);
484 		return;
485 
486 	    /* NOTE:  not throttling like RX/TX, since this endpoint
487 	     * already polls infrequently
488 	     */
489 	    default:
490 		devdbg (dev, "intr status %d", status);
491 		break;
492 	}
493 
494 	if (!netif_running (dev->net))
495 		return;
496 
497 	memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
498 	status = usb_submit_urb (urb, GFP_ATOMIC);
499 	if (status != 0 && netif_msg_timer (dev))
500 		deverr(dev, "intr resubmit --> %d", status);
501 }
502 
503 /*-------------------------------------------------------------------------*/
504 
505 // unlink pending rx/tx; completion handlers do all other cleanup
506 
507 static int unlink_urbs (struct usbnet *dev, struct sk_buff_head *q)
508 {
509 	unsigned long		flags;
510 	struct sk_buff		*skb, *skbnext;
511 	int			count = 0;
512 
513 	spin_lock_irqsave (&q->lock, flags);
514 	for (skb = q->next; skb != (struct sk_buff *) q; skb = skbnext) {
515 		struct skb_data		*entry;
516 		struct urb		*urb;
517 		int			retval;
518 
519 		entry = (struct skb_data *) skb->cb;
520 		urb = entry->urb;
521 		skbnext = skb->next;
522 
523 		// during some PM-driven resume scenarios,
524 		// these (async) unlinks complete immediately
525 		retval = usb_unlink_urb (urb);
526 		if (retval != -EINPROGRESS && retval != 0)
527 			devdbg (dev, "unlink urb err, %d", retval);
528 		else
529 			count++;
530 	}
531 	spin_unlock_irqrestore (&q->lock, flags);
532 	return count;
533 }
534 
535 // Flush all pending rx urbs
536 // minidrivers may need to do this when the MTU changes
537 
538 void usbnet_unlink_rx_urbs(struct usbnet *dev)
539 {
540 	if (netif_running(dev->net)) {
541 		(void) unlink_urbs (dev, &dev->rxq);
542 		tasklet_schedule(&dev->bh);
543 	}
544 }
545 EXPORT_SYMBOL_GPL(usbnet_unlink_rx_urbs);
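/* Typical use: a minidriver that grows dev->rx_urb_size at runtime (for
 * jumbo frames or receive batching) calls usbnet_unlink_rx_urbs() so the
 * rx queue is refilled with urbs of the new size -- exactly what
 * usbnet_change_mtu() does above.
 */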
546 
547 /*-------------------------------------------------------------------------*/
548 
549 // precondition: never called in_interrupt
550 
551 static int usbnet_stop (struct net_device *net)
552 {
553 	struct usbnet		*dev = netdev_priv(net);
554 	int			temp;
555 	DECLARE_WAIT_QUEUE_HEAD_ONSTACK (unlink_wakeup);
556 	DECLARE_WAITQUEUE (wait, current);
557 
558 	netif_stop_queue (net);
559 
560 	if (netif_msg_ifdown (dev))
561 		devinfo (dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld",
562 			dev->stats.rx_packets, dev->stats.tx_packets,
563 			dev->stats.rx_errors, dev->stats.tx_errors
564 			);
565 
566 	// ensure there are no more active urbs
567 	add_wait_queue (&unlink_wakeup, &wait);
568 	dev->wait = &unlink_wakeup;
569 	temp = unlink_urbs (dev, &dev->txq) + unlink_urbs (dev, &dev->rxq);
570 
571 	// maybe wait for deletions to finish.
572 	while (!skb_queue_empty(&dev->rxq) ||
573 	       !skb_queue_empty(&dev->txq) ||
574 	       !skb_queue_empty(&dev->done)) {
575 		msleep(UNLINK_TIMEOUT_MS);
576 		if (netif_msg_ifdown (dev))
577 			devdbg (dev, "waited for %d urb completions", temp);
578 	}
579 	dev->wait = NULL;
580 	remove_wait_queue (&unlink_wakeup, &wait);
581 
582 	usb_kill_urb(dev->interrupt);
583 
584 	/* deferred work (task, timer, softirq) must also stop.
585 	 * can't flush_scheduled_work() until we drop rtnl (later),
586 	 * else workers could deadlock; so make workers a NOP.
587 	 */
588 	dev->flags = 0;
589 	del_timer_sync (&dev->delay);
590 	tasklet_kill (&dev->bh);
591 
592 	return 0;
593 }
594 
595 /*-------------------------------------------------------------------------*/
596 
597 // posts reads, and enables write queuing
598 
599 // precondition: never called in_interrupt
600 
601 static int usbnet_open (struct net_device *net)
602 {
603 	struct usbnet		*dev = netdev_priv(net);
604 	int			retval = 0;
605 	struct driver_info	*info = dev->driver_info;
606 
607 	// put into "known safe" state
608 	if (info->reset && (retval = info->reset (dev)) < 0) {
609 		if (netif_msg_ifup (dev))
610 			devinfo (dev,
611 				"open reset fail (%d) usbnet usb-%s-%s, %s",
612 				retval,
613 				dev->udev->bus->bus_name, dev->udev->devpath,
614 			info->description);
615 		goto done;
616 	}
617 
618 	// insist peer be connected
619 	if (info->check_connect && (retval = info->check_connect (dev)) < 0) {
620 		if (netif_msg_ifup (dev))
621 			devdbg (dev, "can't open; %d", retval);
622 		goto done;
623 	}
624 
625 	/* start any status interrupt transfer */
626 	if (dev->interrupt) {
627 		retval = usb_submit_urb (dev->interrupt, GFP_KERNEL);
628 		if (retval < 0) {
629 			if (netif_msg_ifup (dev))
630 				deverr (dev, "intr submit %d", retval);
631 			goto done;
632 		}
633 	}
634 
635 	netif_start_queue (net);
636 	if (netif_msg_ifup (dev)) {
637 		char	*framing;
638 
639 		if (dev->driver_info->flags & FLAG_FRAMING_NC)
640 			framing = "NetChip";
641 		else if (dev->driver_info->flags & FLAG_FRAMING_GL)
642 			framing = "GeneSys";
643 		else if (dev->driver_info->flags & FLAG_FRAMING_Z)
644 			framing = "Zaurus";
645 		else if (dev->driver_info->flags & FLAG_FRAMING_RN)
646 			framing = "RNDIS";
647 		else if (dev->driver_info->flags & FLAG_FRAMING_AX)
648 			framing = "ASIX";
649 		else
650 			framing = "simple";
651 
652 		devinfo (dev, "open: enable queueing "
653 				"(rx %d, tx %d) mtu %d %s framing",
654 			(int)RX_QLEN (dev), (int)TX_QLEN (dev), dev->net->mtu,
655 			framing);
656 	}
657 
658 	// delay posting reads until we're fully open
659 	tasklet_schedule (&dev->bh);
660 done:
661 	return retval;
662 }
663 
664 /*-------------------------------------------------------------------------*/
665 
666 /* ethtool methods; minidrivers may need to add some more, but
667  * they'll probably want to use this base set.
668  */
669 
670 #if defined(CONFIG_MII) || defined(CONFIG_MII_MODULE)
671 #define HAVE_MII
672 
673 int usbnet_get_settings (struct net_device *net, struct ethtool_cmd *cmd)
674 {
675 	struct usbnet *dev = netdev_priv(net);
676 
677 	if (!dev->mii.mdio_read)
678 		return -EOPNOTSUPP;
679 
680 	return mii_ethtool_gset(&dev->mii, cmd);
681 }
682 EXPORT_SYMBOL_GPL(usbnet_get_settings);
683 
684 int usbnet_set_settings (struct net_device *net, struct ethtool_cmd *cmd)
685 {
686 	struct usbnet *dev = netdev_priv(net);
687 	int retval;
688 
689 	if (!dev->mii.mdio_write)
690 		return -EOPNOTSUPP;
691 
692 	retval = mii_ethtool_sset(&dev->mii, cmd);
693 
694 	/* link speed/duplex might have changed */
695 	if (dev->driver_info->link_reset)
696 		dev->driver_info->link_reset(dev);
697 
698 	return retval;
699 
700 }
701 EXPORT_SYMBOL_GPL(usbnet_set_settings);
702 
703 u32 usbnet_get_link (struct net_device *net)
704 {
705 	struct usbnet *dev = netdev_priv(net);
706 
707 	/* If a check_connect is defined, return its result */
708 	if (dev->driver_info->check_connect)
709 		return dev->driver_info->check_connect (dev) == 0;
710 
711 	/* if the device has mii operations, use those */
712 	if (dev->mii.mdio_read)
713 		return mii_link_ok(&dev->mii);
714 
715 	/* Otherwise, say we're up (to avoid breaking scripts) */
716 	return 1;
717 }
718 EXPORT_SYMBOL_GPL(usbnet_get_link);
719 
720 int usbnet_nway_reset(struct net_device *net)
721 {
722 	struct usbnet *dev = netdev_priv(net);
723 
724 	if (!dev->mii.mdio_write)
725 		return -EOPNOTSUPP;
726 
727 	return mii_nway_restart(&dev->mii);
728 }
729 EXPORT_SYMBOL_GPL(usbnet_nway_reset);
730 
731 #endif	/* HAVE_MII */
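/* The MII-based ethtool operations above only do something useful once
 * the minidriver's bind() has wired up dev->mii.  A hedged sketch, with
 * hypothetical foo_mdio_read/foo_mdio_write register accessors:
 *
 *	dev->mii.dev = dev->net;
 *	dev->mii.mdio_read = foo_mdio_read;
 *	dev->mii.mdio_write = foo_mdio_write;
 *	dev->mii.phy_id_mask = 0x3f;
 *	dev->mii.reg_num_mask = 0x1f;
 *	dev->mii.phy_id = phy_addr;	// as reported by the device
 */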
732 
733 void usbnet_get_drvinfo (struct net_device *net, struct ethtool_drvinfo *info)
734 {
735 	struct usbnet *dev = netdev_priv(net);
736 
737 	strncpy (info->driver, dev->driver_name, sizeof info->driver);
738 	strncpy (info->version, DRIVER_VERSION, sizeof info->version);
739 	strncpy (info->fw_version, dev->driver_info->description,
740 		sizeof info->fw_version);
741 	usb_make_path (dev->udev, info->bus_info, sizeof info->bus_info);
742 }
743 EXPORT_SYMBOL_GPL(usbnet_get_drvinfo);
744 
745 u32 usbnet_get_msglevel (struct net_device *net)
746 {
747 	struct usbnet *dev = netdev_priv(net);
748 
749 	return dev->msg_enable;
750 }
751 EXPORT_SYMBOL_GPL(usbnet_get_msglevel);
752 
753 void usbnet_set_msglevel (struct net_device *net, u32 level)
754 {
755 	struct usbnet *dev = netdev_priv(net);
756 
757 	dev->msg_enable = level;
758 }
759 EXPORT_SYMBOL_GPL(usbnet_set_msglevel);
760 
761 /* drivers may override default ethtool_ops in their bind() routine */
762 static struct ethtool_ops usbnet_ethtool_ops = {
763 #ifdef	HAVE_MII
764 	.get_settings		= usbnet_get_settings,
765 	.set_settings		= usbnet_set_settings,
766 	.get_link		= usbnet_get_link,
767 	.nway_reset		= usbnet_nway_reset,
768 #endif
769 	.get_drvinfo		= usbnet_get_drvinfo,
770 	.get_msglevel		= usbnet_get_msglevel,
771 	.set_msglevel		= usbnet_set_msglevel,
772 };
773 
774 /*-------------------------------------------------------------------------*/
775 
776 /* work that cannot be done in interrupt context uses keventd.
777  *
778  * NOTE:  with 2.5 we could do more of this using completion callbacks,
779  * especially now that control transfers can be queued.
780  */
781 static void
782 kevent (struct work_struct *work)
783 {
784 	struct usbnet		*dev =
785 		container_of(work, struct usbnet, kevent);
786 	int			status;
787 
788 	/* usb_clear_halt() needs a thread context */
789 	if (test_bit (EVENT_TX_HALT, &dev->flags)) {
790 		unlink_urbs (dev, &dev->txq);
791 		status = usb_clear_halt (dev->udev, dev->out);
792 		if (status < 0
793 				&& status != -EPIPE
794 				&& status != -ESHUTDOWN) {
795 			if (netif_msg_tx_err (dev))
796 				deverr (dev, "can't clear tx halt, status %d",
797 					status);
798 		} else {
799 			clear_bit (EVENT_TX_HALT, &dev->flags);
800 			if (status != -ESHUTDOWN)
801 				netif_wake_queue (dev->net);
802 		}
803 	}
804 	if (test_bit (EVENT_RX_HALT, &dev->flags)) {
805 		unlink_urbs (dev, &dev->rxq);
806 		status = usb_clear_halt (dev->udev, dev->in);
807 		if (status < 0
808 				&& status != -EPIPE
809 				&& status != -ESHUTDOWN) {
810 			if (netif_msg_rx_err (dev))
811 				deverr (dev, "can't clear rx halt, status %d",
812 					status);
813 		} else {
814 			clear_bit (EVENT_RX_HALT, &dev->flags);
815 			tasklet_schedule (&dev->bh);
816 		}
817 	}
818 
819 	/* tasklet could resubmit itself forever if memory is tight */
820 	if (test_bit (EVENT_RX_MEMORY, &dev->flags)) {
821 		struct urb	*urb = NULL;
822 
823 		if (netif_running (dev->net))
824 			urb = usb_alloc_urb (0, GFP_KERNEL);
825 		else
826 			clear_bit (EVENT_RX_MEMORY, &dev->flags);
827 		if (urb != NULL) {
828 			clear_bit (EVENT_RX_MEMORY, &dev->flags);
829 			rx_submit (dev, urb, GFP_KERNEL);
830 			tasklet_schedule (&dev->bh);
831 		}
832 	}
833 
834 	if (test_bit (EVENT_LINK_RESET, &dev->flags)) {
835 		struct driver_info	*info = dev->driver_info;
836 		int			retval = 0;
837 
838 		clear_bit (EVENT_LINK_RESET, &dev->flags);
839 		if(info->link_reset && (retval = info->link_reset(dev)) < 0) {
840 			devinfo(dev, "link reset failed (%d) usbnet usb-%s-%s, %s",
841 				retval,
842 				dev->udev->bus->bus_name, dev->udev->devpath,
843 				info->description);
844 		}
845 	}
846 
847 	if (dev->flags)
848 		devdbg (dev, "kevent done, flags = 0x%lx",
849 			dev->flags);
850 }
851 
852 /*-------------------------------------------------------------------------*/
853 
854 static void tx_complete (struct urb *urb)
855 {
856 	struct sk_buff		*skb = (struct sk_buff *) urb->context;
857 	struct skb_data		*entry = (struct skb_data *) skb->cb;
858 	struct usbnet		*dev = entry->dev;
859 
860 	if (urb->status == 0) {
861 		dev->stats.tx_packets++;
862 		dev->stats.tx_bytes += entry->length;
863 	} else {
864 		dev->stats.tx_errors++;
865 
866 		switch (urb->status) {
867 		case -EPIPE:
868 			usbnet_defer_kevent (dev, EVENT_TX_HALT);
869 			break;
870 
871 		/* software-driven interface shutdown */
872 		case -ECONNRESET:		// async unlink
873 		case -ESHUTDOWN:		// hardware gone
874 			break;
875 
876 		// like rx, tx gets controller i/o faults during khubd delays
877 		// and so it uses the same throttling mechanism.
878 		case -EPROTO:
879 		case -ETIME:
880 		case -EILSEQ:
881 			if (!timer_pending (&dev->delay)) {
882 				mod_timer (&dev->delay,
883 					jiffies + THROTTLE_JIFFIES);
884 				if (netif_msg_link (dev))
885 					devdbg (dev, "tx throttle %d",
886 							urb->status);
887 			}
888 			netif_stop_queue (dev->net);
889 			break;
890 		default:
891 			if (netif_msg_tx_err (dev))
892 				devdbg (dev, "tx err %d", entry->urb->status);
893 			break;
894 		}
895 	}
896 
897 	urb->dev = NULL;
898 	entry->state = tx_done;
899 	defer_bh(dev, skb, &dev->txq);
900 }
901 
902 /*-------------------------------------------------------------------------*/
903 
904 static void usbnet_tx_timeout (struct net_device *net)
905 {
906 	struct usbnet		*dev = netdev_priv(net);
907 
908 	unlink_urbs (dev, &dev->txq);
909 	tasklet_schedule (&dev->bh);
910 
911 	// FIXME: device recovery -- reset?
912 }
913 
914 /*-------------------------------------------------------------------------*/
915 
916 static int usbnet_start_xmit (struct sk_buff *skb, struct net_device *net)
917 {
918 	struct usbnet		*dev = netdev_priv(net);
919 	int			length;
920 	int			retval = NET_XMIT_SUCCESS;
921 	struct urb		*urb = NULL;
922 	struct skb_data		*entry;
923 	struct driver_info	*info = dev->driver_info;
924 	unsigned long		flags;
925 
926 	// some devices want funky USB-level framing, for
927 	// win32 driver (usually) and/or hardware quirks
928 	if (info->tx_fixup) {
929 		skb = info->tx_fixup (dev, skb, GFP_ATOMIC);
930 		if (!skb) {
931 			if (netif_msg_tx_err (dev))
932 				devdbg (dev, "can't tx_fixup skb");
933 			goto drop;
934 		}
935 	}
936 	length = skb->len;
937 
938 	if (!(urb = usb_alloc_urb (0, GFP_ATOMIC))) {
939 		if (netif_msg_tx_err (dev))
940 			devdbg (dev, "no urb");
941 		goto drop;
942 	}
943 
944 	entry = (struct skb_data *) skb->cb;
945 	entry->urb = urb;
946 	entry->dev = dev;
947 	entry->state = tx_start;
948 	entry->length = length;
949 
950 	usb_fill_bulk_urb (urb, dev->udev, dev->out,
951 			skb->data, skb->len, tx_complete, skb);
952 
953 	/* don't assume the hardware handles USB_ZERO_PACKET
954 	 * NOTE:  strictly conforming cdc-ether devices should expect
955 	 * the ZLP here, but ignore the one-byte packet.
956 	 *
957 	 * FIXME zero that byte, if it doesn't require a new skb.
958 	 */
959 	if ((length % dev->maxpacket) == 0)
960 		urb->transfer_buffer_length++;
961 
962 	spin_lock_irqsave (&dev->txq.lock, flags);
963 
964 	switch ((retval = usb_submit_urb (urb, GFP_ATOMIC))) {
965 	case -EPIPE:
966 		netif_stop_queue (net);
967 		usbnet_defer_kevent (dev, EVENT_TX_HALT);
968 		break;
969 	default:
970 		if (netif_msg_tx_err (dev))
971 			devdbg (dev, "tx: submit urb err %d", retval);
972 		break;
973 	case 0:
974 		net->trans_start = jiffies;
975 		__skb_queue_tail (&dev->txq, skb);
976 		if (dev->txq.qlen >= TX_QLEN (dev))
977 			netif_stop_queue (net);
978 	}
979 	spin_unlock_irqrestore (&dev->txq.lock, flags);
980 
981 	if (retval) {
982 		if (netif_msg_tx_err (dev))
983 			devdbg (dev, "drop, code %d", retval);
984 drop:
985 		retval = NET_XMIT_SUCCESS;
986 		dev->stats.tx_dropped++;
987 		if (skb)
988 			dev_kfree_skb_any (skb);
989 		usb_free_urb (urb);
990 	} else if (netif_msg_tx_queued (dev)) {
991 		devdbg (dev, "> tx, len %d, type 0x%x",
992 			length, skb->protocol);
993 	}
994 	return retval;
995 }
996 
997 
998 /*-------------------------------------------------------------------------*/
999 
1000 // tasklet (work deferred from completions, in_irq) or timer
1001 
1002 static void usbnet_bh (unsigned long param)
1003 {
1004 	struct usbnet		*dev = (struct usbnet *) param;
1005 	struct sk_buff		*skb;
1006 	struct skb_data		*entry;
1007 
1008 	while ((skb = skb_dequeue (&dev->done))) {
1009 		entry = (struct skb_data *) skb->cb;
1010 		switch (entry->state) {
1011 		    case rx_done:
1012 			entry->state = rx_cleanup;
1013 			rx_process (dev, skb);
1014 			continue;
1015 		    case tx_done:
1016 		    case rx_cleanup:
1017 			usb_free_urb (entry->urb);
1018 			dev_kfree_skb (skb);
1019 			continue;
1020 		    default:
1021 			devdbg (dev, "bogus skb state %d", entry->state);
1022 		}
1023 	}
1024 
1025 	// waiting for all pending urbs to complete?
1026 	if (dev->wait) {
1027 		if ((dev->txq.qlen + dev->rxq.qlen + dev->done.qlen) == 0) {
1028 			wake_up (dev->wait);
1029 		}
1030 
1031 	// or are we maybe short a few urbs?
1032 	} else if (netif_running (dev->net)
1033 			&& netif_device_present (dev->net)
1034 			&& !timer_pending (&dev->delay)
1035 			&& !test_bit (EVENT_RX_HALT, &dev->flags)) {
1036 		int	temp = dev->rxq.qlen;
1037 		int	qlen = RX_QLEN (dev);
1038 
1039 		if (temp < qlen) {
1040 			struct urb	*urb;
1041 			int		i;
1042 
1043 			// don't refill the queue all at once
1044 			for (i = 0; i < 10 && dev->rxq.qlen < qlen; i++) {
1045 				urb = usb_alloc_urb (0, GFP_ATOMIC);
1046 				if (urb != NULL)
1047 					rx_submit (dev, urb, GFP_ATOMIC);
1048 			}
1049 			if (temp != dev->rxq.qlen && netif_msg_link (dev))
1050 				devdbg (dev, "rxqlen %d --> %d",
1051 						temp, dev->rxq.qlen);
1052 			if (dev->rxq.qlen < qlen)
1053 				tasklet_schedule (&dev->bh);
1054 		}
1055 		if (dev->txq.qlen < TX_QLEN (dev))
1056 			netif_wake_queue (dev->net);
1057 	}
1058 }
1059 
1060 
1061 
1062 /*-------------------------------------------------------------------------
1063  *
1064  * USB Device Driver support
1065  *
1066  *-------------------------------------------------------------------------*/
1067 
1068 // precondition: never called in_interrupt
1069 
1070 void usbnet_disconnect (struct usb_interface *intf)
1071 {
1072 	struct usbnet		*dev;
1073 	struct usb_device	*xdev;
1074 	struct net_device	*net;
1075 
1076 	dev = usb_get_intfdata(intf);
1077 	usb_set_intfdata(intf, NULL);
1078 	if (!dev)
1079 		return;
1080 
1081 	xdev = interface_to_usbdev (intf);
1082 
1083 	if (netif_msg_probe (dev))
1084 		devinfo (dev, "unregister '%s' usb-%s-%s, %s",
1085 			intf->dev.driver->name,
1086 			xdev->bus->bus_name, xdev->devpath,
1087 			dev->driver_info->description);
1088 
1089 	net = dev->net;
1090 	unregister_netdev (net);
1091 
1092 	/* we don't hold rtnl here ... */
1093 	flush_scheduled_work ();
1094 
1095 	if (dev->driver_info->unbind)
1096 		dev->driver_info->unbind (dev, intf);
1097 
1098 	free_netdev(net);
1099 	usb_put_dev (xdev);
1100 }
1101 EXPORT_SYMBOL_GPL(usbnet_disconnect);
1102 
1103 
1104 /*-------------------------------------------------------------------------*/
1105 
1106 // precondition: never called in_interrupt
1107 
1108 int
1109 usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
1110 {
1111 	struct usbnet			*dev;
1112 	struct net_device		*net;
1113 	struct usb_host_interface	*interface;
1114 	struct driver_info		*info;
1115 	struct usb_device		*xdev;
1116 	int				status;
1117 	const char			*name;
1118 
1119 	name = udev->dev.driver->name;
1120 	info = (struct driver_info *) prod->driver_info;
1121 	if (!info) {
1122 		dev_dbg (&udev->dev, "blacklisted by %s\n", name);
1123 		return -ENODEV;
1124 	}
1125 	xdev = interface_to_usbdev (udev);
1126 	interface = udev->cur_altsetting;
1127 
1128 	usb_get_dev (xdev);
1129 
1130 	status = -ENOMEM;
1131 
1132 	// set up our own records
1133 	net = alloc_etherdev(sizeof(*dev));
1134 	if (!net) {
1135 		dbg ("can't kmalloc dev");
1136 		goto out;
1137 	}
1138 
1139 	dev = netdev_priv(net);
1140 	dev->udev = xdev;
1141 	dev->driver_info = info;
1142 	dev->driver_name = name;
1143 	dev->msg_enable = netif_msg_init (msg_level, NETIF_MSG_DRV
1144 				| NETIF_MSG_PROBE | NETIF_MSG_LINK);
1145 	skb_queue_head_init (&dev->rxq);
1146 	skb_queue_head_init (&dev->txq);
1147 	skb_queue_head_init (&dev->done);
1148 	dev->bh.func = usbnet_bh;
1149 	dev->bh.data = (unsigned long) dev;
1150 	INIT_WORK (&dev->kevent, kevent);
1151 	dev->delay.function = usbnet_bh;
1152 	dev->delay.data = (unsigned long) dev;
1153 	init_timer (&dev->delay);
1154 	mutex_init (&dev->phy_mutex);
1155 
1156 	SET_MODULE_OWNER (net);
1157 	dev->net = net;
1158 	strcpy (net->name, "usb%d");
1159 	memcpy (net->dev_addr, node_id, sizeof node_id);
1160 
1161 	/* rx and tx sides can use different message sizes;
1162 	 * bind() should set rx_urb_size in that case.
1163 	 */
1164 	dev->hard_mtu = net->mtu + net->hard_header_len;
1165 #if 0
1166 // dma_supported() is deeply broken on almost all architectures
1167 	// possible with some EHCI controllers
1168 	if (dma_supported (&udev->dev, DMA_64BIT_MASK))
1169 		net->features |= NETIF_F_HIGHDMA;
1170 #endif
1171 
1172 	net->change_mtu = usbnet_change_mtu;
1173 	net->get_stats = usbnet_get_stats;
1174 	net->hard_start_xmit = usbnet_start_xmit;
1175 	net->open = usbnet_open;
1176 	net->stop = usbnet_stop;
1177 	net->watchdog_timeo = TX_TIMEOUT_JIFFIES;
1178 	net->tx_timeout = usbnet_tx_timeout;
1179 	net->ethtool_ops = &usbnet_ethtool_ops;
1180 
1181 	// allow device-specific bind/init procedures
1182 	// NOTE net->name still not usable ...
1183 	if (info->bind) {
1184 		status = info->bind (dev, udev);
1185 		if (status < 0)
1186 			goto out1;
1187 
1188 		// heuristic:  "usb%d" for links we know are two-host,
1189 		// else "eth%d" when there's reasonable doubt.  userspace
1190 		// can rename the link if it knows better.
1191 		if ((dev->driver_info->flags & FLAG_ETHER) != 0
1192 				&& (net->dev_addr [0] & 0x02) == 0)
1193 			strcpy (net->name, "eth%d");
1194 
1195 		/* maybe the remote can't receive an Ethernet MTU */
1196 		if (net->mtu > (dev->hard_mtu - net->hard_header_len))
1197 			net->mtu = dev->hard_mtu - net->hard_header_len;
1198 	} else if (!info->in || !info->out)
1199 		status = usbnet_get_endpoints (dev, udev);
1200 	else {
1201 		dev->in = usb_rcvbulkpipe (xdev, info->in);
1202 		dev->out = usb_sndbulkpipe (xdev, info->out);
1203 		if (!(info->flags & FLAG_NO_SETINT))
1204 			status = usb_set_interface (xdev,
1205 				interface->desc.bInterfaceNumber,
1206 				interface->desc.bAlternateSetting);
1207 		else
1208 			status = 0;
1209 
1210 	}
1211 	if (status == 0 && dev->status)
1212 		status = init_status (dev, udev);
1213 	if (status < 0)
1214 		goto out3;
1215 
1216 	if (!dev->rx_urb_size)
1217 		dev->rx_urb_size = dev->hard_mtu;
1218 	dev->maxpacket = usb_maxpacket (dev->udev, dev->out, 1);
1219 
1220 	SET_NETDEV_DEV(net, &udev->dev);
1221 	status = register_netdev (net);
1222 	if (status)
1223 		goto out3;
1224 	if (netif_msg_probe (dev))
1225 		devinfo (dev, "register '%s' at usb-%s-%s, %s, "
1226 				"%02x:%02x:%02x:%02x:%02x:%02x",
1227 			udev->dev.driver->name,
1228 			xdev->bus->bus_name, xdev->devpath,
1229 			dev->driver_info->description,
1230 			net->dev_addr [0], net->dev_addr [1],
1231 			net->dev_addr [2], net->dev_addr [3],
1232 			net->dev_addr [4], net->dev_addr [5]);
1233 
1234 	// ok, it's ready to go.
1235 	usb_set_intfdata (udev, dev);
1236 
1237 	// start as if the link is up
1238 	netif_device_attach (net);
1239 
1240 	return 0;
1241 
1242 out3:
1243 	if (info->unbind)
1244 		info->unbind (dev, udev);
1245 out1:
1246 	free_netdev(net);
1247 out:
1248 	usb_put_dev(xdev);
1249 	return status;
1250 }
1251 EXPORT_SYMBOL_GPL(usbnet_probe);
1252 
1253 /*-------------------------------------------------------------------------*/
1254 
1255 /*
1256  * suspend the whole driver as soon as the first interface is suspended
1257  * resume only when the last interface is resumed
1258  */
1259 
1260 int usbnet_suspend (struct usb_interface *intf, pm_message_t message)
1261 {
1262 	struct usbnet		*dev = usb_get_intfdata(intf);
1263 
1264 	if (!dev->suspend_count++) {
1265 		/* accelerate emptying of the rx and tx queues, to avoid
1266 		 * having everything error out.
1267 		 */
1268 		netif_device_detach (dev->net);
1269 		(void) unlink_urbs (dev, &dev->rxq);
1270 		(void) unlink_urbs (dev, &dev->txq);
1271 	}
1272 	return 0;
1273 }
1274 EXPORT_SYMBOL_GPL(usbnet_suspend);
1275 
1276 int usbnet_resume (struct usb_interface *intf)
1277 {
1278 	struct usbnet		*dev = usb_get_intfdata(intf);
1279 
1280 	if (!--dev->suspend_count) {
1281 		netif_device_attach (dev->net);
1282 		tasklet_schedule (&dev->bh);
1283 	}
1284 	return 0;
1285 }
1286 EXPORT_SYMBOL_GPL(usbnet_resume);
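/* Putting the exported entry points together: a minidriver normally
 * supplies a driver_info plus a thin usb_driver that points straight at
 * the generic probe/disconnect/suspend/resume above.  A sketch with
 * illustrative names ("foo", the IDs, and foo_bind are made up):
 *
 *	static const struct driver_info foo_info = {
 *		.description	= "Foo USB Ethernet",
 *		.flags		= FLAG_ETHER,
 *		.bind		= foo_bind,
 *	};
 *
 *	static const struct usb_device_id foo_ids[] = {
 *		{ USB_DEVICE(0x1234, 0x5678),
 *		  .driver_info = (unsigned long) &foo_info, },
 *		{ },
 *	};
 *
 *	static struct usb_driver foo_driver = {
 *		.name		= "foo",
 *		.id_table	= foo_ids,
 *		.probe		= usbnet_probe,
 *		.disconnect	= usbnet_disconnect,
 *		.suspend	= usbnet_suspend,
 *		.resume		= usbnet_resume,
 *	};
 */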
1287 
1288 
1289 /*-------------------------------------------------------------------------*/
1290 
1291 static int __init usbnet_init(void)
1292 {
1293 	/* compiler should optimize this out */
1294 	BUILD_BUG_ON (sizeof (((struct sk_buff *)0)->cb)
1295 			< sizeof (struct skb_data));
1296 
1297 	random_ether_addr(node_id);
1298 	return 0;
1299 }
1300 module_init(usbnet_init);
1301 
1302 static void __exit usbnet_exit(void)
1303 {
1304 }
1305 module_exit(usbnet_exit);
1306 
1307 MODULE_AUTHOR("David Brownell");
1308 MODULE_DESCRIPTION("USB network driver framework");
1309 MODULE_LICENSE("GPL");
1310