xref: /openbmc/linux/drivers/usb/misc/usbtest.c (revision 8cf4328569acc37ac5c5b4eb27ae86c3758f627b)
1 #include <linux/kernel.h>
2 #include <linux/errno.h>
3 #include <linux/init.h>
4 #include <linux/slab.h>
5 #include <linux/mm.h>
6 #include <linux/module.h>
7 #include <linux/moduleparam.h>
8 #include <linux/scatterlist.h>
9 #include <linux/mutex.h>
10 
11 #include <linux/usb.h>
12 
13 
14 /*-------------------------------------------------------------------------*/
15 
16 static int override_alt = -1;
17 module_param_named(alt, override_alt, int, 0644);
18 MODULE_PARM_DESC(alt, ">= 0 to override altsetting selection");
19 
20 /*-------------------------------------------------------------------------*/
21 
22 /* FIXME make these public somewhere; usbdevfs.h? */
23 struct usbtest_param {
24 	/* inputs */
25 	unsigned		test_num;	/* 0..(TEST_CASES-1) */
26 	unsigned		iterations;
27 	unsigned		length;
28 	unsigned		vary;
29 	unsigned		sglen;
30 
31 	/* outputs */
32 	struct timeval		duration;
33 };
34 #define USBTEST_REQUEST	_IOWR('U', 100, struct usbtest_param)
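/*
 * Illustration only (not part of this driver): user space reaches this
 * ioctl through usbfs, roughly the way the testusb tool does.  This sketch
 * assumes the struct and request code above have been copied to user space;
 * the device path, interface number, and parameter values are placeholders.
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/usbdevice_fs.h>
 *
 *	struct usbtest_param param = {
 *		.test_num   = 1,	// which test to run
 *		.iterations = 1000,
 *		.length     = 512,
 *		.vary       = 0,
 *		.sglen      = 32,
 *	};
 *	struct usbdevfs_ioctl wrapper = {
 *		.ifno       = 0,	// interface this driver bound to
 *		.ioctl_code = USBTEST_REQUEST,
 *		.data       = &param,
 *	};
 *	int fd = open("/dev/bus/usb/001/002", O_RDWR);
 *	int status = ioctl(fd, USBDEVFS_IOCTL, &wrapper);
 *	// a return < 0 means the test failed (errno has the cause); on
 *	// success the driver fills param.duration with the elapsed time
 */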
35 
36 /*-------------------------------------------------------------------------*/
37 
38 #define	GENERIC		/* let probe() bind using module params */
39 
40 /* Some devices that can be used for testing will have "real" drivers.
41  * Entries for those need to be enabled here by hand, after disabling
42  * that "real" driver.
43  */
44 //#define	IBOT2		/* grab iBOT2 webcams */
45 //#define	KEYSPAN_19Qi	/* grab un-renumerated serial adapter */
46 
47 /*-------------------------------------------------------------------------*/
48 
49 struct usbtest_info {
50 	const char		*name;
51 	u8			ep_in;		/* bulk/intr source */
52 	u8			ep_out;		/* bulk/intr sink */
53 	unsigned		autoconf:1;
54 	unsigned		ctrl_out:1;
55 	unsigned		iso:1;		/* try iso in/out */
56 	int			alt;
57 };
58 
59 /* this is accessed only through usbfs ioctl calls.
60  * one ioctl to issue a test ... one lock per device.
61  * tests create other threads if they need them.
62  * urbs and buffers are allocated dynamically,
63  * and data generated deterministically.
64  */
65 struct usbtest_dev {
66 	struct usb_interface	*intf;
67 	struct usbtest_info	*info;
68 	int			in_pipe;
69 	int			out_pipe;
70 	int			in_iso_pipe;
71 	int			out_iso_pipe;
72 	struct usb_endpoint_descriptor	*iso_in, *iso_out;
73 	struct mutex		lock;
74 
75 #define TBUF_SIZE	256
76 	u8			*buf;
77 };
78 
79 static struct usb_device *testdev_to_usbdev(struct usbtest_dev *test)
80 {
81 	return interface_to_usbdev(test->intf);
82 }
83 
84 /* set up all urbs so they can be used with either bulk or interrupt */
85 #define	INTERRUPT_RATE		1	/* msec/transfer */
86 
87 #define ERROR(tdev, fmt, args...) \
88 	dev_err(&(tdev)->intf->dev , fmt , ## args)
89 #define WARNING(tdev, fmt, args...) \
90 	dev_warn(&(tdev)->intf->dev , fmt , ## args)
91 
92 #define GUARD_BYTE	0xA5
93 
94 /*-------------------------------------------------------------------------*/
95 
96 static int
97 get_endpoints(struct usbtest_dev *dev, struct usb_interface *intf)
98 {
99 	int				tmp;
100 	struct usb_host_interface	*alt;
101 	struct usb_host_endpoint	*in, *out;
102 	struct usb_host_endpoint	*iso_in, *iso_out;
103 	struct usb_device		*udev;
104 
105 	for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
106 		unsigned	ep;
107 
108 		in = out = NULL;
109 		iso_in = iso_out = NULL;
110 		alt = intf->altsetting + tmp;
111 
112 		if (override_alt >= 0 &&
113 				override_alt != alt->desc.bAlternateSetting)
114 			continue;
115 
116 		/* take the first altsetting with in-bulk + out-bulk;
117 		 * ignore other endpoints and altsettings.
118 		 */
119 		for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
120 			struct usb_host_endpoint	*e;
121 
122 			e = alt->endpoint + ep;
123 			switch (usb_endpoint_type(&e->desc)) {
124 			case USB_ENDPOINT_XFER_BULK:
125 				break;
126 			case USB_ENDPOINT_XFER_ISOC:
127 				if (dev->info->iso)
128 					goto try_iso;
129 				/* FALLTHROUGH */
130 			default:
131 				continue;
132 			}
133 			if (usb_endpoint_dir_in(&e->desc)) {
134 				if (!in)
135 					in = e;
136 			} else {
137 				if (!out)
138 					out = e;
139 			}
140 			continue;
141 try_iso:
142 			if (usb_endpoint_dir_in(&e->desc)) {
143 				if (!iso_in)
144 					iso_in = e;
145 			} else {
146 				if (!iso_out)
147 					iso_out = e;
148 			}
149 		}
150 		if ((in && out)  ||  iso_in || iso_out)
151 			goto found;
152 	}
153 	return -EINVAL;
154 
155 found:
156 	udev = testdev_to_usbdev(dev);
157 	dev->info->alt = alt->desc.bAlternateSetting;
158 	if (alt->desc.bAlternateSetting != 0) {
159 		tmp = usb_set_interface(udev,
160 				alt->desc.bInterfaceNumber,
161 				alt->desc.bAlternateSetting);
162 		if (tmp < 0)
163 			return tmp;
164 	}
165 
166 	if (in) {
167 		dev->in_pipe = usb_rcvbulkpipe(udev,
168 			in->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
169 		dev->out_pipe = usb_sndbulkpipe(udev,
170 			out->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
171 	}
172 	if (iso_in) {
173 		dev->iso_in = &iso_in->desc;
174 		dev->in_iso_pipe = usb_rcvisocpipe(udev,
175 				iso_in->desc.bEndpointAddress
176 					& USB_ENDPOINT_NUMBER_MASK);
177 	}
178 
179 	if (iso_out) {
180 		dev->iso_out = &iso_out->desc;
181 		dev->out_iso_pipe = usb_sndisocpipe(udev,
182 				iso_out->desc.bEndpointAddress
183 					& USB_ENDPOINT_NUMBER_MASK);
184 	}
185 	return 0;
186 }
187 
188 /*-------------------------------------------------------------------------*/
189 
190 /* Support for testing basic non-queued I/O streams.
191  *
192  * These just package urbs as requests that can be easily canceled.
193  * Each urb's data buffer is dynamically allocated; callers can fill
194  * them with non-zero test data (or test for it) when appropriate.
195  */
196 
197 static void simple_callback(struct urb *urb)
198 {
199 	complete(urb->context);
200 }
201 
202 static struct urb *usbtest_alloc_urb(
203 	struct usb_device	*udev,
204 	int			pipe,
205 	unsigned long		bytes,
206 	unsigned		transfer_flags,
207 	unsigned		offset)
208 {
209 	struct urb		*urb;
210 
211 	urb = usb_alloc_urb(0, GFP_KERNEL);
212 	if (!urb)
213 		return urb;
214 	usb_fill_bulk_urb(urb, udev, pipe, NULL, bytes, simple_callback, NULL);
215 	urb->interval = (udev->speed == USB_SPEED_HIGH)
216 			? (INTERRUPT_RATE << 3)
217 			: INTERRUPT_RATE;
218 	urb->transfer_flags = transfer_flags;
219 	if (usb_pipein(pipe))
220 		urb->transfer_flags |= URB_SHORT_NOT_OK;
221 
222 	if (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
223 		urb->transfer_buffer = usb_alloc_coherent(udev, bytes + offset,
224 			GFP_KERNEL, &urb->transfer_dma);
225 	else
226 		urb->transfer_buffer = kmalloc(bytes + offset, GFP_KERNEL);
227 
228 	if (!urb->transfer_buffer) {
229 		usb_free_urb(urb);
230 		return NULL;
231 	}
232 
233 	/* To test unaligned transfers, add an offset and fill the
234 	 * unused memory with a guard value */
235 	if (offset) {
236 		memset(urb->transfer_buffer, GUARD_BYTE, offset);
237 		urb->transfer_buffer += offset;
238 		if (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
239 			urb->transfer_dma += offset;
240 	}
241 
242 	/* For inbound transfers use a guard byte so that the test fails
243 	 * if data is not correctly copied */
244 	memset(urb->transfer_buffer,
245 			usb_pipein(urb->pipe) ? GUARD_BYTE : 0,
246 			bytes);
247 	return urb;
248 }
249 
250 static struct urb *simple_alloc_urb(
251 	struct usb_device	*udev,
252 	int			pipe,
253 	unsigned long		bytes)
254 {
255 	return usbtest_alloc_urb(udev, pipe, bytes, URB_NO_TRANSFER_DMA_MAP, 0);
256 }
257 
258 static unsigned pattern;
259 static unsigned mod_pattern;
260 module_param_named(pattern, mod_pattern, uint, S_IRUGO | S_IWUSR);
261 MODULE_PARM_DESC(pattern, "i/o pattern (0 == zeroes)");
262 
263 static inline void simple_fill_buf(struct urb *urb)
264 {
265 	unsigned	i;
266 	u8		*buf = urb->transfer_buffer;
267 	unsigned	len = urb->transfer_buffer_length;
268 
269 	switch (pattern) {
270 	default:
271 		/* FALLTHROUGH */
272 	case 0:
273 		memset(buf, 0, len);
274 		break;
275 	case 1:			/* mod63 */
276 		for (i = 0; i < len; i++)
277 			*buf++ = (u8) (i % 63);
278 		break;
279 	}
280 }
281 
282 static inline unsigned long buffer_offset(void *buf)
283 {
284 	return (unsigned long)buf & (ARCH_KMALLOC_MINALIGN - 1);
285 }
286 
287 static int check_guard_bytes(struct usbtest_dev *tdev, struct urb *urb)
288 {
289 	u8 *buf = urb->transfer_buffer;
290 	u8 *guard = buf - buffer_offset(buf);
291 	unsigned i;
292 
293 	for (i = 0; guard < buf; i++, guard++) {
294 		if (*guard != GUARD_BYTE) {
295 			ERROR(tdev, "guard byte[%d] %d (not %d)\n",
296 				i, *guard, GUARD_BYTE);
297 			return -EINVAL;
298 		}
299 	}
300 	return 0;
301 }
302 
303 static int simple_check_buf(struct usbtest_dev *tdev, struct urb *urb)
304 {
305 	unsigned	i;
306 	u8		expected;
307 	u8		*buf = urb->transfer_buffer;
308 	unsigned	len = urb->actual_length;
309 
310 	int ret = check_guard_bytes(tdev, urb);
311 	if (ret)
312 		return ret;
313 
314 	for (i = 0; i < len; i++, buf++) {
315 		switch (pattern) {
316 		/* all-zeroes has no synchronization issues */
317 		case 0:
318 			expected = 0;
319 			break;
320 		/* mod63 stays in sync with short-terminated transfers,
321 		 * or otherwise when host and gadget agree on how large
322 		 * each usb transfer request should be.  resync is done
323 		 * with set_interface or set_config.
324 		 */
325 		case 1:			/* mod63 */
326 			expected = i % 63;
327 			break;
328 		/* always fail unsupported patterns */
329 		default:
330 			expected = !*buf;
331 			break;
332 		}
333 		if (*buf == expected)
334 			continue;
335 		ERROR(tdev, "buf[%d] = %d (not %d)\n", i, *buf, expected);
336 		return -EINVAL;
337 	}
338 	return 0;
339 }
340 
341 static void simple_free_urb(struct urb *urb)
342 {
343 	unsigned long offset = buffer_offset(urb->transfer_buffer);
344 
345 	if (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
346 		usb_free_coherent(
347 			urb->dev,
348 			urb->transfer_buffer_length + offset,
349 			urb->transfer_buffer - offset,
350 			urb->transfer_dma - offset);
351 	else
352 		kfree(urb->transfer_buffer - offset);
353 	usb_free_urb(urb);
354 }
355 
356 static int simple_io(
357 	struct usbtest_dev	*tdev,
358 	struct urb		*urb,
359 	int			iterations,
360 	int			vary,
361 	int			expected,
362 	const char		*label
363 )
364 {
365 	struct usb_device	*udev = urb->dev;
366 	int			max = urb->transfer_buffer_length;
367 	struct completion	completion;
368 	int			retval = 0;
369 
370 	urb->context = &completion;
371 	while (retval == 0 && iterations-- > 0) {
372 		init_completion(&completion);
373 		if (usb_pipeout(urb->pipe)) {
374 			simple_fill_buf(urb);
375 			urb->transfer_flags |= URB_ZERO_PACKET;
376 		}
377 		retval = usb_submit_urb(urb, GFP_KERNEL);
378 		if (retval != 0)
379 			break;
380 
381 		/* NOTE:  no timeouts; can't be broken out of by interrupt */
382 		wait_for_completion(&completion);
383 		retval = urb->status;
384 		urb->dev = udev;
385 		if (retval == 0 && usb_pipein(urb->pipe))
386 			retval = simple_check_buf(tdev, urb);
387 
388 		if (vary) {
389 			int	len = urb->transfer_buffer_length;
390 
391 			len += vary;
392 			len %= max;
393 			if (len == 0)
394 				len = (vary < max) ? vary : max;
395 			urb->transfer_buffer_length = len;
396 		}
397 
398 		/* FIXME if endpoint halted, clear halt (and log) */
399 	}
400 	urb->transfer_buffer_length = max;
401 
402 	if (expected != retval)
403 		dev_err(&udev->dev,
404 			"%s failed, iterations left %d, status %d (not %d)\n",
405 				label, iterations, retval, expected);
406 	return retval;
407 }
408 
409 
410 /*-------------------------------------------------------------------------*/
411 
412 /* We use scatterlist primitives to test queued I/O.
413  * Yes, this also tests the scatterlist primitives.
414  */
415 
416 static void free_sglist(struct scatterlist *sg, int nents)
417 {
418 	unsigned		i;
419 
420 	if (!sg)
421 		return;
422 	for (i = 0; i < nents; i++) {
423 		if (!sg_page(&sg[i]))
424 			continue;
425 		kfree(sg_virt(&sg[i]));
426 	}
427 	kfree(sg);
428 }
429 
430 static struct scatterlist *
431 alloc_sglist(int nents, int max, int vary)
432 {
433 	struct scatterlist	*sg;
434 	unsigned		i;
435 	unsigned		size = max;
436 
437 	if (max == 0)
438 		return NULL;
439 
440 	sg = kmalloc_array(nents, sizeof(*sg), GFP_KERNEL);
441 	if (!sg)
442 		return NULL;
443 	sg_init_table(sg, nents);
444 
445 	for (i = 0; i < nents; i++) {
446 		char		*buf;
447 		unsigned	j;
448 
449 		buf = kzalloc(size, GFP_KERNEL);
450 		if (!buf) {
451 			free_sglist(sg, i);
452 			return NULL;
453 		}
454 
455 		/* kmalloc pages are always physically contiguous! */
456 		sg_set_buf(&sg[i], buf, size);
457 
458 		switch (pattern) {
459 		case 0:
460 			/* already zeroed */
461 			break;
462 		case 1:
463 			for (j = 0; j < size; j++)
464 				*buf++ = (u8) (j % 63);
465 			break;
466 		}
467 
468 		if (vary) {
469 			size += vary;
470 			size %= max;
471 			if (size == 0)
472 				size = (vary < max) ? vary : max;
473 		}
474 	}
475 
476 	return sg;
477 }
478 
479 static int perform_sglist(
480 	struct usbtest_dev	*tdev,
481 	unsigned		iterations,
482 	int			pipe,
483 	struct usb_sg_request	*req,
484 	struct scatterlist	*sg,
485 	int			nents
486 )
487 {
488 	struct usb_device	*udev = testdev_to_usbdev(tdev);
489 	int			retval = 0;
490 
491 	while (retval == 0 && iterations-- > 0) {
492 		retval = usb_sg_init(req, udev, pipe,
493 				(udev->speed == USB_SPEED_HIGH)
494 					? (INTERRUPT_RATE << 3)
495 					: INTERRUPT_RATE,
496 				sg, nents, 0, GFP_KERNEL);
497 
498 		if (retval)
499 			break;
500 		usb_sg_wait(req);
501 		retval = req->status;
502 
503 		/* FIXME check resulting data pattern */
504 
505 		/* FIXME if endpoint halted, clear halt (and log) */
506 	}
507 
508 	/* FIXME for unlink or fault handling tests, don't report
509 	 * failure if retval is as we expected ...
510 	 */
511 	if (retval)
512 		ERROR(tdev, "perform_sglist failed, "
513 				"iterations left %d, status %d\n",
514 				iterations, retval);
515 	return retval;
516 }
517 
518 
519 /*-------------------------------------------------------------------------*/
520 
521 /* unqueued control message testing
522  *
523  * there's a nice set of device functional requirements in chapter 9 of the
524  * usb 2.0 spec, which we can apply to ANY device, even ones that don't use
525  * special test firmware.
526  *
527  * we know the device is configured (or suspended) by the time it's visible
528  * through usbfs.  we can't change that, so we won't test enumeration (which
529  * worked 'well enough' to get here, this time), power management (ditto),
530  * or remote wakeup (which needs human interaction).
531  */
532 
533 static unsigned realworld = 1;
534 module_param(realworld, uint, 0);
535 MODULE_PARM_DESC(realworld, "clear to demand stricter spec compliance");
536 
537 static int get_altsetting(struct usbtest_dev *dev)
538 {
539 	struct usb_interface	*iface = dev->intf;
540 	struct usb_device	*udev = interface_to_usbdev(iface);
541 	int			retval;
542 
543 	retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
544 			USB_REQ_GET_INTERFACE, USB_DIR_IN|USB_RECIP_INTERFACE,
545 			0, iface->altsetting[0].desc.bInterfaceNumber,
546 			dev->buf, 1, USB_CTRL_GET_TIMEOUT);
547 	switch (retval) {
548 	case 1:
549 		return dev->buf[0];
550 	case 0:
551 		retval = -ERANGE;
552 		/* FALLTHROUGH */
553 	default:
554 		return retval;
555 	}
556 }
557 
558 static int set_altsetting(struct usbtest_dev *dev, int alternate)
559 {
560 	struct usb_interface		*iface = dev->intf;
561 	struct usb_device		*udev;
562 
563 	if (alternate < 0 || alternate >= 256)
564 		return -EINVAL;
565 
566 	udev = interface_to_usbdev(iface);
567 	return usb_set_interface(udev,
568 			iface->altsetting[0].desc.bInterfaceNumber,
569 			alternate);
570 }
571 
572 static int is_good_config(struct usbtest_dev *tdev, int len)
573 {
574 	struct usb_config_descriptor	*config;
575 
576 	if (len < sizeof(*config))
577 		return 0;
578 	config = (struct usb_config_descriptor *) tdev->buf;
579 
580 	switch (config->bDescriptorType) {
581 	case USB_DT_CONFIG:
582 	case USB_DT_OTHER_SPEED_CONFIG:
583 		if (config->bLength != 9) {
584 			ERROR(tdev, "bogus config descriptor length\n");
585 			return 0;
586 		}
587 		/* this bit 'must be 1' but often isn't */
588 		if (!realworld && !(config->bmAttributes & 0x80)) {
589 			ERROR(tdev, "high bit of config attributes not set\n");
590 			return 0;
591 		}
592 		if (config->bmAttributes & 0x1f) {	/* reserved == 0 */
593 			ERROR(tdev, "reserved config bits set\n");
594 			return 0;
595 		}
596 		break;
597 	default:
598 		return 0;
599 	}
600 
601 	if (le16_to_cpu(config->wTotalLength) == len)	/* read it all */
602 		return 1;
603 	if (le16_to_cpu(config->wTotalLength) >= TBUF_SIZE)	/* max partial read */
604 		return 1;
605 	ERROR(tdev, "bogus config descriptor read size\n");
606 	return 0;
607 }
608 
609 static int is_good_ext(struct usbtest_dev *tdev, u8 *buf)
610 {
611 	struct usb_ext_cap_descriptor *ext;
612 	u32 attr;
613 
614 	ext = (struct usb_ext_cap_descriptor *) buf;
615 
616 	if (ext->bLength != USB_DT_USB_EXT_CAP_SIZE) {
617 		ERROR(tdev, "bogus usb 2.0 extension descriptor length\n");
618 		return 0;
619 	}
620 
621 	attr = le32_to_cpu(ext->bmAttributes);
622 	/* bits[1:15] are used and the others are reserved */
623 	if (attr & ~0xfffe) {	/* reserved == 0 */
624 		ERROR(tdev, "reserved bits set\n");
625 		return 0;
626 	}
627 
628 	return 1;
629 }
630 
631 static int is_good_ss_cap(struct usbtest_dev *tdev, u8 *buf)
632 {
633 	struct usb_ss_cap_descriptor *ss;
634 
635 	ss = (struct usb_ss_cap_descriptor *) buf;
636 
637 	if (ss->bLength != USB_DT_USB_SS_CAP_SIZE) {
638 		ERROR(tdev, "bogus superspeed device capability descriptor length\n");
639 		return 0;
640 	}
641 
642 	/*
643 	 * only bit[1] of bmAttributes is used for LTM and others are
644 	 * reserved
645 	 */
646 	if (ss->bmAttributes & ~0x02) {	/* reserved == 0 */
647 		ERROR(tdev, "reserved bits set in bmAttributes\n");
648 		return 0;
649 	}
650 
651 	/* bits[0:3] of wSpeedSupported are used and the others are reserved */
652 	if (le16_to_cpu(ss->wSpeedSupported) & ~0x0f) {	/* reserved == 0 */
653 		ERROR(tdev, "reserved bits set in wSpeedSupported\n");
654 		return 0;
655 	}
656 
657 	return 1;
658 }
659 
660 static int is_good_con_id(struct usbtest_dev *tdev, u8 *buf)
661 {
662 	struct usb_ss_container_id_descriptor *con_id;
663 
664 	con_id = (struct usb_ss_container_id_descriptor *) buf;
665 
666 	if (con_id->bLength != USB_DT_USB_SS_CONTN_ID_SIZE) {
667 		ERROR(tdev, "bogus container id descriptor length\n");
668 		return 0;
669 	}
670 
671 	if (con_id->bReserved) {	/* reserved == 0 */
672 		ERROR(tdev, "reserved bits set\n");
673 		return 0;
674 	}
675 
676 	return 1;
677 }
678 
679 /* sanity test for standard requests working with usb_control_msg() and some
680  * of the utility functions which use it.
681  *
682  * this doesn't test how endpoint halts behave or data toggles get set, since
683  * we won't do I/O to bulk/interrupt endpoints here (which is how to change
684  * halt or toggle).  toggle testing is impractical without support from hcds.
685  *
686  * this avoids failing devices linux would normally work with, by not testing
687  * config/altsetting operations for devices that only support their defaults.
688  * such devices rarely support those needless operations.
689  *
690  * NOTE that since this is a sanity test, it's not examining boundary cases
691  * to see if usbcore, hcd, and device all behave right.  such testing would
692  * involve varied read sizes and other operation sequences.
693  */
694 static int ch9_postconfig(struct usbtest_dev *dev)
695 {
696 	struct usb_interface	*iface = dev->intf;
697 	struct usb_device	*udev = interface_to_usbdev(iface);
698 	int			i, alt, retval;
699 
700 	/* [9.2.3] if there's more than one altsetting, we need to be able to
701 	 * set and get each one.  mostly trusts the descriptors from usbcore.
702 	 */
703 	for (i = 0; i < iface->num_altsetting; i++) {
704 
705 		/* 9.2.3 constrains the range here */
706 		alt = iface->altsetting[i].desc.bAlternateSetting;
707 		if (alt < 0 || alt >= iface->num_altsetting) {
708 			dev_err(&iface->dev,
709 					"invalid alt [%d].bAltSetting = %d\n",
710 					i, alt);
711 		}
712 
713 		/* [real world] get/set unimplemented if there's only one */
714 		if (realworld && iface->num_altsetting == 1)
715 			continue;
716 
717 		/* [9.4.10] set_interface */
718 		retval = set_altsetting(dev, alt);
719 		if (retval) {
720 			dev_err(&iface->dev, "can't set_interface = %d, %d\n",
721 					alt, retval);
722 			return retval;
723 		}
724 
725 		/* [9.4.4] get_interface always works */
726 		retval = get_altsetting(dev);
727 		if (retval != alt) {
728 			dev_err(&iface->dev, "get alt should be %d, was %d\n",
729 					alt, retval);
730 			return (retval < 0) ? retval : -EDOM;
731 		}
732 
733 	}
734 
735 	/* [real world] get_config unimplemented if there's only one */
736 	if (!realworld || udev->descriptor.bNumConfigurations != 1) {
737 		int	expected = udev->actconfig->desc.bConfigurationValue;
738 
739 		/* [9.4.2] get_configuration always works
740 		 * ... although some cheap devices (like one TI Hub I've got)
741 		 * won't return config descriptors except before set_config.
742 		 */
743 		retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
744 				USB_REQ_GET_CONFIGURATION,
745 				USB_DIR_IN | USB_RECIP_DEVICE,
746 				0, 0, dev->buf, 1, USB_CTRL_GET_TIMEOUT);
747 		if (retval != 1 || dev->buf[0] != expected) {
748 			dev_err(&iface->dev, "get config --> %d %d (1 %d)\n",
749 				retval, dev->buf[0], expected);
750 			return (retval < 0) ? retval : -EDOM;
751 		}
752 	}
753 
754 	/* there's always [9.4.3] a device descriptor [9.6.1] */
755 	retval = usb_get_descriptor(udev, USB_DT_DEVICE, 0,
756 			dev->buf, sizeof(udev->descriptor));
757 	if (retval != sizeof(udev->descriptor)) {
758 		dev_err(&iface->dev, "dev descriptor --> %d\n", retval);
759 		return (retval < 0) ? retval : -EDOM;
760 	}
761 
762 	/*
763 	 * there's always [9.4.3] a bos device descriptor [9.6.2] in USB
764 	 * 3.0 spec
765 	 */
766 	if (le16_to_cpu(udev->descriptor.bcdUSB) >= 0x0210) {
767 		struct usb_bos_descriptor *bos = NULL;
768 		struct usb_dev_cap_header *header = NULL;
769 		unsigned total, num, length;
770 		u8 *buf;
771 
772 		retval = usb_get_descriptor(udev, USB_DT_BOS, 0, dev->buf,
773 				sizeof(*udev->bos->desc));
774 		if (retval != sizeof(*udev->bos->desc)) {
775 			dev_err(&iface->dev, "bos descriptor --> %d\n", retval);
776 			return (retval < 0) ? retval : -EDOM;
777 		}
778 
779 		bos = (struct usb_bos_descriptor *)dev->buf;
780 		total = le16_to_cpu(bos->wTotalLength);
781 		num = bos->bNumDeviceCaps;
782 
783 		if (total > TBUF_SIZE)
784 			total = TBUF_SIZE;
785 
786 		/*
787 		 * get generic device-level capability descriptors [9.6.2]
788 		 * in USB 3.0 spec
789 		 */
790 		retval = usb_get_descriptor(udev, USB_DT_BOS, 0, dev->buf,
791 				total);
792 		if (retval != total) {
793 			dev_err(&iface->dev, "bos descriptor set --> %d\n",
794 					retval);
795 			return (retval < 0) ? retval : -EDOM;
796 		}
797 
798 		length = sizeof(*udev->bos->desc);
799 		buf = dev->buf;
800 		for (i = 0; i < num; i++) {
801 			buf += length;
802 			if (buf + sizeof(struct usb_dev_cap_header) >
803 					dev->buf + total)
804 				break;
805 
806 			header = (struct usb_dev_cap_header *)buf;
807 			length = header->bLength;
808 
809 			if (header->bDescriptorType !=
810 					USB_DT_DEVICE_CAPABILITY) {
811 				dev_warn(&udev->dev, "not device capability descriptor, skip\n");
812 				continue;
813 			}
814 
815 			switch (header->bDevCapabilityType) {
816 			case USB_CAP_TYPE_EXT:
817 				if (buf + USB_DT_USB_EXT_CAP_SIZE >
818 						dev->buf + total ||
819 						!is_good_ext(dev, buf)) {
820 					dev_err(&iface->dev, "bogus usb 2.0 extension descriptor\n");
821 					return -EDOM;
822 				}
823 				break;
824 			case USB_SS_CAP_TYPE:
825 				if (buf + USB_DT_USB_SS_CAP_SIZE >
826 						dev->buf + total ||
827 						!is_good_ss_cap(dev, buf)) {
828 					dev_err(&iface->dev, "bogus superspeed device capability descriptor\n");
829 					return -EDOM;
830 				}
831 				break;
832 			case CONTAINER_ID_TYPE:
833 				if (buf + USB_DT_USB_SS_CONTN_ID_SIZE >
834 						dev->buf + total ||
835 						!is_good_con_id(dev, buf)) {
836 					dev_err(&iface->dev, "bogus container id descriptor\n");
837 					return -EDOM;
838 				}
839 				break;
840 			default:
841 				break;
842 			}
843 		}
844 	}
845 
846 	/* there's always [9.4.3] at least one config descriptor [9.6.3] */
847 	for (i = 0; i < udev->descriptor.bNumConfigurations; i++) {
848 		retval = usb_get_descriptor(udev, USB_DT_CONFIG, i,
849 				dev->buf, TBUF_SIZE);
850 		if (!is_good_config(dev, retval)) {
851 			dev_err(&iface->dev,
852 					"config [%d] descriptor --> %d\n",
853 					i, retval);
854 			return (retval < 0) ? retval : -EDOM;
855 		}
856 
857 		/* FIXME cross-checking udev->config[i] to make sure usbcore
858 		 * parsed it right (etc) would be good testing paranoia
859 		 */
860 	}
861 
862 	/* and sometimes [9.2.6.6] speed dependent descriptors */
863 	if (le16_to_cpu(udev->descriptor.bcdUSB) == 0x0200) {
864 		struct usb_qualifier_descriptor *d = NULL;
865 
866 		/* device qualifier [9.6.2] */
867 		retval = usb_get_descriptor(udev,
868 				USB_DT_DEVICE_QUALIFIER, 0, dev->buf,
869 				sizeof(struct usb_qualifier_descriptor));
870 		if (retval == -EPIPE) {
871 			if (udev->speed == USB_SPEED_HIGH) {
872 				dev_err(&iface->dev,
873 						"hs dev qualifier --> %d\n",
874 						retval);
875 				return (retval < 0) ? retval : -EDOM;
876 			}
877 			/* usb2.0 but not high-speed capable; fine */
878 		} else if (retval != sizeof(struct usb_qualifier_descriptor)) {
879 			dev_err(&iface->dev, "dev qualifier --> %d\n", retval);
880 			return (retval < 0) ? retval : -EDOM;
881 		} else
882 			d = (struct usb_qualifier_descriptor *) dev->buf;
883 
884 		/* might not have [9.6.2] any other-speed configs [9.6.4] */
885 		if (d) {
886 			unsigned max = d->bNumConfigurations;
887 			for (i = 0; i < max; i++) {
888 				retval = usb_get_descriptor(udev,
889 					USB_DT_OTHER_SPEED_CONFIG, i,
890 					dev->buf, TBUF_SIZE);
891 				if (!is_good_config(dev, retval)) {
892 					dev_err(&iface->dev,
893 						"other speed config --> %d\n",
894 						retval);
895 					return (retval < 0) ? retval : -EDOM;
896 				}
897 			}
898 		}
899 	}
900 	/* FIXME fetch strings from at least the device descriptor */
901 
902 	/* [9.4.5] get_status always works */
903 	retval = usb_get_status(udev, USB_RECIP_DEVICE, 0, dev->buf);
904 	if (retval) {
905 		dev_err(&iface->dev, "get dev status --> %d\n", retval);
906 		return retval;
907 	}
908 
909 	/* FIXME configuration.bmAttributes says if we could try to set/clear
910 	 * the device's remote wakeup feature ... if we can, test that here
911 	 */
912 
913 	retval = usb_get_status(udev, USB_RECIP_INTERFACE,
914 			iface->altsetting[0].desc.bInterfaceNumber, dev->buf);
915 	if (retval) {
916 		dev_err(&iface->dev, "get interface status --> %d\n", retval);
917 		return retval;
918 	}
919 	/* FIXME get status for each endpoint in the interface */
920 
921 	return 0;
922 }
923 
924 /*-------------------------------------------------------------------------*/
925 
926 /* use ch9 requests to test whether:
927  *   (a) queues work for control, keeping N subtests queued and
928  *       active (auto-resubmit) for M loops through the queue.
929  *   (b) protocol stalls (control-only) will autorecover.
930  *       it's not like bulk/intr; no halt clearing.
931  *   (c) short control reads are reported and handled.
932  *   (d) queues are always processed in-order
933  */
934 
935 struct ctrl_ctx {
936 	spinlock_t		lock;
937 	struct usbtest_dev	*dev;
938 	struct completion	complete;
939 	unsigned		count;
940 	unsigned		pending;
941 	int			status;
942 	struct urb		**urb;
943 	struct usbtest_param	*param;
944 	int			last;
945 };
946 
947 #define NUM_SUBCASES	16		/* how many test subcases here? */
948 
949 struct subcase {
950 	struct usb_ctrlrequest	setup;
951 	int			number;
952 	int			expected;
953 };
954 
955 static void ctrl_complete(struct urb *urb)
956 {
957 	struct ctrl_ctx		*ctx = urb->context;
958 	struct usb_ctrlrequest	*reqp;
959 	struct subcase		*subcase;
960 	int			status = urb->status;
961 
962 	reqp = (struct usb_ctrlrequest *)urb->setup_packet;
963 	subcase = container_of(reqp, struct subcase, setup);
964 
965 	spin_lock(&ctx->lock);
966 	ctx->count--;
967 	ctx->pending--;
968 
969 	/* queue must transfer and complete in fifo order, unless
970 	 * usb_unlink_urb() is used to unlink something not at the
971 	 * physical queue head (not tested).
972 	 */
973 	if (subcase->number > 0) {
974 		if ((subcase->number - ctx->last) != 1) {
975 			ERROR(ctx->dev,
976 				"subcase %d completed out of order, last %d\n",
977 				subcase->number, ctx->last);
978 			status = -EDOM;
979 			ctx->last = subcase->number;
980 			goto error;
981 		}
982 	}
983 	ctx->last = subcase->number;
984 
985 	/* succeed or fault in only one way? */
986 	if (status == subcase->expected)
987 		status = 0;
988 
989 	/* async unlink for cleanup? */
990 	else if (status != -ECONNRESET) {
991 
992 		/* some faults are allowed, not required */
993 		if (subcase->expected > 0 && (
994 			  ((status == -subcase->expected	/* happened */
995 			   || status == 0))))			/* didn't */
996 			status = 0;
997 		/* sometimes more than one fault is allowed */
998 		else if (subcase->number == 12 && status == -EPIPE)
999 			status = 0;
1000 		else
1001 			ERROR(ctx->dev, "subtest %d error, status %d\n",
1002 					subcase->number, status);
1003 	}
1004 
1005 	/* unexpected status codes mean errors; ideally, in hardware */
1006 	if (status) {
1007 error:
1008 		if (ctx->status == 0) {
1009 			int		i;
1010 
1011 			ctx->status = status;
1012 			ERROR(ctx->dev, "control queue %02x.%02x, err %d, "
1013 					"%d left, subcase %d, len %d/%d\n",
1014 					reqp->bRequestType, reqp->bRequest,
1015 					status, ctx->count, subcase->number,
1016 					urb->actual_length,
1017 					urb->transfer_buffer_length);
1018 
1019 			/* FIXME this "unlink everything" exit route should
1020 			 * be a separate test case.
1021 			 */
1022 
1023 			/* unlink whatever's still pending */
1024 			for (i = 1; i < ctx->param->sglen; i++) {
1025 				struct urb *u = ctx->urb[
1026 							(i + subcase->number)
1027 							% ctx->param->sglen];
1028 
1029 				if (u == urb || !u->dev)
1030 					continue;
1031 				spin_unlock(&ctx->lock);
1032 				status = usb_unlink_urb(u);
1033 				spin_lock(&ctx->lock);
1034 				switch (status) {
1035 				case -EINPROGRESS:
1036 				case -EBUSY:
1037 				case -EIDRM:
1038 					continue;
1039 				default:
1040 					ERROR(ctx->dev, "urb unlink --> %d\n",
1041 							status);
1042 				}
1043 			}
1044 			status = ctx->status;
1045 		}
1046 	}
1047 
1048 	/* resubmit if we need to, else mark this as done */
1049 	if ((status == 0) && (ctx->pending < ctx->count)) {
1050 		status = usb_submit_urb(urb, GFP_ATOMIC);
1051 		if (status != 0) {
1052 			ERROR(ctx->dev,
1053 				"can't resubmit ctrl %02x.%02x, err %d\n",
1054 				reqp->bRequestType, reqp->bRequest, status);
1055 			urb->dev = NULL;
1056 		} else
1057 			ctx->pending++;
1058 	} else
1059 		urb->dev = NULL;
1060 
1061 	/* signal completion when nothing's queued */
1062 	if (ctx->pending == 0)
1063 		complete(&ctx->complete);
1064 	spin_unlock(&ctx->lock);
1065 }
1066 
1067 static int
1068 test_ctrl_queue(struct usbtest_dev *dev, struct usbtest_param *param)
1069 {
1070 	struct usb_device	*udev = testdev_to_usbdev(dev);
1071 	struct urb		**urb;
1072 	struct ctrl_ctx		context;
1073 	int			i;
1074 
1075 	if (param->sglen == 0 || param->iterations > UINT_MAX / param->sglen)
1076 		return -EOPNOTSUPP;
1077 
1078 	spin_lock_init(&context.lock);
1079 	context.dev = dev;
1080 	init_completion(&context.complete);
1081 	context.count = param->sglen * param->iterations;
1082 	context.pending = 0;
1083 	context.status = -ENOMEM;
1084 	context.param = param;
1085 	context.last = -1;
1086 
1087 	/* allocate and init the urbs we'll queue.
1088 	 * as with bulk/intr sglists, sglen is the queue depth; it also
1089 	 * controls which subtests run (more tests than sglen) or rerun.
1090 	 */
1091 	urb = kcalloc(param->sglen, sizeof(struct urb *), GFP_KERNEL);
1092 	if (!urb)
1093 		return -ENOMEM;
1094 	for (i = 0; i < param->sglen; i++) {
1095 		int			pipe = usb_rcvctrlpipe(udev, 0);
1096 		unsigned		len;
1097 		struct urb		*u;
1098 		struct usb_ctrlrequest	req;
1099 		struct subcase		*reqp;
1100 
1101 		/* sign of this variable means:
1102 		 *  -: tested code must return this (negative) error code
1103 		 *  +: tested code may return this (negative too) error code
1104 		 */
1105 		int			expected = 0;
1106 
1107 		/* requests here are mostly expected to succeed on any
1108 		 * device, but some are chosen to trigger protocol stalls
1109 		 * or short reads.
1110 		 */
1111 		memset(&req, 0, sizeof(req));
1112 		req.bRequest = USB_REQ_GET_DESCRIPTOR;
1113 		req.bRequestType = USB_DIR_IN|USB_RECIP_DEVICE;
1114 
1115 		switch (i % NUM_SUBCASES) {
1116 		case 0:		/* get device descriptor */
1117 			req.wValue = cpu_to_le16(USB_DT_DEVICE << 8);
1118 			len = sizeof(struct usb_device_descriptor);
1119 			break;
1120 		case 1:		/* get first config descriptor (only) */
1121 			req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
1122 			len = sizeof(struct usb_config_descriptor);
1123 			break;
1124 		case 2:		/* get altsetting (OFTEN STALLS) */
1125 			req.bRequest = USB_REQ_GET_INTERFACE;
1126 			req.bRequestType = USB_DIR_IN|USB_RECIP_INTERFACE;
1127 			/* index = 0 means first interface */
1128 			len = 1;
1129 			expected = EPIPE;
1130 			break;
1131 		case 3:		/* get interface status */
1132 			req.bRequest = USB_REQ_GET_STATUS;
1133 			req.bRequestType = USB_DIR_IN|USB_RECIP_INTERFACE;
1134 			/* interface 0 */
1135 			len = 2;
1136 			break;
1137 		case 4:		/* get device status */
1138 			req.bRequest = USB_REQ_GET_STATUS;
1139 			req.bRequestType = USB_DIR_IN|USB_RECIP_DEVICE;
1140 			len = 2;
1141 			break;
1142 		case 5:		/* get device qualifier (MAY STALL) */
1143 			req.wValue = cpu_to_le16 (USB_DT_DEVICE_QUALIFIER << 8);
1144 			len = sizeof(struct usb_qualifier_descriptor);
1145 			if (udev->speed != USB_SPEED_HIGH)
1146 				expected = EPIPE;
1147 			break;
1148 		case 6:		/* get first config descriptor, plus interface */
1149 			req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
1150 			len = sizeof(struct usb_config_descriptor);
1151 			len += sizeof(struct usb_interface_descriptor);
1152 			break;
1153 		case 7:		/* get interface descriptor (ALWAYS STALLS) */
1154 			req.wValue = cpu_to_le16 (USB_DT_INTERFACE << 8);
1155 			/* interface == 0 */
1156 			len = sizeof(struct usb_interface_descriptor);
1157 			expected = -EPIPE;
1158 			break;
1159 		/* NOTE: two consecutive stalls in the queue here.
1160 		 *  that tests fault recovery a bit more aggressively. */
1161 		case 8:		/* clear endpoint halt (MAY STALL) */
1162 			req.bRequest = USB_REQ_CLEAR_FEATURE;
1163 			req.bRequestType = USB_RECIP_ENDPOINT;
1164 			/* wValue 0 == ep halt */
1165 			/* wIndex 0 == ep0 (shouldn't halt!) */
1166 			len = 0;
1167 			pipe = usb_sndctrlpipe(udev, 0);
1168 			expected = EPIPE;
1169 			break;
1170 		case 9:		/* get endpoint status */
1171 			req.bRequest = USB_REQ_GET_STATUS;
1172 			req.bRequestType = USB_DIR_IN|USB_RECIP_ENDPOINT;
1173 			/* endpoint 0 */
1174 			len = 2;
1175 			break;
1176 		case 10:	/* trigger short read (EREMOTEIO) */
1177 			req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
1178 			len = 1024;
1179 			expected = -EREMOTEIO;
1180 			break;
1181 		/* NOTE: two consecutive _different_ faults in the queue. */
1182 		case 11:	/* get endpoint descriptor (ALWAYS STALLS) */
1183 			req.wValue = cpu_to_le16(USB_DT_ENDPOINT << 8);
1184 			/* endpoint == 0 */
1185 			len = sizeof(struct usb_interface_descriptor);
1186 			expected = EPIPE;
1187 			break;
1188 		/* NOTE: sometimes even a third fault in the queue! */
1189 		case 12:	/* get string 0 descriptor (MAY STALL) */
1190 			req.wValue = cpu_to_le16(USB_DT_STRING << 8);
1191 			/* string == 0, for language IDs */
1192 			len = sizeof(struct usb_interface_descriptor);
1193 			/* may succeed when > 4 languages */
1194 			expected = EREMOTEIO;	/* or EPIPE, if no strings */
1195 			break;
1196 		case 13:	/* short read, resembling case 10 */
1197 			req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
1198 			/* last data packet "should" be DATA1, not DATA0 */
1199 			if (udev->speed == USB_SPEED_SUPER)
1200 				len = 1024 - 512;
1201 			else
1202 				len = 1024 - udev->descriptor.bMaxPacketSize0;
1203 			expected = -EREMOTEIO;
1204 			break;
1205 		case 14:	/* short read; try to fill the last packet */
1206 			req.wValue = cpu_to_le16((USB_DT_DEVICE << 8) | 0);
1207 			/* device descriptor size == 18 bytes */
1208 			len = udev->descriptor.bMaxPacketSize0;
1209 			if (udev->speed == USB_SPEED_SUPER)
1210 				len = 512;
1211 			switch (len) {
1212 			case 8:
1213 				len = 24;
1214 				break;
1215 			case 16:
1216 				len = 32;
1217 				break;
1218 			}
1219 			expected = -EREMOTEIO;
1220 			break;
1221 		case 15:
1222 			req.wValue = cpu_to_le16(USB_DT_BOS << 8);
1223 			if (udev->bos)
1224 				len = le16_to_cpu(udev->bos->desc->wTotalLength);
1225 			else
1226 				len = sizeof(struct usb_bos_descriptor);
1227 			if (le16_to_cpu(udev->descriptor.bcdUSB) < 0x0201)
1228 				expected = -EPIPE;
1229 			break;
1230 		default:
1231 			ERROR(dev, "bogus number of ctrl queue testcases!\n");
1232 			context.status = -EINVAL;
1233 			goto cleanup;
1234 		}
1235 		req.wLength = cpu_to_le16(len);
1236 		urb[i] = u = simple_alloc_urb(udev, pipe, len);
1237 		if (!u)
1238 			goto cleanup;
1239 
1240 		reqp = kmalloc(sizeof(*reqp), GFP_KERNEL);
1241 		if (!reqp)
1242 			goto cleanup;
1243 		reqp->setup = req;
1244 		reqp->number = i % NUM_SUBCASES;
1245 		reqp->expected = expected;
1246 		u->setup_packet = (char *) &reqp->setup;
1247 
1248 		u->context = &context;
1249 		u->complete = ctrl_complete;
1250 	}
1251 
1252 	/* queue the urbs */
1253 	context.urb = urb;
1254 	spin_lock_irq(&context.lock);
1255 	for (i = 0; i < param->sglen; i++) {
1256 		context.status = usb_submit_urb(urb[i], GFP_ATOMIC);
1257 		if (context.status != 0) {
1258 			ERROR(dev, "can't submit urb[%d], status %d\n",
1259 					i, context.status);
1260 			context.count = context.pending;
1261 			break;
1262 		}
1263 		context.pending++;
1264 	}
1265 	spin_unlock_irq(&context.lock);
1266 
1267 	/* FIXME  set timer and time out; provide a disconnect hook */
1268 
1269 	/* wait for the last one to complete */
1270 	if (context.pending > 0)
1271 		wait_for_completion(&context.complete);
1272 
1273 cleanup:
1274 	for (i = 0; i < param->sglen; i++) {
1275 		if (!urb[i])
1276 			continue;
1277 		urb[i]->dev = udev;
1278 		kfree(urb[i]->setup_packet);
1279 		simple_free_urb(urb[i]);
1280 	}
1281 	kfree(urb);
1282 	return context.status;
1283 }
1284 #undef NUM_SUBCASES
1285 
1286 
1287 /*-------------------------------------------------------------------------*/
1288 
1289 static void unlink1_callback(struct urb *urb)
1290 {
1291 	int	status = urb->status;
1292 
1293 	/* we "know" -EPIPE (stall) never happens */
1294 	if (!status)
1295 		status = usb_submit_urb(urb, GFP_ATOMIC);
1296 	if (status) {
1297 		urb->status = status;
1298 		complete(urb->context);
1299 	}
1300 }
1301 
1302 static int unlink1(struct usbtest_dev *dev, int pipe, int size, int async)
1303 {
1304 	struct urb		*urb;
1305 	struct completion	completion;
1306 	int			retval = 0;
1307 
1308 	init_completion(&completion);
1309 	urb = simple_alloc_urb(testdev_to_usbdev(dev), pipe, size);
1310 	if (!urb)
1311 		return -ENOMEM;
1312 	urb->context = &completion;
1313 	urb->complete = unlink1_callback;
1314 
1315 	/* keep the endpoint busy.  there are lots of hc/hcd-internal
1316 	 * states, and testing should get to all of them over time.
1317 	 *
1318 	 * FIXME want additional tests for when endpoint is STALLing
1319 	 * due to errors, or is just NAKing requests.
1320 	 */
1321 	retval = usb_submit_urb(urb, GFP_KERNEL);
1322 	if (retval != 0) {
1323 		dev_err(&dev->intf->dev, "submit fail %d\n", retval);
1324 		return retval;
1325 	}
1326 
1327 	/* unlinking that should always work.  variable delay tests more
1328 	 * hcd states and code paths, even with little other system load.
1329 	 */
1330 	msleep(jiffies % (2 * INTERRUPT_RATE));
1331 	if (async) {
1332 		while (!completion_done(&completion)) {
1333 			retval = usb_unlink_urb(urb);
1334 
1335 			switch (retval) {
1336 			case -EBUSY:
1337 			case -EIDRM:
1338 				/* we can't unlink urbs while they're completing
1339 				 * or if they've completed, and we haven't
1340 				 * resubmitted. "normal" drivers would prevent
1341 				 * resubmission, but since we're testing unlink
1342 				 * paths, we can't.
1343 				 */
1344 				ERROR(dev, "unlink retry\n");
1345 				continue;
1346 			case 0:
1347 			case -EINPROGRESS:
1348 				break;
1349 
1350 			default:
1351 				dev_err(&dev->intf->dev,
1352 					"unlink fail %d\n", retval);
1353 				return retval;
1354 			}
1355 
1356 			break;
1357 		}
1358 	} else
1359 		usb_kill_urb(urb);
1360 
1361 	wait_for_completion(&completion);
1362 	retval = urb->status;
1363 	simple_free_urb(urb);
1364 
1365 	if (async)
1366 		return (retval == -ECONNRESET) ? 0 : retval - 1000;
1367 	else
1368 		return (retval == -ENOENT || retval == -EPERM) ?
1369 				0 : retval - 2000;
1370 }
1371 
1372 static int unlink_simple(struct usbtest_dev *dev, int pipe, int len)
1373 {
1374 	int			retval = 0;
1375 
1376 	/* test sync and async paths */
1377 	retval = unlink1(dev, pipe, len, 1);
1378 	if (!retval)
1379 		retval = unlink1(dev, pipe, len, 0);
1380 	return retval;
1381 }
1382 
1383 /*-------------------------------------------------------------------------*/
1384 
1385 struct queued_ctx {
1386 	struct completion	complete;
1387 	atomic_t		pending;
1388 	unsigned		num;
1389 	int			status;
1390 	struct urb		**urbs;
1391 };
1392 
1393 static void unlink_queued_callback(struct urb *urb)
1394 {
1395 	int			status = urb->status;
1396 	struct queued_ctx	*ctx = urb->context;
1397 
1398 	if (ctx->status)
1399 		goto done;
1400 	if (urb == ctx->urbs[ctx->num - 4] || urb == ctx->urbs[ctx->num - 2]) {
1401 		if (status == -ECONNRESET)
1402 			goto done;
1403 		/* What error should we report if the URB completed normally? */
1404 	}
1405 	if (status != 0)
1406 		ctx->status = status;
1407 
1408  done:
1409 	if (atomic_dec_and_test(&ctx->pending))
1410 		complete(&ctx->complete);
1411 }
1412 
1413 static int unlink_queued(struct usbtest_dev *dev, int pipe, unsigned num,
1414 		unsigned size)
1415 {
1416 	struct queued_ctx	ctx;
1417 	struct usb_device	*udev = testdev_to_usbdev(dev);
1418 	void			*buf;
1419 	dma_addr_t		buf_dma;
1420 	int			i;
1421 	int			retval = -ENOMEM;
1422 
1423 	init_completion(&ctx.complete);
1424 	atomic_set(&ctx.pending, 1);	/* One more than the actual value */
1425 	ctx.num = num;
1426 	ctx.status = 0;
1427 
1428 	buf = usb_alloc_coherent(udev, size, GFP_KERNEL, &buf_dma);
1429 	if (!buf)
1430 		return retval;
1431 	memset(buf, 0, size);
1432 
1433 	/* Allocate and init the urbs we'll queue */
1434 	ctx.urbs = kcalloc(num, sizeof(struct urb *), GFP_KERNEL);
1435 	if (!ctx.urbs)
1436 		goto free_buf;
1437 	for (i = 0; i < num; i++) {
1438 		ctx.urbs[i] = usb_alloc_urb(0, GFP_KERNEL);
1439 		if (!ctx.urbs[i])
1440 			goto free_urbs;
1441 		usb_fill_bulk_urb(ctx.urbs[i], udev, pipe, buf, size,
1442 				unlink_queued_callback, &ctx);
1443 		ctx.urbs[i]->transfer_dma = buf_dma;
1444 		ctx.urbs[i]->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
1445 	}
1446 
1447 	/* Submit all the URBs and then unlink URBs num - 4 and num - 2. */
1448 	for (i = 0; i < num; i++) {
1449 		atomic_inc(&ctx.pending);
1450 		retval = usb_submit_urb(ctx.urbs[i], GFP_KERNEL);
1451 		if (retval != 0) {
1452 			dev_err(&dev->intf->dev, "submit urbs[%d] fail %d\n",
1453 					i, retval);
1454 			atomic_dec(&ctx.pending);
1455 			ctx.status = retval;
1456 			break;
1457 		}
1458 	}
1459 	if (i == num) {
1460 		usb_unlink_urb(ctx.urbs[num - 4]);
1461 		usb_unlink_urb(ctx.urbs[num - 2]);
1462 	} else {
1463 		while (--i >= 0)
1464 			usb_unlink_urb(ctx.urbs[i]);
1465 	}
1466 
1467 	if (atomic_dec_and_test(&ctx.pending))		/* The extra count */
1468 		complete(&ctx.complete);
1469 	wait_for_completion(&ctx.complete);
1470 	retval = ctx.status;
1471 
1472  free_urbs:
1473 	for (i = 0; i < num; i++)
1474 		usb_free_urb(ctx.urbs[i]);
1475 	kfree(ctx.urbs);
1476  free_buf:
1477 	usb_free_coherent(udev, size, buf, buf_dma);
1478 	return retval;
1479 }
1480 
1481 /*-------------------------------------------------------------------------*/
1482 
1483 static int verify_not_halted(struct usbtest_dev *tdev, int ep, struct urb *urb)
1484 {
1485 	int	retval;
1486 	u16	status;
1487 
1488 	/* shouldn't look or act halted */
1489 	retval = usb_get_status(urb->dev, USB_RECIP_ENDPOINT, ep, &status);
1490 	if (retval < 0) {
1491 		ERROR(tdev, "ep %02x couldn't get no-halt status, %d\n",
1492 				ep, retval);
1493 		return retval;
1494 	}
1495 	if (status != 0) {
1496 		ERROR(tdev, "ep %02x bogus status: %04x != 0\n", ep, status);
1497 		return -EINVAL;
1498 	}
1499 	retval = simple_io(tdev, urb, 1, 0, 0, __func__);
1500 	if (retval != 0)
1501 		return -EINVAL;
1502 	return 0;
1503 }
1504 
1505 static int verify_halted(struct usbtest_dev *tdev, int ep, struct urb *urb)
1506 {
1507 	int	retval;
1508 	u16	status;
1509 
1510 	/* should look and act halted */
1511 	retval = usb_get_status(urb->dev, USB_RECIP_ENDPOINT, ep, &status);
1512 	if (retval < 0) {
1513 		ERROR(tdev, "ep %02x couldn't get halt status, %d\n",
1514 				ep, retval);
1515 		return retval;
1516 	}
1517 	if (status != 1) {
1518 		ERROR(tdev, "ep %02x bogus status: %04x != 1\n", ep, status);
1519 		return -EINVAL;
1520 	}
1521 	retval = simple_io(tdev, urb, 1, 0, -EPIPE, __func__);
1522 	if (retval != -EPIPE)
1523 		return -EINVAL;
1524 	retval = simple_io(tdev, urb, 1, 0, -EPIPE, "verify_still_halted");
1525 	if (retval != -EPIPE)
1526 		return -EINVAL;
1527 	return 0;
1528 }
1529 
1530 static int test_halt(struct usbtest_dev *tdev, int ep, struct urb *urb)
1531 {
1532 	int	retval;
1533 
1534 	/* shouldn't look or act halted now */
1535 	retval = verify_not_halted(tdev, ep, urb);
1536 	if (retval < 0)
1537 		return retval;
1538 
1539 	/* set halt (protocol test only), verify it worked */
1540 	retval = usb_control_msg(urb->dev, usb_sndctrlpipe(urb->dev, 0),
1541 			USB_REQ_SET_FEATURE, USB_RECIP_ENDPOINT,
1542 			USB_ENDPOINT_HALT, ep,
1543 			NULL, 0, USB_CTRL_SET_TIMEOUT);
1544 	if (retval < 0) {
1545 		ERROR(tdev, "ep %02x couldn't set halt, %d\n", ep, retval);
1546 		return retval;
1547 	}
1548 	retval = verify_halted(tdev, ep, urb);
1549 	if (retval < 0)
1550 		return retval;
1551 
1552 	/* clear halt (tests API + protocol), verify it worked */
1553 	retval = usb_clear_halt(urb->dev, urb->pipe);
1554 	if (retval < 0) {
1555 		ERROR(tdev, "ep %02x couldn't clear halt, %d\n", ep, retval);
1556 		return retval;
1557 	}
1558 	retval = verify_not_halted(tdev, ep, urb);
1559 	if (retval < 0)
1560 		return retval;
1561 
1562 	/* NOTE:  could also verify SET_INTERFACE clear halts ... */
1563 
1564 	return 0;
1565 }
1566 
1567 static int halt_simple(struct usbtest_dev *dev)
1568 {
1569 	int			ep;
1570 	int			retval = 0;
1571 	struct urb		*urb;
1572 	struct usb_device	*udev = testdev_to_usbdev(dev);
1573 
1574 	if (udev->speed == USB_SPEED_SUPER)
1575 		urb = simple_alloc_urb(udev, 0, 1024);
1576 	else
1577 		urb = simple_alloc_urb(udev, 0, 512);
1578 	if (urb == NULL)
1579 		return -ENOMEM;
1580 
1581 	if (dev->in_pipe) {
1582 		ep = usb_pipeendpoint(dev->in_pipe) | USB_DIR_IN;
1583 		urb->pipe = dev->in_pipe;
1584 		retval = test_halt(dev, ep, urb);
1585 		if (retval < 0)
1586 			goto done;
1587 	}
1588 
1589 	if (dev->out_pipe) {
1590 		ep = usb_pipeendpoint(dev->out_pipe);
1591 		urb->pipe = dev->out_pipe;
1592 		retval = test_halt(dev, ep, urb);
1593 	}
1594 done:
1595 	simple_free_urb(urb);
1596 	return retval;
1597 }
1598 
1599 /*-------------------------------------------------------------------------*/
1600 
1601 /* Control OUT tests use the vendor control requests from Intel's
1602  * USB 2.0 compliance test device:  write a buffer, read it back.
1603  *
1604  * Intel's spec only _requires_ that it work for one packet, which
1605  * is pretty weak.   Some HCDs place limits here; most devices will
1606  * need to be able to handle more than one OUT data packet.  We'll
1607  * try whatever we're told to try.
1608  */
1609 static int ctrl_out(struct usbtest_dev *dev,
1610 		unsigned count, unsigned length, unsigned vary, unsigned offset)
1611 {
1612 	unsigned		i, j, len;
1613 	int			retval;
1614 	u8			*buf;
1615 	char			*what = "?";
1616 	struct usb_device	*udev;
1617 
1618 	if (length < 1 || length > 0xffff || vary >= length)
1619 		return -EINVAL;
1620 
1621 	buf = kmalloc(length + offset, GFP_KERNEL);
1622 	if (!buf)
1623 		return -ENOMEM;
1624 
1625 	buf += offset;
1626 	udev = testdev_to_usbdev(dev);
1627 	len = length;
1628 	retval = 0;
1629 
1630 	/* NOTE:  hardware might well act differently if we pushed it
1631 	 * with lots back-to-back queued requests.
1632 	 */
1633 	for (i = 0; i < count; i++) {
1634 		/* write patterned data */
1635 		for (j = 0; j < len; j++)
1636 			buf[j] = i + j;
1637 		retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
1638 				0x5b, USB_DIR_OUT|USB_TYPE_VENDOR,
1639 				0, 0, buf, len, USB_CTRL_SET_TIMEOUT);
1640 		if (retval != len) {
1641 			what = "write";
1642 			if (retval >= 0) {
1643 				ERROR(dev, "ctrl_out, wlen %d (expected %d)\n",
1644 						retval, len);
1645 				retval = -EBADMSG;
1646 			}
1647 			break;
1648 		}
1649 
1650 		/* read it back -- assuming nothing intervened!!  */
1651 		retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
1652 				0x5c, USB_DIR_IN|USB_TYPE_VENDOR,
1653 				0, 0, buf, len, USB_CTRL_GET_TIMEOUT);
1654 		if (retval != len) {
1655 			what = "read";
1656 			if (retval >= 0) {
1657 				ERROR(dev, "ctrl_out, rlen %d (expected %d)\n",
1658 						retval, len);
1659 				retval = -EBADMSG;
1660 			}
1661 			break;
1662 		}
1663 
1664 		/* fail if we can't verify */
1665 		for (j = 0; j < len; j++) {
1666 			if (buf[j] != (u8) (i + j)) {
1667 				ERROR(dev, "ctrl_out, byte %d is %d not %d\n",
1668 					j, buf[j], (u8) i + j);
1669 				retval = -EBADMSG;
1670 				break;
1671 			}
1672 		}
1673 		if (retval < 0) {
1674 			what = "verify";
1675 			break;
1676 		}
1677 
1678 		len += vary;
1679 
1680 		/* [real world] the "zero bytes IN" case isn't really used.
1681 		 * hardware can easily trip up in this weird case, since its
1682 		 * status stage is IN, not OUT like other ep0in transfers.
1683 		 */
1684 		if (len > length)
1685 			len = realworld ? 1 : 0;
1686 	}
1687 
1688 	if (retval < 0)
1689 		ERROR(dev, "ctrl_out %s failed, code %d, count %d\n",
1690 			what, retval, i);
1691 
1692 	kfree(buf - offset);
1693 	return retval;
1694 }
1695 
1696 /*-------------------------------------------------------------------------*/
1697 
1698 /* ISO tests ... mimics common usage
1699  *  - buffer length is split into N packets (mostly maxpacket sized)
1700  *  - multi-buffers according to sglen
1701  */
1702 
1703 struct iso_context {
1704 	unsigned		count;
1705 	unsigned		pending;
1706 	spinlock_t		lock;
1707 	struct completion	done;
1708 	int			submit_error;
1709 	unsigned long		errors;
1710 	unsigned long		packet_count;
1711 	struct usbtest_dev	*dev;
1712 };
1713 
1714 static void iso_callback(struct urb *urb)
1715 {
1716 	struct iso_context	*ctx = urb->context;
1717 
1718 	spin_lock(&ctx->lock);
1719 	ctx->count--;
1720 
1721 	ctx->packet_count += urb->number_of_packets;
1722 	if (urb->error_count > 0)
1723 		ctx->errors += urb->error_count;
1724 	else if (urb->status != 0)
1725 		ctx->errors += urb->number_of_packets;
1726 	else if (urb->actual_length != urb->transfer_buffer_length)
1727 		ctx->errors++;
1728 	else if (check_guard_bytes(ctx->dev, urb) != 0)
1729 		ctx->errors++;
1730 
1731 	if (urb->status == 0 && ctx->count > (ctx->pending - 1)
1732 			&& !ctx->submit_error) {
1733 		int status = usb_submit_urb(urb, GFP_ATOMIC);
1734 		switch (status) {
1735 		case 0:
1736 			goto done;
1737 		default:
1738 			dev_err(&ctx->dev->intf->dev,
1739 					"iso resubmit err %d\n",
1740 					status);
1741 			/* FALLTHROUGH */
1742 		case -ENODEV:			/* disconnected */
1743 		case -ESHUTDOWN:		/* endpoint disabled */
1744 			ctx->submit_error = 1;
1745 			break;
1746 		}
1747 	}
1748 
1749 	ctx->pending--;
1750 	if (ctx->pending == 0) {
1751 		if (ctx->errors)
1752 			dev_err(&ctx->dev->intf->dev,
1753 				"iso test, %lu errors out of %lu\n",
1754 				ctx->errors, ctx->packet_count);
1755 		complete(&ctx->done);
1756 	}
1757 done:
1758 	spin_unlock(&ctx->lock);
1759 }
1760 
1761 static struct urb *iso_alloc_urb(
1762 	struct usb_device	*udev,
1763 	int			pipe,
1764 	struct usb_endpoint_descriptor	*desc,
1765 	long			bytes,
1766 	unsigned offset
1767 )
1768 {
1769 	struct urb		*urb;
1770 	unsigned		i, maxp, packets;
1771 
1772 	if (bytes < 0 || !desc)
1773 		return NULL;
1774 	maxp = 0x7ff & usb_endpoint_maxp(desc);
1775 	maxp *= 1 + (0x3 & (usb_endpoint_maxp(desc) >> 11));
1776 	packets = DIV_ROUND_UP(bytes, maxp);
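	/*
	 * Worked example (illustrative numbers, not from this driver's data):
	 * a high-speed high-bandwidth endpoint with wMaxPacketSize 0x1400
	 * encodes a 1024-byte payload in bits 10:0 and 2 additional
	 * transactions per microframe in bits 12:11, so each iso packet here
	 * may carry up to 1024 * (1 + 2) = 3072 bytes; a 32768-byte request
	 * is then split into DIV_ROUND_UP(32768, 3072) = 11 packets, with
	 * only the last one short.
	 */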
1777 
1778 	urb = usb_alloc_urb(packets, GFP_KERNEL);
1779 	if (!urb)
1780 		return urb;
1781 	urb->dev = udev;
1782 	urb->pipe = pipe;
1783 
1784 	urb->number_of_packets = packets;
1785 	urb->transfer_buffer_length = bytes;
1786 	urb->transfer_buffer = usb_alloc_coherent(udev, bytes + offset,
1787 							GFP_KERNEL,
1788 							&urb->transfer_dma);
1789 	if (!urb->transfer_buffer) {
1790 		usb_free_urb(urb);
1791 		return NULL;
1792 	}
1793 	if (offset) {
1794 		memset(urb->transfer_buffer, GUARD_BYTE, offset);
1795 		urb->transfer_buffer += offset;
1796 		urb->transfer_dma += offset;
1797 	}
1798 	/* For inbound transfers use a guard byte so that the test fails
1799 	 * if data is not correctly copied */
1800 	memset(urb->transfer_buffer,
1801 			usb_pipein(urb->pipe) ? GUARD_BYTE : 0,
1802 			bytes);
1803 
1804 	for (i = 0; i < packets; i++) {
1805 		/* here, only the last packet will be short */
1806 		urb->iso_frame_desc[i].length = min((unsigned) bytes, maxp);
1807 		bytes -= urb->iso_frame_desc[i].length;
1808 
1809 		urb->iso_frame_desc[i].offset = maxp * i;
1810 	}
1811 
1812 	urb->complete = iso_callback;
1813 	/* urb->context = SET BY CALLER */
1814 	urb->interval = 1 << (desc->bInterval - 1);
1815 	urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP;
1816 	return urb;
1817 }
1818 
1819 static int
1820 test_iso_queue(struct usbtest_dev *dev, struct usbtest_param *param,
1821 		int pipe, struct usb_endpoint_descriptor *desc, unsigned offset)
1822 {
1823 	struct iso_context	context;
1824 	struct usb_device	*udev;
1825 	unsigned		i;
1826 	unsigned long		packets = 0;
1827 	int			status = 0;
1828 	struct urb		*urbs[10];	/* FIXME no limit */
1829 
1830 	if (param->sglen > 10)
1831 		return -EDOM;
1832 
1833 	memset(&context, 0, sizeof(context));
1834 	context.count = param->iterations * param->sglen;
1835 	context.dev = dev;
1836 	init_completion(&context.done);
1837 	spin_lock_init(&context.lock);
1838 
1839 	memset(urbs, 0, sizeof(urbs));
1840 	udev = testdev_to_usbdev(dev);
1841 	dev_info(&dev->intf->dev,
1842 		"... iso period %d %sframes, wMaxPacket %04x\n",
1843 		1 << (desc->bInterval - 1),
1844 		(udev->speed == USB_SPEED_HIGH) ? "micro" : "",
1845 		usb_endpoint_maxp(desc));
1846 
1847 	for (i = 0; i < param->sglen; i++) {
1848 		urbs[i] = iso_alloc_urb(udev, pipe, desc,
1849 					param->length, offset);
1850 		if (!urbs[i]) {
1851 			status = -ENOMEM;
1852 			goto fail;
1853 		}
1854 		packets += urbs[i]->number_of_packets;
1855 		urbs[i]->context = &context;
1856 	}
1857 	packets *= param->iterations;
1858 	dev_info(&dev->intf->dev,
1859 		"... total %lu msec (%lu packets)\n",
1860 		(packets * (1 << (desc->bInterval - 1)))
1861 			/ ((udev->speed == USB_SPEED_HIGH) ? 8 : 1),
1862 		packets);
1863 
1864 	spin_lock_irq(&context.lock);
1865 	for (i = 0; i < param->sglen; i++) {
1866 		++context.pending;
1867 		status = usb_submit_urb(urbs[i], GFP_ATOMIC);
1868 		if (status < 0) {
1869 			ERROR(dev, "submit iso[%d], error %d\n", i, status);
1870 			if (i == 0) {
1871 				spin_unlock_irq(&context.lock);
1872 				goto fail;
1873 			}
1874 
1875 			simple_free_urb(urbs[i]);
1876 			urbs[i] = NULL;
1877 			context.pending--;
1878 			context.submit_error = 1;
1879 			break;
1880 		}
1881 	}
1882 	spin_unlock_irq(&context.lock);
1883 
1884 	wait_for_completion(&context.done);
1885 
1886 	for (i = 0; i < param->sglen; i++) {
1887 		if (urbs[i])
1888 			simple_free_urb(urbs[i]);
1889 	}
1890 	/*
1891 	 * Isochronous transfers are expected to fail sometimes.  As an
1892 	 * arbitrary limit, we will report an error if any submissions
1893 	 * fail or if the transfer failure rate is > 10%.
1894 	 */
1895 	if (status != 0)
1896 		;
1897 	else if (context.submit_error)
1898 		status = -EACCES;
1899 	else if (context.errors > context.packet_count / 10)
1900 		status = -EIO;
1901 	return status;
1902 
1903 fail:
1904 	for (i = 0; i < param->sglen; i++) {
1905 		if (urbs[i])
1906 			simple_free_urb(urbs[i]);
1907 	}
1908 	return status;
1909 }
1910 
1911 static int test_unaligned_bulk(
1912 	struct usbtest_dev *tdev,
1913 	int pipe,
1914 	unsigned length,
1915 	int iterations,
1916 	unsigned transfer_flags,
1917 	const char *label)
1918 {
1919 	int retval;
1920 	struct urb *urb = usbtest_alloc_urb(
1921 		testdev_to_usbdev(tdev), pipe, length, transfer_flags, 1);
1922 
1923 	if (!urb)
1924 		return -ENOMEM;
1925 
1926 	retval = simple_io(tdev, urb, iterations, 0, 0, label);
1927 	simple_free_urb(urb);
1928 	return retval;
1929 }
1930 
1931 /*-------------------------------------------------------------------------*/
1932 
1933 /* We only have this one interface to user space, through usbfs.
1934  * User mode code can scan usbfs to find N different devices (maybe on
1935  * different busses) to use when testing, and allocate one thread per
1936  * test.  So discovery is simplified, and we have no device naming issues.
1937  *
1938  * Don't use these only as stress/load tests.  Use them along with
1939  * other USB bus activity:  plugging, unplugging, mousing, mp3 playback,
1940  * video capture, and so on.  Run different tests at different times, in
1941  * different sequences.  Nothing here should interact with other devices,
1942  * except indirectly by consuming USB bandwidth and CPU resources for test
1943  * threads and request completion.  But the only way to know that for sure
1944  * is to test when HC queues are in use by many devices.
1945  *
1946  * WARNING:  Because usbfs grabs udev->dev.sem before calling this ioctl(),
1947  * it locks out usbcore in certain code paths.  Notably, if you disconnect
1948  * the device-under-test, khubd will block forever waiting for the
1949  * ioctl to complete ... so that usb_disconnect() can abort the pending
1950  * urbs and then call usbtest_disconnect().  To abort a test, you're best
1951  * off just killing the userspace task and waiting for it to exit.
1952  */
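
/*
 * Sketch of how user space reaches usbtest_ioctl() (roughly what the
 * kernel's testusb utility does; the device path below is only an
 * example):
 *
 *	int fd = open("/dev/bus/usb/001/002", O_RDWR);
 *	struct usbtest_param param = {
 *		.test_num	= 1,
 *		.iterations	= 1000,
 *		.length		= 512,
 *	};
 *	struct usbdevfs_ioctl wrapper = {
 *		.ifno		= 0,
 *		.ioctl_code	= USBTEST_REQUEST,
 *		.data		= &param,
 *	};
 *	ioctl(fd, USBDEVFS_IOCTL, &wrapper);
 *
 * On success, param.duration holds the elapsed wall-clock time.
 */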
1953 
1954 static int
1955 usbtest_ioctl(struct usb_interface *intf, unsigned int code, void *buf)
1956 {
1957 	struct usbtest_dev	*dev = usb_get_intfdata(intf);
1958 	struct usb_device	*udev = testdev_to_usbdev(dev);
1959 	struct usbtest_param	*param = buf;
1960 	int			retval = -EOPNOTSUPP;
1961 	struct urb		*urb;
1962 	struct scatterlist	*sg;
1963 	struct usb_sg_request	req;
1964 	struct timeval		start;
1965 	unsigned		i;
1966 
1967 	/* FIXME USBDEVFS_CONNECTINFO doesn't say how fast the device is. */
1968 
1969 	pattern = mod_pattern;
1970 
1971 	if (code != USBTEST_REQUEST)
1972 		return -EOPNOTSUPP;
1973 
1974 	if (param->iterations <= 0)
1975 		return -EINVAL;
1976 
1977 	if (mutex_lock_interruptible(&dev->lock))
1978 		return -ERESTARTSYS;
1979 
1980 	/* FIXME: What if a system sleep starts while a test is running? */
1981 
1982 	/* some devices, like ez-usb default devices, need a non-default
1983 	 * altsetting to have any active endpoints.  some tests change
1984 	 * altsettings; force a default so most tests don't need to check.
1985 	 */
1986 	if (dev->info->alt >= 0) {
1987 		int	res;
1988 
1989 		if (intf->altsetting->desc.bInterfaceNumber) {
1990 			mutex_unlock(&dev->lock);
1991 			return -ENODEV;
1992 		}
1993 		res = set_altsetting(dev, dev->info->alt);
1994 		if (res) {
1995 			dev_err(&intf->dev,
1996 					"set altsetting to %d failed, %d\n",
1997 					dev->info->alt, res);
1998 			mutex_unlock(&dev->lock);
1999 			return res;
2000 		}
2001 	}
2002 
2003 	/*
2004 	 * Just a bunch of test cases that every HCD is expected to handle.
2005 	 *
2006 	 * Some may need specific firmware, though it'd be good to have
2007 	 * one firmware image to handle all the test cases.
2008 	 *
2009 	 * FIXME add more tests!  cancel requests, verify the data, control
2010 	 * queueing, concurrent read+write threads, and so on.
2011 	 */
2012 	do_gettimeofday(&start);
2013 	switch (param->test_num) {
2014 
2015 	case 0:
2016 		dev_info(&intf->dev, "TEST 0:  NOP\n");
2017 		retval = 0;
2018 		break;
2019 
2020 	/* Simple non-queued bulk I/O tests */
2021 	case 1:
2022 		if (dev->out_pipe == 0)
2023 			break;
2024 		dev_info(&intf->dev,
2025 				"TEST 1:  write %d bytes %u times\n",
2026 				param->length, param->iterations);
2027 		urb = simple_alloc_urb(udev, dev->out_pipe, param->length);
2028 		if (!urb) {
2029 			retval = -ENOMEM;
2030 			break;
2031 		}
2032 		/* FIRMWARE:  bulk sink (maybe accepts short writes) */
2033 		retval = simple_io(dev, urb, param->iterations, 0, 0, "test1");
2034 		simple_free_urb(urb);
2035 		break;
2036 	case 2:
2037 		if (dev->in_pipe == 0)
2038 			break;
2039 		dev_info(&intf->dev,
2040 				"TEST 2:  read %d bytes %u times\n",
2041 				param->length, param->iterations);
2042 		urb = simple_alloc_urb(udev, dev->in_pipe, param->length);
2043 		if (!urb) {
2044 			retval = -ENOMEM;
2045 			break;
2046 		}
2047 		/* FIRMWARE:  bulk source (maybe generates short reads) */
2048 		retval = simple_io(dev, urb, param->iterations, 0, 0, "test2");
2049 		simple_free_urb(urb);
2050 		break;
2051 	case 3:
2052 		if (dev->out_pipe == 0 || param->vary == 0)
2053 			break;
2054 		dev_info(&intf->dev,
2055 				"TEST 3:  write/%d 0..%d bytes %u times\n",
2056 				param->vary, param->length, param->iterations);
2057 		urb = simple_alloc_urb(udev, dev->out_pipe, param->length);
2058 		if (!urb) {
2059 			retval = -ENOMEM;
2060 			break;
2061 		}
2062 		/* FIRMWARE:  bulk sink (maybe accepts short writes) */
2063 		retval = simple_io(dev, urb, param->iterations, param->vary,
2064 					0, "test3");
2065 		simple_free_urb(urb);
2066 		break;
2067 	case 4:
2068 		if (dev->in_pipe == 0 || param->vary == 0)
2069 			break;
2070 		dev_info(&intf->dev,
2071 				"TEST 4:  read/%d 0..%d bytes %u times\n",
2072 				param->vary, param->length, param->iterations);
2073 		urb = simple_alloc_urb(udev, dev->in_pipe, param->length);
2074 		if (!urb) {
2075 			retval = -ENOMEM;
2076 			break;
2077 		}
2078 		/* FIRMWARE:  bulk source (maybe generates short reads) */
2079 		retval = simple_io(dev, urb, param->iterations, param->vary,
2080 					0, "test4");
2081 		simple_free_urb(urb);
2082 		break;
2083 
2084 	/* Queued bulk I/O tests */
2085 	case 5:
2086 		if (dev->out_pipe == 0 || param->sglen == 0)
2087 			break;
2088 		dev_info(&intf->dev,
2089 			"TEST 5:  write %d sglists %d entries of %d bytes\n",
2090 				param->iterations,
2091 				param->sglen, param->length);
2092 		sg = alloc_sglist(param->sglen, param->length, 0);
2093 		if (!sg) {
2094 			retval = -ENOMEM;
2095 			break;
2096 		}
2097 		/* FIRMWARE:  bulk sink (maybe accepts short writes) */
2098 		retval = perform_sglist(dev, param->iterations, dev->out_pipe,
2099 				&req, sg, param->sglen);
2100 		free_sglist(sg, param->sglen);
2101 		break;
2102 
2103 	case 6:
2104 		if (dev->in_pipe == 0 || param->sglen == 0)
2105 			break;
2106 		dev_info(&intf->dev,
2107 			"TEST 6:  read %d sglists %d entries of %d bytes\n",
2108 				param->iterations,
2109 				param->sglen, param->length);
2110 		sg = alloc_sglist(param->sglen, param->length, 0);
2111 		if (!sg) {
2112 			retval = -ENOMEM;
2113 			break;
2114 		}
2115 		/* FIRMWARE:  bulk source (maybe generates short reads) */
2116 		retval = perform_sglist(dev, param->iterations, dev->in_pipe,
2117 				&req, sg, param->sglen);
2118 		free_sglist(sg, param->sglen);
2119 		break;
2120 	case 7:
2121 		if (dev->out_pipe == 0 || param->sglen == 0 || param->vary == 0)
2122 			break;
2123 		dev_info(&intf->dev,
2124 			"TEST 7:  write/%d %d sglists %d entries 0..%d bytes\n",
2125 				param->vary, param->iterations,
2126 				param->sglen, param->length);
2127 		sg = alloc_sglist(param->sglen, param->length, param->vary);
2128 		if (!sg) {
2129 			retval = -ENOMEM;
2130 			break;
2131 		}
2132 		/* FIRMWARE:  bulk sink (maybe accepts short writes) */
2133 		retval = perform_sglist(dev, param->iterations, dev->out_pipe,
2134 				&req, sg, param->sglen);
2135 		free_sglist(sg, param->sglen);
2136 		break;
2137 	case 8:
2138 		if (dev->in_pipe == 0 || param->sglen == 0 || param->vary == 0)
2139 			break;
2140 		dev_info(&intf->dev,
2141 			"TEST 8:  read/%d %d sglists %d entries 0..%d bytes\n",
2142 				param->vary, param->iterations,
2143 				param->sglen, param->length);
2144 		sg = alloc_sglist(param->sglen, param->length, param->vary);
2145 		if (!sg) {
2146 			retval = -ENOMEM;
2147 			break;
2148 		}
2149 		/* FIRMWARE:  bulk source (maybe generates short reads) */
2150 		retval = perform_sglist(dev, param->iterations, dev->in_pipe,
2151 				&req, sg, param->sglen);
2152 		free_sglist(sg, param->sglen);
2153 		break;
2154 
2155 	/* non-queued sanity tests for control (chapter 9 subset) */
2156 	case 9:
2157 		retval = 0;
2158 		dev_info(&intf->dev,
2159 			"TEST 9:  ch9 (subset) control tests, %d times\n",
2160 				param->iterations);
2161 		for (i = param->iterations; retval == 0 && i--; /* NOP */)
2162 			retval = ch9_postconfig(dev);
2163 		if (retval)
2164 			dev_err(&intf->dev, "ch9 subset failed, "
2165 					"iterations left %d\n", i);
2166 		break;
2167 
2168 	/* queued control messaging */
2169 	case 10:
2170 		retval = 0;
2171 		dev_info(&intf->dev,
2172 				"TEST 10:  queue %d control calls, %d times\n",
2173 				param->sglen,
2174 				param->iterations);
2175 		retval = test_ctrl_queue(dev, param);
2176 		break;
2177 
2178 	/* simple non-queued unlinks (ring with one urb) */
2179 	case 11:
2180 		if (dev->in_pipe == 0 || !param->length)
2181 			break;
2182 		retval = 0;
2183 		dev_info(&intf->dev, "TEST 11:  unlink %d reads of %d\n",
2184 				param->iterations, param->length);
2185 		for (i = param->iterations; retval == 0 && i--; /* NOP */)
2186 			retval = unlink_simple(dev, dev->in_pipe,
2187 						param->length);
2188 		if (retval)
2189 			dev_err(&intf->dev, "unlink reads failed %d, "
2190 				"iterations left %d\n", retval, i);
2191 		break;
2192 	case 12:
2193 		if (dev->out_pipe == 0 || !param->length)
2194 			break;
2195 		retval = 0;
2196 		dev_info(&intf->dev, "TEST 12:  unlink %d writes of %d\n",
2197 				param->iterations, param->length);
2198 		for (i = param->iterations; retval == 0 && i--; /* NOP */)
2199 			retval = unlink_simple(dev, dev->out_pipe,
2200 						param->length);
2201 		if (retval)
2202 			dev_err(&intf->dev, "unlink writes failed %d, "
2203 				"iterations left %d\n", retval, i);
2204 		break;
2205 
2206 	/* ep halt tests */
2207 	case 13:
2208 		if (dev->out_pipe == 0 && dev->in_pipe == 0)
2209 			break;
2210 		retval = 0;
2211 		dev_info(&intf->dev, "TEST 13:  set/clear %d halts\n",
2212 				param->iterations);
2213 		for (i = param->iterations; retval == 0 && i--; /* NOP */)
2214 			retval = halt_simple(dev);
2215 
2216 		if (retval)
2217 			ERROR(dev, "halts failed, iterations left %d\n", i);
2218 		break;
2219 
2220 	/* control write tests */
2221 	case 14:
2222 		if (!dev->info->ctrl_out)
2223 			break;
2224 		dev_info(&intf->dev, "TEST 14:  %d ep0out, %d..%d vary %d\n",
2225 				param->iterations,
2226 				realworld ? 1 : 0, param->length,
2227 				param->vary);
2228 		retval = ctrl_out(dev, param->iterations,
2229 				param->length, param->vary, 0);
2230 		break;
2231 
2232 	/* iso write tests */
2233 	case 15:
2234 		if (dev->out_iso_pipe == 0 || param->sglen == 0)
2235 			break;
2236 		dev_info(&intf->dev,
2237 			"TEST 15:  write %d iso, %d entries of %d bytes\n",
2238 				param->iterations,
2239 				param->sglen, param->length);
2240 		/* FIRMWARE:  iso sink */
2241 		retval = test_iso_queue(dev, param,
2242 				dev->out_iso_pipe, dev->iso_out, 0);
2243 		break;
2244 
2245 	/* iso read tests */
2246 	case 16:
2247 		if (dev->in_iso_pipe == 0 || param->sglen == 0)
2248 			break;
2249 		dev_info(&intf->dev,
2250 			"TEST 16:  read %d iso, %d entries of %d bytes\n",
2251 				param->iterations,
2252 				param->sglen, param->length);
2253 		/* FIRMWARE:  iso source */
2254 		retval = test_iso_queue(dev, param,
2255 				dev->in_iso_pipe, dev->iso_in, 0);
2256 		break;
2257 
2258 	/* FIXME scatterlist cancel (needs helper thread) */
2259 
2260 	/* Tests for bulk I/O using DMA mapping by core and odd address */
2261 	case 17:
2262 		if (dev->out_pipe == 0)
2263 			break;
2264 		dev_info(&intf->dev,
2265 			"TEST 17:  write odd addr %d bytes %u times core map\n",
2266 			param->length, param->iterations);
2267 
2268 		retval = test_unaligned_bulk(
2269 				dev, dev->out_pipe,
2270 				param->length, param->iterations,
2271 				0, "test17");
2272 		break;
2273 
2274 	case 18:
2275 		if (dev->in_pipe == 0)
2276 			break;
2277 		dev_info(&intf->dev,
2278 			"TEST 18:  read odd addr %d bytes %u times core map\n",
2279 			param->length, param->iterations);
2280 
2281 		retval = test_unaligned_bulk(
2282 				dev, dev->in_pipe,
2283 				param->length, param->iterations,
2284 				0, "test18");
2285 		break;
2286 
2287 	/* Tests for bulk I/O using premapped coherent buffer and odd address */
2288 	case 19:
2289 		if (dev->out_pipe == 0)
2290 			break;
2291 		dev_info(&intf->dev,
2292 			"TEST 19:  write odd addr %d bytes %u times premapped\n",
2293 			param->length, param->iterations);
2294 
2295 		retval = test_unaligned_bulk(
2296 				dev, dev->out_pipe,
2297 				param->length, param->iterations,
2298 				URB_NO_TRANSFER_DMA_MAP, "test19");
2299 		break;
2300 
2301 	case 20:
2302 		if (dev->in_pipe == 0)
2303 			break;
2304 		dev_info(&intf->dev,
2305 			"TEST 20:  read odd addr %d bytes %u times premapped\n",
2306 			param->length, param->iterations);
2307 
2308 		retval = test_unaligned_bulk(
2309 				dev, dev->in_pipe,
2310 				param->length, param->iterations,
2311 				URB_NO_TRANSFER_DMA_MAP, "test20");
2312 		break;
2313 
2314 	/* control write tests with unaligned buffer */
2315 	case 21:
2316 		if (!dev->info->ctrl_out)
2317 			break;
2318 		dev_info(&intf->dev,
2319 				"TEST 21:  %d ep0out odd addr, %d..%d vary %d\n",
2320 				param->iterations,
2321 				realworld ? 1 : 0, param->length,
2322 				param->vary);
2323 		retval = ctrl_out(dev, param->iterations,
2324 				param->length, param->vary, 1);
2325 		break;
2326 
2327 	/* unaligned iso tests */
2328 	case 22:
2329 		if (dev->out_iso_pipe == 0 || param->sglen == 0)
2330 			break;
2331 		dev_info(&intf->dev,
2332 			"TEST 22:  write %d iso odd, %d entries of %d bytes\n",
2333 				param->iterations,
2334 				param->sglen, param->length);
2335 		retval = test_iso_queue(dev, param,
2336 				dev->out_iso_pipe, dev->iso_out, 1);
2337 		break;
2338 
2339 	case 23:
2340 		if (dev->in_iso_pipe == 0 || param->sglen == 0)
2341 			break;
2342 		dev_info(&intf->dev,
2343 			"TEST 23:  read %d iso odd, %d entries of %d bytes\n",
2344 				param->iterations,
2345 				param->sglen, param->length);
2346 		retval = test_iso_queue(dev, param,
2347 				dev->in_iso_pipe, dev->iso_in, 1);
2348 		break;
2349 
2350 	/* unlink URBs from a bulk-OUT queue */
2351 	case 24:
2352 		if (dev->out_pipe == 0 || !param->length || param->sglen < 4)
2353 			break;
2354 		retval = 0;
2355 		dev_info(&intf->dev, "TEST 24:  unlink from %d queues of "
2356 				"%d %d-byte writes\n",
2357 				param->iterations, param->sglen, param->length);
2358 		for (i = param->iterations; retval == 0 && i > 0; --i) {
2359 			retval = unlink_queued(dev, dev->out_pipe,
2360 						param->sglen, param->length);
2361 			if (retval) {
2362 				dev_err(&intf->dev,
2363 					"unlink queued writes failed %d, "
2364 					"iterations left %d\n", retval, i);
2365 				break;
2366 			}
2367 		}
2368 		break;
2369 
2370 	}
2371 	do_gettimeofday(&param->duration);
2372 	param->duration.tv_sec -= start.tv_sec;
2373 	param->duration.tv_usec -= start.tv_usec;
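	/*
	 * The raw microsecond difference can be negative; the borrow below
	 * normalizes it, e.g. 7.100000s - 5.900000s = (2s, -800000us)
	 * -> (1s, 200000us).
	 */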
2374 	if (param->duration.tv_usec < 0) {
2375 		param->duration.tv_usec += 1000 * 1000;
2376 		param->duration.tv_sec -= 1;
2377 	}
2378 	mutex_unlock(&dev->lock);
2379 	return retval;
2380 }
2381 
2382 /*-------------------------------------------------------------------------*/
2383 
2384 static unsigned force_interrupt;
2385 module_param(force_interrupt, uint, 0);
2386 MODULE_PARM_DESC(force_interrupt, "0 = test default; else interrupt");
2387 
2388 #ifdef	GENERIC
2389 static unsigned short vendor;
2390 module_param(vendor, ushort, 0);
2391 MODULE_PARM_DESC(vendor, "vendor code (from usb-if)");
2392 
2393 static unsigned short product;
2394 module_param(product, ushort, 0);
2395 MODULE_PARM_DESC(product, "product code (from vendor)");
2396 #endif
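/*
 * Example (IDs are placeholders): binding an otherwise unclaimed device
 * for control-only tests via module parameters, where vendor is required
 * and product is optional:
 *
 *	modprobe usbtest vendor=0x1234 product=0x5678
 *
 * force_interrupt=1 sets up the in/out pipes as interrupt rather than
 * bulk for devices with fixed endpoint numbers.
 */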
2397 
2398 static int
2399 usbtest_probe(struct usb_interface *intf, const struct usb_device_id *id)
2400 {
2401 	struct usb_device	*udev;
2402 	struct usbtest_dev	*dev;
2403 	struct usbtest_info	*info;
2404 	char			*rtest, *wtest;
2405 	char			*irtest, *iwtest;
2406 
2407 	udev = interface_to_usbdev(intf);
2408 
2409 #ifdef	GENERIC
2410 	/* specify devices by module parameters? */
2411 	if (id->match_flags == 0) {
2412 		/* vendor match required, product match optional */
2413 		if (!vendor || le16_to_cpu(udev->descriptor.idVendor) != (u16)vendor)
2414 			return -ENODEV;
2415 		if (product && le16_to_cpu(udev->descriptor.idProduct) != (u16)product)
2416 			return -ENODEV;
2417 		dev_info(&intf->dev, "matched module params, "
2418 					"vend=0x%04x prod=0x%04x\n",
2419 				le16_to_cpu(udev->descriptor.idVendor),
2420 				le16_to_cpu(udev->descriptor.idProduct));
2421 	}
2422 #endif
2423 
2424 	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
2425 	if (!dev)
2426 		return -ENOMEM;
2427 	info = (struct usbtest_info *) id->driver_info;
2428 	dev->info = info;
2429 	mutex_init(&dev->lock);
2430 
2431 	dev->intf = intf;
2432 
2433 	/* cacheline-aligned scratch for i/o */
2434 	dev->buf = kmalloc(TBUF_SIZE, GFP_KERNEL);
2435 	if (dev->buf == NULL) {
2436 		kfree(dev);
2437 		return -ENOMEM;
2438 	}
2439 
2440 	/* NOTE this doesn't yet test the handful of differences that are
2441 	 * visible with high speed interrupts:  bigger maxpacket (1K) and
2442 	 * "high bandwidth" modes (up to 3 packets/uframe).
2443 	 */
2444 	rtest = wtest = "";
2445 	irtest = iwtest = "";
2446 	if (force_interrupt || udev->speed == USB_SPEED_LOW) {
2447 		if (info->ep_in) {
2448 			dev->in_pipe = usb_rcvintpipe(udev, info->ep_in);
2449 			rtest = " intr-in";
2450 		}
2451 		if (info->ep_out) {
2452 			dev->out_pipe = usb_sndintpipe(udev, info->ep_out);
2453 			wtest = " intr-out";
2454 		}
2455 	} else {
2456 		if (override_alt >= 0 || info->autoconf) {
2457 			int status;
2458 
2459 			status = get_endpoints(dev, intf);
2460 			if (status < 0) {
2461 				WARNING(dev, "couldn't get endpoints, %d\n",
2462 						status);
2463 				kfree(dev->buf);
2464 				kfree(dev);
2465 				return status;
2466 			}
2467 			/* may find bulk or ISO pipes */
2468 		} else {
2469 			if (info->ep_in)
2470 				dev->in_pipe = usb_rcvbulkpipe(udev,
2471 							info->ep_in);
2472 			if (info->ep_out)
2473 				dev->out_pipe = usb_sndbulkpipe(udev,
2474 							info->ep_out);
2475 		}
2476 		if (dev->in_pipe)
2477 			rtest = " bulk-in";
2478 		if (dev->out_pipe)
2479 			wtest = " bulk-out";
2480 		if (dev->in_iso_pipe)
2481 			irtest = " iso-in";
2482 		if (dev->out_iso_pipe)
2483 			iwtest = " iso-out";
2484 	}
2485 
2486 	usb_set_intfdata(intf, dev);
2487 	dev_info(&intf->dev, "%s\n", info->name);
2488 	dev_info(&intf->dev, "%s {control%s%s%s%s%s} tests%s\n",
2489 			usb_speed_string(udev->speed),
2490 			info->ctrl_out ? " in/out" : "",
2491 			rtest, wtest,
2492 			irtest, iwtest,
2493 			info->alt >= 0 ? " (+alt)" : "");
2494 	return 0;
2495 }
2496 
2497 static int usbtest_suspend(struct usb_interface *intf, pm_message_t message)
2498 {
2499 	return 0;
2500 }
2501 
2502 static int usbtest_resume(struct usb_interface *intf)
2503 {
2504 	return 0;
2505 }
2506 
2507 
2508 static void usbtest_disconnect(struct usb_interface *intf)
2509 {
2510 	struct usbtest_dev	*dev = usb_get_intfdata(intf);
2511 
2512 	usb_set_intfdata(intf, NULL);
2513 	dev_dbg(&intf->dev, "disconnect\n");
2514 	kfree(dev);
2515 }
2516 
2517 /* Basic testing only needs a device that can source or sink bulk traffic.
2518  * Any device can test control transfers (default with GENERIC binding).
2519  *
2520  * Several entries work with the default EP0 implementation that's built
2521  * into EZ-USB chips.  There's a default vendor ID which can be overridden
2522  * by (very) small config EEPROMS, but otherwise all these devices act
2523  * identically until firmware is loaded:  only EP0 works.  It turns out
2524  * to be easy to make other endpoints work, without modifying that EP0
2525  * behavior.  For now, we expect that kind of firmware.
2526  */
2527 
2528 /* an21xx or fx versions of ez-usb */
2529 static struct usbtest_info ez1_info = {
2530 	.name		= "EZ-USB device",
2531 	.ep_in		= 2,
2532 	.ep_out		= 2,
2533 	.alt		= 1,
2534 };
2535 
2536 /* fx2 version of ez-usb */
2537 static struct usbtest_info ez2_info = {
2538 	.name		= "FX2 device",
2539 	.ep_in		= 6,
2540 	.ep_out		= 2,
2541 	.alt		= 1,
2542 };
2543 
2544 /* ezusb family device with dedicated usb test firmware,
2545  */
2546 static struct usbtest_info fw_info = {
2547 	.name		= "usb test device",
2548 	.ep_in		= 2,
2549 	.ep_out		= 2,
2550 	.alt		= 1,
2551 	.autoconf	= 1,		/* iso and ctrl_out need autoconf */
2552 	.ctrl_out	= 1,
2553 	.iso		= 1,		/* iso endpoints are #8 in/out */
2554 };
2555 
2556 /* peripheral running Linux and 'zero.c' test firmware, or
2557  * its user-mode cousin. different versions of this use
2558  * different hardware with the same vendor/product codes.
2559  * host side MUST rely on the endpoint descriptors.
2560  */
2561 static struct usbtest_info gz_info = {
2562 	.name		= "Linux gadget zero",
2563 	.autoconf	= 1,
2564 	.ctrl_out	= 1,
2565 	.iso		= 1,
2566 	.alt		= 0,
2567 };
2568 
2569 static struct usbtest_info um_info = {
2570 	.name		= "Linux user mode test driver",
2571 	.autoconf	= 1,
2572 	.alt		= -1,
2573 };
2574 
2575 static struct usbtest_info um2_info = {
2576 	.name		= "Linux user mode ISO test driver",
2577 	.autoconf	= 1,
2578 	.iso		= 1,
2579 	.alt		= -1,
2580 };
2581 
2582 #ifdef IBOT2
2583 /* this is a nice source of high speed bulk data;
2584  * uses an FX2, with firmware provided in the device
2585  */
2586 static struct usbtest_info ibot2_info = {
2587 	.name		= "iBOT2 webcam",
2588 	.ep_in		= 2,
2589 	.alt		= -1,
2590 };
2591 #endif
2592 
2593 #ifdef GENERIC
2594 /* we can use any device to test control traffic */
2595 static struct usbtest_info generic_info = {
2596 	.name		= "Generic USB device",
2597 	.alt		= -1,
2598 };
2599 #endif
2600 
2601 
2602 static const struct usb_device_id id_table[] = {
2603 
2604 	/*-------------------------------------------------------------*/
2605 
2606 	/* EZ-USB devices which download firmware to replace (or in our
2607 	 * case augment) the default device implementation.
2608 	 */
2609 
2610 	/* generic EZ-USB FX controller */
2611 	{ USB_DEVICE(0x0547, 0x2235),
2612 		.driver_info = (unsigned long) &ez1_info,
2613 	},
2614 
2615 	/* CY3671 development board with EZ-USB FX */
2616 	{ USB_DEVICE(0x0547, 0x0080),
2617 		.driver_info = (unsigned long) &ez1_info,
2618 	},
2619 
2620 	/* generic EZ-USB FX2 controller (or development board) */
2621 	{ USB_DEVICE(0x04b4, 0x8613),
2622 		.driver_info = (unsigned long) &ez2_info,
2623 	},
2624 
2625 	/* re-enumerated usb test device firmware */
2626 	{ USB_DEVICE(0xfff0, 0xfff0),
2627 		.driver_info = (unsigned long) &fw_info,
2628 	},
2629 
2630 	/* "Gadget Zero" firmware runs under Linux */
2631 	{ USB_DEVICE(0x0525, 0xa4a0),
2632 		.driver_info = (unsigned long) &gz_info,
2633 	},
2634 
2635 	/* so does a user-mode variant */
2636 	{ USB_DEVICE(0x0525, 0xa4a4),
2637 		.driver_info = (unsigned long) &um_info,
2638 	},
2639 
2640 	/* ... and a user-mode variant that talks iso */
2641 	{ USB_DEVICE(0x0525, 0xa4a3),
2642 		.driver_info = (unsigned long) &um2_info,
2643 	},
2644 
2645 #ifdef KEYSPAN_19Qi
2646 	/* Keyspan 19qi uses an21xx (original EZ-USB) */
2647 	/* this does not coexist with the real Keyspan 19qi driver! */
2648 	{ USB_DEVICE(0x06cd, 0x010b),
2649 		.driver_info = (unsigned long) &ez1_info,
2650 	},
2651 #endif
2652 
2653 	/*-------------------------------------------------------------*/
2654 
2655 #ifdef IBOT2
2656 	/* iBOT2 makes a nice source of high speed bulk-in data */
2657 	/* this does not coexist with a real iBOT2 driver! */
2658 	{ USB_DEVICE(0x0b62, 0x0059),
2659 		.driver_info = (unsigned long) &ibot2_info,
2660 	},
2661 #endif
2662 
2663 	/*-------------------------------------------------------------*/
2664 
2665 #ifdef GENERIC
2666 	/* module params can specify devices to use for control tests */
2667 	{ .driver_info = (unsigned long) &generic_info, },
2668 #endif
2669 
2670 	/*-------------------------------------------------------------*/
2671 
2672 	{ }
2673 };
2674 MODULE_DEVICE_TABLE(usb, id_table);
2675 
2676 static struct usb_driver usbtest_driver = {
2677 	.name =		"usbtest",
2678 	.id_table =	id_table,
2679 	.probe =	usbtest_probe,
2680 	.unlocked_ioctl = usbtest_ioctl,
2681 	.disconnect =	usbtest_disconnect,
2682 	.suspend =	usbtest_suspend,
2683 	.resume =	usbtest_resume,
2684 };
2685 
2686 /*-------------------------------------------------------------------------*/
2687 
2688 static int __init usbtest_init(void)
2689 {
2690 #ifdef GENERIC
2691 	if (vendor)
2692 		pr_debug("params: vend=0x%04x prod=0x%04x\n", vendor, product);
2693 #endif
2694 	return usb_register(&usbtest_driver);
2695 }
2696 module_init(usbtest_init);
2697 
2698 static void __exit usbtest_exit(void)
2699 {
2700 	usb_deregister(&usbtest_driver);
2701 }
2702 module_exit(usbtest_exit);
2703 
2704 MODULE_DESCRIPTION("USB Core/HCD Testing Driver");
2705 MODULE_LICENSE("GPL");
2706 
2707