xref: /openbmc/linux/drivers/usb/misc/usbtest.c (revision ce932d0c5589e9766e089c22c66890dfc48fbd94)
1 #include <linux/kernel.h>
2 #include <linux/errno.h>
3 #include <linux/init.h>
4 #include <linux/slab.h>
5 #include <linux/mm.h>
6 #include <linux/module.h>
7 #include <linux/moduleparam.h>
8 #include <linux/scatterlist.h>
9 #include <linux/mutex.h>
10 
11 #include <linux/usb.h>
12 
13 
14 /*-------------------------------------------------------------------------*/
15 
16 /* FIXME make these public somewhere; usbdevfs.h? */
17 struct usbtest_param {
18 	/* inputs */
19 	unsigned		test_num;	/* 0..(TEST_CASES-1) */
20 	unsigned		iterations;
21 	unsigned		length;
22 	unsigned		vary;
23 	unsigned		sglen;
24 
25 	/* outputs */
26 	struct timeval		duration;
27 };
28 #define USBTEST_REQUEST	_IOWR('U', 100, struct usbtest_param)
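/* Example (illustration only, never built with this driver): user space
 * reaches this request through usbfs by wrapping it in USBDEVFS_IOCTL,
 * much as the kernel's testusb utility does.  The struct layout must
 * mirror usbtest_param above; the device node, interface number, and
 * test parameters below are assumptions chosen just for illustration.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/time.h>
#include <linux/usbdevice_fs.h>

struct usbtest_param {			/* mirrors the layout above */
	unsigned	test_num;
	unsigned	iterations;
	unsigned	length;
	unsigned	vary;
	unsigned	sglen;
	struct timeval	duration;
};
#define USBTEST_REQUEST	_IOWR('U', 100, struct usbtest_param)

int main(void)
{
	struct usbdevfs_ioctl	wrapper;
	struct usbtest_param	param;
	int			fd;

	/* hypothetical device node for a device bound to usbtest */
	fd = open("/dev/bus/usb/001/002", O_RDWR);
	if (fd < 0)
		return 1;

	memset(&param, 0, sizeof param);
	param.test_num = 1;		/* simple bulk OUT writes */
	param.iterations = 1000;
	param.length = 512;

	wrapper.ifno = 0;		/* interface claimed by usbtest */
	wrapper.ioctl_code = USBTEST_REQUEST;
	wrapper.data = &param;

	if (ioctl(fd, USBDEVFS_IOCTL, &wrapper) < 0)
		perror("USBTEST_REQUEST");
	else
		printf("test %u took %ld.%06ld s\n", param.test_num,
			(long) param.duration.tv_sec,
			(long) param.duration.tv_usec);
	return 0;
}
#endif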
29 
30 /*-------------------------------------------------------------------------*/
31 
32 #define	GENERIC		/* let probe() bind using module params */
33 
34 /* Some devices that can be used for testing will have "real" drivers.
35  * Entries for those need to be enabled here by hand, after disabling
36  * that "real" driver.
37  */
38 //#define	IBOT2		/* grab iBOT2 webcams */
39 //#define	KEYSPAN_19Qi	/* grab un-renumerated serial adapter */
40 
41 /*-------------------------------------------------------------------------*/
42 
43 struct usbtest_info {
44 	const char		*name;
45 	u8			ep_in;		/* bulk/intr source */
46 	u8			ep_out;		/* bulk/intr sink */
47 	unsigned		autoconf:1;
48 	unsigned		ctrl_out:1;
49 	unsigned		iso:1;		/* try iso in/out */
50 	int			alt;
51 };
52 
53 /* this is accessed only through usbfs ioctl calls.
54  * one ioctl to issue a test ... one lock per device.
55  * tests create other threads if they need them.
56  * urbs and buffers are allocated dynamically,
57  * and data generated deterministically.
58  */
59 struct usbtest_dev {
60 	struct usb_interface	*intf;
61 	struct usbtest_info	*info;
62 	int			in_pipe;
63 	int			out_pipe;
64 	int			in_iso_pipe;
65 	int			out_iso_pipe;
66 	struct usb_endpoint_descriptor	*iso_in, *iso_out;
67 	struct mutex		lock;
68 
69 #define TBUF_SIZE	256
70 	u8			*buf;
71 };
72 
73 static struct usb_device *testdev_to_usbdev(struct usbtest_dev *test)
74 {
75 	return interface_to_usbdev(test->intf);
76 }
77 
78 /* set up all urbs so they can be used with either bulk or interrupt */
79 #define	INTERRUPT_RATE		1	/* msec/transfer */
80 
81 #define ERROR(tdev, fmt, args...) \
82 	dev_err(&(tdev)->intf->dev , fmt , ## args)
83 #define WARNING(tdev, fmt, args...) \
84 	dev_warn(&(tdev)->intf->dev , fmt , ## args)
85 
86 #define GUARD_BYTE	0xA5
87 
88 /*-------------------------------------------------------------------------*/
89 
90 static int
91 get_endpoints(struct usbtest_dev *dev, struct usb_interface *intf)
92 {
93 	int				tmp;
94 	struct usb_host_interface	*alt;
95 	struct usb_host_endpoint	*in, *out;
96 	struct usb_host_endpoint	*iso_in, *iso_out;
97 	struct usb_device		*udev;
98 
99 	for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
100 		unsigned	ep;
101 
102 		in = out = NULL;
103 		iso_in = iso_out = NULL;
104 		alt = intf->altsetting + tmp;
105 
106 		/* take the first altsetting with in-bulk + out-bulk;
107 		 * ignore other endpoints and altsettings.
108 		 */
109 		for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
110 			struct usb_host_endpoint	*e;
111 
112 			e = alt->endpoint + ep;
113 			switch (e->desc.bmAttributes) {
114 			case USB_ENDPOINT_XFER_BULK:
115 				break;
116 			case USB_ENDPOINT_XFER_ISOC:
117 				if (dev->info->iso)
118 					goto try_iso;
119 				/* FALLTHROUGH */
120 			default:
121 				continue;
122 			}
123 			if (usb_endpoint_dir_in(&e->desc)) {
124 				if (!in)
125 					in = e;
126 			} else {
127 				if (!out)
128 					out = e;
129 			}
130 			continue;
131 try_iso:
132 			if (usb_endpoint_dir_in(&e->desc)) {
133 				if (!iso_in)
134 					iso_in = e;
135 			} else {
136 				if (!iso_out)
137 					iso_out = e;
138 			}
139 		}
140 		if ((in && out)  ||  iso_in || iso_out)
141 			goto found;
142 	}
143 	return -EINVAL;
144 
145 found:
146 	udev = testdev_to_usbdev(dev);
147 	if (alt->desc.bAlternateSetting != 0) {
148 		tmp = usb_set_interface(udev,
149 				alt->desc.bInterfaceNumber,
150 				alt->desc.bAlternateSetting);
151 		if (tmp < 0)
152 			return tmp;
153 	}
154 
155 	if (in) {
156 		dev->in_pipe = usb_rcvbulkpipe(udev,
157 			in->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
158 		dev->out_pipe = usb_sndbulkpipe(udev,
159 			out->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
160 	}
161 	if (iso_in) {
162 		dev->iso_in = &iso_in->desc;
163 		dev->in_iso_pipe = usb_rcvisocpipe(udev,
164 				iso_in->desc.bEndpointAddress
165 					& USB_ENDPOINT_NUMBER_MASK);
166 	}
167 
168 	if (iso_out) {
169 		dev->iso_out = &iso_out->desc;
170 		dev->out_iso_pipe = usb_sndisocpipe(udev,
171 				iso_out->desc.bEndpointAddress
172 					& USB_ENDPOINT_NUMBER_MASK);
173 	}
174 	return 0;
175 }
176 
177 /*-------------------------------------------------------------------------*/
178 
179 /* Support for testing basic non-queued I/O streams.
180  *
181  * These just package urbs as requests that can be easily canceled.
182  * Each urb's data buffer is dynamically allocated; callers can fill
183  * them with non-zero test data (or test for it) when appropriate.
184  */
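/* Example (illustration only): how the helpers below compose.  A test
 * allocates one urb with a deterministic buffer, pumps it synchronously
 * with simple_io(), then frees it; the pipe, length, and iteration count
 * here are assumptions chosen just for illustration.
 */
#if 0
static int example_bulk_read(struct usbtest_dev *dev, unsigned len)
{
	struct urb	*urb;
	int		retval;

	/* bulk IN urb; the helper prefills IN buffers with GUARD_BYTE */
	urb = simple_alloc_urb(testdev_to_usbdev(dev), dev->in_pipe, len);
	if (!urb)
		return -ENOMEM;

	/* ten blocking round trips, each expected to complete with 0 */
	retval = simple_io(dev, urb, 10, 0, 0, "example_bulk_read");

	simple_free_urb(urb);
	return retval;
}
#endif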
185 
186 static void simple_callback(struct urb *urb)
187 {
188 	complete(urb->context);
189 }
190 
191 static struct urb *usbtest_alloc_urb(
192 	struct usb_device	*udev,
193 	int			pipe,
194 	unsigned long		bytes,
195 	unsigned		transfer_flags,
196 	unsigned		offset)
197 {
198 	struct urb		*urb;
199 
200 	urb = usb_alloc_urb(0, GFP_KERNEL);
201 	if (!urb)
202 		return urb;
203 	usb_fill_bulk_urb(urb, udev, pipe, NULL, bytes, simple_callback, NULL);
204 	urb->interval = (udev->speed == USB_SPEED_HIGH)
205 			? (INTERRUPT_RATE << 3)
206 			: INTERRUPT_RATE;
207 	urb->transfer_flags = transfer_flags;
208 	if (usb_pipein(pipe))
209 		urb->transfer_flags |= URB_SHORT_NOT_OK;
210 
211 	if (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
212 		urb->transfer_buffer = usb_alloc_coherent(udev, bytes + offset,
213 			GFP_KERNEL, &urb->transfer_dma);
214 	else
215 		urb->transfer_buffer = kmalloc(bytes + offset, GFP_KERNEL);
216 
217 	if (!urb->transfer_buffer) {
218 		usb_free_urb(urb);
219 		return NULL;
220 	}
221 
222 	/* To test unaligned transfers add an offset and fill the
223 		unused memory with a guard value */
224 	if (offset) {
225 		memset(urb->transfer_buffer, GUARD_BYTE, offset);
226 		urb->transfer_buffer += offset;
227 		if (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
228 			urb->transfer_dma += offset;
229 	}
230 
231 	/* For inbound transfers use guard byte so that test fails if
232 		data not correctly copied */
233 	memset(urb->transfer_buffer,
234 			usb_pipein(urb->pipe) ? GUARD_BYTE : 0,
235 			bytes);
236 	return urb;
237 }
238 
239 static struct urb *simple_alloc_urb(
240 	struct usb_device	*udev,
241 	int			pipe,
242 	unsigned long		bytes)
243 {
244 	return usbtest_alloc_urb(udev, pipe, bytes, URB_NO_TRANSFER_DMA_MAP, 0);
245 }
246 
247 static unsigned pattern;
248 static unsigned mod_pattern;
249 module_param_named(pattern, mod_pattern, uint, S_IRUGO | S_IWUSR);
250 MODULE_PARM_DESC(pattern, "i/o pattern (0 == zeroes)");
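/* the parameter shows up as "pattern"; e.g. select mod63 at run time with
 *	echo 1 > /sys/module/usbtest/parameters/pattern
 */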
251 
252 static inline void simple_fill_buf(struct urb *urb)
253 {
254 	unsigned	i;
255 	u8		*buf = urb->transfer_buffer;
256 	unsigned	len = urb->transfer_buffer_length;
257 
258 	switch (pattern) {
259 	default:
260 		/* FALLTHROUGH */
261 	case 0:
262 		memset(buf, 0, len);
263 		break;
264 	case 1:			/* mod63 */
265 		for (i = 0; i < len; i++)
266 			*buf++ = (u8) (i % 63);
267 		break;
268 	}
269 }
270 
271 static inline unsigned long buffer_offset(void *buf)
272 {
273 	return (unsigned long)buf & (ARCH_KMALLOC_MINALIGN - 1);
274 }
275 
276 static int check_guard_bytes(struct usbtest_dev *tdev, struct urb *urb)
277 {
278 	u8 *buf = urb->transfer_buffer;
279 	u8 *guard = buf - buffer_offset(buf);
280 	unsigned i;
281 
282 	for (i = 0; guard < buf; i++, guard++) {
283 		if (*guard != GUARD_BYTE) {
284 			ERROR(tdev, "guard byte[%d] %d (not %d)\n",
285 				i, *guard, GUARD_BYTE);
286 			return -EINVAL;
287 		}
288 	}
289 	return 0;
290 }
291 
292 static int simple_check_buf(struct usbtest_dev *tdev, struct urb *urb)
293 {
294 	unsigned	i;
295 	u8		expected;
296 	u8		*buf = urb->transfer_buffer;
297 	unsigned	len = urb->actual_length;
298 
299 	int ret = check_guard_bytes(tdev, urb);
300 	if (ret)
301 		return ret;
302 
303 	for (i = 0; i < len; i++, buf++) {
304 		switch (pattern) {
305 		/* all-zeroes has no synchronization issues */
306 		case 0:
307 			expected = 0;
308 			break;
309 		/* mod63 stays in sync with short-terminated transfers,
310 		 * or otherwise when host and gadget agree on how large
311 		 * each usb transfer request should be.  resync is done
312 		 * with set_interface or set_config.
313 		 */
314 		case 1:			/* mod63 */
315 			expected = i % 63;
316 			break;
317 		/* always fail unsupported patterns */
318 		default:
319 			expected = !*buf;
320 			break;
321 		}
322 		if (*buf == expected)
323 			continue;
324 		ERROR(tdev, "buf[%d] = %d (not %d)\n", i, *buf, expected);
325 		return -EINVAL;
326 	}
327 	return 0;
328 }
329 
330 static void simple_free_urb(struct urb *urb)
331 {
332 	unsigned long offset = buffer_offset(urb->transfer_buffer);
333 
334 	if (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
335 		usb_free_coherent(
336 			urb->dev,
337 			urb->transfer_buffer_length + offset,
338 			urb->transfer_buffer - offset,
339 			urb->transfer_dma - offset);
340 	else
341 		kfree(urb->transfer_buffer - offset);
342 	usb_free_urb(urb);
343 }
344 
345 static int simple_io(
346 	struct usbtest_dev	*tdev,
347 	struct urb		*urb,
348 	int			iterations,
349 	int			vary,
350 	int			expected,
351 	const char		*label
352 )
353 {
354 	struct usb_device	*udev = urb->dev;
355 	int			max = urb->transfer_buffer_length;
356 	struct completion	completion;
357 	int			retval = 0;
358 
359 	urb->context = &completion;
360 	while (retval == 0 && iterations-- > 0) {
361 		init_completion(&completion);
362 		if (usb_pipeout(urb->pipe)) {
363 			simple_fill_buf(urb);
364 			urb->transfer_flags |= URB_ZERO_PACKET;
365 		}
366 		retval = usb_submit_urb(urb, GFP_KERNEL);
367 		if (retval != 0)
368 			break;
369 
370 		/* NOTE:  no timeouts; can't be broken out of by interrupt */
371 		wait_for_completion(&completion);
372 		retval = urb->status;
373 		urb->dev = udev;
374 		if (retval == 0 && usb_pipein(urb->pipe))
375 			retval = simple_check_buf(tdev, urb);
376 
377 		if (vary) {
378 			int	len = urb->transfer_buffer_length;
379 
380 			len += vary;
381 			len %= max;
382 			if (len == 0)
383 				len = (vary < max) ? vary : max;
384 			urb->transfer_buffer_length = len;
385 		}
386 
387 		/* FIXME if endpoint halted, clear halt (and log) */
388 	}
389 	urb->transfer_buffer_length = max;
390 
391 	if (expected != retval)
392 		dev_err(&udev->dev,
393 			"%s failed, iterations left %d, status %d (not %d)\n",
394 				label, iterations, retval, expected);
395 	return retval;
396 }
397 
398 
399 /*-------------------------------------------------------------------------*/
400 
401 /* We use scatterlist primitives to test queued I/O.
402  * Yes, this also tests the scatterlist primitives.
403  */
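/* Example (illustration only): the intended pairing of the sglist helpers
 * below.  alloc_sglist() builds nents pattern-filled buffers and
 * perform_sglist() submits them as one queued request per iteration;
 * the pipe and sizes here are assumptions chosen just for illustration.
 */
#if 0
static int example_sg_write(struct usbtest_dev *dev, int nents, int size)
{
	struct usb_sg_request	req;
	struct scatterlist	*sg;
	int			retval;

	sg = alloc_sglist(nents, size, 0);	/* vary == 0: equal sizes */
	if (!sg)
		return -ENOMEM;

	/* one pass through the whole scatterlist on the bulk OUT pipe */
	retval = perform_sglist(dev, 1, dev->out_pipe, &req, sg, nents);

	free_sglist(sg, nents);
	return retval;
}
#endif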
404 
405 static void free_sglist(struct scatterlist *sg, int nents)
406 {
407 	unsigned		i;
408 
409 	if (!sg)
410 		return;
411 	for (i = 0; i < nents; i++) {
412 		if (!sg_page(&sg[i]))
413 			continue;
414 		kfree(sg_virt(&sg[i]));
415 	}
416 	kfree(sg);
417 }
418 
419 static struct scatterlist *
420 alloc_sglist(int nents, int max, int vary)
421 {
422 	struct scatterlist	*sg;
423 	unsigned		i;
424 	unsigned		size = max;
425 
426 	sg = kmalloc_array(nents, sizeof *sg, GFP_KERNEL);
427 	if (!sg)
428 		return NULL;
429 	sg_init_table(sg, nents);
430 
431 	for (i = 0; i < nents; i++) {
432 		char		*buf;
433 		unsigned	j;
434 
435 		buf = kzalloc(size, GFP_KERNEL);
436 		if (!buf) {
437 			free_sglist(sg, i);
438 			return NULL;
439 		}
440 
441 		/* kmalloc pages are always physically contiguous! */
442 		sg_set_buf(&sg[i], buf, size);
443 
444 		switch (pattern) {
445 		case 0:
446 			/* already zeroed */
447 			break;
448 		case 1:
449 			for (j = 0; j < size; j++)
450 				*buf++ = (u8) (j % 63);
451 			break;
452 		}
453 
454 		if (vary) {
455 			size += vary;
456 			size %= max;
457 			if (size == 0)
458 				size = (vary < max) ? vary : max;
459 		}
460 	}
461 
462 	return sg;
463 }
464 
465 static int perform_sglist(
466 	struct usbtest_dev	*tdev,
467 	unsigned		iterations,
468 	int			pipe,
469 	struct usb_sg_request	*req,
470 	struct scatterlist	*sg,
471 	int			nents
472 )
473 {
474 	struct usb_device	*udev = testdev_to_usbdev(tdev);
475 	int			retval = 0;
476 
477 	while (retval == 0 && iterations-- > 0) {
478 		retval = usb_sg_init(req, udev, pipe,
479 				(udev->speed == USB_SPEED_HIGH)
480 					? (INTERRUPT_RATE << 3)
481 					: INTERRUPT_RATE,
482 				sg, nents, 0, GFP_KERNEL);
483 
484 		if (retval)
485 			break;
486 		usb_sg_wait(req);
487 		retval = req->status;
488 
489 		/* FIXME check resulting data pattern */
490 
491 		/* FIXME if endpoint halted, clear halt (and log) */
492 	}
493 
494 	/* FIXME for unlink or fault handling tests, don't report
495 	 * failure if retval is as we expected ...
496 	 */
497 	if (retval)
498 		ERROR(tdev, "perform_sglist failed, "
499 				"iterations left %d, status %d\n",
500 				iterations, retval);
501 	return retval;
502 }
503 
504 
505 /*-------------------------------------------------------------------------*/
506 
507 /* unqueued control message testing
508  *
509  * there's a nice set of device functional requirements in chapter 9 of the
510  * usb 2.0 spec, which we can apply to ANY device, even ones that don't use
511  * special test firmware.
512  *
513  * we know the device is configured (or suspended) by the time it's visible
514  * through usbfs.  we can't change that, so we won't test enumeration (which
515  * worked 'well enough' to get here, this time), power management (ditto),
516  * or remote wakeup (which needs human interaction).
517  */
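/* Example (illustration only): the shape of request the chapter 9 tests
 * below build on -- a synchronous GET_DESCRIPTOR(DEVICE) on ep0, which
 * any addressed device must answer.  It reuses the scratch buffer in
 * struct usbtest_dev; purely a sketch, not one of the real subtests.
 */
#if 0
static int example_get_device_descriptor(struct usbtest_dev *dev)
{
	struct usb_device	*udev = testdev_to_usbdev(dev);

	/* returns the byte count on success, else a negative errno */
	return usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
			USB_REQ_GET_DESCRIPTOR,
			USB_DIR_IN | USB_RECIP_DEVICE,
			USB_DT_DEVICE << 8, 0,
			dev->buf, sizeof(struct usb_device_descriptor),
			USB_CTRL_GET_TIMEOUT);
}
#endif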
518 
519 static unsigned realworld = 1;
520 module_param(realworld, uint, 0);
521 MODULE_PARM_DESC(realworld, "clear to demand stricter spec compliance");
522 
523 static int get_altsetting(struct usbtest_dev *dev)
524 {
525 	struct usb_interface	*iface = dev->intf;
526 	struct usb_device	*udev = interface_to_usbdev(iface);
527 	int			retval;
528 
529 	retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
530 			USB_REQ_GET_INTERFACE, USB_DIR_IN|USB_RECIP_INTERFACE,
531 			0, iface->altsetting[0].desc.bInterfaceNumber,
532 			dev->buf, 1, USB_CTRL_GET_TIMEOUT);
533 	switch (retval) {
534 	case 1:
535 		return dev->buf[0];
536 	case 0:
537 		retval = -ERANGE;
538 		/* FALLTHROUGH */
539 	default:
540 		return retval;
541 	}
542 }
543 
544 static int set_altsetting(struct usbtest_dev *dev, int alternate)
545 {
546 	struct usb_interface		*iface = dev->intf;
547 	struct usb_device		*udev;
548 
549 	if (alternate < 0 || alternate >= 256)
550 		return -EINVAL;
551 
552 	udev = interface_to_usbdev(iface);
553 	return usb_set_interface(udev,
554 			iface->altsetting[0].desc.bInterfaceNumber,
555 			alternate);
556 }
557 
558 static int is_good_config(struct usbtest_dev *tdev, int len)
559 {
560 	struct usb_config_descriptor	*config;
561 
562 	if (len < sizeof *config)
563 		return 0;
564 	config = (struct usb_config_descriptor *) tdev->buf;
565 
566 	switch (config->bDescriptorType) {
567 	case USB_DT_CONFIG:
568 	case USB_DT_OTHER_SPEED_CONFIG:
569 		if (config->bLength != 9) {
570 			ERROR(tdev, "bogus config descriptor length\n");
571 			return 0;
572 		}
573 		/* this bit 'must be 1' but often isn't */
574 		if (!realworld && !(config->bmAttributes & 0x80)) {
575 			ERROR(tdev, "high bit of config attributes not set\n");
576 			return 0;
577 		}
578 		if (config->bmAttributes & 0x1f) {	/* reserved == 0 */
579 			ERROR(tdev, "reserved config bits set\n");
580 			return 0;
581 		}
582 		break;
583 	default:
584 		return 0;
585 	}
586 
587 	if (le16_to_cpu(config->wTotalLength) == len)	/* read it all */
588 		return 1;
589 	if (le16_to_cpu(config->wTotalLength) >= TBUF_SIZE)	/* max partial read */
590 		return 1;
591 	ERROR(tdev, "bogus config descriptor read size\n");
592 	return 0;
593 }
594 
595 /* sanity test for standard requests working with usb_control_msg() and some
596  * of the utility functions which use it.
597  *
598  * this doesn't test how endpoint halts behave or data toggles get set, since
599  * we won't do I/O to bulk/interrupt endpoints here (which is how to change
600  * halt or toggle).  toggle testing is impractical without support from hcds.
601  *
602  * this avoids failing devices linux would normally work with, by not testing
603  * config/altsetting operations for devices that only support their defaults.
604  * such devices rarely support those needless operations.
605  *
606  * NOTE that since this is a sanity test, it's not examining boundary cases
607  * to see if usbcore, hcd, and device all behave right.  such testing would
608  * involve varied read sizes and other operation sequences.
609  */
610 static int ch9_postconfig(struct usbtest_dev *dev)
611 {
612 	struct usb_interface	*iface = dev->intf;
613 	struct usb_device	*udev = interface_to_usbdev(iface);
614 	int			i, alt, retval;
615 
616 	/* [9.2.3] if there's more than one altsetting, we need to be able to
617 	 * set and get each one.  mostly trusts the descriptors from usbcore.
618 	 */
619 	for (i = 0; i < iface->num_altsetting; i++) {
620 
621 		/* 9.2.3 constrains the range here */
622 		alt = iface->altsetting[i].desc.bAlternateSetting;
623 		if (alt < 0 || alt >= iface->num_altsetting) {
624 			dev_err(&iface->dev,
625 					"invalid alt [%d].bAltSetting = %d\n",
626 					i, alt);
627 		}
628 
629 		/* [real world] get/set unimplemented if there's only one */
630 		if (realworld && iface->num_altsetting == 1)
631 			continue;
632 
633 		/* [9.4.10] set_interface */
634 		retval = set_altsetting(dev, alt);
635 		if (retval) {
636 			dev_err(&iface->dev, "can't set_interface = %d, %d\n",
637 					alt, retval);
638 			return retval;
639 		}
640 
641 		/* [9.4.4] get_interface always works */
642 		retval = get_altsetting(dev);
643 		if (retval != alt) {
644 			dev_err(&iface->dev, "get alt should be %d, was %d\n",
645 					alt, retval);
646 			return (retval < 0) ? retval : -EDOM;
647 		}
648 
649 	}
650 
651 	/* [real world] get_config unimplemented if there's only one */
652 	if (!realworld || udev->descriptor.bNumConfigurations != 1) {
653 		int	expected = udev->actconfig->desc.bConfigurationValue;
654 
655 		/* [9.4.2] get_configuration always works
656 		 * ... although some cheap devices (like one TI Hub I've got)
657 		 * won't return config descriptors except before set_config.
658 		 */
659 		retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
660 				USB_REQ_GET_CONFIGURATION,
661 				USB_DIR_IN | USB_RECIP_DEVICE,
662 				0, 0, dev->buf, 1, USB_CTRL_GET_TIMEOUT);
663 		if (retval != 1 || dev->buf[0] != expected) {
664 			dev_err(&iface->dev, "get config --> %d %d (1 %d)\n",
665 				retval, dev->buf[0], expected);
666 			return (retval < 0) ? retval : -EDOM;
667 		}
668 	}
669 
670 	/* there's always [9.4.3] a device descriptor [9.6.1] */
671 	retval = usb_get_descriptor(udev, USB_DT_DEVICE, 0,
672 			dev->buf, sizeof udev->descriptor);
673 	if (retval != sizeof udev->descriptor) {
674 		dev_err(&iface->dev, "dev descriptor --> %d\n", retval);
675 		return (retval < 0) ? retval : -EDOM;
676 	}
677 
678 	/* there's always [9.4.3] at least one config descriptor [9.6.3] */
679 	for (i = 0; i < udev->descriptor.bNumConfigurations; i++) {
680 		retval = usb_get_descriptor(udev, USB_DT_CONFIG, i,
681 				dev->buf, TBUF_SIZE);
682 		if (!is_good_config(dev, retval)) {
683 			dev_err(&iface->dev,
684 					"config [%d] descriptor --> %d\n",
685 					i, retval);
686 			return (retval < 0) ? retval : -EDOM;
687 		}
688 
689 		/* FIXME cross-checking udev->config[i] to make sure usbcore
690 		 * parsed it right (etc) would be good testing paranoia
691 		 */
692 	}
693 
694 	/* and sometimes [9.2.6.6] speed dependent descriptors */
695 	if (le16_to_cpu(udev->descriptor.bcdUSB) == 0x0200) {
696 		struct usb_qualifier_descriptor *d = NULL;
697 
698 		/* device qualifier [9.6.2] */
699 		retval = usb_get_descriptor(udev,
700 				USB_DT_DEVICE_QUALIFIER, 0, dev->buf,
701 				sizeof(struct usb_qualifier_descriptor));
702 		if (retval == -EPIPE) {
703 			if (udev->speed == USB_SPEED_HIGH) {
704 				dev_err(&iface->dev,
705 						"hs dev qualifier --> %d\n",
706 						retval);
707 				return (retval < 0) ? retval : -EDOM;
708 			}
709 			/* usb2.0 but not high-speed capable; fine */
710 		} else if (retval != sizeof(struct usb_qualifier_descriptor)) {
711 			dev_err(&iface->dev, "dev qualifier --> %d\n", retval);
712 			return (retval < 0) ? retval : -EDOM;
713 		} else
714 			d = (struct usb_qualifier_descriptor *) dev->buf;
715 
716 		/* might not have [9.6.2] any other-speed configs [9.6.4] */
717 		if (d) {
718 			unsigned max = d->bNumConfigurations;
719 			for (i = 0; i < max; i++) {
720 				retval = usb_get_descriptor(udev,
721 					USB_DT_OTHER_SPEED_CONFIG, i,
722 					dev->buf, TBUF_SIZE);
723 				if (!is_good_config(dev, retval)) {
724 					dev_err(&iface->dev,
725 						"other speed config --> %d\n",
726 						retval);
727 					return (retval < 0) ? retval : -EDOM;
728 				}
729 			}
730 		}
731 	}
732 	/* FIXME fetch strings from at least the device descriptor */
733 
734 	/* [9.4.5] get_status always works */
735 	retval = usb_get_status(udev, USB_RECIP_DEVICE, 0, dev->buf);
736 	if (retval != 2) {
737 		dev_err(&iface->dev, "get dev status --> %d\n", retval);
738 		return (retval < 0) ? retval : -EDOM;
739 	}
740 
741 	/* FIXME configuration.bmAttributes says if we could try to set/clear
742 	 * the device's remote wakeup feature ... if we can, test that here
743 	 */
744 
745 	retval = usb_get_status(udev, USB_RECIP_INTERFACE,
746 			iface->altsetting[0].desc.bInterfaceNumber, dev->buf);
747 	if (retval != 2) {
748 		dev_err(&iface->dev, "get interface status --> %d\n", retval);
749 		return (retval < 0) ? retval : -EDOM;
750 	}
751 	/* FIXME get status for each endpoint in the interface */
752 
753 	return 0;
754 }
755 
756 /*-------------------------------------------------------------------------*/
757 
758 /* use ch9 requests to test whether:
759  *   (a) queues work for control, keeping N subtests queued and
760  *       active (auto-resubmit) for M loops through the queue.
761  *   (b) protocol stalls (control-only) will autorecover.
762  *       it's not like bulk/intr; no halt clearing.
763  *   (c) short control reads are reported and handled.
764  *   (d) queues are always processed in-order
765  */
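/* For example, subcase 7 in the table built below queues GET_DESCRIPTOR
 * with wValue = USB_DT_INTERFACE << 8 and records expected = -EPIPE:
 * interface descriptors are only readable as part of the configuration
 * descriptor, so a compliant device must protocol-stall that request,
 * and the completion handler then counts the stall as success.
 */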
766 
767 struct ctrl_ctx {
768 	spinlock_t		lock;
769 	struct usbtest_dev	*dev;
770 	struct completion	complete;
771 	unsigned		count;
772 	unsigned		pending;
773 	int			status;
774 	struct urb		**urb;
775 	struct usbtest_param	*param;
776 	int			last;
777 };
778 
779 #define NUM_SUBCASES	15		/* how many test subcases here? */
780 
781 struct subcase {
782 	struct usb_ctrlrequest	setup;
783 	int			number;
784 	int			expected;
785 };
786 
787 static void ctrl_complete(struct urb *urb)
788 {
789 	struct ctrl_ctx		*ctx = urb->context;
790 	struct usb_ctrlrequest	*reqp;
791 	struct subcase		*subcase;
792 	int			status = urb->status;
793 
794 	reqp = (struct usb_ctrlrequest *)urb->setup_packet;
795 	subcase = container_of(reqp, struct subcase, setup);
796 
797 	spin_lock(&ctx->lock);
798 	ctx->count--;
799 	ctx->pending--;
800 
801 	/* queue must transfer and complete in fifo order, unless
802 	 * usb_unlink_urb() is used to unlink something not at the
803 	 * physical queue head (not tested).
804 	 */
805 	if (subcase->number > 0) {
806 		if ((subcase->number - ctx->last) != 1) {
807 			ERROR(ctx->dev,
808 				"subcase %d completed out of order, last %d\n",
809 				subcase->number, ctx->last);
810 			status = -EDOM;
811 			ctx->last = subcase->number;
812 			goto error;
813 		}
814 	}
815 	ctx->last = subcase->number;
816 
817 	/* succeed or fault in only one way? */
818 	if (status == subcase->expected)
819 		status = 0;
820 
821 	/* async unlink for cleanup? */
822 	else if (status != -ECONNRESET) {
823 
824 		/* some faults are allowed, not required */
825 		if (subcase->expected > 0 && (
826 			  ((status == -subcase->expected	/* happened */
827 			   || status == 0))))			/* didn't */
828 			status = 0;
829 		/* sometimes more than one fault is allowed */
830 		else if (subcase->number == 12 && status == -EPIPE)
831 			status = 0;
832 		else
833 			ERROR(ctx->dev, "subtest %d error, status %d\n",
834 					subcase->number, status);
835 	}
836 
837 	/* unexpected status codes mean errors; ideally, in hardware */
838 	if (status) {
839 error:
840 		if (ctx->status == 0) {
841 			int		i;
842 
843 			ctx->status = status;
844 			ERROR(ctx->dev, "control queue %02x.%02x, err %d, "
845 					"%d left, subcase %d, len %d/%d\n",
846 					reqp->bRequestType, reqp->bRequest,
847 					status, ctx->count, subcase->number,
848 					urb->actual_length,
849 					urb->transfer_buffer_length);
850 
851 			/* FIXME this "unlink everything" exit route should
852 			 * be a separate test case.
853 			 */
854 
855 			/* unlink whatever's still pending */
856 			for (i = 1; i < ctx->param->sglen; i++) {
857 				struct urb *u = ctx->urb[
858 							(i + subcase->number)
859 							% ctx->param->sglen];
860 
861 				if (u == urb || !u->dev)
862 					continue;
863 				spin_unlock(&ctx->lock);
864 				status = usb_unlink_urb(u);
865 				spin_lock(&ctx->lock);
866 				switch (status) {
867 				case -EINPROGRESS:
868 				case -EBUSY:
869 				case -EIDRM:
870 					continue;
871 				default:
872 					ERROR(ctx->dev, "urb unlink --> %d\n",
873 							status);
874 				}
875 			}
876 			status = ctx->status;
877 		}
878 	}
879 
880 	/* resubmit if we need to, else mark this as done */
881 	if ((status == 0) && (ctx->pending < ctx->count)) {
882 		status = usb_submit_urb(urb, GFP_ATOMIC);
883 		if (status != 0) {
884 			ERROR(ctx->dev,
885 				"can't resubmit ctrl %02x.%02x, err %d\n",
886 				reqp->bRequestType, reqp->bRequest, status);
887 			urb->dev = NULL;
888 		} else
889 			ctx->pending++;
890 	} else
891 		urb->dev = NULL;
892 
893 	/* signal completion when nothing's queued */
894 	if (ctx->pending == 0)
895 		complete(&ctx->complete);
896 	spin_unlock(&ctx->lock);
897 }
898 
899 static int
900 test_ctrl_queue(struct usbtest_dev *dev, struct usbtest_param *param)
901 {
902 	struct usb_device	*udev = testdev_to_usbdev(dev);
903 	struct urb		**urb;
904 	struct ctrl_ctx		context;
905 	int			i;
906 
907 	if (param->sglen == 0 || param->iterations > UINT_MAX / param->sglen)
908 		return -EOPNOTSUPP;
909 
910 	spin_lock_init(&context.lock);
911 	context.dev = dev;
912 	init_completion(&context.complete);
913 	context.count = param->sglen * param->iterations;
914 	context.pending = 0;
915 	context.status = -ENOMEM;
916 	context.param = param;
917 	context.last = -1;
918 
919 	/* allocate and init the urbs we'll queue.
920 	 * as with bulk/intr sglists, sglen is the queue depth; it also
921 	 * controls which subtests run (more tests than sglen) or rerun.
922 	 */
923 	urb = kcalloc(param->sglen, sizeof(struct urb *), GFP_KERNEL);
924 	if (!urb)
925 		return -ENOMEM;
926 	for (i = 0; i < param->sglen; i++) {
927 		int			pipe = usb_rcvctrlpipe(udev, 0);
928 		unsigned		len;
929 		struct urb		*u;
930 		struct usb_ctrlrequest	req;
931 		struct subcase		*reqp;
932 
933 		/* sign of this variable means:
934 		 *  -: tested code must return this (negative) error code
935 		 *  +: tested code may return this (negative too) error code
936 		 */
937 		int			expected = 0;
938 
939 		/* requests here are mostly expected to succeed on any
940 		 * device, but some are chosen to trigger protocol stalls
941 		 * or short reads.
942 		 */
943 		memset(&req, 0, sizeof req);
944 		req.bRequest = USB_REQ_GET_DESCRIPTOR;
945 		req.bRequestType = USB_DIR_IN|USB_RECIP_DEVICE;
946 
947 		switch (i % NUM_SUBCASES) {
948 		case 0:		/* get device descriptor */
949 			req.wValue = cpu_to_le16(USB_DT_DEVICE << 8);
950 			len = sizeof(struct usb_device_descriptor);
951 			break;
952 		case 1:		/* get first config descriptor (only) */
953 			req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
954 			len = sizeof(struct usb_config_descriptor);
955 			break;
956 		case 2:		/* get altsetting (OFTEN STALLS) */
957 			req.bRequest = USB_REQ_GET_INTERFACE;
958 			req.bRequestType = USB_DIR_IN|USB_RECIP_INTERFACE;
959 			/* index = 0 means first interface */
960 			len = 1;
961 			expected = EPIPE;
962 			break;
963 		case 3:		/* get interface status */
964 			req.bRequest = USB_REQ_GET_STATUS;
965 			req.bRequestType = USB_DIR_IN|USB_RECIP_INTERFACE;
966 			/* interface 0 */
967 			len = 2;
968 			break;
969 		case 4:		/* get device status */
970 			req.bRequest = USB_REQ_GET_STATUS;
971 			req.bRequestType = USB_DIR_IN|USB_RECIP_DEVICE;
972 			len = 2;
973 			break;
974 		case 5:		/* get device qualifier (MAY STALL) */
975 			req.wValue = cpu_to_le16 (USB_DT_DEVICE_QUALIFIER << 8);
976 			len = sizeof(struct usb_qualifier_descriptor);
977 			if (udev->speed != USB_SPEED_HIGH)
978 				expected = EPIPE;
979 			break;
980 		case 6:		/* get first config descriptor, plus interface */
981 			req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
982 			len = sizeof(struct usb_config_descriptor);
983 			len += sizeof(struct usb_interface_descriptor);
984 			break;
985 		case 7:		/* get interface descriptor (ALWAYS STALLS) */
986 			req.wValue = cpu_to_le16 (USB_DT_INTERFACE << 8);
987 			/* interface == 0 */
988 			len = sizeof(struct usb_interface_descriptor);
989 			expected = -EPIPE;
990 			break;
991 		/* NOTE: two consecutive stalls in the queue here.
992 		 *  that tests fault recovery a bit more aggressively. */
993 		case 8:		/* clear endpoint halt (MAY STALL) */
994 			req.bRequest = USB_REQ_CLEAR_FEATURE;
995 			req.bRequestType = USB_RECIP_ENDPOINT;
996 			/* wValue 0 == ep halt */
997 			/* wIndex 0 == ep0 (shouldn't halt!) */
998 			len = 0;
999 			pipe = usb_sndctrlpipe(udev, 0);
1000 			expected = EPIPE;
1001 			break;
1002 		case 9:		/* get endpoint status */
1003 			req.bRequest = USB_REQ_GET_STATUS;
1004 			req.bRequestType = USB_DIR_IN|USB_RECIP_ENDPOINT;
1005 			/* endpoint 0 */
1006 			len = 2;
1007 			break;
1008 		case 10:	/* trigger short read (EREMOTEIO) */
1009 			req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
1010 			len = 1024;
1011 			expected = -EREMOTEIO;
1012 			break;
1013 		/* NOTE: two consecutive _different_ faults in the queue. */
1014 		case 11:	/* get endpoint descriptor (ALWAYS STALLS) */
1015 			req.wValue = cpu_to_le16(USB_DT_ENDPOINT << 8);
1016 			/* endpoint == 0 */
1017 			len = sizeof(struct usb_interface_descriptor);
1018 			expected = EPIPE;
1019 			break;
1020 		/* NOTE: sometimes even a third fault in the queue! */
1021 		case 12:	/* get string 0 descriptor (MAY STALL) */
1022 			req.wValue = cpu_to_le16(USB_DT_STRING << 8);
1023 			/* string == 0, for language IDs */
1024 			len = sizeof(struct usb_interface_descriptor);
1025 			/* may succeed when > 4 languages */
1026 			expected = EREMOTEIO;	/* or EPIPE, if no strings */
1027 			break;
1028 		case 13:	/* short read, resembling case 10 */
1029 			req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
1030 			/* last data packet "should" be DATA1, not DATA0 */
1031 			len = 1024 - udev->descriptor.bMaxPacketSize0;
1032 			expected = -EREMOTEIO;
1033 			break;
1034 		case 14:	/* short read; try to fill the last packet */
1035 			req.wValue = cpu_to_le16((USB_DT_DEVICE << 8) | 0);
1036 			/* device descriptor size == 18 bytes */
1037 			len = udev->descriptor.bMaxPacketSize0;
1038 			if (udev->speed == USB_SPEED_SUPER)
1039 				len = 512;
1040 			switch (len) {
1041 			case 8:
1042 				len = 24;
1043 				break;
1044 			case 16:
1045 				len = 32;
1046 				break;
1047 			}
1048 			expected = -EREMOTEIO;
1049 			break;
1050 		default:
1051 			ERROR(dev, "bogus number of ctrl queue testcases!\n");
1052 			context.status = -EINVAL;
1053 			goto cleanup;
1054 		}
1055 		req.wLength = cpu_to_le16(len);
1056 		urb[i] = u = simple_alloc_urb(udev, pipe, len);
1057 		if (!u)
1058 			goto cleanup;
1059 
1060 		reqp = kmalloc(sizeof *reqp, GFP_KERNEL);
1061 		if (!reqp)
1062 			goto cleanup;
1063 		reqp->setup = req;
1064 		reqp->number = i % NUM_SUBCASES;
1065 		reqp->expected = expected;
1066 		u->setup_packet = (char *) &reqp->setup;
1067 
1068 		u->context = &context;
1069 		u->complete = ctrl_complete;
1070 	}
1071 
1072 	/* queue the urbs */
1073 	context.urb = urb;
1074 	spin_lock_irq(&context.lock);
1075 	for (i = 0; i < param->sglen; i++) {
1076 		context.status = usb_submit_urb(urb[i], GFP_ATOMIC);
1077 		if (context.status != 0) {
1078 			ERROR(dev, "can't submit urb[%d], status %d\n",
1079 					i, context.status);
1080 			context.count = context.pending;
1081 			break;
1082 		}
1083 		context.pending++;
1084 	}
1085 	spin_unlock_irq(&context.lock);
1086 
1087 	/* FIXME  set timer and time out; provide a disconnect hook */
1088 
1089 	/* wait for the last one to complete */
1090 	if (context.pending > 0)
1091 		wait_for_completion(&context.complete);
1092 
1093 cleanup:
1094 	for (i = 0; i < param->sglen; i++) {
1095 		if (!urb[i])
1096 			continue;
1097 		urb[i]->dev = udev;
1098 		kfree(urb[i]->setup_packet);
1099 		simple_free_urb(urb[i]);
1100 	}
1101 	kfree(urb);
1102 	return context.status;
1103 }
1104 #undef NUM_SUBCASES
1105 
1106 
1107 /*-------------------------------------------------------------------------*/
1108 
1109 static void unlink1_callback(struct urb *urb)
1110 {
1111 	int	status = urb->status;
1112 
1113 	/* we "know" -EPIPE (stall) never happens */
1114 	if (!status)
1115 		status = usb_submit_urb(urb, GFP_ATOMIC);
1116 	if (status) {
1117 		urb->status = status;
1118 		complete(urb->context);
1119 	}
1120 }
1121 
1122 static int unlink1(struct usbtest_dev *dev, int pipe, int size, int async)
1123 {
1124 	struct urb		*urb;
1125 	struct completion	completion;
1126 	int			retval = 0;
1127 
1128 	init_completion(&completion);
1129 	urb = simple_alloc_urb(testdev_to_usbdev(dev), pipe, size);
1130 	if (!urb)
1131 		return -ENOMEM;
1132 	urb->context = &completion;
1133 	urb->complete = unlink1_callback;
1134 
1135 	/* keep the endpoint busy.  there are lots of hc/hcd-internal
1136 	 * states, and testing should get to all of them over time.
1137 	 *
1138 	 * FIXME want additional tests for when endpoint is STALLing
1139 	 * due to errors, or is just NAKing requests.
1140 	 */
1141 	retval = usb_submit_urb(urb, GFP_KERNEL);
1142 	if (retval != 0) {
1143 		dev_err(&dev->intf->dev, "submit fail %d\n", retval);
1144 		return retval;
1145 	}
1146 
1147 	/* unlinking that should always work.  variable delay tests more
1148 	 * hcd states and code paths, even with little other system load.
1149 	 */
1150 	msleep(jiffies % (2 * INTERRUPT_RATE));
1151 	if (async) {
1152 		while (!completion_done(&completion)) {
1153 			retval = usb_unlink_urb(urb);
1154 
1155 			switch (retval) {
1156 			case -EBUSY:
1157 			case -EIDRM:
1158 				/* we can't unlink urbs while they're completing
1159 				 * or if they've completed, and we haven't
1160 				 * resubmitted. "normal" drivers would prevent
1161 				 * resubmission, but since we're testing unlink
1162 				 * paths, we can't.
1163 				 */
1164 				ERROR(dev, "unlink retry\n");
1165 				continue;
1166 			case 0:
1167 			case -EINPROGRESS:
1168 				break;
1169 
1170 			default:
1171 				dev_err(&dev->intf->dev,
1172 					"unlink fail %d\n", retval);
1173 				return retval;
1174 			}
1175 
1176 			break;
1177 		}
1178 	} else
1179 		usb_kill_urb(urb);
1180 
1181 	wait_for_completion(&completion);
1182 	retval = urb->status;
1183 	simple_free_urb(urb);
1184 
1185 	if (async)
1186 		return (retval == -ECONNRESET) ? 0 : retval - 1000;
1187 	else
1188 		return (retval == -ENOENT || retval == -EPERM) ?
1189 				0 : retval - 2000;
1190 }
1191 
1192 static int unlink_simple(struct usbtest_dev *dev, int pipe, int len)
1193 {
1194 	int			retval = 0;
1195 
1196 	/* test sync and async paths */
1197 	retval = unlink1(dev, pipe, len, 1);
1198 	if (!retval)
1199 		retval = unlink1(dev, pipe, len, 0);
1200 	return retval;
1201 }
1202 
1203 /*-------------------------------------------------------------------------*/
1204 
1205 struct queued_ctx {
1206 	struct completion	complete;
1207 	atomic_t		pending;
1208 	unsigned		num;
1209 	int			status;
1210 	struct urb		**urbs;
1211 };
1212 
1213 static void unlink_queued_callback(struct urb *urb)
1214 {
1215 	int			status = urb->status;
1216 	struct queued_ctx	*ctx = urb->context;
1217 
1218 	if (ctx->status)
1219 		goto done;
1220 	if (urb == ctx->urbs[ctx->num - 4] || urb == ctx->urbs[ctx->num - 2]) {
1221 		if (status == -ECONNRESET)
1222 			goto done;
1223 		/* What error should we report if the URB completed normally? */
1224 	}
1225 	if (status != 0)
1226 		ctx->status = status;
1227 
1228  done:
1229 	if (atomic_dec_and_test(&ctx->pending))
1230 		complete(&ctx->complete);
1231 }
1232 
1233 static int unlink_queued(struct usbtest_dev *dev, int pipe, unsigned num,
1234 		unsigned size)
1235 {
1236 	struct queued_ctx	ctx;
1237 	struct usb_device	*udev = testdev_to_usbdev(dev);
1238 	void			*buf;
1239 	dma_addr_t		buf_dma;
1240 	int			i;
1241 	int			retval = -ENOMEM;
1242 
1243 	init_completion(&ctx.complete);
1244 	atomic_set(&ctx.pending, 1);	/* One more than the actual value */
1245 	ctx.num = num;
1246 	ctx.status = 0;
1247 
1248 	buf = usb_alloc_coherent(udev, size, GFP_KERNEL, &buf_dma);
1249 	if (!buf)
1250 		return retval;
1251 	memset(buf, 0, size);
1252 
1253 	/* Allocate and init the urbs we'll queue */
1254 	ctx.urbs = kcalloc(num, sizeof(struct urb *), GFP_KERNEL);
1255 	if (!ctx.urbs)
1256 		goto free_buf;
1257 	for (i = 0; i < num; i++) {
1258 		ctx.urbs[i] = usb_alloc_urb(0, GFP_KERNEL);
1259 		if (!ctx.urbs[i])
1260 			goto free_urbs;
1261 		usb_fill_bulk_urb(ctx.urbs[i], udev, pipe, buf, size,
1262 				unlink_queued_callback, &ctx);
1263 		ctx.urbs[i]->transfer_dma = buf_dma;
1264 		ctx.urbs[i]->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
1265 	}
1266 
1267 	/* Submit all the URBs and then unlink URBs num - 4 and num - 2. */
1268 	for (i = 0; i < num; i++) {
1269 		atomic_inc(&ctx.pending);
1270 		retval = usb_submit_urb(ctx.urbs[i], GFP_KERNEL);
1271 		if (retval != 0) {
1272 			dev_err(&dev->intf->dev, "submit urbs[%d] fail %d\n",
1273 					i, retval);
1274 			atomic_dec(&ctx.pending);
1275 			ctx.status = retval;
1276 			break;
1277 		}
1278 	}
1279 	if (i == num) {
1280 		usb_unlink_urb(ctx.urbs[num - 4]);
1281 		usb_unlink_urb(ctx.urbs[num - 2]);
1282 	} else {
1283 		while (--i >= 0)
1284 			usb_unlink_urb(ctx.urbs[i]);
1285 	}
1286 
1287 	if (atomic_dec_and_test(&ctx.pending))		/* The extra count */
1288 		complete(&ctx.complete);
1289 	wait_for_completion(&ctx.complete);
1290 	retval = ctx.status;
1291 
1292  free_urbs:
1293 	for (i = 0; i < num; i++)
1294 		usb_free_urb(ctx.urbs[i]);
1295 	kfree(ctx.urbs);
1296  free_buf:
1297 	usb_free_coherent(udev, size, buf, buf_dma);
1298 	return retval;
1299 }
1300 
1301 /*-------------------------------------------------------------------------*/
1302 
1303 static int verify_not_halted(struct usbtest_dev *tdev, int ep, struct urb *urb)
1304 {
1305 	int	retval;
1306 	u16	status;
1307 
1308 	/* shouldn't look or act halted */
1309 	retval = usb_get_status(urb->dev, USB_RECIP_ENDPOINT, ep, &status);
1310 	if (retval < 0) {
1311 		ERROR(tdev, "ep %02x couldn't get no-halt status, %d\n",
1312 				ep, retval);
1313 		return retval;
1314 	}
1315 	if (status != 0) {
1316 		ERROR(tdev, "ep %02x bogus status: %04x != 0\n", ep, status);
1317 		return -EINVAL;
1318 	}
1319 	retval = simple_io(tdev, urb, 1, 0, 0, __func__);
1320 	if (retval != 0)
1321 		return -EINVAL;
1322 	return 0;
1323 }
1324 
1325 static int verify_halted(struct usbtest_dev *tdev, int ep, struct urb *urb)
1326 {
1327 	int	retval;
1328 	u16	status;
1329 
1330 	/* should look and act halted */
1331 	retval = usb_get_status(urb->dev, USB_RECIP_ENDPOINT, ep, &status);
1332 	if (retval < 0) {
1333 		ERROR(tdev, "ep %02x couldn't get halt status, %d\n",
1334 				ep, retval);
1335 		return retval;
1336 	}
1337 	le16_to_cpus(&status);
1338 	if (status != 1) {
1339 		ERROR(tdev, "ep %02x bogus status: %04x != 1\n", ep, status);
1340 		return -EINVAL;
1341 	}
1342 	retval = simple_io(tdev, urb, 1, 0, -EPIPE, __func__);
1343 	if (retval != -EPIPE)
1344 		return -EINVAL;
1345 	retval = simple_io(tdev, urb, 1, 0, -EPIPE, "verify_still_halted");
1346 	if (retval != -EPIPE)
1347 		return -EINVAL;
1348 	return 0;
1349 }
1350 
1351 static int test_halt(struct usbtest_dev *tdev, int ep, struct urb *urb)
1352 {
1353 	int	retval;
1354 
1355 	/* shouldn't look or act halted now */
1356 	retval = verify_not_halted(tdev, ep, urb);
1357 	if (retval < 0)
1358 		return retval;
1359 
1360 	/* set halt (protocol test only), verify it worked */
1361 	retval = usb_control_msg(urb->dev, usb_sndctrlpipe(urb->dev, 0),
1362 			USB_REQ_SET_FEATURE, USB_RECIP_ENDPOINT,
1363 			USB_ENDPOINT_HALT, ep,
1364 			NULL, 0, USB_CTRL_SET_TIMEOUT);
1365 	if (retval < 0) {
1366 		ERROR(tdev, "ep %02x couldn't set halt, %d\n", ep, retval);
1367 		return retval;
1368 	}
1369 	retval = verify_halted(tdev, ep, urb);
1370 	if (retval < 0)
1371 		return retval;
1372 
1373 	/* clear halt (tests API + protocol), verify it worked */
1374 	retval = usb_clear_halt(urb->dev, urb->pipe);
1375 	if (retval < 0) {
1376 		ERROR(tdev, "ep %02x couldn't clear halt, %d\n", ep, retval);
1377 		return retval;
1378 	}
1379 	retval = verify_not_halted(tdev, ep, urb);
1380 	if (retval < 0)
1381 		return retval;
1382 
1383 	/* NOTE:  could also verify SET_INTERFACE clear halts ... */
1384 
1385 	return 0;
1386 }
1387 
1388 static int halt_simple(struct usbtest_dev *dev)
1389 {
1390 	int		ep;
1391 	int		retval = 0;
1392 	struct urb	*urb;
1393 
1394 	urb = simple_alloc_urb(testdev_to_usbdev(dev), 0, 512);
1395 	if (urb == NULL)
1396 		return -ENOMEM;
1397 
1398 	if (dev->in_pipe) {
1399 		ep = usb_pipeendpoint(dev->in_pipe) | USB_DIR_IN;
1400 		urb->pipe = dev->in_pipe;
1401 		retval = test_halt(dev, ep, urb);
1402 		if (retval < 0)
1403 			goto done;
1404 	}
1405 
1406 	if (dev->out_pipe) {
1407 		ep = usb_pipeendpoint(dev->out_pipe);
1408 		urb->pipe = dev->out_pipe;
1409 		retval = test_halt(dev, ep, urb);
1410 	}
1411 done:
1412 	simple_free_urb(urb);
1413 	return retval;
1414 }
1415 
1416 /*-------------------------------------------------------------------------*/
1417 
1418 /* Control OUT tests use the vendor control requests from Intel's
1419  * USB 2.0 compliance test device:  write a buffer, read it back.
1420  *
1421  * Intel's spec only _requires_ that it work for one packet, which
1422  * is pretty weak.   Some HCDs place limits here; most devices will
1423  * need to be able to handle more than one OUT data packet.  We'll
1424  * try whatever we're told to try.
1425  */
1426 static int ctrl_out(struct usbtest_dev *dev,
1427 		unsigned count, unsigned length, unsigned vary, unsigned offset)
1428 {
1429 	unsigned		i, j, len;
1430 	int			retval;
1431 	u8			*buf;
1432 	char			*what = "?";
1433 	struct usb_device	*udev;
1434 
1435 	if (length < 1 || length > 0xffff || vary >= length)
1436 		return -EINVAL;
1437 
1438 	buf = kmalloc(length + offset, GFP_KERNEL);
1439 	if (!buf)
1440 		return -ENOMEM;
1441 
1442 	buf += offset;
1443 	udev = testdev_to_usbdev(dev);
1444 	len = length;
1445 	retval = 0;
1446 
1447 	/* NOTE:  hardware might well act differently if we pushed it
1448 	 * with lots of back-to-back queued requests.
1449 	 */
1450 	for (i = 0; i < count; i++) {
1451 		/* write patterned data */
1452 		for (j = 0; j < len; j++)
1453 			buf[j] = i + j;
1454 		retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
1455 				0x5b, USB_DIR_OUT|USB_TYPE_VENDOR,
1456 				0, 0, buf, len, USB_CTRL_SET_TIMEOUT);
1457 		if (retval != len) {
1458 			what = "write";
1459 			if (retval >= 0) {
1460 				ERROR(dev, "ctrl_out, wlen %d (expected %d)\n",
1461 						retval, len);
1462 				retval = -EBADMSG;
1463 			}
1464 			break;
1465 		}
1466 
1467 		/* read it back -- assuming nothing intervened!!  */
1468 		retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
1469 				0x5c, USB_DIR_IN|USB_TYPE_VENDOR,
1470 				0, 0, buf, len, USB_CTRL_GET_TIMEOUT);
1471 		if (retval != len) {
1472 			what = "read";
1473 			if (retval >= 0) {
1474 				ERROR(dev, "ctrl_out, rlen %d (expected %d)\n",
1475 						retval, len);
1476 				retval = -EBADMSG;
1477 			}
1478 			break;
1479 		}
1480 
1481 		/* fail if we can't verify */
1482 		for (j = 0; j < len; j++) {
1483 			if (buf[j] != (u8) (i + j)) {
1484 				ERROR(dev, "ctrl_out, byte %d is %d not %d\n",
1485 					j, buf[j], (u8) i + j);
1486 				retval = -EBADMSG;
1487 				break;
1488 			}
1489 		}
1490 		if (retval < 0) {
1491 			what = "verify";
1492 			break;
1493 		}
1494 
1495 		len += vary;
1496 
1497 		/* [real world] the "zero bytes IN" case isn't really used.
1498 		 * hardware can easily trip up in this weird case, since its
1499 		 * status stage is IN, not OUT like other ep0in transfers.
1500 		 */
1501 		if (len > length)
1502 			len = realworld ? 1 : 0;
1503 	}
1504 
1505 	if (retval < 0)
1506 		ERROR(dev, "ctrl_out %s failed, code %d, count %d\n",
1507 			what, retval, i);
1508 
1509 	kfree(buf - offset);
1510 	return retval;
1511 }
1512 
1513 /*-------------------------------------------------------------------------*/
1514 
1515 /* ISO tests ... mimic common usage
1516  *  - buffer length is split into N packets (mostly maxpacket sized)
1517  *  - multi-buffers according to sglen
1518  */
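/* Worked example of the packet split done in iso_alloc_urb() below: a
 * high-bandwidth high-speed endpoint reporting wMaxPacketSize 0x1400
 * yields maxp = 1024 * (1 + 2) = 3072 bytes per microframe, so a
 * 32768-byte request becomes DIV_ROUND_UP(32768, 3072) = 11 packets,
 * ten full ones plus a short final packet of 2048 bytes.
 */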
1519 
1520 struct iso_context {
1521 	unsigned		count;
1522 	unsigned		pending;
1523 	spinlock_t		lock;
1524 	struct completion	done;
1525 	int			submit_error;
1526 	unsigned long		errors;
1527 	unsigned long		packet_count;
1528 	struct usbtest_dev	*dev;
1529 };
1530 
1531 static void iso_callback(struct urb *urb)
1532 {
1533 	struct iso_context	*ctx = urb->context;
1534 
1535 	spin_lock(&ctx->lock);
1536 	ctx->count--;
1537 
1538 	ctx->packet_count += urb->number_of_packets;
1539 	if (urb->error_count > 0)
1540 		ctx->errors += urb->error_count;
1541 	else if (urb->status != 0)
1542 		ctx->errors += urb->number_of_packets;
1543 	else if (urb->actual_length != urb->transfer_buffer_length)
1544 		ctx->errors++;
1545 	else if (check_guard_bytes(ctx->dev, urb) != 0)
1546 		ctx->errors++;
1547 
1548 	if (urb->status == 0 && ctx->count > (ctx->pending - 1)
1549 			&& !ctx->submit_error) {
1550 		int status = usb_submit_urb(urb, GFP_ATOMIC);
1551 		switch (status) {
1552 		case 0:
1553 			goto done;
1554 		default:
1555 			dev_err(&ctx->dev->intf->dev,
1556 					"iso resubmit err %d\n",
1557 					status);
1558 			/* FALLTHROUGH */
1559 		case -ENODEV:			/* disconnected */
1560 		case -ESHUTDOWN:		/* endpoint disabled */
1561 			ctx->submit_error = 1;
1562 			break;
1563 		}
1564 	}
1565 
1566 	ctx->pending--;
1567 	if (ctx->pending == 0) {
1568 		if (ctx->errors)
1569 			dev_err(&ctx->dev->intf->dev,
1570 				"iso test, %lu errors out of %lu\n",
1571 				ctx->errors, ctx->packet_count);
1572 		complete(&ctx->done);
1573 	}
1574 done:
1575 	spin_unlock(&ctx->lock);
1576 }
1577 
1578 static struct urb *iso_alloc_urb(
1579 	struct usb_device	*udev,
1580 	int			pipe,
1581 	struct usb_endpoint_descriptor	*desc,
1582 	long			bytes,
1583 	unsigned offset
1584 )
1585 {
1586 	struct urb		*urb;
1587 	unsigned		i, maxp, packets;
1588 
1589 	if (bytes < 0 || !desc)
1590 		return NULL;
1591 	maxp = 0x7ff & usb_endpoint_maxp(desc);
1592 	maxp *= 1 + (0x3 & (usb_endpoint_maxp(desc) >> 11));
1593 	packets = DIV_ROUND_UP(bytes, maxp);
1594 
1595 	urb = usb_alloc_urb(packets, GFP_KERNEL);
1596 	if (!urb)
1597 		return urb;
1598 	urb->dev = udev;
1599 	urb->pipe = pipe;
1600 
1601 	urb->number_of_packets = packets;
1602 	urb->transfer_buffer_length = bytes;
1603 	urb->transfer_buffer = usb_alloc_coherent(udev, bytes + offset,
1604 							GFP_KERNEL,
1605 							&urb->transfer_dma);
1606 	if (!urb->transfer_buffer) {
1607 		usb_free_urb(urb);
1608 		return NULL;
1609 	}
1610 	if (offset) {
1611 		memset(urb->transfer_buffer, GUARD_BYTE, offset);
1612 		urb->transfer_buffer += offset;
1613 		urb->transfer_dma += offset;
1614 	}
1615 	/* For inbound transfers use guard byte so that test fails if
1616 		data not correctly copied */
1617 	memset(urb->transfer_buffer,
1618 			usb_pipein(urb->pipe) ? GUARD_BYTE : 0,
1619 			bytes);
1620 
1621 	for (i = 0; i < packets; i++) {
1622 		/* here, only the last packet will be short */
1623 		urb->iso_frame_desc[i].length = min((unsigned) bytes, maxp);
1624 		bytes -= urb->iso_frame_desc[i].length;
1625 
1626 		urb->iso_frame_desc[i].offset = maxp * i;
1627 	}
1628 
1629 	urb->complete = iso_callback;
1630 	/* urb->context = SET BY CALLER */
1631 	urb->interval = 1 << (desc->bInterval - 1);
1632 	urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP;
1633 	return urb;
1634 }
1635 
1636 static int
1637 test_iso_queue(struct usbtest_dev *dev, struct usbtest_param *param,
1638 		int pipe, struct usb_endpoint_descriptor *desc, unsigned offset)
1639 {
1640 	struct iso_context	context;
1641 	struct usb_device	*udev;
1642 	unsigned		i;
1643 	unsigned long		packets = 0;
1644 	int			status = 0;
1645 	struct urb		*urbs[10];	/* FIXME no limit */
1646 
1647 	if (param->sglen > 10)
1648 		return -EDOM;
1649 
1650 	memset(&context, 0, sizeof context);
1651 	context.count = param->iterations * param->sglen;
1652 	context.dev = dev;
1653 	init_completion(&context.done);
1654 	spin_lock_init(&context.lock);
1655 
1656 	memset(urbs, 0, sizeof urbs);
1657 	udev = testdev_to_usbdev(dev);
1658 	dev_info(&dev->intf->dev,
1659 		"... iso period %d %sframes, wMaxPacket %04x\n",
1660 		1 << (desc->bInterval - 1),
1661 		(udev->speed == USB_SPEED_HIGH) ? "micro" : "",
1662 		usb_endpoint_maxp(desc));
1663 
1664 	for (i = 0; i < param->sglen; i++) {
1665 		urbs[i] = iso_alloc_urb(udev, pipe, desc,
1666 					param->length, offset);
1667 		if (!urbs[i]) {
1668 			status = -ENOMEM;
1669 			goto fail;
1670 		}
1671 		packets += urbs[i]->number_of_packets;
1672 		urbs[i]->context = &context;
1673 	}
1674 	packets *= param->iterations;
1675 	dev_info(&dev->intf->dev,
1676 		"... total %lu msec (%lu packets)\n",
1677 		(packets * (1 << (desc->bInterval - 1)))
1678 			/ ((udev->speed == USB_SPEED_HIGH) ? 8 : 1),
1679 		packets);
1680 
1681 	spin_lock_irq(&context.lock);
1682 	for (i = 0; i < param->sglen; i++) {
1683 		++context.pending;
1684 		status = usb_submit_urb(urbs[i], GFP_ATOMIC);
1685 		if (status < 0) {
1686 			ERROR(dev, "submit iso[%d], error %d\n", i, status);
1687 			if (i == 0) {
1688 				spin_unlock_irq(&context.lock);
1689 				goto fail;
1690 			}
1691 
1692 			simple_free_urb(urbs[i]);
1693 			urbs[i] = NULL;
1694 			context.pending--;
1695 			context.submit_error = 1;
1696 			break;
1697 		}
1698 	}
1699 	spin_unlock_irq(&context.lock);
1700 
1701 	wait_for_completion(&context.done);
1702 
1703 	for (i = 0; i < param->sglen; i++) {
1704 		if (urbs[i])
1705 			simple_free_urb(urbs[i]);
1706 	}
1707 	/*
1708 	 * Isochronous transfers are expected to fail sometimes.  As an
1709 	 * arbitrary limit, we will report an error if any submissions
1710 	 * fail or if the transfer failure rate is > 10%.
1711 	 */
1712 	if (status != 0)
1713 		;
1714 	else if (context.submit_error)
1715 		status = -EACCES;
1716 	else if (context.errors > context.packet_count / 10)
1717 		status = -EIO;
1718 	return status;
1719 
1720 fail:
1721 	for (i = 0; i < param->sglen; i++) {
1722 		if (urbs[i])
1723 			simple_free_urb(urbs[i]);
1724 	}
1725 	return status;
1726 }
1727 
1728 static int test_unaligned_bulk(
1729 	struct usbtest_dev *tdev,
1730 	int pipe,
1731 	unsigned length,
1732 	int iterations,
1733 	unsigned transfer_flags,
1734 	const char *label)
1735 {
1736 	int retval;
1737 	struct urb *urb = usbtest_alloc_urb(
1738 		testdev_to_usbdev(tdev), pipe, length, transfer_flags, 1);
1739 
1740 	if (!urb)
1741 		return -ENOMEM;
1742 
1743 	retval = simple_io(tdev, urb, iterations, 0, 0, label);
1744 	simple_free_urb(urb);
1745 	return retval;
1746 }
1747 
1748 /*-------------------------------------------------------------------------*/
1749 
1750 /* We only have this one interface to user space, through usbfs.
1751  * User mode code can scan usbfs to find N different devices (maybe on
1752  * different busses) to use when testing, and allocate one thread per
1753  * test.  So discovery is simplified, and we have no device naming issues.
1754  *
1755  * Don't use these only as stress/load tests.  Use them along with
1756  * other USB bus activity:  plugging, unplugging, mousing, mp3 playback,
1757  * video capture, and so on.  Run different tests at different times, in
1758  * different sequences.  Nothing here should interact with other devices,
1759  * except indirectly by consuming USB bandwidth and CPU resources for test
1760  * threads and request completion.  But the only way to know that for sure
1761  * is to test when HC queues are in use by many devices.
1762  *
1763  * WARNING:  Because usbfs grabs udev->dev.sem before calling this ioctl(),
1764  * it locks out usbcore in certain code paths.  Notably, if you disconnect
1765  * the device-under-test, khubd will block forever waiting for the
1766  * ioctl to complete ... so that usb_disconnect() can abort the pending
1767  * urbs and then call usbtest_disconnect().  To abort a test, you're best
1768  * off just killing the userspace task and waiting for it to exit.
1769  */
1770 
1771 static int
1772 usbtest_ioctl(struct usb_interface *intf, unsigned int code, void *buf)
1773 {
1774 	struct usbtest_dev	*dev = usb_get_intfdata(intf);
1775 	struct usb_device	*udev = testdev_to_usbdev(dev);
1776 	struct usbtest_param	*param = buf;
1777 	int			retval = -EOPNOTSUPP;
1778 	struct urb		*urb;
1779 	struct scatterlist	*sg;
1780 	struct usb_sg_request	req;
1781 	struct timeval		start;
1782 	unsigned		i;
1783 
1784 	/* FIXME USBDEVFS_CONNECTINFO doesn't say how fast the device is. */
1785 
1786 	pattern = mod_pattern;
1787 
1788 	if (code != USBTEST_REQUEST)
1789 		return -EOPNOTSUPP;
1790 
1791 	if (param->iterations <= 0)
1792 		return -EINVAL;
1793 
1794 	if (mutex_lock_interruptible(&dev->lock))
1795 		return -ERESTARTSYS;
1796 
1797 	/* FIXME: What if a system sleep starts while a test is running? */
1798 
1799 	/* some devices, like ez-usb default devices, need a non-default
1800 	 * altsetting to have any active endpoints.  some tests change
1801 	 * altsettings; force a default so most tests don't need to check.
1802 	 */
1803 	if (dev->info->alt >= 0) {
1804 		int	res;
1805 
1806 		if (intf->altsetting->desc.bInterfaceNumber) {
1807 			mutex_unlock(&dev->lock);
1808 			return -ENODEV;
1809 		}
1810 		res = set_altsetting(dev, dev->info->alt);
1811 		if (res) {
1812 			dev_err(&intf->dev,
1813 					"set altsetting to %d failed, %d\n",
1814 					dev->info->alt, res);
1815 			mutex_unlock(&dev->lock);
1816 			return res;
1817 		}
1818 	}
1819 
1820 	/*
1821 	 * Just a bunch of test cases that every HCD is expected to handle.
1822 	 *
1823 	 * Some may need specific firmware, though it'd be good to have
1824 	 * one firmware image to handle all the test cases.
1825 	 *
1826 	 * FIXME add more tests!  cancel requests, verify the data, control
1827 	 * queueing, concurrent read+write threads, and so on.
1828 	 */
1829 	do_gettimeofday(&start);
1830 	switch (param->test_num) {
1831 
1832 	case 0:
1833 		dev_info(&intf->dev, "TEST 0:  NOP\n");
1834 		retval = 0;
1835 		break;
1836 
1837 	/* Simple non-queued bulk I/O tests */
1838 	case 1:
1839 		if (dev->out_pipe == 0)
1840 			break;
1841 		dev_info(&intf->dev,
1842 				"TEST 1:  write %d bytes %u times\n",
1843 				param->length, param->iterations);
1844 		urb = simple_alloc_urb(udev, dev->out_pipe, param->length);
1845 		if (!urb) {
1846 			retval = -ENOMEM;
1847 			break;
1848 		}
1849 		/* FIRMWARE:  bulk sink (maybe accepts short writes) */
1850 		retval = simple_io(dev, urb, param->iterations, 0, 0, "test1");
1851 		simple_free_urb(urb);
1852 		break;
1853 	case 2:
1854 		if (dev->in_pipe == 0)
1855 			break;
1856 		dev_info(&intf->dev,
1857 				"TEST 2:  read %d bytes %u times\n",
1858 				param->length, param->iterations);
1859 		urb = simple_alloc_urb(udev, dev->in_pipe, param->length);
1860 		if (!urb) {
1861 			retval = -ENOMEM;
1862 			break;
1863 		}
1864 		/* FIRMWARE:  bulk source (maybe generates short writes) */
1865 		retval = simple_io(dev, urb, param->iterations, 0, 0, "test2");
1866 		simple_free_urb(urb);
1867 		break;
1868 	case 3:
1869 		if (dev->out_pipe == 0 || param->vary == 0)
1870 			break;
1871 		dev_info(&intf->dev,
1872 				"TEST 3:  write/%d 0..%d bytes %u times\n",
1873 				param->vary, param->length, param->iterations);
1874 		urb = simple_alloc_urb(udev, dev->out_pipe, param->length);
1875 		if (!urb) {
1876 			retval = -ENOMEM;
1877 			break;
1878 		}
1879 		/* FIRMWARE:  bulk sink (maybe accepts short writes) */
1880 		retval = simple_io(dev, urb, param->iterations, param->vary,
1881 					0, "test3");
1882 		simple_free_urb(urb);
1883 		break;
1884 	case 4:
1885 		if (dev->in_pipe == 0 || param->vary == 0)
1886 			break;
1887 		dev_info(&intf->dev,
1888 				"TEST 4:  read/%d 0..%d bytes %u times\n",
1889 				param->vary, param->length, param->iterations);
1890 		urb = simple_alloc_urb(udev, dev->in_pipe, param->length);
1891 		if (!urb) {
1892 			retval = -ENOMEM;
1893 			break;
1894 		}
1895 		/* FIRMWARE:  bulk source (maybe generates short writes) */
1896 		retval = simple_io(dev, urb, param->iterations, param->vary,
1897 					0, "test4");
1898 		simple_free_urb(urb);
1899 		break;
1900 
1901 	/* Queued bulk I/O tests */
1902 	case 5:
1903 		if (dev->out_pipe == 0 || param->sglen == 0)
1904 			break;
1905 		dev_info(&intf->dev,
1906 			"TEST 5:  write %d sglists %d entries of %d bytes\n",
1907 				param->iterations,
1908 				param->sglen, param->length);
1909 		sg = alloc_sglist(param->sglen, param->length, 0);
1910 		if (!sg) {
1911 			retval = -ENOMEM;
1912 			break;
1913 		}
1914 		/* FIRMWARE:  bulk sink (maybe accepts short writes) */
1915 		retval = perform_sglist(dev, param->iterations, dev->out_pipe,
1916 				&req, sg, param->sglen);
1917 		free_sglist(sg, param->sglen);
1918 		break;
1919 
1920 	case 6:
1921 		if (dev->in_pipe == 0 || param->sglen == 0)
1922 			break;
1923 		dev_info(&intf->dev,
1924 			"TEST 6:  read %d sglists %d entries of %d bytes\n",
1925 				param->iterations,
1926 				param->sglen, param->length);
1927 		sg = alloc_sglist(param->sglen, param->length, 0);
1928 		if (!sg) {
1929 			retval = -ENOMEM;
1930 			break;
1931 		}
1932 		/* FIRMWARE:  bulk source (maybe generates short writes) */
1933 		retval = perform_sglist(dev, param->iterations, dev->in_pipe,
1934 				&req, sg, param->sglen);
1935 		free_sglist(sg, param->sglen);
1936 		break;
1937 	case 7:
1938 		if (dev->out_pipe == 0 || param->sglen == 0 || param->vary == 0)
1939 			break;
1940 		dev_info(&intf->dev,
1941 			"TEST 7:  write/%d %d sglists %d entries 0..%d bytes\n",
1942 				param->vary, param->iterations,
1943 				param->sglen, param->length);
1944 		sg = alloc_sglist(param->sglen, param->length, param->vary);
1945 		if (!sg) {
1946 			retval = -ENOMEM;
1947 			break;
1948 		}
1949 		/* FIRMWARE:  bulk sink (maybe accepts short writes) */
1950 		retval = perform_sglist(dev, param->iterations, dev->out_pipe,
1951 				&req, sg, param->sglen);
1952 		free_sglist(sg, param->sglen);
1953 		break;
1954 	case 8:
1955 		if (dev->in_pipe == 0 || param->sglen == 0 || param->vary == 0)
1956 			break;
1957 		dev_info(&intf->dev,
1958 			"TEST 8:  read/%d %d sglists %d entries 0..%d bytes\n",
1959 				param->vary, param->iterations,
1960 				param->sglen, param->length);
1961 		sg = alloc_sglist(param->sglen, param->length, param->vary);
1962 		if (!sg) {
1963 			retval = -ENOMEM;
1964 			break;
1965 		}
1966 		/* FIRMWARE:  bulk source (maybe generates short writes) */
1967 		retval = perform_sglist(dev, param->iterations, dev->in_pipe,
1968 				&req, sg, param->sglen);
1969 		free_sglist(sg, param->sglen);
1970 		break;
1971 
1972 	/* non-queued sanity tests for control (chapter 9 subset) */
1973 	case 9:
1974 		retval = 0;
1975 		dev_info(&intf->dev,
1976 			"TEST 9:  ch9 (subset) control tests, %d times\n",
1977 				param->iterations);
1978 		for (i = param->iterations; retval == 0 && i--; /* NOP */)
1979 			retval = ch9_postconfig(dev);
1980 		if (retval)
1981 			dev_err(&intf->dev, "ch9 subset failed, "
1982 					"iterations left %d\n", i);
1983 		break;
1984 
1985 	/* queued control messaging */
1986 	case 10:
1987 		retval = 0;
1988 		dev_info(&intf->dev,
1989 				"TEST 10:  queue %d control calls, %d times\n",
1990 				param->sglen,
1991 				param->iterations);
1992 		retval = test_ctrl_queue(dev, param);
1993 		break;
1994 
1995 	/* simple non-queued unlinks (ring with one urb) */
1996 	case 11:
1997 		if (dev->in_pipe == 0 || !param->length)
1998 			break;
1999 		retval = 0;
2000 		dev_info(&intf->dev, "TEST 11:  unlink %d reads of %d\n",
2001 				param->iterations, param->length);
2002 		for (i = param->iterations; retval == 0 && i--; /* NOP */)
2003 			retval = unlink_simple(dev, dev->in_pipe,
2004 						param->length);
2005 		if (retval)
2006 			dev_err(&intf->dev, "unlink reads failed %d, "
2007 				"iterations left %d\n", retval, i);
2008 		break;
2009 	case 12:
2010 		if (dev->out_pipe == 0 || !param->length)
2011 			break;
2012 		retval = 0;
2013 		dev_info(&intf->dev, "TEST 12:  unlink %d writes of %d\n",
2014 				param->iterations, param->length);
2015 		for (i = param->iterations; retval == 0 && i--; /* NOP */)
2016 			retval = unlink_simple(dev, dev->out_pipe,
2017 						param->length);
2018 		if (retval)
2019 			dev_err(&intf->dev, "unlink writes failed %d, "
2020 				"iterations left %d\n", retval, i);
2021 		break;
2022 
2023 	/* ep halt tests */
2024 	case 13:
2025 		if (dev->out_pipe == 0 && dev->in_pipe == 0)
2026 			break;
2027 		retval = 0;
2028 		dev_info(&intf->dev, "TEST 13:  set/clear %d halts\n",
2029 				param->iterations);
2030 		for (i = param->iterations; retval == 0 && i--; /* NOP */)
2031 			retval = halt_simple(dev);
2032 
2033 		if (retval)
2034 			ERROR(dev, "halts failed, iterations left %d\n", i);
2035 		break;
2036 
2037 	/* control write tests */
2038 	case 14:
2039 		if (!dev->info->ctrl_out)
2040 			break;
2041 		dev_info(&intf->dev, "TEST 14:  %d ep0out, %d..%d vary %d\n",
2042 				param->iterations,
2043 				realworld ? 1 : 0, param->length,
2044 				param->vary);
2045 		retval = ctrl_out(dev, param->iterations,
2046 				param->length, param->vary, 0);
2047 		break;
2048 
2049 	/* iso write tests */
2050 	case 15:
2051 		if (dev->out_iso_pipe == 0 || param->sglen == 0)
2052 			break;
2053 		dev_info(&intf->dev,
2054 			"TEST 15:  write %d iso, %d entries of %d bytes\n",
2055 				param->iterations,
2056 				param->sglen, param->length);
2057 		/* FIRMWARE:  iso sink */
2058 		retval = test_iso_queue(dev, param,
2059 				dev->out_iso_pipe, dev->iso_out, 0);
2060 		break;
2061 
2062 	/* iso read tests */
2063 	case 16:
2064 		if (dev->in_iso_pipe == 0 || param->sglen == 0)
2065 			break;
2066 		dev_info(&intf->dev,
2067 			"TEST 16:  read %d iso, %d entries of %d bytes\n",
2068 				param->iterations,
2069 				param->sglen, param->length);
2070 		/* FIRMWARE:  iso source */
2071 		retval = test_iso_queue(dev, param,
2072 				dev->in_iso_pipe, dev->iso_in, 0);
2073 		break;
2074 
2075 	/* FIXME scatterlist cancel (needs helper thread) */
2076 
2077 	/* Tests for bulk I/O using DMA mapping by core and odd address */
2078 	case 17:
2079 		if (dev->out_pipe == 0)
2080 			break;
2081 		dev_info(&intf->dev,
2082 			"TEST 17:  write odd addr %d bytes %u times core map\n",
2083 			param->length, param->iterations);
2084 
2085 		retval = test_unaligned_bulk(
2086 				dev, dev->out_pipe,
2087 				param->length, param->iterations,
2088 				0, "test17");
2089 		break;
2090 
2091 	case 18:
2092 		if (dev->in_pipe == 0)
2093 			break;
2094 		dev_info(&intf->dev,
2095 			"TEST 18:  read odd addr %d bytes %u times core map\n",
2096 			param->length, param->iterations);
2097 
2098 		retval = test_unaligned_bulk(
2099 				dev, dev->in_pipe,
2100 				param->length, param->iterations,
2101 				0, "test18");
2102 		break;
2103 
2104 	/* Tests for bulk I/O using premapped coherent buffer and odd address */
2105 	case 19:
2106 		if (dev->out_pipe == 0)
2107 			break;
2108 		dev_info(&intf->dev,
2109 			"TEST 19:  write odd addr %d bytes %u times premapped\n",
2110 			param->length, param->iterations);
2111 
2112 		retval = test_unaligned_bulk(
2113 				dev, dev->out_pipe,
2114 				param->length, param->iterations,
2115 				URB_NO_TRANSFER_DMA_MAP, "test19");
2116 		break;
2117 
2118 	case 20:
2119 		if (dev->in_pipe == 0)
2120 			break;
2121 		dev_info(&intf->dev,
2122 			"TEST 20:  read odd addr %d bytes %u times premapped\n",
2123 			param->length, param->iterations);
2124 
2125 		retval = test_unaligned_bulk(
2126 				dev, dev->in_pipe,
2127 				param->length, param->iterations,
2128 				URB_NO_TRANSFER_DMA_MAP, "test20");
2129 		break;
2130 
2131 	/* control write tests with unaligned buffer */
2132 	case 21:
2133 		if (!dev->info->ctrl_out)
2134 			break;
2135 		dev_info(&intf->dev,
2136 				"TEST 21:  %d ep0out odd addr, %d..%d vary %d\n",
2137 				param->iterations,
2138 				realworld ? 1 : 0, param->length,
2139 				param->vary);
2140 		retval = ctrl_out(dev, param->iterations,
2141 				param->length, param->vary, 1);
2142 		break;
2143 
2144 	/* unaligned iso tests */
2145 	case 22:
2146 		if (dev->out_iso_pipe == 0 || param->sglen == 0)
2147 			break;
2148 		dev_info(&intf->dev,
2149 			"TEST 22:  write %d iso odd, %d entries of %d bytes\n",
2150 				param->iterations,
2151 				param->sglen, param->length);
2152 		retval = test_iso_queue(dev, param,
2153 				dev->out_iso_pipe, dev->iso_out, 1);
2154 		break;
2155 
2156 	case 23:
2157 		if (dev->in_iso_pipe == 0 || param->sglen == 0)
2158 			break;
2159 		dev_info(&intf->dev,
2160 			"TEST 23:  read %d iso odd, %d entries of %d bytes\n",
2161 				param->iterations,
2162 				param->sglen, param->length);
2163 		retval = test_iso_queue(dev, param,
2164 				dev->in_iso_pipe, dev->iso_in, 1);
2165 		break;
2166 
2167 	/* unlink URBs from a bulk-OUT queue */
2168 	case 24:
2169 		if (dev->out_pipe == 0 || !param->length || param->sglen < 4)
2170 			break;
2171 		retval = 0;
2172 		dev_info(&intf->dev, "TEST 17:  unlink from %d queues of "
2173 				"%d %d-byte writes\n",
2174 				param->iterations, param->sglen, param->length);
2175 		for (i = param->iterations; retval == 0 && i > 0; --i) {
2176 			retval = unlink_queued(dev, dev->out_pipe,
2177 						param->sglen, param->length);
2178 			if (retval) {
2179 				dev_err(&intf->dev,
2180 					"unlink queued writes failed %d, "
2181 					"iterations left %d\n", retval, i);
2182 				break;
2183 			}
2184 		}
2185 		break;
2186 
2187 	}
2188 	do_gettimeofday(&param->duration);
2189 	param->duration.tv_sec -= start.tv_sec;
2190 	param->duration.tv_usec -= start.tv_usec;
2191 	if (param->duration.tv_usec < 0) {
2192 		param->duration.tv_usec += 1000 * 1000;
2193 		param->duration.tv_sec -= 1;
2194 	}
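	/*
	 * Worked example of the borrow handling above (values illustrative):
	 * start = {5, 900000} and end = {7, 100000} give a raw difference of
	 * {2, -800000}, which normalizes to {1, 200000}, i.e. 1.2 seconds.
	 */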
2195 	mutex_unlock(&dev->lock);
2196 	return retval;
2197 }
2198 
2199 /*-------------------------------------------------------------------------*/
2200 
2201 static unsigned force_interrupt;
2202 module_param(force_interrupt, uint, 0);
2203 MODULE_PARM_DESC(force_interrupt, "0 = test default; else interrupt");
2204 
2205 #ifdef	GENERIC
2206 static unsigned short vendor;
2207 module_param(vendor, ushort, 0);
2208 MODULE_PARM_DESC(vendor, "vendor code (from usb-if)");
2209 
2210 static unsigned short product;
2211 module_param(product, ushort, 0);
2212 MODULE_PARM_DESC(product, "product code (from vendor)");
2213 #endif
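/* Usage sketch (the IDs below are placeholders, not a real test device):
 * with the GENERIC entry in the id_table below, an otherwise unclaimed
 * device can be bound for control-only testing by naming it at load time,
 * e.g.
 *
 *	modprobe usbtest vendor=0x1234 product=0x5678
 *
 * while force_interrupt=1 makes probe treat the fixed bulk endpoints of the
 * matched usbtest_info as interrupt endpoints instead.
 */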
2214 
2215 static int
2216 usbtest_probe(struct usb_interface *intf, const struct usb_device_id *id)
2217 {
2218 	struct usb_device	*udev;
2219 	struct usbtest_dev	*dev;
2220 	struct usbtest_info	*info;
2221 	char			*rtest, *wtest;
2222 	char			*irtest, *iwtest;
2223 
2224 	udev = interface_to_usbdev(intf);
2225 
2226 #ifdef	GENERIC
2227 	/* specify devices by module parameters? */
2228 	if (id->match_flags == 0) {
2229 		/* vendor match required, product match optional */
2230 		if (!vendor || le16_to_cpu(udev->descriptor.idVendor) != (u16)vendor)
2231 			return -ENODEV;
2232 		if (product && le16_to_cpu(udev->descriptor.idProduct) != (u16)product)
2233 			return -ENODEV;
2234 		dev_info(&intf->dev, "matched module params, "
2235 					"vend=0x%04x prod=0x%04x\n",
2236 				le16_to_cpu(udev->descriptor.idVendor),
2237 				le16_to_cpu(udev->descriptor.idProduct));
2238 	}
2239 #endif
2240 
2241 	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
2242 	if (!dev)
2243 		return -ENOMEM;
2244 	info = (struct usbtest_info *) id->driver_info;
2245 	dev->info = info;
2246 	mutex_init(&dev->lock);
2247 
2248 	dev->intf = intf;
2249 
2250 	/* cacheline-aligned scratch for i/o */
2251 	dev->buf = kmalloc(TBUF_SIZE, GFP_KERNEL);
2252 	if (dev->buf == NULL) {
2253 		kfree(dev);
2254 		return -ENOMEM;
2255 	}
2256 
2257 	/* NOTE this doesn't yet test the handful of differences that are
2258 	 * visible with high speed interrupts:  bigger maxpacket (1K) and
2259 	 * "high bandwidth" modes (up to 3 packets/uframe).
2260 	 */
2261 	rtest = wtest = "";
2262 	irtest = iwtest = "";
2263 	if (force_interrupt || udev->speed == USB_SPEED_LOW) {
2264 		if (info->ep_in) {
2265 			dev->in_pipe = usb_rcvintpipe(udev, info->ep_in);
2266 			rtest = " intr-in";
2267 		}
2268 		if (info->ep_out) {
2269 			dev->out_pipe = usb_sndintpipe(udev, info->ep_out);
2270 			wtest = " intr-out";
2271 		}
2272 	} else {
2273 		if (info->autoconf) {
2274 			int status;
2275 
2276 			status = get_endpoints(dev, intf);
2277 			if (status < 0) {
2278 				WARNING(dev, "couldn't get endpoints, %d\n",
2279 						status);
2280 				kfree(dev->buf);
2281 				kfree(dev);
2282 				return status;
2283 			}
2284 			/* may find bulk or ISO pipes */
2285 		} else {
2286 			if (info->ep_in)
2287 				dev->in_pipe = usb_rcvbulkpipe(udev,
2288 							info->ep_in);
2289 			if (info->ep_out)
2290 				dev->out_pipe = usb_sndbulkpipe(udev,
2291 							info->ep_out);
2292 		}
2293 		if (dev->in_pipe)
2294 			rtest = " bulk-in";
2295 		if (dev->out_pipe)
2296 			wtest = " bulk-out";
2297 		if (dev->in_iso_pipe)
2298 			irtest = " iso-in";
2299 		if (dev->out_iso_pipe)
2300 			iwtest = " iso-out";
2301 	}
2302 
2303 	usb_set_intfdata(intf, dev);
2304 	dev_info(&intf->dev, "%s\n", info->name);
2305 	dev_info(&intf->dev, "%s {control%s%s%s%s%s} tests%s\n",
2306 			usb_speed_string(udev->speed),
2307 			info->ctrl_out ? " in/out" : "",
2308 			rtest, wtest,
2309 			irtest, iwtest,
2310 			info->alt >= 0 ? " (+alt)" : "");
2311 	return 0;
2312 }
2313 
2314 static int usbtest_suspend(struct usb_interface *intf, pm_message_t message)
2315 {
2316 	return 0;
2317 }
2318 
2319 static int usbtest_resume(struct usb_interface *intf)
2320 {
2321 	return 0;
2322 }
2323 
2324 
2325 static void usbtest_disconnect(struct usb_interface *intf)
2326 {
2327 	struct usbtest_dev	*dev = usb_get_intfdata(intf);
2328 
2329 	usb_set_intfdata(intf, NULL);
2330 	dev_dbg(&intf->dev, "disconnect\n");
2331 	kfree(dev);
2332 }
2333 
2334 /* Basic testing only needs a device that can source or sink bulk traffic.
2335  * Any device can test control transfers (default with GENERIC binding).
2336  *
2337  * Several entries work with the default EP0 implementation that's built
2338  * into EZ-USB chips.  There's a default vendor ID which can be overridden
2339  * by (very) small config EEPROMS, but otherwise all these devices act
2340  * identically until firmware is loaded:  only EP0 works.  It turns out
2341  * to be easy to make other endpoints work, without modifying that EP0
2342  * behavior.  For now, we expect that kind of firmware.
2343  */
2344 
2345 /* an21xx or fx versions of ez-usb */
2346 static struct usbtest_info ez1_info = {
2347 	.name		= "EZ-USB device",
2348 	.ep_in		= 2,
2349 	.ep_out		= 2,
2350 	.alt		= 1,
2351 };
2352 
2353 /* fx2 version of ez-usb */
2354 static struct usbtest_info ez2_info = {
2355 	.name		= "FX2 device",
2356 	.ep_in		= 6,
2357 	.ep_out		= 2,
2358 	.alt		= 1,
2359 };
2360 
2361 /* ezusb family device with dedicated usb test firmware
2362  */
2363 static struct usbtest_info fw_info = {
2364 	.name		= "usb test device",
2365 	.ep_in		= 2,
2366 	.ep_out		= 2,
2367 	.alt		= 1,
2368 	.autoconf	= 1,		/* iso and ctrl_out need autoconf */
2369 	.ctrl_out	= 1,
2370 	.iso		= 1,		/* iso endpoints are #8 in/out */
2371 };
2372 
2373 /* peripheral running Linux and 'zero.c' test firmware, or
2374  * its user-mode cousin.  Different versions of this use
2375  * different hardware with the same vendor/product codes.
2376  * host side MUST rely on the endpoint descriptors.
2377  */
2378 static struct usbtest_info gz_info = {
2379 	.name		= "Linux gadget zero",
2380 	.autoconf	= 1,
2381 	.ctrl_out	= 1,
2382 	.alt		= 0,
2383 };
2384 
2385 static struct usbtest_info um_info = {
2386 	.name		= "Linux user mode test driver",
2387 	.autoconf	= 1,
2388 	.alt		= -1,
2389 };
2390 
2391 static struct usbtest_info um2_info = {
2392 	.name		= "Linux user mode ISO test driver",
2393 	.autoconf	= 1,
2394 	.iso		= 1,
2395 	.alt		= -1,
2396 };
2397 
2398 #ifdef IBOT2
2399 /* this is a nice source of high speed bulk data;
2400  * uses an FX2, with firmware provided in the device
2401  */
2402 static struct usbtest_info ibot2_info = {
2403 	.name		= "iBOT2 webcam",
2404 	.ep_in		= 2,
2405 	.alt		= -1,
2406 };
2407 #endif
2408 
2409 #ifdef GENERIC
2410 /* we can use any device to test control traffic */
2411 static struct usbtest_info generic_info = {
2412 	.name		= "Generic USB device",
2413 	.alt		= -1,
2414 };
2415 #endif
2416 
2417 
2418 static const struct usb_device_id id_table[] = {
2419 
2420 	/*-------------------------------------------------------------*/
2421 
2422 	/* EZ-USB devices which download firmware to replace (or in our
2423 	 * case augment) the default device implementation.
2424 	 */
2425 
2426 	/* generic EZ-USB FX controller */
2427 	{ USB_DEVICE(0x0547, 0x2235),
2428 		.driver_info = (unsigned long) &ez1_info,
2429 	},
2430 
2431 	/* CY3671 development board with EZ-USB FX */
2432 	{ USB_DEVICE(0x0547, 0x0080),
2433 		.driver_info = (unsigned long) &ez1_info,
2434 	},
2435 
2436 	/* generic EZ-USB FX2 controller (or development board) */
2437 	{ USB_DEVICE(0x04b4, 0x8613),
2438 		.driver_info = (unsigned long) &ez2_info,
2439 	},
2440 
2441 	/* re-enumerated usb test device firmware */
2442 	{ USB_DEVICE(0xfff0, 0xfff0),
2443 		.driver_info = (unsigned long) &fw_info,
2444 	},
2445 
2446 	/* "Gadget Zero" firmware runs under Linux */
2447 	{ USB_DEVICE(0x0525, 0xa4a0),
2448 		.driver_info = (unsigned long) &gz_info,
2449 	},
2450 
2451 	/* so does a user-mode variant */
2452 	{ USB_DEVICE(0x0525, 0xa4a4),
2453 		.driver_info = (unsigned long) &um_info,
2454 	},
2455 
2456 	/* ... and a user-mode variant that talks iso */
2457 	{ USB_DEVICE(0x0525, 0xa4a3),
2458 		.driver_info = (unsigned long) &um2_info,
2459 	},
2460 
2461 #ifdef KEYSPAN_19Qi
2462 	/* Keyspan 19qi uses an21xx (original EZ-USB) */
2463 	/* this does not coexist with the real Keyspan 19qi driver! */
2464 	{ USB_DEVICE(0x06cd, 0x010b),
2465 		.driver_info = (unsigned long) &ez1_info,
2466 	},
2467 #endif
2468 
2469 	/*-------------------------------------------------------------*/
2470 
2471 #ifdef IBOT2
2472 	/* iBOT2 makes a nice source of high speed bulk-in data */
2473 	/* this does not coexist with a real iBOT2 driver! */
2474 	{ USB_DEVICE(0x0b62, 0x0059),
2475 		.driver_info = (unsigned long) &ibot2_info,
2476 	},
2477 #endif
2478 
2479 	/*-------------------------------------------------------------*/
2480 
2481 #ifdef GENERIC
2482 	/* module params can specify devices to use for control tests */
2483 	{ .driver_info = (unsigned long) &generic_info, },
2484 #endif
2485 
2486 	/*-------------------------------------------------------------*/
2487 
2488 	{ }
2489 };
2490 MODULE_DEVICE_TABLE(usb, id_table);
2491 
2492 static struct usb_driver usbtest_driver = {
2493 	.name =		"usbtest",
2494 	.id_table =	id_table,
2495 	.probe =	usbtest_probe,
2496 	.unlocked_ioctl = usbtest_ioctl,
2497 	.disconnect =	usbtest_disconnect,
2498 	.suspend =	usbtest_suspend,
2499 	.resume =	usbtest_resume,
2500 };
2501 
2502 /*-------------------------------------------------------------------------*/
2503 
2504 static int __init usbtest_init(void)
2505 {
2506 #ifdef GENERIC
2507 	if (vendor)
2508 		pr_debug("params: vend=0x%04x prod=0x%04x\n", vendor, product);
2509 #endif
2510 	return usb_register(&usbtest_driver);
2511 }
2512 module_init(usbtest_init);
2513 
2514 static void __exit usbtest_exit(void)
2515 {
2516 	usb_deregister(&usbtest_driver);
2517 }
2518 module_exit(usbtest_exit);
2519 
2520 MODULE_DESCRIPTION("USB Core/HCD Testing Driver");
2521 MODULE_LICENSE("GPL");
2522 
2523