#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/scatterlist.h>
#include <linux/mutex.h>

#include <linux/usb.h>


/*-------------------------------------------------------------------------*/

static int override_alt = -1;
module_param_named(alt, override_alt, int, 0644);
MODULE_PARM_DESC(alt, ">= 0 to override altsetting selection");

/*-------------------------------------------------------------------------*/

/* FIXME make these public somewhere; usbdevfs.h? */
struct usbtest_param {
	/* inputs */
	unsigned	test_num;	/* 0..(TEST_CASES-1) */
	unsigned	iterations;
	unsigned	length;
	unsigned	vary;
	unsigned	sglen;

	/* outputs */
	struct timeval	duration;
};
#define USBTEST_REQUEST	_IOWR('U', 100, struct usbtest_param)

/*-------------------------------------------------------------------------*/

#define	GENERIC		/* let probe() bind using module params */

/* Some devices that can be used for testing will have "real" drivers.
 * Entries for those need to be enabled here by hand, after disabling
 * that "real" driver.
 */
//#define	IBOT2		/* grab iBOT2 webcams */
//#define	KEYSPAN_19Qi	/* grab un-renumerated serial adapter */

/*-------------------------------------------------------------------------*/

struct usbtest_info {
	const char	*name;
	u8		ep_in;		/* bulk/intr source */
	u8		ep_out;		/* bulk/intr sink */
	unsigned	autoconf:1;
	unsigned	ctrl_out:1;
	unsigned	iso:1;		/* try iso in/out */
	int		alt;
};

/* this is accessed only through usbfs ioctl calls.
 * one ioctl to issue a test ... one lock per device.
 * tests create other threads if they need them.
 * urbs and buffers are allocated dynamically,
 * and data generated deterministically.
 */
struct usbtest_dev {
	struct usb_interface	*intf;
	struct usbtest_info	*info;
	int			in_pipe;
	int			out_pipe;
	int			in_iso_pipe;
	int			out_iso_pipe;
	struct usb_endpoint_descriptor	*iso_in, *iso_out;
	struct mutex		lock;

#define TBUF_SIZE	256
	u8			*buf;
};

static struct usb_device *testdev_to_usbdev(struct usbtest_dev *test)
{
	return interface_to_usbdev(test->intf);
}

/* set up all urbs so they can be used with either bulk or interrupt */
#define	INTERRUPT_RATE		1	/* msec/transfer */

#define ERROR(tdev, fmt, args...) \
	dev_err(&(tdev)->intf->dev , fmt , ## args)
#define WARNING(tdev, fmt, args...) \
	dev_warn(&(tdev)->intf->dev , fmt , ## args)

#define GUARD_BYTE	0xA5

/*-------------------------------------------------------------------------*/

static int
get_endpoints(struct usbtest_dev *dev, struct usb_interface *intf)
{
	int				tmp;
	struct usb_host_interface	*alt;
	struct usb_host_endpoint	*in, *out;
	struct usb_host_endpoint	*iso_in, *iso_out;
	struct usb_device		*udev;

	for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
		unsigned	ep;

		in = out = NULL;
		iso_in = iso_out = NULL;
		alt = intf->altsetting + tmp;

		if (override_alt >= 0 &&
				override_alt != alt->desc.bAlternateSetting)
			continue;

		/* take the first altsetting with in-bulk + out-bulk;
		 * ignore other endpoints and altsettings.
		 */
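		/* (When the device claims iso support, the first iso-in and
		 * iso-out endpoints found here are recorded as well.)
		 */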
		for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
			struct usb_host_endpoint	*e;

			e = alt->endpoint + ep;
			switch (usb_endpoint_type(&e->desc)) {
			case USB_ENDPOINT_XFER_BULK:
				break;
			case USB_ENDPOINT_XFER_ISOC:
				if (dev->info->iso)
					goto try_iso;
				/* FALLTHROUGH */
			default:
				continue;
			}
			if (usb_endpoint_dir_in(&e->desc)) {
				if (!in)
					in = e;
			} else {
				if (!out)
					out = e;
			}
			continue;
try_iso:
			if (usb_endpoint_dir_in(&e->desc)) {
				if (!iso_in)
					iso_in = e;
			} else {
				if (!iso_out)
					iso_out = e;
			}
		}
		if ((in && out) || iso_in || iso_out)
			goto found;
	}
	return -EINVAL;

found:
	udev = testdev_to_usbdev(dev);
	dev->info->alt = alt->desc.bAlternateSetting;
	if (alt->desc.bAlternateSetting != 0) {
		tmp = usb_set_interface(udev,
				alt->desc.bInterfaceNumber,
				alt->desc.bAlternateSetting);
		if (tmp < 0)
			return tmp;
	}

	if (in) {
		dev->in_pipe = usb_rcvbulkpipe(udev,
			in->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
		dev->out_pipe = usb_sndbulkpipe(udev,
			out->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
	}
	if (iso_in) {
		dev->iso_in = &iso_in->desc;
		dev->in_iso_pipe = usb_rcvisocpipe(udev,
				iso_in->desc.bEndpointAddress
					& USB_ENDPOINT_NUMBER_MASK);
	}

	if (iso_out) {
		dev->iso_out = &iso_out->desc;
		dev->out_iso_pipe = usb_sndisocpipe(udev,
				iso_out->desc.bEndpointAddress
					& USB_ENDPOINT_NUMBER_MASK);
	}
	return 0;
}

/*-------------------------------------------------------------------------*/

/* Support for testing basic non-queued I/O streams.
 *
 * These just package urbs as requests that can be easily canceled.
 * Each urb's data buffer is dynamically allocated; callers can fill
 * them with non-zero test data (or test for it) when appropriate.
 */

static void simple_callback(struct urb *urb)
{
	complete(urb->context);
}

static struct urb *usbtest_alloc_urb(
	struct usb_device	*udev,
	int			pipe,
	unsigned long		bytes,
	unsigned		transfer_flags,
	unsigned		offset)
{
	struct urb		*urb;

	urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!urb)
		return urb;
	usb_fill_bulk_urb(urb, udev, pipe, NULL, bytes, simple_callback, NULL);
	urb->interval = (udev->speed == USB_SPEED_HIGH)
			? (INTERRUPT_RATE << 3)
			: INTERRUPT_RATE;
	urb->transfer_flags = transfer_flags;
	if (usb_pipein(pipe))
		urb->transfer_flags |= URB_SHORT_NOT_OK;

	if (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
		urb->transfer_buffer = usb_alloc_coherent(udev, bytes + offset,
			GFP_KERNEL, &urb->transfer_dma);
	else
		urb->transfer_buffer = kmalloc(bytes + offset, GFP_KERNEL);

	if (!urb->transfer_buffer) {
		usb_free_urb(urb);
		return NULL;
	}

	/* To test unaligned transfers add an offset and fill the
	 * unused memory with a guard value
	 */
	if (offset) {
		memset(urb->transfer_buffer, GUARD_BYTE, offset);
		urb->transfer_buffer += offset;
		if (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
			urb->transfer_dma += offset;
	}

	/* For inbound transfers use guard byte so that test fails if
	 * data not correctly copied
	 */
	memset(urb->transfer_buffer,
			usb_pipein(urb->pipe) ? GUARD_BYTE : 0,
			bytes);
	return urb;
}

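/*
 * When offset is nonzero, usbtest_alloc_urb() leaves guard bytes in front
 * of the (deliberately misaligned) transfer buffer:
 *
 *	allocation start                 transfer_buffer
 *	|<---- offset bytes of 0xA5 ---->|<-------- bytes -------->|
 *
 * simple_free_urb() later uses buffer_offset() to recover the original
 * allocation address from the shifted pointer before freeing it.
 */
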
static struct urb *simple_alloc_urb(
	struct usb_device	*udev,
	int			pipe,
	unsigned long		bytes)
{
	return usbtest_alloc_urb(udev, pipe, bytes, URB_NO_TRANSFER_DMA_MAP, 0);
}

static unsigned pattern;
static unsigned mod_pattern;
module_param_named(pattern, mod_pattern, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(mod_pattern, "i/o pattern (0 == zeroes)");

static inline void simple_fill_buf(struct urb *urb)
{
	unsigned	i;
	u8		*buf = urb->transfer_buffer;
	unsigned	len = urb->transfer_buffer_length;

	switch (pattern) {
	default:
		/* FALLTHROUGH */
	case 0:
		memset(buf, 0, len);
		break;
	case 1:			/* mod63 */
		for (i = 0; i < len; i++)
			*buf++ = (u8) (i % 63);
		break;
	}
}

static inline unsigned long buffer_offset(void *buf)
{
	return (unsigned long)buf & (ARCH_KMALLOC_MINALIGN - 1);
}

static int check_guard_bytes(struct usbtest_dev *tdev, struct urb *urb)
{
	u8 *buf = urb->transfer_buffer;
	u8 *guard = buf - buffer_offset(buf);
	unsigned i;

	for (i = 0; guard < buf; i++, guard++) {
		if (*guard != GUARD_BYTE) {
			ERROR(tdev, "guard byte[%d] %d (not %d)\n",
				i, *guard, GUARD_BYTE);
			return -EINVAL;
		}
	}
	return 0;
}

static int simple_check_buf(struct usbtest_dev *tdev, struct urb *urb)
{
	unsigned	i;
	u8		expected;
	u8		*buf = urb->transfer_buffer;
	unsigned	len = urb->actual_length;

	int ret = check_guard_bytes(tdev, urb);
	if (ret)
		return ret;

	for (i = 0; i < len; i++, buf++) {
		switch (pattern) {
		/* all-zeroes has no synchronization issues */
		case 0:
			expected = 0;
			break;
		/* mod63 stays in sync with short-terminated transfers,
		 * or otherwise when host and gadget agree on how large
		 * each usb transfer request should be.  resync is done
		 * with set_interface or set_config.
		 */
324 */ 325 case 1: /* mod63 */ 326 expected = i % 63; 327 break; 328 /* always fail unsupported patterns */ 329 default: 330 expected = !*buf; 331 break; 332 } 333 if (*buf == expected) 334 continue; 335 ERROR(tdev, "buf[%d] = %d (not %d)\n", i, *buf, expected); 336 return -EINVAL; 337 } 338 return 0; 339 } 340 341 static void simple_free_urb(struct urb *urb) 342 { 343 unsigned long offset = buffer_offset(urb->transfer_buffer); 344 345 if (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP) 346 usb_free_coherent( 347 urb->dev, 348 urb->transfer_buffer_length + offset, 349 urb->transfer_buffer - offset, 350 urb->transfer_dma - offset); 351 else 352 kfree(urb->transfer_buffer - offset); 353 usb_free_urb(urb); 354 } 355 356 static int simple_io( 357 struct usbtest_dev *tdev, 358 struct urb *urb, 359 int iterations, 360 int vary, 361 int expected, 362 const char *label 363 ) 364 { 365 struct usb_device *udev = urb->dev; 366 int max = urb->transfer_buffer_length; 367 struct completion completion; 368 int retval = 0; 369 370 urb->context = &completion; 371 while (retval == 0 && iterations-- > 0) { 372 init_completion(&completion); 373 if (usb_pipeout(urb->pipe)) { 374 simple_fill_buf(urb); 375 urb->transfer_flags |= URB_ZERO_PACKET; 376 } 377 retval = usb_submit_urb(urb, GFP_KERNEL); 378 if (retval != 0) 379 break; 380 381 /* NOTE: no timeouts; can't be broken out of by interrupt */ 382 wait_for_completion(&completion); 383 retval = urb->status; 384 urb->dev = udev; 385 if (retval == 0 && usb_pipein(urb->pipe)) 386 retval = simple_check_buf(tdev, urb); 387 388 if (vary) { 389 int len = urb->transfer_buffer_length; 390 391 len += vary; 392 len %= max; 393 if (len == 0) 394 len = (vary < max) ? vary : max; 395 urb->transfer_buffer_length = len; 396 } 397 398 /* FIXME if endpoint halted, clear halt (and log) */ 399 } 400 urb->transfer_buffer_length = max; 401 402 if (expected != retval) 403 dev_err(&udev->dev, 404 "%s failed, iterations left %d, status %d (not %d)\n", 405 label, iterations, retval, expected); 406 return retval; 407 } 408 409 410 /*-------------------------------------------------------------------------*/ 411 412 /* We use scatterlist primitives to test queued I/O. 413 * Yes, this also tests the scatterlist primitives. 414 */ 415 416 static void free_sglist(struct scatterlist *sg, int nents) 417 { 418 unsigned i; 419 420 if (!sg) 421 return; 422 for (i = 0; i < nents; i++) { 423 if (!sg_page(&sg[i])) 424 continue; 425 kfree(sg_virt(&sg[i])); 426 } 427 kfree(sg); 428 } 429 430 static struct scatterlist * 431 alloc_sglist(int nents, int max, int vary) 432 { 433 struct scatterlist *sg; 434 unsigned i; 435 unsigned size = max; 436 437 if (max == 0) 438 return NULL; 439 440 sg = kmalloc_array(nents, sizeof(*sg), GFP_KERNEL); 441 if (!sg) 442 return NULL; 443 sg_init_table(sg, nents); 444 445 for (i = 0; i < nents; i++) { 446 char *buf; 447 unsigned j; 448 449 buf = kzalloc(size, GFP_KERNEL); 450 if (!buf) { 451 free_sglist(sg, i); 452 return NULL; 453 } 454 455 /* kmalloc pages are always physically contiguous! */ 456 sg_set_buf(&sg[i], buf, size); 457 458 switch (pattern) { 459 case 0: 460 /* already zeroed */ 461 break; 462 case 1: 463 for (j = 0; j < size; j++) 464 *buf++ = (u8) (j % 63); 465 break; 466 } 467 468 if (vary) { 469 size += vary; 470 size %= max; 471 if (size == 0) 472 size = (vary < max) ? 
static int perform_sglist(
	struct usbtest_dev	*tdev,
	unsigned		iterations,
	int			pipe,
	struct usb_sg_request	*req,
	struct scatterlist	*sg,
	int			nents
)
{
	struct usb_device	*udev = testdev_to_usbdev(tdev);
	int			retval = 0;

	while (retval == 0 && iterations-- > 0) {
		retval = usb_sg_init(req, udev, pipe,
				(udev->speed == USB_SPEED_HIGH)
					? (INTERRUPT_RATE << 3)
					: INTERRUPT_RATE,
				sg, nents, 0, GFP_KERNEL);

		if (retval)
			break;
		usb_sg_wait(req);
		retval = req->status;

		/* FIXME check resulting data pattern */

		/* FIXME if endpoint halted, clear halt (and log) */
	}

	/* FIXME for unlink or fault handling tests, don't report
	 * failure if retval is as we expected ...
	 */
	if (retval)
		ERROR(tdev, "perform_sglist failed, "
				"iterations left %d, status %d\n",
				iterations, retval);
	return retval;
}


/*-------------------------------------------------------------------------*/

/* unqueued control message testing
 *
 * there's a nice set of device functional requirements in chapter 9 of the
 * usb 2.0 spec, which we can apply to ANY device, even ones that don't use
 * special test firmware.
 *
 * we know the device is configured (or suspended) by the time it's visible
 * through usbfs.  we can't change that, so we won't test enumeration (which
 * worked 'well enough' to get here, this time), power management (ditto),
 * or remote wakeup (which needs human interaction).
 */

static unsigned realworld = 1;
module_param(realworld, uint, 0);
MODULE_PARM_DESC(realworld, "clear to demand stricter spec compliance");

static int get_altsetting(struct usbtest_dev *dev)
{
	struct usb_interface	*iface = dev->intf;
	struct usb_device	*udev = interface_to_usbdev(iface);
	int			retval;

	retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
			USB_REQ_GET_INTERFACE, USB_DIR_IN|USB_RECIP_INTERFACE,
			0, iface->altsetting[0].desc.bInterfaceNumber,
			dev->buf, 1, USB_CTRL_GET_TIMEOUT);
	switch (retval) {
	case 1:
		return dev->buf[0];
	case 0:
		retval = -ERANGE;
		/* FALLTHROUGH */
	default:
		return retval;
	}
}

static int set_altsetting(struct usbtest_dev *dev, int alternate)
{
	struct usb_interface	*iface = dev->intf;
	struct usb_device	*udev;

	if (alternate < 0 || alternate >= 256)
		return -EINVAL;

	udev = interface_to_usbdev(iface);
	return usb_set_interface(udev,
			iface->altsetting[0].desc.bInterfaceNumber,
			alternate);
}

static int is_good_config(struct usbtest_dev *tdev, int len)
{
	struct usb_config_descriptor	*config;

	if (len < sizeof(*config))
		return 0;
	config = (struct usb_config_descriptor *) tdev->buf;

	switch (config->bDescriptorType) {
	case USB_DT_CONFIG:
	case USB_DT_OTHER_SPEED_CONFIG:
		if (config->bLength != 9) {
			ERROR(tdev, "bogus config descriptor length\n");
			return 0;
		}
		/* this bit 'must be 1' but often isn't */
		if (!realworld && !(config->bmAttributes & 0x80)) {
			ERROR(tdev, "high bit of config attributes not set\n");
			return 0;
		}
		if (config->bmAttributes & 0x1f) {	/* reserved == 0 */
			ERROR(tdev, "reserved config bits set\n");
			return 0;
		}
		break;
	default:
		return 0;
	}

	if (le16_to_cpu(config->wTotalLength) == len)		/* read it all */
		return 1;
	if (le16_to_cpu(config->wTotalLength) >= TBUF_SIZE)	/* max partial read */
		return 1;
	ERROR(tdev, "bogus config descriptor read size\n");
	return 0;
}

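/*
 * For reference, the 9-byte configuration descriptor header validated above
 * (USB 2.0 spec, table 9-10):
 *
 *	bLength			9
 *	bDescriptorType		CONFIG (2) or OTHER_SPEED_CONFIG (7)
 *	wTotalLength		config + interface + endpoint descriptors
 *	bNumInterfaces, bConfigurationValue, iConfiguration
 *	bmAttributes		bit 7 must be set, bits 4..0 reserved (zero)
 *	bMaxPower
 */
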
/* sanity test for standard requests working with usb_control_msg() and some
 * of the utility functions which use it.
 *
 * this doesn't test how endpoint halts behave or data toggles get set, since
 * we won't do I/O to bulk/interrupt endpoints here (which is how to change
 * halt or toggle).  toggle testing is impractical without support from hcds.
 *
 * this avoids failing devices linux would normally work with, by not testing
 * config/altsetting operations for devices that only support their defaults.
 * such devices rarely support those needless operations.
 *
 * NOTE that since this is a sanity test, it's not examining boundary cases
 * to see if usbcore, hcd, and device all behave right.  such testing would
 * involve varied read sizes and other operation sequences.
 */
static int ch9_postconfig(struct usbtest_dev *dev)
{
	struct usb_interface	*iface = dev->intf;
	struct usb_device	*udev = interface_to_usbdev(iface);
	int			i, alt, retval;

	/* [9.2.3] if there's more than one altsetting, we need to be able to
	 * set and get each one.  mostly trusts the descriptors from usbcore.
	 */
	for (i = 0; i < iface->num_altsetting; i++) {

		/* 9.2.3 constrains the range here */
		alt = iface->altsetting[i].desc.bAlternateSetting;
		if (alt < 0 || alt >= iface->num_altsetting) {
			dev_err(&iface->dev,
					"invalid alt [%d].bAltSetting = %d\n",
					i, alt);
		}

		/* [real world] get/set unimplemented if there's only one */
		if (realworld && iface->num_altsetting == 1)
			continue;

		/* [9.4.10] set_interface */
		retval = set_altsetting(dev, alt);
		if (retval) {
			dev_err(&iface->dev, "can't set_interface = %d, %d\n",
					alt, retval);
			return retval;
		}

		/* [9.4.4] get_interface always works */
		retval = get_altsetting(dev);
		if (retval != alt) {
			dev_err(&iface->dev, "get alt should be %d, was %d\n",
					alt, retval);
			return (retval < 0) ? retval : -EDOM;
		}

	}

	/* [real world] get_config unimplemented if there's only one */
	if (!realworld || udev->descriptor.bNumConfigurations != 1) {
		int	expected = udev->actconfig->desc.bConfigurationValue;

		/* [9.4.2] get_configuration always works
		 * ... although some cheap devices (like one TI Hub I've got)
		 * won't return config descriptors except before set_config.
		 */
		retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
				USB_REQ_GET_CONFIGURATION,
				USB_DIR_IN | USB_RECIP_DEVICE,
				0, 0, dev->buf, 1, USB_CTRL_GET_TIMEOUT);
		if (retval != 1 || dev->buf[0] != expected) {
			dev_err(&iface->dev, "get config --> %d %d (1 %d)\n",
					retval, dev->buf[0], expected);
			return (retval < 0) ? retval : -EDOM;
		}
	}

	/* there's always [9.4.3] a device descriptor [9.6.1] */
	retval = usb_get_descriptor(udev, USB_DT_DEVICE, 0,
			dev->buf, sizeof(udev->descriptor));
	if (retval != sizeof(udev->descriptor)) {
		dev_err(&iface->dev, "dev descriptor --> %d\n", retval);
		return (retval < 0) ? retval : -EDOM;
	}

	/*
	 * there's always [9.4.3] a bos device descriptor [9.6.2] in USB
	 * 3.0 spec
	 */
	if (le16_to_cpu(udev->descriptor.bcdUSB) >= 0x0300) {
		retval = usb_get_descriptor(udev, USB_DT_BOS, 0, dev->buf,
				sizeof(*udev->bos->desc));
		if (retval != sizeof(*udev->bos->desc)) {
			dev_err(&iface->dev, "bos descriptor --> %d\n", retval);
			return (retval < 0) ? retval : -EDOM;
		}
	}

	/* there's always [9.4.3] at least one config descriptor [9.6.3] */
	for (i = 0; i < udev->descriptor.bNumConfigurations; i++) {
		retval = usb_get_descriptor(udev, USB_DT_CONFIG, i,
				dev->buf, TBUF_SIZE);
		if (!is_good_config(dev, retval)) {
			dev_err(&iface->dev,
					"config [%d] descriptor --> %d\n",
					i, retval);
			return (retval < 0) ? retval : -EDOM;
		}

		/* FIXME cross-checking udev->config[i] to make sure usbcore
		 * parsed it right (etc) would be good testing paranoia
		 */
	}

	/* and sometimes [9.2.6.6] speed dependent descriptors */
	if (le16_to_cpu(udev->descriptor.bcdUSB) == 0x0200) {
		struct usb_qualifier_descriptor *d = NULL;

		/* device qualifier [9.6.2] */
		retval = usb_get_descriptor(udev,
				USB_DT_DEVICE_QUALIFIER, 0, dev->buf,
				sizeof(struct usb_qualifier_descriptor));
		if (retval == -EPIPE) {
			if (udev->speed == USB_SPEED_HIGH) {
				dev_err(&iface->dev,
						"hs dev qualifier --> %d\n",
						retval);
				return (retval < 0) ? retval : -EDOM;
			}
			/* usb2.0 but not high-speed capable; fine */
		} else if (retval != sizeof(struct usb_qualifier_descriptor)) {
			dev_err(&iface->dev, "dev qualifier --> %d\n", retval);
			return (retval < 0) ? retval : -EDOM;
		} else
			d = (struct usb_qualifier_descriptor *) dev->buf;

		/* might not have [9.6.2] any other-speed configs [9.6.4] */
		if (d) {
			unsigned max = d->bNumConfigurations;
			for (i = 0; i < max; i++) {
				retval = usb_get_descriptor(udev,
					USB_DT_OTHER_SPEED_CONFIG, i,
					dev->buf, TBUF_SIZE);
				if (!is_good_config(dev, retval)) {
					dev_err(&iface->dev,
						"other speed config --> %d\n",
						retval);
					return (retval < 0) ? retval : -EDOM;
				}
			}
		}
	}
	/* FIXME fetch strings from at least the device descriptor */

	/* [9.4.5] get_status always works */
	retval = usb_get_status(udev, USB_RECIP_DEVICE, 0, dev->buf);
	if (retval) {
		dev_err(&iface->dev, "get dev status --> %d\n", retval);
		return retval;
	}

	/* FIXME configuration.bmAttributes says if we could try to set/clear
	 * the device's remote wakeup feature ... if we can, test that here
	 */

	retval = usb_get_status(udev, USB_RECIP_INTERFACE,
			iface->altsetting[0].desc.bInterfaceNumber, dev->buf);
	if (retval) {
		dev_err(&iface->dev, "get interface status --> %d\n", retval);
		return retval;
	}
	/* FIXME get status for each endpoint in the interface */

	return 0;
}

/*-------------------------------------------------------------------------*/

/* use ch9 requests to test whether:
 *   (a) queues work for control, keeping N subtests queued and
 *       active (auto-resubmit) for M loops through the queue.
 *   (b) protocol stalls (control-only) will autorecover.
 *       it's not like bulk/intr; no halt clearing.
 *   (c) short control reads are reported and handled.
 *   (d) queues are always processed in-order
 */

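/*
 * Shared state for one queued-control run: 'count' is the number of
 * completions still expected, 'pending' the number of urbs currently
 * submitted, and 'last' the subcase most recently completed (used to
 * verify FIFO ordering).
 */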
struct ctrl_ctx {
	spinlock_t		lock;
	struct usbtest_dev	*dev;
	struct completion	complete;
	unsigned		count;
	unsigned		pending;
	int			status;
	struct urb		**urb;
	struct usbtest_param	*param;
	int			last;
};

#define NUM_SUBCASES	15	/* how many test subcases here? */

struct subcase {
	struct usb_ctrlrequest	setup;
	int			number;
	int			expected;
};

static void ctrl_complete(struct urb *urb)
{
	struct ctrl_ctx		*ctx = urb->context;
	struct usb_ctrlrequest	*reqp;
	struct subcase		*subcase;
	int			status = urb->status;

	reqp = (struct usb_ctrlrequest *)urb->setup_packet;
	subcase = container_of(reqp, struct subcase, setup);

	spin_lock(&ctx->lock);
	ctx->count--;
	ctx->pending--;

	/* queue must transfer and complete in fifo order, unless
	 * usb_unlink_urb() is used to unlink something not at the
	 * physical queue head (not tested).
	 */
	if (subcase->number > 0) {
		if ((subcase->number - ctx->last) != 1) {
			ERROR(ctx->dev,
				"subcase %d completed out of order, last %d\n",
				subcase->number, ctx->last);
			status = -EDOM;
			ctx->last = subcase->number;
			goto error;
		}
	}
	ctx->last = subcase->number;

	/* succeed or fault in only one way? */
	if (status == subcase->expected)
		status = 0;

	/* async unlink for cleanup? */
	else if (status != -ECONNRESET) {

		/* some faults are allowed, not required */
		if (subcase->expected > 0 && (
			  ((status == -subcase->expected	/* happened */
			   || status == 0))))			/* didn't */
			status = 0;
		/* sometimes more than one fault is allowed */
		else if (subcase->number == 12 && status == -EPIPE)
			status = 0;
		else
			ERROR(ctx->dev, "subtest %d error, status %d\n",
					subcase->number, status);
	}

	/* unexpected status codes mean errors; ideally, in hardware */
	if (status) {
error:
		if (ctx->status == 0) {
			int		i;

			ctx->status = status;
			ERROR(ctx->dev, "control queue %02x.%02x, err %d, "
					"%d left, subcase %d, len %d/%d\n",
					reqp->bRequestType, reqp->bRequest,
					status, ctx->count, subcase->number,
					urb->actual_length,
					urb->transfer_buffer_length);

			/* FIXME this "unlink everything" exit route should
			 * be a separate test case.
			 */

			/* unlink whatever's still pending */
			for (i = 1; i < ctx->param->sglen; i++) {
				struct urb	*u = ctx->urb[
							(i + subcase->number)
							% ctx->param->sglen];

				if (u == urb || !u->dev)
					continue;
				spin_unlock(&ctx->lock);
				status = usb_unlink_urb(u);
				spin_lock(&ctx->lock);
				switch (status) {
				case -EINPROGRESS:
				case -EBUSY:
				case -EIDRM:
					continue;
				default:
					ERROR(ctx->dev, "urb unlink --> %d\n",
							status);
				}
			}
			status = ctx->status;
		}
	}

	/* resubmit if we need to, else mark this as done */
	if ((status == 0) && (ctx->pending < ctx->count)) {
		status = usb_submit_urb(urb, GFP_ATOMIC);
		if (status != 0) {
			ERROR(ctx->dev,
				"can't resubmit ctrl %02x.%02x, err %d\n",
				reqp->bRequestType, reqp->bRequest, status);
			urb->dev = NULL;
		} else
			ctx->pending++;
	} else
		urb->dev = NULL;

	/* signal completion when nothing's queued */
	if (ctx->pending == 0)
		complete(&ctx->complete);
	spin_unlock(&ctx->lock);
}

static int
test_ctrl_queue(struct usbtest_dev *dev, struct usbtest_param *param)
{
	struct usb_device	*udev = testdev_to_usbdev(dev);
	struct urb		**urb;
	struct ctrl_ctx		context;
	int			i;

	if (param->sglen == 0 || param->iterations > UINT_MAX / param->sglen)
		return -EOPNOTSUPP;

	spin_lock_init(&context.lock);
	context.dev = dev;
	init_completion(&context.complete);
	context.count = param->sglen * param->iterations;
	context.pending = 0;
	context.status = -ENOMEM;
	context.param = param;
	context.last = -1;

	/* allocate and init the urbs we'll queue.
	 * as with bulk/intr sglists, sglen is the queue depth; it also
	 * controls which subtests run (more tests than sglen) or rerun.
	 */
	urb = kcalloc(param->sglen, sizeof(struct urb *), GFP_KERNEL);
	if (!urb)
		return -ENOMEM;
	for (i = 0; i < param->sglen; i++) {
		int			pipe = usb_rcvctrlpipe(udev, 0);
		unsigned		len;
		struct urb		*u;
		struct usb_ctrlrequest	req;
		struct subcase		*reqp;

		/* sign of this variable means:
		 *  -: tested code must return this (negative) error code
		 *  +: tested code may return this (negative too) error code
		 */
		int			expected = 0;

		/* requests here are mostly expected to succeed on any
		 * device, but some are chosen to trigger protocol stalls
		 * or short reads.
		 */
		memset(&req, 0, sizeof(req));
		req.bRequest = USB_REQ_GET_DESCRIPTOR;
		req.bRequestType = USB_DIR_IN|USB_RECIP_DEVICE;

		switch (i % NUM_SUBCASES) {
		case 0:		/* get device descriptor */
			req.wValue = cpu_to_le16(USB_DT_DEVICE << 8);
			len = sizeof(struct usb_device_descriptor);
			break;
		case 1:		/* get first config descriptor (only) */
			req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
			len = sizeof(struct usb_config_descriptor);
			break;
		case 2:		/* get altsetting (OFTEN STALLS) */
			req.bRequest = USB_REQ_GET_INTERFACE;
			req.bRequestType = USB_DIR_IN|USB_RECIP_INTERFACE;
			/* index = 0 means first interface */
			len = 1;
			expected = EPIPE;
			break;
		case 3:		/* get interface status */
			req.bRequest = USB_REQ_GET_STATUS;
			req.bRequestType = USB_DIR_IN|USB_RECIP_INTERFACE;
			/* interface 0 */
			len = 2;
			break;
		case 4:		/* get device status */
			req.bRequest = USB_REQ_GET_STATUS;
			req.bRequestType = USB_DIR_IN|USB_RECIP_DEVICE;
			len = 2;
			break;
		case 5:		/* get device qualifier (MAY STALL) */
			req.wValue = cpu_to_le16(USB_DT_DEVICE_QUALIFIER << 8);
			len = sizeof(struct usb_qualifier_descriptor);
			if (udev->speed != USB_SPEED_HIGH)
				expected = EPIPE;
			break;
		case 6:		/* get first config descriptor, plus interface */
			req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
			len = sizeof(struct usb_config_descriptor);
			len += sizeof(struct usb_interface_descriptor);
			break;
		case 7:		/* get interface descriptor (ALWAYS STALLS) */
			req.wValue = cpu_to_le16(USB_DT_INTERFACE << 8);
			/* interface == 0 */
			len = sizeof(struct usb_interface_descriptor);
			expected = -EPIPE;
			break;
		/* NOTE: two consecutive stalls in the queue here.
		 * that tests fault recovery a bit more aggressively. */
		case 8:		/* clear endpoint halt (MAY STALL) */
			req.bRequest = USB_REQ_CLEAR_FEATURE;
			req.bRequestType = USB_RECIP_ENDPOINT;
			/* wValue 0 == ep halt */
			/* wIndex 0 == ep0 (shouldn't halt!) */
			len = 0;
			pipe = usb_sndctrlpipe(udev, 0);
			expected = EPIPE;
			break;
		case 9:		/* get endpoint status */
			req.bRequest = USB_REQ_GET_STATUS;
			req.bRequestType = USB_DIR_IN|USB_RECIP_ENDPOINT;
			/* endpoint 0 */
			len = 2;
			break;
		case 10:	/* trigger short read (EREMOTEIO) */
			req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
			len = 1024;
			expected = -EREMOTEIO;
			break;
		/* NOTE: two consecutive _different_ faults in the queue. */
		case 11:	/* get endpoint descriptor (ALWAYS STALLS) */
			req.wValue = cpu_to_le16(USB_DT_ENDPOINT << 8);
			/* endpoint == 0 */
			len = sizeof(struct usb_interface_descriptor);
			expected = EPIPE;
			break;
		/* NOTE: sometimes even a third fault in the queue! */
		case 12:	/* get string 0 descriptor (MAY STALL) */
			req.wValue = cpu_to_le16(USB_DT_STRING << 8);
			/* string == 0, for language IDs */
			len = sizeof(struct usb_interface_descriptor);
			/* may succeed when > 4 languages */
			expected = EREMOTEIO;	/* or EPIPE, if no strings */
			break;
		case 13:	/* short read, resembling case 10 */
			req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
			/* last data packet "should" be DATA1, not DATA0 */
			if (udev->speed == USB_SPEED_SUPER)
				len = 1024 - 512;
			else
				len = 1024 - udev->descriptor.bMaxPacketSize0;
			expected = -EREMOTEIO;
			break;
		case 14:	/* short read; try to fill the last packet */
			req.wValue = cpu_to_le16((USB_DT_DEVICE << 8) | 0);
			/* device descriptor size == 18 bytes */
			len = udev->descriptor.bMaxPacketSize0;
			if (udev->speed == USB_SPEED_SUPER)
				len = 512;
			switch (len) {
			case 8:
				len = 24;
				break;
			case 16:
				len = 32;
				break;
			}
			expected = -EREMOTEIO;
			break;
		default:
			ERROR(dev, "bogus number of ctrl queue testcases!\n");
			context.status = -EINVAL;
			goto cleanup;
		}
		req.wLength = cpu_to_le16(len);
		urb[i] = u = simple_alloc_urb(udev, pipe, len);
		if (!u)
			goto cleanup;

		reqp = kmalloc(sizeof(*reqp), GFP_KERNEL);
		if (!reqp)
			goto cleanup;
		reqp->setup = req;
		reqp->number = i % NUM_SUBCASES;
		reqp->expected = expected;
		u->setup_packet = (char *) &reqp->setup;

		u->context = &context;
		u->complete = ctrl_complete;
	}

	/* queue the urbs */
	context.urb = urb;
	spin_lock_irq(&context.lock);
	for (i = 0; i < param->sglen; i++) {
		context.status = usb_submit_urb(urb[i], GFP_ATOMIC);
		if (context.status != 0) {
			ERROR(dev, "can't submit urb[%d], status %d\n",
					i, context.status);
			context.count = context.pending;
			break;
		}
		context.pending++;
	}
	spin_unlock_irq(&context.lock);

	/* FIXME set timer and time out; provide a disconnect hook */

	/* wait for the last one to complete */
	if (context.pending > 0)
		wait_for_completion(&context.complete);

cleanup:
	for (i = 0; i < param->sglen; i++) {
		if (!urb[i])
			continue;
		urb[i]->dev = udev;
		kfree(urb[i]->setup_packet);
		simple_free_urb(urb[i]);
	}
	kfree(urb);
	return context.status;
}
#undef NUM_SUBCASES


/*-------------------------------------------------------------------------*/

static void unlink1_callback(struct urb *urb)
{
	int	status = urb->status;

	/* we "know" -EPIPE (stall) never happens */
	if (!status)
		status = usb_submit_urb(urb, GFP_ATOMIC);
	if (status) {
		urb->status = status;
		complete(urb->context);
	}
}

static int unlink1(struct usbtest_dev *dev, int pipe, int size, int async)
{
	struct urb		*urb;
	struct completion	completion;
	int			retval = 0;

	init_completion(&completion);
	urb = simple_alloc_urb(testdev_to_usbdev(dev), pipe, size);
	if (!urb)
		return -ENOMEM;
	urb->context = &completion;
	urb->complete = unlink1_callback;

	/* keep the endpoint busy.  there are lots of hc/hcd-internal
	 * states, and testing should get to all of them over time.
	 *
	 * FIXME want additional tests for when endpoint is STALLing
	 * due to errors, or is just NAKing requests.
	 */
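	/* unlink1_callback() resubmits on success, so this urb keeps the
	 * endpoint busy until it is unlinked or killed below.
	 */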
1170 */ 1171 retval = usb_submit_urb(urb, GFP_KERNEL); 1172 if (retval != 0) { 1173 dev_err(&dev->intf->dev, "submit fail %d\n", retval); 1174 return retval; 1175 } 1176 1177 /* unlinking that should always work. variable delay tests more 1178 * hcd states and code paths, even with little other system load. 1179 */ 1180 msleep(jiffies % (2 * INTERRUPT_RATE)); 1181 if (async) { 1182 while (!completion_done(&completion)) { 1183 retval = usb_unlink_urb(urb); 1184 1185 switch (retval) { 1186 case -EBUSY: 1187 case -EIDRM: 1188 /* we can't unlink urbs while they're completing 1189 * or if they've completed, and we haven't 1190 * resubmitted. "normal" drivers would prevent 1191 * resubmission, but since we're testing unlink 1192 * paths, we can't. 1193 */ 1194 ERROR(dev, "unlink retry\n"); 1195 continue; 1196 case 0: 1197 case -EINPROGRESS: 1198 break; 1199 1200 default: 1201 dev_err(&dev->intf->dev, 1202 "unlink fail %d\n", retval); 1203 return retval; 1204 } 1205 1206 break; 1207 } 1208 } else 1209 usb_kill_urb(urb); 1210 1211 wait_for_completion(&completion); 1212 retval = urb->status; 1213 simple_free_urb(urb); 1214 1215 if (async) 1216 return (retval == -ECONNRESET) ? 0 : retval - 1000; 1217 else 1218 return (retval == -ENOENT || retval == -EPERM) ? 1219 0 : retval - 2000; 1220 } 1221 1222 static int unlink_simple(struct usbtest_dev *dev, int pipe, int len) 1223 { 1224 int retval = 0; 1225 1226 /* test sync and async paths */ 1227 retval = unlink1(dev, pipe, len, 1); 1228 if (!retval) 1229 retval = unlink1(dev, pipe, len, 0); 1230 return retval; 1231 } 1232 1233 /*-------------------------------------------------------------------------*/ 1234 1235 struct queued_ctx { 1236 struct completion complete; 1237 atomic_t pending; 1238 unsigned num; 1239 int status; 1240 struct urb **urbs; 1241 }; 1242 1243 static void unlink_queued_callback(struct urb *urb) 1244 { 1245 int status = urb->status; 1246 struct queued_ctx *ctx = urb->context; 1247 1248 if (ctx->status) 1249 goto done; 1250 if (urb == ctx->urbs[ctx->num - 4] || urb == ctx->urbs[ctx->num - 2]) { 1251 if (status == -ECONNRESET) 1252 goto done; 1253 /* What error should we report if the URB completed normally? */ 1254 } 1255 if (status != 0) 1256 ctx->status = status; 1257 1258 done: 1259 if (atomic_dec_and_test(&ctx->pending)) 1260 complete(&ctx->complete); 1261 } 1262 1263 static int unlink_queued(struct usbtest_dev *dev, int pipe, unsigned num, 1264 unsigned size) 1265 { 1266 struct queued_ctx ctx; 1267 struct usb_device *udev = testdev_to_usbdev(dev); 1268 void *buf; 1269 dma_addr_t buf_dma; 1270 int i; 1271 int retval = -ENOMEM; 1272 1273 init_completion(&ctx.complete); 1274 atomic_set(&ctx.pending, 1); /* One more than the actual value */ 1275 ctx.num = num; 1276 ctx.status = 0; 1277 1278 buf = usb_alloc_coherent(udev, size, GFP_KERNEL, &buf_dma); 1279 if (!buf) 1280 return retval; 1281 memset(buf, 0, size); 1282 1283 /* Allocate and init the urbs we'll queue */ 1284 ctx.urbs = kcalloc(num, sizeof(struct urb *), GFP_KERNEL); 1285 if (!ctx.urbs) 1286 goto free_buf; 1287 for (i = 0; i < num; i++) { 1288 ctx.urbs[i] = usb_alloc_urb(0, GFP_KERNEL); 1289 if (!ctx.urbs[i]) 1290 goto free_urbs; 1291 usb_fill_bulk_urb(ctx.urbs[i], udev, pipe, buf, size, 1292 unlink_queued_callback, &ctx); 1293 ctx.urbs[i]->transfer_dma = buf_dma; 1294 ctx.urbs[i]->transfer_flags = URB_NO_TRANSFER_DMA_MAP; 1295 } 1296 1297 /* Submit all the URBs and then unlink URBs num - 4 and num - 2. 
	for (i = 0; i < num; i++) {
		atomic_inc(&ctx.pending);
		retval = usb_submit_urb(ctx.urbs[i], GFP_KERNEL);
		if (retval != 0) {
			dev_err(&dev->intf->dev, "submit urbs[%d] fail %d\n",
					i, retval);
			atomic_dec(&ctx.pending);
			ctx.status = retval;
			break;
		}
	}
	if (i == num) {
		usb_unlink_urb(ctx.urbs[num - 4]);
		usb_unlink_urb(ctx.urbs[num - 2]);
	} else {
		while (--i >= 0)
			usb_unlink_urb(ctx.urbs[i]);
	}

	if (atomic_dec_and_test(&ctx.pending))		/* The extra count */
		complete(&ctx.complete);
	wait_for_completion(&ctx.complete);
	retval = ctx.status;

 free_urbs:
	for (i = 0; i < num; i++)
		usb_free_urb(ctx.urbs[i]);
	kfree(ctx.urbs);
 free_buf:
	usb_free_coherent(udev, size, buf, buf_dma);
	return retval;
}

/*-------------------------------------------------------------------------*/

static int verify_not_halted(struct usbtest_dev *tdev, int ep, struct urb *urb)
{
	int	retval;
	u16	status;

	/* shouldn't look or act halted */
	retval = usb_get_status(urb->dev, USB_RECIP_ENDPOINT, ep, &status);
	if (retval < 0) {
		ERROR(tdev, "ep %02x couldn't get no-halt status, %d\n",
				ep, retval);
		return retval;
	}
	if (status != 0) {
		ERROR(tdev, "ep %02x bogus status: %04x != 0\n", ep, status);
		return -EINVAL;
	}
	retval = simple_io(tdev, urb, 1, 0, 0, __func__);
	if (retval != 0)
		return -EINVAL;
	return 0;
}

static int verify_halted(struct usbtest_dev *tdev, int ep, struct urb *urb)
{
	int	retval;
	u16	status;

	/* should look and act halted */
	retval = usb_get_status(urb->dev, USB_RECIP_ENDPOINT, ep, &status);
	if (retval < 0) {
		ERROR(tdev, "ep %02x couldn't get halt status, %d\n",
				ep, retval);
		return retval;
	}
	if (status != 1) {
		ERROR(tdev, "ep %02x bogus status: %04x != 1\n", ep, status);
		return -EINVAL;
	}
	retval = simple_io(tdev, urb, 1, 0, -EPIPE, __func__);
	if (retval != -EPIPE)
		return -EINVAL;
	retval = simple_io(tdev, urb, 1, 0, -EPIPE, "verify_still_halted");
	if (retval != -EPIPE)
		return -EINVAL;
	return 0;
}

static int test_halt(struct usbtest_dev *tdev, int ep, struct urb *urb)
{
	int	retval;

	/* shouldn't look or act halted now */
	retval = verify_not_halted(tdev, ep, urb);
	if (retval < 0)
		return retval;

	/* set halt (protocol test only), verify it worked */
	retval = usb_control_msg(urb->dev, usb_sndctrlpipe(urb->dev, 0),
			USB_REQ_SET_FEATURE, USB_RECIP_ENDPOINT,
			USB_ENDPOINT_HALT, ep,
			NULL, 0, USB_CTRL_SET_TIMEOUT);
	if (retval < 0) {
		ERROR(tdev, "ep %02x couldn't set halt, %d\n", ep, retval);
		return retval;
	}
	retval = verify_halted(tdev, ep, urb);
	if (retval < 0)
		return retval;

	/* clear halt (tests API + protocol), verify it worked */
	retval = usb_clear_halt(urb->dev, urb->pipe);
	if (retval < 0) {
		ERROR(tdev, "ep %02x couldn't clear halt, %d\n", ep, retval);
		return retval;
	}
	retval = verify_not_halted(tdev, ep, urb);
	if (retval < 0)
		return retval;

	/* NOTE: could also verify SET_INTERFACE clear halts ... */

	return 0;
}

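/*
 * Run the halt/clear-halt protocol test against whichever bulk endpoints
 * were discovered; the urb allocated below (its pipe filled in per
 * endpoint) is just the I/O probe used to confirm the halt state.
 */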
static int halt_simple(struct usbtest_dev *dev)
{
	int			ep;
	int			retval = 0;
	struct urb		*urb;
	struct usb_device	*udev = testdev_to_usbdev(dev);

	if (udev->speed == USB_SPEED_SUPER)
		urb = simple_alloc_urb(udev, 0, 1024);
	else
		urb = simple_alloc_urb(udev, 0, 512);
	if (urb == NULL)
		return -ENOMEM;

	if (dev->in_pipe) {
		ep = usb_pipeendpoint(dev->in_pipe) | USB_DIR_IN;
		urb->pipe = dev->in_pipe;
		retval = test_halt(dev, ep, urb);
		if (retval < 0)
			goto done;
	}

	if (dev->out_pipe) {
		ep = usb_pipeendpoint(dev->out_pipe);
		urb->pipe = dev->out_pipe;
		retval = test_halt(dev, ep, urb);
	}
done:
	simple_free_urb(urb);
	return retval;
}

/*-------------------------------------------------------------------------*/

/* Control OUT tests use the vendor control requests from Intel's
 * USB 2.0 compliance test device:  write a buffer, read it back.
 *
 * Intel's spec only _requires_ that it work for one packet, which
 * is pretty weak.  Some HCDs place limits here; most devices will
 * need to be able to handle more than one OUT data packet.  We'll
 * try whatever we're told to try.
 */
static int ctrl_out(struct usbtest_dev *dev,
		unsigned count, unsigned length, unsigned vary, unsigned offset)
{
	unsigned		i, j, len;
	int			retval;
	u8			*buf;
	char			*what = "?";
	struct usb_device	*udev;

	if (length < 1 || length > 0xffff || vary >= length)
		return -EINVAL;

	buf = kmalloc(length + offset, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	buf += offset;
	udev = testdev_to_usbdev(dev);
	len = length;
	retval = 0;

	/* NOTE: hardware might well act differently if we pushed it
	 * with lots of back-to-back queued requests.
	 */
	for (i = 0; i < count; i++) {
		/* write patterned data */
		for (j = 0; j < len; j++)
			buf[j] = i + j;
		retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
				0x5b, USB_DIR_OUT|USB_TYPE_VENDOR,
				0, 0, buf, len, USB_CTRL_SET_TIMEOUT);
		if (retval != len) {
			what = "write";
			if (retval >= 0) {
				ERROR(dev, "ctrl_out, wlen %d (expected %d)\n",
						retval, len);
				retval = -EBADMSG;
			}
			break;
		}

		/* read it back -- assuming nothing intervened!! */
		retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
				0x5c, USB_DIR_IN|USB_TYPE_VENDOR,
				0, 0, buf, len, USB_CTRL_GET_TIMEOUT);
		if (retval != len) {
			what = "read";
			if (retval >= 0) {
				ERROR(dev, "ctrl_out, rlen %d (expected %d)\n",
						retval, len);
				retval = -EBADMSG;
			}
			break;
		}

		/* fail if we can't verify */
		for (j = 0; j < len; j++) {
			if (buf[j] != (u8) (i + j)) {
				ERROR(dev, "ctrl_out, byte %d is %d not %d\n",
					j, buf[j], (u8) i + j);
				retval = -EBADMSG;
				break;
			}
		}
		if (retval < 0) {
			what = "verify";
			break;
		}

		len += vary;

		/* [real world] the "zero bytes IN" case isn't really used.
		 * hardware can easily trip up in this weird case, since its
		 * status stage is IN, not OUT like other ep0in transfers.
		 */
		if (len > length)
			len = realworld ? 1 : 0;
	}

	if (retval < 0)
		ERROR(dev, "ctrl_out %s failed, code %d, count %d\n",
			what, retval, i);

	kfree(buf - offset);
	return retval;
}

/*-------------------------------------------------------------------------*/

/* ISO tests ... mimics common usage
 *  - buffer length is split into N packets (mostly maxpacket sized)
 *  - multi-buffers according to sglen
 */

struct iso_context {
	unsigned		count;
	unsigned		pending;
	spinlock_t		lock;
	struct completion	done;
	int			submit_error;
	unsigned long		errors;
	unsigned long		packet_count;
	struct usbtest_dev	*dev;
};

static void iso_callback(struct urb *urb)
{
	struct iso_context	*ctx = urb->context;

	spin_lock(&ctx->lock);
	ctx->count--;

	ctx->packet_count += urb->number_of_packets;
	if (urb->error_count > 0)
		ctx->errors += urb->error_count;
	else if (urb->status != 0)
		ctx->errors += urb->number_of_packets;
	else if (urb->actual_length != urb->transfer_buffer_length)
		ctx->errors++;
	else if (check_guard_bytes(ctx->dev, urb) != 0)
		ctx->errors++;

	if (urb->status == 0 && ctx->count > (ctx->pending - 1)
			&& !ctx->submit_error) {
		int status = usb_submit_urb(urb, GFP_ATOMIC);
		switch (status) {
		case 0:
			goto done;
		default:
			dev_err(&ctx->dev->intf->dev,
					"iso resubmit err %d\n",
					status);
			/* FALLTHROUGH */
		case -ENODEV:			/* disconnected */
		case -ESHUTDOWN:		/* endpoint disabled */
			ctx->submit_error = 1;
			break;
		}
	}

	ctx->pending--;
	if (ctx->pending == 0) {
		if (ctx->errors)
			dev_err(&ctx->dev->intf->dev,
				"iso test, %lu errors out of %lu\n",
				ctx->errors, ctx->packet_count);
		complete(&ctx->done);
	}
done:
	spin_unlock(&ctx->lock);
}

static struct urb *iso_alloc_urb(
	struct usb_device	*udev,
	int			pipe,
	struct usb_endpoint_descriptor	*desc,
	long			bytes,
	unsigned		offset
)
{
	struct urb		*urb;
	unsigned		i, maxp, packets;

	if (bytes < 0 || !desc)
		return NULL;
	maxp = 0x7ff & usb_endpoint_maxp(desc);
	maxp *= 1 + (0x3 & (usb_endpoint_maxp(desc) >> 11));
	packets = DIV_ROUND_UP(bytes, maxp);

	urb = usb_alloc_urb(packets, GFP_KERNEL);
	if (!urb)
		return urb;
	urb->dev = udev;
	urb->pipe = pipe;

	urb->number_of_packets = packets;
	urb->transfer_buffer_length = bytes;
	urb->transfer_buffer = usb_alloc_coherent(udev, bytes + offset,
							GFP_KERNEL,
							&urb->transfer_dma);
	if (!urb->transfer_buffer) {
		usb_free_urb(urb);
		return NULL;
	}
	if (offset) {
		memset(urb->transfer_buffer, GUARD_BYTE, offset);
		urb->transfer_buffer += offset;
		urb->transfer_dma += offset;
	}
	/* For inbound transfers use guard byte so that test fails if
	 * data not correctly copied
	 */
	memset(urb->transfer_buffer,
			usb_pipein(urb->pipe) ? GUARD_BYTE : 0,
			bytes);

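	/* maxp above already folds in the high-bandwidth multiplier, so each
	 * frame descriptor below may carry up to 3 packets' worth of data on
	 * high-speed high-bandwidth endpoints.
	 */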
	for (i = 0; i < packets; i++) {
		/* here, only the last packet will be short */
		urb->iso_frame_desc[i].length = min((unsigned) bytes, maxp);
		bytes -= urb->iso_frame_desc[i].length;

		urb->iso_frame_desc[i].offset = maxp * i;
	}

	urb->complete = iso_callback;
	/* urb->context = SET BY CALLER */
	urb->interval = 1 << (desc->bInterval - 1);
	urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP;
	return urb;
}

static int
test_iso_queue(struct usbtest_dev *dev, struct usbtest_param *param,
		int pipe, struct usb_endpoint_descriptor *desc, unsigned offset)
{
	struct iso_context	context;
	struct usb_device	*udev;
	unsigned		i;
	unsigned long		packets = 0;
	int			status = 0;
	struct urb		*urbs[10];	/* FIXME no limit */

	if (param->sglen > 10)
		return -EDOM;

	memset(&context, 0, sizeof(context));
	context.count = param->iterations * param->sglen;
	context.dev = dev;
	init_completion(&context.done);
	spin_lock_init(&context.lock);

	memset(urbs, 0, sizeof(urbs));
	udev = testdev_to_usbdev(dev);
	dev_info(&dev->intf->dev,
		"... iso period %d %sframes, wMaxPacket %04x\n",
		1 << (desc->bInterval - 1),
		(udev->speed == USB_SPEED_HIGH) ? "micro" : "",
		usb_endpoint_maxp(desc));

	for (i = 0; i < param->sglen; i++) {
		urbs[i] = iso_alloc_urb(udev, pipe, desc,
					param->length, offset);
		if (!urbs[i]) {
			status = -ENOMEM;
			goto fail;
		}
		packets += urbs[i]->number_of_packets;
		urbs[i]->context = &context;
	}
	packets *= param->iterations;
	dev_info(&dev->intf->dev,
		"... total %lu msec (%lu packets)\n",
		(packets * (1 << (desc->bInterval - 1)))
			/ ((udev->speed == USB_SPEED_HIGH) ? 8 : 1),
		packets);

	spin_lock_irq(&context.lock);
	for (i = 0; i < param->sglen; i++) {
		++context.pending;
		status = usb_submit_urb(urbs[i], GFP_ATOMIC);
		if (status < 0) {
			ERROR(dev, "submit iso[%d], error %d\n", i, status);
			if (i == 0) {
				spin_unlock_irq(&context.lock);
				goto fail;
			}

			simple_free_urb(urbs[i]);
			urbs[i] = NULL;
			context.pending--;
			context.submit_error = 1;
			break;
		}
	}
	spin_unlock_irq(&context.lock);

	wait_for_completion(&context.done);

	for (i = 0; i < param->sglen; i++) {
		if (urbs[i])
			simple_free_urb(urbs[i]);
	}
	/*
	 * Isochronous transfers are expected to fail sometimes.  As an
	 * arbitrary limit, we will report an error if any submissions
	 * fail or if the transfer failure rate is > 10%.
	 */
	if (status != 0)
		;
	else if (context.submit_error)
		status = -EACCES;
	else if (context.errors > context.packet_count / 10)
		status = -EIO;
	return status;

fail:
	for (i = 0; i < param->sglen; i++) {
		if (urbs[i])
			simple_free_urb(urbs[i]);
	}
	return status;
}

static int test_unaligned_bulk(
	struct usbtest_dev *tdev,
	int pipe,
	unsigned length,
	int iterations,
	unsigned transfer_flags,
	const char *label)
{
	int retval;
	struct urb *urb = usbtest_alloc_urb(
		testdev_to_usbdev(tdev), pipe, length, transfer_flags, 1);

	if (!urb)
		return -ENOMEM;

	retval = simple_io(tdev, urb, iterations, 0, 0, label);
	simple_free_urb(urb);
	return retval;
}

/*-------------------------------------------------------------------------*/

/* We only have this one interface to user space, through usbfs.
 * User mode code can scan usbfs to find N different devices (maybe on
 * different busses) to use when testing, and allocate one thread per
 * test.  So discovery is simplified, and we have no device naming issues.
 *
 * Don't use these only as stress/load tests.  Use them along with other
 * USB bus activity:  plugging, unplugging, mousing, mp3 playback, video
 * capture, and so on.  Run different tests at different times, in
 * different sequences.  Nothing here should interact with other devices,
 * except indirectly by consuming USB bandwidth and CPU resources for test
 * threads and request completion.  But the only way to know that for sure
 * is to test when HC queues are in use by many devices.
 *
 * WARNING: Because usbfs grabs udev->dev.sem before calling this ioctl(),
 * it locks out usbcore in certain code paths.  Notably, if you disconnect
 * the device-under-test, khubd will block forever waiting for the ioctl
 * to complete ... so that usb_disconnect() can abort the pending urbs and
 * then call usbtest_disconnect().  To abort a test, you're best off just
 * killing the userspace task and waiting for it to exit.
 */

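/*
 * Illustrative userspace sketch (not part of this driver): roughly how a
 * harness such as the testusb tool drives the USBTEST_REQUEST ioctl defined
 * near the top of this file, wrapping it in USBDEVFS_IOCTL so that usbfs
 * hands it to the bound interface driver.  The device path and interface
 * number are assumptions; error handling is elided.
 *
 *	struct usbtest_param param = {
 *		.test_num   = 1,	// simple bulk writes
 *		.iterations = 1000,
 *		.length     = 512,
 *	};
 *	struct usbdevfs_ioctl wrapper = {
 *		.ifno       = 0,	// interface bound to usbtest
 *		.ioctl_code = USBTEST_REQUEST,
 *		.data       = &param,
 *	};
 *	int fd = open("/dev/bus/usb/001/002", O_RDWR);	// hypothetical path
 *
 *	if (ioctl(fd, USBDEVFS_IOCTL, &wrapper) < 0)
 *		perror("usbtest");
 *	else
 *		printf("%ld.%06ld s\n", param.duration.tv_sec,
 *				param.duration.tv_usec);
 */
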
1744 */ 1745 if (status != 0) 1746 ; 1747 else if (context.submit_error) 1748 status = -EACCES; 1749 else if (context.errors > context.packet_count / 10) 1750 status = -EIO; 1751 return status; 1752 1753 fail: 1754 for (i = 0; i < param->sglen; i++) { 1755 if (urbs[i]) 1756 simple_free_urb(urbs[i]); 1757 } 1758 return status; 1759 } 1760 1761 static int test_unaligned_bulk( 1762 struct usbtest_dev *tdev, 1763 int pipe, 1764 unsigned length, 1765 int iterations, 1766 unsigned transfer_flags, 1767 const char *label) 1768 { 1769 int retval; 1770 struct urb *urb = usbtest_alloc_urb( 1771 testdev_to_usbdev(tdev), pipe, length, transfer_flags, 1); 1772 1773 if (!urb) 1774 return -ENOMEM; 1775 1776 retval = simple_io(tdev, urb, iterations, 0, 0, label); 1777 simple_free_urb(urb); 1778 return retval; 1779 } 1780 1781 /*-------------------------------------------------------------------------*/ 1782 1783 /* We only have this one interface to user space, through usbfs. 1784 * User mode code can scan usbfs to find N different devices (maybe on 1785 * different busses) to use when testing, and allocate one thread per 1786 * test. So discovery is simplified, and we have no device naming issues. 1787 * 1788 * Don't use these only as stress/load tests. Use them along with with 1789 * other USB bus activity: plugging, unplugging, mousing, mp3 playback, 1790 * video capture, and so on. Run different tests at different times, in 1791 * different sequences. Nothing here should interact with other devices, 1792 * except indirectly by consuming USB bandwidth and CPU resources for test 1793 * threads and request completion. But the only way to know that for sure 1794 * is to test when HC queues are in use by many devices. 1795 * 1796 * WARNING: Because usbfs grabs udev->dev.sem before calling this ioctl(), 1797 * it locks out usbcore in certain code paths. Notably, if you disconnect 1798 * the device-under-test, khubd will wait block forever waiting for the 1799 * ioctl to complete ... so that usb_disconnect() can abort the pending 1800 * urbs and then call usbtest_disconnect(). To abort a test, you're best 1801 * off just killing the userspace task and waiting for it to exit. 1802 */ 1803 1804 static int 1805 usbtest_ioctl(struct usb_interface *intf, unsigned int code, void *buf) 1806 { 1807 struct usbtest_dev *dev = usb_get_intfdata(intf); 1808 struct usb_device *udev = testdev_to_usbdev(dev); 1809 struct usbtest_param *param = buf; 1810 int retval = -EOPNOTSUPP; 1811 struct urb *urb; 1812 struct scatterlist *sg; 1813 struct usb_sg_request req; 1814 struct timeval start; 1815 unsigned i; 1816 1817 /* FIXME USBDEVFS_CONNECTINFO doesn't say how fast the device is. */ 1818 1819 pattern = mod_pattern; 1820 1821 if (code != USBTEST_REQUEST) 1822 return -EOPNOTSUPP; 1823 1824 if (param->iterations <= 0) 1825 return -EINVAL; 1826 1827 if (mutex_lock_interruptible(&dev->lock)) 1828 return -ERESTARTSYS; 1829 1830 /* FIXME: What if a system sleep starts while a test is running? */ 1831 1832 /* some devices, like ez-usb default devices, need a non-default 1833 * altsetting to have any active endpoints. some tests change 1834 * altsettings; force a default so most tests don't need to check. 
	if (dev->info->alt >= 0) {
		int	res;

		if (intf->altsetting->desc.bInterfaceNumber) {
			mutex_unlock(&dev->lock);
			return -ENODEV;
		}
		res = set_altsetting(dev, dev->info->alt);
		if (res) {
			dev_err(&intf->dev,
					"set altsetting to %d failed, %d\n",
					dev->info->alt, res);
			mutex_unlock(&dev->lock);
			return res;
		}
	}

	/*
	 * Just a bunch of test cases that every HCD is expected to handle.
	 *
	 * Some may need specific firmware, though it'd be good to have
	 * one firmware image to handle all the test cases.
	 *
	 * FIXME add more tests!  cancel requests, verify the data, control
	 * queueing, concurrent read+write threads, and so on.
	 */
	do_gettimeofday(&start);
	switch (param->test_num) {

	case 0:
		dev_info(&intf->dev, "TEST 0: NOP\n");
		retval = 0;
		break;

	/* Simple non-queued bulk I/O tests */
	case 1:
		if (dev->out_pipe == 0)
			break;
		dev_info(&intf->dev,
				"TEST 1: write %d bytes %u times\n",
				param->length, param->iterations);
		urb = simple_alloc_urb(udev, dev->out_pipe, param->length);
		if (!urb) {
			retval = -ENOMEM;
			break;
		}
		/* FIRMWARE: bulk sink (maybe accepts short writes) */
		retval = simple_io(dev, urb, param->iterations, 0, 0, "test1");
		simple_free_urb(urb);
		break;
	case 2:
		if (dev->in_pipe == 0)
			break;
		dev_info(&intf->dev,
				"TEST 2: read %d bytes %u times\n",
				param->length, param->iterations);
		urb = simple_alloc_urb(udev, dev->in_pipe, param->length);
		if (!urb) {
			retval = -ENOMEM;
			break;
		}
		/* FIRMWARE: bulk source (maybe generates short writes) */
		retval = simple_io(dev, urb, param->iterations, 0, 0, "test2");
		simple_free_urb(urb);
		break;
	case 3:
		if (dev->out_pipe == 0 || param->vary == 0)
			break;
		dev_info(&intf->dev,
				"TEST 3: write/%d 0..%d bytes %u times\n",
				param->vary, param->length, param->iterations);
		urb = simple_alloc_urb(udev, dev->out_pipe, param->length);
		if (!urb) {
			retval = -ENOMEM;
			break;
		}
		/* FIRMWARE: bulk sink (maybe accepts short writes) */
		retval = simple_io(dev, urb, param->iterations, param->vary,
					0, "test3");
		simple_free_urb(urb);
		break;
	case 4:
		if (dev->in_pipe == 0 || param->vary == 0)
			break;
		dev_info(&intf->dev,
				"TEST 4: read/%d 0..%d bytes %u times\n",
				param->vary, param->length, param->iterations);
		urb = simple_alloc_urb(udev, dev->in_pipe, param->length);
		if (!urb) {
			retval = -ENOMEM;
			break;
		}
		/* FIRMWARE: bulk source (maybe generates short writes) */
		retval = simple_io(dev, urb, param->iterations, param->vary,
					0, "test4");
		simple_free_urb(urb);
		break;

	/* Queued bulk I/O tests */
	case 5:
		if (dev->out_pipe == 0 || param->sglen == 0)
			break;
		dev_info(&intf->dev,
			"TEST 5: write %d sglists %d entries of %d bytes\n",
				param->iterations,
				param->sglen, param->length);
		sg = alloc_sglist(param->sglen, param->length, 0);
		if (!sg) {
			retval = -ENOMEM;
			break;
		}
		/* FIRMWARE: bulk sink (maybe accepts short writes) */
		retval = perform_sglist(dev, param->iterations, dev->out_pipe,
				&req, sg, param->sglen);
		free_sglist(sg, param->sglen);
		break;

	case 6:
		if (dev->in_pipe == 0 || param->sglen == 0)
			break;
		dev_info(&intf->dev,
			"TEST 6: read %d sglists %d entries of %d bytes\n",
				param->iterations,
				param->sglen, param->length);
		sg = alloc_sglist(param->sglen, param->length, 0);
		if (!sg) {
			retval = -ENOMEM;
			break;
		}
		/* FIRMWARE: bulk source (maybe generates short writes) */
		retval = perform_sglist(dev, param->iterations, dev->in_pipe,
				&req, sg, param->sglen);
		free_sglist(sg, param->sglen);
		break;
	case 7:
		if (dev->out_pipe == 0 || param->sglen == 0 || param->vary == 0)
			break;
		dev_info(&intf->dev,
			"TEST 7: write/%d %d sglists %d entries 0..%d bytes\n",
				param->vary, param->iterations,
				param->sglen, param->length);
		sg = alloc_sglist(param->sglen, param->length, param->vary);
		if (!sg) {
			retval = -ENOMEM;
			break;
		}
		/* FIRMWARE: bulk sink (maybe accepts short writes) */
		retval = perform_sglist(dev, param->iterations, dev->out_pipe,
				&req, sg, param->sglen);
		free_sglist(sg, param->sglen);
		break;
	case 8:
		if (dev->in_pipe == 0 || param->sglen == 0 || param->vary == 0)
			break;
		dev_info(&intf->dev,
			"TEST 8: read/%d %d sglists %d entries 0..%d bytes\n",
				param->vary, param->iterations,
				param->sglen, param->length);
		sg = alloc_sglist(param->sglen, param->length, param->vary);
		if (!sg) {
			retval = -ENOMEM;
			break;
		}
		/* FIRMWARE: bulk source (maybe generates short writes) */
		retval = perform_sglist(dev, param->iterations, dev->in_pipe,
				&req, sg, param->sglen);
		free_sglist(sg, param->sglen);
		break;

	/* non-queued sanity tests for control (chapter 9 subset) */
	case 9:
		retval = 0;
		dev_info(&intf->dev,
			"TEST 9: ch9 (subset) control tests, %d times\n",
				param->iterations);
		for (i = param->iterations; retval == 0 && i--; /* NOP */)
			retval = ch9_postconfig(dev);
		if (retval)
			dev_err(&intf->dev, "ch9 subset failed, "
					"iterations left %d\n", i);
		break;

	/* queued control messaging */
	case 10:
		retval = 0;
		dev_info(&intf->dev,
				"TEST 10: queue %d control calls, %d times\n",
				param->sglen,
				param->iterations);
		retval = test_ctrl_queue(dev, param);
		break;

	/* simple non-queued unlinks (ring with one urb) */
	case 11:
		if (dev->in_pipe == 0 || !param->length)
			break;
		retval = 0;
		dev_info(&intf->dev, "TEST 11: unlink %d reads of %d\n",
				param->iterations, param->length);
		for (i = param->iterations; retval == 0 && i--; /* NOP */)
			retval = unlink_simple(dev, dev->in_pipe,
						param->length);
		if (retval)
			dev_err(&intf->dev, "unlink reads failed %d, "
				"iterations left %d\n", retval, i);
		break;
	case 12:
		if (dev->out_pipe == 0 || !param->length)
			break;
		retval = 0;
		dev_info(&intf->dev, "TEST 12: unlink %d writes of %d\n",
				param->iterations, param->length);
		for (i = param->iterations; retval == 0 && i--; /* NOP */)
			retval = unlink_simple(dev, dev->out_pipe,
						param->length);
		if (retval)
			dev_err(&intf->dev, "unlink writes failed %d, "
				"iterations left %d\n", retval, i);
		break;

	/* ep halt tests */
	case 13:
		if (dev->out_pipe == 0 && dev->in_pipe == 0)
			break;
		retval = 0;
		dev_info(&intf->dev, "TEST 13: set/clear %d halts\n",
				param->iterations);
		for (i = param->iterations; retval == 0 && i--; /* NOP */)
			retval = halt_simple(dev);
	case 13:
		if (dev->out_pipe == 0 && dev->in_pipe == 0)
			break;
		retval = 0;
		dev_info(&intf->dev, "TEST 13: set/clear %d halts\n",
				param->iterations);
		for (i = param->iterations; retval == 0 && i--; /* NOP */)
			retval = halt_simple(dev);

		if (retval)
			ERROR(dev, "halts failed, iterations left %d\n", i);
		break;

	/* control write tests */
	case 14:
		if (!dev->info->ctrl_out)
			break;
		dev_info(&intf->dev, "TEST 14: %d ep0out, %d..%d vary %d\n",
				param->iterations,
				realworld ? 1 : 0, param->length,
				param->vary);
		retval = ctrl_out(dev, param->iterations,
				param->length, param->vary, 0);
		break;

	/* iso write tests */
	case 15:
		if (dev->out_iso_pipe == 0 || param->sglen == 0)
			break;
		dev_info(&intf->dev,
			"TEST 15: write %d iso, %d entries of %d bytes\n",
				param->iterations,
				param->sglen, param->length);
		/* FIRMWARE: iso sink */
		retval = test_iso_queue(dev, param,
				dev->out_iso_pipe, dev->iso_out, 0);
		break;

	/* iso read tests */
	case 16:
		if (dev->in_iso_pipe == 0 || param->sglen == 0)
			break;
		dev_info(&intf->dev,
			"TEST 16: read %d iso, %d entries of %d bytes\n",
				param->iterations,
				param->sglen, param->length);
		/* FIRMWARE: iso source */
		retval = test_iso_queue(dev, param,
				dev->in_iso_pipe, dev->iso_in, 0);
		break;

	/* FIXME scatterlist cancel (needs helper thread) */

	/* Tests for bulk I/O using DMA mapping by core and odd address */
	case 17:
		if (dev->out_pipe == 0)
			break;
		dev_info(&intf->dev,
			"TEST 17: write odd addr %d bytes %u times core map\n",
			param->length, param->iterations);

		retval = test_unaligned_bulk(
				dev, dev->out_pipe,
				param->length, param->iterations,
				0, "test17");
		break;

	case 18:
		if (dev->in_pipe == 0)
			break;
		dev_info(&intf->dev,
			"TEST 18: read odd addr %d bytes %u times core map\n",
			param->length, param->iterations);

		retval = test_unaligned_bulk(
				dev, dev->in_pipe,
				param->length, param->iterations,
				0, "test18");
		break;

	/* Tests for bulk I/O using premapped coherent buffer and odd address */
	case 19:
		if (dev->out_pipe == 0)
			break;
		dev_info(&intf->dev,
			"TEST 19: write odd addr %d bytes %u times premapped\n",
			param->length, param->iterations);

		retval = test_unaligned_bulk(
				dev, dev->out_pipe,
				param->length, param->iterations,
				URB_NO_TRANSFER_DMA_MAP, "test19");
		break;

	case 20:
		if (dev->in_pipe == 0)
			break;
		dev_info(&intf->dev,
			"TEST 20: read odd addr %d bytes %u times premapped\n",
			param->length, param->iterations);

		retval = test_unaligned_bulk(
				dev, dev->in_pipe,
				param->length, param->iterations,
				URB_NO_TRANSFER_DMA_MAP, "test20");
		break;
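
	/*
	 * Note on cases 17-20 above: with transfer_flags of 0 the USB core
	 * DMA-maps the deliberately misaligned buffer itself, while
	 * URB_NO_TRANSFER_DMA_MAP submits the buffer as already mapped, so
	 * the HCD uses urb->transfer_dma directly.
	 */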

	/* control write tests with unaligned buffer */
	case 21:
		if (!dev->info->ctrl_out)
			break;
		dev_info(&intf->dev,
			"TEST 21: %d ep0out odd addr, %d..%d vary %d\n",
				param->iterations,
				realworld ? 1 : 0, param->length,
				param->vary);
		retval = ctrl_out(dev, param->iterations,
				param->length, param->vary, 1);
		break;

	/* unaligned iso tests */
	case 22:
		if (dev->out_iso_pipe == 0 || param->sglen == 0)
			break;
		dev_info(&intf->dev,
			"TEST 22: write %d iso odd, %d entries of %d bytes\n",
				param->iterations,
				param->sglen, param->length);
		retval = test_iso_queue(dev, param,
				dev->out_iso_pipe, dev->iso_out, 1);
		break;

	case 23:
		if (dev->in_iso_pipe == 0 || param->sglen == 0)
			break;
		dev_info(&intf->dev,
			"TEST 23: read %d iso odd, %d entries of %d bytes\n",
				param->iterations,
				param->sglen, param->length);
		retval = test_iso_queue(dev, param,
				dev->in_iso_pipe, dev->iso_in, 1);
		break;

	/* unlink URBs from a bulk-OUT queue */
	case 24:
		if (dev->out_pipe == 0 || !param->length || param->sglen < 4)
			break;
		retval = 0;
		dev_info(&intf->dev, "TEST 24: unlink from %d queues of "
				"%d %d-byte writes\n",
				param->iterations, param->sglen, param->length);
		for (i = param->iterations; retval == 0 && i > 0; --i) {
			retval = unlink_queued(dev, dev->out_pipe,
						param->sglen, param->length);
			if (retval) {
				dev_err(&intf->dev,
					"unlink queued writes failed %d, "
					"iterations left %d\n", retval, i);
				break;
			}
		}
		break;

	}
	do_gettimeofday(&param->duration);
	param->duration.tv_sec -= start.tv_sec;
	param->duration.tv_usec -= start.tv_usec;
	if (param->duration.tv_usec < 0) {
		param->duration.tv_usec += 1000 * 1000;
		param->duration.tv_sec -= 1;
	}
	mutex_unlock(&dev->lock);
	return retval;
}

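/*
 * Userspace normally drives the cases above through the "testusb"
 * utility that ships with the kernel sources.  At bottom that is just a
 * usbfs ioctl aimed at this driver; roughly (sketch only, error handling
 * omitted, interface 0 assumed):
 *
 *	struct usbtest_param param = {
 *		.test_num   = 1,
 *		.iterations = 1000,
 *		.length     = 1024,
 *	};
 *	struct usbdevfs_ioctl wrapper = {
 *		.ifno       = 0,
 *		.ioctl_code = USBTEST_REQUEST,
 *		.data       = &param,
 *	};
 *	ioctl(usbfs_fd, USBDEVFS_IOCTL, &wrapper);
 */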
/*-------------------------------------------------------------------------*/

static unsigned force_interrupt;
module_param(force_interrupt, uint, 0);
MODULE_PARM_DESC(force_interrupt, "0 = test default; else interrupt");

#ifdef GENERIC
static unsigned short vendor;
module_param(vendor, ushort, 0);
MODULE_PARM_DESC(vendor, "vendor code (from usb-if)");

static unsigned short product;
module_param(product, ushort, 0);
MODULE_PARM_DESC(product, "product code (from vendor)");
#endif

static int
usbtest_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
	struct usb_device *udev;
	struct usbtest_dev *dev;
	struct usbtest_info *info;
	char *rtest, *wtest;
	char *irtest, *iwtest;

	udev = interface_to_usbdev(intf);

#ifdef GENERIC
	/* specify devices by module parameters? */
	if (id->match_flags == 0) {
		/* vendor match required, product match optional */
		if (!vendor || le16_to_cpu(udev->descriptor.idVendor) != (u16)vendor)
			return -ENODEV;
		if (product && le16_to_cpu(udev->descriptor.idProduct) != (u16)product)
			return -ENODEV;
		dev_info(&intf->dev, "matched module params, "
					"vend=0x%04x prod=0x%04x\n",
				le16_to_cpu(udev->descriptor.idVendor),
				le16_to_cpu(udev->descriptor.idProduct));
	}
#endif

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;
	info = (struct usbtest_info *) id->driver_info;
	dev->info = info;
	mutex_init(&dev->lock);

	dev->intf = intf;

	/* cacheline-aligned scratch for i/o */
	dev->buf = kmalloc(TBUF_SIZE, GFP_KERNEL);
	if (dev->buf == NULL) {
		kfree(dev);
		return -ENOMEM;
	}

	/* NOTE this doesn't yet test the handful of differences that are
	 * visible with high speed interrupts: bigger maxpacket (1K) and
	 * "high bandwidth" modes (up to 3 packets/uframe).
	 */
	rtest = wtest = "";
	irtest = iwtest = "";
	if (force_interrupt || udev->speed == USB_SPEED_LOW) {
		if (info->ep_in) {
			dev->in_pipe = usb_rcvintpipe(udev, info->ep_in);
			rtest = " intr-in";
		}
		if (info->ep_out) {
			dev->out_pipe = usb_sndintpipe(udev, info->ep_out);
			wtest = " intr-out";
		}
	} else {
		if (override_alt >= 0 || info->autoconf) {
			int status;

			status = get_endpoints(dev, intf);
			if (status < 0) {
				WARNING(dev, "couldn't get endpoints, %d\n",
						status);
				kfree(dev->buf);
				kfree(dev);
				return status;
			}
			/* may find bulk or ISO pipes */
		} else {
			if (info->ep_in)
				dev->in_pipe = usb_rcvbulkpipe(udev,
							info->ep_in);
			if (info->ep_out)
				dev->out_pipe = usb_sndbulkpipe(udev,
							info->ep_out);
		}
		if (dev->in_pipe)
			rtest = " bulk-in";
		if (dev->out_pipe)
			wtest = " bulk-out";
		if (dev->in_iso_pipe)
			irtest = " iso-in";
		if (dev->out_iso_pipe)
			iwtest = " iso-out";
	}

	usb_set_intfdata(intf, dev);
	dev_info(&intf->dev, "%s\n", info->name);
	dev_info(&intf->dev, "%s {control%s%s%s%s%s} tests%s\n",
			usb_speed_string(udev->speed),
			info->ctrl_out ? " in/out" : "",
			rtest, wtest,
			irtest, iwtest,
			info->alt >= 0 ? " (+alt)" : "");
	return 0;
}

static int usbtest_suspend(struct usb_interface *intf, pm_message_t message)
{
	return 0;
}

static int usbtest_resume(struct usb_interface *intf)
{
	return 0;
}


static void usbtest_disconnect(struct usb_interface *intf)
{
	struct usbtest_dev *dev = usb_get_intfdata(intf);

	usb_set_intfdata(intf, NULL);
	dev_dbg(&intf->dev, "disconnect\n");
	kfree(dev);
}

/* Basic testing only needs a device that can source or sink bulk traffic.
 * Any device can test control transfers (default with GENERIC binding).
 *
 * Several entries work with the default EP0 implementation that's built
 * into EZ-USB chips. There's a default vendor ID which can be overridden
 * by (very) small config EEPROMS, but otherwise all these devices act
 * identically until firmware is loaded: only EP0 works. It turns out
 * to be easy to make other endpoints work, without modifying that EP0
 * behavior. For now, we expect that kind of firmware.
 */
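
/*
 * Adding another test board is a matter of one more usbtest_info below
 * plus a matching entry in id_table[].  Purely illustrative (the IDs and
 * the name are made up):
 *
 *	static struct usbtest_info my_board_info = {
 *		.name = "my test board",
 *		.autoconf = 1,
 *		.alt = -1,
 *	};
 *
 *	{ USB_DEVICE(0x1234, 0x5678),
 *		.driver_info = (unsigned long) &my_board_info, },
 */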

/* an21xx or fx versions of ez-usb */
static struct usbtest_info ez1_info = {
	.name = "EZ-USB device",
	.ep_in = 2,
	.ep_out = 2,
	.alt = 1,
};

/* fx2 version of ez-usb */
static struct usbtest_info ez2_info = {
	.name = "FX2 device",
	.ep_in = 6,
	.ep_out = 2,
	.alt = 1,
};

/* ezusb family device with dedicated usb test firmware */
static struct usbtest_info fw_info = {
	.name = "usb test device",
	.ep_in = 2,
	.ep_out = 2,
	.alt = 1,
	.autoconf = 1,		/* iso and ctrl_out need autoconf */
	.ctrl_out = 1,
	.iso = 1,		/* iso_ep's are #8 in/out */
};

/* peripheral running Linux and 'zero.c' test firmware, or
 * its user-mode cousin.  Different versions of this use
 * different hardware with the same vendor/product codes.
 * The host side MUST rely on the endpoint descriptors.
 */
static struct usbtest_info gz_info = {
	.name = "Linux gadget zero",
	.autoconf = 1,
	.ctrl_out = 1,
	.iso = 1,
	.alt = 0,
};

static struct usbtest_info um_info = {
	.name = "Linux user mode test driver",
	.autoconf = 1,
	.alt = -1,
};

static struct usbtest_info um2_info = {
	.name = "Linux user mode ISO test driver",
	.autoconf = 1,
	.iso = 1,
	.alt = -1,
};

#ifdef IBOT2
/* this is a nice source of high speed bulk data;
 * uses an FX2, with firmware provided in the device
 */
static struct usbtest_info ibot2_info = {
	.name = "iBOT2 webcam",
	.ep_in = 2,
	.alt = -1,
};
#endif

#ifdef GENERIC
/* we can use any device to test control traffic */
static struct usbtest_info generic_info = {
	.name = "Generic USB device",
	.alt = -1,
};
#endif


static const struct usb_device_id id_table[] = {

	/*-------------------------------------------------------------*/

	/* EZ-USB devices which download firmware to replace (or in our
	 * case augment) the default device implementation.
	 */

	/* generic EZ-USB FX controller */
	{ USB_DEVICE(0x0547, 0x2235),
		.driver_info = (unsigned long) &ez1_info,
	},

	/* CY3671 development board with EZ-USB FX */
	{ USB_DEVICE(0x0547, 0x0080),
		.driver_info = (unsigned long) &ez1_info,
	},

	/* generic EZ-USB FX2 controller (or development board) */
	{ USB_DEVICE(0x04b4, 0x8613),
		.driver_info = (unsigned long) &ez2_info,
	},

	/* re-enumerated usb test device firmware */
	{ USB_DEVICE(0xfff0, 0xfff0),
		.driver_info = (unsigned long) &fw_info,
	},

	/* "Gadget Zero" firmware runs under Linux */
	{ USB_DEVICE(0x0525, 0xa4a0),
		.driver_info = (unsigned long) &gz_info,
	},

	/* so does a user-mode variant */
	{ USB_DEVICE(0x0525, 0xa4a4),
		.driver_info = (unsigned long) &um_info,
	},

	/* ... and a user-mode variant that talks iso */
	{ USB_DEVICE(0x0525, 0xa4a3),
		.driver_info = (unsigned long) &um2_info,
	},

#ifdef KEYSPAN_19Qi
	/* Keyspan 19qi uses an21xx (original EZ-USB) */
	/* this does not coexist with the real Keyspan 19qi driver! */
	{ USB_DEVICE(0x06cd, 0x010b),
		.driver_info = (unsigned long) &ez1_info,
	},
#endif

	/*-------------------------------------------------------------*/

#ifdef IBOT2
	/* iBOT2 makes a nice source of high speed bulk-in data */
	/* this does not coexist with a real iBOT2 driver! */
	{ USB_DEVICE(0x0b62, 0x0059),
		.driver_info = (unsigned long) &ibot2_info,
	},
#endif

	/*-------------------------------------------------------------*/

#ifdef GENERIC
	/* module params can specify devices to use for control tests */
	{ .driver_info = (unsigned long) &generic_info, },
#endif

	/*-------------------------------------------------------------*/

	{ }
};
MODULE_DEVICE_TABLE(usb, id_table);

static struct usb_driver usbtest_driver = {
	.name = "usbtest",
	.id_table = id_table,
	.probe = usbtest_probe,
	.unlocked_ioctl = usbtest_ioctl,
	.disconnect = usbtest_disconnect,
	.suspend = usbtest_suspend,
	.resume = usbtest_resume,
};

/*-------------------------------------------------------------------------*/

static int __init usbtest_init(void)
{
#ifdef GENERIC
	if (vendor)
		pr_debug("params: vend=0x%04x prod=0x%04x\n", vendor, product);
#endif
	return usb_register(&usbtest_driver);
}
module_init(usbtest_init);

static void __exit usbtest_exit(void)
{
	usb_deregister(&usbtest_driver);
}
module_exit(usbtest_exit);

MODULE_DESCRIPTION("USB Core/HCD Testing Driver");
MODULE_LICENSE("GPL");