// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * linux/drivers/spi/spi-loopback-test.c
 *
 * (c) Martin Sperl <kernel@martin.sperl.org>
 *
 * Loopback test driver to test several typical spi_message conditions
 * that a spi_master driver may encounter;
 * this can also get used for regression testing
 */

#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/list.h>
#include <linux/list_sort.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/printk.h>
#include <linux/vmalloc.h>
#include <linux/spi/spi.h>

#include "spi-test.h"

/* flag to only simulate transfers */
static int simulate_only;
module_param(simulate_only, int, 0);
MODULE_PARM_DESC(simulate_only, "if not 0 do not execute the spi message");

/* dump spi messages */
static int dump_messages;
module_param(dump_messages, int, 0);
MODULE_PARM_DESC(dump_messages,
		 "=1 dump the basic spi_message structure, " \
		 "=2 dump the spi_message structure including data, " \
		 "=3 dump the spi_message structure before and after execution");
/* the device is jumpered for loopback - enabling some rx_buf tests */
static int loopback;
module_param(loopback, int, 0);
MODULE_PARM_DESC(loopback,
		 "if set enable loopback mode, where the rx_buf " \
		 "is checked to match tx_buf after the spi_message " \
		 "is executed");

static int loop_req;
module_param(loop_req, int, 0);
MODULE_PARM_DESC(loop_req,
		 "if set the controller will be asked to enable test loop mode. " \
		 "If the controller supports it, MISO and MOSI will be connected");

static int no_cs;
module_param(no_cs, int, 0);
MODULE_PARM_DESC(no_cs,
		 "if set, Chip Select (CS) will not be used");

/* run only a specific test */
static int run_only_test = -1;
module_param(run_only_test, int, 0);
MODULE_PARM_DESC(run_only_test,
		 "only run the test with this number (0-based!)");

/* use vmalloc'ed buffers */
static int use_vmalloc;
module_param(use_vmalloc, int, 0644);
MODULE_PARM_DESC(use_vmalloc,
		 "use vmalloc'ed buffers instead of kmalloc'ed");

/* check rx ranges */
static int check_ranges = 1;
module_param(check_ranges, int, 0644);
MODULE_PARM_DESC(check_ranges,
		 "check that the rx_buffer patterns are valid");

/* the actual tests to execute */
static struct spi_test spi_tests[] = {
	{
		.description = "tx/rx-transfer - start of page",
		.fill_option = FILL_COUNT_8,
		.iterate_len = { ITERATE_MAX_LEN },
		.iterate_tx_align = ITERATE_ALIGN,
		.iterate_rx_align = ITERATE_ALIGN,
		.transfer_count = 1,
		.transfers = {
			{
				.tx_buf = TX(0),
				.rx_buf = RX(0),
			},
		},
	},
	{
		.description = "tx/rx-transfer - crossing PAGE_SIZE",
		.fill_option = FILL_COUNT_8,
		.iterate_len = { ITERATE_LEN },
		.iterate_tx_align = ITERATE_ALIGN,
		.iterate_rx_align = ITERATE_ALIGN,
		.transfer_count = 1,
		.transfers = {
			{
				.tx_buf = TX(PAGE_SIZE - 4),
				.rx_buf = RX(PAGE_SIZE - 4),
			},
		},
	},
	{
		.description = "tx-transfer - only",
		.fill_option = FILL_COUNT_8,
		.iterate_len = { ITERATE_MAX_LEN },
		.iterate_tx_align = ITERATE_ALIGN,
		.transfer_count = 1,
		.transfers = {
			{
				.tx_buf = TX(0),
			},
		},
	},
	{
		.description = "rx-transfer - only",
		.fill_option = FILL_COUNT_8,
		.iterate_len = { ITERATE_MAX_LEN },
		.iterate_rx_align = ITERATE_ALIGN,
		.transfer_count = 1,
		.transfers = {
			{
				.rx_buf = RX(0),
			},
		},
	},
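	/* note: in the multi-transfer tests below iterate_transfer_mask
	 * selects which of the transfers get the iterated length and
	 * alignment offsets applied (see spi_test_run_iter())
	 */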
	{
		.description = "two tx-transfers - alter both",
		.fill_option = FILL_COUNT_8,
		.iterate_len = { ITERATE_LEN },
		.iterate_tx_align = ITERATE_ALIGN,
		.iterate_transfer_mask = BIT(0) | BIT(1),
		.transfer_count = 2,
		.transfers = {
			{
				.tx_buf = TX(0),
			},
			{
				/* this is why we can't use ITERATE_MAX_LEN */
				.tx_buf = TX(SPI_TEST_MAX_SIZE_HALF),
			},
		},
	},
	{
		.description = "two tx-transfers - alter first",
		.fill_option = FILL_COUNT_8,
		.iterate_len = { ITERATE_MAX_LEN },
		.iterate_tx_align = ITERATE_ALIGN,
		.iterate_transfer_mask = BIT(0),
		.transfer_count = 2,
		.transfers = {
			{
				.tx_buf = TX(64),
			},
			{
				.len = 1,
				.tx_buf = TX(0),
			},
		},
	},
	{
		.description = "two tx-transfers - alter second",
		.fill_option = FILL_COUNT_8,
		.iterate_len = { ITERATE_MAX_LEN },
		.iterate_tx_align = ITERATE_ALIGN,
		.iterate_transfer_mask = BIT(1),
		.transfer_count = 2,
		.transfers = {
			{
				.len = 16,
				.tx_buf = TX(0),
			},
			{
				.tx_buf = TX(64),
			},
		},
	},
	{
		.description = "two transfers tx then rx - alter both",
		.fill_option = FILL_COUNT_8,
		.iterate_len = { ITERATE_MAX_LEN },
		.iterate_tx_align = ITERATE_ALIGN,
		.iterate_transfer_mask = BIT(0) | BIT(1),
		.transfer_count = 2,
		.transfers = {
			{
				.tx_buf = TX(0),
			},
			{
				.rx_buf = RX(0),
			},
		},
	},
	{
		.description = "two transfers tx then rx - alter tx",
		.fill_option = FILL_COUNT_8,
		.iterate_len = { ITERATE_MAX_LEN },
		.iterate_tx_align = ITERATE_ALIGN,
		.iterate_transfer_mask = BIT(0),
		.transfer_count = 2,
		.transfers = {
			{
				.tx_buf = TX(0),
			},
			{
				.len = 1,
				.rx_buf = RX(0),
			},
		},
	},
	{
		.description = "two transfers tx then rx - alter rx",
		.fill_option = FILL_COUNT_8,
		.iterate_len = { ITERATE_MAX_LEN },
		.iterate_tx_align = ITERATE_ALIGN,
		.iterate_transfer_mask = BIT(1),
		.transfer_count = 2,
		.transfers = {
			{
				.len = 1,
				.tx_buf = TX(0),
			},
			{
				.rx_buf = RX(0),
			},
		},
	},
	{
		.description = "two tx+rx transfers - alter both",
		.fill_option = FILL_COUNT_8,
		.iterate_len = { ITERATE_LEN },
		.iterate_tx_align = ITERATE_ALIGN,
		.iterate_transfer_mask = BIT(0) | BIT(1),
		.transfer_count = 2,
		.transfers = {
			{
				.tx_buf = TX(0),
				.rx_buf = RX(0),
			},
			{
				/* making sure we align without overwrite
				 * - this is why we can not use ITERATE_MAX_LEN
				 */
				.tx_buf = TX(SPI_TEST_MAX_SIZE_HALF),
				.rx_buf = RX(SPI_TEST_MAX_SIZE_HALF),
			},
		},
	},
	{
		.description = "two tx+rx transfers - alter first",
		.fill_option = FILL_COUNT_8,
		.iterate_len = { ITERATE_MAX_LEN },
		.iterate_tx_align = ITERATE_ALIGN,
		.iterate_transfer_mask = BIT(0),
		.transfer_count = 2,
		.transfers = {
			{
				/* making sure we align without overwrite */
				.tx_buf = TX(1024),
				.rx_buf = RX(1024),
			},
			{
				.len = 1,
				/* making sure we align without overwrite */
				.tx_buf = TX(0),
				.rx_buf = RX(0),
			},
		},
	},
	{
		.description = "two tx+rx transfers - alter second",
		.fill_option = FILL_COUNT_8,
		.iterate_len = { ITERATE_MAX_LEN },
		.iterate_tx_align = ITERATE_ALIGN,
		.iterate_transfer_mask = BIT(1),
		.transfer_count = 2,
		.transfers = {
			{
				.len = 1,
				.tx_buf = TX(0),
				.rx_buf = RX(0),
			},
			{
				/* making sure we align without overwrite */
				.tx_buf = TX(1024),
				.rx_buf = RX(1024),
			},
		},
	},
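	/* note: the per-transfer delays below are summed up in
	 * spi_test_check_elapsed_time() and compared against the
	 * measured execution time of the message
	 */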
	{
		.description = "two tx+rx transfers - delay after transfer",
		.fill_option = FILL_COUNT_8,
		.iterate_len = { ITERATE_MAX_LEN },
		.iterate_transfer_mask = BIT(0) | BIT(1),
		.transfer_count = 2,
		.transfers = {
			{
				.tx_buf = TX(0),
				.rx_buf = RX(0),
				.delay = {
					.value = 1000,
					.unit = SPI_DELAY_UNIT_USECS,
				},
			},
			{
				.tx_buf = TX(0),
				.rx_buf = RX(0),
				.delay = {
					.value = 1000,
					.unit = SPI_DELAY_UNIT_USECS,
				},
			},
		},
	},

	{ /* end of tests sequence */ }
};

static int spi_loopback_test_probe(struct spi_device *spi)
{
	int ret;

	if (loop_req || no_cs) {
		spi->mode |= loop_req ? SPI_LOOP : 0;
		spi->mode |= no_cs ? SPI_NO_CS : 0;
		ret = spi_setup(spi);
		if (ret) {
			dev_err(&spi->dev, "SPI setup with SPI_LOOP or SPI_NO_CS failed (%d)\n",
				ret);
			return ret;
		}
	}

	dev_info(&spi->dev, "Executing spi-loopback-tests\n");

	ret = spi_test_run_tests(spi, spi_tests);

	dev_info(&spi->dev, "Finished spi-loopback-tests with return: %i\n",
		 ret);

	return ret;
}

/* non const match table to permit changing it via a module parameter */
static struct of_device_id spi_loopback_test_of_match[] = {
	{ .compatible = "linux,spi-loopback-test", },
	{ }
};

/* allow overriding the compatible string via a module parameter */
module_param_string(compatible, spi_loopback_test_of_match[0].compatible,
		    sizeof(spi_loopback_test_of_match[0].compatible),
		    0000);

MODULE_DEVICE_TABLE(of, spi_loopback_test_of_match);

static struct spi_driver spi_loopback_test_driver = {
	.driver = {
		.name = "spi-loopback-test",
		.owner = THIS_MODULE,
		.of_match_table = spi_loopback_test_of_match,
	},
	.probe = spi_loopback_test_probe,
};

module_spi_driver(spi_loopback_test_driver);

MODULE_AUTHOR("Martin Sperl <kernel@martin.sperl.org>");
MODULE_DESCRIPTION("test spi_driver to check core functionality");
MODULE_LICENSE("GPL");

/*-------------------------------------------------------------------------*/

/* spi_test implementation */

#define RANGE_CHECK(ptr, plen, start, slen) \
	((ptr >= start) && (ptr + plen <= start + slen))

/* we allocate one page more, to allow for offsets */
#define SPI_TEST_MAX_SIZE_PLUS (SPI_TEST_MAX_SIZE + PAGE_SIZE)

static void spi_test_print_hex_dump(char *pre, const void *ptr, size_t len)
{
	/* limit the hex_dump */
	if (len < 1024) {
		print_hex_dump(KERN_INFO, pre,
			       DUMP_PREFIX_OFFSET, 16, 1,
			       ptr, len, 0);
		return;
	}
	/* print head */
	print_hex_dump(KERN_INFO, pre,
		       DUMP_PREFIX_OFFSET, 16, 1,
		       ptr, 512, 0);
	/* print tail */
	pr_info("%s truncated - continuing at offset %04zx\n",
		pre, len - 512);
	print_hex_dump(KERN_INFO, pre,
		       DUMP_PREFIX_OFFSET, 16, 1,
		       ptr + (len - 512), 512, 0);
}
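
/* dump an spi_message and its transfers; with dump_data set the
 * tx/rx buffers are hex-dumped as well and the trailing part of each
 * rx_buf that still holds SPI_TEST_PATTERN_UNWRITTEN is reported
 */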
static void spi_test_dump_message(struct spi_device *spi,
				  struct spi_message *msg,
				  bool dump_data)
{
	struct spi_transfer *xfer;
	int i;
	u8 b;

	dev_info(&spi->dev, " spi_msg@%pK\n", msg);
	if (msg->status)
		dev_info(&spi->dev, " status: %i\n",
			 msg->status);
	dev_info(&spi->dev, " frame_length: %i\n",
		 msg->frame_length);
	dev_info(&spi->dev, " actual_length: %i\n",
		 msg->actual_length);

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		dev_info(&spi->dev, " spi_transfer@%pK\n", xfer);
		dev_info(&spi->dev, " len: %i\n", xfer->len);
		dev_info(&spi->dev, " tx_buf: %pK\n", xfer->tx_buf);
		if (dump_data && xfer->tx_buf)
			spi_test_print_hex_dump(" TX: ",
						xfer->tx_buf,
						xfer->len);

		dev_info(&spi->dev, " rx_buf: %pK\n", xfer->rx_buf);
		if (dump_data && xfer->rx_buf)
			spi_test_print_hex_dump(" RX: ",
						xfer->rx_buf,
						xfer->len);
		/* check for unwritten test pattern on rx_buf */
		if (xfer->rx_buf) {
			for (i = 0; i < xfer->len; i++) {
				b = ((u8 *)xfer->rx_buf)[xfer->len - 1 - i];
				if (b != SPI_TEST_PATTERN_UNWRITTEN)
					break;
			}
			if (i)
				dev_info(&spi->dev,
					 " rx_buf filled with %02x starts at offset: %i\n",
					 SPI_TEST_PATTERN_UNWRITTEN,
					 xfer->len - i);
		}
	}
}

struct rx_ranges {
	struct list_head list;
	u8 *start;
	u8 *end;
};

static int rx_ranges_cmp(void *priv, const struct list_head *a,
			 const struct list_head *b)
{
	struct rx_ranges *rx_a = list_entry(a, struct rx_ranges, list);
	struct rx_ranges *rx_b = list_entry(b, struct rx_ranges, list);

	if (rx_a->start > rx_b->start)
		return 1;
	if (rx_a->start < rx_b->start)
		return -1;
	return 0;
}

static int spi_check_rx_ranges(struct spi_device *spi,
			       struct spi_message *msg,
			       void *rx)
{
	struct spi_transfer *xfer;
	struct rx_ranges ranges[SPI_TEST_MAX_TRANSFERS], *r;
	int i = 0;
	LIST_HEAD(ranges_list);
	u8 *addr;
	int ret = 0;

	/* loop over all transfers to fill in the rx_ranges */
	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/* if there is no rx, then no check is needed */
		if (!xfer->rx_buf)
			continue;
		/* fill in the rx_range */
		if (RANGE_CHECK(xfer->rx_buf, xfer->len,
				rx, SPI_TEST_MAX_SIZE_PLUS)) {
			ranges[i].start = xfer->rx_buf;
			ranges[i].end = xfer->rx_buf + xfer->len;
			list_add(&ranges[i].list, &ranges_list);
			i++;
		}
	}

	/* if there are no ranges, then we can return and avoid the checks... */
	if (!i)
		return 0;

	/* sort the list */
	list_sort(NULL, &ranges_list, rx_ranges_cmp);

	/* and iterate over all the rx addresses */
	for (addr = rx; addr < (u8 *)rx + SPI_TEST_MAX_SIZE_PLUS; addr++) {
		/* if it is the DO_NOT_WRITE pattern,
		 * then continue with the loop...
		 */
		if (*addr == SPI_TEST_PATTERN_DO_NOT_WRITE)
			continue;

		/* check if we are inside a range */
		list_for_each_entry(r, &ranges_list, list) {
			/* if so then set to end... */
			if ((addr >= r->start) && (addr < r->end))
				addr = r->end;
		}
		/* second test after a (hopefully successful) translation */
		if (*addr == SPI_TEST_PATTERN_DO_NOT_WRITE)
			continue;

		/* if still not found then something has modified too much */
		/* we could list the "closest" transfer here... */
		dev_err(&spi->dev,
			"loopback strangeness - rx changed outside of allowed range at: %pK\n",
			addr);
		/* do not return, only set ret,
		 * so that we list all addresses
		 */
		ret = -ERANGE;
	}

	return ret;
}

static int spi_test_check_elapsed_time(struct spi_device *spi,
				       struct spi_test *test)
{
	int i;
	unsigned long long estimated_time = 0;
	unsigned long long delay_usecs = 0;

	for (i = 0; i < test->transfer_count; i++) {
		struct spi_transfer *xfer = test->transfers + i;
		unsigned long long nbits = (unsigned long long)BITS_PER_BYTE *
					   xfer->len;

		delay_usecs += xfer->delay.value;
		if (!xfer->speed_hz)
			continue;
		estimated_time += div_u64(nbits * NSEC_PER_SEC, xfer->speed_hz);
	}

	estimated_time += delay_usecs * NSEC_PER_USEC;
	if (test->elapsed_time < estimated_time) {
		dev_err(&spi->dev,
			"elapsed time %lld ns is shorter than minimum estimated time %lld ns\n",
			test->elapsed_time, estimated_time);

		return -EINVAL;
	}

	return 0;
}

static int spi_test_check_loopback_result(struct spi_device *spi,
					  struct spi_message *msg,
					  void *tx, void *rx)
{
	struct spi_transfer *xfer;
	u8 rxb, txb;
	size_t i;
	int ret;

	/* check that the rx_buffer patterns are valid - with loopback or without */
	if (check_ranges) {
		ret = spi_check_rx_ranges(spi, msg, rx);
		if (ret)
			return ret;
	}

	/* if we run without loopback, then return now */
	if (!loopback)
		return 0;

	/* if applicable to the transfer, check that rx_buf is equal to tx_buf */
	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/* if there is no rx, then no check is needed */
		if (!xfer->len || !xfer->rx_buf)
			continue;
		/* so depending on tx_buf we need to handle things */
		if (xfer->tx_buf) {
			for (i = 0; i < xfer->len; i++) {
				txb = ((u8 *)xfer->tx_buf)[i];
				rxb = ((u8 *)xfer->rx_buf)[i];
				if (txb != rxb)
					goto mismatch_error;
			}
		} else {
			/* first byte received */
			txb = ((u8 *)xfer->rx_buf)[0];
			/* first byte may be 0x00 or 0xff */
			if (!((txb == 0) || (txb == 0xff))) {
				dev_err(&spi->dev,
					"loopback strangeness - we expect 0x00 or 0xff, but not 0x%02x\n",
					txb);
				return -EINVAL;
			}
			/* check that all bytes are identical */
			for (i = 1; i < xfer->len; i++) {
				rxb = ((u8 *)xfer->rx_buf)[i];
				if (rxb != txb)
					goto mismatch_error;
			}
		}
	}

	return 0;

mismatch_error:
	dev_err(&spi->dev,
		"loopback strangeness - transfer mismatch on byte %04zx - expected 0x%02x, but got 0x%02x\n",
		i, txb, rxb);

	return -EINVAL;
}

static int spi_test_translate(struct spi_device *spi,
			      void **ptr, size_t len,
			      void *tx, void *rx)
{
	size_t off;

	/* return on null */
	if (!*ptr)
		return 0;

	/* in the MAX_SIZE_HALF case modify the pointer */
	if (((size_t)*ptr) & SPI_TEST_MAX_SIZE_HALF)
		/* move the pointer to the correct range */
		*ptr += (SPI_TEST_MAX_SIZE_PLUS / 2) -
			SPI_TEST_MAX_SIZE_HALF;

	/* RX range
	 * - we check against MAX_SIZE_PLUS to allow for automated alignment
	 */
	if (RANGE_CHECK(*ptr, len, RX(0), SPI_TEST_MAX_SIZE_PLUS)) {
		off = *ptr - RX(0);
		*ptr = rx + off;

		return 0;
	}

	/* TX range */
	if (RANGE_CHECK(*ptr, len, TX(0), SPI_TEST_MAX_SIZE_PLUS)) {
		off = *ptr - TX(0);
		*ptr = tx + off;

		return 0;
	}

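	/* the pointer matched neither the RX nor the TX placeholder range */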
	dev_err(&spi->dev,
		"PointerRange [%pK:%pK[ not in range [%pK:%pK[ or [%pK:%pK[\n",
		*ptr, *ptr + len,
		RX(0), RX(SPI_TEST_MAX_SIZE),
		TX(0), TX(SPI_TEST_MAX_SIZE));

	return -EINVAL;
}

static int spi_test_fill_pattern(struct spi_device *spi,
				 struct spi_test *test)
{
	struct spi_transfer *xfers = test->transfers;
	u8 *tx_buf;
	size_t count = 0;
	int i, j;

#ifdef __BIG_ENDIAN
#define GET_VALUE_BYTE(value, index, bytes) \
	(value >> (8 * (bytes - 1 - count % bytes)))
#else
#define GET_VALUE_BYTE(value, index, bytes) \
	(value >> (8 * (count % bytes)))
#endif

	/* fill all transfers with the pattern requested */
	for (i = 0; i < test->transfer_count; i++) {
		/* fill rx_buf with SPI_TEST_PATTERN_UNWRITTEN */
		if (xfers[i].rx_buf)
			memset(xfers[i].rx_buf, SPI_TEST_PATTERN_UNWRITTEN,
			       xfers[i].len);
		/* if tx_buf is NULL then skip */
		tx_buf = (u8 *)xfers[i].tx_buf;
		if (!tx_buf)
			continue;
		/* modify all the transfers */
		for (j = 0; j < xfers[i].len; j++, tx_buf++, count++) {
			/* fill tx */
			switch (test->fill_option) {
			case FILL_MEMSET_8:
				*tx_buf = test->fill_pattern;
				break;
			case FILL_MEMSET_16:
				*tx_buf = GET_VALUE_BYTE(test->fill_pattern,
							 count, 2);
				break;
			case FILL_MEMSET_24:
				*tx_buf = GET_VALUE_BYTE(test->fill_pattern,
							 count, 3);
				break;
			case FILL_MEMSET_32:
				*tx_buf = GET_VALUE_BYTE(test->fill_pattern,
							 count, 4);
				break;
			case FILL_COUNT_8:
				*tx_buf = count;
				break;
			case FILL_COUNT_16:
				*tx_buf = GET_VALUE_BYTE(count, count, 2);
				break;
			case FILL_COUNT_24:
				*tx_buf = GET_VALUE_BYTE(count, count, 3);
				break;
			case FILL_COUNT_32:
				*tx_buf = GET_VALUE_BYTE(count, count, 4);
				break;
			case FILL_TRANSFER_BYTE_8:
				*tx_buf = j;
				break;
			case FILL_TRANSFER_BYTE_16:
				*tx_buf = GET_VALUE_BYTE(j, j, 2);
				break;
			case FILL_TRANSFER_BYTE_24:
				*tx_buf = GET_VALUE_BYTE(j, j, 3);
				break;
			case FILL_TRANSFER_BYTE_32:
				*tx_buf = GET_VALUE_BYTE(j, j, 4);
				break;
			case FILL_TRANSFER_NUM:
				*tx_buf = i;
				break;
			default:
				dev_err(&spi->dev,
					"unsupported fill_option: %i\n",
					test->fill_option);
				return -EINVAL;
			}
		}
	}

	return 0;
}

static int _spi_test_run_iter(struct spi_device *spi,
			      struct spi_test *test,
			      void *tx, void *rx)
{
	struct spi_message *msg = &test->msg;
	struct spi_transfer *x;
	int i, ret;

	/* initialize message - zero-filled via static initialization */
	spi_message_init_no_memset(msg);

	/* fill rx with the DO_NOT_WRITE pattern */
	memset(rx, SPI_TEST_PATTERN_DO_NOT_WRITE, SPI_TEST_MAX_SIZE_PLUS);

	/* add the individual transfers */
	for (i = 0; i < test->transfer_count; i++) {
		x = &test->transfers[i];

		/* patch the values of tx_buf */
		ret = spi_test_translate(spi, (void **)&x->tx_buf, x->len,
					 (void *)tx, rx);
		if (ret)
			return ret;

		/* patch the values of rx_buf */
		ret = spi_test_translate(spi, &x->rx_buf, x->len,
					 (void *)tx, rx);
		if (ret)
			return ret;

		/* and add it to the list */
		spi_message_add_tail(x, msg);
	}

	/* fill in the transfer buffers with pattern */
	ret = spi_test_fill_pattern(spi, test);
	if (ret)
		return ret;

	/* and execute */
	if (test->execute_msg)
		ret = test->execute_msg(spi, test, tx, rx);
	else
		ret = spi_test_execute_msg(spi, test, tx, rx);
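
	/* note: a test may set expected_return to a nonzero value to
	 * exercise error paths - in that case a successful execution is
	 * itself a failure and is reported as -EFAULT below
	 */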
	/* handle result */
	if (ret == test->expected_return)
		return 0;

	dev_err(&spi->dev,
		"test failed - test returned %i, but we expected %i\n",
		ret, test->expected_return);

	if (ret)
		return ret;

	/* if it is 0, as we expected something else,
	 * then return something special
	 */
	return -EFAULT;
}

static int spi_test_run_iter(struct spi_device *spi,
			     const struct spi_test *testtemplate,
			     void *tx, void *rx,
			     size_t len,
			     size_t tx_off,
			     size_t rx_off)
{
	struct spi_test test;
	int i, tx_count, rx_count;

	/* copy the test template to test */
	memcpy(&test, testtemplate, sizeof(test));

	/* if iterate_transfer_mask is not set,
	 * then set it to the first transfer only
	 */
	if (!(test.iterate_transfer_mask & (BIT(test.transfer_count) - 1)))
		test.iterate_transfer_mask = 1;

	/* count number of transfers with tx/rx_buf != NULL */
	rx_count = tx_count = 0;
	for (i = 0; i < test.transfer_count; i++) {
		if (test.transfers[i].tx_buf)
			tx_count++;
		if (test.transfers[i].rx_buf)
			rx_count++;
	}

	/* in some iteration cases warn and exit early,
	 * as there is nothing to do that has not been tested already...
	 */
	if (tx_off && (!tx_count)) {
		dev_warn_once(&spi->dev,
			      "%s: iterate_tx_off configured with tx_buf==NULL - ignoring\n",
			      test.description);
		return 0;
	}
	if (rx_off && (!rx_count)) {
		dev_warn_once(&spi->dev,
			      "%s: iterate_rx_off configured with rx_buf==NULL - ignoring\n",
			      test.description);
		return 0;
	}

	/* write out info */
	if (!(len || tx_off || rx_off)) {
		dev_info(&spi->dev, "Running test %s\n", test.description);
	} else {
		dev_info(&spi->dev,
			 " with iteration values: len = %zu, tx_off = %zu, rx_off = %zu\n",
			 len, tx_off, rx_off);
	}

	/* update in the values from iteration values */
	for (i = 0; i < test.transfer_count; i++) {
		/* only when bit in transfer mask is set */
		if (!(test.iterate_transfer_mask & BIT(i)))
			continue;
		test.transfers[i].len = len;
		if (test.transfers[i].tx_buf)
			test.transfers[i].tx_buf += tx_off;
		if (test.transfers[i].rx_buf)
			test.transfers[i].rx_buf += rx_off;
	}

	/* and execute */
	return _spi_test_run_iter(spi, &test, tx, rx);
}

/**
 * spi_test_execute_msg - default implementation to run a test
 *
 * @spi: @spi_device on which to run the @spi_message
 * @test: the test to execute, which already contains @msg
 * @tx: the tx buffer allocated for the test sequence
 * @rx: the rx buffer allocated for the test sequence
 *
 * Returns: error code of spi_sync as well as basic error checking
 */
int spi_test_execute_msg(struct spi_device *spi, struct spi_test *test,
			 void *tx, void *rx)
{
	struct spi_message *msg = &test->msg;
	int ret = 0;
	int i;

	/* only if we do not simulate */
	if (!simulate_only) {
		ktime_t start;

		/* dump the complete message before and after the transfer */
		if (dump_messages == 3)
			spi_test_dump_message(spi, msg, true);

		start = ktime_get();
		/* run the spi message */
		ret = spi_sync(spi, msg);
		test->elapsed_time = ktime_to_ns(ktime_sub(ktime_get(), start));
		if (ret == -ETIMEDOUT) {
			dev_info(&spi->dev,
				 "spi-message timed out - rerunning...\n");
			/* rerun after a few explicit schedules */
			for (i = 0; i < 16; i++)
				schedule();
			ret = spi_sync(spi, msg);
		}
		if (ret) {
			dev_err(&spi->dev,
				"Failed to execute spi_message: %i\n",
				ret);
			goto exit;
		}

		/* do some extra error checks */
		if (msg->frame_length != msg->actual_length) {
			dev_err(&spi->dev,
				"actual length differs from expected\n");
			ret = -EIO;
			goto exit;
		}

		/* run rx-buffer tests */
		ret = spi_test_check_loopback_result(spi, msg, tx, rx);
		if (ret)
			goto exit;

		ret = spi_test_check_elapsed_time(spi, test);
	}

	/* if requested or on error dump the message (including data) */
exit:
	if (dump_messages || ret)
		spi_test_dump_message(spi, msg,
				      (dump_messages >= 2) || (ret));

	return ret;
}
EXPORT_SYMBOL_GPL(spi_test_execute_msg);

/**
 * spi_test_run_test - run an individual spi_test
 *	including all the relevant iterations on:
 *	length and buffer alignment
 *
 * @spi: the spi_device to send the messages to
 * @test: the test which we need to execute
 * @tx: the tx buffer allocated for the test sequence
 * @rx: the rx buffer allocated for the test sequence
 *
 * Returns: status code of spi_sync or other failures
 */
int spi_test_run_test(struct spi_device *spi, const struct spi_test *test,
		      void *tx, void *rx)
{
	int idx_len;
	size_t len;
	size_t tx_align, rx_align;
	int ret;

	/* test for transfer limits */
	if (test->transfer_count >= SPI_TEST_MAX_TRANSFERS) {
		dev_err(&spi->dev,
			"%s: Exceeded max number of transfers with %i\n",
			test->description, test->transfer_count);
		return -E2BIG;
	}

	/* setting up some values in spi_message
	 * based on some settings in spi_master;
	 * some of this can also get done in the run() method
	 */

	/* iterate over all the iterable values using macros
	 * (to make it a bit more readable...)
	 */
#define FOR_EACH_ALIGNMENT(var)					\
	for (var = 0;						\
	    var < (test->iterate_##var ?			\
			(spi->master->dma_alignment ?		\
			 spi->master->dma_alignment :		\
			 test->iterate_##var) :			\
			1);					\
	    var++)
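
	/* walk every length from iterate_len[] (terminated by -1) and, for
	 * each, every tx/rx alignment offset up to the controller
	 * dma_alignment (or the test-defined alignment limit)
	 */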
	for (idx_len = 0; idx_len < SPI_TEST_MAX_ITERATE &&
	     (len = test->iterate_len[idx_len]) != -1; idx_len++) {
		FOR_EACH_ALIGNMENT(tx_align) {
			FOR_EACH_ALIGNMENT(rx_align) {
				/* and run the iteration */
				ret = spi_test_run_iter(spi, test,
							tx, rx,
							len,
							tx_align,
							rx_align);
				if (ret)
					return ret;
			}
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(spi_test_run_test);

/**
 * spi_test_run_tests - run an array of spi_test tests
 * @spi: the spi device on which to run the tests
 * @tests: the array of @spi_test, terminated by an all-zero entry
 *
 * Returns: status errors as per @spi_test_run_test()
 */
int spi_test_run_tests(struct spi_device *spi,
		       struct spi_test *tests)
{
	char *rx = NULL, *tx = NULL;
	int ret = 0, count = 0;
	struct spi_test *test;

	/* allocate rx/tx buffers of 128kB size without devm
	 * in the hope that they are on a page boundary
	 */
	if (use_vmalloc)
		rx = vmalloc(SPI_TEST_MAX_SIZE_PLUS);
	else
		rx = kzalloc(SPI_TEST_MAX_SIZE_PLUS, GFP_KERNEL);
	if (!rx)
		return -ENOMEM;

	if (use_vmalloc)
		tx = vmalloc(SPI_TEST_MAX_SIZE_PLUS);
	else
		tx = kzalloc(SPI_TEST_MAX_SIZE_PLUS, GFP_KERNEL);
	if (!tx) {
		ret = -ENOMEM;
		goto err_tx;
	}

	/* now run the individual tests in the table */
	for (test = tests, count = 0; test->description[0];
	     test++, count++) {
		/* only run the test if requested */
		if ((run_only_test > -1) && (count != run_only_test))
			continue;
		/* run the custom implementation if there is one */
		if (test->run_test)
			ret = test->run_test(spi, test, tx, rx);
		else
			ret = spi_test_run_test(spi, test, tx, rx);
		if (ret)
			goto out;
		/* add some delays so that we can easily
		 * detect the individual tests when using a logic analyzer;
		 * we also add scheduling to avoid potential spi_timeouts...
		 */
		mdelay(100);
		schedule();
	}

out:
	kvfree(tx);
err_tx:
	kvfree(rx);
	return ret;
}
EXPORT_SYMBOL_GPL(spi_test_run_tests);