1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * linux/drivers/spi/spi-loopback-test.c 4 * 5 * (c) Martin Sperl <kernel@martin.sperl.org> 6 * 7 * Loopback test driver to test several typical spi_message conditions 8 * that a spi_master driver may encounter 9 * this can also get used for regression testing 10 */ 11 12 #include <linux/delay.h> 13 #include <linux/kernel.h> 14 #include <linux/ktime.h> 15 #include <linux/list.h> 16 #include <linux/list_sort.h> 17 #include <linux/mod_devicetable.h> 18 #include <linux/module.h> 19 #include <linux/printk.h> 20 #include <linux/vmalloc.h> 21 #include <linux/spi/spi.h> 22 23 #include "spi-test.h" 24 25 /* flag to only simulate transfers */ 26 static int simulate_only; 27 module_param(simulate_only, int, 0); 28 MODULE_PARM_DESC(simulate_only, "if not 0 do not execute the spi message"); 29 30 /* dump spi messages */ 31 static int dump_messages; 32 module_param(dump_messages, int, 0); 33 MODULE_PARM_DESC(dump_messages, 34 "=1 dump the basic spi_message_structure, " \ 35 "=2 dump the spi_message_structure including data, " \ 36 "=3 dump the spi_message structure before and after execution"); 37 /* the device is jumpered for loopback - enabling some rx_buf tests */ 38 static int loopback; 39 module_param(loopback, int, 0); 40 MODULE_PARM_DESC(loopback, 41 "if set enable loopback mode, where the rx_buf " \ 42 "is checked to match tx_buf after the spi_message " \ 43 "is executed"); 44 45 static int loop_req; 46 module_param(loop_req, int, 0); 47 MODULE_PARM_DESC(loop_req, 48 "if set controller will be asked to enable test loop mode. " \ 49 "If controller supported it, MISO and MOSI will be connected"); 50 51 static int no_cs; 52 module_param(no_cs, int, 0); 53 MODULE_PARM_DESC(no_cs, 54 "if set Chip Select (CS) will not be used"); 55 56 /* run tests only for a specific length */ 57 static int run_only_iter_len = -1; 58 module_param(run_only_iter_len, int, 0); 59 MODULE_PARM_DESC(run_only_iter_len, 60 "only run tests for a length of this number in iterate_len list"); 61 62 /* run only a specific test */ 63 static int run_only_test = -1; 64 module_param(run_only_test, int, 0); 65 MODULE_PARM_DESC(run_only_test, 66 "only run the test with this number (0-based !)"); 67 68 /* use vmalloc'ed buffers */ 69 static int use_vmalloc; 70 module_param(use_vmalloc, int, 0644); 71 MODULE_PARM_DESC(use_vmalloc, 72 "use vmalloc'ed buffers instead of kmalloc'ed"); 73 74 /* check rx ranges */ 75 static int check_ranges = 1; 76 module_param(check_ranges, int, 0644); 77 MODULE_PARM_DESC(check_ranges, 78 "checks rx_buffer pattern are valid"); 79 80 static unsigned int delay_ms = 100; 81 module_param(delay_ms, uint, 0644); 82 MODULE_PARM_DESC(delay_ms, 83 "delay between tests, in milliseconds (default: 100)"); 84 85 /* the actual tests to execute */ 86 static struct spi_test spi_tests[] = { 87 { 88 .description = "tx/rx-transfer - start of page", 89 .fill_option = FILL_COUNT_8, 90 .iterate_len = { ITERATE_MAX_LEN }, 91 .iterate_tx_align = ITERATE_ALIGN, 92 .iterate_rx_align = ITERATE_ALIGN, 93 .transfer_count = 1, 94 .transfers = { 95 { 96 .tx_buf = TX(0), 97 .rx_buf = RX(0), 98 }, 99 }, 100 }, 101 { 102 .description = "tx/rx-transfer - crossing PAGE_SIZE", 103 .fill_option = FILL_COUNT_8, 104 .iterate_len = { ITERATE_LEN }, 105 .iterate_tx_align = ITERATE_ALIGN, 106 .iterate_rx_align = ITERATE_ALIGN, 107 .transfer_count = 1, 108 .transfers = { 109 { 110 .tx_buf = TX(PAGE_SIZE - 4), 111 .rx_buf = RX(PAGE_SIZE - 4), 112 }, 113 }, 114 }, 115 { 116 .description = "tx-transfer - only", 117 .fill_option = FILL_COUNT_8, 118 .iterate_len = { ITERATE_MAX_LEN }, 119 .iterate_tx_align = ITERATE_ALIGN, 120 .transfer_count = 1, 121 .transfers = { 122 { 123 .tx_buf = TX(0), 124 }, 125 }, 126 }, 127 { 128 .description = "rx-transfer - only", 129 .fill_option = FILL_COUNT_8, 130 .iterate_len = { ITERATE_MAX_LEN }, 131 .iterate_rx_align = ITERATE_ALIGN, 132 .transfer_count = 1, 133 .transfers = { 134 { 135 .rx_buf = RX(0), 136 }, 137 }, 138 }, 139 { 140 .description = "two tx-transfers - alter both", 141 .fill_option = FILL_COUNT_8, 142 .iterate_len = { ITERATE_LEN }, 143 .iterate_tx_align = ITERATE_ALIGN, 144 .iterate_transfer_mask = BIT(0) | BIT(1), 145 .transfer_count = 2, 146 .transfers = { 147 { 148 .tx_buf = TX(0), 149 }, 150 { 151 /* this is why we cant use ITERATE_MAX_LEN */ 152 .tx_buf = TX(SPI_TEST_MAX_SIZE_HALF), 153 }, 154 }, 155 }, 156 { 157 .description = "two tx-transfers - alter first", 158 .fill_option = FILL_COUNT_8, 159 .iterate_len = { ITERATE_MAX_LEN }, 160 .iterate_tx_align = ITERATE_ALIGN, 161 .iterate_transfer_mask = BIT(0), 162 .transfer_count = 2, 163 .transfers = { 164 { 165 .tx_buf = TX(64), 166 }, 167 { 168 .len = 1, 169 .tx_buf = TX(0), 170 }, 171 }, 172 }, 173 { 174 .description = "two tx-transfers - alter second", 175 .fill_option = FILL_COUNT_8, 176 .iterate_len = { ITERATE_MAX_LEN }, 177 .iterate_tx_align = ITERATE_ALIGN, 178 .iterate_transfer_mask = BIT(1), 179 .transfer_count = 2, 180 .transfers = { 181 { 182 .len = 16, 183 .tx_buf = TX(0), 184 }, 185 { 186 .tx_buf = TX(64), 187 }, 188 }, 189 }, 190 { 191 .description = "two transfers tx then rx - alter both", 192 .fill_option = FILL_COUNT_8, 193 .iterate_len = { ITERATE_MAX_LEN }, 194 .iterate_tx_align = ITERATE_ALIGN, 195 .iterate_transfer_mask = BIT(0) | BIT(1), 196 .transfer_count = 2, 197 .transfers = { 198 { 199 .tx_buf = TX(0), 200 }, 201 { 202 .rx_buf = RX(0), 203 }, 204 }, 205 }, 206 { 207 .description = "two transfers tx then rx - alter tx", 208 .fill_option = FILL_COUNT_8, 209 .iterate_len = { ITERATE_MAX_LEN }, 210 .iterate_tx_align = ITERATE_ALIGN, 211 .iterate_transfer_mask = BIT(0), 212 .transfer_count = 2, 213 .transfers = { 214 { 215 .tx_buf = TX(0), 216 }, 217 { 218 .len = 1, 219 .rx_buf = RX(0), 220 }, 221 }, 222 }, 223 { 224 .description = "two transfers tx then rx - alter rx", 225 .fill_option = FILL_COUNT_8, 226 .iterate_len = { ITERATE_MAX_LEN }, 227 .iterate_tx_align = ITERATE_ALIGN, 228 .iterate_transfer_mask = BIT(1), 229 .transfer_count = 2, 230 .transfers = { 231 { 232 .len = 1, 233 .tx_buf = TX(0), 234 }, 235 { 236 .rx_buf = RX(0), 237 }, 238 }, 239 }, 240 { 241 .description = "two tx+rx transfers - alter both", 242 .fill_option = FILL_COUNT_8, 243 .iterate_len = { ITERATE_LEN }, 244 .iterate_tx_align = ITERATE_ALIGN, 245 .iterate_transfer_mask = BIT(0) | BIT(1), 246 .transfer_count = 2, 247 .transfers = { 248 { 249 .tx_buf = TX(0), 250 .rx_buf = RX(0), 251 }, 252 { 253 /* making sure we align without overwrite 254 * the reason we can not use ITERATE_MAX_LEN 255 */ 256 .tx_buf = TX(SPI_TEST_MAX_SIZE_HALF), 257 .rx_buf = RX(SPI_TEST_MAX_SIZE_HALF), 258 }, 259 }, 260 }, 261 { 262 .description = "two tx+rx transfers - alter first", 263 .fill_option = FILL_COUNT_8, 264 .iterate_len = { ITERATE_MAX_LEN }, 265 .iterate_tx_align = ITERATE_ALIGN, 266 .iterate_transfer_mask = BIT(0), 267 .transfer_count = 2, 268 .transfers = { 269 { 270 /* making sure we align without overwrite */ 271 .tx_buf = TX(1024), 272 .rx_buf = RX(1024), 273 }, 274 { 275 .len = 1, 276 /* making sure we align without overwrite */ 277 .tx_buf = TX(0), 278 .rx_buf = RX(0), 279 }, 280 }, 281 }, 282 { 283 .description = "two tx+rx transfers - alter second", 284 .fill_option = FILL_COUNT_8, 285 .iterate_len = { ITERATE_MAX_LEN }, 286 .iterate_tx_align = ITERATE_ALIGN, 287 .iterate_transfer_mask = BIT(1), 288 .transfer_count = 2, 289 .transfers = { 290 { 291 .len = 1, 292 .tx_buf = TX(0), 293 .rx_buf = RX(0), 294 }, 295 { 296 /* making sure we align without overwrite */ 297 .tx_buf = TX(1024), 298 .rx_buf = RX(1024), 299 }, 300 }, 301 }, 302 { 303 .description = "two tx+rx transfers - delay after transfer", 304 .fill_option = FILL_COUNT_8, 305 .iterate_len = { ITERATE_MAX_LEN }, 306 .iterate_transfer_mask = BIT(0) | BIT(1), 307 .transfer_count = 2, 308 .transfers = { 309 { 310 .tx_buf = TX(0), 311 .rx_buf = RX(0), 312 .delay = { 313 .value = 1000, 314 .unit = SPI_DELAY_UNIT_USECS, 315 }, 316 }, 317 { 318 .tx_buf = TX(0), 319 .rx_buf = RX(0), 320 .delay = { 321 .value = 1000, 322 .unit = SPI_DELAY_UNIT_USECS, 323 }, 324 }, 325 }, 326 }, 327 { 328 .description = "three tx+rx transfers with overlapping cache lines", 329 .fill_option = FILL_COUNT_8, 330 /* 331 * This should be large enough for the controller driver to 332 * choose to transfer it with DMA. 333 */ 334 .iterate_len = { 512, -1 }, 335 .iterate_transfer_mask = BIT(1), 336 .transfer_count = 3, 337 .transfers = { 338 { 339 .len = 1, 340 .tx_buf = TX(0), 341 .rx_buf = RX(0), 342 }, 343 { 344 .tx_buf = TX(1), 345 .rx_buf = RX(1), 346 }, 347 { 348 .len = 1, 349 .tx_buf = TX(513), 350 .rx_buf = RX(513), 351 }, 352 }, 353 }, 354 355 { /* end of tests sequence */ } 356 }; 357 358 static int spi_loopback_test_probe(struct spi_device *spi) 359 { 360 int ret; 361 362 if (loop_req || no_cs) { 363 spi->mode |= loop_req ? SPI_LOOP : 0; 364 spi->mode |= no_cs ? SPI_NO_CS : 0; 365 ret = spi_setup(spi); 366 if (ret) { 367 dev_err(&spi->dev, "SPI setup with SPI_LOOP or SPI_NO_CS failed (%d)\n", 368 ret); 369 return ret; 370 } 371 } 372 373 dev_info(&spi->dev, "Executing spi-loopback-tests\n"); 374 375 ret = spi_test_run_tests(spi, spi_tests); 376 377 dev_info(&spi->dev, "Finished spi-loopback-tests with return: %i\n", 378 ret); 379 380 return ret; 381 } 382 383 /* non const match table to permit to change via a module parameter */ 384 static struct of_device_id spi_loopback_test_of_match[] = { 385 { .compatible = "linux,spi-loopback-test", }, 386 { } 387 }; 388 389 /* allow to override the compatible string via a module_parameter */ 390 module_param_string(compatible, spi_loopback_test_of_match[0].compatible, 391 sizeof(spi_loopback_test_of_match[0].compatible), 392 0000); 393 394 MODULE_DEVICE_TABLE(of, spi_loopback_test_of_match); 395 396 static struct spi_driver spi_loopback_test_driver = { 397 .driver = { 398 .name = "spi-loopback-test", 399 .owner = THIS_MODULE, 400 .of_match_table = spi_loopback_test_of_match, 401 }, 402 .probe = spi_loopback_test_probe, 403 }; 404 405 module_spi_driver(spi_loopback_test_driver); 406 407 MODULE_AUTHOR("Martin Sperl <kernel@martin.sperl.org>"); 408 MODULE_DESCRIPTION("test spi_driver to check core functionality"); 409 MODULE_LICENSE("GPL"); 410 411 /*-------------------------------------------------------------------------*/ 412 413 /* spi_test implementation */ 414 415 #define RANGE_CHECK(ptr, plen, start, slen) \ 416 ((ptr >= start) && (ptr + plen <= start + slen)) 417 418 /* we allocate one page more, to allow for offsets */ 419 #define SPI_TEST_MAX_SIZE_PLUS (SPI_TEST_MAX_SIZE + PAGE_SIZE) 420 421 static void spi_test_print_hex_dump(char *pre, const void *ptr, size_t len) 422 { 423 /* limit the hex_dump */ 424 if (len < 1024) { 425 print_hex_dump(KERN_INFO, pre, 426 DUMP_PREFIX_OFFSET, 16, 1, 427 ptr, len, 0); 428 return; 429 } 430 /* print head */ 431 print_hex_dump(KERN_INFO, pre, 432 DUMP_PREFIX_OFFSET, 16, 1, 433 ptr, 512, 0); 434 /* print tail */ 435 pr_info("%s truncated - continuing at offset %04zx\n", 436 pre, len - 512); 437 print_hex_dump(KERN_INFO, pre, 438 DUMP_PREFIX_OFFSET, 16, 1, 439 ptr + (len - 512), 512, 0); 440 } 441 442 static void spi_test_dump_message(struct spi_device *spi, 443 struct spi_message *msg, 444 bool dump_data) 445 { 446 struct spi_transfer *xfer; 447 int i; 448 u8 b; 449 450 dev_info(&spi->dev, " spi_msg@%pK\n", msg); 451 if (msg->status) 452 dev_info(&spi->dev, " status: %i\n", 453 msg->status); 454 dev_info(&spi->dev, " frame_length: %i\n", 455 msg->frame_length); 456 dev_info(&spi->dev, " actual_length: %i\n", 457 msg->actual_length); 458 459 list_for_each_entry(xfer, &msg->transfers, transfer_list) { 460 dev_info(&spi->dev, " spi_transfer@%pK\n", xfer); 461 dev_info(&spi->dev, " len: %i\n", xfer->len); 462 dev_info(&spi->dev, " tx_buf: %pK\n", xfer->tx_buf); 463 if (dump_data && xfer->tx_buf) 464 spi_test_print_hex_dump(" TX: ", 465 xfer->tx_buf, 466 xfer->len); 467 468 dev_info(&spi->dev, " rx_buf: %pK\n", xfer->rx_buf); 469 if (dump_data && xfer->rx_buf) 470 spi_test_print_hex_dump(" RX: ", 471 xfer->rx_buf, 472 xfer->len); 473 /* check for unwritten test pattern on rx_buf */ 474 if (xfer->rx_buf) { 475 for (i = 0 ; i < xfer->len ; i++) { 476 b = ((u8 *)xfer->rx_buf)[xfer->len - 1 - i]; 477 if (b != SPI_TEST_PATTERN_UNWRITTEN) 478 break; 479 } 480 if (i) 481 dev_info(&spi->dev, 482 " rx_buf filled with %02x starts at offset: %i\n", 483 SPI_TEST_PATTERN_UNWRITTEN, 484 xfer->len - i); 485 } 486 } 487 } 488 489 struct rx_ranges { 490 struct list_head list; 491 u8 *start; 492 u8 *end; 493 }; 494 495 static int rx_ranges_cmp(void *priv, const struct list_head *a, 496 const struct list_head *b) 497 { 498 struct rx_ranges *rx_a = list_entry(a, struct rx_ranges, list); 499 struct rx_ranges *rx_b = list_entry(b, struct rx_ranges, list); 500 501 if (rx_a->start > rx_b->start) 502 return 1; 503 if (rx_a->start < rx_b->start) 504 return -1; 505 return 0; 506 } 507 508 static int spi_check_rx_ranges(struct spi_device *spi, 509 struct spi_message *msg, 510 void *rx) 511 { 512 struct spi_transfer *xfer; 513 struct rx_ranges ranges[SPI_TEST_MAX_TRANSFERS], *r; 514 int i = 0; 515 LIST_HEAD(ranges_list); 516 u8 *addr; 517 int ret = 0; 518 519 /* loop over all transfers to fill in the rx_ranges */ 520 list_for_each_entry(xfer, &msg->transfers, transfer_list) { 521 /* if there is no rx, then no check is needed */ 522 if (!xfer->rx_buf) 523 continue; 524 /* fill in the rx_range */ 525 if (RANGE_CHECK(xfer->rx_buf, xfer->len, 526 rx, SPI_TEST_MAX_SIZE_PLUS)) { 527 ranges[i].start = xfer->rx_buf; 528 ranges[i].end = xfer->rx_buf + xfer->len; 529 list_add(&ranges[i].list, &ranges_list); 530 i++; 531 } 532 } 533 534 /* if no ranges, then we can return and avoid the checks...*/ 535 if (!i) 536 return 0; 537 538 /* sort the list */ 539 list_sort(NULL, &ranges_list, rx_ranges_cmp); 540 541 /* and iterate over all the rx addresses */ 542 for (addr = rx; addr < (u8 *)rx + SPI_TEST_MAX_SIZE_PLUS; addr++) { 543 /* if we are the DO not write pattern, 544 * then continue with the loop... 545 */ 546 if (*addr == SPI_TEST_PATTERN_DO_NOT_WRITE) 547 continue; 548 549 /* check if we are inside a range */ 550 list_for_each_entry(r, &ranges_list, list) { 551 /* if so then set to end... */ 552 if ((addr >= r->start) && (addr < r->end)) 553 addr = r->end; 554 } 555 /* second test after a (hopefull) translation */ 556 if (*addr == SPI_TEST_PATTERN_DO_NOT_WRITE) 557 continue; 558 559 /* if still not found then something has modified too much */ 560 /* we could list the "closest" transfer here... */ 561 dev_err(&spi->dev, 562 "loopback strangeness - rx changed outside of allowed range at: %pK\n", 563 addr); 564 /* do not return, only set ret, 565 * so that we list all addresses 566 */ 567 ret = -ERANGE; 568 } 569 570 return ret; 571 } 572 573 static int spi_test_check_elapsed_time(struct spi_device *spi, 574 struct spi_test *test) 575 { 576 int i; 577 unsigned long long estimated_time = 0; 578 unsigned long long delay_usecs = 0; 579 580 for (i = 0; i < test->transfer_count; i++) { 581 struct spi_transfer *xfer = test->transfers + i; 582 unsigned long long nbits = (unsigned long long)BITS_PER_BYTE * 583 xfer->len; 584 585 delay_usecs += xfer->delay.value; 586 if (!xfer->speed_hz) 587 continue; 588 estimated_time += div_u64(nbits * NSEC_PER_SEC, xfer->speed_hz); 589 } 590 591 estimated_time += delay_usecs * NSEC_PER_USEC; 592 if (test->elapsed_time < estimated_time) { 593 dev_err(&spi->dev, 594 "elapsed time %lld ns is shorter than minimum estimated time %lld ns\n", 595 test->elapsed_time, estimated_time); 596 597 return -EINVAL; 598 } 599 600 return 0; 601 } 602 603 static int spi_test_check_loopback_result(struct spi_device *spi, 604 struct spi_message *msg, 605 void *tx, void *rx) 606 { 607 struct spi_transfer *xfer; 608 u8 rxb, txb; 609 size_t i; 610 int ret; 611 612 /* checks rx_buffer pattern are valid with loopback or without */ 613 if (check_ranges) { 614 ret = spi_check_rx_ranges(spi, msg, rx); 615 if (ret) 616 return ret; 617 } 618 619 /* if we run without loopback, then return now */ 620 if (!loopback) 621 return 0; 622 623 /* if applicable to transfer check that rx_buf is equal to tx_buf */ 624 list_for_each_entry(xfer, &msg->transfers, transfer_list) { 625 /* if there is no rx, then no check is needed */ 626 if (!xfer->len || !xfer->rx_buf) 627 continue; 628 /* so depending on tx_buf we need to handle things */ 629 if (xfer->tx_buf) { 630 for (i = 0; i < xfer->len; i++) { 631 txb = ((u8 *)xfer->tx_buf)[i]; 632 rxb = ((u8 *)xfer->rx_buf)[i]; 633 if (txb != rxb) 634 goto mismatch_error; 635 } 636 } else { 637 /* first byte received */ 638 txb = ((u8 *)xfer->rx_buf)[0]; 639 /* first byte may be 0 or xff */ 640 if (!((txb == 0) || (txb == 0xff))) { 641 dev_err(&spi->dev, 642 "loopback strangeness - we expect 0x00 or 0xff, but not 0x%02x\n", 643 txb); 644 return -EINVAL; 645 } 646 /* check that all bytes are identical */ 647 for (i = 1; i < xfer->len; i++) { 648 rxb = ((u8 *)xfer->rx_buf)[i]; 649 if (rxb != txb) 650 goto mismatch_error; 651 } 652 } 653 } 654 655 return 0; 656 657 mismatch_error: 658 dev_err(&spi->dev, 659 "loopback strangeness - transfer mismatch on byte %04zx - expected 0x%02x, but got 0x%02x\n", 660 i, txb, rxb); 661 662 return -EINVAL; 663 } 664 665 static int spi_test_translate(struct spi_device *spi, 666 void **ptr, size_t len, 667 void *tx, void *rx) 668 { 669 size_t off; 670 671 /* return on null */ 672 if (!*ptr) 673 return 0; 674 675 /* in the MAX_SIZE_HALF case modify the pointer */ 676 if (((size_t)*ptr) & SPI_TEST_MAX_SIZE_HALF) 677 /* move the pointer to the correct range */ 678 *ptr += (SPI_TEST_MAX_SIZE_PLUS / 2) - 679 SPI_TEST_MAX_SIZE_HALF; 680 681 /* RX range 682 * - we check against MAX_SIZE_PLUS to allow for automated alignment 683 */ 684 if (RANGE_CHECK(*ptr, len, RX(0), SPI_TEST_MAX_SIZE_PLUS)) { 685 off = *ptr - RX(0); 686 *ptr = rx + off; 687 688 return 0; 689 } 690 691 /* TX range */ 692 if (RANGE_CHECK(*ptr, len, TX(0), SPI_TEST_MAX_SIZE_PLUS)) { 693 off = *ptr - TX(0); 694 *ptr = tx + off; 695 696 return 0; 697 } 698 699 dev_err(&spi->dev, 700 "PointerRange [%pK:%pK[ not in range [%pK:%pK[ or [%pK:%pK[\n", 701 *ptr, *ptr + len, 702 RX(0), RX(SPI_TEST_MAX_SIZE), 703 TX(0), TX(SPI_TEST_MAX_SIZE)); 704 705 return -EINVAL; 706 } 707 708 static int spi_test_fill_pattern(struct spi_device *spi, 709 struct spi_test *test) 710 { 711 struct spi_transfer *xfers = test->transfers; 712 u8 *tx_buf; 713 size_t count = 0; 714 int i, j; 715 716 #ifdef __BIG_ENDIAN 717 #define GET_VALUE_BYTE(value, index, bytes) \ 718 (value >> (8 * (bytes - 1 - count % bytes))) 719 #else 720 #define GET_VALUE_BYTE(value, index, bytes) \ 721 (value >> (8 * (count % bytes))) 722 #endif 723 724 /* fill all transfers with the pattern requested */ 725 for (i = 0; i < test->transfer_count; i++) { 726 /* fill rx_buf with SPI_TEST_PATTERN_UNWRITTEN */ 727 if (xfers[i].rx_buf) 728 memset(xfers[i].rx_buf, SPI_TEST_PATTERN_UNWRITTEN, 729 xfers[i].len); 730 /* if tx_buf is NULL then skip */ 731 tx_buf = (u8 *)xfers[i].tx_buf; 732 if (!tx_buf) 733 continue; 734 /* modify all the transfers */ 735 for (j = 0; j < xfers[i].len; j++, tx_buf++, count++) { 736 /* fill tx */ 737 switch (test->fill_option) { 738 case FILL_MEMSET_8: 739 *tx_buf = test->fill_pattern; 740 break; 741 case FILL_MEMSET_16: 742 *tx_buf = GET_VALUE_BYTE(test->fill_pattern, 743 count, 2); 744 break; 745 case FILL_MEMSET_24: 746 *tx_buf = GET_VALUE_BYTE(test->fill_pattern, 747 count, 3); 748 break; 749 case FILL_MEMSET_32: 750 *tx_buf = GET_VALUE_BYTE(test->fill_pattern, 751 count, 4); 752 break; 753 case FILL_COUNT_8: 754 *tx_buf = count; 755 break; 756 case FILL_COUNT_16: 757 *tx_buf = GET_VALUE_BYTE(count, count, 2); 758 break; 759 case FILL_COUNT_24: 760 *tx_buf = GET_VALUE_BYTE(count, count, 3); 761 break; 762 case FILL_COUNT_32: 763 *tx_buf = GET_VALUE_BYTE(count, count, 4); 764 break; 765 case FILL_TRANSFER_BYTE_8: 766 *tx_buf = j; 767 break; 768 case FILL_TRANSFER_BYTE_16: 769 *tx_buf = GET_VALUE_BYTE(j, j, 2); 770 break; 771 case FILL_TRANSFER_BYTE_24: 772 *tx_buf = GET_VALUE_BYTE(j, j, 3); 773 break; 774 case FILL_TRANSFER_BYTE_32: 775 *tx_buf = GET_VALUE_BYTE(j, j, 4); 776 break; 777 case FILL_TRANSFER_NUM: 778 *tx_buf = i; 779 break; 780 default: 781 dev_err(&spi->dev, 782 "unsupported fill_option: %i\n", 783 test->fill_option); 784 return -EINVAL; 785 } 786 } 787 } 788 789 return 0; 790 } 791 792 static int _spi_test_run_iter(struct spi_device *spi, 793 struct spi_test *test, 794 void *tx, void *rx) 795 { 796 struct spi_message *msg = &test->msg; 797 struct spi_transfer *x; 798 int i, ret; 799 800 /* initialize message - zero-filled via static initialization */ 801 spi_message_init_no_memset(msg); 802 803 /* fill rx with the DO_NOT_WRITE pattern */ 804 memset(rx, SPI_TEST_PATTERN_DO_NOT_WRITE, SPI_TEST_MAX_SIZE_PLUS); 805 806 /* add the individual transfers */ 807 for (i = 0; i < test->transfer_count; i++) { 808 x = &test->transfers[i]; 809 810 /* patch the values of tx_buf */ 811 ret = spi_test_translate(spi, (void **)&x->tx_buf, x->len, 812 (void *)tx, rx); 813 if (ret) 814 return ret; 815 816 /* patch the values of rx_buf */ 817 ret = spi_test_translate(spi, &x->rx_buf, x->len, 818 (void *)tx, rx); 819 if (ret) 820 return ret; 821 822 /* and add it to the list */ 823 spi_message_add_tail(x, msg); 824 } 825 826 /* fill in the transfer buffers with pattern */ 827 ret = spi_test_fill_pattern(spi, test); 828 if (ret) 829 return ret; 830 831 /* and execute */ 832 if (test->execute_msg) 833 ret = test->execute_msg(spi, test, tx, rx); 834 else 835 ret = spi_test_execute_msg(spi, test, tx, rx); 836 837 /* handle result */ 838 if (ret == test->expected_return) 839 return 0; 840 841 dev_err(&spi->dev, 842 "test failed - test returned %i, but we expect %i\n", 843 ret, test->expected_return); 844 845 if (ret) 846 return ret; 847 848 /* if it is 0, as we expected something else, 849 * then return something special 850 */ 851 return -EFAULT; 852 } 853 854 static int spi_test_run_iter(struct spi_device *spi, 855 const struct spi_test *testtemplate, 856 void *tx, void *rx, 857 size_t len, 858 size_t tx_off, 859 size_t rx_off 860 ) 861 { 862 struct spi_test test; 863 int i, tx_count, rx_count; 864 865 /* copy the test template to test */ 866 memcpy(&test, testtemplate, sizeof(test)); 867 868 /* if iterate_transfer_mask is not set, 869 * then set it to first transfer only 870 */ 871 if (!(test.iterate_transfer_mask & (BIT(test.transfer_count) - 1))) 872 test.iterate_transfer_mask = 1; 873 874 /* count number of transfers with tx/rx_buf != NULL */ 875 rx_count = tx_count = 0; 876 for (i = 0; i < test.transfer_count; i++) { 877 if (test.transfers[i].tx_buf) 878 tx_count++; 879 if (test.transfers[i].rx_buf) 880 rx_count++; 881 } 882 883 /* in some iteration cases warn and exit early, 884 * as there is nothing to do, that has not been tested already... 885 */ 886 if (tx_off && (!tx_count)) { 887 dev_warn_once(&spi->dev, 888 "%s: iterate_tx_off configured with tx_buf==NULL - ignoring\n", 889 test.description); 890 return 0; 891 } 892 if (rx_off && (!rx_count)) { 893 dev_warn_once(&spi->dev, 894 "%s: iterate_rx_off configured with rx_buf==NULL - ignoring\n", 895 test.description); 896 return 0; 897 } 898 899 /* write out info */ 900 if (!(len || tx_off || rx_off)) { 901 dev_info(&spi->dev, "Running test %s\n", test.description); 902 } else { 903 dev_info(&spi->dev, 904 " with iteration values: len = %zu, tx_off = %zu, rx_off = %zu\n", 905 len, tx_off, rx_off); 906 } 907 908 /* update in the values from iteration values */ 909 for (i = 0; i < test.transfer_count; i++) { 910 /* only when bit in transfer mask is set */ 911 if (!(test.iterate_transfer_mask & BIT(i))) 912 continue; 913 test.transfers[i].len = len; 914 if (test.transfers[i].tx_buf) 915 test.transfers[i].tx_buf += tx_off; 916 if (test.transfers[i].rx_buf) 917 test.transfers[i].rx_buf += rx_off; 918 } 919 920 /* and execute */ 921 return _spi_test_run_iter(spi, &test, tx, rx); 922 } 923 924 /** 925 * spi_test_execute_msg - default implementation to run a test 926 * 927 * @spi: @spi_device on which to run the @spi_message 928 * @test: the test to execute, which already contains @msg 929 * @tx: the tx buffer allocated for the test sequence 930 * @rx: the rx buffer allocated for the test sequence 931 * 932 * Returns: error code of spi_sync as well as basic error checking 933 */ 934 int spi_test_execute_msg(struct spi_device *spi, struct spi_test *test, 935 void *tx, void *rx) 936 { 937 struct spi_message *msg = &test->msg; 938 int ret = 0; 939 int i; 940 941 /* only if we do not simulate */ 942 if (!simulate_only) { 943 ktime_t start; 944 945 /* dump the complete message before and after the transfer */ 946 if (dump_messages == 3) 947 spi_test_dump_message(spi, msg, true); 948 949 start = ktime_get(); 950 /* run spi message */ 951 ret = spi_sync(spi, msg); 952 test->elapsed_time = ktime_to_ns(ktime_sub(ktime_get(), start)); 953 if (ret == -ETIMEDOUT) { 954 dev_info(&spi->dev, 955 "spi-message timed out - rerunning...\n"); 956 /* rerun after a few explicit schedules */ 957 for (i = 0; i < 16; i++) 958 schedule(); 959 ret = spi_sync(spi, msg); 960 } 961 if (ret) { 962 dev_err(&spi->dev, 963 "Failed to execute spi_message: %i\n", 964 ret); 965 goto exit; 966 } 967 968 /* do some extra error checks */ 969 if (msg->frame_length != msg->actual_length) { 970 dev_err(&spi->dev, 971 "actual length differs from expected\n"); 972 ret = -EIO; 973 goto exit; 974 } 975 976 /* run rx-buffer tests */ 977 ret = spi_test_check_loopback_result(spi, msg, tx, rx); 978 if (ret) 979 goto exit; 980 981 ret = spi_test_check_elapsed_time(spi, test); 982 } 983 984 /* if requested or on error dump message (including data) */ 985 exit: 986 if (dump_messages || ret) 987 spi_test_dump_message(spi, msg, 988 (dump_messages >= 2) || (ret)); 989 990 return ret; 991 } 992 EXPORT_SYMBOL_GPL(spi_test_execute_msg); 993 994 /** 995 * spi_test_run_test - run an individual spi_test 996 * including all the relevant iterations on: 997 * length and buffer alignment 998 * 999 * @spi: the spi_device to send the messages to 1000 * @test: the test which we need to execute 1001 * @tx: the tx buffer allocated for the test sequence 1002 * @rx: the rx buffer allocated for the test sequence 1003 * 1004 * Returns: status code of spi_sync or other failures 1005 */ 1006 1007 int spi_test_run_test(struct spi_device *spi, const struct spi_test *test, 1008 void *tx, void *rx) 1009 { 1010 int idx_len; 1011 size_t len; 1012 size_t tx_align, rx_align; 1013 int ret; 1014 1015 /* test for transfer limits */ 1016 if (test->transfer_count >= SPI_TEST_MAX_TRANSFERS) { 1017 dev_err(&spi->dev, 1018 "%s: Exceeded max number of transfers with %i\n", 1019 test->description, test->transfer_count); 1020 return -E2BIG; 1021 } 1022 1023 /* setting up some values in spi_message 1024 * based on some settings in spi_master 1025 * some of this can also get done in the run() method 1026 */ 1027 1028 /* iterate over all the iterable values using macros 1029 * (to make it a bit more readable... 1030 */ 1031 #define FOR_EACH_ALIGNMENT(var) \ 1032 for (var = 0; \ 1033 var < (test->iterate_##var ? \ 1034 (spi->master->dma_alignment ? \ 1035 spi->master->dma_alignment : \ 1036 test->iterate_##var) : \ 1037 1); \ 1038 var++) 1039 1040 for (idx_len = 0; idx_len < SPI_TEST_MAX_ITERATE && 1041 (len = test->iterate_len[idx_len]) != -1; idx_len++) { 1042 if ((run_only_iter_len > -1) && len != run_only_iter_len) 1043 continue; 1044 FOR_EACH_ALIGNMENT(tx_align) { 1045 FOR_EACH_ALIGNMENT(rx_align) { 1046 /* and run the iteration */ 1047 ret = spi_test_run_iter(spi, test, 1048 tx, rx, 1049 len, 1050 tx_align, 1051 rx_align); 1052 if (ret) 1053 return ret; 1054 } 1055 } 1056 } 1057 1058 return 0; 1059 } 1060 EXPORT_SYMBOL_GPL(spi_test_run_test); 1061 1062 /** 1063 * spi_test_run_tests - run an array of spi_messages tests 1064 * @spi: the spi device on which to run the tests 1065 * @tests: NULL-terminated array of @spi_test 1066 * 1067 * Returns: status errors as per @spi_test_run_test() 1068 */ 1069 1070 int spi_test_run_tests(struct spi_device *spi, 1071 struct spi_test *tests) 1072 { 1073 char *rx = NULL, *tx = NULL; 1074 int ret = 0, count = 0; 1075 struct spi_test *test; 1076 1077 /* allocate rx/tx buffers of 128kB size without devm 1078 * in the hope that is on a page boundary 1079 */ 1080 if (use_vmalloc) 1081 rx = vmalloc(SPI_TEST_MAX_SIZE_PLUS); 1082 else 1083 rx = kzalloc(SPI_TEST_MAX_SIZE_PLUS, GFP_KERNEL); 1084 if (!rx) 1085 return -ENOMEM; 1086 1087 1088 if (use_vmalloc) 1089 tx = vmalloc(SPI_TEST_MAX_SIZE_PLUS); 1090 else 1091 tx = kzalloc(SPI_TEST_MAX_SIZE_PLUS, GFP_KERNEL); 1092 if (!tx) { 1093 ret = -ENOMEM; 1094 goto err_tx; 1095 } 1096 1097 /* now run the individual tests in the table */ 1098 for (test = tests, count = 0; test->description[0]; 1099 test++, count++) { 1100 /* only run test if requested */ 1101 if ((run_only_test > -1) && (count != run_only_test)) 1102 continue; 1103 /* run custom implementation */ 1104 if (test->run_test) 1105 ret = test->run_test(spi, test, tx, rx); 1106 else 1107 ret = spi_test_run_test(spi, test, tx, rx); 1108 if (ret) 1109 goto out; 1110 /* add some delays so that we can easily 1111 * detect the individual tests when using a logic analyzer 1112 * we also add scheduling to avoid potential spi_timeouts... 1113 */ 1114 if (delay_ms) 1115 mdelay(delay_ms); 1116 schedule(); 1117 } 1118 1119 out: 1120 kvfree(tx); 1121 err_tx: 1122 kvfree(rx); 1123 return ret; 1124 } 1125 EXPORT_SYMBOL_GPL(spi_test_run_tests); 1126