1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * Copyright 2007-2008 Pierre Ossman 4 */ 5 6 #include <linux/mmc/core.h> 7 #include <linux/mmc/card.h> 8 #include <linux/mmc/host.h> 9 #include <linux/mmc/mmc.h> 10 #include <linux/slab.h> 11 12 #include <linux/scatterlist.h> 13 #include <linux/swap.h> /* For nr_free_buffer_pages() */ 14 #include <linux/list.h> 15 16 #include <linux/debugfs.h> 17 #include <linux/uaccess.h> 18 #include <linux/seq_file.h> 19 #include <linux/module.h> 20 21 #include "core.h" 22 #include "card.h" 23 #include "host.h" 24 #include "bus.h" 25 #include "mmc_ops.h" 26 27 #define RESULT_OK 0 28 #define RESULT_FAIL 1 29 #define RESULT_UNSUP_HOST 2 30 #define RESULT_UNSUP_CARD 3 31 32 #define BUFFER_ORDER 2 33 #define BUFFER_SIZE (PAGE_SIZE << BUFFER_ORDER) 34 35 #define TEST_ALIGN_END 8 36 37 /* 38 * Limit the test area size to the maximum MMC HC erase group size. Note that 39 * the maximum SD allocation unit size is just 4MiB. 40 */ 41 #define TEST_AREA_MAX_SIZE (128 * 1024 * 1024) 42 43 /** 44 * struct mmc_test_pages - pages allocated by 'alloc_pages()'. 45 * @page: first page in the allocation 46 * @order: order of the number of pages allocated 47 */ 48 struct mmc_test_pages { 49 struct page *page; 50 unsigned int order; 51 }; 52 53 /** 54 * struct mmc_test_mem - allocated memory. 55 * @arr: array of allocations 56 * @cnt: number of allocations 57 */ 58 struct mmc_test_mem { 59 struct mmc_test_pages *arr; 60 unsigned int cnt; 61 }; 62 63 /** 64 * struct mmc_test_area - information for performance tests. 65 * @max_sz: test area size (in bytes) 66 * @dev_addr: address on card at which to do performance tests 67 * @max_tfr: maximum transfer size allowed by driver (in bytes) 68 * @max_segs: maximum segments allowed by driver in scatterlist @sg 69 * @max_seg_sz: maximum segment size allowed by driver 70 * @blocks: number of (512 byte) blocks currently mapped by @sg 71 * @sg_len: length of currently mapped scatterlist @sg 72 * @mem: allocated memory 73 * @sg: scatterlist 74 */ 75 struct mmc_test_area { 76 unsigned long max_sz; 77 unsigned int dev_addr; 78 unsigned int max_tfr; 79 unsigned int max_segs; 80 unsigned int max_seg_sz; 81 unsigned int blocks; 82 unsigned int sg_len; 83 struct mmc_test_mem *mem; 84 struct scatterlist *sg; 85 }; 86 87 /** 88 * struct mmc_test_transfer_result - transfer results for performance tests. 89 * @link: double-linked list 90 * @count: amount of group of sectors to check 91 * @sectors: amount of sectors to check in one group 92 * @ts: time values of transfer 93 * @rate: calculated transfer rate 94 * @iops: I/O operations per second (times 100) 95 */ 96 struct mmc_test_transfer_result { 97 struct list_head link; 98 unsigned int count; 99 unsigned int sectors; 100 struct timespec64 ts; 101 unsigned int rate; 102 unsigned int iops; 103 }; 104 105 /** 106 * struct mmc_test_general_result - results for tests. 107 * @link: double-linked list 108 * @card: card under test 109 * @testcase: number of test case 110 * @result: result of test run 111 * @tr_lst: transfer measurements if any as mmc_test_transfer_result 112 */ 113 struct mmc_test_general_result { 114 struct list_head link; 115 struct mmc_card *card; 116 int testcase; 117 int result; 118 struct list_head tr_lst; 119 }; 120 121 /** 122 * struct mmc_test_dbgfs_file - debugfs related file. 123 * @link: double-linked list 124 * @card: card under test 125 * @file: file created under debugfs 126 */ 127 struct mmc_test_dbgfs_file { 128 struct list_head link; 129 struct mmc_card *card; 130 struct dentry *file; 131 }; 132 133 /** 134 * struct mmc_test_card - test information. 135 * @card: card under test 136 * @scratch: transfer buffer 137 * @buffer: transfer buffer 138 * @highmem: buffer for highmem tests 139 * @area: information for performance tests 140 * @gr: pointer to results of current testcase 141 */ 142 struct mmc_test_card { 143 struct mmc_card *card; 144 145 u8 scratch[BUFFER_SIZE]; 146 u8 *buffer; 147 #ifdef CONFIG_HIGHMEM 148 struct page *highmem; 149 #endif 150 struct mmc_test_area area; 151 struct mmc_test_general_result *gr; 152 }; 153 154 enum mmc_test_prep_media { 155 MMC_TEST_PREP_NONE = 0, 156 MMC_TEST_PREP_WRITE_FULL = 1 << 0, 157 MMC_TEST_PREP_ERASE = 1 << 1, 158 }; 159 160 struct mmc_test_multiple_rw { 161 unsigned int *sg_len; 162 unsigned int *bs; 163 unsigned int len; 164 unsigned int size; 165 bool do_write; 166 bool do_nonblock_req; 167 enum mmc_test_prep_media prepare; 168 }; 169 170 /*******************************************************************/ 171 /* General helper functions */ 172 /*******************************************************************/ 173 174 /* 175 * Configure correct block size in card 176 */ 177 static int mmc_test_set_blksize(struct mmc_test_card *test, unsigned size) 178 { 179 return mmc_set_blocklen(test->card, size); 180 } 181 182 static bool mmc_test_card_cmd23(struct mmc_card *card) 183 { 184 return mmc_card_mmc(card) || 185 (mmc_card_sd(card) && card->scr.cmds & SD_SCR_CMD23_SUPPORT); 186 } 187 188 static void mmc_test_prepare_sbc(struct mmc_test_card *test, 189 struct mmc_request *mrq, unsigned int blocks) 190 { 191 struct mmc_card *card = test->card; 192 193 if (!mrq->sbc || !mmc_host_cmd23(card->host) || 194 !mmc_test_card_cmd23(card) || !mmc_op_multi(mrq->cmd->opcode) || 195 (card->quirks & MMC_QUIRK_BLK_NO_CMD23)) { 196 mrq->sbc = NULL; 197 return; 198 } 199 200 mrq->sbc->opcode = MMC_SET_BLOCK_COUNT; 201 mrq->sbc->arg = blocks; 202 mrq->sbc->flags = MMC_RSP_R1 | MMC_CMD_AC; 203 } 204 205 /* 206 * Fill in the mmc_request structure given a set of transfer parameters. 207 */ 208 static void mmc_test_prepare_mrq(struct mmc_test_card *test, 209 struct mmc_request *mrq, struct scatterlist *sg, unsigned sg_len, 210 unsigned dev_addr, unsigned blocks, unsigned blksz, int write) 211 { 212 if (WARN_ON(!mrq || !mrq->cmd || !mrq->data || !mrq->stop)) 213 return; 214 215 if (blocks > 1) { 216 mrq->cmd->opcode = write ? 217 MMC_WRITE_MULTIPLE_BLOCK : MMC_READ_MULTIPLE_BLOCK; 218 } else { 219 mrq->cmd->opcode = write ? 220 MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK; 221 } 222 223 mrq->cmd->arg = dev_addr; 224 if (!mmc_card_blockaddr(test->card)) 225 mrq->cmd->arg <<= 9; 226 227 mrq->cmd->flags = MMC_RSP_R1 | MMC_CMD_ADTC; 228 229 if (blocks == 1) 230 mrq->stop = NULL; 231 else { 232 mrq->stop->opcode = MMC_STOP_TRANSMISSION; 233 mrq->stop->arg = 0; 234 mrq->stop->flags = MMC_RSP_R1B | MMC_CMD_AC; 235 } 236 237 mrq->data->blksz = blksz; 238 mrq->data->blocks = blocks; 239 mrq->data->flags = write ? MMC_DATA_WRITE : MMC_DATA_READ; 240 mrq->data->sg = sg; 241 mrq->data->sg_len = sg_len; 242 243 mmc_test_prepare_sbc(test, mrq, blocks); 244 245 mmc_set_data_timeout(mrq->data, test->card); 246 } 247 248 static int mmc_test_busy(struct mmc_command *cmd) 249 { 250 return !(cmd->resp[0] & R1_READY_FOR_DATA) || 251 (R1_CURRENT_STATE(cmd->resp[0]) == R1_STATE_PRG); 252 } 253 254 /* 255 * Wait for the card to finish the busy state 256 */ 257 static int mmc_test_wait_busy(struct mmc_test_card *test) 258 { 259 int ret, busy; 260 struct mmc_command cmd = {}; 261 262 busy = 0; 263 do { 264 memset(&cmd, 0, sizeof(struct mmc_command)); 265 266 cmd.opcode = MMC_SEND_STATUS; 267 cmd.arg = test->card->rca << 16; 268 cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; 269 270 ret = mmc_wait_for_cmd(test->card->host, &cmd, 0); 271 if (ret) 272 break; 273 274 if (!busy && mmc_test_busy(&cmd)) { 275 busy = 1; 276 if (test->card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) 277 pr_info("%s: Warning: Host did not wait for busy state to end.\n", 278 mmc_hostname(test->card->host)); 279 } 280 } while (mmc_test_busy(&cmd)); 281 282 return ret; 283 } 284 285 /* 286 * Transfer a single sector of kernel addressable data 287 */ 288 static int mmc_test_buffer_transfer(struct mmc_test_card *test, 289 u8 *buffer, unsigned addr, unsigned blksz, int write) 290 { 291 struct mmc_request mrq = {}; 292 struct mmc_command cmd = {}; 293 struct mmc_command stop = {}; 294 struct mmc_data data = {}; 295 296 struct scatterlist sg; 297 298 mrq.cmd = &cmd; 299 mrq.data = &data; 300 mrq.stop = &stop; 301 302 sg_init_one(&sg, buffer, blksz); 303 304 mmc_test_prepare_mrq(test, &mrq, &sg, 1, addr, 1, blksz, write); 305 306 mmc_wait_for_req(test->card->host, &mrq); 307 308 if (cmd.error) 309 return cmd.error; 310 if (data.error) 311 return data.error; 312 313 return mmc_test_wait_busy(test); 314 } 315 316 static void mmc_test_free_mem(struct mmc_test_mem *mem) 317 { 318 if (!mem) 319 return; 320 while (mem->cnt--) 321 __free_pages(mem->arr[mem->cnt].page, 322 mem->arr[mem->cnt].order); 323 kfree(mem->arr); 324 kfree(mem); 325 } 326 327 /* 328 * Allocate a lot of memory, preferably max_sz but at least min_sz. In case 329 * there isn't much memory do not exceed 1/16th total lowmem pages. Also do 330 * not exceed a maximum number of segments and try not to make segments much 331 * bigger than maximum segment size. 332 */ 333 static struct mmc_test_mem *mmc_test_alloc_mem(unsigned long min_sz, 334 unsigned long max_sz, 335 unsigned int max_segs, 336 unsigned int max_seg_sz) 337 { 338 unsigned long max_page_cnt = DIV_ROUND_UP(max_sz, PAGE_SIZE); 339 unsigned long min_page_cnt = DIV_ROUND_UP(min_sz, PAGE_SIZE); 340 unsigned long max_seg_page_cnt = DIV_ROUND_UP(max_seg_sz, PAGE_SIZE); 341 unsigned long page_cnt = 0; 342 unsigned long limit = nr_free_buffer_pages() >> 4; 343 struct mmc_test_mem *mem; 344 345 if (max_page_cnt > limit) 346 max_page_cnt = limit; 347 if (min_page_cnt > max_page_cnt) 348 min_page_cnt = max_page_cnt; 349 350 if (max_seg_page_cnt > max_page_cnt) 351 max_seg_page_cnt = max_page_cnt; 352 353 if (max_segs > max_page_cnt) 354 max_segs = max_page_cnt; 355 356 mem = kzalloc(sizeof(*mem), GFP_KERNEL); 357 if (!mem) 358 return NULL; 359 360 mem->arr = kcalloc(max_segs, sizeof(*mem->arr), GFP_KERNEL); 361 if (!mem->arr) 362 goto out_free; 363 364 while (max_page_cnt) { 365 struct page *page; 366 unsigned int order; 367 gfp_t flags = GFP_KERNEL | GFP_DMA | __GFP_NOWARN | 368 __GFP_NORETRY; 369 370 order = get_order(max_seg_page_cnt << PAGE_SHIFT); 371 while (1) { 372 page = alloc_pages(flags, order); 373 if (page || !order) 374 break; 375 order -= 1; 376 } 377 if (!page) { 378 if (page_cnt < min_page_cnt) 379 goto out_free; 380 break; 381 } 382 mem->arr[mem->cnt].page = page; 383 mem->arr[mem->cnt].order = order; 384 mem->cnt += 1; 385 if (max_page_cnt <= (1UL << order)) 386 break; 387 max_page_cnt -= 1UL << order; 388 page_cnt += 1UL << order; 389 if (mem->cnt >= max_segs) { 390 if (page_cnt < min_page_cnt) 391 goto out_free; 392 break; 393 } 394 } 395 396 return mem; 397 398 out_free: 399 mmc_test_free_mem(mem); 400 return NULL; 401 } 402 403 /* 404 * Map memory into a scatterlist. Optionally allow the same memory to be 405 * mapped more than once. 406 */ 407 static int mmc_test_map_sg(struct mmc_test_mem *mem, unsigned long size, 408 struct scatterlist *sglist, int repeat, 409 unsigned int max_segs, unsigned int max_seg_sz, 410 unsigned int *sg_len, int min_sg_len) 411 { 412 struct scatterlist *sg = NULL; 413 unsigned int i; 414 unsigned long sz = size; 415 416 sg_init_table(sglist, max_segs); 417 if (min_sg_len > max_segs) 418 min_sg_len = max_segs; 419 420 *sg_len = 0; 421 do { 422 for (i = 0; i < mem->cnt; i++) { 423 unsigned long len = PAGE_SIZE << mem->arr[i].order; 424 425 if (min_sg_len && (size / min_sg_len < len)) 426 len = ALIGN(size / min_sg_len, 512); 427 if (len > sz) 428 len = sz; 429 if (len > max_seg_sz) 430 len = max_seg_sz; 431 if (sg) 432 sg = sg_next(sg); 433 else 434 sg = sglist; 435 if (!sg) 436 return -EINVAL; 437 sg_set_page(sg, mem->arr[i].page, len, 0); 438 sz -= len; 439 *sg_len += 1; 440 if (!sz) 441 break; 442 } 443 } while (sz && repeat); 444 445 if (sz) 446 return -EINVAL; 447 448 if (sg) 449 sg_mark_end(sg); 450 451 return 0; 452 } 453 454 /* 455 * Map memory into a scatterlist so that no pages are contiguous. Allow the 456 * same memory to be mapped more than once. 457 */ 458 static int mmc_test_map_sg_max_scatter(struct mmc_test_mem *mem, 459 unsigned long sz, 460 struct scatterlist *sglist, 461 unsigned int max_segs, 462 unsigned int max_seg_sz, 463 unsigned int *sg_len) 464 { 465 struct scatterlist *sg = NULL; 466 unsigned int i = mem->cnt, cnt; 467 unsigned long len; 468 void *base, *addr, *last_addr = NULL; 469 470 sg_init_table(sglist, max_segs); 471 472 *sg_len = 0; 473 while (sz) { 474 base = page_address(mem->arr[--i].page); 475 cnt = 1 << mem->arr[i].order; 476 while (sz && cnt) { 477 addr = base + PAGE_SIZE * --cnt; 478 if (last_addr && last_addr + PAGE_SIZE == addr) 479 continue; 480 last_addr = addr; 481 len = PAGE_SIZE; 482 if (len > max_seg_sz) 483 len = max_seg_sz; 484 if (len > sz) 485 len = sz; 486 if (sg) 487 sg = sg_next(sg); 488 else 489 sg = sglist; 490 if (!sg) 491 return -EINVAL; 492 sg_set_page(sg, virt_to_page(addr), len, 0); 493 sz -= len; 494 *sg_len += 1; 495 } 496 if (i == 0) 497 i = mem->cnt; 498 } 499 500 if (sg) 501 sg_mark_end(sg); 502 503 return 0; 504 } 505 506 /* 507 * Calculate transfer rate in bytes per second. 508 */ 509 static unsigned int mmc_test_rate(uint64_t bytes, struct timespec64 *ts) 510 { 511 uint64_t ns; 512 513 ns = timespec64_to_ns(ts); 514 bytes *= 1000000000; 515 516 while (ns > UINT_MAX) { 517 bytes >>= 1; 518 ns >>= 1; 519 } 520 521 if (!ns) 522 return 0; 523 524 do_div(bytes, (uint32_t)ns); 525 526 return bytes; 527 } 528 529 /* 530 * Save transfer results for future usage 531 */ 532 static void mmc_test_save_transfer_result(struct mmc_test_card *test, 533 unsigned int count, unsigned int sectors, struct timespec64 ts, 534 unsigned int rate, unsigned int iops) 535 { 536 struct mmc_test_transfer_result *tr; 537 538 if (!test->gr) 539 return; 540 541 tr = kmalloc(sizeof(*tr), GFP_KERNEL); 542 if (!tr) 543 return; 544 545 tr->count = count; 546 tr->sectors = sectors; 547 tr->ts = ts; 548 tr->rate = rate; 549 tr->iops = iops; 550 551 list_add_tail(&tr->link, &test->gr->tr_lst); 552 } 553 554 /* 555 * Print the transfer rate. 556 */ 557 static void mmc_test_print_rate(struct mmc_test_card *test, uint64_t bytes, 558 struct timespec64 *ts1, struct timespec64 *ts2) 559 { 560 unsigned int rate, iops, sectors = bytes >> 9; 561 struct timespec64 ts; 562 563 ts = timespec64_sub(*ts2, *ts1); 564 565 rate = mmc_test_rate(bytes, &ts); 566 iops = mmc_test_rate(100, &ts); /* I/O ops per sec x 100 */ 567 568 pr_info("%s: Transfer of %u sectors (%u%s KiB) took %llu.%09u " 569 "seconds (%u kB/s, %u KiB/s, %u.%02u IOPS)\n", 570 mmc_hostname(test->card->host), sectors, sectors >> 1, 571 (sectors & 1 ? ".5" : ""), (u64)ts.tv_sec, 572 (u32)ts.tv_nsec, rate / 1000, rate / 1024, 573 iops / 100, iops % 100); 574 575 mmc_test_save_transfer_result(test, 1, sectors, ts, rate, iops); 576 } 577 578 /* 579 * Print the average transfer rate. 580 */ 581 static void mmc_test_print_avg_rate(struct mmc_test_card *test, uint64_t bytes, 582 unsigned int count, struct timespec64 *ts1, 583 struct timespec64 *ts2) 584 { 585 unsigned int rate, iops, sectors = bytes >> 9; 586 uint64_t tot = bytes * count; 587 struct timespec64 ts; 588 589 ts = timespec64_sub(*ts2, *ts1); 590 591 rate = mmc_test_rate(tot, &ts); 592 iops = mmc_test_rate(count * 100, &ts); /* I/O ops per sec x 100 */ 593 594 pr_info("%s: Transfer of %u x %u sectors (%u x %u%s KiB) took " 595 "%llu.%09u seconds (%u kB/s, %u KiB/s, " 596 "%u.%02u IOPS, sg_len %d)\n", 597 mmc_hostname(test->card->host), count, sectors, count, 598 sectors >> 1, (sectors & 1 ? ".5" : ""), 599 (u64)ts.tv_sec, (u32)ts.tv_nsec, 600 rate / 1000, rate / 1024, iops / 100, iops % 100, 601 test->area.sg_len); 602 603 mmc_test_save_transfer_result(test, count, sectors, ts, rate, iops); 604 } 605 606 /* 607 * Return the card size in sectors. 608 */ 609 static unsigned int mmc_test_capacity(struct mmc_card *card) 610 { 611 if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) 612 return card->ext_csd.sectors; 613 else 614 return card->csd.capacity << (card->csd.read_blkbits - 9); 615 } 616 617 /*******************************************************************/ 618 /* Test preparation and cleanup */ 619 /*******************************************************************/ 620 621 /* 622 * Fill the first couple of sectors of the card with known data 623 * so that bad reads/writes can be detected 624 */ 625 static int __mmc_test_prepare(struct mmc_test_card *test, int write) 626 { 627 int ret, i; 628 629 ret = mmc_test_set_blksize(test, 512); 630 if (ret) 631 return ret; 632 633 if (write) 634 memset(test->buffer, 0xDF, 512); 635 else { 636 for (i = 0; i < 512; i++) 637 test->buffer[i] = i; 638 } 639 640 for (i = 0; i < BUFFER_SIZE / 512; i++) { 641 ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1); 642 if (ret) 643 return ret; 644 } 645 646 return 0; 647 } 648 649 static int mmc_test_prepare_write(struct mmc_test_card *test) 650 { 651 return __mmc_test_prepare(test, 1); 652 } 653 654 static int mmc_test_prepare_read(struct mmc_test_card *test) 655 { 656 return __mmc_test_prepare(test, 0); 657 } 658 659 static int mmc_test_cleanup(struct mmc_test_card *test) 660 { 661 int ret, i; 662 663 ret = mmc_test_set_blksize(test, 512); 664 if (ret) 665 return ret; 666 667 memset(test->buffer, 0, 512); 668 669 for (i = 0; i < BUFFER_SIZE / 512; i++) { 670 ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1); 671 if (ret) 672 return ret; 673 } 674 675 return 0; 676 } 677 678 /*******************************************************************/ 679 /* Test execution helpers */ 680 /*******************************************************************/ 681 682 /* 683 * Modifies the mmc_request to perform the "short transfer" tests 684 */ 685 static void mmc_test_prepare_broken_mrq(struct mmc_test_card *test, 686 struct mmc_request *mrq, int write) 687 { 688 if (WARN_ON(!mrq || !mrq->cmd || !mrq->data)) 689 return; 690 691 if (mrq->data->blocks > 1) { 692 mrq->cmd->opcode = write ? 693 MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK; 694 mrq->stop = NULL; 695 } else { 696 mrq->cmd->opcode = MMC_SEND_STATUS; 697 mrq->cmd->arg = test->card->rca << 16; 698 } 699 } 700 701 /* 702 * Checks that a normal transfer didn't have any errors 703 */ 704 static int mmc_test_check_result(struct mmc_test_card *test, 705 struct mmc_request *mrq) 706 { 707 int ret; 708 709 if (WARN_ON(!mrq || !mrq->cmd || !mrq->data)) 710 return -EINVAL; 711 712 ret = 0; 713 714 if (mrq->sbc && mrq->sbc->error) 715 ret = mrq->sbc->error; 716 if (!ret && mrq->cmd->error) 717 ret = mrq->cmd->error; 718 if (!ret && mrq->data->error) 719 ret = mrq->data->error; 720 if (!ret && mrq->stop && mrq->stop->error) 721 ret = mrq->stop->error; 722 if (!ret && mrq->data->bytes_xfered != 723 mrq->data->blocks * mrq->data->blksz) 724 ret = RESULT_FAIL; 725 726 if (ret == -EINVAL) 727 ret = RESULT_UNSUP_HOST; 728 729 return ret; 730 } 731 732 /* 733 * Checks that a "short transfer" behaved as expected 734 */ 735 static int mmc_test_check_broken_result(struct mmc_test_card *test, 736 struct mmc_request *mrq) 737 { 738 int ret; 739 740 if (WARN_ON(!mrq || !mrq->cmd || !mrq->data)) 741 return -EINVAL; 742 743 ret = 0; 744 745 if (!ret && mrq->cmd->error) 746 ret = mrq->cmd->error; 747 if (!ret && mrq->data->error == 0) 748 ret = RESULT_FAIL; 749 if (!ret && mrq->data->error != -ETIMEDOUT) 750 ret = mrq->data->error; 751 if (!ret && mrq->stop && mrq->stop->error) 752 ret = mrq->stop->error; 753 if (mrq->data->blocks > 1) { 754 if (!ret && mrq->data->bytes_xfered > mrq->data->blksz) 755 ret = RESULT_FAIL; 756 } else { 757 if (!ret && mrq->data->bytes_xfered > 0) 758 ret = RESULT_FAIL; 759 } 760 761 if (ret == -EINVAL) 762 ret = RESULT_UNSUP_HOST; 763 764 return ret; 765 } 766 767 struct mmc_test_req { 768 struct mmc_request mrq; 769 struct mmc_command sbc; 770 struct mmc_command cmd; 771 struct mmc_command stop; 772 struct mmc_command status; 773 struct mmc_data data; 774 }; 775 776 /* 777 * Tests nonblock transfer with certain parameters 778 */ 779 static void mmc_test_req_reset(struct mmc_test_req *rq) 780 { 781 memset(rq, 0, sizeof(struct mmc_test_req)); 782 783 rq->mrq.cmd = &rq->cmd; 784 rq->mrq.data = &rq->data; 785 rq->mrq.stop = &rq->stop; 786 } 787 788 static struct mmc_test_req *mmc_test_req_alloc(void) 789 { 790 struct mmc_test_req *rq = kmalloc(sizeof(*rq), GFP_KERNEL); 791 792 if (rq) 793 mmc_test_req_reset(rq); 794 795 return rq; 796 } 797 798 static void mmc_test_wait_done(struct mmc_request *mrq) 799 { 800 complete(&mrq->completion); 801 } 802 803 static int mmc_test_start_areq(struct mmc_test_card *test, 804 struct mmc_request *mrq, 805 struct mmc_request *prev_mrq) 806 { 807 struct mmc_host *host = test->card->host; 808 int err = 0; 809 810 if (mrq) { 811 init_completion(&mrq->completion); 812 mrq->done = mmc_test_wait_done; 813 mmc_pre_req(host, mrq); 814 } 815 816 if (prev_mrq) { 817 wait_for_completion(&prev_mrq->completion); 818 err = mmc_test_wait_busy(test); 819 if (!err) 820 err = mmc_test_check_result(test, prev_mrq); 821 } 822 823 if (!err && mrq) { 824 err = mmc_start_request(host, mrq); 825 if (err) 826 mmc_retune_release(host); 827 } 828 829 if (prev_mrq) 830 mmc_post_req(host, prev_mrq, 0); 831 832 if (err && mrq) 833 mmc_post_req(host, mrq, err); 834 835 return err; 836 } 837 838 static int mmc_test_nonblock_transfer(struct mmc_test_card *test, 839 struct scatterlist *sg, unsigned sg_len, 840 unsigned dev_addr, unsigned blocks, 841 unsigned blksz, int write, int count) 842 { 843 struct mmc_test_req *rq1, *rq2; 844 struct mmc_request *mrq, *prev_mrq; 845 int i; 846 int ret = RESULT_OK; 847 848 rq1 = mmc_test_req_alloc(); 849 rq2 = mmc_test_req_alloc(); 850 if (!rq1 || !rq2) { 851 ret = RESULT_FAIL; 852 goto err; 853 } 854 855 mrq = &rq1->mrq; 856 prev_mrq = NULL; 857 858 for (i = 0; i < count; i++) { 859 mmc_test_req_reset(container_of(mrq, struct mmc_test_req, mrq)); 860 mmc_test_prepare_mrq(test, mrq, sg, sg_len, dev_addr, blocks, 861 blksz, write); 862 ret = mmc_test_start_areq(test, mrq, prev_mrq); 863 if (ret) 864 goto err; 865 866 if (!prev_mrq) 867 prev_mrq = &rq2->mrq; 868 869 swap(mrq, prev_mrq); 870 dev_addr += blocks; 871 } 872 873 ret = mmc_test_start_areq(test, NULL, prev_mrq); 874 err: 875 kfree(rq1); 876 kfree(rq2); 877 return ret; 878 } 879 880 /* 881 * Tests a basic transfer with certain parameters 882 */ 883 static int mmc_test_simple_transfer(struct mmc_test_card *test, 884 struct scatterlist *sg, unsigned sg_len, unsigned dev_addr, 885 unsigned blocks, unsigned blksz, int write) 886 { 887 struct mmc_request mrq = {}; 888 struct mmc_command cmd = {}; 889 struct mmc_command stop = {}; 890 struct mmc_data data = {}; 891 892 mrq.cmd = &cmd; 893 mrq.data = &data; 894 mrq.stop = &stop; 895 896 mmc_test_prepare_mrq(test, &mrq, sg, sg_len, dev_addr, 897 blocks, blksz, write); 898 899 mmc_wait_for_req(test->card->host, &mrq); 900 901 mmc_test_wait_busy(test); 902 903 return mmc_test_check_result(test, &mrq); 904 } 905 906 /* 907 * Tests a transfer where the card will fail completely or partly 908 */ 909 static int mmc_test_broken_transfer(struct mmc_test_card *test, 910 unsigned blocks, unsigned blksz, int write) 911 { 912 struct mmc_request mrq = {}; 913 struct mmc_command cmd = {}; 914 struct mmc_command stop = {}; 915 struct mmc_data data = {}; 916 917 struct scatterlist sg; 918 919 mrq.cmd = &cmd; 920 mrq.data = &data; 921 mrq.stop = &stop; 922 923 sg_init_one(&sg, test->buffer, blocks * blksz); 924 925 mmc_test_prepare_mrq(test, &mrq, &sg, 1, 0, blocks, blksz, write); 926 mmc_test_prepare_broken_mrq(test, &mrq, write); 927 928 mmc_wait_for_req(test->card->host, &mrq); 929 930 mmc_test_wait_busy(test); 931 932 return mmc_test_check_broken_result(test, &mrq); 933 } 934 935 /* 936 * Does a complete transfer test where data is also validated 937 * 938 * Note: mmc_test_prepare() must have been done before this call 939 */ 940 static int mmc_test_transfer(struct mmc_test_card *test, 941 struct scatterlist *sg, unsigned sg_len, unsigned dev_addr, 942 unsigned blocks, unsigned blksz, int write) 943 { 944 int ret, i; 945 unsigned long flags; 946 947 if (write) { 948 for (i = 0; i < blocks * blksz; i++) 949 test->scratch[i] = i; 950 } else { 951 memset(test->scratch, 0, BUFFER_SIZE); 952 } 953 local_irq_save(flags); 954 sg_copy_from_buffer(sg, sg_len, test->scratch, BUFFER_SIZE); 955 local_irq_restore(flags); 956 957 ret = mmc_test_set_blksize(test, blksz); 958 if (ret) 959 return ret; 960 961 ret = mmc_test_simple_transfer(test, sg, sg_len, dev_addr, 962 blocks, blksz, write); 963 if (ret) 964 return ret; 965 966 if (write) { 967 int sectors; 968 969 ret = mmc_test_set_blksize(test, 512); 970 if (ret) 971 return ret; 972 973 sectors = (blocks * blksz + 511) / 512; 974 if ((sectors * 512) == (blocks * blksz)) 975 sectors++; 976 977 if ((sectors * 512) > BUFFER_SIZE) 978 return -EINVAL; 979 980 memset(test->buffer, 0, sectors * 512); 981 982 for (i = 0; i < sectors; i++) { 983 ret = mmc_test_buffer_transfer(test, 984 test->buffer + i * 512, 985 dev_addr + i, 512, 0); 986 if (ret) 987 return ret; 988 } 989 990 for (i = 0; i < blocks * blksz; i++) { 991 if (test->buffer[i] != (u8)i) 992 return RESULT_FAIL; 993 } 994 995 for (; i < sectors * 512; i++) { 996 if (test->buffer[i] != 0xDF) 997 return RESULT_FAIL; 998 } 999 } else { 1000 local_irq_save(flags); 1001 sg_copy_to_buffer(sg, sg_len, test->scratch, BUFFER_SIZE); 1002 local_irq_restore(flags); 1003 for (i = 0; i < blocks * blksz; i++) { 1004 if (test->scratch[i] != (u8)i) 1005 return RESULT_FAIL; 1006 } 1007 } 1008 1009 return 0; 1010 } 1011 1012 /*******************************************************************/ 1013 /* Tests */ 1014 /*******************************************************************/ 1015 1016 struct mmc_test_case { 1017 const char *name; 1018 1019 int (*prepare)(struct mmc_test_card *); 1020 int (*run)(struct mmc_test_card *); 1021 int (*cleanup)(struct mmc_test_card *); 1022 }; 1023 1024 static int mmc_test_basic_write(struct mmc_test_card *test) 1025 { 1026 int ret; 1027 struct scatterlist sg; 1028 1029 ret = mmc_test_set_blksize(test, 512); 1030 if (ret) 1031 return ret; 1032 1033 sg_init_one(&sg, test->buffer, 512); 1034 1035 return mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 1); 1036 } 1037 1038 static int mmc_test_basic_read(struct mmc_test_card *test) 1039 { 1040 int ret; 1041 struct scatterlist sg; 1042 1043 ret = mmc_test_set_blksize(test, 512); 1044 if (ret) 1045 return ret; 1046 1047 sg_init_one(&sg, test->buffer, 512); 1048 1049 return mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 0); 1050 } 1051 1052 static int mmc_test_verify_write(struct mmc_test_card *test) 1053 { 1054 struct scatterlist sg; 1055 1056 sg_init_one(&sg, test->buffer, 512); 1057 1058 return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1); 1059 } 1060 1061 static int mmc_test_verify_read(struct mmc_test_card *test) 1062 { 1063 struct scatterlist sg; 1064 1065 sg_init_one(&sg, test->buffer, 512); 1066 1067 return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0); 1068 } 1069 1070 static int mmc_test_multi_write(struct mmc_test_card *test) 1071 { 1072 unsigned int size; 1073 struct scatterlist sg; 1074 1075 if (test->card->host->max_blk_count == 1) 1076 return RESULT_UNSUP_HOST; 1077 1078 size = PAGE_SIZE * 2; 1079 size = min(size, test->card->host->max_req_size); 1080 size = min(size, test->card->host->max_seg_size); 1081 size = min(size, test->card->host->max_blk_count * 512); 1082 1083 if (size < 1024) 1084 return RESULT_UNSUP_HOST; 1085 1086 sg_init_one(&sg, test->buffer, size); 1087 1088 return mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 1); 1089 } 1090 1091 static int mmc_test_multi_read(struct mmc_test_card *test) 1092 { 1093 unsigned int size; 1094 struct scatterlist sg; 1095 1096 if (test->card->host->max_blk_count == 1) 1097 return RESULT_UNSUP_HOST; 1098 1099 size = PAGE_SIZE * 2; 1100 size = min(size, test->card->host->max_req_size); 1101 size = min(size, test->card->host->max_seg_size); 1102 size = min(size, test->card->host->max_blk_count * 512); 1103 1104 if (size < 1024) 1105 return RESULT_UNSUP_HOST; 1106 1107 sg_init_one(&sg, test->buffer, size); 1108 1109 return mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 0); 1110 } 1111 1112 static int mmc_test_pow2_write(struct mmc_test_card *test) 1113 { 1114 int ret, i; 1115 struct scatterlist sg; 1116 1117 if (!test->card->csd.write_partial) 1118 return RESULT_UNSUP_CARD; 1119 1120 for (i = 1; i < 512; i <<= 1) { 1121 sg_init_one(&sg, test->buffer, i); 1122 ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1); 1123 if (ret) 1124 return ret; 1125 } 1126 1127 return 0; 1128 } 1129 1130 static int mmc_test_pow2_read(struct mmc_test_card *test) 1131 { 1132 int ret, i; 1133 struct scatterlist sg; 1134 1135 if (!test->card->csd.read_partial) 1136 return RESULT_UNSUP_CARD; 1137 1138 for (i = 1; i < 512; i <<= 1) { 1139 sg_init_one(&sg, test->buffer, i); 1140 ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0); 1141 if (ret) 1142 return ret; 1143 } 1144 1145 return 0; 1146 } 1147 1148 static int mmc_test_weird_write(struct mmc_test_card *test) 1149 { 1150 int ret, i; 1151 struct scatterlist sg; 1152 1153 if (!test->card->csd.write_partial) 1154 return RESULT_UNSUP_CARD; 1155 1156 for (i = 3; i < 512; i += 7) { 1157 sg_init_one(&sg, test->buffer, i); 1158 ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1); 1159 if (ret) 1160 return ret; 1161 } 1162 1163 return 0; 1164 } 1165 1166 static int mmc_test_weird_read(struct mmc_test_card *test) 1167 { 1168 int ret, i; 1169 struct scatterlist sg; 1170 1171 if (!test->card->csd.read_partial) 1172 return RESULT_UNSUP_CARD; 1173 1174 for (i = 3; i < 512; i += 7) { 1175 sg_init_one(&sg, test->buffer, i); 1176 ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0); 1177 if (ret) 1178 return ret; 1179 } 1180 1181 return 0; 1182 } 1183 1184 static int mmc_test_align_write(struct mmc_test_card *test) 1185 { 1186 int ret, i; 1187 struct scatterlist sg; 1188 1189 for (i = 1; i < TEST_ALIGN_END; i++) { 1190 sg_init_one(&sg, test->buffer + i, 512); 1191 ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1); 1192 if (ret) 1193 return ret; 1194 } 1195 1196 return 0; 1197 } 1198 1199 static int mmc_test_align_read(struct mmc_test_card *test) 1200 { 1201 int ret, i; 1202 struct scatterlist sg; 1203 1204 for (i = 1; i < TEST_ALIGN_END; i++) { 1205 sg_init_one(&sg, test->buffer + i, 512); 1206 ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0); 1207 if (ret) 1208 return ret; 1209 } 1210 1211 return 0; 1212 } 1213 1214 static int mmc_test_align_multi_write(struct mmc_test_card *test) 1215 { 1216 int ret, i; 1217 unsigned int size; 1218 struct scatterlist sg; 1219 1220 if (test->card->host->max_blk_count == 1) 1221 return RESULT_UNSUP_HOST; 1222 1223 size = PAGE_SIZE * 2; 1224 size = min(size, test->card->host->max_req_size); 1225 size = min(size, test->card->host->max_seg_size); 1226 size = min(size, test->card->host->max_blk_count * 512); 1227 1228 if (size < 1024) 1229 return RESULT_UNSUP_HOST; 1230 1231 for (i = 1; i < TEST_ALIGN_END; i++) { 1232 sg_init_one(&sg, test->buffer + i, size); 1233 ret = mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 1); 1234 if (ret) 1235 return ret; 1236 } 1237 1238 return 0; 1239 } 1240 1241 static int mmc_test_align_multi_read(struct mmc_test_card *test) 1242 { 1243 int ret, i; 1244 unsigned int size; 1245 struct scatterlist sg; 1246 1247 if (test->card->host->max_blk_count == 1) 1248 return RESULT_UNSUP_HOST; 1249 1250 size = PAGE_SIZE * 2; 1251 size = min(size, test->card->host->max_req_size); 1252 size = min(size, test->card->host->max_seg_size); 1253 size = min(size, test->card->host->max_blk_count * 512); 1254 1255 if (size < 1024) 1256 return RESULT_UNSUP_HOST; 1257 1258 for (i = 1; i < TEST_ALIGN_END; i++) { 1259 sg_init_one(&sg, test->buffer + i, size); 1260 ret = mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 0); 1261 if (ret) 1262 return ret; 1263 } 1264 1265 return 0; 1266 } 1267 1268 static int mmc_test_xfersize_write(struct mmc_test_card *test) 1269 { 1270 int ret; 1271 1272 ret = mmc_test_set_blksize(test, 512); 1273 if (ret) 1274 return ret; 1275 1276 return mmc_test_broken_transfer(test, 1, 512, 1); 1277 } 1278 1279 static int mmc_test_xfersize_read(struct mmc_test_card *test) 1280 { 1281 int ret; 1282 1283 ret = mmc_test_set_blksize(test, 512); 1284 if (ret) 1285 return ret; 1286 1287 return mmc_test_broken_transfer(test, 1, 512, 0); 1288 } 1289 1290 static int mmc_test_multi_xfersize_write(struct mmc_test_card *test) 1291 { 1292 int ret; 1293 1294 if (test->card->host->max_blk_count == 1) 1295 return RESULT_UNSUP_HOST; 1296 1297 ret = mmc_test_set_blksize(test, 512); 1298 if (ret) 1299 return ret; 1300 1301 return mmc_test_broken_transfer(test, 2, 512, 1); 1302 } 1303 1304 static int mmc_test_multi_xfersize_read(struct mmc_test_card *test) 1305 { 1306 int ret; 1307 1308 if (test->card->host->max_blk_count == 1) 1309 return RESULT_UNSUP_HOST; 1310 1311 ret = mmc_test_set_blksize(test, 512); 1312 if (ret) 1313 return ret; 1314 1315 return mmc_test_broken_transfer(test, 2, 512, 0); 1316 } 1317 1318 #ifdef CONFIG_HIGHMEM 1319 1320 static int mmc_test_write_high(struct mmc_test_card *test) 1321 { 1322 struct scatterlist sg; 1323 1324 sg_init_table(&sg, 1); 1325 sg_set_page(&sg, test->highmem, 512, 0); 1326 1327 return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1); 1328 } 1329 1330 static int mmc_test_read_high(struct mmc_test_card *test) 1331 { 1332 struct scatterlist sg; 1333 1334 sg_init_table(&sg, 1); 1335 sg_set_page(&sg, test->highmem, 512, 0); 1336 1337 return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0); 1338 } 1339 1340 static int mmc_test_multi_write_high(struct mmc_test_card *test) 1341 { 1342 unsigned int size; 1343 struct scatterlist sg; 1344 1345 if (test->card->host->max_blk_count == 1) 1346 return RESULT_UNSUP_HOST; 1347 1348 size = PAGE_SIZE * 2; 1349 size = min(size, test->card->host->max_req_size); 1350 size = min(size, test->card->host->max_seg_size); 1351 size = min(size, test->card->host->max_blk_count * 512); 1352 1353 if (size < 1024) 1354 return RESULT_UNSUP_HOST; 1355 1356 sg_init_table(&sg, 1); 1357 sg_set_page(&sg, test->highmem, size, 0); 1358 1359 return mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 1); 1360 } 1361 1362 static int mmc_test_multi_read_high(struct mmc_test_card *test) 1363 { 1364 unsigned int size; 1365 struct scatterlist sg; 1366 1367 if (test->card->host->max_blk_count == 1) 1368 return RESULT_UNSUP_HOST; 1369 1370 size = PAGE_SIZE * 2; 1371 size = min(size, test->card->host->max_req_size); 1372 size = min(size, test->card->host->max_seg_size); 1373 size = min(size, test->card->host->max_blk_count * 512); 1374 1375 if (size < 1024) 1376 return RESULT_UNSUP_HOST; 1377 1378 sg_init_table(&sg, 1); 1379 sg_set_page(&sg, test->highmem, size, 0); 1380 1381 return mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 0); 1382 } 1383 1384 #else 1385 1386 static int mmc_test_no_highmem(struct mmc_test_card *test) 1387 { 1388 pr_info("%s: Highmem not configured - test skipped\n", 1389 mmc_hostname(test->card->host)); 1390 return 0; 1391 } 1392 1393 #endif /* CONFIG_HIGHMEM */ 1394 1395 /* 1396 * Map sz bytes so that it can be transferred. 1397 */ 1398 static int mmc_test_area_map(struct mmc_test_card *test, unsigned long sz, 1399 int max_scatter, int min_sg_len) 1400 { 1401 struct mmc_test_area *t = &test->area; 1402 int err; 1403 1404 t->blocks = sz >> 9; 1405 1406 if (max_scatter) { 1407 err = mmc_test_map_sg_max_scatter(t->mem, sz, t->sg, 1408 t->max_segs, t->max_seg_sz, 1409 &t->sg_len); 1410 } else { 1411 err = mmc_test_map_sg(t->mem, sz, t->sg, 1, t->max_segs, 1412 t->max_seg_sz, &t->sg_len, min_sg_len); 1413 } 1414 if (err) 1415 pr_info("%s: Failed to map sg list\n", 1416 mmc_hostname(test->card->host)); 1417 return err; 1418 } 1419 1420 /* 1421 * Transfer bytes mapped by mmc_test_area_map(). 1422 */ 1423 static int mmc_test_area_transfer(struct mmc_test_card *test, 1424 unsigned int dev_addr, int write) 1425 { 1426 struct mmc_test_area *t = &test->area; 1427 1428 return mmc_test_simple_transfer(test, t->sg, t->sg_len, dev_addr, 1429 t->blocks, 512, write); 1430 } 1431 1432 /* 1433 * Map and transfer bytes for multiple transfers. 1434 */ 1435 static int mmc_test_area_io_seq(struct mmc_test_card *test, unsigned long sz, 1436 unsigned int dev_addr, int write, 1437 int max_scatter, int timed, int count, 1438 bool nonblock, int min_sg_len) 1439 { 1440 struct timespec64 ts1, ts2; 1441 int ret = 0; 1442 int i; 1443 struct mmc_test_area *t = &test->area; 1444 1445 /* 1446 * In the case of a maximally scattered transfer, the maximum transfer 1447 * size is further limited by using PAGE_SIZE segments. 1448 */ 1449 if (max_scatter) { 1450 struct mmc_test_area *t = &test->area; 1451 unsigned long max_tfr; 1452 1453 if (t->max_seg_sz >= PAGE_SIZE) 1454 max_tfr = t->max_segs * PAGE_SIZE; 1455 else 1456 max_tfr = t->max_segs * t->max_seg_sz; 1457 if (sz > max_tfr) 1458 sz = max_tfr; 1459 } 1460 1461 ret = mmc_test_area_map(test, sz, max_scatter, min_sg_len); 1462 if (ret) 1463 return ret; 1464 1465 if (timed) 1466 ktime_get_ts64(&ts1); 1467 if (nonblock) 1468 ret = mmc_test_nonblock_transfer(test, t->sg, t->sg_len, 1469 dev_addr, t->blocks, 512, write, count); 1470 else 1471 for (i = 0; i < count && ret == 0; i++) { 1472 ret = mmc_test_area_transfer(test, dev_addr, write); 1473 dev_addr += sz >> 9; 1474 } 1475 1476 if (ret) 1477 return ret; 1478 1479 if (timed) 1480 ktime_get_ts64(&ts2); 1481 1482 if (timed) 1483 mmc_test_print_avg_rate(test, sz, count, &ts1, &ts2); 1484 1485 return 0; 1486 } 1487 1488 static int mmc_test_area_io(struct mmc_test_card *test, unsigned long sz, 1489 unsigned int dev_addr, int write, int max_scatter, 1490 int timed) 1491 { 1492 return mmc_test_area_io_seq(test, sz, dev_addr, write, max_scatter, 1493 timed, 1, false, 0); 1494 } 1495 1496 /* 1497 * Write the test area entirely. 1498 */ 1499 static int mmc_test_area_fill(struct mmc_test_card *test) 1500 { 1501 struct mmc_test_area *t = &test->area; 1502 1503 return mmc_test_area_io(test, t->max_tfr, t->dev_addr, 1, 0, 0); 1504 } 1505 1506 /* 1507 * Erase the test area entirely. 1508 */ 1509 static int mmc_test_area_erase(struct mmc_test_card *test) 1510 { 1511 struct mmc_test_area *t = &test->area; 1512 1513 if (!mmc_can_erase(test->card)) 1514 return 0; 1515 1516 return mmc_erase(test->card, t->dev_addr, t->max_sz >> 9, 1517 MMC_ERASE_ARG); 1518 } 1519 1520 /* 1521 * Cleanup struct mmc_test_area. 1522 */ 1523 static int mmc_test_area_cleanup(struct mmc_test_card *test) 1524 { 1525 struct mmc_test_area *t = &test->area; 1526 1527 kfree(t->sg); 1528 mmc_test_free_mem(t->mem); 1529 1530 return 0; 1531 } 1532 1533 /* 1534 * Initialize an area for testing large transfers. The test area is set to the 1535 * middle of the card because cards may have different characteristics at the 1536 * front (for FAT file system optimization). Optionally, the area is erased 1537 * (if the card supports it) which may improve write performance. Optionally, 1538 * the area is filled with data for subsequent read tests. 1539 */ 1540 static int mmc_test_area_init(struct mmc_test_card *test, int erase, int fill) 1541 { 1542 struct mmc_test_area *t = &test->area; 1543 unsigned long min_sz = 64 * 1024, sz; 1544 int ret; 1545 1546 ret = mmc_test_set_blksize(test, 512); 1547 if (ret) 1548 return ret; 1549 1550 /* Make the test area size about 4MiB */ 1551 sz = (unsigned long)test->card->pref_erase << 9; 1552 t->max_sz = sz; 1553 while (t->max_sz < 4 * 1024 * 1024) 1554 t->max_sz += sz; 1555 while (t->max_sz > TEST_AREA_MAX_SIZE && t->max_sz > sz) 1556 t->max_sz -= sz; 1557 1558 t->max_segs = test->card->host->max_segs; 1559 t->max_seg_sz = test->card->host->max_seg_size; 1560 t->max_seg_sz -= t->max_seg_sz % 512; 1561 1562 t->max_tfr = t->max_sz; 1563 if (t->max_tfr >> 9 > test->card->host->max_blk_count) 1564 t->max_tfr = test->card->host->max_blk_count << 9; 1565 if (t->max_tfr > test->card->host->max_req_size) 1566 t->max_tfr = test->card->host->max_req_size; 1567 if (t->max_tfr / t->max_seg_sz > t->max_segs) 1568 t->max_tfr = t->max_segs * t->max_seg_sz; 1569 1570 /* 1571 * Try to allocate enough memory for a max. sized transfer. Less is OK 1572 * because the same memory can be mapped into the scatterlist more than 1573 * once. Also, take into account the limits imposed on scatterlist 1574 * segments by the host driver. 1575 */ 1576 t->mem = mmc_test_alloc_mem(min_sz, t->max_tfr, t->max_segs, 1577 t->max_seg_sz); 1578 if (!t->mem) 1579 return -ENOMEM; 1580 1581 t->sg = kmalloc_array(t->max_segs, sizeof(*t->sg), GFP_KERNEL); 1582 if (!t->sg) { 1583 ret = -ENOMEM; 1584 goto out_free; 1585 } 1586 1587 t->dev_addr = mmc_test_capacity(test->card) / 2; 1588 t->dev_addr -= t->dev_addr % (t->max_sz >> 9); 1589 1590 if (erase) { 1591 ret = mmc_test_area_erase(test); 1592 if (ret) 1593 goto out_free; 1594 } 1595 1596 if (fill) { 1597 ret = mmc_test_area_fill(test); 1598 if (ret) 1599 goto out_free; 1600 } 1601 1602 return 0; 1603 1604 out_free: 1605 mmc_test_area_cleanup(test); 1606 return ret; 1607 } 1608 1609 /* 1610 * Prepare for large transfers. Do not erase the test area. 1611 */ 1612 static int mmc_test_area_prepare(struct mmc_test_card *test) 1613 { 1614 return mmc_test_area_init(test, 0, 0); 1615 } 1616 1617 /* 1618 * Prepare for large transfers. Do erase the test area. 1619 */ 1620 static int mmc_test_area_prepare_erase(struct mmc_test_card *test) 1621 { 1622 return mmc_test_area_init(test, 1, 0); 1623 } 1624 1625 /* 1626 * Prepare for large transfers. Erase and fill the test area. 1627 */ 1628 static int mmc_test_area_prepare_fill(struct mmc_test_card *test) 1629 { 1630 return mmc_test_area_init(test, 1, 1); 1631 } 1632 1633 /* 1634 * Test best-case performance. Best-case performance is expected from 1635 * a single large transfer. 1636 * 1637 * An additional option (max_scatter) allows the measurement of the same 1638 * transfer but with no contiguous pages in the scatter list. This tests 1639 * the efficiency of DMA to handle scattered pages. 1640 */ 1641 static int mmc_test_best_performance(struct mmc_test_card *test, int write, 1642 int max_scatter) 1643 { 1644 struct mmc_test_area *t = &test->area; 1645 1646 return mmc_test_area_io(test, t->max_tfr, t->dev_addr, write, 1647 max_scatter, 1); 1648 } 1649 1650 /* 1651 * Best-case read performance. 1652 */ 1653 static int mmc_test_best_read_performance(struct mmc_test_card *test) 1654 { 1655 return mmc_test_best_performance(test, 0, 0); 1656 } 1657 1658 /* 1659 * Best-case write performance. 1660 */ 1661 static int mmc_test_best_write_performance(struct mmc_test_card *test) 1662 { 1663 return mmc_test_best_performance(test, 1, 0); 1664 } 1665 1666 /* 1667 * Best-case read performance into scattered pages. 1668 */ 1669 static int mmc_test_best_read_perf_max_scatter(struct mmc_test_card *test) 1670 { 1671 return mmc_test_best_performance(test, 0, 1); 1672 } 1673 1674 /* 1675 * Best-case write performance from scattered pages. 1676 */ 1677 static int mmc_test_best_write_perf_max_scatter(struct mmc_test_card *test) 1678 { 1679 return mmc_test_best_performance(test, 1, 1); 1680 } 1681 1682 /* 1683 * Single read performance by transfer size. 1684 */ 1685 static int mmc_test_profile_read_perf(struct mmc_test_card *test) 1686 { 1687 struct mmc_test_area *t = &test->area; 1688 unsigned long sz; 1689 unsigned int dev_addr; 1690 int ret; 1691 1692 for (sz = 512; sz < t->max_tfr; sz <<= 1) { 1693 dev_addr = t->dev_addr + (sz >> 9); 1694 ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 1); 1695 if (ret) 1696 return ret; 1697 } 1698 sz = t->max_tfr; 1699 dev_addr = t->dev_addr; 1700 return mmc_test_area_io(test, sz, dev_addr, 0, 0, 1); 1701 } 1702 1703 /* 1704 * Single write performance by transfer size. 1705 */ 1706 static int mmc_test_profile_write_perf(struct mmc_test_card *test) 1707 { 1708 struct mmc_test_area *t = &test->area; 1709 unsigned long sz; 1710 unsigned int dev_addr; 1711 int ret; 1712 1713 ret = mmc_test_area_erase(test); 1714 if (ret) 1715 return ret; 1716 for (sz = 512; sz < t->max_tfr; sz <<= 1) { 1717 dev_addr = t->dev_addr + (sz >> 9); 1718 ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 1); 1719 if (ret) 1720 return ret; 1721 } 1722 ret = mmc_test_area_erase(test); 1723 if (ret) 1724 return ret; 1725 sz = t->max_tfr; 1726 dev_addr = t->dev_addr; 1727 return mmc_test_area_io(test, sz, dev_addr, 1, 0, 1); 1728 } 1729 1730 /* 1731 * Single trim performance by transfer size. 1732 */ 1733 static int mmc_test_profile_trim_perf(struct mmc_test_card *test) 1734 { 1735 struct mmc_test_area *t = &test->area; 1736 unsigned long sz; 1737 unsigned int dev_addr; 1738 struct timespec64 ts1, ts2; 1739 int ret; 1740 1741 if (!mmc_can_trim(test->card)) 1742 return RESULT_UNSUP_CARD; 1743 1744 if (!mmc_can_erase(test->card)) 1745 return RESULT_UNSUP_HOST; 1746 1747 for (sz = 512; sz < t->max_sz; sz <<= 1) { 1748 dev_addr = t->dev_addr + (sz >> 9); 1749 ktime_get_ts64(&ts1); 1750 ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG); 1751 if (ret) 1752 return ret; 1753 ktime_get_ts64(&ts2); 1754 mmc_test_print_rate(test, sz, &ts1, &ts2); 1755 } 1756 dev_addr = t->dev_addr; 1757 ktime_get_ts64(&ts1); 1758 ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG); 1759 if (ret) 1760 return ret; 1761 ktime_get_ts64(&ts2); 1762 mmc_test_print_rate(test, sz, &ts1, &ts2); 1763 return 0; 1764 } 1765 1766 static int mmc_test_seq_read_perf(struct mmc_test_card *test, unsigned long sz) 1767 { 1768 struct mmc_test_area *t = &test->area; 1769 unsigned int dev_addr, i, cnt; 1770 struct timespec64 ts1, ts2; 1771 int ret; 1772 1773 cnt = t->max_sz / sz; 1774 dev_addr = t->dev_addr; 1775 ktime_get_ts64(&ts1); 1776 for (i = 0; i < cnt; i++) { 1777 ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 0); 1778 if (ret) 1779 return ret; 1780 dev_addr += (sz >> 9); 1781 } 1782 ktime_get_ts64(&ts2); 1783 mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2); 1784 return 0; 1785 } 1786 1787 /* 1788 * Consecutive read performance by transfer size. 1789 */ 1790 static int mmc_test_profile_seq_read_perf(struct mmc_test_card *test) 1791 { 1792 struct mmc_test_area *t = &test->area; 1793 unsigned long sz; 1794 int ret; 1795 1796 for (sz = 512; sz < t->max_tfr; sz <<= 1) { 1797 ret = mmc_test_seq_read_perf(test, sz); 1798 if (ret) 1799 return ret; 1800 } 1801 sz = t->max_tfr; 1802 return mmc_test_seq_read_perf(test, sz); 1803 } 1804 1805 static int mmc_test_seq_write_perf(struct mmc_test_card *test, unsigned long sz) 1806 { 1807 struct mmc_test_area *t = &test->area; 1808 unsigned int dev_addr, i, cnt; 1809 struct timespec64 ts1, ts2; 1810 int ret; 1811 1812 ret = mmc_test_area_erase(test); 1813 if (ret) 1814 return ret; 1815 cnt = t->max_sz / sz; 1816 dev_addr = t->dev_addr; 1817 ktime_get_ts64(&ts1); 1818 for (i = 0; i < cnt; i++) { 1819 ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 0); 1820 if (ret) 1821 return ret; 1822 dev_addr += (sz >> 9); 1823 } 1824 ktime_get_ts64(&ts2); 1825 mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2); 1826 return 0; 1827 } 1828 1829 /* 1830 * Consecutive write performance by transfer size. 1831 */ 1832 static int mmc_test_profile_seq_write_perf(struct mmc_test_card *test) 1833 { 1834 struct mmc_test_area *t = &test->area; 1835 unsigned long sz; 1836 int ret; 1837 1838 for (sz = 512; sz < t->max_tfr; sz <<= 1) { 1839 ret = mmc_test_seq_write_perf(test, sz); 1840 if (ret) 1841 return ret; 1842 } 1843 sz = t->max_tfr; 1844 return mmc_test_seq_write_perf(test, sz); 1845 } 1846 1847 /* 1848 * Consecutive trim performance by transfer size. 1849 */ 1850 static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test) 1851 { 1852 struct mmc_test_area *t = &test->area; 1853 unsigned long sz; 1854 unsigned int dev_addr, i, cnt; 1855 struct timespec64 ts1, ts2; 1856 int ret; 1857 1858 if (!mmc_can_trim(test->card)) 1859 return RESULT_UNSUP_CARD; 1860 1861 if (!mmc_can_erase(test->card)) 1862 return RESULT_UNSUP_HOST; 1863 1864 for (sz = 512; sz <= t->max_sz; sz <<= 1) { 1865 ret = mmc_test_area_erase(test); 1866 if (ret) 1867 return ret; 1868 ret = mmc_test_area_fill(test); 1869 if (ret) 1870 return ret; 1871 cnt = t->max_sz / sz; 1872 dev_addr = t->dev_addr; 1873 ktime_get_ts64(&ts1); 1874 for (i = 0; i < cnt; i++) { 1875 ret = mmc_erase(test->card, dev_addr, sz >> 9, 1876 MMC_TRIM_ARG); 1877 if (ret) 1878 return ret; 1879 dev_addr += (sz >> 9); 1880 } 1881 ktime_get_ts64(&ts2); 1882 mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2); 1883 } 1884 return 0; 1885 } 1886 1887 static unsigned int rnd_next = 1; 1888 1889 static unsigned int mmc_test_rnd_num(unsigned int rnd_cnt) 1890 { 1891 uint64_t r; 1892 1893 rnd_next = rnd_next * 1103515245 + 12345; 1894 r = (rnd_next >> 16) & 0x7fff; 1895 return (r * rnd_cnt) >> 15; 1896 } 1897 1898 static int mmc_test_rnd_perf(struct mmc_test_card *test, int write, int print, 1899 unsigned long sz) 1900 { 1901 unsigned int dev_addr, cnt, rnd_addr, range1, range2, last_ea = 0, ea; 1902 unsigned int ssz; 1903 struct timespec64 ts1, ts2, ts; 1904 int ret; 1905 1906 ssz = sz >> 9; 1907 1908 rnd_addr = mmc_test_capacity(test->card) / 4; 1909 range1 = rnd_addr / test->card->pref_erase; 1910 range2 = range1 / ssz; 1911 1912 ktime_get_ts64(&ts1); 1913 for (cnt = 0; cnt < UINT_MAX; cnt++) { 1914 ktime_get_ts64(&ts2); 1915 ts = timespec64_sub(ts2, ts1); 1916 if (ts.tv_sec >= 10) 1917 break; 1918 ea = mmc_test_rnd_num(range1); 1919 if (ea == last_ea) 1920 ea -= 1; 1921 last_ea = ea; 1922 dev_addr = rnd_addr + test->card->pref_erase * ea + 1923 ssz * mmc_test_rnd_num(range2); 1924 ret = mmc_test_area_io(test, sz, dev_addr, write, 0, 0); 1925 if (ret) 1926 return ret; 1927 } 1928 if (print) 1929 mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2); 1930 return 0; 1931 } 1932 1933 static int mmc_test_random_perf(struct mmc_test_card *test, int write) 1934 { 1935 struct mmc_test_area *t = &test->area; 1936 unsigned int next; 1937 unsigned long sz; 1938 int ret; 1939 1940 for (sz = 512; sz < t->max_tfr; sz <<= 1) { 1941 /* 1942 * When writing, try to get more consistent results by running 1943 * the test twice with exactly the same I/O but outputting the 1944 * results only for the 2nd run. 1945 */ 1946 if (write) { 1947 next = rnd_next; 1948 ret = mmc_test_rnd_perf(test, write, 0, sz); 1949 if (ret) 1950 return ret; 1951 rnd_next = next; 1952 } 1953 ret = mmc_test_rnd_perf(test, write, 1, sz); 1954 if (ret) 1955 return ret; 1956 } 1957 sz = t->max_tfr; 1958 if (write) { 1959 next = rnd_next; 1960 ret = mmc_test_rnd_perf(test, write, 0, sz); 1961 if (ret) 1962 return ret; 1963 rnd_next = next; 1964 } 1965 return mmc_test_rnd_perf(test, write, 1, sz); 1966 } 1967 1968 /* 1969 * Random read performance by transfer size. 1970 */ 1971 static int mmc_test_random_read_perf(struct mmc_test_card *test) 1972 { 1973 return mmc_test_random_perf(test, 0); 1974 } 1975 1976 /* 1977 * Random write performance by transfer size. 1978 */ 1979 static int mmc_test_random_write_perf(struct mmc_test_card *test) 1980 { 1981 return mmc_test_random_perf(test, 1); 1982 } 1983 1984 static int mmc_test_seq_perf(struct mmc_test_card *test, int write, 1985 unsigned int tot_sz, int max_scatter) 1986 { 1987 struct mmc_test_area *t = &test->area; 1988 unsigned int dev_addr, i, cnt, sz, ssz; 1989 struct timespec64 ts1, ts2; 1990 int ret; 1991 1992 sz = t->max_tfr; 1993 1994 /* 1995 * In the case of a maximally scattered transfer, the maximum transfer 1996 * size is further limited by using PAGE_SIZE segments. 1997 */ 1998 if (max_scatter) { 1999 unsigned long max_tfr; 2000 2001 if (t->max_seg_sz >= PAGE_SIZE) 2002 max_tfr = t->max_segs * PAGE_SIZE; 2003 else 2004 max_tfr = t->max_segs * t->max_seg_sz; 2005 if (sz > max_tfr) 2006 sz = max_tfr; 2007 } 2008 2009 ssz = sz >> 9; 2010 dev_addr = mmc_test_capacity(test->card) / 4; 2011 if (tot_sz > dev_addr << 9) 2012 tot_sz = dev_addr << 9; 2013 cnt = tot_sz / sz; 2014 dev_addr &= 0xffff0000; /* Round to 64MiB boundary */ 2015 2016 ktime_get_ts64(&ts1); 2017 for (i = 0; i < cnt; i++) { 2018 ret = mmc_test_area_io(test, sz, dev_addr, write, 2019 max_scatter, 0); 2020 if (ret) 2021 return ret; 2022 dev_addr += ssz; 2023 } 2024 ktime_get_ts64(&ts2); 2025 2026 mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2); 2027 2028 return 0; 2029 } 2030 2031 static int mmc_test_large_seq_perf(struct mmc_test_card *test, int write) 2032 { 2033 int ret, i; 2034 2035 for (i = 0; i < 10; i++) { 2036 ret = mmc_test_seq_perf(test, write, 10 * 1024 * 1024, 1); 2037 if (ret) 2038 return ret; 2039 } 2040 for (i = 0; i < 5; i++) { 2041 ret = mmc_test_seq_perf(test, write, 100 * 1024 * 1024, 1); 2042 if (ret) 2043 return ret; 2044 } 2045 for (i = 0; i < 3; i++) { 2046 ret = mmc_test_seq_perf(test, write, 1000 * 1024 * 1024, 1); 2047 if (ret) 2048 return ret; 2049 } 2050 2051 return ret; 2052 } 2053 2054 /* 2055 * Large sequential read performance. 2056 */ 2057 static int mmc_test_large_seq_read_perf(struct mmc_test_card *test) 2058 { 2059 return mmc_test_large_seq_perf(test, 0); 2060 } 2061 2062 /* 2063 * Large sequential write performance. 2064 */ 2065 static int mmc_test_large_seq_write_perf(struct mmc_test_card *test) 2066 { 2067 return mmc_test_large_seq_perf(test, 1); 2068 } 2069 2070 static int mmc_test_rw_multiple(struct mmc_test_card *test, 2071 struct mmc_test_multiple_rw *tdata, 2072 unsigned int reqsize, unsigned int size, 2073 int min_sg_len) 2074 { 2075 unsigned int dev_addr; 2076 struct mmc_test_area *t = &test->area; 2077 int ret = 0; 2078 2079 /* Set up test area */ 2080 if (size > mmc_test_capacity(test->card) / 2 * 512) 2081 size = mmc_test_capacity(test->card) / 2 * 512; 2082 if (reqsize > t->max_tfr) 2083 reqsize = t->max_tfr; 2084 dev_addr = mmc_test_capacity(test->card) / 4; 2085 if ((dev_addr & 0xffff0000)) 2086 dev_addr &= 0xffff0000; /* Round to 64MiB boundary */ 2087 else 2088 dev_addr &= 0xfffff800; /* Round to 1MiB boundary */ 2089 if (!dev_addr) 2090 goto err; 2091 2092 if (reqsize > size) 2093 return 0; 2094 2095 /* prepare test area */ 2096 if (mmc_can_erase(test->card) && 2097 tdata->prepare & MMC_TEST_PREP_ERASE) { 2098 ret = mmc_erase(test->card, dev_addr, 2099 size / 512, MMC_SECURE_ERASE_ARG); 2100 if (ret) 2101 ret = mmc_erase(test->card, dev_addr, 2102 size / 512, MMC_ERASE_ARG); 2103 if (ret) 2104 goto err; 2105 } 2106 2107 /* Run test */ 2108 ret = mmc_test_area_io_seq(test, reqsize, dev_addr, 2109 tdata->do_write, 0, 1, size / reqsize, 2110 tdata->do_nonblock_req, min_sg_len); 2111 if (ret) 2112 goto err; 2113 2114 return ret; 2115 err: 2116 pr_info("[%s] error\n", __func__); 2117 return ret; 2118 } 2119 2120 static int mmc_test_rw_multiple_size(struct mmc_test_card *test, 2121 struct mmc_test_multiple_rw *rw) 2122 { 2123 int ret = 0; 2124 int i; 2125 void *pre_req = test->card->host->ops->pre_req; 2126 void *post_req = test->card->host->ops->post_req; 2127 2128 if (rw->do_nonblock_req && 2129 ((!pre_req && post_req) || (pre_req && !post_req))) { 2130 pr_info("error: only one of pre/post is defined\n"); 2131 return -EINVAL; 2132 } 2133 2134 for (i = 0 ; i < rw->len && ret == 0; i++) { 2135 ret = mmc_test_rw_multiple(test, rw, rw->bs[i], rw->size, 0); 2136 if (ret) 2137 break; 2138 } 2139 return ret; 2140 } 2141 2142 static int mmc_test_rw_multiple_sg_len(struct mmc_test_card *test, 2143 struct mmc_test_multiple_rw *rw) 2144 { 2145 int ret = 0; 2146 int i; 2147 2148 for (i = 0 ; i < rw->len && ret == 0; i++) { 2149 ret = mmc_test_rw_multiple(test, rw, 512 * 1024, rw->size, 2150 rw->sg_len[i]); 2151 if (ret) 2152 break; 2153 } 2154 return ret; 2155 } 2156 2157 /* 2158 * Multiple blocking write 4k to 4 MB chunks 2159 */ 2160 static int mmc_test_profile_mult_write_blocking_perf(struct mmc_test_card *test) 2161 { 2162 unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16, 2163 1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22}; 2164 struct mmc_test_multiple_rw test_data = { 2165 .bs = bs, 2166 .size = TEST_AREA_MAX_SIZE, 2167 .len = ARRAY_SIZE(bs), 2168 .do_write = true, 2169 .do_nonblock_req = false, 2170 .prepare = MMC_TEST_PREP_ERASE, 2171 }; 2172 2173 return mmc_test_rw_multiple_size(test, &test_data); 2174 }; 2175 2176 /* 2177 * Multiple non-blocking write 4k to 4 MB chunks 2178 */ 2179 static int mmc_test_profile_mult_write_nonblock_perf(struct mmc_test_card *test) 2180 { 2181 unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16, 2182 1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22}; 2183 struct mmc_test_multiple_rw test_data = { 2184 .bs = bs, 2185 .size = TEST_AREA_MAX_SIZE, 2186 .len = ARRAY_SIZE(bs), 2187 .do_write = true, 2188 .do_nonblock_req = true, 2189 .prepare = MMC_TEST_PREP_ERASE, 2190 }; 2191 2192 return mmc_test_rw_multiple_size(test, &test_data); 2193 } 2194 2195 /* 2196 * Multiple blocking read 4k to 4 MB chunks 2197 */ 2198 static int mmc_test_profile_mult_read_blocking_perf(struct mmc_test_card *test) 2199 { 2200 unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16, 2201 1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22}; 2202 struct mmc_test_multiple_rw test_data = { 2203 .bs = bs, 2204 .size = TEST_AREA_MAX_SIZE, 2205 .len = ARRAY_SIZE(bs), 2206 .do_write = false, 2207 .do_nonblock_req = false, 2208 .prepare = MMC_TEST_PREP_NONE, 2209 }; 2210 2211 return mmc_test_rw_multiple_size(test, &test_data); 2212 } 2213 2214 /* 2215 * Multiple non-blocking read 4k to 4 MB chunks 2216 */ 2217 static int mmc_test_profile_mult_read_nonblock_perf(struct mmc_test_card *test) 2218 { 2219 unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16, 2220 1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22}; 2221 struct mmc_test_multiple_rw test_data = { 2222 .bs = bs, 2223 .size = TEST_AREA_MAX_SIZE, 2224 .len = ARRAY_SIZE(bs), 2225 .do_write = false, 2226 .do_nonblock_req = true, 2227 .prepare = MMC_TEST_PREP_NONE, 2228 }; 2229 2230 return mmc_test_rw_multiple_size(test, &test_data); 2231 } 2232 2233 /* 2234 * Multiple blocking write 1 to 512 sg elements 2235 */ 2236 static int mmc_test_profile_sglen_wr_blocking_perf(struct mmc_test_card *test) 2237 { 2238 unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6, 2239 1 << 7, 1 << 8, 1 << 9}; 2240 struct mmc_test_multiple_rw test_data = { 2241 .sg_len = sg_len, 2242 .size = TEST_AREA_MAX_SIZE, 2243 .len = ARRAY_SIZE(sg_len), 2244 .do_write = true, 2245 .do_nonblock_req = false, 2246 .prepare = MMC_TEST_PREP_ERASE, 2247 }; 2248 2249 return mmc_test_rw_multiple_sg_len(test, &test_data); 2250 }; 2251 2252 /* 2253 * Multiple non-blocking write 1 to 512 sg elements 2254 */ 2255 static int mmc_test_profile_sglen_wr_nonblock_perf(struct mmc_test_card *test) 2256 { 2257 unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6, 2258 1 << 7, 1 << 8, 1 << 9}; 2259 struct mmc_test_multiple_rw test_data = { 2260 .sg_len = sg_len, 2261 .size = TEST_AREA_MAX_SIZE, 2262 .len = ARRAY_SIZE(sg_len), 2263 .do_write = true, 2264 .do_nonblock_req = true, 2265 .prepare = MMC_TEST_PREP_ERASE, 2266 }; 2267 2268 return mmc_test_rw_multiple_sg_len(test, &test_data); 2269 } 2270 2271 /* 2272 * Multiple blocking read 1 to 512 sg elements 2273 */ 2274 static int mmc_test_profile_sglen_r_blocking_perf(struct mmc_test_card *test) 2275 { 2276 unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6, 2277 1 << 7, 1 << 8, 1 << 9}; 2278 struct mmc_test_multiple_rw test_data = { 2279 .sg_len = sg_len, 2280 .size = TEST_AREA_MAX_SIZE, 2281 .len = ARRAY_SIZE(sg_len), 2282 .do_write = false, 2283 .do_nonblock_req = false, 2284 .prepare = MMC_TEST_PREP_NONE, 2285 }; 2286 2287 return mmc_test_rw_multiple_sg_len(test, &test_data); 2288 } 2289 2290 /* 2291 * Multiple non-blocking read 1 to 512 sg elements 2292 */ 2293 static int mmc_test_profile_sglen_r_nonblock_perf(struct mmc_test_card *test) 2294 { 2295 unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6, 2296 1 << 7, 1 << 8, 1 << 9}; 2297 struct mmc_test_multiple_rw test_data = { 2298 .sg_len = sg_len, 2299 .size = TEST_AREA_MAX_SIZE, 2300 .len = ARRAY_SIZE(sg_len), 2301 .do_write = false, 2302 .do_nonblock_req = true, 2303 .prepare = MMC_TEST_PREP_NONE, 2304 }; 2305 2306 return mmc_test_rw_multiple_sg_len(test, &test_data); 2307 } 2308 2309 /* 2310 * eMMC hardware reset. 2311 */ 2312 static int mmc_test_reset(struct mmc_test_card *test) 2313 { 2314 struct mmc_card *card = test->card; 2315 struct mmc_host *host = card->host; 2316 int err; 2317 2318 err = mmc_hw_reset(host); 2319 if (!err) { 2320 /* 2321 * Reset will re-enable the card's command queue, but tests 2322 * expect it to be disabled. 2323 */ 2324 if (card->ext_csd.cmdq_en) 2325 mmc_cmdq_disable(card); 2326 return RESULT_OK; 2327 } else if (err == -EOPNOTSUPP) { 2328 return RESULT_UNSUP_HOST; 2329 } 2330 2331 return RESULT_FAIL; 2332 } 2333 2334 static int mmc_test_send_status(struct mmc_test_card *test, 2335 struct mmc_command *cmd) 2336 { 2337 memset(cmd, 0, sizeof(*cmd)); 2338 2339 cmd->opcode = MMC_SEND_STATUS; 2340 if (!mmc_host_is_spi(test->card->host)) 2341 cmd->arg = test->card->rca << 16; 2342 cmd->flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC; 2343 2344 return mmc_wait_for_cmd(test->card->host, cmd, 0); 2345 } 2346 2347 static int mmc_test_ongoing_transfer(struct mmc_test_card *test, 2348 unsigned int dev_addr, int use_sbc, 2349 int repeat_cmd, int write, int use_areq) 2350 { 2351 struct mmc_test_req *rq = mmc_test_req_alloc(); 2352 struct mmc_host *host = test->card->host; 2353 struct mmc_test_area *t = &test->area; 2354 struct mmc_request *mrq; 2355 unsigned long timeout; 2356 bool expired = false; 2357 int ret = 0, cmd_ret; 2358 u32 status = 0; 2359 int count = 0; 2360 2361 if (!rq) 2362 return -ENOMEM; 2363 2364 mrq = &rq->mrq; 2365 if (use_sbc) 2366 mrq->sbc = &rq->sbc; 2367 mrq->cap_cmd_during_tfr = true; 2368 2369 mmc_test_prepare_mrq(test, mrq, t->sg, t->sg_len, dev_addr, t->blocks, 2370 512, write); 2371 2372 if (use_sbc && t->blocks > 1 && !mrq->sbc) { 2373 ret = mmc_host_cmd23(host) ? 2374 RESULT_UNSUP_CARD : 2375 RESULT_UNSUP_HOST; 2376 goto out_free; 2377 } 2378 2379 /* Start ongoing data request */ 2380 if (use_areq) { 2381 ret = mmc_test_start_areq(test, mrq, NULL); 2382 if (ret) 2383 goto out_free; 2384 } else { 2385 mmc_wait_for_req(host, mrq); 2386 } 2387 2388 timeout = jiffies + msecs_to_jiffies(3000); 2389 do { 2390 count += 1; 2391 2392 /* Send status command while data transfer in progress */ 2393 cmd_ret = mmc_test_send_status(test, &rq->status); 2394 if (cmd_ret) 2395 break; 2396 2397 status = rq->status.resp[0]; 2398 if (status & R1_ERROR) { 2399 cmd_ret = -EIO; 2400 break; 2401 } 2402 2403 if (mmc_is_req_done(host, mrq)) 2404 break; 2405 2406 expired = time_after(jiffies, timeout); 2407 if (expired) { 2408 pr_info("%s: timeout waiting for Tran state status %#x\n", 2409 mmc_hostname(host), status); 2410 cmd_ret = -ETIMEDOUT; 2411 break; 2412 } 2413 } while (repeat_cmd && R1_CURRENT_STATE(status) != R1_STATE_TRAN); 2414 2415 /* Wait for data request to complete */ 2416 if (use_areq) { 2417 ret = mmc_test_start_areq(test, NULL, mrq); 2418 } else { 2419 mmc_wait_for_req_done(test->card->host, mrq); 2420 } 2421 2422 /* 2423 * For cap_cmd_during_tfr request, upper layer must send stop if 2424 * required. 2425 */ 2426 if (mrq->data->stop && (mrq->data->error || !mrq->sbc)) { 2427 if (ret) 2428 mmc_wait_for_cmd(host, mrq->data->stop, 0); 2429 else 2430 ret = mmc_wait_for_cmd(host, mrq->data->stop, 0); 2431 } 2432 2433 if (ret) 2434 goto out_free; 2435 2436 if (cmd_ret) { 2437 pr_info("%s: Send Status failed: status %#x, error %d\n", 2438 mmc_hostname(test->card->host), status, cmd_ret); 2439 } 2440 2441 ret = mmc_test_check_result(test, mrq); 2442 if (ret) 2443 goto out_free; 2444 2445 ret = mmc_test_wait_busy(test); 2446 if (ret) 2447 goto out_free; 2448 2449 if (repeat_cmd && (t->blocks + 1) << 9 > t->max_tfr) 2450 pr_info("%s: %d commands completed during transfer of %u blocks\n", 2451 mmc_hostname(test->card->host), count, t->blocks); 2452 2453 if (cmd_ret) 2454 ret = cmd_ret; 2455 out_free: 2456 kfree(rq); 2457 2458 return ret; 2459 } 2460 2461 static int __mmc_test_cmds_during_tfr(struct mmc_test_card *test, 2462 unsigned long sz, int use_sbc, int write, 2463 int use_areq) 2464 { 2465 struct mmc_test_area *t = &test->area; 2466 int ret; 2467 2468 if (!(test->card->host->caps & MMC_CAP_CMD_DURING_TFR)) 2469 return RESULT_UNSUP_HOST; 2470 2471 ret = mmc_test_area_map(test, sz, 0, 0); 2472 if (ret) 2473 return ret; 2474 2475 ret = mmc_test_ongoing_transfer(test, t->dev_addr, use_sbc, 0, write, 2476 use_areq); 2477 if (ret) 2478 return ret; 2479 2480 return mmc_test_ongoing_transfer(test, t->dev_addr, use_sbc, 1, write, 2481 use_areq); 2482 } 2483 2484 static int mmc_test_cmds_during_tfr(struct mmc_test_card *test, int use_sbc, 2485 int write, int use_areq) 2486 { 2487 struct mmc_test_area *t = &test->area; 2488 unsigned long sz; 2489 int ret; 2490 2491 for (sz = 512; sz <= t->max_tfr; sz += 512) { 2492 ret = __mmc_test_cmds_during_tfr(test, sz, use_sbc, write, 2493 use_areq); 2494 if (ret) 2495 return ret; 2496 } 2497 return 0; 2498 } 2499 2500 /* 2501 * Commands during read - no Set Block Count (CMD23). 2502 */ 2503 static int mmc_test_cmds_during_read(struct mmc_test_card *test) 2504 { 2505 return mmc_test_cmds_during_tfr(test, 0, 0, 0); 2506 } 2507 2508 /* 2509 * Commands during write - no Set Block Count (CMD23). 2510 */ 2511 static int mmc_test_cmds_during_write(struct mmc_test_card *test) 2512 { 2513 return mmc_test_cmds_during_tfr(test, 0, 1, 0); 2514 } 2515 2516 /* 2517 * Commands during read - use Set Block Count (CMD23). 2518 */ 2519 static int mmc_test_cmds_during_read_cmd23(struct mmc_test_card *test) 2520 { 2521 return mmc_test_cmds_during_tfr(test, 1, 0, 0); 2522 } 2523 2524 /* 2525 * Commands during write - use Set Block Count (CMD23). 2526 */ 2527 static int mmc_test_cmds_during_write_cmd23(struct mmc_test_card *test) 2528 { 2529 return mmc_test_cmds_during_tfr(test, 1, 1, 0); 2530 } 2531 2532 /* 2533 * Commands during non-blocking read - use Set Block Count (CMD23). 2534 */ 2535 static int mmc_test_cmds_during_read_cmd23_nonblock(struct mmc_test_card *test) 2536 { 2537 return mmc_test_cmds_during_tfr(test, 1, 0, 1); 2538 } 2539 2540 /* 2541 * Commands during non-blocking write - use Set Block Count (CMD23). 2542 */ 2543 static int mmc_test_cmds_during_write_cmd23_nonblock(struct mmc_test_card *test) 2544 { 2545 return mmc_test_cmds_during_tfr(test, 1, 1, 1); 2546 } 2547 2548 static const struct mmc_test_case mmc_test_cases[] = { 2549 { 2550 .name = "Basic write (no data verification)", 2551 .run = mmc_test_basic_write, 2552 }, 2553 2554 { 2555 .name = "Basic read (no data verification)", 2556 .run = mmc_test_basic_read, 2557 }, 2558 2559 { 2560 .name = "Basic write (with data verification)", 2561 .prepare = mmc_test_prepare_write, 2562 .run = mmc_test_verify_write, 2563 .cleanup = mmc_test_cleanup, 2564 }, 2565 2566 { 2567 .name = "Basic read (with data verification)", 2568 .prepare = mmc_test_prepare_read, 2569 .run = mmc_test_verify_read, 2570 .cleanup = mmc_test_cleanup, 2571 }, 2572 2573 { 2574 .name = "Multi-block write", 2575 .prepare = mmc_test_prepare_write, 2576 .run = mmc_test_multi_write, 2577 .cleanup = mmc_test_cleanup, 2578 }, 2579 2580 { 2581 .name = "Multi-block read", 2582 .prepare = mmc_test_prepare_read, 2583 .run = mmc_test_multi_read, 2584 .cleanup = mmc_test_cleanup, 2585 }, 2586 2587 { 2588 .name = "Power of two block writes", 2589 .prepare = mmc_test_prepare_write, 2590 .run = mmc_test_pow2_write, 2591 .cleanup = mmc_test_cleanup, 2592 }, 2593 2594 { 2595 .name = "Power of two block reads", 2596 .prepare = mmc_test_prepare_read, 2597 .run = mmc_test_pow2_read, 2598 .cleanup = mmc_test_cleanup, 2599 }, 2600 2601 { 2602 .name = "Weird sized block writes", 2603 .prepare = mmc_test_prepare_write, 2604 .run = mmc_test_weird_write, 2605 .cleanup = mmc_test_cleanup, 2606 }, 2607 2608 { 2609 .name = "Weird sized block reads", 2610 .prepare = mmc_test_prepare_read, 2611 .run = mmc_test_weird_read, 2612 .cleanup = mmc_test_cleanup, 2613 }, 2614 2615 { 2616 .name = "Badly aligned write", 2617 .prepare = mmc_test_prepare_write, 2618 .run = mmc_test_align_write, 2619 .cleanup = mmc_test_cleanup, 2620 }, 2621 2622 { 2623 .name = "Badly aligned read", 2624 .prepare = mmc_test_prepare_read, 2625 .run = mmc_test_align_read, 2626 .cleanup = mmc_test_cleanup, 2627 }, 2628 2629 { 2630 .name = "Badly aligned multi-block write", 2631 .prepare = mmc_test_prepare_write, 2632 .run = mmc_test_align_multi_write, 2633 .cleanup = mmc_test_cleanup, 2634 }, 2635 2636 { 2637 .name = "Badly aligned multi-block read", 2638 .prepare = mmc_test_prepare_read, 2639 .run = mmc_test_align_multi_read, 2640 .cleanup = mmc_test_cleanup, 2641 }, 2642 2643 { 2644 .name = "Correct xfer_size at write (start failure)", 2645 .run = mmc_test_xfersize_write, 2646 }, 2647 2648 { 2649 .name = "Correct xfer_size at read (start failure)", 2650 .run = mmc_test_xfersize_read, 2651 }, 2652 2653 { 2654 .name = "Correct xfer_size at write (midway failure)", 2655 .run = mmc_test_multi_xfersize_write, 2656 }, 2657 2658 { 2659 .name = "Correct xfer_size at read (midway failure)", 2660 .run = mmc_test_multi_xfersize_read, 2661 }, 2662 2663 #ifdef CONFIG_HIGHMEM 2664 2665 { 2666 .name = "Highmem write", 2667 .prepare = mmc_test_prepare_write, 2668 .run = mmc_test_write_high, 2669 .cleanup = mmc_test_cleanup, 2670 }, 2671 2672 { 2673 .name = "Highmem read", 2674 .prepare = mmc_test_prepare_read, 2675 .run = mmc_test_read_high, 2676 .cleanup = mmc_test_cleanup, 2677 }, 2678 2679 { 2680 .name = "Multi-block highmem write", 2681 .prepare = mmc_test_prepare_write, 2682 .run = mmc_test_multi_write_high, 2683 .cleanup = mmc_test_cleanup, 2684 }, 2685 2686 { 2687 .name = "Multi-block highmem read", 2688 .prepare = mmc_test_prepare_read, 2689 .run = mmc_test_multi_read_high, 2690 .cleanup = mmc_test_cleanup, 2691 }, 2692 2693 #else 2694 2695 { 2696 .name = "Highmem write", 2697 .run = mmc_test_no_highmem, 2698 }, 2699 2700 { 2701 .name = "Highmem read", 2702 .run = mmc_test_no_highmem, 2703 }, 2704 2705 { 2706 .name = "Multi-block highmem write", 2707 .run = mmc_test_no_highmem, 2708 }, 2709 2710 { 2711 .name = "Multi-block highmem read", 2712 .run = mmc_test_no_highmem, 2713 }, 2714 2715 #endif /* CONFIG_HIGHMEM */ 2716 2717 { 2718 .name = "Best-case read performance", 2719 .prepare = mmc_test_area_prepare_fill, 2720 .run = mmc_test_best_read_performance, 2721 .cleanup = mmc_test_area_cleanup, 2722 }, 2723 2724 { 2725 .name = "Best-case write performance", 2726 .prepare = mmc_test_area_prepare_erase, 2727 .run = mmc_test_best_write_performance, 2728 .cleanup = mmc_test_area_cleanup, 2729 }, 2730 2731 { 2732 .name = "Best-case read performance into scattered pages", 2733 .prepare = mmc_test_area_prepare_fill, 2734 .run = mmc_test_best_read_perf_max_scatter, 2735 .cleanup = mmc_test_area_cleanup, 2736 }, 2737 2738 { 2739 .name = "Best-case write performance from scattered pages", 2740 .prepare = mmc_test_area_prepare_erase, 2741 .run = mmc_test_best_write_perf_max_scatter, 2742 .cleanup = mmc_test_area_cleanup, 2743 }, 2744 2745 { 2746 .name = "Single read performance by transfer size", 2747 .prepare = mmc_test_area_prepare_fill, 2748 .run = mmc_test_profile_read_perf, 2749 .cleanup = mmc_test_area_cleanup, 2750 }, 2751 2752 { 2753 .name = "Single write performance by transfer size", 2754 .prepare = mmc_test_area_prepare, 2755 .run = mmc_test_profile_write_perf, 2756 .cleanup = mmc_test_area_cleanup, 2757 }, 2758 2759 { 2760 .name = "Single trim performance by transfer size", 2761 .prepare = mmc_test_area_prepare_fill, 2762 .run = mmc_test_profile_trim_perf, 2763 .cleanup = mmc_test_area_cleanup, 2764 }, 2765 2766 { 2767 .name = "Consecutive read performance by transfer size", 2768 .prepare = mmc_test_area_prepare_fill, 2769 .run = mmc_test_profile_seq_read_perf, 2770 .cleanup = mmc_test_area_cleanup, 2771 }, 2772 2773 { 2774 .name = "Consecutive write performance by transfer size", 2775 .prepare = mmc_test_area_prepare, 2776 .run = mmc_test_profile_seq_write_perf, 2777 .cleanup = mmc_test_area_cleanup, 2778 }, 2779 2780 { 2781 .name = "Consecutive trim performance by transfer size", 2782 .prepare = mmc_test_area_prepare, 2783 .run = mmc_test_profile_seq_trim_perf, 2784 .cleanup = mmc_test_area_cleanup, 2785 }, 2786 2787 { 2788 .name = "Random read performance by transfer size", 2789 .prepare = mmc_test_area_prepare, 2790 .run = mmc_test_random_read_perf, 2791 .cleanup = mmc_test_area_cleanup, 2792 }, 2793 2794 { 2795 .name = "Random write performance by transfer size", 2796 .prepare = mmc_test_area_prepare, 2797 .run = mmc_test_random_write_perf, 2798 .cleanup = mmc_test_area_cleanup, 2799 }, 2800 2801 { 2802 .name = "Large sequential read into scattered pages", 2803 .prepare = mmc_test_area_prepare, 2804 .run = mmc_test_large_seq_read_perf, 2805 .cleanup = mmc_test_area_cleanup, 2806 }, 2807 2808 { 2809 .name = "Large sequential write from scattered pages", 2810 .prepare = mmc_test_area_prepare, 2811 .run = mmc_test_large_seq_write_perf, 2812 .cleanup = mmc_test_area_cleanup, 2813 }, 2814 2815 { 2816 .name = "Write performance with blocking req 4k to 4MB", 2817 .prepare = mmc_test_area_prepare, 2818 .run = mmc_test_profile_mult_write_blocking_perf, 2819 .cleanup = mmc_test_area_cleanup, 2820 }, 2821 2822 { 2823 .name = "Write performance with non-blocking req 4k to 4MB", 2824 .prepare = mmc_test_area_prepare, 2825 .run = mmc_test_profile_mult_write_nonblock_perf, 2826 .cleanup = mmc_test_area_cleanup, 2827 }, 2828 2829 { 2830 .name = "Read performance with blocking req 4k to 4MB", 2831 .prepare = mmc_test_area_prepare, 2832 .run = mmc_test_profile_mult_read_blocking_perf, 2833 .cleanup = mmc_test_area_cleanup, 2834 }, 2835 2836 { 2837 .name = "Read performance with non-blocking req 4k to 4MB", 2838 .prepare = mmc_test_area_prepare, 2839 .run = mmc_test_profile_mult_read_nonblock_perf, 2840 .cleanup = mmc_test_area_cleanup, 2841 }, 2842 2843 { 2844 .name = "Write performance blocking req 1 to 512 sg elems", 2845 .prepare = mmc_test_area_prepare, 2846 .run = mmc_test_profile_sglen_wr_blocking_perf, 2847 .cleanup = mmc_test_area_cleanup, 2848 }, 2849 2850 { 2851 .name = "Write performance non-blocking req 1 to 512 sg elems", 2852 .prepare = mmc_test_area_prepare, 2853 .run = mmc_test_profile_sglen_wr_nonblock_perf, 2854 .cleanup = mmc_test_area_cleanup, 2855 }, 2856 2857 { 2858 .name = "Read performance blocking req 1 to 512 sg elems", 2859 .prepare = mmc_test_area_prepare, 2860 .run = mmc_test_profile_sglen_r_blocking_perf, 2861 .cleanup = mmc_test_area_cleanup, 2862 }, 2863 2864 { 2865 .name = "Read performance non-blocking req 1 to 512 sg elems", 2866 .prepare = mmc_test_area_prepare, 2867 .run = mmc_test_profile_sglen_r_nonblock_perf, 2868 .cleanup = mmc_test_area_cleanup, 2869 }, 2870 2871 { 2872 .name = "Reset test", 2873 .run = mmc_test_reset, 2874 }, 2875 2876 { 2877 .name = "Commands during read - no Set Block Count (CMD23)", 2878 .prepare = mmc_test_area_prepare, 2879 .run = mmc_test_cmds_during_read, 2880 .cleanup = mmc_test_area_cleanup, 2881 }, 2882 2883 { 2884 .name = "Commands during write - no Set Block Count (CMD23)", 2885 .prepare = mmc_test_area_prepare, 2886 .run = mmc_test_cmds_during_write, 2887 .cleanup = mmc_test_area_cleanup, 2888 }, 2889 2890 { 2891 .name = "Commands during read - use Set Block Count (CMD23)", 2892 .prepare = mmc_test_area_prepare, 2893 .run = mmc_test_cmds_during_read_cmd23, 2894 .cleanup = mmc_test_area_cleanup, 2895 }, 2896 2897 { 2898 .name = "Commands during write - use Set Block Count (CMD23)", 2899 .prepare = mmc_test_area_prepare, 2900 .run = mmc_test_cmds_during_write_cmd23, 2901 .cleanup = mmc_test_area_cleanup, 2902 }, 2903 2904 { 2905 .name = "Commands during non-blocking read - use Set Block Count (CMD23)", 2906 .prepare = mmc_test_area_prepare, 2907 .run = mmc_test_cmds_during_read_cmd23_nonblock, 2908 .cleanup = mmc_test_area_cleanup, 2909 }, 2910 2911 { 2912 .name = "Commands during non-blocking write - use Set Block Count (CMD23)", 2913 .prepare = mmc_test_area_prepare, 2914 .run = mmc_test_cmds_during_write_cmd23_nonblock, 2915 .cleanup = mmc_test_area_cleanup, 2916 }, 2917 }; 2918 2919 static DEFINE_MUTEX(mmc_test_lock); 2920 2921 static LIST_HEAD(mmc_test_result); 2922 2923 static void mmc_test_run(struct mmc_test_card *test, int testcase) 2924 { 2925 int i, ret; 2926 2927 pr_info("%s: Starting tests of card %s...\n", 2928 mmc_hostname(test->card->host), mmc_card_id(test->card)); 2929 2930 mmc_claim_host(test->card->host); 2931 2932 for (i = 0; i < ARRAY_SIZE(mmc_test_cases); i++) { 2933 struct mmc_test_general_result *gr; 2934 2935 if (testcase && ((i + 1) != testcase)) 2936 continue; 2937 2938 pr_info("%s: Test case %d. %s...\n", 2939 mmc_hostname(test->card->host), i + 1, 2940 mmc_test_cases[i].name); 2941 2942 if (mmc_test_cases[i].prepare) { 2943 ret = mmc_test_cases[i].prepare(test); 2944 if (ret) { 2945 pr_info("%s: Result: Prepare stage failed! (%d)\n", 2946 mmc_hostname(test->card->host), 2947 ret); 2948 continue; 2949 } 2950 } 2951 2952 gr = kzalloc(sizeof(*gr), GFP_KERNEL); 2953 if (gr) { 2954 INIT_LIST_HEAD(&gr->tr_lst); 2955 2956 /* Assign data what we know already */ 2957 gr->card = test->card; 2958 gr->testcase = i; 2959 2960 /* Append container to global one */ 2961 list_add_tail(&gr->link, &mmc_test_result); 2962 2963 /* 2964 * Save the pointer to created container in our private 2965 * structure. 2966 */ 2967 test->gr = gr; 2968 } 2969 2970 ret = mmc_test_cases[i].run(test); 2971 switch (ret) { 2972 case RESULT_OK: 2973 pr_info("%s: Result: OK\n", 2974 mmc_hostname(test->card->host)); 2975 break; 2976 case RESULT_FAIL: 2977 pr_info("%s: Result: FAILED\n", 2978 mmc_hostname(test->card->host)); 2979 break; 2980 case RESULT_UNSUP_HOST: 2981 pr_info("%s: Result: UNSUPPORTED (by host)\n", 2982 mmc_hostname(test->card->host)); 2983 break; 2984 case RESULT_UNSUP_CARD: 2985 pr_info("%s: Result: UNSUPPORTED (by card)\n", 2986 mmc_hostname(test->card->host)); 2987 break; 2988 default: 2989 pr_info("%s: Result: ERROR (%d)\n", 2990 mmc_hostname(test->card->host), ret); 2991 } 2992 2993 /* Save the result */ 2994 if (gr) 2995 gr->result = ret; 2996 2997 if (mmc_test_cases[i].cleanup) { 2998 ret = mmc_test_cases[i].cleanup(test); 2999 if (ret) { 3000 pr_info("%s: Warning: Cleanup stage failed! (%d)\n", 3001 mmc_hostname(test->card->host), 3002 ret); 3003 } 3004 } 3005 } 3006 3007 mmc_release_host(test->card->host); 3008 3009 pr_info("%s: Tests completed.\n", 3010 mmc_hostname(test->card->host)); 3011 } 3012 3013 static void mmc_test_free_result(struct mmc_card *card) 3014 { 3015 struct mmc_test_general_result *gr, *grs; 3016 3017 mutex_lock(&mmc_test_lock); 3018 3019 list_for_each_entry_safe(gr, grs, &mmc_test_result, link) { 3020 struct mmc_test_transfer_result *tr, *trs; 3021 3022 if (card && gr->card != card) 3023 continue; 3024 3025 list_for_each_entry_safe(tr, trs, &gr->tr_lst, link) { 3026 list_del(&tr->link); 3027 kfree(tr); 3028 } 3029 3030 list_del(&gr->link); 3031 kfree(gr); 3032 } 3033 3034 mutex_unlock(&mmc_test_lock); 3035 } 3036 3037 static LIST_HEAD(mmc_test_file_test); 3038 3039 static int mtf_test_show(struct seq_file *sf, void *data) 3040 { 3041 struct mmc_card *card = (struct mmc_card *)sf->private; 3042 struct mmc_test_general_result *gr; 3043 3044 mutex_lock(&mmc_test_lock); 3045 3046 list_for_each_entry(gr, &mmc_test_result, link) { 3047 struct mmc_test_transfer_result *tr; 3048 3049 if (gr->card != card) 3050 continue; 3051 3052 seq_printf(sf, "Test %d: %d\n", gr->testcase + 1, gr->result); 3053 3054 list_for_each_entry(tr, &gr->tr_lst, link) { 3055 seq_printf(sf, "%u %d %llu.%09u %u %u.%02u\n", 3056 tr->count, tr->sectors, 3057 (u64)tr->ts.tv_sec, (u32)tr->ts.tv_nsec, 3058 tr->rate, tr->iops / 100, tr->iops % 100); 3059 } 3060 } 3061 3062 mutex_unlock(&mmc_test_lock); 3063 3064 return 0; 3065 } 3066 3067 static int mtf_test_open(struct inode *inode, struct file *file) 3068 { 3069 return single_open(file, mtf_test_show, inode->i_private); 3070 } 3071 3072 static ssize_t mtf_test_write(struct file *file, const char __user *buf, 3073 size_t count, loff_t *pos) 3074 { 3075 struct seq_file *sf = (struct seq_file *)file->private_data; 3076 struct mmc_card *card = (struct mmc_card *)sf->private; 3077 struct mmc_test_card *test; 3078 long testcase; 3079 int ret; 3080 3081 ret = kstrtol_from_user(buf, count, 10, &testcase); 3082 if (ret) 3083 return ret; 3084 3085 test = kzalloc(sizeof(*test), GFP_KERNEL); 3086 if (!test) 3087 return -ENOMEM; 3088 3089 /* 3090 * Remove all test cases associated with given card. Thus we have only 3091 * actual data of the last run. 3092 */ 3093 mmc_test_free_result(card); 3094 3095 test->card = card; 3096 3097 test->buffer = kzalloc(BUFFER_SIZE, GFP_KERNEL); 3098 #ifdef CONFIG_HIGHMEM 3099 test->highmem = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, BUFFER_ORDER); 3100 #endif 3101 3102 #ifdef CONFIG_HIGHMEM 3103 if (test->buffer && test->highmem) { 3104 #else 3105 if (test->buffer) { 3106 #endif 3107 mutex_lock(&mmc_test_lock); 3108 mmc_test_run(test, testcase); 3109 mutex_unlock(&mmc_test_lock); 3110 } 3111 3112 #ifdef CONFIG_HIGHMEM 3113 __free_pages(test->highmem, BUFFER_ORDER); 3114 #endif 3115 kfree(test->buffer); 3116 kfree(test); 3117 3118 return count; 3119 } 3120 3121 static const struct file_operations mmc_test_fops_test = { 3122 .open = mtf_test_open, 3123 .read = seq_read, 3124 .write = mtf_test_write, 3125 .llseek = seq_lseek, 3126 .release = single_release, 3127 }; 3128 3129 static int mtf_testlist_show(struct seq_file *sf, void *data) 3130 { 3131 int i; 3132 3133 mutex_lock(&mmc_test_lock); 3134 3135 seq_puts(sf, "0:\tRun all tests\n"); 3136 for (i = 0; i < ARRAY_SIZE(mmc_test_cases); i++) 3137 seq_printf(sf, "%d:\t%s\n", i + 1, mmc_test_cases[i].name); 3138 3139 mutex_unlock(&mmc_test_lock); 3140 3141 return 0; 3142 } 3143 3144 DEFINE_SHOW_ATTRIBUTE(mtf_testlist); 3145 3146 static void mmc_test_free_dbgfs_file(struct mmc_card *card) 3147 { 3148 struct mmc_test_dbgfs_file *df, *dfs; 3149 3150 mutex_lock(&mmc_test_lock); 3151 3152 list_for_each_entry_safe(df, dfs, &mmc_test_file_test, link) { 3153 if (card && df->card != card) 3154 continue; 3155 debugfs_remove(df->file); 3156 list_del(&df->link); 3157 kfree(df); 3158 } 3159 3160 mutex_unlock(&mmc_test_lock); 3161 } 3162 3163 static int __mmc_test_register_dbgfs_file(struct mmc_card *card, 3164 const char *name, umode_t mode, const struct file_operations *fops) 3165 { 3166 struct dentry *file = NULL; 3167 struct mmc_test_dbgfs_file *df; 3168 3169 if (card->debugfs_root) 3170 debugfs_create_file(name, mode, card->debugfs_root, card, fops); 3171 3172 df = kmalloc(sizeof(*df), GFP_KERNEL); 3173 if (!df) { 3174 debugfs_remove(file); 3175 return -ENOMEM; 3176 } 3177 3178 df->card = card; 3179 df->file = file; 3180 3181 list_add(&df->link, &mmc_test_file_test); 3182 return 0; 3183 } 3184 3185 static int mmc_test_register_dbgfs_file(struct mmc_card *card) 3186 { 3187 int ret; 3188 3189 mutex_lock(&mmc_test_lock); 3190 3191 ret = __mmc_test_register_dbgfs_file(card, "test", S_IWUSR | S_IRUGO, 3192 &mmc_test_fops_test); 3193 if (ret) 3194 goto err; 3195 3196 ret = __mmc_test_register_dbgfs_file(card, "testlist", S_IRUGO, 3197 &mtf_testlist_fops); 3198 if (ret) 3199 goto err; 3200 3201 err: 3202 mutex_unlock(&mmc_test_lock); 3203 3204 return ret; 3205 } 3206 3207 static int mmc_test_probe(struct mmc_card *card) 3208 { 3209 int ret; 3210 3211 if (!mmc_card_mmc(card) && !mmc_card_sd(card)) 3212 return -ENODEV; 3213 3214 ret = mmc_test_register_dbgfs_file(card); 3215 if (ret) 3216 return ret; 3217 3218 if (card->ext_csd.cmdq_en) { 3219 mmc_claim_host(card->host); 3220 ret = mmc_cmdq_disable(card); 3221 mmc_release_host(card->host); 3222 if (ret) 3223 return ret; 3224 } 3225 3226 dev_info(&card->dev, "Card claimed for testing.\n"); 3227 3228 return 0; 3229 } 3230 3231 static void mmc_test_remove(struct mmc_card *card) 3232 { 3233 if (card->reenable_cmdq) { 3234 mmc_claim_host(card->host); 3235 mmc_cmdq_enable(card); 3236 mmc_release_host(card->host); 3237 } 3238 mmc_test_free_result(card); 3239 mmc_test_free_dbgfs_file(card); 3240 } 3241 3242 static void mmc_test_shutdown(struct mmc_card *card) 3243 { 3244 } 3245 3246 static struct mmc_driver mmc_driver = { 3247 .drv = { 3248 .name = "mmc_test", 3249 }, 3250 .probe = mmc_test_probe, 3251 .remove = mmc_test_remove, 3252 .shutdown = mmc_test_shutdown, 3253 }; 3254 3255 static int __init mmc_test_init(void) 3256 { 3257 return mmc_register_driver(&mmc_driver); 3258 } 3259 3260 static void __exit mmc_test_exit(void) 3261 { 3262 /* Clear stalled data if card is still plugged */ 3263 mmc_test_free_result(NULL); 3264 mmc_test_free_dbgfs_file(NULL); 3265 3266 mmc_unregister_driver(&mmc_driver); 3267 } 3268 3269 module_init(mmc_test_init); 3270 module_exit(mmc_test_exit); 3271 3272 MODULE_LICENSE("GPL"); 3273 MODULE_DESCRIPTION("Multimedia Card (MMC) host test driver"); 3274 MODULE_AUTHOR("Pierre Ossman"); 3275