1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * Copyright 2007-2008 Pierre Ossman 4 */ 5 6 #include <linux/mmc/core.h> 7 #include <linux/mmc/card.h> 8 #include <linux/mmc/host.h> 9 #include <linux/mmc/mmc.h> 10 #include <linux/slab.h> 11 12 #include <linux/scatterlist.h> 13 #include <linux/list.h> 14 15 #include <linux/debugfs.h> 16 #include <linux/uaccess.h> 17 #include <linux/seq_file.h> 18 #include <linux/module.h> 19 20 #include "core.h" 21 #include "card.h" 22 #include "host.h" 23 #include "bus.h" 24 #include "mmc_ops.h" 25 26 #define RESULT_OK 0 27 #define RESULT_FAIL 1 28 #define RESULT_UNSUP_HOST 2 29 #define RESULT_UNSUP_CARD 3 30 31 #define BUFFER_ORDER 2 32 #define BUFFER_SIZE (PAGE_SIZE << BUFFER_ORDER) 33 34 #define TEST_ALIGN_END 8 35 36 /* 37 * Limit the test area size to the maximum MMC HC erase group size. Note that 38 * the maximum SD allocation unit size is just 4MiB. 39 */ 40 #define TEST_AREA_MAX_SIZE (128 * 1024 * 1024) 41 42 /** 43 * struct mmc_test_pages - pages allocated by 'alloc_pages()'. 44 * @page: first page in the allocation 45 * @order: order of the number of pages allocated 46 */ 47 struct mmc_test_pages { 48 struct page *page; 49 unsigned int order; 50 }; 51 52 /** 53 * struct mmc_test_mem - allocated memory. 54 * @arr: array of allocations 55 * @cnt: number of allocations 56 */ 57 struct mmc_test_mem { 58 struct mmc_test_pages *arr; 59 unsigned int cnt; 60 }; 61 62 /** 63 * struct mmc_test_area - information for performance tests. 64 * @max_sz: test area size (in bytes) 65 * @dev_addr: address on card at which to do performance tests 66 * @max_tfr: maximum transfer size allowed by driver (in bytes) 67 * @max_segs: maximum segments allowed by driver in scatterlist @sg 68 * @max_seg_sz: maximum segment size allowed by driver 69 * @blocks: number of (512 byte) blocks currently mapped by @sg 70 * @sg_len: length of currently mapped scatterlist @sg 71 * @mem: allocated memory 72 * @sg: scatterlist 73 * @sg_areq: scatterlist for non-blocking request 74 */ 75 struct mmc_test_area { 76 unsigned long max_sz; 77 unsigned int dev_addr; 78 unsigned int max_tfr; 79 unsigned int max_segs; 80 unsigned int max_seg_sz; 81 unsigned int blocks; 82 unsigned int sg_len; 83 struct mmc_test_mem *mem; 84 struct scatterlist *sg; 85 struct scatterlist *sg_areq; 86 }; 87 88 /** 89 * struct mmc_test_transfer_result - transfer results for performance tests. 90 * @link: double-linked list 91 * @count: amount of group of sectors to check 92 * @sectors: amount of sectors to check in one group 93 * @ts: time values of transfer 94 * @rate: calculated transfer rate 95 * @iops: I/O operations per second (times 100) 96 */ 97 struct mmc_test_transfer_result { 98 struct list_head link; 99 unsigned int count; 100 unsigned int sectors; 101 struct timespec64 ts; 102 unsigned int rate; 103 unsigned int iops; 104 }; 105 106 /** 107 * struct mmc_test_general_result - results for tests. 108 * @link: double-linked list 109 * @card: card under test 110 * @testcase: number of test case 111 * @result: result of test run 112 * @tr_lst: transfer measurements if any as mmc_test_transfer_result 113 */ 114 struct mmc_test_general_result { 115 struct list_head link; 116 struct mmc_card *card; 117 int testcase; 118 int result; 119 struct list_head tr_lst; 120 }; 121 122 /** 123 * struct mmc_test_dbgfs_file - debugfs related file. 124 * @link: double-linked list 125 * @card: card under test 126 * @file: file created under debugfs 127 */ 128 struct mmc_test_dbgfs_file { 129 struct list_head link; 130 struct mmc_card *card; 131 struct dentry *file; 132 }; 133 134 /** 135 * struct mmc_test_card - test information. 136 * @card: card under test 137 * @scratch: transfer buffer 138 * @buffer: transfer buffer 139 * @highmem: buffer for highmem tests 140 * @area: information for performance tests 141 * @gr: pointer to results of current testcase 142 */ 143 struct mmc_test_card { 144 struct mmc_card *card; 145 146 u8 scratch[BUFFER_SIZE]; 147 u8 *buffer; 148 #ifdef CONFIG_HIGHMEM 149 struct page *highmem; 150 #endif 151 struct mmc_test_area area; 152 struct mmc_test_general_result *gr; 153 }; 154 155 enum mmc_test_prep_media { 156 MMC_TEST_PREP_NONE = 0, 157 MMC_TEST_PREP_WRITE_FULL = 1 << 0, 158 MMC_TEST_PREP_ERASE = 1 << 1, 159 }; 160 161 struct mmc_test_multiple_rw { 162 unsigned int *sg_len; 163 unsigned int *bs; 164 unsigned int len; 165 unsigned int size; 166 bool do_write; 167 bool do_nonblock_req; 168 enum mmc_test_prep_media prepare; 169 }; 170 171 /*******************************************************************/ 172 /* General helper functions */ 173 /*******************************************************************/ 174 175 /* 176 * Configure correct block size in card 177 */ 178 static int mmc_test_set_blksize(struct mmc_test_card *test, unsigned size) 179 { 180 return mmc_set_blocklen(test->card, size); 181 } 182 183 static bool mmc_test_card_cmd23(struct mmc_card *card) 184 { 185 return mmc_card_mmc(card) || 186 (mmc_card_sd(card) && card->scr.cmds & SD_SCR_CMD23_SUPPORT); 187 } 188 189 static void mmc_test_prepare_sbc(struct mmc_test_card *test, 190 struct mmc_request *mrq, unsigned int blocks) 191 { 192 struct mmc_card *card = test->card; 193 194 if (!mrq->sbc || !mmc_host_cmd23(card->host) || 195 !mmc_test_card_cmd23(card) || !mmc_op_multi(mrq->cmd->opcode) || 196 (card->quirks & MMC_QUIRK_BLK_NO_CMD23)) { 197 mrq->sbc = NULL; 198 return; 199 } 200 201 mrq->sbc->opcode = MMC_SET_BLOCK_COUNT; 202 mrq->sbc->arg = blocks; 203 mrq->sbc->flags = MMC_RSP_R1 | MMC_CMD_AC; 204 } 205 206 /* 207 * Fill in the mmc_request structure given a set of transfer parameters. 208 */ 209 static void mmc_test_prepare_mrq(struct mmc_test_card *test, 210 struct mmc_request *mrq, struct scatterlist *sg, unsigned sg_len, 211 unsigned dev_addr, unsigned blocks, unsigned blksz, int write) 212 { 213 if (WARN_ON(!mrq || !mrq->cmd || !mrq->data || !mrq->stop)) 214 return; 215 216 if (blocks > 1) { 217 mrq->cmd->opcode = write ? 218 MMC_WRITE_MULTIPLE_BLOCK : MMC_READ_MULTIPLE_BLOCK; 219 } else { 220 mrq->cmd->opcode = write ? 221 MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK; 222 } 223 224 mrq->cmd->arg = dev_addr; 225 if (!mmc_card_blockaddr(test->card)) 226 mrq->cmd->arg <<= 9; 227 228 mrq->cmd->flags = MMC_RSP_R1 | MMC_CMD_ADTC; 229 230 if (blocks == 1) 231 mrq->stop = NULL; 232 else { 233 mrq->stop->opcode = MMC_STOP_TRANSMISSION; 234 mrq->stop->arg = 0; 235 mrq->stop->flags = MMC_RSP_R1B | MMC_CMD_AC; 236 } 237 238 mrq->data->blksz = blksz; 239 mrq->data->blocks = blocks; 240 mrq->data->flags = write ? MMC_DATA_WRITE : MMC_DATA_READ; 241 mrq->data->sg = sg; 242 mrq->data->sg_len = sg_len; 243 244 mmc_test_prepare_sbc(test, mrq, blocks); 245 246 mmc_set_data_timeout(mrq->data, test->card); 247 } 248 249 static int mmc_test_busy(struct mmc_command *cmd) 250 { 251 return !(cmd->resp[0] & R1_READY_FOR_DATA) || 252 (R1_CURRENT_STATE(cmd->resp[0]) == R1_STATE_PRG); 253 } 254 255 /* 256 * Wait for the card to finish the busy state 257 */ 258 static int mmc_test_wait_busy(struct mmc_test_card *test) 259 { 260 int ret, busy; 261 struct mmc_command cmd = {}; 262 263 busy = 0; 264 do { 265 memset(&cmd, 0, sizeof(struct mmc_command)); 266 267 cmd.opcode = MMC_SEND_STATUS; 268 cmd.arg = test->card->rca << 16; 269 cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; 270 271 ret = mmc_wait_for_cmd(test->card->host, &cmd, 0); 272 if (ret) 273 break; 274 275 if (!busy && mmc_test_busy(&cmd)) { 276 busy = 1; 277 if (test->card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) 278 pr_info("%s: Warning: Host did not wait for busy state to end.\n", 279 mmc_hostname(test->card->host)); 280 } 281 } while (mmc_test_busy(&cmd)); 282 283 return ret; 284 } 285 286 /* 287 * Transfer a single sector of kernel addressable data 288 */ 289 static int mmc_test_buffer_transfer(struct mmc_test_card *test, 290 u8 *buffer, unsigned addr, unsigned blksz, int write) 291 { 292 struct mmc_request mrq = {}; 293 struct mmc_command cmd = {}; 294 struct mmc_command stop = {}; 295 struct mmc_data data = {}; 296 297 struct scatterlist sg; 298 299 mrq.cmd = &cmd; 300 mrq.data = &data; 301 mrq.stop = &stop; 302 303 sg_init_one(&sg, buffer, blksz); 304 305 mmc_test_prepare_mrq(test, &mrq, &sg, 1, addr, 1, blksz, write); 306 307 mmc_wait_for_req(test->card->host, &mrq); 308 309 if (cmd.error) 310 return cmd.error; 311 if (data.error) 312 return data.error; 313 314 return mmc_test_wait_busy(test); 315 } 316 317 static void mmc_test_free_mem(struct mmc_test_mem *mem) 318 { 319 if (!mem) 320 return; 321 while (mem->cnt--) 322 __free_pages(mem->arr[mem->cnt].page, 323 mem->arr[mem->cnt].order); 324 kfree(mem->arr); 325 kfree(mem); 326 } 327 328 /* 329 * Allocate a lot of memory, preferably max_sz but at least min_sz. In case 330 * there isn't much memory do not exceed 1/16th total lowmem pages. Also do 331 * not exceed a maximum number of segments and try not to make segments much 332 * bigger than maximum segment size. 333 */ 334 static struct mmc_test_mem *mmc_test_alloc_mem(unsigned long min_sz, 335 unsigned long max_sz, 336 unsigned int max_segs, 337 unsigned int max_seg_sz) 338 { 339 unsigned long max_page_cnt = DIV_ROUND_UP(max_sz, PAGE_SIZE); 340 unsigned long min_page_cnt = DIV_ROUND_UP(min_sz, PAGE_SIZE); 341 unsigned long max_seg_page_cnt = DIV_ROUND_UP(max_seg_sz, PAGE_SIZE); 342 unsigned long page_cnt = 0; 343 unsigned long limit = nr_free_buffer_pages() >> 4; 344 struct mmc_test_mem *mem; 345 346 if (max_page_cnt > limit) 347 max_page_cnt = limit; 348 if (min_page_cnt > max_page_cnt) 349 min_page_cnt = max_page_cnt; 350 351 if (max_seg_page_cnt > max_page_cnt) 352 max_seg_page_cnt = max_page_cnt; 353 354 if (max_segs > max_page_cnt) 355 max_segs = max_page_cnt; 356 357 mem = kzalloc(sizeof(*mem), GFP_KERNEL); 358 if (!mem) 359 return NULL; 360 361 mem->arr = kcalloc(max_segs, sizeof(*mem->arr), GFP_KERNEL); 362 if (!mem->arr) 363 goto out_free; 364 365 while (max_page_cnt) { 366 struct page *page; 367 unsigned int order; 368 gfp_t flags = GFP_KERNEL | GFP_DMA | __GFP_NOWARN | 369 __GFP_NORETRY; 370 371 order = get_order(max_seg_page_cnt << PAGE_SHIFT); 372 while (1) { 373 page = alloc_pages(flags, order); 374 if (page || !order) 375 break; 376 order -= 1; 377 } 378 if (!page) { 379 if (page_cnt < min_page_cnt) 380 goto out_free; 381 break; 382 } 383 mem->arr[mem->cnt].page = page; 384 mem->arr[mem->cnt].order = order; 385 mem->cnt += 1; 386 if (max_page_cnt <= (1UL << order)) 387 break; 388 max_page_cnt -= 1UL << order; 389 page_cnt += 1UL << order; 390 if (mem->cnt >= max_segs) { 391 if (page_cnt < min_page_cnt) 392 goto out_free; 393 break; 394 } 395 } 396 397 return mem; 398 399 out_free: 400 mmc_test_free_mem(mem); 401 return NULL; 402 } 403 404 /* 405 * Map memory into a scatterlist. Optionally allow the same memory to be 406 * mapped more than once. 407 */ 408 static int mmc_test_map_sg(struct mmc_test_mem *mem, unsigned long size, 409 struct scatterlist *sglist, int repeat, 410 unsigned int max_segs, unsigned int max_seg_sz, 411 unsigned int *sg_len, int min_sg_len) 412 { 413 struct scatterlist *sg = NULL; 414 unsigned int i; 415 unsigned long sz = size; 416 417 sg_init_table(sglist, max_segs); 418 if (min_sg_len > max_segs) 419 min_sg_len = max_segs; 420 421 *sg_len = 0; 422 do { 423 for (i = 0; i < mem->cnt; i++) { 424 unsigned long len = PAGE_SIZE << mem->arr[i].order; 425 426 if (min_sg_len && (size / min_sg_len < len)) 427 len = ALIGN(size / min_sg_len, 512); 428 if (len > sz) 429 len = sz; 430 if (len > max_seg_sz) 431 len = max_seg_sz; 432 if (sg) 433 sg = sg_next(sg); 434 else 435 sg = sglist; 436 if (!sg) 437 return -EINVAL; 438 sg_set_page(sg, mem->arr[i].page, len, 0); 439 sz -= len; 440 *sg_len += 1; 441 if (!sz) 442 break; 443 } 444 } while (sz && repeat); 445 446 if (sz) 447 return -EINVAL; 448 449 if (sg) 450 sg_mark_end(sg); 451 452 return 0; 453 } 454 455 /* 456 * Map memory into a scatterlist so that no pages are contiguous. Allow the 457 * same memory to be mapped more than once. 458 */ 459 static int mmc_test_map_sg_max_scatter(struct mmc_test_mem *mem, 460 unsigned long sz, 461 struct scatterlist *sglist, 462 unsigned int max_segs, 463 unsigned int max_seg_sz, 464 unsigned int *sg_len) 465 { 466 struct scatterlist *sg = NULL; 467 unsigned int i = mem->cnt, cnt; 468 unsigned long len; 469 void *base, *addr, *last_addr = NULL; 470 471 sg_init_table(sglist, max_segs); 472 473 *sg_len = 0; 474 while (sz) { 475 base = page_address(mem->arr[--i].page); 476 cnt = 1 << mem->arr[i].order; 477 while (sz && cnt) { 478 addr = base + PAGE_SIZE * --cnt; 479 if (last_addr && last_addr + PAGE_SIZE == addr) 480 continue; 481 last_addr = addr; 482 len = PAGE_SIZE; 483 if (len > max_seg_sz) 484 len = max_seg_sz; 485 if (len > sz) 486 len = sz; 487 if (sg) 488 sg = sg_next(sg); 489 else 490 sg = sglist; 491 if (!sg) 492 return -EINVAL; 493 sg_set_page(sg, virt_to_page(addr), len, 0); 494 sz -= len; 495 *sg_len += 1; 496 } 497 if (i == 0) 498 i = mem->cnt; 499 } 500 501 if (sg) 502 sg_mark_end(sg); 503 504 return 0; 505 } 506 507 /* 508 * Calculate transfer rate in bytes per second. 509 */ 510 static unsigned int mmc_test_rate(uint64_t bytes, struct timespec64 *ts) 511 { 512 uint64_t ns; 513 514 ns = timespec64_to_ns(ts); 515 bytes *= 1000000000; 516 517 while (ns > UINT_MAX) { 518 bytes >>= 1; 519 ns >>= 1; 520 } 521 522 if (!ns) 523 return 0; 524 525 do_div(bytes, (uint32_t)ns); 526 527 return bytes; 528 } 529 530 /* 531 * Save transfer results for future usage 532 */ 533 static void mmc_test_save_transfer_result(struct mmc_test_card *test, 534 unsigned int count, unsigned int sectors, struct timespec64 ts, 535 unsigned int rate, unsigned int iops) 536 { 537 struct mmc_test_transfer_result *tr; 538 539 if (!test->gr) 540 return; 541 542 tr = kmalloc(sizeof(*tr), GFP_KERNEL); 543 if (!tr) 544 return; 545 546 tr->count = count; 547 tr->sectors = sectors; 548 tr->ts = ts; 549 tr->rate = rate; 550 tr->iops = iops; 551 552 list_add_tail(&tr->link, &test->gr->tr_lst); 553 } 554 555 /* 556 * Print the transfer rate. 557 */ 558 static void mmc_test_print_rate(struct mmc_test_card *test, uint64_t bytes, 559 struct timespec64 *ts1, struct timespec64 *ts2) 560 { 561 unsigned int rate, iops, sectors = bytes >> 9; 562 struct timespec64 ts; 563 564 ts = timespec64_sub(*ts2, *ts1); 565 566 rate = mmc_test_rate(bytes, &ts); 567 iops = mmc_test_rate(100, &ts); /* I/O ops per sec x 100 */ 568 569 pr_info("%s: Transfer of %u sectors (%u%s KiB) took %llu.%09u " 570 "seconds (%u kB/s, %u KiB/s, %u.%02u IOPS)\n", 571 mmc_hostname(test->card->host), sectors, sectors >> 1, 572 (sectors & 1 ? ".5" : ""), (u64)ts.tv_sec, 573 (u32)ts.tv_nsec, rate / 1000, rate / 1024, 574 iops / 100, iops % 100); 575 576 mmc_test_save_transfer_result(test, 1, sectors, ts, rate, iops); 577 } 578 579 /* 580 * Print the average transfer rate. 581 */ 582 static void mmc_test_print_avg_rate(struct mmc_test_card *test, uint64_t bytes, 583 unsigned int count, struct timespec64 *ts1, 584 struct timespec64 *ts2) 585 { 586 unsigned int rate, iops, sectors = bytes >> 9; 587 uint64_t tot = bytes * count; 588 struct timespec64 ts; 589 590 ts = timespec64_sub(*ts2, *ts1); 591 592 rate = mmc_test_rate(tot, &ts); 593 iops = mmc_test_rate(count * 100, &ts); /* I/O ops per sec x 100 */ 594 595 pr_info("%s: Transfer of %u x %u sectors (%u x %u%s KiB) took " 596 "%llu.%09u seconds (%u kB/s, %u KiB/s, " 597 "%u.%02u IOPS, sg_len %d)\n", 598 mmc_hostname(test->card->host), count, sectors, count, 599 sectors >> 1, (sectors & 1 ? ".5" : ""), 600 (u64)ts.tv_sec, (u32)ts.tv_nsec, 601 rate / 1000, rate / 1024, iops / 100, iops % 100, 602 test->area.sg_len); 603 604 mmc_test_save_transfer_result(test, count, sectors, ts, rate, iops); 605 } 606 607 /* 608 * Return the card size in sectors. 609 */ 610 static unsigned int mmc_test_capacity(struct mmc_card *card) 611 { 612 if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) 613 return card->ext_csd.sectors; 614 else 615 return card->csd.capacity << (card->csd.read_blkbits - 9); 616 } 617 618 /*******************************************************************/ 619 /* Test preparation and cleanup */ 620 /*******************************************************************/ 621 622 /* 623 * Fill the first couple of sectors of the card with known data 624 * so that bad reads/writes can be detected 625 */ 626 static int __mmc_test_prepare(struct mmc_test_card *test, int write, int val) 627 { 628 int ret, i; 629 630 ret = mmc_test_set_blksize(test, 512); 631 if (ret) 632 return ret; 633 634 if (write) 635 memset(test->buffer, val, 512); 636 else { 637 for (i = 0; i < 512; i++) 638 test->buffer[i] = i; 639 } 640 641 for (i = 0; i < BUFFER_SIZE / 512; i++) { 642 ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1); 643 if (ret) 644 return ret; 645 } 646 647 return 0; 648 } 649 650 static int mmc_test_prepare_write(struct mmc_test_card *test) 651 { 652 return __mmc_test_prepare(test, 1, 0xDF); 653 } 654 655 static int mmc_test_prepare_read(struct mmc_test_card *test) 656 { 657 return __mmc_test_prepare(test, 0, 0); 658 } 659 660 static int mmc_test_cleanup(struct mmc_test_card *test) 661 { 662 return __mmc_test_prepare(test, 1, 0); 663 } 664 665 /*******************************************************************/ 666 /* Test execution helpers */ 667 /*******************************************************************/ 668 669 /* 670 * Modifies the mmc_request to perform the "short transfer" tests 671 */ 672 static void mmc_test_prepare_broken_mrq(struct mmc_test_card *test, 673 struct mmc_request *mrq, int write) 674 { 675 if (WARN_ON(!mrq || !mrq->cmd || !mrq->data)) 676 return; 677 678 if (mrq->data->blocks > 1) { 679 mrq->cmd->opcode = write ? 680 MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK; 681 mrq->stop = NULL; 682 } else { 683 mrq->cmd->opcode = MMC_SEND_STATUS; 684 mrq->cmd->arg = test->card->rca << 16; 685 } 686 } 687 688 /* 689 * Checks that a normal transfer didn't have any errors 690 */ 691 static int mmc_test_check_result(struct mmc_test_card *test, 692 struct mmc_request *mrq) 693 { 694 int ret; 695 696 if (WARN_ON(!mrq || !mrq->cmd || !mrq->data)) 697 return -EINVAL; 698 699 ret = 0; 700 701 if (mrq->sbc && mrq->sbc->error) 702 ret = mrq->sbc->error; 703 if (!ret && mrq->cmd->error) 704 ret = mrq->cmd->error; 705 if (!ret && mrq->data->error) 706 ret = mrq->data->error; 707 if (!ret && mrq->stop && mrq->stop->error) 708 ret = mrq->stop->error; 709 if (!ret && mrq->data->bytes_xfered != 710 mrq->data->blocks * mrq->data->blksz) 711 ret = RESULT_FAIL; 712 713 if (ret == -EINVAL) 714 ret = RESULT_UNSUP_HOST; 715 716 return ret; 717 } 718 719 /* 720 * Checks that a "short transfer" behaved as expected 721 */ 722 static int mmc_test_check_broken_result(struct mmc_test_card *test, 723 struct mmc_request *mrq) 724 { 725 int ret; 726 727 if (WARN_ON(!mrq || !mrq->cmd || !mrq->data)) 728 return -EINVAL; 729 730 ret = 0; 731 732 if (!ret && mrq->cmd->error) 733 ret = mrq->cmd->error; 734 if (!ret && mrq->data->error == 0) 735 ret = RESULT_FAIL; 736 if (!ret && mrq->data->error != -ETIMEDOUT) 737 ret = mrq->data->error; 738 if (!ret && mrq->stop && mrq->stop->error) 739 ret = mrq->stop->error; 740 if (mrq->data->blocks > 1) { 741 if (!ret && mrq->data->bytes_xfered > mrq->data->blksz) 742 ret = RESULT_FAIL; 743 } else { 744 if (!ret && mrq->data->bytes_xfered > 0) 745 ret = RESULT_FAIL; 746 } 747 748 if (ret == -EINVAL) 749 ret = RESULT_UNSUP_HOST; 750 751 return ret; 752 } 753 754 struct mmc_test_req { 755 struct mmc_request mrq; 756 struct mmc_command sbc; 757 struct mmc_command cmd; 758 struct mmc_command stop; 759 struct mmc_command status; 760 struct mmc_data data; 761 }; 762 763 /* 764 * Tests nonblock transfer with certain parameters 765 */ 766 static void mmc_test_req_reset(struct mmc_test_req *rq) 767 { 768 memset(rq, 0, sizeof(struct mmc_test_req)); 769 770 rq->mrq.cmd = &rq->cmd; 771 rq->mrq.data = &rq->data; 772 rq->mrq.stop = &rq->stop; 773 } 774 775 static struct mmc_test_req *mmc_test_req_alloc(void) 776 { 777 struct mmc_test_req *rq = kmalloc(sizeof(*rq), GFP_KERNEL); 778 779 if (rq) 780 mmc_test_req_reset(rq); 781 782 return rq; 783 } 784 785 static void mmc_test_wait_done(struct mmc_request *mrq) 786 { 787 complete(&mrq->completion); 788 } 789 790 static int mmc_test_start_areq(struct mmc_test_card *test, 791 struct mmc_request *mrq, 792 struct mmc_request *prev_mrq) 793 { 794 struct mmc_host *host = test->card->host; 795 int err = 0; 796 797 if (mrq) { 798 init_completion(&mrq->completion); 799 mrq->done = mmc_test_wait_done; 800 mmc_pre_req(host, mrq); 801 } 802 803 if (prev_mrq) { 804 wait_for_completion(&prev_mrq->completion); 805 err = mmc_test_wait_busy(test); 806 if (!err) 807 err = mmc_test_check_result(test, prev_mrq); 808 } 809 810 if (!err && mrq) { 811 err = mmc_start_request(host, mrq); 812 if (err) 813 mmc_retune_release(host); 814 } 815 816 if (prev_mrq) 817 mmc_post_req(host, prev_mrq, 0); 818 819 if (err && mrq) 820 mmc_post_req(host, mrq, err); 821 822 return err; 823 } 824 825 static int mmc_test_nonblock_transfer(struct mmc_test_card *test, 826 unsigned int dev_addr, int write, 827 int count) 828 { 829 struct mmc_test_req *rq1, *rq2; 830 struct mmc_request *mrq, *prev_mrq; 831 int i; 832 int ret = RESULT_OK; 833 struct mmc_test_area *t = &test->area; 834 struct scatterlist *sg = t->sg; 835 struct scatterlist *sg_areq = t->sg_areq; 836 837 rq1 = mmc_test_req_alloc(); 838 rq2 = mmc_test_req_alloc(); 839 if (!rq1 || !rq2) { 840 ret = RESULT_FAIL; 841 goto err; 842 } 843 844 mrq = &rq1->mrq; 845 prev_mrq = NULL; 846 847 for (i = 0; i < count; i++) { 848 mmc_test_req_reset(container_of(mrq, struct mmc_test_req, mrq)); 849 mmc_test_prepare_mrq(test, mrq, sg, t->sg_len, dev_addr, 850 t->blocks, 512, write); 851 ret = mmc_test_start_areq(test, mrq, prev_mrq); 852 if (ret) 853 goto err; 854 855 if (!prev_mrq) 856 prev_mrq = &rq2->mrq; 857 858 swap(mrq, prev_mrq); 859 swap(sg, sg_areq); 860 dev_addr += t->blocks; 861 } 862 863 ret = mmc_test_start_areq(test, NULL, prev_mrq); 864 err: 865 kfree(rq1); 866 kfree(rq2); 867 return ret; 868 } 869 870 /* 871 * Tests a basic transfer with certain parameters 872 */ 873 static int mmc_test_simple_transfer(struct mmc_test_card *test, 874 struct scatterlist *sg, unsigned sg_len, unsigned dev_addr, 875 unsigned blocks, unsigned blksz, int write) 876 { 877 struct mmc_request mrq = {}; 878 struct mmc_command cmd = {}; 879 struct mmc_command stop = {}; 880 struct mmc_data data = {}; 881 882 mrq.cmd = &cmd; 883 mrq.data = &data; 884 mrq.stop = &stop; 885 886 mmc_test_prepare_mrq(test, &mrq, sg, sg_len, dev_addr, 887 blocks, blksz, write); 888 889 mmc_wait_for_req(test->card->host, &mrq); 890 891 mmc_test_wait_busy(test); 892 893 return mmc_test_check_result(test, &mrq); 894 } 895 896 /* 897 * Tests a transfer where the card will fail completely or partly 898 */ 899 static int mmc_test_broken_transfer(struct mmc_test_card *test, 900 unsigned blocks, unsigned blksz, int write) 901 { 902 struct mmc_request mrq = {}; 903 struct mmc_command cmd = {}; 904 struct mmc_command stop = {}; 905 struct mmc_data data = {}; 906 907 struct scatterlist sg; 908 909 mrq.cmd = &cmd; 910 mrq.data = &data; 911 mrq.stop = &stop; 912 913 sg_init_one(&sg, test->buffer, blocks * blksz); 914 915 mmc_test_prepare_mrq(test, &mrq, &sg, 1, 0, blocks, blksz, write); 916 mmc_test_prepare_broken_mrq(test, &mrq, write); 917 918 mmc_wait_for_req(test->card->host, &mrq); 919 920 mmc_test_wait_busy(test); 921 922 return mmc_test_check_broken_result(test, &mrq); 923 } 924 925 /* 926 * Does a complete transfer test where data is also validated 927 * 928 * Note: mmc_test_prepare() must have been done before this call 929 */ 930 static int mmc_test_transfer(struct mmc_test_card *test, 931 struct scatterlist *sg, unsigned sg_len, unsigned dev_addr, 932 unsigned blocks, unsigned blksz, int write) 933 { 934 int ret, i; 935 936 if (write) { 937 for (i = 0; i < blocks * blksz; i++) 938 test->scratch[i] = i; 939 } else { 940 memset(test->scratch, 0, BUFFER_SIZE); 941 } 942 sg_copy_from_buffer(sg, sg_len, test->scratch, BUFFER_SIZE); 943 944 ret = mmc_test_set_blksize(test, blksz); 945 if (ret) 946 return ret; 947 948 ret = mmc_test_simple_transfer(test, sg, sg_len, dev_addr, 949 blocks, blksz, write); 950 if (ret) 951 return ret; 952 953 if (write) { 954 int sectors; 955 956 ret = mmc_test_set_blksize(test, 512); 957 if (ret) 958 return ret; 959 960 sectors = (blocks * blksz + 511) / 512; 961 if ((sectors * 512) == (blocks * blksz)) 962 sectors++; 963 964 if ((sectors * 512) > BUFFER_SIZE) 965 return -EINVAL; 966 967 memset(test->buffer, 0, sectors * 512); 968 969 for (i = 0; i < sectors; i++) { 970 ret = mmc_test_buffer_transfer(test, 971 test->buffer + i * 512, 972 dev_addr + i, 512, 0); 973 if (ret) 974 return ret; 975 } 976 977 for (i = 0; i < blocks * blksz; i++) { 978 if (test->buffer[i] != (u8)i) 979 return RESULT_FAIL; 980 } 981 982 for (; i < sectors * 512; i++) { 983 if (test->buffer[i] != 0xDF) 984 return RESULT_FAIL; 985 } 986 } else { 987 sg_copy_to_buffer(sg, sg_len, test->scratch, BUFFER_SIZE); 988 for (i = 0; i < blocks * blksz; i++) { 989 if (test->scratch[i] != (u8)i) 990 return RESULT_FAIL; 991 } 992 } 993 994 return 0; 995 } 996 997 /*******************************************************************/ 998 /* Tests */ 999 /*******************************************************************/ 1000 1001 struct mmc_test_case { 1002 const char *name; 1003 1004 int (*prepare)(struct mmc_test_card *); 1005 int (*run)(struct mmc_test_card *); 1006 int (*cleanup)(struct mmc_test_card *); 1007 }; 1008 1009 static int mmc_test_basic_write(struct mmc_test_card *test) 1010 { 1011 int ret; 1012 struct scatterlist sg; 1013 1014 ret = mmc_test_set_blksize(test, 512); 1015 if (ret) 1016 return ret; 1017 1018 sg_init_one(&sg, test->buffer, 512); 1019 1020 return mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 1); 1021 } 1022 1023 static int mmc_test_basic_read(struct mmc_test_card *test) 1024 { 1025 int ret; 1026 struct scatterlist sg; 1027 1028 ret = mmc_test_set_blksize(test, 512); 1029 if (ret) 1030 return ret; 1031 1032 sg_init_one(&sg, test->buffer, 512); 1033 1034 return mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 0); 1035 } 1036 1037 static int mmc_test_verify_write(struct mmc_test_card *test) 1038 { 1039 struct scatterlist sg; 1040 1041 sg_init_one(&sg, test->buffer, 512); 1042 1043 return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1); 1044 } 1045 1046 static int mmc_test_verify_read(struct mmc_test_card *test) 1047 { 1048 struct scatterlist sg; 1049 1050 sg_init_one(&sg, test->buffer, 512); 1051 1052 return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0); 1053 } 1054 1055 static int mmc_test_multi_write(struct mmc_test_card *test) 1056 { 1057 unsigned int size; 1058 struct scatterlist sg; 1059 1060 if (test->card->host->max_blk_count == 1) 1061 return RESULT_UNSUP_HOST; 1062 1063 size = PAGE_SIZE * 2; 1064 size = min(size, test->card->host->max_req_size); 1065 size = min(size, test->card->host->max_seg_size); 1066 size = min(size, test->card->host->max_blk_count * 512); 1067 1068 if (size < 1024) 1069 return RESULT_UNSUP_HOST; 1070 1071 sg_init_one(&sg, test->buffer, size); 1072 1073 return mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 1); 1074 } 1075 1076 static int mmc_test_multi_read(struct mmc_test_card *test) 1077 { 1078 unsigned int size; 1079 struct scatterlist sg; 1080 1081 if (test->card->host->max_blk_count == 1) 1082 return RESULT_UNSUP_HOST; 1083 1084 size = PAGE_SIZE * 2; 1085 size = min(size, test->card->host->max_req_size); 1086 size = min(size, test->card->host->max_seg_size); 1087 size = min(size, test->card->host->max_blk_count * 512); 1088 1089 if (size < 1024) 1090 return RESULT_UNSUP_HOST; 1091 1092 sg_init_one(&sg, test->buffer, size); 1093 1094 return mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 0); 1095 } 1096 1097 static int mmc_test_pow2_write(struct mmc_test_card *test) 1098 { 1099 int ret, i; 1100 struct scatterlist sg; 1101 1102 if (!test->card->csd.write_partial) 1103 return RESULT_UNSUP_CARD; 1104 1105 for (i = 1; i < 512; i <<= 1) { 1106 sg_init_one(&sg, test->buffer, i); 1107 ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1); 1108 if (ret) 1109 return ret; 1110 } 1111 1112 return 0; 1113 } 1114 1115 static int mmc_test_pow2_read(struct mmc_test_card *test) 1116 { 1117 int ret, i; 1118 struct scatterlist sg; 1119 1120 if (!test->card->csd.read_partial) 1121 return RESULT_UNSUP_CARD; 1122 1123 for (i = 1; i < 512; i <<= 1) { 1124 sg_init_one(&sg, test->buffer, i); 1125 ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0); 1126 if (ret) 1127 return ret; 1128 } 1129 1130 return 0; 1131 } 1132 1133 static int mmc_test_weird_write(struct mmc_test_card *test) 1134 { 1135 int ret, i; 1136 struct scatterlist sg; 1137 1138 if (!test->card->csd.write_partial) 1139 return RESULT_UNSUP_CARD; 1140 1141 for (i = 3; i < 512; i += 7) { 1142 sg_init_one(&sg, test->buffer, i); 1143 ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1); 1144 if (ret) 1145 return ret; 1146 } 1147 1148 return 0; 1149 } 1150 1151 static int mmc_test_weird_read(struct mmc_test_card *test) 1152 { 1153 int ret, i; 1154 struct scatterlist sg; 1155 1156 if (!test->card->csd.read_partial) 1157 return RESULT_UNSUP_CARD; 1158 1159 for (i = 3; i < 512; i += 7) { 1160 sg_init_one(&sg, test->buffer, i); 1161 ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0); 1162 if (ret) 1163 return ret; 1164 } 1165 1166 return 0; 1167 } 1168 1169 static int mmc_test_align_write(struct mmc_test_card *test) 1170 { 1171 int ret, i; 1172 struct scatterlist sg; 1173 1174 for (i = 1; i < TEST_ALIGN_END; i++) { 1175 sg_init_one(&sg, test->buffer + i, 512); 1176 ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1); 1177 if (ret) 1178 return ret; 1179 } 1180 1181 return 0; 1182 } 1183 1184 static int mmc_test_align_read(struct mmc_test_card *test) 1185 { 1186 int ret, i; 1187 struct scatterlist sg; 1188 1189 for (i = 1; i < TEST_ALIGN_END; i++) { 1190 sg_init_one(&sg, test->buffer + i, 512); 1191 ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0); 1192 if (ret) 1193 return ret; 1194 } 1195 1196 return 0; 1197 } 1198 1199 static int mmc_test_align_multi_write(struct mmc_test_card *test) 1200 { 1201 int ret, i; 1202 unsigned int size; 1203 struct scatterlist sg; 1204 1205 if (test->card->host->max_blk_count == 1) 1206 return RESULT_UNSUP_HOST; 1207 1208 size = PAGE_SIZE * 2; 1209 size = min(size, test->card->host->max_req_size); 1210 size = min(size, test->card->host->max_seg_size); 1211 size = min(size, test->card->host->max_blk_count * 512); 1212 1213 if (size < 1024) 1214 return RESULT_UNSUP_HOST; 1215 1216 for (i = 1; i < TEST_ALIGN_END; i++) { 1217 sg_init_one(&sg, test->buffer + i, size); 1218 ret = mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 1); 1219 if (ret) 1220 return ret; 1221 } 1222 1223 return 0; 1224 } 1225 1226 static int mmc_test_align_multi_read(struct mmc_test_card *test) 1227 { 1228 int ret, i; 1229 unsigned int size; 1230 struct scatterlist sg; 1231 1232 if (test->card->host->max_blk_count == 1) 1233 return RESULT_UNSUP_HOST; 1234 1235 size = PAGE_SIZE * 2; 1236 size = min(size, test->card->host->max_req_size); 1237 size = min(size, test->card->host->max_seg_size); 1238 size = min(size, test->card->host->max_blk_count * 512); 1239 1240 if (size < 1024) 1241 return RESULT_UNSUP_HOST; 1242 1243 for (i = 1; i < TEST_ALIGN_END; i++) { 1244 sg_init_one(&sg, test->buffer + i, size); 1245 ret = mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 0); 1246 if (ret) 1247 return ret; 1248 } 1249 1250 return 0; 1251 } 1252 1253 static int mmc_test_xfersize_write(struct mmc_test_card *test) 1254 { 1255 int ret; 1256 1257 ret = mmc_test_set_blksize(test, 512); 1258 if (ret) 1259 return ret; 1260 1261 return mmc_test_broken_transfer(test, 1, 512, 1); 1262 } 1263 1264 static int mmc_test_xfersize_read(struct mmc_test_card *test) 1265 { 1266 int ret; 1267 1268 ret = mmc_test_set_blksize(test, 512); 1269 if (ret) 1270 return ret; 1271 1272 return mmc_test_broken_transfer(test, 1, 512, 0); 1273 } 1274 1275 static int mmc_test_multi_xfersize_write(struct mmc_test_card *test) 1276 { 1277 int ret; 1278 1279 if (test->card->host->max_blk_count == 1) 1280 return RESULT_UNSUP_HOST; 1281 1282 ret = mmc_test_set_blksize(test, 512); 1283 if (ret) 1284 return ret; 1285 1286 return mmc_test_broken_transfer(test, 2, 512, 1); 1287 } 1288 1289 static int mmc_test_multi_xfersize_read(struct mmc_test_card *test) 1290 { 1291 int ret; 1292 1293 if (test->card->host->max_blk_count == 1) 1294 return RESULT_UNSUP_HOST; 1295 1296 ret = mmc_test_set_blksize(test, 512); 1297 if (ret) 1298 return ret; 1299 1300 return mmc_test_broken_transfer(test, 2, 512, 0); 1301 } 1302 1303 #ifdef CONFIG_HIGHMEM 1304 1305 static int mmc_test_write_high(struct mmc_test_card *test) 1306 { 1307 struct scatterlist sg; 1308 1309 sg_init_table(&sg, 1); 1310 sg_set_page(&sg, test->highmem, 512, 0); 1311 1312 return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1); 1313 } 1314 1315 static int mmc_test_read_high(struct mmc_test_card *test) 1316 { 1317 struct scatterlist sg; 1318 1319 sg_init_table(&sg, 1); 1320 sg_set_page(&sg, test->highmem, 512, 0); 1321 1322 return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0); 1323 } 1324 1325 static int mmc_test_multi_write_high(struct mmc_test_card *test) 1326 { 1327 unsigned int size; 1328 struct scatterlist sg; 1329 1330 if (test->card->host->max_blk_count == 1) 1331 return RESULT_UNSUP_HOST; 1332 1333 size = PAGE_SIZE * 2; 1334 size = min(size, test->card->host->max_req_size); 1335 size = min(size, test->card->host->max_seg_size); 1336 size = min(size, test->card->host->max_blk_count * 512); 1337 1338 if (size < 1024) 1339 return RESULT_UNSUP_HOST; 1340 1341 sg_init_table(&sg, 1); 1342 sg_set_page(&sg, test->highmem, size, 0); 1343 1344 return mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 1); 1345 } 1346 1347 static int mmc_test_multi_read_high(struct mmc_test_card *test) 1348 { 1349 unsigned int size; 1350 struct scatterlist sg; 1351 1352 if (test->card->host->max_blk_count == 1) 1353 return RESULT_UNSUP_HOST; 1354 1355 size = PAGE_SIZE * 2; 1356 size = min(size, test->card->host->max_req_size); 1357 size = min(size, test->card->host->max_seg_size); 1358 size = min(size, test->card->host->max_blk_count * 512); 1359 1360 if (size < 1024) 1361 return RESULT_UNSUP_HOST; 1362 1363 sg_init_table(&sg, 1); 1364 sg_set_page(&sg, test->highmem, size, 0); 1365 1366 return mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 0); 1367 } 1368 1369 #else 1370 1371 static int mmc_test_no_highmem(struct mmc_test_card *test) 1372 { 1373 pr_info("%s: Highmem not configured - test skipped\n", 1374 mmc_hostname(test->card->host)); 1375 return 0; 1376 } 1377 1378 #endif /* CONFIG_HIGHMEM */ 1379 1380 /* 1381 * Map sz bytes so that it can be transferred. 1382 */ 1383 static int mmc_test_area_map(struct mmc_test_card *test, unsigned long sz, 1384 int max_scatter, int min_sg_len, bool nonblock) 1385 { 1386 struct mmc_test_area *t = &test->area; 1387 int err; 1388 unsigned int sg_len = 0; 1389 1390 t->blocks = sz >> 9; 1391 1392 if (max_scatter) { 1393 err = mmc_test_map_sg_max_scatter(t->mem, sz, t->sg, 1394 t->max_segs, t->max_seg_sz, 1395 &t->sg_len); 1396 } else { 1397 err = mmc_test_map_sg(t->mem, sz, t->sg, 1, t->max_segs, 1398 t->max_seg_sz, &t->sg_len, min_sg_len); 1399 } 1400 1401 if (err || !nonblock) 1402 goto err; 1403 1404 if (max_scatter) { 1405 err = mmc_test_map_sg_max_scatter(t->mem, sz, t->sg_areq, 1406 t->max_segs, t->max_seg_sz, 1407 &sg_len); 1408 } else { 1409 err = mmc_test_map_sg(t->mem, sz, t->sg_areq, 1, t->max_segs, 1410 t->max_seg_sz, &sg_len, min_sg_len); 1411 } 1412 if (!err && sg_len != t->sg_len) 1413 err = -EINVAL; 1414 1415 err: 1416 if (err) 1417 pr_info("%s: Failed to map sg list\n", 1418 mmc_hostname(test->card->host)); 1419 return err; 1420 } 1421 1422 /* 1423 * Transfer bytes mapped by mmc_test_area_map(). 1424 */ 1425 static int mmc_test_area_transfer(struct mmc_test_card *test, 1426 unsigned int dev_addr, int write) 1427 { 1428 struct mmc_test_area *t = &test->area; 1429 1430 return mmc_test_simple_transfer(test, t->sg, t->sg_len, dev_addr, 1431 t->blocks, 512, write); 1432 } 1433 1434 /* 1435 * Map and transfer bytes for multiple transfers. 1436 */ 1437 static int mmc_test_area_io_seq(struct mmc_test_card *test, unsigned long sz, 1438 unsigned int dev_addr, int write, 1439 int max_scatter, int timed, int count, 1440 bool nonblock, int min_sg_len) 1441 { 1442 struct timespec64 ts1, ts2; 1443 int ret = 0; 1444 int i; 1445 1446 /* 1447 * In the case of a maximally scattered transfer, the maximum transfer 1448 * size is further limited by using PAGE_SIZE segments. 1449 */ 1450 if (max_scatter) { 1451 struct mmc_test_area *t = &test->area; 1452 unsigned long max_tfr; 1453 1454 if (t->max_seg_sz >= PAGE_SIZE) 1455 max_tfr = t->max_segs * PAGE_SIZE; 1456 else 1457 max_tfr = t->max_segs * t->max_seg_sz; 1458 if (sz > max_tfr) 1459 sz = max_tfr; 1460 } 1461 1462 ret = mmc_test_area_map(test, sz, max_scatter, min_sg_len, nonblock); 1463 if (ret) 1464 return ret; 1465 1466 if (timed) 1467 ktime_get_ts64(&ts1); 1468 if (nonblock) 1469 ret = mmc_test_nonblock_transfer(test, dev_addr, write, count); 1470 else 1471 for (i = 0; i < count && ret == 0; i++) { 1472 ret = mmc_test_area_transfer(test, dev_addr, write); 1473 dev_addr += sz >> 9; 1474 } 1475 1476 if (ret) 1477 return ret; 1478 1479 if (timed) 1480 ktime_get_ts64(&ts2); 1481 1482 if (timed) 1483 mmc_test_print_avg_rate(test, sz, count, &ts1, &ts2); 1484 1485 return 0; 1486 } 1487 1488 static int mmc_test_area_io(struct mmc_test_card *test, unsigned long sz, 1489 unsigned int dev_addr, int write, int max_scatter, 1490 int timed) 1491 { 1492 return mmc_test_area_io_seq(test, sz, dev_addr, write, max_scatter, 1493 timed, 1, false, 0); 1494 } 1495 1496 /* 1497 * Write the test area entirely. 1498 */ 1499 static int mmc_test_area_fill(struct mmc_test_card *test) 1500 { 1501 struct mmc_test_area *t = &test->area; 1502 1503 return mmc_test_area_io(test, t->max_tfr, t->dev_addr, 1, 0, 0); 1504 } 1505 1506 /* 1507 * Erase the test area entirely. 1508 */ 1509 static int mmc_test_area_erase(struct mmc_test_card *test) 1510 { 1511 struct mmc_test_area *t = &test->area; 1512 1513 if (!mmc_can_erase(test->card)) 1514 return 0; 1515 1516 return mmc_erase(test->card, t->dev_addr, t->max_sz >> 9, 1517 MMC_ERASE_ARG); 1518 } 1519 1520 /* 1521 * Cleanup struct mmc_test_area. 1522 */ 1523 static int mmc_test_area_cleanup(struct mmc_test_card *test) 1524 { 1525 struct mmc_test_area *t = &test->area; 1526 1527 kfree(t->sg); 1528 kfree(t->sg_areq); 1529 mmc_test_free_mem(t->mem); 1530 1531 return 0; 1532 } 1533 1534 /* 1535 * Initialize an area for testing large transfers. The test area is set to the 1536 * middle of the card because cards may have different characteristics at the 1537 * front (for FAT file system optimization). Optionally, the area is erased 1538 * (if the card supports it) which may improve write performance. Optionally, 1539 * the area is filled with data for subsequent read tests. 1540 */ 1541 static int mmc_test_area_init(struct mmc_test_card *test, int erase, int fill) 1542 { 1543 struct mmc_test_area *t = &test->area; 1544 unsigned long min_sz = 64 * 1024, sz; 1545 int ret; 1546 1547 ret = mmc_test_set_blksize(test, 512); 1548 if (ret) 1549 return ret; 1550 1551 /* Make the test area size about 4MiB */ 1552 sz = (unsigned long)test->card->pref_erase << 9; 1553 t->max_sz = sz; 1554 while (t->max_sz < 4 * 1024 * 1024) 1555 t->max_sz += sz; 1556 while (t->max_sz > TEST_AREA_MAX_SIZE && t->max_sz > sz) 1557 t->max_sz -= sz; 1558 1559 t->max_segs = test->card->host->max_segs; 1560 t->max_seg_sz = test->card->host->max_seg_size; 1561 t->max_seg_sz -= t->max_seg_sz % 512; 1562 1563 t->max_tfr = t->max_sz; 1564 if (t->max_tfr >> 9 > test->card->host->max_blk_count) 1565 t->max_tfr = test->card->host->max_blk_count << 9; 1566 if (t->max_tfr > test->card->host->max_req_size) 1567 t->max_tfr = test->card->host->max_req_size; 1568 if (t->max_tfr / t->max_seg_sz > t->max_segs) 1569 t->max_tfr = t->max_segs * t->max_seg_sz; 1570 1571 /* 1572 * Try to allocate enough memory for a max. sized transfer. Less is OK 1573 * because the same memory can be mapped into the scatterlist more than 1574 * once. Also, take into account the limits imposed on scatterlist 1575 * segments by the host driver. 1576 */ 1577 t->mem = mmc_test_alloc_mem(min_sz, t->max_tfr, t->max_segs, 1578 t->max_seg_sz); 1579 if (!t->mem) 1580 return -ENOMEM; 1581 1582 t->sg = kmalloc_array(t->max_segs, sizeof(*t->sg), GFP_KERNEL); 1583 if (!t->sg) { 1584 ret = -ENOMEM; 1585 goto out_free; 1586 } 1587 1588 t->sg_areq = kmalloc_array(t->max_segs, sizeof(*t->sg_areq), 1589 GFP_KERNEL); 1590 if (!t->sg_areq) { 1591 ret = -ENOMEM; 1592 goto out_free; 1593 } 1594 1595 t->dev_addr = mmc_test_capacity(test->card) / 2; 1596 t->dev_addr -= t->dev_addr % (t->max_sz >> 9); 1597 1598 if (erase) { 1599 ret = mmc_test_area_erase(test); 1600 if (ret) 1601 goto out_free; 1602 } 1603 1604 if (fill) { 1605 ret = mmc_test_area_fill(test); 1606 if (ret) 1607 goto out_free; 1608 } 1609 1610 return 0; 1611 1612 out_free: 1613 mmc_test_area_cleanup(test); 1614 return ret; 1615 } 1616 1617 /* 1618 * Prepare for large transfers. Do not erase the test area. 1619 */ 1620 static int mmc_test_area_prepare(struct mmc_test_card *test) 1621 { 1622 return mmc_test_area_init(test, 0, 0); 1623 } 1624 1625 /* 1626 * Prepare for large transfers. Do erase the test area. 1627 */ 1628 static int mmc_test_area_prepare_erase(struct mmc_test_card *test) 1629 { 1630 return mmc_test_area_init(test, 1, 0); 1631 } 1632 1633 /* 1634 * Prepare for large transfers. Erase and fill the test area. 1635 */ 1636 static int mmc_test_area_prepare_fill(struct mmc_test_card *test) 1637 { 1638 return mmc_test_area_init(test, 1, 1); 1639 } 1640 1641 /* 1642 * Test best-case performance. Best-case performance is expected from 1643 * a single large transfer. 1644 * 1645 * An additional option (max_scatter) allows the measurement of the same 1646 * transfer but with no contiguous pages in the scatter list. This tests 1647 * the efficiency of DMA to handle scattered pages. 1648 */ 1649 static int mmc_test_best_performance(struct mmc_test_card *test, int write, 1650 int max_scatter) 1651 { 1652 struct mmc_test_area *t = &test->area; 1653 1654 return mmc_test_area_io(test, t->max_tfr, t->dev_addr, write, 1655 max_scatter, 1); 1656 } 1657 1658 /* 1659 * Best-case read performance. 1660 */ 1661 static int mmc_test_best_read_performance(struct mmc_test_card *test) 1662 { 1663 return mmc_test_best_performance(test, 0, 0); 1664 } 1665 1666 /* 1667 * Best-case write performance. 1668 */ 1669 static int mmc_test_best_write_performance(struct mmc_test_card *test) 1670 { 1671 return mmc_test_best_performance(test, 1, 0); 1672 } 1673 1674 /* 1675 * Best-case read performance into scattered pages. 1676 */ 1677 static int mmc_test_best_read_perf_max_scatter(struct mmc_test_card *test) 1678 { 1679 return mmc_test_best_performance(test, 0, 1); 1680 } 1681 1682 /* 1683 * Best-case write performance from scattered pages. 1684 */ 1685 static int mmc_test_best_write_perf_max_scatter(struct mmc_test_card *test) 1686 { 1687 return mmc_test_best_performance(test, 1, 1); 1688 } 1689 1690 /* 1691 * Single read performance by transfer size. 1692 */ 1693 static int mmc_test_profile_read_perf(struct mmc_test_card *test) 1694 { 1695 struct mmc_test_area *t = &test->area; 1696 unsigned long sz; 1697 unsigned int dev_addr; 1698 int ret; 1699 1700 for (sz = 512; sz < t->max_tfr; sz <<= 1) { 1701 dev_addr = t->dev_addr + (sz >> 9); 1702 ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 1); 1703 if (ret) 1704 return ret; 1705 } 1706 sz = t->max_tfr; 1707 dev_addr = t->dev_addr; 1708 return mmc_test_area_io(test, sz, dev_addr, 0, 0, 1); 1709 } 1710 1711 /* 1712 * Single write performance by transfer size. 1713 */ 1714 static int mmc_test_profile_write_perf(struct mmc_test_card *test) 1715 { 1716 struct mmc_test_area *t = &test->area; 1717 unsigned long sz; 1718 unsigned int dev_addr; 1719 int ret; 1720 1721 ret = mmc_test_area_erase(test); 1722 if (ret) 1723 return ret; 1724 for (sz = 512; sz < t->max_tfr; sz <<= 1) { 1725 dev_addr = t->dev_addr + (sz >> 9); 1726 ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 1); 1727 if (ret) 1728 return ret; 1729 } 1730 ret = mmc_test_area_erase(test); 1731 if (ret) 1732 return ret; 1733 sz = t->max_tfr; 1734 dev_addr = t->dev_addr; 1735 return mmc_test_area_io(test, sz, dev_addr, 1, 0, 1); 1736 } 1737 1738 /* 1739 * Single trim performance by transfer size. 1740 */ 1741 static int mmc_test_profile_trim_perf(struct mmc_test_card *test) 1742 { 1743 struct mmc_test_area *t = &test->area; 1744 unsigned long sz; 1745 unsigned int dev_addr; 1746 struct timespec64 ts1, ts2; 1747 int ret; 1748 1749 if (!mmc_can_trim(test->card)) 1750 return RESULT_UNSUP_CARD; 1751 1752 if (!mmc_can_erase(test->card)) 1753 return RESULT_UNSUP_HOST; 1754 1755 for (sz = 512; sz < t->max_sz; sz <<= 1) { 1756 dev_addr = t->dev_addr + (sz >> 9); 1757 ktime_get_ts64(&ts1); 1758 ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG); 1759 if (ret) 1760 return ret; 1761 ktime_get_ts64(&ts2); 1762 mmc_test_print_rate(test, sz, &ts1, &ts2); 1763 } 1764 dev_addr = t->dev_addr; 1765 ktime_get_ts64(&ts1); 1766 ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG); 1767 if (ret) 1768 return ret; 1769 ktime_get_ts64(&ts2); 1770 mmc_test_print_rate(test, sz, &ts1, &ts2); 1771 return 0; 1772 } 1773 1774 static int mmc_test_seq_read_perf(struct mmc_test_card *test, unsigned long sz) 1775 { 1776 struct mmc_test_area *t = &test->area; 1777 unsigned int dev_addr, i, cnt; 1778 struct timespec64 ts1, ts2; 1779 int ret; 1780 1781 cnt = t->max_sz / sz; 1782 dev_addr = t->dev_addr; 1783 ktime_get_ts64(&ts1); 1784 for (i = 0; i < cnt; i++) { 1785 ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 0); 1786 if (ret) 1787 return ret; 1788 dev_addr += (sz >> 9); 1789 } 1790 ktime_get_ts64(&ts2); 1791 mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2); 1792 return 0; 1793 } 1794 1795 /* 1796 * Consecutive read performance by transfer size. 1797 */ 1798 static int mmc_test_profile_seq_read_perf(struct mmc_test_card *test) 1799 { 1800 struct mmc_test_area *t = &test->area; 1801 unsigned long sz; 1802 int ret; 1803 1804 for (sz = 512; sz < t->max_tfr; sz <<= 1) { 1805 ret = mmc_test_seq_read_perf(test, sz); 1806 if (ret) 1807 return ret; 1808 } 1809 sz = t->max_tfr; 1810 return mmc_test_seq_read_perf(test, sz); 1811 } 1812 1813 static int mmc_test_seq_write_perf(struct mmc_test_card *test, unsigned long sz) 1814 { 1815 struct mmc_test_area *t = &test->area; 1816 unsigned int dev_addr, i, cnt; 1817 struct timespec64 ts1, ts2; 1818 int ret; 1819 1820 ret = mmc_test_area_erase(test); 1821 if (ret) 1822 return ret; 1823 cnt = t->max_sz / sz; 1824 dev_addr = t->dev_addr; 1825 ktime_get_ts64(&ts1); 1826 for (i = 0; i < cnt; i++) { 1827 ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 0); 1828 if (ret) 1829 return ret; 1830 dev_addr += (sz >> 9); 1831 } 1832 ktime_get_ts64(&ts2); 1833 mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2); 1834 return 0; 1835 } 1836 1837 /* 1838 * Consecutive write performance by transfer size. 1839 */ 1840 static int mmc_test_profile_seq_write_perf(struct mmc_test_card *test) 1841 { 1842 struct mmc_test_area *t = &test->area; 1843 unsigned long sz; 1844 int ret; 1845 1846 for (sz = 512; sz < t->max_tfr; sz <<= 1) { 1847 ret = mmc_test_seq_write_perf(test, sz); 1848 if (ret) 1849 return ret; 1850 } 1851 sz = t->max_tfr; 1852 return mmc_test_seq_write_perf(test, sz); 1853 } 1854 1855 /* 1856 * Consecutive trim performance by transfer size. 1857 */ 1858 static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test) 1859 { 1860 struct mmc_test_area *t = &test->area; 1861 unsigned long sz; 1862 unsigned int dev_addr, i, cnt; 1863 struct timespec64 ts1, ts2; 1864 int ret; 1865 1866 if (!mmc_can_trim(test->card)) 1867 return RESULT_UNSUP_CARD; 1868 1869 if (!mmc_can_erase(test->card)) 1870 return RESULT_UNSUP_HOST; 1871 1872 for (sz = 512; sz <= t->max_sz; sz <<= 1) { 1873 ret = mmc_test_area_erase(test); 1874 if (ret) 1875 return ret; 1876 ret = mmc_test_area_fill(test); 1877 if (ret) 1878 return ret; 1879 cnt = t->max_sz / sz; 1880 dev_addr = t->dev_addr; 1881 ktime_get_ts64(&ts1); 1882 for (i = 0; i < cnt; i++) { 1883 ret = mmc_erase(test->card, dev_addr, sz >> 9, 1884 MMC_TRIM_ARG); 1885 if (ret) 1886 return ret; 1887 dev_addr += (sz >> 9); 1888 } 1889 ktime_get_ts64(&ts2); 1890 mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2); 1891 } 1892 return 0; 1893 } 1894 1895 static unsigned int rnd_next = 1; 1896 1897 static unsigned int mmc_test_rnd_num(unsigned int rnd_cnt) 1898 { 1899 uint64_t r; 1900 1901 rnd_next = rnd_next * 1103515245 + 12345; 1902 r = (rnd_next >> 16) & 0x7fff; 1903 return (r * rnd_cnt) >> 15; 1904 } 1905 1906 static int mmc_test_rnd_perf(struct mmc_test_card *test, int write, int print, 1907 unsigned long sz) 1908 { 1909 unsigned int dev_addr, cnt, rnd_addr, range1, range2, last_ea = 0, ea; 1910 unsigned int ssz; 1911 struct timespec64 ts1, ts2, ts; 1912 int ret; 1913 1914 ssz = sz >> 9; 1915 1916 rnd_addr = mmc_test_capacity(test->card) / 4; 1917 range1 = rnd_addr / test->card->pref_erase; 1918 range2 = range1 / ssz; 1919 1920 ktime_get_ts64(&ts1); 1921 for (cnt = 0; cnt < UINT_MAX; cnt++) { 1922 ktime_get_ts64(&ts2); 1923 ts = timespec64_sub(ts2, ts1); 1924 if (ts.tv_sec >= 10) 1925 break; 1926 ea = mmc_test_rnd_num(range1); 1927 if (ea == last_ea) 1928 ea -= 1; 1929 last_ea = ea; 1930 dev_addr = rnd_addr + test->card->pref_erase * ea + 1931 ssz * mmc_test_rnd_num(range2); 1932 ret = mmc_test_area_io(test, sz, dev_addr, write, 0, 0); 1933 if (ret) 1934 return ret; 1935 } 1936 if (print) 1937 mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2); 1938 return 0; 1939 } 1940 1941 static int mmc_test_random_perf(struct mmc_test_card *test, int write) 1942 { 1943 struct mmc_test_area *t = &test->area; 1944 unsigned int next; 1945 unsigned long sz; 1946 int ret; 1947 1948 for (sz = 512; sz < t->max_tfr; sz <<= 1) { 1949 /* 1950 * When writing, try to get more consistent results by running 1951 * the test twice with exactly the same I/O but outputting the 1952 * results only for the 2nd run. 1953 */ 1954 if (write) { 1955 next = rnd_next; 1956 ret = mmc_test_rnd_perf(test, write, 0, sz); 1957 if (ret) 1958 return ret; 1959 rnd_next = next; 1960 } 1961 ret = mmc_test_rnd_perf(test, write, 1, sz); 1962 if (ret) 1963 return ret; 1964 } 1965 sz = t->max_tfr; 1966 if (write) { 1967 next = rnd_next; 1968 ret = mmc_test_rnd_perf(test, write, 0, sz); 1969 if (ret) 1970 return ret; 1971 rnd_next = next; 1972 } 1973 return mmc_test_rnd_perf(test, write, 1, sz); 1974 } 1975 1976 /* 1977 * Random read performance by transfer size. 1978 */ 1979 static int mmc_test_random_read_perf(struct mmc_test_card *test) 1980 { 1981 return mmc_test_random_perf(test, 0); 1982 } 1983 1984 /* 1985 * Random write performance by transfer size. 1986 */ 1987 static int mmc_test_random_write_perf(struct mmc_test_card *test) 1988 { 1989 return mmc_test_random_perf(test, 1); 1990 } 1991 1992 static int mmc_test_seq_perf(struct mmc_test_card *test, int write, 1993 unsigned int tot_sz, int max_scatter) 1994 { 1995 struct mmc_test_area *t = &test->area; 1996 unsigned int dev_addr, i, cnt, sz, ssz; 1997 struct timespec64 ts1, ts2; 1998 int ret; 1999 2000 sz = t->max_tfr; 2001 2002 /* 2003 * In the case of a maximally scattered transfer, the maximum transfer 2004 * size is further limited by using PAGE_SIZE segments. 2005 */ 2006 if (max_scatter) { 2007 unsigned long max_tfr; 2008 2009 if (t->max_seg_sz >= PAGE_SIZE) 2010 max_tfr = t->max_segs * PAGE_SIZE; 2011 else 2012 max_tfr = t->max_segs * t->max_seg_sz; 2013 if (sz > max_tfr) 2014 sz = max_tfr; 2015 } 2016 2017 ssz = sz >> 9; 2018 dev_addr = mmc_test_capacity(test->card) / 4; 2019 if (tot_sz > dev_addr << 9) 2020 tot_sz = dev_addr << 9; 2021 cnt = tot_sz / sz; 2022 dev_addr &= 0xffff0000; /* Round to 64MiB boundary */ 2023 2024 ktime_get_ts64(&ts1); 2025 for (i = 0; i < cnt; i++) { 2026 ret = mmc_test_area_io(test, sz, dev_addr, write, 2027 max_scatter, 0); 2028 if (ret) 2029 return ret; 2030 dev_addr += ssz; 2031 } 2032 ktime_get_ts64(&ts2); 2033 2034 mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2); 2035 2036 return 0; 2037 } 2038 2039 static int mmc_test_large_seq_perf(struct mmc_test_card *test, int write) 2040 { 2041 int ret, i; 2042 2043 for (i = 0; i < 10; i++) { 2044 ret = mmc_test_seq_perf(test, write, 10 * 1024 * 1024, 1); 2045 if (ret) 2046 return ret; 2047 } 2048 for (i = 0; i < 5; i++) { 2049 ret = mmc_test_seq_perf(test, write, 100 * 1024 * 1024, 1); 2050 if (ret) 2051 return ret; 2052 } 2053 for (i = 0; i < 3; i++) { 2054 ret = mmc_test_seq_perf(test, write, 1000 * 1024 * 1024, 1); 2055 if (ret) 2056 return ret; 2057 } 2058 2059 return ret; 2060 } 2061 2062 /* 2063 * Large sequential read performance. 2064 */ 2065 static int mmc_test_large_seq_read_perf(struct mmc_test_card *test) 2066 { 2067 return mmc_test_large_seq_perf(test, 0); 2068 } 2069 2070 /* 2071 * Large sequential write performance. 2072 */ 2073 static int mmc_test_large_seq_write_perf(struct mmc_test_card *test) 2074 { 2075 return mmc_test_large_seq_perf(test, 1); 2076 } 2077 2078 static int mmc_test_rw_multiple(struct mmc_test_card *test, 2079 struct mmc_test_multiple_rw *tdata, 2080 unsigned int reqsize, unsigned int size, 2081 int min_sg_len) 2082 { 2083 unsigned int dev_addr; 2084 struct mmc_test_area *t = &test->area; 2085 int ret = 0; 2086 2087 /* Set up test area */ 2088 if (size > mmc_test_capacity(test->card) / 2 * 512) 2089 size = mmc_test_capacity(test->card) / 2 * 512; 2090 if (reqsize > t->max_tfr) 2091 reqsize = t->max_tfr; 2092 dev_addr = mmc_test_capacity(test->card) / 4; 2093 if ((dev_addr & 0xffff0000)) 2094 dev_addr &= 0xffff0000; /* Round to 64MiB boundary */ 2095 else 2096 dev_addr &= 0xfffff800; /* Round to 1MiB boundary */ 2097 if (!dev_addr) 2098 goto err; 2099 2100 if (reqsize > size) 2101 return 0; 2102 2103 /* prepare test area */ 2104 if (mmc_can_erase(test->card) && 2105 tdata->prepare & MMC_TEST_PREP_ERASE) { 2106 ret = mmc_erase(test->card, dev_addr, 2107 size / 512, test->card->erase_arg); 2108 if (ret) 2109 ret = mmc_erase(test->card, dev_addr, 2110 size / 512, MMC_ERASE_ARG); 2111 if (ret) 2112 goto err; 2113 } 2114 2115 /* Run test */ 2116 ret = mmc_test_area_io_seq(test, reqsize, dev_addr, 2117 tdata->do_write, 0, 1, size / reqsize, 2118 tdata->do_nonblock_req, min_sg_len); 2119 if (ret) 2120 goto err; 2121 2122 return ret; 2123 err: 2124 pr_info("[%s] error\n", __func__); 2125 return ret; 2126 } 2127 2128 static int mmc_test_rw_multiple_size(struct mmc_test_card *test, 2129 struct mmc_test_multiple_rw *rw) 2130 { 2131 int ret = 0; 2132 int i; 2133 void *pre_req = test->card->host->ops->pre_req; 2134 void *post_req = test->card->host->ops->post_req; 2135 2136 if (rw->do_nonblock_req && 2137 ((!pre_req && post_req) || (pre_req && !post_req))) { 2138 pr_info("error: only one of pre/post is defined\n"); 2139 return -EINVAL; 2140 } 2141 2142 for (i = 0 ; i < rw->len && ret == 0; i++) { 2143 ret = mmc_test_rw_multiple(test, rw, rw->bs[i], rw->size, 0); 2144 if (ret) 2145 break; 2146 } 2147 return ret; 2148 } 2149 2150 static int mmc_test_rw_multiple_sg_len(struct mmc_test_card *test, 2151 struct mmc_test_multiple_rw *rw) 2152 { 2153 int ret = 0; 2154 int i; 2155 2156 for (i = 0 ; i < rw->len && ret == 0; i++) { 2157 ret = mmc_test_rw_multiple(test, rw, 512 * 1024, rw->size, 2158 rw->sg_len[i]); 2159 if (ret) 2160 break; 2161 } 2162 return ret; 2163 } 2164 2165 /* 2166 * Multiple blocking write 4k to 4 MB chunks 2167 */ 2168 static int mmc_test_profile_mult_write_blocking_perf(struct mmc_test_card *test) 2169 { 2170 unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16, 2171 1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22}; 2172 struct mmc_test_multiple_rw test_data = { 2173 .bs = bs, 2174 .size = TEST_AREA_MAX_SIZE, 2175 .len = ARRAY_SIZE(bs), 2176 .do_write = true, 2177 .do_nonblock_req = false, 2178 .prepare = MMC_TEST_PREP_ERASE, 2179 }; 2180 2181 return mmc_test_rw_multiple_size(test, &test_data); 2182 }; 2183 2184 /* 2185 * Multiple non-blocking write 4k to 4 MB chunks 2186 */ 2187 static int mmc_test_profile_mult_write_nonblock_perf(struct mmc_test_card *test) 2188 { 2189 unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16, 2190 1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22}; 2191 struct mmc_test_multiple_rw test_data = { 2192 .bs = bs, 2193 .size = TEST_AREA_MAX_SIZE, 2194 .len = ARRAY_SIZE(bs), 2195 .do_write = true, 2196 .do_nonblock_req = true, 2197 .prepare = MMC_TEST_PREP_ERASE, 2198 }; 2199 2200 return mmc_test_rw_multiple_size(test, &test_data); 2201 } 2202 2203 /* 2204 * Multiple blocking read 4k to 4 MB chunks 2205 */ 2206 static int mmc_test_profile_mult_read_blocking_perf(struct mmc_test_card *test) 2207 { 2208 unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16, 2209 1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22}; 2210 struct mmc_test_multiple_rw test_data = { 2211 .bs = bs, 2212 .size = TEST_AREA_MAX_SIZE, 2213 .len = ARRAY_SIZE(bs), 2214 .do_write = false, 2215 .do_nonblock_req = false, 2216 .prepare = MMC_TEST_PREP_NONE, 2217 }; 2218 2219 return mmc_test_rw_multiple_size(test, &test_data); 2220 } 2221 2222 /* 2223 * Multiple non-blocking read 4k to 4 MB chunks 2224 */ 2225 static int mmc_test_profile_mult_read_nonblock_perf(struct mmc_test_card *test) 2226 { 2227 unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16, 2228 1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22}; 2229 struct mmc_test_multiple_rw test_data = { 2230 .bs = bs, 2231 .size = TEST_AREA_MAX_SIZE, 2232 .len = ARRAY_SIZE(bs), 2233 .do_write = false, 2234 .do_nonblock_req = true, 2235 .prepare = MMC_TEST_PREP_NONE, 2236 }; 2237 2238 return mmc_test_rw_multiple_size(test, &test_data); 2239 } 2240 2241 /* 2242 * Multiple blocking write 1 to 512 sg elements 2243 */ 2244 static int mmc_test_profile_sglen_wr_blocking_perf(struct mmc_test_card *test) 2245 { 2246 unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6, 2247 1 << 7, 1 << 8, 1 << 9}; 2248 struct mmc_test_multiple_rw test_data = { 2249 .sg_len = sg_len, 2250 .size = TEST_AREA_MAX_SIZE, 2251 .len = ARRAY_SIZE(sg_len), 2252 .do_write = true, 2253 .do_nonblock_req = false, 2254 .prepare = MMC_TEST_PREP_ERASE, 2255 }; 2256 2257 return mmc_test_rw_multiple_sg_len(test, &test_data); 2258 }; 2259 2260 /* 2261 * Multiple non-blocking write 1 to 512 sg elements 2262 */ 2263 static int mmc_test_profile_sglen_wr_nonblock_perf(struct mmc_test_card *test) 2264 { 2265 unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6, 2266 1 << 7, 1 << 8, 1 << 9}; 2267 struct mmc_test_multiple_rw test_data = { 2268 .sg_len = sg_len, 2269 .size = TEST_AREA_MAX_SIZE, 2270 .len = ARRAY_SIZE(sg_len), 2271 .do_write = true, 2272 .do_nonblock_req = true, 2273 .prepare = MMC_TEST_PREP_ERASE, 2274 }; 2275 2276 return mmc_test_rw_multiple_sg_len(test, &test_data); 2277 } 2278 2279 /* 2280 * Multiple blocking read 1 to 512 sg elements 2281 */ 2282 static int mmc_test_profile_sglen_r_blocking_perf(struct mmc_test_card *test) 2283 { 2284 unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6, 2285 1 << 7, 1 << 8, 1 << 9}; 2286 struct mmc_test_multiple_rw test_data = { 2287 .sg_len = sg_len, 2288 .size = TEST_AREA_MAX_SIZE, 2289 .len = ARRAY_SIZE(sg_len), 2290 .do_write = false, 2291 .do_nonblock_req = false, 2292 .prepare = MMC_TEST_PREP_NONE, 2293 }; 2294 2295 return mmc_test_rw_multiple_sg_len(test, &test_data); 2296 } 2297 2298 /* 2299 * Multiple non-blocking read 1 to 512 sg elements 2300 */ 2301 static int mmc_test_profile_sglen_r_nonblock_perf(struct mmc_test_card *test) 2302 { 2303 unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6, 2304 1 << 7, 1 << 8, 1 << 9}; 2305 struct mmc_test_multiple_rw test_data = { 2306 .sg_len = sg_len, 2307 .size = TEST_AREA_MAX_SIZE, 2308 .len = ARRAY_SIZE(sg_len), 2309 .do_write = false, 2310 .do_nonblock_req = true, 2311 .prepare = MMC_TEST_PREP_NONE, 2312 }; 2313 2314 return mmc_test_rw_multiple_sg_len(test, &test_data); 2315 } 2316 2317 /* 2318 * eMMC hardware reset. 2319 */ 2320 static int mmc_test_reset(struct mmc_test_card *test) 2321 { 2322 struct mmc_card *card = test->card; 2323 int err; 2324 2325 err = mmc_hw_reset(card); 2326 if (!err) { 2327 /* 2328 * Reset will re-enable the card's command queue, but tests 2329 * expect it to be disabled. 2330 */ 2331 if (card->ext_csd.cmdq_en) 2332 mmc_cmdq_disable(card); 2333 return RESULT_OK; 2334 } else if (err == -EOPNOTSUPP) { 2335 return RESULT_UNSUP_HOST; 2336 } 2337 2338 return RESULT_FAIL; 2339 } 2340 2341 static int mmc_test_send_status(struct mmc_test_card *test, 2342 struct mmc_command *cmd) 2343 { 2344 memset(cmd, 0, sizeof(*cmd)); 2345 2346 cmd->opcode = MMC_SEND_STATUS; 2347 if (!mmc_host_is_spi(test->card->host)) 2348 cmd->arg = test->card->rca << 16; 2349 cmd->flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC; 2350 2351 return mmc_wait_for_cmd(test->card->host, cmd, 0); 2352 } 2353 2354 static int mmc_test_ongoing_transfer(struct mmc_test_card *test, 2355 unsigned int dev_addr, int use_sbc, 2356 int repeat_cmd, int write, int use_areq) 2357 { 2358 struct mmc_test_req *rq = mmc_test_req_alloc(); 2359 struct mmc_host *host = test->card->host; 2360 struct mmc_test_area *t = &test->area; 2361 struct mmc_request *mrq; 2362 unsigned long timeout; 2363 bool expired = false; 2364 int ret = 0, cmd_ret; 2365 u32 status = 0; 2366 int count = 0; 2367 2368 if (!rq) 2369 return -ENOMEM; 2370 2371 mrq = &rq->mrq; 2372 if (use_sbc) 2373 mrq->sbc = &rq->sbc; 2374 mrq->cap_cmd_during_tfr = true; 2375 2376 mmc_test_prepare_mrq(test, mrq, t->sg, t->sg_len, dev_addr, t->blocks, 2377 512, write); 2378 2379 if (use_sbc && t->blocks > 1 && !mrq->sbc) { 2380 ret = mmc_host_cmd23(host) ? 2381 RESULT_UNSUP_CARD : 2382 RESULT_UNSUP_HOST; 2383 goto out_free; 2384 } 2385 2386 /* Start ongoing data request */ 2387 if (use_areq) { 2388 ret = mmc_test_start_areq(test, mrq, NULL); 2389 if (ret) 2390 goto out_free; 2391 } else { 2392 mmc_wait_for_req(host, mrq); 2393 } 2394 2395 timeout = jiffies + msecs_to_jiffies(3000); 2396 do { 2397 count += 1; 2398 2399 /* Send status command while data transfer in progress */ 2400 cmd_ret = mmc_test_send_status(test, &rq->status); 2401 if (cmd_ret) 2402 break; 2403 2404 status = rq->status.resp[0]; 2405 if (status & R1_ERROR) { 2406 cmd_ret = -EIO; 2407 break; 2408 } 2409 2410 if (mmc_is_req_done(host, mrq)) 2411 break; 2412 2413 expired = time_after(jiffies, timeout); 2414 if (expired) { 2415 pr_info("%s: timeout waiting for Tran state status %#x\n", 2416 mmc_hostname(host), status); 2417 cmd_ret = -ETIMEDOUT; 2418 break; 2419 } 2420 } while (repeat_cmd && R1_CURRENT_STATE(status) != R1_STATE_TRAN); 2421 2422 /* Wait for data request to complete */ 2423 if (use_areq) { 2424 ret = mmc_test_start_areq(test, NULL, mrq); 2425 } else { 2426 mmc_wait_for_req_done(test->card->host, mrq); 2427 } 2428 2429 /* 2430 * For cap_cmd_during_tfr request, upper layer must send stop if 2431 * required. 2432 */ 2433 if (mrq->data->stop && (mrq->data->error || !mrq->sbc)) { 2434 if (ret) 2435 mmc_wait_for_cmd(host, mrq->data->stop, 0); 2436 else 2437 ret = mmc_wait_for_cmd(host, mrq->data->stop, 0); 2438 } 2439 2440 if (ret) 2441 goto out_free; 2442 2443 if (cmd_ret) { 2444 pr_info("%s: Send Status failed: status %#x, error %d\n", 2445 mmc_hostname(test->card->host), status, cmd_ret); 2446 } 2447 2448 ret = mmc_test_check_result(test, mrq); 2449 if (ret) 2450 goto out_free; 2451 2452 ret = mmc_test_wait_busy(test); 2453 if (ret) 2454 goto out_free; 2455 2456 if (repeat_cmd && (t->blocks + 1) << 9 > t->max_tfr) 2457 pr_info("%s: %d commands completed during transfer of %u blocks\n", 2458 mmc_hostname(test->card->host), count, t->blocks); 2459 2460 if (cmd_ret) 2461 ret = cmd_ret; 2462 out_free: 2463 kfree(rq); 2464 2465 return ret; 2466 } 2467 2468 static int __mmc_test_cmds_during_tfr(struct mmc_test_card *test, 2469 unsigned long sz, int use_sbc, int write, 2470 int use_areq) 2471 { 2472 struct mmc_test_area *t = &test->area; 2473 int ret; 2474 2475 if (!(test->card->host->caps & MMC_CAP_CMD_DURING_TFR)) 2476 return RESULT_UNSUP_HOST; 2477 2478 ret = mmc_test_area_map(test, sz, 0, 0, use_areq); 2479 if (ret) 2480 return ret; 2481 2482 ret = mmc_test_ongoing_transfer(test, t->dev_addr, use_sbc, 0, write, 2483 use_areq); 2484 if (ret) 2485 return ret; 2486 2487 return mmc_test_ongoing_transfer(test, t->dev_addr, use_sbc, 1, write, 2488 use_areq); 2489 } 2490 2491 static int mmc_test_cmds_during_tfr(struct mmc_test_card *test, int use_sbc, 2492 int write, int use_areq) 2493 { 2494 struct mmc_test_area *t = &test->area; 2495 unsigned long sz; 2496 int ret; 2497 2498 for (sz = 512; sz <= t->max_tfr; sz += 512) { 2499 ret = __mmc_test_cmds_during_tfr(test, sz, use_sbc, write, 2500 use_areq); 2501 if (ret) 2502 return ret; 2503 } 2504 return 0; 2505 } 2506 2507 /* 2508 * Commands during read - no Set Block Count (CMD23). 2509 */ 2510 static int mmc_test_cmds_during_read(struct mmc_test_card *test) 2511 { 2512 return mmc_test_cmds_during_tfr(test, 0, 0, 0); 2513 } 2514 2515 /* 2516 * Commands during write - no Set Block Count (CMD23). 2517 */ 2518 static int mmc_test_cmds_during_write(struct mmc_test_card *test) 2519 { 2520 return mmc_test_cmds_during_tfr(test, 0, 1, 0); 2521 } 2522 2523 /* 2524 * Commands during read - use Set Block Count (CMD23). 2525 */ 2526 static int mmc_test_cmds_during_read_cmd23(struct mmc_test_card *test) 2527 { 2528 return mmc_test_cmds_during_tfr(test, 1, 0, 0); 2529 } 2530 2531 /* 2532 * Commands during write - use Set Block Count (CMD23). 2533 */ 2534 static int mmc_test_cmds_during_write_cmd23(struct mmc_test_card *test) 2535 { 2536 return mmc_test_cmds_during_tfr(test, 1, 1, 0); 2537 } 2538 2539 /* 2540 * Commands during non-blocking read - use Set Block Count (CMD23). 2541 */ 2542 static int mmc_test_cmds_during_read_cmd23_nonblock(struct mmc_test_card *test) 2543 { 2544 return mmc_test_cmds_during_tfr(test, 1, 0, 1); 2545 } 2546 2547 /* 2548 * Commands during non-blocking write - use Set Block Count (CMD23). 2549 */ 2550 static int mmc_test_cmds_during_write_cmd23_nonblock(struct mmc_test_card *test) 2551 { 2552 return mmc_test_cmds_during_tfr(test, 1, 1, 1); 2553 } 2554 2555 static const struct mmc_test_case mmc_test_cases[] = { 2556 { 2557 .name = "Basic write (no data verification)", 2558 .run = mmc_test_basic_write, 2559 }, 2560 2561 { 2562 .name = "Basic read (no data verification)", 2563 .run = mmc_test_basic_read, 2564 }, 2565 2566 { 2567 .name = "Basic write (with data verification)", 2568 .prepare = mmc_test_prepare_write, 2569 .run = mmc_test_verify_write, 2570 .cleanup = mmc_test_cleanup, 2571 }, 2572 2573 { 2574 .name = "Basic read (with data verification)", 2575 .prepare = mmc_test_prepare_read, 2576 .run = mmc_test_verify_read, 2577 .cleanup = mmc_test_cleanup, 2578 }, 2579 2580 { 2581 .name = "Multi-block write", 2582 .prepare = mmc_test_prepare_write, 2583 .run = mmc_test_multi_write, 2584 .cleanup = mmc_test_cleanup, 2585 }, 2586 2587 { 2588 .name = "Multi-block read", 2589 .prepare = mmc_test_prepare_read, 2590 .run = mmc_test_multi_read, 2591 .cleanup = mmc_test_cleanup, 2592 }, 2593 2594 { 2595 .name = "Power of two block writes", 2596 .prepare = mmc_test_prepare_write, 2597 .run = mmc_test_pow2_write, 2598 .cleanup = mmc_test_cleanup, 2599 }, 2600 2601 { 2602 .name = "Power of two block reads", 2603 .prepare = mmc_test_prepare_read, 2604 .run = mmc_test_pow2_read, 2605 .cleanup = mmc_test_cleanup, 2606 }, 2607 2608 { 2609 .name = "Weird sized block writes", 2610 .prepare = mmc_test_prepare_write, 2611 .run = mmc_test_weird_write, 2612 .cleanup = mmc_test_cleanup, 2613 }, 2614 2615 { 2616 .name = "Weird sized block reads", 2617 .prepare = mmc_test_prepare_read, 2618 .run = mmc_test_weird_read, 2619 .cleanup = mmc_test_cleanup, 2620 }, 2621 2622 { 2623 .name = "Badly aligned write", 2624 .prepare = mmc_test_prepare_write, 2625 .run = mmc_test_align_write, 2626 .cleanup = mmc_test_cleanup, 2627 }, 2628 2629 { 2630 .name = "Badly aligned read", 2631 .prepare = mmc_test_prepare_read, 2632 .run = mmc_test_align_read, 2633 .cleanup = mmc_test_cleanup, 2634 }, 2635 2636 { 2637 .name = "Badly aligned multi-block write", 2638 .prepare = mmc_test_prepare_write, 2639 .run = mmc_test_align_multi_write, 2640 .cleanup = mmc_test_cleanup, 2641 }, 2642 2643 { 2644 .name = "Badly aligned multi-block read", 2645 .prepare = mmc_test_prepare_read, 2646 .run = mmc_test_align_multi_read, 2647 .cleanup = mmc_test_cleanup, 2648 }, 2649 2650 { 2651 .name = "Proper xfer_size at write (start failure)", 2652 .run = mmc_test_xfersize_write, 2653 }, 2654 2655 { 2656 .name = "Proper xfer_size at read (start failure)", 2657 .run = mmc_test_xfersize_read, 2658 }, 2659 2660 { 2661 .name = "Proper xfer_size at write (midway failure)", 2662 .run = mmc_test_multi_xfersize_write, 2663 }, 2664 2665 { 2666 .name = "Proper xfer_size at read (midway failure)", 2667 .run = mmc_test_multi_xfersize_read, 2668 }, 2669 2670 #ifdef CONFIG_HIGHMEM 2671 2672 { 2673 .name = "Highmem write", 2674 .prepare = mmc_test_prepare_write, 2675 .run = mmc_test_write_high, 2676 .cleanup = mmc_test_cleanup, 2677 }, 2678 2679 { 2680 .name = "Highmem read", 2681 .prepare = mmc_test_prepare_read, 2682 .run = mmc_test_read_high, 2683 .cleanup = mmc_test_cleanup, 2684 }, 2685 2686 { 2687 .name = "Multi-block highmem write", 2688 .prepare = mmc_test_prepare_write, 2689 .run = mmc_test_multi_write_high, 2690 .cleanup = mmc_test_cleanup, 2691 }, 2692 2693 { 2694 .name = "Multi-block highmem read", 2695 .prepare = mmc_test_prepare_read, 2696 .run = mmc_test_multi_read_high, 2697 .cleanup = mmc_test_cleanup, 2698 }, 2699 2700 #else 2701 2702 { 2703 .name = "Highmem write", 2704 .run = mmc_test_no_highmem, 2705 }, 2706 2707 { 2708 .name = "Highmem read", 2709 .run = mmc_test_no_highmem, 2710 }, 2711 2712 { 2713 .name = "Multi-block highmem write", 2714 .run = mmc_test_no_highmem, 2715 }, 2716 2717 { 2718 .name = "Multi-block highmem read", 2719 .run = mmc_test_no_highmem, 2720 }, 2721 2722 #endif /* CONFIG_HIGHMEM */ 2723 2724 { 2725 .name = "Best-case read performance", 2726 .prepare = mmc_test_area_prepare_fill, 2727 .run = mmc_test_best_read_performance, 2728 .cleanup = mmc_test_area_cleanup, 2729 }, 2730 2731 { 2732 .name = "Best-case write performance", 2733 .prepare = mmc_test_area_prepare_erase, 2734 .run = mmc_test_best_write_performance, 2735 .cleanup = mmc_test_area_cleanup, 2736 }, 2737 2738 { 2739 .name = "Best-case read performance into scattered pages", 2740 .prepare = mmc_test_area_prepare_fill, 2741 .run = mmc_test_best_read_perf_max_scatter, 2742 .cleanup = mmc_test_area_cleanup, 2743 }, 2744 2745 { 2746 .name = "Best-case write performance from scattered pages", 2747 .prepare = mmc_test_area_prepare_erase, 2748 .run = mmc_test_best_write_perf_max_scatter, 2749 .cleanup = mmc_test_area_cleanup, 2750 }, 2751 2752 { 2753 .name = "Single read performance by transfer size", 2754 .prepare = mmc_test_area_prepare_fill, 2755 .run = mmc_test_profile_read_perf, 2756 .cleanup = mmc_test_area_cleanup, 2757 }, 2758 2759 { 2760 .name = "Single write performance by transfer size", 2761 .prepare = mmc_test_area_prepare, 2762 .run = mmc_test_profile_write_perf, 2763 .cleanup = mmc_test_area_cleanup, 2764 }, 2765 2766 { 2767 .name = "Single trim performance by transfer size", 2768 .prepare = mmc_test_area_prepare_fill, 2769 .run = mmc_test_profile_trim_perf, 2770 .cleanup = mmc_test_area_cleanup, 2771 }, 2772 2773 { 2774 .name = "Consecutive read performance by transfer size", 2775 .prepare = mmc_test_area_prepare_fill, 2776 .run = mmc_test_profile_seq_read_perf, 2777 .cleanup = mmc_test_area_cleanup, 2778 }, 2779 2780 { 2781 .name = "Consecutive write performance by transfer size", 2782 .prepare = mmc_test_area_prepare, 2783 .run = mmc_test_profile_seq_write_perf, 2784 .cleanup = mmc_test_area_cleanup, 2785 }, 2786 2787 { 2788 .name = "Consecutive trim performance by transfer size", 2789 .prepare = mmc_test_area_prepare, 2790 .run = mmc_test_profile_seq_trim_perf, 2791 .cleanup = mmc_test_area_cleanup, 2792 }, 2793 2794 { 2795 .name = "Random read performance by transfer size", 2796 .prepare = mmc_test_area_prepare, 2797 .run = mmc_test_random_read_perf, 2798 .cleanup = mmc_test_area_cleanup, 2799 }, 2800 2801 { 2802 .name = "Random write performance by transfer size", 2803 .prepare = mmc_test_area_prepare, 2804 .run = mmc_test_random_write_perf, 2805 .cleanup = mmc_test_area_cleanup, 2806 }, 2807 2808 { 2809 .name = "Large sequential read into scattered pages", 2810 .prepare = mmc_test_area_prepare, 2811 .run = mmc_test_large_seq_read_perf, 2812 .cleanup = mmc_test_area_cleanup, 2813 }, 2814 2815 { 2816 .name = "Large sequential write from scattered pages", 2817 .prepare = mmc_test_area_prepare, 2818 .run = mmc_test_large_seq_write_perf, 2819 .cleanup = mmc_test_area_cleanup, 2820 }, 2821 2822 { 2823 .name = "Write performance with blocking req 4k to 4MB", 2824 .prepare = mmc_test_area_prepare, 2825 .run = mmc_test_profile_mult_write_blocking_perf, 2826 .cleanup = mmc_test_area_cleanup, 2827 }, 2828 2829 { 2830 .name = "Write performance with non-blocking req 4k to 4MB", 2831 .prepare = mmc_test_area_prepare, 2832 .run = mmc_test_profile_mult_write_nonblock_perf, 2833 .cleanup = mmc_test_area_cleanup, 2834 }, 2835 2836 { 2837 .name = "Read performance with blocking req 4k to 4MB", 2838 .prepare = mmc_test_area_prepare, 2839 .run = mmc_test_profile_mult_read_blocking_perf, 2840 .cleanup = mmc_test_area_cleanup, 2841 }, 2842 2843 { 2844 .name = "Read performance with non-blocking req 4k to 4MB", 2845 .prepare = mmc_test_area_prepare, 2846 .run = mmc_test_profile_mult_read_nonblock_perf, 2847 .cleanup = mmc_test_area_cleanup, 2848 }, 2849 2850 { 2851 .name = "Write performance blocking req 1 to 512 sg elems", 2852 .prepare = mmc_test_area_prepare, 2853 .run = mmc_test_profile_sglen_wr_blocking_perf, 2854 .cleanup = mmc_test_area_cleanup, 2855 }, 2856 2857 { 2858 .name = "Write performance non-blocking req 1 to 512 sg elems", 2859 .prepare = mmc_test_area_prepare, 2860 .run = mmc_test_profile_sglen_wr_nonblock_perf, 2861 .cleanup = mmc_test_area_cleanup, 2862 }, 2863 2864 { 2865 .name = "Read performance blocking req 1 to 512 sg elems", 2866 .prepare = mmc_test_area_prepare, 2867 .run = mmc_test_profile_sglen_r_blocking_perf, 2868 .cleanup = mmc_test_area_cleanup, 2869 }, 2870 2871 { 2872 .name = "Read performance non-blocking req 1 to 512 sg elems", 2873 .prepare = mmc_test_area_prepare, 2874 .run = mmc_test_profile_sglen_r_nonblock_perf, 2875 .cleanup = mmc_test_area_cleanup, 2876 }, 2877 2878 { 2879 .name = "Reset test", 2880 .run = mmc_test_reset, 2881 }, 2882 2883 { 2884 .name = "Commands during read - no Set Block Count (CMD23)", 2885 .prepare = mmc_test_area_prepare, 2886 .run = mmc_test_cmds_during_read, 2887 .cleanup = mmc_test_area_cleanup, 2888 }, 2889 2890 { 2891 .name = "Commands during write - no Set Block Count (CMD23)", 2892 .prepare = mmc_test_area_prepare, 2893 .run = mmc_test_cmds_during_write, 2894 .cleanup = mmc_test_area_cleanup, 2895 }, 2896 2897 { 2898 .name = "Commands during read - use Set Block Count (CMD23)", 2899 .prepare = mmc_test_area_prepare, 2900 .run = mmc_test_cmds_during_read_cmd23, 2901 .cleanup = mmc_test_area_cleanup, 2902 }, 2903 2904 { 2905 .name = "Commands during write - use Set Block Count (CMD23)", 2906 .prepare = mmc_test_area_prepare, 2907 .run = mmc_test_cmds_during_write_cmd23, 2908 .cleanup = mmc_test_area_cleanup, 2909 }, 2910 2911 { 2912 .name = "Commands during non-blocking read - use Set Block Count (CMD23)", 2913 .prepare = mmc_test_area_prepare, 2914 .run = mmc_test_cmds_during_read_cmd23_nonblock, 2915 .cleanup = mmc_test_area_cleanup, 2916 }, 2917 2918 { 2919 .name = "Commands during non-blocking write - use Set Block Count (CMD23)", 2920 .prepare = mmc_test_area_prepare, 2921 .run = mmc_test_cmds_during_write_cmd23_nonblock, 2922 .cleanup = mmc_test_area_cleanup, 2923 }, 2924 }; 2925 2926 static DEFINE_MUTEX(mmc_test_lock); 2927 2928 static LIST_HEAD(mmc_test_result); 2929 2930 static void mmc_test_run(struct mmc_test_card *test, int testcase) 2931 { 2932 int i, ret; 2933 2934 pr_info("%s: Starting tests of card %s...\n", 2935 mmc_hostname(test->card->host), mmc_card_id(test->card)); 2936 2937 mmc_claim_host(test->card->host); 2938 2939 for (i = 0; i < ARRAY_SIZE(mmc_test_cases); i++) { 2940 struct mmc_test_general_result *gr; 2941 2942 if (testcase && ((i + 1) != testcase)) 2943 continue; 2944 2945 pr_info("%s: Test case %d. %s...\n", 2946 mmc_hostname(test->card->host), i + 1, 2947 mmc_test_cases[i].name); 2948 2949 if (mmc_test_cases[i].prepare) { 2950 ret = mmc_test_cases[i].prepare(test); 2951 if (ret) { 2952 pr_info("%s: Result: Prepare stage failed! (%d)\n", 2953 mmc_hostname(test->card->host), 2954 ret); 2955 continue; 2956 } 2957 } 2958 2959 gr = kzalloc(sizeof(*gr), GFP_KERNEL); 2960 if (gr) { 2961 INIT_LIST_HEAD(&gr->tr_lst); 2962 2963 /* Assign data what we know already */ 2964 gr->card = test->card; 2965 gr->testcase = i; 2966 2967 /* Append container to global one */ 2968 list_add_tail(&gr->link, &mmc_test_result); 2969 2970 /* 2971 * Save the pointer to created container in our private 2972 * structure. 2973 */ 2974 test->gr = gr; 2975 } 2976 2977 ret = mmc_test_cases[i].run(test); 2978 switch (ret) { 2979 case RESULT_OK: 2980 pr_info("%s: Result: OK\n", 2981 mmc_hostname(test->card->host)); 2982 break; 2983 case RESULT_FAIL: 2984 pr_info("%s: Result: FAILED\n", 2985 mmc_hostname(test->card->host)); 2986 break; 2987 case RESULT_UNSUP_HOST: 2988 pr_info("%s: Result: UNSUPPORTED (by host)\n", 2989 mmc_hostname(test->card->host)); 2990 break; 2991 case RESULT_UNSUP_CARD: 2992 pr_info("%s: Result: UNSUPPORTED (by card)\n", 2993 mmc_hostname(test->card->host)); 2994 break; 2995 default: 2996 pr_info("%s: Result: ERROR (%d)\n", 2997 mmc_hostname(test->card->host), ret); 2998 } 2999 3000 /* Save the result */ 3001 if (gr) 3002 gr->result = ret; 3003 3004 if (mmc_test_cases[i].cleanup) { 3005 ret = mmc_test_cases[i].cleanup(test); 3006 if (ret) { 3007 pr_info("%s: Warning: Cleanup stage failed! (%d)\n", 3008 mmc_hostname(test->card->host), 3009 ret); 3010 } 3011 } 3012 } 3013 3014 mmc_release_host(test->card->host); 3015 3016 pr_info("%s: Tests completed.\n", 3017 mmc_hostname(test->card->host)); 3018 } 3019 3020 static void mmc_test_free_result(struct mmc_card *card) 3021 { 3022 struct mmc_test_general_result *gr, *grs; 3023 3024 mutex_lock(&mmc_test_lock); 3025 3026 list_for_each_entry_safe(gr, grs, &mmc_test_result, link) { 3027 struct mmc_test_transfer_result *tr, *trs; 3028 3029 if (card && gr->card != card) 3030 continue; 3031 3032 list_for_each_entry_safe(tr, trs, &gr->tr_lst, link) { 3033 list_del(&tr->link); 3034 kfree(tr); 3035 } 3036 3037 list_del(&gr->link); 3038 kfree(gr); 3039 } 3040 3041 mutex_unlock(&mmc_test_lock); 3042 } 3043 3044 static LIST_HEAD(mmc_test_file_test); 3045 3046 static int mtf_test_show(struct seq_file *sf, void *data) 3047 { 3048 struct mmc_card *card = (struct mmc_card *)sf->private; 3049 struct mmc_test_general_result *gr; 3050 3051 mutex_lock(&mmc_test_lock); 3052 3053 list_for_each_entry(gr, &mmc_test_result, link) { 3054 struct mmc_test_transfer_result *tr; 3055 3056 if (gr->card != card) 3057 continue; 3058 3059 seq_printf(sf, "Test %d: %d\n", gr->testcase + 1, gr->result); 3060 3061 list_for_each_entry(tr, &gr->tr_lst, link) { 3062 seq_printf(sf, "%u %d %llu.%09u %u %u.%02u\n", 3063 tr->count, tr->sectors, 3064 (u64)tr->ts.tv_sec, (u32)tr->ts.tv_nsec, 3065 tr->rate, tr->iops / 100, tr->iops % 100); 3066 } 3067 } 3068 3069 mutex_unlock(&mmc_test_lock); 3070 3071 return 0; 3072 } 3073 3074 static int mtf_test_open(struct inode *inode, struct file *file) 3075 { 3076 return single_open(file, mtf_test_show, inode->i_private); 3077 } 3078 3079 static ssize_t mtf_test_write(struct file *file, const char __user *buf, 3080 size_t count, loff_t *pos) 3081 { 3082 struct seq_file *sf = (struct seq_file *)file->private_data; 3083 struct mmc_card *card = (struct mmc_card *)sf->private; 3084 struct mmc_test_card *test; 3085 long testcase; 3086 int ret; 3087 3088 ret = kstrtol_from_user(buf, count, 10, &testcase); 3089 if (ret) 3090 return ret; 3091 3092 test = kzalloc(sizeof(*test), GFP_KERNEL); 3093 if (!test) 3094 return -ENOMEM; 3095 3096 /* 3097 * Remove all test cases associated with given card. Thus we have only 3098 * actual data of the last run. 3099 */ 3100 mmc_test_free_result(card); 3101 3102 test->card = card; 3103 3104 test->buffer = kzalloc(BUFFER_SIZE, GFP_KERNEL); 3105 #ifdef CONFIG_HIGHMEM 3106 test->highmem = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, BUFFER_ORDER); 3107 #endif 3108 3109 #ifdef CONFIG_HIGHMEM 3110 if (test->buffer && test->highmem) { 3111 #else 3112 if (test->buffer) { 3113 #endif 3114 mutex_lock(&mmc_test_lock); 3115 mmc_test_run(test, testcase); 3116 mutex_unlock(&mmc_test_lock); 3117 } 3118 3119 #ifdef CONFIG_HIGHMEM 3120 __free_pages(test->highmem, BUFFER_ORDER); 3121 #endif 3122 kfree(test->buffer); 3123 kfree(test); 3124 3125 return count; 3126 } 3127 3128 static const struct file_operations mmc_test_fops_test = { 3129 .open = mtf_test_open, 3130 .read = seq_read, 3131 .write = mtf_test_write, 3132 .llseek = seq_lseek, 3133 .release = single_release, 3134 }; 3135 3136 static int mtf_testlist_show(struct seq_file *sf, void *data) 3137 { 3138 int i; 3139 3140 mutex_lock(&mmc_test_lock); 3141 3142 seq_puts(sf, "0:\tRun all tests\n"); 3143 for (i = 0; i < ARRAY_SIZE(mmc_test_cases); i++) 3144 seq_printf(sf, "%d:\t%s\n", i + 1, mmc_test_cases[i].name); 3145 3146 mutex_unlock(&mmc_test_lock); 3147 3148 return 0; 3149 } 3150 3151 DEFINE_SHOW_ATTRIBUTE(mtf_testlist); 3152 3153 static void mmc_test_free_dbgfs_file(struct mmc_card *card) 3154 { 3155 struct mmc_test_dbgfs_file *df, *dfs; 3156 3157 mutex_lock(&mmc_test_lock); 3158 3159 list_for_each_entry_safe(df, dfs, &mmc_test_file_test, link) { 3160 if (card && df->card != card) 3161 continue; 3162 debugfs_remove(df->file); 3163 list_del(&df->link); 3164 kfree(df); 3165 } 3166 3167 mutex_unlock(&mmc_test_lock); 3168 } 3169 3170 static int __mmc_test_register_dbgfs_file(struct mmc_card *card, 3171 const char *name, umode_t mode, const struct file_operations *fops) 3172 { 3173 struct dentry *file = NULL; 3174 struct mmc_test_dbgfs_file *df; 3175 3176 if (card->debugfs_root) 3177 file = debugfs_create_file(name, mode, card->debugfs_root, 3178 card, fops); 3179 3180 df = kmalloc(sizeof(*df), GFP_KERNEL); 3181 if (!df) { 3182 debugfs_remove(file); 3183 return -ENOMEM; 3184 } 3185 3186 df->card = card; 3187 df->file = file; 3188 3189 list_add(&df->link, &mmc_test_file_test); 3190 return 0; 3191 } 3192 3193 static int mmc_test_register_dbgfs_file(struct mmc_card *card) 3194 { 3195 int ret; 3196 3197 mutex_lock(&mmc_test_lock); 3198 3199 ret = __mmc_test_register_dbgfs_file(card, "test", S_IWUSR | S_IRUGO, 3200 &mmc_test_fops_test); 3201 if (ret) 3202 goto err; 3203 3204 ret = __mmc_test_register_dbgfs_file(card, "testlist", S_IRUGO, 3205 &mtf_testlist_fops); 3206 if (ret) 3207 goto err; 3208 3209 err: 3210 mutex_unlock(&mmc_test_lock); 3211 3212 return ret; 3213 } 3214 3215 static int mmc_test_probe(struct mmc_card *card) 3216 { 3217 int ret; 3218 3219 if (!mmc_card_mmc(card) && !mmc_card_sd(card)) 3220 return -ENODEV; 3221 3222 ret = mmc_test_register_dbgfs_file(card); 3223 if (ret) 3224 return ret; 3225 3226 if (card->ext_csd.cmdq_en) { 3227 mmc_claim_host(card->host); 3228 ret = mmc_cmdq_disable(card); 3229 mmc_release_host(card->host); 3230 if (ret) 3231 return ret; 3232 } 3233 3234 dev_info(&card->dev, "Card claimed for testing.\n"); 3235 3236 return 0; 3237 } 3238 3239 static void mmc_test_remove(struct mmc_card *card) 3240 { 3241 if (card->reenable_cmdq) { 3242 mmc_claim_host(card->host); 3243 mmc_cmdq_enable(card); 3244 mmc_release_host(card->host); 3245 } 3246 mmc_test_free_result(card); 3247 mmc_test_free_dbgfs_file(card); 3248 } 3249 3250 static struct mmc_driver mmc_driver = { 3251 .drv = { 3252 .name = "mmc_test", 3253 }, 3254 .probe = mmc_test_probe, 3255 .remove = mmc_test_remove, 3256 }; 3257 3258 static int __init mmc_test_init(void) 3259 { 3260 return mmc_register_driver(&mmc_driver); 3261 } 3262 3263 static void __exit mmc_test_exit(void) 3264 { 3265 /* Clear stalled data if card is still plugged */ 3266 mmc_test_free_result(NULL); 3267 mmc_test_free_dbgfs_file(NULL); 3268 3269 mmc_unregister_driver(&mmc_driver); 3270 } 3271 3272 module_init(mmc_test_init); 3273 module_exit(mmc_test_exit); 3274 3275 MODULE_LICENSE("GPL"); 3276 MODULE_DESCRIPTION("Multimedia Card (MMC) host test driver"); 3277 MODULE_AUTHOR("Pierre Ossman"); 3278