/*
 * Copyright 2007-2008 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 */

#include <linux/mmc/core.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/slab.h>

#include <linux/scatterlist.h>
#include <linux/swap.h>		/* For nr_free_buffer_pages() */
#include <linux/list.h>

#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
#include <linux/module.h>

#include "core.h"
#include "card.h"
#include "host.h"
#include "bus.h"
#include "mmc_ops.h"

#define RESULT_OK		0
#define RESULT_FAIL		1
#define RESULT_UNSUP_HOST	2
#define RESULT_UNSUP_CARD	3

#define BUFFER_ORDER		2
#define BUFFER_SIZE		(PAGE_SIZE << BUFFER_ORDER)

#define TEST_ALIGN_END		8

/*
 * Limit the test area size to the maximum MMC HC erase group size.  Note that
 * the maximum SD allocation unit size is just 4MiB.
 */
#define TEST_AREA_MAX_SIZE (128 * 1024 * 1024)

/**
 * struct mmc_test_pages - pages allocated by 'alloc_pages()'.
 * @page: first page in the allocation
 * @order: order of the number of pages allocated
 */
struct mmc_test_pages {
	struct page *page;
	unsigned int order;
};

/**
 * struct mmc_test_mem - allocated memory.
 * @arr: array of allocations
 * @cnt: number of allocations
 */
struct mmc_test_mem {
	struct mmc_test_pages *arr;
	unsigned int cnt;
};

/**
 * struct mmc_test_area - information for performance tests.
 * @max_sz: test area size (in bytes)
 * @dev_addr: address on card at which to do performance tests
 * @max_tfr: maximum transfer size allowed by driver (in bytes)
 * @max_segs: maximum segments allowed by driver in scatterlist @sg
 * @max_seg_sz: maximum segment size allowed by driver
 * @blocks: number of (512 byte) blocks currently mapped by @sg
 * @sg_len: length of currently mapped scatterlist @sg
 * @mem: allocated memory
 * @sg: scatterlist
 */
struct mmc_test_area {
	unsigned long max_sz;
	unsigned int dev_addr;
	unsigned int max_tfr;
	unsigned int max_segs;
	unsigned int max_seg_sz;
	unsigned int blocks;
	unsigned int sg_len;
	struct mmc_test_mem *mem;
	struct scatterlist *sg;
};

/**
 * struct mmc_test_transfer_result - transfer results for performance tests.
 * @link: double-linked list
 * @count: number of groups of sectors to check
 * @sectors: number of sectors to check in one group
 * @ts: time values of transfer
 * @rate: calculated transfer rate
 * @iops: I/O operations per second (times 100)
 */
struct mmc_test_transfer_result {
	struct list_head link;
	unsigned int count;
	unsigned int sectors;
	struct timespec ts;
	unsigned int rate;
	unsigned int iops;
};

/**
 * struct mmc_test_general_result - results for tests.
 * @link: double-linked list
 * @card: card under test
 * @testcase: number of test case
 * @result: result of test run
 * @tr_lst: transfer measurements if any as mmc_test_transfer_result
 */
struct mmc_test_general_result {
	struct list_head link;
	struct mmc_card *card;
	int testcase;
	int result;
	struct list_head tr_lst;
};

/**
 * struct mmc_test_dbgfs_file - debugfs related file.
 * @link: double-linked list
 * @card: card under test
 * @file: file created under debugfs
 */
struct mmc_test_dbgfs_file {
	struct list_head link;
	struct mmc_card *card;
	struct dentry *file;
};

/**
 * struct mmc_test_card - test information.
 * @card: card under test
 * @scratch: transfer buffer
 * @buffer: transfer buffer
 * @highmem: buffer for highmem tests
 * @area: information for performance tests
 * @gr: pointer to results of current testcase
 */
struct mmc_test_card {
	struct mmc_card	*card;

	u8		scratch[BUFFER_SIZE];
	u8		*buffer;
#ifdef CONFIG_HIGHMEM
	struct page	*highmem;
#endif
	struct mmc_test_area		area;
	struct mmc_test_general_result	*gr;
};

enum mmc_test_prep_media {
	MMC_TEST_PREP_NONE = 0,
	MMC_TEST_PREP_WRITE_FULL = 1 << 0,
	MMC_TEST_PREP_ERASE = 1 << 1,
};

struct mmc_test_multiple_rw {
	unsigned int *sg_len;
	unsigned int *bs;
	unsigned int len;
	unsigned int size;
	bool do_write;
	bool do_nonblock_req;
	enum mmc_test_prep_media prepare;
};

struct mmc_test_async_req {
	struct mmc_async_req areq;
	struct mmc_test_card *test;
};

/*******************************************************************/
/*  General helper functions                                       */
/*******************************************************************/

/*
 * Configure correct block size in card
 */
static int mmc_test_set_blksize(struct mmc_test_card *test, unsigned size)
{
	return mmc_set_blocklen(test->card, size);
}

static bool mmc_test_card_cmd23(struct mmc_card *card)
{
	return mmc_card_mmc(card) ||
	       (mmc_card_sd(card) && card->scr.cmds & SD_SCR_CMD23_SUPPORT);
}

static void mmc_test_prepare_sbc(struct mmc_test_card *test,
				 struct mmc_request *mrq, unsigned int blocks)
{
	struct mmc_card *card = test->card;

	if (!mrq->sbc || !mmc_host_cmd23(card->host) ||
	    !mmc_test_card_cmd23(card) || !mmc_op_multi(mrq->cmd->opcode) ||
	    (card->quirks & MMC_QUIRK_BLK_NO_CMD23)) {
		mrq->sbc = NULL;
		return;
	}

	mrq->sbc->opcode = MMC_SET_BLOCK_COUNT;
	mrq->sbc->arg = blocks;
	mrq->sbc->flags = MMC_RSP_R1 | MMC_CMD_AC;
}

/*
 * Fill in the mmc_request structure given a set of transfer parameters.
 */
static void mmc_test_prepare_mrq(struct mmc_test_card *test,
	struct mmc_request *mrq, struct scatterlist *sg, unsigned sg_len,
	unsigned dev_addr, unsigned blocks, unsigned blksz, int write)
{
	if (WARN_ON(!mrq || !mrq->cmd || !mrq->data || !mrq->stop))
		return;

	if (blocks > 1) {
		mrq->cmd->opcode = write ?
			MMC_WRITE_MULTIPLE_BLOCK : MMC_READ_MULTIPLE_BLOCK;
	} else {
		mrq->cmd->opcode = write ?
			MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
	}

	mrq->cmd->arg = dev_addr;
	if (!mmc_card_blockaddr(test->card))
		mrq->cmd->arg <<= 9;

	mrq->cmd->flags = MMC_RSP_R1 | MMC_CMD_ADTC;

	if (blocks == 1)
		mrq->stop = NULL;
	else {
		mrq->stop->opcode = MMC_STOP_TRANSMISSION;
		mrq->stop->arg = 0;
		mrq->stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
	}

	mrq->data->blksz = blksz;
	mrq->data->blocks = blocks;
	mrq->data->flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
	mrq->data->sg = sg;
	mrq->data->sg_len = sg_len;

	mmc_test_prepare_sbc(test, mrq, blocks);

	mmc_set_data_timeout(mrq->data, test->card);
}
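
/*
 * Editorial note (illustrative, not from the original source): when
 * mmc_test_prepare_sbc() keeps mrq->sbc, the core sends CMD23
 * (SET_BLOCK_COUNT) ahead of the data command, so a multi-block write
 * prepared by mmc_test_prepare_mrq() runs as the pre-defined sequence
 * CMD23 + CMD25 rather than the open-ended CMD25 ... CMD12
 * (STOP_TRANSMISSION) sequence.
 */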

static int mmc_test_busy(struct mmc_command *cmd)
{
	return !(cmd->resp[0] & R1_READY_FOR_DATA) ||
		(R1_CURRENT_STATE(cmd->resp[0]) == R1_STATE_PRG);
}

/*
 * Wait for the card to finish the busy state
 */
static int mmc_test_wait_busy(struct mmc_test_card *test)
{
	int ret, busy;
	struct mmc_command cmd = {};

	busy = 0;
	do {
		memset(&cmd, 0, sizeof(struct mmc_command));

		cmd.opcode = MMC_SEND_STATUS;
		cmd.arg = test->card->rca << 16;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

		ret = mmc_wait_for_cmd(test->card->host, &cmd, 0);
		if (ret)
			break;

		if (!busy && mmc_test_busy(&cmd)) {
			busy = 1;
			if (test->card->host->caps & MMC_CAP_WAIT_WHILE_BUSY)
				pr_info("%s: Warning: Host did not wait for busy state to end.\n",
					mmc_hostname(test->card->host));
		}
	} while (mmc_test_busy(&cmd));

	return ret;
}

/*
 * Transfer a single sector of kernel addressable data
 */
static int mmc_test_buffer_transfer(struct mmc_test_card *test,
	u8 *buffer, unsigned addr, unsigned blksz, int write)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_command stop = {};
	struct mmc_data data = {};

	struct scatterlist sg;

	mrq.cmd = &cmd;
	mrq.data = &data;
	mrq.stop = &stop;

	sg_init_one(&sg, buffer, blksz);

	mmc_test_prepare_mrq(test, &mrq, &sg, 1, addr, 1, blksz, write);

	mmc_wait_for_req(test->card->host, &mrq);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return mmc_test_wait_busy(test);
}

static void mmc_test_free_mem(struct mmc_test_mem *mem)
{
	if (!mem)
		return;
	while (mem->cnt--)
		__free_pages(mem->arr[mem->cnt].page,
			     mem->arr[mem->cnt].order);
	kfree(mem->arr);
	kfree(mem);
}

/*
 * Allocate a lot of memory, preferably max_sz but at least min_sz.  In case
 * there isn't much memory do not exceed 1/16th total lowmem pages.  Also do
 * not exceed a maximum number of segments and try not to make segments much
 * bigger than maximum segment size.
 */
static struct mmc_test_mem *mmc_test_alloc_mem(unsigned long min_sz,
					       unsigned long max_sz,
					       unsigned int max_segs,
					       unsigned int max_seg_sz)
{
	unsigned long max_page_cnt = DIV_ROUND_UP(max_sz, PAGE_SIZE);
	unsigned long min_page_cnt = DIV_ROUND_UP(min_sz, PAGE_SIZE);
	unsigned long max_seg_page_cnt = DIV_ROUND_UP(max_seg_sz, PAGE_SIZE);
	unsigned long page_cnt = 0;
	unsigned long limit = nr_free_buffer_pages() >> 4;
	struct mmc_test_mem *mem;

	if (max_page_cnt > limit)
		max_page_cnt = limit;
	if (min_page_cnt > max_page_cnt)
		min_page_cnt = max_page_cnt;

	if (max_seg_page_cnt > max_page_cnt)
		max_seg_page_cnt = max_page_cnt;

	if (max_segs > max_page_cnt)
		max_segs = max_page_cnt;

	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return NULL;

	mem->arr = kcalloc(max_segs, sizeof(*mem->arr), GFP_KERNEL);
	if (!mem->arr)
		goto out_free;

	while (max_page_cnt) {
		struct page *page;
		unsigned int order;
		gfp_t flags = GFP_KERNEL | GFP_DMA | __GFP_NOWARN |
				__GFP_NORETRY;

		order = get_order(max_seg_page_cnt << PAGE_SHIFT);
		while (1) {
			page = alloc_pages(flags, order);
			if (page || !order)
				break;
			order -= 1;
		}
		if (!page) {
			if (page_cnt < min_page_cnt)
				goto out_free;
			break;
		}
		mem->arr[mem->cnt].page = page;
		mem->arr[mem->cnt].order = order;
		mem->cnt += 1;
		if (max_page_cnt <= (1UL << order))
			break;
		max_page_cnt -= 1UL << order;
		page_cnt += 1UL << order;
		if (mem->cnt >= max_segs) {
			if (page_cnt < min_page_cnt)
				goto out_free;
			break;
		}
	}

	return mem;

out_free:
	mmc_test_free_mem(mem);
	return NULL;
}

/*
 * Map memory into a scatterlist.  Optionally allow the same memory to be
 * mapped more than once.
 */
static int mmc_test_map_sg(struct mmc_test_mem *mem, unsigned long size,
			   struct scatterlist *sglist, int repeat,
			   unsigned int max_segs, unsigned int max_seg_sz,
			   unsigned int *sg_len, int min_sg_len)
{
	struct scatterlist *sg = NULL;
	unsigned int i;
	unsigned long sz = size;

	sg_init_table(sglist, max_segs);
	if (min_sg_len > max_segs)
		min_sg_len = max_segs;

	*sg_len = 0;
	do {
		for (i = 0; i < mem->cnt; i++) {
			unsigned long len = PAGE_SIZE << mem->arr[i].order;

			if (min_sg_len && (size / min_sg_len < len))
				len = ALIGN(size / min_sg_len, 512);
			if (len > sz)
				len = sz;
			if (len > max_seg_sz)
				len = max_seg_sz;
			if (sg)
				sg = sg_next(sg);
			else
				sg = sglist;
			if (!sg)
				return -EINVAL;
			sg_set_page(sg, mem->arr[i].page, len, 0);
			sz -= len;
			*sg_len += 1;
			if (!sz)
				break;
		}
	} while (sz && repeat);

	if (sz)
		return -EINVAL;

	if (sg)
		sg_mark_end(sg);

	return 0;
}
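
/*
 * Editorial sketch of how the two helpers above pair up, for a
 * hypothetical host limited to 128 segments of 64 KiB (illustrative
 * values, not called anywhere):
 *
 *	struct mmc_test_mem *mem;
 *	struct scatterlist sg[128];
 *	unsigned int sg_len;
 *
 *	mem = mmc_test_alloc_mem(64 * 1024, 1024 * 1024, 128, 64 * 1024);
 *	if (mem && !mmc_test_map_sg(mem, 1024 * 1024, sg, 1, 128,
 *				    64 * 1024, &sg_len, 0))
 *		... sg[0..sg_len - 1] now describes 1 MiB of buffers ...
 *	mmc_test_free_mem(mem);
 *
 * mmc_test_area_map() further down performs exactly this pairing for
 * the performance tests.
 */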

/*
 * Map memory into a scatterlist so that no pages are contiguous.  Allow the
 * same memory to be mapped more than once.
 */
static int mmc_test_map_sg_max_scatter(struct mmc_test_mem *mem,
				       unsigned long sz,
				       struct scatterlist *sglist,
				       unsigned int max_segs,
				       unsigned int max_seg_sz,
				       unsigned int *sg_len)
{
	struct scatterlist *sg = NULL;
	unsigned int i = mem->cnt, cnt;
	unsigned long len;
	void *base, *addr, *last_addr = NULL;

	sg_init_table(sglist, max_segs);

	*sg_len = 0;
	while (sz) {
		base = page_address(mem->arr[--i].page);
		cnt = 1 << mem->arr[i].order;
		while (sz && cnt) {
			addr = base + PAGE_SIZE * --cnt;
			if (last_addr && last_addr + PAGE_SIZE == addr)
				continue;
			last_addr = addr;
			len = PAGE_SIZE;
			if (len > max_seg_sz)
				len = max_seg_sz;
			if (len > sz)
				len = sz;
			if (sg)
				sg = sg_next(sg);
			else
				sg = sglist;
			if (!sg)
				return -EINVAL;
			sg_set_page(sg, virt_to_page(addr), len, 0);
			sz -= len;
			*sg_len += 1;
		}
		if (i == 0)
			i = mem->cnt;
	}

	if (sg)
		sg_mark_end(sg);

	return 0;
}

/*
 * Calculate transfer rate in bytes per second.
 */
static unsigned int mmc_test_rate(uint64_t bytes, struct timespec *ts)
{
	uint64_t ns;

	ns = ts->tv_sec;
	ns *= 1000000000;
	ns += ts->tv_nsec;

	bytes *= 1000000000;

	while (ns > UINT_MAX) {
		bytes >>= 1;
		ns >>= 1;
	}

	if (!ns)
		return 0;

	do_div(bytes, (uint32_t)ns);

	return bytes;
}
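
/*
 * Editorial worked example for mmc_test_rate() (illustrative numbers):
 * transferring 524288 bytes in 0.25 s gives bytes * 10^9 =
 * 524288000000000 and ns = 250000000.  Both values are halved together
 * until ns fits in 32 bits (it already does here), then the 64-bit
 * division yields 524288000000000 / 250000000 = 2097152 bytes/s, i.e.
 * 2 MiB/s.  The halving trades a little precision for a do_div() with
 * a 32-bit divisor.
 */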

/*
 * Save transfer results for future usage
 */
static void mmc_test_save_transfer_result(struct mmc_test_card *test,
	unsigned int count, unsigned int sectors, struct timespec ts,
	unsigned int rate, unsigned int iops)
{
	struct mmc_test_transfer_result *tr;

	if (!test->gr)
		return;

	tr = kmalloc(sizeof(*tr), GFP_KERNEL);
	if (!tr)
		return;

	tr->count = count;
	tr->sectors = sectors;
	tr->ts = ts;
	tr->rate = rate;
	tr->iops = iops;

	list_add_tail(&tr->link, &test->gr->tr_lst);
}

/*
 * Print the transfer rate.
 */
static void mmc_test_print_rate(struct mmc_test_card *test, uint64_t bytes,
				struct timespec *ts1, struct timespec *ts2)
{
	unsigned int rate, iops, sectors = bytes >> 9;
	struct timespec ts;

	ts = timespec_sub(*ts2, *ts1);

	rate = mmc_test_rate(bytes, &ts);
	iops = mmc_test_rate(100, &ts); /* I/O ops per sec x 100 */

	pr_info("%s: Transfer of %u sectors (%u%s KiB) took %lu.%09lu "
		"seconds (%u kB/s, %u KiB/s, %u.%02u IOPS)\n",
		mmc_hostname(test->card->host), sectors, sectors >> 1,
		(sectors & 1 ? ".5" : ""), (unsigned long)ts.tv_sec,
		(unsigned long)ts.tv_nsec, rate / 1000, rate / 1024,
		iops / 100, iops % 100);

	mmc_test_save_transfer_result(test, 1, sectors, ts, rate, iops);
}

/*
 * Print the average transfer rate.
 */
static void mmc_test_print_avg_rate(struct mmc_test_card *test, uint64_t bytes,
				    unsigned int count, struct timespec *ts1,
				    struct timespec *ts2)
{
	unsigned int rate, iops, sectors = bytes >> 9;
	uint64_t tot = bytes * count;
	struct timespec ts;

	ts = timespec_sub(*ts2, *ts1);

	rate = mmc_test_rate(tot, &ts);
	iops = mmc_test_rate(count * 100, &ts); /* I/O ops per sec x 100 */

	pr_info("%s: Transfer of %u x %u sectors (%u x %u%s KiB) took "
		"%lu.%09lu seconds (%u kB/s, %u KiB/s, "
		"%u.%02u IOPS, sg_len %d)\n",
		mmc_hostname(test->card->host), count, sectors, count,
		sectors >> 1, (sectors & 1 ? ".5" : ""),
		(unsigned long)ts.tv_sec, (unsigned long)ts.tv_nsec,
		rate / 1000, rate / 1024, iops / 100, iops % 100,
		test->area.sg_len);

	mmc_test_save_transfer_result(test, count, sectors, ts, rate, iops);
}

/*
 * Return the card size in sectors.
 */
static unsigned int mmc_test_capacity(struct mmc_card *card)
{
	if (!mmc_card_sd(card) && mmc_card_blockaddr(card))
		return card->ext_csd.sectors;
	else
		return card->csd.capacity << (card->csd.read_blkbits - 9);
}

/*******************************************************************/
/*  Test preparation and cleanup                                   */
/*******************************************************************/

/*
 * Fill the first couple of sectors of the card with known data
 * so that bad reads/writes can be detected
 */
static int __mmc_test_prepare(struct mmc_test_card *test, int write)
{
	int ret, i;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	if (write)
		memset(test->buffer, 0xDF, 512);
	else {
		for (i = 0; i < 512; i++)
			test->buffer[i] = i;
	}

	for (i = 0; i < BUFFER_SIZE / 512; i++) {
		ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_prepare_write(struct mmc_test_card *test)
{
	return __mmc_test_prepare(test, 1);
}

static int mmc_test_prepare_read(struct mmc_test_card *test)
{
	return __mmc_test_prepare(test, 0);
}

static int mmc_test_cleanup(struct mmc_test_card *test)
{
	int ret, i;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	memset(test->buffer, 0, 512);

	for (i = 0; i < BUFFER_SIZE / 512; i++) {
		ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
		if (ret)
			return ret;
	}

	return 0;
}

/*******************************************************************/
/*  Test execution helpers                                         */
/*******************************************************************/

/*
 * Modifies the mmc_request to perform the "short transfer" tests
 */
static void mmc_test_prepare_broken_mrq(struct mmc_test_card *test,
	struct mmc_request *mrq, int write)
{
	if (WARN_ON(!mrq || !mrq->cmd || !mrq->data))
		return;

	if (mrq->data->blocks > 1) {
		mrq->cmd->opcode = write ?
			MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
		mrq->stop = NULL;
	} else {
		mrq->cmd->opcode = MMC_SEND_STATUS;
		mrq->cmd->arg = test->card->rca << 16;
	}
}

/*
 * Checks that a normal transfer didn't have any errors
 */
static int mmc_test_check_result(struct mmc_test_card *test,
				 struct mmc_request *mrq)
{
	int ret;

	if (WARN_ON(!mrq || !mrq->cmd || !mrq->data))
		return -EINVAL;

	ret = 0;

	if (mrq->sbc && mrq->sbc->error)
		ret = mrq->sbc->error;
	if (!ret && mrq->cmd->error)
		ret = mrq->cmd->error;
	if (!ret && mrq->data->error)
		ret = mrq->data->error;
	if (!ret && mrq->stop && mrq->stop->error)
		ret = mrq->stop->error;
	if (!ret && mrq->data->bytes_xfered !=
		    mrq->data->blocks * mrq->data->blksz)
		ret = RESULT_FAIL;

	if (ret == -EINVAL)
		ret = RESULT_UNSUP_HOST;

	return ret;
}

static enum mmc_blk_status mmc_test_check_result_async(struct mmc_card *card,
						       struct mmc_async_req *areq)
{
	struct mmc_test_async_req *test_async =
		container_of(areq, struct mmc_test_async_req, areq);
	int ret;

	mmc_test_wait_busy(test_async->test);

	/*
	 * FIXME: this used to just cast a regular error code, either a
	 * kernel -ERRORCODE or a local test framework RESULT_* code, into
	 * an enum mmc_blk_status and return it as the result check.
	 * Instead, convert it to some reasonable type by just returning
	 * either MMC_BLK_SUCCESS or MMC_BLK_CMD_ERR.
	 * If possible, a reasonable error code should be returned.
	 */
	ret = mmc_test_check_result(test_async->test, areq->mrq);
	if (ret)
		return MMC_BLK_CMD_ERR;

	return MMC_BLK_SUCCESS;
}

/*
 * Checks that a "short transfer" behaved as expected
 */
static int mmc_test_check_broken_result(struct mmc_test_card *test,
					struct mmc_request *mrq)
{
	int ret;

	if (WARN_ON(!mrq || !mrq->cmd || !mrq->data))
		return -EINVAL;

	ret = 0;

	if (!ret && mrq->cmd->error)
		ret = mrq->cmd->error;
	if (!ret && mrq->data->error == 0)
		ret = RESULT_FAIL;
	if (!ret && mrq->data->error != -ETIMEDOUT)
		ret = mrq->data->error;
	if (!ret && mrq->stop && mrq->stop->error)
		ret = mrq->stop->error;
	if (mrq->data->blocks > 1) {
		if (!ret && mrq->data->bytes_xfered > mrq->data->blksz)
			ret = RESULT_FAIL;
	} else {
		if (!ret && mrq->data->bytes_xfered > 0)
			ret = RESULT_FAIL;
	}

	if (ret == -EINVAL)
		ret = RESULT_UNSUP_HOST;

	return ret;
}

/*
 * Tests nonblock transfer with certain parameters
 */
static void mmc_test_nonblock_reset(struct mmc_request *mrq,
				    struct mmc_command *cmd,
				    struct mmc_command *stop,
				    struct mmc_data *data)
{
	memset(mrq, 0, sizeof(struct mmc_request));
	memset(cmd, 0, sizeof(struct mmc_command));
	memset(data, 0, sizeof(struct mmc_data));
	memset(stop, 0, sizeof(struct mmc_command));

	mrq->cmd = cmd;
	mrq->data = data;
	mrq->stop = stop;
}
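
/*
 * Editorial note: mmc_test_nonblock_transfer() below drives two request
 * slots in a ping-pong fashion: while the host controller works on
 * cur_areq, the CPU can prepare other_areq, and mmc_start_areq() hands
 * back the previously completed request, which is then reset and
 * reused.  This mirrors the pre_req()/post_req() overlap used by the
 * block driver to hide request preparation cost.
 */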

static int mmc_test_nonblock_transfer(struct mmc_test_card *test,
				      struct scatterlist *sg, unsigned sg_len,
				      unsigned dev_addr, unsigned blocks,
				      unsigned blksz, int write, int count)
{
	struct mmc_request mrq1;
	struct mmc_command cmd1;
	struct mmc_command stop1;
	struct mmc_data data1;

	struct mmc_request mrq2;
	struct mmc_command cmd2;
	struct mmc_command stop2;
	struct mmc_data data2;

	struct mmc_test_async_req test_areq[2];
	struct mmc_async_req *done_areq;
	struct mmc_async_req *cur_areq = &test_areq[0].areq;
	struct mmc_async_req *other_areq = &test_areq[1].areq;
	enum mmc_blk_status status;
	int i;
	int ret = RESULT_OK;

	test_areq[0].test = test;
	test_areq[1].test = test;

	mmc_test_nonblock_reset(&mrq1, &cmd1, &stop1, &data1);
	mmc_test_nonblock_reset(&mrq2, &cmd2, &stop2, &data2);

	cur_areq->mrq = &mrq1;
	cur_areq->err_check = mmc_test_check_result_async;
	other_areq->mrq = &mrq2;
	other_areq->err_check = mmc_test_check_result_async;

	for (i = 0; i < count; i++) {
		mmc_test_prepare_mrq(test, cur_areq->mrq, sg, sg_len, dev_addr,
				     blocks, blksz, write);
		done_areq = mmc_start_areq(test->card->host, cur_areq, &status);

		if (status != MMC_BLK_SUCCESS || (!done_areq && i > 0)) {
			ret = RESULT_FAIL;
			goto err;
		}

		if (done_areq) {
			if (done_areq->mrq == &mrq2)
				mmc_test_nonblock_reset(&mrq2, &cmd2,
							&stop2, &data2);
			else
				mmc_test_nonblock_reset(&mrq1, &cmd1,
							&stop1, &data1);
		}
		swap(cur_areq, other_areq);
		dev_addr += blocks;
	}

	done_areq = mmc_start_areq(test->card->host, NULL, &status);
	if (status != MMC_BLK_SUCCESS)
		ret = RESULT_FAIL;

	return ret;
err:
	return ret;
}

/*
 * Tests a basic transfer with certain parameters
 */
static int mmc_test_simple_transfer(struct mmc_test_card *test,
	struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
	unsigned blocks, unsigned blksz, int write)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_command stop = {};
	struct mmc_data data = {};

	mrq.cmd = &cmd;
	mrq.data = &data;
	mrq.stop = &stop;

	mmc_test_prepare_mrq(test, &mrq, sg, sg_len, dev_addr,
			     blocks, blksz, write);

	mmc_wait_for_req(test->card->host, &mrq);

	mmc_test_wait_busy(test);

	return mmc_test_check_result(test, &mrq);
}

/*
 * Tests a transfer where the card will fail completely or partly
 */
static int mmc_test_broken_transfer(struct mmc_test_card *test,
	unsigned blocks, unsigned blksz, int write)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_command stop = {};
	struct mmc_data data = {};

	struct scatterlist sg;

	mrq.cmd = &cmd;
	mrq.data = &data;
	mrq.stop = &stop;

	sg_init_one(&sg, test->buffer, blocks * blksz);

	mmc_test_prepare_mrq(test, &mrq, &sg, 1, 0, blocks, blksz, write);
	mmc_test_prepare_broken_mrq(test, &mrq, write);

	mmc_wait_for_req(test->card->host, &mrq);

	mmc_test_wait_busy(test);

	return mmc_test_check_broken_result(test, &mrq);
}

/*
 * Does a complete transfer test where data is also validated
 *
 * Note: mmc_test_prepare() must have been done before this call
 */
static int mmc_test_transfer(struct mmc_test_card *test,
	struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
	unsigned blocks, unsigned blksz, int write)
{
	int ret, i;
	unsigned long flags;

	if (write) {
		for (i = 0; i < blocks * blksz; i++)
			test->scratch[i] = i;
	} else {
		memset(test->scratch, 0, BUFFER_SIZE);
	}
	local_irq_save(flags);
	sg_copy_from_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
	local_irq_restore(flags);

	ret = mmc_test_set_blksize(test, blksz);
	if (ret)
		return ret;

	ret = mmc_test_simple_transfer(test, sg, sg_len, dev_addr,
				       blocks, blksz, write);
	if (ret)
		return ret;

	if (write) {
		int sectors;

		ret = mmc_test_set_blksize(test, 512);
		if (ret)
			return ret;

		sectors = (blocks * blksz + 511) / 512;
		if ((sectors * 512) == (blocks * blksz))
			sectors++;

		if ((sectors * 512) > BUFFER_SIZE)
			return -EINVAL;

		memset(test->buffer, 0, sectors * 512);

		for (i = 0; i < sectors; i++) {
			ret = mmc_test_buffer_transfer(test,
				test->buffer + i * 512,
				dev_addr + i, 512, 0);
			if (ret)
				return ret;
		}

		for (i = 0; i < blocks * blksz; i++) {
			if (test->buffer[i] != (u8)i)
				return RESULT_FAIL;
		}

		for (; i < sectors * 512; i++) {
			if (test->buffer[i] != 0xDF)
				return RESULT_FAIL;
		}
	} else {
		local_irq_save(flags);
		sg_copy_to_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
		local_irq_restore(flags);
		for (i = 0; i < blocks * blksz; i++) {
			if (test->scratch[i] != (u8)i)
				return RESULT_FAIL;
		}
	}

	return 0;
}

/*******************************************************************/
/*  Tests                                                          */
/*******************************************************************/

struct mmc_test_case {
	const char *name;

	int (*prepare)(struct mmc_test_card *);
	int (*run)(struct mmc_test_card *);
	int (*cleanup)(struct mmc_test_card *);
};

static int mmc_test_basic_write(struct mmc_test_card *test)
{
	int ret;
	struct scatterlist sg;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	sg_init_one(&sg, test->buffer, 512);

	return mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 1);
}

static int mmc_test_basic_read(struct mmc_test_card *test)
{
	int ret;
	struct scatterlist sg;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	sg_init_one(&sg, test->buffer, 512);

	return mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 0);
}

static int mmc_test_verify_write(struct mmc_test_card *test)
{
	struct scatterlist sg;

	sg_init_one(&sg, test->buffer, 512);

	return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
}

static int mmc_test_verify_read(struct mmc_test_card *test)
{
	struct scatterlist sg;

	sg_init_one(&sg, test->buffer, 512);

	return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
}

static int mmc_test_multi_write(struct mmc_test_card *test)
{
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	sg_init_one(&sg, test->buffer, size);

	return mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 1);
}
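
/*
 * Editorial note on the size computation above (repeated in the
 * analogous multi-block tests below): the transfer is capped by the
 * three host limits - max_req_size, max_seg_size and
 * max_blk_count * 512 - and must still be at least 1024 bytes so that
 * it spans two 512-byte blocks; anything smaller cannot exercise a
 * multiple-block transfer and the test is reported as unsupported.
 */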

static int mmc_test_multi_read(struct mmc_test_card *test)
{
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	sg_init_one(&sg, test->buffer, size);

	return mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 0);
}

static int mmc_test_pow2_write(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	if (!test->card->csd.write_partial)
		return RESULT_UNSUP_CARD;

	for (i = 1; i < 512; i <<= 1) {
		sg_init_one(&sg, test->buffer, i);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_pow2_read(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	if (!test->card->csd.read_partial)
		return RESULT_UNSUP_CARD;

	for (i = 1; i < 512; i <<= 1) {
		sg_init_one(&sg, test->buffer, i);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_weird_write(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	if (!test->card->csd.write_partial)
		return RESULT_UNSUP_CARD;

	for (i = 3; i < 512; i += 7) {
		sg_init_one(&sg, test->buffer, i);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_weird_read(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	if (!test->card->csd.read_partial)
		return RESULT_UNSUP_CARD;

	for (i = 3; i < 512; i += 7) {
		sg_init_one(&sg, test->buffer, i);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_align_write(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	for (i = 1; i < TEST_ALIGN_END; i++) {
		sg_init_one(&sg, test->buffer + i, 512);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_align_read(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	for (i = 1; i < TEST_ALIGN_END; i++) {
		sg_init_one(&sg, test->buffer + i, 512);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_align_multi_write(struct mmc_test_card *test)
{
	int ret, i;
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	for (i = 1; i < TEST_ALIGN_END; i++) {
		sg_init_one(&sg, test->buffer + i, size);
		ret = mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 1);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_align_multi_read(struct mmc_test_card *test)
{
	int ret, i;
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	for (i = 1; i < TEST_ALIGN_END; i++) {
		sg_init_one(&sg, test->buffer + i, size);
		ret = mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 0);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_xfersize_write(struct mmc_test_card *test)
{
	int ret;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	return mmc_test_broken_transfer(test, 1, 512, 1);
}

static int mmc_test_xfersize_read(struct mmc_test_card *test)
{
	int ret;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	return mmc_test_broken_transfer(test, 1, 512, 0);
}

static int mmc_test_multi_xfersize_write(struct mmc_test_card *test)
{
	int ret;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	return mmc_test_broken_transfer(test, 2, 512, 1);
}

static int mmc_test_multi_xfersize_read(struct mmc_test_card *test)
{
	int ret;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	return mmc_test_broken_transfer(test, 2, 512, 0);
}

#ifdef CONFIG_HIGHMEM

static int mmc_test_write_high(struct mmc_test_card *test)
{
	struct scatterlist sg;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, test->highmem, 512, 0);

	return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
}

static int mmc_test_read_high(struct mmc_test_card *test)
{
	struct scatterlist sg;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, test->highmem, 512, 0);

	return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
}

static int mmc_test_multi_write_high(struct mmc_test_card *test)
{
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, test->highmem, size, 0);

	return mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 1);
}

static int mmc_test_multi_read_high(struct mmc_test_card *test)
{
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, test->highmem, size, 0);

	return mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 0);
}

#else

static int mmc_test_no_highmem(struct mmc_test_card *test)
{
	pr_info("%s: Highmem not configured - test skipped\n",
		mmc_hostname(test->card->host));
	return 0;
}

#endif /* CONFIG_HIGHMEM */

/*
 * Map sz bytes so that it can be transferred.
 */
static int mmc_test_area_map(struct mmc_test_card *test, unsigned long sz,
			     int max_scatter, int min_sg_len)
{
	struct mmc_test_area *t = &test->area;
	int err;

	t->blocks = sz >> 9;

	if (max_scatter) {
		err = mmc_test_map_sg_max_scatter(t->mem, sz, t->sg,
						  t->max_segs, t->max_seg_sz,
						  &t->sg_len);
	} else {
		err = mmc_test_map_sg(t->mem, sz, t->sg, 1, t->max_segs,
				      t->max_seg_sz, &t->sg_len, min_sg_len);
	}
	if (err)
		pr_info("%s: Failed to map sg list\n",
			mmc_hostname(test->card->host));
	return err;
}

/*
 * Transfer bytes mapped by mmc_test_area_map().
 */
static int mmc_test_area_transfer(struct mmc_test_card *test,
				  unsigned int dev_addr, int write)
{
	struct mmc_test_area *t = &test->area;

	return mmc_test_simple_transfer(test, t->sg, t->sg_len, dev_addr,
					t->blocks, 512, write);
}

/*
 * Map and transfer bytes for multiple transfers.
 */
static int mmc_test_area_io_seq(struct mmc_test_card *test, unsigned long sz,
				unsigned int dev_addr, int write,
				int max_scatter, int timed, int count,
				bool nonblock, int min_sg_len)
{
	struct timespec ts1, ts2;
	int ret = 0;
	int i;
	struct mmc_test_area *t = &test->area;

	/*
	 * In the case of a maximally scattered transfer, the maximum transfer
	 * size is further limited by using PAGE_SIZE segments.
	 */
	if (max_scatter) {
		struct mmc_test_area *t = &test->area;
		unsigned long max_tfr;

		if (t->max_seg_sz >= PAGE_SIZE)
			max_tfr = t->max_segs * PAGE_SIZE;
		else
			max_tfr = t->max_segs * t->max_seg_sz;
		if (sz > max_tfr)
			sz = max_tfr;
	}

	ret = mmc_test_area_map(test, sz, max_scatter, min_sg_len);
	if (ret)
		return ret;

	if (timed)
		getnstimeofday(&ts1);
	if (nonblock)
		ret = mmc_test_nonblock_transfer(test, t->sg, t->sg_len,
				 dev_addr, t->blocks, 512, write, count);
	else
		for (i = 0; i < count && ret == 0; i++) {
			ret = mmc_test_area_transfer(test, dev_addr, write);
			dev_addr += sz >> 9;
		}

	if (ret)
		return ret;

	if (timed)
		getnstimeofday(&ts2);

	if (timed)
		mmc_test_print_avg_rate(test, sz, count, &ts1, &ts2);

	return 0;
}

static int mmc_test_area_io(struct mmc_test_card *test, unsigned long sz,
			    unsigned int dev_addr, int write, int max_scatter,
			    int timed)
{
	return mmc_test_area_io_seq(test, sz, dev_addr, write, max_scatter,
				    timed, 1, false, 0);
}

/*
 * Write the test area entirely.
 */
static int mmc_test_area_fill(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;

	return mmc_test_area_io(test, t->max_tfr, t->dev_addr, 1, 0, 0);
}

/*
 * Erase the test area entirely.
 */
static int mmc_test_area_erase(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;

	if (!mmc_can_erase(test->card))
		return 0;

	return mmc_erase(test->card, t->dev_addr, t->max_sz >> 9,
			 MMC_ERASE_ARG);
}
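
/*
 * Editorial summary of the area-test flow built from the helpers above
 * and below (a sketch, not a call sequence found verbatim in the code):
 *
 *	mmc_test_area_init()     - size the area, allocate memory (prepare)
 *	mmc_test_area_map()      - build the scatterlist for one transfer
 *	mmc_test_area_transfer() - issue the actual read or write
 *	mmc_test_area_cleanup()  - free the scatterlist and memory
 *
 * The mmc_test_area_io*() wrappers combine the map and transfer steps,
 * optionally timing the result.
 */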

/*
 * Cleanup struct mmc_test_area.
 */
static int mmc_test_area_cleanup(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;

	kfree(t->sg);
	mmc_test_free_mem(t->mem);

	return 0;
}

/*
 * Initialize an area for testing large transfers.  The test area is set to the
 * middle of the card because cards may have different characteristics at the
 * front (for FAT file system optimization).  Optionally, the area is erased
 * (if the card supports it) which may improve write performance.  Optionally,
 * the area is filled with data for subsequent read tests.
 */
static int mmc_test_area_init(struct mmc_test_card *test, int erase, int fill)
{
	struct mmc_test_area *t = &test->area;
	unsigned long min_sz = 64 * 1024, sz;
	int ret;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	/* Make the test area size about 4MiB */
	sz = (unsigned long)test->card->pref_erase << 9;
	t->max_sz = sz;
	while (t->max_sz < 4 * 1024 * 1024)
		t->max_sz += sz;
	while (t->max_sz > TEST_AREA_MAX_SIZE && t->max_sz > sz)
		t->max_sz -= sz;

	t->max_segs = test->card->host->max_segs;
	t->max_seg_sz = test->card->host->max_seg_size;
	t->max_seg_sz -= t->max_seg_sz % 512;

	t->max_tfr = t->max_sz;
	if (t->max_tfr >> 9 > test->card->host->max_blk_count)
		t->max_tfr = test->card->host->max_blk_count << 9;
	if (t->max_tfr > test->card->host->max_req_size)
		t->max_tfr = test->card->host->max_req_size;
	if (t->max_tfr / t->max_seg_sz > t->max_segs)
		t->max_tfr = t->max_segs * t->max_seg_sz;

	/*
	 * Try to allocate enough memory for a max. sized transfer.  Less is OK
	 * because the same memory can be mapped into the scatterlist more than
	 * once.  Also, take into account the limits imposed on scatterlist
	 * segments by the host driver.
	 */
	t->mem = mmc_test_alloc_mem(min_sz, t->max_tfr, t->max_segs,
				    t->max_seg_sz);
	if (!t->mem)
		return -ENOMEM;

	t->sg = kmalloc_array(t->max_segs, sizeof(*t->sg), GFP_KERNEL);
	if (!t->sg) {
		ret = -ENOMEM;
		goto out_free;
	}

	t->dev_addr = mmc_test_capacity(test->card) / 2;
	t->dev_addr -= t->dev_addr % (t->max_sz >> 9);

	if (erase) {
		ret = mmc_test_area_erase(test);
		if (ret)
			goto out_free;
	}

	if (fill) {
		ret = mmc_test_area_fill(test);
		if (ret)
			goto out_free;
	}

	return 0;

out_free:
	mmc_test_area_cleanup(test);
	return ret;
}

/*
 * Prepare for large transfers.  Do not erase the test area.
 */
static int mmc_test_area_prepare(struct mmc_test_card *test)
{
	return mmc_test_area_init(test, 0, 0);
}

/*
 * Prepare for large transfers.  Do erase the test area.
 */
static int mmc_test_area_prepare_erase(struct mmc_test_card *test)
{
	return mmc_test_area_init(test, 1, 0);
}

/*
 * Prepare for large transfers.  Erase and fill the test area.
 */
static int mmc_test_area_prepare_fill(struct mmc_test_card *test)
{
	return mmc_test_area_init(test, 1, 1);
}
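
/*
 * Editorial worked example for the sizing in mmc_test_area_init()
 * (illustrative numbers): with pref_erase = 1024 sectors, sz becomes
 * 512 KiB and max_sz grows in 512 KiB steps to exactly 4 MiB; with
 * pref_erase = 8192 sectors (4 MiB) it is 4 MiB immediately.  max_tfr
 * then shrinks from max_sz to respect max_blk_count, max_req_size and
 * the max_segs * max_seg_sz product of the host.
 */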

/*
 * Test best-case performance.  Best-case performance is expected from
 * a single large transfer.
 *
 * An additional option (max_scatter) allows the measurement of the same
 * transfer but with no contiguous pages in the scatter list.  This tests
 * the efficiency of DMA to handle scattered pages.
 */
static int mmc_test_best_performance(struct mmc_test_card *test, int write,
				     int max_scatter)
{
	struct mmc_test_area *t = &test->area;

	return mmc_test_area_io(test, t->max_tfr, t->dev_addr, write,
				max_scatter, 1);
}

/*
 * Best-case read performance.
 */
static int mmc_test_best_read_performance(struct mmc_test_card *test)
{
	return mmc_test_best_performance(test, 0, 0);
}

/*
 * Best-case write performance.
 */
static int mmc_test_best_write_performance(struct mmc_test_card *test)
{
	return mmc_test_best_performance(test, 1, 0);
}

/*
 * Best-case read performance into scattered pages.
 */
static int mmc_test_best_read_perf_max_scatter(struct mmc_test_card *test)
{
	return mmc_test_best_performance(test, 0, 1);
}

/*
 * Best-case write performance from scattered pages.
 */
static int mmc_test_best_write_perf_max_scatter(struct mmc_test_card *test)
{
	return mmc_test_best_performance(test, 1, 1);
}

/*
 * Single read performance by transfer size.
 */
static int mmc_test_profile_read_perf(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;
	unsigned long sz;
	unsigned int dev_addr;
	int ret;

	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
		dev_addr = t->dev_addr + (sz >> 9);
		ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
		if (ret)
			return ret;
	}
	sz = t->max_tfr;
	dev_addr = t->dev_addr;
	return mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
}

/*
 * Single write performance by transfer size.
 */
static int mmc_test_profile_write_perf(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;
	unsigned long sz;
	unsigned int dev_addr;
	int ret;

	ret = mmc_test_area_erase(test);
	if (ret)
		return ret;
	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
		dev_addr = t->dev_addr + (sz >> 9);
		ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
		if (ret)
			return ret;
	}
	ret = mmc_test_area_erase(test);
	if (ret)
		return ret;
	sz = t->max_tfr;
	dev_addr = t->dev_addr;
	return mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
}

/*
 * Single trim performance by transfer size.
 */
static int mmc_test_profile_trim_perf(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;
	unsigned long sz;
	unsigned int dev_addr;
	struct timespec ts1, ts2;
	int ret;

	if (!mmc_can_trim(test->card))
		return RESULT_UNSUP_CARD;

	if (!mmc_can_erase(test->card))
		return RESULT_UNSUP_HOST;

	for (sz = 512; sz < t->max_sz; sz <<= 1) {
		dev_addr = t->dev_addr + (sz >> 9);
		getnstimeofday(&ts1);
		ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
		if (ret)
			return ret;
		getnstimeofday(&ts2);
		mmc_test_print_rate(test, sz, &ts1, &ts2);
	}
	dev_addr = t->dev_addr;
	getnstimeofday(&ts1);
	ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
	if (ret)
		return ret;
	getnstimeofday(&ts2);
	mmc_test_print_rate(test, sz, &ts1, &ts2);
	return 0;
}

static int mmc_test_seq_read_perf(struct mmc_test_card *test, unsigned long sz)
{
	struct mmc_test_area *t = &test->area;
	unsigned int dev_addr, i, cnt;
	struct timespec ts1, ts2;
	int ret;

	cnt = t->max_sz / sz;
	dev_addr = t->dev_addr;
	getnstimeofday(&ts1);
	for (i = 0; i < cnt; i++) {
		ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 0);
		if (ret)
			return ret;
		dev_addr += (sz >> 9);
	}
	getnstimeofday(&ts2);
	mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
	return 0;
}

/*
 * Consecutive read performance by transfer size.
 */
static int mmc_test_profile_seq_read_perf(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;
	unsigned long sz;
	int ret;

	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
		ret = mmc_test_seq_read_perf(test, sz);
		if (ret)
			return ret;
	}
	sz = t->max_tfr;
	return mmc_test_seq_read_perf(test, sz);
}

static int mmc_test_seq_write_perf(struct mmc_test_card *test, unsigned long sz)
{
	struct mmc_test_area *t = &test->area;
	unsigned int dev_addr, i, cnt;
	struct timespec ts1, ts2;
	int ret;

	ret = mmc_test_area_erase(test);
	if (ret)
		return ret;
	cnt = t->max_sz / sz;
	dev_addr = t->dev_addr;
	getnstimeofday(&ts1);
	for (i = 0; i < cnt; i++) {
		ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 0);
		if (ret)
			return ret;
		dev_addr += (sz >> 9);
	}
	getnstimeofday(&ts2);
	mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
	return 0;
}

/*
 * Consecutive write performance by transfer size.
 */
static int mmc_test_profile_seq_write_perf(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;
	unsigned long sz;
	int ret;

	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
		ret = mmc_test_seq_write_perf(test, sz);
		if (ret)
			return ret;
	}
	sz = t->max_tfr;
	return mmc_test_seq_write_perf(test, sz);
}

/*
 * Consecutive trim performance by transfer size.
 */
static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;
	unsigned long sz;
	unsigned int dev_addr, i, cnt;
	struct timespec ts1, ts2;
	int ret;

	if (!mmc_can_trim(test->card))
		return RESULT_UNSUP_CARD;

	if (!mmc_can_erase(test->card))
		return RESULT_UNSUP_HOST;

	for (sz = 512; sz <= t->max_sz; sz <<= 1) {
		ret = mmc_test_area_erase(test);
		if (ret)
			return ret;
		ret = mmc_test_area_fill(test);
		if (ret)
			return ret;
		cnt = t->max_sz / sz;
		dev_addr = t->dev_addr;
		getnstimeofday(&ts1);
		for (i = 0; i < cnt; i++) {
			ret = mmc_erase(test->card, dev_addr, sz >> 9,
					MMC_TRIM_ARG);
			if (ret)
				return ret;
			dev_addr += (sz >> 9);
		}
		getnstimeofday(&ts2);
		mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
	}
	return 0;
}

static unsigned int rnd_next = 1;

static unsigned int mmc_test_rnd_num(unsigned int rnd_cnt)
{
	uint64_t r;

	rnd_next = rnd_next * 1103515245 + 12345;
	r = (rnd_next >> 16) & 0x7fff;
	return (r * rnd_cnt) >> 15;
}
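
/*
 * Editorial note: mmc_test_rnd_num() is the classic C-standard linear
 * congruential generator (the rand() example from K&R).  The top bits
 * of the 32-bit state form a 15-bit value r in [0, 32767], which
 * (r * rnd_cnt) >> 15 scales to [0, rnd_cnt).  A deterministic
 * generator is deliberate: re-seeding rnd_next replays the exact same
 * I/O pattern, which mmc_test_random_perf() below relies on when it
 * runs each write test twice.
 */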

static int mmc_test_rnd_perf(struct mmc_test_card *test, int write, int print,
			     unsigned long sz)
{
	unsigned int dev_addr, cnt, rnd_addr, range1, range2, last_ea = 0, ea;
	unsigned int ssz;
	struct timespec ts1, ts2, ts;
	int ret;

	ssz = sz >> 9;

	rnd_addr = mmc_test_capacity(test->card) / 4;
	range1 = rnd_addr / test->card->pref_erase;
	range2 = range1 / ssz;

	getnstimeofday(&ts1);
	for (cnt = 0; cnt < UINT_MAX; cnt++) {
		getnstimeofday(&ts2);
		ts = timespec_sub(ts2, ts1);
		if (ts.tv_sec >= 10)
			break;
		ea = mmc_test_rnd_num(range1);
		if (ea == last_ea)
			ea -= 1;
		last_ea = ea;
		dev_addr = rnd_addr + test->card->pref_erase * ea +
			   ssz * mmc_test_rnd_num(range2);
		ret = mmc_test_area_io(test, sz, dev_addr, write, 0, 0);
		if (ret)
			return ret;
	}
	if (print)
		mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
	return 0;
}

static int mmc_test_random_perf(struct mmc_test_card *test, int write)
{
	struct mmc_test_area *t = &test->area;
	unsigned int next;
	unsigned long sz;
	int ret;

	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
		/*
		 * When writing, try to get more consistent results by running
		 * the test twice with exactly the same I/O but outputting the
		 * results only for the 2nd run.
		 */
		if (write) {
			next = rnd_next;
			ret = mmc_test_rnd_perf(test, write, 0, sz);
			if (ret)
				return ret;
			rnd_next = next;
		}
		ret = mmc_test_rnd_perf(test, write, 1, sz);
		if (ret)
			return ret;
	}
	sz = t->max_tfr;
	if (write) {
		next = rnd_next;
		ret = mmc_test_rnd_perf(test, write, 0, sz);
		if (ret)
			return ret;
		rnd_next = next;
	}
	return mmc_test_rnd_perf(test, write, 1, sz);
}

/*
 * Random read performance by transfer size.
 */
static int mmc_test_random_read_perf(struct mmc_test_card *test)
{
	return mmc_test_random_perf(test, 0);
}

/*
 * Random write performance by transfer size.
 */
static int mmc_test_random_write_perf(struct mmc_test_card *test)
{
	return mmc_test_random_perf(test, 1);
}

static int mmc_test_seq_perf(struct mmc_test_card *test, int write,
			     unsigned int tot_sz, int max_scatter)
{
	struct mmc_test_area *t = &test->area;
	unsigned int dev_addr, i, cnt, sz, ssz;
	struct timespec ts1, ts2;
	int ret;

	sz = t->max_tfr;

	/*
	 * In the case of a maximally scattered transfer, the maximum transfer
	 * size is further limited by using PAGE_SIZE segments.
	 */
	if (max_scatter) {
		unsigned long max_tfr;

		if (t->max_seg_sz >= PAGE_SIZE)
			max_tfr = t->max_segs * PAGE_SIZE;
		else
			max_tfr = t->max_segs * t->max_seg_sz;
		if (sz > max_tfr)
			sz = max_tfr;
	}

	ssz = sz >> 9;
	dev_addr = mmc_test_capacity(test->card) / 4;
	if (tot_sz > dev_addr << 9)
		tot_sz = dev_addr << 9;
	cnt = tot_sz / sz;
	dev_addr &= 0xffff0000; /* Round to 32MiB (0x10000-sector) boundary */

	getnstimeofday(&ts1);
	for (i = 0; i < cnt; i++) {
		ret = mmc_test_area_io(test, sz, dev_addr, write,
				       max_scatter, 0);
		if (ret)
			return ret;
		dev_addr += ssz;
	}
	getnstimeofday(&ts2);

	mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);

	return 0;
}

static int mmc_test_large_seq_perf(struct mmc_test_card *test, int write)
{
	int ret, i;

	for (i = 0; i < 10; i++) {
		ret = mmc_test_seq_perf(test, write, 10 * 1024 * 1024, 1);
		if (ret)
			return ret;
	}
	for (i = 0; i < 5; i++) {
		ret = mmc_test_seq_perf(test, write, 100 * 1024 * 1024, 1);
		if (ret)
			return ret;
	}
	for (i = 0; i < 3; i++) {
		ret = mmc_test_seq_perf(test, write, 1000 * 1024 * 1024, 1);
		if (ret)
			return ret;
	}

	return ret;
}

/*
 * Large sequential read performance.
 */
static int mmc_test_large_seq_read_perf(struct mmc_test_card *test)
{
	return mmc_test_large_seq_perf(test, 0);
}

/*
 * Large sequential write performance.
 */
static int mmc_test_large_seq_write_perf(struct mmc_test_card *test)
{
	return mmc_test_large_seq_perf(test, 1);
}

static int mmc_test_rw_multiple(struct mmc_test_card *test,
				struct mmc_test_multiple_rw *tdata,
				unsigned int reqsize, unsigned int size,
				int min_sg_len)
{
	unsigned int dev_addr;
	struct mmc_test_area *t = &test->area;
	int ret = 0;

	/* Set up test area */
	if (size > mmc_test_capacity(test->card) / 2 * 512)
		size = mmc_test_capacity(test->card) / 2 * 512;
	if (reqsize > t->max_tfr)
		reqsize = t->max_tfr;
	dev_addr = mmc_test_capacity(test->card) / 4;
	if ((dev_addr & 0xffff0000))
		dev_addr &= 0xffff0000; /* Round to 32MiB boundary */
	else
		dev_addr &= 0xfffff800; /* Round to 1MiB boundary */
	if (!dev_addr)
		goto err;

	if (reqsize > size)
		return 0;

	/* prepare test area */
	if (mmc_can_erase(test->card) &&
	    tdata->prepare & MMC_TEST_PREP_ERASE) {
		ret = mmc_erase(test->card, dev_addr,
				size / 512, MMC_SECURE_ERASE_ARG);
		if (ret)
			ret = mmc_erase(test->card, dev_addr,
					size / 512, MMC_ERASE_ARG);
		if (ret)
			goto err;
	}

	/* Run test */
	ret = mmc_test_area_io_seq(test, reqsize, dev_addr,
				   tdata->do_write, 0, 1, size / reqsize,
				   tdata->do_nonblock_req, min_sg_len);
	if (ret)
		goto err;

	return ret;
err:
	pr_info("[%s] error\n", __func__);
	return ret;
}

static int mmc_test_rw_multiple_size(struct mmc_test_card *test,
				     struct mmc_test_multiple_rw *rw)
{
	int ret = 0;
	int i;
	void *pre_req = test->card->host->ops->pre_req;
	void *post_req = test->card->host->ops->post_req;

	if (rw->do_nonblock_req &&
	    ((!pre_req && post_req) || (pre_req && !post_req))) {
		pr_info("error: only one of pre/post is defined\n");
		return -EINVAL;
	}

	for (i = 0 ; i < rw->len && ret == 0; i++) {
		ret = mmc_test_rw_multiple(test, rw, rw->bs[i], rw->size, 0);
		if (ret)
			break;
	}
	return ret;
}

static int mmc_test_rw_multiple_sg_len(struct mmc_test_card *test,
				       struct mmc_test_multiple_rw *rw)
{
	int ret = 0;
	int i;

	for (i = 0 ; i < rw->len && ret == 0; i++) {
		ret = mmc_test_rw_multiple(test, rw, 512 * 1024, rw->size,
					   rw->sg_len[i]);
		if (ret)
			break;
	}
	return ret;
}

/*
 * Multiple blocking write 4k to 4 MB chunks
 */
static int mmc_test_profile_mult_write_blocking_perf(struct mmc_test_card *test)
{
	unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
			     1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
	struct mmc_test_multiple_rw test_data = {
		.bs = bs,
		.size = TEST_AREA_MAX_SIZE,
		.len = ARRAY_SIZE(bs),
		.do_write = true,
		.do_nonblock_req = false,
		.prepare = MMC_TEST_PREP_ERASE,
	};

	return mmc_test_rw_multiple_size(test, &test_data);
}

/*
 * Multiple non-blocking write 4k to 4 MB chunks
 */
static int mmc_test_profile_mult_write_nonblock_perf(struct mmc_test_card *test)
{
	unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
			     1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
	struct mmc_test_multiple_rw test_data = {
		.bs = bs,
		.size = TEST_AREA_MAX_SIZE,
		.len = ARRAY_SIZE(bs),
		.do_write = true,
		.do_nonblock_req = true,
/*
 * Multiple blocking write 4k to 4 MB chunks
 */
static int mmc_test_profile_mult_write_blocking_perf(struct mmc_test_card *test)
{
	unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
			     1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
	struct mmc_test_multiple_rw test_data = {
		.bs = bs,
		.size = TEST_AREA_MAX_SIZE,
		.len = ARRAY_SIZE(bs),
		.do_write = true,
		.do_nonblock_req = false,
		.prepare = MMC_TEST_PREP_ERASE,
	};

	return mmc_test_rw_multiple_size(test, &test_data);
}

/*
 * Multiple non-blocking write 4k to 4 MB chunks
 */
static int mmc_test_profile_mult_write_nonblock_perf(struct mmc_test_card *test)
{
	unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
			     1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
	struct mmc_test_multiple_rw test_data = {
		.bs = bs,
		.size = TEST_AREA_MAX_SIZE,
		.len = ARRAY_SIZE(bs),
		.do_write = true,
		.do_nonblock_req = true,
		.prepare = MMC_TEST_PREP_ERASE,
	};

	return mmc_test_rw_multiple_size(test, &test_data);
}

/*
 * Multiple blocking read 4k to 4 MB chunks
 */
static int mmc_test_profile_mult_read_blocking_perf(struct mmc_test_card *test)
{
	unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
			     1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
	struct mmc_test_multiple_rw test_data = {
		.bs = bs,
		.size = TEST_AREA_MAX_SIZE,
		.len = ARRAY_SIZE(bs),
		.do_write = false,
		.do_nonblock_req = false,
		.prepare = MMC_TEST_PREP_NONE,
	};

	return mmc_test_rw_multiple_size(test, &test_data);
}

/*
 * Multiple non-blocking read 4k to 4 MB chunks
 */
static int mmc_test_profile_mult_read_nonblock_perf(struct mmc_test_card *test)
{
	unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
			     1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
	struct mmc_test_multiple_rw test_data = {
		.bs = bs,
		.size = TEST_AREA_MAX_SIZE,
		.len = ARRAY_SIZE(bs),
		.do_write = false,
		.do_nonblock_req = true,
		.prepare = MMC_TEST_PREP_NONE,
	};

	return mmc_test_rw_multiple_size(test, &test_data);
}

/*
 * Multiple blocking write 1 to 512 sg elements
 */
static int mmc_test_profile_sglen_wr_blocking_perf(struct mmc_test_card *test)
{
	unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
				 1 << 7, 1 << 8, 1 << 9};
	struct mmc_test_multiple_rw test_data = {
		.sg_len = sg_len,
		.size = TEST_AREA_MAX_SIZE,
		.len = ARRAY_SIZE(sg_len),
		.do_write = true,
		.do_nonblock_req = false,
		.prepare = MMC_TEST_PREP_ERASE,
	};

	return mmc_test_rw_multiple_sg_len(test, &test_data);
}

/*
 * Multiple non-blocking write 1 to 512 sg elements
 */
static int mmc_test_profile_sglen_wr_nonblock_perf(struct mmc_test_card *test)
{
	unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
				 1 << 7, 1 << 8, 1 << 9};
	struct mmc_test_multiple_rw test_data = {
		.sg_len = sg_len,
		.size = TEST_AREA_MAX_SIZE,
		.len = ARRAY_SIZE(sg_len),
		.do_write = true,
		.do_nonblock_req = true,
		.prepare = MMC_TEST_PREP_ERASE,
	};

	return mmc_test_rw_multiple_sg_len(test, &test_data);
}

/*
 * Multiple blocking read 1 to 512 sg elements
 */
static int mmc_test_profile_sglen_r_blocking_perf(struct mmc_test_card *test)
{
	unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
				 1 << 7, 1 << 8, 1 << 9};
	struct mmc_test_multiple_rw test_data = {
		.sg_len = sg_len,
		.size = TEST_AREA_MAX_SIZE,
		.len = ARRAY_SIZE(sg_len),
		.do_write = false,
		.do_nonblock_req = false,
		.prepare = MMC_TEST_PREP_NONE,
	};

	return mmc_test_rw_multiple_sg_len(test, &test_data);
}

/*
 * Multiple non-blocking read 1 to 512 sg elements
 */
static int mmc_test_profile_sglen_r_nonblock_perf(struct mmc_test_card *test)
{
	unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
				 1 << 7, 1 << 8, 1 << 9};
	struct mmc_test_multiple_rw test_data = {
		.sg_len = sg_len,
		.size = TEST_AREA_MAX_SIZE,
		.len = ARRAY_SIZE(sg_len),
		.do_write = false,
		.do_nonblock_req = true,
		.prepare = MMC_TEST_PREP_NONE,
	};

	return mmc_test_rw_multiple_sg_len(test, &test_data);
}

/*
 * eMMC hardware reset.
 */
static int mmc_test_reset(struct mmc_test_card *test)
{
	struct mmc_card *card = test->card;
	struct mmc_host *host = card->host;
	int err;

	err = mmc_hw_reset(host);
	if (!err)
		return RESULT_OK;
	if (err == -EOPNOTSUPP)
		return RESULT_UNSUP_HOST;

	return RESULT_FAIL;
}

/* All request components needed for one test transfer, in one allocation */
struct mmc_test_req {
	struct mmc_request mrq;
	struct mmc_command sbc;
	struct mmc_command cmd;
	struct mmc_command stop;
	struct mmc_command status;
	struct mmc_data data;
};

static struct mmc_test_req *mmc_test_req_alloc(void)
{
	struct mmc_test_req *rq = kzalloc(sizeof(*rq), GFP_KERNEL);

	if (rq) {
		rq->mrq.cmd = &rq->cmd;
		rq->mrq.data = &rq->data;
		rq->mrq.stop = &rq->stop;
	}

	return rq;
}

static int mmc_test_send_status(struct mmc_test_card *test,
				struct mmc_command *cmd)
{
	memset(cmd, 0, sizeof(*cmd));

	cmd->opcode = MMC_SEND_STATUS;
	if (!mmc_host_is_spi(test->card->host))
		cmd->arg = test->card->rca << 16;
	cmd->flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;

	return mmc_wait_for_cmd(test->card->host, cmd, 0);
}

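/*
 * Perform a data transfer and, while it is ongoing, poll the card with CMD13
 * (SEND_STATUS).  @use_sbc selects CMD23 (SET_BLOCK_COUNT), @repeat_cmd keeps
 * polling until the card returns to the Tran state, @write selects the
 * transfer direction and @use_areq uses the asynchronous request mechanism.
 */
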
static int mmc_test_ongoing_transfer(struct mmc_test_card *test,
				     unsigned int dev_addr, int use_sbc,
				     int repeat_cmd, int write, int use_areq)
{
	struct mmc_test_req *rq = mmc_test_req_alloc();
	struct mmc_host *host = test->card->host;
	struct mmc_test_area *t = &test->area;
	struct mmc_test_async_req test_areq = { .test = test };
	struct mmc_request *mrq;
	unsigned long timeout;
	bool expired = false;
	enum mmc_blk_status blkstat = MMC_BLK_SUCCESS;
	int ret = 0, cmd_ret;
	u32 status = 0;
	int count = 0;

	if (!rq)
		return -ENOMEM;

	mrq = &rq->mrq;
	if (use_sbc)
		mrq->sbc = &rq->sbc;
	mrq->cap_cmd_during_tfr = true;

	test_areq.areq.mrq = mrq;
	test_areq.areq.err_check = mmc_test_check_result_async;

	mmc_test_prepare_mrq(test, mrq, t->sg, t->sg_len, dev_addr, t->blocks,
			     512, write);

	if (use_sbc && t->blocks > 1 && !mrq->sbc) {
		ret = mmc_host_cmd23(host) ?
		      RESULT_UNSUP_CARD :
		      RESULT_UNSUP_HOST;
		goto out_free;
	}

	/* Start ongoing data request */
	if (use_areq) {
		mmc_start_areq(host, &test_areq.areq, &blkstat);
		if (blkstat != MMC_BLK_SUCCESS) {
			ret = RESULT_FAIL;
			goto out_free;
		}
	} else {
		mmc_wait_for_req(host, mrq);
	}

	timeout = jiffies + msecs_to_jiffies(3000);
	do {
		count += 1;

		/* Send status command while data transfer in progress */
		cmd_ret = mmc_test_send_status(test, &rq->status);
		if (cmd_ret)
			break;

		status = rq->status.resp[0];
		if (status & R1_ERROR) {
			cmd_ret = -EIO;
			break;
		}

		if (mmc_is_req_done(host, mrq))
			break;

		expired = time_after(jiffies, timeout);
		if (expired) {
			pr_info("%s: timeout waiting for Tran state status %#x\n",
				mmc_hostname(host), status);
			cmd_ret = -ETIMEDOUT;
			break;
		}
	} while (repeat_cmd && R1_CURRENT_STATE(status) != R1_STATE_TRAN);

	/* Wait for data request to complete */
	if (use_areq) {
		mmc_start_areq(host, NULL, &blkstat);
		if (blkstat != MMC_BLK_SUCCESS)
			ret = RESULT_FAIL;
	} else {
		mmc_wait_for_req_done(host, mrq);
	}

	/*
	 * For cap_cmd_during_tfr request, upper layer must send stop if
	 * required.
	 */
	if (mrq->data->stop && (mrq->data->error || !mrq->sbc)) {
		if (ret)
			mmc_wait_for_cmd(host, mrq->data->stop, 0);
		else
			ret = mmc_wait_for_cmd(host, mrq->data->stop, 0);
	}

	if (ret)
		goto out_free;

	if (cmd_ret) {
		pr_info("%s: Send Status failed: status %#x, error %d\n",
			mmc_hostname(host), status, cmd_ret);
	}

	ret = mmc_test_check_result(test, mrq);
	if (ret)
		goto out_free;

	ret = mmc_test_wait_busy(test);
	if (ret)
		goto out_free;

	if (repeat_cmd && (t->blocks + 1) << 9 > t->max_tfr)
		pr_info("%s: %d commands completed during transfer of %u blocks\n",
			mmc_hostname(host), count, t->blocks);

	if (cmd_ret)
		ret = cmd_ret;
out_free:
	kfree(rq);

	return ret;
}

static int __mmc_test_cmds_during_tfr(struct mmc_test_card *test,
				      unsigned long sz, int use_sbc, int write,
				      int use_areq)
{
	struct mmc_test_area *t = &test->area;
	int ret;

	if (!(test->card->host->caps & MMC_CAP_CMD_DURING_TFR))
		return RESULT_UNSUP_HOST;

	ret = mmc_test_area_map(test, sz, 0, 0);
	if (ret)
		return ret;

	ret = mmc_test_ongoing_transfer(test, t->dev_addr, use_sbc, 0, write,
					use_areq);
	if (ret)
		return ret;

	return mmc_test_ongoing_transfer(test, t->dev_addr, use_sbc, 1, write,
					 use_areq);
}

static int mmc_test_cmds_during_tfr(struct mmc_test_card *test, int use_sbc,
				    int write, int use_areq)
{
	struct mmc_test_area *t = &test->area;
	unsigned long sz;
	int ret;

	for (sz = 512; sz <= t->max_tfr; sz += 512) {
		ret = __mmc_test_cmds_during_tfr(test, sz, use_sbc, write,
						 use_areq);
		if (ret)
			return ret;
	}
	return 0;
}

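/*
 * The wrappers below cover the combinations of CMD23 use, transfer direction
 * and non-blocking (asynchronous) requests exposed as test cases.
 */
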
/*
 * Commands during read - no Set Block Count (CMD23).
 */
static int mmc_test_cmds_during_read(struct mmc_test_card *test)
{
	return mmc_test_cmds_during_tfr(test, 0, 0, 0);
}

/*
 * Commands during write - no Set Block Count (CMD23).
 */
static int mmc_test_cmds_during_write(struct mmc_test_card *test)
{
	return mmc_test_cmds_during_tfr(test, 0, 1, 0);
}

/*
 * Commands during read - use Set Block Count (CMD23).
 */
static int mmc_test_cmds_during_read_cmd23(struct mmc_test_card *test)
{
	return mmc_test_cmds_during_tfr(test, 1, 0, 0);
}

/*
 * Commands during write - use Set Block Count (CMD23).
 */
static int mmc_test_cmds_during_write_cmd23(struct mmc_test_card *test)
{
	return mmc_test_cmds_during_tfr(test, 1, 1, 0);
}

/*
 * Commands during non-blocking read - use Set Block Count (CMD23).
 */
static int mmc_test_cmds_during_read_cmd23_nonblock(struct mmc_test_card *test)
{
	return mmc_test_cmds_during_tfr(test, 1, 0, 1);
}

/*
 * Commands during non-blocking write - use Set Block Count (CMD23).
 */
static int mmc_test_cmds_during_write_cmd23_nonblock(struct mmc_test_card *test)
{
	return mmc_test_cmds_during_tfr(test, 1, 1, 1);
}

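/*
 * Table of test cases run through the debugfs "test" file.  The number
 * written to that file selects the (1-based) entry; 0 runs every test.
 * .prepare and .cleanup, when set, bracket each .run invocation.
 */
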
static const struct mmc_test_case mmc_test_cases[] = {
	{
		.name = "Basic write (no data verification)",
		.run = mmc_test_basic_write,
	},

	{
		.name = "Basic read (no data verification)",
		.run = mmc_test_basic_read,
	},

	{
		.name = "Basic write (with data verification)",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_verify_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Basic read (with data verification)",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_verify_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Multi-block write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_multi_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Multi-block read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_multi_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Power of two block writes",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_pow2_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Power of two block reads",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_pow2_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Weird sized block writes",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_weird_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Weird sized block reads",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_weird_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Badly aligned write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_align_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Badly aligned read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_align_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Badly aligned multi-block write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_align_multi_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Badly aligned multi-block read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_align_multi_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Correct xfer_size at write (start failure)",
		.run = mmc_test_xfersize_write,
	},

	{
		.name = "Correct xfer_size at read (start failure)",
		.run = mmc_test_xfersize_read,
	},

	{
		.name = "Correct xfer_size at write (midway failure)",
		.run = mmc_test_multi_xfersize_write,
	},

	{
		.name = "Correct xfer_size at read (midway failure)",
		.run = mmc_test_multi_xfersize_read,
	},

#ifdef CONFIG_HIGHMEM

	{
		.name = "Highmem write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_write_high,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Highmem read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_read_high,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Multi-block highmem write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_multi_write_high,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Multi-block highmem read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_multi_read_high,
		.cleanup = mmc_test_cleanup,
	},

#else

	{
		.name = "Highmem write",
		.run = mmc_test_no_highmem,
	},

	{
		.name = "Highmem read",
		.run = mmc_test_no_highmem,
	},

	{
		.name = "Multi-block highmem write",
		.run = mmc_test_no_highmem,
	},

	{
		.name = "Multi-block highmem read",
		.run = mmc_test_no_highmem,
	},

#endif /* CONFIG_HIGHMEM */

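	/*
	 * Performance tests: these run against the dedicated test area and
	 * log transfer measurements (see mtf_test_show()) alongside the
	 * result code.
	 */
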
	{
		.name = "Best-case read performance",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_best_read_performance,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Best-case write performance",
		.prepare = mmc_test_area_prepare_erase,
		.run = mmc_test_best_write_performance,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Best-case read performance into scattered pages",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_best_read_perf_max_scatter,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Best-case write performance from scattered pages",
		.prepare = mmc_test_area_prepare_erase,
		.run = mmc_test_best_write_perf_max_scatter,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Single read performance by transfer size",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_profile_read_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Single write performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_write_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Single trim performance by transfer size",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_profile_trim_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Consecutive read performance by transfer size",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_profile_seq_read_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Consecutive write performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_seq_write_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Consecutive trim performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_seq_trim_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Random read performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_random_read_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Random write performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_random_write_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Large sequential read into scattered pages",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_large_seq_read_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Large sequential write from scattered pages",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_large_seq_write_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Write performance with blocking req 4k to 4MB",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_mult_write_blocking_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Write performance with non-blocking req 4k to 4MB",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_mult_write_nonblock_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Read performance with blocking req 4k to 4MB",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_mult_read_blocking_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Read performance with non-blocking req 4k to 4MB",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_mult_read_nonblock_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Write performance blocking req 1 to 512 sg elems",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_sglen_wr_blocking_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Write performance non-blocking req 1 to 512 sg elems",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_sglen_wr_nonblock_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Read performance blocking req 1 to 512 sg elems",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_sglen_r_blocking_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Read performance non-blocking req 1 to 512 sg elems",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_sglen_r_nonblock_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Reset test",
		.run = mmc_test_reset,
	},

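	/*
	 * The "Commands during ..." tests require a host driver that
	 * advertises MMC_CAP_CMD_DURING_TFR; otherwise they report
	 * UNSUPPORTED (by host).
	 */
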
	{
		.name = "Commands during read - no Set Block Count (CMD23)",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_cmds_during_read,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Commands during write - no Set Block Count (CMD23)",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_cmds_during_write,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Commands during read - use Set Block Count (CMD23)",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_cmds_during_read_cmd23,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Commands during write - use Set Block Count (CMD23)",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_cmds_during_write_cmd23,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Commands during non-blocking read - use Set Block Count (CMD23)",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_cmds_during_read_cmd23_nonblock,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Commands during non-blocking write - use Set Block Count (CMD23)",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_cmds_during_write_cmd23_nonblock,
		.cleanup = mmc_test_area_cleanup,
	},
};

static DEFINE_MUTEX(mmc_test_lock);

static LIST_HEAD(mmc_test_result);

static void mmc_test_run(struct mmc_test_card *test, int testcase)
{
	int i, ret;

	pr_info("%s: Starting tests of card %s...\n",
		mmc_hostname(test->card->host), mmc_card_id(test->card));

	mmc_claim_host(test->card->host);

	for (i = 0; i < ARRAY_SIZE(mmc_test_cases); i++) {
		struct mmc_test_general_result *gr;

		if (testcase && ((i + 1) != testcase))
			continue;

		pr_info("%s: Test case %d. %s...\n",
			mmc_hostname(test->card->host), i + 1,
			mmc_test_cases[i].name);

		if (mmc_test_cases[i].prepare) {
			ret = mmc_test_cases[i].prepare(test);
			if (ret) {
				pr_info("%s: Result: Prepare stage failed! (%d)\n",
					mmc_hostname(test->card->host),
					ret);
				continue;
			}
		}

		gr = kzalloc(sizeof(*gr), GFP_KERNEL);
		if (gr) {
			INIT_LIST_HEAD(&gr->tr_lst);

			/* Record what we already know */
			gr->card = test->card;
			gr->testcase = i;

			/* Append container to the global result list */
			list_add_tail(&gr->link, &mmc_test_result);

			/*
			 * Save the pointer to created container in our private
			 * structure.
			 */
			test->gr = gr;
		}

		ret = mmc_test_cases[i].run(test);
		switch (ret) {
		case RESULT_OK:
			pr_info("%s: Result: OK\n",
				mmc_hostname(test->card->host));
			break;
		case RESULT_FAIL:
			pr_info("%s: Result: FAILED\n",
				mmc_hostname(test->card->host));
			break;
		case RESULT_UNSUP_HOST:
			pr_info("%s: Result: UNSUPPORTED (by host)\n",
				mmc_hostname(test->card->host));
			break;
		case RESULT_UNSUP_CARD:
			pr_info("%s: Result: UNSUPPORTED (by card)\n",
				mmc_hostname(test->card->host));
			break;
		default:
			pr_info("%s: Result: ERROR (%d)\n",
				mmc_hostname(test->card->host), ret);
		}

		/* Save the result */
		if (gr)
			gr->result = ret;

		if (mmc_test_cases[i].cleanup) {
			ret = mmc_test_cases[i].cleanup(test);
			if (ret) {
				pr_info("%s: Warning: Cleanup stage failed! (%d)\n",
					mmc_hostname(test->card->host),
					ret);
			}
		}
	}

	mmc_release_host(test->card->host);

	pr_info("%s: Tests completed.\n",
		mmc_hostname(test->card->host));
}

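/*
 * Drop stored results: called with a specific card from device remove, and
 * with a NULL card from module exit to drop everything.
 */
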
(%d)\n", 3030 mmc_hostname(test->card->host), 3031 ret); 3032 } 3033 } 3034 } 3035 3036 mmc_release_host(test->card->host); 3037 3038 pr_info("%s: Tests completed.\n", 3039 mmc_hostname(test->card->host)); 3040 } 3041 3042 static void mmc_test_free_result(struct mmc_card *card) 3043 { 3044 struct mmc_test_general_result *gr, *grs; 3045 3046 mutex_lock(&mmc_test_lock); 3047 3048 list_for_each_entry_safe(gr, grs, &mmc_test_result, link) { 3049 struct mmc_test_transfer_result *tr, *trs; 3050 3051 if (card && gr->card != card) 3052 continue; 3053 3054 list_for_each_entry_safe(tr, trs, &gr->tr_lst, link) { 3055 list_del(&tr->link); 3056 kfree(tr); 3057 } 3058 3059 list_del(&gr->link); 3060 kfree(gr); 3061 } 3062 3063 mutex_unlock(&mmc_test_lock); 3064 } 3065 3066 static LIST_HEAD(mmc_test_file_test); 3067 3068 static int mtf_test_show(struct seq_file *sf, void *data) 3069 { 3070 struct mmc_card *card = (struct mmc_card *)sf->private; 3071 struct mmc_test_general_result *gr; 3072 3073 mutex_lock(&mmc_test_lock); 3074 3075 list_for_each_entry(gr, &mmc_test_result, link) { 3076 struct mmc_test_transfer_result *tr; 3077 3078 if (gr->card != card) 3079 continue; 3080 3081 seq_printf(sf, "Test %d: %d\n", gr->testcase + 1, gr->result); 3082 3083 list_for_each_entry(tr, &gr->tr_lst, link) { 3084 seq_printf(sf, "%u %d %lu.%09lu %u %u.%02u\n", 3085 tr->count, tr->sectors, 3086 (unsigned long)tr->ts.tv_sec, 3087 (unsigned long)tr->ts.tv_nsec, 3088 tr->rate, tr->iops / 100, tr->iops % 100); 3089 } 3090 } 3091 3092 mutex_unlock(&mmc_test_lock); 3093 3094 return 0; 3095 } 3096 3097 static int mtf_test_open(struct inode *inode, struct file *file) 3098 { 3099 return single_open(file, mtf_test_show, inode->i_private); 3100 } 3101 3102 static ssize_t mtf_test_write(struct file *file, const char __user *buf, 3103 size_t count, loff_t *pos) 3104 { 3105 struct seq_file *sf = (struct seq_file *)file->private_data; 3106 struct mmc_card *card = (struct mmc_card *)sf->private; 3107 struct mmc_test_card *test; 3108 long testcase; 3109 int ret; 3110 3111 ret = kstrtol_from_user(buf, count, 10, &testcase); 3112 if (ret) 3113 return ret; 3114 3115 test = kzalloc(sizeof(*test), GFP_KERNEL); 3116 if (!test) 3117 return -ENOMEM; 3118 3119 /* 3120 * Remove all test cases associated with given card. Thus we have only 3121 * actual data of the last run. 
static int mtf_test_show(struct seq_file *sf, void *data)
{
	struct mmc_card *card = (struct mmc_card *)sf->private;
	struct mmc_test_general_result *gr;

	mutex_lock(&mmc_test_lock);

	list_for_each_entry(gr, &mmc_test_result, link) {
		struct mmc_test_transfer_result *tr;

		if (gr->card != card)
			continue;

		seq_printf(sf, "Test %d: %d\n", gr->testcase + 1, gr->result);

		list_for_each_entry(tr, &gr->tr_lst, link) {
			seq_printf(sf, "%u %d %lu.%09lu %u %u.%02u\n",
				   tr->count, tr->sectors,
				   (unsigned long)tr->ts.tv_sec,
				   (unsigned long)tr->ts.tv_nsec,
				   tr->rate, tr->iops / 100, tr->iops % 100);
		}
	}

	mutex_unlock(&mmc_test_lock);

	return 0;
}

static int mtf_test_open(struct inode *inode, struct file *file)
{
	return single_open(file, mtf_test_show, inode->i_private);
}

static ssize_t mtf_test_write(struct file *file, const char __user *buf,
			      size_t count, loff_t *pos)
{
	struct seq_file *sf = (struct seq_file *)file->private_data;
	struct mmc_card *card = (struct mmc_card *)sf->private;
	struct mmc_test_card *test;
	long testcase;
	int ret;

	ret = kstrtol_from_user(buf, count, 10, &testcase);
	if (ret)
		return ret;

	test = kzalloc(sizeof(*test), GFP_KERNEL);
	if (!test)
		return -ENOMEM;

	/*
	 * Remove all test results associated with this card so that only
	 * data from the last run is kept.
	 */
	mmc_test_free_result(card);

	test->card = card;

	test->buffer = kzalloc(BUFFER_SIZE, GFP_KERNEL);
#ifdef CONFIG_HIGHMEM
	test->highmem = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, BUFFER_ORDER);
#endif

#ifdef CONFIG_HIGHMEM
	if (test->buffer && test->highmem) {
#else
	if (test->buffer) {
#endif
		mutex_lock(&mmc_test_lock);
		mmc_test_run(test, testcase);
		mutex_unlock(&mmc_test_lock);
	}

#ifdef CONFIG_HIGHMEM
	if (test->highmem)
		__free_pages(test->highmem, BUFFER_ORDER);
#endif
	kfree(test->buffer);
	kfree(test);

	return count;
}

static const struct file_operations mmc_test_fops_test = {
	.open = mtf_test_open,
	.read = seq_read,
	.write = mtf_test_write,
	.llseek = seq_lseek,
	.release = single_release,
};

static int mtf_testlist_show(struct seq_file *sf, void *data)
{
	int i;

	mutex_lock(&mmc_test_lock);

	seq_puts(sf, "0:\tRun all tests\n");
	for (i = 0; i < ARRAY_SIZE(mmc_test_cases); i++)
		seq_printf(sf, "%d:\t%s\n", i + 1, mmc_test_cases[i].name);

	mutex_unlock(&mmc_test_lock);

	return 0;
}

static int mtf_testlist_open(struct inode *inode, struct file *file)
{
	return single_open(file, mtf_testlist_show, inode->i_private);
}

static const struct file_operations mmc_test_fops_testlist = {
	.open = mtf_testlist_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static void mmc_test_free_dbgfs_file(struct mmc_card *card)
{
	struct mmc_test_dbgfs_file *df, *dfs;

	mutex_lock(&mmc_test_lock);

	list_for_each_entry_safe(df, dfs, &mmc_test_file_test, link) {
		if (card && df->card != card)
			continue;
		debugfs_remove(df->file);
		list_del(&df->link);
		kfree(df);
	}

	mutex_unlock(&mmc_test_lock);
}

static int __mmc_test_register_dbgfs_file(struct mmc_card *card,
	const char *name, umode_t mode, const struct file_operations *fops)
{
	struct dentry *file = NULL;
	struct mmc_test_dbgfs_file *df;

	if (card->debugfs_root)
		file = debugfs_create_file(name, mode, card->debugfs_root,
					   card, fops);

	if (IS_ERR_OR_NULL(file)) {
		dev_err(&card->dev,
			"Can't create %s. Perhaps debugfs is disabled.\n",
			name);
		return -ENODEV;
	}

	df = kmalloc(sizeof(*df), GFP_KERNEL);
	if (!df) {
		debugfs_remove(file);
		return -ENOMEM;
	}

	df->card = card;
	df->file = file;

	list_add(&df->link, &mmc_test_file_test);
	return 0;
}

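/*
 * Typical usage from userspace (the exact debugfs path depends on the host
 * and card names; mmc0/mmc0:0001 below is illustrative):
 *
 *   cat /sys/kernel/debug/mmc0/mmc0:0001/testlist
 *   echo 0 > /sys/kernel/debug/mmc0/mmc0:0001/test    (run all tests)
 *   cat /sys/kernel/debug/mmc0/mmc0:0001/test         (read back results)
 */
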
static int mmc_test_register_dbgfs_file(struct mmc_card *card)
{
	int ret;

	mutex_lock(&mmc_test_lock);

	ret = __mmc_test_register_dbgfs_file(card, "test", S_IWUSR | S_IRUGO,
					     &mmc_test_fops_test);
	if (ret)
		goto err;

	ret = __mmc_test_register_dbgfs_file(card, "testlist", S_IRUGO,
					     &mmc_test_fops_testlist);
err:
	mutex_unlock(&mmc_test_lock);

	return ret;
}

static int mmc_test_probe(struct mmc_card *card)
{
	int ret;

	if (!mmc_card_mmc(card) && !mmc_card_sd(card))
		return -ENODEV;

	ret = mmc_test_register_dbgfs_file(card);
	if (ret)
		return ret;

	if (card->ext_csd.cmdq_en) {
		mmc_claim_host(card->host);
		ret = mmc_cmdq_disable(card);
		mmc_release_host(card->host);
		if (ret)
			return ret;
	}

	dev_info(&card->dev, "Card claimed for testing.\n");

	return 0;
}

static void mmc_test_remove(struct mmc_card *card)
{
	if (card->reenable_cmdq) {
		mmc_claim_host(card->host);
		mmc_cmdq_enable(card);
		mmc_release_host(card->host);
	}
	mmc_test_free_result(card);
	mmc_test_free_dbgfs_file(card);
}

static void mmc_test_shutdown(struct mmc_card *card)
{
}

static struct mmc_driver mmc_driver = {
	.drv = {
		.name = "mmc_test",
	},
	.probe = mmc_test_probe,
	.remove = mmc_test_remove,
	.shutdown = mmc_test_shutdown,
};

static int __init mmc_test_init(void)
{
	return mmc_register_driver(&mmc_driver);
}

static void __exit mmc_test_exit(void)
{
	/* Clear stalled data if card is still plugged */
	mmc_test_free_result(NULL);
	mmc_test_free_dbgfs_file(NULL);

	mmc_unregister_driver(&mmc_driver);
}

module_init(mmc_test_init);
module_exit(mmc_test_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Multimedia Card (MMC) host test driver");
MODULE_AUTHOR("Pierre Ossman");