xref: /openbmc/linux/drivers/mmc/core/mmc_test.c (revision 8730046c)
/*
 *  Copyright 2007-2008 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 */

#include <linux/mmc/core.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/slab.h>

#include <linux/scatterlist.h>
#include <linux/swap.h>		/* For nr_free_buffer_pages() */
#include <linux/list.h>

#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
#include <linux/module.h>

#define RESULT_OK		0
#define RESULT_FAIL		1
#define RESULT_UNSUP_HOST	2
#define RESULT_UNSUP_CARD	3

#define BUFFER_ORDER		2
#define BUFFER_SIZE		(PAGE_SIZE << BUFFER_ORDER)

#define TEST_ALIGN_END		8

/*
 * Limit the test area size to the maximum MMC HC erase group size.  Note that
 * the maximum SD allocation unit size is just 4MiB.
 */
#define TEST_AREA_MAX_SIZE (128 * 1024 * 1024)

/**
 * struct mmc_test_pages - pages allocated by 'alloc_pages()'.
 * @page: first page in the allocation
 * @order: order of the number of pages allocated
 */
struct mmc_test_pages {
	struct page *page;
	unsigned int order;
};

/**
 * struct mmc_test_mem - allocated memory.
 * @arr: array of allocations
 * @cnt: number of allocations
 */
struct mmc_test_mem {
	struct mmc_test_pages *arr;
	unsigned int cnt;
};

/**
 * struct mmc_test_area - information for performance tests.
 * @max_sz: test area size (in bytes)
 * @dev_addr: address on card at which to do performance tests
 * @max_tfr: maximum transfer size allowed by driver (in bytes)
 * @max_segs: maximum segments allowed by driver in scatterlist @sg
 * @max_seg_sz: maximum segment size allowed by driver
 * @blocks: number of (512 byte) blocks currently mapped by @sg
 * @sg_len: length of currently mapped scatterlist @sg
 * @mem: allocated memory
 * @sg: scatterlist
 */
struct mmc_test_area {
	unsigned long max_sz;
	unsigned int dev_addr;
	unsigned int max_tfr;
	unsigned int max_segs;
	unsigned int max_seg_sz;
	unsigned int blocks;
	unsigned int sg_len;
	struct mmc_test_mem *mem;
	struct scatterlist *sg;
};

/**
 * struct mmc_test_transfer_result - transfer results for performance tests.
 * @link: double-linked list
 * @count: number of groups of sectors to check
 * @sectors: number of sectors to check in one group
 * @ts: time values of transfer
 * @rate: calculated transfer rate
 * @iops: I/O operations per second (times 100)
 */
struct mmc_test_transfer_result {
	struct list_head link;
	unsigned int count;
	unsigned int sectors;
	struct timespec ts;
	unsigned int rate;
	unsigned int iops;
};

/**
 * struct mmc_test_general_result - results for tests.
 * @link: double-linked list
 * @card: card under test
 * @testcase: number of test case
 * @result: result of test run
 * @tr_lst: transfer measurements if any as mmc_test_transfer_result
 */
struct mmc_test_general_result {
	struct list_head link;
	struct mmc_card *card;
	int testcase;
	int result;
	struct list_head tr_lst;
};

/**
 * struct mmc_test_dbgfs_file - debugfs related file.
 * @link: double-linked list
 * @card: card under test
 * @file: file created under debugfs
 */
struct mmc_test_dbgfs_file {
	struct list_head link;
	struct mmc_card *card;
	struct dentry *file;
};

/**
 * struct mmc_test_card - test information.
 * @card: card under test
 * @scratch: transfer buffer
 * @buffer: transfer buffer
 * @highmem: buffer for highmem tests
 * @area: information for performance tests
 * @gr: pointer to results of current testcase
 */
struct mmc_test_card {
	struct mmc_card	*card;

	u8		scratch[BUFFER_SIZE];
	u8		*buffer;
#ifdef CONFIG_HIGHMEM
	struct page	*highmem;
#endif
	struct mmc_test_area		area;
	struct mmc_test_general_result	*gr;
};

enum mmc_test_prep_media {
	MMC_TEST_PREP_NONE = 0,
	MMC_TEST_PREP_WRITE_FULL = 1 << 0,
	MMC_TEST_PREP_ERASE = 1 << 1,
};

struct mmc_test_multiple_rw {
	unsigned int *sg_len;
	unsigned int *bs;
	unsigned int len;
	unsigned int size;
	bool do_write;
	bool do_nonblock_req;
	enum mmc_test_prep_media prepare;
};

struct mmc_test_async_req {
	struct mmc_async_req areq;
	struct mmc_test_card *test;
};

/*******************************************************************/
/*  General helper functions                                       */
/*******************************************************************/

/*
 * Configure correct block size in card
 */
static int mmc_test_set_blksize(struct mmc_test_card *test, unsigned size)
{
	return mmc_set_blocklen(test->card, size);
}

static bool mmc_test_card_cmd23(struct mmc_card *card)
{
	return mmc_card_mmc(card) ||
	       (mmc_card_sd(card) && card->scr.cmds & SD_SCR_CMD23_SUPPORT);
}

static void mmc_test_prepare_sbc(struct mmc_test_card *test,
				 struct mmc_request *mrq, unsigned int blocks)
{
	struct mmc_card *card = test->card;

	if (!mrq->sbc || !mmc_host_cmd23(card->host) ||
	    !mmc_test_card_cmd23(card) || !mmc_op_multi(mrq->cmd->opcode) ||
	    (card->quirks & MMC_QUIRK_BLK_NO_CMD23)) {
		mrq->sbc = NULL;
		return;
	}

	mrq->sbc->opcode = MMC_SET_BLOCK_COUNT;
	mrq->sbc->arg = blocks;
	mrq->sbc->flags = MMC_RSP_R1 | MMC_CMD_AC;
}
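
/*
 * Note on CMD23: SET_BLOCK_COUNT pre-declares how many blocks the following
 * multi-block transfer contains, so the card does not depend on CMD12
 * (STOP_TRANSMISSION) to terminate it.  The helper above only fills in
 * mrq->sbc when the host, the card and the opcode all support that mode;
 * otherwise it clears mrq->sbc and the request falls back to the open-ended
 * style with a stop command.
 */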

/*
 * Fill in the mmc_request structure given a set of transfer parameters.
 */
static void mmc_test_prepare_mrq(struct mmc_test_card *test,
	struct mmc_request *mrq, struct scatterlist *sg, unsigned sg_len,
	unsigned dev_addr, unsigned blocks, unsigned blksz, int write)
{
	if (WARN_ON(!mrq || !mrq->cmd || !mrq->data || !mrq->stop))
		return;

	if (blocks > 1) {
		mrq->cmd->opcode = write ?
			MMC_WRITE_MULTIPLE_BLOCK : MMC_READ_MULTIPLE_BLOCK;
	} else {
		mrq->cmd->opcode = write ?
			MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
	}

	mrq->cmd->arg = dev_addr;
	if (!mmc_card_blockaddr(test->card))
		mrq->cmd->arg <<= 9;

	mrq->cmd->flags = MMC_RSP_R1 | MMC_CMD_ADTC;

	if (blocks == 1)
		mrq->stop = NULL;
	else {
		mrq->stop->opcode = MMC_STOP_TRANSMISSION;
		mrq->stop->arg = 0;
		mrq->stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
	}

	mrq->data->blksz = blksz;
	mrq->data->blocks = blocks;
	mrq->data->flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
	mrq->data->sg = sg;
	mrq->data->sg_len = sg_len;

	mmc_test_prepare_sbc(test, mrq, blocks);

	mmc_set_data_timeout(mrq->data, test->card);
}

static int mmc_test_busy(struct mmc_command *cmd)
{
	return !(cmd->resp[0] & R1_READY_FOR_DATA) ||
		(R1_CURRENT_STATE(cmd->resp[0]) == R1_STATE_PRG);
}
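
/*
 * In the R1 card status, READY_FOR_DATA (bit 8) signals that the card's
 * buffer can accept data and CURRENT_STATE (bits 9-12) encodes the state
 * machine; the card is treated as busy while READY_FOR_DATA is clear or the
 * state is still "prg" (programming).
 */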

/*
 * Wait for the card to finish the busy state
 */
static int mmc_test_wait_busy(struct mmc_test_card *test)
{
	int ret, busy;
	struct mmc_command cmd = {0};

	busy = 0;
	do {
		memset(&cmd, 0, sizeof(struct mmc_command));

		cmd.opcode = MMC_SEND_STATUS;
		cmd.arg = test->card->rca << 16;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

		ret = mmc_wait_for_cmd(test->card->host, &cmd, 0);
		if (ret)
			break;

		if (!busy && mmc_test_busy(&cmd)) {
			busy = 1;
			if (test->card->host->caps & MMC_CAP_WAIT_WHILE_BUSY)
				pr_info("%s: Warning: Host did not wait for busy state to end.\n",
					mmc_hostname(test->card->host));
		}
	} while (mmc_test_busy(&cmd));

	return ret;
}

/*
 * Transfer a single sector of kernel addressable data
 */
static int mmc_test_buffer_transfer(struct mmc_test_card *test,
	u8 *buffer, unsigned addr, unsigned blksz, int write)
{
	struct mmc_request mrq = {0};
	struct mmc_command cmd = {0};
	struct mmc_command stop = {0};
	struct mmc_data data = {0};

	struct scatterlist sg;

	mrq.cmd = &cmd;
	mrq.data = &data;
	mrq.stop = &stop;

	sg_init_one(&sg, buffer, blksz);

	mmc_test_prepare_mrq(test, &mrq, &sg, 1, addr, 1, blksz, write);

	mmc_wait_for_req(test->card->host, &mrq);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return mmc_test_wait_busy(test);
}

static void mmc_test_free_mem(struct mmc_test_mem *mem)
{
	if (!mem)
		return;
	while (mem->cnt--)
		__free_pages(mem->arr[mem->cnt].page,
			     mem->arr[mem->cnt].order);
	kfree(mem->arr);
	kfree(mem);
}

/*
 * Allocate a lot of memory, preferably max_sz but at least min_sz.  In case
 * there isn't much memory, do not exceed 1/16th of total lowmem pages.  Also
 * do not exceed a maximum number of segments, and try not to make segments
 * much bigger than the maximum segment size.
 */
static struct mmc_test_mem *mmc_test_alloc_mem(unsigned long min_sz,
					       unsigned long max_sz,
					       unsigned int max_segs,
					       unsigned int max_seg_sz)
{
	unsigned long max_page_cnt = DIV_ROUND_UP(max_sz, PAGE_SIZE);
	unsigned long min_page_cnt = DIV_ROUND_UP(min_sz, PAGE_SIZE);
	unsigned long max_seg_page_cnt = DIV_ROUND_UP(max_seg_sz, PAGE_SIZE);
	unsigned long page_cnt = 0;
	unsigned long limit = nr_free_buffer_pages() >> 4;
	struct mmc_test_mem *mem;

	if (max_page_cnt > limit)
		max_page_cnt = limit;
	if (min_page_cnt > max_page_cnt)
		min_page_cnt = max_page_cnt;

	if (max_seg_page_cnt > max_page_cnt)
		max_seg_page_cnt = max_page_cnt;

	if (max_segs > max_page_cnt)
		max_segs = max_page_cnt;

	mem = kzalloc(sizeof(struct mmc_test_mem), GFP_KERNEL);
	if (!mem)
		return NULL;

	mem->arr = kzalloc(sizeof(struct mmc_test_pages) * max_segs,
			   GFP_KERNEL);
	if (!mem->arr)
		goto out_free;

	while (max_page_cnt) {
		struct page *page;
		unsigned int order;
		gfp_t flags = GFP_KERNEL | GFP_DMA | __GFP_NOWARN |
				__GFP_NORETRY;

		order = get_order(max_seg_page_cnt << PAGE_SHIFT);
		while (1) {
			page = alloc_pages(flags, order);
			if (page || !order)
				break;
			order -= 1;
		}
		if (!page) {
			if (page_cnt < min_page_cnt)
				goto out_free;
			break;
		}
		mem->arr[mem->cnt].page = page;
		mem->arr[mem->cnt].order = order;
		mem->cnt += 1;
		if (max_page_cnt <= (1UL << order))
			break;
		max_page_cnt -= 1UL << order;
		page_cnt += 1UL << order;
		if (mem->cnt >= max_segs) {
			if (page_cnt < min_page_cnt)
				goto out_free;
			break;
		}
	}

	return mem;

out_free:
	mmc_test_free_mem(mem);
	return NULL;
}
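
/*
 * Illustrative walk-through (an example, assuming 4 KiB pages): a request
 * for max_sz = 1 MiB with max_seg_sz = 64 KiB starts with order-4 (16-page)
 * allocations and steps the order down whenever an allocation fails, until
 * single pages are used.  The loop also stops once max_segs entries are in
 * use, which is fine as long as at least min_sz bytes were gathered.
 */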

/*
 * Map memory into a scatterlist.  Optionally allow the same memory to be
 * mapped more than once.
 */
static int mmc_test_map_sg(struct mmc_test_mem *mem, unsigned long size,
			   struct scatterlist *sglist, int repeat,
			   unsigned int max_segs, unsigned int max_seg_sz,
			   unsigned int *sg_len, int min_sg_len)
{
	struct scatterlist *sg = NULL;
	unsigned int i;
	unsigned long sz = size;

	sg_init_table(sglist, max_segs);
	if (min_sg_len > max_segs)
		min_sg_len = max_segs;

	*sg_len = 0;
	do {
		for (i = 0; i < mem->cnt; i++) {
			unsigned long len = PAGE_SIZE << mem->arr[i].order;

			if (min_sg_len && (size / min_sg_len < len))
				len = ALIGN(size / min_sg_len, 512);
			if (len > sz)
				len = sz;
			if (len > max_seg_sz)
				len = max_seg_sz;
			if (sg)
				sg = sg_next(sg);
			else
				sg = sglist;
			if (!sg)
				return -EINVAL;
			sg_set_page(sg, mem->arr[i].page, len, 0);
			sz -= len;
			*sg_len += 1;
			if (!sz)
				break;
		}
	} while (sz && repeat);

	if (sz)
		return -EINVAL;

	if (sg)
		sg_mark_end(sg);

	return 0;
}
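
/*
 * With @repeat set, the same allocation may be mapped into the scatterlist
 * over and over, which is how a transfer larger than the allocated memory is
 * built up.  A non-zero @min_sg_len caps the segment length at roughly
 * size / min_sg_len so that the resulting list ends up with at least that
 * many entries.
 */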

/*
 * Map memory into a scatterlist so that no pages are contiguous.  Allow the
 * same memory to be mapped more than once.
 */
static int mmc_test_map_sg_max_scatter(struct mmc_test_mem *mem,
				       unsigned long sz,
				       struct scatterlist *sglist,
				       unsigned int max_segs,
				       unsigned int max_seg_sz,
				       unsigned int *sg_len)
{
	struct scatterlist *sg = NULL;
	unsigned int i = mem->cnt, cnt;
	unsigned long len;
	void *base, *addr, *last_addr = NULL;

	sg_init_table(sglist, max_segs);

	*sg_len = 0;
	while (sz) {
		base = page_address(mem->arr[--i].page);
		cnt = 1 << mem->arr[i].order;
		while (sz && cnt) {
			addr = base + PAGE_SIZE * --cnt;
			if (last_addr && last_addr + PAGE_SIZE == addr)
				continue;
			last_addr = addr;
			len = PAGE_SIZE;
			if (len > max_seg_sz)
				len = max_seg_sz;
			if (len > sz)
				len = sz;
			if (sg)
				sg = sg_next(sg);
			else
				sg = sglist;
			if (!sg)
				return -EINVAL;
			sg_set_page(sg, virt_to_page(addr), len, 0);
			sz -= len;
			*sg_len += 1;
		}
		if (i == 0)
			i = mem->cnt;
	}

	if (sg)
		sg_mark_end(sg);

	return 0;
}
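
/*
 * The max-scatter variant above walks the allocations backwards, one page at
 * a time, and skips a page that would be physically contiguous with the one
 * mapped just before it.  The resulting scatterlist has no mergeable
 * neighbouring entries, which is the worst case for a host DMA engine.
 */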

/*
 * Calculate transfer rate in bytes per second.
 */
static unsigned int mmc_test_rate(uint64_t bytes, struct timespec *ts)
{
	uint64_t ns;

	ns = ts->tv_sec;
	ns *= 1000000000;
	ns += ts->tv_nsec;

	bytes *= 1000000000;

	while (ns > UINT_MAX) {
		bytes >>= 1;
		ns >>= 1;
	}

	if (!ns)
		return 0;

	do_div(bytes, (uint32_t)ns);

	return bytes;
}
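
/*
 * Worked example (illustrative): 512 KiB moved in 0.1 s gives
 * bytes = 524288 * 10^9 and ns = 10^8, so the function returns
 * 5242880 bytes/s (5 MiB/s).  The halving loop only triggers once ns
 * exceeds UINT_MAX, trading a little precision for a 32-bit divisor
 * that do_div() can handle.
 */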

/*
 * Save transfer results for future usage
 */
static void mmc_test_save_transfer_result(struct mmc_test_card *test,
	unsigned int count, unsigned int sectors, struct timespec ts,
	unsigned int rate, unsigned int iops)
{
	struct mmc_test_transfer_result *tr;

	if (!test->gr)
		return;

	tr = kmalloc(sizeof(struct mmc_test_transfer_result), GFP_KERNEL);
	if (!tr)
		return;

	tr->count = count;
	tr->sectors = sectors;
	tr->ts = ts;
	tr->rate = rate;
	tr->iops = iops;

	list_add_tail(&tr->link, &test->gr->tr_lst);
}

/*
 * Print the transfer rate.
 */
static void mmc_test_print_rate(struct mmc_test_card *test, uint64_t bytes,
				struct timespec *ts1, struct timespec *ts2)
{
	unsigned int rate, iops, sectors = bytes >> 9;
	struct timespec ts;

	ts = timespec_sub(*ts2, *ts1);

	rate = mmc_test_rate(bytes, &ts);
	iops = mmc_test_rate(100, &ts); /* I/O ops per sec x 100 */

	pr_info("%s: Transfer of %u sectors (%u%s KiB) took %lu.%09lu seconds (%u kB/s, %u KiB/s, %u.%02u IOPS)\n",
		mmc_hostname(test->card->host), sectors, sectors >> 1,
		(sectors & 1 ? ".5" : ""), (unsigned long)ts.tv_sec,
		(unsigned long)ts.tv_nsec, rate / 1000, rate / 1024,
		iops / 100, iops % 100);

	mmc_test_save_transfer_result(test, 1, sectors, ts, rate, iops);
}

/*
 * Print the average transfer rate.
 */
static void mmc_test_print_avg_rate(struct mmc_test_card *test, uint64_t bytes,
				    unsigned int count, struct timespec *ts1,
				    struct timespec *ts2)
{
	unsigned int rate, iops, sectors = bytes >> 9;
	uint64_t tot = bytes * count;
	struct timespec ts;

	ts = timespec_sub(*ts2, *ts1);

	rate = mmc_test_rate(tot, &ts);
	iops = mmc_test_rate(count * 100, &ts); /* I/O ops per sec x 100 */

	pr_info("%s: Transfer of %u x %u sectors (%u x %u%s KiB) took %lu.%09lu seconds (%u kB/s, %u KiB/s, %u.%02u IOPS, sg_len %d)\n",
		mmc_hostname(test->card->host), count, sectors, count,
		sectors >> 1, (sectors & 1 ? ".5" : ""),
		(unsigned long)ts.tv_sec, (unsigned long)ts.tv_nsec,
		rate / 1000, rate / 1024, iops / 100, iops % 100,
		test->area.sg_len);

	mmc_test_save_transfer_result(test, count, sectors, ts, rate, iops);
}

/*
 * Return the card size in sectors.
 */
static unsigned int mmc_test_capacity(struct mmc_card *card)
{
	if (!mmc_card_sd(card) && mmc_card_blockaddr(card))
		return card->ext_csd.sectors;
	else
		return card->csd.capacity << (card->csd.read_blkbits - 9);
}
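
/*
 * Block-addressed (larger than 2 GiB) MMC devices report their size as a
 * sector count in EXT_CSD; SD cards and byte-addressed MMC derive it from
 * the CSD, with "read_blkbits - 9" rescaling CSD block units to 512-byte
 * sectors.
 */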

/*******************************************************************/
/*  Test preparation and cleanup                                   */
/*******************************************************************/

/*
 * Fill the first couple of sectors of the card with known data
 * so that bad reads/writes can be detected.
 */
static int __mmc_test_prepare(struct mmc_test_card *test, int write)
{
	int ret, i;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	if (write)
		memset(test->buffer, 0xDF, 512);
	else {
		for (i = 0; i < 512; i++)
			test->buffer[i] = i;
	}

	for (i = 0; i < BUFFER_SIZE / 512; i++) {
		ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_prepare_write(struct mmc_test_card *test)
{
	return __mmc_test_prepare(test, 1);
}

static int mmc_test_prepare_read(struct mmc_test_card *test)
{
	return __mmc_test_prepare(test, 0);
}

static int mmc_test_cleanup(struct mmc_test_card *test)
{
	int ret, i;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	memset(test->buffer, 0, 512);

	for (i = 0; i < BUFFER_SIZE / 512; i++) {
		ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
		if (ret)
			return ret;
	}

	return 0;
}

/*******************************************************************/
/*  Test execution helpers                                         */
/*******************************************************************/

/*
 * Modifies the mmc_request to perform the "short transfer" tests
 */
static void mmc_test_prepare_broken_mrq(struct mmc_test_card *test,
	struct mmc_request *mrq, int write)
{
	if (WARN_ON(!mrq || !mrq->cmd || !mrq->data))
		return;

	if (mrq->data->blocks > 1) {
		mrq->cmd->opcode = write ?
			MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
		mrq->stop = NULL;
	} else {
		mrq->cmd->opcode = MMC_SEND_STATUS;
		mrq->cmd->arg = test->card->rca << 16;
	}
}

/*
 * Checks that a normal transfer didn't have any errors
 */
static int mmc_test_check_result(struct mmc_test_card *test,
				 struct mmc_request *mrq)
{
	int ret;

	if (WARN_ON(!mrq || !mrq->cmd || !mrq->data))
		return -EINVAL;

	ret = 0;

	if (mrq->sbc && mrq->sbc->error)
		ret = mrq->sbc->error;
	if (!ret && mrq->cmd->error)
		ret = mrq->cmd->error;
	if (!ret && mrq->data->error)
		ret = mrq->data->error;
	if (!ret && mrq->stop && mrq->stop->error)
		ret = mrq->stop->error;
	if (!ret && mrq->data->bytes_xfered !=
		mrq->data->blocks * mrq->data->blksz)
		ret = RESULT_FAIL;

	if (ret == -EINVAL)
		ret = RESULT_UNSUP_HOST;

	return ret;
}

static enum mmc_blk_status mmc_test_check_result_async(struct mmc_card *card,
				       struct mmc_async_req *areq)
{
	struct mmc_test_async_req *test_async =
		container_of(areq, struct mmc_test_async_req, areq);
	int ret;

	mmc_test_wait_busy(test_async->test);

	/*
	 * FIXME: this function earlier just cast a regular error code,
	 * either of the kernel type -ERRORCODE or the local test framework
	 * RESULT_* errorcode, into an enum mmc_blk_status and returned it
	 * as the result of the check. Instead, convert it to some reasonable
	 * type by just returning either MMC_BLK_SUCCESS or MMC_BLK_CMD_ERR.
	 * If possible, a reasonable error code should be returned.
	 */
	ret = mmc_test_check_result(test_async->test, areq->mrq);
	if (ret)
		return MMC_BLK_CMD_ERR;

	return MMC_BLK_SUCCESS;
}

/*
 * Checks that a "short transfer" behaved as expected
 */
static int mmc_test_check_broken_result(struct mmc_test_card *test,
	struct mmc_request *mrq)
{
	int ret;

	if (WARN_ON(!mrq || !mrq->cmd || !mrq->data))
		return -EINVAL;

	ret = 0;

	if (!ret && mrq->cmd->error)
		ret = mrq->cmd->error;
	if (!ret && mrq->data->error == 0)
		ret = RESULT_FAIL;
	if (!ret && mrq->data->error != -ETIMEDOUT)
		ret = mrq->data->error;
	if (!ret && mrq->stop && mrq->stop->error)
		ret = mrq->stop->error;
	if (mrq->data->blocks > 1) {
		if (!ret && mrq->data->bytes_xfered > mrq->data->blksz)
			ret = RESULT_FAIL;
	} else {
		if (!ret && mrq->data->bytes_xfered > 0)
			ret = RESULT_FAIL;
	}

	if (ret == -EINVAL)
		ret = RESULT_UNSUP_HOST;

	return ret;
}

/*
 * Tests nonblock transfer with certain parameters
 */
static void mmc_test_nonblock_reset(struct mmc_request *mrq,
				    struct mmc_command *cmd,
				    struct mmc_command *stop,
				    struct mmc_data *data)
{
	memset(mrq, 0, sizeof(struct mmc_request));
	memset(cmd, 0, sizeof(struct mmc_command));
	memset(data, 0, sizeof(struct mmc_data));
	memset(stop, 0, sizeof(struct mmc_command));

	mrq->cmd = cmd;
	mrq->data = data;
	mrq->stop = stop;
}
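
/*
 * The transfer below drives two requests ping-pong fashion: while the host
 * works on one request, the next is already prepared.  mmc_start_req() hands
 * the new request to the host and returns the previously started one when it
 * completes, so done_areq trails cur_areq by one iteration.
 */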
static int mmc_test_nonblock_transfer(struct mmc_test_card *test,
				      struct scatterlist *sg, unsigned sg_len,
				      unsigned dev_addr, unsigned blocks,
				      unsigned blksz, int write, int count)
{
	struct mmc_request mrq1;
	struct mmc_command cmd1;
	struct mmc_command stop1;
	struct mmc_data data1;

	struct mmc_request mrq2;
	struct mmc_command cmd2;
	struct mmc_command stop2;
	struct mmc_data data2;

	struct mmc_test_async_req test_areq[2];
	struct mmc_async_req *done_areq;
	struct mmc_async_req *cur_areq = &test_areq[0].areq;
	struct mmc_async_req *other_areq = &test_areq[1].areq;
	enum mmc_blk_status status;
	int i;
	int ret = RESULT_OK;

	test_areq[0].test = test;
	test_areq[1].test = test;

	mmc_test_nonblock_reset(&mrq1, &cmd1, &stop1, &data1);
	mmc_test_nonblock_reset(&mrq2, &cmd2, &stop2, &data2);

	cur_areq->mrq = &mrq1;
	cur_areq->err_check = mmc_test_check_result_async;
	other_areq->mrq = &mrq2;
	other_areq->err_check = mmc_test_check_result_async;

	for (i = 0; i < count; i++) {
		mmc_test_prepare_mrq(test, cur_areq->mrq, sg, sg_len, dev_addr,
				     blocks, blksz, write);
		done_areq = mmc_start_req(test->card->host, cur_areq, &status);

		if (status != MMC_BLK_SUCCESS || (!done_areq && i > 0)) {
			ret = RESULT_FAIL;
			goto err;
		}

		if (done_areq) {
			if (done_areq->mrq == &mrq2)
				mmc_test_nonblock_reset(&mrq2, &cmd2,
							&stop2, &data2);
			else
				mmc_test_nonblock_reset(&mrq1, &cmd1,
							&stop1, &data1);
		}
		swap(cur_areq, other_areq);
		dev_addr += blocks;
	}

	done_areq = mmc_start_req(test->card->host, NULL, &status);
	if (status != MMC_BLK_SUCCESS)
		ret = RESULT_FAIL;

	return ret;
err:
	return ret;
}

/*
 * Tests a basic transfer with certain parameters
 */
static int mmc_test_simple_transfer(struct mmc_test_card *test,
	struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
	unsigned blocks, unsigned blksz, int write)
{
	struct mmc_request mrq = {0};
	struct mmc_command cmd = {0};
	struct mmc_command stop = {0};
	struct mmc_data data = {0};

	mrq.cmd = &cmd;
	mrq.data = &data;
	mrq.stop = &stop;

	mmc_test_prepare_mrq(test, &mrq, sg, sg_len, dev_addr,
		blocks, blksz, write);

	mmc_wait_for_req(test->card->host, &mrq);

	mmc_test_wait_busy(test);

	return mmc_test_check_result(test, &mrq);
}

/*
 * Tests a transfer where the card will fail completely or partly
 */
static int mmc_test_broken_transfer(struct mmc_test_card *test,
	unsigned blocks, unsigned blksz, int write)
{
	struct mmc_request mrq = {0};
	struct mmc_command cmd = {0};
	struct mmc_command stop = {0};
	struct mmc_data data = {0};

	struct scatterlist sg;

	mrq.cmd = &cmd;
	mrq.data = &data;
	mrq.stop = &stop;

	sg_init_one(&sg, test->buffer, blocks * blksz);

	mmc_test_prepare_mrq(test, &mrq, &sg, 1, 0, blocks, blksz, write);
	mmc_test_prepare_broken_mrq(test, &mrq, write);

	mmc_wait_for_req(test->card->host, &mrq);

	mmc_test_wait_busy(test);

	return mmc_test_check_broken_result(test, &mrq);
}

/*
 * Does a complete transfer test where data is also validated
 *
 * Note: mmc_test_prepare() must have been done before this call
 */
static int mmc_test_transfer(struct mmc_test_card *test,
	struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
	unsigned blocks, unsigned blksz, int write)
{
	int ret, i;
	unsigned long flags;

	if (write) {
		for (i = 0; i < blocks * blksz; i++)
			test->scratch[i] = i;
	} else {
		memset(test->scratch, 0, BUFFER_SIZE);
	}
	local_irq_save(flags);
	sg_copy_from_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
	local_irq_restore(flags);

	ret = mmc_test_set_blksize(test, blksz);
	if (ret)
		return ret;

	ret = mmc_test_simple_transfer(test, sg, sg_len, dev_addr,
		blocks, blksz, write);
	if (ret)
		return ret;

	if (write) {
		int sectors;

		ret = mmc_test_set_blksize(test, 512);
		if (ret)
			return ret;

		sectors = (blocks * blksz + 511) / 512;
		if ((sectors * 512) == (blocks * blksz))
			sectors++;

		if ((sectors * 512) > BUFFER_SIZE)
			return -EINVAL;

		memset(test->buffer, 0, sectors * 512);

		for (i = 0; i < sectors; i++) {
			ret = mmc_test_buffer_transfer(test,
				test->buffer + i * 512,
				dev_addr + i, 512, 0);
			if (ret)
				return ret;
		}

		for (i = 0; i < blocks * blksz; i++) {
			if (test->buffer[i] != (u8)i)
				return RESULT_FAIL;
		}

		for (; i < sectors * 512; i++) {
			if (test->buffer[i] != 0xDF)
				return RESULT_FAIL;
		}
	} else {
		local_irq_save(flags);
		sg_copy_to_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
		local_irq_restore(flags);
		for (i = 0; i < blocks * blksz; i++) {
			if (test->scratch[i] != (u8)i)
				return RESULT_FAIL;
		}
	}

	return 0;
}
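
/*
 * The write-side verification above depends on the pattern laid down by
 * __mmc_test_prepare(): the data is read back with single-block reads and
 * compared against the incrementing (u8)i pattern, and the tail of the last
 * sector must still hold the 0xDF filler.
 */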

/*******************************************************************/
/*  Tests                                                          */
/*******************************************************************/

struct mmc_test_case {
	const char *name;

	int (*prepare)(struct mmc_test_card *);
	int (*run)(struct mmc_test_card *);
	int (*cleanup)(struct mmc_test_card *);
};

static int mmc_test_basic_write(struct mmc_test_card *test)
{
	int ret;
	struct scatterlist sg;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	sg_init_one(&sg, test->buffer, 512);

	return mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 1);
}

static int mmc_test_basic_read(struct mmc_test_card *test)
{
	int ret;
	struct scatterlist sg;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	sg_init_one(&sg, test->buffer, 512);

	return mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 0);
}

static int mmc_test_verify_write(struct mmc_test_card *test)
{
	struct scatterlist sg;

	sg_init_one(&sg, test->buffer, 512);

	return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
}

static int mmc_test_verify_read(struct mmc_test_card *test)
{
	struct scatterlist sg;

	sg_init_one(&sg, test->buffer, 512);

	return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
}

static int mmc_test_multi_write(struct mmc_test_card *test)
{
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	sg_init_one(&sg, test->buffer, size);

	return mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 1);
}

static int mmc_test_multi_read(struct mmc_test_card *test)
{
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	sg_init_one(&sg, test->buffer, size);

	return mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 0);
}

static int mmc_test_pow2_write(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	if (!test->card->csd.write_partial)
		return RESULT_UNSUP_CARD;

	for (i = 1; i < 512; i <<= 1) {
		sg_init_one(&sg, test->buffer, i);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_pow2_read(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	if (!test->card->csd.read_partial)
		return RESULT_UNSUP_CARD;

	for (i = 1; i < 512; i <<= 1) {
		sg_init_one(&sg, test->buffer, i);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_weird_write(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	if (!test->card->csd.write_partial)
		return RESULT_UNSUP_CARD;

	for (i = 3; i < 512; i += 7) {
		sg_init_one(&sg, test->buffer, i);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_weird_read(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	if (!test->card->csd.read_partial)
		return RESULT_UNSUP_CARD;

	for (i = 3; i < 512; i += 7) {
		sg_init_one(&sg, test->buffer, i);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_align_write(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	for (i = 1; i < TEST_ALIGN_END; i++) {
		sg_init_one(&sg, test->buffer + i, 512);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_align_read(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	for (i = 1; i < TEST_ALIGN_END; i++) {
		sg_init_one(&sg, test->buffer + i, 512);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_align_multi_write(struct mmc_test_card *test)
{
	int ret, i;
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	for (i = 1; i < TEST_ALIGN_END; i++) {
		sg_init_one(&sg, test->buffer + i, size);
		ret = mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 1);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_align_multi_read(struct mmc_test_card *test)
{
	int ret, i;
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	for (i = 1; i < TEST_ALIGN_END; i++) {
		sg_init_one(&sg, test->buffer + i, size);
		ret = mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 0);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_xfersize_write(struct mmc_test_card *test)
{
	int ret;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	return mmc_test_broken_transfer(test, 1, 512, 1);
}

static int mmc_test_xfersize_read(struct mmc_test_card *test)
{
	int ret;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	return mmc_test_broken_transfer(test, 1, 512, 0);
}

static int mmc_test_multi_xfersize_write(struct mmc_test_card *test)
{
	int ret;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	return mmc_test_broken_transfer(test, 2, 512, 1);
}

static int mmc_test_multi_xfersize_read(struct mmc_test_card *test)
{
	int ret;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	return mmc_test_broken_transfer(test, 2, 512, 0);
}

#ifdef CONFIG_HIGHMEM

static int mmc_test_write_high(struct mmc_test_card *test)
{
	struct scatterlist sg;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, test->highmem, 512, 0);

	return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
}

static int mmc_test_read_high(struct mmc_test_card *test)
{
	struct scatterlist sg;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, test->highmem, 512, 0);

	return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
}

static int mmc_test_multi_write_high(struct mmc_test_card *test)
{
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, test->highmem, size, 0);

	return mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 1);
}

static int mmc_test_multi_read_high(struct mmc_test_card *test)
{
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, test->highmem, size, 0);

	return mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 0);
}

#else

static int mmc_test_no_highmem(struct mmc_test_card *test)
{
	pr_info("%s: Highmem not configured - test skipped\n",
		mmc_hostname(test->card->host));
	return 0;
}

#endif /* CONFIG_HIGHMEM */

/*
 * Map sz bytes so that it can be transferred.
 */
static int mmc_test_area_map(struct mmc_test_card *test, unsigned long sz,
			     int max_scatter, int min_sg_len)
{
	struct mmc_test_area *t = &test->area;
	int err;

	t->blocks = sz >> 9;

	if (max_scatter) {
		err = mmc_test_map_sg_max_scatter(t->mem, sz, t->sg,
						  t->max_segs, t->max_seg_sz,
						  &t->sg_len);
	} else {
		err = mmc_test_map_sg(t->mem, sz, t->sg, 1, t->max_segs,
				      t->max_seg_sz, &t->sg_len, min_sg_len);
	}
	if (err)
		pr_info("%s: Failed to map sg list\n",
			mmc_hostname(test->card->host));
	return err;
}

/*
 * Transfer bytes mapped by mmc_test_area_map().
 */
static int mmc_test_area_transfer(struct mmc_test_card *test,
				  unsigned int dev_addr, int write)
{
	struct mmc_test_area *t = &test->area;

	return mmc_test_simple_transfer(test, t->sg, t->sg_len, dev_addr,
					t->blocks, 512, write);
}

/*
 * Map and transfer bytes for multiple transfers.
 */
static int mmc_test_area_io_seq(struct mmc_test_card *test, unsigned long sz,
				unsigned int dev_addr, int write,
				int max_scatter, int timed, int count,
				bool nonblock, int min_sg_len)
{
	struct timespec ts1, ts2;
	int ret = 0;
	int i;
	struct mmc_test_area *t = &test->area;

	/*
	 * In the case of a maximally scattered transfer, the maximum transfer
	 * size is further limited by using PAGE_SIZE segments.
	 */
	if (max_scatter) {
		unsigned long max_tfr;

		if (t->max_seg_sz >= PAGE_SIZE)
			max_tfr = t->max_segs * PAGE_SIZE;
		else
			max_tfr = t->max_segs * t->max_seg_sz;
		if (sz > max_tfr)
			sz = max_tfr;
	}

	ret = mmc_test_area_map(test, sz, max_scatter, min_sg_len);
	if (ret)
		return ret;

	if (timed)
		getnstimeofday(&ts1);
	if (nonblock)
		ret = mmc_test_nonblock_transfer(test, t->sg, t->sg_len,
						 dev_addr, t->blocks, 512,
						 write, count);
	else
		for (i = 0; i < count && ret == 0; i++) {
			ret = mmc_test_area_transfer(test, dev_addr, write);
			dev_addr += sz >> 9;
		}

	if (ret)
		return ret;

	if (timed)
		getnstimeofday(&ts2);

	if (timed)
		mmc_test_print_avg_rate(test, sz, count, &ts1, &ts2);

	return 0;
}

static int mmc_test_area_io(struct mmc_test_card *test, unsigned long sz,
			    unsigned int dev_addr, int write, int max_scatter,
			    int timed)
{
	return mmc_test_area_io_seq(test, sz, dev_addr, write, max_scatter,
				    timed, 1, false, 0);
}

/*
 * Write the test area entirely.
 */
static int mmc_test_area_fill(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;

	return mmc_test_area_io(test, t->max_tfr, t->dev_addr, 1, 0, 0);
}

/*
 * Erase the test area entirely.
 */
static int mmc_test_area_erase(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;

	if (!mmc_can_erase(test->card))
		return 0;

	return mmc_erase(test->card, t->dev_addr, t->max_sz >> 9,
			 MMC_ERASE_ARG);
}

/*
 * Cleanup struct mmc_test_area.
 */
static int mmc_test_area_cleanup(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;

	kfree(t->sg);
	mmc_test_free_mem(t->mem);

	return 0;
}

/*
 * Initialize an area for testing large transfers.  The test area is set to the
 * middle of the card because cards may have different characteristics at the
 * front (for FAT file system optimization).  Optionally, the area is erased
 * (if the card supports it) which may improve write performance.  Optionally,
 * the area is filled with data for subsequent read tests.
 */
static int mmc_test_area_init(struct mmc_test_card *test, int erase, int fill)
{
	struct mmc_test_area *t = &test->area;
	unsigned long min_sz = 64 * 1024, sz;
	int ret;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	/* Make the test area size about 4MiB */
	sz = (unsigned long)test->card->pref_erase << 9;
	t->max_sz = sz;
	while (t->max_sz < 4 * 1024 * 1024)
		t->max_sz += sz;
	while (t->max_sz > TEST_AREA_MAX_SIZE && t->max_sz > sz)
		t->max_sz -= sz;

	t->max_segs = test->card->host->max_segs;
	t->max_seg_sz = test->card->host->max_seg_size;
	t->max_seg_sz -= t->max_seg_sz % 512;

	t->max_tfr = t->max_sz;
	if (t->max_tfr >> 9 > test->card->host->max_blk_count)
		t->max_tfr = test->card->host->max_blk_count << 9;
	if (t->max_tfr > test->card->host->max_req_size)
		t->max_tfr = test->card->host->max_req_size;
	if (t->max_tfr / t->max_seg_sz > t->max_segs)
		t->max_tfr = t->max_segs * t->max_seg_sz;

	/*
	 * Try to allocate enough memory for a max. sized transfer.  Less is OK
	 * because the same memory can be mapped into the scatterlist more than
	 * once.  Also, take into account the limits imposed on scatterlist
	 * segments by the host driver.
	 */
	t->mem = mmc_test_alloc_mem(min_sz, t->max_tfr, t->max_segs,
				    t->max_seg_sz);
	if (!t->mem)
		return -ENOMEM;

	t->sg = kmalloc(sizeof(struct scatterlist) * t->max_segs, GFP_KERNEL);
	if (!t->sg) {
		ret = -ENOMEM;
		goto out_free;
	}

	t->dev_addr = mmc_test_capacity(test->card) / 2;
	t->dev_addr -= t->dev_addr % (t->max_sz >> 9);

	if (erase) {
		ret = mmc_test_area_erase(test);
		if (ret)
			goto out_free;
	}

	if (fill) {
		ret = mmc_test_area_fill(test);
		if (ret)
			goto out_free;
	}

	return 0;

out_free:
	mmc_test_area_cleanup(test);
	return ret;
}
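
/*
 * Sizing example (illustrative): a card with pref_erase = 8192 sectors
 * (4 MiB) gives sz = 4 MiB, so t->max_sz settles at 4 MiB straight away,
 * whereas a card preferring 512 KiB erase units accumulates eight of them
 * to reach the ~4 MiB target.
 */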

/*
 * Prepare for large transfers.  Do not erase the test area.
 */
static int mmc_test_area_prepare(struct mmc_test_card *test)
{
	return mmc_test_area_init(test, 0, 0);
}

/*
 * Prepare for large transfers.  Do erase the test area.
 */
static int mmc_test_area_prepare_erase(struct mmc_test_card *test)
{
	return mmc_test_area_init(test, 1, 0);
}

/*
 * Prepare for large transfers.  Erase and fill the test area.
 */
static int mmc_test_area_prepare_fill(struct mmc_test_card *test)
{
	return mmc_test_area_init(test, 1, 1);
}

/*
 * Test best-case performance.  Best-case performance is expected from
 * a single large transfer.
 *
 * An additional option (max_scatter) allows the measurement of the same
 * transfer but with no contiguous pages in the scatter list.  This tests
 * the efficiency of DMA to handle scattered pages.
 */
static int mmc_test_best_performance(struct mmc_test_card *test, int write,
				     int max_scatter)
{
	struct mmc_test_area *t = &test->area;

	return mmc_test_area_io(test, t->max_tfr, t->dev_addr, write,
				max_scatter, 1);
}

/*
 * Best-case read performance.
 */
static int mmc_test_best_read_performance(struct mmc_test_card *test)
{
	return mmc_test_best_performance(test, 0, 0);
}

/*
 * Best-case write performance.
 */
static int mmc_test_best_write_performance(struct mmc_test_card *test)
{
	return mmc_test_best_performance(test, 1, 0);
}

/*
 * Best-case read performance into scattered pages.
 */
static int mmc_test_best_read_perf_max_scatter(struct mmc_test_card *test)
{
	return mmc_test_best_performance(test, 0, 1);
}

/*
 * Best-case write performance from scattered pages.
 */
static int mmc_test_best_write_perf_max_scatter(struct mmc_test_card *test)
{
	return mmc_test_best_performance(test, 1, 1);
}

/*
 * Single read performance by transfer size.
 */
static int mmc_test_profile_read_perf(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;
	unsigned long sz;
	unsigned int dev_addr;
	int ret;

	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
		dev_addr = t->dev_addr + (sz >> 9);
		ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
		if (ret)
			return ret;
	}
	sz = t->max_tfr;
	dev_addr = t->dev_addr;
	return mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
}

/*
 * Single write performance by transfer size.
 */
static int mmc_test_profile_write_perf(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;
	unsigned long sz;
	unsigned int dev_addr;
	int ret;

	ret = mmc_test_area_erase(test);
	if (ret)
		return ret;
	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
		dev_addr = t->dev_addr + (sz >> 9);
		ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
		if (ret)
			return ret;
	}
	ret = mmc_test_area_erase(test);
	if (ret)
		return ret;
	sz = t->max_tfr;
	dev_addr = t->dev_addr;
	return mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
}

/*
 * Single trim performance by transfer size.
 */
static int mmc_test_profile_trim_perf(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;
	unsigned long sz;
	unsigned int dev_addr;
	struct timespec ts1, ts2;
	int ret;

	if (!mmc_can_trim(test->card))
		return RESULT_UNSUP_CARD;

	if (!mmc_can_erase(test->card))
		return RESULT_UNSUP_HOST;

	for (sz = 512; sz < t->max_sz; sz <<= 1) {
		dev_addr = t->dev_addr + (sz >> 9);
		getnstimeofday(&ts1);
		ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
		if (ret)
			return ret;
		getnstimeofday(&ts2);
		mmc_test_print_rate(test, sz, &ts1, &ts2);
	}
	dev_addr = t->dev_addr;
	getnstimeofday(&ts1);
	ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
	if (ret)
		return ret;
	getnstimeofday(&ts2);
	mmc_test_print_rate(test, sz, &ts1, &ts2);
	return 0;
}

static int mmc_test_seq_read_perf(struct mmc_test_card *test, unsigned long sz)
{
	struct mmc_test_area *t = &test->area;
	unsigned int dev_addr, i, cnt;
	struct timespec ts1, ts2;
	int ret;

	cnt = t->max_sz / sz;
	dev_addr = t->dev_addr;
	getnstimeofday(&ts1);
	for (i = 0; i < cnt; i++) {
		ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 0);
		if (ret)
			return ret;
		dev_addr += (sz >> 9);
	}
	getnstimeofday(&ts2);
	mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
	return 0;
}

/*
 * Consecutive read performance by transfer size.
 */
static int mmc_test_profile_seq_read_perf(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;
	unsigned long sz;
	int ret;

	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
		ret = mmc_test_seq_read_perf(test, sz);
		if (ret)
			return ret;
	}
	sz = t->max_tfr;
	return mmc_test_seq_read_perf(test, sz);
}

static int mmc_test_seq_write_perf(struct mmc_test_card *test, unsigned long sz)
{
	struct mmc_test_area *t = &test->area;
	unsigned int dev_addr, i, cnt;
	struct timespec ts1, ts2;
	int ret;

	ret = mmc_test_area_erase(test);
	if (ret)
		return ret;
	cnt = t->max_sz / sz;
	dev_addr = t->dev_addr;
	getnstimeofday(&ts1);
	for (i = 0; i < cnt; i++) {
		ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 0);
		if (ret)
			return ret;
		dev_addr += (sz >> 9);
	}
	getnstimeofday(&ts2);
	mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
	return 0;
}

/*
 * Consecutive write performance by transfer size.
 */
static int mmc_test_profile_seq_write_perf(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;
	unsigned long sz;
	int ret;

	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
		ret = mmc_test_seq_write_perf(test, sz);
		if (ret)
			return ret;
	}
	sz = t->max_tfr;
	return mmc_test_seq_write_perf(test, sz);
}

/*
 * Consecutive trim performance by transfer size.
 */
static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;
	unsigned long sz;
	unsigned int dev_addr, i, cnt;
	struct timespec ts1, ts2;
	int ret;

	if (!mmc_can_trim(test->card))
		return RESULT_UNSUP_CARD;

	if (!mmc_can_erase(test->card))
		return RESULT_UNSUP_HOST;

	for (sz = 512; sz <= t->max_sz; sz <<= 1) {
		ret = mmc_test_area_erase(test);
		if (ret)
			return ret;
		ret = mmc_test_area_fill(test);
		if (ret)
			return ret;
		cnt = t->max_sz / sz;
		dev_addr = t->dev_addr;
		getnstimeofday(&ts1);
		for (i = 0; i < cnt; i++) {
			ret = mmc_erase(test->card, dev_addr, sz >> 9,
					MMC_TRIM_ARG);
			if (ret)
				return ret;
			dev_addr += (sz >> 9);
		}
		getnstimeofday(&ts2);
		mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
	}
	return 0;
}

static unsigned int rnd_next = 1;

static unsigned int mmc_test_rnd_num(unsigned int rnd_cnt)
{
	uint64_t r;

	rnd_next = rnd_next * 1103515245 + 12345;
	r = (rnd_next >> 16) & 0x7fff;
	return (r * rnd_cnt) >> 15;
}
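
/*
 * This is the classic ANSI C rand() linear congruential generator
 * (multiplier 1103515245, increment 12345) reduced to 15 bits; multiplying
 * by rnd_cnt and shifting right by 15 maps the result roughly uniformly
 * onto [0, rnd_cnt).  Determinism matters here: restoring rnd_next lets the
 * random write test replay exactly the same I/O pattern twice.
 */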

static int mmc_test_rnd_perf(struct mmc_test_card *test, int write, int print,
			     unsigned long sz)
{
	unsigned int dev_addr, cnt, rnd_addr, range1, range2, last_ea = 0, ea;
	unsigned int ssz;
	struct timespec ts1, ts2, ts;
	int ret;

	ssz = sz >> 9;

	rnd_addr = mmc_test_capacity(test->card) / 4;
	range1 = rnd_addr / test->card->pref_erase;
	range2 = range1 / ssz;

	getnstimeofday(&ts1);
	for (cnt = 0; cnt < UINT_MAX; cnt++) {
		getnstimeofday(&ts2);
		ts = timespec_sub(ts2, ts1);
		if (ts.tv_sec >= 10)
			break;
		ea = mmc_test_rnd_num(range1);
		if (ea == last_ea)
			ea -= 1;
		last_ea = ea;
		dev_addr = rnd_addr + test->card->pref_erase * ea +
			   ssz * mmc_test_rnd_num(range2);
		ret = mmc_test_area_io(test, sz, dev_addr, write, 0, 0);
		if (ret)
			return ret;
	}
	if (print)
		mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
	return 0;
}

static int mmc_test_random_perf(struct mmc_test_card *test, int write)
{
	struct mmc_test_area *t = &test->area;
	unsigned int next;
	unsigned long sz;
	int ret;

	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
		/*
		 * When writing, try to get more consistent results by running
		 * the test twice with exactly the same I/O but outputting the
		 * results only for the 2nd run.
		 */
		if (write) {
			next = rnd_next;
			ret = mmc_test_rnd_perf(test, write, 0, sz);
			if (ret)
				return ret;
			rnd_next = next;
		}
		ret = mmc_test_rnd_perf(test, write, 1, sz);
		if (ret)
			return ret;
	}
	sz = t->max_tfr;
	if (write) {
		next = rnd_next;
		ret = mmc_test_rnd_perf(test, write, 0, sz);
		if (ret)
			return ret;
		rnd_next = next;
	}
	return mmc_test_rnd_perf(test, write, 1, sz);
}

/*
 * Random read performance by transfer size.
 */
static int mmc_test_random_read_perf(struct mmc_test_card *test)
{
	return mmc_test_random_perf(test, 0);
}

/*
 * Random write performance by transfer size.
 */
static int mmc_test_random_write_perf(struct mmc_test_card *test)
{
	return mmc_test_random_perf(test, 1);
}

static int mmc_test_seq_perf(struct mmc_test_card *test, int write,
			     unsigned int tot_sz, int max_scatter)
{
	struct mmc_test_area *t = &test->area;
	unsigned int dev_addr, i, cnt, sz, ssz;
	struct timespec ts1, ts2;
	int ret;

	sz = t->max_tfr;

	/*
	 * In the case of a maximally scattered transfer, the maximum transfer
	 * size is further limited by using PAGE_SIZE segments.
	 */
	if (max_scatter) {
		unsigned long max_tfr;

		if (t->max_seg_sz >= PAGE_SIZE)
			max_tfr = t->max_segs * PAGE_SIZE;
		else
			max_tfr = t->max_segs * t->max_seg_sz;
		if (sz > max_tfr)
			sz = max_tfr;
	}

	ssz = sz >> 9;
	dev_addr = mmc_test_capacity(test->card) / 4;
	if (tot_sz > dev_addr << 9)
		tot_sz = dev_addr << 9;
	cnt = tot_sz / sz;
	dev_addr &= 0xffff0000; /* Round to 32MiB boundary (0x10000 sectors) */

	getnstimeofday(&ts1);
	for (i = 0; i < cnt; i++) {
		ret = mmc_test_area_io(test, sz, dev_addr, write,
				       max_scatter, 0);
		if (ret)
			return ret;
		dev_addr += ssz;
	}
	getnstimeofday(&ts2);

	mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);

	return 0;
}

static int mmc_test_large_seq_perf(struct mmc_test_card *test, int write)
{
	int ret, i;

	for (i = 0; i < 10; i++) {
		ret = mmc_test_seq_perf(test, write, 10 * 1024 * 1024, 1);
		if (ret)
			return ret;
	}
	for (i = 0; i < 5; i++) {
		ret = mmc_test_seq_perf(test, write, 100 * 1024 * 1024, 1);
		if (ret)
			return ret;
	}
	for (i = 0; i < 3; i++) {
		ret = mmc_test_seq_perf(test, write, 1000 * 1024 * 1024, 1);
		if (ret)
			return ret;
	}

	return ret;
}

/*
 * Large sequential read performance.
 */
static int mmc_test_large_seq_read_perf(struct mmc_test_card *test)
{
	return mmc_test_large_seq_perf(test, 0);
}

/*
 * Large sequential write performance.
 */
static int mmc_test_large_seq_write_perf(struct mmc_test_card *test)
{
	return mmc_test_large_seq_perf(test, 1);
}
2070 
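/*
 * Clamp @size and @reqsize to what the card and driver support, optionally
 * erase the test area, then issue size / reqsize requests of reqsize
 * bytes each.
 */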
2071 static int mmc_test_rw_multiple(struct mmc_test_card *test,
2072 				struct mmc_test_multiple_rw *tdata,
2073 				unsigned int reqsize, unsigned int size,
2074 				int min_sg_len)
2075 {
2076 	unsigned int dev_addr;
2077 	struct mmc_test_area *t = &test->area;
2078 	int ret = 0;
2079 
2080 	/* Set up test area */
2081 	if (size > mmc_test_capacity(test->card) / 2 * 512)
2082 		size = mmc_test_capacity(test->card) / 2 * 512;
2083 	if (reqsize > t->max_tfr)
2084 		reqsize = t->max_tfr;
2085 	dev_addr = mmc_test_capacity(test->card) / 4;
	if (dev_addr & 0xffff0000)
		dev_addr &= 0xffff0000; /* Round to a 32MiB (0x10000-sector) boundary */
	else
		dev_addr &= 0xfffff800; /* Round to a 1MiB (0x800-sector) boundary */
	if (!dev_addr) {
		ret = -EINVAL;
		goto err;
	}
2092 
2093 	if (reqsize > size)
2094 		return 0;
2095 
2096 	/* prepare test area */
2097 	if (mmc_can_erase(test->card) &&
2098 	    tdata->prepare & MMC_TEST_PREP_ERASE) {
2099 		ret = mmc_erase(test->card, dev_addr,
2100 				size / 512, MMC_SECURE_ERASE_ARG);
2101 		if (ret)
2102 			ret = mmc_erase(test->card, dev_addr,
2103 					size / 512, MMC_ERASE_ARG);
2104 		if (ret)
2105 			goto err;
2106 	}
2107 
2108 	/* Run test */
2109 	ret = mmc_test_area_io_seq(test, reqsize, dev_addr,
2110 				   tdata->do_write, 0, 1, size / reqsize,
2111 				   tdata->do_nonblock_req, min_sg_len);
2112 	if (ret)
2113 		goto err;
2114 
2115 	return ret;
err:
2117 	pr_info("[%s] error\n", __func__);
2118 	return ret;
2119 }
2120 
2121 static int mmc_test_rw_multiple_size(struct mmc_test_card *test,
2122 				     struct mmc_test_multiple_rw *rw)
2123 {
2124 	int ret = 0;
2125 	int i;
2126 	void *pre_req = test->card->host->ops->pre_req;
2127 	void *post_req = test->card->host->ops->post_req;
2128 
2129 	if (rw->do_nonblock_req &&
2130 	    ((!pre_req && post_req) || (pre_req && !post_req))) {
2131 		pr_info("error: only one of pre/post is defined\n");
2132 		return -EINVAL;
2133 	}
2134 
	for (i = 0; i < rw->len; i++) {
2136 		ret = mmc_test_rw_multiple(test, rw, rw->bs[i], rw->size, 0);
2137 		if (ret)
2138 			break;
2139 	}
2140 	return ret;
2141 }
2142 
2143 static int mmc_test_rw_multiple_sg_len(struct mmc_test_card *test,
2144 				       struct mmc_test_multiple_rw *rw)
2145 {
2146 	int ret = 0;
2147 	int i;
2148 
	for (i = 0; i < rw->len; i++) {
		ret = mmc_test_rw_multiple(test, rw, 512 * 1024, rw->size,
					   rw->sg_len[i]);
2152 		if (ret)
2153 			break;
2154 	}
2155 	return ret;
2156 }
2157 
2158 /*
 * Multiple blocking writes, 4 KiB to 4 MiB chunks
2160  */
2161 static int mmc_test_profile_mult_write_blocking_perf(struct mmc_test_card *test)
2162 {
2163 	unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
2164 			     1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
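	/* i.e. 4 KiB, 8 KiB, ..., 512 KiB, 1 MiB and 4 MiB (2 MiB is skipped) */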
2165 	struct mmc_test_multiple_rw test_data = {
2166 		.bs = bs,
2167 		.size = TEST_AREA_MAX_SIZE,
2168 		.len = ARRAY_SIZE(bs),
2169 		.do_write = true,
2170 		.do_nonblock_req = false,
2171 		.prepare = MMC_TEST_PREP_ERASE,
2172 	};
2173 
2174 	return mmc_test_rw_multiple_size(test, &test_data);
}
2176 
2177 /*
 * Multiple non-blocking writes, 4 KiB to 4 MiB chunks
2179  */
2180 static int mmc_test_profile_mult_write_nonblock_perf(struct mmc_test_card *test)
2181 {
2182 	unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
2183 			     1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
2184 	struct mmc_test_multiple_rw test_data = {
2185 		.bs = bs,
2186 		.size = TEST_AREA_MAX_SIZE,
2187 		.len = ARRAY_SIZE(bs),
2188 		.do_write = true,
2189 		.do_nonblock_req = true,
2190 		.prepare = MMC_TEST_PREP_ERASE,
2191 	};
2192 
2193 	return mmc_test_rw_multiple_size(test, &test_data);
2194 }
2195 
2196 /*
2197  * Multiple blocking read 4k to 4 MB chunks
 * Multiple blocking reads, 4 KiB to 4 MiB chunks
2199 static int mmc_test_profile_mult_read_blocking_perf(struct mmc_test_card *test)
2200 {
2201 	unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
2202 			     1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
2203 	struct mmc_test_multiple_rw test_data = {
2204 		.bs = bs,
2205 		.size = TEST_AREA_MAX_SIZE,
2206 		.len = ARRAY_SIZE(bs),
2207 		.do_write = false,
2208 		.do_nonblock_req = false,
2209 		.prepare = MMC_TEST_PREP_NONE,
2210 	};
2211 
2212 	return mmc_test_rw_multiple_size(test, &test_data);
2213 }
2214 
2215 /*
2216  * Multiple non-blocking read 4k to 4 MB chunks
 * Multiple non-blocking reads, 4 KiB to 4 MiB chunks
2218 static int mmc_test_profile_mult_read_nonblock_perf(struct mmc_test_card *test)
2219 {
2220 	unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
2221 			     1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
2222 	struct mmc_test_multiple_rw test_data = {
2223 		.bs = bs,
2224 		.size = TEST_AREA_MAX_SIZE,
2225 		.len = ARRAY_SIZE(bs),
2226 		.do_write = false,
2227 		.do_nonblock_req = true,
2228 		.prepare = MMC_TEST_PREP_NONE,
2229 	};
2230 
2231 	return mmc_test_rw_multiple_size(test, &test_data);
2232 }
2233 
2234 /*
 * Multiple blocking writes, 1 to 512 scatterlist elements
2236  */
2237 static int mmc_test_profile_sglen_wr_blocking_perf(struct mmc_test_card *test)
2238 {
2239 	unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
2240 				 1 << 7, 1 << 8, 1 << 9};
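	/* i.e. 1, then 8, 16, 32, 64, 128, 256 and 512 elements */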
2241 	struct mmc_test_multiple_rw test_data = {
2242 		.sg_len = sg_len,
2243 		.size = TEST_AREA_MAX_SIZE,
2244 		.len = ARRAY_SIZE(sg_len),
2245 		.do_write = true,
2246 		.do_nonblock_req = false,
2247 		.prepare = MMC_TEST_PREP_ERASE,
2248 	};
2249 
2250 	return mmc_test_rw_multiple_sg_len(test, &test_data);
}
2252 
2253 /*
 * Multiple non-blocking writes, 1 to 512 scatterlist elements
2255  */
2256 static int mmc_test_profile_sglen_wr_nonblock_perf(struct mmc_test_card *test)
2257 {
2258 	unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
2259 				 1 << 7, 1 << 8, 1 << 9};
2260 	struct mmc_test_multiple_rw test_data = {
2261 		.sg_len = sg_len,
2262 		.size = TEST_AREA_MAX_SIZE,
2263 		.len = ARRAY_SIZE(sg_len),
2264 		.do_write = true,
2265 		.do_nonblock_req = true,
2266 		.prepare = MMC_TEST_PREP_ERASE,
2267 	};
2268 
2269 	return mmc_test_rw_multiple_sg_len(test, &test_data);
2270 }
2271 
2272 /*
 * Multiple blocking reads, 1 to 512 scatterlist elements
2274  */
2275 static int mmc_test_profile_sglen_r_blocking_perf(struct mmc_test_card *test)
2276 {
2277 	unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
2278 				 1 << 7, 1 << 8, 1 << 9};
2279 	struct mmc_test_multiple_rw test_data = {
2280 		.sg_len = sg_len,
2281 		.size = TEST_AREA_MAX_SIZE,
2282 		.len = ARRAY_SIZE(sg_len),
2283 		.do_write = false,
2284 		.do_nonblock_req = false,
2285 		.prepare = MMC_TEST_PREP_NONE,
2286 	};
2287 
2288 	return mmc_test_rw_multiple_sg_len(test, &test_data);
2289 }
2290 
2291 /*
 * Multiple non-blocking reads, 1 to 512 scatterlist elements
2293  */
2294 static int mmc_test_profile_sglen_r_nonblock_perf(struct mmc_test_card *test)
2295 {
2296 	unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
2297 				 1 << 7, 1 << 8, 1 << 9};
2298 	struct mmc_test_multiple_rw test_data = {
2299 		.sg_len = sg_len,
2300 		.size = TEST_AREA_MAX_SIZE,
2301 		.len = ARRAY_SIZE(sg_len),
2302 		.do_write = false,
2303 		.do_nonblock_req = true,
2304 		.prepare = MMC_TEST_PREP_NONE,
2305 	};
2306 
2307 	return mmc_test_rw_multiple_sg_len(test, &test_data);
2308 }
2309 
2310 /*
2311  * eMMC hardware reset.
2312  */
2313 static int mmc_test_reset(struct mmc_test_card *test)
2314 {
2315 	struct mmc_card *card = test->card;
2316 	struct mmc_host *host = card->host;
2317 	int err;
2318 
2319 	err = mmc_hw_reset(host);
2320 	if (!err)
2321 		return RESULT_OK;
2322 	else if (err == -EOPNOTSUPP)
2323 		return RESULT_UNSUP_HOST;
2324 
2325 	return RESULT_FAIL;
2326 }
2327 
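/*
 * One struct bundles the request with every command and data structure it
 * can need, so a test requires only a single allocation.
 */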
2328 struct mmc_test_req {
2329 	struct mmc_request mrq;
2330 	struct mmc_command sbc;
2331 	struct mmc_command cmd;
2332 	struct mmc_command stop;
2333 	struct mmc_command status;
2334 	struct mmc_data data;
2335 };
2336 
2337 static struct mmc_test_req *mmc_test_req_alloc(void)
2338 {
2339 	struct mmc_test_req *rq = kzalloc(sizeof(*rq), GFP_KERNEL);
2340 
2341 	if (rq) {
2342 		rq->mrq.cmd = &rq->cmd;
2343 		rq->mrq.data = &rq->data;
2344 		rq->mrq.stop = &rq->stop;
2345 	}
2346 
2347 	return rq;
2348 }
2349 
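/* Issue CMD13 (SEND_STATUS); for non-SPI hosts the argument carries the RCA */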
2350 static int mmc_test_send_status(struct mmc_test_card *test,
2351 				struct mmc_command *cmd)
2352 {
2353 	memset(cmd, 0, sizeof(*cmd));
2354 
2355 	cmd->opcode = MMC_SEND_STATUS;
2356 	if (!mmc_host_is_spi(test->card->host))
2357 		cmd->arg = test->card->rca << 16;
2358 	cmd->flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
2359 
2360 	return mmc_wait_for_cmd(test->card->host, cmd, 0);
2361 }
2362 
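/*
 * Start a transfer marked cap_cmd_during_tfr and poll the card with
 * SEND_STATUS while the data transfer is still in flight. With @repeat_cmd,
 * keep polling until the card returns to the Tran state or 3 seconds pass.
 */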
2363 static int mmc_test_ongoing_transfer(struct mmc_test_card *test,
2364 				     unsigned int dev_addr, int use_sbc,
2365 				     int repeat_cmd, int write, int use_areq)
2366 {
2367 	struct mmc_test_req *rq = mmc_test_req_alloc();
2368 	struct mmc_host *host = test->card->host;
2369 	struct mmc_test_area *t = &test->area;
2370 	struct mmc_test_async_req test_areq = { .test = test };
2371 	struct mmc_request *mrq;
2372 	unsigned long timeout;
2373 	bool expired = false;
2374 	enum mmc_blk_status blkstat = MMC_BLK_SUCCESS;
2375 	int ret = 0, cmd_ret;
2376 	u32 status = 0;
2377 	int count = 0;
2378 
2379 	if (!rq)
2380 		return -ENOMEM;
2381 
2382 	mrq = &rq->mrq;
2383 	if (use_sbc)
2384 		mrq->sbc = &rq->sbc;
2385 	mrq->cap_cmd_during_tfr = true;
2386 
2387 	test_areq.areq.mrq = mrq;
2388 	test_areq.areq.err_check = mmc_test_check_result_async;
2389 
2390 	mmc_test_prepare_mrq(test, mrq, t->sg, t->sg_len, dev_addr, t->blocks,
2391 			     512, write);
2392 
2393 	if (use_sbc && t->blocks > 1 && !mrq->sbc) {
		ret = mmc_host_cmd23(host) ?
		      RESULT_UNSUP_CARD :
		      RESULT_UNSUP_HOST;
2397 		goto out_free;
2398 	}
2399 
2400 	/* Start ongoing data request */
2401 	if (use_areq) {
2402 		mmc_start_req(host, &test_areq.areq, &blkstat);
2403 		if (blkstat != MMC_BLK_SUCCESS) {
2404 			ret = RESULT_FAIL;
2405 			goto out_free;
2406 		}
2407 	} else {
2408 		mmc_wait_for_req(host, mrq);
2409 	}
2410 
2411 	timeout = jiffies + msecs_to_jiffies(3000);
2412 	do {
2413 		count += 1;
2414 
2415 		/* Send status command while data transfer in progress */
2416 		cmd_ret = mmc_test_send_status(test, &rq->status);
2417 		if (cmd_ret)
2418 			break;
2419 
2420 		status = rq->status.resp[0];
2421 		if (status & R1_ERROR) {
2422 			cmd_ret = -EIO;
2423 			break;
2424 		}
2425 
2426 		if (mmc_is_req_done(host, mrq))
2427 			break;
2428 
2429 		expired = time_after(jiffies, timeout);
2430 		if (expired) {
2431 			pr_info("%s: timeout waiting for Tran state status %#x\n",
2432 				mmc_hostname(host), status);
2433 			cmd_ret = -ETIMEDOUT;
2434 			break;
2435 		}
2436 	} while (repeat_cmd && R1_CURRENT_STATE(status) != R1_STATE_TRAN);
2437 
2438 	/* Wait for data request to complete */
2439 	if (use_areq) {
2440 		mmc_start_req(host, NULL, &blkstat);
2441 		if (blkstat != MMC_BLK_SUCCESS)
2442 			ret = RESULT_FAIL;
2443 	} else {
		mmc_wait_for_req_done(host, mrq);
2445 	}
2446 
2447 	/*
2448 	 * For cap_cmd_during_tfr request, upper layer must send stop if
2449 	 * required.
2450 	 */
2451 	if (mrq->data->stop && (mrq->data->error || !mrq->sbc)) {
2452 		if (ret)
2453 			mmc_wait_for_cmd(host, mrq->data->stop, 0);
2454 		else
2455 			ret = mmc_wait_for_cmd(host, mrq->data->stop, 0);
2456 	}
2457 
2458 	if (ret)
2459 		goto out_free;
2460 
2461 	if (cmd_ret) {
2462 		pr_info("%s: Send Status failed: status %#x, error %d\n",
2463 			mmc_hostname(test->card->host), status, cmd_ret);
2464 	}
2465 
2466 	ret = mmc_test_check_result(test, mrq);
2467 	if (ret)
2468 		goto out_free;
2469 
2470 	ret = mmc_test_wait_busy(test);
2471 	if (ret)
2472 		goto out_free;
2473 
2474 	if (repeat_cmd && (t->blocks + 1) << 9 > t->max_tfr)
2475 		pr_info("%s: %d commands completed during transfer of %u blocks\n",
2476 			mmc_hostname(test->card->host), count, t->blocks);
2477 
2478 	if (cmd_ret)
2479 		ret = cmd_ret;
2480 out_free:
2481 	kfree(rq);
2482 
2483 	return ret;
2484 }
2485 
2486 static int __mmc_test_cmds_during_tfr(struct mmc_test_card *test,
2487 				      unsigned long sz, int use_sbc, int write,
2488 				      int use_areq)
2489 {
2490 	struct mmc_test_area *t = &test->area;
2491 	int ret;
2492 
2493 	if (!(test->card->host->caps & MMC_CAP_CMD_DURING_TFR))
2494 		return RESULT_UNSUP_HOST;
2495 
2496 	ret = mmc_test_area_map(test, sz, 0, 0);
2497 	if (ret)
2498 		return ret;
2499 
2500 	ret = mmc_test_ongoing_transfer(test, t->dev_addr, use_sbc, 0, write,
2501 					use_areq);
2502 	if (ret)
2503 		return ret;
2504 
2505 	return mmc_test_ongoing_transfer(test, t->dev_addr, use_sbc, 1, write,
2506 					 use_areq);
2507 }
2508 
2509 static int mmc_test_cmds_during_tfr(struct mmc_test_card *test, int use_sbc,
2510 				    int write, int use_areq)
2511 {
2512 	struct mmc_test_area *t = &test->area;
2513 	unsigned long sz;
2514 	int ret;
2515 
2516 	for (sz = 512; sz <= t->max_tfr; sz += 512) {
2517 		ret = __mmc_test_cmds_during_tfr(test, sz, use_sbc, write,
2518 						 use_areq);
2519 		if (ret)
2520 			return ret;
2521 	}
2522 	return 0;
2523 }
2524 
2525 /*
2526  * Commands during read - no Set Block Count (CMD23).
2527  */
2528 static int mmc_test_cmds_during_read(struct mmc_test_card *test)
2529 {
2530 	return mmc_test_cmds_during_tfr(test, 0, 0, 0);
2531 }
2532 
2533 /*
2534  * Commands during write - no Set Block Count (CMD23).
2535  */
2536 static int mmc_test_cmds_during_write(struct mmc_test_card *test)
2537 {
2538 	return mmc_test_cmds_during_tfr(test, 0, 1, 0);
2539 }
2540 
2541 /*
2542  * Commands during read - use Set Block Count (CMD23).
2543  */
2544 static int mmc_test_cmds_during_read_cmd23(struct mmc_test_card *test)
2545 {
2546 	return mmc_test_cmds_during_tfr(test, 1, 0, 0);
2547 }
2548 
2549 /*
2550  * Commands during write - use Set Block Count (CMD23).
2551  */
2552 static int mmc_test_cmds_during_write_cmd23(struct mmc_test_card *test)
2553 {
2554 	return mmc_test_cmds_during_tfr(test, 1, 1, 0);
2555 }
2556 
2557 /*
2558  * Commands during non-blocking read - use Set Block Count (CMD23).
2559  */
2560 static int mmc_test_cmds_during_read_cmd23_nonblock(struct mmc_test_card *test)
2561 {
2562 	return mmc_test_cmds_during_tfr(test, 1, 0, 1);
2563 }
2564 
2565 /*
2566  * Commands during non-blocking write - use Set Block Count (CMD23).
2567  */
2568 static int mmc_test_cmds_during_write_cmd23_nonblock(struct mmc_test_card *test)
2569 {
2570 	return mmc_test_cmds_during_tfr(test, 1, 1, 1);
2571 }
2572 
2573 static const struct mmc_test_case mmc_test_cases[] = {
2574 	{
2575 		.name = "Basic write (no data verification)",
2576 		.run = mmc_test_basic_write,
2577 	},
2578 
2579 	{
2580 		.name = "Basic read (no data verification)",
2581 		.run = mmc_test_basic_read,
2582 	},
2583 
2584 	{
2585 		.name = "Basic write (with data verification)",
2586 		.prepare = mmc_test_prepare_write,
2587 		.run = mmc_test_verify_write,
2588 		.cleanup = mmc_test_cleanup,
2589 	},
2590 
2591 	{
2592 		.name = "Basic read (with data verification)",
2593 		.prepare = mmc_test_prepare_read,
2594 		.run = mmc_test_verify_read,
2595 		.cleanup = mmc_test_cleanup,
2596 	},
2597 
2598 	{
2599 		.name = "Multi-block write",
2600 		.prepare = mmc_test_prepare_write,
2601 		.run = mmc_test_multi_write,
2602 		.cleanup = mmc_test_cleanup,
2603 	},
2604 
2605 	{
2606 		.name = "Multi-block read",
2607 		.prepare = mmc_test_prepare_read,
2608 		.run = mmc_test_multi_read,
2609 		.cleanup = mmc_test_cleanup,
2610 	},
2611 
2612 	{
2613 		.name = "Power of two block writes",
2614 		.prepare = mmc_test_prepare_write,
2615 		.run = mmc_test_pow2_write,
2616 		.cleanup = mmc_test_cleanup,
2617 	},
2618 
2619 	{
2620 		.name = "Power of two block reads",
2621 		.prepare = mmc_test_prepare_read,
2622 		.run = mmc_test_pow2_read,
2623 		.cleanup = mmc_test_cleanup,
2624 	},
2625 
2626 	{
2627 		.name = "Weird sized block writes",
2628 		.prepare = mmc_test_prepare_write,
2629 		.run = mmc_test_weird_write,
2630 		.cleanup = mmc_test_cleanup,
2631 	},
2632 
2633 	{
2634 		.name = "Weird sized block reads",
2635 		.prepare = mmc_test_prepare_read,
2636 		.run = mmc_test_weird_read,
2637 		.cleanup = mmc_test_cleanup,
2638 	},
2639 
2640 	{
2641 		.name = "Badly aligned write",
2642 		.prepare = mmc_test_prepare_write,
2643 		.run = mmc_test_align_write,
2644 		.cleanup = mmc_test_cleanup,
2645 	},
2646 
2647 	{
2648 		.name = "Badly aligned read",
2649 		.prepare = mmc_test_prepare_read,
2650 		.run = mmc_test_align_read,
2651 		.cleanup = mmc_test_cleanup,
2652 	},
2653 
2654 	{
2655 		.name = "Badly aligned multi-block write",
2656 		.prepare = mmc_test_prepare_write,
2657 		.run = mmc_test_align_multi_write,
2658 		.cleanup = mmc_test_cleanup,
2659 	},
2660 
2661 	{
2662 		.name = "Badly aligned multi-block read",
2663 		.prepare = mmc_test_prepare_read,
2664 		.run = mmc_test_align_multi_read,
2665 		.cleanup = mmc_test_cleanup,
2666 	},
2667 
2668 	{
2669 		.name = "Correct xfer_size at write (start failure)",
2670 		.run = mmc_test_xfersize_write,
2671 	},
2672 
2673 	{
2674 		.name = "Correct xfer_size at read (start failure)",
2675 		.run = mmc_test_xfersize_read,
2676 	},
2677 
2678 	{
2679 		.name = "Correct xfer_size at write (midway failure)",
2680 		.run = mmc_test_multi_xfersize_write,
2681 	},
2682 
2683 	{
2684 		.name = "Correct xfer_size at read (midway failure)",
2685 		.run = mmc_test_multi_xfersize_read,
2686 	},
2687 
2688 #ifdef CONFIG_HIGHMEM
2689 
2690 	{
2691 		.name = "Highmem write",
2692 		.prepare = mmc_test_prepare_write,
2693 		.run = mmc_test_write_high,
2694 		.cleanup = mmc_test_cleanup,
2695 	},
2696 
2697 	{
2698 		.name = "Highmem read",
2699 		.prepare = mmc_test_prepare_read,
2700 		.run = mmc_test_read_high,
2701 		.cleanup = mmc_test_cleanup,
2702 	},
2703 
2704 	{
2705 		.name = "Multi-block highmem write",
2706 		.prepare = mmc_test_prepare_write,
2707 		.run = mmc_test_multi_write_high,
2708 		.cleanup = mmc_test_cleanup,
2709 	},
2710 
2711 	{
2712 		.name = "Multi-block highmem read",
2713 		.prepare = mmc_test_prepare_read,
2714 		.run = mmc_test_multi_read_high,
2715 		.cleanup = mmc_test_cleanup,
2716 	},
2717 
2718 #else
2719 
2720 	{
2721 		.name = "Highmem write",
2722 		.run = mmc_test_no_highmem,
2723 	},
2724 
2725 	{
2726 		.name = "Highmem read",
2727 		.run = mmc_test_no_highmem,
2728 	},
2729 
2730 	{
2731 		.name = "Multi-block highmem write",
2732 		.run = mmc_test_no_highmem,
2733 	},
2734 
2735 	{
2736 		.name = "Multi-block highmem read",
2737 		.run = mmc_test_no_highmem,
2738 	},
2739 
2740 #endif /* CONFIG_HIGHMEM */
2741 
2742 	{
2743 		.name = "Best-case read performance",
2744 		.prepare = mmc_test_area_prepare_fill,
2745 		.run = mmc_test_best_read_performance,
2746 		.cleanup = mmc_test_area_cleanup,
2747 	},
2748 
2749 	{
2750 		.name = "Best-case write performance",
2751 		.prepare = mmc_test_area_prepare_erase,
2752 		.run = mmc_test_best_write_performance,
2753 		.cleanup = mmc_test_area_cleanup,
2754 	},
2755 
2756 	{
2757 		.name = "Best-case read performance into scattered pages",
2758 		.prepare = mmc_test_area_prepare_fill,
2759 		.run = mmc_test_best_read_perf_max_scatter,
2760 		.cleanup = mmc_test_area_cleanup,
2761 	},
2762 
2763 	{
2764 		.name = "Best-case write performance from scattered pages",
2765 		.prepare = mmc_test_area_prepare_erase,
2766 		.run = mmc_test_best_write_perf_max_scatter,
2767 		.cleanup = mmc_test_area_cleanup,
2768 	},
2769 
2770 	{
2771 		.name = "Single read performance by transfer size",
2772 		.prepare = mmc_test_area_prepare_fill,
2773 		.run = mmc_test_profile_read_perf,
2774 		.cleanup = mmc_test_area_cleanup,
2775 	},
2776 
2777 	{
2778 		.name = "Single write performance by transfer size",
2779 		.prepare = mmc_test_area_prepare,
2780 		.run = mmc_test_profile_write_perf,
2781 		.cleanup = mmc_test_area_cleanup,
2782 	},
2783 
2784 	{
2785 		.name = "Single trim performance by transfer size",
2786 		.prepare = mmc_test_area_prepare_fill,
2787 		.run = mmc_test_profile_trim_perf,
2788 		.cleanup = mmc_test_area_cleanup,
2789 	},
2790 
2791 	{
2792 		.name = "Consecutive read performance by transfer size",
2793 		.prepare = mmc_test_area_prepare_fill,
2794 		.run = mmc_test_profile_seq_read_perf,
2795 		.cleanup = mmc_test_area_cleanup,
2796 	},
2797 
2798 	{
2799 		.name = "Consecutive write performance by transfer size",
2800 		.prepare = mmc_test_area_prepare,
2801 		.run = mmc_test_profile_seq_write_perf,
2802 		.cleanup = mmc_test_area_cleanup,
2803 	},
2804 
2805 	{
2806 		.name = "Consecutive trim performance by transfer size",
2807 		.prepare = mmc_test_area_prepare,
2808 		.run = mmc_test_profile_seq_trim_perf,
2809 		.cleanup = mmc_test_area_cleanup,
2810 	},
2811 
2812 	{
2813 		.name = "Random read performance by transfer size",
2814 		.prepare = mmc_test_area_prepare,
2815 		.run = mmc_test_random_read_perf,
2816 		.cleanup = mmc_test_area_cleanup,
2817 	},
2818 
2819 	{
2820 		.name = "Random write performance by transfer size",
2821 		.prepare = mmc_test_area_prepare,
2822 		.run = mmc_test_random_write_perf,
2823 		.cleanup = mmc_test_area_cleanup,
2824 	},
2825 
2826 	{
2827 		.name = "Large sequential read into scattered pages",
2828 		.prepare = mmc_test_area_prepare,
2829 		.run = mmc_test_large_seq_read_perf,
2830 		.cleanup = mmc_test_area_cleanup,
2831 	},
2832 
2833 	{
2834 		.name = "Large sequential write from scattered pages",
2835 		.prepare = mmc_test_area_prepare,
2836 		.run = mmc_test_large_seq_write_perf,
2837 		.cleanup = mmc_test_area_cleanup,
2838 	},
2839 
2840 	{
2841 		.name = "Write performance with blocking req 4k to 4MB",
2842 		.prepare = mmc_test_area_prepare,
2843 		.run = mmc_test_profile_mult_write_blocking_perf,
2844 		.cleanup = mmc_test_area_cleanup,
2845 	},
2846 
2847 	{
2848 		.name = "Write performance with non-blocking req 4k to 4MB",
2849 		.prepare = mmc_test_area_prepare,
2850 		.run = mmc_test_profile_mult_write_nonblock_perf,
2851 		.cleanup = mmc_test_area_cleanup,
2852 	},
2853 
2854 	{
2855 		.name = "Read performance with blocking req 4k to 4MB",
2856 		.prepare = mmc_test_area_prepare,
2857 		.run = mmc_test_profile_mult_read_blocking_perf,
2858 		.cleanup = mmc_test_area_cleanup,
2859 	},
2860 
2861 	{
2862 		.name = "Read performance with non-blocking req 4k to 4MB",
2863 		.prepare = mmc_test_area_prepare,
2864 		.run = mmc_test_profile_mult_read_nonblock_perf,
2865 		.cleanup = mmc_test_area_cleanup,
2866 	},
2867 
2868 	{
2869 		.name = "Write performance blocking req 1 to 512 sg elems",
2870 		.prepare = mmc_test_area_prepare,
2871 		.run = mmc_test_profile_sglen_wr_blocking_perf,
2872 		.cleanup = mmc_test_area_cleanup,
2873 	},
2874 
2875 	{
2876 		.name = "Write performance non-blocking req 1 to 512 sg elems",
2877 		.prepare = mmc_test_area_prepare,
2878 		.run = mmc_test_profile_sglen_wr_nonblock_perf,
2879 		.cleanup = mmc_test_area_cleanup,
2880 	},
2881 
2882 	{
2883 		.name = "Read performance blocking req 1 to 512 sg elems",
2884 		.prepare = mmc_test_area_prepare,
2885 		.run = mmc_test_profile_sglen_r_blocking_perf,
2886 		.cleanup = mmc_test_area_cleanup,
2887 	},
2888 
2889 	{
2890 		.name = "Read performance non-blocking req 1 to 512 sg elems",
2891 		.prepare = mmc_test_area_prepare,
2892 		.run = mmc_test_profile_sglen_r_nonblock_perf,
2893 		.cleanup = mmc_test_area_cleanup,
2894 	},
2895 
2896 	{
2897 		.name = "Reset test",
2898 		.run = mmc_test_reset,
2899 	},
2900 
2901 	{
2902 		.name = "Commands during read - no Set Block Count (CMD23)",
2903 		.prepare = mmc_test_area_prepare,
2904 		.run = mmc_test_cmds_during_read,
2905 		.cleanup = mmc_test_area_cleanup,
2906 	},
2907 
2908 	{
2909 		.name = "Commands during write - no Set Block Count (CMD23)",
2910 		.prepare = mmc_test_area_prepare,
2911 		.run = mmc_test_cmds_during_write,
2912 		.cleanup = mmc_test_area_cleanup,
2913 	},
2914 
2915 	{
2916 		.name = "Commands during read - use Set Block Count (CMD23)",
2917 		.prepare = mmc_test_area_prepare,
2918 		.run = mmc_test_cmds_during_read_cmd23,
2919 		.cleanup = mmc_test_area_cleanup,
2920 	},
2921 
2922 	{
2923 		.name = "Commands during write - use Set Block Count (CMD23)",
2924 		.prepare = mmc_test_area_prepare,
2925 		.run = mmc_test_cmds_during_write_cmd23,
2926 		.cleanup = mmc_test_area_cleanup,
2927 	},
2928 
2929 	{
2930 		.name = "Commands during non-blocking read - use Set Block Count (CMD23)",
2931 		.prepare = mmc_test_area_prepare,
2932 		.run = mmc_test_cmds_during_read_cmd23_nonblock,
2933 		.cleanup = mmc_test_area_cleanup,
2934 	},
2935 
2936 	{
2937 		.name = "Commands during non-blocking write - use Set Block Count (CMD23)",
2938 		.prepare = mmc_test_area_prepare,
2939 		.run = mmc_test_cmds_during_write_cmd23_nonblock,
2940 		.cleanup = mmc_test_area_cleanup,
2941 	},
2942 };
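
/* Test numbers exposed through debugfs are array index + 1; 0 runs all. */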
2943 
2944 static DEFINE_MUTEX(mmc_test_lock);
2945 
2946 static LIST_HEAD(mmc_test_result);
2947 
2948 static void mmc_test_run(struct mmc_test_card *test, int testcase)
2949 {
2950 	int i, ret;
2951 
2952 	pr_info("%s: Starting tests of card %s...\n",
2953 		mmc_hostname(test->card->host), mmc_card_id(test->card));
2954 
2955 	mmc_claim_host(test->card->host);
2956 
	for (i = 0; i < ARRAY_SIZE(mmc_test_cases); i++) {
2958 		struct mmc_test_general_result *gr;
2959 
2960 		if (testcase && ((i + 1) != testcase))
2961 			continue;
2962 
2963 		pr_info("%s: Test case %d. %s...\n",
2964 			mmc_hostname(test->card->host), i + 1,
2965 			mmc_test_cases[i].name);
2966 
2967 		if (mmc_test_cases[i].prepare) {
2968 			ret = mmc_test_cases[i].prepare(test);
2969 			if (ret) {
				pr_info("%s: Result: Prepare stage failed! (%d)\n",
					mmc_hostname(test->card->host), ret);
2974 				continue;
2975 			}
2976 		}
2977 
		gr = kzalloc(sizeof(*gr), GFP_KERNEL);
2980 		if (gr) {
2981 			INIT_LIST_HEAD(&gr->tr_lst);
2982 
			/* Record the data we already know */
2984 			gr->card = test->card;
2985 			gr->testcase = i;
2986 
2987 			/* Append container to global one */
2988 			list_add_tail(&gr->link, &mmc_test_result);
2989 
2990 			/*
2991 			 * Save the pointer to created container in our private
2992 			 * structure.
2993 			 */
2994 			test->gr = gr;
2995 		}
2996 
2997 		ret = mmc_test_cases[i].run(test);
2998 		switch (ret) {
2999 		case RESULT_OK:
3000 			pr_info("%s: Result: OK\n",
3001 				mmc_hostname(test->card->host));
3002 			break;
3003 		case RESULT_FAIL:
3004 			pr_info("%s: Result: FAILED\n",
3005 				mmc_hostname(test->card->host));
3006 			break;
3007 		case RESULT_UNSUP_HOST:
			pr_info("%s: Result: UNSUPPORTED (by host)\n",
				mmc_hostname(test->card->host));
3011 			break;
3012 		case RESULT_UNSUP_CARD:
			pr_info("%s: Result: UNSUPPORTED (by card)\n",
				mmc_hostname(test->card->host));
3016 			break;
3017 		default:
3018 			pr_info("%s: Result: ERROR (%d)\n",
3019 				mmc_hostname(test->card->host), ret);
3020 		}
3021 
3022 		/* Save the result */
3023 		if (gr)
3024 			gr->result = ret;
3025 
3026 		if (mmc_test_cases[i].cleanup) {
3027 			ret = mmc_test_cases[i].cleanup(test);
3028 			if (ret) {
				pr_info("%s: Warning: Cleanup stage failed! (%d)\n",
					mmc_hostname(test->card->host), ret);
3033 			}
3034 		}
3035 	}
3036 
3037 	mmc_release_host(test->card->host);
3038 
3039 	pr_info("%s: Tests completed.\n",
3040 		mmc_hostname(test->card->host));
3041 }
3042 
3043 static void mmc_test_free_result(struct mmc_card *card)
3044 {
3045 	struct mmc_test_general_result *gr, *grs;
3046 
3047 	mutex_lock(&mmc_test_lock);
3048 
3049 	list_for_each_entry_safe(gr, grs, &mmc_test_result, link) {
3050 		struct mmc_test_transfer_result *tr, *trs;
3051 
3052 		if (card && gr->card != card)
3053 			continue;
3054 
3055 		list_for_each_entry_safe(tr, trs, &gr->tr_lst, link) {
3056 			list_del(&tr->link);
3057 			kfree(tr);
3058 		}
3059 
3060 		list_del(&gr->link);
3061 		kfree(gr);
3062 	}
3063 
3064 	mutex_unlock(&mmc_test_lock);
3065 }
3066 
3067 static LIST_HEAD(mmc_test_file_test);
3068 
3069 static int mtf_test_show(struct seq_file *sf, void *data)
3070 {
	struct mmc_card *card = sf->private;
3072 	struct mmc_test_general_result *gr;
3073 
3074 	mutex_lock(&mmc_test_lock);
3075 
3076 	list_for_each_entry(gr, &mmc_test_result, link) {
3077 		struct mmc_test_transfer_result *tr;
3078 
3079 		if (gr->card != card)
3080 			continue;
3081 
3082 		seq_printf(sf, "Test %d: %d\n", gr->testcase + 1, gr->result);
3083 
3084 		list_for_each_entry(tr, &gr->tr_lst, link) {
3085 			seq_printf(sf, "%u %d %lu.%09lu %u %u.%02u\n",
3086 				tr->count, tr->sectors,
3087 				(unsigned long)tr->ts.tv_sec,
3088 				(unsigned long)tr->ts.tv_nsec,
3089 				tr->rate, tr->iops / 100, tr->iops % 100);
3090 		}
3091 	}
3092 
3093 	mutex_unlock(&mmc_test_lock);
3094 
3095 	return 0;
3096 }
3097 
3098 static int mtf_test_open(struct inode *inode, struct file *file)
3099 {
3100 	return single_open(file, mtf_test_show, inode->i_private);
3101 }
3102 
3103 static ssize_t mtf_test_write(struct file *file, const char __user *buf,
3104 	size_t count, loff_t *pos)
3105 {
	struct seq_file *sf = file->private_data;
	struct mmc_card *card = sf->private;
3108 	struct mmc_test_card *test;
3109 	long testcase;
3110 	int ret;
3111 
3112 	ret = kstrtol_from_user(buf, count, 10, &testcase);
3113 	if (ret)
3114 		return ret;
3115 
	test = kzalloc(sizeof(*test), GFP_KERNEL);
3117 	if (!test)
3118 		return -ENOMEM;
3119 
3120 	/*
	 * Remove all results associated with the given card, so that only
	 * data from the most recent run is kept.
3123 	 */
3124 	mmc_test_free_result(card);
3125 
3126 	test->card = card;
3127 
3128 	test->buffer = kzalloc(BUFFER_SIZE, GFP_KERNEL);
3129 #ifdef CONFIG_HIGHMEM
3130 	test->highmem = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, BUFFER_ORDER);
3131 #endif
3132 
3133 #ifdef CONFIG_HIGHMEM
3134 	if (test->buffer && test->highmem) {
3135 #else
3136 	if (test->buffer) {
3137 #endif
3138 		mutex_lock(&mmc_test_lock);
3139 		mmc_test_run(test, testcase);
3140 		mutex_unlock(&mmc_test_lock);
3141 	}
3142 
3143 #ifdef CONFIG_HIGHMEM
3144 	__free_pages(test->highmem, BUFFER_ORDER);
3145 #endif
3146 	kfree(test->buffer);
3147 	kfree(test);
3148 
3149 	return count;
3150 }
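
/*
 * Typical usage from userspace (the debugfs paths below are illustrative;
 * the actual location depends on the host and card instance):
 *
 *   # cat /sys/kernel/debug/mmc0/mmc0:0001/testlist
 *   # echo 1 > /sys/kernel/debug/mmc0/mmc0:0001/test
 *   # cat /sys/kernel/debug/mmc0/mmc0:0001/test
 */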
3151 
3152 static const struct file_operations mmc_test_fops_test = {
3153 	.open		= mtf_test_open,
3154 	.read		= seq_read,
3155 	.write		= mtf_test_write,
3156 	.llseek		= seq_lseek,
3157 	.release	= single_release,
3158 };
3159 
3160 static int mtf_testlist_show(struct seq_file *sf, void *data)
3161 {
3162 	int i;
3163 
3164 	mutex_lock(&mmc_test_lock);
3165 
	seq_puts(sf, "0:\tRun all tests\n");
	for (i = 0; i < ARRAY_SIZE(mmc_test_cases); i++)
		seq_printf(sf, "%d:\t%s\n", i + 1, mmc_test_cases[i].name);
3169 
3170 	mutex_unlock(&mmc_test_lock);
3171 
3172 	return 0;
3173 }
3174 
3175 static int mtf_testlist_open(struct inode *inode, struct file *file)
3176 {
3177 	return single_open(file, mtf_testlist_show, inode->i_private);
3178 }
3179 
3180 static const struct file_operations mmc_test_fops_testlist = {
3181 	.open		= mtf_testlist_open,
3182 	.read		= seq_read,
3183 	.llseek		= seq_lseek,
3184 	.release	= single_release,
3185 };
3186 
3187 static void mmc_test_free_dbgfs_file(struct mmc_card *card)
3188 {
3189 	struct mmc_test_dbgfs_file *df, *dfs;
3190 
3191 	mutex_lock(&mmc_test_lock);
3192 
3193 	list_for_each_entry_safe(df, dfs, &mmc_test_file_test, link) {
3194 		if (card && df->card != card)
3195 			continue;
3196 		debugfs_remove(df->file);
3197 		list_del(&df->link);
3198 		kfree(df);
3199 	}
3200 
3201 	mutex_unlock(&mmc_test_lock);
3202 }
3203 
3204 static int __mmc_test_register_dbgfs_file(struct mmc_card *card,
3205 	const char *name, umode_t mode, const struct file_operations *fops)
3206 {
3207 	struct dentry *file = NULL;
3208 	struct mmc_test_dbgfs_file *df;
3209 
3210 	if (card->debugfs_root)
3211 		file = debugfs_create_file(name, mode, card->debugfs_root,
3212 			card, fops);
3213 
3214 	if (IS_ERR_OR_NULL(file)) {
3215 		dev_err(&card->dev,
3216 			"Can't create %s. Perhaps debugfs is disabled.\n",
3217 			name);
3218 		return -ENODEV;
3219 	}
3220 
	df = kmalloc(sizeof(*df), GFP_KERNEL);
3222 	if (!df) {
3223 		debugfs_remove(file);
3224 		dev_err(&card->dev,
3225 			"Can't allocate memory for internal usage.\n");
3226 		return -ENOMEM;
3227 	}
3228 
3229 	df->card = card;
3230 	df->file = file;
3231 
3232 	list_add(&df->link, &mmc_test_file_test);
3233 	return 0;
3234 }
3235 
3236 static int mmc_test_register_dbgfs_file(struct mmc_card *card)
3237 {
3238 	int ret;
3239 
3240 	mutex_lock(&mmc_test_lock);
3241 
3242 	ret = __mmc_test_register_dbgfs_file(card, "test", S_IWUSR | S_IRUGO,
3243 		&mmc_test_fops_test);
3244 	if (ret)
3245 		goto err;
3246 
3247 	ret = __mmc_test_register_dbgfs_file(card, "testlist", S_IRUGO,
3248 		&mmc_test_fops_testlist);
3252 err:
3253 	mutex_unlock(&mmc_test_lock);
3254 
3255 	return ret;
3256 }
3257 
3258 static int mmc_test_probe(struct mmc_card *card)
3259 {
3260 	int ret;
3261 
3262 	if (!mmc_card_mmc(card) && !mmc_card_sd(card))
3263 		return -ENODEV;
3264 
3265 	ret = mmc_test_register_dbgfs_file(card);
3266 	if (ret)
3267 		return ret;
3268 
3269 	dev_info(&card->dev, "Card claimed for testing.\n");
3270 
3271 	return 0;
3272 }
3273 
3274 static void mmc_test_remove(struct mmc_card *card)
3275 {
3276 	mmc_test_free_result(card);
3277 	mmc_test_free_dbgfs_file(card);
3278 }
3279 
3280 static void mmc_test_shutdown(struct mmc_card *card)
3281 {
3282 }
3283 
3284 static struct mmc_driver mmc_driver = {
3285 	.drv		= {
3286 		.name	= "mmc_test",
3287 	},
3288 	.probe		= mmc_test_probe,
3289 	.remove		= mmc_test_remove,
3290 	.shutdown	= mmc_test_shutdown,
3291 };
3292 
3293 static int __init mmc_test_init(void)
3294 {
3295 	return mmc_register_driver(&mmc_driver);
3296 }
3297 
3298 static void __exit mmc_test_exit(void)
3299 {
3300 	/* Clear stalled data if card is still plugged */
3301 	mmc_test_free_result(NULL);
3302 	mmc_test_free_dbgfs_file(NULL);
3303 
3304 	mmc_unregister_driver(&mmc_driver);
3305 }
3306 
3307 module_init(mmc_test_init);
3308 module_exit(mmc_test_exit);
3309 
3310 MODULE_LICENSE("GPL");
3311 MODULE_DESCRIPTION("Multimedia Card (MMC) host test driver");
3312 MODULE_AUTHOR("Pierre Ossman");
3313