xref: /openbmc/linux/drivers/mmc/core/mmc_test.c (revision feac8c8b)
/*
 *  Copyright 2007-2008 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 */

#include <linux/mmc/core.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/slab.h>

#include <linux/scatterlist.h>
#include <linux/swap.h>		/* For nr_free_buffer_pages() */
#include <linux/list.h>

#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
#include <linux/module.h>

#include "core.h"
#include "card.h"
#include "host.h"
#include "bus.h"
#include "mmc_ops.h"

#define RESULT_OK		0
#define RESULT_FAIL		1
#define RESULT_UNSUP_HOST	2
#define RESULT_UNSUP_CARD	3

#define BUFFER_ORDER		2
#define BUFFER_SIZE		(PAGE_SIZE << BUFFER_ORDER)

#define TEST_ALIGN_END		8

/*
 * Limit the test area size to the maximum MMC HC erase group size.  Note that
 * the maximum SD allocation unit size is just 4MiB.
 */
#define TEST_AREA_MAX_SIZE (128 * 1024 * 1024)

/**
 * struct mmc_test_pages - pages allocated by 'alloc_pages()'.
 * @page: first page in the allocation
 * @order: order of the number of pages allocated
 */
struct mmc_test_pages {
	struct page *page;
	unsigned int order;
};

/**
 * struct mmc_test_mem - allocated memory.
 * @arr: array of allocations
 * @cnt: number of allocations
 */
struct mmc_test_mem {
	struct mmc_test_pages *arr;
	unsigned int cnt;
};

/**
 * struct mmc_test_area - information for performance tests.
 * @max_sz: test area size (in bytes)
 * @dev_addr: address on card at which to do performance tests
 * @max_tfr: maximum transfer size allowed by driver (in bytes)
 * @max_segs: maximum segments allowed by driver in scatterlist @sg
 * @max_seg_sz: maximum segment size allowed by driver
 * @blocks: number of (512 byte) blocks currently mapped by @sg
 * @sg_len: length of currently mapped scatterlist @sg
 * @mem: allocated memory
 * @sg: scatterlist
 */
struct mmc_test_area {
	unsigned long max_sz;
	unsigned int dev_addr;
	unsigned int max_tfr;
	unsigned int max_segs;
	unsigned int max_seg_sz;
	unsigned int blocks;
	unsigned int sg_len;
	struct mmc_test_mem *mem;
	struct scatterlist *sg;
};

/**
 * struct mmc_test_transfer_result - transfer results for performance tests.
 * @link: double-linked list
 * @count: number of groups of sectors to check
 * @sectors: number of sectors to check in one group
 * @ts: time values of transfer
 * @rate: calculated transfer rate
 * @iops: I/O operations per second (times 100)
 */
struct mmc_test_transfer_result {
	struct list_head link;
	unsigned int count;
	unsigned int sectors;
	struct timespec64 ts;
	unsigned int rate;
	unsigned int iops;
};

/**
 * struct mmc_test_general_result - results for tests.
 * @link: double-linked list
 * @card: card under test
 * @testcase: number of test case
 * @result: result of test run
 * @tr_lst: transfer measurements if any as mmc_test_transfer_result
 */
struct mmc_test_general_result {
	struct list_head link;
	struct mmc_card *card;
	int testcase;
	int result;
	struct list_head tr_lst;
};

/**
 * struct mmc_test_dbgfs_file - debugfs related file.
 * @link: double-linked list
 * @card: card under test
 * @file: file created under debugfs
 */
struct mmc_test_dbgfs_file {
	struct list_head link;
	struct mmc_card *card;
	struct dentry *file;
};

/**
 * struct mmc_test_card - test information.
 * @card: card under test
 * @scratch: transfer buffer
 * @buffer: transfer buffer
 * @highmem: buffer for highmem tests
 * @area: information for performance tests
 * @gr: pointer to results of current testcase
 */
struct mmc_test_card {
	struct mmc_card	*card;

	u8		scratch[BUFFER_SIZE];
	u8		*buffer;
#ifdef CONFIG_HIGHMEM
	struct page	*highmem;
#endif
	struct mmc_test_area		area;
	struct mmc_test_general_result	*gr;
};

enum mmc_test_prep_media {
	MMC_TEST_PREP_NONE = 0,
	MMC_TEST_PREP_WRITE_FULL = 1 << 0,
	MMC_TEST_PREP_ERASE = 1 << 1,
};

struct mmc_test_multiple_rw {
	unsigned int *sg_len;
	unsigned int *bs;
	unsigned int len;
	unsigned int size;
	bool do_write;
	bool do_nonblock_req;
	enum mmc_test_prep_media prepare;
};

/*******************************************************************/
/*  General helper functions                                       */
/*******************************************************************/

/*
 * Configure correct block size in card
 */
static int mmc_test_set_blksize(struct mmc_test_card *test, unsigned size)
{
	return mmc_set_blocklen(test->card, size);
}

static bool mmc_test_card_cmd23(struct mmc_card *card)
{
	return mmc_card_mmc(card) ||
	       (mmc_card_sd(card) && card->scr.cmds & SD_SCR_CMD23_SUPPORT);
}

static void mmc_test_prepare_sbc(struct mmc_test_card *test,
				 struct mmc_request *mrq, unsigned int blocks)
{
	struct mmc_card *card = test->card;

	if (!mrq->sbc || !mmc_host_cmd23(card->host) ||
	    !mmc_test_card_cmd23(card) || !mmc_op_multi(mrq->cmd->opcode) ||
	    (card->quirks & MMC_QUIRK_BLK_NO_CMD23)) {
		mrq->sbc = NULL;
		return;
	}

	mrq->sbc->opcode = MMC_SET_BLOCK_COUNT;
	mrq->sbc->arg = blocks;
	mrq->sbc->flags = MMC_RSP_R1 | MMC_CMD_AC;
}

/*
 * Fill in the mmc_request structure given a set of transfer parameters.
 */
static void mmc_test_prepare_mrq(struct mmc_test_card *test,
	struct mmc_request *mrq, struct scatterlist *sg, unsigned sg_len,
	unsigned dev_addr, unsigned blocks, unsigned blksz, int write)
{
	if (WARN_ON(!mrq || !mrq->cmd || !mrq->data || !mrq->stop))
		return;

	if (blocks > 1) {
		mrq->cmd->opcode = write ?
			MMC_WRITE_MULTIPLE_BLOCK : MMC_READ_MULTIPLE_BLOCK;
	} else {
		mrq->cmd->opcode = write ?
			MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
	}

	mrq->cmd->arg = dev_addr;
	if (!mmc_card_blockaddr(test->card))
		mrq->cmd->arg <<= 9;

	mrq->cmd->flags = MMC_RSP_R1 | MMC_CMD_ADTC;

	if (blocks == 1)
		mrq->stop = NULL;
	else {
		mrq->stop->opcode = MMC_STOP_TRANSMISSION;
		mrq->stop->arg = 0;
		mrq->stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
	}

	mrq->data->blksz = blksz;
	mrq->data->blocks = blocks;
	mrq->data->flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
	mrq->data->sg = sg;
	mrq->data->sg_len = sg_len;

	mmc_test_prepare_sbc(test, mrq, blocks);

	mmc_set_data_timeout(mrq->data, test->card);
}
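
/*
 * Illustrative sketch (not part of the test suite): how the helpers above
 * combine for a one-off single-block transfer, mirroring what
 * mmc_test_buffer_transfer() below does.  The on-stack request objects and
 * the 512-byte block size are assumptions chosen for the example.
 *
 *	struct mmc_request mrq = {};
 *	struct mmc_command cmd = {}, stop = {};
 *	struct mmc_data data = {};
 *	struct scatterlist sg;
 *
 *	mrq.cmd = &cmd;
 *	mrq.data = &data;
 *	mrq.stop = &stop;
 *	sg_init_one(&sg, buf, 512);
 *	mmc_test_prepare_mrq(test, &mrq, &sg, 1, 0, 1, 512, 1);
 *	mmc_wait_for_req(test->card->host, &mrq);
 */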

static int mmc_test_busy(struct mmc_command *cmd)
{
	return !(cmd->resp[0] & R1_READY_FOR_DATA) ||
		(R1_CURRENT_STATE(cmd->resp[0]) == R1_STATE_PRG);
}
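
/*
 * Worked example (hypothetical response word): resp[0] == 0x00000900 has
 * R1_READY_FOR_DATA (bit 8) set and R1_CURRENT_STATE() == 4 (tran), so
 * mmc_test_busy() returns false.  While programming, the card reports
 * state 7 (prg) and/or clears bit 8, and the poll loop below keeps
 * issuing CMD13 (MMC_SEND_STATUS) until the card is ready again.
 */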

/*
 * Wait for the card to finish the busy state
 */
static int mmc_test_wait_busy(struct mmc_test_card *test)
{
	int ret, busy;
	struct mmc_command cmd = {};

	busy = 0;
	do {
		memset(&cmd, 0, sizeof(struct mmc_command));

		cmd.opcode = MMC_SEND_STATUS;
		cmd.arg = test->card->rca << 16;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

		ret = mmc_wait_for_cmd(test->card->host, &cmd, 0);
		if (ret)
			break;

		if (!busy && mmc_test_busy(&cmd)) {
			busy = 1;
			if (test->card->host->caps & MMC_CAP_WAIT_WHILE_BUSY)
				pr_info("%s: Warning: Host did not wait for busy state to end.\n",
					mmc_hostname(test->card->host));
		}
	} while (mmc_test_busy(&cmd));

	return ret;
}

/*
 * Transfer a single sector of kernel addressable data
 */
static int mmc_test_buffer_transfer(struct mmc_test_card *test,
	u8 *buffer, unsigned addr, unsigned blksz, int write)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_command stop = {};
	struct mmc_data data = {};

	struct scatterlist sg;

	mrq.cmd = &cmd;
	mrq.data = &data;
	mrq.stop = &stop;

	sg_init_one(&sg, buffer, blksz);

	mmc_test_prepare_mrq(test, &mrq, &sg, 1, addr, 1, blksz, write);

	mmc_wait_for_req(test->card->host, &mrq);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return mmc_test_wait_busy(test);
}

static void mmc_test_free_mem(struct mmc_test_mem *mem)
{
	if (!mem)
		return;
	while (mem->cnt--)
		__free_pages(mem->arr[mem->cnt].page,
			     mem->arr[mem->cnt].order);
	kfree(mem->arr);
	kfree(mem);
}

/*
 * Allocate a lot of memory, preferably max_sz but at least min_sz.  In case
 * there isn't much memory, do not exceed 1/16th of the total lowmem pages.
 * Also do not exceed a maximum number of segments, and try not to make
 * segments much bigger than the maximum segment size.
 */
static struct mmc_test_mem *mmc_test_alloc_mem(unsigned long min_sz,
					       unsigned long max_sz,
					       unsigned int max_segs,
					       unsigned int max_seg_sz)
{
	unsigned long max_page_cnt = DIV_ROUND_UP(max_sz, PAGE_SIZE);
	unsigned long min_page_cnt = DIV_ROUND_UP(min_sz, PAGE_SIZE);
	unsigned long max_seg_page_cnt = DIV_ROUND_UP(max_seg_sz, PAGE_SIZE);
	unsigned long page_cnt = 0;
	unsigned long limit = nr_free_buffer_pages() >> 4;
	struct mmc_test_mem *mem;

	if (max_page_cnt > limit)
		max_page_cnt = limit;
	if (min_page_cnt > max_page_cnt)
		min_page_cnt = max_page_cnt;

	if (max_seg_page_cnt > max_page_cnt)
		max_seg_page_cnt = max_page_cnt;

	if (max_segs > max_page_cnt)
		max_segs = max_page_cnt;

	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return NULL;

	mem->arr = kcalloc(max_segs, sizeof(*mem->arr), GFP_KERNEL);
	if (!mem->arr)
		goto out_free;

	while (max_page_cnt) {
		struct page *page;
		unsigned int order;
		gfp_t flags = GFP_KERNEL | GFP_DMA | __GFP_NOWARN |
				__GFP_NORETRY;

		order = get_order(max_seg_page_cnt << PAGE_SHIFT);
		while (1) {
			page = alloc_pages(flags, order);
			if (page || !order)
				break;
			order -= 1;
		}
		if (!page) {
			if (page_cnt < min_page_cnt)
				goto out_free;
			break;
		}
		mem->arr[mem->cnt].page = page;
		mem->arr[mem->cnt].order = order;
		mem->cnt += 1;
		if (max_page_cnt <= (1UL << order))
			break;
		max_page_cnt -= 1UL << order;
		page_cnt += 1UL << order;
		if (mem->cnt >= max_segs) {
			if (page_cnt < min_page_cnt)
				goto out_free;
			break;
		}
	}

	return mem;

out_free:
	mmc_test_free_mem(mem);
	return NULL;
}
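
/*
 * Worked example with made-up limits: for min_sz = 64 KiB, max_sz = 1 MiB,
 * max_segs = 4 and max_seg_sz = 256 KiB on a 4 KiB page system, the loop
 * above first tries order-6 (256 KiB) allocations, dropping to smaller
 * orders whenever the page allocator fails.  Four order-6 successes fill
 * all segments and cover max_sz; if allocation stalls earlier, the result
 * is still accepted as long as at least min_sz (16 pages) was obtained.
 */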

/*
 * Map memory into a scatterlist.  Optionally allow the same memory to be
 * mapped more than once.
 */
static int mmc_test_map_sg(struct mmc_test_mem *mem, unsigned long size,
			   struct scatterlist *sglist, int repeat,
			   unsigned int max_segs, unsigned int max_seg_sz,
			   unsigned int *sg_len, int min_sg_len)
{
	struct scatterlist *sg = NULL;
	unsigned int i;
	unsigned long sz = size;

	sg_init_table(sglist, max_segs);
	if (min_sg_len > max_segs)
		min_sg_len = max_segs;

	*sg_len = 0;
	do {
		for (i = 0; i < mem->cnt; i++) {
			unsigned long len = PAGE_SIZE << mem->arr[i].order;

			if (min_sg_len && (size / min_sg_len < len))
				len = ALIGN(size / min_sg_len, 512);
			if (len > sz)
				len = sz;
			if (len > max_seg_sz)
				len = max_seg_sz;
			if (sg)
				sg = sg_next(sg);
			else
				sg = sglist;
			if (!sg)
				return -EINVAL;
			sg_set_page(sg, mem->arr[i].page, len, 0);
			sz -= len;
			*sg_len += 1;
			if (!sz)
				break;
		}
	} while (sz && repeat);

	if (sz)
		return -EINVAL;

	if (sg)
		sg_mark_end(sg);

	return 0;
}
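
/*
 * Example of the min_sg_len capping above (hypothetical sizes): mapping
 * size = 64 KiB with min_sg_len = 4 limits each segment to
 * ALIGN(64 KiB / 4, 512) = 16 KiB, so the scatterlist ends up with at
 * least 4 entries even when a single large allocation could have covered
 * the whole transfer in one segment.
 */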
457 
458 /*
459  * Map memory into a scatterlist so that no pages are contiguous.  Allow the
460  * same memory to be mapped more than once.
461  */
462 static int mmc_test_map_sg_max_scatter(struct mmc_test_mem *mem,
463 				       unsigned long sz,
464 				       struct scatterlist *sglist,
465 				       unsigned int max_segs,
466 				       unsigned int max_seg_sz,
467 				       unsigned int *sg_len)
468 {
469 	struct scatterlist *sg = NULL;
470 	unsigned int i = mem->cnt, cnt;
471 	unsigned long len;
472 	void *base, *addr, *last_addr = NULL;
473 
474 	sg_init_table(sglist, max_segs);
475 
476 	*sg_len = 0;
477 	while (sz) {
478 		base = page_address(mem->arr[--i].page);
479 		cnt = 1 << mem->arr[i].order;
480 		while (sz && cnt) {
481 			addr = base + PAGE_SIZE * --cnt;
482 			if (last_addr && last_addr + PAGE_SIZE == addr)
483 				continue;
484 			last_addr = addr;
485 			len = PAGE_SIZE;
486 			if (len > max_seg_sz)
487 				len = max_seg_sz;
488 			if (len > sz)
489 				len = sz;
490 			if (sg)
491 				sg = sg_next(sg);
492 			else
493 				sg = sglist;
494 			if (!sg)
495 				return -EINVAL;
496 			sg_set_page(sg, virt_to_page(addr), len, 0);
497 			sz -= len;
498 			*sg_len += 1;
499 		}
500 		if (i == 0)
501 			i = mem->cnt;
502 	}
503 
504 	if (sg)
505 		sg_mark_end(sg);
506 
507 	return 0;
508 }
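
/*
 * The backwards page walk above emits pages in descending address order
 * and skips any page that would directly follow the previously mapped one
 * in memory, so adjacent scatterlist entries never cover physically
 * contiguous pages.  This exercises a host DMA engine with the worst-case
 * segment count instead of letting it coalesce neighbouring pages.
 */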

/*
 * Calculate transfer rate in bytes per second.
 */
static unsigned int mmc_test_rate(uint64_t bytes, struct timespec64 *ts)
{
	uint64_t ns;

	ns = timespec64_to_ns(ts);
	bytes *= 1000000000;

	while (ns > UINT_MAX) {
		bytes >>= 1;
		ns >>= 1;
	}

	if (!ns)
		return 0;

	do_div(bytes, (uint32_t)ns);

	return bytes;
}
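
/*
 * Worked example: 52428800 bytes (50 MiB) moved in 2 s gives
 * ns = 2000000000, which is below UINT_MAX, so no scaling shift is
 * needed; 52428800 * 1e9 / 2e9 = 26214400 bytes/sec, printed later as
 * 26214 kB/s and 25600 KiB/s.  The shift loop only kicks in for
 * transfers longer than ~4.29 s, trading a little precision for a
 * 32-bit divide.
 */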

/*
 * Save transfer results for future usage
 */
static void mmc_test_save_transfer_result(struct mmc_test_card *test,
	unsigned int count, unsigned int sectors, struct timespec64 ts,
	unsigned int rate, unsigned int iops)
{
	struct mmc_test_transfer_result *tr;

	if (!test->gr)
		return;

	tr = kmalloc(sizeof(*tr), GFP_KERNEL);
	if (!tr)
		return;

	tr->count = count;
	tr->sectors = sectors;
	tr->ts = ts;
	tr->rate = rate;
	tr->iops = iops;

	list_add_tail(&tr->link, &test->gr->tr_lst);
}

/*
 * Print the transfer rate.
 */
static void mmc_test_print_rate(struct mmc_test_card *test, uint64_t bytes,
				struct timespec64 *ts1, struct timespec64 *ts2)
{
	unsigned int rate, iops, sectors = bytes >> 9;
	struct timespec64 ts;

	ts = timespec64_sub(*ts2, *ts1);

	rate = mmc_test_rate(bytes, &ts);
	iops = mmc_test_rate(100, &ts); /* I/O ops per sec x 100 */

	pr_info("%s: Transfer of %u sectors (%u%s KiB) took %llu.%09u "
			 "seconds (%u kB/s, %u KiB/s, %u.%02u IOPS)\n",
			 mmc_hostname(test->card->host), sectors, sectors >> 1,
			 (sectors & 1 ? ".5" : ""), (u64)ts.tv_sec,
			 (u32)ts.tv_nsec, rate / 1000, rate / 1024,
			 iops / 100, iops % 100);

	mmc_test_save_transfer_result(test, 1, sectors, ts, rate, iops);
}

/*
 * Print the average transfer rate.
 */
static void mmc_test_print_avg_rate(struct mmc_test_card *test, uint64_t bytes,
				    unsigned int count, struct timespec64 *ts1,
				    struct timespec64 *ts2)
{
	unsigned int rate, iops, sectors = bytes >> 9;
	uint64_t tot = bytes * count;
	struct timespec64 ts;

	ts = timespec64_sub(*ts2, *ts1);

	rate = mmc_test_rate(tot, &ts);
	iops = mmc_test_rate(count * 100, &ts); /* I/O ops per sec x 100 */

	pr_info("%s: Transfer of %u x %u sectors (%u x %u%s KiB) took "
			 "%llu.%09u seconds (%u kB/s, %u KiB/s, "
			 "%u.%02u IOPS, sg_len %d)\n",
			 mmc_hostname(test->card->host), count, sectors, count,
			 sectors >> 1, (sectors & 1 ? ".5" : ""),
			 (u64)ts.tv_sec, (u32)ts.tv_nsec,
			 rate / 1000, rate / 1024, iops / 100, iops % 100,
			 test->area.sg_len);

	mmc_test_save_transfer_result(test, count, sectors, ts, rate, iops);
}

/*
 * Return the card size in sectors.
 */
static unsigned int mmc_test_capacity(struct mmc_card *card)
{
	if (!mmc_card_sd(card) && mmc_card_blockaddr(card))
		return card->ext_csd.sectors;
	else
		return card->csd.capacity << (card->csd.read_blkbits - 9);
}
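
/*
 * Example of the CSD branch (hypothetical 2 GiB SD card with 1 KiB read
 * blocks): csd.capacity = 2097152 and csd.read_blkbits = 10 give
 * 2097152 << (10 - 9) = 4194304 sectors of 512 bytes, i.e. 2 GiB.
 * Block-addressed (high capacity) MMC instead reports the sector count
 * directly in EXT_CSD.
 */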

/*******************************************************************/
/*  Test preparation and cleanup                                   */
/*******************************************************************/

/*
 * Fill the first couple of sectors of the card with known data
 * so that bad reads/writes can be detected
 */
static int __mmc_test_prepare(struct mmc_test_card *test, int write)
{
	int ret, i;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	if (write)
		memset(test->buffer, 0xDF, 512);
	else {
		for (i = 0; i < 512; i++)
			test->buffer[i] = i;
	}

	for (i = 0; i < BUFFER_SIZE / 512; i++) {
		ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_prepare_write(struct mmc_test_card *test)
{
	return __mmc_test_prepare(test, 1);
}

static int mmc_test_prepare_read(struct mmc_test_card *test)
{
	return __mmc_test_prepare(test, 0);
}

static int mmc_test_cleanup(struct mmc_test_card *test)
{
	int ret, i;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	memset(test->buffer, 0, 512);

	for (i = 0; i < BUFFER_SIZE / 512; i++) {
		ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
		if (ret)
			return ret;
	}

	return 0;
}

/*******************************************************************/
/*  Test execution helpers                                         */
/*******************************************************************/

/*
 * Modifies the mmc_request to perform the "short transfer" tests
 */
static void mmc_test_prepare_broken_mrq(struct mmc_test_card *test,
	struct mmc_request *mrq, int write)
{
	if (WARN_ON(!mrq || !mrq->cmd || !mrq->data))
		return;

	if (mrq->data->blocks > 1) {
		mrq->cmd->opcode = write ?
			MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
		mrq->stop = NULL;
	} else {
		mrq->cmd->opcode = MMC_SEND_STATUS;
		mrq->cmd->arg = test->card->rca << 16;
	}
}
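
/*
 * For a multi-block request the substitution above pairs a single-block
 * command with multi-block data, so the card stops supplying/accepting
 * data after one block and the host should report a data timeout with at
 * most one block transferred.  For a single-block request the data phase
 * is paired with CMD13 (SEND_STATUS), which moves no data at all.
 */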

/*
 * Checks that a normal transfer didn't have any errors
 */
static int mmc_test_check_result(struct mmc_test_card *test,
				 struct mmc_request *mrq)
{
	int ret;

	if (WARN_ON(!mrq || !mrq->cmd || !mrq->data))
		return -EINVAL;

	ret = 0;

	if (mrq->sbc && mrq->sbc->error)
		ret = mrq->sbc->error;
	if (!ret && mrq->cmd->error)
		ret = mrq->cmd->error;
	if (!ret && mrq->data->error)
		ret = mrq->data->error;
	if (!ret && mrq->stop && mrq->stop->error)
		ret = mrq->stop->error;
	if (!ret && mrq->data->bytes_xfered !=
		mrq->data->blocks * mrq->data->blksz)
		ret = RESULT_FAIL;

	if (ret == -EINVAL)
		ret = RESULT_UNSUP_HOST;

	return ret;
}

/*
 * Checks that a "short transfer" behaved as expected
 */
static int mmc_test_check_broken_result(struct mmc_test_card *test,
	struct mmc_request *mrq)
{
	int ret;

	if (WARN_ON(!mrq || !mrq->cmd || !mrq->data))
		return -EINVAL;

	ret = 0;

	if (!ret && mrq->cmd->error)
		ret = mrq->cmd->error;
	if (!ret && mrq->data->error == 0)
		ret = RESULT_FAIL;
	if (!ret && mrq->data->error != -ETIMEDOUT)
		ret = mrq->data->error;
	if (!ret && mrq->stop && mrq->stop->error)
		ret = mrq->stop->error;
	if (mrq->data->blocks > 1) {
		if (!ret && mrq->data->bytes_xfered > mrq->data->blksz)
			ret = RESULT_FAIL;
	} else {
		if (!ret && mrq->data->bytes_xfered > 0)
			ret = RESULT_FAIL;
	}

	if (ret == -EINVAL)
		ret = RESULT_UNSUP_HOST;

	return ret;
}

struct mmc_test_req {
	struct mmc_request mrq;
	struct mmc_command sbc;
	struct mmc_command cmd;
	struct mmc_command stop;
	struct mmc_command status;
	struct mmc_data data;
};

/*
 * Reset a test request to a clean state and point the embedded mmc_request
 * at its own command, data and stop structures
 */
static void mmc_test_req_reset(struct mmc_test_req *rq)
{
	memset(rq, 0, sizeof(struct mmc_test_req));

	rq->mrq.cmd = &rq->cmd;
	rq->mrq.data = &rq->data;
	rq->mrq.stop = &rq->stop;
}

static struct mmc_test_req *mmc_test_req_alloc(void)
{
	struct mmc_test_req *rq = kmalloc(sizeof(*rq), GFP_KERNEL);

	if (rq)
		mmc_test_req_reset(rq);

	return rq;
}

static void mmc_test_wait_done(struct mmc_request *mrq)
{
	complete(&mrq->completion);
}

static int mmc_test_start_areq(struct mmc_test_card *test,
			       struct mmc_request *mrq,
			       struct mmc_request *prev_mrq)
{
	struct mmc_host *host = test->card->host;
	int err = 0;

	if (mrq) {
		init_completion(&mrq->completion);
		mrq->done = mmc_test_wait_done;
		mmc_pre_req(host, mrq);
	}

	if (prev_mrq) {
		wait_for_completion(&prev_mrq->completion);
		err = mmc_test_wait_busy(test);
		if (!err)
			err = mmc_test_check_result(test, prev_mrq);
	}

	if (!err && mrq) {
		err = mmc_start_request(host, mrq);
		if (err)
			mmc_retune_release(host);
	}

	if (prev_mrq)
		mmc_post_req(host, prev_mrq, 0);

	if (err && mrq)
		mmc_post_req(host, mrq, err);

	return err;
}

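/*
 * Test non-blocking transfers with certain parameters.
 *
 * Two requests are kept in flight in a double-buffered fashion: while the
 * controller works on the current request, the next one is prepared with
 * mmc_pre_req() (e.g. DMA mapping), and completion of the previous one is
 * reaped with mmc_post_req().  Steady state per iteration, as implemented
 * by mmc_test_start_areq() above:
 *
 *	pre_req(next) -> wait(prev) -> check(prev) -> start(next)
 *	              -> post_req(prev)
 */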
static int mmc_test_nonblock_transfer(struct mmc_test_card *test,
				      struct scatterlist *sg, unsigned sg_len,
				      unsigned dev_addr, unsigned blocks,
				      unsigned blksz, int write, int count)
{
	struct mmc_test_req *rq1, *rq2;
	struct mmc_request *mrq, *prev_mrq;
	int i;
	int ret = RESULT_OK;

	rq1 = mmc_test_req_alloc();
	rq2 = mmc_test_req_alloc();
	if (!rq1 || !rq2) {
		ret = RESULT_FAIL;
		goto err;
	}

	mrq = &rq1->mrq;
	prev_mrq = NULL;

	for (i = 0; i < count; i++) {
		mmc_test_req_reset(container_of(mrq, struct mmc_test_req, mrq));
		mmc_test_prepare_mrq(test, mrq, sg, sg_len, dev_addr, blocks,
				     blksz, write);
		ret = mmc_test_start_areq(test, mrq, prev_mrq);
		if (ret)
			goto err;

		if (!prev_mrq)
			prev_mrq = &rq2->mrq;

		swap(mrq, prev_mrq);
		dev_addr += blocks;
	}

	ret = mmc_test_start_areq(test, NULL, prev_mrq);
err:
	kfree(rq1);
	kfree(rq2);
	return ret;
}

/*
 * Tests a basic transfer with certain parameters
 */
static int mmc_test_simple_transfer(struct mmc_test_card *test,
	struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
	unsigned blocks, unsigned blksz, int write)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_command stop = {};
	struct mmc_data data = {};

	mrq.cmd = &cmd;
	mrq.data = &data;
	mrq.stop = &stop;

	mmc_test_prepare_mrq(test, &mrq, sg, sg_len, dev_addr,
		blocks, blksz, write);

	mmc_wait_for_req(test->card->host, &mrq);

	mmc_test_wait_busy(test);

	return mmc_test_check_result(test, &mrq);
}

/*
 * Tests a transfer where the card will fail completely or partly
 */
static int mmc_test_broken_transfer(struct mmc_test_card *test,
	unsigned blocks, unsigned blksz, int write)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_command stop = {};
	struct mmc_data data = {};

	struct scatterlist sg;

	mrq.cmd = &cmd;
	mrq.data = &data;
	mrq.stop = &stop;

	sg_init_one(&sg, test->buffer, blocks * blksz);

	mmc_test_prepare_mrq(test, &mrq, &sg, 1, 0, blocks, blksz, write);
	mmc_test_prepare_broken_mrq(test, &mrq, write);

	mmc_wait_for_req(test->card->host, &mrq);

	mmc_test_wait_busy(test);

	return mmc_test_check_broken_result(test, &mrq);
}

/*
 * Does a complete transfer test where data is also validated
 *
 * Note: mmc_test_prepare() must have been done before this call
 */
static int mmc_test_transfer(struct mmc_test_card *test,
	struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
	unsigned blocks, unsigned blksz, int write)
{
	int ret, i;
	unsigned long flags;

	if (write) {
		for (i = 0; i < blocks * blksz; i++)
			test->scratch[i] = i;
	} else {
		memset(test->scratch, 0, BUFFER_SIZE);
	}
	local_irq_save(flags);
	sg_copy_from_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
	local_irq_restore(flags);

	ret = mmc_test_set_blksize(test, blksz);
	if (ret)
		return ret;

	ret = mmc_test_simple_transfer(test, sg, sg_len, dev_addr,
		blocks, blksz, write);
	if (ret)
		return ret;

	if (write) {
		int sectors;

		ret = mmc_test_set_blksize(test, 512);
		if (ret)
			return ret;

		sectors = (blocks * blksz + 511) / 512;
		if ((sectors * 512) == (blocks * blksz))
			sectors++;

		if ((sectors * 512) > BUFFER_SIZE)
			return -EINVAL;

		memset(test->buffer, 0, sectors * 512);

		for (i = 0; i < sectors; i++) {
			ret = mmc_test_buffer_transfer(test,
				test->buffer + i * 512,
				dev_addr + i, 512, 0);
			if (ret)
				return ret;
		}

		for (i = 0; i < blocks * blksz; i++) {
			if (test->buffer[i] != (u8)i)
				return RESULT_FAIL;
		}

		for (; i < sectors * 512; i++) {
			if (test->buffer[i] != 0xDF)
				return RESULT_FAIL;
		}
	} else {
		local_irq_save(flags);
		sg_copy_to_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
		local_irq_restore(flags);
		for (i = 0; i < blocks * blksz; i++) {
			if (test->scratch[i] != (u8)i)
				return RESULT_FAIL;
		}
	}

	return 0;
}
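
/*
 * The write-verification pass above relies on __mmc_test_prepare() having
 * pre-filled the area with 0xDF: everything the transfer wrote must read
 * back as the 0, 1, 2, ... pattern, and the tail of the last (guard)
 * sector must still hold 0xDF, which catches a card that writes past the
 * requested length.  E.g. a 1-block, 256-byte write is checked against
 * one full 512-byte sector: bytes 0-255 pattern, bytes 256-511 0xDF.
 */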

/*******************************************************************/
/*  Tests                                                          */
/*******************************************************************/

struct mmc_test_case {
	const char *name;

	int (*prepare)(struct mmc_test_card *);
	int (*run)(struct mmc_test_card *);
	int (*cleanup)(struct mmc_test_card *);
};

static int mmc_test_basic_write(struct mmc_test_card *test)
{
	int ret;
	struct scatterlist sg;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	sg_init_one(&sg, test->buffer, 512);

	return mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 1);
}

static int mmc_test_basic_read(struct mmc_test_card *test)
{
	int ret;
	struct scatterlist sg;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	sg_init_one(&sg, test->buffer, 512);

	return mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 0);
}

static int mmc_test_verify_write(struct mmc_test_card *test)
{
	struct scatterlist sg;

	sg_init_one(&sg, test->buffer, 512);

	return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
}

static int mmc_test_verify_read(struct mmc_test_card *test)
{
	struct scatterlist sg;

	sg_init_one(&sg, test->buffer, 512);

	return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
}

static int mmc_test_multi_write(struct mmc_test_card *test)
{
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	sg_init_one(&sg, test->buffer, size);

	return mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 1);
}

static int mmc_test_multi_read(struct mmc_test_card *test)
{
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	sg_init_one(&sg, test->buffer, size);

	return mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 0);
}

static int mmc_test_pow2_write(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	if (!test->card->csd.write_partial)
		return RESULT_UNSUP_CARD;

	for (i = 1; i < 512; i <<= 1) {
		sg_init_one(&sg, test->buffer, i);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_pow2_read(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	if (!test->card->csd.read_partial)
		return RESULT_UNSUP_CARD;

	for (i = 1; i < 512; i <<= 1) {
		sg_init_one(&sg, test->buffer, i);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_weird_write(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	if (!test->card->csd.write_partial)
		return RESULT_UNSUP_CARD;

	for (i = 3; i < 512; i += 7) {
		sg_init_one(&sg, test->buffer, i);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_weird_read(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	if (!test->card->csd.read_partial)
		return RESULT_UNSUP_CARD;

	for (i = 3; i < 512; i += 7) {
		sg_init_one(&sg, test->buffer, i);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_align_write(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	for (i = 1; i < TEST_ALIGN_END; i++) {
		sg_init_one(&sg, test->buffer + i, 512);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_align_read(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	for (i = 1; i < TEST_ALIGN_END; i++) {
		sg_init_one(&sg, test->buffer + i, 512);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_align_multi_write(struct mmc_test_card *test)
{
	int ret, i;
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	for (i = 1; i < TEST_ALIGN_END; i++) {
		sg_init_one(&sg, test->buffer + i, size);
		ret = mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 1);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_align_multi_read(struct mmc_test_card *test)
{
	int ret, i;
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	for (i = 1; i < TEST_ALIGN_END; i++) {
		sg_init_one(&sg, test->buffer + i, size);
		ret = mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 0);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_xfersize_write(struct mmc_test_card *test)
{
	int ret;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	return mmc_test_broken_transfer(test, 1, 512, 1);
}

static int mmc_test_xfersize_read(struct mmc_test_card *test)
{
	int ret;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	return mmc_test_broken_transfer(test, 1, 512, 0);
}

static int mmc_test_multi_xfersize_write(struct mmc_test_card *test)
{
	int ret;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	return mmc_test_broken_transfer(test, 2, 512, 1);
}

static int mmc_test_multi_xfersize_read(struct mmc_test_card *test)
{
	int ret;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	return mmc_test_broken_transfer(test, 2, 512, 0);
}

#ifdef CONFIG_HIGHMEM

static int mmc_test_write_high(struct mmc_test_card *test)
{
	struct scatterlist sg;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, test->highmem, 512, 0);

	return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
}

static int mmc_test_read_high(struct mmc_test_card *test)
{
	struct scatterlist sg;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, test->highmem, 512, 0);

	return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
}

static int mmc_test_multi_write_high(struct mmc_test_card *test)
{
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, test->highmem, size, 0);

	return mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 1);
}

static int mmc_test_multi_read_high(struct mmc_test_card *test)
{
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, test->highmem, size, 0);

	return mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 0);
}

#else

static int mmc_test_no_highmem(struct mmc_test_card *test)
{
	pr_info("%s: Highmem not configured - test skipped\n",
	       mmc_hostname(test->card->host));
	return 0;
}

#endif /* CONFIG_HIGHMEM */

/*
 * Map sz bytes so that it can be transferred.
 */
static int mmc_test_area_map(struct mmc_test_card *test, unsigned long sz,
			     int max_scatter, int min_sg_len)
{
	struct mmc_test_area *t = &test->area;
	int err;

	t->blocks = sz >> 9;

	if (max_scatter) {
		err = mmc_test_map_sg_max_scatter(t->mem, sz, t->sg,
						  t->max_segs, t->max_seg_sz,
						  &t->sg_len);
	} else {
		err = mmc_test_map_sg(t->mem, sz, t->sg, 1, t->max_segs,
				      t->max_seg_sz, &t->sg_len, min_sg_len);
	}
	if (err)
		pr_info("%s: Failed to map sg list\n",
		       mmc_hostname(test->card->host));
	return err;
}

/*
 * Transfer bytes mapped by mmc_test_area_map().
 */
static int mmc_test_area_transfer(struct mmc_test_card *test,
				  unsigned int dev_addr, int write)
{
	struct mmc_test_area *t = &test->area;

	return mmc_test_simple_transfer(test, t->sg, t->sg_len, dev_addr,
					t->blocks, 512, write);
}

/*
 * Map and transfer bytes for multiple transfers.
 */
static int mmc_test_area_io_seq(struct mmc_test_card *test, unsigned long sz,
				unsigned int dev_addr, int write,
				int max_scatter, int timed, int count,
				bool nonblock, int min_sg_len)
{
	struct timespec64 ts1, ts2;
	int ret = 0;
	int i;
	struct mmc_test_area *t = &test->area;

	/*
	 * In the case of a maximally scattered transfer, the maximum transfer
	 * size is further limited by using PAGE_SIZE segments.
	 */
	if (max_scatter) {
		unsigned long max_tfr;

		if (t->max_seg_sz >= PAGE_SIZE)
			max_tfr = t->max_segs * PAGE_SIZE;
		else
			max_tfr = t->max_segs * t->max_seg_sz;
		if (sz > max_tfr)
			sz = max_tfr;
	}

	ret = mmc_test_area_map(test, sz, max_scatter, min_sg_len);
	if (ret)
		return ret;

	if (timed)
		ktime_get_ts64(&ts1);
	if (nonblock)
		ret = mmc_test_nonblock_transfer(test, t->sg, t->sg_len,
						 dev_addr, t->blocks, 512,
						 write, count);
	else
		for (i = 0; i < count && ret == 0; i++) {
			ret = mmc_test_area_transfer(test, dev_addr, write);
			dev_addr += sz >> 9;
		}

	if (ret)
		return ret;

	if (timed)
		ktime_get_ts64(&ts2);

	if (timed)
		mmc_test_print_avg_rate(test, sz, count, &ts1, &ts2);

	return 0;
}

static int mmc_test_area_io(struct mmc_test_card *test, unsigned long sz,
			    unsigned int dev_addr, int write, int max_scatter,
			    int timed)
{
	return mmc_test_area_io_seq(test, sz, dev_addr, write, max_scatter,
				    timed, 1, false, 0);
}

/*
 * Write the test area entirely.
 */
static int mmc_test_area_fill(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;

	return mmc_test_area_io(test, t->max_tfr, t->dev_addr, 1, 0, 0);
}

/*
 * Erase the test area entirely.
 */
static int mmc_test_area_erase(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;

	if (!mmc_can_erase(test->card))
		return 0;

	return mmc_erase(test->card, t->dev_addr, t->max_sz >> 9,
			 MMC_ERASE_ARG);
}

/*
 * Cleanup struct mmc_test_area.
 */
static int mmc_test_area_cleanup(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;

	kfree(t->sg);
	mmc_test_free_mem(t->mem);

	return 0;
}

/*
 * Initialize an area for testing large transfers.  The test area is set to the
 * middle of the card because cards may have different characteristics at the
 * front (for FAT file system optimization).  Optionally, the area is erased
 * (if the card supports it) which may improve write performance.  Optionally,
 * the area is filled with data for subsequent read tests.
 */
static int mmc_test_area_init(struct mmc_test_card *test, int erase, int fill)
{
	struct mmc_test_area *t = &test->area;
	unsigned long min_sz = 64 * 1024, sz;
	int ret;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	/* Make the test area size about 4MiB */
	sz = (unsigned long)test->card->pref_erase << 9;
	t->max_sz = sz;
	while (t->max_sz < 4 * 1024 * 1024)
		t->max_sz += sz;
	while (t->max_sz > TEST_AREA_MAX_SIZE && t->max_sz > sz)
		t->max_sz -= sz;

	t->max_segs = test->card->host->max_segs;
	t->max_seg_sz = test->card->host->max_seg_size;
	t->max_seg_sz -= t->max_seg_sz % 512;

	t->max_tfr = t->max_sz;
	if (t->max_tfr >> 9 > test->card->host->max_blk_count)
		t->max_tfr = test->card->host->max_blk_count << 9;
	if (t->max_tfr > test->card->host->max_req_size)
		t->max_tfr = test->card->host->max_req_size;
	if (t->max_tfr / t->max_seg_sz > t->max_segs)
		t->max_tfr = t->max_segs * t->max_seg_sz;

	/*
	 * Try to allocate enough memory for a max. sized transfer.  Less is OK
	 * because the same memory can be mapped into the scatterlist more than
	 * once.  Also, take into account the limits imposed on scatterlist
	 * segments by the host driver.
	 */
	t->mem = mmc_test_alloc_mem(min_sz, t->max_tfr, t->max_segs,
				    t->max_seg_sz);
	if (!t->mem)
		return -ENOMEM;

	t->sg = kmalloc_array(t->max_segs, sizeof(*t->sg), GFP_KERNEL);
	if (!t->sg) {
		ret = -ENOMEM;
		goto out_free;
	}

	t->dev_addr = mmc_test_capacity(test->card) / 2;
	t->dev_addr -= t->dev_addr % (t->max_sz >> 9);

	if (erase) {
		ret = mmc_test_area_erase(test);
		if (ret)
			goto out_free;
	}

	if (fill) {
		ret = mmc_test_area_fill(test);
		if (ret)
			goto out_free;
	}

	return 0;

out_free:
	mmc_test_area_cleanup(test);
	return ret;
}
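
/*
 * Sizing example (hypothetical card/host): with pref_erase = 8192 sectors
 * (4 MiB) the loops above settle on max_sz = 4 MiB directly.  If the host
 * is limited to max_blk_count = 2048 and max_req_size = 1 MiB, max_tfr
 * drops to 1 MiB, and with max_segs = 128 segments of max_seg_sz = 64 KiB
 * the segment product (8 MiB) imposes no further cut.
 */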

/*
 * Prepare for large transfers.  Do not erase the test area.
 */
static int mmc_test_area_prepare(struct mmc_test_card *test)
{
	return mmc_test_area_init(test, 0, 0);
}

/*
 * Prepare for large transfers.  Do erase the test area.
 */
static int mmc_test_area_prepare_erase(struct mmc_test_card *test)
{
	return mmc_test_area_init(test, 1, 0);
}

/*
 * Prepare for large transfers.  Erase and fill the test area.
 */
static int mmc_test_area_prepare_fill(struct mmc_test_card *test)
{
	return mmc_test_area_init(test, 1, 1);
}

/*
 * Test best-case performance.  Best-case performance is expected from
 * a single large transfer.
 *
 * An additional option (max_scatter) allows the measurement of the same
 * transfer but with no contiguous pages in the scatter list.  This tests
 * the efficiency of DMA to handle scattered pages.
 */
static int mmc_test_best_performance(struct mmc_test_card *test, int write,
				     int max_scatter)
{
	struct mmc_test_area *t = &test->area;

	return mmc_test_area_io(test, t->max_tfr, t->dev_addr, write,
				max_scatter, 1);
}

/*
 * Best-case read performance.
 */
static int mmc_test_best_read_performance(struct mmc_test_card *test)
{
	return mmc_test_best_performance(test, 0, 0);
}

/*
 * Best-case write performance.
 */
static int mmc_test_best_write_performance(struct mmc_test_card *test)
{
	return mmc_test_best_performance(test, 1, 0);
}

/*
 * Best-case read performance into scattered pages.
 */
static int mmc_test_best_read_perf_max_scatter(struct mmc_test_card *test)
{
	return mmc_test_best_performance(test, 0, 1);
}

/*
 * Best-case write performance from scattered pages.
 */
static int mmc_test_best_write_perf_max_scatter(struct mmc_test_card *test)
{
	return mmc_test_best_performance(test, 1, 1);
}

/*
 * Single read performance by transfer size.
 */
static int mmc_test_profile_read_perf(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;
	unsigned long sz;
	unsigned int dev_addr;
	int ret;

	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
		dev_addr = t->dev_addr + (sz >> 9);
		ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
		if (ret)
			return ret;
	}
	sz = t->max_tfr;
	dev_addr = t->dev_addr;
	return mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
}

/*
 * Single write performance by transfer size.
 */
static int mmc_test_profile_write_perf(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;
	unsigned long sz;
	unsigned int dev_addr;
	int ret;

	ret = mmc_test_area_erase(test);
	if (ret)
		return ret;
	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
		dev_addr = t->dev_addr + (sz >> 9);
		ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
		if (ret)
			return ret;
	}
	ret = mmc_test_area_erase(test);
	if (ret)
		return ret;
	sz = t->max_tfr;
	dev_addr = t->dev_addr;
	return mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
}

/*
 * Single trim performance by transfer size.
 */
static int mmc_test_profile_trim_perf(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;
	unsigned long sz;
	unsigned int dev_addr;
	struct timespec64 ts1, ts2;
	int ret;

	if (!mmc_can_trim(test->card))
		return RESULT_UNSUP_CARD;

	if (!mmc_can_erase(test->card))
		return RESULT_UNSUP_HOST;

	for (sz = 512; sz < t->max_sz; sz <<= 1) {
		dev_addr = t->dev_addr + (sz >> 9);
		ktime_get_ts64(&ts1);
		ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
		if (ret)
			return ret;
		ktime_get_ts64(&ts2);
		mmc_test_print_rate(test, sz, &ts1, &ts2);
	}
	dev_addr = t->dev_addr;
	ktime_get_ts64(&ts1);
	ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
	if (ret)
		return ret;
	ktime_get_ts64(&ts2);
	mmc_test_print_rate(test, sz, &ts1, &ts2);
	return 0;
}

static int mmc_test_seq_read_perf(struct mmc_test_card *test, unsigned long sz)
{
	struct mmc_test_area *t = &test->area;
	unsigned int dev_addr, i, cnt;
	struct timespec64 ts1, ts2;
	int ret;

	cnt = t->max_sz / sz;
	dev_addr = t->dev_addr;
	ktime_get_ts64(&ts1);
	for (i = 0; i < cnt; i++) {
		ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 0);
		if (ret)
			return ret;
		dev_addr += (sz >> 9);
	}
	ktime_get_ts64(&ts2);
	mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
	return 0;
}

/*
 * Consecutive read performance by transfer size.
 */
static int mmc_test_profile_seq_read_perf(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;
	unsigned long sz;
	int ret;

	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
		ret = mmc_test_seq_read_perf(test, sz);
		if (ret)
			return ret;
	}
	sz = t->max_tfr;
	return mmc_test_seq_read_perf(test, sz);
}

static int mmc_test_seq_write_perf(struct mmc_test_card *test, unsigned long sz)
{
	struct mmc_test_area *t = &test->area;
	unsigned int dev_addr, i, cnt;
	struct timespec64 ts1, ts2;
	int ret;

	ret = mmc_test_area_erase(test);
	if (ret)
		return ret;
	cnt = t->max_sz / sz;
	dev_addr = t->dev_addr;
	ktime_get_ts64(&ts1);
	for (i = 0; i < cnt; i++) {
		ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 0);
		if (ret)
			return ret;
		dev_addr += (sz >> 9);
	}
	ktime_get_ts64(&ts2);
	mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
	return 0;
}

/*
 * Consecutive write performance by transfer size.
 */
static int mmc_test_profile_seq_write_perf(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;
	unsigned long sz;
	int ret;

	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
		ret = mmc_test_seq_write_perf(test, sz);
		if (ret)
			return ret;
	}
	sz = t->max_tfr;
	return mmc_test_seq_write_perf(test, sz);
}

/*
 * Consecutive trim performance by transfer size.
 */
static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;
	unsigned long sz;
	unsigned int dev_addr, i, cnt;
	struct timespec64 ts1, ts2;
	int ret;

	if (!mmc_can_trim(test->card))
		return RESULT_UNSUP_CARD;

	if (!mmc_can_erase(test->card))
		return RESULT_UNSUP_HOST;

	for (sz = 512; sz <= t->max_sz; sz <<= 1) {
		ret = mmc_test_area_erase(test);
		if (ret)
			return ret;
		ret = mmc_test_area_fill(test);
		if (ret)
			return ret;
		cnt = t->max_sz / sz;
		dev_addr = t->dev_addr;
		ktime_get_ts64(&ts1);
		for (i = 0; i < cnt; i++) {
			ret = mmc_erase(test->card, dev_addr, sz >> 9,
					MMC_TRIM_ARG);
			if (ret)
				return ret;
			dev_addr += (sz >> 9);
		}
		ktime_get_ts64(&ts2);
		mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
	}
	return 0;
}

static unsigned int rnd_next = 1;

static unsigned int mmc_test_rnd_num(unsigned int rnd_cnt)
{
	uint64_t r;

	rnd_next = rnd_next * 1103515245 + 12345;
	r = (rnd_next >> 16) & 0x7fff;
	return (r * rnd_cnt) >> 15;
}
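
/*
 * This is the classic ANSI C rand() linear congruential generator (the
 * 1103515245 / 12345 constants).  It is deliberately deterministic so a
 * test run is repeatable, and mmc_test_random_perf() below replays the
 * same sequence by saving and restoring rnd_next.  mmc_test_rnd_num(n)
 * returns a value in [0, n).
 */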

static int mmc_test_rnd_perf(struct mmc_test_card *test, int write, int print,
			     unsigned long sz)
{
	unsigned int dev_addr, cnt, rnd_addr, range1, range2, last_ea = 0, ea;
	unsigned int ssz;
	struct timespec64 ts1, ts2, ts;
	int ret;

	ssz = sz >> 9;

	rnd_addr = mmc_test_capacity(test->card) / 4;
	range1 = rnd_addr / test->card->pref_erase;
	range2 = range1 / ssz;

	ktime_get_ts64(&ts1);
	for (cnt = 0; cnt < UINT_MAX; cnt++) {
		ktime_get_ts64(&ts2);
		ts = timespec64_sub(ts2, ts1);
		if (ts.tv_sec >= 10)
			break;
		ea = mmc_test_rnd_num(range1);
		if (ea == last_ea)
			ea -= 1;
		last_ea = ea;
		dev_addr = rnd_addr + test->card->pref_erase * ea +
			   ssz * mmc_test_rnd_num(range2);
		ret = mmc_test_area_io(test, sz, dev_addr, write, 0, 0);
		if (ret)
			return ret;
	}
	if (print)
		mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
	return 0;
}

static int mmc_test_random_perf(struct mmc_test_card *test, int write)
{
	struct mmc_test_area *t = &test->area;
	unsigned int next;
	unsigned long sz;
	int ret;

	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
		/*
		 * When writing, try to get more consistent results by running
		 * the test twice with exactly the same I/O but outputting the
		 * results only for the 2nd run.
		 */
		if (write) {
			next = rnd_next;
			ret = mmc_test_rnd_perf(test, write, 0, sz);
			if (ret)
				return ret;
			rnd_next = next;
		}
		ret = mmc_test_rnd_perf(test, write, 1, sz);
		if (ret)
			return ret;
	}
	sz = t->max_tfr;
	if (write) {
		next = rnd_next;
		ret = mmc_test_rnd_perf(test, write, 0, sz);
		if (ret)
			return ret;
		rnd_next = next;
	}
	return mmc_test_rnd_perf(test, write, 1, sz);
}

/*
 * Random read performance by transfer size.
 */
static int mmc_test_random_read_perf(struct mmc_test_card *test)
{
	return mmc_test_random_perf(test, 0);
}

/*
 * Random write performance by transfer size.
 */
static int mmc_test_random_write_perf(struct mmc_test_card *test)
{
	return mmc_test_random_perf(test, 1);
}

static int mmc_test_seq_perf(struct mmc_test_card *test, int write,
			     unsigned int tot_sz, int max_scatter)
{
	struct mmc_test_area *t = &test->area;
	unsigned int dev_addr, i, cnt, sz, ssz;
	struct timespec64 ts1, ts2;
	int ret;

	sz = t->max_tfr;

	/*
	 * In the case of a maximally scattered transfer, the maximum transfer
	 * size is further limited by using PAGE_SIZE segments.
	 */
	if (max_scatter) {
		unsigned long max_tfr;

		if (t->max_seg_sz >= PAGE_SIZE)
			max_tfr = t->max_segs * PAGE_SIZE;
		else
			max_tfr = t->max_segs * t->max_seg_sz;
		if (sz > max_tfr)
			sz = max_tfr;
	}

	ssz = sz >> 9;
	dev_addr = mmc_test_capacity(test->card) / 4;
	if (tot_sz > dev_addr << 9)
		tot_sz = dev_addr << 9;
	cnt = tot_sz / sz;
	dev_addr &= 0xffff0000; /* Round to 32MiB boundary (0x10000 sectors) */
2019 
2020 	ktime_get_ts64(&ts1);
2021 	for (i = 0; i < cnt; i++) {
2022 		ret = mmc_test_area_io(test, sz, dev_addr, write,
2023 				       max_scatter, 0);
2024 		if (ret)
2025 			return ret;
2026 		dev_addr += ssz;
2027 	}
2028 	ktime_get_ts64(&ts2);
2029 
2030 	mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
2031 
2032 	return 0;
2033 }
2034 
2035 static int mmc_test_large_seq_perf(struct mmc_test_card *test, int write)
2036 {
2037 	int ret, i;
2038 
2039 	for (i = 0; i < 10; i++) {
2040 		ret = mmc_test_seq_perf(test, write, 10 * 1024 * 1024, 1);
2041 		if (ret)
2042 			return ret;
2043 	}
2044 	for (i = 0; i < 5; i++) {
2045 		ret = mmc_test_seq_perf(test, write, 100 * 1024 * 1024, 1);
2046 		if (ret)
2047 			return ret;
2048 	}
2049 	for (i = 0; i < 3; i++) {
2050 		ret = mmc_test_seq_perf(test, write, 1000 * 1024 * 1024, 1);
2051 		if (ret)
2052 			return ret;
2053 	}
2054 
2055 	return ret;
2056 }
2057 
2058 /*
2059  * Large sequential read performance.
2060  */
2061 static int mmc_test_large_seq_read_perf(struct mmc_test_card *test)
2062 {
2063 	return mmc_test_large_seq_perf(test, 0);
2064 }
2065 
2066 /*
2067  * Large sequential write performance.
2068  */
2069 static int mmc_test_large_seq_write_perf(struct mmc_test_card *test)
2070 {
2071 	return mmc_test_large_seq_perf(test, 1);
2072 }
2073 
2074 static int mmc_test_rw_multiple(struct mmc_test_card *test,
2075 				struct mmc_test_multiple_rw *tdata,
2076 				unsigned int reqsize, unsigned int size,
2077 				int min_sg_len)
2078 {
2079 	unsigned int dev_addr;
2080 	struct mmc_test_area *t = &test->area;
2081 	int ret = 0;
2082 
2083 	/* Set up test area */
2084 	if (size > mmc_test_capacity(test->card) / 2 * 512)
2085 		size = mmc_test_capacity(test->card) / 2 * 512;
2086 	if (reqsize > t->max_tfr)
2087 		reqsize = t->max_tfr;
2088 	dev_addr = mmc_test_capacity(test->card) / 4;
2089 	if (dev_addr & 0xffff0000)
2090 		dev_addr &= 0xffff0000; /* Round to 32MiB boundary (0x10000 sectors) */
2091 	else
2092 		dev_addr &= 0xfffff800; /* Round to 1MiB boundary (0x800 sectors) */
2093 	if (!dev_addr) {
2094 		ret = -EINVAL;	/* card too small for the test area */
2095 		goto err;
2096 	}
2097 	if (reqsize > size)
2098 		return 0;
2099 	/* Prepare test area */
2100 	if (mmc_can_erase(test->card) &&
2101 	    tdata->prepare & MMC_TEST_PREP_ERASE) {
2102 		ret = mmc_erase(test->card, dev_addr,
2103 				size / 512, MMC_SECURE_ERASE_ARG);
2104 		if (ret)
2105 			ret = mmc_erase(test->card, dev_addr,
2106 					size / 512, MMC_ERASE_ARG);
2107 		if (ret)
2108 			goto err;
2109 	}
2110 
2111 	/* Run test */
2112 	ret = mmc_test_area_io_seq(test, reqsize, dev_addr,
2113 				   tdata->do_write, 0, 1, size / reqsize,
2114 				   tdata->do_nonblock_req, min_sg_len);
2115 	if (ret)
2116 		goto err;
2117 
2118 	return ret;
2119  err:
2120 	pr_info("[%s] error\n", __func__);
2121 	return ret;
2122 }
2123 
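/*
 * Run mmc_test_rw_multiple() once for each request size in rw->bs[].
 * For non-blocking requests the host must implement the pre_req and
 * post_req ops either both or not at all.
 */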
2124 static int mmc_test_rw_multiple_size(struct mmc_test_card *test,
2125 				     struct mmc_test_multiple_rw *rw)
2126 {
2127 	int ret = 0;
2128 	int i;
2129 	void *pre_req = test->card->host->ops->pre_req;
2130 	void *post_req = test->card->host->ops->post_req;
2131 
2132 	if (rw->do_nonblock_req &&
2133 	    ((!pre_req && post_req) || (pre_req && !post_req))) {
2134 		pr_info("error: only one of pre/post is defined\n");
2135 		return -EINVAL;
2136 	}
2137 
2138 	for (i = 0; i < rw->len; i++) {
2139 		ret = mmc_test_rw_multiple(test, rw, rw->bs[i], rw->size, 0);
2140 		if (ret)
2141 			break;
2142 	}
2143 	return ret;
2144 }
2145 
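/*
 * Run mmc_test_rw_multiple() once for each scatterlist length in
 * rw->sg_len[], using a fixed 512 KiB request size.
 */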
2146 static int mmc_test_rw_multiple_sg_len(struct mmc_test_card *test,
2147 				       struct mmc_test_multiple_rw *rw)
2148 {
2149 	int ret = 0;
2150 	int i;
2151 
2152 	for (i = 0; i < rw->len; i++) {
2153 		ret = mmc_test_rw_multiple(test, rw, 512 * 1024, rw->size,
2154 					   rw->sg_len[i]);
2155 		if (ret)
2156 			break;
2157 	}
2158 	return ret;
2159 }
2160 
2161 /*
2162  * Multiple blocking write, 4 KiB to 4 MiB chunks
2163  */
2164 static int mmc_test_profile_mult_write_blocking_perf(struct mmc_test_card *test)
2165 {
2166 	unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
2167 			     1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
2168 	struct mmc_test_multiple_rw test_data = {
2169 		.bs = bs,
2170 		.size = TEST_AREA_MAX_SIZE,
2171 		.len = ARRAY_SIZE(bs),
2172 		.do_write = true,
2173 		.do_nonblock_req = false,
2174 		.prepare = MMC_TEST_PREP_ERASE,
2175 	};
2176 
2177 	return mmc_test_rw_multiple_size(test, &test_data);
2178 }
2179 
2180 /*
2181  * Multiple non-blocking write, 4 KiB to 4 MiB chunks
2182  */
2183 static int mmc_test_profile_mult_write_nonblock_perf(struct mmc_test_card *test)
2184 {
2185 	unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
2186 			     1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
2187 	struct mmc_test_multiple_rw test_data = {
2188 		.bs = bs,
2189 		.size = TEST_AREA_MAX_SIZE,
2190 		.len = ARRAY_SIZE(bs),
2191 		.do_write = true,
2192 		.do_nonblock_req = true,
2193 		.prepare = MMC_TEST_PREP_ERASE,
2194 	};
2195 
2196 	return mmc_test_rw_multiple_size(test, &test_data);
2197 }
2198 
2199 /*
2200  * Multiple blocking read, 4 KiB to 4 MiB chunks
2201  */
2202 static int mmc_test_profile_mult_read_blocking_perf(struct mmc_test_card *test)
2203 {
2204 	unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
2205 			     1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
2206 	struct mmc_test_multiple_rw test_data = {
2207 		.bs = bs,
2208 		.size = TEST_AREA_MAX_SIZE,
2209 		.len = ARRAY_SIZE(bs),
2210 		.do_write = false,
2211 		.do_nonblock_req = false,
2212 		.prepare = MMC_TEST_PREP_NONE,
2213 	};
2214 
2215 	return mmc_test_rw_multiple_size(test, &test_data);
2216 }
2217 
2218 /*
2219  * Multiple non-blocking read, 4 KiB to 4 MiB chunks
2220  */
2221 static int mmc_test_profile_mult_read_nonblock_perf(struct mmc_test_card *test)
2222 {
2223 	unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
2224 			     1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
2225 	struct mmc_test_multiple_rw test_data = {
2226 		.bs = bs,
2227 		.size = TEST_AREA_MAX_SIZE,
2228 		.len = ARRAY_SIZE(bs),
2229 		.do_write = false,
2230 		.do_nonblock_req = true,
2231 		.prepare = MMC_TEST_PREP_NONE,
2232 	};
2233 
2234 	return mmc_test_rw_multiple_size(test, &test_data);
2235 }
2236 
2237 /*
2238  * Multiple blocking write, 1 to 512 sg elements
2239  */
2240 static int mmc_test_profile_sglen_wr_blocking_perf(struct mmc_test_card *test)
2241 {
2242 	unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
2243 				 1 << 7, 1 << 8, 1 << 9};
2244 	struct mmc_test_multiple_rw test_data = {
2245 		.sg_len = sg_len,
2246 		.size = TEST_AREA_MAX_SIZE,
2247 		.len = ARRAY_SIZE(sg_len),
2248 		.do_write = true,
2249 		.do_nonblock_req = false,
2250 		.prepare = MMC_TEST_PREP_ERASE,
2251 	};
2252 
2253 	return mmc_test_rw_multiple_sg_len(test, &test_data);
2254 }
2255 
2256 /*
2257  * Multiple non-blocking write, 1 to 512 sg elements
2258  */
2259 static int mmc_test_profile_sglen_wr_nonblock_perf(struct mmc_test_card *test)
2260 {
2261 	unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
2262 				 1 << 7, 1 << 8, 1 << 9};
2263 	struct mmc_test_multiple_rw test_data = {
2264 		.sg_len = sg_len,
2265 		.size = TEST_AREA_MAX_SIZE,
2266 		.len = ARRAY_SIZE(sg_len),
2267 		.do_write = true,
2268 		.do_nonblock_req = true,
2269 		.prepare = MMC_TEST_PREP_ERASE,
2270 	};
2271 
2272 	return mmc_test_rw_multiple_sg_len(test, &test_data);
2273 }
2274 
2275 /*
2276  * Multiple blocking read, 1 to 512 sg elements
2277  */
2278 static int mmc_test_profile_sglen_r_blocking_perf(struct mmc_test_card *test)
2279 {
2280 	unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
2281 				 1 << 7, 1 << 8, 1 << 9};
2282 	struct mmc_test_multiple_rw test_data = {
2283 		.sg_len = sg_len,
2284 		.size = TEST_AREA_MAX_SIZE,
2285 		.len = ARRAY_SIZE(sg_len),
2286 		.do_write = false,
2287 		.do_nonblock_req = false,
2288 		.prepare = MMC_TEST_PREP_NONE,
2289 	};
2290 
2291 	return mmc_test_rw_multiple_sg_len(test, &test_data);
2292 }
2293 
2294 /*
2295  * Multiple non-blocking read, 1 to 512 sg elements
2296  */
2297 static int mmc_test_profile_sglen_r_nonblock_perf(struct mmc_test_card *test)
2298 {
2299 	unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
2300 				 1 << 7, 1 << 8, 1 << 9};
2301 	struct mmc_test_multiple_rw test_data = {
2302 		.sg_len = sg_len,
2303 		.size = TEST_AREA_MAX_SIZE,
2304 		.len = ARRAY_SIZE(sg_len),
2305 		.do_write = false,
2306 		.do_nonblock_req = true,
2307 		.prepare = MMC_TEST_PREP_NONE,
2308 	};
2309 
2310 	return mmc_test_rw_multiple_sg_len(test, &test_data);
2311 }
2312 
2313 /*
2314  * eMMC hardware reset.
2315  */
2316 static int mmc_test_reset(struct mmc_test_card *test)
2317 {
2318 	struct mmc_card *card = test->card;
2319 	struct mmc_host *host = card->host;
2320 	int err;
2321 
2322 	err = mmc_hw_reset(host);
2323 	if (!err) {
2324 		/*
2325 		 * Reset will re-enable the card's command queue, but tests
2326 		 * expect it to be disabled.
2327 		 */
2328 		if (card->ext_csd.cmdq_en)
2329 			mmc_cmdq_disable(card);
2330 		return RESULT_OK;
2331 	} else if (err == -EOPNOTSUPP) {
2332 		return RESULT_UNSUP_HOST;
2333 	}
2334 
2335 	return RESULT_FAIL;
2336 }
2337 
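/*
 * Issue CMD13 (SEND_STATUS).  The RCA argument is only set for native
 * hosts, since SPI-mode cards do not use relative card addresses.
 */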
2338 static int mmc_test_send_status(struct mmc_test_card *test,
2339 				struct mmc_command *cmd)
2340 {
2341 	memset(cmd, 0, sizeof(*cmd));
2342 
2343 	cmd->opcode = MMC_SEND_STATUS;
2344 	if (!mmc_host_is_spi(test->card->host))
2345 		cmd->arg = test->card->rca << 16;
2346 	cmd->flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
2347 
2348 	return mmc_wait_for_cmd(test->card->host, cmd, 0);
2349 }
2350 
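/*
 * Start a data transfer with cap_cmd_during_tfr set and send CMD13
 * while the transfer is in flight.  If repeat_cmd is set, keep polling
 * until the card has returned to the Tran state (or the request
 * completes); otherwise send only a single status command.
 */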
2351 static int mmc_test_ongoing_transfer(struct mmc_test_card *test,
2352 				     unsigned int dev_addr, int use_sbc,
2353 				     int repeat_cmd, int write, int use_areq)
2354 {
2355 	struct mmc_test_req *rq = mmc_test_req_alloc();
2356 	struct mmc_host *host = test->card->host;
2357 	struct mmc_test_area *t = &test->area;
2358 	struct mmc_request *mrq;
2359 	unsigned long timeout;
2360 	bool expired = false;
2361 	int ret = 0, cmd_ret;
2362 	u32 status = 0;
2363 	int count = 0;
2364 
2365 	if (!rq)
2366 		return -ENOMEM;
2367 
2368 	mrq = &rq->mrq;
2369 	if (use_sbc)
2370 		mrq->sbc = &rq->sbc;
2371 	mrq->cap_cmd_during_tfr = true;
2372 
2373 	mmc_test_prepare_mrq(test, mrq, t->sg, t->sg_len, dev_addr, t->blocks,
2374 			     512, write);
2375 
2376 	if (use_sbc && t->blocks > 1 && !mrq->sbc) {
2377 		ret = mmc_host_cmd23(host) ?
2378 		      RESULT_UNSUP_CARD :
2379 		      RESULT_UNSUP_HOST;
2380 		goto out_free;
2381 	}
2382 
2383 	/* Start ongoing data request */
2384 	if (use_areq) {
2385 		ret = mmc_test_start_areq(test, mrq, NULL);
2386 		if (ret)
2387 			goto out_free;
2388 	} else {
2389 		mmc_wait_for_req(host, mrq);
2390 	}
2391 
2392 	timeout = jiffies + msecs_to_jiffies(3000);
2393 	do {
2394 		count += 1;
2395 
2396 		/* Send status command while data transfer in progress */
2397 		cmd_ret = mmc_test_send_status(test, &rq->status);
2398 		if (cmd_ret)
2399 			break;
2400 
2401 		status = rq->status.resp[0];
2402 		if (status & R1_ERROR) {
2403 			cmd_ret = -EIO;
2404 			break;
2405 		}
2406 
2407 		if (mmc_is_req_done(host, mrq))
2408 			break;
2409 
2410 		expired = time_after(jiffies, timeout);
2411 		if (expired) {
2412 			pr_info("%s: timeout waiting for Tran state status %#x\n",
2413 				mmc_hostname(host), status);
2414 			cmd_ret = -ETIMEDOUT;
2415 			break;
2416 		}
2417 	} while (repeat_cmd && R1_CURRENT_STATE(status) != R1_STATE_TRAN);
2418 
2419 	/* Wait for data request to complete */
2420 	if (use_areq) {
2421 		ret = mmc_test_start_areq(test, NULL, mrq);
2422 	} else {
2423 		mmc_wait_for_req_done(test->card->host, mrq);
2424 	}
2425 
2426 	/*
2427 	 * For a cap_cmd_during_tfr request, the upper layer must send the
2428 	 * stop command itself, if required.
2429 	 */
2430 	if (mrq->data->stop && (mrq->data->error || !mrq->sbc)) {
2431 		if (ret)
2432 			mmc_wait_for_cmd(host, mrq->data->stop, 0);
2433 		else
2434 			ret = mmc_wait_for_cmd(host, mrq->data->stop, 0);
2435 	}
2436 
2437 	if (ret)
2438 		goto out_free;
2439 
2440 	if (cmd_ret) {
2441 		pr_info("%s: Send Status failed: status %#x, error %d\n",
2442 			mmc_hostname(test->card->host), status, cmd_ret);
2443 	}
2444 
2445 	ret = mmc_test_check_result(test, mrq);
2446 	if (ret)
2447 		goto out_free;
2448 
2449 	ret = mmc_test_wait_busy(test);
2450 	if (ret)
2451 		goto out_free;
2452 
2453 	if (repeat_cmd && (t->blocks + 1) << 9 > t->max_tfr)
2454 		pr_info("%s: %d commands completed during transfer of %u blocks\n",
2455 			mmc_hostname(test->card->host), count, t->blocks);
2456 
2457 	if (cmd_ret)
2458 		ret = cmd_ret;
2459 out_free:
2460 	kfree(rq);
2461 
2462 	return ret;
2463 }
2464 
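/*
 * Map sz bytes of the test area, then run the ongoing-transfer check
 * twice: once sending a single status command, and once polling
 * repeatedly until the Tran state is reached.
 */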
2465 static int __mmc_test_cmds_during_tfr(struct mmc_test_card *test,
2466 				      unsigned long sz, int use_sbc, int write,
2467 				      int use_areq)
2468 {
2469 	struct mmc_test_area *t = &test->area;
2470 	int ret;
2471 
2472 	if (!(test->card->host->caps & MMC_CAP_CMD_DURING_TFR))
2473 		return RESULT_UNSUP_HOST;
2474 
2475 	ret = mmc_test_area_map(test, sz, 0, 0);
2476 	if (ret)
2477 		return ret;
2478 
2479 	ret = mmc_test_ongoing_transfer(test, t->dev_addr, use_sbc, 0, write,
2480 					use_areq);
2481 	if (ret)
2482 		return ret;
2483 
2484 	return mmc_test_ongoing_transfer(test, t->dev_addr, use_sbc, 1, write,
2485 					 use_areq);
2486 }
2487 
2488 static int mmc_test_cmds_during_tfr(struct mmc_test_card *test, int use_sbc,
2489 				    int write, int use_areq)
2490 {
2491 	struct mmc_test_area *t = &test->area;
2492 	unsigned long sz;
2493 	int ret;
2494 
2495 	for (sz = 512; sz <= t->max_tfr; sz += 512) {
2496 		ret = __mmc_test_cmds_during_tfr(test, sz, use_sbc, write,
2497 						 use_areq);
2498 		if (ret)
2499 			return ret;
2500 	}
2501 	return 0;
2502 }
2503 
2504 /*
2505  * Commands during read - no Set Block Count (CMD23).
2506  */
2507 static int mmc_test_cmds_during_read(struct mmc_test_card *test)
2508 {
2509 	return mmc_test_cmds_during_tfr(test, 0, 0, 0);
2510 }
2511 
2512 /*
2513  * Commands during write - no Set Block Count (CMD23).
2514  */
2515 static int mmc_test_cmds_during_write(struct mmc_test_card *test)
2516 {
2517 	return mmc_test_cmds_during_tfr(test, 0, 1, 0);
2518 }
2519 
2520 /*
2521  * Commands during read - use Set Block Count (CMD23).
2522  */
2523 static int mmc_test_cmds_during_read_cmd23(struct mmc_test_card *test)
2524 {
2525 	return mmc_test_cmds_during_tfr(test, 1, 0, 0);
2526 }
2527 
2528 /*
2529  * Commands during write - use Set Block Count (CMD23).
2530  */
2531 static int mmc_test_cmds_during_write_cmd23(struct mmc_test_card *test)
2532 {
2533 	return mmc_test_cmds_during_tfr(test, 1, 1, 0);
2534 }
2535 
2536 /*
2537  * Commands during non-blocking read - use Set Block Count (CMD23).
2538  */
2539 static int mmc_test_cmds_during_read_cmd23_nonblock(struct mmc_test_card *test)
2540 {
2541 	return mmc_test_cmds_during_tfr(test, 1, 0, 1);
2542 }
2543 
2544 /*
2545  * Commands during non-blocking write - use Set Block Count (CMD23).
2546  */
2547 static int mmc_test_cmds_during_write_cmd23_nonblock(struct mmc_test_card *test)
2548 {
2549 	return mmc_test_cmds_during_tfr(test, 1, 1, 1);
2550 }
2551 
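/*
 * The table of test cases.  The test number written to the debugfs
 * "test" file is the index into this array plus one; prepare and
 * cleanup are optional, run is mandatory.
 */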
2552 static const struct mmc_test_case mmc_test_cases[] = {
2553 	{
2554 		.name = "Basic write (no data verification)",
2555 		.run = mmc_test_basic_write,
2556 	},
2557 
2558 	{
2559 		.name = "Basic read (no data verification)",
2560 		.run = mmc_test_basic_read,
2561 	},
2562 
2563 	{
2564 		.name = "Basic write (with data verification)",
2565 		.prepare = mmc_test_prepare_write,
2566 		.run = mmc_test_verify_write,
2567 		.cleanup = mmc_test_cleanup,
2568 	},
2569 
2570 	{
2571 		.name = "Basic read (with data verification)",
2572 		.prepare = mmc_test_prepare_read,
2573 		.run = mmc_test_verify_read,
2574 		.cleanup = mmc_test_cleanup,
2575 	},
2576 
2577 	{
2578 		.name = "Multi-block write",
2579 		.prepare = mmc_test_prepare_write,
2580 		.run = mmc_test_multi_write,
2581 		.cleanup = mmc_test_cleanup,
2582 	},
2583 
2584 	{
2585 		.name = "Multi-block read",
2586 		.prepare = mmc_test_prepare_read,
2587 		.run = mmc_test_multi_read,
2588 		.cleanup = mmc_test_cleanup,
2589 	},
2590 
2591 	{
2592 		.name = "Power of two block writes",
2593 		.prepare = mmc_test_prepare_write,
2594 		.run = mmc_test_pow2_write,
2595 		.cleanup = mmc_test_cleanup,
2596 	},
2597 
2598 	{
2599 		.name = "Power of two block reads",
2600 		.prepare = mmc_test_prepare_read,
2601 		.run = mmc_test_pow2_read,
2602 		.cleanup = mmc_test_cleanup,
2603 	},
2604 
2605 	{
2606 		.name = "Weird sized block writes",
2607 		.prepare = mmc_test_prepare_write,
2608 		.run = mmc_test_weird_write,
2609 		.cleanup = mmc_test_cleanup,
2610 	},
2611 
2612 	{
2613 		.name = "Weird sized block reads",
2614 		.prepare = mmc_test_prepare_read,
2615 		.run = mmc_test_weird_read,
2616 		.cleanup = mmc_test_cleanup,
2617 	},
2618 
2619 	{
2620 		.name = "Badly aligned write",
2621 		.prepare = mmc_test_prepare_write,
2622 		.run = mmc_test_align_write,
2623 		.cleanup = mmc_test_cleanup,
2624 	},
2625 
2626 	{
2627 		.name = "Badly aligned read",
2628 		.prepare = mmc_test_prepare_read,
2629 		.run = mmc_test_align_read,
2630 		.cleanup = mmc_test_cleanup,
2631 	},
2632 
2633 	{
2634 		.name = "Badly aligned multi-block write",
2635 		.prepare = mmc_test_prepare_write,
2636 		.run = mmc_test_align_multi_write,
2637 		.cleanup = mmc_test_cleanup,
2638 	},
2639 
2640 	{
2641 		.name = "Badly aligned multi-block read",
2642 		.prepare = mmc_test_prepare_read,
2643 		.run = mmc_test_align_multi_read,
2644 		.cleanup = mmc_test_cleanup,
2645 	},
2646 
2647 	{
2648 		.name = "Correct xfer_size at write (start failure)",
2649 		.run = mmc_test_xfersize_write,
2650 	},
2651 
2652 	{
2653 		.name = "Correct xfer_size at read (start failure)",
2654 		.run = mmc_test_xfersize_read,
2655 	},
2656 
2657 	{
2658 		.name = "Correct xfer_size at write (midway failure)",
2659 		.run = mmc_test_multi_xfersize_write,
2660 	},
2661 
2662 	{
2663 		.name = "Correct xfer_size at read (midway failure)",
2664 		.run = mmc_test_multi_xfersize_read,
2665 	},
2666 
2667 #ifdef CONFIG_HIGHMEM
2668 
2669 	{
2670 		.name = "Highmem write",
2671 		.prepare = mmc_test_prepare_write,
2672 		.run = mmc_test_write_high,
2673 		.cleanup = mmc_test_cleanup,
2674 	},
2675 
2676 	{
2677 		.name = "Highmem read",
2678 		.prepare = mmc_test_prepare_read,
2679 		.run = mmc_test_read_high,
2680 		.cleanup = mmc_test_cleanup,
2681 	},
2682 
2683 	{
2684 		.name = "Multi-block highmem write",
2685 		.prepare = mmc_test_prepare_write,
2686 		.run = mmc_test_multi_write_high,
2687 		.cleanup = mmc_test_cleanup,
2688 	},
2689 
2690 	{
2691 		.name = "Multi-block highmem read",
2692 		.prepare = mmc_test_prepare_read,
2693 		.run = mmc_test_multi_read_high,
2694 		.cleanup = mmc_test_cleanup,
2695 	},
2696 
2697 #else
2698 
2699 	{
2700 		.name = "Highmem write",
2701 		.run = mmc_test_no_highmem,
2702 	},
2703 
2704 	{
2705 		.name = "Highmem read",
2706 		.run = mmc_test_no_highmem,
2707 	},
2708 
2709 	{
2710 		.name = "Multi-block highmem write",
2711 		.run = mmc_test_no_highmem,
2712 	},
2713 
2714 	{
2715 		.name = "Multi-block highmem read",
2716 		.run = mmc_test_no_highmem,
2717 	},
2718 
2719 #endif /* CONFIG_HIGHMEM */
2720 
2721 	{
2722 		.name = "Best-case read performance",
2723 		.prepare = mmc_test_area_prepare_fill,
2724 		.run = mmc_test_best_read_performance,
2725 		.cleanup = mmc_test_area_cleanup,
2726 	},
2727 
2728 	{
2729 		.name = "Best-case write performance",
2730 		.prepare = mmc_test_area_prepare_erase,
2731 		.run = mmc_test_best_write_performance,
2732 		.cleanup = mmc_test_area_cleanup,
2733 	},
2734 
2735 	{
2736 		.name = "Best-case read performance into scattered pages",
2737 		.prepare = mmc_test_area_prepare_fill,
2738 		.run = mmc_test_best_read_perf_max_scatter,
2739 		.cleanup = mmc_test_area_cleanup,
2740 	},
2741 
2742 	{
2743 		.name = "Best-case write performance from scattered pages",
2744 		.prepare = mmc_test_area_prepare_erase,
2745 		.run = mmc_test_best_write_perf_max_scatter,
2746 		.cleanup = mmc_test_area_cleanup,
2747 	},
2748 
2749 	{
2750 		.name = "Single read performance by transfer size",
2751 		.prepare = mmc_test_area_prepare_fill,
2752 		.run = mmc_test_profile_read_perf,
2753 		.cleanup = mmc_test_area_cleanup,
2754 	},
2755 
2756 	{
2757 		.name = "Single write performance by transfer size",
2758 		.prepare = mmc_test_area_prepare,
2759 		.run = mmc_test_profile_write_perf,
2760 		.cleanup = mmc_test_area_cleanup,
2761 	},
2762 
2763 	{
2764 		.name = "Single trim performance by transfer size",
2765 		.prepare = mmc_test_area_prepare_fill,
2766 		.run = mmc_test_profile_trim_perf,
2767 		.cleanup = mmc_test_area_cleanup,
2768 	},
2769 
2770 	{
2771 		.name = "Consecutive read performance by transfer size",
2772 		.prepare = mmc_test_area_prepare_fill,
2773 		.run = mmc_test_profile_seq_read_perf,
2774 		.cleanup = mmc_test_area_cleanup,
2775 	},
2776 
2777 	{
2778 		.name = "Consecutive write performance by transfer size",
2779 		.prepare = mmc_test_area_prepare,
2780 		.run = mmc_test_profile_seq_write_perf,
2781 		.cleanup = mmc_test_area_cleanup,
2782 	},
2783 
2784 	{
2785 		.name = "Consecutive trim performance by transfer size",
2786 		.prepare = mmc_test_area_prepare,
2787 		.run = mmc_test_profile_seq_trim_perf,
2788 		.cleanup = mmc_test_area_cleanup,
2789 	},
2790 
2791 	{
2792 		.name = "Random read performance by transfer size",
2793 		.prepare = mmc_test_area_prepare,
2794 		.run = mmc_test_random_read_perf,
2795 		.cleanup = mmc_test_area_cleanup,
2796 	},
2797 
2798 	{
2799 		.name = "Random write performance by transfer size",
2800 		.prepare = mmc_test_area_prepare,
2801 		.run = mmc_test_random_write_perf,
2802 		.cleanup = mmc_test_area_cleanup,
2803 	},
2804 
2805 	{
2806 		.name = "Large sequential read into scattered pages",
2807 		.prepare = mmc_test_area_prepare,
2808 		.run = mmc_test_large_seq_read_perf,
2809 		.cleanup = mmc_test_area_cleanup,
2810 	},
2811 
2812 	{
2813 		.name = "Large sequential write from scattered pages",
2814 		.prepare = mmc_test_area_prepare,
2815 		.run = mmc_test_large_seq_write_perf,
2816 		.cleanup = mmc_test_area_cleanup,
2817 	},
2818 
2819 	{
2820 		.name = "Write performance with blocking req 4k to 4MB",
2821 		.prepare = mmc_test_area_prepare,
2822 		.run = mmc_test_profile_mult_write_blocking_perf,
2823 		.cleanup = mmc_test_area_cleanup,
2824 	},
2825 
2826 	{
2827 		.name = "Write performance with non-blocking req 4k to 4MB",
2828 		.prepare = mmc_test_area_prepare,
2829 		.run = mmc_test_profile_mult_write_nonblock_perf,
2830 		.cleanup = mmc_test_area_cleanup,
2831 	},
2832 
2833 	{
2834 		.name = "Read performance with blocking req 4k to 4MB",
2835 		.prepare = mmc_test_area_prepare,
2836 		.run = mmc_test_profile_mult_read_blocking_perf,
2837 		.cleanup = mmc_test_area_cleanup,
2838 	},
2839 
2840 	{
2841 		.name = "Read performance with non-blocking req 4k to 4MB",
2842 		.prepare = mmc_test_area_prepare,
2843 		.run = mmc_test_profile_mult_read_nonblock_perf,
2844 		.cleanup = mmc_test_area_cleanup,
2845 	},
2846 
2847 	{
2848 		.name = "Write performance blocking req 1 to 512 sg elems",
2849 		.prepare = mmc_test_area_prepare,
2850 		.run = mmc_test_profile_sglen_wr_blocking_perf,
2851 		.cleanup = mmc_test_area_cleanup,
2852 	},
2853 
2854 	{
2855 		.name = "Write performance non-blocking req 1 to 512 sg elems",
2856 		.prepare = mmc_test_area_prepare,
2857 		.run = mmc_test_profile_sglen_wr_nonblock_perf,
2858 		.cleanup = mmc_test_area_cleanup,
2859 	},
2860 
2861 	{
2862 		.name = "Read performance blocking req 1 to 512 sg elems",
2863 		.prepare = mmc_test_area_prepare,
2864 		.run = mmc_test_profile_sglen_r_blocking_perf,
2865 		.cleanup = mmc_test_area_cleanup,
2866 	},
2867 
2868 	{
2869 		.name = "Read performance non-blocking req 1 to 512 sg elems",
2870 		.prepare = mmc_test_area_prepare,
2871 		.run = mmc_test_profile_sglen_r_nonblock_perf,
2872 		.cleanup = mmc_test_area_cleanup,
2873 	},
2874 
2875 	{
2876 		.name = "Reset test",
2877 		.run = mmc_test_reset,
2878 	},
2879 
2880 	{
2881 		.name = "Commands during read - no Set Block Count (CMD23)",
2882 		.prepare = mmc_test_area_prepare,
2883 		.run = mmc_test_cmds_during_read,
2884 		.cleanup = mmc_test_area_cleanup,
2885 	},
2886 
2887 	{
2888 		.name = "Commands during write - no Set Block Count (CMD23)",
2889 		.prepare = mmc_test_area_prepare,
2890 		.run = mmc_test_cmds_during_write,
2891 		.cleanup = mmc_test_area_cleanup,
2892 	},
2893 
2894 	{
2895 		.name = "Commands during read - use Set Block Count (CMD23)",
2896 		.prepare = mmc_test_area_prepare,
2897 		.run = mmc_test_cmds_during_read_cmd23,
2898 		.cleanup = mmc_test_area_cleanup,
2899 	},
2900 
2901 	{
2902 		.name = "Commands during write - use Set Block Count (CMD23)",
2903 		.prepare = mmc_test_area_prepare,
2904 		.run = mmc_test_cmds_during_write_cmd23,
2905 		.cleanup = mmc_test_area_cleanup,
2906 	},
2907 
2908 	{
2909 		.name = "Commands during non-blocking read - use Set Block Count (CMD23)",
2910 		.prepare = mmc_test_area_prepare,
2911 		.run = mmc_test_cmds_during_read_cmd23_nonblock,
2912 		.cleanup = mmc_test_area_cleanup,
2913 	},
2914 
2915 	{
2916 		.name = "Commands during non-blocking write - use Set Block Count (CMD23)",
2917 		.prepare = mmc_test_area_prepare,
2918 		.run = mmc_test_cmds_during_write_cmd23_nonblock,
2919 		.cleanup = mmc_test_area_cleanup,
2920 	},
2921 };
2922 
2923 static DEFINE_MUTEX(mmc_test_lock);
2924 
2925 static LIST_HEAD(mmc_test_result);
2926 
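/*
 * Run a single test case (numbered from 1, as in the "testlist" file),
 * or all of them if testcase is zero.
 */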
2927 static void mmc_test_run(struct mmc_test_card *test, int testcase)
2928 {
2929 	int i, ret;
2930 
2931 	pr_info("%s: Starting tests of card %s...\n",
2932 		mmc_hostname(test->card->host), mmc_card_id(test->card));
2933 
2934 	mmc_claim_host(test->card->host);
2935 
2936 	for (i = 0; i < ARRAY_SIZE(mmc_test_cases); i++) {
2937 		struct mmc_test_general_result *gr;
2938 
2939 		if (testcase && ((i + 1) != testcase))
2940 			continue;
2941 
2942 		pr_info("%s: Test case %d. %s...\n",
2943 			mmc_hostname(test->card->host), i + 1,
2944 			mmc_test_cases[i].name);
2945 
2946 		if (mmc_test_cases[i].prepare) {
2947 			ret = mmc_test_cases[i].prepare(test);
2948 			if (ret) {
2949 				pr_info("%s: Result: Prepare stage failed! (%d)\n",
2950 					mmc_hostname(test->card->host),
2951 					ret);
2952 				continue;
2953 			}
2954 		}
2955 
2956 		gr = kzalloc(sizeof(*gr), GFP_KERNEL);
2957 		if (gr) {
2958 			INIT_LIST_HEAD(&gr->tr_lst);
2959 
2960 			/* Assign the data we already know */
2961 			gr->card = test->card;
2962 			gr->testcase = i;
2963 
2964 			/* Append the container to the global result list */
2965 			list_add_tail(&gr->link, &mmc_test_result);
2966 
2967 			/*
2968 			 * Save a pointer to the newly created container in our
2969 			 * private structure.
2970 			 */
2971 			test->gr = gr;
2972 		}
2973 
2974 		ret = mmc_test_cases[i].run(test);
2975 		switch (ret) {
2976 		case RESULT_OK:
2977 			pr_info("%s: Result: OK\n",
2978 				mmc_hostname(test->card->host));
2979 			break;
2980 		case RESULT_FAIL:
2981 			pr_info("%s: Result: FAILED\n",
2982 				mmc_hostname(test->card->host));
2983 			break;
2984 		case RESULT_UNSUP_HOST:
2985 			pr_info("%s: Result: UNSUPPORTED (by host)\n",
2986 				mmc_hostname(test->card->host));
2987 			break;
2988 		case RESULT_UNSUP_CARD:
2989 			pr_info("%s: Result: UNSUPPORTED (by card)\n",
2990 				mmc_hostname(test->card->host));
2991 			break;
2992 		default:
2993 			pr_info("%s: Result: ERROR (%d)\n",
2994 				mmc_hostname(test->card->host), ret);
2995 		}
2996 
2997 		/* Save the result */
2998 		if (gr)
2999 			gr->result = ret;
3000 
3001 		if (mmc_test_cases[i].cleanup) {
3002 			ret = mmc_test_cases[i].cleanup(test);
3003 			if (ret) {
3004 				pr_info("%s: Warning: Cleanup stage failed! (%d)\n",
3005 					mmc_hostname(test->card->host),
3006 					ret);
3007 			}
3008 		}
3009 	}
3010 
3011 	mmc_release_host(test->card->host);
3012 
3013 	pr_info("%s: Tests completed.\n",
3014 		mmc_hostname(test->card->host));
3015 }
3016 
3017 static void mmc_test_free_result(struct mmc_card *card)
3018 {
3019 	struct mmc_test_general_result *gr, *grs;
3020 
3021 	mutex_lock(&mmc_test_lock);
3022 
3023 	list_for_each_entry_safe(gr, grs, &mmc_test_result, link) {
3024 		struct mmc_test_transfer_result *tr, *trs;
3025 
3026 		if (card && gr->card != card)
3027 			continue;
3028 
3029 		list_for_each_entry_safe(tr, trs, &gr->tr_lst, link) {
3030 			list_del(&tr->link);
3031 			kfree(tr);
3032 		}
3033 
3034 		list_del(&gr->link);
3035 		kfree(gr);
3036 	}
3037 
3038 	mutex_unlock(&mmc_test_lock);
3039 }
3040 
3041 static LIST_HEAD(mmc_test_file_test);
3042 
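/*
 * Print the stored results for this card: one "Test <n>: <result>"
 * line per test case, followed by any transfer measurements as
 * "<count> <sectors> <time> <rate> <iops>" records.
 */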
3043 static int mtf_test_show(struct seq_file *sf, void *data)
3044 {
3045 	struct mmc_card *card = (struct mmc_card *)sf->private;
3046 	struct mmc_test_general_result *gr;
3047 
3048 	mutex_lock(&mmc_test_lock);
3049 
3050 	list_for_each_entry(gr, &mmc_test_result, link) {
3051 		struct mmc_test_transfer_result *tr;
3052 
3053 		if (gr->card != card)
3054 			continue;
3055 
3056 		seq_printf(sf, "Test %d: %d\n", gr->testcase + 1, gr->result);
3057 
3058 		list_for_each_entry(tr, &gr->tr_lst, link) {
3059 			seq_printf(sf, "%u %d %llu.%09u %u %u.%02u\n",
3060 				tr->count, tr->sectors,
3061 				(u64)tr->ts.tv_sec, (u32)tr->ts.tv_nsec,
3062 				tr->rate, tr->iops / 100, tr->iops % 100);
3063 		}
3064 	}
3065 
3066 	mutex_unlock(&mmc_test_lock);
3067 
3068 	return 0;
3069 }
3070 
3071 static int mtf_test_open(struct inode *inode, struct file *file)
3072 {
3073 	return single_open(file, mtf_test_show, inode->i_private);
3074 }
3075 
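/*
 * Writing a test number to the "test" file runs that test (0 runs them
 * all) and replaces any stored results.  A typical session, assuming a
 * debugfs path of the usual form (it varies with host and card), is:
 *
 *	# cat /sys/kernel/debug/mmc0/mmc0:0001/testlist
 *	# echo 1 > /sys/kernel/debug/mmc0/mmc0:0001/test
 *	# cat /sys/kernel/debug/mmc0/mmc0:0001/test
 */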
3076 static ssize_t mtf_test_write(struct file *file, const char __user *buf,
3077 	size_t count, loff_t *pos)
3078 {
3079 	struct seq_file *sf = (struct seq_file *)file->private_data;
3080 	struct mmc_card *card = (struct mmc_card *)sf->private;
3081 	struct mmc_test_card *test;
3082 	long testcase;
3083 	int ret;
3084 
3085 	ret = kstrtol_from_user(buf, count, 10, &testcase);
3086 	if (ret)
3087 		return ret;
3088 
3089 	test = kzalloc(sizeof(*test), GFP_KERNEL);
3090 	if (!test)
3091 		return -ENOMEM;
3092 
3093 	/*
3094 	 * Remove all results associated with the given card, so that only
3095 	 * data from the most recent run remains.
3096 	 */
3097 	mmc_test_free_result(card);
3098 
3099 	test->card = card;
3100 
3101 	test->buffer = kzalloc(BUFFER_SIZE, GFP_KERNEL);
3102 #ifdef CONFIG_HIGHMEM
3103 	test->highmem = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, BUFFER_ORDER);
3104 #endif
3105 
3106 #ifdef CONFIG_HIGHMEM
3107 	if (test->buffer && test->highmem) {
3108 #else
3109 	if (test->buffer) {
3110 #endif
3111 		mutex_lock(&mmc_test_lock);
3112 		mmc_test_run(test, testcase);
3113 		mutex_unlock(&mmc_test_lock);
3114 	}
3115 #ifdef CONFIG_HIGHMEM
3116 	if (test->highmem)
3117 		__free_pages(test->highmem, BUFFER_ORDER);
3118 #endif
3119 	kfree(test->buffer);
3120 	kfree(test);
3121 
3122 	return count;
3123 }
3124 
3125 static const struct file_operations mmc_test_fops_test = {
3126 	.open		= mtf_test_open,
3127 	.read		= seq_read,
3128 	.write		= mtf_test_write,
3129 	.llseek		= seq_lseek,
3130 	.release	= single_release,
3131 };
3132 
3133 static int mtf_testlist_show(struct seq_file *sf, void *data)
3134 {
3135 	int i;
3136 
3137 	mutex_lock(&mmc_test_lock);
3138 
3139 	seq_puts(sf, "0:\tRun all tests\n");
3140 	for (i = 0; i < ARRAY_SIZE(mmc_test_cases); i++)
3141 		seq_printf(sf, "%d:\t%s\n", i + 1, mmc_test_cases[i].name);
3142 
3143 	mutex_unlock(&mmc_test_lock);
3144 
3145 	return 0;
3146 }
3147 
3148 static int mtf_testlist_open(struct inode *inode, struct file *file)
3149 {
3150 	return single_open(file, mtf_testlist_show, inode->i_private);
3151 }
3152 
3153 static const struct file_operations mmc_test_fops_testlist = {
3154 	.open		= mtf_testlist_open,
3155 	.read		= seq_read,
3156 	.llseek		= seq_lseek,
3157 	.release	= single_release,
3158 };
3159 
3160 static void mmc_test_free_dbgfs_file(struct mmc_card *card)
3161 {
3162 	struct mmc_test_dbgfs_file *df, *dfs;
3163 
3164 	mutex_lock(&mmc_test_lock);
3165 
3166 	list_for_each_entry_safe(df, dfs, &mmc_test_file_test, link) {
3167 		if (card && df->card != card)
3168 			continue;
3169 		debugfs_remove(df->file);
3170 		list_del(&df->link);
3171 		kfree(df);
3172 	}
3173 
3174 	mutex_unlock(&mmc_test_lock);
3175 }
3176 
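/*
 * Create a single file under the card's debugfs directory and remember
 * it on mmc_test_file_test so it can be removed again later.
 */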
3177 static int __mmc_test_register_dbgfs_file(struct mmc_card *card,
3178 	const char *name, umode_t mode, const struct file_operations *fops)
3179 {
3180 	struct dentry *file = NULL;
3181 	struct mmc_test_dbgfs_file *df;
3182 
3183 	if (card->debugfs_root)
3184 		file = debugfs_create_file(name, mode, card->debugfs_root,
3185 			card, fops);
3186 
3187 	if (IS_ERR_OR_NULL(file)) {
3188 		dev_err(&card->dev,
3189 			"Can't create %s. Perhaps debugfs is disabled.\n",
3190 			name);
3191 		return -ENODEV;
3192 	}
3193 
3194 	df = kmalloc(sizeof(*df), GFP_KERNEL);
3195 	if (!df) {
3196 		debugfs_remove(file);
3197 		return -ENOMEM;
3198 	}
3199 
3200 	df->card = card;
3201 	df->file = file;
3202 
3203 	list_add(&df->link, &mmc_test_file_test);
3204 	return 0;
3205 }
3206 
3207 static int mmc_test_register_dbgfs_file(struct mmc_card *card)
3208 {
3209 	int ret;
3210 
3211 	mutex_lock(&mmc_test_lock);
3212 
3213 	ret = __mmc_test_register_dbgfs_file(card, "test", S_IWUSR | S_IRUGO,
3214 		&mmc_test_fops_test);
3215 	if (ret)
3216 		goto err;
3217 
3218 	ret = __mmc_test_register_dbgfs_file(card, "testlist", S_IRUGO,
3219 		&mmc_test_fops_testlist);
3220 	if (ret)
3221 		goto err;
3222 
3223 err:
3224 	mutex_unlock(&mmc_test_lock);
3225 
3226 	return ret;
3227 }
3228 
3229 static int mmc_test_probe(struct mmc_card *card)
3230 {
3231 	int ret;
3232 
3233 	if (!mmc_card_mmc(card) && !mmc_card_sd(card))
3234 		return -ENODEV;
3235 
3236 	ret = mmc_test_register_dbgfs_file(card);
3237 	if (ret)
3238 		return ret;
3239 
3240 	if (card->ext_csd.cmdq_en) {
3241 		mmc_claim_host(card->host);
3242 		ret = mmc_cmdq_disable(card);
3243 		mmc_release_host(card->host);
3244 		if (ret)
3245 			return ret;
3246 	}
3247 
3248 	dev_info(&card->dev, "Card claimed for testing.\n");
3249 
3250 	return 0;
3251 }
3252 
3253 static void mmc_test_remove(struct mmc_card *card)
3254 {
3255 	if (card->reenable_cmdq) {
3256 		mmc_claim_host(card->host);
3257 		mmc_cmdq_enable(card);
3258 		mmc_release_host(card->host);
3259 	}
3260 	mmc_test_free_result(card);
3261 	mmc_test_free_dbgfs_file(card);
3262 }
3263 
3264 static void mmc_test_shutdown(struct mmc_card *card)
3265 {
3266 }
3267 
3268 static struct mmc_driver mmc_driver = {
3269 	.drv		= {
3270 		.name	= "mmc_test",
3271 	},
3272 	.probe		= mmc_test_probe,
3273 	.remove		= mmc_test_remove,
3274 	.shutdown	= mmc_test_shutdown,
3275 };
3276 
3277 static int __init mmc_test_init(void)
3278 {
3279 	return mmc_register_driver(&mmc_driver);
3280 }
3281 
3282 static void __exit mmc_test_exit(void)
3283 {
3284 	/* Clear stale data if a card is still plugged in */
3285 	mmc_test_free_result(NULL);
3286 	mmc_test_free_dbgfs_file(NULL);
3287 
3288 	mmc_unregister_driver(&mmc_driver);
3289 }
3290 
3291 module_init(mmc_test_init);
3292 module_exit(mmc_test_exit);
3293 
3294 MODULE_LICENSE("GPL");
3295 MODULE_DESCRIPTION("Multimedia Card (MMC) host test driver");
3296 MODULE_AUTHOR("Pierre Ossman");
3297