xref: /openbmc/linux/drivers/mmc/core/mmc_test.c (revision 28efb0046512e8a13ed9f9bdf0d68d10bbfbe9cf)
1 /*
2  *  Copyright 2007-2008 Pierre Ossman
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License as published by
6  * the Free Software Foundation; either version 2 of the License, or (at
7  * your option) any later version.
8  */
9 
10 #include <linux/mmc/core.h>
11 #include <linux/mmc/card.h>
12 #include <linux/mmc/host.h>
13 #include <linux/mmc/mmc.h>
14 #include <linux/slab.h>
15 
16 #include <linux/scatterlist.h>
17 #include <linux/swap.h>		/* For nr_free_buffer_pages() */
18 #include <linux/list.h>
19 
20 #include <linux/debugfs.h>
21 #include <linux/uaccess.h>
22 #include <linux/seq_file.h>
23 #include <linux/module.h>
24 
25 #include "core.h"
26 #include "card.h"
27 #include "host.h"
28 #include "bus.h"
29 #include "mmc_ops.h"
30 
31 #define RESULT_OK		0
32 #define RESULT_FAIL		1
33 #define RESULT_UNSUP_HOST	2
34 #define RESULT_UNSUP_CARD	3
35 
36 #define BUFFER_ORDER		2
37 #define BUFFER_SIZE		(PAGE_SIZE << BUFFER_ORDER)
38 
39 #define TEST_ALIGN_END		8
40 
41 /*
42  * Limit the test area size to the maximum MMC HC erase group size.  Note that
43  * the maximum SD allocation unit size is just 4MiB.
44  */
45 #define TEST_AREA_MAX_SIZE (128 * 1024 * 1024)
46 
47 /**
48  * struct mmc_test_pages - pages allocated by 'alloc_pages()'.
49  * @page: first page in the allocation
50  * @order: order of the allocation, i.e. log2 of the number of pages
51  */
52 struct mmc_test_pages {
53 	struct page *page;
54 	unsigned int order;
55 };
56 
57 /**
58  * struct mmc_test_mem - allocated memory.
59  * @arr: array of allocations
60  * @cnt: number of allocations
61  */
62 struct mmc_test_mem {
63 	struct mmc_test_pages *arr;
64 	unsigned int cnt;
65 };
66 
67 /**
68  * struct mmc_test_area - information for performance tests.
69  * @max_sz: test area size (in bytes)
70  * @dev_addr: address on card at which to do performance tests
71  * @max_tfr: maximum transfer size allowed by driver (in bytes)
72  * @max_segs: maximum segments allowed by driver in scatterlist @sg
73  * @max_seg_sz: maximum segment size allowed by driver
74  * @blocks: number of (512 byte) blocks currently mapped by @sg
75  * @sg_len: length of currently mapped scatterlist @sg
76  * @mem: allocated memory
77  * @sg: scatterlist
78  */
79 struct mmc_test_area {
80 	unsigned long max_sz;
81 	unsigned int dev_addr;
82 	unsigned int max_tfr;
83 	unsigned int max_segs;
84 	unsigned int max_seg_sz;
85 	unsigned int blocks;
86 	unsigned int sg_len;
87 	struct mmc_test_mem *mem;
88 	struct scatterlist *sg;
89 };
90 
91 /**
92  * struct mmc_test_transfer_result - transfer results for performance tests.
93  * @link: double-linked list
94  * @count: number of groups of sectors checked
95  * @sectors: number of sectors checked per group
96  * @ts: time values of transfer
97  * @rate: calculated transfer rate
98  * @iops: I/O operations per second (times 100)
99  */
100 struct mmc_test_transfer_result {
101 	struct list_head link;
102 	unsigned int count;
103 	unsigned int sectors;
104 	struct timespec ts;
105 	unsigned int rate;
106 	unsigned int iops;
107 };
108 
109 /**
110  * struct mmc_test_general_result - results for tests.
111  * @link: double-linked list
112  * @card: card under test
113  * @testcase: number of test case
114  * @result: result of test run
115  * @tr_lst: list of transfer measurements, if any, as struct mmc_test_transfer_result
116  */
117 struct mmc_test_general_result {
118 	struct list_head link;
119 	struct mmc_card *card;
120 	int testcase;
121 	int result;
122 	struct list_head tr_lst;
123 };
124 
125 /**
126  * struct mmc_test_dbgfs_file - debugfs related file.
127  * @link: double-linked list
128  * @card: card under test
129  * @file: file created under debugfs
130  */
131 struct mmc_test_dbgfs_file {
132 	struct list_head link;
133 	struct mmc_card *card;
134 	struct dentry *file;
135 };
136 
137 /**
138  * struct mmc_test_card - test information.
139  * @card: card under test
140  * @scratch: transfer buffer
141  * @buffer: transfer buffer
142  * @highmem: buffer for highmem tests
143  * @area: information for performance tests
144  * @gr: pointer to results of current testcase
145  */
146 struct mmc_test_card {
147 	struct mmc_card	*card;
148 
149 	u8		scratch[BUFFER_SIZE];
150 	u8		*buffer;
151 #ifdef CONFIG_HIGHMEM
152 	struct page	*highmem;
153 #endif
154 	struct mmc_test_area		area;
155 	struct mmc_test_general_result	*gr;
156 };
157 
158 enum mmc_test_prep_media {
159 	MMC_TEST_PREP_NONE = 0,
160 	MMC_TEST_PREP_WRITE_FULL = 1 << 0,
161 	MMC_TEST_PREP_ERASE = 1 << 1,
162 };
163 
164 struct mmc_test_multiple_rw {
165 	unsigned int *sg_len;
166 	unsigned int *bs;
167 	unsigned int len;
168 	unsigned int size;
169 	bool do_write;
170 	bool do_nonblock_req;
171 	enum mmc_test_prep_media prepare;
172 };
173 
174 struct mmc_test_async_req {
175 	struct mmc_async_req areq;
176 	struct mmc_test_card *test;
177 };
178 
179 /*******************************************************************/
180 /*  General helper functions                                       */
181 /*******************************************************************/
182 
183 /*
184  * Configure correct block size in card
185  */
186 static int mmc_test_set_blksize(struct mmc_test_card *test, unsigned size)
187 {
188 	return mmc_set_blocklen(test->card, size);
189 }
190 
191 static bool mmc_test_card_cmd23(struct mmc_card *card)
192 {
193 	return mmc_card_mmc(card) ||
194 	       (mmc_card_sd(card) && card->scr.cmds & SD_SCR_CMD23_SUPPORT);
195 }
196 
197 static void mmc_test_prepare_sbc(struct mmc_test_card *test,
198 				 struct mmc_request *mrq, unsigned int blocks)
199 {
200 	struct mmc_card *card = test->card;
201 
202 	if (!mrq->sbc || !mmc_host_cmd23(card->host) ||
203 	    !mmc_test_card_cmd23(card) || !mmc_op_multi(mrq->cmd->opcode) ||
204 	    (card->quirks & MMC_QUIRK_BLK_NO_CMD23)) {
205 		mrq->sbc = NULL;
206 		return;
207 	}
208 
209 	mrq->sbc->opcode = MMC_SET_BLOCK_COUNT;
210 	mrq->sbc->arg = blocks;
211 	mrq->sbc->flags = MMC_RSP_R1 | MMC_CMD_AC;
212 }
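/*
 * Editorial sketch (hypothetical request, not part of the original code):
 * how the CMD23 decision above plays out for an 8-block multi-block write.
 *
 *	struct mmc_command sbc = {}, cmd = {};
 *	struct mmc_request mrq = { .sbc = &sbc, .cmd = &cmd };
 *
 *	cmd.opcode = MMC_WRITE_MULTIPLE_BLOCK;
 *	mmc_test_prepare_sbc(test, &mrq, 8);
 *
 * If the host advertises MMC_CAP_CMD23 and the card supports CMD23, mrq.sbc
 * is now an MMC_SET_BLOCK_COUNT command with arg == 8; otherwise mrq.sbc is
 * set to NULL and the transfer is later terminated by CMD12 instead.
 */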
213 
214 /*
215  * Fill in the mmc_request structure given a set of transfer parameters.
216  */
217 static void mmc_test_prepare_mrq(struct mmc_test_card *test,
218 	struct mmc_request *mrq, struct scatterlist *sg, unsigned sg_len,
219 	unsigned dev_addr, unsigned blocks, unsigned blksz, int write)
220 {
221 	if (WARN_ON(!mrq || !mrq->cmd || !mrq->data || !mrq->stop))
222 		return;
223 
224 	if (blocks > 1) {
225 		mrq->cmd->opcode = write ?
226 			MMC_WRITE_MULTIPLE_BLOCK : MMC_READ_MULTIPLE_BLOCK;
227 	} else {
228 		mrq->cmd->opcode = write ?
229 			MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
230 	}
231 
232 	mrq->cmd->arg = dev_addr;
233 	if (!mmc_card_blockaddr(test->card))
234 		mrq->cmd->arg <<= 9;
235 
236 	mrq->cmd->flags = MMC_RSP_R1 | MMC_CMD_ADTC;
237 
238 	if (blocks == 1)
239 		mrq->stop = NULL;
240 	else {
241 		mrq->stop->opcode = MMC_STOP_TRANSMISSION;
242 		mrq->stop->arg = 0;
243 		mrq->stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
244 	}
245 
246 	mrq->data->blksz = blksz;
247 	mrq->data->blocks = blocks;
248 	mrq->data->flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
249 	mrq->data->sg = sg;
250 	mrq->data->sg_len = sg_len;
251 
252 	mmc_test_prepare_sbc(test, mrq, blocks);
253 
254 	mmc_set_data_timeout(mrq->data, test->card);
255 }
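/*
 * Editorial sketch: minimal use of mmc_test_prepare_mrq() for a 4-block,
 * 512-byte read at device address 0, mirroring mmc_test_buffer_transfer()
 * below (the single-entry scatterlist is illustrative).
 *
 *	struct mmc_request mrq = {};
 *	struct mmc_command cmd = {}, stop = {};
 *	struct mmc_data data = {};
 *	struct scatterlist sg;
 *
 *	mrq.cmd = &cmd;
 *	mrq.data = &data;
 *	mrq.stop = &stop;
 *	sg_init_one(&sg, test->buffer, 4 * 512);
 *	mmc_test_prepare_mrq(test, &mrq, &sg, 1, 0, 4, 512, 0);
 *	mmc_wait_for_req(test->card->host, &mrq);
 */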
256 
257 static int mmc_test_busy(struct mmc_command *cmd)
258 {
259 	return !(cmd->resp[0] & R1_READY_FOR_DATA) ||
260 		(R1_CURRENT_STATE(cmd->resp[0]) == R1_STATE_PRG);
261 }
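/*
 * Editorial note: R1_READY_FOR_DATA is bit 8 of the R1 status word and
 * R1_CURRENT_STATE() extracts bits 12:9.  For example, a raw status of
 * 0x00000E00 decodes to current state 7 (R1_STATE_PRG) with READY_FOR_DATA
 * clear, so mmc_test_busy() reports busy on both counts.
 */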
262 
263 /*
264  * Wait for the card to finish the busy state
265  */
266 static int mmc_test_wait_busy(struct mmc_test_card *test)
267 {
268 	int ret, busy;
269 	struct mmc_command cmd = {};
270 
271 	busy = 0;
272 	do {
273 		memset(&cmd, 0, sizeof(struct mmc_command));
274 
275 		cmd.opcode = MMC_SEND_STATUS;
276 		cmd.arg = test->card->rca << 16;
277 		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
278 
279 		ret = mmc_wait_for_cmd(test->card->host, &cmd, 0);
280 		if (ret)
281 			break;
282 
283 		if (!busy && mmc_test_busy(&cmd)) {
284 			busy = 1;
285 			if (test->card->host->caps & MMC_CAP_WAIT_WHILE_BUSY)
286 				pr_info("%s: Warning: Host did not wait for busy state to end.\n",
287 					mmc_hostname(test->card->host));
288 		}
289 	} while (mmc_test_busy(&cmd));
290 
291 	return ret;
292 }
293 
294 /*
295  * Transfer a single sector of kernel addressable data
296  */
297 static int mmc_test_buffer_transfer(struct mmc_test_card *test,
298 	u8 *buffer, unsigned addr, unsigned blksz, int write)
299 {
300 	struct mmc_request mrq = {};
301 	struct mmc_command cmd = {};
302 	struct mmc_command stop = {};
303 	struct mmc_data data = {};
304 
305 	struct scatterlist sg;
306 
307 	mrq.cmd = &cmd;
308 	mrq.data = &data;
309 	mrq.stop = &stop;
310 
311 	sg_init_one(&sg, buffer, blksz);
312 
313 	mmc_test_prepare_mrq(test, &mrq, &sg, 1, addr, 1, blksz, write);
314 
315 	mmc_wait_for_req(test->card->host, &mrq);
316 
317 	if (cmd.error)
318 		return cmd.error;
319 	if (data.error)
320 		return data.error;
321 
322 	return mmc_test_wait_busy(test);
323 }
324 
325 static void mmc_test_free_mem(struct mmc_test_mem *mem)
326 {
327 	if (!mem)
328 		return;
329 	while (mem->cnt--)
330 		__free_pages(mem->arr[mem->cnt].page,
331 			     mem->arr[mem->cnt].order);
332 	kfree(mem->arr);
333 	kfree(mem);
334 }
335 
336 /*
337  * Allocate a lot of memory: preferably max_sz bytes, but at least min_sz.
338  * If memory is scarce, do not exceed 1/16th of the total lowmem pages.
339  * Also do not exceed the maximum number of segments, and try not to make
340  * segments much bigger than the maximum segment size.
341  */
342 static struct mmc_test_mem *mmc_test_alloc_mem(unsigned long min_sz,
343 					       unsigned long max_sz,
344 					       unsigned int max_segs,
345 					       unsigned int max_seg_sz)
346 {
347 	unsigned long max_page_cnt = DIV_ROUND_UP(max_sz, PAGE_SIZE);
348 	unsigned long min_page_cnt = DIV_ROUND_UP(min_sz, PAGE_SIZE);
349 	unsigned long max_seg_page_cnt = DIV_ROUND_UP(max_seg_sz, PAGE_SIZE);
350 	unsigned long page_cnt = 0;
351 	unsigned long limit = nr_free_buffer_pages() >> 4;
352 	struct mmc_test_mem *mem;
353 
354 	if (max_page_cnt > limit)
355 		max_page_cnt = limit;
356 	if (min_page_cnt > max_page_cnt)
357 		min_page_cnt = max_page_cnt;
358 
359 	if (max_seg_page_cnt > max_page_cnt)
360 		max_seg_page_cnt = max_page_cnt;
361 
362 	if (max_segs > max_page_cnt)
363 		max_segs = max_page_cnt;
364 
365 	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
366 	if (!mem)
367 		return NULL;
368 
369 	mem->arr = kcalloc(max_segs, sizeof(*mem->arr), GFP_KERNEL);
370 	if (!mem->arr)
371 		goto out_free;
372 
373 	while (max_page_cnt) {
374 		struct page *page;
375 		unsigned int order;
376 		gfp_t flags = GFP_KERNEL | GFP_DMA | __GFP_NOWARN |
377 				__GFP_NORETRY;
378 
379 		order = get_order(max_seg_page_cnt << PAGE_SHIFT);
380 		while (1) {
381 			page = alloc_pages(flags, order);
382 			if (page || !order)
383 				break;
384 			order -= 1;
385 		}
386 		if (!page) {
387 			if (page_cnt < min_page_cnt)
388 				goto out_free;
389 			break;
390 		}
391 		mem->arr[mem->cnt].page = page;
392 		mem->arr[mem->cnt].order = order;
393 		mem->cnt += 1;
394 		if (max_page_cnt <= (1UL << order))
395 			break;
396 		max_page_cnt -= 1UL << order;
397 		page_cnt += 1UL << order;
398 		if (mem->cnt >= max_segs) {
399 			if (page_cnt < min_page_cnt)
400 				goto out_free;
401 			break;
402 		}
403 	}
404 
405 	return mem;
406 
407 out_free:
408 	mmc_test_free_mem(mem);
409 	return NULL;
410 }
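/*
 * Editorial sketch (illustrative limits): typical use of the allocator
 * above.  Request at least 64KiB and at most 4MiB, spread over no more
 * than 128 segments of up to 64KiB each; the result must be released with
 * mmc_test_free_mem().
 *
 *	struct mmc_test_mem *mem;
 *
 *	mem = mmc_test_alloc_mem(64 * 1024, 4 * 1024 * 1024, 128, 64 * 1024);
 *	if (!mem)
 *		return -ENOMEM;
 *	...
 *	mmc_test_free_mem(mem);
 */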
411 
412 /*
413  * Map memory into a scatterlist.  Optionally allow the same memory to be
414  * mapped more than once.
415  */
416 static int mmc_test_map_sg(struct mmc_test_mem *mem, unsigned long size,
417 			   struct scatterlist *sglist, int repeat,
418 			   unsigned int max_segs, unsigned int max_seg_sz,
419 			   unsigned int *sg_len, int min_sg_len)
420 {
421 	struct scatterlist *sg = NULL;
422 	unsigned int i;
423 	unsigned long sz = size;
424 
425 	sg_init_table(sglist, max_segs);
426 	if (min_sg_len > max_segs)
427 		min_sg_len = max_segs;
428 
429 	*sg_len = 0;
430 	do {
431 		for (i = 0; i < mem->cnt; i++) {
432 			unsigned long len = PAGE_SIZE << mem->arr[i].order;
433 
434 			if (min_sg_len && (size / min_sg_len < len))
435 				len = ALIGN(size / min_sg_len, 512);
436 			if (len > sz)
437 				len = sz;
438 			if (len > max_seg_sz)
439 				len = max_seg_sz;
440 			if (sg)
441 				sg = sg_next(sg);
442 			else
443 				sg = sglist;
444 			if (!sg)
445 				return -EINVAL;
446 			sg_set_page(sg, mem->arr[i].page, len, 0);
447 			sz -= len;
448 			*sg_len += 1;
449 			if (!sz)
450 				break;
451 		}
452 	} while (sz && repeat);
453 
454 	if (sz)
455 		return -EINVAL;
456 
457 	if (sg)
458 		sg_mark_end(sg);
459 
460 	return 0;
461 }
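/*
 * Editorial sketch: mapping 1MiB of test memory into a scatterlist while
 * allowing the same pages to be mapped repeatedly (repeat = 1), as
 * mmc_test_area_map() does for non-scattered transfers.  The size and the
 * 't' test-area pointer are illustrative.
 *
 *	unsigned int sg_len;
 *	int err;
 *
 *	err = mmc_test_map_sg(t->mem, 1024 * 1024, t->sg, 1,
 *			      t->max_segs, t->max_seg_sz, &sg_len, 0);
 *	if (err)
 *		return err;
 */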
462 
463 /*
464  * Map memory into a scatterlist so that no pages are contiguous.  Allow the
465  * same memory to be mapped more than once.
466  */
467 static int mmc_test_map_sg_max_scatter(struct mmc_test_mem *mem,
468 				       unsigned long sz,
469 				       struct scatterlist *sglist,
470 				       unsigned int max_segs,
471 				       unsigned int max_seg_sz,
472 				       unsigned int *sg_len)
473 {
474 	struct scatterlist *sg = NULL;
475 	unsigned int i = mem->cnt, cnt;
476 	unsigned long len;
477 	void *base, *addr, *last_addr = NULL;
478 
479 	sg_init_table(sglist, max_segs);
480 
481 	*sg_len = 0;
482 	while (sz) {
483 		base = page_address(mem->arr[--i].page);
484 		cnt = 1 << mem->arr[i].order;
485 		while (sz && cnt) {
486 			addr = base + PAGE_SIZE * --cnt;
487 			if (last_addr && last_addr + PAGE_SIZE == addr)
488 				continue;
489 			last_addr = addr;
490 			len = PAGE_SIZE;
491 			if (len > max_seg_sz)
492 				len = max_seg_sz;
493 			if (len > sz)
494 				len = sz;
495 			if (sg)
496 				sg = sg_next(sg);
497 			else
498 				sg = sglist;
499 			if (!sg)
500 				return -EINVAL;
501 			sg_set_page(sg, virt_to_page(addr), len, 0);
502 			sz -= len;
503 			*sg_len += 1;
504 		}
505 		if (i == 0)
506 			i = mem->cnt;
507 	}
508 
509 	if (sg)
510 		sg_mark_end(sg);
511 
512 	return 0;
513 }
514 
515 /*
516  * Calculate transfer rate in bytes per second.
517  */
518 static unsigned int mmc_test_rate(uint64_t bytes, struct timespec *ts)
519 {
520 	uint64_t ns;
521 
522 	ns = ts->tv_sec;
523 	ns *= 1000000000;
524 	ns += ts->tv_nsec;
525 
526 	bytes *= 1000000000;
527 
528 	while (ns > UINT_MAX) {
529 		bytes >>= 1;
530 		ns >>= 1;
531 	}
532 
533 	if (!ns)
534 		return 0;
535 
536 	do_div(bytes, (uint32_t)ns);
537 
538 	return bytes;
539 }
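/*
 * Editorial example: 1MiB transferred in 0.5s gives
 * 1048576 * 10^9 / (5 * 10^8) = 2097152 bytes/s, which the printouts
 * below report as 2097 kB/s and 2048 KiB/s.  The halving loop above only
 * kicks in when the elapsed nanoseconds exceed UINT_MAX (about 4.3s).
 */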
540 
541 /*
542  * Save transfer results for future usage
543  */
544 static void mmc_test_save_transfer_result(struct mmc_test_card *test,
545 	unsigned int count, unsigned int sectors, struct timespec ts,
546 	unsigned int rate, unsigned int iops)
547 {
548 	struct mmc_test_transfer_result *tr;
549 
550 	if (!test->gr)
551 		return;
552 
553 	tr = kmalloc(sizeof(*tr), GFP_KERNEL);
554 	if (!tr)
555 		return;
556 
557 	tr->count = count;
558 	tr->sectors = sectors;
559 	tr->ts = ts;
560 	tr->rate = rate;
561 	tr->iops = iops;
562 
563 	list_add_tail(&tr->link, &test->gr->tr_lst);
564 }
565 
566 /*
567  * Print the transfer rate.
568  */
569 static void mmc_test_print_rate(struct mmc_test_card *test, uint64_t bytes,
570 				struct timespec *ts1, struct timespec *ts2)
571 {
572 	unsigned int rate, iops, sectors = bytes >> 9;
573 	struct timespec ts;
574 
575 	ts = timespec_sub(*ts2, *ts1);
576 
577 	rate = mmc_test_rate(bytes, &ts);
578 	iops = mmc_test_rate(100, &ts); /* I/O ops per sec x 100 */
579 
580 	pr_info("%s: Transfer of %u sectors (%u%s KiB) took %lu.%09lu "
581 			 "seconds (%u kB/s, %u KiB/s, %u.%02u IOPS)\n",
582 			 mmc_hostname(test->card->host), sectors, sectors >> 1,
583 			 (sectors & 1 ? ".5" : ""), (unsigned long)ts.tv_sec,
584 			 (unsigned long)ts.tv_nsec, rate / 1000, rate / 1024,
585 			 iops / 100, iops % 100);
586 
587 	mmc_test_save_transfer_result(test, 1, sectors, ts, rate, iops);
588 }
589 
590 /*
591  * Print the average transfer rate.
592  */
593 static void mmc_test_print_avg_rate(struct mmc_test_card *test, uint64_t bytes,
594 				    unsigned int count, struct timespec *ts1,
595 				    struct timespec *ts2)
596 {
597 	unsigned int rate, iops, sectors = bytes >> 9;
598 	uint64_t tot = bytes * count;
599 	struct timespec ts;
600 
601 	ts = timespec_sub(*ts2, *ts1);
602 
603 	rate = mmc_test_rate(tot, &ts);
604 	iops = mmc_test_rate(count * 100, &ts); /* I/O ops per sec x 100 */
605 
606 	pr_info("%s: Transfer of %u x %u sectors (%u x %u%s KiB) took "
607 			 "%lu.%09lu seconds (%u kB/s, %u KiB/s, "
608 			 "%u.%02u IOPS, sg_len %d)\n",
609 			 mmc_hostname(test->card->host), count, sectors, count,
610 			 sectors >> 1, (sectors & 1 ? ".5" : ""),
611 			 (unsigned long)ts.tv_sec, (unsigned long)ts.tv_nsec,
612 			 rate / 1000, rate / 1024, iops / 100, iops % 100,
613 			 test->area.sg_len);
614 
615 	mmc_test_save_transfer_result(test, count, sectors, ts, rate, iops);
616 }
617 
618 /*
619  * Return the card size in sectors.
620  */
621 static unsigned int mmc_test_capacity(struct mmc_card *card)
622 {
623 	if (!mmc_card_sd(card) && mmc_card_blockaddr(card))
624 		return card->ext_csd.sectors;
625 	else
626 		return card->csd.capacity << (card->csd.read_blkbits - 9);
627 }
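/*
 * Editorial example (hypothetical CSD values): for an SD card with
 * csd.capacity = 1048576 and csd.read_blkbits = 10, the second branch
 * returns 1048576 << (10 - 9) = 2097152 sectors, i.e. 1GiB of 512-byte
 * sectors.
 */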
628 
629 /*******************************************************************/
630 /*  Test preparation and cleanup                                   */
631 /*******************************************************************/
632 
633 /*
634  * Fill the first BUFFER_SIZE / 512 sectors of the card with known data
635  * so that bad reads/writes can be detected.
636  */
637 static int __mmc_test_prepare(struct mmc_test_card *test, int write)
638 {
639 	int ret, i;
640 
641 	ret = mmc_test_set_blksize(test, 512);
642 	if (ret)
643 		return ret;
644 
645 	if (write)
646 		memset(test->buffer, 0xDF, 512);
647 	else {
648 		for (i = 0; i < 512; i++)
649 			test->buffer[i] = i;
650 	}
651 
652 	for (i = 0; i < BUFFER_SIZE / 512; i++) {
653 		ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
654 		if (ret)
655 			return ret;
656 	}
657 
658 	return 0;
659 }
660 
661 static int mmc_test_prepare_write(struct mmc_test_card *test)
662 {
663 	return __mmc_test_prepare(test, 1);
664 }
665 
666 static int mmc_test_prepare_read(struct mmc_test_card *test)
667 {
668 	return __mmc_test_prepare(test, 0);
669 }
670 
671 static int mmc_test_cleanup(struct mmc_test_card *test)
672 {
673 	int ret, i;
674 
675 	ret = mmc_test_set_blksize(test, 512);
676 	if (ret)
677 		return ret;
678 
679 	memset(test->buffer, 0, 512);
680 
681 	for (i = 0; i < BUFFER_SIZE / 512; i++) {
682 		ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
683 		if (ret)
684 			return ret;
685 	}
686 
687 	return 0;
688 }
689 
690 /*******************************************************************/
691 /*  Test execution helpers                                         */
692 /*******************************************************************/
693 
694 /*
695  * Modifies the mmc_request to perform the "short transfer" tests
696  */
697 static void mmc_test_prepare_broken_mrq(struct mmc_test_card *test,
698 	struct mmc_request *mrq, int write)
699 {
700 	if (WARN_ON(!mrq || !mrq->cmd || !mrq->data))
701 		return;
702 
703 	if (mrq->data->blocks > 1) {
704 		mrq->cmd->opcode = write ?
705 			MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
706 		mrq->stop = NULL;
707 	} else {
708 		mrq->cmd->opcode = MMC_SEND_STATUS;
709 		mrq->cmd->arg = test->card->rca << 16;
710 	}
711 }
712 
713 /*
714  * Checks that a normal transfer didn't have any errors
715  */
716 static int mmc_test_check_result(struct mmc_test_card *test,
717 				 struct mmc_request *mrq)
718 {
719 	int ret;
720 
721 	if (WARN_ON(!mrq || !mrq->cmd || !mrq->data))
722 		return -EINVAL;
723 
724 	ret = 0;
725 
726 	if (mrq->sbc && mrq->sbc->error)
727 		ret = mrq->sbc->error;
728 	if (!ret && mrq->cmd->error)
729 		ret = mrq->cmd->error;
730 	if (!ret && mrq->data->error)
731 		ret = mrq->data->error;
732 	if (!ret && mrq->stop && mrq->stop->error)
733 		ret = mrq->stop->error;
734 	if (!ret && mrq->data->bytes_xfered !=
735 		mrq->data->blocks * mrq->data->blksz)
736 		ret = RESULT_FAIL;
737 
738 	if (ret == -EINVAL)
739 		ret = RESULT_UNSUP_HOST;
740 
741 	return ret;
742 }
743 
744 static enum mmc_blk_status mmc_test_check_result_async(struct mmc_card *card,
745 				       struct mmc_async_req *areq)
746 {
747 	struct mmc_test_async_req *test_async =
748 		container_of(areq, struct mmc_test_async_req, areq);
749 	int ret;
750 
751 	mmc_test_wait_busy(test_async->test);
752 
753 	/*
754 	 * FIXME: this code used to simply cast a regular error code,
755 	 * either a kernel -ERRORCODE or a local test framework RESULT_*
756 	 * code, into an enum mmc_blk_status and return it as the result
757 	 * of the check. For now, collapse it to something reasonable by
758 	 * returning either MMC_BLK_SUCCESS or MMC_BLK_CMD_ERR. If
759 	 * possible, a more precise error code should be returned.
760 	 */
761 	ret = mmc_test_check_result(test_async->test, areq->mrq);
762 	if (ret)
763 		return MMC_BLK_CMD_ERR;
764 
765 	return MMC_BLK_SUCCESS;
766 }
767 
768 /*
769  * Checks that a "short transfer" behaved as expected
770  */
771 static int mmc_test_check_broken_result(struct mmc_test_card *test,
772 	struct mmc_request *mrq)
773 {
774 	int ret;
775 
776 	if (WARN_ON(!mrq || !mrq->cmd || !mrq->data))
777 		return -EINVAL;
778 
779 	ret = 0;
780 
781 	if (!ret && mrq->cmd->error)
782 		ret = mrq->cmd->error;
783 	if (!ret && mrq->data->error == 0)
784 		ret = RESULT_FAIL;
785 	if (!ret && mrq->data->error != -ETIMEDOUT)
786 		ret = mrq->data->error;
787 	if (!ret && mrq->stop && mrq->stop->error)
788 		ret = mrq->stop->error;
789 	if (mrq->data->blocks > 1) {
790 		if (!ret && mrq->data->bytes_xfered > mrq->data->blksz)
791 			ret = RESULT_FAIL;
792 	} else {
793 		if (!ret && mrq->data->bytes_xfered > 0)
794 			ret = RESULT_FAIL;
795 	}
796 
797 	if (ret == -EINVAL)
798 		ret = RESULT_UNSUP_HOST;
799 
800 	return ret;
801 }
802 
803 struct mmc_test_req {
804 	struct mmc_request mrq;
805 	struct mmc_command sbc;
806 	struct mmc_command cmd;
807 	struct mmc_command stop;
808 	struct mmc_command status;
809 	struct mmc_data data;
810 };
811 
812 /*
813  * Reset a test request, re-linking its embedded command and data structures.
814  */
815 static void mmc_test_req_reset(struct mmc_test_req *rq)
816 {
817 	memset(rq, 0, sizeof(struct mmc_test_req));
818 
819 	rq->mrq.cmd = &rq->cmd;
820 	rq->mrq.data = &rq->data;
821 	rq->mrq.stop = &rq->stop;
822 }
823 
824 static struct mmc_test_req *mmc_test_req_alloc(void)
825 {
826 	struct mmc_test_req *rq = kmalloc(sizeof(*rq), GFP_KERNEL);
827 
828 	if (rq)
829 		mmc_test_req_reset(rq);
830 
831 	return rq;
832 }
833 
834 
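/*
 * Tests non-blocking transfers with certain parameters, keeping two
 * requests in flight so the next one is prepared while the current one
 * completes.
 */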
835 static int mmc_test_nonblock_transfer(struct mmc_test_card *test,
836 				      struct scatterlist *sg, unsigned sg_len,
837 				      unsigned dev_addr, unsigned blocks,
838 				      unsigned blksz, int write, int count)
839 {
840 	struct mmc_test_req *rq1, *rq2;
841 	struct mmc_test_async_req test_areq[2];
842 	struct mmc_async_req *done_areq;
843 	struct mmc_async_req *cur_areq = &test_areq[0].areq;
844 	struct mmc_async_req *other_areq = &test_areq[1].areq;
845 	enum mmc_blk_status status;
846 	int i;
847 	int ret = RESULT_OK;
848 
849 	test_areq[0].test = test;
850 	test_areq[1].test = test;
851 
852 	rq1 = mmc_test_req_alloc();
853 	rq2 = mmc_test_req_alloc();
854 	if (!rq1 || !rq2) {
855 		ret = RESULT_FAIL;
856 		goto err;
857 	}
858 
859 	cur_areq->mrq = &rq1->mrq;
860 	cur_areq->err_check = mmc_test_check_result_async;
861 	other_areq->mrq = &rq2->mrq;
862 	other_areq->err_check = mmc_test_check_result_async;
863 
864 	for (i = 0; i < count; i++) {
865 		mmc_test_prepare_mrq(test, cur_areq->mrq, sg, sg_len, dev_addr,
866 				     blocks, blksz, write);
867 		done_areq = mmc_start_areq(test->card->host, cur_areq, &status);
868 
869 		if (status != MMC_BLK_SUCCESS || (!done_areq && i > 0)) {
870 			ret = RESULT_FAIL;
871 			goto err;
872 		}
873 
874 		if (done_areq)
875 			mmc_test_req_reset(container_of(done_areq->mrq,
876 						struct mmc_test_req, mrq));
877 
878 		swap(cur_areq, other_areq);
879 		dev_addr += blocks;
880 	}
881 
882 	done_areq = mmc_start_areq(test->card->host, NULL, &status);
883 	if (status != MMC_BLK_SUCCESS)
884 		ret = RESULT_FAIL;
885 
886 err:
887 	kfree(rq1);
888 	kfree(rq2);
889 	return ret;
890 }
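/*
 * Editorial note: the loop above ping-pongs between two pre-allocated
 * requests so that, from the second iteration on, request N+1 is prepared
 * while request N is still in flight; the final mmc_start_areq() call with
 * a NULL request only reclaims the last outstanding transfer.
 */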
891 
892 /*
893  * Tests a basic transfer with certain parameters
894  */
895 static int mmc_test_simple_transfer(struct mmc_test_card *test,
896 	struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
897 	unsigned blocks, unsigned blksz, int write)
898 {
899 	struct mmc_request mrq = {};
900 	struct mmc_command cmd = {};
901 	struct mmc_command stop = {};
902 	struct mmc_data data = {};
903 
904 	mrq.cmd = &cmd;
905 	mrq.data = &data;
906 	mrq.stop = &stop;
907 
908 	mmc_test_prepare_mrq(test, &mrq, sg, sg_len, dev_addr,
909 		blocks, blksz, write);
910 
911 	mmc_wait_for_req(test->card->host, &mrq);
912 
913 	mmc_test_wait_busy(test);
914 
915 	return mmc_test_check_result(test, &mrq);
916 }
917 
918 /*
919  * Tests a transfer where the card will fail completely or partly
920  */
921 static int mmc_test_broken_transfer(struct mmc_test_card *test,
922 	unsigned blocks, unsigned blksz, int write)
923 {
924 	struct mmc_request mrq = {};
925 	struct mmc_command cmd = {};
926 	struct mmc_command stop = {};
927 	struct mmc_data data = {};
928 
929 	struct scatterlist sg;
930 
931 	mrq.cmd = &cmd;
932 	mrq.data = &data;
933 	mrq.stop = &stop;
934 
935 	sg_init_one(&sg, test->buffer, blocks * blksz);
936 
937 	mmc_test_prepare_mrq(test, &mrq, &sg, 1, 0, blocks, blksz, write);
938 	mmc_test_prepare_broken_mrq(test, &mrq, write);
939 
940 	mmc_wait_for_req(test->card->host, &mrq);
941 
942 	mmc_test_wait_busy(test);
943 
944 	return mmc_test_check_broken_result(test, &mrq);
945 }
946 
947 /*
948  * Does a complete transfer test where data is also validated
949  *
950  * Note: mmc_test_prepare() must have been done before this call
951  */
952 static int mmc_test_transfer(struct mmc_test_card *test,
953 	struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
954 	unsigned blocks, unsigned blksz, int write)
955 {
956 	int ret, i;
957 	unsigned long flags;
958 
959 	if (write) {
960 		for (i = 0; i < blocks * blksz; i++)
961 			test->scratch[i] = i;
962 	} else {
963 		memset(test->scratch, 0, BUFFER_SIZE);
964 	}
965 	local_irq_save(flags);
966 	sg_copy_from_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
967 	local_irq_restore(flags);
968 
969 	ret = mmc_test_set_blksize(test, blksz);
970 	if (ret)
971 		return ret;
972 
973 	ret = mmc_test_simple_transfer(test, sg, sg_len, dev_addr,
974 		blocks, blksz, write);
975 	if (ret)
976 		return ret;
977 
978 	if (write) {
979 		int sectors;
980 
981 		ret = mmc_test_set_blksize(test, 512);
982 		if (ret)
983 			return ret;
984 
985 		sectors = (blocks * blksz + 511) / 512;
986 		if ((sectors * 512) == (blocks * blksz))
987 			sectors++;
988 
989 		if ((sectors * 512) > BUFFER_SIZE)
990 			return -EINVAL;
991 
992 		memset(test->buffer, 0, sectors * 512);
993 
994 		for (i = 0; i < sectors; i++) {
995 			ret = mmc_test_buffer_transfer(test,
996 				test->buffer + i * 512,
997 				dev_addr + i, 512, 0);
998 			if (ret)
999 				return ret;
1000 		}
1001 
1002 		for (i = 0; i < blocks * blksz; i++) {
1003 			if (test->buffer[i] != (u8)i)
1004 				return RESULT_FAIL;
1005 		}
1006 
1007 		for (; i < sectors * 512; i++) {
1008 			if (test->buffer[i] != 0xDF)
1009 				return RESULT_FAIL;
1010 		}
1011 	} else {
1012 		local_irq_save(flags);
1013 		sg_copy_to_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
1014 		local_irq_restore(flags);
1015 		for (i = 0; i < blocks * blksz; i++) {
1016 			if (test->scratch[i] != (u8)i)
1017 				return RESULT_FAIL;
1018 		}
1019 	}
1020 
1021 	return 0;
1022 }
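/*
 * Editorial example of the verify path above: for blocks = 2, blksz = 512,
 * sectors = (1024 + 511) / 512 = 2 and is then bumped to 3 because
 * 2 * 512 == 1024.  The extra third sector is read back solely to check
 * that the 0xDF fill written by mmc_test_prepare_write() was not
 * overwritten by the transfer under test.
 */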
1023 
1024 /*******************************************************************/
1025 /*  Tests                                                          */
1026 /*******************************************************************/
1027 
1028 struct mmc_test_case {
1029 	const char *name;
1030 
1031 	int (*prepare)(struct mmc_test_card *);
1032 	int (*run)(struct mmc_test_card *);
1033 	int (*cleanup)(struct mmc_test_card *);
1034 };
1035 
1036 static int mmc_test_basic_write(struct mmc_test_card *test)
1037 {
1038 	int ret;
1039 	struct scatterlist sg;
1040 
1041 	ret = mmc_test_set_blksize(test, 512);
1042 	if (ret)
1043 		return ret;
1044 
1045 	sg_init_one(&sg, test->buffer, 512);
1046 
1047 	return mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 1);
1048 }
1049 
1050 static int mmc_test_basic_read(struct mmc_test_card *test)
1051 {
1052 	int ret;
1053 	struct scatterlist sg;
1054 
1055 	ret = mmc_test_set_blksize(test, 512);
1056 	if (ret)
1057 		return ret;
1058 
1059 	sg_init_one(&sg, test->buffer, 512);
1060 
1061 	return mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 0);
1062 }
1063 
1064 static int mmc_test_verify_write(struct mmc_test_card *test)
1065 {
1066 	struct scatterlist sg;
1067 
1068 	sg_init_one(&sg, test->buffer, 512);
1069 
1070 	return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
1071 }
1072 
1073 static int mmc_test_verify_read(struct mmc_test_card *test)
1074 {
1075 	struct scatterlist sg;
1076 
1077 	sg_init_one(&sg, test->buffer, 512);
1078 
1079 	return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
1080 }
1081 
1082 static int mmc_test_multi_write(struct mmc_test_card *test)
1083 {
1084 	unsigned int size;
1085 	struct scatterlist sg;
1086 
1087 	if (test->card->host->max_blk_count == 1)
1088 		return RESULT_UNSUP_HOST;
1089 
1090 	size = PAGE_SIZE * 2;
1091 	size = min(size, test->card->host->max_req_size);
1092 	size = min(size, test->card->host->max_seg_size);
1093 	size = min(size, test->card->host->max_blk_count * 512);
1094 
1095 	if (size < 1024)
1096 		return RESULT_UNSUP_HOST;
1097 
1098 	sg_init_one(&sg, test->buffer, size);
1099 
1100 	return mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 1);
1101 }
1102 
1103 static int mmc_test_multi_read(struct mmc_test_card *test)
1104 {
1105 	unsigned int size;
1106 	struct scatterlist sg;
1107 
1108 	if (test->card->host->max_blk_count == 1)
1109 		return RESULT_UNSUP_HOST;
1110 
1111 	size = PAGE_SIZE * 2;
1112 	size = min(size, test->card->host->max_req_size);
1113 	size = min(size, test->card->host->max_seg_size);
1114 	size = min(size, test->card->host->max_blk_count * 512);
1115 
1116 	if (size < 1024)
1117 		return RESULT_UNSUP_HOST;
1118 
1119 	sg_init_one(&sg, test->buffer, size);
1120 
1121 	return mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 0);
1122 }
1123 
1124 static int mmc_test_pow2_write(struct mmc_test_card *test)
1125 {
1126 	int ret, i;
1127 	struct scatterlist sg;
1128 
1129 	if (!test->card->csd.write_partial)
1130 		return RESULT_UNSUP_CARD;
1131 
1132 	for (i = 1; i < 512; i <<= 1) {
1133 		sg_init_one(&sg, test->buffer, i);
1134 		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
1135 		if (ret)
1136 			return ret;
1137 	}
1138 
1139 	return 0;
1140 }
1141 
1142 static int mmc_test_pow2_read(struct mmc_test_card *test)
1143 {
1144 	int ret, i;
1145 	struct scatterlist sg;
1146 
1147 	if (!test->card->csd.read_partial)
1148 		return RESULT_UNSUP_CARD;
1149 
1150 	for (i = 1; i < 512; i <<= 1) {
1151 		sg_init_one(&sg, test->buffer, i);
1152 		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
1153 		if (ret)
1154 			return ret;
1155 	}
1156 
1157 	return 0;
1158 }
1159 
1160 static int mmc_test_weird_write(struct mmc_test_card *test)
1161 {
1162 	int ret, i;
1163 	struct scatterlist sg;
1164 
1165 	if (!test->card->csd.write_partial)
1166 		return RESULT_UNSUP_CARD;
1167 
1168 	for (i = 3; i < 512; i += 7) {
1169 		sg_init_one(&sg, test->buffer, i);
1170 		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
1171 		if (ret)
1172 			return ret;
1173 	}
1174 
1175 	return 0;
1176 }
1177 
1178 static int mmc_test_weird_read(struct mmc_test_card *test)
1179 {
1180 	int ret, i;
1181 	struct scatterlist sg;
1182 
1183 	if (!test->card->csd.read_partial)
1184 		return RESULT_UNSUP_CARD;
1185 
1186 	for (i = 3; i < 512; i += 7) {
1187 		sg_init_one(&sg, test->buffer, i);
1188 		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
1189 		if (ret)
1190 			return ret;
1191 	}
1192 
1193 	return 0;
1194 }
1195 
1196 static int mmc_test_align_write(struct mmc_test_card *test)
1197 {
1198 	int ret, i;
1199 	struct scatterlist sg;
1200 
1201 	for (i = 1; i < TEST_ALIGN_END; i++) {
1202 		sg_init_one(&sg, test->buffer + i, 512);
1203 		ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
1204 		if (ret)
1205 			return ret;
1206 	}
1207 
1208 	return 0;
1209 }
1210 
1211 static int mmc_test_align_read(struct mmc_test_card *test)
1212 {
1213 	int ret, i;
1214 	struct scatterlist sg;
1215 
1216 	for (i = 1; i < TEST_ALIGN_END; i++) {
1217 		sg_init_one(&sg, test->buffer + i, 512);
1218 		ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
1219 		if (ret)
1220 			return ret;
1221 	}
1222 
1223 	return 0;
1224 }
1225 
1226 static int mmc_test_align_multi_write(struct mmc_test_card *test)
1227 {
1228 	int ret, i;
1229 	unsigned int size;
1230 	struct scatterlist sg;
1231 
1232 	if (test->card->host->max_blk_count == 1)
1233 		return RESULT_UNSUP_HOST;
1234 
1235 	size = PAGE_SIZE * 2;
1236 	size = min(size, test->card->host->max_req_size);
1237 	size = min(size, test->card->host->max_seg_size);
1238 	size = min(size, test->card->host->max_blk_count * 512);
1239 
1240 	if (size < 1024)
1241 		return RESULT_UNSUP_HOST;
1242 
1243 	for (i = 1; i < TEST_ALIGN_END; i++) {
1244 		sg_init_one(&sg, test->buffer + i, size);
1245 		ret = mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 1);
1246 		if (ret)
1247 			return ret;
1248 	}
1249 
1250 	return 0;
1251 }
1252 
1253 static int mmc_test_align_multi_read(struct mmc_test_card *test)
1254 {
1255 	int ret, i;
1256 	unsigned int size;
1257 	struct scatterlist sg;
1258 
1259 	if (test->card->host->max_blk_count == 1)
1260 		return RESULT_UNSUP_HOST;
1261 
1262 	size = PAGE_SIZE * 2;
1263 	size = min(size, test->card->host->max_req_size);
1264 	size = min(size, test->card->host->max_seg_size);
1265 	size = min(size, test->card->host->max_blk_count * 512);
1266 
1267 	if (size < 1024)
1268 		return RESULT_UNSUP_HOST;
1269 
1270 	for (i = 1; i < TEST_ALIGN_END; i++) {
1271 		sg_init_one(&sg, test->buffer + i, size);
1272 		ret = mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 0);
1273 		if (ret)
1274 			return ret;
1275 	}
1276 
1277 	return 0;
1278 }
1279 
1280 static int mmc_test_xfersize_write(struct mmc_test_card *test)
1281 {
1282 	int ret;
1283 
1284 	ret = mmc_test_set_blksize(test, 512);
1285 	if (ret)
1286 		return ret;
1287 
1288 	return mmc_test_broken_transfer(test, 1, 512, 1);
1289 }
1290 
1291 static int mmc_test_xfersize_read(struct mmc_test_card *test)
1292 {
1293 	int ret;
1294 
1295 	ret = mmc_test_set_blksize(test, 512);
1296 	if (ret)
1297 		return ret;
1298 
1299 	return mmc_test_broken_transfer(test, 1, 512, 0);
1300 }
1301 
1302 static int mmc_test_multi_xfersize_write(struct mmc_test_card *test)
1303 {
1304 	int ret;
1305 
1306 	if (test->card->host->max_blk_count == 1)
1307 		return RESULT_UNSUP_HOST;
1308 
1309 	ret = mmc_test_set_blksize(test, 512);
1310 	if (ret)
1311 		return ret;
1312 
1313 	return mmc_test_broken_transfer(test, 2, 512, 1);
1314 }
1315 
1316 static int mmc_test_multi_xfersize_read(struct mmc_test_card *test)
1317 {
1318 	int ret;
1319 
1320 	if (test->card->host->max_blk_count == 1)
1321 		return RESULT_UNSUP_HOST;
1322 
1323 	ret = mmc_test_set_blksize(test, 512);
1324 	if (ret)
1325 		return ret;
1326 
1327 	return mmc_test_broken_transfer(test, 2, 512, 0);
1328 }
1329 
1330 #ifdef CONFIG_HIGHMEM
1331 
1332 static int mmc_test_write_high(struct mmc_test_card *test)
1333 {
1334 	struct scatterlist sg;
1335 
1336 	sg_init_table(&sg, 1);
1337 	sg_set_page(&sg, test->highmem, 512, 0);
1338 
1339 	return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
1340 }
1341 
1342 static int mmc_test_read_high(struct mmc_test_card *test)
1343 {
1344 	struct scatterlist sg;
1345 
1346 	sg_init_table(&sg, 1);
1347 	sg_set_page(&sg, test->highmem, 512, 0);
1348 
1349 	return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
1350 }
1351 
1352 static int mmc_test_multi_write_high(struct mmc_test_card *test)
1353 {
1354 	unsigned int size;
1355 	struct scatterlist sg;
1356 
1357 	if (test->card->host->max_blk_count == 1)
1358 		return RESULT_UNSUP_HOST;
1359 
1360 	size = PAGE_SIZE * 2;
1361 	size = min(size, test->card->host->max_req_size);
1362 	size = min(size, test->card->host->max_seg_size);
1363 	size = min(size, test->card->host->max_blk_count * 512);
1364 
1365 	if (size < 1024)
1366 		return RESULT_UNSUP_HOST;
1367 
1368 	sg_init_table(&sg, 1);
1369 	sg_set_page(&sg, test->highmem, size, 0);
1370 
1371 	return mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 1);
1372 }
1373 
1374 static int mmc_test_multi_read_high(struct mmc_test_card *test)
1375 {
1376 	unsigned int size;
1377 	struct scatterlist sg;
1378 
1379 	if (test->card->host->max_blk_count == 1)
1380 		return RESULT_UNSUP_HOST;
1381 
1382 	size = PAGE_SIZE * 2;
1383 	size = min(size, test->card->host->max_req_size);
1384 	size = min(size, test->card->host->max_seg_size);
1385 	size = min(size, test->card->host->max_blk_count * 512);
1386 
1387 	if (size < 1024)
1388 		return RESULT_UNSUP_HOST;
1389 
1390 	sg_init_table(&sg, 1);
1391 	sg_set_page(&sg, test->highmem, size, 0);
1392 
1393 	return mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 0);
1394 }
1395 
1396 #else
1397 
1398 static int mmc_test_no_highmem(struct mmc_test_card *test)
1399 {
1400 	pr_info("%s: Highmem not configured - test skipped\n",
1401 	       mmc_hostname(test->card->host));
1402 	return 0;
1403 }
1404 
1405 #endif /* CONFIG_HIGHMEM */
1406 
1407 /*
1408  * Map sz bytes so that it can be transferred.
1409  */
1410 static int mmc_test_area_map(struct mmc_test_card *test, unsigned long sz,
1411 			     int max_scatter, int min_sg_len)
1412 {
1413 	struct mmc_test_area *t = &test->area;
1414 	int err;
1415 
1416 	t->blocks = sz >> 9;
1417 
1418 	if (max_scatter) {
1419 		err = mmc_test_map_sg_max_scatter(t->mem, sz, t->sg,
1420 						  t->max_segs, t->max_seg_sz,
1421 				       &t->sg_len);
1422 	} else {
1423 		err = mmc_test_map_sg(t->mem, sz, t->sg, 1, t->max_segs,
1424 				      t->max_seg_sz, &t->sg_len, min_sg_len);
1425 	}
1426 	if (err)
1427 		pr_info("%s: Failed to map sg list\n",
1428 		       mmc_hostname(test->card->host));
1429 	return err;
1430 }
1431 
1432 /*
1433  * Transfer bytes mapped by mmc_test_area_map().
1434  */
1435 static int mmc_test_area_transfer(struct mmc_test_card *test,
1436 				  unsigned int dev_addr, int write)
1437 {
1438 	struct mmc_test_area *t = &test->area;
1439 
1440 	return mmc_test_simple_transfer(test, t->sg, t->sg_len, dev_addr,
1441 					t->blocks, 512, write);
1442 }
1443 
1444 /*
1445  * Map and transfer bytes for multiple transfers.
1446  */
1447 static int mmc_test_area_io_seq(struct mmc_test_card *test, unsigned long sz,
1448 				unsigned int dev_addr, int write,
1449 				int max_scatter, int timed, int count,
1450 				bool nonblock, int min_sg_len)
1451 {
1452 	struct timespec ts1, ts2;
1453 	int ret = 0;
1454 	int i;
1455 	struct mmc_test_area *t = &test->area;
1456 
1457 	/*
1458 	 * In the case of a maximally scattered transfer, the maximum transfer
1459 	 * size is further limited by using PAGE_SIZE segments.
1460 	 */
1461 	if (max_scatter) {
1463 		unsigned long max_tfr;
1464 
1465 		if (t->max_seg_sz >= PAGE_SIZE)
1466 			max_tfr = t->max_segs * PAGE_SIZE;
1467 		else
1468 			max_tfr = t->max_segs * t->max_seg_sz;
1469 		if (sz > max_tfr)
1470 			sz = max_tfr;
1471 	}
1472 
1473 	ret = mmc_test_area_map(test, sz, max_scatter, min_sg_len);
1474 	if (ret)
1475 		return ret;
1476 
1477 	if (timed)
1478 		getnstimeofday(&ts1);
1479 	if (nonblock)
1480 		ret = mmc_test_nonblock_transfer(test, t->sg, t->sg_len,
1481 				 dev_addr, t->blocks, 512, write, count);
1482 	else
1483 		for (i = 0; i < count && ret == 0; i++) {
1484 			ret = mmc_test_area_transfer(test, dev_addr, write);
1485 			dev_addr += sz >> 9;
1486 		}
1487 
1488 	if (ret)
1489 		return ret;
1490 
1491 	if (timed)
1492 		getnstimeofday(&ts2);
1493 
1494 	if (timed)
1495 		mmc_test_print_avg_rate(test, sz, count, &ts1, &ts2);
1496 
1497 	return 0;
1498 }
1499 
1500 static int mmc_test_area_io(struct mmc_test_card *test, unsigned long sz,
1501 			    unsigned int dev_addr, int write, int max_scatter,
1502 			    int timed)
1503 {
1504 	return mmc_test_area_io_seq(test, sz, dev_addr, write, max_scatter,
1505 				    timed, 1, false, 0);
1506 }
1507 
1508 /*
1509  * Write the test area entirely.
1510  */
1511 static int mmc_test_area_fill(struct mmc_test_card *test)
1512 {
1513 	struct mmc_test_area *t = &test->area;
1514 
1515 	return mmc_test_area_io(test, t->max_tfr, t->dev_addr, 1, 0, 0);
1516 }
1517 
1518 /*
1519  * Erase the test area entirely.
1520  */
1521 static int mmc_test_area_erase(struct mmc_test_card *test)
1522 {
1523 	struct mmc_test_area *t = &test->area;
1524 
1525 	if (!mmc_can_erase(test->card))
1526 		return 0;
1527 
1528 	return mmc_erase(test->card, t->dev_addr, t->max_sz >> 9,
1529 			 MMC_ERASE_ARG);
1530 }
1531 
1532 /*
1533  * Cleanup struct mmc_test_area.
1534  */
1535 static int mmc_test_area_cleanup(struct mmc_test_card *test)
1536 {
1537 	struct mmc_test_area *t = &test->area;
1538 
1539 	kfree(t->sg);
1540 	mmc_test_free_mem(t->mem);
1541 
1542 	return 0;
1543 }
1544 
1545 /*
1546  * Initialize an area for testing large transfers.  The test area is set to the
1547  * middle of the card because cards may have different characteristics at the
1548  * front (for FAT file system optimization).  Optionally, the area is erased
1549  * (if the card supports it) which may improve write performance.  Optionally,
1550  * the area is filled with data for subsequent read tests.
1551  */
1552 static int mmc_test_area_init(struct mmc_test_card *test, int erase, int fill)
1553 {
1554 	struct mmc_test_area *t = &test->area;
1555 	unsigned long min_sz = 64 * 1024, sz;
1556 	int ret;
1557 
1558 	ret = mmc_test_set_blksize(test, 512);
1559 	if (ret)
1560 		return ret;
1561 
1562 	/* Make the test area size a multiple of pref_erase that is at least 4MiB */
1563 	sz = (unsigned long)test->card->pref_erase << 9;
1564 	t->max_sz = sz;
1565 	while (t->max_sz < 4 * 1024 * 1024)
1566 		t->max_sz += sz;
1567 	while (t->max_sz > TEST_AREA_MAX_SIZE && t->max_sz > sz)
1568 		t->max_sz -= sz;
1569 
1570 	t->max_segs = test->card->host->max_segs;
1571 	t->max_seg_sz = test->card->host->max_seg_size;
1572 	t->max_seg_sz -= t->max_seg_sz % 512;
1573 
1574 	t->max_tfr = t->max_sz;
1575 	if (t->max_tfr >> 9 > test->card->host->max_blk_count)
1576 		t->max_tfr = test->card->host->max_blk_count << 9;
1577 	if (t->max_tfr > test->card->host->max_req_size)
1578 		t->max_tfr = test->card->host->max_req_size;
1579 	if (t->max_tfr / t->max_seg_sz > t->max_segs)
1580 		t->max_tfr = t->max_segs * t->max_seg_sz;
1581 
1582 	/*
1583 	 * Try to allocate enough memory for a max. sized transfer.  Less is OK
1584 	 * because the same memory can be mapped into the scatterlist more than
1585 	 * once.  Also, take into account the limits imposed on scatterlist
1586 	 * segments by the host driver.
1587 	 */
1588 	t->mem = mmc_test_alloc_mem(min_sz, t->max_tfr, t->max_segs,
1589 				    t->max_seg_sz);
1590 	if (!t->mem)
1591 		return -ENOMEM;
1592 
1593 	t->sg = kmalloc_array(t->max_segs, sizeof(*t->sg), GFP_KERNEL);
1594 	if (!t->sg) {
1595 		ret = -ENOMEM;
1596 		goto out_free;
1597 	}
1598 
1599 	t->dev_addr = mmc_test_capacity(test->card) / 2;
1600 	t->dev_addr -= t->dev_addr % (t->max_sz >> 9);
1601 
1602 	if (erase) {
1603 		ret = mmc_test_area_erase(test);
1604 		if (ret)
1605 			goto out_free;
1606 	}
1607 
1608 	if (fill) {
1609 		ret = mmc_test_area_fill(test);
1610 		if (ret)
1611 			goto out_free;
1612 	}
1613 
1614 	return 0;
1615 
1616 out_free:
1617 	mmc_test_area_cleanup(test);
1618 	return ret;
1619 }
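/*
 * Editorial example (hypothetical pref_erase values): with pref_erase =
 * 1024 sectors, sz = 1024 << 9 = 512KiB and t->max_sz grows in 512KiB
 * steps until it reaches 4MiB; with pref_erase = 16384 sectors (8MiB),
 * t->max_sz is 8MiB straight away, still below TEST_AREA_MAX_SIZE.
 */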
1620 
1621 /*
1622  * Prepare for large transfers.  Do not erase the test area.
1623  */
1624 static int mmc_test_area_prepare(struct mmc_test_card *test)
1625 {
1626 	return mmc_test_area_init(test, 0, 0);
1627 }
1628 
1629 /*
1630  * Prepare for large transfers.  Do erase the test area.
1631  */
1632 static int mmc_test_area_prepare_erase(struct mmc_test_card *test)
1633 {
1634 	return mmc_test_area_init(test, 1, 0);
1635 }
1636 
1637 /*
1638  * Prepare for large transfers.  Erase and fill the test area.
1639  */
1640 static int mmc_test_area_prepare_fill(struct mmc_test_card *test)
1641 {
1642 	return mmc_test_area_init(test, 1, 1);
1643 }
1644 
1645 /*
1646  * Test best-case performance.  Best-case performance is expected from
1647  * a single large transfer.
1648  *
1649  * An additional option (max_scatter) allows the measurement of the same
1650  * transfer but with no contiguous pages in the scatter list.  This tests
1651  * the efficiency of DMA to handle scattered pages.
1652  */
1653 static int mmc_test_best_performance(struct mmc_test_card *test, int write,
1654 				     int max_scatter)
1655 {
1656 	struct mmc_test_area *t = &test->area;
1657 
1658 	return mmc_test_area_io(test, t->max_tfr, t->dev_addr, write,
1659 				max_scatter, 1);
1660 }
1661 
1662 /*
1663  * Best-case read performance.
1664  */
1665 static int mmc_test_best_read_performance(struct mmc_test_card *test)
1666 {
1667 	return mmc_test_best_performance(test, 0, 0);
1668 }
1669 
1670 /*
1671  * Best-case write performance.
1672  */
1673 static int mmc_test_best_write_performance(struct mmc_test_card *test)
1674 {
1675 	return mmc_test_best_performance(test, 1, 0);
1676 }
1677 
1678 /*
1679  * Best-case read performance into scattered pages.
1680  */
1681 static int mmc_test_best_read_perf_max_scatter(struct mmc_test_card *test)
1682 {
1683 	return mmc_test_best_performance(test, 0, 1);
1684 }
1685 
1686 /*
1687  * Best-case write performance from scattered pages.
1688  */
1689 static int mmc_test_best_write_perf_max_scatter(struct mmc_test_card *test)
1690 {
1691 	return mmc_test_best_performance(test, 1, 1);
1692 }
1693 
1694 /*
1695  * Single read performance by transfer size.
1696  */
1697 static int mmc_test_profile_read_perf(struct mmc_test_card *test)
1698 {
1699 	struct mmc_test_area *t = &test->area;
1700 	unsigned long sz;
1701 	unsigned int dev_addr;
1702 	int ret;
1703 
1704 	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
1705 		dev_addr = t->dev_addr + (sz >> 9);
1706 		ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
1707 		if (ret)
1708 			return ret;
1709 	}
1710 	sz = t->max_tfr;
1711 	dev_addr = t->dev_addr;
1712 	return mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
1713 }
1714 
1715 /*
1716  * Single write performance by transfer size.
1717  */
1718 static int mmc_test_profile_write_perf(struct mmc_test_card *test)
1719 {
1720 	struct mmc_test_area *t = &test->area;
1721 	unsigned long sz;
1722 	unsigned int dev_addr;
1723 	int ret;
1724 
1725 	ret = mmc_test_area_erase(test);
1726 	if (ret)
1727 		return ret;
1728 	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
1729 		dev_addr = t->dev_addr + (sz >> 9);
1730 		ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
1731 		if (ret)
1732 			return ret;
1733 	}
1734 	ret = mmc_test_area_erase(test);
1735 	if (ret)
1736 		return ret;
1737 	sz = t->max_tfr;
1738 	dev_addr = t->dev_addr;
1739 	return mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
1740 }
1741 
1742 /*
1743  * Single trim performance by transfer size.
1744  */
1745 static int mmc_test_profile_trim_perf(struct mmc_test_card *test)
1746 {
1747 	struct mmc_test_area *t = &test->area;
1748 	unsigned long sz;
1749 	unsigned int dev_addr;
1750 	struct timespec ts1, ts2;
1751 	int ret;
1752 
1753 	if (!mmc_can_trim(test->card))
1754 		return RESULT_UNSUP_CARD;
1755 
1756 	if (!mmc_can_erase(test->card))
1757 		return RESULT_UNSUP_HOST;
1758 
1759 	for (sz = 512; sz < t->max_sz; sz <<= 1) {
1760 		dev_addr = t->dev_addr + (sz >> 9);
1761 		getnstimeofday(&ts1);
1762 		ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
1763 		if (ret)
1764 			return ret;
1765 		getnstimeofday(&ts2);
1766 		mmc_test_print_rate(test, sz, &ts1, &ts2);
1767 	}
1768 	dev_addr = t->dev_addr;
1769 	getnstimeofday(&ts1);
1770 	ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
1771 	if (ret)
1772 		return ret;
1773 	getnstimeofday(&ts2);
1774 	mmc_test_print_rate(test, sz, &ts1, &ts2);
1775 	return 0;
1776 }
1777 
1778 static int mmc_test_seq_read_perf(struct mmc_test_card *test, unsigned long sz)
1779 {
1780 	struct mmc_test_area *t = &test->area;
1781 	unsigned int dev_addr, i, cnt;
1782 	struct timespec ts1, ts2;
1783 	int ret;
1784 
1785 	cnt = t->max_sz / sz;
1786 	dev_addr = t->dev_addr;
1787 	getnstimeofday(&ts1);
1788 	for (i = 0; i < cnt; i++) {
1789 		ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 0);
1790 		if (ret)
1791 			return ret;
1792 		dev_addr += (sz >> 9);
1793 	}
1794 	getnstimeofday(&ts2);
1795 	mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
1796 	return 0;
1797 }
1798 
1799 /*
1800  * Consecutive read performance by transfer size.
1801  */
1802 static int mmc_test_profile_seq_read_perf(struct mmc_test_card *test)
1803 {
1804 	struct mmc_test_area *t = &test->area;
1805 	unsigned long sz;
1806 	int ret;
1807 
1808 	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
1809 		ret = mmc_test_seq_read_perf(test, sz);
1810 		if (ret)
1811 			return ret;
1812 	}
1813 	sz = t->max_tfr;
1814 	return mmc_test_seq_read_perf(test, sz);
1815 }
1816 
1817 static int mmc_test_seq_write_perf(struct mmc_test_card *test, unsigned long sz)
1818 {
1819 	struct mmc_test_area *t = &test->area;
1820 	unsigned int dev_addr, i, cnt;
1821 	struct timespec ts1, ts2;
1822 	int ret;
1823 
1824 	ret = mmc_test_area_erase(test);
1825 	if (ret)
1826 		return ret;
1827 	cnt = t->max_sz / sz;
1828 	dev_addr = t->dev_addr;
1829 	getnstimeofday(&ts1);
1830 	for (i = 0; i < cnt; i++) {
1831 		ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 0);
1832 		if (ret)
1833 			return ret;
1834 		dev_addr += (sz >> 9);
1835 	}
1836 	getnstimeofday(&ts2);
1837 	mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
1838 	return 0;
1839 }
1840 
1841 /*
1842  * Consecutive write performance by transfer size.
1843  */
1844 static int mmc_test_profile_seq_write_perf(struct mmc_test_card *test)
1845 {
1846 	struct mmc_test_area *t = &test->area;
1847 	unsigned long sz;
1848 	int ret;
1849 
1850 	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
1851 		ret = mmc_test_seq_write_perf(test, sz);
1852 		if (ret)
1853 			return ret;
1854 	}
1855 	sz = t->max_tfr;
1856 	return mmc_test_seq_write_perf(test, sz);
1857 }
1858 
1859 /*
1860  * Consecutive trim performance by transfer size.
1861  */
1862 static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test)
1863 {
1864 	struct mmc_test_area *t = &test->area;
1865 	unsigned long sz;
1866 	unsigned int dev_addr, i, cnt;
1867 	struct timespec ts1, ts2;
1868 	int ret;
1869 
1870 	if (!mmc_can_trim(test->card))
1871 		return RESULT_UNSUP_CARD;
1872 
1873 	if (!mmc_can_erase(test->card))
1874 		return RESULT_UNSUP_HOST;
1875 
1876 	for (sz = 512; sz <= t->max_sz; sz <<= 1) {
1877 		ret = mmc_test_area_erase(test);
1878 		if (ret)
1879 			return ret;
1880 		ret = mmc_test_area_fill(test);
1881 		if (ret)
1882 			return ret;
1883 		cnt = t->max_sz / sz;
1884 		dev_addr = t->dev_addr;
1885 		getnstimeofday(&ts1);
1886 		for (i = 0; i < cnt; i++) {
1887 			ret = mmc_erase(test->card, dev_addr, sz >> 9,
1888 					MMC_TRIM_ARG);
1889 			if (ret)
1890 				return ret;
1891 			dev_addr += (sz >> 9);
1892 		}
1893 		getnstimeofday(&ts2);
1894 		mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
1895 	}
1896 	return 0;
1897 }
1898 
1899 static unsigned int rnd_next = 1;
1900 
1901 static unsigned int mmc_test_rnd_num(unsigned int rnd_cnt)
1902 {
1903 	uint64_t r;
1904 
1905 	rnd_next = rnd_next * 1103515245 + 12345;
1906 	r = (rnd_next >> 16) & 0x7fff;
1907 	return (r * rnd_cnt) >> 15;
1908 }
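/*
 * Editorial note: this is the classic C-library rand() LCG,
 * x = x * 1103515245 + 12345, scaled to be uniform in [0, rnd_cnt).
 * For example, starting from rnd_next = 1, the first call with
 * rnd_cnt = 100 computes r = (1103527590 >> 16) & 0x7fff = 16838 and
 * returns (16838 * 100) >> 15 = 51.
 */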
1909 
1910 static int mmc_test_rnd_perf(struct mmc_test_card *test, int write, int print,
1911 			     unsigned long sz)
1912 {
1913 	unsigned int dev_addr, cnt, rnd_addr, range1, range2, last_ea = 0, ea;
1914 	unsigned int ssz;
1915 	struct timespec ts1, ts2, ts;
1916 	int ret;
1917 
1918 	ssz = sz >> 9;
1919 
1920 	rnd_addr = mmc_test_capacity(test->card) / 4;
1921 	range1 = rnd_addr / test->card->pref_erase;
1922 	range2 = range1 / ssz;
1923 
1924 	getnstimeofday(&ts1);
1925 	for (cnt = 0; cnt < UINT_MAX; cnt++) {
1926 		getnstimeofday(&ts2);
1927 		ts = timespec_sub(ts2, ts1);
1928 		if (ts.tv_sec >= 10)
1929 			break;
1930 		ea = mmc_test_rnd_num(range1);
1931 		if (ea == last_ea)
1932 			ea -= 1;
1933 		last_ea = ea;
1934 		dev_addr = rnd_addr + test->card->pref_erase * ea +
1935 			   ssz * mmc_test_rnd_num(range2);
1936 		ret = mmc_test_area_io(test, sz, dev_addr, write, 0, 0);
1937 		if (ret)
1938 			return ret;
1939 	}
1940 	if (print)
1941 		mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
1942 	return 0;
1943 }
1944 
1945 static int mmc_test_random_perf(struct mmc_test_card *test, int write)
1946 {
1947 	struct mmc_test_area *t = &test->area;
1948 	unsigned int next;
1949 	unsigned long sz;
1950 	int ret;
1951 
1952 	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
1953 		/*
1954 		 * When writing, try to get more consistent results by running
1955 		 * the test twice with exactly the same I/O but outputting the
1956 		 * results only for the 2nd run.
1957 		 */
1958 		if (write) {
1959 			next = rnd_next;
1960 			ret = mmc_test_rnd_perf(test, write, 0, sz);
1961 			if (ret)
1962 				return ret;
1963 			rnd_next = next;
1964 		}
1965 		ret = mmc_test_rnd_perf(test, write, 1, sz);
1966 		if (ret)
1967 			return ret;
1968 	}
1969 	sz = t->max_tfr;
1970 	if (write) {
1971 		next = rnd_next;
1972 		ret = mmc_test_rnd_perf(test, write, 0, sz);
1973 		if (ret)
1974 			return ret;
1975 		rnd_next = next;
1976 	}
1977 	return mmc_test_rnd_perf(test, write, 1, sz);
1978 }
1979 
1980 /*
1981  * Random read performance by transfer size.
1982  */
1983 static int mmc_test_random_read_perf(struct mmc_test_card *test)
1984 {
1985 	return mmc_test_random_perf(test, 0);
1986 }
1987 
1988 /*
1989  * Random write performance by transfer size.
1990  */
1991 static int mmc_test_random_write_perf(struct mmc_test_card *test)
1992 {
1993 	return mmc_test_random_perf(test, 1);
1994 }
1995 
1996 static int mmc_test_seq_perf(struct mmc_test_card *test, int write,
1997 			     unsigned int tot_sz, int max_scatter)
1998 {
1999 	struct mmc_test_area *t = &test->area;
2000 	unsigned int dev_addr, i, cnt, sz, ssz;
2001 	struct timespec ts1, ts2;
2002 	int ret;
2003 
2004 	sz = t->max_tfr;
2005 
2006 	/*
2007 	 * In the case of a maximally scattered transfer, the maximum transfer
2008 	 * size is further limited by using PAGE_SIZE segments.
2009 	 */
2010 	if (max_scatter) {
2011 		unsigned long max_tfr;
2012 
2013 		if (t->max_seg_sz >= PAGE_SIZE)
2014 			max_tfr = t->max_segs * PAGE_SIZE;
2015 		else
2016 			max_tfr = t->max_segs * t->max_seg_sz;
2017 		if (sz > max_tfr)
2018 			sz = max_tfr;
2019 	}
2020 
2021 	ssz = sz >> 9;
2022 	dev_addr = mmc_test_capacity(test->card) / 4;
2023 	if (tot_sz > dev_addr << 9)
2024 		tot_sz = dev_addr << 9;
2025 	cnt = tot_sz / sz;
2026 	dev_addr &= 0xffff0000; /* Round down to a 32MiB (0x10000-sector) boundary */
2027 
2028 	getnstimeofday(&ts1);
2029 	for (i = 0; i < cnt; i++) {
2030 		ret = mmc_test_area_io(test, sz, dev_addr, write,
2031 				       max_scatter, 0);
2032 		if (ret)
2033 			return ret;
2034 		dev_addr += ssz;
2035 	}
2036 	getnstimeofday(&ts2);
2037 
2038 	mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
2039 
2040 	return 0;
2041 }
2042 
2043 static int mmc_test_large_seq_perf(struct mmc_test_card *test, int write)
2044 {
2045 	int ret, i;
2046 
2047 	for (i = 0; i < 10; i++) {
2048 		ret = mmc_test_seq_perf(test, write, 10 * 1024 * 1024, 1);
2049 		if (ret)
2050 			return ret;
2051 	}
2052 	for (i = 0; i < 5; i++) {
2053 		ret = mmc_test_seq_perf(test, write, 100 * 1024 * 1024, 1);
2054 		if (ret)
2055 			return ret;
2056 	}
2057 	for (i = 0; i < 3; i++) {
2058 		ret = mmc_test_seq_perf(test, write, 1000 * 1024 * 1024, 1);
2059 		if (ret)
2060 			return ret;
2061 	}
2062 
2063 	return ret;
2064 }
2065 
2066 /*
2067  * Large sequential read performance.
2068  */
2069 static int mmc_test_large_seq_read_perf(struct mmc_test_card *test)
2070 {
2071 	return mmc_test_large_seq_perf(test, 0);
2072 }
2073 
2074 /*
2075  * Large sequential write performance.
2076  */
2077 static int mmc_test_large_seq_write_perf(struct mmc_test_card *test)
2078 {
2079 	return mmc_test_large_seq_perf(test, 1);
2080 }
2081 
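/*
 * Perform a sequence of transfers of reqsize bytes covering size bytes in
 * total, optionally erasing the test area first.
 */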
2082 static int mmc_test_rw_multiple(struct mmc_test_card *test,
2083 				struct mmc_test_multiple_rw *tdata,
2084 				unsigned int reqsize, unsigned int size,
2085 				int min_sg_len)
2086 {
2087 	unsigned int dev_addr;
2088 	struct mmc_test_area *t = &test->area;
2089 	int ret = 0;
2090 
2091 	/* Set up test area */
2092 	if (size > mmc_test_capacity(test->card) / 2 * 512)
2093 		size = mmc_test_capacity(test->card) / 2 * 512;
2094 	if (reqsize > t->max_tfr)
2095 		reqsize = t->max_tfr;
2096 	dev_addr = mmc_test_capacity(test->card) / 4;
2097 	if (dev_addr & 0xffff0000)
2098 		dev_addr &= 0xffff0000; /* Round to 32MiB boundary */
2099 	else
2100 		dev_addr &= 0xfffff800; /* Round to 1MiB boundary */
2101 	if (!dev_addr)
2102 		goto err;
2103 
2104 	if (reqsize > size)
2105 		return 0;
2106 
2107 	/* Prepare test area */
2108 	if (mmc_can_erase(test->card) &&
2109 	    tdata->prepare & MMC_TEST_PREP_ERASE) {
2110 		ret = mmc_erase(test->card, dev_addr,
2111 				size / 512, MMC_SECURE_ERASE_ARG);
2112 		if (ret)
2113 			ret = mmc_erase(test->card, dev_addr,
2114 					size / 512, MMC_ERASE_ARG);
2115 		if (ret)
2116 			goto err;
2117 	}
2118 
2119 	/* Run test */
2120 	ret = mmc_test_area_io_seq(test, reqsize, dev_addr,
2121 				   tdata->do_write, 0, 1, size / reqsize,
2122 				   tdata->do_nonblock_req, min_sg_len);
2123 	if (ret)
2124 		goto err;
2125 
2126 	return ret;
2127  err:
2128 	pr_info("[%s] error\n", __func__);
2129 	return ret;
2130 }
2131 
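/*
 * Run mmc_test_rw_multiple() once for each request size in rw->bs[].  For
 * non-blocking requests, the host driver must provide the pre_req and
 * post_req callbacks as a pair (both or neither).
 */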
2132 static int mmc_test_rw_multiple_size(struct mmc_test_card *test,
2133 				     struct mmc_test_multiple_rw *rw)
2134 {
2135 	int ret = 0;
2136 	int i;
2137 	void *pre_req = test->card->host->ops->pre_req;
2138 	void *post_req = test->card->host->ops->post_req;
2139 
2140 	if (rw->do_nonblock_req &&
2141 	    ((!pre_req && post_req) || (pre_req && !post_req))) {
2142 		pr_info("error: only one of pre/post is defined\n");
2143 		return -EINVAL;
2144 	}
2145 
2146 	for (i = 0; i < rw->len && ret == 0; i++) {
2147 		ret = mmc_test_rw_multiple(test, rw, rw->bs[i], rw->size, 0);
2148 		if (ret)
2149 			break;
2150 	}
2151 	return ret;
2152 }
2153 
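/*
 * Run mmc_test_rw_multiple() with 512 KiB requests, once for each minimum
 * scatterlist length in rw->sg_len[].
 */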
2154 static int mmc_test_rw_multiple_sg_len(struct mmc_test_card *test,
2155 				       struct mmc_test_multiple_rw *rw)
2156 {
2157 	int ret = 0;
2158 	int i;
2159 
2160 	for (i = 0; i < rw->len && ret == 0; i++) {
2161 		ret = mmc_test_rw_multiple(test, rw, 512 * 1024, rw->size,
2162 					   rw->sg_len[i]);
2163 		if (ret)
2164 			break;
2165 	}
2166 	return ret;
2167 }
2168 
2169 /*
2170  * Multiple blocking write, 4 KiB to 4 MiB chunks
2171  */
2172 static int mmc_test_profile_mult_write_blocking_perf(struct mmc_test_card *test)
2173 {
2174 	unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
2175 			     1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
2176 	struct mmc_test_multiple_rw test_data = {
2177 		.bs = bs,
2178 		.size = TEST_AREA_MAX_SIZE,
2179 		.len = ARRAY_SIZE(bs),
2180 		.do_write = true,
2181 		.do_nonblock_req = false,
2182 		.prepare = MMC_TEST_PREP_ERASE,
2183 	};
2184 
2185 	return mmc_test_rw_multiple_size(test, &test_data);
2186 }
2187 
2188 /*
2189  * Multiple non-blocking write, 4 KiB to 4 MiB chunks
2190  */
2191 static int mmc_test_profile_mult_write_nonblock_perf(struct mmc_test_card *test)
2192 {
2193 	unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
2194 			     1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
2195 	struct mmc_test_multiple_rw test_data = {
2196 		.bs = bs,
2197 		.size = TEST_AREA_MAX_SIZE,
2198 		.len = ARRAY_SIZE(bs),
2199 		.do_write = true,
2200 		.do_nonblock_req = true,
2201 		.prepare = MMC_TEST_PREP_ERASE,
2202 	};
2203 
2204 	return mmc_test_rw_multiple_size(test, &test_data);
2205 }
2206 
2207 /*
2208  * Multiple blocking read, 4 KiB to 4 MiB chunks
2209  */
2210 static int mmc_test_profile_mult_read_blocking_perf(struct mmc_test_card *test)
2211 {
2212 	unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
2213 			     1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
2214 	struct mmc_test_multiple_rw test_data = {
2215 		.bs = bs,
2216 		.size = TEST_AREA_MAX_SIZE,
2217 		.len = ARRAY_SIZE(bs),
2218 		.do_write = false,
2219 		.do_nonblock_req = false,
2220 		.prepare = MMC_TEST_PREP_NONE,
2221 	};
2222 
2223 	return mmc_test_rw_multiple_size(test, &test_data);
2224 }
2225 
2226 /*
2227  * Multiple non-blocking read, 4 KiB to 4 MiB chunks
2228  */
2229 static int mmc_test_profile_mult_read_nonblock_perf(struct mmc_test_card *test)
2230 {
2231 	unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
2232 			     1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
2233 	struct mmc_test_multiple_rw test_data = {
2234 		.bs = bs,
2235 		.size = TEST_AREA_MAX_SIZE,
2236 		.len = ARRAY_SIZE(bs),
2237 		.do_write = false,
2238 		.do_nonblock_req = true,
2239 		.prepare = MMC_TEST_PREP_NONE,
2240 	};
2241 
2242 	return mmc_test_rw_multiple_size(test, &test_data);
2243 }
2244 
2245 /*
2246  * Multiple blocking write 1 to 512 sg elements
2247  */
2248 static int mmc_test_profile_sglen_wr_blocking_perf(struct mmc_test_card *test)
2249 {
2250 	unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
2251 				 1 << 7, 1 << 8, 1 << 9};
2252 	struct mmc_test_multiple_rw test_data = {
2253 		.sg_len = sg_len,
2254 		.size = TEST_AREA_MAX_SIZE,
2255 		.len = ARRAY_SIZE(sg_len),
2256 		.do_write = true,
2257 		.do_nonblock_req = false,
2258 		.prepare = MMC_TEST_PREP_ERASE,
2259 	};
2260 
2261 	return mmc_test_rw_multiple_sg_len(test, &test_data);
2262 }
2263 
2264 /*
2265  * Multiple non-blocking write 1 to 512 sg elements
2266  */
2267 static int mmc_test_profile_sglen_wr_nonblock_perf(struct mmc_test_card *test)
2268 {
2269 	unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
2270 				 1 << 7, 1 << 8, 1 << 9};
2271 	struct mmc_test_multiple_rw test_data = {
2272 		.sg_len = sg_len,
2273 		.size = TEST_AREA_MAX_SIZE,
2274 		.len = ARRAY_SIZE(sg_len),
2275 		.do_write = true,
2276 		.do_nonblock_req = true,
2277 		.prepare = MMC_TEST_PREP_ERASE,
2278 	};
2279 
2280 	return mmc_test_rw_multiple_sg_len(test, &test_data);
2281 }
2282 
2283 /*
2284  * Multiple blocking read 1 to 512 sg elements
2285  */
2286 static int mmc_test_profile_sglen_r_blocking_perf(struct mmc_test_card *test)
2287 {
2288 	unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
2289 				 1 << 7, 1 << 8, 1 << 9};
2290 	struct mmc_test_multiple_rw test_data = {
2291 		.sg_len = sg_len,
2292 		.size = TEST_AREA_MAX_SIZE,
2293 		.len = ARRAY_SIZE(sg_len),
2294 		.do_write = false,
2295 		.do_nonblock_req = false,
2296 		.prepare = MMC_TEST_PREP_NONE,
2297 	};
2298 
2299 	return mmc_test_rw_multiple_sg_len(test, &test_data);
2300 }
2301 
2302 /*
2303  * Multiple non-blocking read 1 to 512 sg elements
2304  */
2305 static int mmc_test_profile_sglen_r_nonblock_perf(struct mmc_test_card *test)
2306 {
2307 	unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
2308 				 1 << 7, 1 << 8, 1 << 9};
2309 	struct mmc_test_multiple_rw test_data = {
2310 		.sg_len = sg_len,
2311 		.size = TEST_AREA_MAX_SIZE,
2312 		.len = ARRAY_SIZE(sg_len),
2313 		.do_write = false,
2314 		.do_nonblock_req = true,
2315 		.prepare = MMC_TEST_PREP_NONE,
2316 	};
2317 
2318 	return mmc_test_rw_multiple_sg_len(test, &test_data);
2319 }
2320 
2321 /*
2322  * eMMC hardware reset.
2323  */
2324 static int mmc_test_reset(struct mmc_test_card *test)
2325 {
2326 	struct mmc_card *card = test->card;
2327 	struct mmc_host *host = card->host;
2328 	int err;
2329 
2330 	err = mmc_hw_reset(host);
2331 	if (!err)
2332 		return RESULT_OK;
2333 	else if (err == -EOPNOTSUPP)
2334 		return RESULT_UNSUP_HOST;
2335 
2336 	return RESULT_FAIL;
2337 }
2338 
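/*
 * Send CMD13 (SEND_STATUS) so that the card status can be polled while a
 * data transfer is in progress.
 */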
2339 static int mmc_test_send_status(struct mmc_test_card *test,
2340 				struct mmc_command *cmd)
2341 {
2342 	memset(cmd, 0, sizeof(*cmd));
2343 
2344 	cmd->opcode = MMC_SEND_STATUS;
2345 	if (!mmc_host_is_spi(test->card->host))
2346 		cmd->arg = test->card->rca << 16;
2347 	cmd->flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
2348 
2349 	return mmc_wait_for_cmd(test->card->host, cmd, 0);
2350 }
2351 
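/*
 * Start a data transfer with cap_cmd_during_tfr set, then send status
 * commands while the transfer is ongoing.  With repeat_cmd, keep sending
 * them until the card returns to the Tran state or the request completes.
 */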
2352 static int mmc_test_ongoing_transfer(struct mmc_test_card *test,
2353 				     unsigned int dev_addr, int use_sbc,
2354 				     int repeat_cmd, int write, int use_areq)
2355 {
2356 	struct mmc_test_req *rq = mmc_test_req_alloc();
2357 	struct mmc_host *host = test->card->host;
2358 	struct mmc_test_area *t = &test->area;
2359 	struct mmc_test_async_req test_areq = { .test = test };
2360 	struct mmc_request *mrq;
2361 	unsigned long timeout;
2362 	bool expired = false;
2363 	enum mmc_blk_status blkstat = MMC_BLK_SUCCESS;
2364 	int ret = 0, cmd_ret;
2365 	u32 status = 0;
2366 	int count = 0;
2367 
2368 	if (!rq)
2369 		return -ENOMEM;
2370 
2371 	mrq = &rq->mrq;
2372 	if (use_sbc)
2373 		mrq->sbc = &rq->sbc;
2374 	mrq->cap_cmd_during_tfr = true;
2375 
2376 	test_areq.areq.mrq = mrq;
2377 	test_areq.areq.err_check = mmc_test_check_result_async;
2378 
2379 	mmc_test_prepare_mrq(test, mrq, t->sg, t->sg_len, dev_addr, t->blocks,
2380 			     512, write);
2381 
2382 	if (use_sbc && t->blocks > 1 && !mrq->sbc) {
2383 		ret = mmc_host_cmd23(host) ?
2384 		       RESULT_UNSUP_CARD :
2385 		       RESULT_UNSUP_HOST;
2386 		goto out_free;
2387 	}
2388 
2389 	/* Start ongoing data request */
2390 	if (use_areq) {
2391 		mmc_start_areq(host, &test_areq.areq, &blkstat);
2392 		if (blkstat != MMC_BLK_SUCCESS) {
2393 			ret = RESULT_FAIL;
2394 			goto out_free;
2395 		}
2396 	} else {
2397 		mmc_wait_for_req(host, mrq);
2398 	}
2399 
2400 	timeout = jiffies + msecs_to_jiffies(3000);
2401 	do {
2402 		count += 1;
2403 
2404 		/* Send status command while data transfer in progress */
2405 		cmd_ret = mmc_test_send_status(test, &rq->status);
2406 		if (cmd_ret)
2407 			break;
2408 
2409 		status = rq->status.resp[0];
2410 		if (status & R1_ERROR) {
2411 			cmd_ret = -EIO;
2412 			break;
2413 		}
2414 
2415 		if (mmc_is_req_done(host, mrq))
2416 			break;
2417 
2418 		expired = time_after(jiffies, timeout);
2419 		if (expired) {
2420 			pr_info("%s: timeout waiting for Tran state status %#x\n",
2421 				mmc_hostname(host), status);
2422 			cmd_ret = -ETIMEDOUT;
2423 			break;
2424 		}
2425 	} while (repeat_cmd && R1_CURRENT_STATE(status) != R1_STATE_TRAN);
2426 
2427 	/* Wait for data request to complete */
2428 	if (use_areq) {
2429 		mmc_start_areq(host, NULL, &blkstat);
2430 		if (blkstat != MMC_BLK_SUCCESS)
2431 			ret = RESULT_FAIL;
2432 	} else {
2433 		mmc_wait_for_req_done(test->card->host, mrq);
2434 	}
2435 
2436 	/*
2437 	 * For a cap_cmd_during_tfr request, the upper layer must send the
2438 	 * stop command if required.
2439 	 */
2440 	if (mrq->data->stop && (mrq->data->error || !mrq->sbc)) {
2441 		if (ret)
2442 			mmc_wait_for_cmd(host, mrq->data->stop, 0);
2443 		else
2444 			ret = mmc_wait_for_cmd(host, mrq->data->stop, 0);
2445 	}
2446 
2447 	if (ret)
2448 		goto out_free;
2449 
2450 	if (cmd_ret) {
2451 		pr_info("%s: Send Status failed: status %#x, error %d\n",
2452 			mmc_hostname(test->card->host), status, cmd_ret);
2453 	}
2454 
2455 	ret = mmc_test_check_result(test, mrq);
2456 	if (ret)
2457 		goto out_free;
2458 
2459 	ret = mmc_test_wait_busy(test);
2460 	if (ret)
2461 		goto out_free;
2462 
2463 	if (repeat_cmd && (t->blocks + 1) << 9 > t->max_tfr)
2464 		pr_info("%s: %d commands completed during transfer of %u blocks\n",
2465 			mmc_hostname(test->card->host), count, t->blocks);
2466 
2467 	if (cmd_ret)
2468 		ret = cmd_ret;
2469 out_free:
2470 	kfree(rq);
2471 
2472 	return ret;
2473 }
2474 
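/*
 * Map a transfer of sz bytes and run the ongoing-transfer test, first with a
 * single status command and then with repeated polling.
 */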
2475 static int __mmc_test_cmds_during_tfr(struct mmc_test_card *test,
2476 				      unsigned long sz, int use_sbc, int write,
2477 				      int use_areq)
2478 {
2479 	struct mmc_test_area *t = &test->area;
2480 	int ret;
2481 
2482 	if (!(test->card->host->caps & MMC_CAP_CMD_DURING_TFR))
2483 		return RESULT_UNSUP_HOST;
2484 
2485 	ret = mmc_test_area_map(test, sz, 0, 0);
2486 	if (ret)
2487 		return ret;
2488 
2489 	ret = mmc_test_ongoing_transfer(test, t->dev_addr, use_sbc, 0, write,
2490 					use_areq);
2491 	if (ret)
2492 		return ret;
2493 
2494 	return mmc_test_ongoing_transfer(test, t->dev_addr, use_sbc, 1, write,
2495 					 use_areq);
2496 }
2497 
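/*
 * Run the commands-during-transfer test for every transfer size from one
 * block up to the maximum.
 */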
2498 static int mmc_test_cmds_during_tfr(struct mmc_test_card *test, int use_sbc,
2499 				    int write, int use_areq)
2500 {
2501 	struct mmc_test_area *t = &test->area;
2502 	unsigned long sz;
2503 	int ret;
2504 
2505 	for (sz = 512; sz <= t->max_tfr; sz += 512) {
2506 		ret = __mmc_test_cmds_during_tfr(test, sz, use_sbc, write,
2507 						 use_areq);
2508 		if (ret)
2509 			return ret;
2510 	}
2511 	return 0;
2512 }
2513 
2514 /*
2515  * Commands during read - no Set Block Count (CMD23).
2516  */
2517 static int mmc_test_cmds_during_read(struct mmc_test_card *test)
2518 {
2519 	return mmc_test_cmds_during_tfr(test, 0, 0, 0);
2520 }
2521 
2522 /*
2523  * Commands during write - no Set Block Count (CMD23).
2524  */
2525 static int mmc_test_cmds_during_write(struct mmc_test_card *test)
2526 {
2527 	return mmc_test_cmds_during_tfr(test, 0, 1, 0);
2528 }
2529 
2530 /*
2531  * Commands during read - use Set Block Count (CMD23).
2532  */
2533 static int mmc_test_cmds_during_read_cmd23(struct mmc_test_card *test)
2534 {
2535 	return mmc_test_cmds_during_tfr(test, 1, 0, 0);
2536 }
2537 
2538 /*
2539  * Commands during write - use Set Block Count (CMD23).
2540  */
2541 static int mmc_test_cmds_during_write_cmd23(struct mmc_test_card *test)
2542 {
2543 	return mmc_test_cmds_during_tfr(test, 1, 1, 0);
2544 }
2545 
2546 /*
2547  * Commands during non-blocking read - use Set Block Count (CMD23).
2548  */
2549 static int mmc_test_cmds_during_read_cmd23_nonblock(struct mmc_test_card *test)
2550 {
2551 	return mmc_test_cmds_during_tfr(test, 1, 0, 1);
2552 }
2553 
2554 /*
2555  * Commands during non-blocking write - use Set Block Count (CMD23).
2556  */
2557 static int mmc_test_cmds_during_write_cmd23_nonblock(struct mmc_test_card *test)
2558 {
2559 	return mmc_test_cmds_during_tfr(test, 1, 1, 1);
2560 }
2561 
2562 static const struct mmc_test_case mmc_test_cases[] = {
2563 	{
2564 		.name = "Basic write (no data verification)",
2565 		.run = mmc_test_basic_write,
2566 	},
2567 
2568 	{
2569 		.name = "Basic read (no data verification)",
2570 		.run = mmc_test_basic_read,
2571 	},
2572 
2573 	{
2574 		.name = "Basic write (with data verification)",
2575 		.prepare = mmc_test_prepare_write,
2576 		.run = mmc_test_verify_write,
2577 		.cleanup = mmc_test_cleanup,
2578 	},
2579 
2580 	{
2581 		.name = "Basic read (with data verification)",
2582 		.prepare = mmc_test_prepare_read,
2583 		.run = mmc_test_verify_read,
2584 		.cleanup = mmc_test_cleanup,
2585 	},
2586 
2587 	{
2588 		.name = "Multi-block write",
2589 		.prepare = mmc_test_prepare_write,
2590 		.run = mmc_test_multi_write,
2591 		.cleanup = mmc_test_cleanup,
2592 	},
2593 
2594 	{
2595 		.name = "Multi-block read",
2596 		.prepare = mmc_test_prepare_read,
2597 		.run = mmc_test_multi_read,
2598 		.cleanup = mmc_test_cleanup,
2599 	},
2600 
2601 	{
2602 		.name = "Power of two block writes",
2603 		.prepare = mmc_test_prepare_write,
2604 		.run = mmc_test_pow2_write,
2605 		.cleanup = mmc_test_cleanup,
2606 	},
2607 
2608 	{
2609 		.name = "Power of two block reads",
2610 		.prepare = mmc_test_prepare_read,
2611 		.run = mmc_test_pow2_read,
2612 		.cleanup = mmc_test_cleanup,
2613 	},
2614 
2615 	{
2616 		.name = "Weird sized block writes",
2617 		.prepare = mmc_test_prepare_write,
2618 		.run = mmc_test_weird_write,
2619 		.cleanup = mmc_test_cleanup,
2620 	},
2621 
2622 	{
2623 		.name = "Weird sized block reads",
2624 		.prepare = mmc_test_prepare_read,
2625 		.run = mmc_test_weird_read,
2626 		.cleanup = mmc_test_cleanup,
2627 	},
2628 
2629 	{
2630 		.name = "Badly aligned write",
2631 		.prepare = mmc_test_prepare_write,
2632 		.run = mmc_test_align_write,
2633 		.cleanup = mmc_test_cleanup,
2634 	},
2635 
2636 	{
2637 		.name = "Badly aligned read",
2638 		.prepare = mmc_test_prepare_read,
2639 		.run = mmc_test_align_read,
2640 		.cleanup = mmc_test_cleanup,
2641 	},
2642 
2643 	{
2644 		.name = "Badly aligned multi-block write",
2645 		.prepare = mmc_test_prepare_write,
2646 		.run = mmc_test_align_multi_write,
2647 		.cleanup = mmc_test_cleanup,
2648 	},
2649 
2650 	{
2651 		.name = "Badly aligned multi-block read",
2652 		.prepare = mmc_test_prepare_read,
2653 		.run = mmc_test_align_multi_read,
2654 		.cleanup = mmc_test_cleanup,
2655 	},
2656 
2657 	{
2658 		.name = "Correct xfer_size at write (start failure)",
2659 		.run = mmc_test_xfersize_write,
2660 	},
2661 
2662 	{
2663 		.name = "Correct xfer_size at read (start failure)",
2664 		.run = mmc_test_xfersize_read,
2665 	},
2666 
2667 	{
2668 		.name = "Correct xfer_size at write (midway failure)",
2669 		.run = mmc_test_multi_xfersize_write,
2670 	},
2671 
2672 	{
2673 		.name = "Correct xfer_size at read (midway failure)",
2674 		.run = mmc_test_multi_xfersize_read,
2675 	},
2676 
2677 #ifdef CONFIG_HIGHMEM
2678 
2679 	{
2680 		.name = "Highmem write",
2681 		.prepare = mmc_test_prepare_write,
2682 		.run = mmc_test_write_high,
2683 		.cleanup = mmc_test_cleanup,
2684 	},
2685 
2686 	{
2687 		.name = "Highmem read",
2688 		.prepare = mmc_test_prepare_read,
2689 		.run = mmc_test_read_high,
2690 		.cleanup = mmc_test_cleanup,
2691 	},
2692 
2693 	{
2694 		.name = "Multi-block highmem write",
2695 		.prepare = mmc_test_prepare_write,
2696 		.run = mmc_test_multi_write_high,
2697 		.cleanup = mmc_test_cleanup,
2698 	},
2699 
2700 	{
2701 		.name = "Multi-block highmem read",
2702 		.prepare = mmc_test_prepare_read,
2703 		.run = mmc_test_multi_read_high,
2704 		.cleanup = mmc_test_cleanup,
2705 	},
2706 
2707 #else
2708 
2709 	{
2710 		.name = "Highmem write",
2711 		.run = mmc_test_no_highmem,
2712 	},
2713 
2714 	{
2715 		.name = "Highmem read",
2716 		.run = mmc_test_no_highmem,
2717 	},
2718 
2719 	{
2720 		.name = "Multi-block highmem write",
2721 		.run = mmc_test_no_highmem,
2722 	},
2723 
2724 	{
2725 		.name = "Multi-block highmem read",
2726 		.run = mmc_test_no_highmem,
2727 	},
2728 
2729 #endif /* CONFIG_HIGHMEM */
2730 
2731 	{
2732 		.name = "Best-case read performance",
2733 		.prepare = mmc_test_area_prepare_fill,
2734 		.run = mmc_test_best_read_performance,
2735 		.cleanup = mmc_test_area_cleanup,
2736 	},
2737 
2738 	{
2739 		.name = "Best-case write performance",
2740 		.prepare = mmc_test_area_prepare_erase,
2741 		.run = mmc_test_best_write_performance,
2742 		.cleanup = mmc_test_area_cleanup,
2743 	},
2744 
2745 	{
2746 		.name = "Best-case read performance into scattered pages",
2747 		.prepare = mmc_test_area_prepare_fill,
2748 		.run = mmc_test_best_read_perf_max_scatter,
2749 		.cleanup = mmc_test_area_cleanup,
2750 	},
2751 
2752 	{
2753 		.name = "Best-case write performance from scattered pages",
2754 		.prepare = mmc_test_area_prepare_erase,
2755 		.run = mmc_test_best_write_perf_max_scatter,
2756 		.cleanup = mmc_test_area_cleanup,
2757 	},
2758 
2759 	{
2760 		.name = "Single read performance by transfer size",
2761 		.prepare = mmc_test_area_prepare_fill,
2762 		.run = mmc_test_profile_read_perf,
2763 		.cleanup = mmc_test_area_cleanup,
2764 	},
2765 
2766 	{
2767 		.name = "Single write performance by transfer size",
2768 		.prepare = mmc_test_area_prepare,
2769 		.run = mmc_test_profile_write_perf,
2770 		.cleanup = mmc_test_area_cleanup,
2771 	},
2772 
2773 	{
2774 		.name = "Single trim performance by transfer size",
2775 		.prepare = mmc_test_area_prepare_fill,
2776 		.run = mmc_test_profile_trim_perf,
2777 		.cleanup = mmc_test_area_cleanup,
2778 	},
2779 
2780 	{
2781 		.name = "Consecutive read performance by transfer size",
2782 		.prepare = mmc_test_area_prepare_fill,
2783 		.run = mmc_test_profile_seq_read_perf,
2784 		.cleanup = mmc_test_area_cleanup,
2785 	},
2786 
2787 	{
2788 		.name = "Consecutive write performance by transfer size",
2789 		.prepare = mmc_test_area_prepare,
2790 		.run = mmc_test_profile_seq_write_perf,
2791 		.cleanup = mmc_test_area_cleanup,
2792 	},
2793 
2794 	{
2795 		.name = "Consecutive trim performance by transfer size",
2796 		.prepare = mmc_test_area_prepare,
2797 		.run = mmc_test_profile_seq_trim_perf,
2798 		.cleanup = mmc_test_area_cleanup,
2799 	},
2800 
2801 	{
2802 		.name = "Random read performance by transfer size",
2803 		.prepare = mmc_test_area_prepare,
2804 		.run = mmc_test_random_read_perf,
2805 		.cleanup = mmc_test_area_cleanup,
2806 	},
2807 
2808 	{
2809 		.name = "Random write performance by transfer size",
2810 		.prepare = mmc_test_area_prepare,
2811 		.run = mmc_test_random_write_perf,
2812 		.cleanup = mmc_test_area_cleanup,
2813 	},
2814 
2815 	{
2816 		.name = "Large sequential read into scattered pages",
2817 		.prepare = mmc_test_area_prepare,
2818 		.run = mmc_test_large_seq_read_perf,
2819 		.cleanup = mmc_test_area_cleanup,
2820 	},
2821 
2822 	{
2823 		.name = "Large sequential write from scattered pages",
2824 		.prepare = mmc_test_area_prepare,
2825 		.run = mmc_test_large_seq_write_perf,
2826 		.cleanup = mmc_test_area_cleanup,
2827 	},
2828 
2829 	{
2830 		.name = "Write performance with blocking req 4k to 4MB",
2831 		.prepare = mmc_test_area_prepare,
2832 		.run = mmc_test_profile_mult_write_blocking_perf,
2833 		.cleanup = mmc_test_area_cleanup,
2834 	},
2835 
2836 	{
2837 		.name = "Write performance with non-blocking req 4k to 4MB",
2838 		.prepare = mmc_test_area_prepare,
2839 		.run = mmc_test_profile_mult_write_nonblock_perf,
2840 		.cleanup = mmc_test_area_cleanup,
2841 	},
2842 
2843 	{
2844 		.name = "Read performance with blocking req 4k to 4MB",
2845 		.prepare = mmc_test_area_prepare,
2846 		.run = mmc_test_profile_mult_read_blocking_perf,
2847 		.cleanup = mmc_test_area_cleanup,
2848 	},
2849 
2850 	{
2851 		.name = "Read performance with non-blocking req 4k to 4MB",
2852 		.prepare = mmc_test_area_prepare,
2853 		.run = mmc_test_profile_mult_read_nonblock_perf,
2854 		.cleanup = mmc_test_area_cleanup,
2855 	},
2856 
2857 	{
2858 		.name = "Write performance blocking req 1 to 512 sg elems",
2859 		.prepare = mmc_test_area_prepare,
2860 		.run = mmc_test_profile_sglen_wr_blocking_perf,
2861 		.cleanup = mmc_test_area_cleanup,
2862 	},
2863 
2864 	{
2865 		.name = "Write performance non-blocking req 1 to 512 sg elems",
2866 		.prepare = mmc_test_area_prepare,
2867 		.run = mmc_test_profile_sglen_wr_nonblock_perf,
2868 		.cleanup = mmc_test_area_cleanup,
2869 	},
2870 
2871 	{
2872 		.name = "Read performance blocking req 1 to 512 sg elems",
2873 		.prepare = mmc_test_area_prepare,
2874 		.run = mmc_test_profile_sglen_r_blocking_perf,
2875 		.cleanup = mmc_test_area_cleanup,
2876 	},
2877 
2878 	{
2879 		.name = "Read performance non-blocking req 1 to 512 sg elems",
2880 		.prepare = mmc_test_area_prepare,
2881 		.run = mmc_test_profile_sglen_r_nonblock_perf,
2882 		.cleanup = mmc_test_area_cleanup,
2883 	},
2884 
2885 	{
2886 		.name = "Reset test",
2887 		.run = mmc_test_reset,
2888 	},
2889 
2890 	{
2891 		.name = "Commands during read - no Set Block Count (CMD23)",
2892 		.prepare = mmc_test_area_prepare,
2893 		.run = mmc_test_cmds_during_read,
2894 		.cleanup = mmc_test_area_cleanup,
2895 	},
2896 
2897 	{
2898 		.name = "Commands during write - no Set Block Count (CMD23)",
2899 		.prepare = mmc_test_area_prepare,
2900 		.run = mmc_test_cmds_during_write,
2901 		.cleanup = mmc_test_area_cleanup,
2902 	},
2903 
2904 	{
2905 		.name = "Commands during read - use Set Block Count (CMD23)",
2906 		.prepare = mmc_test_area_prepare,
2907 		.run = mmc_test_cmds_during_read_cmd23,
2908 		.cleanup = mmc_test_area_cleanup,
2909 	},
2910 
2911 	{
2912 		.name = "Commands during write - use Set Block Count (CMD23)",
2913 		.prepare = mmc_test_area_prepare,
2914 		.run = mmc_test_cmds_during_write_cmd23,
2915 		.cleanup = mmc_test_area_cleanup,
2916 	},
2917 
2918 	{
2919 		.name = "Commands during non-blocking read - use Set Block Count (CMD23)",
2920 		.prepare = mmc_test_area_prepare,
2921 		.run = mmc_test_cmds_during_read_cmd23_nonblock,
2922 		.cleanup = mmc_test_area_cleanup,
2923 	},
2924 
2925 	{
2926 		.name = "Commands during non-blocking write - use Set Block Count (CMD23)",
2927 		.prepare = mmc_test_area_prepare,
2928 		.run = mmc_test_cmds_during_write_cmd23_nonblock,
2929 		.cleanup = mmc_test_area_cleanup,
2930 	},
2931 };
2932 
2933 static DEFINE_MUTEX(mmc_test_lock);
2934 
2935 static LIST_HEAD(mmc_test_result);
2936 
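/*
 * Run the given test case (or all of them when testcase is 0), saving a
 * result container for each run on the global result list.
 */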
2937 static void mmc_test_run(struct mmc_test_card *test, int testcase)
2938 {
2939 	int i, ret;
2940 
2941 	pr_info("%s: Starting tests of card %s...\n",
2942 		mmc_hostname(test->card->host), mmc_card_id(test->card));
2943 
2944 	mmc_claim_host(test->card->host);
2945 
2946 	for (i = 0; i < ARRAY_SIZE(mmc_test_cases); i++) {
2947 		struct mmc_test_general_result *gr;
2948 
2949 		if (testcase && ((i + 1) != testcase))
2950 			continue;
2951 
2952 		pr_info("%s: Test case %d. %s...\n",
2953 			mmc_hostname(test->card->host), i + 1,
2954 			mmc_test_cases[i].name);
2955 
2956 		if (mmc_test_cases[i].prepare) {
2957 			ret = mmc_test_cases[i].prepare(test);
2958 			if (ret) {
2959 				pr_info("%s: Result: Prepare stage failed! (%d)\n",
2960 					mmc_hostname(test->card->host),
2961 					ret);
2962 				continue;
2963 			}
2964 		}
2965 
2966 		gr = kzalloc(sizeof(*gr), GFP_KERNEL);
2967 		if (gr) {
2968 			INIT_LIST_HEAD(&gr->tr_lst);
2969 
2970 			/* Assign the data we already know */
2971 			gr->card = test->card;
2972 			gr->testcase = i;
2973 
2974 			/* Append the container to the global result list */
2975 			list_add_tail(&gr->link, &mmc_test_result);
2976 
2977 			/*
2978 			 * Save the pointer to the created container in our
2979 			 * private structure.
2980 			 */
2981 			test->gr = gr;
2982 		}
2983 
2984 		ret = mmc_test_cases[i].run(test);
2985 		switch (ret) {
2986 		case RESULT_OK:
2987 			pr_info("%s: Result: OK\n",
2988 				mmc_hostname(test->card->host));
2989 			break;
2990 		case RESULT_FAIL:
2991 			pr_info("%s: Result: FAILED\n",
2992 				mmc_hostname(test->card->host));
2993 			break;
2994 		case RESULT_UNSUP_HOST:
2995 			pr_info("%s: Result: UNSUPPORTED (by host)\n",
2996 				mmc_hostname(test->card->host));
2997 			break;
2998 		case RESULT_UNSUP_CARD:
2999 			pr_info("%s: Result: UNSUPPORTED (by card)\n",
3000 				mmc_hostname(test->card->host));
3001 			break;
3002 		default:
3003 			pr_info("%s: Result: ERROR (%d)\n",
3004 				mmc_hostname(test->card->host), ret);
3005 		}
3006 
3007 		/* Save the result */
3008 		if (gr)
3009 			gr->result = ret;
3010 
3011 		if (mmc_test_cases[i].cleanup) {
3012 			ret = mmc_test_cases[i].cleanup(test);
3013 			if (ret) {
3014 				pr_info("%s: Warning: Cleanup stage failed! (%d)\n",
3015 					mmc_hostname(test->card->host),
3016 					ret);
3017 			}
3018 		}
3019 	}
3020 
3021 	mmc_release_host(test->card->host);
3022 
3023 	pr_info("%s: Tests completed.\n",
3024 		mmc_hostname(test->card->host));
3025 }
3026 
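/*
 * Free the saved test results for the given card, or for all cards when
 * card is NULL.
 */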
3027 static void mmc_test_free_result(struct mmc_card *card)
3028 {
3029 	struct mmc_test_general_result *gr, *grs;
3030 
3031 	mutex_lock(&mmc_test_lock);
3032 
3033 	list_for_each_entry_safe(gr, grs, &mmc_test_result, link) {
3034 		struct mmc_test_transfer_result *tr, *trs;
3035 
3036 		if (card && gr->card != card)
3037 			continue;
3038 
3039 		list_for_each_entry_safe(tr, trs, &gr->tr_lst, link) {
3040 			list_del(&tr->link);
3041 			kfree(tr);
3042 		}
3043 
3044 		list_del(&gr->link);
3045 		kfree(gr);
3046 	}
3047 
3048 	mutex_unlock(&mmc_test_lock);
3049 }
3050 
3051 static LIST_HEAD(mmc_test_file_test);
3052 
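/*
 * Show the saved results for this card: one "Test N: result" line per test
 * case, followed by one line per transfer measurement.
 */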
3053 static int mtf_test_show(struct seq_file *sf, void *data)
3054 {
3055 	struct mmc_card *card = (struct mmc_card *)sf->private;
3056 	struct mmc_test_general_result *gr;
3057 
3058 	mutex_lock(&mmc_test_lock);
3059 
3060 	list_for_each_entry(gr, &mmc_test_result, link) {
3061 		struct mmc_test_transfer_result *tr;
3062 
3063 		if (gr->card != card)
3064 			continue;
3065 
3066 		seq_printf(sf, "Test %d: %d\n", gr->testcase + 1, gr->result);
3067 
3068 		list_for_each_entry(tr, &gr->tr_lst, link) {
3069 			seq_printf(sf, "%u %d %lu.%09lu %u %u.%02u\n",
3070 				tr->count, tr->sectors,
3071 				(unsigned long)tr->ts.tv_sec,
3072 				(unsigned long)tr->ts.tv_nsec,
3073 				tr->rate, tr->iops / 100, tr->iops % 100);
3074 		}
3075 	}
3076 
3077 	mutex_unlock(&mmc_test_lock);
3078 
3079 	return 0;
3080 }
3081 
3082 static int mtf_test_open(struct inode *inode, struct file *file)
3083 {
3084 	return single_open(file, mtf_test_show, inode->i_private);
3085 }
3086 
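/*
 * Writing a test number to the debugfs "test" file runs that test case (0
 * runs them all), discarding any previous results for the card first.
 */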
3087 static ssize_t mtf_test_write(struct file *file, const char __user *buf,
3088 	size_t count, loff_t *pos)
3089 {
3090 	struct seq_file *sf = (struct seq_file *)file->private_data;
3091 	struct mmc_card *card = (struct mmc_card *)sf->private;
3092 	struct mmc_test_card *test;
3093 	long testcase;
3094 	int ret;
3095 
3096 	ret = kstrtol_from_user(buf, count, 10, &testcase);
3097 	if (ret)
3098 		return ret;
3099 
3100 	test = kzalloc(sizeof(*test), GFP_KERNEL);
3101 	if (!test)
3102 		return -ENOMEM;
3103 
3104 	/*
3105 	 * Remove all test results associated with the given card, so that
3106 	 * only data from the most recent run remains.
3107 	 */
3108 	mmc_test_free_result(card);
3109 
3110 	test->card = card;
3111 
3112 	test->buffer = kzalloc(BUFFER_SIZE, GFP_KERNEL);
3113 #ifdef CONFIG_HIGHMEM
3114 	test->highmem = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, BUFFER_ORDER);
3115 #endif
3116 
3117 #ifdef CONFIG_HIGHMEM
3118 	if (test->buffer && test->highmem) {
3119 #else
3120 	if (test->buffer) {
3121 #endif
3122 		mutex_lock(&mmc_test_lock);
3123 		mmc_test_run(test, testcase);
3124 		mutex_unlock(&mmc_test_lock);
3125 	}
3126 
3127 #ifdef CONFIG_HIGHMEM
3128 	__free_pages(test->highmem, BUFFER_ORDER);
3129 #endif
3130 	kfree(test->buffer);
3131 	kfree(test);
3132 
3133 	return count;
3134 }
3135 
3136 static const struct file_operations mmc_test_fops_test = {
3137 	.open		= mtf_test_open,
3138 	.read		= seq_read,
3139 	.write		= mtf_test_write,
3140 	.llseek		= seq_lseek,
3141 	.release	= single_release,
3142 };
3143 
3144 static int mtf_testlist_show(struct seq_file *sf, void *data)
3145 {
3146 	int i;
3147 
3148 	mutex_lock(&mmc_test_lock);
3149 
3150 	seq_puts(sf, "0:\tRun all tests\n");
3151 	for (i = 0; i < ARRAY_SIZE(mmc_test_cases); i++)
3152 		seq_printf(sf, "%d:\t%s\n", i + 1, mmc_test_cases[i].name);
3153 
3154 	mutex_unlock(&mmc_test_lock);
3155 
3156 	return 0;
3157 }
3158 
3159 static int mtf_testlist_open(struct inode *inode, struct file *file)
3160 {
3161 	return single_open(file, mtf_testlist_show, inode->i_private);
3162 }
3163 
3164 static const struct file_operations mmc_test_fops_testlist = {
3165 	.open		= mtf_testlist_open,
3166 	.read		= seq_read,
3167 	.llseek		= seq_lseek,
3168 	.release	= single_release,
3169 };
3170 
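/*
 * Remove the debugfs files for the given card, or for all cards when card
 * is NULL.
 */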
3171 static void mmc_test_free_dbgfs_file(struct mmc_card *card)
3172 {
3173 	struct mmc_test_dbgfs_file *df, *dfs;
3174 
3175 	mutex_lock(&mmc_test_lock);
3176 
3177 	list_for_each_entry_safe(df, dfs, &mmc_test_file_test, link) {
3178 		if (card && df->card != card)
3179 			continue;
3180 		debugfs_remove(df->file);
3181 		list_del(&df->link);
3182 		kfree(df);
3183 	}
3184 
3185 	mutex_unlock(&mmc_test_lock);
3186 }
3187 
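/*
 * Create a debugfs file under the card's debugfs directory and add it to
 * the list of files to remove later.
 */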
3188 static int __mmc_test_register_dbgfs_file(struct mmc_card *card,
3189 	const char *name, umode_t mode, const struct file_operations *fops)
3190 {
3191 	struct dentry *file = NULL;
3192 	struct mmc_test_dbgfs_file *df;
3193 
3194 	if (card->debugfs_root)
3195 		file = debugfs_create_file(name, mode, card->debugfs_root,
3196 			card, fops);
3197 
3198 	if (IS_ERR_OR_NULL(file)) {
3199 		dev_err(&card->dev,
3200 			"Can't create %s. Perhaps debugfs is disabled.\n",
3201 			name);
3202 		return -ENODEV;
3203 	}
3204 
3205 	df = kmalloc(sizeof(*df), GFP_KERNEL);
3206 	if (!df) {
3207 		debugfs_remove(file);
3208 		return -ENOMEM;
3209 	}
3210 
3211 	df->card = card;
3212 	df->file = file;
3213 
3214 	list_add(&df->link, &mmc_test_file_test);
3215 	return 0;
3216 }
3217 
3218 static int mmc_test_register_dbgfs_file(struct mmc_card *card)
3219 {
3220 	int ret;
3221 
3222 	mutex_lock(&mmc_test_lock);
3223 
3224 	ret = __mmc_test_register_dbgfs_file(card, "test", S_IWUSR | S_IRUGO,
3225 		&mmc_test_fops_test);
3226 	if (ret)
3227 		goto err;
3228 
3229 	ret = __mmc_test_register_dbgfs_file(card, "testlist", S_IRUGO,
3230 		&mmc_test_fops_testlist);
3231 	if (ret)
3232 		goto err;
3233 
3234 err:
3235 	mutex_unlock(&mmc_test_lock);
3236 
3237 	return ret;
3238 }
3239 
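/*
 * Claim MMC and SD cards for testing: create the debugfs files and disable
 * the command queue if it is enabled.
 */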
3240 static int mmc_test_probe(struct mmc_card *card)
3241 {
3242 	int ret;
3243 
3244 	if (!mmc_card_mmc(card) && !mmc_card_sd(card))
3245 		return -ENODEV;
3246 
3247 	ret = mmc_test_register_dbgfs_file(card);
3248 	if (ret)
3249 		return ret;
3250 
3251 	if (card->ext_csd.cmdq_en) {
3252 		mmc_claim_host(card->host);
3253 		ret = mmc_cmdq_disable(card);
3254 		mmc_release_host(card->host);
3255 		if (ret)
3256 			return ret;
3257 	}
3258 
3259 	dev_info(&card->dev, "Card claimed for testing.\n");
3260 
3261 	return 0;
3262 }
3263 
3264 static void mmc_test_remove(struct mmc_card *card)
3265 {
3266 	if (card->reenable_cmdq) {
3267 		mmc_claim_host(card->host);
3268 		mmc_cmdq_enable(card);
3269 		mmc_release_host(card->host);
3270 	}
3271 	mmc_test_free_result(card);
3272 	mmc_test_free_dbgfs_file(card);
3273 }
3274 
3275 static void mmc_test_shutdown(struct mmc_card *card)
3276 {
3277 }
3278 
3279 static struct mmc_driver mmc_driver = {
3280 	.drv		= {
3281 		.name	= "mmc_test",
3282 	},
3283 	.probe		= mmc_test_probe,
3284 	.remove		= mmc_test_remove,
3285 	.shutdown	= mmc_test_shutdown,
3286 };
3287 
3288 static int __init mmc_test_init(void)
3289 {
3290 	return mmc_register_driver(&mmc_driver);
3291 }
3292 
3293 static void __exit mmc_test_exit(void)
3294 {
3295 	/* Clear stale data if a card is still plugged in */
3296 	mmc_test_free_result(NULL);
3297 	mmc_test_free_dbgfs_file(NULL);
3298 
3299 	mmc_unregister_driver(&mmc_driver);
3300 }
3301 
3302 module_init(mmc_test_init);
3303 module_exit(mmc_test_exit);
3304 
3305 MODULE_LICENSE("GPL");
3306 MODULE_DESCRIPTION("Multimedia Card (MMC) host test driver");
3307 MODULE_AUTHOR("Pierre Ossman");
3308