xref: /openbmc/linux/drivers/mmc/core/mmc_ops.c (revision 23c2b932)
1 /*
2  *  linux/drivers/mmc/core/mmc_ops.h
3  *
4  *  Copyright 2006-2007 Pierre Ossman
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation; either version 2 of the License, or (at
9  * your option) any later version.
10  */
11 
12 #include <linux/slab.h>
13 #include <linux/export.h>
14 #include <linux/types.h>
15 #include <linux/scatterlist.h>
16 
17 #include <linux/mmc/host.h>
18 #include <linux/mmc/card.h>
19 #include <linux/mmc/mmc.h>
20 
21 #include "core.h"
22 #include "host.h"
23 #include "mmc_ops.h"
24 
25 #define MMC_OPS_TIMEOUT_MS	(10 * 60 * 1000) /* 10 minute timeout */
26 
/*
 * Reference tuning block pattern sent by the card in response to the
 * tuning command on a 4-bit bus (64 bytes); mmc_send_tuning() compares
 * the read-back data against this table.
 */
static const u8 tuning_blk_pattern_4bit[] = {
	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
};
37 
/*
 * Reference tuning block pattern for an 8-bit bus (128 bytes); the
 * counterpart of tuning_blk_pattern_4bit used by mmc_send_tuning().
 */
static const u8 tuning_blk_pattern_8bit[] = {
	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
};
56 
57 static inline int __mmc_send_status(struct mmc_card *card, u32 *status,
58 				    bool ignore_crc)
59 {
60 	int err;
61 	struct mmc_command cmd = {0};
62 
63 	BUG_ON(!card);
64 	BUG_ON(!card->host);
65 
66 	cmd.opcode = MMC_SEND_STATUS;
67 	if (!mmc_host_is_spi(card->host))
68 		cmd.arg = card->rca << 16;
69 	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
70 	if (ignore_crc)
71 		cmd.flags &= ~MMC_RSP_CRC;
72 
73 	err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
74 	if (err)
75 		return err;
76 
77 	/* NOTE: callers are required to understand the difference
78 	 * between "native" and SPI format status words!
79 	 */
80 	if (status)
81 		*status = cmd.resp[0];
82 
83 	return 0;
84 }
85 
86 int mmc_send_status(struct mmc_card *card, u32 *status)
87 {
88 	return __mmc_send_status(card, status, false);
89 }
90 
91 static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card)
92 {
93 	struct mmc_command cmd = {0};
94 
95 	BUG_ON(!host);
96 
97 	cmd.opcode = MMC_SELECT_CARD;
98 
99 	if (card) {
100 		cmd.arg = card->rca << 16;
101 		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
102 	} else {
103 		cmd.arg = 0;
104 		cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
105 	}
106 
107 	return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
108 }
109 
110 int mmc_select_card(struct mmc_card *card)
111 {
112 	BUG_ON(!card);
113 
114 	return _mmc_select_card(card->host, card);
115 }
116 
117 int mmc_deselect_cards(struct mmc_host *host)
118 {
119 	return _mmc_select_card(host, NULL);
120 }
121 
122 /*
123  * Write the value specified in the device tree or board code into the optional
124  * 16 bit Driver Stage Register. This can be used to tune raise/fall times and
125  * drive strength of the DAT and CMD outputs. The actual meaning of a given
126  * value is hardware dependent.
127  * The presence of the DSR register can be determined from the CSD register,
128  * bit 76.
129  */
130 int mmc_set_dsr(struct mmc_host *host)
131 {
132 	struct mmc_command cmd = {0};
133 
134 	cmd.opcode = MMC_SET_DSR;
135 
136 	cmd.arg = (host->dsr << 16) | 0xffff;
137 	cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
138 
139 	return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
140 }
141 
142 int mmc_go_idle(struct mmc_host *host)
143 {
144 	int err;
145 	struct mmc_command cmd = {0};
146 
147 	/*
148 	 * Non-SPI hosts need to prevent chipselect going active during
149 	 * GO_IDLE; that would put chips into SPI mode.  Remind them of
150 	 * that in case of hardware that won't pull up DAT3/nCS otherwise.
151 	 *
152 	 * SPI hosts ignore ios.chip_select; it's managed according to
153 	 * rules that must accommodate non-MMC slaves which this layer
154 	 * won't even know about.
155 	 */
156 	if (!mmc_host_is_spi(host)) {
157 		mmc_set_chip_select(host, MMC_CS_HIGH);
158 		mmc_delay(1);
159 	}
160 
161 	cmd.opcode = MMC_GO_IDLE_STATE;
162 	cmd.arg = 0;
163 	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC;
164 
165 	err = mmc_wait_for_cmd(host, &cmd, 0);
166 
167 	mmc_delay(1);
168 
169 	if (!mmc_host_is_spi(host)) {
170 		mmc_set_chip_select(host, MMC_CS_DONTCARE);
171 		mmc_delay(1);
172 	}
173 
174 	host->use_spi_crc = 0;
175 
176 	return err;
177 }
178 
/*
 * CMD1 (SEND_OP_COND): report/negotiate the card's operating conditions.
 *
 * @ocr == 0 means "probe only" - a single pass is made with no busy
 * polling.  Otherwise the command is retried up to 100 times, 10 ms
 * apart, until the card signals it has left the busy/idle state; if it
 * never does, -ETIMEDOUT is returned.  On native (non-SPI) hosts the
 * last OCR response word is stored through @rocr when non-NULL.
 */
int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
{
	struct mmc_command cmd = {0};
	int i, err = 0;

	BUG_ON(!host);

	cmd.opcode = MMC_SEND_OP_COND;
	/* SPI hosts take no OCR argument here. */
	cmd.arg = mmc_host_is_spi(host) ? 0 : ocr;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;

	for (i = 100; i; i--) {
		err = mmc_wait_for_cmd(host, &cmd, 0);
		if (err)
			break;

		/* if we're just probing, do a single pass */
		if (ocr == 0)
			break;

		/* otherwise wait until reset completes */
		if (mmc_host_is_spi(host)) {
			if (!(cmd.resp[0] & R1_SPI_IDLE))
				break;
		} else {
			if (cmd.resp[0] & MMC_CARD_BUSY)
				break;
		}

		/* Still busy: assume timeout unless a later pass clears it. */
		err = -ETIMEDOUT;

		mmc_delay(10);
	}

	if (rocr && !mmc_host_is_spi(host))
		*rocr = cmd.resp[0];

	return err;
}
218 
219 int mmc_all_send_cid(struct mmc_host *host, u32 *cid)
220 {
221 	int err;
222 	struct mmc_command cmd = {0};
223 
224 	BUG_ON(!host);
225 	BUG_ON(!cid);
226 
227 	cmd.opcode = MMC_ALL_SEND_CID;
228 	cmd.arg = 0;
229 	cmd.flags = MMC_RSP_R2 | MMC_CMD_BCR;
230 
231 	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
232 	if (err)
233 		return err;
234 
235 	memcpy(cid, cmd.resp, sizeof(u32) * 4);
236 
237 	return 0;
238 }
239 
240 int mmc_set_relative_addr(struct mmc_card *card)
241 {
242 	struct mmc_command cmd = {0};
243 
244 	BUG_ON(!card);
245 	BUG_ON(!card->host);
246 
247 	cmd.opcode = MMC_SET_RELATIVE_ADDR;
248 	cmd.arg = card->rca << 16;
249 	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
250 
251 	return mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
252 }
253 
254 static int
255 mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode)
256 {
257 	int err;
258 	struct mmc_command cmd = {0};
259 
260 	BUG_ON(!host);
261 	BUG_ON(!cxd);
262 
263 	cmd.opcode = opcode;
264 	cmd.arg = arg;
265 	cmd.flags = MMC_RSP_R2 | MMC_CMD_AC;
266 
267 	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
268 	if (err)
269 		return err;
270 
271 	memcpy(cxd, cmd.resp, sizeof(u32) * 4);
272 
273 	return 0;
274 }
275 
276 /*
277  * NOTE: void *buf, caller for the buf is required to use DMA-capable
278  * buffer or on-stack buffer (with some overhead in callee).
279  */
/*
 * Single-block read of @len bytes into @buf via @opcode.  Used for
 * CSD/CID-over-data (SPI mode) and EXT_CSD reads.  @buf must be
 * DMA-capable (see the note above this function).
 */
static int
mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
		u32 opcode, void *buf, unsigned len)
{
	struct mmc_request mrq = {NULL};
	struct mmc_command cmd = {0};
	struct mmc_data data = {0};
	struct scatterlist sg;

	mrq.cmd = &cmd;
	mrq.data = &data;

	cmd.opcode = opcode;
	cmd.arg = 0;

	/* NOTE HACK:  the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID.  Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = len;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;

	sg_init_one(&sg, buf, len);

	if (opcode == MMC_SEND_CSD || opcode == MMC_SEND_CID) {
		/*
		 * The spec states that CSD and CID accesses have a timeout
		 * of 64 clock cycles.
		 */
		data.timeout_ns = 0;
		data.timeout_clks = 64;
	} else
		mmc_set_data_timeout(&data, card);

	mmc_wait_for_req(host, &mrq);

	/* Report the command error in preference to the data error. */
	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return 0;
}
329 
330 int mmc_send_csd(struct mmc_card *card, u32 *csd)
331 {
332 	int ret, i;
333 	u32 *csd_tmp;
334 
335 	if (!mmc_host_is_spi(card->host))
336 		return mmc_send_cxd_native(card->host, card->rca << 16,
337 				csd, MMC_SEND_CSD);
338 
339 	csd_tmp = kzalloc(16, GFP_KERNEL);
340 	if (!csd_tmp)
341 		return -ENOMEM;
342 
343 	ret = mmc_send_cxd_data(card, card->host, MMC_SEND_CSD, csd_tmp, 16);
344 	if (ret)
345 		goto err;
346 
347 	for (i = 0;i < 4;i++)
348 		csd[i] = be32_to_cpu(csd_tmp[i]);
349 
350 err:
351 	kfree(csd_tmp);
352 	return ret;
353 }
354 
355 int mmc_send_cid(struct mmc_host *host, u32 *cid)
356 {
357 	int ret, i;
358 	u32 *cid_tmp;
359 
360 	if (!mmc_host_is_spi(host)) {
361 		if (!host->card)
362 			return -EINVAL;
363 		return mmc_send_cxd_native(host, host->card->rca << 16,
364 				cid, MMC_SEND_CID);
365 	}
366 
367 	cid_tmp = kzalloc(16, GFP_KERNEL);
368 	if (!cid_tmp)
369 		return -ENOMEM;
370 
371 	ret = mmc_send_cxd_data(NULL, host, MMC_SEND_CID, cid_tmp, 16);
372 	if (ret)
373 		goto err;
374 
375 	for (i = 0;i < 4;i++)
376 		cid[i] = be32_to_cpu(cid_tmp[i]);
377 
378 err:
379 	kfree(cid_tmp);
380 	return ret;
381 }
382 
383 int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)
384 {
385 	int err;
386 	u8 *ext_csd;
387 
388 	if (!card || !new_ext_csd)
389 		return -EINVAL;
390 
391 	if (!mmc_can_ext_csd(card))
392 		return -EOPNOTSUPP;
393 
394 	/*
395 	 * As the ext_csd is so large and mostly unused, we don't store the
396 	 * raw block in mmc_card.
397 	 */
398 	ext_csd = kzalloc(512, GFP_KERNEL);
399 	if (!ext_csd)
400 		return -ENOMEM;
401 
402 	err = mmc_send_cxd_data(card, card->host, MMC_SEND_EXT_CSD, ext_csd,
403 				512);
404 	if (err)
405 		kfree(ext_csd);
406 	else
407 		*new_ext_csd = ext_csd;
408 
409 	return err;
410 }
411 EXPORT_SYMBOL_GPL(mmc_get_ext_csd);
412 
413 int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp)
414 {
415 	struct mmc_command cmd = {0};
416 	int err;
417 
418 	cmd.opcode = MMC_SPI_READ_OCR;
419 	cmd.arg = highcap ? (1 << 30) : 0;
420 	cmd.flags = MMC_RSP_SPI_R3;
421 
422 	err = mmc_wait_for_cmd(host, &cmd, 0);
423 
424 	*ocrp = cmd.resp[1];
425 	return err;
426 }
427 
428 int mmc_spi_set_crc(struct mmc_host *host, int use_crc)
429 {
430 	struct mmc_command cmd = {0};
431 	int err;
432 
433 	cmd.opcode = MMC_SPI_CRC_ON_OFF;
434 	cmd.flags = MMC_RSP_SPI_R1;
435 	cmd.arg = use_crc;
436 
437 	err = mmc_wait_for_cmd(host, &cmd, 0);
438 	if (!err)
439 		host->use_spi_crc = use_crc;
440 	return err;
441 }
442 
443 int mmc_switch_status_error(struct mmc_host *host, u32 status)
444 {
445 	if (mmc_host_is_spi(host)) {
446 		if (status & R1_SPI_ILLEGAL_COMMAND)
447 			return -EBADMSG;
448 	} else {
449 		if (status & 0xFDFFA000)
450 			pr_warn("%s: unexpected status %#x after switch\n",
451 				mmc_hostname(host), status);
452 		if (status & R1_SWITCH_ERROR)
453 			return -EBADMSG;
454 	}
455 	return 0;
456 }
457 
458 /**
459  *	__mmc_switch - modify EXT_CSD register
460  *	@card: the MMC card associated with the data transfer
461  *	@set: cmd set values
462  *	@index: EXT_CSD register index
463  *	@value: value to program into EXT_CSD register
464  *	@timeout_ms: timeout (ms) for operation performed by register write,
465  *                   timeout of zero implies maximum possible timeout
466  *	@use_busy_signal: use the busy signal as response type
467  *	@send_status: send status cmd to poll for busy
468  *	@ignore_crc: ignore CRC errors when sending status cmd to poll for busy
469  *
470  *	Modifies the EXT_CSD register for selected card.
471  */
int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
		unsigned int timeout_ms, bool use_busy_signal, bool send_status,
		bool ignore_crc)
{
	struct mmc_host *host = card->host;
	int err;
	struct mmc_command cmd = {0};
	unsigned long timeout;
	u32 status = 0;
	bool use_r1b_resp = use_busy_signal;
	bool expired = false;

	/* Block re-tuning while the card may be busy programming. */
	mmc_retune_hold(host);

	/*
	 * If the cmd timeout and the max_busy_timeout of the host are both
	 * specified, let's validate them. A failure means we need to prevent
	 * the host from doing hw busy detection, which is done by converting
	 * to a R1 response instead of a R1B.
	 */
	if (timeout_ms && host->max_busy_timeout &&
		(timeout_ms > host->max_busy_timeout))
		use_r1b_resp = false;

	/* CMD6 argument: access mode, EXT_CSD byte index, value, cmd set. */
	cmd.opcode = MMC_SWITCH;
	cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
		  (index << 16) |
		  (value << 8) |
		  set;
	cmd.flags = MMC_CMD_AC;
	if (use_r1b_resp) {
		cmd.flags |= MMC_RSP_SPI_R1B | MMC_RSP_R1B;
		/*
		 * A busy_timeout of zero means the host can decide to use
		 * whatever value it finds suitable.
		 */
		cmd.busy_timeout = timeout_ms;
	} else {
		cmd.flags |= MMC_RSP_SPI_R1 | MMC_RSP_R1;
	}

	if (index == EXT_CSD_SANITIZE_START)
		cmd.sanitize_busy = true;

	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
	if (err)
		goto out;

	/* No need to check card status in case of unblocking command */
	if (!use_busy_signal)
		goto out;

	/*
	 * CRC errors shall only be ignored in cases were CMD13 is used to poll
	 * to detect busy completion.
	 */
	if ((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp)
		ignore_crc = false;

	/* We have an unspecified cmd timeout, use the fallback value. */
	if (!timeout_ms)
		timeout_ms = MMC_OPS_TIMEOUT_MS;

	/* Must check status to be sure of no errors. */
	timeout = jiffies + msecs_to_jiffies(timeout_ms);
	do {
		if (send_status) {
			/*
			 * Due to the possibility of being preempted after
			 * sending the status command, check the expiration
			 * time first.
			 */
			expired = time_after(jiffies, timeout);
			err = __mmc_send_status(card, &status, ignore_crc);
			if (err)
				goto out;
		}
		/* HW busy detection means the host already waited for us. */
		if ((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp)
			break;
		if (mmc_host_is_spi(host))
			break;

		/*
		 * We are not allowed to issue a status command and the host
		 * doesn't support MMC_CAP_WAIT_WHILE_BUSY, then we can only
		 * rely on waiting for the stated timeout to be sufficient.
		 */
		if (!send_status) {
			mmc_delay(timeout_ms);
			goto out;
		}

		/* Timeout if the device never leaves the program state. */
		if (expired && R1_CURRENT_STATE(status) == R1_STATE_PRG) {
			pr_err("%s: Card stuck in programming state! %s\n",
				mmc_hostname(host), __func__);
			err = -ETIMEDOUT;
			goto out;
		}
	} while (R1_CURRENT_STATE(status) == R1_STATE_PRG);

	/* Translate any error bits in the last status word into an errno. */
	err = mmc_switch_status_error(host, status);
out:
	mmc_retune_release(host);

	return err;
}
579 
580 int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
581 		unsigned int timeout_ms)
582 {
583 	return __mmc_switch(card, set, index, value, timeout_ms, true, true,
584 				false);
585 }
586 EXPORT_SYMBOL_GPL(mmc_switch);
587 
/*
 * Issue a tuning command (@opcode, e.g. CMD19/CMD21) and compare the
 * returned block against the spec-defined pattern for the current bus
 * width.  Returns 0 on a match, -EIO on a pattern mismatch, -EINVAL for
 * a 1-bit bus (no pattern defined), -ENOMEM, or the cmd/data error.
 * When @cmd_error is non-NULL it always receives the raw command error.
 */
int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error)
{
	struct mmc_request mrq = {NULL};
	struct mmc_command cmd = {0};
	struct mmc_data data = {0};
	struct scatterlist sg;
	struct mmc_ios *ios = &host->ios;
	const u8 *tuning_block_pattern;
	int size, err = 0;
	u8 *data_buf;

	/* Pick the reference pattern matching the active bus width. */
	if (ios->bus_width == MMC_BUS_WIDTH_8) {
		tuning_block_pattern = tuning_blk_pattern_8bit;
		size = sizeof(tuning_blk_pattern_8bit);
	} else if (ios->bus_width == MMC_BUS_WIDTH_4) {
		tuning_block_pattern = tuning_blk_pattern_4bit;
		size = sizeof(tuning_blk_pattern_4bit);
	} else
		return -EINVAL;

	/* DMA-capable bounce buffer for the read-back block. */
	data_buf = kzalloc(size, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;

	mrq.cmd = &cmd;
	mrq.data = &data;

	cmd.opcode = opcode;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = size;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	/*
	 * According to the tuning specs, Tuning process
	 * is normally shorter 40 executions of CMD19,
	 * and timeout value should be shorter than 150 ms
	 */
	data.timeout_ns = 150 * NSEC_PER_MSEC;

	data.sg = &sg;
	data.sg_len = 1;
	sg_init_one(&sg, data_buf, size);

	mmc_wait_for_req(host, &mrq);

	/* Hand the raw command error back even on the success path. */
	if (cmd_error)
		*cmd_error = cmd.error;

	if (cmd.error) {
		err = cmd.error;
		goto out;
	}

	if (data.error) {
		err = data.error;
		goto out;
	}

	/* Any deviation from the expected pattern means tuning failed. */
	if (memcmp(data_buf, tuning_block_pattern, size))
		err = -EIO;

out:
	kfree(data_buf);
	return err;
}
EXPORT_SYMBOL_GPL(mmc_send_tuning);
656 
/*
 * Run one half of the bus test: write a known pattern (MMC_BUS_TEST_W)
 * or read back the card's reply (MMC_BUS_TEST_R) and verify it.
 * @len is the transfer size in bytes and encodes the bus width (4 or 8).
 */
static int
mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
		  u8 len)
{
	struct mmc_request mrq = {NULL};
	struct mmc_command cmd = {0};
	struct mmc_data data = {0};
	struct scatterlist sg;
	u8 *data_buf;
	u8 *test_buf;
	int i, err;
	static u8 testdata_8bit[8] = { 0x55, 0xaa, 0, 0, 0, 0, 0, 0 };
	static u8 testdata_4bit[4] = { 0x5a, 0, 0, 0 };

	/* dma onto stack is unsafe/nonportable, but callers to this
	 * routine normally provide temporary on-stack buffers ...
	 */
	data_buf = kmalloc(len, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;

	if (len == 8)
		test_buf = testdata_8bit;
	else if (len == 4)
		test_buf = testdata_4bit;
	else {
		pr_err("%s: Invalid bus_width %d\n",
		       mmc_hostname(host), len);
		kfree(data_buf);
		return -EINVAL;
	}

	/* For the write half, send the reference pattern to the card. */
	if (opcode == MMC_BUS_TEST_W)
		memcpy(data_buf, test_buf, len);

	mrq.cmd = &cmd;
	mrq.data = &data;
	cmd.opcode = opcode;
	cmd.arg = 0;

	/* NOTE HACK:  the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID.  Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = len;
	data.blocks = 1;
	if (opcode == MMC_BUS_TEST_R)
		data.flags = MMC_DATA_READ;
	else
		data.flags = MMC_DATA_WRITE;

	data.sg = &sg;
	data.sg_len = 1;
	mmc_set_data_timeout(&data, card);
	sg_init_one(&sg, data_buf, len);
	mmc_wait_for_req(host, &mrq);
	err = 0;
	if (opcode == MMC_BUS_TEST_R) {
		/*
		 * A matching byte XORs with the sent pattern to 0xff.
		 * NOTE(review): only the first len/4 bytes are compared -
		 * presumably the rest are don't-care; confirm against the
		 * bus-test section of the eMMC spec.
		 */
		for (i = 0; i < len / 4; i++)
			if ((test_buf[i] ^ data_buf[i]) != 0xff) {
				err = -EIO;
				break;
			}
	}
	kfree(data_buf);

	/* Command/data errors take precedence over a pattern mismatch. */
	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return err;
}
733 
734 int mmc_bus_test(struct mmc_card *card, u8 bus_width)
735 {
736 	int width;
737 
738 	if (bus_width == MMC_BUS_WIDTH_8)
739 		width = 8;
740 	else if (bus_width == MMC_BUS_WIDTH_4)
741 		width = 4;
742 	else if (bus_width == MMC_BUS_WIDTH_1)
743 		return 0; /* no need for test */
744 	else
745 		return -EINVAL;
746 
747 	/*
748 	 * Ignore errors from BUS_TEST_W.  BUS_TEST_R will fail if there
749 	 * is a problem.  This improves chances that the test will work.
750 	 */
751 	mmc_send_bus_test(card, card->host, MMC_BUS_TEST_W, width);
752 	return mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
753 }
754 
755 int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status)
756 {
757 	struct mmc_command cmd = {0};
758 	unsigned int opcode;
759 	int err;
760 
761 	if (!card->ext_csd.hpi) {
762 		pr_warn("%s: Card didn't support HPI command\n",
763 			mmc_hostname(card->host));
764 		return -EINVAL;
765 	}
766 
767 	opcode = card->ext_csd.hpi_cmd;
768 	if (opcode == MMC_STOP_TRANSMISSION)
769 		cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
770 	else if (opcode == MMC_SEND_STATUS)
771 		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
772 
773 	cmd.opcode = opcode;
774 	cmd.arg = card->rca << 16 | 1;
775 
776 	err = mmc_wait_for_cmd(card->host, &cmd, 0);
777 	if (err) {
778 		pr_warn("%s: error %d interrupting operation. "
779 			"HPI command response %#x\n", mmc_hostname(card->host),
780 			err, cmd.resp[0]);
781 		return err;
782 	}
783 	if (status)
784 		*status = cmd.resp[0];
785 
786 	return 0;
787 }
788 
789 int mmc_can_ext_csd(struct mmc_card *card)
790 {
791 	return (card && card->csd.mmca_vsn > CSD_SPEC_VER_3);
792 }
793