/*
 *  linux/drivers/mmc/core/mmc_ops.c
 *
 *  Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/scatterlist.h>

#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/mmc.h>

#include "core.h"
#include "mmc_ops.h"

#define MMC_OPS_TIMEOUT_MS	(10 * 60 * 1000) /* 10 minute timeout */

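/*
 * Issue CMD13 (SEND_STATUS) and return the raw response word. When
 * ignore_crc is set, the response CRC check is dropped; this is used
 * while polling for busy completion after a switch command.
 */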
static inline int __mmc_send_status(struct mmc_card *card, u32 *status,
				    bool ignore_crc)
{
	int err;
	struct mmc_command cmd = {0};

	BUG_ON(!card);
	BUG_ON(!card->host);

	cmd.opcode = MMC_SEND_STATUS;
	if (!mmc_host_is_spi(card->host))
		cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
	if (ignore_crc)
		cmd.flags &= ~MMC_RSP_CRC;

	err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
	if (err)
		return err;

	/* NOTE: callers are required to understand the difference
	 * between "native" and SPI format status words!
	 */
	if (status)
		*status = cmd.resp[0];

	return 0;
}

int mmc_send_status(struct mmc_card *card, u32 *status)
{
	return __mmc_send_status(card, status, false);
}

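/*
 * CMD7: select the card identified by @card, or deselect all cards when
 * @card is NULL (broadcast with RCA 0, which expects no response).
 */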
static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card)
{
	int err;
	struct mmc_command cmd = {0};

	BUG_ON(!host);

	cmd.opcode = MMC_SELECT_CARD;

	if (card) {
		cmd.arg = card->rca << 16;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
	} else {
		cmd.arg = 0;
		cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
	}

	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
	if (err)
		return err;

	return 0;
}

int mmc_select_card(struct mmc_card *card)
{
	BUG_ON(!card);

	return _mmc_select_card(card->host, card);
}

int mmc_deselect_cards(struct mmc_host *host)
{
	return _mmc_select_card(host, NULL);
}

/*
 * Write the value specified in the device tree or board code into the optional
 * 16 bit Driver Stage Register. This can be used to tune rise/fall times and
 * drive strength of the DAT and CMD outputs. The actual meaning of a given
 * value is hardware dependent.
 * The presence of the DSR register can be determined from the CSD register,
 * bit 76.
 */
int mmc_set_dsr(struct mmc_host *host)
{
	struct mmc_command cmd = {0};

	cmd.opcode = MMC_SET_DSR;

	cmd.arg = (host->dsr << 16) | 0xffff;
	cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;

	return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
}

int mmc_go_idle(struct mmc_host *host)
{
	int err;
	struct mmc_command cmd = {0};

	/*
	 * Non-SPI hosts need to prevent chipselect going active during
	 * GO_IDLE; that would put chips into SPI mode.  Remind them of
	 * that in case of hardware that won't pull up DAT3/nCS otherwise.
	 *
	 * SPI hosts ignore ios.chip_select; it's managed according to
	 * rules that must accommodate non-MMC slaves which this layer
	 * won't even know about.
	 */
	if (!mmc_host_is_spi(host)) {
		mmc_set_chip_select(host, MMC_CS_HIGH);
		mmc_delay(1);
	}

	cmd.opcode = MMC_GO_IDLE_STATE;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC;

	err = mmc_wait_for_cmd(host, &cmd, 0);

	mmc_delay(1);

	if (!mmc_host_is_spi(host)) {
		mmc_set_chip_select(host, MMC_CS_DONTCARE);
		mmc_delay(1);
	}

	host->use_spi_crc = 0;

	return err;
}

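/*
 * CMD1: negotiate the operating voltage and wait for the card to finish
 * its power-up sequence. The command is retried up to 100 times with a
 * 10 ms delay until the card reports it is no longer busy. Passing an
 * ocr of zero performs a single probe-only pass.
 */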
int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
{
	struct mmc_command cmd = {0};
	int i, err = 0;

	BUG_ON(!host);

	cmd.opcode = MMC_SEND_OP_COND;
	cmd.arg = mmc_host_is_spi(host) ? 0 : ocr;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;

	for (i = 100; i; i--) {
		err = mmc_wait_for_cmd(host, &cmd, 0);
		if (err)
			break;

		/* if we're just probing, do a single pass */
		if (ocr == 0)
			break;

		/* otherwise wait until reset completes */
		if (mmc_host_is_spi(host)) {
			if (!(cmd.resp[0] & R1_SPI_IDLE))
				break;
		} else {
			if (cmd.resp[0] & MMC_CARD_BUSY)
				break;
		}

		err = -ETIMEDOUT;

		mmc_delay(10);
	}

	if (rocr && !mmc_host_is_spi(host))
		*rocr = cmd.resp[0];

	return err;
}

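/*
 * CMD2: ask all cards on the bus to send their 128-bit CID. The R2
 * response is copied into the four-word @cid buffer supplied by the
 * caller.
 */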
int mmc_all_send_cid(struct mmc_host *host, u32 *cid)
{
	int err;
	struct mmc_command cmd = {0};

	BUG_ON(!host);
	BUG_ON(!cid);

	cmd.opcode = MMC_ALL_SEND_CID;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_R2 | MMC_CMD_BCR;

	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
	if (err)
		return err;

	memcpy(cid, cmd.resp, sizeof(u32) * 4);

	return 0;
}

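/*
 * CMD3: program the relative card address already stored in card->rca
 * into the card, so it can be addressed individually from then on.
 */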
int mmc_set_relative_addr(struct mmc_card *card)
{
	int err;
	struct mmc_command cmd = {0};

	BUG_ON(!card);
	BUG_ON(!card->host);

	cmd.opcode = MMC_SET_RELATIVE_ADDR;
	cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
	if (err)
		return err;

	return 0;
}

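/*
 * Read a CSD or CID register in native (non-SPI) mode, where the card
 * returns the 128-bit value directly in an R2 response rather than as a
 * data block.
 */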
static int
mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode)
{
	int err;
	struct mmc_command cmd = {0};

	BUG_ON(!host);
	BUG_ON(!cxd);

	cmd.opcode = opcode;
	cmd.arg = arg;
	cmd.flags = MMC_RSP_R2 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
	if (err)
		return err;

	memcpy(cxd, cmd.resp, sizeof(u32) * 4);

	return 0;
}

/*
 * NOTE: the caller must pass either a DMA-capable buffer or an on-stack
 * buffer in buf; on-stack buffers are bounced through a temporary
 * kmalloc'd copy, at some extra cost in this function.
 */
static int
mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
		u32 opcode, void *buf, unsigned len)
{
	struct mmc_request mrq = {NULL};
	struct mmc_command cmd = {0};
	struct mmc_data data = {0};
	struct scatterlist sg;
	void *data_buf;
	int is_on_stack;

	is_on_stack = object_is_on_stack(buf);
	if (is_on_stack) {
		/*
		 * dma onto stack is unsafe/nonportable, but callers to this
		 * routine normally provide temporary on-stack buffers ...
		 */
		data_buf = kmalloc(len, GFP_KERNEL);
		if (!data_buf)
			return -ENOMEM;
	} else {
		data_buf = buf;
	}

	mrq.cmd = &cmd;
	mrq.data = &data;

	cmd.opcode = opcode;
	cmd.arg = 0;

	/* NOTE HACK:  the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID.  Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = len;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;

	sg_init_one(&sg, data_buf, len);

	if (opcode == MMC_SEND_CSD || opcode == MMC_SEND_CID) {
		/*
		 * The spec states that CSD and CID accesses have a timeout
		 * of 64 clock cycles.
		 */
		data.timeout_ns = 0;
		data.timeout_clks = 64;
	} else {
		mmc_set_data_timeout(&data, card);
	}

	mmc_wait_for_req(host, &mrq);

	if (is_on_stack) {
		memcpy(buf, data_buf, len);
		kfree(data_buf);
	}

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return 0;
}

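/*
 * Read the card's CSD. In native mode this is a plain CMD9 with an R2
 * response; in SPI mode the register arrives as a 16-byte data block,
 * which is read into a bounce buffer and converted from big-endian to
 * host byte order.
 */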
int mmc_send_csd(struct mmc_card *card, u32 *csd)
{
	int ret, i;
	u32 *csd_tmp;

	if (!mmc_host_is_spi(card->host))
		return mmc_send_cxd_native(card->host, card->rca << 16,
				csd, MMC_SEND_CSD);

	csd_tmp = kmalloc(16, GFP_KERNEL);
	if (!csd_tmp)
		return -ENOMEM;

	ret = mmc_send_cxd_data(card, card->host, MMC_SEND_CSD, csd_tmp, 16);
	if (ret)
		goto err;

	for (i = 0; i < 4; i++)
		csd[i] = be32_to_cpu(csd_tmp[i]);

err:
	kfree(csd_tmp);
	return ret;
}

int mmc_send_cid(struct mmc_host *host, u32 *cid)
{
	int ret, i;
	u32 *cid_tmp;

	if (!mmc_host_is_spi(host)) {
		if (!host->card)
			return -EINVAL;
		return mmc_send_cxd_native(host, host->card->rca << 16,
				cid, MMC_SEND_CID);
	}

	cid_tmp = kmalloc(16, GFP_KERNEL);
	if (!cid_tmp)
		return -ENOMEM;

	ret = mmc_send_cxd_data(NULL, host, MMC_SEND_CID, cid_tmp, 16);
	if (ret)
		goto err;

	for (i = 0; i < 4; i++)
		cid[i] = be32_to_cpu(cid_tmp[i]);

err:
	kfree(cid_tmp);
	return ret;
}

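/*
 * CMD8 (SEND_EXT_CSD): read the 512-byte Extended CSD register as a
 * single data block into ext_csd.
 */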
int mmc_send_ext_csd(struct mmc_card *card, u8 *ext_csd)
{
	return mmc_send_cxd_data(card, card->host, MMC_SEND_EXT_CSD,
			ext_csd, 512);
}
EXPORT_SYMBOL_GPL(mmc_send_ext_csd);

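/*
 * CMD58 (SPI mode only): read the card's OCR. When @highcap is set,
 * bit 30 of the command argument is set for high-capacity
 * (block-addressed) cards.
 */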
int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp)
{
	struct mmc_command cmd = {0};
	int err;

	cmd.opcode = MMC_SPI_READ_OCR;
	cmd.arg = highcap ? (1 << 30) : 0;
	cmd.flags = MMC_RSP_SPI_R3;

	err = mmc_wait_for_cmd(host, &cmd, 0);

	*ocrp = cmd.resp[1];
	return err;
}

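/*
 * CMD59 (SPI mode only): turn CRC checking on or off. The host's
 * use_spi_crc flag is only updated if the card accepted the command.
 */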
int mmc_spi_set_crc(struct mmc_host *host, int use_crc)
{
	struct mmc_command cmd = {0};
	int err;

	cmd.opcode = MMC_SPI_CRC_ON_OFF;
	cmd.flags = MMC_RSP_SPI_R1;
	cmd.arg = use_crc;

	err = mmc_wait_for_cmd(host, &cmd, 0);
	if (!err)
		host->use_spi_crc = use_crc;
	return err;
}

/**
 *	__mmc_switch - modify EXT_CSD register
 *	@card: the MMC card associated with the data transfer
 *	@set: cmd set values
 *	@index: EXT_CSD register index
 *	@value: value to program into EXT_CSD register
 *	@timeout_ms: timeout (ms) for operation performed by register write,
 *                   timeout of zero implies maximum possible timeout
 *	@use_busy_signal: use the busy signal as response type
 *	@send_status: send status cmd to poll for busy
 *	@ignore_crc: ignore CRC errors when sending status cmd to poll for busy
 *
 *	Modifies the EXT_CSD register for selected card.
 */
int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
		unsigned int timeout_ms, bool use_busy_signal, bool send_status,
		bool ignore_crc)
{
	struct mmc_host *host = card->host;
	int err;
	struct mmc_command cmd = {0};
	unsigned long timeout;
	u32 status = 0;
	bool use_r1b_resp = use_busy_signal;

	/*
	 * If the cmd timeout and the max_busy_timeout of the host are both
	 * specified, let's validate them. A failure means we need to prevent
	 * the host from doing hw busy detection, which is done by converting
	 * to an R1 response instead of an R1B.
	 */
	if (timeout_ms && host->max_busy_timeout &&
		(timeout_ms > host->max_busy_timeout))
		use_r1b_resp = false;

	cmd.opcode = MMC_SWITCH;
	cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
		  (index << 16) |
		  (value << 8) |
		  set;
	cmd.flags = MMC_CMD_AC;
	if (use_r1b_resp) {
		cmd.flags |= MMC_RSP_SPI_R1B | MMC_RSP_R1B;
		/*
		 * A busy_timeout of zero means the host can decide to use
		 * whatever value it finds suitable.
		 */
		cmd.busy_timeout = timeout_ms;
	} else {
		cmd.flags |= MMC_RSP_SPI_R1 | MMC_RSP_R1;
	}

	if (index == EXT_CSD_SANITIZE_START)
		cmd.sanitize_busy = true;

	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
	if (err)
		return err;

	/* No need to check card status in case of unblocking command */
	if (!use_busy_signal)
		return 0;

	/*
	 * CRC errors shall only be ignored in cases where CMD13 is used to
	 * poll to detect busy completion.
	 */
	if ((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp)
		ignore_crc = false;

	/* We have an unspecified cmd timeout, use the fallback value. */
	if (!timeout_ms)
		timeout_ms = MMC_OPS_TIMEOUT_MS;

	/* Must check status to be sure of no errors. */
	timeout = jiffies + msecs_to_jiffies(timeout_ms);
	do {
		if (send_status) {
			err = __mmc_send_status(card, &status, ignore_crc);
			if (err)
				return err;
		}
		if ((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp)
			break;
		if (mmc_host_is_spi(host))
			break;

		/*
		 * If we are not allowed to issue a status command and the
		 * host doesn't support MMC_CAP_WAIT_WHILE_BUSY, we can only
		 * rely on waiting for the stated timeout to be sufficient.
		 */
		if (!send_status) {
			mmc_delay(timeout_ms);
			return 0;
		}

		/* Timeout if the device never leaves the program state. */
		if (time_after(jiffies, timeout)) {
			pr_err("%s: Card stuck in programming state! %s\n",
				mmc_hostname(host), __func__);
			return -ETIMEDOUT;
		}
	} while (R1_CURRENT_STATE(status) == R1_STATE_PRG);

	if (mmc_host_is_spi(host)) {
		if (status & R1_SPI_ILLEGAL_COMMAND)
			return -EBADMSG;
	} else {
		if (status & 0xFDFFA000)
			pr_warn("%s: unexpected status %#x after switch\n",
				mmc_hostname(host), status);
		if (status & R1_SWITCH_ERROR)
			return -EBADMSG;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(__mmc_switch);

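/*
 * mmc_switch() is the common wrapper around __mmc_switch() that waits for
 * the busy signal and polls card status. A typical caller changing the bus
 * width might look roughly like the following (illustrative sketch only,
 * using the EXT_CSD_* constants from <linux/mmc/mmc.h> and the card's
 * generic_cmd6_time as the timeout):
 *
 *	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
 *			 EXT_CSD_BUS_WIDTH, EXT_CSD_BUS_WIDTH_4,
 *			 card->ext_csd.generic_cmd6_time);
 *	if (err)
 *		pr_warn("%s: switch to 4-bit bus failed\n",
 *			mmc_hostname(card->host));
 */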
int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
		unsigned int timeout_ms)
{
	return __mmc_switch(card, set, index, value, timeout_ms, true, true,
				false);
}
EXPORT_SYMBOL_GPL(mmc_switch);

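/*
 * CMD19/CMD14 bus test: write a known pattern with BUS_TEST_W and read it
 * back with BUS_TEST_R. The card returns the inverted pattern, so each
 * read-back byte XORed with the test byte must equal 0xff.
 */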
static int
mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
		  u8 len)
{
	struct mmc_request mrq = {NULL};
	struct mmc_command cmd = {0};
	struct mmc_data data = {0};
	struct scatterlist sg;
	u8 *data_buf;
	u8 *test_buf;
	int i, err;
	static u8 testdata_8bit[8] = { 0x55, 0xaa, 0, 0, 0, 0, 0, 0 };
	static u8 testdata_4bit[4] = { 0x5a, 0, 0, 0 };

	/* dma onto stack is unsafe/nonportable, but callers to this
	 * routine normally provide temporary on-stack buffers ...
	 */
	data_buf = kmalloc(len, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;

	if (len == 8) {
		test_buf = testdata_8bit;
	} else if (len == 4) {
		test_buf = testdata_4bit;
	} else {
		pr_err("%s: Invalid bus_width %d\n",
		       mmc_hostname(host), len);
		kfree(data_buf);
		return -EINVAL;
	}

	if (opcode == MMC_BUS_TEST_W)
		memcpy(data_buf, test_buf, len);

	mrq.cmd = &cmd;
	mrq.data = &data;
	cmd.opcode = opcode;
	cmd.arg = 0;

	/* NOTE HACK:  the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID.  Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = len;
	data.blocks = 1;
	if (opcode == MMC_BUS_TEST_R)
		data.flags = MMC_DATA_READ;
	else
		data.flags = MMC_DATA_WRITE;

	data.sg = &sg;
	data.sg_len = 1;
	mmc_set_data_timeout(&data, card);
	sg_init_one(&sg, data_buf, len);
	mmc_wait_for_req(host, &mrq);
	err = 0;
	if (opcode == MMC_BUS_TEST_R) {
		for (i = 0; i < len / 4; i++)
			if ((test_buf[i] ^ data_buf[i]) != 0xff) {
				err = -EIO;
				break;
			}
	}
	kfree(data_buf);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return err;
}

int mmc_bus_test(struct mmc_card *card, u8 bus_width)
{
	int err, width;

	if (bus_width == MMC_BUS_WIDTH_8)
		width = 8;
	else if (bus_width == MMC_BUS_WIDTH_4)
		width = 4;
	else if (bus_width == MMC_BUS_WIDTH_1)
		return 0; /* no need for test */
	else
		return -EINVAL;

	/*
	 * Ignore errors from BUS_TEST_W.  BUS_TEST_R will fail if there
	 * is a problem.  This improves chances that the test will work.
	 */
	mmc_send_bus_test(card, card->host, MMC_BUS_TEST_W, width);
	err = mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
	return err;
}

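/*
 * Issue a High Priority Interrupt (HPI) to interrupt an ongoing card
 * operation. The HPI command advertised by the card in EXT_CSD is used
 * (CMD12 or CMD13), with the HPI bit set in the argument alongside the
 * card's RCA.
 */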
int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status)
{
	struct mmc_command cmd = {0};
	unsigned int opcode;
	int err;

	if (!card->ext_csd.hpi) {
		pr_warn("%s: Card doesn't support HPI command\n",
			mmc_hostname(card->host));
		return -EINVAL;
	}

	opcode = card->ext_csd.hpi_cmd;
	if (opcode == MMC_STOP_TRANSMISSION)
		cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
	else if (opcode == MMC_SEND_STATUS)
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	cmd.opcode = opcode;
	cmd.arg = card->rca << 16 | 1;

	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		pr_warn("%s: error %d interrupting operation. HPI command response %#x\n",
			mmc_hostname(card->host), err, cmd.resp[0]);
		return err;
	}
	if (status)
		*status = cmd.resp[0];

	return 0;
}
678