xref: /openbmc/linux/drivers/mmc/core/mmc_ops.c (revision 1c2f87c2)
1 /*
2  *  linux/drivers/mmc/core/mmc_ops.c
3  *
4  *  Copyright 2006-2007 Pierre Ossman
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation; either version 2 of the License, or (at
9  * your option) any later version.
10  */
11 
12 #include <linux/slab.h>
13 #include <linux/export.h>
14 #include <linux/types.h>
15 #include <linux/scatterlist.h>
16 
17 #include <linux/mmc/host.h>
18 #include <linux/mmc/card.h>
19 #include <linux/mmc/mmc.h>
20 
21 #include "core.h"
22 #include "mmc_ops.h"
23 
24 #define MMC_OPS_TIMEOUT_MS	(10 * 60 * 1000) /* 10 minute timeout */
25 
/*
 * Send CMD13 (SEND_STATUS) to @card and, if @status is non-NULL, store
 * the first response word there.  @ignore_crc drops the CRC check on
 * the response, for callers polling a card that may still be busy.
 * Returns 0 on success or a negative errno from the command.
 */
static inline int __mmc_send_status(struct mmc_card *card, u32 *status,
				    bool ignore_crc)
{
	int err;
	struct mmc_command cmd = {0};

	BUG_ON(!card);
	BUG_ON(!card->host);

	cmd.opcode = MMC_SEND_STATUS;
	/* In SPI mode the single card is addressed implicitly, no RCA. */
	if (!mmc_host_is_spi(card->host))
		cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
	if (ignore_crc)
		cmd.flags &= ~MMC_RSP_CRC;

	err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
	if (err)
		return err;

	/* NOTE: callers are required to understand the difference
	 * between "native" and SPI format status words!
	 */
	if (status)
		*status = cmd.resp[0];

	return 0;
}
54 
55 int mmc_send_status(struct mmc_card *card, u32 *status)
56 {
57 	return __mmc_send_status(card, status, false);
58 }
59 
60 static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card)
61 {
62 	int err;
63 	struct mmc_command cmd = {0};
64 
65 	BUG_ON(!host);
66 
67 	cmd.opcode = MMC_SELECT_CARD;
68 
69 	if (card) {
70 		cmd.arg = card->rca << 16;
71 		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
72 	} else {
73 		cmd.arg = 0;
74 		cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
75 	}
76 
77 	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
78 	if (err)
79 		return err;
80 
81 	return 0;
82 }
83 
84 int mmc_select_card(struct mmc_card *card)
85 {
86 	BUG_ON(!card);
87 
88 	return _mmc_select_card(card->host, card);
89 }
90 
91 int mmc_deselect_cards(struct mmc_host *host)
92 {
93 	return _mmc_select_card(host, NULL);
94 }
95 
/*
 * Send CMD0 (GO_IDLE_STATE) to reset the card(s) on @host.
 * Returns 0 on success or a negative errno from the command.
 */
int mmc_go_idle(struct mmc_host *host)
{
	int err;
	struct mmc_command cmd = {0};

	/*
	 * Non-SPI hosts need to prevent chipselect going active during
	 * GO_IDLE; that would put chips into SPI mode.  Remind them of
	 * that in case of hardware that won't pull up DAT3/nCS otherwise.
	 *
	 * SPI hosts ignore ios.chip_select; it's managed according to
	 * rules that must accommodate non-MMC slaves which this layer
	 * won't even know about.
	 */
	if (!mmc_host_is_spi(host)) {
		mmc_set_chip_select(host, MMC_CS_HIGH);
		mmc_delay(1);
	}

	cmd.opcode = MMC_GO_IDLE_STATE;
	cmd.arg = 0;
	/* CMD0 has no response in native mode; SPI answers with an R1. */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC;

	err = mmc_wait_for_cmd(host, &cmd, 0);

	/* Give the card a moment to complete the reset. */
	mmc_delay(1);

	if (!mmc_host_is_spi(host)) {
		mmc_set_chip_select(host, MMC_CS_DONTCARE);
		mmc_delay(1);
	}

	/* A freshly reset card comes back with SPI CRC checking off. */
	host->use_spi_crc = 0;

	return err;
}
132 
/*
 * Send CMD1 (SEND_OP_COND), polling until the card reports that its
 * power-up sequence has completed.
 *
 * @ocr: proposed OCR voltage window; 0 means "just probe", in which
 *	case only a single command is issued.
 * @rocr: if non-NULL (and not in SPI mode), receives the card's OCR.
 *
 * Returns 0 on success, -ETIMEDOUT if the card is still busy after
 * 100 polls, or a negative errno from the command itself.
 */
int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
{
	struct mmc_command cmd = {0};
	int i, err = 0;

	BUG_ON(!host);

	cmd.opcode = MMC_SEND_OP_COND;
	/* SPI-mode CMD1 takes no OCR argument. */
	cmd.arg = mmc_host_is_spi(host) ? 0 : ocr;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;

	for (i = 100; i; i--) {
		err = mmc_wait_for_cmd(host, &cmd, 0);
		if (err)
			break;

		/* if we're just probing, do a single pass */
		if (ocr == 0)
			break;

		/* otherwise wait until reset completes */
		if (mmc_host_is_spi(host)) {
			/* SPI signals busy via the in-idle-state bit. */
			if (!(cmd.resp[0] & R1_SPI_IDLE))
				break;
		} else {
			/* Native mode: busy bit set means power-up done. */
			if (cmd.resp[0] & MMC_CARD_BUSY)
				break;
		}

		/* Still busy; will be overwritten if the next poll clears. */
		err = -ETIMEDOUT;

		mmc_delay(10);
	}

	if (rocr && !mmc_host_is_spi(host))
		*rocr = cmd.resp[0];

	return err;
}
172 
173 int mmc_all_send_cid(struct mmc_host *host, u32 *cid)
174 {
175 	int err;
176 	struct mmc_command cmd = {0};
177 
178 	BUG_ON(!host);
179 	BUG_ON(!cid);
180 
181 	cmd.opcode = MMC_ALL_SEND_CID;
182 	cmd.arg = 0;
183 	cmd.flags = MMC_RSP_R2 | MMC_CMD_BCR;
184 
185 	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
186 	if (err)
187 		return err;
188 
189 	memcpy(cid, cmd.resp, sizeof(u32) * 4);
190 
191 	return 0;
192 }
193 
194 int mmc_set_relative_addr(struct mmc_card *card)
195 {
196 	int err;
197 	struct mmc_command cmd = {0};
198 
199 	BUG_ON(!card);
200 	BUG_ON(!card->host);
201 
202 	cmd.opcode = MMC_SET_RELATIVE_ADDR;
203 	cmd.arg = card->rca << 16;
204 	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
205 
206 	err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
207 	if (err)
208 		return err;
209 
210 	return 0;
211 }
212 
213 static int
214 mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode)
215 {
216 	int err;
217 	struct mmc_command cmd = {0};
218 
219 	BUG_ON(!host);
220 	BUG_ON(!cxd);
221 
222 	cmd.opcode = opcode;
223 	cmd.arg = arg;
224 	cmd.flags = MMC_RSP_R2 | MMC_CMD_AC;
225 
226 	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
227 	if (err)
228 		return err;
229 
230 	memcpy(cxd, cmd.resp, sizeof(u32) * 4);
231 
232 	return 0;
233 }
234 
/*
 * Read a register (CSD, CID or EXT_CSD) as a single data block of
 * @len bytes into @buf.
 *
 * NOTE: void *buf, caller for the buf is required to use DMA-capable
 * buffer or on-stack buffer (with some overhead in callee).
 */
static int
mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
		u32 opcode, void *buf, unsigned len)
{
	struct mmc_request mrq = {NULL};
	struct mmc_command cmd = {0};
	struct mmc_data data = {0};
	struct scatterlist sg;
	void *data_buf;
	int is_on_stack;

	is_on_stack = object_is_on_stack(buf);
	if (is_on_stack) {
		/*
		 * dma onto stack is unsafe/nonportable, but callers to this
		 * routine normally provide temporary on-stack buffers ...
		 * so bounce the transfer through a kmalloc'd buffer instead.
		 */
		data_buf = kmalloc(len, GFP_KERNEL);
		if (!data_buf)
			return -ENOMEM;
	} else
		data_buf = buf;

	mrq.cmd = &cmd;
	mrq.data = &data;

	cmd.opcode = opcode;
	cmd.arg = 0;

	/* NOTE HACK:  the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID.  Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	/* Single-block read of exactly @len bytes. */
	data.blksz = len;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;

	sg_init_one(&sg, data_buf, len);

	if (opcode == MMC_SEND_CSD || opcode == MMC_SEND_CID) {
		/*
		 * The spec states that CSD and CID accesses have a timeout
		 * of 64 clock cycles.
		 */
		data.timeout_ns = 0;
		data.timeout_clks = 64;
	} else
		mmc_set_data_timeout(&data, card);

	mmc_wait_for_req(host, &mrq);

	/* Copy the result back into the caller's stack buffer. */
	if (is_on_stack) {
		memcpy(buf, data_buf, len);
		kfree(data_buf);
	}

	/* Command errors are reported ahead of data errors. */
	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return 0;
}
307 
308 int mmc_send_csd(struct mmc_card *card, u32 *csd)
309 {
310 	int ret, i;
311 	u32 *csd_tmp;
312 
313 	if (!mmc_host_is_spi(card->host))
314 		return mmc_send_cxd_native(card->host, card->rca << 16,
315 				csd, MMC_SEND_CSD);
316 
317 	csd_tmp = kmalloc(16, GFP_KERNEL);
318 	if (!csd_tmp)
319 		return -ENOMEM;
320 
321 	ret = mmc_send_cxd_data(card, card->host, MMC_SEND_CSD, csd_tmp, 16);
322 	if (ret)
323 		goto err;
324 
325 	for (i = 0;i < 4;i++)
326 		csd[i] = be32_to_cpu(csd_tmp[i]);
327 
328 err:
329 	kfree(csd_tmp);
330 	return ret;
331 }
332 
333 int mmc_send_cid(struct mmc_host *host, u32 *cid)
334 {
335 	int ret, i;
336 	u32 *cid_tmp;
337 
338 	if (!mmc_host_is_spi(host)) {
339 		if (!host->card)
340 			return -EINVAL;
341 		return mmc_send_cxd_native(host, host->card->rca << 16,
342 				cid, MMC_SEND_CID);
343 	}
344 
345 	cid_tmp = kmalloc(16, GFP_KERNEL);
346 	if (!cid_tmp)
347 		return -ENOMEM;
348 
349 	ret = mmc_send_cxd_data(NULL, host, MMC_SEND_CID, cid_tmp, 16);
350 	if (ret)
351 		goto err;
352 
353 	for (i = 0;i < 4;i++)
354 		cid[i] = be32_to_cpu(cid_tmp[i]);
355 
356 err:
357 	kfree(cid_tmp);
358 	return ret;
359 }
360 
361 int mmc_send_ext_csd(struct mmc_card *card, u8 *ext_csd)
362 {
363 	return mmc_send_cxd_data(card, card->host, MMC_SEND_EXT_CSD,
364 			ext_csd, 512);
365 }
366 EXPORT_SYMBOL_GPL(mmc_send_ext_csd);
367 
368 int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp)
369 {
370 	struct mmc_command cmd = {0};
371 	int err;
372 
373 	cmd.opcode = MMC_SPI_READ_OCR;
374 	cmd.arg = highcap ? (1 << 30) : 0;
375 	cmd.flags = MMC_RSP_SPI_R3;
376 
377 	err = mmc_wait_for_cmd(host, &cmd, 0);
378 
379 	*ocrp = cmd.resp[1];
380 	return err;
381 }
382 
383 int mmc_spi_set_crc(struct mmc_host *host, int use_crc)
384 {
385 	struct mmc_command cmd = {0};
386 	int err;
387 
388 	cmd.opcode = MMC_SPI_CRC_ON_OFF;
389 	cmd.flags = MMC_RSP_SPI_R1;
390 	cmd.arg = use_crc;
391 
392 	err = mmc_wait_for_cmd(host, &cmd, 0);
393 	if (!err)
394 		host->use_spi_crc = use_crc;
395 	return err;
396 }
397 
/**
 *	__mmc_switch - modify EXT_CSD register
 *	@card: the MMC card associated with the data transfer
 *	@set: cmd set values
 *	@index: EXT_CSD register index
 *	@value: value to program into EXT_CSD register
 *	@timeout_ms: timeout (ms) for operation performed by register write,
 *                   timeout of zero implies maximum possible timeout
 *	@use_busy_signal: use the busy signal as response type
 *	@send_status: send status cmd to poll for busy
 *	@ignore_crc: ignore CRC errors when sending status cmd to poll for busy
 *
 *	Modifies the EXT_CSD register for selected card.
 *
 *	Returns 0 on success, -ETIMEDOUT if the card never leaves the
 *	programming state, -EBADMSG if the card reports a switch error,
 *	or a negative errno from the underlying commands.
 */
int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
		unsigned int timeout_ms, bool use_busy_signal, bool send_status,
		bool ignore_crc)
{
	struct mmc_host *host = card->host;
	int err;
	struct mmc_command cmd = {0};
	unsigned long timeout;
	u32 status = 0;
	bool use_r1b_resp = use_busy_signal;

	/*
	 * If the cmd timeout and the max_busy_timeout of the host are both
	 * specified, let's validate them. A failure means we need to prevent
	 * the host from doing hw busy detection, which is done by converting
	 * to a R1 response instead of a R1B.
	 */
	if (timeout_ms && host->max_busy_timeout &&
		(timeout_ms > host->max_busy_timeout))
		use_r1b_resp = false;

	/* CMD6 argument packs access mode, register index, value and set. */
	cmd.opcode = MMC_SWITCH;
	cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
		  (index << 16) |
		  (value << 8) |
		  set;
	cmd.flags = MMC_CMD_AC;
	if (use_r1b_resp) {
		cmd.flags |= MMC_RSP_SPI_R1B | MMC_RSP_R1B;
		/*
		 * A busy_timeout of zero means the host can decide to use
		 * whatever value it finds suitable.
		 */
		cmd.busy_timeout = timeout_ms;
	} else {
		cmd.flags |= MMC_RSP_SPI_R1 | MMC_RSP_R1;
	}

	if (index == EXT_CSD_SANITIZE_START)
		cmd.sanitize_busy = true;

	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
	if (err)
		return err;

	/* No need to check card status in case of unblocking command */
	if (!use_busy_signal)
		return 0;

	/*
	 * CRC errors shall only be ignored in cases where CMD13 is used to
	 * poll to detect busy completion.
	 */
	if ((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp)
		ignore_crc = false;

	/* We have an unspecified cmd timeout, use the fallback value. */
	if (!timeout_ms)
		timeout_ms = MMC_OPS_TIMEOUT_MS;

	/* Must check status to be sure of no errors. */
	timeout = jiffies + msecs_to_jiffies(timeout_ms);
	do {
		if (send_status) {
			err = __mmc_send_status(card, &status, ignore_crc);
			if (err)
				return err;
		}
		/* Hosts with hw busy detection already waited in CMD6. */
		if ((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp)
			break;
		if (mmc_host_is_spi(host))
			break;

		/*
		 * We are not allowed to issue a status command and the host
		 * doesn't support MMC_CAP_WAIT_WHILE_BUSY, then we can only
		 * rely on waiting for the stated timeout to be sufficient.
		 */
		if (!send_status) {
			mmc_delay(timeout_ms);
			return 0;
		}

		/* Timeout if the device never leaves the program state. */
		if (time_after(jiffies, timeout)) {
			pr_err("%s: Card stuck in programming state! %s\n",
				mmc_hostname(host), __func__);
			return -ETIMEDOUT;
		}
	} while (R1_CURRENT_STATE(status) == R1_STATE_PRG);

	if (mmc_host_is_spi(host)) {
		if (status & R1_SPI_ILLEGAL_COMMAND)
			return -EBADMSG;
	} else {
		/* NOTE(review): mask appears to select R1 error bits —
		 * verify against the R1_* flag definitions. */
		if (status & 0xFDFFA000)
			pr_warn("%s: unexpected status %#x after switch\n",
				mmc_hostname(host), status);
		if (status & R1_SWITCH_ERROR)
			return -EBADMSG;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(__mmc_switch);
517 
518 int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
519 		unsigned int timeout_ms)
520 {
521 	return __mmc_switch(card, set, index, value, timeout_ms, true, true,
522 				false);
523 }
524 EXPORT_SYMBOL_GPL(mmc_switch);
525 
/*
 * Run one half of the bus test: BUS_TEST_W sends a known pattern over
 * the data lines, BUS_TEST_R reads back what the card echoes.  @len is
 * the bus width in bytes per transfer (8 for 8-bit, 4 for 4-bit).
 * Returns 0 on success, -EIO on pattern mismatch, -EINVAL for an
 * unsupported @len, or a command/data error.
 */
static int
mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
		  u8 len)
{
	struct mmc_request mrq = {NULL};
	struct mmc_command cmd = {0};
	struct mmc_data data = {0};
	struct scatterlist sg;
	u8 *data_buf;
	u8 *test_buf;
	int i, err;
	/* Test patterns; only the leading bytes are non-zero. */
	static u8 testdata_8bit[8] = { 0x55, 0xaa, 0, 0, 0, 0, 0, 0 };
	static u8 testdata_4bit[4] = { 0x5a, 0, 0, 0 };

	/* dma onto stack is unsafe/nonportable, but callers to this
	 * routine normally provide temporary on-stack buffers ...
	 */
	data_buf = kmalloc(len, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;

	if (len == 8)
		test_buf = testdata_8bit;
	else if (len == 4)
		test_buf = testdata_4bit;
	else {
		pr_err("%s: Invalid bus_width %d\n",
		       mmc_hostname(host), len);
		kfree(data_buf);
		return -EINVAL;
	}

	if (opcode == MMC_BUS_TEST_W)
		memcpy(data_buf, test_buf, len);

	mrq.cmd = &cmd;
	mrq.data = &data;
	cmd.opcode = opcode;
	cmd.arg = 0;

	/* NOTE HACK:  the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID.  Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = len;
	data.blocks = 1;
	if (opcode == MMC_BUS_TEST_R)
		data.flags = MMC_DATA_READ;
	else
		data.flags = MMC_DATA_WRITE;

	data.sg = &sg;
	data.sg_len = 1;
	mmc_set_data_timeout(&data, card);
	sg_init_one(&sg, data_buf, len);
	mmc_wait_for_req(host, &mrq);
	err = 0;
	if (opcode == MMC_BUS_TEST_R) {
		/*
		 * Each byte read back should be the bitwise inverse of the
		 * byte written, so XOR must give 0xff.  Only the first
		 * len/4 bytes (the non-zero pattern bytes) are checked.
		 */
		for (i = 0; i < len / 4; i++)
			if ((test_buf[i] ^ data_buf[i]) != 0xff) {
				err = -EIO;
				break;
			}
	}
	kfree(data_buf);

	/* Command/data errors take precedence over pattern mismatch. */
	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return err;
}
602 
603 int mmc_bus_test(struct mmc_card *card, u8 bus_width)
604 {
605 	int err, width;
606 
607 	if (bus_width == MMC_BUS_WIDTH_8)
608 		width = 8;
609 	else if (bus_width == MMC_BUS_WIDTH_4)
610 		width = 4;
611 	else if (bus_width == MMC_BUS_WIDTH_1)
612 		return 0; /* no need for test */
613 	else
614 		return -EINVAL;
615 
616 	/*
617 	 * Ignore errors from BUS_TEST_W.  BUS_TEST_R will fail if there
618 	 * is a problem.  This improves chances that the test will work.
619 	 */
620 	mmc_send_bus_test(card, card->host, MMC_BUS_TEST_W, width);
621 	err = mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
622 	return err;
623 }
624 
625 int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status)
626 {
627 	struct mmc_command cmd = {0};
628 	unsigned int opcode;
629 	int err;
630 
631 	if (!card->ext_csd.hpi) {
632 		pr_warning("%s: Card didn't support HPI command\n",
633 			   mmc_hostname(card->host));
634 		return -EINVAL;
635 	}
636 
637 	opcode = card->ext_csd.hpi_cmd;
638 	if (opcode == MMC_STOP_TRANSMISSION)
639 		cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
640 	else if (opcode == MMC_SEND_STATUS)
641 		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
642 
643 	cmd.opcode = opcode;
644 	cmd.arg = card->rca << 16 | 1;
645 
646 	err = mmc_wait_for_cmd(card->host, &cmd, 0);
647 	if (err) {
648 		pr_warn("%s: error %d interrupting operation. "
649 			"HPI command response %#x\n", mmc_hostname(card->host),
650 			err, cmd.resp[0]);
651 		return err;
652 	}
653 	if (status)
654 		*status = cmd.resp[0];
655 
656 	return 0;
657 }
658