xref: /openbmc/linux/drivers/mmc/core/mmc_ops.c (revision 972d5084)
12874c5fdSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-or-later
2da7fbe58SPierre Ossman /*
370f10482SPierre Ossman  *  linux/drivers/mmc/core/mmc_ops.c
4da7fbe58SPierre Ossman  *
5da7fbe58SPierre Ossman  *  Copyright 2006-2007 Pierre Ossman
6da7fbe58SPierre Ossman  */
7da7fbe58SPierre Ossman 
85a0e3ad6STejun Heo #include <linux/slab.h>
93ef77af1SPaul Gortmaker #include <linux/export.h>
10da7fbe58SPierre Ossman #include <linux/types.h>
11da7fbe58SPierre Ossman #include <linux/scatterlist.h>
12da7fbe58SPierre Ossman 
13da7fbe58SPierre Ossman #include <linux/mmc/host.h>
14da7fbe58SPierre Ossman #include <linux/mmc/card.h>
15da7fbe58SPierre Ossman #include <linux/mmc/mmc.h>
16da7fbe58SPierre Ossman 
17da7fbe58SPierre Ossman #include "core.h"
181cf8f7e5SUlf Hansson #include "card.h"
19c6dbab9cSAdrian Hunter #include "host.h"
20da7fbe58SPierre Ossman #include "mmc_ops.h"
21da7fbe58SPierre Ossman 
2224ed3bd0SUlf Hansson #define MMC_BKOPS_TIMEOUT_MS		(120 * 1000) /* 120s */
2355c2b8b9SUlf Hansson #define MMC_SANITIZE_TIMEOUT_MS		(240 * 1000) /* 240s */
248fee476bSTrey Ramsay 
2504cdbbfaSUlf Hansson static const u8 tuning_blk_pattern_4bit[] = {
2604cdbbfaSUlf Hansson 	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
2704cdbbfaSUlf Hansson 	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
2804cdbbfaSUlf Hansson 	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
2904cdbbfaSUlf Hansson 	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
3004cdbbfaSUlf Hansson 	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
3104cdbbfaSUlf Hansson 	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
3204cdbbfaSUlf Hansson 	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
3304cdbbfaSUlf Hansson 	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
3404cdbbfaSUlf Hansson };
3504cdbbfaSUlf Hansson 
3604cdbbfaSUlf Hansson static const u8 tuning_blk_pattern_8bit[] = {
3704cdbbfaSUlf Hansson 	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
3804cdbbfaSUlf Hansson 	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
3904cdbbfaSUlf Hansson 	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
4004cdbbfaSUlf Hansson 	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
4104cdbbfaSUlf Hansson 	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
4204cdbbfaSUlf Hansson 	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
4304cdbbfaSUlf Hansson 	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
4404cdbbfaSUlf Hansson 	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
4504cdbbfaSUlf Hansson 	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
4604cdbbfaSUlf Hansson 	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
4704cdbbfaSUlf Hansson 	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
4804cdbbfaSUlf Hansson 	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
4904cdbbfaSUlf Hansson 	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
5004cdbbfaSUlf Hansson 	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
5104cdbbfaSUlf Hansson 	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
5204cdbbfaSUlf Hansson 	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
5304cdbbfaSUlf Hansson };
5404cdbbfaSUlf Hansson 
/*
 * Context handed to the CMD13 busy-polling callback (mmc_busy_cb) via
 * __mmc_poll_for_busy()'s cb_data pointer.
 */
5504f967adSUlf Hansson struct mmc_busy_data {
5604f967adSUlf Hansson 	struct mmc_card *card;
	/* When true, a CRC error (-EILSEQ) on CMD13 is treated as "still busy". */
5704f967adSUlf Hansson 	bool retry_crc_err;
	/* Which operation is being waited on; selects how status errors decode. */
5804f967adSUlf Hansson 	enum mmc_busy_cmd busy_cmd;
5904f967adSUlf Hansson };
/*
 * Issue CMD13 (SEND_STATUS) with the given number of retries and, on
 * success, store the response word in *status (when non-NULL). In SPI
 * mode no RCA argument is set, as the arg stays 0.
 */
612185bc2cSUlf Hansson int __mmc_send_status(struct mmc_card *card, u32 *status, unsigned int retries)
62a27fbf2fSSeungwon Jeon {
63a27fbf2fSSeungwon Jeon 	int err;
64c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
65a27fbf2fSSeungwon Jeon 
66a27fbf2fSSeungwon Jeon 	cmd.opcode = MMC_SEND_STATUS;
67a27fbf2fSSeungwon Jeon 	if (!mmc_host_is_spi(card->host))
68a27fbf2fSSeungwon Jeon 		cmd.arg = card->rca << 16;
69a27fbf2fSSeungwon Jeon 	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
70a27fbf2fSSeungwon Jeon 
712185bc2cSUlf Hansson 	err = mmc_wait_for_cmd(card->host, &cmd, retries);
72a27fbf2fSSeungwon Jeon 	if (err)
73a27fbf2fSSeungwon Jeon 		return err;
74a27fbf2fSSeungwon Jeon 
75a27fbf2fSSeungwon Jeon 	/* NOTE: callers are required to understand the difference
76a27fbf2fSSeungwon Jeon 	 * between "native" and SPI format status words!
77a27fbf2fSSeungwon Jeon 	 */
78a27fbf2fSSeungwon Jeon 	if (status)
79a27fbf2fSSeungwon Jeon 		*status = cmd.resp[0];
80a27fbf2fSSeungwon Jeon 
81a27fbf2fSSeungwon Jeon 	return 0;
82a27fbf2fSSeungwon Jeon }
832185bc2cSUlf Hansson EXPORT_SYMBOL_GPL(__mmc_send_status);
842185bc2cSUlf Hansson 
/* Convenience wrapper: CMD13 with the default command retry count. */
852185bc2cSUlf Hansson int mmc_send_status(struct mmc_card *card, u32 *status)
862185bc2cSUlf Hansson {
872185bc2cSUlf Hansson 	return __mmc_send_status(card, status, MMC_CMD_RETRIES);
882185bc2cSUlf Hansson }
891bee324aSLinus Walleij EXPORT_SYMBOL_GPL(mmc_send_status);
90a27fbf2fSSeungwon Jeon 
/*
 * Issue CMD7 (SELECT/DESELECT_CARD). A non-NULL @card selects that card
 * (RCA in the upper 16 arg bits, R1 response); @card == NULL deselects
 * all cards (arg 0, no response expected).
 */
91da7fbe58SPierre Ossman static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card)
92da7fbe58SPierre Ossman {
93c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
94da7fbe58SPierre Ossman 
95da7fbe58SPierre Ossman 	cmd.opcode = MMC_SELECT_CARD;
96da7fbe58SPierre Ossman 
97da7fbe58SPierre Ossman 	if (card) {
98da7fbe58SPierre Ossman 		cmd.arg = card->rca << 16;
99da7fbe58SPierre Ossman 		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
100da7fbe58SPierre Ossman 	} else {
101da7fbe58SPierre Ossman 		cmd.arg = 0;
102da7fbe58SPierre Ossman 		cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
103da7fbe58SPierre Ossman 	}
104da7fbe58SPierre Ossman 
1050899e741SMasahiro Yamada 	return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
106da7fbe58SPierre Ossman }
107da7fbe58SPierre Ossman 
/* Select @card on its host (CMD7 with the card's RCA). */
108da7fbe58SPierre Ossman int mmc_select_card(struct mmc_card *card)
109da7fbe58SPierre Ossman {
110da7fbe58SPierre Ossman 
111da7fbe58SPierre Ossman 	return _mmc_select_card(card->host, card);
112da7fbe58SPierre Ossman }
113da7fbe58SPierre Ossman 
/* Deselect any selected card on @host (CMD7 with arg 0). */
114da7fbe58SPierre Ossman int mmc_deselect_cards(struct mmc_host *host)
115da7fbe58SPierre Ossman {
116da7fbe58SPierre Ossman 	return _mmc_select_card(host, NULL);
117da7fbe58SPierre Ossman }
118da7fbe58SPierre Ossman 
1193d705d14SSascha Hauer /*
1203d705d14SSascha Hauer  * Write the value specified in the device tree or board code into the optional
1213d705d14SSascha Hauer  * 16 bit Driver Stage Register. This can be used to tune raise/fall times and
1223d705d14SSascha Hauer  * drive strength of the DAT and CMD outputs. The actual meaning of a given
1233d705d14SSascha Hauer  * value is hardware dependent.
1243d705d14SSascha Hauer  * The presence of the DSR register can be determined from the CSD register,
1253d705d14SSascha Hauer  * bit 76.
1263d705d14SSascha Hauer  */
1273d705d14SSascha Hauer int mmc_set_dsr(struct mmc_host *host)
1283d705d14SSascha Hauer {
129c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
1303d705d14SSascha Hauer 
1313d705d14SSascha Hauer 	cmd.opcode = MMC_SET_DSR;
1323d705d14SSascha Hauer 
	/* CMD4 takes the DSR in the upper 16 bits; the low bits are stuffed. */
1333d705d14SSascha Hauer 	cmd.arg = (host->dsr << 16) | 0xffff;
1343d705d14SSascha Hauer 	cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
1353d705d14SSascha Hauer 
1363d705d14SSascha Hauer 	return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
1373d705d14SSascha Hauer }
1383d705d14SSascha Hauer 
/*
 * Issue CMD0 (GO_IDLE_STATE) to reset all cards to the idle state. For
 * non-SPI hosts the chip-select line is driven high around the command
 * to keep cards from interpreting the reset as an SPI-mode entry; any
 * previously negotiated SPI CRC setting is also forgotten.
 */
139da7fbe58SPierre Ossman int mmc_go_idle(struct mmc_host *host)
140da7fbe58SPierre Ossman {
141da7fbe58SPierre Ossman 	int err;
142c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
143da7fbe58SPierre Ossman 
144af517150SDavid Brownell 	/*
145af517150SDavid Brownell 	 * Non-SPI hosts need to prevent chipselect going active during
146af517150SDavid Brownell 	 * GO_IDLE; that would put chips into SPI mode.  Remind them of
147af517150SDavid Brownell 	 * that in case of hardware that won't pull up DAT3/nCS otherwise.
148af517150SDavid Brownell 	 *
149af517150SDavid Brownell 	 * SPI hosts ignore ios.chip_select; it's managed according to
15025985edcSLucas De Marchi 	 * rules that must accommodate non-MMC slaves which this layer
151af517150SDavid Brownell 	 * won't even know about.
152af517150SDavid Brownell 	 */
153af517150SDavid Brownell 	if (!mmc_host_is_spi(host)) {
154da7fbe58SPierre Ossman 		mmc_set_chip_select(host, MMC_CS_HIGH);
155da7fbe58SPierre Ossman 		mmc_delay(1);
156af517150SDavid Brownell 	}
157da7fbe58SPierre Ossman 
158da7fbe58SPierre Ossman 	cmd.opcode = MMC_GO_IDLE_STATE;
159da7fbe58SPierre Ossman 	cmd.arg = 0;
160af517150SDavid Brownell 	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC;
161da7fbe58SPierre Ossman 
162da7fbe58SPierre Ossman 	err = mmc_wait_for_cmd(host, &cmd, 0);
163da7fbe58SPierre Ossman 
164da7fbe58SPierre Ossman 	mmc_delay(1);
165da7fbe58SPierre Ossman 
166af517150SDavid Brownell 	if (!mmc_host_is_spi(host)) {
167da7fbe58SPierre Ossman 		mmc_set_chip_select(host, MMC_CS_DONTCARE);
168da7fbe58SPierre Ossman 		mmc_delay(1);
169af517150SDavid Brownell 	}
170af517150SDavid Brownell 
	/* After a reset the card no longer expects CRCs in SPI mode. */
171af517150SDavid Brownell 	host->use_spi_crc = 0;
172da7fbe58SPierre Ossman 
173da7fbe58SPierre Ossman 	return err;
174da7fbe58SPierre Ossman }
175da7fbe58SPierre Ossman 
/*
 * Issue CMD1 (SEND_OP_COND), repeating up to 100 times at 10 ms intervals
 * until the card reports it has left the busy/idle state. On native (non-SPI)
 * hosts the resulting OCR is returned via *rocr when requested. Returns
 * -ETIMEDOUT if the card never becomes ready.
 */
176da7fbe58SPierre Ossman int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
177da7fbe58SPierre Ossman {
178c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
179da7fbe58SPierre Ossman 	int i, err = 0;
180da7fbe58SPierre Ossman 
181da7fbe58SPierre Ossman 	cmd.opcode = MMC_SEND_OP_COND;
182af517150SDavid Brownell 	cmd.arg = mmc_host_is_spi(host) ? 0 : ocr;
183af517150SDavid Brownell 	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;
184da7fbe58SPierre Ossman 
185da7fbe58SPierre Ossman 	for (i = 100; i; i--) {
186da7fbe58SPierre Ossman 		err = mmc_wait_for_cmd(host, &cmd, 0);
18717b0429dSPierre Ossman 		if (err)
188da7fbe58SPierre Ossman 			break;
189da7fbe58SPierre Ossman 
1904c94cb65SYoshihiro Shimoda 		/* wait until reset completes */
191af517150SDavid Brownell 		if (mmc_host_is_spi(host)) {
192af517150SDavid Brownell 			if (!(cmd.resp[0] & R1_SPI_IDLE))
193af517150SDavid Brownell 				break;
194af517150SDavid Brownell 		} else {
195af517150SDavid Brownell 			if (cmd.resp[0] & MMC_CARD_BUSY)
196af517150SDavid Brownell 				break;
197af517150SDavid Brownell 		}
198af517150SDavid Brownell 
19917b0429dSPierre Ossman 		err = -ETIMEDOUT;
200da7fbe58SPierre Ossman 
201da7fbe58SPierre Ossman 		mmc_delay(10);
2024c94cb65SYoshihiro Shimoda 
2034c94cb65SYoshihiro Shimoda 		/*
2044c94cb65SYoshihiro Shimoda 		 * According to eMMC specification v5.1 section 6.4.3, we
2054c94cb65SYoshihiro Shimoda 		 * should issue CMD1 repeatedly in the idle state until
2064c94cb65SYoshihiro Shimoda 		 * the eMMC is ready. Otherwise some eMMC devices seem to enter
2074c94cb65SYoshihiro Shimoda 		 * the inactive mode after mmc_init_card() issued CMD0 when
2084c94cb65SYoshihiro Shimoda 		 * the eMMC device is busy.
2094c94cb65SYoshihiro Shimoda 		 */
2104c94cb65SYoshihiro Shimoda 		if (!ocr && !mmc_host_is_spi(host))
2114c94cb65SYoshihiro Shimoda 			cmd.arg = cmd.resp[0] | BIT(30);
212da7fbe58SPierre Ossman 	}
213da7fbe58SPierre Ossman 
214af517150SDavid Brownell 	if (rocr && !mmc_host_is_spi(host))
215da7fbe58SPierre Ossman 		*rocr = cmd.resp[0];
216da7fbe58SPierre Ossman 
217da7fbe58SPierre Ossman 	return err;
218da7fbe58SPierre Ossman }
219da7fbe58SPierre Ossman 
/* Issue CMD3 (SET_RELATIVE_ADDR) to assign the card's RCA. */
220da7fbe58SPierre Ossman int mmc_set_relative_addr(struct mmc_card *card)
221da7fbe58SPierre Ossman {
222c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
223da7fbe58SPierre Ossman 
224da7fbe58SPierre Ossman 	cmd.opcode = MMC_SET_RELATIVE_ADDR;
225da7fbe58SPierre Ossman 	cmd.arg = card->rca << 16;
226da7fbe58SPierre Ossman 	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
227da7fbe58SPierre Ossman 
2280899e741SMasahiro Yamada 	return mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
229da7fbe58SPierre Ossman }
230da7fbe58SPierre Ossman 
/*
 * Fetch a 128-bit CSD/CID-style register via a native (non-SPI) command
 * with an R2 response, copying all four response words into @cxd.
 */
231af517150SDavid Brownell static int
232af517150SDavid Brownell mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode)
233da7fbe58SPierre Ossman {
234da7fbe58SPierre Ossman 	int err;
235c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
236da7fbe58SPierre Ossman 
237af517150SDavid Brownell 	cmd.opcode = opcode;
238af517150SDavid Brownell 	cmd.arg = arg;
239da7fbe58SPierre Ossman 	cmd.flags = MMC_RSP_R2 | MMC_CMD_AC;
240da7fbe58SPierre Ossman 
241af517150SDavid Brownell 	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
24217b0429dSPierre Ossman 	if (err)
243da7fbe58SPierre Ossman 		return err;
244da7fbe58SPierre Ossman 
245af517150SDavid Brownell 	memcpy(cxd, cmd.resp, sizeof(u32) * 4);
246da7fbe58SPierre Ossman 
24717b0429dSPierre Ossman 	return 0;
248da7fbe58SPierre Ossman }
249da7fbe58SPierre Ossman 
2501a41313eSKyungsik Lee /*
2511a41313eSKyungsik Lee  * NOTE: void *buf, caller for the buf is required to use DMA-capable
2521a41313eSKyungsik Lee  * buffer or on-stack buffer (with some overhead in callee).
2531a41313eSKyungsik Lee  */
/*
 * Issue an ADTC (addressed data transfer) command @opcode with argument
 * @args and read back a single @len-byte block into @buf. @card may be
 * NULL for pre-init transfers (e.g. SPI CSD/CID reads); in that case the
 * fixed 64-clock CSD/CID timeout path must apply, since
 * mmc_set_data_timeout() needs a card.
 */
254cec18ad9SUlf Hansson int mmc_send_adtc_data(struct mmc_card *card, struct mmc_host *host, u32 opcode,
255cec18ad9SUlf Hansson 		       u32 args, void *buf, unsigned len)
256da7fbe58SPierre Ossman {
257c7836d15SMasahiro Yamada 	struct mmc_request mrq = {};
258c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
259c7836d15SMasahiro Yamada 	struct mmc_data data = {};
260da7fbe58SPierre Ossman 	struct scatterlist sg;
261da7fbe58SPierre Ossman 
262da7fbe58SPierre Ossman 	mrq.cmd = &cmd;
263da7fbe58SPierre Ossman 	mrq.data = &data;
264da7fbe58SPierre Ossman 
265af517150SDavid Brownell 	cmd.opcode = opcode;
266cec18ad9SUlf Hansson 	cmd.arg = args;
267da7fbe58SPierre Ossman 
268af517150SDavid Brownell 	/* NOTE HACK:  the MMC_RSP_SPI_R1 is always correct here, but we
269af517150SDavid Brownell 	 * rely on callers to never use this with "native" calls for reading
270af517150SDavid Brownell 	 * CSD or CID.  Native versions of those commands use the R2 type,
271af517150SDavid Brownell 	 * not R1 plus a data block.
272af517150SDavid Brownell 	 */
273af517150SDavid Brownell 	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
274af517150SDavid Brownell 
275af517150SDavid Brownell 	data.blksz = len;
276da7fbe58SPierre Ossman 	data.blocks = 1;
277da7fbe58SPierre Ossman 	data.flags = MMC_DATA_READ;
278da7fbe58SPierre Ossman 	data.sg = &sg;
279da7fbe58SPierre Ossman 	data.sg_len = 1;
280da7fbe58SPierre Ossman 
281601ed60cSUlf Hansson 	sg_init_one(&sg, buf, len);
282da7fbe58SPierre Ossman 
283cda56ac2SAdrian Hunter 	if (opcode == MMC_SEND_CSD || opcode == MMC_SEND_CID) {
2840d3e0460SMatthew Fleming 		/*
2850d3e0460SMatthew Fleming 		 * The spec states that CSD and CID accesses have a timeout
2860d3e0460SMatthew Fleming 		 * of 64 clock cycles.
2870d3e0460SMatthew Fleming 		 */
2880d3e0460SMatthew Fleming 		data.timeout_ns = 0;
2890d3e0460SMatthew Fleming 		data.timeout_clks = 64;
290cda56ac2SAdrian Hunter 	} else
291cda56ac2SAdrian Hunter 		mmc_set_data_timeout(&data, card);
292da7fbe58SPierre Ossman 
293af517150SDavid Brownell 	mmc_wait_for_req(host, &mrq);
294af517150SDavid Brownell 
29517b0429dSPierre Ossman 	if (cmd.error)
296da7fbe58SPierre Ossman 		return cmd.error;
29717b0429dSPierre Ossman 	if (data.error)
298da7fbe58SPierre Ossman 		return data.error;
299da7fbe58SPierre Ossman 
30017b0429dSPierre Ossman 	return 0;
301da7fbe58SPierre Ossman }
302da7fbe58SPierre Ossman 
/*
 * Read a CSD or CID register over SPI. In SPI mode these arrive as a
 * 16-byte data block (big-endian) rather than an R2 response, so a
 * DMA-capable bounce buffer is heap-allocated and the words are
 * byte-swapped into host order afterwards.
 */
303b53f0beeSYue Hu static int mmc_spi_send_cxd(struct mmc_host *host, u32 *cxd, u32 opcode)
304af517150SDavid Brownell {
30578e48073SPierre Ossman 	int ret, i;
306b53f0beeSYue Hu 	__be32 *cxd_tmp;
30778e48073SPierre Ossman 
308b53f0beeSYue Hu 	cxd_tmp = kzalloc(16, GFP_KERNEL);
309b53f0beeSYue Hu 	if (!cxd_tmp)
3101a41313eSKyungsik Lee 		return -ENOMEM;
3111a41313eSKyungsik Lee 
	/* card == NULL is fine: the CSD/CID opcode takes the fixed timeout. */
312cec18ad9SUlf Hansson 	ret = mmc_send_adtc_data(NULL, host, opcode, 0, cxd_tmp, 16);
31378e48073SPierre Ossman 	if (ret)
3141a41313eSKyungsik Lee 		goto err;
31578e48073SPierre Ossman 
31678e48073SPierre Ossman 	for (i = 0; i < 4; i++)
317b53f0beeSYue Hu 		cxd[i] = be32_to_cpu(cxd_tmp[i]);
31878e48073SPierre Ossman 
3191a41313eSKyungsik Lee err:
320b53f0beeSYue Hu 	kfree(cxd_tmp);
3211a41313eSKyungsik Lee 	return ret;
322af517150SDavid Brownell }
323af517150SDavid Brownell 
/* Read the card's CSD register, via SPI data block or native CMD9. */
3240796e439SUlf Hansson int mmc_send_csd(struct mmc_card *card, u32 *csd)
3250796e439SUlf Hansson {
3260796e439SUlf Hansson 	if (mmc_host_is_spi(card->host))
327b53f0beeSYue Hu 		return mmc_spi_send_cxd(card->host, csd, MMC_SEND_CSD);
3280796e439SUlf Hansson 
3290796e439SUlf Hansson 	return mmc_send_cxd_native(card->host, card->rca << 16,	csd,
3300796e439SUlf Hansson 				MMC_SEND_CSD);
3310796e439SUlf Hansson }
3320796e439SUlf Hansson 
/* Read the CID register, via SPI CMD10 or native CMD2 (ALL_SEND_CID). */
333a1473732SUlf Hansson int mmc_send_cid(struct mmc_host *host, u32 *cid)
334a1473732SUlf Hansson {
335a1473732SUlf Hansson 	if (mmc_host_is_spi(host))
336b53f0beeSYue Hu 		return mmc_spi_send_cxd(host, cid, MMC_SEND_CID);
337a1473732SUlf Hansson 
338c92e68d8SUlf Hansson 	return mmc_send_cxd_native(host, 0, cid, MMC_ALL_SEND_CID);
339a1473732SUlf Hansson }
340a1473732SUlf Hansson 
/*
 * Read the 512-byte EXT_CSD register (CMD8) into a freshly allocated
 * buffer. On success *new_ext_csd receives the buffer and ownership
 * passes to the caller, who must kfree() it; on failure the buffer is
 * freed here.
 */
341e21aa519SUlf Hansson int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)
342e21aa519SUlf Hansson {
343e21aa519SUlf Hansson 	int err;
344e21aa519SUlf Hansson 	u8 *ext_csd;
345e21aa519SUlf Hansson 
346e21aa519SUlf Hansson 	if (!card || !new_ext_csd)
347e21aa519SUlf Hansson 		return -EINVAL;
348e21aa519SUlf Hansson 
349e21aa519SUlf Hansson 	if (!mmc_can_ext_csd(card))
350e21aa519SUlf Hansson 		return -EOPNOTSUPP;
351e21aa519SUlf Hansson 
352e21aa519SUlf Hansson 	/*
353e21aa519SUlf Hansson 	 * As the ext_csd is so large and mostly unused, we don't store the
354e21aa519SUlf Hansson 	 * raw block in mmc_card.
355e21aa519SUlf Hansson 	 */
35622b78700SUlf Hansson 	ext_csd = kzalloc(512, GFP_KERNEL);
357e21aa519SUlf Hansson 	if (!ext_csd)
358e21aa519SUlf Hansson 		return -ENOMEM;
359e21aa519SUlf Hansson 
360cec18ad9SUlf Hansson 	err = mmc_send_adtc_data(card, card->host, MMC_SEND_EXT_CSD, 0, ext_csd,
3612fc91e8bSUlf Hansson 				512);
362e21aa519SUlf Hansson 	if (err)
363e21aa519SUlf Hansson 		kfree(ext_csd);
364e21aa519SUlf Hansson 	else
365e21aa519SUlf Hansson 		*new_ext_csd = ext_csd;
366e21aa519SUlf Hansson 
367e21aa519SUlf Hansson 	return err;
368e21aa519SUlf Hansson }
369e21aa519SUlf Hansson EXPORT_SYMBOL_GPL(mmc_get_ext_csd);
370e21aa519SUlf Hansson 
/*
 * Read the OCR over SPI (CMD58). @highcap sets the HCS bit (bit 30) in
 * the argument. The OCR lands in resp[1] of the SPI R3 response and is
 * stored unconditionally; the caller should check the return value.
 */
371af517150SDavid Brownell int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp)
372af517150SDavid Brownell {
373c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
374af517150SDavid Brownell 	int err;
375af517150SDavid Brownell 
376af517150SDavid Brownell 	cmd.opcode = MMC_SPI_READ_OCR;
377af517150SDavid Brownell 	cmd.arg = highcap ? (1 << 30) : 0;
378af517150SDavid Brownell 	cmd.flags = MMC_RSP_SPI_R3;
379af517150SDavid Brownell 
380af517150SDavid Brownell 	err = mmc_wait_for_cmd(host, &cmd, 0);
381af517150SDavid Brownell 
382af517150SDavid Brownell 	*ocrp = cmd.resp[1];
383af517150SDavid Brownell 	return err;
384af517150SDavid Brownell }
385af517150SDavid Brownell 
/*
 * Toggle CRC checking on the SPI bus (CMD59). The cached
 * host->use_spi_crc state is only updated when the card accepted the
 * command.
 */
386af517150SDavid Brownell int mmc_spi_set_crc(struct mmc_host *host, int use_crc)
387af517150SDavid Brownell {
388c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
389af517150SDavid Brownell 	int err;
390af517150SDavid Brownell 
391af517150SDavid Brownell 	cmd.opcode = MMC_SPI_CRC_ON_OFF;
392af517150SDavid Brownell 	cmd.flags = MMC_RSP_SPI_R1;
393af517150SDavid Brownell 	cmd.arg = use_crc;
394af517150SDavid Brownell 
395af517150SDavid Brownell 	err = mmc_wait_for_cmd(host, &cmd, 0);
396af517150SDavid Brownell 	if (!err)
397af517150SDavid Brownell 		host->use_spi_crc = use_crc;
398af517150SDavid Brownell 	return err;
399af517150SDavid Brownell }
400af517150SDavid Brownell 
/*
 * Decode a post-CMD6 status word. Returns -EBADMSG on an illegal-command
 * (SPI) or switch-error (native) indication; other error bits are only
 * logged, since they may be stale leftovers from earlier commands.
 */
40120348d19SUlf Hansson static int mmc_switch_status_error(struct mmc_host *host, u32 status)
402ed16f58dSAdrian Hunter {
403ed16f58dSAdrian Hunter 	if (mmc_host_is_spi(host)) {
404ed16f58dSAdrian Hunter 		if (status & R1_SPI_ILLEGAL_COMMAND)
405ed16f58dSAdrian Hunter 			return -EBADMSG;
406ed16f58dSAdrian Hunter 	} else {
407a94a7483SShawn Lin 		if (R1_STATUS(status))
408ed16f58dSAdrian Hunter 			pr_warn("%s: unexpected status %#x after switch\n",
409ed16f58dSAdrian Hunter 				mmc_hostname(host), status);
410ed16f58dSAdrian Hunter 		if (status & R1_SWITCH_ERROR)
411ed16f58dSAdrian Hunter 			return -EBADMSG;
412ed16f58dSAdrian Hunter 	}
413ed16f58dSAdrian Hunter 	return 0;
414ed16f58dSAdrian Hunter }
415ed16f58dSAdrian Hunter 
41520348d19SUlf Hansson /* Caller must hold re-tuning */
/*
 * Fetch the card status (CMD13) and translate it into a CMD6 switch
 * result. When @crc_err_fatal is false, a CRC error (-EILSEQ) on the
 * status command itself is tolerated and reported as success.
 */
41660db8a47SUlf Hansson int mmc_switch_status(struct mmc_card *card, bool crc_err_fatal)
41720348d19SUlf Hansson {
41820348d19SUlf Hansson 	u32 status;
41920348d19SUlf Hansson 	int err;
42020348d19SUlf Hansson 
42120348d19SUlf Hansson 	err = mmc_send_status(card, &status);
422ef3d2322SAdrian Hunter 	if (!crc_err_fatal && err == -EILSEQ)
423ef3d2322SAdrian Hunter 		return 0;
42420348d19SUlf Hansson 	if (err)
42520348d19SUlf Hansson 		return err;
42620348d19SUlf Hansson 
42720348d19SUlf Hansson 	return mmc_switch_status_error(card->host, status);
42820348d19SUlf Hansson }
43020348d19SUlf Hansson 
/*
 * Busy-polling callback for __mmc_poll_for_busy(). Prefers the host's
 * ->card_busy() hook (DAT0 level) when available — except for
 * MMC_BUSY_IO, which must use CMD13 — and otherwise derives busyness
 * from the CMD13 status word. Sets *busy and returns 0, or a negative
 * error code.
 */
43104f967adSUlf Hansson static int mmc_busy_cb(void *cb_data, bool *busy)
4326972096aSUlf Hansson {
43304f967adSUlf Hansson 	struct mmc_busy_data *data = cb_data;
43404f967adSUlf Hansson 	struct mmc_host *host = data->card->host;
4356972096aSUlf Hansson 	u32 status = 0;
4366972096aSUlf Hansson 	int err;
4376972096aSUlf Hansson 
438*972d5084SUlf Hansson 	if (data->busy_cmd != MMC_BUSY_IO && host->ops->card_busy) {
4396972096aSUlf Hansson 		*busy = host->ops->card_busy(host);
4406972096aSUlf Hansson 		return 0;
4416972096aSUlf Hansson 	}
4426972096aSUlf Hansson 
44304f967adSUlf Hansson 	err = mmc_send_status(data->card, &status);
	/* Optionally treat a CRC error on the status poll as "still busy". */
44404f967adSUlf Hansson 	if (data->retry_crc_err && err == -EILSEQ) {
4456972096aSUlf Hansson 		*busy = true;
4466972096aSUlf Hansson 		return 0;
4476972096aSUlf Hansson 	}
4486972096aSUlf Hansson 	if (err)
4496972096aSUlf Hansson 		return err;
4506972096aSUlf Hansson 
	/* Per-operation decoding of error bits in the status word. */
45104f967adSUlf Hansson 	switch (data->busy_cmd) {
4520d84c3e6SUlf Hansson 	case MMC_BUSY_CMD6:
45304f967adSUlf Hansson 		err = mmc_switch_status_error(host, status);
4540d84c3e6SUlf Hansson 		break;
4550d84c3e6SUlf Hansson 	case MMC_BUSY_ERASE:
4560d84c3e6SUlf Hansson 		err = R1_STATUS(status) ? -EIO : 0;
4570d84c3e6SUlf Hansson 		break;
458490ff95fSUlf Hansson 	case MMC_BUSY_HPI:
459130206a6SUlf Hansson 	case MMC_BUSY_EXTR_SINGLE:
460*972d5084SUlf Hansson 	case MMC_BUSY_IO:
461490ff95fSUlf Hansson 		break;
4620d84c3e6SUlf Hansson 	default:
4630d84c3e6SUlf Hansson 		err = -EINVAL;
4640d84c3e6SUlf Hansson 	}
4650d84c3e6SUlf Hansson 
4666972096aSUlf Hansson 	if (err)
4676972096aSUlf Hansson 		return err;
4686972096aSUlf Hansson 
4692a1c7cdaSUlf Hansson 	*busy = !mmc_ready_for_data(status);
4706972096aSUlf Hansson 	return 0;
4716972096aSUlf Hansson }
4726972096aSUlf Hansson 
/*
 * Poll @busy_cb until the card reports not-busy or @timeout_ms expires.
 * The poll interval backs off exponentially (32 us doubling up to
 * ~32 ms) to avoid hogging the CPU. Returns 0 when not busy, -ETIMEDOUT
 * on expiry, or the callback's error.
 */
47304f967adSUlf Hansson int __mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
47404f967adSUlf Hansson 			int (*busy_cb)(void *cb_data, bool *busy),
47504f967adSUlf Hansson 			void *cb_data)
476716bdb89SUlf Hansson {
477716bdb89SUlf Hansson 	struct mmc_host *host = card->host;
478716bdb89SUlf Hansson 	int err;
479716bdb89SUlf Hansson 	unsigned long timeout;
480d46a24a9SUlf Hansson 	unsigned int udelay = 32, udelay_max = 32768;
481716bdb89SUlf Hansson 	bool expired = false;
482716bdb89SUlf Hansson 	bool busy = false;
483716bdb89SUlf Hansson 
484716bdb89SUlf Hansson 	timeout = jiffies + msecs_to_jiffies(timeout_ms) + 1;
485716bdb89SUlf Hansson 	do {
486716bdb89SUlf Hansson 		/*
48770562644SUlf Hansson 		 * Due to the possibility of being preempted while polling,
48870562644SUlf Hansson 		 * check the expiration time first.
489716bdb89SUlf Hansson 		 */
490716bdb89SUlf Hansson 		expired = time_after(jiffies, timeout);
49170562644SUlf Hansson 
49204f967adSUlf Hansson 		err = (*busy_cb)(cb_data, &busy);
4935ec32f84SUlf Hansson 		if (err)
4945ec32f84SUlf Hansson 			return err;
495716bdb89SUlf Hansson 
49670562644SUlf Hansson 		/* Timeout if the device still remains busy. */
49770562644SUlf Hansson 		if (expired && busy) {
49870562644SUlf Hansson 			pr_err("%s: Card stuck being busy! %s\n",
499716bdb89SUlf Hansson 				mmc_hostname(host), __func__);
500716bdb89SUlf Hansson 			return -ETIMEDOUT;
501716bdb89SUlf Hansson 		}
502d46a24a9SUlf Hansson 
503d46a24a9SUlf Hansson 		/* Throttle the polling rate to avoid hogging the CPU. */
504d46a24a9SUlf Hansson 		if (busy) {
505d46a24a9SUlf Hansson 			usleep_range(udelay, udelay * 2);
506d46a24a9SUlf Hansson 			if (udelay < udelay_max)
507d46a24a9SUlf Hansson 				udelay *= 2;
508d46a24a9SUlf Hansson 		}
50970562644SUlf Hansson 	} while (busy);
510716bdb89SUlf Hansson 
5115ec32f84SUlf Hansson 	return 0;
512716bdb89SUlf Hansson }
513716bdb89SUlf Hansson 
/*
 * Poll for the card to leave the busy state using the standard CMD13 /
 * ->card_busy() callback, configured for the given @busy_cmd operation.
 */
5140d84c3e6SUlf Hansson int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
51504f967adSUlf Hansson 		      bool retry_crc_err, enum mmc_busy_cmd busy_cmd)
5160d84c3e6SUlf Hansson {
51704f967adSUlf Hansson 	struct mmc_busy_data cb_data;
51804f967adSUlf Hansson 
51904f967adSUlf Hansson 	cb_data.card = card;
52004f967adSUlf Hansson 	cb_data.retry_crc_err = retry_crc_err;
52104f967adSUlf Hansson 	cb_data.busy_cmd = busy_cmd;
52204f967adSUlf Hansson 
52304f967adSUlf Hansson 	return __mmc_poll_for_busy(card, timeout_ms, &mmc_busy_cb, &cb_data);
5240d84c3e6SUlf Hansson }
525*972d5084SUlf Hansson EXPORT_SYMBOL_GPL(mmc_poll_for_busy);
5260d84c3e6SUlf Hansson 
/*
 * Set up @cmd's response flags for a busy-signalling command. Returns
 * true when an R1B response (HW busy detection) is used, false when the
 * command was downgraded to plain R1 because @timeout_ms exceeds the
 * host's max_busy_timeout.
 */
527e62f1e0bSUlf Hansson bool mmc_prepare_busy_cmd(struct mmc_host *host, struct mmc_command *cmd,
528e62f1e0bSUlf Hansson 			  unsigned int timeout_ms)
529e62f1e0bSUlf Hansson {
530e62f1e0bSUlf Hansson 	/*
531e62f1e0bSUlf Hansson 	 * If the max_busy_timeout of the host is specified, make sure it's
532e62f1e0bSUlf Hansson 	 * enough to fit the used timeout_ms. In case it's not, let's instruct
533e62f1e0bSUlf Hansson 	 * the host to avoid HW busy detection, by converting to a R1 response
534e62f1e0bSUlf Hansson 	 * instead of a R1B. Note, some hosts requires R1B, which also means
535e62f1e0bSUlf Hansson 	 * they are on their own when it comes to deal with the busy timeout.
536e62f1e0bSUlf Hansson 	 */
537e62f1e0bSUlf Hansson 	if (!(host->caps & MMC_CAP_NEED_RSP_BUSY) && host->max_busy_timeout &&
538e62f1e0bSUlf Hansson 	    (timeout_ms > host->max_busy_timeout)) {
539e62f1e0bSUlf Hansson 		cmd->flags = MMC_CMD_AC | MMC_RSP_SPI_R1 | MMC_RSP_R1;
540e62f1e0bSUlf Hansson 		return false;
541e62f1e0bSUlf Hansson 	}
542e62f1e0bSUlf Hansson 
543e62f1e0bSUlf Hansson 	cmd->flags = MMC_CMD_AC | MMC_RSP_SPI_R1B | MMC_RSP_R1B;
544e62f1e0bSUlf Hansson 	cmd->busy_timeout = timeout_ms;
545e62f1e0bSUlf Hansson 	return true;
546e62f1e0bSUlf Hansson }
547e62f1e0bSUlf Hansson 
548d3a8d95dSAndrei Warkentin /**
549950d56acSJaehoon Chung  *	__mmc_switch - modify EXT_CSD register
550d3a8d95dSAndrei Warkentin  *	@card: the MMC card associated with the data transfer
551d3a8d95dSAndrei Warkentin  *	@set: cmd set values
552d3a8d95dSAndrei Warkentin  *	@index: EXT_CSD register index
553d3a8d95dSAndrei Warkentin  *	@value: value to program into EXT_CSD register
554d3a8d95dSAndrei Warkentin  *	@timeout_ms: timeout (ms) for operation performed by register write,
555d3a8d95dSAndrei Warkentin  *                   timeout of zero implies maximum possible timeout
556aa33ce3cSUlf Hansson  *	@timing: new timing to change to
557878e200bSUlf Hansson  *	@send_status: send status cmd to poll for busy
558625228faSUlf Hansson  *	@retry_crc_err: retry when CRC errors when polling with CMD13 for busy
5595e52a168SBean Huo  *	@retries: number of retries
560d3a8d95dSAndrei Warkentin  *
561d3a8d95dSAndrei Warkentin  *	Modifies the EXT_CSD register for selected card.
562d3a8d95dSAndrei Warkentin  */
563950d56acSJaehoon Chung int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
564aa33ce3cSUlf Hansson 		unsigned int timeout_ms, unsigned char timing,
5655e52a168SBean Huo 		bool send_status, bool retry_crc_err, unsigned int retries)
566da7fbe58SPierre Ossman {
567636bd13cSUlf Hansson 	struct mmc_host *host = card->host;
568da7fbe58SPierre Ossman 	int err;
569c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
570e62f1e0bSUlf Hansson 	bool use_r1b_resp;
571aa33ce3cSUlf Hansson 	unsigned char old_timing = host->ios.timing;
572b9ec2616SUlf Hansson 
	/* Re-tuning must stay held across CMD6 and the busy polling. */
573c6dbab9cSAdrian Hunter 	mmc_retune_hold(host);
574c6dbab9cSAdrian Hunter 
575533a6cfeSUlf Hansson 	if (!timeout_ms) {
576533a6cfeSUlf Hansson 		pr_warn("%s: unspecified timeout for CMD6 - use generic\n",
577533a6cfeSUlf Hansson 			mmc_hostname(host));
578533a6cfeSUlf Hansson 		timeout_ms = card->ext_csd.generic_cmd6_time;
579533a6cfeSUlf Hansson 	}
580533a6cfeSUlf Hansson 
581da7fbe58SPierre Ossman 	cmd.opcode = MMC_SWITCH;
582da7fbe58SPierre Ossman 	cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
583da7fbe58SPierre Ossman 		  (index << 16) |
584da7fbe58SPierre Ossman 		  (value << 8) |
585da7fbe58SPierre Ossman 		  set;
586e62f1e0bSUlf Hansson 	use_r1b_resp = mmc_prepare_busy_cmd(host, &cmd, timeout_ms);
587b9ec2616SUlf Hansson 
5885e52a168SBean Huo 	err = mmc_wait_for_cmd(host, &cmd, retries);
58917b0429dSPierre Ossman 	if (err)
590c6dbab9cSAdrian Hunter 		goto out;
591da7fbe58SPierre Ossman 
592cb26ce06SUlf Hansson 	/* If SPI or used HW busy detection above, then we don't need to poll. */
593cb26ce06SUlf Hansson 	if (((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp) ||
594ee6ff743SUlf Hansson 		mmc_host_is_spi(host))
595aa33ce3cSUlf Hansson 		goto out_tim;
596a27fbf2fSSeungwon Jeon 
5971e0b069bSUlf Hansson 	/*
5981e0b069bSUlf Hansson 	 * If the host doesn't support HW polling via the ->card_busy() ops and
5991e0b069bSUlf Hansson 	 * when it's not allowed to poll by using CMD13, then we need to rely on
6001e0b069bSUlf Hansson 	 * waiting the stated timeout to be sufficient.
6011e0b069bSUlf Hansson 	 */
6021e0b069bSUlf Hansson 	if (!send_status && !host->ops->card_busy) {
6031e0b069bSUlf Hansson 		mmc_delay(timeout_ms);
6041e0b069bSUlf Hansson 		goto out_tim;
6051e0b069bSUlf Hansson 	}
6061e0b069bSUlf Hansson 
607716bdb89SUlf Hansson 	/* Let's try to poll to find out when the command is completed. */
60804f967adSUlf Hansson 	err = mmc_poll_for_busy(card, timeout_ms, retry_crc_err, MMC_BUSY_CMD6);
609ee6ff743SUlf Hansson 	if (err)
610ee6ff743SUlf Hansson 		goto out;
611aa33ce3cSUlf Hansson 
612aa33ce3cSUlf Hansson out_tim:
613ee6ff743SUlf Hansson 	/* Switch to new timing before check switch status. */
614ee6ff743SUlf Hansson 	if (timing)
615ee6ff743SUlf Hansson 		mmc_set_timing(host, timing);
616ee6ff743SUlf Hansson 
	/* Verify the switch took effect; roll back timing on failure. */
617ee6ff743SUlf Hansson 	if (send_status) {
61860db8a47SUlf Hansson 		err = mmc_switch_status(card, true);
619aa33ce3cSUlf Hansson 		if (err && timing)
620aa33ce3cSUlf Hansson 			mmc_set_timing(host, old_timing);
621ee6ff743SUlf Hansson 	}
622c6dbab9cSAdrian Hunter out:
623c6dbab9cSAdrian Hunter 	mmc_retune_release(host);
624ef0b27d4SAdrian Hunter 
625c6dbab9cSAdrian Hunter 	return err;
626da7fbe58SPierre Ossman }
627950d56acSJaehoon Chung 
/*
 * Simple EXT_CSD write: no timing change, status polling enabled, CRC
 * errors during polling are fatal, default retry count.
 */
628950d56acSJaehoon Chung int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
629950d56acSJaehoon Chung 		unsigned int timeout_ms)
630950d56acSJaehoon Chung {
631aa33ce3cSUlf Hansson 	return __mmc_switch(card, set, index, value, timeout_ms, 0,
6325e52a168SBean Huo 			    true, false, MMC_CMD_RETRIES);
633950d56acSJaehoon Chung }
634d3a8d95dSAndrei Warkentin EXPORT_SYMBOL_GPL(mmc_switch);
635da7fbe58SPierre Ossman 
6369979dbe5SChaotian Jing int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error)
637996903deSMinda Chen {
638c7836d15SMasahiro Yamada 	struct mmc_request mrq = {};
639c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
640c7836d15SMasahiro Yamada 	struct mmc_data data = {};
641996903deSMinda Chen 	struct scatterlist sg;
642fe5afb13SUlf Hansson 	struct mmc_ios *ios = &host->ios;
643996903deSMinda Chen 	const u8 *tuning_block_pattern;
644996903deSMinda Chen 	int size, err = 0;
645996903deSMinda Chen 	u8 *data_buf;
646996903deSMinda Chen 
647996903deSMinda Chen 	if (ios->bus_width == MMC_BUS_WIDTH_8) {
648996903deSMinda Chen 		tuning_block_pattern = tuning_blk_pattern_8bit;
649996903deSMinda Chen 		size = sizeof(tuning_blk_pattern_8bit);
650996903deSMinda Chen 	} else if (ios->bus_width == MMC_BUS_WIDTH_4) {
651996903deSMinda Chen 		tuning_block_pattern = tuning_blk_pattern_4bit;
652996903deSMinda Chen 		size = sizeof(tuning_blk_pattern_4bit);
653996903deSMinda Chen 	} else
654996903deSMinda Chen 		return -EINVAL;
655996903deSMinda Chen 
656996903deSMinda Chen 	data_buf = kzalloc(size, GFP_KERNEL);
657996903deSMinda Chen 	if (!data_buf)
658996903deSMinda Chen 		return -ENOMEM;
659996903deSMinda Chen 
660996903deSMinda Chen 	mrq.cmd = &cmd;
661996903deSMinda Chen 	mrq.data = &data;
662996903deSMinda Chen 
663996903deSMinda Chen 	cmd.opcode = opcode;
664996903deSMinda Chen 	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
665996903deSMinda Chen 
666996903deSMinda Chen 	data.blksz = size;
667996903deSMinda Chen 	data.blocks = 1;
668996903deSMinda Chen 	data.flags = MMC_DATA_READ;
669996903deSMinda Chen 
670996903deSMinda Chen 	/*
671996903deSMinda Chen 	 * According to the tuning specs, Tuning process
672996903deSMinda Chen 	 * is normally shorter 40 executions of CMD19,
673996903deSMinda Chen 	 * and timeout value should be shorter than 150 ms
674996903deSMinda Chen 	 */
675996903deSMinda Chen 	data.timeout_ns = 150 * NSEC_PER_MSEC;
676996903deSMinda Chen 
677996903deSMinda Chen 	data.sg = &sg;
678996903deSMinda Chen 	data.sg_len = 1;
679996903deSMinda Chen 	sg_init_one(&sg, data_buf, size);
680996903deSMinda Chen 
681fe5afb13SUlf Hansson 	mmc_wait_for_req(host, &mrq);
682996903deSMinda Chen 
6839979dbe5SChaotian Jing 	if (cmd_error)
6849979dbe5SChaotian Jing 		*cmd_error = cmd.error;
6859979dbe5SChaotian Jing 
686996903deSMinda Chen 	if (cmd.error) {
687996903deSMinda Chen 		err = cmd.error;
688996903deSMinda Chen 		goto out;
689996903deSMinda Chen 	}
690996903deSMinda Chen 
691996903deSMinda Chen 	if (data.error) {
692996903deSMinda Chen 		err = data.error;
693996903deSMinda Chen 		goto out;
694996903deSMinda Chen 	}
695996903deSMinda Chen 
696996903deSMinda Chen 	if (memcmp(data_buf, tuning_block_pattern, size))
697996903deSMinda Chen 		err = -EIO;
698996903deSMinda Chen 
699996903deSMinda Chen out:
700996903deSMinda Chen 	kfree(data_buf);
701996903deSMinda Chen 	return err;
702996903deSMinda Chen }
703996903deSMinda Chen EXPORT_SYMBOL_GPL(mmc_send_tuning);
704996903deSMinda Chen 
70521adc2e4SWolfram Sang int mmc_send_abort_tuning(struct mmc_host *host, u32 opcode)
706e711f030SAdrian Hunter {
707c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
708e711f030SAdrian Hunter 
709e711f030SAdrian Hunter 	/*
710e711f030SAdrian Hunter 	 * eMMC specification specifies that CMD12 can be used to stop a tuning
711e711f030SAdrian Hunter 	 * command, but SD specification does not, so do nothing unless it is
712e711f030SAdrian Hunter 	 * eMMC.
713e711f030SAdrian Hunter 	 */
714e711f030SAdrian Hunter 	if (opcode != MMC_SEND_TUNING_BLOCK_HS200)
715e711f030SAdrian Hunter 		return 0;
716e711f030SAdrian Hunter 
717e711f030SAdrian Hunter 	cmd.opcode = MMC_STOP_TRANSMISSION;
718e711f030SAdrian Hunter 	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
719e711f030SAdrian Hunter 
720e711f030SAdrian Hunter 	/*
721e711f030SAdrian Hunter 	 * For drivers that override R1 to R1b, set an arbitrary timeout based
722e711f030SAdrian Hunter 	 * on the tuning timeout i.e. 150ms.
723e711f030SAdrian Hunter 	 */
724e711f030SAdrian Hunter 	cmd.busy_timeout = 150;
725e711f030SAdrian Hunter 
726e711f030SAdrian Hunter 	return mmc_wait_for_cmd(host, &cmd, 0);
727e711f030SAdrian Hunter }
72821adc2e4SWolfram Sang EXPORT_SYMBOL_GPL(mmc_send_abort_tuning);
729e711f030SAdrian Hunter 
/*
 * Run one half of the eMMC bus test: send the test pattern with BUS_TEST_W
 * or read the card's response with BUS_TEST_R, at the given bus width
 * (@len is the transfer length in bytes: 8 for 8-bit, 4 for 4-bit).
 *
 * Returns 0 on success, a command/data error from the transfer, -EINVAL for
 * an unsupported length, -ENOMEM on allocation failure, or -EIO when the
 * read-back data does not match the expected (inverted) pattern.
 */
static int
mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
		  u8 len)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;
	u8 *data_buf;
	u8 *test_buf;
	int i, err;
	static u8 testdata_8bit[8] = { 0x55, 0xaa, 0, 0, 0, 0, 0, 0 };
	static u8 testdata_4bit[4] = { 0x5a, 0, 0, 0 };

	/* dma onto stack is unsafe/nonportable, but callers to this
	 * routine normally provide temporary on-stack buffers ...
	 */
	data_buf = kmalloc(len, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;

	if (len == 8)
		test_buf = testdata_8bit;
	else if (len == 4)
		test_buf = testdata_4bit;
	else {
		pr_err("%s: Invalid bus_width %d\n",
		       mmc_hostname(host), len);
		kfree(data_buf);
		return -EINVAL;
	}

	/* Only the write half actually transfers the pattern to the card. */
	if (opcode == MMC_BUS_TEST_W)
		memcpy(data_buf, test_buf, len);

	mrq.cmd = &cmd;
	mrq.data = &data;
	cmd.opcode = opcode;
	cmd.arg = 0;

	/* NOTE HACK:  the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID.  Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = len;
	data.blocks = 1;
	if (opcode == MMC_BUS_TEST_R)
		data.flags = MMC_DATA_READ;
	else
		data.flags = MMC_DATA_WRITE;

	data.sg = &sg;
	data.sg_len = 1;
	mmc_set_data_timeout(&data, card);
	sg_init_one(&sg, data_buf, len);
	mmc_wait_for_req(host, &mrq);
	err = 0;
	if (opcode == MMC_BUS_TEST_R) {
		/*
		 * The card returns the bitwise inverse of the written
		 * pattern, so a healthy bus XORs to 0xff.
		 * NOTE(review): only the first len/4 bytes are compared
		 * (long-standing upstream behaviour); the remaining bytes
		 * of the pattern are zero-filled anyway.
		 */
		for (i = 0; i < len / 4; i++)
			if ((test_buf[i] ^ data_buf[i]) != 0xff) {
				err = -EIO;
				break;
			}
	}
	kfree(data_buf);

	/* Transfer errors take precedence over a pattern mismatch. */
	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return err;
}
80622113efdSAries Lee 
80722113efdSAries Lee int mmc_bus_test(struct mmc_card *card, u8 bus_width)
80822113efdSAries Lee {
8090899e741SMasahiro Yamada 	int width;
81022113efdSAries Lee 
81122113efdSAries Lee 	if (bus_width == MMC_BUS_WIDTH_8)
81222113efdSAries Lee 		width = 8;
81322113efdSAries Lee 	else if (bus_width == MMC_BUS_WIDTH_4)
81422113efdSAries Lee 		width = 4;
81522113efdSAries Lee 	else if (bus_width == MMC_BUS_WIDTH_1)
81622113efdSAries Lee 		return 0; /* no need for test */
81722113efdSAries Lee 	else
81822113efdSAries Lee 		return -EINVAL;
81922113efdSAries Lee 
82022113efdSAries Lee 	/*
82122113efdSAries Lee 	 * Ignore errors from BUS_TEST_W.  BUS_TEST_R will fail if there
82222113efdSAries Lee 	 * is a problem.  This improves chances that the test will work.
82322113efdSAries Lee 	 */
82422113efdSAries Lee 	mmc_send_bus_test(card, card->host, MMC_BUS_TEST_W, width);
8250899e741SMasahiro Yamada 	return mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
82622113efdSAries Lee }
827eb0d8f13SJaehoon Chung 
/*
 * Issue the card's HPI command (CMD12 or CMD13, per EXT_CSD) to interrupt an
 * ongoing operation, then wait until the card has left the busy state.
 * Returns 0 when the card is no longer busy, otherwise a negative error.
 */
static int mmc_send_hpi_cmd(struct mmc_card *card)
{
	/* Out-of-interrupt time from EXT_CSD bounds how long we may wait. */
	unsigned int busy_timeout_ms = card->ext_csd.out_of_int_time;
	struct mmc_host *host = card->host;
	bool use_r1b_resp = false;
	struct mmc_command cmd = {};
	int err;

	cmd.opcode = card->ext_csd.hpi_cmd;
	/* RCA in bits [31:16]; LSB set to request HPI. */
	cmd.arg = card->rca << 16 | 1;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	/*
	 * Only the CMD12 variant may be upgraded to an R1b busy-signalling
	 * response; mmc_prepare_busy_cmd() decides based on host limits.
	 */
	if (cmd.opcode == MMC_STOP_TRANSMISSION)
		use_r1b_resp = mmc_prepare_busy_cmd(host, &cmd,
						    busy_timeout_ms);

	err = mmc_wait_for_cmd(host, &cmd, 0);
	if (err) {
		pr_warn("%s: HPI error %d. Command response %#x\n",
			mmc_hostname(host), err, cmd.resp[0]);
		return err;
	}

	/* No need to poll when using HW busy detection. */
	if (host->caps & MMC_CAP_WAIT_WHILE_BUSY && use_r1b_resp)
		return 0;

	/* Let's poll to find out when the HPI request completes. */
	return mmc_poll_for_busy(card, busy_timeout_ms, false, MMC_BUSY_HPI);
}
858148bcab2SUlf Hansson 
8590f2c0512SUlf Hansson /**
8600f2c0512SUlf Hansson  *	mmc_interrupt_hpi - Issue for High priority Interrupt
8610f2c0512SUlf Hansson  *	@card: the MMC card associated with the HPI transfer
8620f2c0512SUlf Hansson  *
8630f2c0512SUlf Hansson  *	Issued High Priority Interrupt, and check for card status
8640f2c0512SUlf Hansson  *	until out-of prg-state.
8650f2c0512SUlf Hansson  */
86644aebc16SJason Yan static int mmc_interrupt_hpi(struct mmc_card *card)
8670f2c0512SUlf Hansson {
8680f2c0512SUlf Hansson 	int err;
8690f2c0512SUlf Hansson 	u32 status;
8700f2c0512SUlf Hansson 
8710f2c0512SUlf Hansson 	if (!card->ext_csd.hpi_en) {
8720f2c0512SUlf Hansson 		pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
8730f2c0512SUlf Hansson 		return 1;
8740f2c0512SUlf Hansson 	}
8750f2c0512SUlf Hansson 
8760f2c0512SUlf Hansson 	err = mmc_send_status(card, &status);
8770f2c0512SUlf Hansson 	if (err) {
8780f2c0512SUlf Hansson 		pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
8790f2c0512SUlf Hansson 		goto out;
8800f2c0512SUlf Hansson 	}
8810f2c0512SUlf Hansson 
8820f2c0512SUlf Hansson 	switch (R1_CURRENT_STATE(status)) {
8830f2c0512SUlf Hansson 	case R1_STATE_IDLE:
8840f2c0512SUlf Hansson 	case R1_STATE_READY:
8850f2c0512SUlf Hansson 	case R1_STATE_STBY:
8860f2c0512SUlf Hansson 	case R1_STATE_TRAN:
8870f2c0512SUlf Hansson 		/*
8880f2c0512SUlf Hansson 		 * In idle and transfer states, HPI is not needed and the caller
8890f2c0512SUlf Hansson 		 * can issue the next intended command immediately
8900f2c0512SUlf Hansson 		 */
8910f2c0512SUlf Hansson 		goto out;
8920f2c0512SUlf Hansson 	case R1_STATE_PRG:
8930f2c0512SUlf Hansson 		break;
8940f2c0512SUlf Hansson 	default:
8950f2c0512SUlf Hansson 		/* In all other states, it's illegal to issue HPI */
8960f2c0512SUlf Hansson 		pr_debug("%s: HPI cannot be sent. Card state=%d\n",
8970f2c0512SUlf Hansson 			mmc_hostname(card->host), R1_CURRENT_STATE(status));
8980f2c0512SUlf Hansson 		err = -EINVAL;
8990f2c0512SUlf Hansson 		goto out;
9000f2c0512SUlf Hansson 	}
9010f2c0512SUlf Hansson 
9029f94d047SUlf Hansson 	err = mmc_send_hpi_cmd(card);
9030f2c0512SUlf Hansson out:
9040f2c0512SUlf Hansson 	return err;
9050f2c0512SUlf Hansson }
9060f2c0512SUlf Hansson 
907148bcab2SUlf Hansson int mmc_can_ext_csd(struct mmc_card *card)
908148bcab2SUlf Hansson {
909148bcab2SUlf Hansson 	return (card && card->csd.mmca_vsn > CSD_SPEC_VER_3);
910148bcab2SUlf Hansson }
911b658af71SAdrian Hunter 
9121cf8f7e5SUlf Hansson static int mmc_read_bkops_status(struct mmc_card *card)
9131cf8f7e5SUlf Hansson {
9141cf8f7e5SUlf Hansson 	int err;
9151cf8f7e5SUlf Hansson 	u8 *ext_csd;
9161cf8f7e5SUlf Hansson 
9171cf8f7e5SUlf Hansson 	err = mmc_get_ext_csd(card, &ext_csd);
9181cf8f7e5SUlf Hansson 	if (err)
9191cf8f7e5SUlf Hansson 		return err;
9201cf8f7e5SUlf Hansson 
9211cf8f7e5SUlf Hansson 	card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS];
9221cf8f7e5SUlf Hansson 	card->ext_csd.raw_exception_status = ext_csd[EXT_CSD_EXP_EVENTS_STATUS];
9231cf8f7e5SUlf Hansson 	kfree(ext_csd);
9241cf8f7e5SUlf Hansson 	return 0;
9251cf8f7e5SUlf Hansson }
9261cf8f7e5SUlf Hansson 
9271cf8f7e5SUlf Hansson /**
9280c204979SUlf Hansson  *	mmc_run_bkops - Run BKOPS for supported cards
9290c204979SUlf Hansson  *	@card: MMC card to run BKOPS for
9301cf8f7e5SUlf Hansson  *
9310c204979SUlf Hansson  *	Run background operations synchronously for cards having manual BKOPS
9320c204979SUlf Hansson  *	enabled and in case it reports urgent BKOPS level.
9331cf8f7e5SUlf Hansson */
9340c204979SUlf Hansson void mmc_run_bkops(struct mmc_card *card)
9351cf8f7e5SUlf Hansson {
9361cf8f7e5SUlf Hansson 	int err;
9371cf8f7e5SUlf Hansson 
9380c204979SUlf Hansson 	if (!card->ext_csd.man_bkops_en)
9391cf8f7e5SUlf Hansson 		return;
9401cf8f7e5SUlf Hansson 
9411cf8f7e5SUlf Hansson 	err = mmc_read_bkops_status(card);
9421cf8f7e5SUlf Hansson 	if (err) {
9431cf8f7e5SUlf Hansson 		pr_err("%s: Failed to read bkops status: %d\n",
9441cf8f7e5SUlf Hansson 		       mmc_hostname(card->host), err);
9451cf8f7e5SUlf Hansson 		return;
9461cf8f7e5SUlf Hansson 	}
9471cf8f7e5SUlf Hansson 
9480c204979SUlf Hansson 	if (!card->ext_csd.raw_bkops_status ||
9490c204979SUlf Hansson 	    card->ext_csd.raw_bkops_status < EXT_CSD_BKOPS_LEVEL_2)
9501cf8f7e5SUlf Hansson 		return;
9511cf8f7e5SUlf Hansson 
9521cf8f7e5SUlf Hansson 	mmc_retune_hold(card->host);
9531cf8f7e5SUlf Hansson 
9540c204979SUlf Hansson 	/*
9550c204979SUlf Hansson 	 * For urgent BKOPS status, LEVEL_2 and higher, let's execute
9560c204979SUlf Hansson 	 * synchronously. Future wise, we may consider to start BKOPS, for less
9570c204979SUlf Hansson 	 * urgent levels by using an asynchronous background task, when idle.
9580c204979SUlf Hansson 	 */
9590c204979SUlf Hansson 	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
96024ed3bd0SUlf Hansson 			 EXT_CSD_BKOPS_START, 1, MMC_BKOPS_TIMEOUT_MS);
9610c204979SUlf Hansson 	if (err)
9621cf8f7e5SUlf Hansson 		pr_warn("%s: Error %d starting bkops\n",
9631cf8f7e5SUlf Hansson 			mmc_hostname(card->host), err);
9641cf8f7e5SUlf Hansson 
9651cf8f7e5SUlf Hansson 	mmc_retune_release(card->host);
9661cf8f7e5SUlf Hansson }
9670c204979SUlf Hansson EXPORT_SYMBOL(mmc_run_bkops);
9681cf8f7e5SUlf Hansson 
969b658af71SAdrian Hunter static int mmc_cmdq_switch(struct mmc_card *card, bool enable)
970b658af71SAdrian Hunter {
971b658af71SAdrian Hunter 	u8 val = enable ? EXT_CSD_CMDQ_MODE_ENABLED : 0;
972b658af71SAdrian Hunter 	int err;
973b658af71SAdrian Hunter 
974b658af71SAdrian Hunter 	if (!card->ext_csd.cmdq_support)
975b658af71SAdrian Hunter 		return -EOPNOTSUPP;
976b658af71SAdrian Hunter 
977b658af71SAdrian Hunter 	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_CMDQ_MODE_EN,
978b658af71SAdrian Hunter 			 val, card->ext_csd.generic_cmd6_time);
979b658af71SAdrian Hunter 	if (!err)
980b658af71SAdrian Hunter 		card->ext_csd.cmdq_en = enable;
981b658af71SAdrian Hunter 
982b658af71SAdrian Hunter 	return err;
983b658af71SAdrian Hunter }
984b658af71SAdrian Hunter 
/* Enable the card's command queue; see mmc_cmdq_switch() for errors. */
int mmc_cmdq_enable(struct mmc_card *card)
{
	return mmc_cmdq_switch(card, true);
}
EXPORT_SYMBOL_GPL(mmc_cmdq_enable);
990b658af71SAdrian Hunter 
/* Disable the card's command queue; see mmc_cmdq_switch() for errors. */
int mmc_cmdq_disable(struct mmc_card *card)
{
	return mmc_cmdq_switch(card, false);
}
EXPORT_SYMBOL_GPL(mmc_cmdq_disable);
99655c2b8b9SUlf Hansson 
/**
 *	mmc_sanitize - Start a sanitize operation on the card
 *	@card: the MMC card to sanitize
 *	@timeout_ms: busy timeout in ms, 0 selects MMC_SANITIZE_TIMEOUT_MS
 *
 *	Starts EXT_CSD SANITIZE_START and waits for completion. On timeout,
 *	attempts to abort the operation with an HPI command. Returns 0 on
 *	success, -EOPNOTSUPP if sanitize is unsupported, or the switch error.
 */
int mmc_sanitize(struct mmc_card *card, unsigned int timeout_ms)
{
	struct mmc_host *host = card->host;
	int err;

	if (!mmc_can_sanitize(card)) {
		pr_warn("%s: Sanitize not supported\n", mmc_hostname(host));
		return -EOPNOTSUPP;
	}

	if (!timeout_ms)
		timeout_ms = MMC_SANITIZE_TIMEOUT_MS;

	pr_debug("%s: Sanitize in progress...\n", mmc_hostname(host));

	/* Hold re-tuning for the duration of the (potentially long) switch. */
	mmc_retune_hold(host);

	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_SANITIZE_START,
			   1, timeout_ms, 0, true, false, 0);
	if (err)
		pr_err("%s: Sanitize failed err=%d\n", mmc_hostname(host), err);

	/*
	 * If the sanitize operation timed out, the card is probably still busy
	 * in the R1_STATE_PRG. Rather than continue to wait, let's try to abort
	 * it with a HPI command to get back into R1_STATE_TRAN.
	 */
	if (err == -ETIMEDOUT && !mmc_interrupt_hpi(card))
		pr_warn("%s: Sanitize aborted\n", mmc_hostname(host));

	mmc_retune_release(host);

	pr_debug("%s: Sanitize completed\n", mmc_hostname(host));
	return err;
}
EXPORT_SYMBOL_GPL(mmc_sanitize);
1033