xref: /openbmc/linux/drivers/mmc/core/mmc_ops.c (revision 04f967ad)
12874c5fdSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-or-later
2da7fbe58SPierre Ossman /*
370f10482SPierre Ossman  *  linux/drivers/mmc/core/mmc_ops.h
4da7fbe58SPierre Ossman  *
5da7fbe58SPierre Ossman  *  Copyright 2006-2007 Pierre Ossman
6da7fbe58SPierre Ossman  */
7da7fbe58SPierre Ossman 
85a0e3ad6STejun Heo #include <linux/slab.h>
93ef77af1SPaul Gortmaker #include <linux/export.h>
10da7fbe58SPierre Ossman #include <linux/types.h>
11da7fbe58SPierre Ossman #include <linux/scatterlist.h>
12da7fbe58SPierre Ossman 
13da7fbe58SPierre Ossman #include <linux/mmc/host.h>
14da7fbe58SPierre Ossman #include <linux/mmc/card.h>
15da7fbe58SPierre Ossman #include <linux/mmc/mmc.h>
16da7fbe58SPierre Ossman 
17da7fbe58SPierre Ossman #include "core.h"
181cf8f7e5SUlf Hansson #include "card.h"
19c6dbab9cSAdrian Hunter #include "host.h"
20da7fbe58SPierre Ossman #include "mmc_ops.h"
21da7fbe58SPierre Ossman 
/* Worst-case timeouts (in ms) for long-running card maintenance operations. */
#define MMC_BKOPS_TIMEOUT_MS		(120 * 1000) /* 120s */
#define MMC_CACHE_FLUSH_TIMEOUT_MS	(30 * 1000) /* 30s */
#define MMC_SANITIZE_TIMEOUT_MS		(240 * 1000) /* 240s */
258fee476bSTrey Ramsay 
/*
 * Reference tuning-block patterns, compared against the data returned by
 * the card during tuning (see mmc_send_tuning() below).  The pattern is
 * selected by the configured bus width: 64 bytes for 4-bit, 128 bytes
 * for 8-bit.
 */
static const u8 tuning_blk_pattern_4bit[] = {
	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
};

static const u8 tuning_blk_pattern_8bit[] = {
	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
};
5504cdbbfaSUlf Hansson 
/* Context handed to mmc_busy_cb() while polling a card for busy. */
struct mmc_busy_data {
	struct mmc_card *card;
	bool retry_crc_err;		/* treat -EILSEQ from CMD13 as "still busy" */
	enum mmc_busy_cmd busy_cmd;	/* which command's completion we await */
};
61*04f967adSUlf Hansson 
622185bc2cSUlf Hansson int __mmc_send_status(struct mmc_card *card, u32 *status, unsigned int retries)
63a27fbf2fSSeungwon Jeon {
64a27fbf2fSSeungwon Jeon 	int err;
65c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
66a27fbf2fSSeungwon Jeon 
67a27fbf2fSSeungwon Jeon 	cmd.opcode = MMC_SEND_STATUS;
68a27fbf2fSSeungwon Jeon 	if (!mmc_host_is_spi(card->host))
69a27fbf2fSSeungwon Jeon 		cmd.arg = card->rca << 16;
70a27fbf2fSSeungwon Jeon 	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
71a27fbf2fSSeungwon Jeon 
722185bc2cSUlf Hansson 	err = mmc_wait_for_cmd(card->host, &cmd, retries);
73a27fbf2fSSeungwon Jeon 	if (err)
74a27fbf2fSSeungwon Jeon 		return err;
75a27fbf2fSSeungwon Jeon 
76a27fbf2fSSeungwon Jeon 	/* NOTE: callers are required to understand the difference
77a27fbf2fSSeungwon Jeon 	 * between "native" and SPI format status words!
78a27fbf2fSSeungwon Jeon 	 */
79a27fbf2fSSeungwon Jeon 	if (status)
80a27fbf2fSSeungwon Jeon 		*status = cmd.resp[0];
81a27fbf2fSSeungwon Jeon 
82a27fbf2fSSeungwon Jeon 	return 0;
83a27fbf2fSSeungwon Jeon }
842185bc2cSUlf Hansson EXPORT_SYMBOL_GPL(__mmc_send_status);
852185bc2cSUlf Hansson 
862185bc2cSUlf Hansson int mmc_send_status(struct mmc_card *card, u32 *status)
872185bc2cSUlf Hansson {
882185bc2cSUlf Hansson 	return __mmc_send_status(card, status, MMC_CMD_RETRIES);
892185bc2cSUlf Hansson }
901bee324aSLinus Walleij EXPORT_SYMBOL_GPL(mmc_send_status);
91a27fbf2fSSeungwon Jeon 
92da7fbe58SPierre Ossman static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card)
93da7fbe58SPierre Ossman {
94c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
95da7fbe58SPierre Ossman 
96da7fbe58SPierre Ossman 	cmd.opcode = MMC_SELECT_CARD;
97da7fbe58SPierre Ossman 
98da7fbe58SPierre Ossman 	if (card) {
99da7fbe58SPierre Ossman 		cmd.arg = card->rca << 16;
100da7fbe58SPierre Ossman 		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
101da7fbe58SPierre Ossman 	} else {
102da7fbe58SPierre Ossman 		cmd.arg = 0;
103da7fbe58SPierre Ossman 		cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
104da7fbe58SPierre Ossman 	}
105da7fbe58SPierre Ossman 
1060899e741SMasahiro Yamada 	return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
107da7fbe58SPierre Ossman }
108da7fbe58SPierre Ossman 
109da7fbe58SPierre Ossman int mmc_select_card(struct mmc_card *card)
110da7fbe58SPierre Ossman {
111da7fbe58SPierre Ossman 
112da7fbe58SPierre Ossman 	return _mmc_select_card(card->host, card);
113da7fbe58SPierre Ossman }
114da7fbe58SPierre Ossman 
115da7fbe58SPierre Ossman int mmc_deselect_cards(struct mmc_host *host)
116da7fbe58SPierre Ossman {
117da7fbe58SPierre Ossman 	return _mmc_select_card(host, NULL);
118da7fbe58SPierre Ossman }
119da7fbe58SPierre Ossman 
1203d705d14SSascha Hauer /*
1213d705d14SSascha Hauer  * Write the value specified in the device tree or board code into the optional
1223d705d14SSascha Hauer  * 16 bit Driver Stage Register. This can be used to tune raise/fall times and
1233d705d14SSascha Hauer  * drive strength of the DAT and CMD outputs. The actual meaning of a given
1243d705d14SSascha Hauer  * value is hardware dependant.
1253d705d14SSascha Hauer  * The presence of the DSR register can be determined from the CSD register,
1263d705d14SSascha Hauer  * bit 76.
1273d705d14SSascha Hauer  */
1283d705d14SSascha Hauer int mmc_set_dsr(struct mmc_host *host)
1293d705d14SSascha Hauer {
130c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
1313d705d14SSascha Hauer 
1323d705d14SSascha Hauer 	cmd.opcode = MMC_SET_DSR;
1333d705d14SSascha Hauer 
1343d705d14SSascha Hauer 	cmd.arg = (host->dsr << 16) | 0xffff;
1353d705d14SSascha Hauer 	cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
1363d705d14SSascha Hauer 
1373d705d14SSascha Hauer 	return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
1383d705d14SSascha Hauer }
1393d705d14SSascha Hauer 
/*
 * Send CMD0 (GO_IDLE_STATE) to reset the card.  The surrounding
 * chip-select dance and 1 ms delays are deliberate - see the comment
 * below - so the statement order here must not be changed.
 */
int mmc_go_idle(struct mmc_host *host)
{
	int err;
	struct mmc_command cmd = {};

	/*
	 * Non-SPI hosts need to prevent chipselect going active during
	 * GO_IDLE; that would put chips into SPI mode.  Remind them of
	 * that in case of hardware that won't pull up DAT3/nCS otherwise.
	 *
	 * SPI hosts ignore ios.chip_select; it's managed according to
	 * rules that must accommodate non-MMC slaves which this layer
	 * won't even know about.
	 */
	if (!mmc_host_is_spi(host)) {
		mmc_set_chip_select(host, MMC_CS_HIGH);
		mmc_delay(1);
	}

	cmd.opcode = MMC_GO_IDLE_STATE;
	cmd.arg = 0;
	/* CMD0 is a broadcast command with no native response; SPI gets R1. */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC;

	err = mmc_wait_for_cmd(host, &cmd, 0);

	mmc_delay(1);

	if (!mmc_host_is_spi(host)) {
		mmc_set_chip_select(host, MMC_CS_DONTCARE);
		mmc_delay(1);
	}

	/* After reset the SPI CRC setting is back to its default (off). */
	host->use_spi_crc = 0;

	return err;
}
176da7fbe58SPierre Ossman 
/*
 * Send CMD1 (SEND_OP_COND) repeatedly - up to 100 attempts, 10 ms apart -
 * until the card reports it has left the busy/idle state.  On native
 * (non-SPI) hosts the resulting OCR is returned through @rocr.
 * Returns 0 on success, -ETIMEDOUT if the card never becomes ready, or
 * the error from command submission.
 */
int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
{
	struct mmc_command cmd = {};
	int i, err = 0;

	cmd.opcode = MMC_SEND_OP_COND;
	/* In SPI mode CMD1 takes no OCR argument. */
	cmd.arg = mmc_host_is_spi(host) ? 0 : ocr;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;

	for (i = 100; i; i--) {
		err = mmc_wait_for_cmd(host, &cmd, 0);
		if (err)
			break;

		/* wait until reset completes */
		if (mmc_host_is_spi(host)) {
			if (!(cmd.resp[0] & R1_SPI_IDLE))
				break;
		} else {
			if (cmd.resp[0] & MMC_CARD_BUSY)
				break;
		}

		/* Still busy - assume timeout unless a later iteration succeeds. */
		err = -ETIMEDOUT;

		mmc_delay(10);

		/*
		 * According to eMMC specification v5.1 section 6.4.3, we
		 * should issue CMD1 repeatedly in the idle state until
		 * the eMMC is ready. Otherwise some eMMC devices seem to enter
		 * the inactive mode after mmc_init_card() issued CMD0 when
		 * the eMMC device is busy.
		 */
		if (!ocr && !mmc_host_is_spi(host))
			cmd.arg = cmd.resp[0] | BIT(30);
	}

	if (rocr && !mmc_host_is_spi(host))
		*rocr = cmd.resp[0];

	return err;
}
220da7fbe58SPierre Ossman 
221da7fbe58SPierre Ossman int mmc_set_relative_addr(struct mmc_card *card)
222da7fbe58SPierre Ossman {
223c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
224da7fbe58SPierre Ossman 
225da7fbe58SPierre Ossman 	cmd.opcode = MMC_SET_RELATIVE_ADDR;
226da7fbe58SPierre Ossman 	cmd.arg = card->rca << 16;
227da7fbe58SPierre Ossman 	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
228da7fbe58SPierre Ossman 
2290899e741SMasahiro Yamada 	return mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
230da7fbe58SPierre Ossman }
231da7fbe58SPierre Ossman 
232af517150SDavid Brownell static int
233af517150SDavid Brownell mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode)
234da7fbe58SPierre Ossman {
235da7fbe58SPierre Ossman 	int err;
236c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
237da7fbe58SPierre Ossman 
238af517150SDavid Brownell 	cmd.opcode = opcode;
239af517150SDavid Brownell 	cmd.arg = arg;
240da7fbe58SPierre Ossman 	cmd.flags = MMC_RSP_R2 | MMC_CMD_AC;
241da7fbe58SPierre Ossman 
242af517150SDavid Brownell 	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
24317b0429dSPierre Ossman 	if (err)
244da7fbe58SPierre Ossman 		return err;
245da7fbe58SPierre Ossman 
246af517150SDavid Brownell 	memcpy(cxd, cmd.resp, sizeof(u32) * 4);
247da7fbe58SPierre Ossman 
24817b0429dSPierre Ossman 	return 0;
249da7fbe58SPierre Ossman }
250da7fbe58SPierre Ossman 
/*
 * NOTE: void *buf, caller for the buf is required to use DMA-capable
 * buffer or on-stack buffer (with some overhead in callee).
 */
/*
 * Read a register block (CSD, CID, EXT_CSD, ...) of @len bytes into @buf
 * as a single-block ADTC read.  @card may be NULL when @opcode is
 * MMC_SEND_CSD/MMC_SEND_CID (SPI path), since those use a fixed 64-clock
 * timeout and never dereference @card.
 */
static int
mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
		u32 opcode, void *buf, unsigned len)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;

	mrq.cmd = &cmd;
	mrq.data = &data;

	cmd.opcode = opcode;
	cmd.arg = 0;

	/* NOTE HACK:  the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID.  Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	/* One block of exactly @len bytes, scattered over a single entry. */
	data.blksz = len;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;

	sg_init_one(&sg, buf, len);

	if (opcode == MMC_SEND_CSD || opcode == MMC_SEND_CID) {
		/*
		 * The spec states that CSR and CID accesses have a timeout
		 * of 64 clock cycles.
		 */
		data.timeout_ns = 0;
		data.timeout_clks = 64;
	} else
		mmc_set_data_timeout(&data, card);

	mmc_wait_for_req(host, &mrq);

	/* Report command errors before data errors. */
	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return 0;
}
304da7fbe58SPierre Ossman 
305b53f0beeSYue Hu static int mmc_spi_send_cxd(struct mmc_host *host, u32 *cxd, u32 opcode)
306af517150SDavid Brownell {
30778e48073SPierre Ossman 	int ret, i;
308b53f0beeSYue Hu 	__be32 *cxd_tmp;
30978e48073SPierre Ossman 
310b53f0beeSYue Hu 	cxd_tmp = kzalloc(16, GFP_KERNEL);
311b53f0beeSYue Hu 	if (!cxd_tmp)
3121a41313eSKyungsik Lee 		return -ENOMEM;
3131a41313eSKyungsik Lee 
314b53f0beeSYue Hu 	ret = mmc_send_cxd_data(NULL, host, opcode, cxd_tmp, 16);
31578e48073SPierre Ossman 	if (ret)
3161a41313eSKyungsik Lee 		goto err;
31778e48073SPierre Ossman 
31878e48073SPierre Ossman 	for (i = 0; i < 4; i++)
319b53f0beeSYue Hu 		cxd[i] = be32_to_cpu(cxd_tmp[i]);
32078e48073SPierre Ossman 
3211a41313eSKyungsik Lee err:
322b53f0beeSYue Hu 	kfree(cxd_tmp);
3231a41313eSKyungsik Lee 	return ret;
324af517150SDavid Brownell }
325af517150SDavid Brownell 
3260796e439SUlf Hansson int mmc_send_csd(struct mmc_card *card, u32 *csd)
3270796e439SUlf Hansson {
3280796e439SUlf Hansson 	if (mmc_host_is_spi(card->host))
329b53f0beeSYue Hu 		return mmc_spi_send_cxd(card->host, csd, MMC_SEND_CSD);
3300796e439SUlf Hansson 
3310796e439SUlf Hansson 	return mmc_send_cxd_native(card->host, card->rca << 16,	csd,
3320796e439SUlf Hansson 				MMC_SEND_CSD);
3330796e439SUlf Hansson }
3340796e439SUlf Hansson 
335a1473732SUlf Hansson int mmc_send_cid(struct mmc_host *host, u32 *cid)
336a1473732SUlf Hansson {
337a1473732SUlf Hansson 	if (mmc_host_is_spi(host))
338b53f0beeSYue Hu 		return mmc_spi_send_cxd(host, cid, MMC_SEND_CID);
339a1473732SUlf Hansson 
340c92e68d8SUlf Hansson 	return mmc_send_cxd_native(host, 0, cid, MMC_ALL_SEND_CID);
341a1473732SUlf Hansson }
342a1473732SUlf Hansson 
343e21aa519SUlf Hansson int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)
344e21aa519SUlf Hansson {
345e21aa519SUlf Hansson 	int err;
346e21aa519SUlf Hansson 	u8 *ext_csd;
347e21aa519SUlf Hansson 
348e21aa519SUlf Hansson 	if (!card || !new_ext_csd)
349e21aa519SUlf Hansson 		return -EINVAL;
350e21aa519SUlf Hansson 
351e21aa519SUlf Hansson 	if (!mmc_can_ext_csd(card))
352e21aa519SUlf Hansson 		return -EOPNOTSUPP;
353e21aa519SUlf Hansson 
354e21aa519SUlf Hansson 	/*
355e21aa519SUlf Hansson 	 * As the ext_csd is so large and mostly unused, we don't store the
356e21aa519SUlf Hansson 	 * raw block in mmc_card.
357e21aa519SUlf Hansson 	 */
35822b78700SUlf Hansson 	ext_csd = kzalloc(512, GFP_KERNEL);
359e21aa519SUlf Hansson 	if (!ext_csd)
360e21aa519SUlf Hansson 		return -ENOMEM;
361e21aa519SUlf Hansson 
3622fc91e8bSUlf Hansson 	err = mmc_send_cxd_data(card, card->host, MMC_SEND_EXT_CSD, ext_csd,
3632fc91e8bSUlf Hansson 				512);
364e21aa519SUlf Hansson 	if (err)
365e21aa519SUlf Hansson 		kfree(ext_csd);
366e21aa519SUlf Hansson 	else
367e21aa519SUlf Hansson 		*new_ext_csd = ext_csd;
368e21aa519SUlf Hansson 
369e21aa519SUlf Hansson 	return err;
370e21aa519SUlf Hansson }
371e21aa519SUlf Hansson EXPORT_SYMBOL_GPL(mmc_get_ext_csd);
372e21aa519SUlf Hansson 
373af517150SDavid Brownell int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp)
374af517150SDavid Brownell {
375c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
376af517150SDavid Brownell 	int err;
377af517150SDavid Brownell 
378af517150SDavid Brownell 	cmd.opcode = MMC_SPI_READ_OCR;
379af517150SDavid Brownell 	cmd.arg = highcap ? (1 << 30) : 0;
380af517150SDavid Brownell 	cmd.flags = MMC_RSP_SPI_R3;
381af517150SDavid Brownell 
382af517150SDavid Brownell 	err = mmc_wait_for_cmd(host, &cmd, 0);
383af517150SDavid Brownell 
384af517150SDavid Brownell 	*ocrp = cmd.resp[1];
385af517150SDavid Brownell 	return err;
386af517150SDavid Brownell }
387af517150SDavid Brownell 
388af517150SDavid Brownell int mmc_spi_set_crc(struct mmc_host *host, int use_crc)
389af517150SDavid Brownell {
390c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
391af517150SDavid Brownell 	int err;
392af517150SDavid Brownell 
393af517150SDavid Brownell 	cmd.opcode = MMC_SPI_CRC_ON_OFF;
394af517150SDavid Brownell 	cmd.flags = MMC_RSP_SPI_R1;
395af517150SDavid Brownell 	cmd.arg = use_crc;
396af517150SDavid Brownell 
397af517150SDavid Brownell 	err = mmc_wait_for_cmd(host, &cmd, 0);
398af517150SDavid Brownell 	if (!err)
399af517150SDavid Brownell 		host->use_spi_crc = use_crc;
400af517150SDavid Brownell 	return err;
401af517150SDavid Brownell }
402af517150SDavid Brownell 
40320348d19SUlf Hansson static int mmc_switch_status_error(struct mmc_host *host, u32 status)
404ed16f58dSAdrian Hunter {
405ed16f58dSAdrian Hunter 	if (mmc_host_is_spi(host)) {
406ed16f58dSAdrian Hunter 		if (status & R1_SPI_ILLEGAL_COMMAND)
407ed16f58dSAdrian Hunter 			return -EBADMSG;
408ed16f58dSAdrian Hunter 	} else {
409a94a7483SShawn Lin 		if (R1_STATUS(status))
410ed16f58dSAdrian Hunter 			pr_warn("%s: unexpected status %#x after switch\n",
411ed16f58dSAdrian Hunter 				mmc_hostname(host), status);
412ed16f58dSAdrian Hunter 		if (status & R1_SWITCH_ERROR)
413ed16f58dSAdrian Hunter 			return -EBADMSG;
414ed16f58dSAdrian Hunter 	}
415ed16f58dSAdrian Hunter 	return 0;
416ed16f58dSAdrian Hunter }
417ed16f58dSAdrian Hunter 
41820348d19SUlf Hansson /* Caller must hold re-tuning */
41960db8a47SUlf Hansson int mmc_switch_status(struct mmc_card *card, bool crc_err_fatal)
42020348d19SUlf Hansson {
42120348d19SUlf Hansson 	u32 status;
42220348d19SUlf Hansson 	int err;
42320348d19SUlf Hansson 
42420348d19SUlf Hansson 	err = mmc_send_status(card, &status);
425ef3d2322SAdrian Hunter 	if (!crc_err_fatal && err == -EILSEQ)
426ef3d2322SAdrian Hunter 		return 0;
42720348d19SUlf Hansson 	if (err)
42820348d19SUlf Hansson 		return err;
42920348d19SUlf Hansson 
43020348d19SUlf Hansson 	return mmc_switch_status_error(card->host, status);
43120348d19SUlf Hansson }
43220348d19SUlf Hansson 
/*
 * Busy-state callback used via __mmc_poll_for_busy().  Sets *busy to
 * whether the card is still busy, preferring the host's ->card_busy()
 * op and falling back to CMD13 (SEND_STATUS) polling otherwise.
 * Returns 0 on success or a negative errno.
 */
static int mmc_busy_cb(void *cb_data, bool *busy)
{
	struct mmc_busy_data *data = cb_data;
	struct mmc_host *host = data->card->host;
	u32 status = 0;
	int err;

	/* If the host can probe the busy signal directly, use that. */
	if (host->ops->card_busy) {
		*busy = host->ops->card_busy(host);
		return 0;
	}

	err = mmc_send_status(data->card, &status);
	if (data->retry_crc_err && err == -EILSEQ) {
		/* Treat a CRC error as "still busy" and keep polling. */
		*busy = true;
		return 0;
	}
	if (err)
		return err;

	/* Interpret the status word according to the command being waited on. */
	switch (data->busy_cmd) {
	case MMC_BUSY_CMD6:
		err = mmc_switch_status_error(host, status);
		break;
	case MMC_BUSY_ERASE:
		err = R1_STATUS(status) ? -EIO : 0;
		break;
	case MMC_BUSY_HPI:
		/* No extra status validation for HPI. */
		break;
	default:
		err = -EINVAL;
	}

	if (err)
		return err;

	*busy = !mmc_ready_for_data(status);
	return 0;
}
4726972096aSUlf Hansson 
/*
 * Poll @busy_cb until the card stops signalling busy or @timeout_ms
 * elapses.  The polling rate is throttled with an exponentially growing
 * sleep (32 us doubling up to ~32 ms) to avoid hogging the CPU.
 * Returns 0 when ready, -ETIMEDOUT on timeout, or an error propagated
 * from @busy_cb.
 */
int __mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
			int (*busy_cb)(void *cb_data, bool *busy),
			void *cb_data)
{
	struct mmc_host *host = card->host;
	int err;
	unsigned long timeout;
	unsigned int udelay = 32, udelay_max = 32768;
	bool expired = false;
	bool busy = false;

	timeout = jiffies + msecs_to_jiffies(timeout_ms) + 1;
	do {
		/*
		 * Due to the possibility of being preempted while polling,
		 * check the expiration time first.
		 */
		expired = time_after(jiffies, timeout);

		err = (*busy_cb)(cb_data, &busy);
		if (err)
			return err;

		/* Timeout if the device still remains busy. */
		if (expired && busy) {
			pr_err("%s: Card stuck being busy! %s\n",
				mmc_hostname(host), __func__);
			return -ETIMEDOUT;
		}

		/* Throttle the polling rate to avoid hogging the CPU. */
		if (busy) {
			usleep_range(udelay, udelay * 2);
			if (udelay < udelay_max)
				udelay *= 2;
		}
	} while (busy);

	return 0;
}
513716bdb89SUlf Hansson 
5140d84c3e6SUlf Hansson int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
515*04f967adSUlf Hansson 		      bool retry_crc_err, enum mmc_busy_cmd busy_cmd)
5160d84c3e6SUlf Hansson {
517*04f967adSUlf Hansson 	struct mmc_busy_data cb_data;
518*04f967adSUlf Hansson 
519*04f967adSUlf Hansson 	cb_data.card = card;
520*04f967adSUlf Hansson 	cb_data.retry_crc_err = retry_crc_err;
521*04f967adSUlf Hansson 	cb_data.busy_cmd = busy_cmd;
522*04f967adSUlf Hansson 
523*04f967adSUlf Hansson 	return __mmc_poll_for_busy(card, timeout_ms, &mmc_busy_cb, &cb_data);
5240d84c3e6SUlf Hansson }
5250d84c3e6SUlf Hansson 
526e62f1e0bSUlf Hansson bool mmc_prepare_busy_cmd(struct mmc_host *host, struct mmc_command *cmd,
527e62f1e0bSUlf Hansson 			  unsigned int timeout_ms)
528e62f1e0bSUlf Hansson {
529e62f1e0bSUlf Hansson 	/*
530e62f1e0bSUlf Hansson 	 * If the max_busy_timeout of the host is specified, make sure it's
531e62f1e0bSUlf Hansson 	 * enough to fit the used timeout_ms. In case it's not, let's instruct
532e62f1e0bSUlf Hansson 	 * the host to avoid HW busy detection, by converting to a R1 response
533e62f1e0bSUlf Hansson 	 * instead of a R1B. Note, some hosts requires R1B, which also means
534e62f1e0bSUlf Hansson 	 * they are on their own when it comes to deal with the busy timeout.
535e62f1e0bSUlf Hansson 	 */
536e62f1e0bSUlf Hansson 	if (!(host->caps & MMC_CAP_NEED_RSP_BUSY) && host->max_busy_timeout &&
537e62f1e0bSUlf Hansson 	    (timeout_ms > host->max_busy_timeout)) {
538e62f1e0bSUlf Hansson 		cmd->flags = MMC_CMD_AC | MMC_RSP_SPI_R1 | MMC_RSP_R1;
539e62f1e0bSUlf Hansson 		return false;
540e62f1e0bSUlf Hansson 	}
541e62f1e0bSUlf Hansson 
542e62f1e0bSUlf Hansson 	cmd->flags = MMC_CMD_AC | MMC_RSP_SPI_R1B | MMC_RSP_R1B;
543e62f1e0bSUlf Hansson 	cmd->busy_timeout = timeout_ms;
544e62f1e0bSUlf Hansson 	return true;
545e62f1e0bSUlf Hansson }
546e62f1e0bSUlf Hansson 
/**
 *	__mmc_switch - modify EXT_CSD register
 *	@card: the MMC card associated with the data transfer
 *	@set: cmd set values
 *	@index: EXT_CSD register index
 *	@value: value to program into EXT_CSD register
 *	@timeout_ms: timeout (ms) for operation performed by register write,
 *                   timeout of zero implies maximum possible timeout
 *	@timing: new timing to change to (0 means keep the current timing)
 *	@send_status: send status cmd to poll for busy
 *	@retry_crc_err: retry when CRC errors when polling with CMD13 for busy
 *	@retries: number of retries
 *
 *	Modifies the EXT_CSD register for selected card.  Waits for the
 *	switch to complete - via HW busy detection, CMD13 polling, or a
 *	plain delay - before optionally switching the bus timing and
 *	verifying the final status.
 */
int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
		unsigned int timeout_ms, unsigned char timing,
		bool send_status, bool retry_crc_err, unsigned int retries)
{
	struct mmc_host *host = card->host;
	int err;
	struct mmc_command cmd = {};
	bool use_r1b_resp;
	unsigned char old_timing = host->ios.timing;

	/* Hold re-tuning while the switch is in flight; released at 'out'. */
	mmc_retune_hold(host);

	if (!timeout_ms) {
		pr_warn("%s: unspecified timeout for CMD6 - use generic\n",
			mmc_hostname(host));
		timeout_ms = card->ext_csd.generic_cmd6_time;
	}

	cmd.opcode = MMC_SWITCH;
	cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
		  (index << 16) |
		  (value << 8) |
		  set;
	use_r1b_resp = mmc_prepare_busy_cmd(host, &cmd, timeout_ms);

	err = mmc_wait_for_cmd(host, &cmd, retries);
	if (err)
		goto out;

	/*If SPI or used HW busy detection above, then we don't need to poll. */
	if (((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp) ||
		mmc_host_is_spi(host))
		goto out_tim;

	/*
	 * If the host doesn't support HW polling via the ->card_busy() ops and
	 * when it's not allowed to poll by using CMD13, then we need to rely on
	 * waiting the stated timeout to be sufficient.
	 */
	if (!send_status && !host->ops->card_busy) {
		mmc_delay(timeout_ms);
		goto out_tim;
	}

	/* Let's try to poll to find out when the command is completed. */
	err = mmc_poll_for_busy(card, timeout_ms, retry_crc_err, MMC_BUSY_CMD6);
	if (err)
		goto out;

out_tim:
	/* Switch to new timing before check switch status. */
	if (timing)
		mmc_set_timing(host, timing);

	if (send_status) {
		err = mmc_switch_status(card, true);
		/* Roll the timing back if the switch did not take effect. */
		if (err && timing)
			mmc_set_timing(host, old_timing);
	}
out:
	mmc_retune_release(host);

	return err;
}
626950d56acSJaehoon Chung 
627950d56acSJaehoon Chung int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
628950d56acSJaehoon Chung 		unsigned int timeout_ms)
629950d56acSJaehoon Chung {
630aa33ce3cSUlf Hansson 	return __mmc_switch(card, set, index, value, timeout_ms, 0,
6315e52a168SBean Huo 			    true, false, MMC_CMD_RETRIES);
632950d56acSJaehoon Chung }
633d3a8d95dSAndrei Warkentin EXPORT_SYMBOL_GPL(mmc_switch);
634da7fbe58SPierre Ossman 
6359979dbe5SChaotian Jing int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error)
636996903deSMinda Chen {
637c7836d15SMasahiro Yamada 	struct mmc_request mrq = {};
638c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
639c7836d15SMasahiro Yamada 	struct mmc_data data = {};
640996903deSMinda Chen 	struct scatterlist sg;
641fe5afb13SUlf Hansson 	struct mmc_ios *ios = &host->ios;
642996903deSMinda Chen 	const u8 *tuning_block_pattern;
643996903deSMinda Chen 	int size, err = 0;
644996903deSMinda Chen 	u8 *data_buf;
645996903deSMinda Chen 
646996903deSMinda Chen 	if (ios->bus_width == MMC_BUS_WIDTH_8) {
647996903deSMinda Chen 		tuning_block_pattern = tuning_blk_pattern_8bit;
648996903deSMinda Chen 		size = sizeof(tuning_blk_pattern_8bit);
649996903deSMinda Chen 	} else if (ios->bus_width == MMC_BUS_WIDTH_4) {
650996903deSMinda Chen 		tuning_block_pattern = tuning_blk_pattern_4bit;
651996903deSMinda Chen 		size = sizeof(tuning_blk_pattern_4bit);
652996903deSMinda Chen 	} else
653996903deSMinda Chen 		return -EINVAL;
654996903deSMinda Chen 
655996903deSMinda Chen 	data_buf = kzalloc(size, GFP_KERNEL);
656996903deSMinda Chen 	if (!data_buf)
657996903deSMinda Chen 		return -ENOMEM;
658996903deSMinda Chen 
659996903deSMinda Chen 	mrq.cmd = &cmd;
660996903deSMinda Chen 	mrq.data = &data;
661996903deSMinda Chen 
662996903deSMinda Chen 	cmd.opcode = opcode;
663996903deSMinda Chen 	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
664996903deSMinda Chen 
665996903deSMinda Chen 	data.blksz = size;
666996903deSMinda Chen 	data.blocks = 1;
667996903deSMinda Chen 	data.flags = MMC_DATA_READ;
668996903deSMinda Chen 
669996903deSMinda Chen 	/*
670996903deSMinda Chen 	 * According to the tuning specs, Tuning process
671996903deSMinda Chen 	 * is normally shorter 40 executions of CMD19,
672996903deSMinda Chen 	 * and timeout value should be shorter than 150 ms
673996903deSMinda Chen 	 */
674996903deSMinda Chen 	data.timeout_ns = 150 * NSEC_PER_MSEC;
675996903deSMinda Chen 
676996903deSMinda Chen 	data.sg = &sg;
677996903deSMinda Chen 	data.sg_len = 1;
678996903deSMinda Chen 	sg_init_one(&sg, data_buf, size);
679996903deSMinda Chen 
680fe5afb13SUlf Hansson 	mmc_wait_for_req(host, &mrq);
681996903deSMinda Chen 
6829979dbe5SChaotian Jing 	if (cmd_error)
6839979dbe5SChaotian Jing 		*cmd_error = cmd.error;
6849979dbe5SChaotian Jing 
685996903deSMinda Chen 	if (cmd.error) {
686996903deSMinda Chen 		err = cmd.error;
687996903deSMinda Chen 		goto out;
688996903deSMinda Chen 	}
689996903deSMinda Chen 
690996903deSMinda Chen 	if (data.error) {
691996903deSMinda Chen 		err = data.error;
692996903deSMinda Chen 		goto out;
693996903deSMinda Chen 	}
694996903deSMinda Chen 
695996903deSMinda Chen 	if (memcmp(data_buf, tuning_block_pattern, size))
696996903deSMinda Chen 		err = -EIO;
697996903deSMinda Chen 
698996903deSMinda Chen out:
699996903deSMinda Chen 	kfree(data_buf);
700996903deSMinda Chen 	return err;
701996903deSMinda Chen }
702996903deSMinda Chen EXPORT_SYMBOL_GPL(mmc_send_tuning);
703996903deSMinda Chen 
704e711f030SAdrian Hunter int mmc_abort_tuning(struct mmc_host *host, u32 opcode)
705e711f030SAdrian Hunter {
706c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
707e711f030SAdrian Hunter 
708e711f030SAdrian Hunter 	/*
709e711f030SAdrian Hunter 	 * eMMC specification specifies that CMD12 can be used to stop a tuning
710e711f030SAdrian Hunter 	 * command, but SD specification does not, so do nothing unless it is
711e711f030SAdrian Hunter 	 * eMMC.
712e711f030SAdrian Hunter 	 */
713e711f030SAdrian Hunter 	if (opcode != MMC_SEND_TUNING_BLOCK_HS200)
714e711f030SAdrian Hunter 		return 0;
715e711f030SAdrian Hunter 
716e711f030SAdrian Hunter 	cmd.opcode = MMC_STOP_TRANSMISSION;
717e711f030SAdrian Hunter 	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
718e711f030SAdrian Hunter 
719e711f030SAdrian Hunter 	/*
720e711f030SAdrian Hunter 	 * For drivers that override R1 to R1b, set an arbitrary timeout based
721e711f030SAdrian Hunter 	 * on the tuning timeout i.e. 150ms.
722e711f030SAdrian Hunter 	 */
723e711f030SAdrian Hunter 	cmd.busy_timeout = 150;
724e711f030SAdrian Hunter 
725e711f030SAdrian Hunter 	return mmc_wait_for_cmd(host, &cmd, 0);
726e711f030SAdrian Hunter }
727e711f030SAdrian Hunter EXPORT_SYMBOL_GPL(mmc_abort_tuning);
728e711f030SAdrian Hunter 
/*
 * Issue an MMC bus test command (CMD19 BUS_TEST_W / CMD14 BUS_TEST_R) and,
 * for the read direction, verify that the card returned the inverted test
 * pattern. @len is the bus width in bytes (8 or 4). Returns 0 on success,
 * -ENOMEM/-EINVAL on setup failure, the command/data error if the transfer
 * failed, or -EIO on a pattern mismatch.
 */
static int
mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
		  u8 len)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;
	u8 *data_buf;
	u8 *test_buf;
	int i, err;
	/* Only the first len/4 bytes carry the test pattern (see loop below). */
	static u8 testdata_8bit[8] = { 0x55, 0xaa, 0, 0, 0, 0, 0, 0 };
	static u8 testdata_4bit[4] = { 0x5a, 0, 0, 0 };

	/* dma onto stack is unsafe/nonportable, but callers to this
	 * routine normally provide temporary on-stack buffers ...
	 */
	data_buf = kmalloc(len, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;

	if (len == 8)
		test_buf = testdata_8bit;
	else if (len == 4)
		test_buf = testdata_4bit;
	else {
		pr_err("%s: Invalid bus_width %d\n",
		       mmc_hostname(host), len);
		kfree(data_buf);
		return -EINVAL;
	}

	/* The write direction sends the pattern; the card echoes it inverted. */
	if (opcode == MMC_BUS_TEST_W)
		memcpy(data_buf, test_buf, len);

	mrq.cmd = &cmd;
	mrq.data = &data;
	cmd.opcode = opcode;
	cmd.arg = 0;

	/* NOTE HACK:  the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID.  Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = len;
	data.blocks = 1;
	if (opcode == MMC_BUS_TEST_R)
		data.flags = MMC_DATA_READ;
	else
		data.flags = MMC_DATA_WRITE;

	data.sg = &sg;
	data.sg_len = 1;
	mmc_set_data_timeout(&data, card);
	sg_init_one(&sg, data_buf, len);
	mmc_wait_for_req(host, &mrq);
	err = 0;
	if (opcode == MMC_BUS_TEST_R) {
		/*
		 * Each populated pattern byte must come back bit-inverted
		 * (XOR == 0xff). Only len/4 bytes are checked: 2 for the
		 * 8-bit pattern, 1 for the 4-bit pattern.
		 */
		for (i = 0; i < len / 4; i++)
			if ((test_buf[i] ^ data_buf[i]) != 0xff) {
				err = -EIO;
				break;
			}
	}
	kfree(data_buf);

	/* Transfer errors take precedence over a pattern-mismatch result. */
	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return err;
}
80522113efdSAries Lee 
80622113efdSAries Lee int mmc_bus_test(struct mmc_card *card, u8 bus_width)
80722113efdSAries Lee {
8080899e741SMasahiro Yamada 	int width;
80922113efdSAries Lee 
81022113efdSAries Lee 	if (bus_width == MMC_BUS_WIDTH_8)
81122113efdSAries Lee 		width = 8;
81222113efdSAries Lee 	else if (bus_width == MMC_BUS_WIDTH_4)
81322113efdSAries Lee 		width = 4;
81422113efdSAries Lee 	else if (bus_width == MMC_BUS_WIDTH_1)
81522113efdSAries Lee 		return 0; /* no need for test */
81622113efdSAries Lee 	else
81722113efdSAries Lee 		return -EINVAL;
81822113efdSAries Lee 
81922113efdSAries Lee 	/*
82022113efdSAries Lee 	 * Ignore errors from BUS_TEST_W.  BUS_TEST_R will fail if there
82122113efdSAries Lee 	 * is a problem.  This improves chances that the test will work.
82222113efdSAries Lee 	 */
82322113efdSAries Lee 	mmc_send_bus_test(card, card->host, MMC_BUS_TEST_W, width);
8240899e741SMasahiro Yamada 	return mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
82522113efdSAries Lee }
826eb0d8f13SJaehoon Chung 
8279f94d047SUlf Hansson static int mmc_send_hpi_cmd(struct mmc_card *card)
828eb0d8f13SJaehoon Chung {
829490ff95fSUlf Hansson 	unsigned int busy_timeout_ms = card->ext_csd.out_of_int_time;
830892bf100SUlf Hansson 	struct mmc_host *host = card->host;
831c7bedef0SUlf Hansson 	bool use_r1b_resp = false;
832c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
833eb0d8f13SJaehoon Chung 	int err;
834eb0d8f13SJaehoon Chung 
835892bf100SUlf Hansson 	cmd.opcode = card->ext_csd.hpi_cmd;
836eb0d8f13SJaehoon Chung 	cmd.arg = card->rca << 16 | 1;
837892bf100SUlf Hansson 	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
838c7bedef0SUlf Hansson 
839c7bedef0SUlf Hansson 	if (cmd.opcode == MMC_STOP_TRANSMISSION)
840c7bedef0SUlf Hansson 		use_r1b_resp = mmc_prepare_busy_cmd(host, &cmd,
841c7bedef0SUlf Hansson 						    busy_timeout_ms);
842892bf100SUlf Hansson 
843892bf100SUlf Hansson 	err = mmc_wait_for_cmd(host, &cmd, 0);
844eb0d8f13SJaehoon Chung 	if (err) {
845892bf100SUlf Hansson 		pr_warn("%s: HPI error %d. Command response %#x\n",
846892bf100SUlf Hansson 			mmc_hostname(host), err, cmd.resp[0]);
847eb0d8f13SJaehoon Chung 		return err;
848eb0d8f13SJaehoon Chung 	}
849eb0d8f13SJaehoon Chung 
850892bf100SUlf Hansson 	/* No need to poll when using HW busy detection. */
851892bf100SUlf Hansson 	if (host->caps & MMC_CAP_WAIT_WHILE_BUSY && use_r1b_resp)
852892bf100SUlf Hansson 		return 0;
853892bf100SUlf Hansson 
854490ff95fSUlf Hansson 	/* Let's poll to find out when the HPI request completes. */
855*04f967adSUlf Hansson 	return mmc_poll_for_busy(card, busy_timeout_ms, false, MMC_BUSY_HPI);
856eb0d8f13SJaehoon Chung }
857148bcab2SUlf Hansson 
8580f2c0512SUlf Hansson /**
8590f2c0512SUlf Hansson  *	mmc_interrupt_hpi - Issue for High priority Interrupt
8600f2c0512SUlf Hansson  *	@card: the MMC card associated with the HPI transfer
8610f2c0512SUlf Hansson  *
8620f2c0512SUlf Hansson  *	Issued High Priority Interrupt, and check for card status
8630f2c0512SUlf Hansson  *	until out-of prg-state.
8640f2c0512SUlf Hansson  */
86544aebc16SJason Yan static int mmc_interrupt_hpi(struct mmc_card *card)
8660f2c0512SUlf Hansson {
8670f2c0512SUlf Hansson 	int err;
8680f2c0512SUlf Hansson 	u32 status;
8690f2c0512SUlf Hansson 
8700f2c0512SUlf Hansson 	if (!card->ext_csd.hpi_en) {
8710f2c0512SUlf Hansson 		pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
8720f2c0512SUlf Hansson 		return 1;
8730f2c0512SUlf Hansson 	}
8740f2c0512SUlf Hansson 
8750f2c0512SUlf Hansson 	err = mmc_send_status(card, &status);
8760f2c0512SUlf Hansson 	if (err) {
8770f2c0512SUlf Hansson 		pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
8780f2c0512SUlf Hansson 		goto out;
8790f2c0512SUlf Hansson 	}
8800f2c0512SUlf Hansson 
8810f2c0512SUlf Hansson 	switch (R1_CURRENT_STATE(status)) {
8820f2c0512SUlf Hansson 	case R1_STATE_IDLE:
8830f2c0512SUlf Hansson 	case R1_STATE_READY:
8840f2c0512SUlf Hansson 	case R1_STATE_STBY:
8850f2c0512SUlf Hansson 	case R1_STATE_TRAN:
8860f2c0512SUlf Hansson 		/*
8870f2c0512SUlf Hansson 		 * In idle and transfer states, HPI is not needed and the caller
8880f2c0512SUlf Hansson 		 * can issue the next intended command immediately
8890f2c0512SUlf Hansson 		 */
8900f2c0512SUlf Hansson 		goto out;
8910f2c0512SUlf Hansson 	case R1_STATE_PRG:
8920f2c0512SUlf Hansson 		break;
8930f2c0512SUlf Hansson 	default:
8940f2c0512SUlf Hansson 		/* In all other states, it's illegal to issue HPI */
8950f2c0512SUlf Hansson 		pr_debug("%s: HPI cannot be sent. Card state=%d\n",
8960f2c0512SUlf Hansson 			mmc_hostname(card->host), R1_CURRENT_STATE(status));
8970f2c0512SUlf Hansson 		err = -EINVAL;
8980f2c0512SUlf Hansson 		goto out;
8990f2c0512SUlf Hansson 	}
9000f2c0512SUlf Hansson 
9019f94d047SUlf Hansson 	err = mmc_send_hpi_cmd(card);
9020f2c0512SUlf Hansson out:
9030f2c0512SUlf Hansson 	return err;
9040f2c0512SUlf Hansson }
9050f2c0512SUlf Hansson 
906148bcab2SUlf Hansson int mmc_can_ext_csd(struct mmc_card *card)
907148bcab2SUlf Hansson {
908148bcab2SUlf Hansson 	return (card && card->csd.mmca_vsn > CSD_SPEC_VER_3);
909148bcab2SUlf Hansson }
910b658af71SAdrian Hunter 
9111cf8f7e5SUlf Hansson static int mmc_read_bkops_status(struct mmc_card *card)
9121cf8f7e5SUlf Hansson {
9131cf8f7e5SUlf Hansson 	int err;
9141cf8f7e5SUlf Hansson 	u8 *ext_csd;
9151cf8f7e5SUlf Hansson 
9161cf8f7e5SUlf Hansson 	err = mmc_get_ext_csd(card, &ext_csd);
9171cf8f7e5SUlf Hansson 	if (err)
9181cf8f7e5SUlf Hansson 		return err;
9191cf8f7e5SUlf Hansson 
9201cf8f7e5SUlf Hansson 	card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS];
9211cf8f7e5SUlf Hansson 	card->ext_csd.raw_exception_status = ext_csd[EXT_CSD_EXP_EVENTS_STATUS];
9221cf8f7e5SUlf Hansson 	kfree(ext_csd);
9231cf8f7e5SUlf Hansson 	return 0;
9241cf8f7e5SUlf Hansson }
9251cf8f7e5SUlf Hansson 
9261cf8f7e5SUlf Hansson /**
9270c204979SUlf Hansson  *	mmc_run_bkops - Run BKOPS for supported cards
9280c204979SUlf Hansson  *	@card: MMC card to run BKOPS for
9291cf8f7e5SUlf Hansson  *
9300c204979SUlf Hansson  *	Run background operations synchronously for cards having manual BKOPS
9310c204979SUlf Hansson  *	enabled and in case it reports urgent BKOPS level.
9321cf8f7e5SUlf Hansson */
9330c204979SUlf Hansson void mmc_run_bkops(struct mmc_card *card)
9341cf8f7e5SUlf Hansson {
9351cf8f7e5SUlf Hansson 	int err;
9361cf8f7e5SUlf Hansson 
9370c204979SUlf Hansson 	if (!card->ext_csd.man_bkops_en)
9381cf8f7e5SUlf Hansson 		return;
9391cf8f7e5SUlf Hansson 
9401cf8f7e5SUlf Hansson 	err = mmc_read_bkops_status(card);
9411cf8f7e5SUlf Hansson 	if (err) {
9421cf8f7e5SUlf Hansson 		pr_err("%s: Failed to read bkops status: %d\n",
9431cf8f7e5SUlf Hansson 		       mmc_hostname(card->host), err);
9441cf8f7e5SUlf Hansson 		return;
9451cf8f7e5SUlf Hansson 	}
9461cf8f7e5SUlf Hansson 
9470c204979SUlf Hansson 	if (!card->ext_csd.raw_bkops_status ||
9480c204979SUlf Hansson 	    card->ext_csd.raw_bkops_status < EXT_CSD_BKOPS_LEVEL_2)
9491cf8f7e5SUlf Hansson 		return;
9501cf8f7e5SUlf Hansson 
9511cf8f7e5SUlf Hansson 	mmc_retune_hold(card->host);
9521cf8f7e5SUlf Hansson 
9530c204979SUlf Hansson 	/*
9540c204979SUlf Hansson 	 * For urgent BKOPS status, LEVEL_2 and higher, let's execute
9550c204979SUlf Hansson 	 * synchronously. Future wise, we may consider to start BKOPS, for less
9560c204979SUlf Hansson 	 * urgent levels by using an asynchronous background task, when idle.
9570c204979SUlf Hansson 	 */
9580c204979SUlf Hansson 	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
95924ed3bd0SUlf Hansson 			 EXT_CSD_BKOPS_START, 1, MMC_BKOPS_TIMEOUT_MS);
9600c204979SUlf Hansson 	if (err)
9611cf8f7e5SUlf Hansson 		pr_warn("%s: Error %d starting bkops\n",
9621cf8f7e5SUlf Hansson 			mmc_hostname(card->host), err);
9631cf8f7e5SUlf Hansson 
9641cf8f7e5SUlf Hansson 	mmc_retune_release(card->host);
9651cf8f7e5SUlf Hansson }
9660c204979SUlf Hansson EXPORT_SYMBOL(mmc_run_bkops);
9671cf8f7e5SUlf Hansson 
968d9df1737SUlf Hansson /*
969d9df1737SUlf Hansson  * Flush the cache to the non-volatile storage.
970d9df1737SUlf Hansson  */
971d9df1737SUlf Hansson int mmc_flush_cache(struct mmc_card *card)
972d9df1737SUlf Hansson {
973d9df1737SUlf Hansson 	int err = 0;
974d9df1737SUlf Hansson 
97597fce126SAvri Altman 	if (mmc_cache_enabled(card->host)) {
976d9df1737SUlf Hansson 		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
97724ed3bd0SUlf Hansson 				 EXT_CSD_FLUSH_CACHE, 1,
97824ed3bd0SUlf Hansson 				 MMC_CACHE_FLUSH_TIMEOUT_MS);
979d9df1737SUlf Hansson 		if (err)
980d9df1737SUlf Hansson 			pr_err("%s: cache flush error %d\n",
981d9df1737SUlf Hansson 					mmc_hostname(card->host), err);
982d9df1737SUlf Hansson 	}
983d9df1737SUlf Hansson 
984d9df1737SUlf Hansson 	return err;
985d9df1737SUlf Hansson }
986d9df1737SUlf Hansson EXPORT_SYMBOL(mmc_flush_cache);
987d9df1737SUlf Hansson 
988b658af71SAdrian Hunter static int mmc_cmdq_switch(struct mmc_card *card, bool enable)
989b658af71SAdrian Hunter {
990b658af71SAdrian Hunter 	u8 val = enable ? EXT_CSD_CMDQ_MODE_ENABLED : 0;
991b658af71SAdrian Hunter 	int err;
992b658af71SAdrian Hunter 
993b658af71SAdrian Hunter 	if (!card->ext_csd.cmdq_support)
994b658af71SAdrian Hunter 		return -EOPNOTSUPP;
995b658af71SAdrian Hunter 
996b658af71SAdrian Hunter 	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_CMDQ_MODE_EN,
997b658af71SAdrian Hunter 			 val, card->ext_csd.generic_cmd6_time);
998b658af71SAdrian Hunter 	if (!err)
999b658af71SAdrian Hunter 		card->ext_csd.cmdq_en = enable;
1000b658af71SAdrian Hunter 
1001b658af71SAdrian Hunter 	return err;
1002b658af71SAdrian Hunter }
1003b658af71SAdrian Hunter 
/* Enable the eMMC command queue; fails with -EOPNOTSUPP if unsupported. */
int mmc_cmdq_enable(struct mmc_card *card)
{
	return mmc_cmdq_switch(card, true);
}
EXPORT_SYMBOL_GPL(mmc_cmdq_enable);
1009b658af71SAdrian Hunter 
/* Disable the eMMC command queue; fails with -EOPNOTSUPP if unsupported. */
int mmc_cmdq_disable(struct mmc_card *card)
{
	return mmc_cmdq_switch(card, false);
}
EXPORT_SYMBOL_GPL(mmc_cmdq_disable);
101555c2b8b9SUlf Hansson 
/*
 * mmc_sanitize - start an eMMC sanitize operation and wait for completion.
 * @card: card to sanitize
 * @timeout_ms: busy timeout in ms; 0 selects MMC_SANITIZE_TIMEOUT_MS (240s)
 *
 * Issues CMD6 to set EXT_CSD_SANITIZE_START and waits for the card to leave
 * the busy/programming state. On a -ETIMEDOUT result the card is aborted
 * with an HPI command to bring it back to the transfer state. Returns 0 on
 * success, -EOPNOTSUPP if sanitize is unsupported, or the switch error.
 */
int mmc_sanitize(struct mmc_card *card, unsigned int timeout_ms)
{
	struct mmc_host *host = card->host;
	int err;

	if (!mmc_can_sanitize(card)) {
		pr_warn("%s: Sanitize not supported\n", mmc_hostname(host));
		return -EOPNOTSUPP;
	}

	if (!timeout_ms)
		timeout_ms = MMC_SANITIZE_TIMEOUT_MS;

	pr_debug("%s: Sanitize in progress...\n", mmc_hostname(host));

	/* Hold re-tuning; sanitize may keep the card busy for minutes. */
	mmc_retune_hold(host);

	/* send_status = true, retry_crc_err = false, zero command retries. */
	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_SANITIZE_START,
			   1, timeout_ms, 0, true, false, 0);
	if (err)
		pr_err("%s: Sanitize failed err=%d\n", mmc_hostname(host), err);

	/*
	 * If the sanitize operation timed out, the card is probably still busy
	 * in the R1_STATE_PRG. Rather than continue to wait, let's try to abort
	 * it with a HPI command to get back into R1_STATE_TRAN.
	 */
	if (err == -ETIMEDOUT && !mmc_interrupt_hpi(card))
		pr_warn("%s: Sanitize aborted\n", mmc_hostname(host));

	mmc_retune_release(host);

	pr_debug("%s: Sanitize completed\n", mmc_hostname(host));
	return err;
}
EXPORT_SYMBOL_GPL(mmc_sanitize);
1052