xref: /openbmc/linux/drivers/mmc/core/mmc_ops.c (revision 5e52a168)
12874c5fdSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-or-later
2da7fbe58SPierre Ossman /*
370f10482SPierre Ossman  *  linux/drivers/mmc/core/mmc_ops.h
4da7fbe58SPierre Ossman  *
5da7fbe58SPierre Ossman  *  Copyright 2006-2007 Pierre Ossman
6da7fbe58SPierre Ossman  */
7da7fbe58SPierre Ossman 
85a0e3ad6STejun Heo #include <linux/slab.h>
93ef77af1SPaul Gortmaker #include <linux/export.h>
10da7fbe58SPierre Ossman #include <linux/types.h>
11da7fbe58SPierre Ossman #include <linux/scatterlist.h>
12da7fbe58SPierre Ossman 
13da7fbe58SPierre Ossman #include <linux/mmc/host.h>
14da7fbe58SPierre Ossman #include <linux/mmc/card.h>
15da7fbe58SPierre Ossman #include <linux/mmc/mmc.h>
16da7fbe58SPierre Ossman 
17da7fbe58SPierre Ossman #include "core.h"
181cf8f7e5SUlf Hansson #include "card.h"
19c6dbab9cSAdrian Hunter #include "host.h"
20da7fbe58SPierre Ossman #include "mmc_ops.h"
21da7fbe58SPierre Ossman 
2224ed3bd0SUlf Hansson #define MMC_BKOPS_TIMEOUT_MS		(120 * 1000) /* 120s */
2324ed3bd0SUlf Hansson #define MMC_CACHE_FLUSH_TIMEOUT_MS	(30 * 1000) /* 30s */
2455c2b8b9SUlf Hansson #define MMC_SANITIZE_TIMEOUT_MS		(240 * 1000) /* 240s */
258fee476bSTrey Ramsay 
/*
 * Reference tuning block pattern for a 4-bit wide bus, compared against
 * the data received by mmc_send_tuning() to judge sampling-point quality.
 */
static const u8 tuning_blk_pattern_4bit[] = {
	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
};
3604cdbbfaSUlf Hansson 
/*
 * Reference tuning block pattern for an 8-bit wide bus, compared against
 * the data received by mmc_send_tuning() to judge sampling-point quality.
 */
static const u8 tuning_blk_pattern_8bit[] = {
	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
};
5504cdbbfaSUlf Hansson 
/*
 * __mmc_send_status - read the card's status word via MMC_SEND_STATUS.
 * @card: card to query
 * @status: if non-NULL, filled with the raw response word on success
 * @retries: number of command retries to allow
 *
 * Returns 0 on success or a negative error code from mmc_wait_for_cmd().
 */
int __mmc_send_status(struct mmc_card *card, u32 *status, unsigned int retries)
{
	int err;
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SEND_STATUS;
	/* Native (non-SPI) hosts must address the card by its RCA. */
	if (!mmc_host_is_spi(card->host))
		cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(card->host, &cmd, retries);
	if (err)
		return err;

	/* NOTE: callers are required to understand the difference
	 * between "native" and SPI format status words!
	 */
	if (status)
		*status = cmd.resp[0];

	return 0;
}
EXPORT_SYMBOL_GPL(__mmc_send_status);
792185bc2cSUlf Hansson 
/* Read the card status using the default number of command retries. */
int mmc_send_status(struct mmc_card *card, u32 *status)
{
	return __mmc_send_status(card, status, MMC_CMD_RETRIES);
}
EXPORT_SYMBOL_GPL(mmc_send_status);
85a27fbf2fSSeungwon Jeon 
86da7fbe58SPierre Ossman static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card)
87da7fbe58SPierre Ossman {
88c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
89da7fbe58SPierre Ossman 
90da7fbe58SPierre Ossman 	cmd.opcode = MMC_SELECT_CARD;
91da7fbe58SPierre Ossman 
92da7fbe58SPierre Ossman 	if (card) {
93da7fbe58SPierre Ossman 		cmd.arg = card->rca << 16;
94da7fbe58SPierre Ossman 		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
95da7fbe58SPierre Ossman 	} else {
96da7fbe58SPierre Ossman 		cmd.arg = 0;
97da7fbe58SPierre Ossman 		cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
98da7fbe58SPierre Ossman 	}
99da7fbe58SPierre Ossman 
1000899e741SMasahiro Yamada 	return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
101da7fbe58SPierre Ossman }
102da7fbe58SPierre Ossman 
/* Select the given card on its host. */
int mmc_select_card(struct mmc_card *card)
{

	return _mmc_select_card(card->host, card);
}
108da7fbe58SPierre Ossman 
/* Deselect all cards by issuing the select command with a NULL card. */
int mmc_deselect_cards(struct mmc_host *host)
{
	return _mmc_select_card(host, NULL);
}
113da7fbe58SPierre Ossman 
1143d705d14SSascha Hauer /*
1153d705d14SSascha Hauer  * Write the value specified in the device tree or board code into the optional
1163d705d14SSascha Hauer  * 16 bit Driver Stage Register. This can be used to tune raise/fall times and
1173d705d14SSascha Hauer  * drive strength of the DAT and CMD outputs. The actual meaning of a given
1183d705d14SSascha Hauer  * value is hardware dependant.
1193d705d14SSascha Hauer  * The presence of the DSR register can be determined from the CSD register,
1203d705d14SSascha Hauer  * bit 76.
1213d705d14SSascha Hauer  */
1223d705d14SSascha Hauer int mmc_set_dsr(struct mmc_host *host)
1233d705d14SSascha Hauer {
124c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
1253d705d14SSascha Hauer 
1263d705d14SSascha Hauer 	cmd.opcode = MMC_SET_DSR;
1273d705d14SSascha Hauer 
1283d705d14SSascha Hauer 	cmd.arg = (host->dsr << 16) | 0xffff;
1293d705d14SSascha Hauer 	cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
1303d705d14SSascha Hauer 
1313d705d14SSascha Hauer 	return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
1323d705d14SSascha Hauer }
1333d705d14SSascha Hauer 
/*
 * mmc_go_idle - reset the card via MMC_GO_IDLE_STATE.
 * @host: host to issue the command on
 *
 * Returns the command result; no response is expected from the card on
 * native hosts (MMC_RSP_NONE). Also clears the host's SPI CRC state.
 */
int mmc_go_idle(struct mmc_host *host)
{
	int err;
	struct mmc_command cmd = {};

	/*
	 * Non-SPI hosts need to prevent chipselect going active during
	 * GO_IDLE; that would put chips into SPI mode.  Remind them of
	 * that in case of hardware that won't pull up DAT3/nCS otherwise.
	 *
	 * SPI hosts ignore ios.chip_select; it's managed according to
	 * rules that must accommodate non-MMC slaves which this layer
	 * won't even know about.
	 */
	if (!mmc_host_is_spi(host)) {
		mmc_set_chip_select(host, MMC_CS_HIGH);
		mmc_delay(1);
	}

	cmd.opcode = MMC_GO_IDLE_STATE;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC;

	err = mmc_wait_for_cmd(host, &cmd, 0);

	/* Give the card a moment to complete the reset before touching nCS. */
	mmc_delay(1);

	if (!mmc_host_is_spi(host)) {
		mmc_set_chip_select(host, MMC_CS_DONTCARE);
		mmc_delay(1);
	}

	/* After a reset, SPI CRC checking starts out disabled. */
	host->use_spi_crc = 0;

	return err;
}
170da7fbe58SPierre Ossman 
/*
 * mmc_send_op_cond - negotiate operating conditions via MMC_SEND_OP_COND.
 * @host: host to use
 * @ocr: OCR mask to send (ignored on SPI hosts, where the arg is 0)
 * @rocr: if non-NULL, filled with the card's OCR response (native hosts only)
 *
 * Polls up to 100 times, 10ms apart, until the card signals that its
 * power-up sequence has finished. Returns 0 on success, -ETIMEDOUT if
 * the card never becomes ready, or another negative error code.
 */
int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
{
	struct mmc_command cmd = {};
	int i, err = 0;

	cmd.opcode = MMC_SEND_OP_COND;
	cmd.arg = mmc_host_is_spi(host) ? 0 : ocr;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;

	for (i = 100; i; i--) {
		err = mmc_wait_for_cmd(host, &cmd, 0);
		if (err)
			break;

		/* wait until reset completes */
		if (mmc_host_is_spi(host)) {
			if (!(cmd.resp[0] & R1_SPI_IDLE))
				break;
		} else {
			if (cmd.resp[0] & MMC_CARD_BUSY)
				break;
		}

		err = -ETIMEDOUT;

		mmc_delay(10);

		/*
		 * According to eMMC specification v5.1 section 6.4.3, we
		 * should issue CMD1 repeatedly in the idle state until
		 * the eMMC is ready. Otherwise some eMMC devices seem to enter
		 * the inactive mode after mmc_init_card() issued CMD0 when
		 * the eMMC device is busy.
		 */
		if (!ocr && !mmc_host_is_spi(host))
			cmd.arg = cmd.resp[0] | BIT(30);
	}

	if (rocr && !mmc_host_is_spi(host))
		*rocr = cmd.resp[0];

	return err;
}
214da7fbe58SPierre Ossman 
215da7fbe58SPierre Ossman int mmc_set_relative_addr(struct mmc_card *card)
216da7fbe58SPierre Ossman {
217c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
218da7fbe58SPierre Ossman 
219da7fbe58SPierre Ossman 	cmd.opcode = MMC_SET_RELATIVE_ADDR;
220da7fbe58SPierre Ossman 	cmd.arg = card->rca << 16;
221da7fbe58SPierre Ossman 	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
222da7fbe58SPierre Ossman 
2230899e741SMasahiro Yamada 	return mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
224da7fbe58SPierre Ossman }
225da7fbe58SPierre Ossman 
226af517150SDavid Brownell static int
227af517150SDavid Brownell mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode)
228da7fbe58SPierre Ossman {
229da7fbe58SPierre Ossman 	int err;
230c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
231da7fbe58SPierre Ossman 
232af517150SDavid Brownell 	cmd.opcode = opcode;
233af517150SDavid Brownell 	cmd.arg = arg;
234da7fbe58SPierre Ossman 	cmd.flags = MMC_RSP_R2 | MMC_CMD_AC;
235da7fbe58SPierre Ossman 
236af517150SDavid Brownell 	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
23717b0429dSPierre Ossman 	if (err)
238da7fbe58SPierre Ossman 		return err;
239da7fbe58SPierre Ossman 
240af517150SDavid Brownell 	memcpy(cxd, cmd.resp, sizeof(u32) * 4);
241da7fbe58SPierre Ossman 
24217b0429dSPierre Ossman 	return 0;
243da7fbe58SPierre Ossman }
244da7fbe58SPierre Ossman 
/*
 * Issue a single-block ADTC read of @len bytes into @buf.
 *
 * NOTE: void *buf, caller for the buf is required to use DMA-capable
 * buffer or on-stack buffer (with some overhead in callee).
 */
static int
mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
		u32 opcode, void *buf, unsigned len)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;

	mrq.cmd = &cmd;
	mrq.data = &data;

	cmd.opcode = opcode;
	cmd.arg = 0;

	/* NOTE HACK:  the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID.  Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	/* Whole payload is transferred as one block of @len bytes. */
	data.blksz = len;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;

	sg_init_one(&sg, buf, len);

	if (opcode == MMC_SEND_CSD || opcode == MMC_SEND_CID) {
		/*
		 * The spec states that CSR and CID accesses have a timeout
		 * of 64 clock cycles.
		 */
		data.timeout_ns = 0;
		data.timeout_clks = 64;
	} else
		mmc_set_data_timeout(&data, card);

	mmc_wait_for_req(host, &mrq);

	/* Command errors take precedence over data errors. */
	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return 0;
}
298da7fbe58SPierre Ossman 
/*
 * Read a 16-byte CSD/CID register over SPI into @cxd (four host-order
 * words). A kmalloc'd bounce buffer is used so the transfer buffer is
 * DMA-capable; the register arrives big-endian, hence be32_to_cpu().
 */
static int mmc_spi_send_cxd(struct mmc_host *host, u32 *cxd, u32 opcode)
{
	int ret, i;
	__be32 *cxd_tmp;

	cxd_tmp = kzalloc(16, GFP_KERNEL);
	if (!cxd_tmp)
		return -ENOMEM;

	ret = mmc_send_cxd_data(NULL, host, opcode, cxd_tmp, 16);
	if (ret)
		goto err;

	for (i = 0; i < 4; i++)
		cxd[i] = be32_to_cpu(cxd_tmp[i]);

err:
	kfree(cxd_tmp);
	return ret;
}
319af517150SDavid Brownell 
3200796e439SUlf Hansson int mmc_send_csd(struct mmc_card *card, u32 *csd)
3210796e439SUlf Hansson {
3220796e439SUlf Hansson 	if (mmc_host_is_spi(card->host))
323b53f0beeSYue Hu 		return mmc_spi_send_cxd(card->host, csd, MMC_SEND_CSD);
3240796e439SUlf Hansson 
3250796e439SUlf Hansson 	return mmc_send_cxd_native(card->host, card->rca << 16,	csd,
3260796e439SUlf Hansson 				MMC_SEND_CSD);
3270796e439SUlf Hansson }
3280796e439SUlf Hansson 
329a1473732SUlf Hansson int mmc_send_cid(struct mmc_host *host, u32 *cid)
330a1473732SUlf Hansson {
331a1473732SUlf Hansson 	if (mmc_host_is_spi(host))
332b53f0beeSYue Hu 		return mmc_spi_send_cxd(host, cid, MMC_SEND_CID);
333a1473732SUlf Hansson 
334c92e68d8SUlf Hansson 	return mmc_send_cxd_native(host, 0, cid, MMC_ALL_SEND_CID);
335a1473732SUlf Hansson }
336a1473732SUlf Hansson 
/*
 * mmc_get_ext_csd - read the 512-byte EXT_CSD register from the card.
 * @card: card to read from
 * @new_ext_csd: on success, set to a freshly kzalloc'd 512-byte buffer;
 *               ownership transfers to the caller, who must kfree() it
 *
 * Returns 0 on success, -EINVAL on bad arguments, -EOPNOTSUPP if the
 * card has no EXT_CSD, -ENOMEM, or a transfer error.
 */
int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)
{
	int err;
	u8 *ext_csd;

	if (!card || !new_ext_csd)
		return -EINVAL;

	if (!mmc_can_ext_csd(card))
		return -EOPNOTSUPP;

	/*
	 * As the ext_csd is so large and mostly unused, we don't store the
	 * raw block in mmc_card.
	 */
	ext_csd = kzalloc(512, GFP_KERNEL);
	if (!ext_csd)
		return -ENOMEM;

	err = mmc_send_cxd_data(card, card->host, MMC_SEND_EXT_CSD, ext_csd,
				512);
	if (err)
		kfree(ext_csd);
	else
		*new_ext_csd = ext_csd;

	return err;
}
EXPORT_SYMBOL_GPL(mmc_get_ext_csd);
366e21aa519SUlf Hansson 
367af517150SDavid Brownell int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp)
368af517150SDavid Brownell {
369c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
370af517150SDavid Brownell 	int err;
371af517150SDavid Brownell 
372af517150SDavid Brownell 	cmd.opcode = MMC_SPI_READ_OCR;
373af517150SDavid Brownell 	cmd.arg = highcap ? (1 << 30) : 0;
374af517150SDavid Brownell 	cmd.flags = MMC_RSP_SPI_R3;
375af517150SDavid Brownell 
376af517150SDavid Brownell 	err = mmc_wait_for_cmd(host, &cmd, 0);
377af517150SDavid Brownell 
378af517150SDavid Brownell 	*ocrp = cmd.resp[1];
379af517150SDavid Brownell 	return err;
380af517150SDavid Brownell }
381af517150SDavid Brownell 
382af517150SDavid Brownell int mmc_spi_set_crc(struct mmc_host *host, int use_crc)
383af517150SDavid Brownell {
384c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
385af517150SDavid Brownell 	int err;
386af517150SDavid Brownell 
387af517150SDavid Brownell 	cmd.opcode = MMC_SPI_CRC_ON_OFF;
388af517150SDavid Brownell 	cmd.flags = MMC_RSP_SPI_R1;
389af517150SDavid Brownell 	cmd.arg = use_crc;
390af517150SDavid Brownell 
391af517150SDavid Brownell 	err = mmc_wait_for_cmd(host, &cmd, 0);
392af517150SDavid Brownell 	if (!err)
393af517150SDavid Brownell 		host->use_spi_crc = use_crc;
394af517150SDavid Brownell 	return err;
395af517150SDavid Brownell }
396af517150SDavid Brownell 
39720348d19SUlf Hansson static int mmc_switch_status_error(struct mmc_host *host, u32 status)
398ed16f58dSAdrian Hunter {
399ed16f58dSAdrian Hunter 	if (mmc_host_is_spi(host)) {
400ed16f58dSAdrian Hunter 		if (status & R1_SPI_ILLEGAL_COMMAND)
401ed16f58dSAdrian Hunter 			return -EBADMSG;
402ed16f58dSAdrian Hunter 	} else {
403a94a7483SShawn Lin 		if (R1_STATUS(status))
404ed16f58dSAdrian Hunter 			pr_warn("%s: unexpected status %#x after switch\n",
405ed16f58dSAdrian Hunter 				mmc_hostname(host), status);
406ed16f58dSAdrian Hunter 		if (status & R1_SWITCH_ERROR)
407ed16f58dSAdrian Hunter 			return -EBADMSG;
408ed16f58dSAdrian Hunter 	}
409ed16f58dSAdrian Hunter 	return 0;
410ed16f58dSAdrian Hunter }
411ed16f58dSAdrian Hunter 
/*
 * Read the card status and map it to a switch error code.
 * Caller must hold re-tuning.
 */
int mmc_switch_status(struct mmc_card *card, bool crc_err_fatal)
{
	u32 status;
	int err;

	err = mmc_send_status(card, &status);
	/* Optionally tolerate a CRC error on the status command itself. */
	if (!crc_err_fatal && err == -EILSEQ)
		return 0;
	if (err)
		return err;

	return mmc_switch_status_error(card->host, status);
}
42620348d19SUlf Hansson 
/*
 * mmc_busy_status - probe whether the card is still signalling busy.
 * @card: card being polled
 * @retry_crc_err: treat a CRC error from the status command as "busy"
 * @busy_cmd: which operation's completion is being awaited; selects how
 *            the status word is validated
 * @busy: out-parameter, set to the busy state on success
 *
 * Prefers the host's ->card_busy() callback when available; otherwise
 * sends a status command and inspects the response.
 * Returns 0 on success or a negative error code.
 */
static int mmc_busy_status(struct mmc_card *card, bool retry_crc_err,
			   enum mmc_busy_cmd busy_cmd, bool *busy)
{
	struct mmc_host *host = card->host;
	u32 status = 0;
	int err;

	/* A host-level busy indication avoids the status command entirely. */
	if (host->ops->card_busy) {
		*busy = host->ops->card_busy(host);
		return 0;
	}

	err = mmc_send_status(card, &status);
	if (retry_crc_err && err == -EILSEQ) {
		*busy = true;
		return 0;
	}
	if (err)
		return err;

	/* Validate the status word according to the awaited operation. */
	switch (busy_cmd) {
	case MMC_BUSY_CMD6:
		err = mmc_switch_status_error(card->host, status);
		break;
	case MMC_BUSY_ERASE:
		err = R1_STATUS(status) ? -EIO : 0;
		break;
	case MMC_BUSY_HPI:
		break;
	default:
		err = -EINVAL;
	}

	if (err)
		return err;

	*busy = !mmc_ready_for_data(status);
	return 0;
}
4666972096aSUlf Hansson 
/*
 * __mmc_poll_for_busy - wait, up to @timeout_ms, for the card to leave
 * the busy state.
 * @send_status: whether polling with the status command is allowed
 * @retry_crc_err: treat CRC errors while polling as "still busy"
 * @busy_cmd: the operation whose completion is being awaited
 *
 * Uses exponential backoff between polls. Returns 0 on success,
 * -ETIMEDOUT if the card stays busy, or another negative error code.
 */
static int __mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
			       bool send_status, bool retry_crc_err,
			       enum mmc_busy_cmd busy_cmd)
{
	struct mmc_host *host = card->host;
	int err;
	unsigned long timeout;
	unsigned int udelay = 32, udelay_max = 32768;
	bool expired = false;
	bool busy = false;

	/*
	 * In cases when not allowed to poll by using CMD13 or because we aren't
	 * capable of polling by using ->card_busy(), then rely on waiting the
	 * stated timeout to be sufficient.
	 */
	if (!send_status && !host->ops->card_busy) {
		mmc_delay(timeout_ms);
		return 0;
	}

	timeout = jiffies + msecs_to_jiffies(timeout_ms) + 1;
	do {
		/*
		 * Due to the possibility of being preempted while polling,
		 * check the expiration time first.
		 */
		expired = time_after(jiffies, timeout);

		err = mmc_busy_status(card, retry_crc_err, busy_cmd, &busy);
		if (err)
			return err;

		/* Timeout if the device still remains busy. */
		if (expired && busy) {
			pr_err("%s: Card stuck being busy! %s\n",
				mmc_hostname(host), __func__);
			return -ETIMEDOUT;
		}

		/* Throttle the polling rate to avoid hogging the CPU. */
		if (busy) {
			usleep_range(udelay, udelay * 2);
			if (udelay < udelay_max)
				udelay *= 2;
		}
	} while (busy);

	return 0;
}
517716bdb89SUlf Hansson 
/* Poll for busy with status-command polling allowed and CRC errors fatal. */
int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
		      enum mmc_busy_cmd busy_cmd)
{
	return __mmc_poll_for_busy(card, timeout_ms, true, false, busy_cmd);
}
5230d84c3e6SUlf Hansson 
/**
 *	__mmc_switch - modify EXT_CSD register
 *	@card: the MMC card associated with the data transfer
 *	@set: cmd set values
 *	@index: EXT_CSD register index
 *	@value: value to program into EXT_CSD register
 *	@timeout_ms: timeout (ms) for operation performed by register write,
 *                   timeout of zero implies maximum possible timeout
 *	@timing: new timing to change to
 *	@send_status: send status cmd to poll for busy
 *	@retry_crc_err: retry when CRC errors when polling with CMD13 for busy
 *	@retries: number of retries
 *
 *	Modifies the EXT_CSD register for selected card.
 */
int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
		unsigned int timeout_ms, unsigned char timing,
		bool send_status, bool retry_crc_err, unsigned int retries)
{
	struct mmc_host *host = card->host;
	int err;
	struct mmc_command cmd = {};
	bool use_r1b_resp = true;
	unsigned char old_timing = host->ios.timing;

	/* Hold re-tuning for the duration of the switch sequence. */
	mmc_retune_hold(host);

	if (!timeout_ms) {
		pr_warn("%s: unspecified timeout for CMD6 - use generic\n",
			mmc_hostname(host));
		timeout_ms = card->ext_csd.generic_cmd6_time;
	}

	/*
	 * If the max_busy_timeout of the host is specified, make sure it's
	 * enough to fit the used timeout_ms. In case it's not, let's instruct
	 * the host to avoid HW busy detection, by converting to a R1 response
	 * instead of a R1B. Note, some hosts requires R1B, which also means
	 * they are on their own when it comes to deal with the busy timeout.
	 */
	if (!(host->caps & MMC_CAP_NEED_RSP_BUSY) && host->max_busy_timeout &&
	    (timeout_ms > host->max_busy_timeout))
		use_r1b_resp = false;

	/* Pack mode/index/value/set into the MMC_SWITCH argument word. */
	cmd.opcode = MMC_SWITCH;
	cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
		  (index << 16) |
		  (value << 8) |
		  set;
	cmd.flags = MMC_CMD_AC;
	if (use_r1b_resp) {
		cmd.flags |= MMC_RSP_SPI_R1B | MMC_RSP_R1B;
		cmd.busy_timeout = timeout_ms;
	} else {
		cmd.flags |= MMC_RSP_SPI_R1 | MMC_RSP_R1;
	}

	err = mmc_wait_for_cmd(host, &cmd, retries);
	if (err)
		goto out;

	/*If SPI or used HW busy detection above, then we don't need to poll. */
	if (((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp) ||
		mmc_host_is_spi(host))
		goto out_tim;

	/* Let's try to poll to find out when the command is completed. */
	err = __mmc_poll_for_busy(card, timeout_ms, send_status, retry_crc_err,
				  MMC_BUSY_CMD6);
	if (err)
		goto out;

out_tim:
	/* Switch to new timing before check switch status. */
	if (timing)
		mmc_set_timing(host, timing);

	if (send_status) {
		err = mmc_switch_status(card, true);
		/* On failure, fall back to the timing in effect before. */
		if (err && timing)
			mmc_set_timing(host, old_timing);
	}
out:
	mmc_retune_release(host);

	return err;
}
611950d56acSJaehoon Chung 
/*
 * mmc_switch - modify an EXT_CSD register with default options: no timing
 * change, status polling enabled, CRC errors fatal, default retries.
 */
int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
		unsigned int timeout_ms)
{
	return __mmc_switch(card, set, index, value, timeout_ms, 0,
			    true, false, MMC_CMD_RETRIES);
}
EXPORT_SYMBOL_GPL(mmc_switch);
619da7fbe58SPierre Ossman 
/*
 * mmc_send_tuning - execute one tuning command and verify the received
 * block against the expected tuning pattern.
 * @host: host to tune
 * @opcode: tuning command opcode
 * @cmd_error: if non-NULL, set to the tuning command's error (or 0)
 *
 * The pattern and block size are selected from the current bus width
 * (8-bit or 4-bit; other widths are rejected with -EINVAL). Returns 0
 * when the data matches, -EIO on a pattern mismatch, or another
 * negative error code.
 */
int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;
	struct mmc_ios *ios = &host->ios;
	const u8 *tuning_block_pattern;
	int size, err = 0;
	u8 *data_buf;

	if (ios->bus_width == MMC_BUS_WIDTH_8) {
		tuning_block_pattern = tuning_blk_pattern_8bit;
		size = sizeof(tuning_blk_pattern_8bit);
	} else if (ios->bus_width == MMC_BUS_WIDTH_4) {
		tuning_block_pattern = tuning_blk_pattern_4bit;
		size = sizeof(tuning_blk_pattern_4bit);
	} else
		return -EINVAL;

	data_buf = kzalloc(size, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;

	mrq.cmd = &cmd;
	mrq.data = &data;

	cmd.opcode = opcode;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = size;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	/*
	 * According to the tuning specs, Tuning process
	 * is normally shorter 40 executions of CMD19,
	 * and timeout value should be shorter than 150 ms
	 */
	data.timeout_ns = 150 * NSEC_PER_MSEC;

	data.sg = &sg;
	data.sg_len = 1;
	sg_init_one(&sg, data_buf, size);

	mmc_wait_for_req(host, &mrq);

	/* Report the raw command error to the caller even on success. */
	if (cmd_error)
		*cmd_error = cmd.error;

	if (cmd.error) {
		err = cmd.error;
		goto out;
	}

	if (data.error) {
		err = data.error;
		goto out;
	}

	/* A byte-for-byte match with the reference pattern means success. */
	if (memcmp(data_buf, tuning_block_pattern, size))
		err = -EIO;

out:
	kfree(data_buf);
	return err;
}
EXPORT_SYMBOL_GPL(mmc_send_tuning);
688996903deSMinda Chen 
689e711f030SAdrian Hunter int mmc_abort_tuning(struct mmc_host *host, u32 opcode)
690e711f030SAdrian Hunter {
691c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
692e711f030SAdrian Hunter 
693e711f030SAdrian Hunter 	/*
694e711f030SAdrian Hunter 	 * eMMC specification specifies that CMD12 can be used to stop a tuning
695e711f030SAdrian Hunter 	 * command, but SD specification does not, so do nothing unless it is
696e711f030SAdrian Hunter 	 * eMMC.
697e711f030SAdrian Hunter 	 */
698e711f030SAdrian Hunter 	if (opcode != MMC_SEND_TUNING_BLOCK_HS200)
699e711f030SAdrian Hunter 		return 0;
700e711f030SAdrian Hunter 
701e711f030SAdrian Hunter 	cmd.opcode = MMC_STOP_TRANSMISSION;
702e711f030SAdrian Hunter 	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
703e711f030SAdrian Hunter 
704e711f030SAdrian Hunter 	/*
705e711f030SAdrian Hunter 	 * For drivers that override R1 to R1b, set an arbitrary timeout based
706e711f030SAdrian Hunter 	 * on the tuning timeout i.e. 150ms.
707e711f030SAdrian Hunter 	 */
708e711f030SAdrian Hunter 	cmd.busy_timeout = 150;
709e711f030SAdrian Hunter 
710e711f030SAdrian Hunter 	return mmc_wait_for_cmd(host, &cmd, 0);
711e711f030SAdrian Hunter }
712e711f030SAdrian Hunter EXPORT_SYMBOL_GPL(mmc_abort_tuning);
713e711f030SAdrian Hunter 
71422113efdSAries Lee static int
71522113efdSAries Lee mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
71622113efdSAries Lee 		  u8 len)
71722113efdSAries Lee {
718c7836d15SMasahiro Yamada 	struct mmc_request mrq = {};
719c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
720c7836d15SMasahiro Yamada 	struct mmc_data data = {};
72122113efdSAries Lee 	struct scatterlist sg;
72222113efdSAries Lee 	u8 *data_buf;
72322113efdSAries Lee 	u8 *test_buf;
72422113efdSAries Lee 	int i, err;
72522113efdSAries Lee 	static u8 testdata_8bit[8] = { 0x55, 0xaa, 0, 0, 0, 0, 0, 0 };
72622113efdSAries Lee 	static u8 testdata_4bit[4] = { 0x5a, 0, 0, 0 };
72722113efdSAries Lee 
72822113efdSAries Lee 	/* dma onto stack is unsafe/nonportable, but callers to this
72922113efdSAries Lee 	 * routine normally provide temporary on-stack buffers ...
73022113efdSAries Lee 	 */
73122113efdSAries Lee 	data_buf = kmalloc(len, GFP_KERNEL);
73222113efdSAries Lee 	if (!data_buf)
73322113efdSAries Lee 		return -ENOMEM;
73422113efdSAries Lee 
73522113efdSAries Lee 	if (len == 8)
73622113efdSAries Lee 		test_buf = testdata_8bit;
73722113efdSAries Lee 	else if (len == 4)
73822113efdSAries Lee 		test_buf = testdata_4bit;
73922113efdSAries Lee 	else {
740a3c76eb9SGirish K S 		pr_err("%s: Invalid bus_width %d\n",
74122113efdSAries Lee 		       mmc_hostname(host), len);
74222113efdSAries Lee 		kfree(data_buf);
74322113efdSAries Lee 		return -EINVAL;
74422113efdSAries Lee 	}
74522113efdSAries Lee 
74622113efdSAries Lee 	if (opcode == MMC_BUS_TEST_W)
74722113efdSAries Lee 		memcpy(data_buf, test_buf, len);
74822113efdSAries Lee 
74922113efdSAries Lee 	mrq.cmd = &cmd;
75022113efdSAries Lee 	mrq.data = &data;
75122113efdSAries Lee 	cmd.opcode = opcode;
75222113efdSAries Lee 	cmd.arg = 0;
75322113efdSAries Lee 
75422113efdSAries Lee 	/* NOTE HACK:  the MMC_RSP_SPI_R1 is always correct here, but we
75522113efdSAries Lee 	 * rely on callers to never use this with "native" calls for reading
75622113efdSAries Lee 	 * CSD or CID.  Native versions of those commands use the R2 type,
75722113efdSAries Lee 	 * not R1 plus a data block.
75822113efdSAries Lee 	 */
75922113efdSAries Lee 	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
76022113efdSAries Lee 
76122113efdSAries Lee 	data.blksz = len;
76222113efdSAries Lee 	data.blocks = 1;
76322113efdSAries Lee 	if (opcode == MMC_BUS_TEST_R)
76422113efdSAries Lee 		data.flags = MMC_DATA_READ;
76522113efdSAries Lee 	else
76622113efdSAries Lee 		data.flags = MMC_DATA_WRITE;
76722113efdSAries Lee 
76822113efdSAries Lee 	data.sg = &sg;
76922113efdSAries Lee 	data.sg_len = 1;
77084532e33SMinjian Wu 	mmc_set_data_timeout(&data, card);
77122113efdSAries Lee 	sg_init_one(&sg, data_buf, len);
77222113efdSAries Lee 	mmc_wait_for_req(host, &mrq);
77322113efdSAries Lee 	err = 0;
77422113efdSAries Lee 	if (opcode == MMC_BUS_TEST_R) {
77522113efdSAries Lee 		for (i = 0; i < len / 4; i++)
77622113efdSAries Lee 			if ((test_buf[i] ^ data_buf[i]) != 0xff) {
77722113efdSAries Lee 				err = -EIO;
77822113efdSAries Lee 				break;
77922113efdSAries Lee 			}
78022113efdSAries Lee 	}
78122113efdSAries Lee 	kfree(data_buf);
78222113efdSAries Lee 
78322113efdSAries Lee 	if (cmd.error)
78422113efdSAries Lee 		return cmd.error;
78522113efdSAries Lee 	if (data.error)
78622113efdSAries Lee 		return data.error;
78722113efdSAries Lee 
78822113efdSAries Lee 	return err;
78922113efdSAries Lee }
79022113efdSAries Lee 
79122113efdSAries Lee int mmc_bus_test(struct mmc_card *card, u8 bus_width)
79222113efdSAries Lee {
7930899e741SMasahiro Yamada 	int width;
79422113efdSAries Lee 
79522113efdSAries Lee 	if (bus_width == MMC_BUS_WIDTH_8)
79622113efdSAries Lee 		width = 8;
79722113efdSAries Lee 	else if (bus_width == MMC_BUS_WIDTH_4)
79822113efdSAries Lee 		width = 4;
79922113efdSAries Lee 	else if (bus_width == MMC_BUS_WIDTH_1)
80022113efdSAries Lee 		return 0; /* no need for test */
80122113efdSAries Lee 	else
80222113efdSAries Lee 		return -EINVAL;
80322113efdSAries Lee 
80422113efdSAries Lee 	/*
80522113efdSAries Lee 	 * Ignore errors from BUS_TEST_W.  BUS_TEST_R will fail if there
80622113efdSAries Lee 	 * is a problem.  This improves chances that the test will work.
80722113efdSAries Lee 	 */
80822113efdSAries Lee 	mmc_send_bus_test(card, card->host, MMC_BUS_TEST_W, width);
8090899e741SMasahiro Yamada 	return mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
81022113efdSAries Lee }
811eb0d8f13SJaehoon Chung 
/*
 * Issue the card's HPI command (as advertised in EXT_CSD) to interrupt an
 * ongoing programming operation, then wait until the card has left the
 * busy state — either via HW busy detection (R1B) or by polling.
 */
static int mmc_send_hpi_cmd(struct mmc_card *card)
{
	unsigned int busy_timeout_ms = card->ext_csd.out_of_int_time;
	struct mmc_host *host = card->host;
	bool use_r1b_resp = true;
	struct mmc_command cmd = {};
	int err;

	cmd.opcode = card->ext_csd.hpi_cmd;
	/* RCA in the upper 16 bits; bit 0 set marks this as an HPI request. */
	cmd.arg = card->rca << 16 | 1;

	/*
	 * Make sure the host's max_busy_timeout fit the needed timeout for HPI.
	 * In case it doesn't, let's instruct the host to avoid HW busy
	 * detection, by using a R1 response instead of R1B.
	 */
	if (host->max_busy_timeout && busy_timeout_ms > host->max_busy_timeout)
		use_r1b_resp = false;

	/* Only a CMD12-based HPI carries an R1B (busy) response. */
	if (cmd.opcode == MMC_STOP_TRANSMISSION && use_r1b_resp) {
		cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
		cmd.busy_timeout = busy_timeout_ms;
	} else {
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
		use_r1b_resp = false;
	}

	err = mmc_wait_for_cmd(host, &cmd, 0);
	if (err) {
		pr_warn("%s: HPI error %d. Command response %#x\n",
			mmc_hostname(host), err, cmd.resp[0]);
		return err;
	}

	/* No need to poll when using HW busy detection. */
	if (host->caps & MMC_CAP_WAIT_WHILE_BUSY && use_r1b_resp)
		return 0;

	/* Let's poll to find out when the HPI request completes. */
	return mmc_poll_for_busy(card, busy_timeout_ms, MMC_BUSY_HPI);
}
853148bcab2SUlf Hansson 
8540f2c0512SUlf Hansson /**
8550f2c0512SUlf Hansson  *	mmc_interrupt_hpi - Issue for High priority Interrupt
8560f2c0512SUlf Hansson  *	@card: the MMC card associated with the HPI transfer
8570f2c0512SUlf Hansson  *
8580f2c0512SUlf Hansson  *	Issued High Priority Interrupt, and check for card status
8590f2c0512SUlf Hansson  *	until out-of prg-state.
8600f2c0512SUlf Hansson  */
86144aebc16SJason Yan static int mmc_interrupt_hpi(struct mmc_card *card)
8620f2c0512SUlf Hansson {
8630f2c0512SUlf Hansson 	int err;
8640f2c0512SUlf Hansson 	u32 status;
8650f2c0512SUlf Hansson 
8660f2c0512SUlf Hansson 	if (!card->ext_csd.hpi_en) {
8670f2c0512SUlf Hansson 		pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
8680f2c0512SUlf Hansson 		return 1;
8690f2c0512SUlf Hansson 	}
8700f2c0512SUlf Hansson 
8710f2c0512SUlf Hansson 	err = mmc_send_status(card, &status);
8720f2c0512SUlf Hansson 	if (err) {
8730f2c0512SUlf Hansson 		pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
8740f2c0512SUlf Hansson 		goto out;
8750f2c0512SUlf Hansson 	}
8760f2c0512SUlf Hansson 
8770f2c0512SUlf Hansson 	switch (R1_CURRENT_STATE(status)) {
8780f2c0512SUlf Hansson 	case R1_STATE_IDLE:
8790f2c0512SUlf Hansson 	case R1_STATE_READY:
8800f2c0512SUlf Hansson 	case R1_STATE_STBY:
8810f2c0512SUlf Hansson 	case R1_STATE_TRAN:
8820f2c0512SUlf Hansson 		/*
8830f2c0512SUlf Hansson 		 * In idle and transfer states, HPI is not needed and the caller
8840f2c0512SUlf Hansson 		 * can issue the next intended command immediately
8850f2c0512SUlf Hansson 		 */
8860f2c0512SUlf Hansson 		goto out;
8870f2c0512SUlf Hansson 	case R1_STATE_PRG:
8880f2c0512SUlf Hansson 		break;
8890f2c0512SUlf Hansson 	default:
8900f2c0512SUlf Hansson 		/* In all other states, it's illegal to issue HPI */
8910f2c0512SUlf Hansson 		pr_debug("%s: HPI cannot be sent. Card state=%d\n",
8920f2c0512SUlf Hansson 			mmc_hostname(card->host), R1_CURRENT_STATE(status));
8930f2c0512SUlf Hansson 		err = -EINVAL;
8940f2c0512SUlf Hansson 		goto out;
8950f2c0512SUlf Hansson 	}
8960f2c0512SUlf Hansson 
8979f94d047SUlf Hansson 	err = mmc_send_hpi_cmd(card);
8980f2c0512SUlf Hansson out:
8990f2c0512SUlf Hansson 	return err;
9000f2c0512SUlf Hansson }
9010f2c0512SUlf Hansson 
902148bcab2SUlf Hansson int mmc_can_ext_csd(struct mmc_card *card)
903148bcab2SUlf Hansson {
904148bcab2SUlf Hansson 	return (card && card->csd.mmca_vsn > CSD_SPEC_VER_3);
905148bcab2SUlf Hansson }
906b658af71SAdrian Hunter 
9071cf8f7e5SUlf Hansson static int mmc_read_bkops_status(struct mmc_card *card)
9081cf8f7e5SUlf Hansson {
9091cf8f7e5SUlf Hansson 	int err;
9101cf8f7e5SUlf Hansson 	u8 *ext_csd;
9111cf8f7e5SUlf Hansson 
9121cf8f7e5SUlf Hansson 	err = mmc_get_ext_csd(card, &ext_csd);
9131cf8f7e5SUlf Hansson 	if (err)
9141cf8f7e5SUlf Hansson 		return err;
9151cf8f7e5SUlf Hansson 
9161cf8f7e5SUlf Hansson 	card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS];
9171cf8f7e5SUlf Hansson 	card->ext_csd.raw_exception_status = ext_csd[EXT_CSD_EXP_EVENTS_STATUS];
9181cf8f7e5SUlf Hansson 	kfree(ext_csd);
9191cf8f7e5SUlf Hansson 	return 0;
9201cf8f7e5SUlf Hansson }
9211cf8f7e5SUlf Hansson 
9221cf8f7e5SUlf Hansson /**
9230c204979SUlf Hansson  *	mmc_run_bkops - Run BKOPS for supported cards
9240c204979SUlf Hansson  *	@card: MMC card to run BKOPS for
9251cf8f7e5SUlf Hansson  *
9260c204979SUlf Hansson  *	Run background operations synchronously for cards having manual BKOPS
9270c204979SUlf Hansson  *	enabled and in case it reports urgent BKOPS level.
9281cf8f7e5SUlf Hansson */
9290c204979SUlf Hansson void mmc_run_bkops(struct mmc_card *card)
9301cf8f7e5SUlf Hansson {
9311cf8f7e5SUlf Hansson 	int err;
9321cf8f7e5SUlf Hansson 
9330c204979SUlf Hansson 	if (!card->ext_csd.man_bkops_en)
9341cf8f7e5SUlf Hansson 		return;
9351cf8f7e5SUlf Hansson 
9361cf8f7e5SUlf Hansson 	err = mmc_read_bkops_status(card);
9371cf8f7e5SUlf Hansson 	if (err) {
9381cf8f7e5SUlf Hansson 		pr_err("%s: Failed to read bkops status: %d\n",
9391cf8f7e5SUlf Hansson 		       mmc_hostname(card->host), err);
9401cf8f7e5SUlf Hansson 		return;
9411cf8f7e5SUlf Hansson 	}
9421cf8f7e5SUlf Hansson 
9430c204979SUlf Hansson 	if (!card->ext_csd.raw_bkops_status ||
9440c204979SUlf Hansson 	    card->ext_csd.raw_bkops_status < EXT_CSD_BKOPS_LEVEL_2)
9451cf8f7e5SUlf Hansson 		return;
9461cf8f7e5SUlf Hansson 
9471cf8f7e5SUlf Hansson 	mmc_retune_hold(card->host);
9481cf8f7e5SUlf Hansson 
9490c204979SUlf Hansson 	/*
9500c204979SUlf Hansson 	 * For urgent BKOPS status, LEVEL_2 and higher, let's execute
9510c204979SUlf Hansson 	 * synchronously. Future wise, we may consider to start BKOPS, for less
9520c204979SUlf Hansson 	 * urgent levels by using an asynchronous background task, when idle.
9530c204979SUlf Hansson 	 */
9540c204979SUlf Hansson 	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
95524ed3bd0SUlf Hansson 			 EXT_CSD_BKOPS_START, 1, MMC_BKOPS_TIMEOUT_MS);
9560c204979SUlf Hansson 	if (err)
9571cf8f7e5SUlf Hansson 		pr_warn("%s: Error %d starting bkops\n",
9581cf8f7e5SUlf Hansson 			mmc_hostname(card->host), err);
9591cf8f7e5SUlf Hansson 
9601cf8f7e5SUlf Hansson 	mmc_retune_release(card->host);
9611cf8f7e5SUlf Hansson }
9620c204979SUlf Hansson EXPORT_SYMBOL(mmc_run_bkops);
9631cf8f7e5SUlf Hansson 
964d9df1737SUlf Hansson /*
965d9df1737SUlf Hansson  * Flush the cache to the non-volatile storage.
966d9df1737SUlf Hansson  */
967d9df1737SUlf Hansson int mmc_flush_cache(struct mmc_card *card)
968d9df1737SUlf Hansson {
969d9df1737SUlf Hansson 	int err = 0;
970d9df1737SUlf Hansson 
971d9df1737SUlf Hansson 	if (mmc_card_mmc(card) &&
972d9df1737SUlf Hansson 			(card->ext_csd.cache_size > 0) &&
973d9df1737SUlf Hansson 			(card->ext_csd.cache_ctrl & 1)) {
974d9df1737SUlf Hansson 		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
97524ed3bd0SUlf Hansson 				 EXT_CSD_FLUSH_CACHE, 1,
97624ed3bd0SUlf Hansson 				 MMC_CACHE_FLUSH_TIMEOUT_MS);
977d9df1737SUlf Hansson 		if (err)
978d9df1737SUlf Hansson 			pr_err("%s: cache flush error %d\n",
979d9df1737SUlf Hansson 					mmc_hostname(card->host), err);
980d9df1737SUlf Hansson 	}
981d9df1737SUlf Hansson 
982d9df1737SUlf Hansson 	return err;
983d9df1737SUlf Hansson }
984d9df1737SUlf Hansson EXPORT_SYMBOL(mmc_flush_cache);
985d9df1737SUlf Hansson 
986b658af71SAdrian Hunter static int mmc_cmdq_switch(struct mmc_card *card, bool enable)
987b658af71SAdrian Hunter {
988b658af71SAdrian Hunter 	u8 val = enable ? EXT_CSD_CMDQ_MODE_ENABLED : 0;
989b658af71SAdrian Hunter 	int err;
990b658af71SAdrian Hunter 
991b658af71SAdrian Hunter 	if (!card->ext_csd.cmdq_support)
992b658af71SAdrian Hunter 		return -EOPNOTSUPP;
993b658af71SAdrian Hunter 
994b658af71SAdrian Hunter 	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_CMDQ_MODE_EN,
995b658af71SAdrian Hunter 			 val, card->ext_csd.generic_cmd6_time);
996b658af71SAdrian Hunter 	if (!err)
997b658af71SAdrian Hunter 		card->ext_csd.cmdq_en = enable;
998b658af71SAdrian Hunter 
999b658af71SAdrian Hunter 	return err;
1000b658af71SAdrian Hunter }
1001b658af71SAdrian Hunter 
/**
 * mmc_cmdq_enable - enable command queueing on the card
 * @card: card to enable CMDQ for
 *
 * Returns 0 on success, -EOPNOTSUPP if the card lacks CMDQ support, or a
 * negative error from the underlying EXT_CSD switch.
 */
int mmc_cmdq_enable(struct mmc_card *card)
{
	return mmc_cmdq_switch(card, true);
}
EXPORT_SYMBOL_GPL(mmc_cmdq_enable);
1007b658af71SAdrian Hunter 
/**
 * mmc_cmdq_disable - disable command queueing on the card
 * @card: card to disable CMDQ for
 *
 * Returns 0 on success, -EOPNOTSUPP if the card lacks CMDQ support, or a
 * negative error from the underlying EXT_CSD switch.
 */
int mmc_cmdq_disable(struct mmc_card *card)
{
	return mmc_cmdq_switch(card, false);
}
EXPORT_SYMBOL_GPL(mmc_cmdq_disable);
101355c2b8b9SUlf Hansson 
/**
 * mmc_sanitize - start an eMMC SANITIZE operation and wait for completion
 * @card: card to sanitize
 * @timeout_ms: busy timeout in ms; 0 selects MMC_SANITIZE_TIMEOUT_MS (240 s)
 *
 * Starts SANITIZE via an EXT_CSD switch. If the switch times out, the card
 * is likely still busy programming, so an HPI is attempted to abort it.
 * Returns 0 on success, -EOPNOTSUPP when the card cannot sanitize, or the
 * error from the EXT_CSD switch.
 */
int mmc_sanitize(struct mmc_card *card, unsigned int timeout_ms)
{
	struct mmc_host *host = card->host;
	int err;

	if (!mmc_can_sanitize(card)) {
		pr_warn("%s: Sanitize not supported\n", mmc_hostname(host));
		return -EOPNOTSUPP;
	}

	if (!timeout_ms)
		timeout_ms = MMC_SANITIZE_TIMEOUT_MS;

	pr_debug("%s: Sanitize in progress...\n", mmc_hostname(host));

	/* Hold re-tuning across the operation; released below. */
	mmc_retune_hold(host);

	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_SANITIZE_START,
			 1, timeout_ms);
	if (err)
		pr_err("%s: Sanitize failed err=%d\n", mmc_hostname(host), err);

	/*
	 * If the sanitize operation timed out, the card is probably still busy
	 * in the R1_STATE_PRG. Rather than continue to wait, let's try to abort
	 * it with a HPI command to get back into R1_STATE_TRAN.
	 */
	if (err == -ETIMEDOUT && !mmc_interrupt_hpi(card))
		pr_warn("%s: Sanitize aborted\n", mmc_hostname(host));

	mmc_retune_release(host);

	pr_debug("%s: Sanitize completed\n", mmc_hostname(host));
	return err;
}
EXPORT_SYMBOL_GPL(mmc_sanitize);
1050