xref: /openbmc/linux/drivers/mmc/core/mmc_ops.c (revision 533a6cfe)
12874c5fdSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-or-later
2da7fbe58SPierre Ossman /*
370f10482SPierre Ossman  *  linux/drivers/mmc/core/mmc_ops.h
4da7fbe58SPierre Ossman  *
5da7fbe58SPierre Ossman  *  Copyright 2006-2007 Pierre Ossman
6da7fbe58SPierre Ossman  */
7da7fbe58SPierre Ossman 
85a0e3ad6STejun Heo #include <linux/slab.h>
93ef77af1SPaul Gortmaker #include <linux/export.h>
10da7fbe58SPierre Ossman #include <linux/types.h>
11da7fbe58SPierre Ossman #include <linux/scatterlist.h>
12da7fbe58SPierre Ossman 
13da7fbe58SPierre Ossman #include <linux/mmc/host.h>
14da7fbe58SPierre Ossman #include <linux/mmc/card.h>
15da7fbe58SPierre Ossman #include <linux/mmc/mmc.h>
16da7fbe58SPierre Ossman 
17da7fbe58SPierre Ossman #include "core.h"
181cf8f7e5SUlf Hansson #include "card.h"
19c6dbab9cSAdrian Hunter #include "host.h"
20da7fbe58SPierre Ossman #include "mmc_ops.h"
21da7fbe58SPierre Ossman 
#define MMC_OPS_TIMEOUT_MS		(10 * 60 * 1000) /* 10 min */
2324ed3bd0SUlf Hansson #define MMC_BKOPS_TIMEOUT_MS		(120 * 1000) /* 120s */
2424ed3bd0SUlf Hansson #define MMC_CACHE_FLUSH_TIMEOUT_MS	(30 * 1000) /* 30s */
258fee476bSTrey Ramsay 
/*
 * Expected tuning block contents for a 4-bit bus width; the block read
 * back during tuning is compared against this pattern (see
 * mmc_send_tuning() below).
 */
static const u8 tuning_blk_pattern_4bit[] = {
	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
};
3604cdbbfaSUlf Hansson 
/*
 * Expected tuning block contents for an 8-bit bus width; the block read
 * back during tuning is compared against this pattern (see
 * mmc_send_tuning() below).
 */
static const u8 tuning_blk_pattern_8bit[] = {
	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
};
5504cdbbfaSUlf Hansson 
562185bc2cSUlf Hansson int __mmc_send_status(struct mmc_card *card, u32 *status, unsigned int retries)
57a27fbf2fSSeungwon Jeon {
58a27fbf2fSSeungwon Jeon 	int err;
59c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
60a27fbf2fSSeungwon Jeon 
61a27fbf2fSSeungwon Jeon 	cmd.opcode = MMC_SEND_STATUS;
62a27fbf2fSSeungwon Jeon 	if (!mmc_host_is_spi(card->host))
63a27fbf2fSSeungwon Jeon 		cmd.arg = card->rca << 16;
64a27fbf2fSSeungwon Jeon 	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
65a27fbf2fSSeungwon Jeon 
662185bc2cSUlf Hansson 	err = mmc_wait_for_cmd(card->host, &cmd, retries);
67a27fbf2fSSeungwon Jeon 	if (err)
68a27fbf2fSSeungwon Jeon 		return err;
69a27fbf2fSSeungwon Jeon 
70a27fbf2fSSeungwon Jeon 	/* NOTE: callers are required to understand the difference
71a27fbf2fSSeungwon Jeon 	 * between "native" and SPI format status words!
72a27fbf2fSSeungwon Jeon 	 */
73a27fbf2fSSeungwon Jeon 	if (status)
74a27fbf2fSSeungwon Jeon 		*status = cmd.resp[0];
75a27fbf2fSSeungwon Jeon 
76a27fbf2fSSeungwon Jeon 	return 0;
77a27fbf2fSSeungwon Jeon }
782185bc2cSUlf Hansson EXPORT_SYMBOL_GPL(__mmc_send_status);
792185bc2cSUlf Hansson 
802185bc2cSUlf Hansson int mmc_send_status(struct mmc_card *card, u32 *status)
812185bc2cSUlf Hansson {
822185bc2cSUlf Hansson 	return __mmc_send_status(card, status, MMC_CMD_RETRIES);
832185bc2cSUlf Hansson }
841bee324aSLinus Walleij EXPORT_SYMBOL_GPL(mmc_send_status);
85a27fbf2fSSeungwon Jeon 
86da7fbe58SPierre Ossman static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card)
87da7fbe58SPierre Ossman {
88c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
89da7fbe58SPierre Ossman 
90da7fbe58SPierre Ossman 	cmd.opcode = MMC_SELECT_CARD;
91da7fbe58SPierre Ossman 
92da7fbe58SPierre Ossman 	if (card) {
93da7fbe58SPierre Ossman 		cmd.arg = card->rca << 16;
94da7fbe58SPierre Ossman 		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
95da7fbe58SPierre Ossman 	} else {
96da7fbe58SPierre Ossman 		cmd.arg = 0;
97da7fbe58SPierre Ossman 		cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
98da7fbe58SPierre Ossman 	}
99da7fbe58SPierre Ossman 
1000899e741SMasahiro Yamada 	return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
101da7fbe58SPierre Ossman }
102da7fbe58SPierre Ossman 
103da7fbe58SPierre Ossman int mmc_select_card(struct mmc_card *card)
104da7fbe58SPierre Ossman {
105da7fbe58SPierre Ossman 
106da7fbe58SPierre Ossman 	return _mmc_select_card(card->host, card);
107da7fbe58SPierre Ossman }
108da7fbe58SPierre Ossman 
109da7fbe58SPierre Ossman int mmc_deselect_cards(struct mmc_host *host)
110da7fbe58SPierre Ossman {
111da7fbe58SPierre Ossman 	return _mmc_select_card(host, NULL);
112da7fbe58SPierre Ossman }
113da7fbe58SPierre Ossman 
/*
 * Write the value specified in the device tree or board code into the optional
 * 16 bit Driver Stage Register. This can be used to tune raise/fall times and
 * drive strength of the DAT and CMD outputs. The actual meaning of a given
 * value is hardware dependent.
 * The presence of the DSR register can be determined from the CSD register,
 * bit 76.
 */
int mmc_set_dsr(struct mmc_host *host)
{
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SET_DSR;

	/* The DSR value occupies bits [31:16] of the CMD4 argument. */
	cmd.arg = (host->dsr << 16) | 0xffff;
	cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;

	return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
}
1333d705d14SSascha Hauer 
/*
 * Issue CMD0 (GO_IDLE_STATE) to software-reset the card, taking care of
 * the chip-select handling that keeps non-SPI cards out of SPI mode.
 */
int mmc_go_idle(struct mmc_host *host)
{
	int err;
	struct mmc_command cmd = {};

	/*
	 * Non-SPI hosts need to prevent chipselect going active during
	 * GO_IDLE; that would put chips into SPI mode.  Remind them of
	 * that in case of hardware that won't pull up DAT3/nCS otherwise.
	 *
	 * SPI hosts ignore ios.chip_select; it's managed according to
	 * rules that must accommodate non-MMC slaves which this layer
	 * won't even know about.
	 */
	if (!mmc_host_is_spi(host)) {
		mmc_set_chip_select(host, MMC_CS_HIGH);
		mmc_delay(1); /* let the chip-select line settle */
	}

	cmd.opcode = MMC_GO_IDLE_STATE;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC;

	err = mmc_wait_for_cmd(host, &cmd, 0);

	mmc_delay(1);

	if (!mmc_host_is_spi(host)) {
		mmc_set_chip_select(host, MMC_CS_DONTCARE);
		mmc_delay(1);
	}

	/*
	 * Forget any previously enabled SPI CRC state; it gets
	 * (re)negotiated via mmc_spi_set_crc().
	 */
	host->use_spi_crc = 0;

	return err;
}
170da7fbe58SPierre Ossman 
/*
 * Issue CMD1 (SEND_OP_COND) repeatedly - up to 100 tries, 10 ms apart -
 * until the card reports that its power-up sequence has finished.
 * @ocr is the OCR value to announce (ignored in SPI mode); if @rocr is
 * non-NULL it receives the card's OCR response (native mode only).
 * Returns 0 on success, -ETIMEDOUT if the card stayed busy.
 */
int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
{
	struct mmc_command cmd = {};
	int i, err = 0;

	cmd.opcode = MMC_SEND_OP_COND;
	cmd.arg = mmc_host_is_spi(host) ? 0 : ocr;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;

	for (i = 100; i; i--) {
		err = mmc_wait_for_cmd(host, &cmd, 0);
		if (err)
			break;

		/* wait until reset completes */
		if (mmc_host_is_spi(host)) {
			if (!(cmd.resp[0] & R1_SPI_IDLE))
				break;
		} else {
			if (cmd.resp[0] & MMC_CARD_BUSY)
				break;
		}

		/* Still busy: assume timeout unless a later pass succeeds. */
		err = -ETIMEDOUT;

		mmc_delay(10);

		/*
		 * According to eMMC specification v5.1 section 6.4.3, we
		 * should issue CMD1 repeatedly in the idle state until
		 * the eMMC is ready. Otherwise some eMMC devices seem to enter
		 * the inactive mode after mmc_init_card() issued CMD0 when
		 * the eMMC device is busy.
		 */
		if (!ocr && !mmc_host_is_spi(host))
			cmd.arg = cmd.resp[0] | BIT(30);
	}

	if (rocr && !mmc_host_is_spi(host))
		*rocr = cmd.resp[0];

	return err;
}
214da7fbe58SPierre Ossman 
215da7fbe58SPierre Ossman int mmc_set_relative_addr(struct mmc_card *card)
216da7fbe58SPierre Ossman {
217c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
218da7fbe58SPierre Ossman 
219da7fbe58SPierre Ossman 	cmd.opcode = MMC_SET_RELATIVE_ADDR;
220da7fbe58SPierre Ossman 	cmd.arg = card->rca << 16;
221da7fbe58SPierre Ossman 	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
222da7fbe58SPierre Ossman 
2230899e741SMasahiro Yamada 	return mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
224da7fbe58SPierre Ossman }
225da7fbe58SPierre Ossman 
226af517150SDavid Brownell static int
227af517150SDavid Brownell mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode)
228da7fbe58SPierre Ossman {
229da7fbe58SPierre Ossman 	int err;
230c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
231da7fbe58SPierre Ossman 
232af517150SDavid Brownell 	cmd.opcode = opcode;
233af517150SDavid Brownell 	cmd.arg = arg;
234da7fbe58SPierre Ossman 	cmd.flags = MMC_RSP_R2 | MMC_CMD_AC;
235da7fbe58SPierre Ossman 
236af517150SDavid Brownell 	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
23717b0429dSPierre Ossman 	if (err)
238da7fbe58SPierre Ossman 		return err;
239da7fbe58SPierre Ossman 
240af517150SDavid Brownell 	memcpy(cxd, cmd.resp, sizeof(u32) * 4);
241da7fbe58SPierre Ossman 
24217b0429dSPierre Ossman 	return 0;
243da7fbe58SPierre Ossman }
244da7fbe58SPierre Ossman 
/*
 * Read a register that is transferred as a single data block (CSD/CID in
 * SPI mode, or EXT_CSD) into @buf.
 *
 * NOTE: void *buf, caller for the buf is required to use DMA-capable
 * buffer or on-stack buffer (with some overhead in callee).
 */
static int
mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
		u32 opcode, void *buf, unsigned len)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;

	mrq.cmd = &cmd;
	mrq.data = &data;

	cmd.opcode = opcode;
	cmd.arg = 0;

	/* NOTE HACK:  the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID.  Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = len;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;

	sg_init_one(&sg, buf, len);

	if (opcode == MMC_SEND_CSD || opcode == MMC_SEND_CID) {
		/*
		 * The spec states that CSD and CID accesses have a timeout
		 * of 64 clock cycles.
		 */
		data.timeout_ns = 0;
		data.timeout_clks = 64;
	} else
		mmc_set_data_timeout(&data, card);

	mmc_wait_for_req(host, &mrq);

	/* Command errors take precedence over data errors. */
	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return 0;
}
298da7fbe58SPierre Ossman 
2990796e439SUlf Hansson static int mmc_spi_send_csd(struct mmc_card *card, u32 *csd)
300af517150SDavid Brownell {
30178e48073SPierre Ossman 	int ret, i;
30206c9ccb7SWinkler, Tomas 	__be32 *csd_tmp;
30378e48073SPierre Ossman 
30422b78700SUlf Hansson 	csd_tmp = kzalloc(16, GFP_KERNEL);
3051a41313eSKyungsik Lee 	if (!csd_tmp)
3061a41313eSKyungsik Lee 		return -ENOMEM;
3071a41313eSKyungsik Lee 
3081a41313eSKyungsik Lee 	ret = mmc_send_cxd_data(card, card->host, MMC_SEND_CSD, csd_tmp, 16);
30978e48073SPierre Ossman 	if (ret)
3101a41313eSKyungsik Lee 		goto err;
31178e48073SPierre Ossman 
31278e48073SPierre Ossman 	for (i = 0; i < 4; i++)
3131a41313eSKyungsik Lee 		csd[i] = be32_to_cpu(csd_tmp[i]);
31478e48073SPierre Ossman 
3151a41313eSKyungsik Lee err:
3161a41313eSKyungsik Lee 	kfree(csd_tmp);
3171a41313eSKyungsik Lee 	return ret;
318af517150SDavid Brownell }
319af517150SDavid Brownell 
3200796e439SUlf Hansson int mmc_send_csd(struct mmc_card *card, u32 *csd)
3210796e439SUlf Hansson {
3220796e439SUlf Hansson 	if (mmc_host_is_spi(card->host))
3230796e439SUlf Hansson 		return mmc_spi_send_csd(card, csd);
3240796e439SUlf Hansson 
3250796e439SUlf Hansson 	return mmc_send_cxd_native(card->host, card->rca << 16,	csd,
3260796e439SUlf Hansson 				MMC_SEND_CSD);
3270796e439SUlf Hansson }
3280796e439SUlf Hansson 
329a1473732SUlf Hansson static int mmc_spi_send_cid(struct mmc_host *host, u32 *cid)
330af517150SDavid Brownell {
33178e48073SPierre Ossman 	int ret, i;
33206c9ccb7SWinkler, Tomas 	__be32 *cid_tmp;
33378e48073SPierre Ossman 
33422b78700SUlf Hansson 	cid_tmp = kzalloc(16, GFP_KERNEL);
3351a41313eSKyungsik Lee 	if (!cid_tmp)
3361a41313eSKyungsik Lee 		return -ENOMEM;
3371a41313eSKyungsik Lee 
3381a41313eSKyungsik Lee 	ret = mmc_send_cxd_data(NULL, host, MMC_SEND_CID, cid_tmp, 16);
33978e48073SPierre Ossman 	if (ret)
3401a41313eSKyungsik Lee 		goto err;
34178e48073SPierre Ossman 
34278e48073SPierre Ossman 	for (i = 0; i < 4; i++)
3431a41313eSKyungsik Lee 		cid[i] = be32_to_cpu(cid_tmp[i]);
34478e48073SPierre Ossman 
3451a41313eSKyungsik Lee err:
3461a41313eSKyungsik Lee 	kfree(cid_tmp);
3471a41313eSKyungsik Lee 	return ret;
348af517150SDavid Brownell }
349af517150SDavid Brownell 
350a1473732SUlf Hansson int mmc_send_cid(struct mmc_host *host, u32 *cid)
351a1473732SUlf Hansson {
352a1473732SUlf Hansson 	if (mmc_host_is_spi(host))
353a1473732SUlf Hansson 		return mmc_spi_send_cid(host, cid);
354a1473732SUlf Hansson 
355c92e68d8SUlf Hansson 	return mmc_send_cxd_native(host, 0, cid, MMC_ALL_SEND_CID);
356a1473732SUlf Hansson }
357a1473732SUlf Hansson 
358e21aa519SUlf Hansson int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)
359e21aa519SUlf Hansson {
360e21aa519SUlf Hansson 	int err;
361e21aa519SUlf Hansson 	u8 *ext_csd;
362e21aa519SUlf Hansson 
363e21aa519SUlf Hansson 	if (!card || !new_ext_csd)
364e21aa519SUlf Hansson 		return -EINVAL;
365e21aa519SUlf Hansson 
366e21aa519SUlf Hansson 	if (!mmc_can_ext_csd(card))
367e21aa519SUlf Hansson 		return -EOPNOTSUPP;
368e21aa519SUlf Hansson 
369e21aa519SUlf Hansson 	/*
370e21aa519SUlf Hansson 	 * As the ext_csd is so large and mostly unused, we don't store the
371e21aa519SUlf Hansson 	 * raw block in mmc_card.
372e21aa519SUlf Hansson 	 */
37322b78700SUlf Hansson 	ext_csd = kzalloc(512, GFP_KERNEL);
374e21aa519SUlf Hansson 	if (!ext_csd)
375e21aa519SUlf Hansson 		return -ENOMEM;
376e21aa519SUlf Hansson 
3772fc91e8bSUlf Hansson 	err = mmc_send_cxd_data(card, card->host, MMC_SEND_EXT_CSD, ext_csd,
3782fc91e8bSUlf Hansson 				512);
379e21aa519SUlf Hansson 	if (err)
380e21aa519SUlf Hansson 		kfree(ext_csd);
381e21aa519SUlf Hansson 	else
382e21aa519SUlf Hansson 		*new_ext_csd = ext_csd;
383e21aa519SUlf Hansson 
384e21aa519SUlf Hansson 	return err;
385e21aa519SUlf Hansson }
386e21aa519SUlf Hansson EXPORT_SYMBOL_GPL(mmc_get_ext_csd);
387e21aa519SUlf Hansson 
388af517150SDavid Brownell int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp)
389af517150SDavid Brownell {
390c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
391af517150SDavid Brownell 	int err;
392af517150SDavid Brownell 
393af517150SDavid Brownell 	cmd.opcode = MMC_SPI_READ_OCR;
394af517150SDavid Brownell 	cmd.arg = highcap ? (1 << 30) : 0;
395af517150SDavid Brownell 	cmd.flags = MMC_RSP_SPI_R3;
396af517150SDavid Brownell 
397af517150SDavid Brownell 	err = mmc_wait_for_cmd(host, &cmd, 0);
398af517150SDavid Brownell 
399af517150SDavid Brownell 	*ocrp = cmd.resp[1];
400af517150SDavid Brownell 	return err;
401af517150SDavid Brownell }
402af517150SDavid Brownell 
403af517150SDavid Brownell int mmc_spi_set_crc(struct mmc_host *host, int use_crc)
404af517150SDavid Brownell {
405c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
406af517150SDavid Brownell 	int err;
407af517150SDavid Brownell 
408af517150SDavid Brownell 	cmd.opcode = MMC_SPI_CRC_ON_OFF;
409af517150SDavid Brownell 	cmd.flags = MMC_RSP_SPI_R1;
410af517150SDavid Brownell 	cmd.arg = use_crc;
411af517150SDavid Brownell 
412af517150SDavid Brownell 	err = mmc_wait_for_cmd(host, &cmd, 0);
413af517150SDavid Brownell 	if (!err)
414af517150SDavid Brownell 		host->use_spi_crc = use_crc;
415af517150SDavid Brownell 	return err;
416af517150SDavid Brownell }
417af517150SDavid Brownell 
41820348d19SUlf Hansson static int mmc_switch_status_error(struct mmc_host *host, u32 status)
419ed16f58dSAdrian Hunter {
420ed16f58dSAdrian Hunter 	if (mmc_host_is_spi(host)) {
421ed16f58dSAdrian Hunter 		if (status & R1_SPI_ILLEGAL_COMMAND)
422ed16f58dSAdrian Hunter 			return -EBADMSG;
423ed16f58dSAdrian Hunter 	} else {
424a94a7483SShawn Lin 		if (R1_STATUS(status))
425ed16f58dSAdrian Hunter 			pr_warn("%s: unexpected status %#x after switch\n",
426ed16f58dSAdrian Hunter 				mmc_hostname(host), status);
427ed16f58dSAdrian Hunter 		if (status & R1_SWITCH_ERROR)
428ed16f58dSAdrian Hunter 			return -EBADMSG;
429ed16f58dSAdrian Hunter 	}
430ed16f58dSAdrian Hunter 	return 0;
431ed16f58dSAdrian Hunter }
432ed16f58dSAdrian Hunter 
43320348d19SUlf Hansson /* Caller must hold re-tuning */
434ef3d2322SAdrian Hunter int __mmc_switch_status(struct mmc_card *card, bool crc_err_fatal)
43520348d19SUlf Hansson {
43620348d19SUlf Hansson 	u32 status;
43720348d19SUlf Hansson 	int err;
43820348d19SUlf Hansson 
43920348d19SUlf Hansson 	err = mmc_send_status(card, &status);
440ef3d2322SAdrian Hunter 	if (!crc_err_fatal && err == -EILSEQ)
441ef3d2322SAdrian Hunter 		return 0;
44220348d19SUlf Hansson 	if (err)
44320348d19SUlf Hansson 		return err;
44420348d19SUlf Hansson 
44520348d19SUlf Hansson 	return mmc_switch_status_error(card->host, status);
44620348d19SUlf Hansson }
44720348d19SUlf Hansson 
448ef3d2322SAdrian Hunter int mmc_switch_status(struct mmc_card *card)
449ef3d2322SAdrian Hunter {
450ef3d2322SAdrian Hunter 	return __mmc_switch_status(card, true);
451ef3d2322SAdrian Hunter }
452ef3d2322SAdrian Hunter 
/*
 * Wait for the card to stop signalling busy, for at most @timeout_ms.
 * Prefers the host's ->card_busy() callback; otherwise polls with CMD13
 * (only if @send_status allows it).  @retry_crc_err makes a -EILSEQ from
 * CMD13 count as "still busy" instead of a fatal error.
 * Returns 0 when the card is idle, -ETIMEDOUT if it stayed busy, or a
 * status/command error.
 */
static int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
			bool send_status, bool retry_crc_err)
{
	struct mmc_host *host = card->host;
	int err;
	unsigned long timeout;
	u32 status = 0;
	bool expired = false;
	bool busy = false;

	/*
	 * In cases when not allowed to poll by using CMD13 or because we aren't
	 * capable of polling by using ->card_busy(), then rely on waiting the
	 * stated timeout to be sufficient.
	 */
	if (!send_status && !host->ops->card_busy) {
		mmc_delay(timeout_ms);
		return 0;
	}

	timeout = jiffies + msecs_to_jiffies(timeout_ms) + 1;
	do {
		/*
		 * Due to the possibility of being preempted while polling,
		 * check the expiration time first.
		 */
		expired = time_after(jiffies, timeout);

		if (host->ops->card_busy) {
			busy = host->ops->card_busy(host);
		} else {
			err = mmc_send_status(card, &status);
			if (retry_crc_err && err == -EILSEQ) {
				/* Treat the CRC error as "still busy". */
				busy = true;
			} else if (err) {
				return err;
			} else {
				err = mmc_switch_status_error(host, status);
				if (err)
					return err;
				busy = R1_CURRENT_STATE(status) == R1_STATE_PRG;
			}
		}

		/* Timeout if the device still remains busy. */
		if (expired && busy) {
			pr_err("%s: Card stuck being busy! %s\n",
				mmc_hostname(host), __func__);
			return -ETIMEDOUT;
		}
	} while (busy);

	return 0;
}
507716bdb89SUlf Hansson 
/**
 *	__mmc_switch - modify EXT_CSD register
 *	@card: the MMC card associated with the data transfer
 *	@set: cmd set values
 *	@index: EXT_CSD register index
 *	@value: value to program into EXT_CSD register
 *	@timeout_ms: timeout (ms) for operation performed by register write,
 *                   timeout of zero implies maximum possible timeout
 *	@timing: new timing to change to
 *	@use_busy_signal: use the busy signal as response type
 *	@send_status: send status cmd to poll for busy
 *	@retry_crc_err: retry when CRC errors when polling with CMD13 for busy
 *
 *	Modifies the EXT_CSD register for selected card.
 */
int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
		unsigned int timeout_ms, unsigned char timing,
		bool use_busy_signal, bool send_status,	bool retry_crc_err)
{
	struct mmc_host *host = card->host;
	int err;
	struct mmc_command cmd = {};
	bool use_r1b_resp = use_busy_signal;
	unsigned char old_timing = host->ios.timing;

	/* Block re-tuning until the switch sequence has completed. */
	mmc_retune_hold(host);

	if (!timeout_ms) {
		pr_warn("%s: unspecified timeout for CMD6 - use generic\n",
			mmc_hostname(host));
		timeout_ms = card->ext_csd.generic_cmd6_time;
	}

	/*
	 * If the max_busy_timeout of the host is specified, make sure it's
	 * enough to fit the used timeout_ms. In case it's not, let's instruct
	 * the host to avoid HW busy detection, by converting to a R1 response
	 * instead of a R1B.
	 */
	if (host->max_busy_timeout && (timeout_ms > host->max_busy_timeout))
		use_r1b_resp = false;

	cmd.opcode = MMC_SWITCH;
	cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
		  (index << 16) |
		  (value << 8) |
		  set;
	cmd.flags = MMC_CMD_AC;
	if (use_r1b_resp) {
		cmd.flags |= MMC_RSP_SPI_R1B | MMC_RSP_R1B;
		cmd.busy_timeout = timeout_ms;
	} else {
		cmd.flags |= MMC_RSP_SPI_R1 | MMC_RSP_R1;
	}

	if (index == EXT_CSD_SANITIZE_START)
		cmd.sanitize_busy = true;

	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
	if (err)
		goto out;

	/* No need to check card status in case of unblocking command */
	if (!use_busy_signal)
		goto out;

	/* If SPI, or HW busy detection was used above, we don't need to poll. */
	if (((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp) ||
		mmc_host_is_spi(host))
		goto out_tim;

	/* Let's try to poll to find out when the command is completed. */
	err = mmc_poll_for_busy(card, timeout_ms, send_status, retry_crc_err);
	if (err)
		goto out;

out_tim:
	/* Switch to the new timing before checking the switch status. */
	if (timing)
		mmc_set_timing(host, timing);

	if (send_status) {
		err = mmc_switch_status(card);
		/* Roll the timing back if the switch did not take effect. */
		if (err && timing)
			mmc_set_timing(host, old_timing);
	}
out:
	mmc_retune_release(host);

	return err;
}
599950d56acSJaehoon Chung 
600950d56acSJaehoon Chung int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
601950d56acSJaehoon Chung 		unsigned int timeout_ms)
602950d56acSJaehoon Chung {
603aa33ce3cSUlf Hansson 	return __mmc_switch(card, set, index, value, timeout_ms, 0,
604aa33ce3cSUlf Hansson 			true, true, false);
605950d56acSJaehoon Chung }
606d3a8d95dSAndrei Warkentin EXPORT_SYMBOL_GPL(mmc_switch);
607da7fbe58SPierre Ossman 
/*
 * Issue the tuning command @opcode (e.g. CMD19), read back one tuning
 * block and compare it with the expected pattern for the current bus
 * width.  Returns 0 on a match, -EIO on mismatch, -EINVAL for an
 * unsupported bus width, or the command/data error.  If @cmd_error is
 * non-NULL it receives the raw command error (even when zero).
 */
int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;
	struct mmc_ios *ios = &host->ios;
	const u8 *tuning_block_pattern;
	int size, err = 0;
	u8 *data_buf;

	/* Pick the reference pattern matching the configured bus width. */
	if (ios->bus_width == MMC_BUS_WIDTH_8) {
		tuning_block_pattern = tuning_blk_pattern_8bit;
		size = sizeof(tuning_blk_pattern_8bit);
	} else if (ios->bus_width == MMC_BUS_WIDTH_4) {
		tuning_block_pattern = tuning_blk_pattern_4bit;
		size = sizeof(tuning_blk_pattern_4bit);
	} else
		return -EINVAL;

	data_buf = kzalloc(size, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;

	mrq.cmd = &cmd;
	mrq.data = &data;

	cmd.opcode = opcode;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = size;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	/*
	 * According to the tuning specs, Tuning process
	 * is normally shorter 40 executions of CMD19,
	 * and timeout value should be shorter than 150 ms
	 */
	data.timeout_ns = 150 * NSEC_PER_MSEC;

	data.sg = &sg;
	data.sg_len = 1;
	sg_init_one(&sg, data_buf, size);

	mmc_wait_for_req(host, &mrq);

	/* Report the raw command error before deciding what to return. */
	if (cmd_error)
		*cmd_error = cmd.error;

	if (cmd.error) {
		err = cmd.error;
		goto out;
	}

	if (data.error) {
		err = data.error;
		goto out;
	}

	if (memcmp(data_buf, tuning_block_pattern, size))
		err = -EIO;

out:
	kfree(data_buf);
	return err;
}
EXPORT_SYMBOL_GPL(mmc_send_tuning);
676996903deSMinda Chen 
677e711f030SAdrian Hunter int mmc_abort_tuning(struct mmc_host *host, u32 opcode)
678e711f030SAdrian Hunter {
679c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
680e711f030SAdrian Hunter 
681e711f030SAdrian Hunter 	/*
682e711f030SAdrian Hunter 	 * eMMC specification specifies that CMD12 can be used to stop a tuning
683e711f030SAdrian Hunter 	 * command, but SD specification does not, so do nothing unless it is
684e711f030SAdrian Hunter 	 * eMMC.
685e711f030SAdrian Hunter 	 */
686e711f030SAdrian Hunter 	if (opcode != MMC_SEND_TUNING_BLOCK_HS200)
687e711f030SAdrian Hunter 		return 0;
688e711f030SAdrian Hunter 
689e711f030SAdrian Hunter 	cmd.opcode = MMC_STOP_TRANSMISSION;
690e711f030SAdrian Hunter 	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
691e711f030SAdrian Hunter 
692e711f030SAdrian Hunter 	/*
693e711f030SAdrian Hunter 	 * For drivers that override R1 to R1b, set an arbitrary timeout based
694e711f030SAdrian Hunter 	 * on the tuning timeout i.e. 150ms.
695e711f030SAdrian Hunter 	 */
696e711f030SAdrian Hunter 	cmd.busy_timeout = 150;
697e711f030SAdrian Hunter 
698e711f030SAdrian Hunter 	return mmc_wait_for_cmd(host, &cmd, 0);
699e711f030SAdrian Hunter }
700e711f030SAdrian Hunter EXPORT_SYMBOL_GPL(mmc_abort_tuning);
701e711f030SAdrian Hunter 
/*
 * Run one BUS_TEST_W (write) or BUS_TEST_R (read) transfer of @len bytes
 * (8 for an 8-bit bus, 4 for a 4-bit bus).  The card answers a bus-test
 * write by driving back the inverse of the received pattern, so on the
 * read leg every compared byte must XOR with the test pattern to 0xff.
 * Returns 0 on success, -EIO on pattern mismatch, or the cmd/data error.
 */
static int
mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
		  u8 len)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;
	u8 *data_buf;
	u8 *test_buf;
	int i, err;
	static u8 testdata_8bit[8] = { 0x55, 0xaa, 0, 0, 0, 0, 0, 0 };
	static u8 testdata_4bit[4] = { 0x5a, 0, 0, 0 };

	/* dma onto stack is unsafe/nonportable, but callers to this
	 * routine normally provide temporary on-stack buffers ...
	 */
	data_buf = kmalloc(len, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;

	if (len == 8)
		test_buf = testdata_8bit;
	else if (len == 4)
		test_buf = testdata_4bit;
	else {
		pr_err("%s: Invalid bus_width %d\n",
		       mmc_hostname(host), len);
		kfree(data_buf);
		return -EINVAL;
	}

	/* The write leg sends the pattern; the read leg leaves it unset. */
	if (opcode == MMC_BUS_TEST_W)
		memcpy(data_buf, test_buf, len);

	mrq.cmd = &cmd;
	mrq.data = &data;
	cmd.opcode = opcode;
	cmd.arg = 0;

	/* NOTE HACK:  the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID.  Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = len;
	data.blocks = 1;
	if (opcode == MMC_BUS_TEST_R)
		data.flags = MMC_DATA_READ;
	else
		data.flags = MMC_DATA_WRITE;

	data.sg = &sg;
	data.sg_len = 1;
	mmc_set_data_timeout(&data, card);
	sg_init_one(&sg, data_buf, len);
	mmc_wait_for_req(host, &mrq);
	err = 0;
	if (opcode == MMC_BUS_TEST_R) {
		/*
		 * Only the first len/4 bytes carry a non-zero pattern (two
		 * bytes for 8-bit, one byte for 4-bit); each must read back
		 * as the bitwise inverse of what was written.
		 */
		for (i = 0; i < len / 4; i++)
			if ((test_buf[i] ^ data_buf[i]) != 0xff) {
				err = -EIO;
				break;
			}
	}
	kfree(data_buf);

	/* Transfer-level errors take precedence over a pattern mismatch. */
	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return err;
}
77822113efdSAries Lee 
77922113efdSAries Lee int mmc_bus_test(struct mmc_card *card, u8 bus_width)
78022113efdSAries Lee {
7810899e741SMasahiro Yamada 	int width;
78222113efdSAries Lee 
78322113efdSAries Lee 	if (bus_width == MMC_BUS_WIDTH_8)
78422113efdSAries Lee 		width = 8;
78522113efdSAries Lee 	else if (bus_width == MMC_BUS_WIDTH_4)
78622113efdSAries Lee 		width = 4;
78722113efdSAries Lee 	else if (bus_width == MMC_BUS_WIDTH_1)
78822113efdSAries Lee 		return 0; /* no need for test */
78922113efdSAries Lee 	else
79022113efdSAries Lee 		return -EINVAL;
79122113efdSAries Lee 
79222113efdSAries Lee 	/*
79322113efdSAries Lee 	 * Ignore errors from BUS_TEST_W.  BUS_TEST_R will fail if there
79422113efdSAries Lee 	 * is a problem.  This improves chances that the test will work.
79522113efdSAries Lee 	 */
79622113efdSAries Lee 	mmc_send_bus_test(card, card->host, MMC_BUS_TEST_W, width);
7970899e741SMasahiro Yamada 	return mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
79822113efdSAries Lee }
799eb0d8f13SJaehoon Chung 
/*
 * Issue the card's High Priority Interrupt command to interrupt an ongoing
 * operation.  The opcode (CMD12 or CMD13) comes from EXT_CSD; on success
 * the R1 response is stored in *@status when @status is non-NULL.
 */
static int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status)
{
	struct mmc_command cmd = {};
	unsigned int opcode;
	int err;

	/* The card advertises which opcode implements HPI in EXT_CSD. */
	opcode = card->ext_csd.hpi_cmd;
	if (opcode == MMC_STOP_TRANSMISSION)
		cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
	else if (opcode == MMC_SEND_STATUS)
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	/* Argument: RCA in the upper 16 bits, HPI bit (bit 0) set. */
	cmd.opcode = opcode;
	cmd.arg = card->rca << 16 | 1;

	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		pr_warn("%s: error %d interrupting operation. "
			"HPI command response %#x\n", mmc_hostname(card->host),
			err, cmd.resp[0]);
		return err;
	}
	if (status)
		*status = cmd.resp[0];

	return 0;
}
827148bcab2SUlf Hansson 
/**
 *	mmc_interrupt_hpi - Issue for High priority Interrupt
 *	@card: the MMC card associated with the HPI transfer
 *
 *	Issued High Priority Interrupt, and check for card status
 *	until out-of prg-state.
 */
int mmc_interrupt_hpi(struct mmc_card *card)
{
	int err;
	u32 status;
	unsigned long prg_wait;

	if (!card->ext_csd.hpi_en) {
		pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
		/* NOTE(review): positive value, not -E*; callers appear to
		 * treat any non-zero return as failure. */
		return 1;
	}

	err = mmc_send_status(card, &status);
	if (err) {
		pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
		goto out;
	}

	/* HPI is only meaningful while the card is programming. */
	switch (R1_CURRENT_STATE(status)) {
	case R1_STATE_IDLE:
	case R1_STATE_READY:
	case R1_STATE_STBY:
	case R1_STATE_TRAN:
		/*
		 * In idle and transfer states, HPI is not needed and the caller
		 * can issue the next intended command immediately
		 */
		goto out;
	case R1_STATE_PRG:
		break;
	default:
		/* In all other states, it's illegal to issue HPI */
		pr_debug("%s: HPI cannot be sent. Card state=%d\n",
			mmc_hostname(card->host), R1_CURRENT_STATE(status));
		err = -EINVAL;
		goto out;
	}

	err = mmc_send_hpi_cmd(card, &status);
	if (err)
		goto out;

	/*
	 * Poll CMD13 until the card returns to TRAN state, bounded by the
	 * OUT_OF_INTERRUPT_TIME budget the card advertised in EXT_CSD.
	 */
	prg_wait = jiffies + msecs_to_jiffies(card->ext_csd.out_of_int_time);
	do {
		err = mmc_send_status(card, &status);

		if (!err && R1_CURRENT_STATE(status) == R1_STATE_TRAN)
			break;
		if (time_after(jiffies, prg_wait))
			err = -ETIMEDOUT;
	} while (!err);

out:
	return err;
}
8890f2c0512SUlf Hansson 
890148bcab2SUlf Hansson int mmc_can_ext_csd(struct mmc_card *card)
891148bcab2SUlf Hansson {
892148bcab2SUlf Hansson 	return (card && card->csd.mmca_vsn > CSD_SPEC_VER_3);
893148bcab2SUlf Hansson }
894b658af71SAdrian Hunter 
/*
 * Refresh the cached BKOPS status fields by reading a fresh EXT_CSD from
 * the card.  Updates card->ext_csd.raw_bkops_status and
 * raw_exception_status; returns 0 on success or the EXT_CSD read error.
 */
static int mmc_read_bkops_status(struct mmc_card *card)
{
	int err;
	u8 *ext_csd;

	/* mmc_get_ext_csd() allocates the buffer; we own and free it. */
	err = mmc_get_ext_csd(card, &ext_csd);
	if (err)
		return err;

	card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS];
	card->ext_csd.raw_exception_status = ext_csd[EXT_CSD_EXP_EVENTS_STATUS];
	kfree(ext_csd);
	return 0;
}
9091cf8f7e5SUlf Hansson 
9101cf8f7e5SUlf Hansson /**
9110c204979SUlf Hansson  *	mmc_run_bkops - Run BKOPS for supported cards
9120c204979SUlf Hansson  *	@card: MMC card to run BKOPS for
9131cf8f7e5SUlf Hansson  *
9140c204979SUlf Hansson  *	Run background operations synchronously for cards having manual BKOPS
9150c204979SUlf Hansson  *	enabled and in case it reports urgent BKOPS level.
9161cf8f7e5SUlf Hansson */
9170c204979SUlf Hansson void mmc_run_bkops(struct mmc_card *card)
9181cf8f7e5SUlf Hansson {
9191cf8f7e5SUlf Hansson 	int err;
9201cf8f7e5SUlf Hansson 
9210c204979SUlf Hansson 	if (!card->ext_csd.man_bkops_en)
9221cf8f7e5SUlf Hansson 		return;
9231cf8f7e5SUlf Hansson 
9241cf8f7e5SUlf Hansson 	err = mmc_read_bkops_status(card);
9251cf8f7e5SUlf Hansson 	if (err) {
9261cf8f7e5SUlf Hansson 		pr_err("%s: Failed to read bkops status: %d\n",
9271cf8f7e5SUlf Hansson 		       mmc_hostname(card->host), err);
9281cf8f7e5SUlf Hansson 		return;
9291cf8f7e5SUlf Hansson 	}
9301cf8f7e5SUlf Hansson 
9310c204979SUlf Hansson 	if (!card->ext_csd.raw_bkops_status ||
9320c204979SUlf Hansson 	    card->ext_csd.raw_bkops_status < EXT_CSD_BKOPS_LEVEL_2)
9331cf8f7e5SUlf Hansson 		return;
9341cf8f7e5SUlf Hansson 
9351cf8f7e5SUlf Hansson 	mmc_retune_hold(card->host);
9361cf8f7e5SUlf Hansson 
9370c204979SUlf Hansson 	/*
9380c204979SUlf Hansson 	 * For urgent BKOPS status, LEVEL_2 and higher, let's execute
9390c204979SUlf Hansson 	 * synchronously. Future wise, we may consider to start BKOPS, for less
9400c204979SUlf Hansson 	 * urgent levels by using an asynchronous background task, when idle.
9410c204979SUlf Hansson 	 */
9420c204979SUlf Hansson 	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
94324ed3bd0SUlf Hansson 			 EXT_CSD_BKOPS_START, 1, MMC_BKOPS_TIMEOUT_MS);
9440c204979SUlf Hansson 	if (err)
9451cf8f7e5SUlf Hansson 		pr_warn("%s: Error %d starting bkops\n",
9461cf8f7e5SUlf Hansson 			mmc_hostname(card->host), err);
9471cf8f7e5SUlf Hansson 
9481cf8f7e5SUlf Hansson 	mmc_retune_release(card->host);
9491cf8f7e5SUlf Hansson }
9500c204979SUlf Hansson EXPORT_SYMBOL(mmc_run_bkops);
9511cf8f7e5SUlf Hansson 
952d9df1737SUlf Hansson /*
953d9df1737SUlf Hansson  * Flush the cache to the non-volatile storage.
954d9df1737SUlf Hansson  */
955d9df1737SUlf Hansson int mmc_flush_cache(struct mmc_card *card)
956d9df1737SUlf Hansson {
957d9df1737SUlf Hansson 	int err = 0;
958d9df1737SUlf Hansson 
959d9df1737SUlf Hansson 	if (mmc_card_mmc(card) &&
960d9df1737SUlf Hansson 			(card->ext_csd.cache_size > 0) &&
961d9df1737SUlf Hansson 			(card->ext_csd.cache_ctrl & 1)) {
962d9df1737SUlf Hansson 		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
96324ed3bd0SUlf Hansson 				 EXT_CSD_FLUSH_CACHE, 1,
96424ed3bd0SUlf Hansson 				 MMC_CACHE_FLUSH_TIMEOUT_MS);
965d9df1737SUlf Hansson 		if (err)
966d9df1737SUlf Hansson 			pr_err("%s: cache flush error %d\n",
967d9df1737SUlf Hansson 					mmc_hostname(card->host), err);
968d9df1737SUlf Hansson 	}
969d9df1737SUlf Hansson 
970d9df1737SUlf Hansson 	return err;
971d9df1737SUlf Hansson }
972d9df1737SUlf Hansson EXPORT_SYMBOL(mmc_flush_cache);
973d9df1737SUlf Hansson 
974b658af71SAdrian Hunter static int mmc_cmdq_switch(struct mmc_card *card, bool enable)
975b658af71SAdrian Hunter {
976b658af71SAdrian Hunter 	u8 val = enable ? EXT_CSD_CMDQ_MODE_ENABLED : 0;
977b658af71SAdrian Hunter 	int err;
978b658af71SAdrian Hunter 
979b658af71SAdrian Hunter 	if (!card->ext_csd.cmdq_support)
980b658af71SAdrian Hunter 		return -EOPNOTSUPP;
981b658af71SAdrian Hunter 
982b658af71SAdrian Hunter 	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_CMDQ_MODE_EN,
983b658af71SAdrian Hunter 			 val, card->ext_csd.generic_cmd6_time);
984b658af71SAdrian Hunter 	if (!err)
985b658af71SAdrian Hunter 		card->ext_csd.cmdq_en = enable;
986b658af71SAdrian Hunter 
987b658af71SAdrian Hunter 	return err;
988b658af71SAdrian Hunter }
989b658af71SAdrian Hunter 
/* Enable the card's command queue engine (EXT_CSD CMDQ_MODE_EN = 1). */
int mmc_cmdq_enable(struct mmc_card *card)
{
	return mmc_cmdq_switch(card, true);
}
EXPORT_SYMBOL_GPL(mmc_cmdq_enable);
995b658af71SAdrian Hunter 
/* Disable the card's command queue engine (EXT_CSD CMDQ_MODE_EN = 0). */
int mmc_cmdq_disable(struct mmc_card *card)
{
	return mmc_cmdq_switch(card, false);
}
EXPORT_SYMBOL_GPL(mmc_cmdq_disable);
1001