xref: /openbmc/linux/drivers/mmc/core/mmc_ops.c (revision 6972096a)
12874c5fdSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-or-later
2da7fbe58SPierre Ossman /*
370f10482SPierre Ossman  *  linux/drivers/mmc/core/mmc_ops.c
4da7fbe58SPierre Ossman  *
5da7fbe58SPierre Ossman  *  Copyright 2006-2007 Pierre Ossman
6da7fbe58SPierre Ossman  */
7da7fbe58SPierre Ossman 
85a0e3ad6STejun Heo #include <linux/slab.h>
93ef77af1SPaul Gortmaker #include <linux/export.h>
10da7fbe58SPierre Ossman #include <linux/types.h>
11da7fbe58SPierre Ossman #include <linux/scatterlist.h>
12da7fbe58SPierre Ossman 
13da7fbe58SPierre Ossman #include <linux/mmc/host.h>
14da7fbe58SPierre Ossman #include <linux/mmc/card.h>
15da7fbe58SPierre Ossman #include <linux/mmc/mmc.h>
16da7fbe58SPierre Ossman 
17da7fbe58SPierre Ossman #include "core.h"
181cf8f7e5SUlf Hansson #include "card.h"
19c6dbab9cSAdrian Hunter #include "host.h"
20da7fbe58SPierre Ossman #include "mmc_ops.h"
21da7fbe58SPierre Ossman 
2224ed3bd0SUlf Hansson #define MMC_BKOPS_TIMEOUT_MS		(120 * 1000) /* 120s */
2324ed3bd0SUlf Hansson #define MMC_CACHE_FLUSH_TIMEOUT_MS	(30 * 1000) /* 30s */
248fee476bSTrey Ramsay 
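/*
 * Tuning block patterns as defined by the SD and eMMC specifications: the
 * 64-byte pattern is used for 4-bit tuning (SD CMD19, eMMC CMD21), the
 * 128-byte pattern for 8-bit eMMC CMD21 tuning.
 */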
2504cdbbfaSUlf Hansson static const u8 tuning_blk_pattern_4bit[] = {
2604cdbbfaSUlf Hansson 	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
2704cdbbfaSUlf Hansson 	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
2804cdbbfaSUlf Hansson 	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
2904cdbbfaSUlf Hansson 	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
3004cdbbfaSUlf Hansson 	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
3104cdbbfaSUlf Hansson 	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
3204cdbbfaSUlf Hansson 	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
3304cdbbfaSUlf Hansson 	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
3404cdbbfaSUlf Hansson };
3504cdbbfaSUlf Hansson 
3604cdbbfaSUlf Hansson static const u8 tuning_blk_pattern_8bit[] = {
3704cdbbfaSUlf Hansson 	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
3804cdbbfaSUlf Hansson 	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
3904cdbbfaSUlf Hansson 	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
4004cdbbfaSUlf Hansson 	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
4104cdbbfaSUlf Hansson 	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
4204cdbbfaSUlf Hansson 	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
4304cdbbfaSUlf Hansson 	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
4404cdbbfaSUlf Hansson 	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
4504cdbbfaSUlf Hansson 	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
4604cdbbfaSUlf Hansson 	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
4704cdbbfaSUlf Hansson 	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
4804cdbbfaSUlf Hansson 	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
4904cdbbfaSUlf Hansson 	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
5004cdbbfaSUlf Hansson 	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
5104cdbbfaSUlf Hansson 	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
5204cdbbfaSUlf Hansson 	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
5304cdbbfaSUlf Hansson };
5404cdbbfaSUlf Hansson 
552185bc2cSUlf Hansson int __mmc_send_status(struct mmc_card *card, u32 *status, unsigned int retries)
56a27fbf2fSSeungwon Jeon {
57a27fbf2fSSeungwon Jeon 	int err;
58c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
59a27fbf2fSSeungwon Jeon 
60a27fbf2fSSeungwon Jeon 	cmd.opcode = MMC_SEND_STATUS;
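	/*
	 * In native mode, CMD13 carries the RCA in the upper 16 bits of its
	 * argument; in SPI mode there is no RCA, so the argument stays zero.
	 */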
61a27fbf2fSSeungwon Jeon 	if (!mmc_host_is_spi(card->host))
62a27fbf2fSSeungwon Jeon 		cmd.arg = card->rca << 16;
63a27fbf2fSSeungwon Jeon 	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
64a27fbf2fSSeungwon Jeon 
652185bc2cSUlf Hansson 	err = mmc_wait_for_cmd(card->host, &cmd, retries);
66a27fbf2fSSeungwon Jeon 	if (err)
67a27fbf2fSSeungwon Jeon 		return err;
68a27fbf2fSSeungwon Jeon 
69a27fbf2fSSeungwon Jeon 	/* NOTE: callers are required to understand the difference
70a27fbf2fSSeungwon Jeon 	 * between "native" and SPI format status words!
71a27fbf2fSSeungwon Jeon 	 */
72a27fbf2fSSeungwon Jeon 	if (status)
73a27fbf2fSSeungwon Jeon 		*status = cmd.resp[0];
74a27fbf2fSSeungwon Jeon 
75a27fbf2fSSeungwon Jeon 	return 0;
76a27fbf2fSSeungwon Jeon }
772185bc2cSUlf Hansson EXPORT_SYMBOL_GPL(__mmc_send_status);
782185bc2cSUlf Hansson 
792185bc2cSUlf Hansson int mmc_send_status(struct mmc_card *card, u32 *status)
802185bc2cSUlf Hansson {
812185bc2cSUlf Hansson 	return __mmc_send_status(card, status, MMC_CMD_RETRIES);
822185bc2cSUlf Hansson }
831bee324aSLinus Walleij EXPORT_SYMBOL_GPL(mmc_send_status);
84a27fbf2fSSeungwon Jeon 
85da7fbe58SPierre Ossman static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card)
86da7fbe58SPierre Ossman {
87c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
88da7fbe58SPierre Ossman 
89da7fbe58SPierre Ossman 	cmd.opcode = MMC_SELECT_CARD;
90da7fbe58SPierre Ossman 
91da7fbe58SPierre Ossman 	if (card) {
92da7fbe58SPierre Ossman 		cmd.arg = card->rca << 16;
93da7fbe58SPierre Ossman 		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
94da7fbe58SPierre Ossman 	} else {
95da7fbe58SPierre Ossman 		cmd.arg = 0;
96da7fbe58SPierre Ossman 		cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
97da7fbe58SPierre Ossman 	}
98da7fbe58SPierre Ossman 
990899e741SMasahiro Yamada 	return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
100da7fbe58SPierre Ossman }
101da7fbe58SPierre Ossman 
102da7fbe58SPierre Ossman int mmc_select_card(struct mmc_card *card)
103da7fbe58SPierre Ossman {
105da7fbe58SPierre Ossman 	return _mmc_select_card(card->host, card);
106da7fbe58SPierre Ossman }
107da7fbe58SPierre Ossman 
108da7fbe58SPierre Ossman int mmc_deselect_cards(struct mmc_host *host)
109da7fbe58SPierre Ossman {
110da7fbe58SPierre Ossman 	return _mmc_select_card(host, NULL);
111da7fbe58SPierre Ossman }
112da7fbe58SPierre Ossman 
1133d705d14SSascha Hauer /*
1143d705d14SSascha Hauer  * Write the value specified in the device tree or board code into the optional
1153d705d14SSascha Hauer  * 16 bit Driver Stage Register. This can be used to tune rise/fall times and
1163d705d14SSascha Hauer  * drive strength of the DAT and CMD outputs. The actual meaning of a given
1173d705d14SSascha Hauer  * value is hardware dependent.
1183d705d14SSascha Hauer  * The presence of the DSR register can be determined from the CSD register,
1193d705d14SSascha Hauer  * bit 76.
1203d705d14SSascha Hauer  */
1213d705d14SSascha Hauer int mmc_set_dsr(struct mmc_host *host)
1223d705d14SSascha Hauer {
123c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
1243d705d14SSascha Hauer 
1253d705d14SSascha Hauer 	cmd.opcode = MMC_SET_DSR;
1263d705d14SSascha Hauer 
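	/*
	 * CMD4 argument: the DSR value in the upper 16 bits; the lower 16
	 * bits are stuff bits.
	 */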
1273d705d14SSascha Hauer 	cmd.arg = (host->dsr << 16) | 0xffff;
1283d705d14SSascha Hauer 	cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
1293d705d14SSascha Hauer 
1303d705d14SSascha Hauer 	return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
1313d705d14SSascha Hauer }
1323d705d14SSascha Hauer 
133da7fbe58SPierre Ossman int mmc_go_idle(struct mmc_host *host)
134da7fbe58SPierre Ossman {
135da7fbe58SPierre Ossman 	int err;
136c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
137da7fbe58SPierre Ossman 
138af517150SDavid Brownell 	/*
139af517150SDavid Brownell 	 * Non-SPI hosts need to prevent chipselect going active during
140af517150SDavid Brownell 	 * GO_IDLE; that would put chips into SPI mode.  Remind them of
141af517150SDavid Brownell 	 * that in case of hardware that won't pull up DAT3/nCS otherwise.
142af517150SDavid Brownell 	 *
143af517150SDavid Brownell 	 * SPI hosts ignore ios.chip_select; it's managed according to
14425985edcSLucas De Marchi 	 * rules that must accommodate non-MMC slaves which this layer
145af517150SDavid Brownell 	 * won't even know about.
146af517150SDavid Brownell 	 */
147af517150SDavid Brownell 	if (!mmc_host_is_spi(host)) {
148da7fbe58SPierre Ossman 		mmc_set_chip_select(host, MMC_CS_HIGH);
149da7fbe58SPierre Ossman 		mmc_delay(1);
150af517150SDavid Brownell 	}
151da7fbe58SPierre Ossman 
152da7fbe58SPierre Ossman 	cmd.opcode = MMC_GO_IDLE_STATE;
153da7fbe58SPierre Ossman 	cmd.arg = 0;
154af517150SDavid Brownell 	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC;
155da7fbe58SPierre Ossman 
156da7fbe58SPierre Ossman 	err = mmc_wait_for_cmd(host, &cmd, 0);
157da7fbe58SPierre Ossman 
158da7fbe58SPierre Ossman 	mmc_delay(1);
159da7fbe58SPierre Ossman 
160af517150SDavid Brownell 	if (!mmc_host_is_spi(host)) {
161da7fbe58SPierre Ossman 		mmc_set_chip_select(host, MMC_CS_DONTCARE);
162da7fbe58SPierre Ossman 		mmc_delay(1);
163af517150SDavid Brownell 	}
164af517150SDavid Brownell 
165af517150SDavid Brownell 	host->use_spi_crc = 0;
166da7fbe58SPierre Ossman 
167da7fbe58SPierre Ossman 	return err;
168da7fbe58SPierre Ossman }
169da7fbe58SPierre Ossman 
170da7fbe58SPierre Ossman int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
171da7fbe58SPierre Ossman {
172c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
173da7fbe58SPierre Ossman 	int i, err = 0;
174da7fbe58SPierre Ossman 
175da7fbe58SPierre Ossman 	cmd.opcode = MMC_SEND_OP_COND;
176af517150SDavid Brownell 	cmd.arg = mmc_host_is_spi(host) ? 0 : ocr;
177af517150SDavid Brownell 	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;
178da7fbe58SPierre Ossman 
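	/* Poll with CMD1 for up to ~1 second (100 tries, 10 ms apart). */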
179da7fbe58SPierre Ossman 	for (i = 100; i; i--) {
180da7fbe58SPierre Ossman 		err = mmc_wait_for_cmd(host, &cmd, 0);
18117b0429dSPierre Ossman 		if (err)
182da7fbe58SPierre Ossman 			break;
183da7fbe58SPierre Ossman 
1844c94cb65SYoshihiro Shimoda 		/* wait until reset completes */
185af517150SDavid Brownell 		if (mmc_host_is_spi(host)) {
186af517150SDavid Brownell 			if (!(cmd.resp[0] & R1_SPI_IDLE))
187af517150SDavid Brownell 				break;
188af517150SDavid Brownell 		} else {
189af517150SDavid Brownell 			if (cmd.resp[0] & MMC_CARD_BUSY)
190af517150SDavid Brownell 				break;
191af517150SDavid Brownell 		}
192af517150SDavid Brownell 
19317b0429dSPierre Ossman 		err = -ETIMEDOUT;
194da7fbe58SPierre Ossman 
195da7fbe58SPierre Ossman 		mmc_delay(10);
1964c94cb65SYoshihiro Shimoda 
1974c94cb65SYoshihiro Shimoda 		/*
1984c94cb65SYoshihiro Shimoda 		 * According to eMMC specification v5.1 section 6.4.3, we
1994c94cb65SYoshihiro Shimoda 		 * should issue CMD1 repeatedly in the idle state until
2004c94cb65SYoshihiro Shimoda 		 * the eMMC is ready. Otherwise some eMMC devices seem to enter
2014c94cb65SYoshihiro Shimoda 		 * the inactive mode after mmc_init_card() issued CMD0 when
2024c94cb65SYoshihiro Shimoda 		 * the eMMC device is busy.
2034c94cb65SYoshihiro Shimoda 		 */
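		/*
		 * Bit 30 of the CMD1 argument advertises host support for
		 * sector addressing (high-capacity) mode.
		 */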
2044c94cb65SYoshihiro Shimoda 		if (!ocr && !mmc_host_is_spi(host))
2054c94cb65SYoshihiro Shimoda 			cmd.arg = cmd.resp[0] | BIT(30);
206da7fbe58SPierre Ossman 	}
207da7fbe58SPierre Ossman 
208af517150SDavid Brownell 	if (rocr && !mmc_host_is_spi(host))
209da7fbe58SPierre Ossman 		*rocr = cmd.resp[0];
210da7fbe58SPierre Ossman 
211da7fbe58SPierre Ossman 	return err;
212da7fbe58SPierre Ossman }
213da7fbe58SPierre Ossman 
214da7fbe58SPierre Ossman int mmc_set_relative_addr(struct mmc_card *card)
215da7fbe58SPierre Ossman {
216c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
217da7fbe58SPierre Ossman 
218da7fbe58SPierre Ossman 	cmd.opcode = MMC_SET_RELATIVE_ADDR;
219da7fbe58SPierre Ossman 	cmd.arg = card->rca << 16;
220da7fbe58SPierre Ossman 	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
221da7fbe58SPierre Ossman 
2220899e741SMasahiro Yamada 	return mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
223da7fbe58SPierre Ossman }
224da7fbe58SPierre Ossman 
225af517150SDavid Brownell static int
226af517150SDavid Brownell mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode)
227da7fbe58SPierre Ossman {
228da7fbe58SPierre Ossman 	int err;
229c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
230da7fbe58SPierre Ossman 
231af517150SDavid Brownell 	cmd.opcode = opcode;
232af517150SDavid Brownell 	cmd.arg = arg;
233da7fbe58SPierre Ossman 	cmd.flags = MMC_RSP_R2 | MMC_CMD_AC;
234da7fbe58SPierre Ossman 
235af517150SDavid Brownell 	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
23617b0429dSPierre Ossman 	if (err)
237da7fbe58SPierre Ossman 		return err;
238da7fbe58SPierre Ossman 
239af517150SDavid Brownell 	memcpy(cxd, cmd.resp, sizeof(u32) * 4);
240da7fbe58SPierre Ossman 
24117b0429dSPierre Ossman 	return 0;
242da7fbe58SPierre Ossman }
243da7fbe58SPierre Ossman 
2441a41313eSKyungsik Lee /*
2451a41313eSKyungsik Lee  * NOTE: the caller must provide a DMA-capable buffer for @buf, or an
2461a41313eSKyungsik Lee  * on-stack buffer (at the cost of some overhead in the callee).
2471a41313eSKyungsik Lee  */
248af517150SDavid Brownell static int
249af517150SDavid Brownell mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
250af517150SDavid Brownell 		u32 opcode, void *buf, unsigned len)
251da7fbe58SPierre Ossman {
252c7836d15SMasahiro Yamada 	struct mmc_request mrq = {};
253c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
254c7836d15SMasahiro Yamada 	struct mmc_data data = {};
255da7fbe58SPierre Ossman 	struct scatterlist sg;
256da7fbe58SPierre Ossman 
257da7fbe58SPierre Ossman 	mrq.cmd = &cmd;
258da7fbe58SPierre Ossman 	mrq.data = &data;
259da7fbe58SPierre Ossman 
260af517150SDavid Brownell 	cmd.opcode = opcode;
261da7fbe58SPierre Ossman 	cmd.arg = 0;
262da7fbe58SPierre Ossman 
263af517150SDavid Brownell 	/* NOTE HACK:  the MMC_RSP_SPI_R1 is always correct here, but we
264af517150SDavid Brownell 	 * rely on callers to never use this with "native" calls for reading
265af517150SDavid Brownell 	 * CSD or CID.  Native versions of those commands use the R2 type,
266af517150SDavid Brownell 	 * not R1 plus a data block.
267af517150SDavid Brownell 	 */
268af517150SDavid Brownell 	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
269af517150SDavid Brownell 
270af517150SDavid Brownell 	data.blksz = len;
271da7fbe58SPierre Ossman 	data.blocks = 1;
272da7fbe58SPierre Ossman 	data.flags = MMC_DATA_READ;
273da7fbe58SPierre Ossman 	data.sg = &sg;
274da7fbe58SPierre Ossman 	data.sg_len = 1;
275da7fbe58SPierre Ossman 
276601ed60cSUlf Hansson 	sg_init_one(&sg, buf, len);
277da7fbe58SPierre Ossman 
278cda56ac2SAdrian Hunter 	if (opcode == MMC_SEND_CSD || opcode == MMC_SEND_CID) {
2790d3e0460SMatthew Fleming 		/*
2800d3e0460SMatthew Fleming 		 * The spec states that CSD and CID accesses have a timeout
2810d3e0460SMatthew Fleming 		 * of 64 clock cycles.
2820d3e0460SMatthew Fleming 		 */
2830d3e0460SMatthew Fleming 		data.timeout_ns = 0;
2840d3e0460SMatthew Fleming 		data.timeout_clks = 64;
285cda56ac2SAdrian Hunter 	} else
286cda56ac2SAdrian Hunter 		mmc_set_data_timeout(&data, card);
287da7fbe58SPierre Ossman 
288af517150SDavid Brownell 	mmc_wait_for_req(host, &mrq);
289af517150SDavid Brownell 
29017b0429dSPierre Ossman 	if (cmd.error)
291da7fbe58SPierre Ossman 		return cmd.error;
29217b0429dSPierre Ossman 	if (data.error)
293da7fbe58SPierre Ossman 		return data.error;
294da7fbe58SPierre Ossman 
29517b0429dSPierre Ossman 	return 0;
296da7fbe58SPierre Ossman }
297da7fbe58SPierre Ossman 
2980796e439SUlf Hansson static int mmc_spi_send_csd(struct mmc_card *card, u32 *csd)
299af517150SDavid Brownell {
30078e48073SPierre Ossman 	int ret, i;
30106c9ccb7SWinkler, Tomas 	__be32 *csd_tmp;
30278e48073SPierre Ossman 
30322b78700SUlf Hansson 	csd_tmp = kzalloc(16, GFP_KERNEL);
3041a41313eSKyungsik Lee 	if (!csd_tmp)
3051a41313eSKyungsik Lee 		return -ENOMEM;
3061a41313eSKyungsik Lee 
3071a41313eSKyungsik Lee 	ret = mmc_send_cxd_data(card, card->host, MMC_SEND_CSD, csd_tmp, 16);
30878e48073SPierre Ossman 	if (ret)
3091a41313eSKyungsik Lee 		goto err;
31078e48073SPierre Ossman 
31178e48073SPierre Ossman 	for (i = 0; i < 4; i++)
3121a41313eSKyungsik Lee 		csd[i] = be32_to_cpu(csd_tmp[i]);
31378e48073SPierre Ossman 
3141a41313eSKyungsik Lee err:
3151a41313eSKyungsik Lee 	kfree(csd_tmp);
3161a41313eSKyungsik Lee 	return ret;
317af517150SDavid Brownell }
318af517150SDavid Brownell 
3190796e439SUlf Hansson int mmc_send_csd(struct mmc_card *card, u32 *csd)
3200796e439SUlf Hansson {
3210796e439SUlf Hansson 	if (mmc_host_is_spi(card->host))
3220796e439SUlf Hansson 		return mmc_spi_send_csd(card, csd);
3230796e439SUlf Hansson 
3240796e439SUlf Hansson 	return mmc_send_cxd_native(card->host, card->rca << 16,	csd,
3250796e439SUlf Hansson 				MMC_SEND_CSD);
3260796e439SUlf Hansson }
3270796e439SUlf Hansson 
328a1473732SUlf Hansson static int mmc_spi_send_cid(struct mmc_host *host, u32 *cid)
329af517150SDavid Brownell {
33078e48073SPierre Ossman 	int ret, i;
33106c9ccb7SWinkler, Tomas 	__be32 *cid_tmp;
33278e48073SPierre Ossman 
33322b78700SUlf Hansson 	cid_tmp = kzalloc(16, GFP_KERNEL);
3341a41313eSKyungsik Lee 	if (!cid_tmp)
3351a41313eSKyungsik Lee 		return -ENOMEM;
3361a41313eSKyungsik Lee 
3371a41313eSKyungsik Lee 	ret = mmc_send_cxd_data(NULL, host, MMC_SEND_CID, cid_tmp, 16);
33878e48073SPierre Ossman 	if (ret)
3391a41313eSKyungsik Lee 		goto err;
34078e48073SPierre Ossman 
34178e48073SPierre Ossman 	for (i = 0; i < 4; i++)
3421a41313eSKyungsik Lee 		cid[i] = be32_to_cpu(cid_tmp[i]);
34378e48073SPierre Ossman 
3441a41313eSKyungsik Lee err:
3451a41313eSKyungsik Lee 	kfree(cid_tmp);
3461a41313eSKyungsik Lee 	return ret;
347af517150SDavid Brownell }
348af517150SDavid Brownell 
349a1473732SUlf Hansson int mmc_send_cid(struct mmc_host *host, u32 *cid)
350a1473732SUlf Hansson {
351a1473732SUlf Hansson 	if (mmc_host_is_spi(host))
352a1473732SUlf Hansson 		return mmc_spi_send_cid(host, cid);
353a1473732SUlf Hansson 
354c92e68d8SUlf Hansson 	return mmc_send_cxd_native(host, 0, cid, MMC_ALL_SEND_CID);
355a1473732SUlf Hansson }
356a1473732SUlf Hansson 
357e21aa519SUlf Hansson int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)
358e21aa519SUlf Hansson {
359e21aa519SUlf Hansson 	int err;
360e21aa519SUlf Hansson 	u8 *ext_csd;
361e21aa519SUlf Hansson 
362e21aa519SUlf Hansson 	if (!card || !new_ext_csd)
363e21aa519SUlf Hansson 		return -EINVAL;
364e21aa519SUlf Hansson 
365e21aa519SUlf Hansson 	if (!mmc_can_ext_csd(card))
366e21aa519SUlf Hansson 		return -EOPNOTSUPP;
367e21aa519SUlf Hansson 
368e21aa519SUlf Hansson 	/*
369e21aa519SUlf Hansson 	 * As the ext_csd is so large and mostly unused, we don't store the
370e21aa519SUlf Hansson 	 * raw block in mmc_card.
371e21aa519SUlf Hansson 	 */
37222b78700SUlf Hansson 	ext_csd = kzalloc(512, GFP_KERNEL);
373e21aa519SUlf Hansson 	if (!ext_csd)
374e21aa519SUlf Hansson 		return -ENOMEM;
375e21aa519SUlf Hansson 
3762fc91e8bSUlf Hansson 	err = mmc_send_cxd_data(card, card->host, MMC_SEND_EXT_CSD, ext_csd,
3772fc91e8bSUlf Hansson 				512);
378e21aa519SUlf Hansson 	if (err)
379e21aa519SUlf Hansson 		kfree(ext_csd);
380e21aa519SUlf Hansson 	else
381e21aa519SUlf Hansson 		*new_ext_csd = ext_csd;
382e21aa519SUlf Hansson 
383e21aa519SUlf Hansson 	return err;
384e21aa519SUlf Hansson }
385e21aa519SUlf Hansson EXPORT_SYMBOL_GPL(mmc_get_ext_csd);
386e21aa519SUlf Hansson 
387af517150SDavid Brownell int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp)
388af517150SDavid Brownell {
389c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
390af517150SDavid Brownell 	int err;
391af517150SDavid Brownell 
392af517150SDavid Brownell 	cmd.opcode = MMC_SPI_READ_OCR;
393af517150SDavid Brownell 	cmd.arg = highcap ? (1 << 30) : 0;
394af517150SDavid Brownell 	cmd.flags = MMC_RSP_SPI_R3;
395af517150SDavid Brownell 
396af517150SDavid Brownell 	err = mmc_wait_for_cmd(host, &cmd, 0);
397af517150SDavid Brownell 
398af517150SDavid Brownell 	*ocrp = cmd.resp[1];
399af517150SDavid Brownell 	return err;
400af517150SDavid Brownell }
401af517150SDavid Brownell 
402af517150SDavid Brownell int mmc_spi_set_crc(struct mmc_host *host, int use_crc)
403af517150SDavid Brownell {
404c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
405af517150SDavid Brownell 	int err;
406af517150SDavid Brownell 
407af517150SDavid Brownell 	cmd.opcode = MMC_SPI_CRC_ON_OFF;
408af517150SDavid Brownell 	cmd.flags = MMC_RSP_SPI_R1;
409af517150SDavid Brownell 	cmd.arg = use_crc;
410af517150SDavid Brownell 
411af517150SDavid Brownell 	err = mmc_wait_for_cmd(host, &cmd, 0);
412af517150SDavid Brownell 	if (!err)
413af517150SDavid Brownell 		host->use_spi_crc = use_crc;
414af517150SDavid Brownell 	return err;
415af517150SDavid Brownell }
416af517150SDavid Brownell 
41720348d19SUlf Hansson static int mmc_switch_status_error(struct mmc_host *host, u32 status)
418ed16f58dSAdrian Hunter {
419ed16f58dSAdrian Hunter 	if (mmc_host_is_spi(host)) {
420ed16f58dSAdrian Hunter 		if (status & R1_SPI_ILLEGAL_COMMAND)
421ed16f58dSAdrian Hunter 			return -EBADMSG;
422ed16f58dSAdrian Hunter 	} else {
423a94a7483SShawn Lin 		if (R1_STATUS(status))
424ed16f58dSAdrian Hunter 			pr_warn("%s: unexpected status %#x after switch\n",
425ed16f58dSAdrian Hunter 				mmc_hostname(host), status);
426ed16f58dSAdrian Hunter 		if (status & R1_SWITCH_ERROR)
427ed16f58dSAdrian Hunter 			return -EBADMSG;
428ed16f58dSAdrian Hunter 	}
429ed16f58dSAdrian Hunter 	return 0;
430ed16f58dSAdrian Hunter }
431ed16f58dSAdrian Hunter 
43220348d19SUlf Hansson /* Caller must hold re-tuning */
43360db8a47SUlf Hansson int mmc_switch_status(struct mmc_card *card, bool crc_err_fatal)
43420348d19SUlf Hansson {
43520348d19SUlf Hansson 	u32 status;
43620348d19SUlf Hansson 	int err;
43720348d19SUlf Hansson 
43820348d19SUlf Hansson 	err = mmc_send_status(card, &status);
439ef3d2322SAdrian Hunter 	if (!crc_err_fatal && err == -EILSEQ)
440ef3d2322SAdrian Hunter 		return 0;
44120348d19SUlf Hansson 	if (err)
44220348d19SUlf Hansson 		return err;
44320348d19SUlf Hansson 
44420348d19SUlf Hansson 	return mmc_switch_status_error(card->host, status);
44520348d19SUlf Hansson }
44620348d19SUlf Hansson 
4476972096aSUlf Hansson static int mmc_busy_status(struct mmc_card *card, bool retry_crc_err,
4486972096aSUlf Hansson 			   bool *busy)
4496972096aSUlf Hansson {
4506972096aSUlf Hansson 	struct mmc_host *host = card->host;
4516972096aSUlf Hansson 	u32 status = 0;
4526972096aSUlf Hansson 	int err;
4536972096aSUlf Hansson 
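	/*
	 * Prefer the host's ->card_busy() callback, which typically samples
	 * the DAT0 line directly, over polling with CMD13.
	 */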
4546972096aSUlf Hansson 	if (host->ops->card_busy) {
4556972096aSUlf Hansson 		*busy = host->ops->card_busy(host);
4566972096aSUlf Hansson 		return 0;
4576972096aSUlf Hansson 	}
4586972096aSUlf Hansson 
4596972096aSUlf Hansson 	err = mmc_send_status(card, &status);
4606972096aSUlf Hansson 	if (retry_crc_err && err == -EILSEQ) {
4616972096aSUlf Hansson 		*busy = true;
4626972096aSUlf Hansson 		return 0;
4636972096aSUlf Hansson 	}
4646972096aSUlf Hansson 	if (err)
4656972096aSUlf Hansson 		return err;
4666972096aSUlf Hansson 
4676972096aSUlf Hansson 	err = mmc_switch_status_error(card->host, status);
4686972096aSUlf Hansson 	if (err)
4696972096aSUlf Hansson 		return err;
4706972096aSUlf Hansson 
4716972096aSUlf Hansson 	*busy = R1_CURRENT_STATE(status) == R1_STATE_PRG;
4726972096aSUlf Hansson 	return 0;
4736972096aSUlf Hansson }
4746972096aSUlf Hansson 
475716bdb89SUlf Hansson static int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
476625228faSUlf Hansson 			bool send_status, bool retry_crc_err)
477716bdb89SUlf Hansson {
478716bdb89SUlf Hansson 	struct mmc_host *host = card->host;
479716bdb89SUlf Hansson 	int err;
480716bdb89SUlf Hansson 	unsigned long timeout;
481d46a24a9SUlf Hansson 	unsigned int udelay = 32, udelay_max = 32768;
482716bdb89SUlf Hansson 	bool expired = false;
483716bdb89SUlf Hansson 	bool busy = false;
484716bdb89SUlf Hansson 
485716bdb89SUlf Hansson 	/*
486716bdb89SUlf Hansson 	 * If we aren't allowed to poll by using CMD13 and the host isn't
487716bdb89SUlf Hansson 	 * capable of polling by using ->card_busy(), then rely on waiting
488716bdb89SUlf Hansson 	 * the stated timeout to be sufficient.
489716bdb89SUlf Hansson 	 */
490716bdb89SUlf Hansson 	if (!send_status && !host->ops->card_busy) {
491716bdb89SUlf Hansson 		mmc_delay(timeout_ms);
492716bdb89SUlf Hansson 		return 0;
493716bdb89SUlf Hansson 	}
494716bdb89SUlf Hansson 
495716bdb89SUlf Hansson 	timeout = jiffies + msecs_to_jiffies(timeout_ms) + 1;
496716bdb89SUlf Hansson 	do {
497716bdb89SUlf Hansson 		/*
49870562644SUlf Hansson 		 * Due to the possibility of being preempted while polling,
49970562644SUlf Hansson 		 * check the expiration time first.
500716bdb89SUlf Hansson 		 */
501716bdb89SUlf Hansson 		expired = time_after(jiffies, timeout);
50270562644SUlf Hansson 
5036972096aSUlf Hansson 		err = mmc_busy_status(card, retry_crc_err, &busy);
5045ec32f84SUlf Hansson 		if (err)
5055ec32f84SUlf Hansson 			return err;
506716bdb89SUlf Hansson 
50770562644SUlf Hansson 		/* Timeout if the device still remains busy. */
50870562644SUlf Hansson 		if (expired && busy) {
50970562644SUlf Hansson 			pr_err("%s: Card stuck being busy! %s\n",
510716bdb89SUlf Hansson 				mmc_hostname(host), __func__);
511716bdb89SUlf Hansson 			return -ETIMEDOUT;
512716bdb89SUlf Hansson 		}
513d46a24a9SUlf Hansson 
514d46a24a9SUlf Hansson 		/* Throttle the polling rate to avoid hogging the CPU. */
515d46a24a9SUlf Hansson 		if (busy) {
516d46a24a9SUlf Hansson 			usleep_range(udelay, udelay * 2);
517d46a24a9SUlf Hansson 			if (udelay < udelay_max)
518d46a24a9SUlf Hansson 				udelay *= 2;
519d46a24a9SUlf Hansson 		}
52070562644SUlf Hansson 	} while (busy);
521716bdb89SUlf Hansson 
5225ec32f84SUlf Hansson 	return 0;
523716bdb89SUlf Hansson }
524716bdb89SUlf Hansson 
525d3a8d95dSAndrei Warkentin /**
526950d56acSJaehoon Chung  *	__mmc_switch - modify EXT_CSD register
527d3a8d95dSAndrei Warkentin  *	@card: the MMC card associated with the data transfer
528d3a8d95dSAndrei Warkentin  *	@set: cmd set values
529d3a8d95dSAndrei Warkentin  *	@index: EXT_CSD register index
530d3a8d95dSAndrei Warkentin  *	@value: value to program into EXT_CSD register
531d3a8d95dSAndrei Warkentin  *	@timeout_ms: timeout (ms) for operation performed by register write,
532d3a8d95dSAndrei Warkentin  *                   timeout of zero falls back to the generic CMD6 timeout
533aa33ce3cSUlf Hansson  *	@timing: new timing to change to
534878e200bSUlf Hansson  *	@send_status: send status cmd to poll for busy
535625228faSUlf Hansson  *	@retry_crc_err: retry on CRC errors when polling with CMD13 for busy
536d3a8d95dSAndrei Warkentin  *
537d3a8d95dSAndrei Warkentin  *	Modifies the EXT_CSD register for selected card.
538d3a8d95dSAndrei Warkentin  */
539950d56acSJaehoon Chung int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
540aa33ce3cSUlf Hansson 		unsigned int timeout_ms, unsigned char timing,
54102098ccdSUlf Hansson 		bool send_status, bool retry_crc_err)
542da7fbe58SPierre Ossman {
543636bd13cSUlf Hansson 	struct mmc_host *host = card->host;
544da7fbe58SPierre Ossman 	int err;
545c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
54602098ccdSUlf Hansson 	bool use_r1b_resp = true;
547aa33ce3cSUlf Hansson 	unsigned char old_timing = host->ios.timing;
548b9ec2616SUlf Hansson 
549c6dbab9cSAdrian Hunter 	mmc_retune_hold(host);
550c6dbab9cSAdrian Hunter 
551533a6cfeSUlf Hansson 	if (!timeout_ms) {
552533a6cfeSUlf Hansson 		pr_warn("%s: unspecified timeout for CMD6 - use generic\n",
553533a6cfeSUlf Hansson 			mmc_hostname(host));
554533a6cfeSUlf Hansson 		timeout_ms = card->ext_csd.generic_cmd6_time;
555533a6cfeSUlf Hansson 	}
556533a6cfeSUlf Hansson 
557b9ec2616SUlf Hansson 	/*
558533a6cfeSUlf Hansson 	 * If the max_busy_timeout of the host is specified, make sure it's
559533a6cfeSUlf Hansson 	 * enough to fit the used timeout_ms. In case it's not, let's instruct
560533a6cfeSUlf Hansson 	 * the host to avoid HW busy detection, by converting to a R1 response
5611292e3efSUlf Hansson 	 * instead of a R1B. Note, some hosts requires R1B, which also means
5621292e3efSUlf Hansson 	 * they are on their own when it comes to deal with the busy timeout.
563b9ec2616SUlf Hansson 	 */
5641292e3efSUlf Hansson 	if (!(host->caps & MMC_CAP_NEED_RSP_BUSY) && host->max_busy_timeout &&
5651292e3efSUlf Hansson 	    (timeout_ms > host->max_busy_timeout))
566b9ec2616SUlf Hansson 		use_r1b_resp = false;
567da7fbe58SPierre Ossman 
568da7fbe58SPierre Ossman 	cmd.opcode = MMC_SWITCH;
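	/*
	 * CMD6 argument layout: [25:24] access mode, [23:16] EXT_CSD byte
	 * index, [15:8] value to write, [2:0] command set.
	 */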
569da7fbe58SPierre Ossman 	cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
570da7fbe58SPierre Ossman 		  (index << 16) |
571da7fbe58SPierre Ossman 		  (value << 8) |
572da7fbe58SPierre Ossman 		  set;
573950d56acSJaehoon Chung 	cmd.flags = MMC_CMD_AC;
574b9ec2616SUlf Hansson 	if (use_r1b_resp) {
575950d56acSJaehoon Chung 		cmd.flags |= MMC_RSP_SPI_R1B | MMC_RSP_R1B;
5761d4d7744SUlf Hansson 		cmd.busy_timeout = timeout_ms;
577b9ec2616SUlf Hansson 	} else {
578b9ec2616SUlf Hansson 		cmd.flags |= MMC_RSP_SPI_R1 | MMC_RSP_R1;
579b9ec2616SUlf Hansson 	}
580b9ec2616SUlf Hansson 
581775a9362SMaya Erez 	if (index == EXT_CSD_SANITIZE_START)
582775a9362SMaya Erez 		cmd.sanitize_busy = true;
583da7fbe58SPierre Ossman 
5848ad8e02cSJan Kaisrlik 	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
58517b0429dSPierre Ossman 	if (err)
586c6dbab9cSAdrian Hunter 		goto out;
587da7fbe58SPierre Ossman 
588cb26ce06SUlf Hansson 	/* If SPI, or if HW busy detection was used above, we don't need to poll. */
589cb26ce06SUlf Hansson 	if (((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp) ||
590ee6ff743SUlf Hansson 		mmc_host_is_spi(host))
591aa33ce3cSUlf Hansson 		goto out_tim;
592a27fbf2fSSeungwon Jeon 
593716bdb89SUlf Hansson 	/* Let's try to poll to find out when the command is completed. */
594625228faSUlf Hansson 	err = mmc_poll_for_busy(card, timeout_ms, send_status, retry_crc_err);
595ee6ff743SUlf Hansson 	if (err)
596ee6ff743SUlf Hansson 		goto out;
597aa33ce3cSUlf Hansson 
598aa33ce3cSUlf Hansson out_tim:
599ee6ff743SUlf Hansson 	/* Switch to the new timing before checking the switch status. */
600ee6ff743SUlf Hansson 	if (timing)
601ee6ff743SUlf Hansson 		mmc_set_timing(host, timing);
602ee6ff743SUlf Hansson 
603ee6ff743SUlf Hansson 	if (send_status) {
60460db8a47SUlf Hansson 		err = mmc_switch_status(card, true);
605aa33ce3cSUlf Hansson 		if (err && timing)
606aa33ce3cSUlf Hansson 			mmc_set_timing(host, old_timing);
607ee6ff743SUlf Hansson 	}
608c6dbab9cSAdrian Hunter out:
609c6dbab9cSAdrian Hunter 	mmc_retune_release(host);
610ef0b27d4SAdrian Hunter 
611c6dbab9cSAdrian Hunter 	return err;
612da7fbe58SPierre Ossman }
613950d56acSJaehoon Chung 
614950d56acSJaehoon Chung int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
615950d56acSJaehoon Chung 		unsigned int timeout_ms)
616950d56acSJaehoon Chung {
617aa33ce3cSUlf Hansson 	return __mmc_switch(card, set, index, value, timeout_ms, 0,
61802098ccdSUlf Hansson 			    true, false);
619950d56acSJaehoon Chung }
620d3a8d95dSAndrei Warkentin EXPORT_SYMBOL_GPL(mmc_switch);
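/*
 * Example (editor's sketch, not a caller in this file): enabling the card's
 * volatile cache is a plain EXT_CSD byte write, waited out with the generic
 * CMD6 timeout:
 *
 *	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_CACHE_CTRL,
 *			 1, card->ext_csd.generic_cmd6_time);
 */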
621da7fbe58SPierre Ossman 
6229979dbe5SChaotian Jing int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error)
623996903deSMinda Chen {
624c7836d15SMasahiro Yamada 	struct mmc_request mrq = {};
625c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
626c7836d15SMasahiro Yamada 	struct mmc_data data = {};
627996903deSMinda Chen 	struct scatterlist sg;
628fe5afb13SUlf Hansson 	struct mmc_ios *ios = &host->ios;
629996903deSMinda Chen 	const u8 *tuning_block_pattern;
630996903deSMinda Chen 	int size, err = 0;
631996903deSMinda Chen 	u8 *data_buf;
632996903deSMinda Chen 
633996903deSMinda Chen 	if (ios->bus_width == MMC_BUS_WIDTH_8) {
634996903deSMinda Chen 		tuning_block_pattern = tuning_blk_pattern_8bit;
635996903deSMinda Chen 		size = sizeof(tuning_blk_pattern_8bit);
636996903deSMinda Chen 	} else if (ios->bus_width == MMC_BUS_WIDTH_4) {
637996903deSMinda Chen 		tuning_block_pattern = tuning_blk_pattern_4bit;
638996903deSMinda Chen 		size = sizeof(tuning_blk_pattern_4bit);
639996903deSMinda Chen 	} else
640996903deSMinda Chen 		return -EINVAL;
641996903deSMinda Chen 
642996903deSMinda Chen 	data_buf = kzalloc(size, GFP_KERNEL);
643996903deSMinda Chen 	if (!data_buf)
644996903deSMinda Chen 		return -ENOMEM;
645996903deSMinda Chen 
646996903deSMinda Chen 	mrq.cmd = &cmd;
647996903deSMinda Chen 	mrq.data = &data;
648996903deSMinda Chen 
649996903deSMinda Chen 	cmd.opcode = opcode;
650996903deSMinda Chen 	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
651996903deSMinda Chen 
652996903deSMinda Chen 	data.blksz = size;
653996903deSMinda Chen 	data.blocks = 1;
654996903deSMinda Chen 	data.flags = MMC_DATA_READ;
655996903deSMinda Chen 
656996903deSMinda Chen 	/*
657996903deSMinda Chen 	 * According to the tuning specs, the tuning process normally
658996903deSMinda Chen 	 * takes fewer than 40 executions of CMD19, and the timeout
659996903deSMinda Chen 	 * value should be shorter than 150 ms.
660996903deSMinda Chen 	 */
661996903deSMinda Chen 	data.timeout_ns = 150 * NSEC_PER_MSEC;
662996903deSMinda Chen 
663996903deSMinda Chen 	data.sg = &sg;
664996903deSMinda Chen 	data.sg_len = 1;
665996903deSMinda Chen 	sg_init_one(&sg, data_buf, size);
666996903deSMinda Chen 
667fe5afb13SUlf Hansson 	mmc_wait_for_req(host, &mrq);
668996903deSMinda Chen 
6699979dbe5SChaotian Jing 	if (cmd_error)
6709979dbe5SChaotian Jing 		*cmd_error = cmd.error;
6719979dbe5SChaotian Jing 
672996903deSMinda Chen 	if (cmd.error) {
673996903deSMinda Chen 		err = cmd.error;
674996903deSMinda Chen 		goto out;
675996903deSMinda Chen 	}
676996903deSMinda Chen 
677996903deSMinda Chen 	if (data.error) {
678996903deSMinda Chen 		err = data.error;
679996903deSMinda Chen 		goto out;
680996903deSMinda Chen 	}
681996903deSMinda Chen 
682996903deSMinda Chen 	if (memcmp(data_buf, tuning_block_pattern, size))
683996903deSMinda Chen 		err = -EIO;
684996903deSMinda Chen 
685996903deSMinda Chen out:
686996903deSMinda Chen 	kfree(data_buf);
687996903deSMinda Chen 	return err;
688996903deSMinda Chen }
689996903deSMinda Chen EXPORT_SYMBOL_GPL(mmc_send_tuning);
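/*
 * Example (editor's sketch): a host driver's ->execute_tuning() hook
 * typically sweeps its sampling phases and calls mmc_send_tuning() once per
 * phase, keeping a phase that passed. MAX_PHASES, priv, set_sample_phase()
 * and phase_ok[] are hypothetical driver internals; mmc is the driver's
 * struct mmc_host pointer:
 *
 *	for (phase = 0; phase < MAX_PHASES; phase++) {
 *		set_sample_phase(priv, phase);
 *		if (!mmc_send_tuning(mmc, opcode, NULL))
 *			phase_ok[phase] = true;
 *	}
 */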
690996903deSMinda Chen 
691e711f030SAdrian Hunter int mmc_abort_tuning(struct mmc_host *host, u32 opcode)
692e711f030SAdrian Hunter {
693c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
694e711f030SAdrian Hunter 
695e711f030SAdrian Hunter 	/*
695e711f030SAdrian Hunter 	 * The eMMC specification states that CMD12 can be used to stop a tuning
696e711f030SAdrian Hunter 	 * command, but the SD specification does not, so do nothing unless it is
698e711f030SAdrian Hunter 	 * eMMC.
699e711f030SAdrian Hunter 	 */
700e711f030SAdrian Hunter 	if (opcode != MMC_SEND_TUNING_BLOCK_HS200)
701e711f030SAdrian Hunter 		return 0;
702e711f030SAdrian Hunter 
703e711f030SAdrian Hunter 	cmd.opcode = MMC_STOP_TRANSMISSION;
704e711f030SAdrian Hunter 	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
705e711f030SAdrian Hunter 
706e711f030SAdrian Hunter 	/*
707e711f030SAdrian Hunter 	 * For drivers that override R1 to R1b, set an arbitrary timeout based
708e711f030SAdrian Hunter 	 * on the tuning timeout i.e. 150ms.
709e711f030SAdrian Hunter 	 */
710e711f030SAdrian Hunter 	cmd.busy_timeout = 150;
711e711f030SAdrian Hunter 
712e711f030SAdrian Hunter 	return mmc_wait_for_cmd(host, &cmd, 0);
713e711f030SAdrian Hunter }
714e711f030SAdrian Hunter EXPORT_SYMBOL_GPL(mmc_abort_tuning);
715e711f030SAdrian Hunter 
71622113efdSAries Lee static int
71722113efdSAries Lee mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
71822113efdSAries Lee 		  u8 len)
71922113efdSAries Lee {
720c7836d15SMasahiro Yamada 	struct mmc_request mrq = {};
721c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
722c7836d15SMasahiro Yamada 	struct mmc_data data = {};
72322113efdSAries Lee 	struct scatterlist sg;
72422113efdSAries Lee 	u8 *data_buf;
72522113efdSAries Lee 	u8 *test_buf;
72622113efdSAries Lee 	int i, err;
72722113efdSAries Lee 	static u8 testdata_8bit[8] = { 0x55, 0xaa, 0, 0, 0, 0, 0, 0 };
72822113efdSAries Lee 	static u8 testdata_4bit[4] = { 0x5a, 0, 0, 0 };
72922113efdSAries Lee 
73022113efdSAries Lee 	/* dma onto stack is unsafe/nonportable, but callers to this
73122113efdSAries Lee 	 * routine normally provide temporary on-stack buffers ...
73222113efdSAries Lee 	 */
73322113efdSAries Lee 	data_buf = kmalloc(len, GFP_KERNEL);
73422113efdSAries Lee 	if (!data_buf)
73522113efdSAries Lee 		return -ENOMEM;
73622113efdSAries Lee 
73722113efdSAries Lee 	if (len == 8)
73822113efdSAries Lee 		test_buf = testdata_8bit;
73922113efdSAries Lee 	else if (len == 4)
74022113efdSAries Lee 		test_buf = testdata_4bit;
74122113efdSAries Lee 	else {
742a3c76eb9SGirish K S 		pr_err("%s: Invalid bus_width %d\n",
74322113efdSAries Lee 		       mmc_hostname(host), len);
74422113efdSAries Lee 		kfree(data_buf);
74522113efdSAries Lee 		return -EINVAL;
74622113efdSAries Lee 	}
74722113efdSAries Lee 
74822113efdSAries Lee 	if (opcode == MMC_BUS_TEST_W)
74922113efdSAries Lee 		memcpy(data_buf, test_buf, len);
75022113efdSAries Lee 
75122113efdSAries Lee 	mrq.cmd = &cmd;
75222113efdSAries Lee 	mrq.data = &data;
75322113efdSAries Lee 	cmd.opcode = opcode;
75422113efdSAries Lee 	cmd.arg = 0;
75522113efdSAries Lee 
75622113efdSAries Lee 	/* NOTE HACK:  the MMC_RSP_SPI_R1 is always correct here, but we
75722113efdSAries Lee 	 * rely on callers to never use this with "native" calls for reading
75822113efdSAries Lee 	 * CSD or CID.  Native versions of those commands use the R2 type,
75922113efdSAries Lee 	 * not R1 plus a data block.
76022113efdSAries Lee 	 */
76122113efdSAries Lee 	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
76222113efdSAries Lee 
76322113efdSAries Lee 	data.blksz = len;
76422113efdSAries Lee 	data.blocks = 1;
76522113efdSAries Lee 	if (opcode == MMC_BUS_TEST_R)
76622113efdSAries Lee 		data.flags = MMC_DATA_READ;
76722113efdSAries Lee 	else
76822113efdSAries Lee 		data.flags = MMC_DATA_WRITE;
76922113efdSAries Lee 
77022113efdSAries Lee 	data.sg = &sg;
77122113efdSAries Lee 	data.sg_len = 1;
77284532e33SMinjian Wu 	mmc_set_data_timeout(&data, card);
77322113efdSAries Lee 	sg_init_one(&sg, data_buf, len);
77422113efdSAries Lee 	mmc_wait_for_req(host, &mrq);
77522113efdSAries Lee 	err = 0;
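	/*
	 * On BUS_TEST_R the card returns the inverse of the pattern written
	 * by BUS_TEST_W, so each byte XORed with the original must be 0xff.
	 */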
77622113efdSAries Lee 	if (opcode == MMC_BUS_TEST_R) {
77722113efdSAries Lee 		for (i = 0; i < len / 4; i++)
77822113efdSAries Lee 			if ((test_buf[i] ^ data_buf[i]) != 0xff) {
77922113efdSAries Lee 				err = -EIO;
78022113efdSAries Lee 				break;
78122113efdSAries Lee 			}
78222113efdSAries Lee 	}
78322113efdSAries Lee 	kfree(data_buf);
78422113efdSAries Lee 
78522113efdSAries Lee 	if (cmd.error)
78622113efdSAries Lee 		return cmd.error;
78722113efdSAries Lee 	if (data.error)
78822113efdSAries Lee 		return data.error;
78922113efdSAries Lee 
79022113efdSAries Lee 	return err;
79122113efdSAries Lee }
79222113efdSAries Lee 
79322113efdSAries Lee int mmc_bus_test(struct mmc_card *card, u8 bus_width)
79422113efdSAries Lee {
7950899e741SMasahiro Yamada 	int width;
79622113efdSAries Lee 
79722113efdSAries Lee 	if (bus_width == MMC_BUS_WIDTH_8)
79822113efdSAries Lee 		width = 8;
79922113efdSAries Lee 	else if (bus_width == MMC_BUS_WIDTH_4)
80022113efdSAries Lee 		width = 4;
80122113efdSAries Lee 	else if (bus_width == MMC_BUS_WIDTH_1)
80222113efdSAries Lee 		return 0; /* no need for test */
80322113efdSAries Lee 	else
80422113efdSAries Lee 		return -EINVAL;
80522113efdSAries Lee 
80622113efdSAries Lee 	/*
80722113efdSAries Lee 	 * Ignore errors from BUS_TEST_W.  BUS_TEST_R will fail if there
80822113efdSAries Lee 	 * is a problem.  This improves chances that the test will work.
80922113efdSAries Lee 	 */
81022113efdSAries Lee 	mmc_send_bus_test(card, card->host, MMC_BUS_TEST_W, width);
8110899e741SMasahiro Yamada 	return mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
81222113efdSAries Lee }
813eb0d8f13SJaehoon Chung 
8140f2c0512SUlf Hansson static int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status)
815eb0d8f13SJaehoon Chung {
816c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
817eb0d8f13SJaehoon Chung 	unsigned int opcode;
818eb0d8f13SJaehoon Chung 	int err;
819eb0d8f13SJaehoon Chung 
820eb0d8f13SJaehoon Chung 	opcode = card->ext_csd.hpi_cmd;
821eb0d8f13SJaehoon Chung 	if (opcode == MMC_STOP_TRANSMISSION)
8222378975bSJaehoon Chung 		cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
823eb0d8f13SJaehoon Chung 	else if (opcode == MMC_SEND_STATUS)
8242378975bSJaehoon Chung 		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
825eb0d8f13SJaehoon Chung 
826eb0d8f13SJaehoon Chung 	cmd.opcode = opcode;
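	/* RCA in the upper 16 bits; bit 0 set marks the command as HPI. */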
827eb0d8f13SJaehoon Chung 	cmd.arg = card->rca << 16 | 1;
828eb0d8f13SJaehoon Chung 
829eb0d8f13SJaehoon Chung 	err = mmc_wait_for_cmd(card->host, &cmd, 0);
830eb0d8f13SJaehoon Chung 	if (err) {
831eb0d8f13SJaehoon Chung 		pr_warn("%s: error %d interrupting operation. "
832eb0d8f13SJaehoon Chung 			"HPI command response %#x\n", mmc_hostname(card->host),
833eb0d8f13SJaehoon Chung 			err, cmd.resp[0]);
834eb0d8f13SJaehoon Chung 		return err;
835eb0d8f13SJaehoon Chung 	}
836eb0d8f13SJaehoon Chung 	if (status)
837eb0d8f13SJaehoon Chung 		*status = cmd.resp[0];
838eb0d8f13SJaehoon Chung 
839eb0d8f13SJaehoon Chung 	return 0;
840eb0d8f13SJaehoon Chung }
841148bcab2SUlf Hansson 
8420f2c0512SUlf Hansson /**
8430f2c0512SUlf Hansson  *	mmc_interrupt_hpi - Issue a High Priority Interrupt
8440f2c0512SUlf Hansson  *	@card: the MMC card associated with the HPI transfer
8450f2c0512SUlf Hansson  *
8460f2c0512SUlf Hansson  *	Issues a High Priority Interrupt, and checks the card status
8470f2c0512SUlf Hansson  *	until the card exits the programming state.
8480f2c0512SUlf Hansson  */
8490f2c0512SUlf Hansson int mmc_interrupt_hpi(struct mmc_card *card)
8500f2c0512SUlf Hansson {
8510f2c0512SUlf Hansson 	int err;
8520f2c0512SUlf Hansson 	u32 status;
8530f2c0512SUlf Hansson 	unsigned long prg_wait;
8540f2c0512SUlf Hansson 
8550f2c0512SUlf Hansson 	if (!card->ext_csd.hpi_en) {
8560f2c0512SUlf Hansson 		pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
8570f2c0512SUlf Hansson 		return 1;
8580f2c0512SUlf Hansson 	}
8590f2c0512SUlf Hansson 
8600f2c0512SUlf Hansson 	err = mmc_send_status(card, &status);
8610f2c0512SUlf Hansson 	if (err) {
8620f2c0512SUlf Hansson 		pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
8630f2c0512SUlf Hansson 		goto out;
8640f2c0512SUlf Hansson 	}
8650f2c0512SUlf Hansson 
8660f2c0512SUlf Hansson 	switch (R1_CURRENT_STATE(status)) {
8670f2c0512SUlf Hansson 	case R1_STATE_IDLE:
8680f2c0512SUlf Hansson 	case R1_STATE_READY:
8690f2c0512SUlf Hansson 	case R1_STATE_STBY:
8700f2c0512SUlf Hansson 	case R1_STATE_TRAN:
8710f2c0512SUlf Hansson 		/*
8720f2c0512SUlf Hansson 		 * In idle and transfer states, HPI is not needed and the caller
8730f2c0512SUlf Hansson 		 * can issue the next intended command immediately
8740f2c0512SUlf Hansson 		 */
8750f2c0512SUlf Hansson 		goto out;
8760f2c0512SUlf Hansson 	case R1_STATE_PRG:
8770f2c0512SUlf Hansson 		break;
8780f2c0512SUlf Hansson 	default:
8790f2c0512SUlf Hansson 		/* In all other states, it's illegal to issue HPI */
8800f2c0512SUlf Hansson 		pr_debug("%s: HPI cannot be sent. Card state=%d\n",
8810f2c0512SUlf Hansson 			mmc_hostname(card->host), R1_CURRENT_STATE(status));
8820f2c0512SUlf Hansson 		err = -EINVAL;
8830f2c0512SUlf Hansson 		goto out;
8840f2c0512SUlf Hansson 	}
8850f2c0512SUlf Hansson 
8860f2c0512SUlf Hansson 	err = mmc_send_hpi_cmd(card, &status);
8870f2c0512SUlf Hansson 	if (err)
8880f2c0512SUlf Hansson 		goto out;
8890f2c0512SUlf Hansson 
8900f2c0512SUlf Hansson 	prg_wait = jiffies + msecs_to_jiffies(card->ext_csd.out_of_int_time);
8910f2c0512SUlf Hansson 	do {
8920f2c0512SUlf Hansson 		err = mmc_send_status(card, &status);
8930f2c0512SUlf Hansson 
8940f2c0512SUlf Hansson 		if (!err && R1_CURRENT_STATE(status) == R1_STATE_TRAN)
8950f2c0512SUlf Hansson 			break;
8960f2c0512SUlf Hansson 		if (time_after(jiffies, prg_wait))
8970f2c0512SUlf Hansson 			err = -ETIMEDOUT;
8980f2c0512SUlf Hansson 	} while (!err);
8990f2c0512SUlf Hansson 
9000f2c0512SUlf Hansson out:
9010f2c0512SUlf Hansson 	return err;
9020f2c0512SUlf Hansson }
9030f2c0512SUlf Hansson 
904148bcab2SUlf Hansson int mmc_can_ext_csd(struct mmc_card *card)
905148bcab2SUlf Hansson {
906148bcab2SUlf Hansson 	return (card && card->csd.mmca_vsn > CSD_SPEC_VER_3);
907148bcab2SUlf Hansson }
908b658af71SAdrian Hunter 
9091cf8f7e5SUlf Hansson static int mmc_read_bkops_status(struct mmc_card *card)
9101cf8f7e5SUlf Hansson {
9111cf8f7e5SUlf Hansson 	int err;
9121cf8f7e5SUlf Hansson 	u8 *ext_csd;
9131cf8f7e5SUlf Hansson 
9141cf8f7e5SUlf Hansson 	err = mmc_get_ext_csd(card, &ext_csd);
9151cf8f7e5SUlf Hansson 	if (err)
9161cf8f7e5SUlf Hansson 		return err;
9171cf8f7e5SUlf Hansson 
9181cf8f7e5SUlf Hansson 	card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS];
9191cf8f7e5SUlf Hansson 	card->ext_csd.raw_exception_status = ext_csd[EXT_CSD_EXP_EVENTS_STATUS];
9201cf8f7e5SUlf Hansson 	kfree(ext_csd);
9211cf8f7e5SUlf Hansson 	return 0;
9221cf8f7e5SUlf Hansson }
9231cf8f7e5SUlf Hansson 
9241cf8f7e5SUlf Hansson /**
9250c204979SUlf Hansson  *	mmc_run_bkops - Run BKOPS for supported cards
9260c204979SUlf Hansson  *	@card: MMC card to run BKOPS for
9271cf8f7e5SUlf Hansson  *
9280c204979SUlf Hansson  *	Run background operations synchronously for cards having manual BKOPS
9290c204979SUlf Hansson  *	enabled and in case it reports urgent BKOPS level.
9301cf8f7e5SUlf Hansson  */
9310c204979SUlf Hansson void mmc_run_bkops(struct mmc_card *card)
9321cf8f7e5SUlf Hansson {
9331cf8f7e5SUlf Hansson 	int err;
9341cf8f7e5SUlf Hansson 
9350c204979SUlf Hansson 	if (!card->ext_csd.man_bkops_en)
9361cf8f7e5SUlf Hansson 		return;
9371cf8f7e5SUlf Hansson 
9381cf8f7e5SUlf Hansson 	err = mmc_read_bkops_status(card);
9391cf8f7e5SUlf Hansson 	if (err) {
9401cf8f7e5SUlf Hansson 		pr_err("%s: Failed to read bkops status: %d\n",
9411cf8f7e5SUlf Hansson 		       mmc_hostname(card->host), err);
9421cf8f7e5SUlf Hansson 		return;
9431cf8f7e5SUlf Hansson 	}
9441cf8f7e5SUlf Hansson 
9450c204979SUlf Hansson 	if (!card->ext_csd.raw_bkops_status ||
9460c204979SUlf Hansson 	    card->ext_csd.raw_bkops_status < EXT_CSD_BKOPS_LEVEL_2)
9471cf8f7e5SUlf Hansson 		return;
9481cf8f7e5SUlf Hansson 
9491cf8f7e5SUlf Hansson 	mmc_retune_hold(card->host);
9501cf8f7e5SUlf Hansson 
9510c204979SUlf Hansson 	/*
9520c204979SUlf Hansson 	 * For urgent BKOPS status, LEVEL_2 and higher, let's execute
9530c204979SUlf Hansson 	 * synchronously. In the future, we may consider starting BKOPS for less
9540c204979SUlf Hansson 	 * urgent levels by using an asynchronous background task, when idle.
9550c204979SUlf Hansson 	 */
9560c204979SUlf Hansson 	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
95724ed3bd0SUlf Hansson 			 EXT_CSD_BKOPS_START, 1, MMC_BKOPS_TIMEOUT_MS);
9580c204979SUlf Hansson 	if (err)
9591cf8f7e5SUlf Hansson 		pr_warn("%s: Error %d starting bkops\n",
9601cf8f7e5SUlf Hansson 			mmc_hostname(card->host), err);
9611cf8f7e5SUlf Hansson 
9621cf8f7e5SUlf Hansson 	mmc_retune_release(card->host);
9631cf8f7e5SUlf Hansson }
9640c204979SUlf Hansson EXPORT_SYMBOL(mmc_run_bkops);
9651cf8f7e5SUlf Hansson 
966d9df1737SUlf Hansson /*
967d9df1737SUlf Hansson  * Flush the cache to the non-volatile storage.
968d9df1737SUlf Hansson  */
969d9df1737SUlf Hansson int mmc_flush_cache(struct mmc_card *card)
970d9df1737SUlf Hansson {
971d9df1737SUlf Hansson 	int err = 0;
972d9df1737SUlf Hansson 
973d9df1737SUlf Hansson 	if (mmc_card_mmc(card) &&
974d9df1737SUlf Hansson 			(card->ext_csd.cache_size > 0) &&
975d9df1737SUlf Hansson 			(card->ext_csd.cache_ctrl & 1)) {
976d9df1737SUlf Hansson 		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
97724ed3bd0SUlf Hansson 				 EXT_CSD_FLUSH_CACHE, 1,
97824ed3bd0SUlf Hansson 				 MMC_CACHE_FLUSH_TIMEOUT_MS);
979d9df1737SUlf Hansson 		if (err)
980d9df1737SUlf Hansson 			pr_err("%s: cache flush error %d\n",
981d9df1737SUlf Hansson 					mmc_hostname(card->host), err);
982d9df1737SUlf Hansson 	}
983d9df1737SUlf Hansson 
984d9df1737SUlf Hansson 	return err;
985d9df1737SUlf Hansson }
986d9df1737SUlf Hansson EXPORT_SYMBOL(mmc_flush_cache);
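/*
 * Note: the core typically flushes the cache from its suspend and shutdown
 * paths, so buffered data reaches non-volatile storage before power is cut.
 */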
987d9df1737SUlf Hansson 
988b658af71SAdrian Hunter static int mmc_cmdq_switch(struct mmc_card *card, bool enable)
989b658af71SAdrian Hunter {
990b658af71SAdrian Hunter 	u8 val = enable ? EXT_CSD_CMDQ_MODE_ENABLED : 0;
991b658af71SAdrian Hunter 	int err;
992b658af71SAdrian Hunter 
993b658af71SAdrian Hunter 	if (!card->ext_csd.cmdq_support)
994b658af71SAdrian Hunter 		return -EOPNOTSUPP;
995b658af71SAdrian Hunter 
996b658af71SAdrian Hunter 	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_CMDQ_MODE_EN,
997b658af71SAdrian Hunter 			 val, card->ext_csd.generic_cmd6_time);
998b658af71SAdrian Hunter 	if (!err)
999b658af71SAdrian Hunter 		card->ext_csd.cmdq_en = enable;
1000b658af71SAdrian Hunter 
1001b658af71SAdrian Hunter 	return err;
1002b658af71SAdrian Hunter }
1003b658af71SAdrian Hunter 
1004b658af71SAdrian Hunter int mmc_cmdq_enable(struct mmc_card *card)
1005b658af71SAdrian Hunter {
1006b658af71SAdrian Hunter 	return mmc_cmdq_switch(card, true);
1007b658af71SAdrian Hunter }
1008b658af71SAdrian Hunter EXPORT_SYMBOL_GPL(mmc_cmdq_enable);
1009b658af71SAdrian Hunter 
1010b658af71SAdrian Hunter int mmc_cmdq_disable(struct mmc_card *card)
1011b658af71SAdrian Hunter {
1012b658af71SAdrian Hunter 	return mmc_cmdq_switch(card, false);
1013b658af71SAdrian Hunter }
1014b658af71SAdrian Hunter EXPORT_SYMBOL_GPL(mmc_cmdq_disable);
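/*
 * Note: callers are expected to disable the command queue around operations
 * that cannot run under CMDQ (RPMB accesses, for instance) and to re-enable
 * it afterwards with mmc_cmdq_enable().
 */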
1015