xref: /openbmc/linux/drivers/mmc/core/mmc_ops.c (revision 4c94cb65)
1da7fbe58SPierre Ossman /*
270f10482SPierre Ossman  *  linux/drivers/mmc/core/mmc_ops.h
3da7fbe58SPierre Ossman  *
4da7fbe58SPierre Ossman  *  Copyright 2006-2007 Pierre Ossman
5da7fbe58SPierre Ossman  *
6da7fbe58SPierre Ossman  * This program is free software; you can redistribute it and/or modify
7da7fbe58SPierre Ossman  * it under the terms of the GNU General Public License as published by
8da7fbe58SPierre Ossman  * the Free Software Foundation; either version 2 of the License, or (at
9da7fbe58SPierre Ossman  * your option) any later version.
10da7fbe58SPierre Ossman  */
11da7fbe58SPierre Ossman 
125a0e3ad6STejun Heo #include <linux/slab.h>
133ef77af1SPaul Gortmaker #include <linux/export.h>
14da7fbe58SPierre Ossman #include <linux/types.h>
15da7fbe58SPierre Ossman #include <linux/scatterlist.h>
16da7fbe58SPierre Ossman 
17da7fbe58SPierre Ossman #include <linux/mmc/host.h>
18da7fbe58SPierre Ossman #include <linux/mmc/card.h>
19da7fbe58SPierre Ossman #include <linux/mmc/mmc.h>
20da7fbe58SPierre Ossman 
21da7fbe58SPierre Ossman #include "core.h"
221cf8f7e5SUlf Hansson #include "card.h"
23c6dbab9cSAdrian Hunter #include "host.h"
24da7fbe58SPierre Ossman #include "mmc_ops.h"
25da7fbe58SPierre Ossman 
268fee476bSTrey Ramsay #define MMC_OPS_TIMEOUT_MS	(10 * 60 * 1000) /* 10 minute timeout */
278fee476bSTrey Ramsay 
2804cdbbfaSUlf Hansson static const u8 tuning_blk_pattern_4bit[] = {
2904cdbbfaSUlf Hansson 	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
3004cdbbfaSUlf Hansson 	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
3104cdbbfaSUlf Hansson 	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
3204cdbbfaSUlf Hansson 	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
3304cdbbfaSUlf Hansson 	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
3404cdbbfaSUlf Hansson 	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
3504cdbbfaSUlf Hansson 	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
3604cdbbfaSUlf Hansson 	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
3704cdbbfaSUlf Hansson };
3804cdbbfaSUlf Hansson 
/*
 * Reference tuning block pattern for an 8-bit bus (twice the 4-bit size);
 * used by mmc_send_tuning() when ios.bus_width == MMC_BUS_WIDTH_8.
 */
static const u8 tuning_blk_pattern_8bit[] = {
	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
};
5704cdbbfaSUlf Hansson 
582185bc2cSUlf Hansson int __mmc_send_status(struct mmc_card *card, u32 *status, unsigned int retries)
59a27fbf2fSSeungwon Jeon {
60a27fbf2fSSeungwon Jeon 	int err;
61c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
62a27fbf2fSSeungwon Jeon 
63a27fbf2fSSeungwon Jeon 	cmd.opcode = MMC_SEND_STATUS;
64a27fbf2fSSeungwon Jeon 	if (!mmc_host_is_spi(card->host))
65a27fbf2fSSeungwon Jeon 		cmd.arg = card->rca << 16;
66a27fbf2fSSeungwon Jeon 	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
67a27fbf2fSSeungwon Jeon 
682185bc2cSUlf Hansson 	err = mmc_wait_for_cmd(card->host, &cmd, retries);
69a27fbf2fSSeungwon Jeon 	if (err)
70a27fbf2fSSeungwon Jeon 		return err;
71a27fbf2fSSeungwon Jeon 
72a27fbf2fSSeungwon Jeon 	/* NOTE: callers are required to understand the difference
73a27fbf2fSSeungwon Jeon 	 * between "native" and SPI format status words!
74a27fbf2fSSeungwon Jeon 	 */
75a27fbf2fSSeungwon Jeon 	if (status)
76a27fbf2fSSeungwon Jeon 		*status = cmd.resp[0];
77a27fbf2fSSeungwon Jeon 
78a27fbf2fSSeungwon Jeon 	return 0;
79a27fbf2fSSeungwon Jeon }
802185bc2cSUlf Hansson EXPORT_SYMBOL_GPL(__mmc_send_status);
812185bc2cSUlf Hansson 
822185bc2cSUlf Hansson int mmc_send_status(struct mmc_card *card, u32 *status)
832185bc2cSUlf Hansson {
842185bc2cSUlf Hansson 	return __mmc_send_status(card, status, MMC_CMD_RETRIES);
852185bc2cSUlf Hansson }
861bee324aSLinus Walleij EXPORT_SYMBOL_GPL(mmc_send_status);
87a27fbf2fSSeungwon Jeon 
88da7fbe58SPierre Ossman static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card)
89da7fbe58SPierre Ossman {
90c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
91da7fbe58SPierre Ossman 
92da7fbe58SPierre Ossman 	cmd.opcode = MMC_SELECT_CARD;
93da7fbe58SPierre Ossman 
94da7fbe58SPierre Ossman 	if (card) {
95da7fbe58SPierre Ossman 		cmd.arg = card->rca << 16;
96da7fbe58SPierre Ossman 		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
97da7fbe58SPierre Ossman 	} else {
98da7fbe58SPierre Ossman 		cmd.arg = 0;
99da7fbe58SPierre Ossman 		cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
100da7fbe58SPierre Ossman 	}
101da7fbe58SPierre Ossman 
1020899e741SMasahiro Yamada 	return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
103da7fbe58SPierre Ossman }
104da7fbe58SPierre Ossman 
105da7fbe58SPierre Ossman int mmc_select_card(struct mmc_card *card)
106da7fbe58SPierre Ossman {
107da7fbe58SPierre Ossman 
108da7fbe58SPierre Ossman 	return _mmc_select_card(card->host, card);
109da7fbe58SPierre Ossman }
110da7fbe58SPierre Ossman 
111da7fbe58SPierre Ossman int mmc_deselect_cards(struct mmc_host *host)
112da7fbe58SPierre Ossman {
113da7fbe58SPierre Ossman 	return _mmc_select_card(host, NULL);
114da7fbe58SPierre Ossman }
115da7fbe58SPierre Ossman 
1163d705d14SSascha Hauer /*
1173d705d14SSascha Hauer  * Write the value specified in the device tree or board code into the optional
1183d705d14SSascha Hauer  * 16 bit Driver Stage Register. This can be used to tune raise/fall times and
1193d705d14SSascha Hauer  * drive strength of the DAT and CMD outputs. The actual meaning of a given
1203d705d14SSascha Hauer  * value is hardware dependant.
1213d705d14SSascha Hauer  * The presence of the DSR register can be determined from the CSD register,
1223d705d14SSascha Hauer  * bit 76.
1233d705d14SSascha Hauer  */
1243d705d14SSascha Hauer int mmc_set_dsr(struct mmc_host *host)
1253d705d14SSascha Hauer {
126c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
1273d705d14SSascha Hauer 
1283d705d14SSascha Hauer 	cmd.opcode = MMC_SET_DSR;
1293d705d14SSascha Hauer 
1303d705d14SSascha Hauer 	cmd.arg = (host->dsr << 16) | 0xffff;
1313d705d14SSascha Hauer 	cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
1323d705d14SSascha Hauer 
1333d705d14SSascha Hauer 	return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
1343d705d14SSascha Hauer }
1353d705d14SSascha Hauer 
/*
 * Send CMD0 (GO_IDLE_STATE) to reset the card into the idle state.
 * Returns the command result; also clears the host's SPI CRC flag since
 * a reset card no longer has CRC checking enabled.
 */
int mmc_go_idle(struct mmc_host *host)
{
	int err;
	struct mmc_command cmd = {};

	/*
	 * Non-SPI hosts need to prevent chipselect going active during
	 * GO_IDLE; that would put chips into SPI mode.  Remind them of
	 * that in case of hardware that won't pull up DAT3/nCS otherwise.
	 *
	 * SPI hosts ignore ios.chip_select; it's managed according to
	 * rules that must accommodate non-MMC slaves which this layer
	 * won't even know about.
	 */
	if (!mmc_host_is_spi(host)) {
		mmc_set_chip_select(host, MMC_CS_HIGH);
		mmc_delay(1);	/* give the line time to settle */
	}

	cmd.opcode = MMC_GO_IDLE_STATE;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC;

	err = mmc_wait_for_cmd(host, &cmd, 0);

	mmc_delay(1);

	if (!mmc_host_is_spi(host)) {
		mmc_set_chip_select(host, MMC_CS_DONTCARE);
		mmc_delay(1);
	}

	/* After CMD0 the card reverts to no-CRC operation in SPI mode. */
	host->use_spi_crc = 0;

	return err;
}
172da7fbe58SPierre Ossman 
/*
 * Send CMD1 (SEND_OP_COND) repeatedly (up to 100 tries, 10 ms apart) until
 * the card reports it has left the busy/idle state. @ocr is the requested
 * operating voltage range (ignored in SPI mode); on native hosts the card's
 * OCR response is returned through @rocr when non-NULL.
 * Returns 0 on success, -ETIMEDOUT if the card never became ready, or the
 * command error.
 */
int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
{
	struct mmc_command cmd = {};
	int i, err = 0;

	cmd.opcode = MMC_SEND_OP_COND;
	cmd.arg = mmc_host_is_spi(host) ? 0 : ocr;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;

	for (i = 100; i; i--) {
		err = mmc_wait_for_cmd(host, &cmd, 0);
		if (err)
			break;

		/* wait until reset completes */
		if (mmc_host_is_spi(host)) {
			/* SPI: card is ready once the IDLE bit clears */
			if (!(cmd.resp[0] & R1_SPI_IDLE))
				break;
		} else {
			/* native: BUSY bit set means powerup is complete */
			if (cmd.resp[0] & MMC_CARD_BUSY)
				break;
		}

		err = -ETIMEDOUT;

		mmc_delay(10);

		/*
		 * According to eMMC specification v5.1 section 6.4.3, we
		 * should issue CMD1 repeatedly in the idle state until
		 * the eMMC is ready. Otherwise some eMMC devices seem to enter
		 * the inactive mode after mmc_init_card() issued CMD0 when
		 * the eMMC device is busy.
		 */
		if (!ocr && !mmc_host_is_spi(host))
			cmd.arg = cmd.resp[0] | BIT(30);
	}

	if (rocr && !mmc_host_is_spi(host))
		*rocr = cmd.resp[0];

	return err;
}
216da7fbe58SPierre Ossman 
217da7fbe58SPierre Ossman int mmc_set_relative_addr(struct mmc_card *card)
218da7fbe58SPierre Ossman {
219c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
220da7fbe58SPierre Ossman 
221da7fbe58SPierre Ossman 	cmd.opcode = MMC_SET_RELATIVE_ADDR;
222da7fbe58SPierre Ossman 	cmd.arg = card->rca << 16;
223da7fbe58SPierre Ossman 	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
224da7fbe58SPierre Ossman 
2250899e741SMasahiro Yamada 	return mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
226da7fbe58SPierre Ossman }
227da7fbe58SPierre Ossman 
228af517150SDavid Brownell static int
229af517150SDavid Brownell mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode)
230da7fbe58SPierre Ossman {
231da7fbe58SPierre Ossman 	int err;
232c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
233da7fbe58SPierre Ossman 
234af517150SDavid Brownell 	cmd.opcode = opcode;
235af517150SDavid Brownell 	cmd.arg = arg;
236da7fbe58SPierre Ossman 	cmd.flags = MMC_RSP_R2 | MMC_CMD_AC;
237da7fbe58SPierre Ossman 
238af517150SDavid Brownell 	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
23917b0429dSPierre Ossman 	if (err)
240da7fbe58SPierre Ossman 		return err;
241da7fbe58SPierre Ossman 
242af517150SDavid Brownell 	memcpy(cxd, cmd.resp, sizeof(u32) * 4);
243da7fbe58SPierre Ossman 
24417b0429dSPierre Ossman 	return 0;
245da7fbe58SPierre Ossman }
246da7fbe58SPierre Ossman 
/*
 * NOTE: void *buf, caller for the buf is required to use DMA-capable
 * buffer or on-stack buffer (with some overhead in callee).
 *
 * Issue a single-block read (ADTC) of @len bytes into @buf, used to fetch
 * registers that are transferred as data blocks (SPI CSD/CID, EXT_CSD).
 * Returns 0 on success or the first of cmd/data errors.
 */
static int
mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
		u32 opcode, void *buf, unsigned len)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;

	mrq.cmd = &cmd;
	mrq.data = &data;

	cmd.opcode = opcode;
	cmd.arg = 0;

	/* NOTE HACK:  the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID.  Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = len;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;

	sg_init_one(&sg, buf, len);

	if (opcode == MMC_SEND_CSD || opcode == MMC_SEND_CID) {
		/*
		 * The spec states that CSD and CID accesses have a timeout
		 * of 64 clock cycles.
		 */
		data.timeout_ns = 0;
		data.timeout_clks = 64;
	} else
		mmc_set_data_timeout(&data, card);

	mmc_wait_for_req(host, &mrq);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return 0;
}
300da7fbe58SPierre Ossman 
3010796e439SUlf Hansson static int mmc_spi_send_csd(struct mmc_card *card, u32 *csd)
302af517150SDavid Brownell {
30378e48073SPierre Ossman 	int ret, i;
30406c9ccb7SWinkler, Tomas 	__be32 *csd_tmp;
30578e48073SPierre Ossman 
30622b78700SUlf Hansson 	csd_tmp = kzalloc(16, GFP_KERNEL);
3071a41313eSKyungsik Lee 	if (!csd_tmp)
3081a41313eSKyungsik Lee 		return -ENOMEM;
3091a41313eSKyungsik Lee 
3101a41313eSKyungsik Lee 	ret = mmc_send_cxd_data(card, card->host, MMC_SEND_CSD, csd_tmp, 16);
31178e48073SPierre Ossman 	if (ret)
3121a41313eSKyungsik Lee 		goto err;
31378e48073SPierre Ossman 
31478e48073SPierre Ossman 	for (i = 0; i < 4; i++)
3151a41313eSKyungsik Lee 		csd[i] = be32_to_cpu(csd_tmp[i]);
31678e48073SPierre Ossman 
3171a41313eSKyungsik Lee err:
3181a41313eSKyungsik Lee 	kfree(csd_tmp);
3191a41313eSKyungsik Lee 	return ret;
320af517150SDavid Brownell }
321af517150SDavid Brownell 
3220796e439SUlf Hansson int mmc_send_csd(struct mmc_card *card, u32 *csd)
3230796e439SUlf Hansson {
3240796e439SUlf Hansson 	if (mmc_host_is_spi(card->host))
3250796e439SUlf Hansson 		return mmc_spi_send_csd(card, csd);
3260796e439SUlf Hansson 
3270796e439SUlf Hansson 	return mmc_send_cxd_native(card->host, card->rca << 16,	csd,
3280796e439SUlf Hansson 				MMC_SEND_CSD);
3290796e439SUlf Hansson }
3300796e439SUlf Hansson 
331a1473732SUlf Hansson static int mmc_spi_send_cid(struct mmc_host *host, u32 *cid)
332af517150SDavid Brownell {
33378e48073SPierre Ossman 	int ret, i;
33406c9ccb7SWinkler, Tomas 	__be32 *cid_tmp;
33578e48073SPierre Ossman 
33622b78700SUlf Hansson 	cid_tmp = kzalloc(16, GFP_KERNEL);
3371a41313eSKyungsik Lee 	if (!cid_tmp)
3381a41313eSKyungsik Lee 		return -ENOMEM;
3391a41313eSKyungsik Lee 
3401a41313eSKyungsik Lee 	ret = mmc_send_cxd_data(NULL, host, MMC_SEND_CID, cid_tmp, 16);
34178e48073SPierre Ossman 	if (ret)
3421a41313eSKyungsik Lee 		goto err;
34378e48073SPierre Ossman 
34478e48073SPierre Ossman 	for (i = 0; i < 4; i++)
3451a41313eSKyungsik Lee 		cid[i] = be32_to_cpu(cid_tmp[i]);
34678e48073SPierre Ossman 
3471a41313eSKyungsik Lee err:
3481a41313eSKyungsik Lee 	kfree(cid_tmp);
3491a41313eSKyungsik Lee 	return ret;
350af517150SDavid Brownell }
351af517150SDavid Brownell 
352a1473732SUlf Hansson int mmc_send_cid(struct mmc_host *host, u32 *cid)
353a1473732SUlf Hansson {
354a1473732SUlf Hansson 	if (mmc_host_is_spi(host))
355a1473732SUlf Hansson 		return mmc_spi_send_cid(host, cid);
356a1473732SUlf Hansson 
357c92e68d8SUlf Hansson 	return mmc_send_cxd_native(host, 0, cid, MMC_ALL_SEND_CID);
358a1473732SUlf Hansson }
359a1473732SUlf Hansson 
360e21aa519SUlf Hansson int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)
361e21aa519SUlf Hansson {
362e21aa519SUlf Hansson 	int err;
363e21aa519SUlf Hansson 	u8 *ext_csd;
364e21aa519SUlf Hansson 
365e21aa519SUlf Hansson 	if (!card || !new_ext_csd)
366e21aa519SUlf Hansson 		return -EINVAL;
367e21aa519SUlf Hansson 
368e21aa519SUlf Hansson 	if (!mmc_can_ext_csd(card))
369e21aa519SUlf Hansson 		return -EOPNOTSUPP;
370e21aa519SUlf Hansson 
371e21aa519SUlf Hansson 	/*
372e21aa519SUlf Hansson 	 * As the ext_csd is so large and mostly unused, we don't store the
373e21aa519SUlf Hansson 	 * raw block in mmc_card.
374e21aa519SUlf Hansson 	 */
37522b78700SUlf Hansson 	ext_csd = kzalloc(512, GFP_KERNEL);
376e21aa519SUlf Hansson 	if (!ext_csd)
377e21aa519SUlf Hansson 		return -ENOMEM;
378e21aa519SUlf Hansson 
3792fc91e8bSUlf Hansson 	err = mmc_send_cxd_data(card, card->host, MMC_SEND_EXT_CSD, ext_csd,
3802fc91e8bSUlf Hansson 				512);
381e21aa519SUlf Hansson 	if (err)
382e21aa519SUlf Hansson 		kfree(ext_csd);
383e21aa519SUlf Hansson 	else
384e21aa519SUlf Hansson 		*new_ext_csd = ext_csd;
385e21aa519SUlf Hansson 
386e21aa519SUlf Hansson 	return err;
387e21aa519SUlf Hansson }
388e21aa519SUlf Hansson EXPORT_SYMBOL_GPL(mmc_get_ext_csd);
389e21aa519SUlf Hansson 
390af517150SDavid Brownell int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp)
391af517150SDavid Brownell {
392c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
393af517150SDavid Brownell 	int err;
394af517150SDavid Brownell 
395af517150SDavid Brownell 	cmd.opcode = MMC_SPI_READ_OCR;
396af517150SDavid Brownell 	cmd.arg = highcap ? (1 << 30) : 0;
397af517150SDavid Brownell 	cmd.flags = MMC_RSP_SPI_R3;
398af517150SDavid Brownell 
399af517150SDavid Brownell 	err = mmc_wait_for_cmd(host, &cmd, 0);
400af517150SDavid Brownell 
401af517150SDavid Brownell 	*ocrp = cmd.resp[1];
402af517150SDavid Brownell 	return err;
403af517150SDavid Brownell }
404af517150SDavid Brownell 
405af517150SDavid Brownell int mmc_spi_set_crc(struct mmc_host *host, int use_crc)
406af517150SDavid Brownell {
407c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
408af517150SDavid Brownell 	int err;
409af517150SDavid Brownell 
410af517150SDavid Brownell 	cmd.opcode = MMC_SPI_CRC_ON_OFF;
411af517150SDavid Brownell 	cmd.flags = MMC_RSP_SPI_R1;
412af517150SDavid Brownell 	cmd.arg = use_crc;
413af517150SDavid Brownell 
414af517150SDavid Brownell 	err = mmc_wait_for_cmd(host, &cmd, 0);
415af517150SDavid Brownell 	if (!err)
416af517150SDavid Brownell 		host->use_spi_crc = use_crc;
417af517150SDavid Brownell 	return err;
418af517150SDavid Brownell }
419af517150SDavid Brownell 
42020348d19SUlf Hansson static int mmc_switch_status_error(struct mmc_host *host, u32 status)
421ed16f58dSAdrian Hunter {
422ed16f58dSAdrian Hunter 	if (mmc_host_is_spi(host)) {
423ed16f58dSAdrian Hunter 		if (status & R1_SPI_ILLEGAL_COMMAND)
424ed16f58dSAdrian Hunter 			return -EBADMSG;
425ed16f58dSAdrian Hunter 	} else {
426a94a7483SShawn Lin 		if (R1_STATUS(status))
427ed16f58dSAdrian Hunter 			pr_warn("%s: unexpected status %#x after switch\n",
428ed16f58dSAdrian Hunter 				mmc_hostname(host), status);
429ed16f58dSAdrian Hunter 		if (status & R1_SWITCH_ERROR)
430ed16f58dSAdrian Hunter 			return -EBADMSG;
431ed16f58dSAdrian Hunter 	}
432ed16f58dSAdrian Hunter 	return 0;
433ed16f58dSAdrian Hunter }
434ed16f58dSAdrian Hunter 
43520348d19SUlf Hansson /* Caller must hold re-tuning */
436ef3d2322SAdrian Hunter int __mmc_switch_status(struct mmc_card *card, bool crc_err_fatal)
43720348d19SUlf Hansson {
43820348d19SUlf Hansson 	u32 status;
43920348d19SUlf Hansson 	int err;
44020348d19SUlf Hansson 
44120348d19SUlf Hansson 	err = mmc_send_status(card, &status);
442ef3d2322SAdrian Hunter 	if (!crc_err_fatal && err == -EILSEQ)
443ef3d2322SAdrian Hunter 		return 0;
44420348d19SUlf Hansson 	if (err)
44520348d19SUlf Hansson 		return err;
44620348d19SUlf Hansson 
44720348d19SUlf Hansson 	return mmc_switch_status_error(card->host, status);
44820348d19SUlf Hansson }
44920348d19SUlf Hansson 
450ef3d2322SAdrian Hunter int mmc_switch_status(struct mmc_card *card)
451ef3d2322SAdrian Hunter {
452ef3d2322SAdrian Hunter 	return __mmc_switch_status(card, true);
453ef3d2322SAdrian Hunter }
454ef3d2322SAdrian Hunter 
/*
 * Poll until the card leaves the busy state after a SWITCH, using the host's
 * ->card_busy() callback when available, else CMD13 (unless @send_status is
 * false). @retry_crc_err keeps polling through -EILSEQ from CMD13.
 * Returns 0 when no longer busy, -ETIMEDOUT after @timeout_ms (or the
 * fallback MMC_OPS_TIMEOUT_MS), or a status/command error.
 */
static int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
			bool send_status, bool retry_crc_err)
{
	struct mmc_host *host = card->host;
	int err;
	unsigned long timeout;
	u32 status = 0;
	bool expired = false;
	bool busy = false;

	/* We have an unspecified cmd timeout, use the fallback value. */
	if (!timeout_ms)
		timeout_ms = MMC_OPS_TIMEOUT_MS;

	/*
	 * In cases when not allowed to poll by using CMD13 or because we aren't
	 * capable of polling by using ->card_busy(), then rely on waiting the
	 * stated timeout to be sufficient.
	 */
	if (!send_status && !host->ops->card_busy) {
		mmc_delay(timeout_ms);
		return 0;
	}

	timeout = jiffies + msecs_to_jiffies(timeout_ms) + 1;
	do {
		/*
		 * Due to the possibility of being preempted while polling,
		 * check the expiration time first.
		 */
		expired = time_after(jiffies, timeout);

		if (host->ops->card_busy) {
			/* Host can sample the busy signal directly. */
			busy = host->ops->card_busy(host);
		} else {
			err = mmc_send_status(card, &status);
			if (retry_crc_err && err == -EILSEQ) {
				/* Treat a CRC error as "still busy", retry. */
				busy = true;
			} else if (err) {
				return err;
			} else {
				err = mmc_switch_status_error(host, status);
				if (err)
					return err;
				busy = R1_CURRENT_STATE(status) == R1_STATE_PRG;
			}
		}

		/* Timeout if the device still remains busy. */
		if (expired && busy) {
			pr_err("%s: Card stuck being busy! %s\n",
				mmc_hostname(host), __func__);
			return -ETIMEDOUT;
		}
	} while (busy);

	return 0;
}
513716bdb89SUlf Hansson 
/**
 *	__mmc_switch - modify EXT_CSD register
 *	@card: the MMC card associated with the data transfer
 *	@set: cmd set values
 *	@index: EXT_CSD register index
 *	@value: value to program into EXT_CSD register
 *	@timeout_ms: timeout (ms) for operation performed by register write,
 *                   timeout of zero implies maximum possible timeout
 *	@timing: new timing to change to, or 0 to keep the current timing
 *	@use_busy_signal: use the busy signal as response type
 *	@send_status: send status cmd to poll for busy
 *	@retry_crc_err: retry when CRC errors when polling with CMD13 for busy
 *
 *	Modifies the EXT_CSD register for selected card.
 *
 *	Returns 0 on success or a negative error code. Re-tuning is held
 *	for the duration of the switch and released before returning.
 */
int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
		unsigned int timeout_ms, unsigned char timing,
		bool use_busy_signal, bool send_status,	bool retry_crc_err)
{
	struct mmc_host *host = card->host;
	int err;
	struct mmc_command cmd = {};
	bool use_r1b_resp = use_busy_signal;
	unsigned char old_timing = host->ios.timing;

	mmc_retune_hold(host);

	/*
	 * If the cmd timeout and the max_busy_timeout of the host are both
	 * specified, let's validate them. A failure means we need to prevent
	 * the host from doing hw busy detection, which is done by converting
	 * to a R1 response instead of a R1B.
	 */
	if (timeout_ms && host->max_busy_timeout &&
		(timeout_ms > host->max_busy_timeout))
		use_r1b_resp = false;

	/* CMD6 argument encodes access mode, register index and value. */
	cmd.opcode = MMC_SWITCH;
	cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
		  (index << 16) |
		  (value << 8) |
		  set;
	cmd.flags = MMC_CMD_AC;
	if (use_r1b_resp) {
		cmd.flags |= MMC_RSP_SPI_R1B | MMC_RSP_R1B;
		/*
		 * A busy_timeout of zero means the host can decide to use
		 * whatever value it finds suitable.
		 */
		cmd.busy_timeout = timeout_ms;
	} else {
		cmd.flags |= MMC_RSP_SPI_R1 | MMC_RSP_R1;
	}

	if (index == EXT_CSD_SANITIZE_START)
		cmd.sanitize_busy = true;

	err = mmc_wait_for_cmd(host, &cmd, 0);
	if (err)
		goto out;

	/* No need to check card status in case of unblocking command */
	if (!use_busy_signal)
		goto out;

	/*If SPI or used HW busy detection above, then we don't need to poll. */
	if (((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp) ||
		mmc_host_is_spi(host))
		goto out_tim;

	/* Let's try to poll to find out when the command is completed. */
	err = mmc_poll_for_busy(card, timeout_ms, send_status, retry_crc_err);
	if (err)
		goto out;

out_tim:
	/* Switch to new timing before check switch status. */
	if (timing)
		mmc_set_timing(host, timing);

	if (send_status) {
		err = mmc_switch_status(card);
		/* On failure, fall back to the timing in effect on entry. */
		if (err && timing)
			mmc_set_timing(host, old_timing);
	}
out:
	mmc_retune_release(host);

	return err;
}
604950d56acSJaehoon Chung 
605950d56acSJaehoon Chung int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
606950d56acSJaehoon Chung 		unsigned int timeout_ms)
607950d56acSJaehoon Chung {
608aa33ce3cSUlf Hansson 	return __mmc_switch(card, set, index, value, timeout_ms, 0,
609aa33ce3cSUlf Hansson 			true, true, false);
610950d56acSJaehoon Chung }
611d3a8d95dSAndrei Warkentin EXPORT_SYMBOL_GPL(mmc_switch);
612da7fbe58SPierre Ossman 
/*
 * Execute one tuning command (@opcode, e.g. CMD19/CMD21): read a tuning
 * block sized for the current bus width and compare it against the
 * reference pattern. When @cmd_error is non-NULL it receives the raw
 * command error for the caller's inspection.
 * Returns 0 on a matching block, -EIO on mismatch, -EINVAL for an
 * unsupported bus width, or a cmd/data/allocation error.
 */
int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;
	struct mmc_ios *ios = &host->ios;
	const u8 *tuning_block_pattern;
	int size, err = 0;
	u8 *data_buf;

	/* Pick the reference pattern matching the configured bus width. */
	if (ios->bus_width == MMC_BUS_WIDTH_8) {
		tuning_block_pattern = tuning_blk_pattern_8bit;
		size = sizeof(tuning_blk_pattern_8bit);
	} else if (ios->bus_width == MMC_BUS_WIDTH_4) {
		tuning_block_pattern = tuning_blk_pattern_4bit;
		size = sizeof(tuning_blk_pattern_4bit);
	} else
		return -EINVAL;

	data_buf = kzalloc(size, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;

	mrq.cmd = &cmd;
	mrq.data = &data;

	cmd.opcode = opcode;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = size;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	/*
	 * According to the tuning specs, Tuning process
	 * is normally shorter 40 executions of CMD19,
	 * and timeout value should be shorter than 150 ms
	 */
	data.timeout_ns = 150 * NSEC_PER_MSEC;

	data.sg = &sg;
	data.sg_len = 1;
	sg_init_one(&sg, data_buf, size);

	mmc_wait_for_req(host, &mrq);

	if (cmd_error)
		*cmd_error = cmd.error;

	if (cmd.error) {
		err = cmd.error;
		goto out;
	}

	if (data.error) {
		err = data.error;
		goto out;
	}

	/* A mismatch means the current sample point is unusable. */
	if (memcmp(data_buf, tuning_block_pattern, size))
		err = -EIO;

out:
	kfree(data_buf);
	return err;
}
EXPORT_SYMBOL_GPL(mmc_send_tuning);
681996903deSMinda Chen 
682e711f030SAdrian Hunter int mmc_abort_tuning(struct mmc_host *host, u32 opcode)
683e711f030SAdrian Hunter {
684c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
685e711f030SAdrian Hunter 
686e711f030SAdrian Hunter 	/*
687e711f030SAdrian Hunter 	 * eMMC specification specifies that CMD12 can be used to stop a tuning
688e711f030SAdrian Hunter 	 * command, but SD specification does not, so do nothing unless it is
689e711f030SAdrian Hunter 	 * eMMC.
690e711f030SAdrian Hunter 	 */
691e711f030SAdrian Hunter 	if (opcode != MMC_SEND_TUNING_BLOCK_HS200)
692e711f030SAdrian Hunter 		return 0;
693e711f030SAdrian Hunter 
694e711f030SAdrian Hunter 	cmd.opcode = MMC_STOP_TRANSMISSION;
695e711f030SAdrian Hunter 	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
696e711f030SAdrian Hunter 
697e711f030SAdrian Hunter 	/*
698e711f030SAdrian Hunter 	 * For drivers that override R1 to R1b, set an arbitrary timeout based
699e711f030SAdrian Hunter 	 * on the tuning timeout i.e. 150ms.
700e711f030SAdrian Hunter 	 */
701e711f030SAdrian Hunter 	cmd.busy_timeout = 150;
702e711f030SAdrian Hunter 
703e711f030SAdrian Hunter 	return mmc_wait_for_cmd(host, &cmd, 0);
704e711f030SAdrian Hunter }
705e711f030SAdrian Hunter EXPORT_SYMBOL_GPL(mmc_abort_tuning);
706e711f030SAdrian Hunter 
/*
 * Issue MMC_BUS_TEST_W or MMC_BUS_TEST_R with a bus-width sized test
 * pattern to verify the data lines.  On read-back the card returns the
 * inverse of the previously written pattern, so each returned byte
 * XORed with the test byte must equal 0xff.
 */
static int
mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
		  u8 len)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;
	u8 *data_buf;
	u8 *test_buf;
	int i, err;
	static u8 testdata_8bit[8] = { 0x55, 0xaa, 0, 0, 0, 0, 0, 0 };
	static u8 testdata_4bit[4] = { 0x5a, 0, 0, 0 };

	/* dma onto stack is unsafe/nonportable, but callers to this
	 * routine normally provide temporary on-stack buffers ...
	 */
	data_buf = kmalloc(len, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;

	/* len doubles as the bus width in bytes: only 8 and 4 are valid. */
	if (len == 8)
		test_buf = testdata_8bit;
	else if (len == 4)
		test_buf = testdata_4bit;
	else {
		pr_err("%s: Invalid bus_width %d\n",
		       mmc_hostname(host), len);
		kfree(data_buf);
		return -EINVAL;
	}

	if (opcode == MMC_BUS_TEST_W)
		memcpy(data_buf, test_buf, len);

	mrq.cmd = &cmd;
	mrq.data = &data;
	cmd.opcode = opcode;
	cmd.arg = 0;

	/* NOTE HACK:  the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID.  Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = len;
	data.blocks = 1;
	if (opcode == MMC_BUS_TEST_R)
		data.flags = MMC_DATA_READ;
	else
		data.flags = MMC_DATA_WRITE;

	data.sg = &sg;
	data.sg_len = 1;
	mmc_set_data_timeout(&data, card);
	sg_init_one(&sg, data_buf, len);
	mmc_wait_for_req(host, &mrq);
	err = 0;
	if (opcode == MMC_BUS_TEST_R) {
		/*
		 * NOTE(review): only the first len/4 bytes are compared
		 * (2 bytes for the 8-bit pattern, 1 byte for the 4-bit one);
		 * the remaining test bytes are zero and carry no signal —
		 * confirm this partial check is intentional.
		 */
		for (i = 0; i < len / 4; i++)
			if ((test_buf[i] ^ data_buf[i]) != 0xff) {
				err = -EIO;
				break;
			}
	}
	kfree(data_buf);

	/* Command/data errors take precedence over a pattern mismatch. */
	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return err;
}
78322113efdSAries Lee 
78422113efdSAries Lee int mmc_bus_test(struct mmc_card *card, u8 bus_width)
78522113efdSAries Lee {
7860899e741SMasahiro Yamada 	int width;
78722113efdSAries Lee 
78822113efdSAries Lee 	if (bus_width == MMC_BUS_WIDTH_8)
78922113efdSAries Lee 		width = 8;
79022113efdSAries Lee 	else if (bus_width == MMC_BUS_WIDTH_4)
79122113efdSAries Lee 		width = 4;
79222113efdSAries Lee 	else if (bus_width == MMC_BUS_WIDTH_1)
79322113efdSAries Lee 		return 0; /* no need for test */
79422113efdSAries Lee 	else
79522113efdSAries Lee 		return -EINVAL;
79622113efdSAries Lee 
79722113efdSAries Lee 	/*
79822113efdSAries Lee 	 * Ignore errors from BUS_TEST_W.  BUS_TEST_R will fail if there
79922113efdSAries Lee 	 * is a problem.  This improves chances that the test will work.
80022113efdSAries Lee 	 */
80122113efdSAries Lee 	mmc_send_bus_test(card, card->host, MMC_BUS_TEST_W, width);
8020899e741SMasahiro Yamada 	return mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
80322113efdSAries Lee }
804eb0d8f13SJaehoon Chung 
/*
 * Issue the card's configured HPI (High Priority Interrupt) command and
 * optionally return the R1 status word from the response via @status.
 */
static int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status)
{
	struct mmc_command cmd = {};
	unsigned int opcode;
	int err;

	/*
	 * EXT_CSD selects which opcode implements HPI on this card:
	 * CMD12 (R1b busy response) or CMD13 (plain R1).  For any other
	 * value cmd.flags remains zero from the initializer.
	 */
	opcode = card->ext_csd.hpi_cmd;
	if (opcode == MMC_STOP_TRANSMISSION)
		cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
	else if (opcode == MMC_SEND_STATUS)
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	cmd.opcode = opcode;
	/* RCA in the upper 16 bits; bit 0 set marks this as an HPI request. */
	cmd.arg = card->rca << 16 | 1;

	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		pr_warn("%s: error %d interrupting operation. "
			"HPI command response %#x\n", mmc_hostname(card->host),
			err, cmd.resp[0]);
		return err;
	}
	if (status)
		*status = cmd.resp[0];

	return 0;
}
832148bcab2SUlf Hansson 
/**
 *	mmc_interrupt_hpi - Issue for High priority Interrupt
 *	@card: the MMC card associated with the HPI transfer
 *
 *	Issued High Priority Interrupt, and check for card status
 *	until out-of prg-state.
 */
int mmc_interrupt_hpi(struct mmc_card *card)
{
	int err;
	u32 status;
	unsigned long prg_wait;

	/* HPI must have been enabled in EXT_CSD for this card. */
	if (!card->ext_csd.hpi_en) {
		pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
		return 1;
	}

	err = mmc_send_status(card, &status);
	if (err) {
		pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
		goto out;
	}

	switch (R1_CURRENT_STATE(status)) {
	case R1_STATE_IDLE:
	case R1_STATE_READY:
	case R1_STATE_STBY:
	case R1_STATE_TRAN:
		/*
		 * In idle and transfer states, HPI is not needed and the caller
		 * can issue the next intended command immediately
		 */
		goto out;
	case R1_STATE_PRG:
		break;
	default:
		/* In all other states, it's illegal to issue HPI */
		pr_debug("%s: HPI cannot be sent. Card state=%d\n",
			mmc_hostname(card->host), R1_CURRENT_STATE(status));
		err = -EINVAL;
		goto out;
	}

	err = mmc_send_hpi_cmd(card, &status);
	if (err)
		goto out;

	/*
	 * Poll CMD13 until the card is back in the transfer state, or the
	 * card's advertised out-of-interrupt time has elapsed.
	 */
	prg_wait = jiffies + msecs_to_jiffies(card->ext_csd.out_of_int_time);
	do {
		err = mmc_send_status(card, &status);

		if (!err && R1_CURRENT_STATE(status) == R1_STATE_TRAN)
			break;
		if (time_after(jiffies, prg_wait))
			err = -ETIMEDOUT;
	} while (!err);

out:
	return err;
}
8940f2c0512SUlf Hansson 
895148bcab2SUlf Hansson int mmc_can_ext_csd(struct mmc_card *card)
896148bcab2SUlf Hansson {
897148bcab2SUlf Hansson 	return (card && card->csd.mmca_vsn > CSD_SPEC_VER_3);
898148bcab2SUlf Hansson }
899b658af71SAdrian Hunter 
9001cf8f7e5SUlf Hansson static int mmc_read_bkops_status(struct mmc_card *card)
9011cf8f7e5SUlf Hansson {
9021cf8f7e5SUlf Hansson 	int err;
9031cf8f7e5SUlf Hansson 	u8 *ext_csd;
9041cf8f7e5SUlf Hansson 
9051cf8f7e5SUlf Hansson 	err = mmc_get_ext_csd(card, &ext_csd);
9061cf8f7e5SUlf Hansson 	if (err)
9071cf8f7e5SUlf Hansson 		return err;
9081cf8f7e5SUlf Hansson 
9091cf8f7e5SUlf Hansson 	card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS];
9101cf8f7e5SUlf Hansson 	card->ext_csd.raw_exception_status = ext_csd[EXT_CSD_EXP_EVENTS_STATUS];
9111cf8f7e5SUlf Hansson 	kfree(ext_csd);
9121cf8f7e5SUlf Hansson 	return 0;
9131cf8f7e5SUlf Hansson }
9141cf8f7e5SUlf Hansson 
9151cf8f7e5SUlf Hansson /**
9160c204979SUlf Hansson  *	mmc_run_bkops - Run BKOPS for supported cards
9170c204979SUlf Hansson  *	@card: MMC card to run BKOPS for
9181cf8f7e5SUlf Hansson  *
9190c204979SUlf Hansson  *	Run background operations synchronously for cards having manual BKOPS
9200c204979SUlf Hansson  *	enabled and in case it reports urgent BKOPS level.
9211cf8f7e5SUlf Hansson */
9220c204979SUlf Hansson void mmc_run_bkops(struct mmc_card *card)
9231cf8f7e5SUlf Hansson {
9241cf8f7e5SUlf Hansson 	int err;
9251cf8f7e5SUlf Hansson 
9260c204979SUlf Hansson 	if (!card->ext_csd.man_bkops_en)
9271cf8f7e5SUlf Hansson 		return;
9281cf8f7e5SUlf Hansson 
9291cf8f7e5SUlf Hansson 	err = mmc_read_bkops_status(card);
9301cf8f7e5SUlf Hansson 	if (err) {
9311cf8f7e5SUlf Hansson 		pr_err("%s: Failed to read bkops status: %d\n",
9321cf8f7e5SUlf Hansson 		       mmc_hostname(card->host), err);
9331cf8f7e5SUlf Hansson 		return;
9341cf8f7e5SUlf Hansson 	}
9351cf8f7e5SUlf Hansson 
9360c204979SUlf Hansson 	if (!card->ext_csd.raw_bkops_status ||
9370c204979SUlf Hansson 	    card->ext_csd.raw_bkops_status < EXT_CSD_BKOPS_LEVEL_2)
9381cf8f7e5SUlf Hansson 		return;
9391cf8f7e5SUlf Hansson 
9401cf8f7e5SUlf Hansson 	mmc_retune_hold(card->host);
9411cf8f7e5SUlf Hansson 
9420c204979SUlf Hansson 	/*
9430c204979SUlf Hansson 	 * For urgent BKOPS status, LEVEL_2 and higher, let's execute
9440c204979SUlf Hansson 	 * synchronously. Future wise, we may consider to start BKOPS, for less
9450c204979SUlf Hansson 	 * urgent levels by using an asynchronous background task, when idle.
9460c204979SUlf Hansson 	 */
9470c204979SUlf Hansson 	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
9480c204979SUlf Hansson 			EXT_CSD_BKOPS_START, 1, MMC_OPS_TIMEOUT_MS);
9490c204979SUlf Hansson 	if (err)
9501cf8f7e5SUlf Hansson 		pr_warn("%s: Error %d starting bkops\n",
9511cf8f7e5SUlf Hansson 			mmc_hostname(card->host), err);
9521cf8f7e5SUlf Hansson 
9531cf8f7e5SUlf Hansson 	mmc_retune_release(card->host);
9541cf8f7e5SUlf Hansson }
9550c204979SUlf Hansson EXPORT_SYMBOL(mmc_run_bkops);
9561cf8f7e5SUlf Hansson 
957d9df1737SUlf Hansson /*
958d9df1737SUlf Hansson  * Flush the cache to the non-volatile storage.
959d9df1737SUlf Hansson  */
960d9df1737SUlf Hansson int mmc_flush_cache(struct mmc_card *card)
961d9df1737SUlf Hansson {
962d9df1737SUlf Hansson 	int err = 0;
963d9df1737SUlf Hansson 
964d9df1737SUlf Hansson 	if (mmc_card_mmc(card) &&
965d9df1737SUlf Hansson 			(card->ext_csd.cache_size > 0) &&
966d9df1737SUlf Hansson 			(card->ext_csd.cache_ctrl & 1)) {
967d9df1737SUlf Hansson 		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
968d9df1737SUlf Hansson 				EXT_CSD_FLUSH_CACHE, 1, 0);
969d9df1737SUlf Hansson 		if (err)
970d9df1737SUlf Hansson 			pr_err("%s: cache flush error %d\n",
971d9df1737SUlf Hansson 					mmc_hostname(card->host), err);
972d9df1737SUlf Hansson 	}
973d9df1737SUlf Hansson 
974d9df1737SUlf Hansson 	return err;
975d9df1737SUlf Hansson }
976d9df1737SUlf Hansson EXPORT_SYMBOL(mmc_flush_cache);
977d9df1737SUlf Hansson 
978b658af71SAdrian Hunter static int mmc_cmdq_switch(struct mmc_card *card, bool enable)
979b658af71SAdrian Hunter {
980b658af71SAdrian Hunter 	u8 val = enable ? EXT_CSD_CMDQ_MODE_ENABLED : 0;
981b658af71SAdrian Hunter 	int err;
982b658af71SAdrian Hunter 
983b658af71SAdrian Hunter 	if (!card->ext_csd.cmdq_support)
984b658af71SAdrian Hunter 		return -EOPNOTSUPP;
985b658af71SAdrian Hunter 
986b658af71SAdrian Hunter 	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_CMDQ_MODE_EN,
987b658af71SAdrian Hunter 			 val, card->ext_csd.generic_cmd6_time);
988b658af71SAdrian Hunter 	if (!err)
989b658af71SAdrian Hunter 		card->ext_csd.cmdq_en = enable;
990b658af71SAdrian Hunter 
991b658af71SAdrian Hunter 	return err;
992b658af71SAdrian Hunter }
993b658af71SAdrian Hunter 
/* Enable the eMMC command queue; -EOPNOTSUPP if the card lacks support. */
int mmc_cmdq_enable(struct mmc_card *card)
{
	return mmc_cmdq_switch(card, true);
}
EXPORT_SYMBOL_GPL(mmc_cmdq_enable);
999b658af71SAdrian Hunter 
/* Disable the eMMC command queue; -EOPNOTSUPP if the card lacks support. */
int mmc_cmdq_disable(struct mmc_card *card)
{
	return mmc_cmdq_switch(card, false);
}
EXPORT_SYMBOL_GPL(mmc_cmdq_disable);
1005