xref: /openbmc/linux/drivers/mmc/core/mmc_ops.c (revision c92e68d8)
1da7fbe58SPierre Ossman /*
270f10482SPierre Ossman  *  linux/drivers/mmc/core/mmc_ops.c
3da7fbe58SPierre Ossman  *
4da7fbe58SPierre Ossman  *  Copyright 2006-2007 Pierre Ossman
5da7fbe58SPierre Ossman  *
6da7fbe58SPierre Ossman  * This program is free software; you can redistribute it and/or modify
7da7fbe58SPierre Ossman  * it under the terms of the GNU General Public License as published by
8da7fbe58SPierre Ossman  * the Free Software Foundation; either version 2 of the License, or (at
9da7fbe58SPierre Ossman  * your option) any later version.
10da7fbe58SPierre Ossman  */
11da7fbe58SPierre Ossman 
125a0e3ad6STejun Heo #include <linux/slab.h>
133ef77af1SPaul Gortmaker #include <linux/export.h>
14da7fbe58SPierre Ossman #include <linux/types.h>
15da7fbe58SPierre Ossman #include <linux/scatterlist.h>
16da7fbe58SPierre Ossman 
17da7fbe58SPierre Ossman #include <linux/mmc/host.h>
18da7fbe58SPierre Ossman #include <linux/mmc/card.h>
19da7fbe58SPierre Ossman #include <linux/mmc/mmc.h>
20da7fbe58SPierre Ossman 
21da7fbe58SPierre Ossman #include "core.h"
221cf8f7e5SUlf Hansson #include "card.h"
23c6dbab9cSAdrian Hunter #include "host.h"
24da7fbe58SPierre Ossman #include "mmc_ops.h"
25da7fbe58SPierre Ossman 
268fee476bSTrey Ramsay #define MMC_OPS_TIMEOUT_MS	(10 * 60 * 1000) /* 10 minute timeout */
278fee476bSTrey Ramsay 
2804cdbbfaSUlf Hansson static const u8 tuning_blk_pattern_4bit[] = {
2904cdbbfaSUlf Hansson 	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
3004cdbbfaSUlf Hansson 	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
3104cdbbfaSUlf Hansson 	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
3204cdbbfaSUlf Hansson 	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
3304cdbbfaSUlf Hansson 	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
3404cdbbfaSUlf Hansson 	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
3504cdbbfaSUlf Hansson 	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
3604cdbbfaSUlf Hansson 	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
3704cdbbfaSUlf Hansson };
3804cdbbfaSUlf Hansson 
3904cdbbfaSUlf Hansson static const u8 tuning_blk_pattern_8bit[] = {
4004cdbbfaSUlf Hansson 	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
4104cdbbfaSUlf Hansson 	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
4204cdbbfaSUlf Hansson 	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
4304cdbbfaSUlf Hansson 	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
4404cdbbfaSUlf Hansson 	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
4504cdbbfaSUlf Hansson 	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
4604cdbbfaSUlf Hansson 	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
4704cdbbfaSUlf Hansson 	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
4804cdbbfaSUlf Hansson 	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
4904cdbbfaSUlf Hansson 	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
5004cdbbfaSUlf Hansson 	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
5104cdbbfaSUlf Hansson 	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
5204cdbbfaSUlf Hansson 	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
5304cdbbfaSUlf Hansson 	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
5404cdbbfaSUlf Hansson 	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
5504cdbbfaSUlf Hansson 	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
5604cdbbfaSUlf Hansson };
5704cdbbfaSUlf Hansson 
582185bc2cSUlf Hansson int __mmc_send_status(struct mmc_card *card, u32 *status, unsigned int retries)
59a27fbf2fSSeungwon Jeon {
60a27fbf2fSSeungwon Jeon 	int err;
61c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
62a27fbf2fSSeungwon Jeon 
63a27fbf2fSSeungwon Jeon 	cmd.opcode = MMC_SEND_STATUS;
64a27fbf2fSSeungwon Jeon 	if (!mmc_host_is_spi(card->host))
65a27fbf2fSSeungwon Jeon 		cmd.arg = card->rca << 16;
66a27fbf2fSSeungwon Jeon 	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
67a27fbf2fSSeungwon Jeon 
682185bc2cSUlf Hansson 	err = mmc_wait_for_cmd(card->host, &cmd, retries);
69a27fbf2fSSeungwon Jeon 	if (err)
70a27fbf2fSSeungwon Jeon 		return err;
71a27fbf2fSSeungwon Jeon 
72a27fbf2fSSeungwon Jeon 	/* NOTE: callers are required to understand the difference
73a27fbf2fSSeungwon Jeon 	 * between "native" and SPI format status words!
74a27fbf2fSSeungwon Jeon 	 */
75a27fbf2fSSeungwon Jeon 	if (status)
76a27fbf2fSSeungwon Jeon 		*status = cmd.resp[0];
77a27fbf2fSSeungwon Jeon 
78a27fbf2fSSeungwon Jeon 	return 0;
79a27fbf2fSSeungwon Jeon }
802185bc2cSUlf Hansson EXPORT_SYMBOL_GPL(__mmc_send_status);
812185bc2cSUlf Hansson 
822185bc2cSUlf Hansson int mmc_send_status(struct mmc_card *card, u32 *status)
832185bc2cSUlf Hansson {
842185bc2cSUlf Hansson 	return __mmc_send_status(card, status, MMC_CMD_RETRIES);
852185bc2cSUlf Hansson }
86a27fbf2fSSeungwon Jeon 
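/*
 * Example (illustrative sketch, not part of this driver): a caller can poll
 * CMD13 via mmc_send_status() until the card leaves the programming state,
 * much like mmc_interrupt_hpi() does further below. The 1 second timeout is
 * an arbitrary value chosen for the example.
 *
 *	static int example_wait_for_tran(struct mmc_card *card)
 *	{
 *		unsigned long timeout = jiffies + msecs_to_jiffies(1000);
 *		u32 status;
 *		int err;
 *
 *		do {
 *			err = mmc_send_status(card, &status);
 *			if (err)
 *				return err;
 *			if (R1_CURRENT_STATE(status) == R1_STATE_TRAN)
 *				return 0;
 *		} while (!time_after(jiffies, timeout));
 *
 *		return -ETIMEDOUT;
 *	}
 */
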
87da7fbe58SPierre Ossman static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card)
88da7fbe58SPierre Ossman {
89c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
90da7fbe58SPierre Ossman 
91da7fbe58SPierre Ossman 	cmd.opcode = MMC_SELECT_CARD;
92da7fbe58SPierre Ossman 
93da7fbe58SPierre Ossman 	if (card) {
94da7fbe58SPierre Ossman 		cmd.arg = card->rca << 16;
95da7fbe58SPierre Ossman 		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
96da7fbe58SPierre Ossman 	} else {
97da7fbe58SPierre Ossman 		cmd.arg = 0;
98da7fbe58SPierre Ossman 		cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
99da7fbe58SPierre Ossman 	}
100da7fbe58SPierre Ossman 
1010899e741SMasahiro Yamada 	return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
102da7fbe58SPierre Ossman }
103da7fbe58SPierre Ossman 
104da7fbe58SPierre Ossman int mmc_select_card(struct mmc_card *card)
105da7fbe58SPierre Ossman {
106da7fbe58SPierre Ossman 
107da7fbe58SPierre Ossman 	return _mmc_select_card(card->host, card);
108da7fbe58SPierre Ossman }
109da7fbe58SPierre Ossman 
110da7fbe58SPierre Ossman int mmc_deselect_cards(struct mmc_host *host)
111da7fbe58SPierre Ossman {
112da7fbe58SPierre Ossman 	return _mmc_select_card(host, NULL);
113da7fbe58SPierre Ossman }
114da7fbe58SPierre Ossman 
1153d705d14SSascha Hauer /*
1163d705d14SSascha Hauer  * Write the value specified in the device tree or board code into the optional
1173d705d14SSascha Hauer  * 16 bit Driver Stage Register. This can be used to tune the rise/fall times
1183d705d14SSascha Hauer  * and drive strength of the DAT and CMD outputs. The actual meaning of a
1193d705d14SSascha Hauer  * given value is hardware dependent.
1203d705d14SSascha Hauer  * The presence of the DSR register can be determined from the CSD register,
1213d705d14SSascha Hauer  * bit 76.
1223d705d14SSascha Hauer  */
1233d705d14SSascha Hauer int mmc_set_dsr(struct mmc_host *host)
1243d705d14SSascha Hauer {
125c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
1263d705d14SSascha Hauer 
1273d705d14SSascha Hauer 	cmd.opcode = MMC_SET_DSR;
1283d705d14SSascha Hauer 
1293d705d14SSascha Hauer 	cmd.arg = (host->dsr << 16) | 0xffff;
1303d705d14SSascha Hauer 	cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
1313d705d14SSascha Hauer 
1323d705d14SSascha Hauer 	return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
1333d705d14SSascha Hauer }
1343d705d14SSascha Hauer 
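/*
 * Example (illustrative sketch): board or platform code is expected to provide
 * the DSR value that mmc_set_dsr() later sends with CMD4. The 0x0404 value
 * below is purely an example, and the dsr/dsr_req host fields are assumed to
 * follow the usual struct mmc_host layout.
 *
 *	host->dsr = 0x0404;
 *	host->dsr_req = true;
 */
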
135da7fbe58SPierre Ossman int mmc_go_idle(struct mmc_host *host)
136da7fbe58SPierre Ossman {
137da7fbe58SPierre Ossman 	int err;
138c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
139da7fbe58SPierre Ossman 
140af517150SDavid Brownell 	/*
141af517150SDavid Brownell 	 * Non-SPI hosts need to prevent chipselect going active during
142af517150SDavid Brownell 	 * GO_IDLE; that would put chips into SPI mode.  Remind them of
143af517150SDavid Brownell 	 * that in case of hardware that won't pull up DAT3/nCS otherwise.
144af517150SDavid Brownell 	 *
145af517150SDavid Brownell 	 * SPI hosts ignore ios.chip_select; it's managed according to
14625985edcSLucas De Marchi 	 * rules that must accommodate non-MMC slaves which this layer
147af517150SDavid Brownell 	 * won't even know about.
148af517150SDavid Brownell 	 */
149af517150SDavid Brownell 	if (!mmc_host_is_spi(host)) {
150da7fbe58SPierre Ossman 		mmc_set_chip_select(host, MMC_CS_HIGH);
151da7fbe58SPierre Ossman 		mmc_delay(1);
152af517150SDavid Brownell 	}
153da7fbe58SPierre Ossman 
154da7fbe58SPierre Ossman 	cmd.opcode = MMC_GO_IDLE_STATE;
155da7fbe58SPierre Ossman 	cmd.arg = 0;
156af517150SDavid Brownell 	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC;
157da7fbe58SPierre Ossman 
158da7fbe58SPierre Ossman 	err = mmc_wait_for_cmd(host, &cmd, 0);
159da7fbe58SPierre Ossman 
160da7fbe58SPierre Ossman 	mmc_delay(1);
161da7fbe58SPierre Ossman 
162af517150SDavid Brownell 	if (!mmc_host_is_spi(host)) {
163da7fbe58SPierre Ossman 		mmc_set_chip_select(host, MMC_CS_DONTCARE);
164da7fbe58SPierre Ossman 		mmc_delay(1);
165af517150SDavid Brownell 	}
166af517150SDavid Brownell 
167af517150SDavid Brownell 	host->use_spi_crc = 0;
168da7fbe58SPierre Ossman 
169da7fbe58SPierre Ossman 	return err;
170da7fbe58SPierre Ossman }
171da7fbe58SPierre Ossman 
172da7fbe58SPierre Ossman int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
173da7fbe58SPierre Ossman {
174c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
175da7fbe58SPierre Ossman 	int i, err = 0;
176da7fbe58SPierre Ossman 
177da7fbe58SPierre Ossman 	cmd.opcode = MMC_SEND_OP_COND;
178af517150SDavid Brownell 	cmd.arg = mmc_host_is_spi(host) ? 0 : ocr;
179af517150SDavid Brownell 	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;
180da7fbe58SPierre Ossman 
181da7fbe58SPierre Ossman 	for (i = 100; i; i--) {
182da7fbe58SPierre Ossman 		err = mmc_wait_for_cmd(host, &cmd, 0);
18317b0429dSPierre Ossman 		if (err)
184da7fbe58SPierre Ossman 			break;
185da7fbe58SPierre Ossman 
186af517150SDavid Brownell 		/* if we're just probing, do a single pass */
187af517150SDavid Brownell 		if (ocr == 0)
188da7fbe58SPierre Ossman 			break;
189da7fbe58SPierre Ossman 
190af517150SDavid Brownell 		/* otherwise wait until reset completes */
191af517150SDavid Brownell 		if (mmc_host_is_spi(host)) {
192af517150SDavid Brownell 			if (!(cmd.resp[0] & R1_SPI_IDLE))
193af517150SDavid Brownell 				break;
194af517150SDavid Brownell 		} else {
195af517150SDavid Brownell 			if (cmd.resp[0] & MMC_CARD_BUSY)
196af517150SDavid Brownell 				break;
197af517150SDavid Brownell 		}
198af517150SDavid Brownell 
19917b0429dSPierre Ossman 		err = -ETIMEDOUT;
200da7fbe58SPierre Ossman 
201da7fbe58SPierre Ossman 		mmc_delay(10);
202da7fbe58SPierre Ossman 	}
203da7fbe58SPierre Ossman 
204af517150SDavid Brownell 	if (rocr && !mmc_host_is_spi(host))
205da7fbe58SPierre Ossman 		*rocr = cmd.resp[0];
206da7fbe58SPierre Ossman 
207da7fbe58SPierre Ossman 	return err;
208da7fbe58SPierre Ossman }
209da7fbe58SPierre Ossman 
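/*
 * Example (illustrative sketch): the attach path typically probes the OCR
 * with a zero argument first and then retries CMD1 with the voltage window
 * (plus the sector-mode bit) it intends to use; error handling is trimmed
 * for brevity.
 *
 *	u32 ocr, rocr;
 *
 *	mmc_go_idle(host);
 *	err = mmc_send_op_cond(host, 0, &ocr);
 *	if (!err)
 *		err = mmc_send_op_cond(host, ocr | (1 << 30), &rocr);
 */
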
210da7fbe58SPierre Ossman int mmc_set_relative_addr(struct mmc_card *card)
211da7fbe58SPierre Ossman {
212c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
213da7fbe58SPierre Ossman 
214da7fbe58SPierre Ossman 	cmd.opcode = MMC_SET_RELATIVE_ADDR;
215da7fbe58SPierre Ossman 	cmd.arg = card->rca << 16;
216da7fbe58SPierre Ossman 	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
217da7fbe58SPierre Ossman 
2180899e741SMasahiro Yamada 	return mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
219da7fbe58SPierre Ossman }
220da7fbe58SPierre Ossman 
221af517150SDavid Brownell static int
222af517150SDavid Brownell mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode)
223da7fbe58SPierre Ossman {
224da7fbe58SPierre Ossman 	int err;
225c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
226da7fbe58SPierre Ossman 
227af517150SDavid Brownell 	cmd.opcode = opcode;
228af517150SDavid Brownell 	cmd.arg = arg;
229da7fbe58SPierre Ossman 	cmd.flags = MMC_RSP_R2 | MMC_CMD_AC;
230da7fbe58SPierre Ossman 
231af517150SDavid Brownell 	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
23217b0429dSPierre Ossman 	if (err)
233da7fbe58SPierre Ossman 		return err;
234da7fbe58SPierre Ossman 
235af517150SDavid Brownell 	memcpy(cxd, cmd.resp, sizeof(u32) * 4);
236da7fbe58SPierre Ossman 
23717b0429dSPierre Ossman 	return 0;
238da7fbe58SPierre Ossman }
239da7fbe58SPierre Ossman 
2401a41313eSKyungsik Lee /*
2411a41313eSKyungsik Lee  * NOTE: the caller must provide a DMA-capable buffer for @buf, or an
2421a41313eSKyungsik Lee  * on-stack buffer (with some overhead in the callee).
2431a41313eSKyungsik Lee  */
244af517150SDavid Brownell static int
245af517150SDavid Brownell mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
246af517150SDavid Brownell 		u32 opcode, void *buf, unsigned len)
247da7fbe58SPierre Ossman {
248c7836d15SMasahiro Yamada 	struct mmc_request mrq = {};
249c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
250c7836d15SMasahiro Yamada 	struct mmc_data data = {};
251da7fbe58SPierre Ossman 	struct scatterlist sg;
252da7fbe58SPierre Ossman 
253da7fbe58SPierre Ossman 	mrq.cmd = &cmd;
254da7fbe58SPierre Ossman 	mrq.data = &data;
255da7fbe58SPierre Ossman 
256af517150SDavid Brownell 	cmd.opcode = opcode;
257da7fbe58SPierre Ossman 	cmd.arg = 0;
258da7fbe58SPierre Ossman 
259af517150SDavid Brownell 	/* NOTE HACK:  the MMC_RSP_SPI_R1 is always correct here, but we
260af517150SDavid Brownell 	 * rely on callers to never use this with "native" calls for reading
261af517150SDavid Brownell 	 * CSD or CID.  Native versions of those commands use the R2 type,
262af517150SDavid Brownell 	 * not R1 plus a data block.
263af517150SDavid Brownell 	 */
264af517150SDavid Brownell 	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
265af517150SDavid Brownell 
266af517150SDavid Brownell 	data.blksz = len;
267da7fbe58SPierre Ossman 	data.blocks = 1;
268da7fbe58SPierre Ossman 	data.flags = MMC_DATA_READ;
269da7fbe58SPierre Ossman 	data.sg = &sg;
270da7fbe58SPierre Ossman 	data.sg_len = 1;
271da7fbe58SPierre Ossman 
272601ed60cSUlf Hansson 	sg_init_one(&sg, buf, len);
273da7fbe58SPierre Ossman 
274cda56ac2SAdrian Hunter 	if (opcode == MMC_SEND_CSD || opcode == MMC_SEND_CID) {
2750d3e0460SMatthew Fleming 		/*
2760d3e0460SMatthew Fleming 		 * The spec states that CSD and CID accesses have a timeout
2770d3e0460SMatthew Fleming 		 * of 64 clock cycles.
2780d3e0460SMatthew Fleming 		 */
2790d3e0460SMatthew Fleming 		data.timeout_ns = 0;
2800d3e0460SMatthew Fleming 		data.timeout_clks = 64;
281cda56ac2SAdrian Hunter 	} else
282cda56ac2SAdrian Hunter 		mmc_set_data_timeout(&data, card);
283da7fbe58SPierre Ossman 
284af517150SDavid Brownell 	mmc_wait_for_req(host, &mrq);
285af517150SDavid Brownell 
28617b0429dSPierre Ossman 	if (cmd.error)
287da7fbe58SPierre Ossman 		return cmd.error;
28817b0429dSPierre Ossman 	if (data.error)
289da7fbe58SPierre Ossman 		return data.error;
290da7fbe58SPierre Ossman 
29117b0429dSPierre Ossman 	return 0;
292da7fbe58SPierre Ossman }
293da7fbe58SPierre Ossman 
294af517150SDavid Brownell int mmc_send_csd(struct mmc_card *card, u32 *csd)
295af517150SDavid Brownell {
29678e48073SPierre Ossman 	int ret, i;
29706c9ccb7SWinkler, Tomas 	__be32 *csd_tmp;
29878e48073SPierre Ossman 
299af517150SDavid Brownell 	if (!mmc_host_is_spi(card->host))
300af517150SDavid Brownell 		return mmc_send_cxd_native(card->host, card->rca << 16,
301af517150SDavid Brownell 				csd, MMC_SEND_CSD);
302af517150SDavid Brownell 
30322b78700SUlf Hansson 	csd_tmp = kzalloc(16, GFP_KERNEL);
3041a41313eSKyungsik Lee 	if (!csd_tmp)
3051a41313eSKyungsik Lee 		return -ENOMEM;
3061a41313eSKyungsik Lee 
3071a41313eSKyungsik Lee 	ret = mmc_send_cxd_data(card, card->host, MMC_SEND_CSD, csd_tmp, 16);
30878e48073SPierre Ossman 	if (ret)
3091a41313eSKyungsik Lee 		goto err;
31078e48073SPierre Ossman 
31178e48073SPierre Ossman 	for (i = 0; i < 4; i++)
3121a41313eSKyungsik Lee 		csd[i] = be32_to_cpu(csd_tmp[i]);
31378e48073SPierre Ossman 
3141a41313eSKyungsik Lee err:
3151a41313eSKyungsik Lee 	kfree(csd_tmp);
3161a41313eSKyungsik Lee 	return ret;
317af517150SDavid Brownell }
318af517150SDavid Brownell 
319a1473732SUlf Hansson static int mmc_spi_send_cid(struct mmc_host *host, u32 *cid)
320af517150SDavid Brownell {
32178e48073SPierre Ossman 	int ret, i;
32206c9ccb7SWinkler, Tomas 	__be32 *cid_tmp;
32378e48073SPierre Ossman 
32422b78700SUlf Hansson 	cid_tmp = kzalloc(16, GFP_KERNEL);
3251a41313eSKyungsik Lee 	if (!cid_tmp)
3261a41313eSKyungsik Lee 		return -ENOMEM;
3271a41313eSKyungsik Lee 
3281a41313eSKyungsik Lee 	ret = mmc_send_cxd_data(NULL, host, MMC_SEND_CID, cid_tmp, 16);
32978e48073SPierre Ossman 	if (ret)
3301a41313eSKyungsik Lee 		goto err;
33178e48073SPierre Ossman 
33278e48073SPierre Ossman 	for (i = 0; i < 4; i++)
3331a41313eSKyungsik Lee 		cid[i] = be32_to_cpu(cid_tmp[i]);
33478e48073SPierre Ossman 
3351a41313eSKyungsik Lee err:
3361a41313eSKyungsik Lee 	kfree(cid_tmp);
3371a41313eSKyungsik Lee 	return ret;
338af517150SDavid Brownell }
339af517150SDavid Brownell 
340a1473732SUlf Hansson int mmc_send_cid(struct mmc_host *host, u32 *cid)
341a1473732SUlf Hansson {
342a1473732SUlf Hansson 	if (mmc_host_is_spi(host))
343a1473732SUlf Hansson 		return mmc_spi_send_cid(host, cid);
344a1473732SUlf Hansson 
345c92e68d8SUlf Hansson 	return mmc_send_cxd_native(host, 0, cid, MMC_ALL_SEND_CID);
346a1473732SUlf Hansson }
347a1473732SUlf Hansson 
348e21aa519SUlf Hansson int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)
349e21aa519SUlf Hansson {
350e21aa519SUlf Hansson 	int err;
351e21aa519SUlf Hansson 	u8 *ext_csd;
352e21aa519SUlf Hansson 
353e21aa519SUlf Hansson 	if (!card || !new_ext_csd)
354e21aa519SUlf Hansson 		return -EINVAL;
355e21aa519SUlf Hansson 
356e21aa519SUlf Hansson 	if (!mmc_can_ext_csd(card))
357e21aa519SUlf Hansson 		return -EOPNOTSUPP;
358e21aa519SUlf Hansson 
359e21aa519SUlf Hansson 	/*
360e21aa519SUlf Hansson 	 * As the ext_csd is so large and mostly unused, we don't store the
361e21aa519SUlf Hansson 	 * raw block in mmc_card.
362e21aa519SUlf Hansson 	 */
36322b78700SUlf Hansson 	ext_csd = kzalloc(512, GFP_KERNEL);
364e21aa519SUlf Hansson 	if (!ext_csd)
365e21aa519SUlf Hansson 		return -ENOMEM;
366e21aa519SUlf Hansson 
3672fc91e8bSUlf Hansson 	err = mmc_send_cxd_data(card, card->host, MMC_SEND_EXT_CSD, ext_csd,
3682fc91e8bSUlf Hansson 				512);
369e21aa519SUlf Hansson 	if (err)
370e21aa519SUlf Hansson 		kfree(ext_csd);
371e21aa519SUlf Hansson 	else
372e21aa519SUlf Hansson 		*new_ext_csd = ext_csd;
373e21aa519SUlf Hansson 
374e21aa519SUlf Hansson 	return err;
375e21aa519SUlf Hansson }
376e21aa519SUlf Hansson EXPORT_SYMBOL_GPL(mmc_get_ext_csd);
377e21aa519SUlf Hansson 
378af517150SDavid Brownell int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp)
379af517150SDavid Brownell {
380c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
381af517150SDavid Brownell 	int err;
382af517150SDavid Brownell 
383af517150SDavid Brownell 	cmd.opcode = MMC_SPI_READ_OCR;
384af517150SDavid Brownell 	cmd.arg = highcap ? (1 << 30) : 0;
385af517150SDavid Brownell 	cmd.flags = MMC_RSP_SPI_R3;
386af517150SDavid Brownell 
387af517150SDavid Brownell 	err = mmc_wait_for_cmd(host, &cmd, 0);
388af517150SDavid Brownell 
389af517150SDavid Brownell 	*ocrp = cmd.resp[1];
390af517150SDavid Brownell 	return err;
391af517150SDavid Brownell }
392af517150SDavid Brownell 
393af517150SDavid Brownell int mmc_spi_set_crc(struct mmc_host *host, int use_crc)
394af517150SDavid Brownell {
395c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
396af517150SDavid Brownell 	int err;
397af517150SDavid Brownell 
398af517150SDavid Brownell 	cmd.opcode = MMC_SPI_CRC_ON_OFF;
399af517150SDavid Brownell 	cmd.flags = MMC_RSP_SPI_R1;
400af517150SDavid Brownell 	cmd.arg = use_crc;
401af517150SDavid Brownell 
402af517150SDavid Brownell 	err = mmc_wait_for_cmd(host, &cmd, 0);
403af517150SDavid Brownell 	if (!err)
404af517150SDavid Brownell 		host->use_spi_crc = use_crc;
405af517150SDavid Brownell 	return err;
406af517150SDavid Brownell }
407af517150SDavid Brownell 
40820348d19SUlf Hansson static int mmc_switch_status_error(struct mmc_host *host, u32 status)
409ed16f58dSAdrian Hunter {
410ed16f58dSAdrian Hunter 	if (mmc_host_is_spi(host)) {
411ed16f58dSAdrian Hunter 		if (status & R1_SPI_ILLEGAL_COMMAND)
412ed16f58dSAdrian Hunter 			return -EBADMSG;
413ed16f58dSAdrian Hunter 	} else {
414ed16f58dSAdrian Hunter 		if (status & 0xFDFFA000)
415ed16f58dSAdrian Hunter 			pr_warn("%s: unexpected status %#x after switch\n",
416ed16f58dSAdrian Hunter 				mmc_hostname(host), status);
417ed16f58dSAdrian Hunter 		if (status & R1_SWITCH_ERROR)
418ed16f58dSAdrian Hunter 			return -EBADMSG;
419ed16f58dSAdrian Hunter 	}
420ed16f58dSAdrian Hunter 	return 0;
421ed16f58dSAdrian Hunter }
422ed16f58dSAdrian Hunter 
42320348d19SUlf Hansson /* Caller must hold re-tuning */
424ef3d2322SAdrian Hunter int __mmc_switch_status(struct mmc_card *card, bool crc_err_fatal)
42520348d19SUlf Hansson {
42620348d19SUlf Hansson 	u32 status;
42720348d19SUlf Hansson 	int err;
42820348d19SUlf Hansson 
42920348d19SUlf Hansson 	err = mmc_send_status(card, &status);
430ef3d2322SAdrian Hunter 	if (!crc_err_fatal && err == -EILSEQ)
431ef3d2322SAdrian Hunter 		return 0;
43220348d19SUlf Hansson 	if (err)
43320348d19SUlf Hansson 		return err;
43420348d19SUlf Hansson 
43520348d19SUlf Hansson 	return mmc_switch_status_error(card->host, status);
43620348d19SUlf Hansson }
43720348d19SUlf Hansson 
438ef3d2322SAdrian Hunter int mmc_switch_status(struct mmc_card *card)
439ef3d2322SAdrian Hunter {
440ef3d2322SAdrian Hunter 	return __mmc_switch_status(card, true);
441ef3d2322SAdrian Hunter }
442ef3d2322SAdrian Hunter 
443716bdb89SUlf Hansson static int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
444625228faSUlf Hansson 			bool send_status, bool retry_crc_err)
445716bdb89SUlf Hansson {
446716bdb89SUlf Hansson 	struct mmc_host *host = card->host;
447716bdb89SUlf Hansson 	int err;
448716bdb89SUlf Hansson 	unsigned long timeout;
449716bdb89SUlf Hansson 	u32 status = 0;
450716bdb89SUlf Hansson 	bool expired = false;
451716bdb89SUlf Hansson 	bool busy = false;
452716bdb89SUlf Hansson 
453716bdb89SUlf Hansson 	/* We have an unspecified cmd timeout, use the fallback value. */
454716bdb89SUlf Hansson 	if (!timeout_ms)
455716bdb89SUlf Hansson 		timeout_ms = MMC_OPS_TIMEOUT_MS;
456716bdb89SUlf Hansson 
457716bdb89SUlf Hansson 	/*
458716bdb89SUlf Hansson 	 * If we are not allowed to poll with CMD13 and the host is not
459716bdb89SUlf Hansson 	 * capable of polling via ->card_busy(), rely on waiting out the
460716bdb89SUlf Hansson 	 * stated timeout instead.
461716bdb89SUlf Hansson 	 */
462716bdb89SUlf Hansson 	if (!send_status && !host->ops->card_busy) {
463716bdb89SUlf Hansson 		mmc_delay(timeout_ms);
464716bdb89SUlf Hansson 		return 0;
465716bdb89SUlf Hansson 	}
466716bdb89SUlf Hansson 
467716bdb89SUlf Hansson 	timeout = jiffies + msecs_to_jiffies(timeout_ms) + 1;
468716bdb89SUlf Hansson 	do {
469716bdb89SUlf Hansson 		/*
47070562644SUlf Hansson 		 * Due to the possibility of being preempted while polling,
47170562644SUlf Hansson 		 * check the expiration time first.
472716bdb89SUlf Hansson 		 */
473716bdb89SUlf Hansson 		expired = time_after(jiffies, timeout);
47470562644SUlf Hansson 
47570562644SUlf Hansson 		if (host->ops->card_busy) {
47670562644SUlf Hansson 			busy = host->ops->card_busy(host);
47770562644SUlf Hansson 		} else {
478437590a1SUlf Hansson 			err = mmc_send_status(card, &status);
4795ec32f84SUlf Hansson 			if (retry_crc_err && err == -EILSEQ) {
480437590a1SUlf Hansson 				busy = true;
4815ec32f84SUlf Hansson 			} else if (err) {
482716bdb89SUlf Hansson 				return err;
4835ec32f84SUlf Hansson 			} else {
4845ec32f84SUlf Hansson 				err = mmc_switch_status_error(host, status);
4855ec32f84SUlf Hansson 				if (err)
4865ec32f84SUlf Hansson 					return err;
48770562644SUlf Hansson 				busy = R1_CURRENT_STATE(status) == R1_STATE_PRG;
488716bdb89SUlf Hansson 			}
4895ec32f84SUlf Hansson 		}
490716bdb89SUlf Hansson 
49170562644SUlf Hansson 		/* Timeout if the device still remains busy. */
49270562644SUlf Hansson 		if (expired && busy) {
49370562644SUlf Hansson 			pr_err("%s: Card stuck being busy! %s\n",
494716bdb89SUlf Hansson 				mmc_hostname(host), __func__);
495716bdb89SUlf Hansson 			return -ETIMEDOUT;
496716bdb89SUlf Hansson 		}
49770562644SUlf Hansson 	} while (busy);
498716bdb89SUlf Hansson 
4995ec32f84SUlf Hansson 	return 0;
500716bdb89SUlf Hansson }
501716bdb89SUlf Hansson 
502d3a8d95dSAndrei Warkentin /**
503950d56acSJaehoon Chung  *	__mmc_switch - modify EXT_CSD register
504d3a8d95dSAndrei Warkentin  *	@card: the MMC card associated with the data transfer
505d3a8d95dSAndrei Warkentin  *	@set: cmd set values
506d3a8d95dSAndrei Warkentin  *	@index: EXT_CSD register index
507d3a8d95dSAndrei Warkentin  *	@value: value to program into EXT_CSD register
508d3a8d95dSAndrei Warkentin  *	@timeout_ms: timeout (ms) for operation performed by register write,
509d3a8d95dSAndrei Warkentin  *                   timeout of zero implies maximum possible timeout
510aa33ce3cSUlf Hansson  *	@timing: new timing to change to
511950d56acSJaehoon Chung  *	@use_busy_signal: use the busy signal as response type
512878e200bSUlf Hansson  *	@send_status: send status cmd to poll for busy
513625228faSUlf Hansson  *	@retry_crc_err: retry on CRC errors when polling with CMD13 for busy
514d3a8d95dSAndrei Warkentin  *
515d3a8d95dSAndrei Warkentin  *	Modifies the EXT_CSD register for selected card.
516d3a8d95dSAndrei Warkentin  */
517950d56acSJaehoon Chung int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
518aa33ce3cSUlf Hansson 		unsigned int timeout_ms, unsigned char timing,
519aa33ce3cSUlf Hansson 		bool use_busy_signal, bool send_status,	bool retry_crc_err)
520da7fbe58SPierre Ossman {
521636bd13cSUlf Hansson 	struct mmc_host *host = card->host;
522da7fbe58SPierre Ossman 	int err;
523c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
524b9ec2616SUlf Hansson 	bool use_r1b_resp = use_busy_signal;
525aa33ce3cSUlf Hansson 	unsigned char old_timing = host->ios.timing;
526b9ec2616SUlf Hansson 
527c6dbab9cSAdrian Hunter 	mmc_retune_hold(host);
528c6dbab9cSAdrian Hunter 
529b9ec2616SUlf Hansson 	/*
530b9ec2616SUlf Hansson 	 * If the cmd timeout and the max_busy_timeout of the host are both
531b9ec2616SUlf Hansson 	 * specified, let's validate them. A failure means we need to prevent
532b9ec2616SUlf Hansson 	 * the host from doing hw busy detection, which is done by converting
533b9ec2616SUlf Hansson 	 * to an R1 response instead of an R1B.
534b9ec2616SUlf Hansson 	 */
535b9ec2616SUlf Hansson 	if (timeout_ms && host->max_busy_timeout &&
536b9ec2616SUlf Hansson 		(timeout_ms > host->max_busy_timeout))
537b9ec2616SUlf Hansson 		use_r1b_resp = false;
538da7fbe58SPierre Ossman 
539da7fbe58SPierre Ossman 	cmd.opcode = MMC_SWITCH;
540da7fbe58SPierre Ossman 	cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
541da7fbe58SPierre Ossman 		  (index << 16) |
542da7fbe58SPierre Ossman 		  (value << 8) |
543da7fbe58SPierre Ossman 		  set;
544950d56acSJaehoon Chung 	cmd.flags = MMC_CMD_AC;
545b9ec2616SUlf Hansson 	if (use_r1b_resp) {
546950d56acSJaehoon Chung 		cmd.flags |= MMC_RSP_SPI_R1B | MMC_RSP_R1B;
547b9ec2616SUlf Hansson 		/*
548b9ec2616SUlf Hansson 		 * A busy_timeout of zero means the host can decide to use
549b9ec2616SUlf Hansson 		 * whatever value it finds suitable.
550b9ec2616SUlf Hansson 		 */
5511d4d7744SUlf Hansson 		cmd.busy_timeout = timeout_ms;
552b9ec2616SUlf Hansson 	} else {
553b9ec2616SUlf Hansson 		cmd.flags |= MMC_RSP_SPI_R1 | MMC_RSP_R1;
554b9ec2616SUlf Hansson 	}
555b9ec2616SUlf Hansson 
556775a9362SMaya Erez 	if (index == EXT_CSD_SANITIZE_START)
557775a9362SMaya Erez 		cmd.sanitize_busy = true;
558da7fbe58SPierre Ossman 
559636bd13cSUlf Hansson 	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
56017b0429dSPierre Ossman 	if (err)
561c6dbab9cSAdrian Hunter 		goto out;
562da7fbe58SPierre Ossman 
563950d56acSJaehoon Chung 	/* No need to check card status in case of unblocking command */
564950d56acSJaehoon Chung 	if (!use_busy_signal)
565c6dbab9cSAdrian Hunter 		goto out;
566950d56acSJaehoon Chung 
567cb26ce06SUlf Hansson 	/* If SPI, or if HW busy detection was used above, we don't need to poll. */
568cb26ce06SUlf Hansson 	if (((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp) ||
569ee6ff743SUlf Hansson 		mmc_host_is_spi(host))
570aa33ce3cSUlf Hansson 		goto out_tim;
571a27fbf2fSSeungwon Jeon 
572716bdb89SUlf Hansson 	/* Let's try to poll to find out when the command is completed. */
573625228faSUlf Hansson 	err = mmc_poll_for_busy(card, timeout_ms, send_status, retry_crc_err);
574ee6ff743SUlf Hansson 	if (err)
575ee6ff743SUlf Hansson 		goto out;
576aa33ce3cSUlf Hansson 
577aa33ce3cSUlf Hansson out_tim:
578ee6ff743SUlf Hansson 	/* Switch to the new timing before checking the switch status. */
579ee6ff743SUlf Hansson 	if (timing)
580ee6ff743SUlf Hansson 		mmc_set_timing(host, timing);
581ee6ff743SUlf Hansson 
582ee6ff743SUlf Hansson 	if (send_status) {
583ee6ff743SUlf Hansson 		err = mmc_switch_status(card);
584aa33ce3cSUlf Hansson 		if (err && timing)
585aa33ce3cSUlf Hansson 			mmc_set_timing(host, old_timing);
586ee6ff743SUlf Hansson 	}
587c6dbab9cSAdrian Hunter out:
588c6dbab9cSAdrian Hunter 	mmc_retune_release(host);
589ef0b27d4SAdrian Hunter 
590c6dbab9cSAdrian Hunter 	return err;
591da7fbe58SPierre Ossman }
592950d56acSJaehoon Chung 
593950d56acSJaehoon Chung int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
594950d56acSJaehoon Chung 		unsigned int timeout_ms)
595950d56acSJaehoon Chung {
596aa33ce3cSUlf Hansson 	return __mmc_switch(card, set, index, value, timeout_ms, 0,
597aa33ce3cSUlf Hansson 			true, true, false);
598950d56acSJaehoon Chung }
599d3a8d95dSAndrei Warkentin EXPORT_SYMBOL_GPL(mmc_switch);
600da7fbe58SPierre Ossman 
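/*
 * Example (illustrative sketch): a typical mmc_switch() user is the card init
 * code when it rewrites a single EXT_CSD byte, e.g. the bus width, roughly as
 * mmc_select_bus_width() does:
 *
 *	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
 *			 EXT_CSD_BUS_WIDTH_4, card->ext_csd.generic_cmd6_time);
 */
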
6019979dbe5SChaotian Jing int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error)
602996903deSMinda Chen {
603c7836d15SMasahiro Yamada 	struct mmc_request mrq = {};
604c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
605c7836d15SMasahiro Yamada 	struct mmc_data data = {};
606996903deSMinda Chen 	struct scatterlist sg;
607fe5afb13SUlf Hansson 	struct mmc_ios *ios = &host->ios;
608996903deSMinda Chen 	const u8 *tuning_block_pattern;
609996903deSMinda Chen 	int size, err = 0;
610996903deSMinda Chen 	u8 *data_buf;
611996903deSMinda Chen 
612996903deSMinda Chen 	if (ios->bus_width == MMC_BUS_WIDTH_8) {
613996903deSMinda Chen 		tuning_block_pattern = tuning_blk_pattern_8bit;
614996903deSMinda Chen 		size = sizeof(tuning_blk_pattern_8bit);
615996903deSMinda Chen 	} else if (ios->bus_width == MMC_BUS_WIDTH_4) {
616996903deSMinda Chen 		tuning_block_pattern = tuning_blk_pattern_4bit;
617996903deSMinda Chen 		size = sizeof(tuning_blk_pattern_4bit);
618996903deSMinda Chen 	} else
619996903deSMinda Chen 		return -EINVAL;
620996903deSMinda Chen 
621996903deSMinda Chen 	data_buf = kzalloc(size, GFP_KERNEL);
622996903deSMinda Chen 	if (!data_buf)
623996903deSMinda Chen 		return -ENOMEM;
624996903deSMinda Chen 
625996903deSMinda Chen 	mrq.cmd = &cmd;
626996903deSMinda Chen 	mrq.data = &data;
627996903deSMinda Chen 
628996903deSMinda Chen 	cmd.opcode = opcode;
629996903deSMinda Chen 	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
630996903deSMinda Chen 
631996903deSMinda Chen 	data.blksz = size;
632996903deSMinda Chen 	data.blocks = 1;
633996903deSMinda Chen 	data.flags = MMC_DATA_READ;
634996903deSMinda Chen 
635996903deSMinda Chen 	/*
636996903deSMinda Chen 	 * According to the tuning specs, the tuning process normally
637996903deSMinda Chen 	 * completes in fewer than 40 executions of CMD19, and the
638996903deSMinda Chen 	 * timeout value should be shorter than 150 ms.
639996903deSMinda Chen 	 */
640996903deSMinda Chen 	data.timeout_ns = 150 * NSEC_PER_MSEC;
641996903deSMinda Chen 
642996903deSMinda Chen 	data.sg = &sg;
643996903deSMinda Chen 	data.sg_len = 1;
644996903deSMinda Chen 	sg_init_one(&sg, data_buf, size);
645996903deSMinda Chen 
646fe5afb13SUlf Hansson 	mmc_wait_for_req(host, &mrq);
647996903deSMinda Chen 
6489979dbe5SChaotian Jing 	if (cmd_error)
6499979dbe5SChaotian Jing 		*cmd_error = cmd.error;
6509979dbe5SChaotian Jing 
651996903deSMinda Chen 	if (cmd.error) {
652996903deSMinda Chen 		err = cmd.error;
653996903deSMinda Chen 		goto out;
654996903deSMinda Chen 	}
655996903deSMinda Chen 
656996903deSMinda Chen 	if (data.error) {
657996903deSMinda Chen 		err = data.error;
658996903deSMinda Chen 		goto out;
659996903deSMinda Chen 	}
660996903deSMinda Chen 
661996903deSMinda Chen 	if (memcmp(data_buf, tuning_block_pattern, size))
662996903deSMinda Chen 		err = -EIO;
663996903deSMinda Chen 
664996903deSMinda Chen out:
665996903deSMinda Chen 	kfree(data_buf);
666996903deSMinda Chen 	return err;
667996903deSMinda Chen }
668996903deSMinda Chen EXPORT_SYMBOL_GPL(mmc_send_tuning);
669996903deSMinda Chen 
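/*
 * Example (illustrative sketch): a host driver's ->execute_tuning() callback
 * usually steps through its sample-clock phases and lets mmc_send_tuning()
 * judge each setting. foo_set_phase() and the number of phases are
 * hypothetical placeholders for controller-specific code.
 *
 *	static int foo_execute_tuning(struct mmc_host *mmc, u32 opcode)
 *	{
 *		int phase;
 *
 *		for (phase = 0; phase < 16; phase++) {
 *			foo_set_phase(mmc, phase);
 *			if (!mmc_send_tuning(mmc, opcode, NULL))
 *				return 0;
 *		}
 *		return -EIO;
 *	}
 */
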
670e711f030SAdrian Hunter int mmc_abort_tuning(struct mmc_host *host, u32 opcode)
671e711f030SAdrian Hunter {
672c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
673e711f030SAdrian Hunter 
674e711f030SAdrian Hunter 	/*
675e711f030SAdrian Hunter 	 * The eMMC specification states that CMD12 can be used to stop a tuning
676e711f030SAdrian Hunter 	 * command, but the SD specification does not, so do nothing unless it is
677e711f030SAdrian Hunter 	 * eMMC.
678e711f030SAdrian Hunter 	 */
679e711f030SAdrian Hunter 	if (opcode != MMC_SEND_TUNING_BLOCK_HS200)
680e711f030SAdrian Hunter 		return 0;
681e711f030SAdrian Hunter 
682e711f030SAdrian Hunter 	cmd.opcode = MMC_STOP_TRANSMISSION;
683e711f030SAdrian Hunter 	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
684e711f030SAdrian Hunter 
685e711f030SAdrian Hunter 	/*
686e711f030SAdrian Hunter 	 * For drivers that override R1 to R1b, set an arbitrary timeout based
687e711f030SAdrian Hunter 	 * on the tuning timeout i.e. 150ms.
688e711f030SAdrian Hunter 	 */
689e711f030SAdrian Hunter 	cmd.busy_timeout = 150;
690e711f030SAdrian Hunter 
691e711f030SAdrian Hunter 	return mmc_wait_for_cmd(host, &cmd, 0);
692e711f030SAdrian Hunter }
693e711f030SAdrian Hunter EXPORT_SYMBOL_GPL(mmc_abort_tuning);
694e711f030SAdrian Hunter 
69522113efdSAries Lee static int
69622113efdSAries Lee mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
69722113efdSAries Lee 		  u8 len)
69822113efdSAries Lee {
699c7836d15SMasahiro Yamada 	struct mmc_request mrq = {};
700c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
701c7836d15SMasahiro Yamada 	struct mmc_data data = {};
70222113efdSAries Lee 	struct scatterlist sg;
70322113efdSAries Lee 	u8 *data_buf;
70422113efdSAries Lee 	u8 *test_buf;
70522113efdSAries Lee 	int i, err;
70622113efdSAries Lee 	static u8 testdata_8bit[8] = { 0x55, 0xaa, 0, 0, 0, 0, 0, 0 };
70722113efdSAries Lee 	static u8 testdata_4bit[4] = { 0x5a, 0, 0, 0 };
70822113efdSAries Lee 
70922113efdSAries Lee 	/* dma onto stack is unsafe/nonportable, but callers to this
71022113efdSAries Lee 	 * routine normally provide temporary on-stack buffers ...
71122113efdSAries Lee 	 */
71222113efdSAries Lee 	data_buf = kmalloc(len, GFP_KERNEL);
71322113efdSAries Lee 	if (!data_buf)
71422113efdSAries Lee 		return -ENOMEM;
71522113efdSAries Lee 
71622113efdSAries Lee 	if (len == 8)
71722113efdSAries Lee 		test_buf = testdata_8bit;
71822113efdSAries Lee 	else if (len == 4)
71922113efdSAries Lee 		test_buf = testdata_4bit;
72022113efdSAries Lee 	else {
721a3c76eb9SGirish K S 		pr_err("%s: Invalid bus_width %d\n",
72222113efdSAries Lee 		       mmc_hostname(host), len);
72322113efdSAries Lee 		kfree(data_buf);
72422113efdSAries Lee 		return -EINVAL;
72522113efdSAries Lee 	}
72622113efdSAries Lee 
72722113efdSAries Lee 	if (opcode == MMC_BUS_TEST_W)
72822113efdSAries Lee 		memcpy(data_buf, test_buf, len);
72922113efdSAries Lee 
73022113efdSAries Lee 	mrq.cmd = &cmd;
73122113efdSAries Lee 	mrq.data = &data;
73222113efdSAries Lee 	cmd.opcode = opcode;
73322113efdSAries Lee 	cmd.arg = 0;
73422113efdSAries Lee 
73522113efdSAries Lee 	/* NOTE HACK:  the MMC_RSP_SPI_R1 is always correct here, but we
73622113efdSAries Lee 	 * rely on callers to never use this with "native" calls for reading
73722113efdSAries Lee 	 * CSD or CID.  Native versions of those commands use the R2 type,
73822113efdSAries Lee 	 * not R1 plus a data block.
73922113efdSAries Lee 	 */
74022113efdSAries Lee 	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
74122113efdSAries Lee 
74222113efdSAries Lee 	data.blksz = len;
74322113efdSAries Lee 	data.blocks = 1;
74422113efdSAries Lee 	if (opcode == MMC_BUS_TEST_R)
74522113efdSAries Lee 		data.flags = MMC_DATA_READ;
74622113efdSAries Lee 	else
74722113efdSAries Lee 		data.flags = MMC_DATA_WRITE;
74822113efdSAries Lee 
74922113efdSAries Lee 	data.sg = &sg;
75022113efdSAries Lee 	data.sg_len = 1;
75184532e33SMinjian Wu 	mmc_set_data_timeout(&data, card);
75222113efdSAries Lee 	sg_init_one(&sg, data_buf, len);
75322113efdSAries Lee 	mmc_wait_for_req(host, &mrq);
75422113efdSAries Lee 	err = 0;
75522113efdSAries Lee 	if (opcode == MMC_BUS_TEST_R) {
75622113efdSAries Lee 		for (i = 0; i < len / 4; i++)
75722113efdSAries Lee 			if ((test_buf[i] ^ data_buf[i]) != 0xff) {
75822113efdSAries Lee 				err = -EIO;
75922113efdSAries Lee 				break;
76022113efdSAries Lee 			}
76122113efdSAries Lee 	}
76222113efdSAries Lee 	kfree(data_buf);
76322113efdSAries Lee 
76422113efdSAries Lee 	if (cmd.error)
76522113efdSAries Lee 		return cmd.error;
76622113efdSAries Lee 	if (data.error)
76722113efdSAries Lee 		return data.error;
76822113efdSAries Lee 
76922113efdSAries Lee 	return err;
77022113efdSAries Lee }
77122113efdSAries Lee 
77222113efdSAries Lee int mmc_bus_test(struct mmc_card *card, u8 bus_width)
77322113efdSAries Lee {
7740899e741SMasahiro Yamada 	int width;
77522113efdSAries Lee 
77622113efdSAries Lee 	if (bus_width == MMC_BUS_WIDTH_8)
77722113efdSAries Lee 		width = 8;
77822113efdSAries Lee 	else if (bus_width == MMC_BUS_WIDTH_4)
77922113efdSAries Lee 		width = 4;
78022113efdSAries Lee 	else if (bus_width == MMC_BUS_WIDTH_1)
78122113efdSAries Lee 		return 0; /* no need for test */
78222113efdSAries Lee 	else
78322113efdSAries Lee 		return -EINVAL;
78422113efdSAries Lee 
78522113efdSAries Lee 	/*
78622113efdSAries Lee 	 * Ignore errors from BUS_TEST_W.  BUS_TEST_R will fail if there
78722113efdSAries Lee 	 * is a problem.  This improves chances that the test will work.
78822113efdSAries Lee 	 */
78922113efdSAries Lee 	mmc_send_bus_test(card, card->host, MMC_BUS_TEST_W, width);
7900899e741SMasahiro Yamada 	return mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
79122113efdSAries Lee }
792eb0d8f13SJaehoon Chung 
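/*
 * Example (illustrative sketch): bus-width selection tries the widest width
 * first and uses mmc_bus_test() to confirm it actually works, dropping back
 * on failure, roughly:
 *
 *	mmc_set_bus_width(card->host, MMC_BUS_WIDTH_8);
 *	err = mmc_bus_test(card, MMC_BUS_WIDTH_8);
 *	if (err)
 *		mmc_set_bus_width(card->host, MMC_BUS_WIDTH_4);
 */
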
7930f2c0512SUlf Hansson static int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status)
794eb0d8f13SJaehoon Chung {
795c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
796eb0d8f13SJaehoon Chung 	unsigned int opcode;
797eb0d8f13SJaehoon Chung 	int err;
798eb0d8f13SJaehoon Chung 
7992378975bSJaehoon Chung 	if (!card->ext_csd.hpi) {
8006606110dSJoe Perches 		pr_warn("%s: Card didn't support HPI command\n",
8012378975bSJaehoon Chung 			mmc_hostname(card->host));
8022378975bSJaehoon Chung 		return -EINVAL;
8032378975bSJaehoon Chung 	}
8042378975bSJaehoon Chung 
805eb0d8f13SJaehoon Chung 	opcode = card->ext_csd.hpi_cmd;
806eb0d8f13SJaehoon Chung 	if (opcode == MMC_STOP_TRANSMISSION)
8072378975bSJaehoon Chung 		cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
808eb0d8f13SJaehoon Chung 	else if (opcode == MMC_SEND_STATUS)
8092378975bSJaehoon Chung 		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
810eb0d8f13SJaehoon Chung 
811eb0d8f13SJaehoon Chung 	cmd.opcode = opcode;
812eb0d8f13SJaehoon Chung 	cmd.arg = card->rca << 16 | 1;
813eb0d8f13SJaehoon Chung 
814eb0d8f13SJaehoon Chung 	err = mmc_wait_for_cmd(card->host, &cmd, 0);
815eb0d8f13SJaehoon Chung 	if (err) {
816eb0d8f13SJaehoon Chung 		pr_warn("%s: error %d interrupting operation. "
817eb0d8f13SJaehoon Chung 			"HPI command response %#x\n", mmc_hostname(card->host),
818eb0d8f13SJaehoon Chung 			err, cmd.resp[0]);
819eb0d8f13SJaehoon Chung 		return err;
820eb0d8f13SJaehoon Chung 	}
821eb0d8f13SJaehoon Chung 	if (status)
822eb0d8f13SJaehoon Chung 		*status = cmd.resp[0];
823eb0d8f13SJaehoon Chung 
824eb0d8f13SJaehoon Chung 	return 0;
825eb0d8f13SJaehoon Chung }
826148bcab2SUlf Hansson 
8270f2c0512SUlf Hansson /**
8280f2c0512SUlf Hansson  *	mmc_interrupt_hpi - Issue for High priority Interrupt
8290f2c0512SUlf Hansson  *	@card: the MMC card associated with the HPI transfer
8300f2c0512SUlf Hansson  *
8310f2c0512SUlf Hansson  *	Issue a High Priority Interrupt, and poll the card status
8320f2c0512SUlf Hansson  *	until it is out of the programming state.
8330f2c0512SUlf Hansson  */
8340f2c0512SUlf Hansson int mmc_interrupt_hpi(struct mmc_card *card)
8350f2c0512SUlf Hansson {
8360f2c0512SUlf Hansson 	int err;
8370f2c0512SUlf Hansson 	u32 status;
8380f2c0512SUlf Hansson 	unsigned long prg_wait;
8390f2c0512SUlf Hansson 
8400f2c0512SUlf Hansson 	if (!card->ext_csd.hpi_en) {
8410f2c0512SUlf Hansson 		pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
8420f2c0512SUlf Hansson 		return 1;
8430f2c0512SUlf Hansson 	}
8440f2c0512SUlf Hansson 
8450f2c0512SUlf Hansson 	mmc_claim_host(card->host);
8460f2c0512SUlf Hansson 	err = mmc_send_status(card, &status);
8470f2c0512SUlf Hansson 	if (err) {
8480f2c0512SUlf Hansson 		pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
8490f2c0512SUlf Hansson 		goto out;
8500f2c0512SUlf Hansson 	}
8510f2c0512SUlf Hansson 
8520f2c0512SUlf Hansson 	switch (R1_CURRENT_STATE(status)) {
8530f2c0512SUlf Hansson 	case R1_STATE_IDLE:
8540f2c0512SUlf Hansson 	case R1_STATE_READY:
8550f2c0512SUlf Hansson 	case R1_STATE_STBY:
8560f2c0512SUlf Hansson 	case R1_STATE_TRAN:
8570f2c0512SUlf Hansson 		/*
8580f2c0512SUlf Hansson 		 * In idle and transfer states, HPI is not needed and the caller
8590f2c0512SUlf Hansson 		 * can issue the next intended command immediately
8600f2c0512SUlf Hansson 		 */
8610f2c0512SUlf Hansson 		goto out;
8620f2c0512SUlf Hansson 	case R1_STATE_PRG:
8630f2c0512SUlf Hansson 		break;
8640f2c0512SUlf Hansson 	default:
8650f2c0512SUlf Hansson 		/* In all other states, it's illegal to issue HPI */
8660f2c0512SUlf Hansson 		pr_debug("%s: HPI cannot be sent. Card state=%d\n",
8670f2c0512SUlf Hansson 			mmc_hostname(card->host), R1_CURRENT_STATE(status));
8680f2c0512SUlf Hansson 		err = -EINVAL;
8690f2c0512SUlf Hansson 		goto out;
8700f2c0512SUlf Hansson 	}
8710f2c0512SUlf Hansson 
8720f2c0512SUlf Hansson 	err = mmc_send_hpi_cmd(card, &status);
8730f2c0512SUlf Hansson 	if (err)
8740f2c0512SUlf Hansson 		goto out;
8750f2c0512SUlf Hansson 
8760f2c0512SUlf Hansson 	prg_wait = jiffies + msecs_to_jiffies(card->ext_csd.out_of_int_time);
8770f2c0512SUlf Hansson 	do {
8780f2c0512SUlf Hansson 		err = mmc_send_status(card, &status);
8790f2c0512SUlf Hansson 
8800f2c0512SUlf Hansson 		if (!err && R1_CURRENT_STATE(status) == R1_STATE_TRAN)
8810f2c0512SUlf Hansson 			break;
8820f2c0512SUlf Hansson 		if (time_after(jiffies, prg_wait))
8830f2c0512SUlf Hansson 			err = -ETIMEDOUT;
8840f2c0512SUlf Hansson 	} while (!err);
8850f2c0512SUlf Hansson 
8860f2c0512SUlf Hansson out:
8870f2c0512SUlf Hansson 	mmc_release_host(card->host);
8880f2c0512SUlf Hansson 	return err;
8890f2c0512SUlf Hansson }
8900f2c0512SUlf Hansson 
891148bcab2SUlf Hansson int mmc_can_ext_csd(struct mmc_card *card)
892148bcab2SUlf Hansson {
893148bcab2SUlf Hansson 	return (card && card->csd.mmca_vsn > CSD_SPEC_VER_3);
894148bcab2SUlf Hansson }
895b658af71SAdrian Hunter 
8961cf8f7e5SUlf Hansson /**
8971cf8f7e5SUlf Hansson  *	mmc_stop_bkops - stop ongoing BKOPS
8981cf8f7e5SUlf Hansson  *	@card: MMC card to check BKOPS
8991cf8f7e5SUlf Hansson  *
9001cf8f7e5SUlf Hansson  *	Send HPI command to stop ongoing background operations to
9011cf8f7e5SUlf Hansson  *	allow rapid servicing of foreground operations, e.g. read/
9021cf8f7e5SUlf Hansson  *	writes. Wait until the card comes out of the programming state
9031cf8f7e5SUlf Hansson  *	to avoid errors in servicing read/write requests.
9041cf8f7e5SUlf Hansson  */
9051cf8f7e5SUlf Hansson int mmc_stop_bkops(struct mmc_card *card)
9061cf8f7e5SUlf Hansson {
9071cf8f7e5SUlf Hansson 	int err = 0;
9081cf8f7e5SUlf Hansson 
9091cf8f7e5SUlf Hansson 	err = mmc_interrupt_hpi(card);
9101cf8f7e5SUlf Hansson 
9111cf8f7e5SUlf Hansson 	/*
9121cf8f7e5SUlf Hansson 	 * If err is -EINVAL, we cannot issue an HPI, so simply let the
9131cf8f7e5SUlf Hansson 	 * card complete the ongoing BKOPS on its own.
9141cf8f7e5SUlf Hansson 	 */
9151cf8f7e5SUlf Hansson 	if (!err || (err == -EINVAL)) {
9161cf8f7e5SUlf Hansson 		mmc_card_clr_doing_bkops(card);
9171cf8f7e5SUlf Hansson 		mmc_retune_release(card->host);
9181cf8f7e5SUlf Hansson 		err = 0;
9191cf8f7e5SUlf Hansson 	}
9201cf8f7e5SUlf Hansson 
9211cf8f7e5SUlf Hansson 	return err;
9221cf8f7e5SUlf Hansson }
9231cf8f7e5SUlf Hansson 
9241cf8f7e5SUlf Hansson static int mmc_read_bkops_status(struct mmc_card *card)
9251cf8f7e5SUlf Hansson {
9261cf8f7e5SUlf Hansson 	int err;
9271cf8f7e5SUlf Hansson 	u8 *ext_csd;
9281cf8f7e5SUlf Hansson 
9291cf8f7e5SUlf Hansson 	mmc_claim_host(card->host);
9301cf8f7e5SUlf Hansson 	err = mmc_get_ext_csd(card, &ext_csd);
9311cf8f7e5SUlf Hansson 	mmc_release_host(card->host);
9321cf8f7e5SUlf Hansson 	if (err)
9331cf8f7e5SUlf Hansson 		return err;
9341cf8f7e5SUlf Hansson 
9351cf8f7e5SUlf Hansson 	card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS];
9361cf8f7e5SUlf Hansson 	card->ext_csd.raw_exception_status = ext_csd[EXT_CSD_EXP_EVENTS_STATUS];
9371cf8f7e5SUlf Hansson 	kfree(ext_csd);
9381cf8f7e5SUlf Hansson 	return 0;
9391cf8f7e5SUlf Hansson }
9401cf8f7e5SUlf Hansson 
9411cf8f7e5SUlf Hansson /**
9421cf8f7e5SUlf Hansson  *	mmc_start_bkops - start BKOPS for supported cards
9431cf8f7e5SUlf Hansson  *	@card: MMC card to start BKOPS
9441cf8f7e5SUlf Hansson  *	@from_exception: A flag to indicate if this function was
9451cf8f7e5SUlf Hansson  *			 called due to an exception raised by the card
9461cf8f7e5SUlf Hansson  *
9471cf8f7e5SUlf Hansson  *	Start background operations whenever requested.
9481cf8f7e5SUlf Hansson  *	When the urgent BKOPS bit is set in a R1 command response
9491cf8f7e5SUlf Hansson  *	When the urgent BKOPS bit is set in an R1 command response
9501cf8f7e5SUlf Hansson */
9511cf8f7e5SUlf Hansson void mmc_start_bkops(struct mmc_card *card, bool from_exception)
9521cf8f7e5SUlf Hansson {
9531cf8f7e5SUlf Hansson 	int err;
9541cf8f7e5SUlf Hansson 	int timeout;
9551cf8f7e5SUlf Hansson 	bool use_busy_signal;
9561cf8f7e5SUlf Hansson 
9571cf8f7e5SUlf Hansson 	if (!card->ext_csd.man_bkops_en || mmc_card_doing_bkops(card))
9581cf8f7e5SUlf Hansson 		return;
9591cf8f7e5SUlf Hansson 
9601cf8f7e5SUlf Hansson 	err = mmc_read_bkops_status(card);
9611cf8f7e5SUlf Hansson 	if (err) {
9621cf8f7e5SUlf Hansson 		pr_err("%s: Failed to read bkops status: %d\n",
9631cf8f7e5SUlf Hansson 		       mmc_hostname(card->host), err);
9641cf8f7e5SUlf Hansson 		return;
9651cf8f7e5SUlf Hansson 	}
9661cf8f7e5SUlf Hansson 
9671cf8f7e5SUlf Hansson 	if (!card->ext_csd.raw_bkops_status)
9681cf8f7e5SUlf Hansson 		return;
9691cf8f7e5SUlf Hansson 
9701cf8f7e5SUlf Hansson 	if (card->ext_csd.raw_bkops_status < EXT_CSD_BKOPS_LEVEL_2 &&
9711cf8f7e5SUlf Hansson 	    from_exception)
9721cf8f7e5SUlf Hansson 		return;
9731cf8f7e5SUlf Hansson 
9741cf8f7e5SUlf Hansson 	mmc_claim_host(card->host);
9751cf8f7e5SUlf Hansson 	if (card->ext_csd.raw_bkops_status >= EXT_CSD_BKOPS_LEVEL_2) {
9761cf8f7e5SUlf Hansson 		timeout = MMC_OPS_TIMEOUT_MS;
9771cf8f7e5SUlf Hansson 		use_busy_signal = true;
9781cf8f7e5SUlf Hansson 	} else {
9791cf8f7e5SUlf Hansson 		timeout = 0;
9801cf8f7e5SUlf Hansson 		use_busy_signal = false;
9811cf8f7e5SUlf Hansson 	}
9821cf8f7e5SUlf Hansson 
9831cf8f7e5SUlf Hansson 	mmc_retune_hold(card->host);
9841cf8f7e5SUlf Hansson 
9851cf8f7e5SUlf Hansson 	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
9861cf8f7e5SUlf Hansson 			EXT_CSD_BKOPS_START, 1, timeout, 0,
9871cf8f7e5SUlf Hansson 			use_busy_signal, true, false);
9881cf8f7e5SUlf Hansson 	if (err) {
9891cf8f7e5SUlf Hansson 		pr_warn("%s: Error %d starting bkops\n",
9901cf8f7e5SUlf Hansson 			mmc_hostname(card->host), err);
9911cf8f7e5SUlf Hansson 		mmc_retune_release(card->host);
9921cf8f7e5SUlf Hansson 		goto out;
9931cf8f7e5SUlf Hansson 	}
9941cf8f7e5SUlf Hansson 
9951cf8f7e5SUlf Hansson 	/*
9961cf8f7e5SUlf Hansson 	 * For an urgent BKOPS status (LEVEL_2 and higher), BKOPS is
9971cf8f7e5SUlf Hansson 	 * executed synchronously; otherwise the operation is left
9981cf8f7e5SUlf Hansson 	 * running in the background.
9991cf8f7e5SUlf Hansson 	 */
10001cf8f7e5SUlf Hansson 	if (!use_busy_signal)
10011cf8f7e5SUlf Hansson 		mmc_card_set_doing_bkops(card);
10021cf8f7e5SUlf Hansson 	else
10031cf8f7e5SUlf Hansson 		mmc_retune_release(card->host);
10041cf8f7e5SUlf Hansson out:
10051cf8f7e5SUlf Hansson 	mmc_release_host(card->host);
10061cf8f7e5SUlf Hansson }
10071cf8f7e5SUlf Hansson 
1008d9df1737SUlf Hansson /*
1009d9df1737SUlf Hansson  * Flush the cache to the non-volatile storage.
1010d9df1737SUlf Hansson  */
1011d9df1737SUlf Hansson int mmc_flush_cache(struct mmc_card *card)
1012d9df1737SUlf Hansson {
1013d9df1737SUlf Hansson 	int err = 0;
1014d9df1737SUlf Hansson 
1015d9df1737SUlf Hansson 	if (mmc_card_mmc(card) &&
1016d9df1737SUlf Hansson 			(card->ext_csd.cache_size > 0) &&
1017d9df1737SUlf Hansson 			(card->ext_csd.cache_ctrl & 1)) {
1018d9df1737SUlf Hansson 		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1019d9df1737SUlf Hansson 				EXT_CSD_FLUSH_CACHE, 1, 0);
1020d9df1737SUlf Hansson 		if (err)
1021d9df1737SUlf Hansson 			pr_err("%s: cache flush error %d\n",
1022d9df1737SUlf Hansson 					mmc_hostname(card->host), err);
1023d9df1737SUlf Hansson 	}
1024d9df1737SUlf Hansson 
1025d9df1737SUlf Hansson 	return err;
1026d9df1737SUlf Hansson }
1027d9df1737SUlf Hansson EXPORT_SYMBOL(mmc_flush_cache);
1028d9df1737SUlf Hansson 
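/*
 * Example (illustrative sketch): the suspend/power-off path flushes the cache
 * before removing power so that cached writes are not lost, along the lines
 * of:
 *
 *	err = mmc_flush_cache(card);
 *	if (err)
 *		goto out;
 */
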
1029b658af71SAdrian Hunter static int mmc_cmdq_switch(struct mmc_card *card, bool enable)
1030b658af71SAdrian Hunter {
1031b658af71SAdrian Hunter 	u8 val = enable ? EXT_CSD_CMDQ_MODE_ENABLED : 0;
1032b658af71SAdrian Hunter 	int err;
1033b658af71SAdrian Hunter 
1034b658af71SAdrian Hunter 	if (!card->ext_csd.cmdq_support)
1035b658af71SAdrian Hunter 		return -EOPNOTSUPP;
1036b658af71SAdrian Hunter 
1037b658af71SAdrian Hunter 	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_CMDQ_MODE_EN,
1038b658af71SAdrian Hunter 			 val, card->ext_csd.generic_cmd6_time);
1039b658af71SAdrian Hunter 	if (!err)
1040b658af71SAdrian Hunter 		card->ext_csd.cmdq_en = enable;
1041b658af71SAdrian Hunter 
1042b658af71SAdrian Hunter 	return err;
1043b658af71SAdrian Hunter }
1044b658af71SAdrian Hunter 
1045b658af71SAdrian Hunter int mmc_cmdq_enable(struct mmc_card *card)
1046b658af71SAdrian Hunter {
1047b658af71SAdrian Hunter 	return mmc_cmdq_switch(card, true);
1048b658af71SAdrian Hunter }
1049b658af71SAdrian Hunter EXPORT_SYMBOL_GPL(mmc_cmdq_enable);
1050b658af71SAdrian Hunter 
1051b658af71SAdrian Hunter int mmc_cmdq_disable(struct mmc_card *card)
1052b658af71SAdrian Hunter {
1053b658af71SAdrian Hunter 	return mmc_cmdq_switch(card, false);
1054b658af71SAdrian Hunter }
1055b658af71SAdrian Hunter EXPORT_SYMBOL_GPL(mmc_cmdq_disable);
1056
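/*
 * Example (illustrative sketch): a user such as the block driver can drop out
 * of command queueing around operations that must be issued outside of CQ
 * (e.g. RPMB accesses) and restore it afterwards:
 *
 *	bool cmdq_was_on = card->ext_csd.cmdq_en;
 *
 *	if (cmdq_was_on)
 *		err = mmc_cmdq_disable(card);
 *	...
 *	if (!err && cmdq_was_on)
 *		err = mmc_cmdq_enable(card);
 */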