/*
 *  linux/drivers/mmc/core/mmc_ops.c
 *
 *  Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/scatterlist.h>

#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/mmc.h>

#include "core.h"
#include "card.h"
#include "host.h"
#include "mmc_ops.h"

#define MMC_OPS_TIMEOUT_MS	(10 * 60 * 1000) /* 10 minute timeout */

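/*
 * Fixed tuning block patterns, as defined by the SD and eMMC specifications,
 * for 4-bit and 8-bit bus widths. mmc_send_tuning() below reads one tuning
 * block from the card and compares it against the pattern that matches the
 * current bus width.
 */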
static const u8 tuning_blk_pattern_4bit[] = {
	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
};

static const u8 tuning_blk_pattern_8bit[] = {
	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
};

int __mmc_send_status(struct mmc_card *card, u32 *status, unsigned int retries)
{
	int err;
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SEND_STATUS;
	if (!mmc_host_is_spi(card->host))
		cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(card->host, &cmd, retries);
	if (err)
		return err;

	/* NOTE: callers are required to understand the difference
	 * between "native" and SPI format status words!
	 */
	if (status)
		*status = cmd.resp[0];

	return 0;
}
EXPORT_SYMBOL_GPL(__mmc_send_status);

int mmc_send_status(struct mmc_card *card, u32 *status)
{
	return __mmc_send_status(card, status, MMC_CMD_RETRIES);
}

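/*
 * Illustrative usage sketch (not copied from a particular caller): users of
 * mmc_send_status() typically decode the R1 status word with the helpers
 * from mmc.h, e.g. to check whether the card has returned to the transfer
 * state:
 *
 *	u32 status;
 *	int err = mmc_send_status(card, &status);
 *
 *	if (!err && R1_CURRENT_STATE(status) == R1_STATE_TRAN)
 *		... the card is idle again, proceed with the next command ...
 *
 * The same pattern appears in mmc_interrupt_hpi() and mmc_poll_for_busy()
 * further down in this file.
 */
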
static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card)
{
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SELECT_CARD;

	if (card) {
		cmd.arg = card->rca << 16;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
	} else {
		cmd.arg = 0;
		cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
	}

	return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
}

int mmc_select_card(struct mmc_card *card)
{
	return _mmc_select_card(card->host, card);
}

int mmc_deselect_cards(struct mmc_host *host)
{
	return _mmc_select_card(host, NULL);
}

/*
 * Write the value specified in the device tree or board code into the optional
 * 16 bit Driver Stage Register. This can be used to tune rise/fall times and
 * drive strength of the DAT and CMD outputs. The actual meaning of a given
 * value is hardware dependent.
 * The presence of the DSR register can be determined from the CSD register,
 * bit 76.
 */
int mmc_set_dsr(struct mmc_host *host)
{
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SET_DSR;

	cmd.arg = (host->dsr << 16) | 0xffff;
	cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;

	return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
}

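/*
 * Illustrative caller sketch (hedged, not copied from the core): the value in
 * host->dsr typically comes from the "dsr" device tree property, and CMD4 is
 * only worth sending when the card's CSD advertises a DSR implementation.
 * Roughly:
 *
 *	if (card->csd.dsr_imp && host->dsr_req)
 *		mmc_set_dsr(host);
 *
 * Where dsr_imp/dsr_req get set is handled elsewhere (CSD parsing and host
 * setup); they are shown here only to illustrate the intended usage.
 */
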
int mmc_go_idle(struct mmc_host *host)
{
	int err;
	struct mmc_command cmd = {};

	/*
	 * Non-SPI hosts need to prevent chipselect going active during
	 * GO_IDLE; that would put chips into SPI mode.  Remind them of
	 * that in case of hardware that won't pull up DAT3/nCS otherwise.
	 *
	 * SPI hosts ignore ios.chip_select; it's managed according to
	 * rules that must accommodate non-MMC slaves which this layer
	 * won't even know about.
	 */
	if (!mmc_host_is_spi(host)) {
		mmc_set_chip_select(host, MMC_CS_HIGH);
		mmc_delay(1);
	}

	cmd.opcode = MMC_GO_IDLE_STATE;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC;

	err = mmc_wait_for_cmd(host, &cmd, 0);

	mmc_delay(1);

	if (!mmc_host_is_spi(host)) {
		mmc_set_chip_select(host, MMC_CS_DONTCARE);
		mmc_delay(1);
	}

	host->use_spi_crc = 0;

	return err;
}

int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
{
	struct mmc_command cmd = {};
	int i, err = 0;

	cmd.opcode = MMC_SEND_OP_COND;
	cmd.arg = mmc_host_is_spi(host) ? 0 : ocr;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;

	for (i = 100; i; i--) {
		err = mmc_wait_for_cmd(host, &cmd, 0);
		if (err)
			break;

		/* if we're just probing, do a single pass */
		if (ocr == 0)
			break;

		/* otherwise wait until reset completes */
		if (mmc_host_is_spi(host)) {
			if (!(cmd.resp[0] & R1_SPI_IDLE))
				break;
		} else {
			if (cmd.resp[0] & MMC_CARD_BUSY)
				break;
		}

		err = -ETIMEDOUT;

		mmc_delay(10);
	}

	if (rocr && !mmc_host_is_spi(host))
		*rocr = cmd.resp[0];

	return err;
}

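/*
 * Illustrative usage sketch (hedged, simplified from how the core attaches an
 * MMC card): CMD1 is first issued with a zero argument purely to read back the
 * card's OCR, and then again with the negotiated voltage window so the card
 * starts initialisation; the loop above keeps re-sending CMD1 until the card
 * reports that it is no longer busy.
 *
 *	u32 ocr, rocr;
 *
 *	err = mmc_send_op_cond(host, 0, &ocr);		(probe: fetch the OCR)
 *	...
 *	err = mmc_send_op_cond(host, ocr, &rocr);	(start initialisation)
 *
 * The sector-addressing bit (bit 30) is typically OR'ed into the argument
 * before the second call to request high-capacity support.
 */
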
int mmc_set_relative_addr(struct mmc_card *card)
{
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SET_RELATIVE_ADDR;
	cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	return mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
}

static int
mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode)
{
	int err;
	struct mmc_command cmd = {};

	cmd.opcode = opcode;
	cmd.arg = arg;
	cmd.flags = MMC_RSP_R2 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
	if (err)
		return err;

	memcpy(cxd, cmd.resp, sizeof(u32) * 4);

	return 0;
}

/*
 * NOTE: the caller of this function must provide a DMA-capable buffer for
 * @buf, or an on-stack buffer (which adds some copying overhead in the
 * callee).
 */
static int
mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
		u32 opcode, void *buf, unsigned len)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;

	mrq.cmd = &cmd;
	mrq.data = &data;

	cmd.opcode = opcode;
	cmd.arg = 0;

	/* NOTE HACK:  the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID.  Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = len;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;

	sg_init_one(&sg, buf, len);

	if (opcode == MMC_SEND_CSD || opcode == MMC_SEND_CID) {
		/*
		 * The spec states that CSD and CID accesses have a timeout
		 * of 64 clock cycles.
		 */
		data.timeout_ns = 0;
		data.timeout_clks = 64;
	} else
		mmc_set_data_timeout(&data, card);

	mmc_wait_for_req(host, &mrq);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return 0;
}

static int mmc_spi_send_csd(struct mmc_card *card, u32 *csd)
{
	int ret, i;
	__be32 *csd_tmp;

	csd_tmp = kzalloc(16, GFP_KERNEL);
	if (!csd_tmp)
		return -ENOMEM;

	ret = mmc_send_cxd_data(card, card->host, MMC_SEND_CSD, csd_tmp, 16);
	if (ret)
		goto err;

	for (i = 0; i < 4; i++)
		csd[i] = be32_to_cpu(csd_tmp[i]);

err:
	kfree(csd_tmp);
	return ret;
}

int mmc_send_csd(struct mmc_card *card, u32 *csd)
{
	if (mmc_host_is_spi(card->host))
		return mmc_spi_send_csd(card, csd);

	return mmc_send_cxd_native(card->host, card->rca << 16, csd,
				MMC_SEND_CSD);
}

static int mmc_spi_send_cid(struct mmc_host *host, u32 *cid)
{
	int ret, i;
	__be32 *cid_tmp;

	cid_tmp = kzalloc(16, GFP_KERNEL);
	if (!cid_tmp)
		return -ENOMEM;

	ret = mmc_send_cxd_data(NULL, host, MMC_SEND_CID, cid_tmp, 16);
	if (ret)
		goto err;

	for (i = 0; i < 4; i++)
		cid[i] = be32_to_cpu(cid_tmp[i]);

err:
	kfree(cid_tmp);
	return ret;
}

int mmc_send_cid(struct mmc_host *host, u32 *cid)
{
	if (mmc_host_is_spi(host))
		return mmc_spi_send_cid(host, cid);

	return mmc_send_cxd_native(host, 0, cid, MMC_ALL_SEND_CID);
}

int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)
{
	int err;
	u8 *ext_csd;

	if (!card || !new_ext_csd)
		return -EINVAL;

	if (!mmc_can_ext_csd(card))
		return -EOPNOTSUPP;

	/*
	 * As the ext_csd is so large and mostly unused, we don't store the
	 * raw block in mmc_card.
	 */
	ext_csd = kzalloc(512, GFP_KERNEL);
	if (!ext_csd)
		return -ENOMEM;

	err = mmc_send_cxd_data(card, card->host, MMC_SEND_EXT_CSD, ext_csd,
				512);
	if (err)
		kfree(ext_csd);
	else
		*new_ext_csd = ext_csd;

	return err;
}
EXPORT_SYMBOL_GPL(mmc_get_ext_csd);

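/*
 * Illustrative usage sketch: on success the caller owns the 512 byte buffer
 * and must kfree() it when done, e.g. (mirroring mmc_read_bkops_status()
 * later in this file):
 *
 *	u8 *ext_csd;
 *
 *	err = mmc_get_ext_csd(card, &ext_csd);
 *	if (err)
 *		return err;
 *	... use ext_csd[EXT_CSD_BKOPS_STATUS] etc. ...
 *	kfree(ext_csd);
 */
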
int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp)
{
	struct mmc_command cmd = {};
	int err;

	cmd.opcode = MMC_SPI_READ_OCR;
	cmd.arg = highcap ? (1 << 30) : 0;
	cmd.flags = MMC_RSP_SPI_R3;

	err = mmc_wait_for_cmd(host, &cmd, 0);

	*ocrp = cmd.resp[1];
	return err;
}

int mmc_spi_set_crc(struct mmc_host *host, int use_crc)
{
	struct mmc_command cmd = {};
	int err;

	cmd.opcode = MMC_SPI_CRC_ON_OFF;
	cmd.flags = MMC_RSP_SPI_R1;
	cmd.arg = use_crc;

	err = mmc_wait_for_cmd(host, &cmd, 0);
	if (!err)
		host->use_spi_crc = use_crc;
	return err;
}

static int mmc_switch_status_error(struct mmc_host *host, u32 status)
{
	if (mmc_host_is_spi(host)) {
		if (status & R1_SPI_ILLEGAL_COMMAND)
			return -EBADMSG;
	} else {
		if (status & 0xFDFFA000)
			pr_warn("%s: unexpected status %#x after switch\n",
				mmc_hostname(host), status);
		if (status & R1_SWITCH_ERROR)
			return -EBADMSG;
	}
	return 0;
}

/* Caller must hold re-tuning */
int __mmc_switch_status(struct mmc_card *card, bool crc_err_fatal)
{
	u32 status;
	int err;

	err = mmc_send_status(card, &status);
	if (!crc_err_fatal && err == -EILSEQ)
		return 0;
	if (err)
		return err;

	return mmc_switch_status_error(card->host, status);
}

int mmc_switch_status(struct mmc_card *card)
{
	return __mmc_switch_status(card, true);
}

static int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
			bool send_status, bool retry_crc_err)
{
	struct mmc_host *host = card->host;
	int err;
	unsigned long timeout;
	u32 status = 0;
	bool expired = false;
	bool busy = false;

	/* We have an unspecified cmd timeout, use the fallback value. */
	if (!timeout_ms)
		timeout_ms = MMC_OPS_TIMEOUT_MS;

	/*
	 * If polling via CMD13 is not allowed and the host cannot poll via
	 * ->card_busy() either, rely on waiting for the stated timeout to
	 * be sufficient.
	 */
	if (!send_status && !host->ops->card_busy) {
		mmc_delay(timeout_ms);
		return 0;
	}

	timeout = jiffies + msecs_to_jiffies(timeout_ms) + 1;
	do {
		/*
		 * Due to the possibility of being preempted while polling,
		 * check the expiration time first.
		 */
		expired = time_after(jiffies, timeout);

		if (host->ops->card_busy) {
			busy = host->ops->card_busy(host);
		} else {
			err = mmc_send_status(card, &status);
			if (retry_crc_err && err == -EILSEQ) {
				busy = true;
			} else if (err) {
				return err;
			} else {
				err = mmc_switch_status_error(host, status);
				if (err)
					return err;
				busy = R1_CURRENT_STATE(status) == R1_STATE_PRG;
			}
		}

		/* Timeout if the device still remains busy. */
		if (expired && busy) {
			pr_err("%s: Card stuck being busy! %s\n",
				mmc_hostname(host), __func__);
			return -ETIMEDOUT;
		}
	} while (busy);

	return 0;
}

/**
 *	__mmc_switch - modify EXT_CSD register
 *	@card: the MMC card associated with the data transfer
 *	@set: cmd set values
 *	@index: EXT_CSD register index
 *	@value: value to program into EXT_CSD register
 *	@timeout_ms: timeout (ms) for operation performed by register write,
 *                   timeout of zero implies maximum possible timeout
 *	@timing: new timing to change to
 *	@use_busy_signal: use the busy signal as response type
 *	@send_status: send status cmd to poll for busy
 *	@retry_crc_err: retry on CRC errors when polling with CMD13 for busy
 *
 *	Modifies the EXT_CSD register for selected card.
 */
int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
		unsigned int timeout_ms, unsigned char timing,
		bool use_busy_signal, bool send_status, bool retry_crc_err)
{
	struct mmc_host *host = card->host;
	int err;
	struct mmc_command cmd = {};
	bool use_r1b_resp = use_busy_signal;
	unsigned char old_timing = host->ios.timing;

	mmc_retune_hold(host);

	/*
	 * If the cmd timeout and the max_busy_timeout of the host are both
	 * specified, let's validate them. A failure means we need to prevent
	 * the host from doing hw busy detection, which is done by converting
	 * to a R1 response instead of a R1B.
	 */
	if (timeout_ms && host->max_busy_timeout &&
		(timeout_ms > host->max_busy_timeout))
		use_r1b_resp = false;

	cmd.opcode = MMC_SWITCH;
	cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
		  (index << 16) |
		  (value << 8) |
		  set;
	cmd.flags = MMC_CMD_AC;
	if (use_r1b_resp) {
		cmd.flags |= MMC_RSP_SPI_R1B | MMC_RSP_R1B;
		/*
		 * A busy_timeout of zero means the host can decide to use
		 * whatever value it finds suitable.
		 */
		cmd.busy_timeout = timeout_ms;
	} else {
		cmd.flags |= MMC_RSP_SPI_R1 | MMC_RSP_R1;
	}

	if (index == EXT_CSD_SANITIZE_START)
		cmd.sanitize_busy = true;

	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
	if (err)
		goto out;

	/* No need to check card status in case of unblocking command */
	if (!use_busy_signal)
		goto out;

	/* If SPI or we used HW busy detection above, then we don't need to poll. */
	if (((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp) ||
		mmc_host_is_spi(host))
		goto out_tim;

	/* Let's try to poll to find out when the command is completed. */
	err = mmc_poll_for_busy(card, timeout_ms, send_status, retry_crc_err);
	if (err)
		goto out;

out_tim:
	/* Switch to the new timing before checking the switch status. */
	if (timing)
		mmc_set_timing(host, timing);

	if (send_status) {
		err = mmc_switch_status(card);
		if (err && timing)
			mmc_set_timing(host, old_timing);
	}
out:
	mmc_retune_release(host);

	return err;
}

int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
		unsigned int timeout_ms)
{
	return __mmc_switch(card, set, index, value, timeout_ms, 0,
			true, true, false);
}
EXPORT_SYMBOL_GPL(mmc_switch);

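/*
 * Illustrative usage sketch: a typical caller writes a single EXT_CSD byte
 * and lets the core handle the busy signalling, for example the cache flush
 * in mmc_flush_cache() later in this file:
 *
 *	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
 *			 EXT_CSD_FLUSH_CACHE, 1, 0);
 *
 * Callers that need to change the bus timing as part of the switch (e.g.
 * when selecting HS200/HS400) use __mmc_switch() directly so the new timing
 * is applied before the switch status is checked.
 */
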
int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;
	struct mmc_ios *ios = &host->ios;
	const u8 *tuning_block_pattern;
	int size, err = 0;
	u8 *data_buf;

	if (ios->bus_width == MMC_BUS_WIDTH_8) {
		tuning_block_pattern = tuning_blk_pattern_8bit;
		size = sizeof(tuning_blk_pattern_8bit);
	} else if (ios->bus_width == MMC_BUS_WIDTH_4) {
		tuning_block_pattern = tuning_blk_pattern_4bit;
		size = sizeof(tuning_blk_pattern_4bit);
	} else
		return -EINVAL;

	data_buf = kzalloc(size, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;

	mrq.cmd = &cmd;
	mrq.data = &data;

	cmd.opcode = opcode;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = size;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	/*
	 * According to the tuning specs, the tuning process normally takes
	 * fewer than 40 executions of CMD19, and the timeout value should be
	 * shorter than 150 ms.
	 */
	data.timeout_ns = 150 * NSEC_PER_MSEC;

	data.sg = &sg;
	data.sg_len = 1;
	sg_init_one(&sg, data_buf, size);

	mmc_wait_for_req(host, &mrq);

	if (cmd_error)
		*cmd_error = cmd.error;

	if (cmd.error) {
		err = cmd.error;
		goto out;
	}

	if (data.error) {
		err = data.error;
		goto out;
	}

	if (memcmp(data_buf, tuning_block_pattern, size))
		err = -EIO;

out:
	kfree(data_buf);
	return err;
}
EXPORT_SYMBOL_GPL(mmc_send_tuning);

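/*
 * Illustrative usage sketch (hedged; the driver and function names are made
 * up): host drivers typically call mmc_send_tuning() repeatedly from their
 * ->execute_tuning() callback, once per sampling point, and keep the points
 * for which the returned tuning block matched:
 *
 *	static int foo_execute_tuning(struct mmc_host *mmc, u32 opcode)
 *	{
 *		for each candidate delay/phase setting:
 *			program the setting in the controller
 *			err = mmc_send_tuning(mmc, opcode, NULL);
 *			record whether err == 0
 *		then pick a setting from the largest passing window
 *	}
 *
 * The opcode is MMC_SEND_TUNING_BLOCK for SD (CMD19) or
 * MMC_SEND_TUNING_BLOCK_HS200 for eMMC (CMD21).
 */
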
int mmc_abort_tuning(struct mmc_host *host, u32 opcode)
{
	struct mmc_command cmd = {};

	/*
	 * The eMMC specification allows CMD12 to be used to stop a tuning
	 * command, but the SD specification does not, so do nothing unless
	 * it is eMMC.
	 */
	if (opcode != MMC_SEND_TUNING_BLOCK_HS200)
		return 0;

	cmd.opcode = MMC_STOP_TRANSMISSION;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;

	/*
	 * For drivers that override R1 to R1b, set an arbitrary timeout based
	 * on the tuning timeout i.e. 150ms.
	 */
	cmd.busy_timeout = 150;

	return mmc_wait_for_cmd(host, &cmd, 0);
}
EXPORT_SYMBOL_GPL(mmc_abort_tuning);

static int
mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
		  u8 len)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;
	u8 *data_buf;
	u8 *test_buf;
	int i, err;
	static u8 testdata_8bit[8] = { 0x55, 0xaa, 0, 0, 0, 0, 0, 0 };
	static u8 testdata_4bit[4] = { 0x5a, 0, 0, 0 };

	/* dma onto stack is unsafe/nonportable, but callers to this
	 * routine normally provide temporary on-stack buffers ...
	 */
	data_buf = kmalloc(len, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;

	if (len == 8)
		test_buf = testdata_8bit;
	else if (len == 4)
		test_buf = testdata_4bit;
	else {
		pr_err("%s: Invalid bus_width %d\n",
		       mmc_hostname(host), len);
		kfree(data_buf);
		return -EINVAL;
	}

	if (opcode == MMC_BUS_TEST_W)
		memcpy(data_buf, test_buf, len);

	mrq.cmd = &cmd;
	mrq.data = &data;
	cmd.opcode = opcode;
	cmd.arg = 0;

	/* NOTE HACK:  the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID.  Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = len;
	data.blocks = 1;
	if (opcode == MMC_BUS_TEST_R)
		data.flags = MMC_DATA_READ;
	else
		data.flags = MMC_DATA_WRITE;

	data.sg = &sg;
	data.sg_len = 1;
	mmc_set_data_timeout(&data, card);
	sg_init_one(&sg, data_buf, len);
	mmc_wait_for_req(host, &mrq);
	err = 0;
	if (opcode == MMC_BUS_TEST_R) {
		for (i = 0; i < len / 4; i++)
			if ((test_buf[i] ^ data_buf[i]) != 0xff) {
				err = -EIO;
				break;
			}
	}
	kfree(data_buf);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return err;
}

int mmc_bus_test(struct mmc_card *card, u8 bus_width)
{
	int width;

	if (bus_width == MMC_BUS_WIDTH_8)
		width = 8;
	else if (bus_width == MMC_BUS_WIDTH_4)
		width = 4;
	else if (bus_width == MMC_BUS_WIDTH_1)
		return 0; /* no need for test */
	else
		return -EINVAL;

	/*
	 * Ignore errors from BUS_TEST_W.  BUS_TEST_R will fail if there
	 * is a problem.  This improves chances that the test will work.
	 */
	mmc_send_bus_test(card, card->host, MMC_BUS_TEST_W, width);
	return mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
}

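/*
 * Illustrative usage sketch (hedged, simplified from how the core selects a
 * bus width): after switching EXT_CSD_BUS_WIDTH the core can verify that the
 * new width actually works by running the bus test pair:
 *
 *	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
 *			 ext_csd_bits, card->ext_csd.generic_cmd6_time);
 *	if (!err)
 *		err = mmc_bus_test(card, bus_width);
 *
 * A non-zero return means the wider bus is unreliable and a narrower width
 * should be tried instead.
 */
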
static int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status)
{
	struct mmc_command cmd = {};
	unsigned int opcode;
	int err;

	if (!card->ext_csd.hpi) {
		pr_warn("%s: Card didn't support HPI command\n",
			mmc_hostname(card->host));
		return -EINVAL;
	}

	opcode = card->ext_csd.hpi_cmd;
	if (opcode == MMC_STOP_TRANSMISSION)
		cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
	else if (opcode == MMC_SEND_STATUS)
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	cmd.opcode = opcode;
	cmd.arg = card->rca << 16 | 1;

	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		pr_warn("%s: error %d interrupting operation. "
			"HPI command response %#x\n", mmc_hostname(card->host),
			err, cmd.resp[0]);
		return err;
	}
	if (status)
		*status = cmd.resp[0];

	return 0;
}

/**
 *	mmc_interrupt_hpi - issue a High Priority Interrupt
 *	@card: the MMC card associated with the HPI transfer
 *
 *	Issue a High Priority Interrupt, and poll the card status
 *	until it is out of the programming state.
 */
int mmc_interrupt_hpi(struct mmc_card *card)
{
	int err;
	u32 status;
	unsigned long prg_wait;

	if (!card->ext_csd.hpi_en) {
		pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
		return 1;
	}

	mmc_claim_host(card->host);
	err = mmc_send_status(card, &status);
	if (err) {
		pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
		goto out;
	}

	switch (R1_CURRENT_STATE(status)) {
	case R1_STATE_IDLE:
	case R1_STATE_READY:
	case R1_STATE_STBY:
	case R1_STATE_TRAN:
		/*
		 * In idle and transfer states, HPI is not needed and the caller
		 * can issue the next intended command immediately
		 */
		goto out;
	case R1_STATE_PRG:
		break;
	default:
		/* In all other states, it's illegal to issue HPI */
		pr_debug("%s: HPI cannot be sent. Card state=%d\n",
			mmc_hostname(card->host), R1_CURRENT_STATE(status));
		err = -EINVAL;
		goto out;
	}

	err = mmc_send_hpi_cmd(card, &status);
	if (err)
		goto out;

	prg_wait = jiffies + msecs_to_jiffies(card->ext_csd.out_of_int_time);
	do {
		err = mmc_send_status(card, &status);

		if (!err && R1_CURRENT_STATE(status) == R1_STATE_TRAN)
			break;
		if (time_after(jiffies, prg_wait))
			err = -ETIMEDOUT;
	} while (!err);

out:
	mmc_release_host(card->host);
	return err;
}

int mmc_can_ext_csd(struct mmc_card *card)
{
	return (card && card->csd.mmca_vsn > CSD_SPEC_VER_3);
}

/**
 *	mmc_stop_bkops - stop ongoing BKOPS
 *	@card: MMC card to check BKOPS
 *
 *	Send HPI command to stop ongoing background operations to
 *	allow rapid servicing of foreground operations, e.g. read/
 *	writes. Wait until the card comes out of the programming state
 *	to avoid errors in servicing read/write requests.
 */
int mmc_stop_bkops(struct mmc_card *card)
{
	int err = 0;

	err = mmc_interrupt_hpi(card);

	/*
	 * If err is -EINVAL, an HPI could not be issued; in that case the
	 * card is left to complete the BKOPS on its own.
	 */
	if (!err || (err == -EINVAL)) {
		mmc_card_clr_doing_bkops(card);
		mmc_retune_release(card->host);
		err = 0;
	}

	return err;
}

static int mmc_read_bkops_status(struct mmc_card *card)
{
	int err;
	u8 *ext_csd;

	mmc_claim_host(card->host);
	err = mmc_get_ext_csd(card, &ext_csd);
	mmc_release_host(card->host);
	if (err)
		return err;

	card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS];
	card->ext_csd.raw_exception_status = ext_csd[EXT_CSD_EXP_EVENTS_STATUS];
	kfree(ext_csd);
	return 0;
}

/**
 *	mmc_start_bkops - start BKOPS for supported cards
 *	@card: MMC card to start BKOPS
 *	@from_exception: A flag to indicate if this function was
 *			 called due to an exception raised by the card
 *
 *	Start background operations whenever requested.
 *	When the urgent BKOPS bit is set in an R1 command response
 *	then background operations should be started immediately.
 */
void mmc_start_bkops(struct mmc_card *card, bool from_exception)
{
	int err;
	int timeout;
	bool use_busy_signal;

	if (!card->ext_csd.man_bkops_en || mmc_card_doing_bkops(card))
		return;

	err = mmc_read_bkops_status(card);
	if (err) {
		pr_err("%s: Failed to read bkops status: %d\n",
		       mmc_hostname(card->host), err);
		return;
	}

	if (!card->ext_csd.raw_bkops_status)
		return;

	if (card->ext_csd.raw_bkops_status < EXT_CSD_BKOPS_LEVEL_2 &&
	    from_exception)
		return;

	mmc_claim_host(card->host);
	if (card->ext_csd.raw_bkops_status >= EXT_CSD_BKOPS_LEVEL_2) {
		timeout = MMC_OPS_TIMEOUT_MS;
		use_busy_signal = true;
	} else {
		timeout = 0;
		use_busy_signal = false;
	}

	mmc_retune_hold(card->host);

	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			EXT_CSD_BKOPS_START, 1, timeout, 0,
			use_busy_signal, true, false);
	if (err) {
		pr_warn("%s: Error %d starting bkops\n",
			mmc_hostname(card->host), err);
		mmc_retune_release(card->host);
		goto out;
	}

	/*
	 * For urgent BKOPS status (LEVEL_2 and higher) BKOPS is executed
	 * synchronously; otherwise the operation continues in the background.
	 */
	if (!use_busy_signal)
		mmc_card_set_doing_bkops(card);
	else
		mmc_retune_release(card->host);
out:
	mmc_release_host(card->host);
}

/*
 * Flush the cache to the non-volatile storage.
 */
int mmc_flush_cache(struct mmc_card *card)
{
	int err = 0;

	if (mmc_card_mmc(card) &&
			(card->ext_csd.cache_size > 0) &&
			(card->ext_csd.cache_ctrl & 1)) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				EXT_CSD_FLUSH_CACHE, 1, 0);
		if (err)
			pr_err("%s: cache flush error %d\n",
					mmc_hostname(card->host), err);
	}

	return err;
}
EXPORT_SYMBOL(mmc_flush_cache);

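/*
 * Illustrative usage sketch (hedged): suspend/shutdown paths are expected to
 * flush the card's volatile cache before cutting power, roughly:
 *
 *	err = mmc_flush_cache(card);
 *	if (err)
 *		goto out;
 *	... proceed to power the card down ...
 *
 * The helper is a no-op for cards without a cache or with the cache disabled.
 */
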
static int mmc_cmdq_switch(struct mmc_card *card, bool enable)
{
	u8 val = enable ? EXT_CSD_CMDQ_MODE_ENABLED : 0;
	int err;

	if (!card->ext_csd.cmdq_support)
		return -EOPNOTSUPP;

	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_CMDQ_MODE_EN,
			 val, card->ext_csd.generic_cmd6_time);
	if (!err)
		card->ext_csd.cmdq_en = enable;

	return err;
}

int mmc_cmdq_enable(struct mmc_card *card)
{
	return mmc_cmdq_switch(card, true);
}
EXPORT_SYMBOL_GPL(mmc_cmdq_enable);

int mmc_cmdq_disable(struct mmc_card *card)
{
	return mmc_cmdq_switch(card, false);
}
EXPORT_SYMBOL_GPL(mmc_cmdq_disable);
1061