/*
 *  linux/drivers/mmc/core/mmc_ops.c
 *
 *  Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/scatterlist.h>

#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/mmc.h>

#include "core.h"
#include "card.h"
#include "host.h"
#include "mmc_ops.h"

#define MMC_OPS_TIMEOUT_MS	(10 * 60 * 1000) /* 10 minute timeout */

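/*
 * Fixed tuning block patterns that the card returns during the tuning
 * procedure (CMD19 for SD / 4-bit, CMD21 for eMMC HS200); the 4-bit and
 * 8-bit variants below follow the patterns defined in the specifications.
 */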
static const u8 tuning_blk_pattern_4bit[] = {
	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
};

static const u8 tuning_blk_pattern_8bit[] = {
	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
};

int __mmc_send_status(struct mmc_card *card, u32 *status, unsigned int retries)
{
	int err;
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SEND_STATUS;
	if (!mmc_host_is_spi(card->host))
		cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(card->host, &cmd, retries);
	if (err)
		return err;

	/* NOTE: callers are required to understand the difference
	 * between "native" and SPI format status words!
	 */
	if (status)
		*status = cmd.resp[0];

	return 0;
}
EXPORT_SYMBOL_GPL(__mmc_send_status);

int mmc_send_status(struct mmc_card *card, u32 *status)
{
	return __mmc_send_status(card, status, MMC_CMD_RETRIES);
}
EXPORT_SYMBOL_GPL(mmc_send_status);
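
/*
 * Example (for illustration only): a typical caller inspects the returned
 * R1 status word with the helpers from <linux/mmc/mmc.h>, e.g.:
 *
 *	u32 status;
 *
 *	err = mmc_send_status(card, &status);
 *	if (!err && R1_CURRENT_STATE(status) == R1_STATE_TRAN)
 *		... the card is back in the transfer state ...
 */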

static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card)
{
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SELECT_CARD;

	if (card) {
		cmd.arg = card->rca << 16;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
	} else {
		cmd.arg = 0;
		cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
	}

	return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
}

int mmc_select_card(struct mmc_card *card)
{
	return _mmc_select_card(card->host, card);
}

int mmc_deselect_cards(struct mmc_host *host)
{
	return _mmc_select_card(host, NULL);
}

/*
 * Write the value specified in the device tree or board code into the optional
 * 16 bit Driver Stage Register. This can be used to tune rise/fall times and
 * drive strength of the DAT and CMD outputs. The actual meaning of a given
 * value is hardware dependent.
 * The presence of the DSR register can be determined from the CSD register,
 * bit 76.
 */
int mmc_set_dsr(struct mmc_host *host)
{
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SET_DSR;

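	/*
	 * CMD4 carries the DSR value in the upper 16 bits of the argument;
	 * the lower 16 bits are stuff bits.
	 */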
	cmd.arg = (host->dsr << 16) | 0xffff;
	cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;

	return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
}

int mmc_go_idle(struct mmc_host *host)
{
	int err;
	struct mmc_command cmd = {};

	/*
	 * Non-SPI hosts need to prevent chipselect going active during
	 * GO_IDLE; that would put chips into SPI mode.  Remind them of
	 * that in case of hardware that won't pull up DAT3/nCS otherwise.
	 *
	 * SPI hosts ignore ios.chip_select; it's managed according to
	 * rules that must accommodate non-MMC slaves which this layer
	 * won't even know about.
	 */
	if (!mmc_host_is_spi(host)) {
		mmc_set_chip_select(host, MMC_CS_HIGH);
		mmc_delay(1);
	}

	cmd.opcode = MMC_GO_IDLE_STATE;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC;

	err = mmc_wait_for_cmd(host, &cmd, 0);

	mmc_delay(1);

	if (!mmc_host_is_spi(host)) {
		mmc_set_chip_select(host, MMC_CS_DONTCARE);
		mmc_delay(1);
	}

	host->use_spi_crc = 0;

	return err;
}

int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
{
	struct mmc_command cmd = {};
	int i, err = 0;

	cmd.opcode = MMC_SEND_OP_COND;
	cmd.arg = mmc_host_is_spi(host) ? 0 : ocr;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;

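	/*
	 * Poll up to 100 times with a 10 ms delay between attempts, i.e.
	 * roughly one second, for the card to finish its power-up sequence.
	 */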
	for (i = 100; i; i--) {
		err = mmc_wait_for_cmd(host, &cmd, 0);
		if (err)
			break;

		/* if we're just probing, do a single pass */
		if (ocr == 0)
			break;

		/* otherwise wait until reset completes */
		if (mmc_host_is_spi(host)) {
			if (!(cmd.resp[0] & R1_SPI_IDLE))
				break;
		} else {
			if (cmd.resp[0] & MMC_CARD_BUSY)
				break;
		}

		err = -ETIMEDOUT;

		mmc_delay(10);
	}

	if (rocr && !mmc_host_is_spi(host))
		*rocr = cmd.resp[0];

	return err;
}

int mmc_set_relative_addr(struct mmc_card *card)
{
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SET_RELATIVE_ADDR;
	cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	return mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
}

static int
mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode)
{
	int err;
	struct mmc_command cmd = {};

	cmd.opcode = opcode;
	cmd.arg = arg;
	cmd.flags = MMC_RSP_R2 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
	if (err)
		return err;

	memcpy(cxd, cmd.resp, sizeof(u32) * 4);

	return 0;
}

/*
 * NOTE: void *buf, the caller is required to use a DMA-capable buffer or
 * an on-stack buffer (with some overhead in the callee).
 */
static int
mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
		u32 opcode, void *buf, unsigned len)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;

	mrq.cmd = &cmd;
	mrq.data = &data;

	cmd.opcode = opcode;
	cmd.arg = 0;

	/* NOTE HACK:  the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID.  Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = len;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;

	sg_init_one(&sg, buf, len);

	if (opcode == MMC_SEND_CSD || opcode == MMC_SEND_CID) {
		/*
		 * The spec states that CSD and CID accesses have a timeout
		 * of 64 clock cycles.
		 */
		data.timeout_ns = 0;
		data.timeout_clks = 64;
	} else
		mmc_set_data_timeout(&data, card);

	mmc_wait_for_req(host, &mrq);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return 0;
}

static int mmc_spi_send_csd(struct mmc_card *card, u32 *csd)
{
	int ret, i;
	__be32 *csd_tmp;

	csd_tmp = kzalloc(16, GFP_KERNEL);
	if (!csd_tmp)
		return -ENOMEM;

	ret = mmc_send_cxd_data(card, card->host, MMC_SEND_CSD, csd_tmp, 16);
	if (ret)
		goto err;

	for (i = 0; i < 4; i++)
		csd[i] = be32_to_cpu(csd_tmp[i]);

err:
	kfree(csd_tmp);
	return ret;
}

int mmc_send_csd(struct mmc_card *card, u32 *csd)
{
	if (mmc_host_is_spi(card->host))
		return mmc_spi_send_csd(card, csd);

	return mmc_send_cxd_native(card->host, card->rca << 16, csd,
				MMC_SEND_CSD);
}

static int mmc_spi_send_cid(struct mmc_host *host, u32 *cid)
{
	int ret, i;
	__be32 *cid_tmp;

	cid_tmp = kzalloc(16, GFP_KERNEL);
	if (!cid_tmp)
		return -ENOMEM;

	ret = mmc_send_cxd_data(NULL, host, MMC_SEND_CID, cid_tmp, 16);
	if (ret)
		goto err;

	for (i = 0; i < 4; i++)
		cid[i] = be32_to_cpu(cid_tmp[i]);

err:
	kfree(cid_tmp);
	return ret;
}

int mmc_send_cid(struct mmc_host *host, u32 *cid)
{
	if (mmc_host_is_spi(host))
		return mmc_spi_send_cid(host, cid);

	return mmc_send_cxd_native(host, 0, cid, MMC_ALL_SEND_CID);
}

int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)
{
	int err;
	u8 *ext_csd;

	if (!card || !new_ext_csd)
		return -EINVAL;

	if (!mmc_can_ext_csd(card))
		return -EOPNOTSUPP;

	/*
	 * As the ext_csd is so large and mostly unused, we don't store the
	 * raw block in mmc_card.
	 */
	ext_csd = kzalloc(512, GFP_KERNEL);
	if (!ext_csd)
		return -ENOMEM;

	err = mmc_send_cxd_data(card, card->host, MMC_SEND_EXT_CSD, ext_csd,
				512);
	if (err)
		kfree(ext_csd);
	else
		*new_ext_csd = ext_csd;

	return err;
}
EXPORT_SYMBOL_GPL(mmc_get_ext_csd);

int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp)
{
	struct mmc_command cmd = {};
	int err;

	cmd.opcode = MMC_SPI_READ_OCR;
	cmd.arg = highcap ? (1 << 30) : 0;
	cmd.flags = MMC_RSP_SPI_R3;

	err = mmc_wait_for_cmd(host, &cmd, 0);

	*ocrp = cmd.resp[1];
	return err;
}

int mmc_spi_set_crc(struct mmc_host *host, int use_crc)
{
	struct mmc_command cmd = {};
	int err;

	cmd.opcode = MMC_SPI_CRC_ON_OFF;
	cmd.flags = MMC_RSP_SPI_R1;
	cmd.arg = use_crc;

	err = mmc_wait_for_cmd(host, &cmd, 0);
	if (!err)
		host->use_spi_crc = use_crc;
	return err;
}

static int mmc_switch_status_error(struct mmc_host *host, u32 status)
{
	if (mmc_host_is_spi(host)) {
		if (status & R1_SPI_ILLEGAL_COMMAND)
			return -EBADMSG;
	} else {
		if (R1_STATUS(status))
			pr_warn("%s: unexpected status %#x after switch\n",
				mmc_hostname(host), status);
		if (status & R1_SWITCH_ERROR)
			return -EBADMSG;
	}
	return 0;
}

/* Caller must hold re-tuning */
int __mmc_switch_status(struct mmc_card *card, bool crc_err_fatal)
{
	u32 status;
	int err;

	err = mmc_send_status(card, &status);
	if (!crc_err_fatal && err == -EILSEQ)
		return 0;
	if (err)
		return err;

	return mmc_switch_status_error(card->host, status);
}

int mmc_switch_status(struct mmc_card *card)
{
	return __mmc_switch_status(card, true);
}

static int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
			bool send_status, bool retry_crc_err)
{
	struct mmc_host *host = card->host;
	int err;
	unsigned long timeout;
	u32 status = 0;
	bool expired = false;
	bool busy = false;

	/* We have an unspecified cmd timeout, use the fallback value. */
	if (!timeout_ms)
		timeout_ms = MMC_OPS_TIMEOUT_MS;

	/*
	 * If we aren't allowed to poll by using CMD13 and the host isn't
	 * capable of polling by using ->card_busy(), then rely on waiting
	 * the stated timeout to be sufficient.
	 */
	if (!send_status && !host->ops->card_busy) {
		mmc_delay(timeout_ms);
		return 0;
	}

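	/*
	 * The extra jiffy below guards against the deadline expiring in
	 * less than timeout_ms due to the granularity of jiffies.
	 */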
	timeout = jiffies + msecs_to_jiffies(timeout_ms) + 1;
	do {
		/*
		 * Due to the possibility of being preempted while polling,
		 * check the expiration time first.
		 */
		expired = time_after(jiffies, timeout);

		if (host->ops->card_busy) {
			busy = host->ops->card_busy(host);
		} else {
			err = mmc_send_status(card, &status);
			if (retry_crc_err && err == -EILSEQ) {
				busy = true;
			} else if (err) {
				return err;
			} else {
				err = mmc_switch_status_error(host, status);
				if (err)
					return err;
				busy = R1_CURRENT_STATE(status) == R1_STATE_PRG;
			}
		}

		/* Timeout if the device still remains busy. */
		if (expired && busy) {
			pr_err("%s: Card stuck being busy! %s\n",
				mmc_hostname(host), __func__);
			return -ETIMEDOUT;
		}
	} while (busy);

	return 0;
}

/**
 *	__mmc_switch - modify EXT_CSD register
 *	@card: the MMC card associated with the data transfer
 *	@set: cmd set values
 *	@index: EXT_CSD register index
 *	@value: value to program into EXT_CSD register
 *	@timeout_ms: timeout (ms) for operation performed by register write,
 *                   timeout of zero implies maximum possible timeout
 *	@timing: new timing to change to
 *	@use_busy_signal: use the busy signal as response type
 *	@send_status: send status cmd to poll for busy
 *	@retry_crc_err: retry when CRC errors occur while polling with CMD13 for busy
 *
 *	Modifies the EXT_CSD register for selected card.
 */
int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
		unsigned int timeout_ms, unsigned char timing,
		bool use_busy_signal, bool send_status, bool retry_crc_err)
{
	struct mmc_host *host = card->host;
	int err;
	struct mmc_command cmd = {};
	bool use_r1b_resp = use_busy_signal;
	unsigned char old_timing = host->ios.timing;

	mmc_retune_hold(host);

	/*
	 * If the cmd timeout and the max_busy_timeout of the host are both
	 * specified, let's validate them. A failure means we need to prevent
	 * the host from doing hw busy detection, which is done by converting
	 * to a R1 response instead of a R1B.
	 */
	if (timeout_ms && host->max_busy_timeout &&
		(timeout_ms > host->max_busy_timeout))
		use_r1b_resp = false;

	cmd.opcode = MMC_SWITCH;
	cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
		  (index << 16) |
		  (value << 8) |
		  set;
	cmd.flags = MMC_CMD_AC;
	if (use_r1b_resp) {
		cmd.flags |= MMC_RSP_SPI_R1B | MMC_RSP_R1B;
		/*
		 * A busy_timeout of zero means the host can decide to use
		 * whatever value it finds suitable.
		 */
		cmd.busy_timeout = timeout_ms;
	} else {
		cmd.flags |= MMC_RSP_SPI_R1 | MMC_RSP_R1;
	}

	if (index == EXT_CSD_SANITIZE_START)
		cmd.sanitize_busy = true;

	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
	if (err)
		goto out;

	/* No need to check card status in case of unblocking command */
	if (!use_busy_signal)
		goto out;

	/* If SPI or HW busy detection was used above, then we don't need to poll. */
	if (((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp) ||
		mmc_host_is_spi(host))
		goto out_tim;

	/* Let's try to poll to find out when the command is completed. */
	err = mmc_poll_for_busy(card, timeout_ms, send_status, retry_crc_err);
	if (err)
		goto out;

out_tim:
	/* Switch to the new timing before checking the switch status. */
	if (timing)
		mmc_set_timing(host, timing);

	if (send_status) {
		err = mmc_switch_status(card);
		if (err && timing)
			mmc_set_timing(host, old_timing);
	}
out:
	mmc_retune_release(host);

	return err;
}

int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
		unsigned int timeout_ms)
{
	return __mmc_switch(card, set, index, value, timeout_ms, 0,
			true, true, false);
}
EXPORT_SYMBOL_GPL(mmc_switch);
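
/*
 * Example (for illustration only, not one of this file's callers): a
 * typical user of mmc_switch() programs a single EXT_CSD byte, e.g.
 * switching the card to 4-bit bus width:
 *
 *	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
 *			 EXT_CSD_BUS_WIDTH_4,
 *			 card->ext_csd.generic_cmd6_time);
 */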

int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;
	struct mmc_ios *ios = &host->ios;
	const u8 *tuning_block_pattern;
	int size, err = 0;
	u8 *data_buf;

	if (ios->bus_width == MMC_BUS_WIDTH_8) {
		tuning_block_pattern = tuning_blk_pattern_8bit;
		size = sizeof(tuning_blk_pattern_8bit);
	} else if (ios->bus_width == MMC_BUS_WIDTH_4) {
		tuning_block_pattern = tuning_blk_pattern_4bit;
		size = sizeof(tuning_blk_pattern_4bit);
	} else
		return -EINVAL;

	data_buf = kzalloc(size, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;

	mrq.cmd = &cmd;
	mrq.data = &data;

	cmd.opcode = opcode;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = size;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	/*
	 * According to the tuning specs, the tuning process normally takes
	 * fewer than 40 executions of CMD19, and the timeout value should
	 * be shorter than 150 ms.
	 */
	data.timeout_ns = 150 * NSEC_PER_MSEC;

	data.sg = &sg;
	data.sg_len = 1;
	sg_init_one(&sg, data_buf, size);

	mmc_wait_for_req(host, &mrq);

	if (cmd_error)
		*cmd_error = cmd.error;

	if (cmd.error) {
		err = cmd.error;
		goto out;
	}

	if (data.error) {
		err = data.error;
		goto out;
	}

	if (memcmp(data_buf, tuning_block_pattern, size))
		err = -EIO;

out:
	kfree(data_buf);
	return err;
}
EXPORT_SYMBOL_GPL(mmc_send_tuning);
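
/*
 * Example (for illustration only): a host driver's ->execute_tuning()
 * callback typically steps through its sample-delay settings and calls
 * mmc_send_tuning(host, opcode, NULL) at each step, keeping a setting
 * for which the returned pattern matched (i.e. the call returned 0).
 */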

int mmc_abort_tuning(struct mmc_host *host, u32 opcode)
{
	struct mmc_command cmd = {};

	/*
	 * The eMMC specification states that CMD12 can be used to stop a
	 * tuning command, but the SD specification does not, so do nothing
	 * unless it is eMMC.
	 */
	if (opcode != MMC_SEND_TUNING_BLOCK_HS200)
		return 0;

	cmd.opcode = MMC_STOP_TRANSMISSION;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;

	/*
	 * For drivers that override R1 to R1b, set an arbitrary timeout based
	 * on the tuning timeout i.e. 150ms.
	 */
	cmd.busy_timeout = 150;

	return mmc_wait_for_cmd(host, &cmd, 0);
}
EXPORT_SYMBOL_GPL(mmc_abort_tuning);

static int
mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
		  u8 len)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;
	u8 *data_buf;
	u8 *test_buf;
	int i, err;
	static u8 testdata_8bit[8] = { 0x55, 0xaa, 0, 0, 0, 0, 0, 0 };
	static u8 testdata_4bit[4] = { 0x5a, 0, 0, 0 };

	/* dma onto stack is unsafe/nonportable, but callers to this
	 * routine normally provide temporary on-stack buffers ...
	 */
	data_buf = kmalloc(len, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;

	if (len == 8)
		test_buf = testdata_8bit;
	else if (len == 4)
		test_buf = testdata_4bit;
	else {
		pr_err("%s: Invalid bus_width %d\n",
		       mmc_hostname(host), len);
		kfree(data_buf);
		return -EINVAL;
	}

	if (opcode == MMC_BUS_TEST_W)
		memcpy(data_buf, test_buf, len);

	mrq.cmd = &cmd;
	mrq.data = &data;
	cmd.opcode = opcode;
	cmd.arg = 0;

	/* NOTE HACK:  the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID.  Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = len;
	data.blocks = 1;
	if (opcode == MMC_BUS_TEST_R)
		data.flags = MMC_DATA_READ;
	else
		data.flags = MMC_DATA_WRITE;

	data.sg = &sg;
	data.sg_len = 1;
	mmc_set_data_timeout(&data, card);
	sg_init_one(&sg, data_buf, len);
	mmc_wait_for_req(host, &mrq);
	err = 0;
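	/*
	 * On BUS_TEST_R the card is expected to return the inverse of the
	 * written pattern, so each checked byte XORed with the test data
	 * must equal 0xff on working data lines.
	 */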
	if (opcode == MMC_BUS_TEST_R) {
		for (i = 0; i < len / 4; i++)
			if ((test_buf[i] ^ data_buf[i]) != 0xff) {
				err = -EIO;
				break;
			}
	}
	kfree(data_buf);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return err;
}

int mmc_bus_test(struct mmc_card *card, u8 bus_width)
{
	int width;

	if (bus_width == MMC_BUS_WIDTH_8)
		width = 8;
	else if (bus_width == MMC_BUS_WIDTH_4)
		width = 4;
	else if (bus_width == MMC_BUS_WIDTH_1)
		return 0; /* no need for test */
	else
		return -EINVAL;

	/*
	 * Ignore errors from BUS_TEST_W.  BUS_TEST_R will fail if there
	 * is a problem.  This improves chances that the test will work.
	 */
	mmc_send_bus_test(card, card->host, MMC_BUS_TEST_W, width);
	return mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
}

static int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status)
{
	struct mmc_command cmd = {};
	unsigned int opcode;
	int err;

	opcode = card->ext_csd.hpi_cmd;
	if (opcode == MMC_STOP_TRANSMISSION)
		cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
	else if (opcode == MMC_SEND_STATUS)
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	cmd.opcode = opcode;
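	/* The RCA goes in the upper 16 bits; bit 0 set is the HPI flag. */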
	cmd.arg = card->rca << 16 | 1;

	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		pr_warn("%s: error %d interrupting operation. "
			"HPI command response %#x\n", mmc_hostname(card->host),
			err, cmd.resp[0]);
		return err;
	}
	if (status)
		*status = cmd.resp[0];

	return 0;
}

/**
 *	mmc_interrupt_hpi - issue a High Priority Interrupt
 *	@card: the MMC card associated with the HPI transfer
 *
 *	Issue a High Priority Interrupt and poll the card status until it
 *	leaves the prg-state.
 */
int mmc_interrupt_hpi(struct mmc_card *card)
{
	int err;
	u32 status;
	unsigned long prg_wait;

	if (!card->ext_csd.hpi_en) {
		pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
		return 1;
	}

	err = mmc_send_status(card, &status);
	if (err) {
		pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
		goto out;
	}

	switch (R1_CURRENT_STATE(status)) {
	case R1_STATE_IDLE:
	case R1_STATE_READY:
	case R1_STATE_STBY:
	case R1_STATE_TRAN:
		/*
		 * In idle and transfer states, HPI is not needed and the caller
		 * can issue the next intended command immediately
		 */
		goto out;
	case R1_STATE_PRG:
		break;
	default:
		/* In all other states, it's illegal to issue HPI */
		pr_debug("%s: HPI cannot be sent. Card state=%d\n",
			mmc_hostname(card->host), R1_CURRENT_STATE(status));
		err = -EINVAL;
		goto out;
	}

	err = mmc_send_hpi_cmd(card, &status);
	if (err)
		goto out;

	prg_wait = jiffies + msecs_to_jiffies(card->ext_csd.out_of_int_time);
	do {
		err = mmc_send_status(card, &status);

		if (!err && R1_CURRENT_STATE(status) == R1_STATE_TRAN)
			break;
		if (time_after(jiffies, prg_wait))
			err = -ETIMEDOUT;
	} while (!err);

out:
	return err;
}

int mmc_can_ext_csd(struct mmc_card *card)
{
	return (card && card->csd.mmca_vsn > CSD_SPEC_VER_3);
}

static int mmc_read_bkops_status(struct mmc_card *card)
{
	int err;
	u8 *ext_csd;

	err = mmc_get_ext_csd(card, &ext_csd);
	if (err)
		return err;

	card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS];
	card->ext_csd.raw_exception_status = ext_csd[EXT_CSD_EXP_EVENTS_STATUS];
	kfree(ext_csd);
	return 0;
}

9091cf8f7e5SUlf Hansson /**
9100c204979SUlf Hansson  *	mmc_run_bkops - Run BKOPS for supported cards
9110c204979SUlf Hansson  *	@card: MMC card to run BKOPS for
9121cf8f7e5SUlf Hansson  *
9130c204979SUlf Hansson  *	Run background operations synchronously for cards having manual BKOPS
9140c204979SUlf Hansson  *	enabled and in case it reports urgent BKOPS level.
9151cf8f7e5SUlf Hansson */
void mmc_run_bkops(struct mmc_card *card)
{
	int err;

	if (!card->ext_csd.man_bkops_en)
		return;

	err = mmc_read_bkops_status(card);
	if (err) {
		pr_err("%s: Failed to read bkops status: %d\n",
		       mmc_hostname(card->host), err);
		return;
	}

	if (!card->ext_csd.raw_bkops_status ||
	    card->ext_csd.raw_bkops_status < EXT_CSD_BKOPS_LEVEL_2)
		return;

	mmc_retune_hold(card->host);

	/*
	 * For urgent BKOPS status, LEVEL_2 and higher, let's execute
	 * synchronously. In the future, we may consider starting BKOPS for
	 * less urgent levels by using an asynchronous background task, when
	 * idle.
	 */
	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			EXT_CSD_BKOPS_START, 1, MMC_OPS_TIMEOUT_MS);
	if (err)
		pr_warn("%s: Error %d starting bkops\n",
			mmc_hostname(card->host), err);

	mmc_retune_release(card->host);
}
EXPORT_SYMBOL(mmc_run_bkops);

/*
 * Flush the cache to the non-volatile storage.
 */
int mmc_flush_cache(struct mmc_card *card)
{
	int err = 0;

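	/*
	 * Only issue the FLUSH_CACHE switch when the device advertises a
	 * cache (cache_size > 0) and the cache has been enabled via
	 * CACHE_CTRL.
	 */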
	if (mmc_card_mmc(card) &&
			(card->ext_csd.cache_size > 0) &&
			(card->ext_csd.cache_ctrl & 1)) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				EXT_CSD_FLUSH_CACHE, 1, 0);
		if (err)
			pr_err("%s: cache flush error %d\n",
					mmc_hostname(card->host), err);
	}

	return err;
}
EXPORT_SYMBOL(mmc_flush_cache);

static int mmc_cmdq_switch(struct mmc_card *card, bool enable)
{
	u8 val = enable ? EXT_CSD_CMDQ_MODE_ENABLED : 0;
	int err;

	if (!card->ext_csd.cmdq_support)
		return -EOPNOTSUPP;

	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_CMDQ_MODE_EN,
			 val, card->ext_csd.generic_cmd6_time);
	if (!err)
		card->ext_csd.cmdq_en = enable;

	return err;
}

int mmc_cmdq_enable(struct mmc_card *card)
{
	return mmc_cmdq_switch(card, true);
}
EXPORT_SYMBOL_GPL(mmc_cmdq_enable);

int mmc_cmdq_disable(struct mmc_card *card)
{
	return mmc_cmdq_switch(card, false);
}
EXPORT_SYMBOL_GPL(mmc_cmdq_disable);