xref: /openbmc/linux/drivers/mmc/core/mmc_ops.c (revision 51f5b305)
12874c5fdSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-or-later
2da7fbe58SPierre Ossman /*
370f10482SPierre Ossman  *  linux/drivers/mmc/core/mmc_ops.c
4da7fbe58SPierre Ossman  *
5da7fbe58SPierre Ossman  *  Copyright 2006-2007 Pierre Ossman
6da7fbe58SPierre Ossman  */
7da7fbe58SPierre Ossman 
85a0e3ad6STejun Heo #include <linux/slab.h>
93ef77af1SPaul Gortmaker #include <linux/export.h>
10da7fbe58SPierre Ossman #include <linux/types.h>
11da7fbe58SPierre Ossman #include <linux/scatterlist.h>
12da7fbe58SPierre Ossman 
13da7fbe58SPierre Ossman #include <linux/mmc/host.h>
14da7fbe58SPierre Ossman #include <linux/mmc/card.h>
15da7fbe58SPierre Ossman #include <linux/mmc/mmc.h>
16da7fbe58SPierre Ossman 
17da7fbe58SPierre Ossman #include "core.h"
181cf8f7e5SUlf Hansson #include "card.h"
19c6dbab9cSAdrian Hunter #include "host.h"
20da7fbe58SPierre Ossman #include "mmc_ops.h"
21da7fbe58SPierre Ossman 
2224ed3bd0SUlf Hansson #define MMC_BKOPS_TIMEOUT_MS		(120 * 1000) /* 120s */
2355c2b8b9SUlf Hansson #define MMC_SANITIZE_TIMEOUT_MS		(240 * 1000) /* 240s */
24e949dee3SUlf Hansson #define MMC_OP_COND_PERIOD_US		(4 * 1000) /* 4ms */
251760fdb6SUlf Hansson #define MMC_OP_COND_TIMEOUT_MS		1000 /* 1s */
268fee476bSTrey Ramsay 
2704cdbbfaSUlf Hansson static const u8 tuning_blk_pattern_4bit[] = {
2804cdbbfaSUlf Hansson 	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
2904cdbbfaSUlf Hansson 	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
3004cdbbfaSUlf Hansson 	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
3104cdbbfaSUlf Hansson 	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
3204cdbbfaSUlf Hansson 	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
3304cdbbfaSUlf Hansson 	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
3404cdbbfaSUlf Hansson 	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
3504cdbbfaSUlf Hansson 	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
3604cdbbfaSUlf Hansson };
3704cdbbfaSUlf Hansson 
3804cdbbfaSUlf Hansson static const u8 tuning_blk_pattern_8bit[] = {
3904cdbbfaSUlf Hansson 	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
4004cdbbfaSUlf Hansson 	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
4104cdbbfaSUlf Hansson 	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
4204cdbbfaSUlf Hansson 	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
4304cdbbfaSUlf Hansson 	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
4404cdbbfaSUlf Hansson 	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
4504cdbbfaSUlf Hansson 	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
4604cdbbfaSUlf Hansson 	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
4704cdbbfaSUlf Hansson 	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
4804cdbbfaSUlf Hansson 	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
4904cdbbfaSUlf Hansson 	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
5004cdbbfaSUlf Hansson 	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
5104cdbbfaSUlf Hansson 	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
5204cdbbfaSUlf Hansson 	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
5304cdbbfaSUlf Hansson 	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
5404cdbbfaSUlf Hansson 	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
5504cdbbfaSUlf Hansson };
5604cdbbfaSUlf Hansson 
5704f967adSUlf Hansson struct mmc_busy_data {
5804f967adSUlf Hansson 	struct mmc_card *card;
5904f967adSUlf Hansson 	bool retry_crc_err;
6004f967adSUlf Hansson 	enum mmc_busy_cmd busy_cmd;
6104f967adSUlf Hansson };
6204f967adSUlf Hansson 
6376bfc7ccSHuijin Park struct mmc_op_cond_busy_data {
6476bfc7ccSHuijin Park 	struct mmc_host *host;
6576bfc7ccSHuijin Park 	u32 ocr;
6676bfc7ccSHuijin Park 	struct mmc_command *cmd;
6776bfc7ccSHuijin Park };
6876bfc7ccSHuijin Park 
/*
 * Issue CMD13 (SEND_STATUS) and optionally return the card's status word.
 * @retries selects how often the core may retry the command.
 * Returns 0 on success or a negative errno.
 */
int __mmc_send_status(struct mmc_card *card, u32 *status, unsigned int retries)
{
	struct mmc_command cmd = {};
	int err;

	cmd.opcode = MMC_SEND_STATUS;
	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
	if (!mmc_host_is_spi(card->host))
		cmd.arg = card->rca << 16;

	err = mmc_wait_for_cmd(card->host, &cmd, retries);
	if (err)
		return err;

	/*
	 * NOTE: callers must understand the difference between "native"
	 * and SPI format status words!
	 */
	if (status)
		*status = cmd.resp[0];

	return 0;
}
EXPORT_SYMBOL_GPL(__mmc_send_status);
922185bc2cSUlf Hansson 
/* Convenience wrapper for __mmc_send_status() with the default retry count. */
int mmc_send_status(struct mmc_card *card, u32 *status)
{
	return __mmc_send_status(card, status, MMC_CMD_RETRIES);
}
EXPORT_SYMBOL_GPL(mmc_send_status);
98a27fbf2fSSeungwon Jeon 
_mmc_select_card(struct mmc_host * host,struct mmc_card * card)99da7fbe58SPierre Ossman static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card)
100da7fbe58SPierre Ossman {
101c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
102da7fbe58SPierre Ossman 
103da7fbe58SPierre Ossman 	cmd.opcode = MMC_SELECT_CARD;
104da7fbe58SPierre Ossman 
105da7fbe58SPierre Ossman 	if (card) {
106da7fbe58SPierre Ossman 		cmd.arg = card->rca << 16;
107da7fbe58SPierre Ossman 		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
108da7fbe58SPierre Ossman 	} else {
109da7fbe58SPierre Ossman 		cmd.arg = 0;
110da7fbe58SPierre Ossman 		cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
111da7fbe58SPierre Ossman 	}
112da7fbe58SPierre Ossman 
1130899e741SMasahiro Yamada 	return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
114da7fbe58SPierre Ossman }
115da7fbe58SPierre Ossman 
mmc_select_card(struct mmc_card * card)116da7fbe58SPierre Ossman int mmc_select_card(struct mmc_card *card)
117da7fbe58SPierre Ossman {
118da7fbe58SPierre Ossman 
119da7fbe58SPierre Ossman 	return _mmc_select_card(card->host, card);
120da7fbe58SPierre Ossman }
121da7fbe58SPierre Ossman 
mmc_deselect_cards(struct mmc_host * host)122da7fbe58SPierre Ossman int mmc_deselect_cards(struct mmc_host *host)
123da7fbe58SPierre Ossman {
124da7fbe58SPierre Ossman 	return _mmc_select_card(host, NULL);
125da7fbe58SPierre Ossman }
126da7fbe58SPierre Ossman 
1273d705d14SSascha Hauer /*
1283d705d14SSascha Hauer  * Write the value specified in the device tree or board code into the optional
1293d705d14SSascha Hauer  * 16 bit Driver Stage Register. This can be used to tune raise/fall times and
1303d705d14SSascha Hauer  * drive strength of the DAT and CMD outputs. The actual meaning of a given
1313d705d14SSascha Hauer  * value is hardware dependant.
1323d705d14SSascha Hauer  * The presence of the DSR register can be determined from the CSD register,
1333d705d14SSascha Hauer  * bit 76.
1343d705d14SSascha Hauer  */
mmc_set_dsr(struct mmc_host * host)1353d705d14SSascha Hauer int mmc_set_dsr(struct mmc_host *host)
1363d705d14SSascha Hauer {
137c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
1383d705d14SSascha Hauer 
1393d705d14SSascha Hauer 	cmd.opcode = MMC_SET_DSR;
1403d705d14SSascha Hauer 
1413d705d14SSascha Hauer 	cmd.arg = (host->dsr << 16) | 0xffff;
1423d705d14SSascha Hauer 	cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
1433d705d14SSascha Hauer 
1443d705d14SSascha Hauer 	return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
1453d705d14SSascha Hauer }
1463d705d14SSascha Hauer 
mmc_go_idle(struct mmc_host * host)147da7fbe58SPierre Ossman int mmc_go_idle(struct mmc_host *host)
148da7fbe58SPierre Ossman {
149da7fbe58SPierre Ossman 	int err;
150c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
151da7fbe58SPierre Ossman 
152af517150SDavid Brownell 	/*
153af517150SDavid Brownell 	 * Non-SPI hosts need to prevent chipselect going active during
154af517150SDavid Brownell 	 * GO_IDLE; that would put chips into SPI mode.  Remind them of
155af517150SDavid Brownell 	 * that in case of hardware that won't pull up DAT3/nCS otherwise.
156af517150SDavid Brownell 	 *
157af517150SDavid Brownell 	 * SPI hosts ignore ios.chip_select; it's managed according to
15825985edcSLucas De Marchi 	 * rules that must accommodate non-MMC slaves which this layer
159af517150SDavid Brownell 	 * won't even know about.
160af517150SDavid Brownell 	 */
161af517150SDavid Brownell 	if (!mmc_host_is_spi(host)) {
162da7fbe58SPierre Ossman 		mmc_set_chip_select(host, MMC_CS_HIGH);
163da7fbe58SPierre Ossman 		mmc_delay(1);
164af517150SDavid Brownell 	}
165da7fbe58SPierre Ossman 
166da7fbe58SPierre Ossman 	cmd.opcode = MMC_GO_IDLE_STATE;
167da7fbe58SPierre Ossman 	cmd.arg = 0;
168af517150SDavid Brownell 	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC;
169da7fbe58SPierre Ossman 
170da7fbe58SPierre Ossman 	err = mmc_wait_for_cmd(host, &cmd, 0);
171da7fbe58SPierre Ossman 
172da7fbe58SPierre Ossman 	mmc_delay(1);
173da7fbe58SPierre Ossman 
174af517150SDavid Brownell 	if (!mmc_host_is_spi(host)) {
175da7fbe58SPierre Ossman 		mmc_set_chip_select(host, MMC_CS_DONTCARE);
176da7fbe58SPierre Ossman 		mmc_delay(1);
177af517150SDavid Brownell 	}
178af517150SDavid Brownell 
179af517150SDavid Brownell 	host->use_spi_crc = 0;
180da7fbe58SPierre Ossman 
181da7fbe58SPierre Ossman 	return err;
182da7fbe58SPierre Ossman }
183da7fbe58SPierre Ossman 
/*
 * Busy-poll callback for CMD1 (SEND_OP_COND): issue the command once and
 * report whether the card still signals busy.
 */
static int __mmc_send_op_cond_cb(void *cb_data, bool *busy)
{
	struct mmc_op_cond_busy_data *data = cb_data;
	struct mmc_host *host = data->host;
	struct mmc_command *cmd = data->cmd;
	int err;

	err = mmc_wait_for_cmd(host, cmd, 0);
	if (err)
		return err;

	if (mmc_host_is_spi(host)) {
		/* SPI: the card is ready once the IDLE bit clears. */
		if (!(cmd->resp[0] & R1_SPI_IDLE)) {
			*busy = false;
			return 0;
		}
	} else if (cmd->resp[0] & MMC_CARD_BUSY) {
		/* Native: OCR bit 31 set means power-up is complete. */
		*busy = false;
		return 0;
	}

	*busy = true;

	/*
	 * According to eMMC specification v5.1 section 6.4.3, we
	 * should issue CMD1 repeatedly in the idle state until
	 * the eMMC is ready. Otherwise some eMMC devices seem to enter
	 * the inactive mode after mmc_init_card() issued CMD0 when
	 * the eMMC device is busy.
	 */
	if (!data->ocr && !mmc_host_is_spi(host))
		cmd->arg = cmd->resp[0] | BIT(30);

	return 0;
}
222da7fbe58SPierre Ossman 
/*
 * Send CMD1 (SEND_OP_COND) repeatedly until the card leaves the busy
 * state; on success the final OCR is stored in @rocr for native hosts.
 */
int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
{
	struct mmc_command cmd = {};
	struct mmc_op_cond_busy_data cb_data = {
		.host = host,
		.ocr = ocr,
		.cmd = &cmd
	};
	int err;

	cmd.opcode = MMC_SEND_OP_COND;
	cmd.arg = mmc_host_is_spi(host) ? 0 : ocr;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;

	err = __mmc_poll_for_busy(host, MMC_OP_COND_PERIOD_US,
				  MMC_OP_COND_TIMEOUT_MS,
				  &__mmc_send_op_cond_cb, &cb_data);
	if (err)
		return err;

	if (rocr && !mmc_host_is_spi(host))
		*rocr = cmd.resp[0];

	return 0;
}
248da7fbe58SPierre Ossman 
mmc_set_relative_addr(struct mmc_card * card)249da7fbe58SPierre Ossman int mmc_set_relative_addr(struct mmc_card *card)
250da7fbe58SPierre Ossman {
251c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
252da7fbe58SPierre Ossman 
253da7fbe58SPierre Ossman 	cmd.opcode = MMC_SET_RELATIVE_ADDR;
254da7fbe58SPierre Ossman 	cmd.arg = card->rca << 16;
255da7fbe58SPierre Ossman 	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
256da7fbe58SPierre Ossman 
2570899e741SMasahiro Yamada 	return mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
258da7fbe58SPierre Ossman }
259da7fbe58SPierre Ossman 
260af517150SDavid Brownell static int
mmc_send_cxd_native(struct mmc_host * host,u32 arg,u32 * cxd,int opcode)261af517150SDavid Brownell mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode)
262da7fbe58SPierre Ossman {
263da7fbe58SPierre Ossman 	int err;
264c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
265da7fbe58SPierre Ossman 
266af517150SDavid Brownell 	cmd.opcode = opcode;
267af517150SDavid Brownell 	cmd.arg = arg;
268da7fbe58SPierre Ossman 	cmd.flags = MMC_RSP_R2 | MMC_CMD_AC;
269da7fbe58SPierre Ossman 
270af517150SDavid Brownell 	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
27117b0429dSPierre Ossman 	if (err)
272da7fbe58SPierre Ossman 		return err;
273da7fbe58SPierre Ossman 
274af517150SDavid Brownell 	memcpy(cxd, cmd.resp, sizeof(u32) * 4);
275da7fbe58SPierre Ossman 
27617b0429dSPierre Ossman 	return 0;
277da7fbe58SPierre Ossman }
278da7fbe58SPierre Ossman 
2791a41313eSKyungsik Lee /*
2801a41313eSKyungsik Lee  * NOTE: void *buf, caller for the buf is required to use DMA-capable
2811a41313eSKyungsik Lee  * buffer or on-stack buffer (with some overhead in callee).
2821a41313eSKyungsik Lee  */
mmc_send_adtc_data(struct mmc_card * card,struct mmc_host * host,u32 opcode,u32 args,void * buf,unsigned len)283cec18ad9SUlf Hansson int mmc_send_adtc_data(struct mmc_card *card, struct mmc_host *host, u32 opcode,
284cec18ad9SUlf Hansson 		       u32 args, void *buf, unsigned len)
285da7fbe58SPierre Ossman {
286c7836d15SMasahiro Yamada 	struct mmc_request mrq = {};
287c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
288c7836d15SMasahiro Yamada 	struct mmc_data data = {};
289da7fbe58SPierre Ossman 	struct scatterlist sg;
290da7fbe58SPierre Ossman 
291da7fbe58SPierre Ossman 	mrq.cmd = &cmd;
292da7fbe58SPierre Ossman 	mrq.data = &data;
293da7fbe58SPierre Ossman 
294af517150SDavid Brownell 	cmd.opcode = opcode;
295cec18ad9SUlf Hansson 	cmd.arg = args;
296da7fbe58SPierre Ossman 
297af517150SDavid Brownell 	/* NOTE HACK:  the MMC_RSP_SPI_R1 is always correct here, but we
298af517150SDavid Brownell 	 * rely on callers to never use this with "native" calls for reading
299af517150SDavid Brownell 	 * CSD or CID.  Native versions of those commands use the R2 type,
300af517150SDavid Brownell 	 * not R1 plus a data block.
301af517150SDavid Brownell 	 */
302af517150SDavid Brownell 	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
303af517150SDavid Brownell 
304af517150SDavid Brownell 	data.blksz = len;
305da7fbe58SPierre Ossman 	data.blocks = 1;
306da7fbe58SPierre Ossman 	data.flags = MMC_DATA_READ;
307da7fbe58SPierre Ossman 	data.sg = &sg;
308da7fbe58SPierre Ossman 	data.sg_len = 1;
309da7fbe58SPierre Ossman 
310601ed60cSUlf Hansson 	sg_init_one(&sg, buf, len);
311da7fbe58SPierre Ossman 
312cda56ac2SAdrian Hunter 	if (opcode == MMC_SEND_CSD || opcode == MMC_SEND_CID) {
3130d3e0460SMatthew Fleming 		/*
3140d3e0460SMatthew Fleming 		 * The spec states that CSR and CID accesses have a timeout
3150d3e0460SMatthew Fleming 		 * of 64 clock cycles.
3160d3e0460SMatthew Fleming 		 */
3170d3e0460SMatthew Fleming 		data.timeout_ns = 0;
3180d3e0460SMatthew Fleming 		data.timeout_clks = 64;
319cda56ac2SAdrian Hunter 	} else
320cda56ac2SAdrian Hunter 		mmc_set_data_timeout(&data, card);
321da7fbe58SPierre Ossman 
322af517150SDavid Brownell 	mmc_wait_for_req(host, &mrq);
323af517150SDavid Brownell 
32417b0429dSPierre Ossman 	if (cmd.error)
325da7fbe58SPierre Ossman 		return cmd.error;
32617b0429dSPierre Ossman 	if (data.error)
327da7fbe58SPierre Ossman 		return data.error;
328da7fbe58SPierre Ossman 
32917b0429dSPierre Ossman 	return 0;
330da7fbe58SPierre Ossman }
331da7fbe58SPierre Ossman 
/* Read CSD/CID over SPI: fetch 16 raw bytes and unpack big-endian words. */
static int mmc_spi_send_cxd(struct mmc_host *host, u32 *cxd, u32 opcode)
{
	__be32 *cxd_tmp;
	int ret, i;

	/* Bounce buffer: the transfer requires DMA-capable memory. */
	cxd_tmp = kzalloc(16, GFP_KERNEL);
	if (!cxd_tmp)
		return -ENOMEM;

	ret = mmc_send_adtc_data(NULL, host, opcode, 0, cxd_tmp, 16);
	if (!ret) {
		for (i = 0; i < 4; i++)
			cxd[i] = be32_to_cpu(cxd_tmp[i]);
	}

	kfree(cxd_tmp);
	return ret;
}
352af517150SDavid Brownell 
/* Read the card's CSD register, dispatching on SPI vs. native mode. */
int mmc_send_csd(struct mmc_card *card, u32 *csd)
{
	if (mmc_host_is_spi(card->host))
		return mmc_spi_send_cxd(card->host, csd, MMC_SEND_CSD);

	return mmc_send_cxd_native(card->host, card->rca << 16, csd,
				   MMC_SEND_CSD);
}
3610796e439SUlf Hansson 
/* Read the card's CID register, dispatching on SPI vs. native mode. */
int mmc_send_cid(struct mmc_host *host, u32 *cid)
{
	if (mmc_host_is_spi(host))
		return mmc_spi_send_cxd(host, cid, MMC_SEND_CID);

	return mmc_send_cxd_native(host, 0, cid, MMC_ALL_SEND_CID);
}
369a1473732SUlf Hansson 
/*
 * Allocate a 512-byte buffer and read the card's EXT_CSD into it.
 * On success *@new_ext_csd points at the buffer; the caller owns (and
 * must kfree) it. Returns 0 or a negative errno.
 */
int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)
{
	u8 *ext_csd;
	int err;

	if (!card || !new_ext_csd)
		return -EINVAL;

	if (!mmc_can_ext_csd(card))
		return -EOPNOTSUPP;

	/*
	 * As the ext_csd is so large and mostly unused, we don't store the
	 * raw block in mmc_card.
	 */
	ext_csd = kzalloc(512, GFP_KERNEL);
	if (!ext_csd)
		return -ENOMEM;

	err = mmc_send_adtc_data(card, card->host, MMC_SEND_EXT_CSD, 0,
				 ext_csd, 512);
	if (err) {
		kfree(ext_csd);
		return err;
	}

	*new_ext_csd = ext_csd;
	return 0;
}
EXPORT_SYMBOL_GPL(mmc_get_ext_csd);
399e21aa519SUlf Hansson 
/* Read the OCR over SPI (CMD58); @highcap sets the HCS bit in the arg. */
int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp)
{
	struct mmc_command cmd = {};
	int err;

	cmd.opcode = MMC_SPI_READ_OCR;
	cmd.arg = highcap ? (1 << 30) : 0;
	cmd.flags = MMC_RSP_SPI_R3;

	err = mmc_wait_for_cmd(host, &cmd, 0);

	/* resp[1] holds the OCR; only meaningful when err == 0. */
	*ocrp = cmd.resp[1];
	return err;
}
414af517150SDavid Brownell 
mmc_spi_set_crc(struct mmc_host * host,int use_crc)415af517150SDavid Brownell int mmc_spi_set_crc(struct mmc_host *host, int use_crc)
416af517150SDavid Brownell {
417c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
418af517150SDavid Brownell 	int err;
419af517150SDavid Brownell 
420af517150SDavid Brownell 	cmd.opcode = MMC_SPI_CRC_ON_OFF;
421af517150SDavid Brownell 	cmd.flags = MMC_RSP_SPI_R1;
422af517150SDavid Brownell 	cmd.arg = use_crc;
423af517150SDavid Brownell 
424af517150SDavid Brownell 	err = mmc_wait_for_cmd(host, &cmd, 0);
425af517150SDavid Brownell 	if (!err)
426af517150SDavid Brownell 		host->use_spi_crc = use_crc;
427af517150SDavid Brownell 	return err;
428af517150SDavid Brownell }
429af517150SDavid Brownell 
/* Map a post-CMD6 status word to 0 or -EBADMSG; warn on unexpected bits. */
static int mmc_switch_status_error(struct mmc_host *host, u32 status)
{
	if (mmc_host_is_spi(host)) {
		if (status & R1_SPI_ILLEGAL_COMMAND)
			return -EBADMSG;
		return 0;
	}

	if (R1_STATUS(status))
		pr_warn("%s: unexpected status %#x after switch\n",
			mmc_hostname(host), status);
	if (status & R1_SWITCH_ERROR)
		return -EBADMSG;

	return 0;
}
444ed16f58dSAdrian Hunter 
44520348d19SUlf Hansson /* Caller must hold re-tuning */
mmc_switch_status(struct mmc_card * card,bool crc_err_fatal)44660db8a47SUlf Hansson int mmc_switch_status(struct mmc_card *card, bool crc_err_fatal)
44720348d19SUlf Hansson {
44820348d19SUlf Hansson 	u32 status;
44920348d19SUlf Hansson 	int err;
45020348d19SUlf Hansson 
45120348d19SUlf Hansson 	err = mmc_send_status(card, &status);
452ef3d2322SAdrian Hunter 	if (!crc_err_fatal && err == -EILSEQ)
453ef3d2322SAdrian Hunter 		return 0;
45420348d19SUlf Hansson 	if (err)
45520348d19SUlf Hansson 		return err;
45620348d19SUlf Hansson 
45720348d19SUlf Hansson 	return mmc_switch_status_error(card->host, status);
45820348d19SUlf Hansson }
45920348d19SUlf Hansson 
/*
 * Generic busy-poll callback: use the host's ->card_busy() hook when it is
 * usable for this command, otherwise poll with CMD13 and interpret the
 * status according to the command being waited on.
 */
static int mmc_busy_cb(void *cb_data, bool *busy)
{
	struct mmc_busy_data *data = cb_data;
	struct mmc_host *host = data->card->host;
	u32 status = 0;
	int err;

	/* MMC_BUSY_IO must be polled via CMD13; otherwise prefer the hook. */
	if (data->busy_cmd != MMC_BUSY_IO && host->ops->card_busy) {
		*busy = host->ops->card_busy(host);
		return 0;
	}

	err = mmc_send_status(data->card, &status);
	if (data->retry_crc_err && err == -EILSEQ) {
		/* Treat a CRC error as "still busy" and keep polling. */
		*busy = true;
		return 0;
	}
	if (err)
		return err;

	switch (data->busy_cmd) {
	case MMC_BUSY_CMD6:
		err = mmc_switch_status_error(host, status);
		break;
	case MMC_BUSY_ERASE:
		err = R1_STATUS(status) ? -EIO : 0;
		break;
	case MMC_BUSY_HPI:
	case MMC_BUSY_EXTR_SINGLE:
	case MMC_BUSY_IO:
		break;
	default:
		err = -EINVAL;
		break;
	}

	if (err)
		return err;

	*busy = !mmc_ready_for_data(status);
	return 0;
}
5016972096aSUlf Hansson 
/*
 * Repeatedly invoke @busy_cb until the device reports not-busy, an error
 * occurs, or @timeout_ms elapses. Polling backs off exponentially, starting
 * at @period_us (or 32us if zero) and capping at ~32ms per iteration.
 */
int __mmc_poll_for_busy(struct mmc_host *host, unsigned int period_us,
			unsigned int timeout_ms,
			int (*busy_cb)(void *cb_data, bool *busy),
			void *cb_data)
{
	unsigned int udelay = period_us ? period_us : 32, udelay_max = 32768;
	unsigned long timeout;
	bool expired = false;
	bool busy = false;
	int err;

	timeout = jiffies + msecs_to_jiffies(timeout_ms) + 1;
	do {
		/*
		 * Sample the deadline before invoking the callback, so
		 * that being preempted between the two cannot turn a
		 * successful poll into a spurious timeout.
		 */
		expired = time_after(jiffies, timeout);

		err = (*busy_cb)(cb_data, &busy);
		if (err)
			return err;

		/* Give up only if the device is still busy past the deadline. */
		if (expired && busy) {
			pr_err("%s: Card stuck being busy! %s\n",
				mmc_hostname(host), __func__);
			return -ETIMEDOUT;
		}

		/* Throttle the polling rate to avoid hogging the CPU. */
		if (busy) {
			usleep_range(udelay, udelay * 2);
			if (udelay < udelay_max)
				udelay *= 2;
		}
	} while (busy);

	return 0;
}
EXPORT_SYMBOL_GPL(__mmc_poll_for_busy);
543716bdb89SUlf Hansson 
/* Poll @card for busy with the generic mmc_busy_cb() callback. */
int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
		      bool retry_crc_err, enum mmc_busy_cmd busy_cmd)
{
	struct mmc_busy_data cb_data = {
		.card = card,
		.retry_crc_err = retry_crc_err,
		.busy_cmd = busy_cmd,
	};

	return __mmc_poll_for_busy(card->host, 0, timeout_ms, &mmc_busy_cb,
				   &cb_data);
}
EXPORT_SYMBOL_GPL(mmc_poll_for_busy);
5570d84c3e6SUlf Hansson 
mmc_prepare_busy_cmd(struct mmc_host * host,struct mmc_command * cmd,unsigned int timeout_ms)558e62f1e0bSUlf Hansson bool mmc_prepare_busy_cmd(struct mmc_host *host, struct mmc_command *cmd,
559e62f1e0bSUlf Hansson 			  unsigned int timeout_ms)
560e62f1e0bSUlf Hansson {
561e62f1e0bSUlf Hansson 	/*
562e62f1e0bSUlf Hansson 	 * If the max_busy_timeout of the host is specified, make sure it's
563e62f1e0bSUlf Hansson 	 * enough to fit the used timeout_ms. In case it's not, let's instruct
564e62f1e0bSUlf Hansson 	 * the host to avoid HW busy detection, by converting to a R1 response
565e62f1e0bSUlf Hansson 	 * instead of a R1B. Note, some hosts requires R1B, which also means
566e62f1e0bSUlf Hansson 	 * they are on their own when it comes to deal with the busy timeout.
567e62f1e0bSUlf Hansson 	 */
568e62f1e0bSUlf Hansson 	if (!(host->caps & MMC_CAP_NEED_RSP_BUSY) && host->max_busy_timeout &&
569e62f1e0bSUlf Hansson 	    (timeout_ms > host->max_busy_timeout)) {
570e62f1e0bSUlf Hansson 		cmd->flags = MMC_CMD_AC | MMC_RSP_SPI_R1 | MMC_RSP_R1;
571e62f1e0bSUlf Hansson 		return false;
572e62f1e0bSUlf Hansson 	}
573e62f1e0bSUlf Hansson 
574e62f1e0bSUlf Hansson 	cmd->flags = MMC_CMD_AC | MMC_RSP_SPI_R1B | MMC_RSP_R1B;
575e62f1e0bSUlf Hansson 	cmd->busy_timeout = timeout_ms;
576e62f1e0bSUlf Hansson 	return true;
577e62f1e0bSUlf Hansson }
578*51f5b305SUlf Hansson EXPORT_SYMBOL_GPL(mmc_prepare_busy_cmd);
579e62f1e0bSUlf Hansson 
580d3a8d95dSAndrei Warkentin /**
581950d56acSJaehoon Chung  *	__mmc_switch - modify EXT_CSD register
582d3a8d95dSAndrei Warkentin  *	@card: the MMC card associated with the data transfer
583d3a8d95dSAndrei Warkentin  *	@set: cmd set values
584d3a8d95dSAndrei Warkentin  *	@index: EXT_CSD register index
585d3a8d95dSAndrei Warkentin  *	@value: value to program into EXT_CSD register
586d3a8d95dSAndrei Warkentin  *	@timeout_ms: timeout (ms) for operation performed by register write,
587d3a8d95dSAndrei Warkentin  *                   timeout of zero implies maximum possible timeout
588aa33ce3cSUlf Hansson  *	@timing: new timing to change to
589878e200bSUlf Hansson  *	@send_status: send status cmd to poll for busy
590625228faSUlf Hansson  *	@retry_crc_err: retry when CRC errors when polling with CMD13 for busy
5915e52a168SBean Huo  *	@retries: number of retries
592d3a8d95dSAndrei Warkentin  *
593d3a8d95dSAndrei Warkentin  *	Modifies the EXT_CSD register for selected card.
594d3a8d95dSAndrei Warkentin  */
int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
		unsigned int timeout_ms, unsigned char timing,
		bool send_status, bool retry_crc_err, unsigned int retries)
{
	struct mmc_host *host = card->host;
	int err;
	struct mmc_command cmd = {};
	bool use_r1b_resp;
	/* Saved so the timing can be rolled back if the switch status fails. */
	unsigned char old_timing = host->ios.timing;

	/* Hold off re-tuning while the CMD6 transition is in flight. */
	mmc_retune_hold(host);

	if (!timeout_ms) {
		pr_warn("%s: unspecified timeout for CMD6 - use generic\n",
			mmc_hostname(host));
		timeout_ms = card->ext_csd.generic_cmd6_time;
	}

	/* CMD6 argument: access mode, EXT_CSD byte index, value, command set. */
	cmd.opcode = MMC_SWITCH;
	cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
		  (index << 16) |
		  (value << 8) |
		  set;
	use_r1b_resp = mmc_prepare_busy_cmd(host, &cmd, timeout_ms);

	err = mmc_wait_for_cmd(host, &cmd, retries);
	if (err)
		goto out;

	/*If SPI or used HW busy detection above, then we don't need to poll. */
	if (((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp) ||
		mmc_host_is_spi(host))
		goto out_tim;

	/*
	 * If the host doesn't support HW polling via the ->card_busy() ops and
	 * when it's not allowed to poll by using CMD13, then we need to rely on
	 * waiting the stated timeout to be sufficient.
	 */
	if (!send_status && !host->ops->card_busy) {
		mmc_delay(timeout_ms);
		goto out_tim;
	}

	/* Let's try to poll to find out when the command is completed. */
	err = mmc_poll_for_busy(card, timeout_ms, retry_crc_err, MMC_BUSY_CMD6);
	if (err)
		goto out;

out_tim:
	/* Switch to new timing before check switch status. */
	if (timing)
		mmc_set_timing(host, timing);

	if (send_status) {
		err = mmc_switch_status(card, true);
		/* On failure, restore the timing that was active on entry. */
		if (err && timing)
			mmc_set_timing(host, old_timing);
	}
out:
	mmc_retune_release(host);

	return err;
}
659950d56acSJaehoon Chung 
/*
 * mmc_switch - write one EXT_CSD byte using the default CMD6 behaviour:
 * no timing change, CMD13 status polling enabled, no CRC-error retry on
 * the busy poll, and the standard command retry count.
 */
int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
		unsigned int timeout_ms)
{
	return __mmc_switch(card, set, index, value, timeout_ms, 0,
			    true, false, MMC_CMD_RETRIES);
}
EXPORT_SYMBOL_GPL(mmc_switch);
667da7fbe58SPierre Ossman 
/*
 * mmc_send_tuning - issue a tuning command and verify the returned block.
 * @host: host to tune
 * @opcode: tuning command (CMD19 or CMD21)
 * @cmd_error: if non-NULL, receives the raw command error, even on success
 *
 * Reads one tuning block and compares it against the reference pattern for
 * the current bus width. Returns 0 on match, -EINVAL for an unsupported bus
 * width, -ENOMEM on allocation failure, -EIO on pattern mismatch, or the
 * command/data error reported by the host.
 */
int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;
	struct mmc_ios *ios = &host->ios;
	const u8 *ref_pattern;
	int pattern_len;
	int ret = 0;
	u8 *rcv_buf;

	/* Pick the reference pattern matching the active bus width. */
	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_8:
		ref_pattern = tuning_blk_pattern_8bit;
		pattern_len = sizeof(tuning_blk_pattern_8bit);
		break;
	case MMC_BUS_WIDTH_4:
		ref_pattern = tuning_blk_pattern_4bit;
		pattern_len = sizeof(tuning_blk_pattern_4bit);
		break;
	default:
		return -EINVAL;
	}

	rcv_buf = kzalloc(pattern_len, GFP_KERNEL);
	if (!rcv_buf)
		return -ENOMEM;

	mrq.cmd = &cmd;
	mrq.data = &data;

	cmd.opcode = opcode;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = pattern_len;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	/*
	 * According to the tuning specs, Tuning process
	 * is normally shorter 40 executions of CMD19,
	 * and timeout value should be shorter than 150 ms
	 */
	data.timeout_ns = 150 * NSEC_PER_MSEC;

	data.sg = &sg;
	data.sg_len = 1;
	sg_init_one(&sg, rcv_buf, pattern_len);

	mmc_wait_for_req(host, &mrq);

	/* Hand the raw command result back to the caller if requested. */
	if (cmd_error)
		*cmd_error = cmd.error;

	if (cmd.error)
		ret = cmd.error;
	else if (data.error)
		ret = data.error;
	else if (memcmp(rcv_buf, ref_pattern, pattern_len))
		ret = -EIO;

	kfree(rcv_buf);
	return ret;
}
EXPORT_SYMBOL_GPL(mmc_send_tuning);
736996903deSMinda Chen 
/*
 * mmc_send_abort_tuning - abort an in-flight tuning command with CMD12.
 * Returns 0 without doing anything for non-eMMC tuning opcodes; otherwise
 * returns the result of sending MMC_STOP_TRANSMISSION.
 */
int mmc_send_abort_tuning(struct mmc_host *host, u32 opcode)
{
	struct mmc_command cmd = {};

	/*
	 * eMMC specification specifies that CMD12 can be used to stop a tuning
	 * command, but SD specification does not, so do nothing unless it is
	 * eMMC.
	 */
	if (opcode != MMC_SEND_TUNING_BLOCK_HS200)
		return 0;

	cmd.opcode = MMC_STOP_TRANSMISSION;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;

	/*
	 * For drivers that override R1 to R1b, set an arbitrary timeout based
	 * on the tuning timeout i.e. 150ms.
	 */
	cmd.busy_timeout = 150;

	return mmc_wait_for_cmd(host, &cmd, 0);
}
EXPORT_SYMBOL_GPL(mmc_send_abort_tuning);
761e711f030SAdrian Hunter 
76222113efdSAries Lee static int
mmc_send_bus_test(struct mmc_card * card,struct mmc_host * host,u8 opcode,u8 len)76322113efdSAries Lee mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
76422113efdSAries Lee 		  u8 len)
76522113efdSAries Lee {
766c7836d15SMasahiro Yamada 	struct mmc_request mrq = {};
767c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
768c7836d15SMasahiro Yamada 	struct mmc_data data = {};
76922113efdSAries Lee 	struct scatterlist sg;
77022113efdSAries Lee 	u8 *data_buf;
77122113efdSAries Lee 	u8 *test_buf;
77222113efdSAries Lee 	int i, err;
77322113efdSAries Lee 	static u8 testdata_8bit[8] = { 0x55, 0xaa, 0, 0, 0, 0, 0, 0 };
77422113efdSAries Lee 	static u8 testdata_4bit[4] = { 0x5a, 0, 0, 0 };
77522113efdSAries Lee 
77622113efdSAries Lee 	/* dma onto stack is unsafe/nonportable, but callers to this
77722113efdSAries Lee 	 * routine normally provide temporary on-stack buffers ...
77822113efdSAries Lee 	 */
77922113efdSAries Lee 	data_buf = kmalloc(len, GFP_KERNEL);
78022113efdSAries Lee 	if (!data_buf)
78122113efdSAries Lee 		return -ENOMEM;
78222113efdSAries Lee 
78322113efdSAries Lee 	if (len == 8)
78422113efdSAries Lee 		test_buf = testdata_8bit;
78522113efdSAries Lee 	else if (len == 4)
78622113efdSAries Lee 		test_buf = testdata_4bit;
78722113efdSAries Lee 	else {
788a3c76eb9SGirish K S 		pr_err("%s: Invalid bus_width %d\n",
78922113efdSAries Lee 		       mmc_hostname(host), len);
79022113efdSAries Lee 		kfree(data_buf);
79122113efdSAries Lee 		return -EINVAL;
79222113efdSAries Lee 	}
79322113efdSAries Lee 
79422113efdSAries Lee 	if (opcode == MMC_BUS_TEST_W)
79522113efdSAries Lee 		memcpy(data_buf, test_buf, len);
79622113efdSAries Lee 
79722113efdSAries Lee 	mrq.cmd = &cmd;
79822113efdSAries Lee 	mrq.data = &data;
79922113efdSAries Lee 	cmd.opcode = opcode;
80022113efdSAries Lee 	cmd.arg = 0;
80122113efdSAries Lee 
80222113efdSAries Lee 	/* NOTE HACK:  the MMC_RSP_SPI_R1 is always correct here, but we
80322113efdSAries Lee 	 * rely on callers to never use this with "native" calls for reading
80422113efdSAries Lee 	 * CSD or CID.  Native versions of those commands use the R2 type,
80522113efdSAries Lee 	 * not R1 plus a data block.
80622113efdSAries Lee 	 */
80722113efdSAries Lee 	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
80822113efdSAries Lee 
80922113efdSAries Lee 	data.blksz = len;
81022113efdSAries Lee 	data.blocks = 1;
81122113efdSAries Lee 	if (opcode == MMC_BUS_TEST_R)
81222113efdSAries Lee 		data.flags = MMC_DATA_READ;
81322113efdSAries Lee 	else
81422113efdSAries Lee 		data.flags = MMC_DATA_WRITE;
81522113efdSAries Lee 
81622113efdSAries Lee 	data.sg = &sg;
81722113efdSAries Lee 	data.sg_len = 1;
81884532e33SMinjian Wu 	mmc_set_data_timeout(&data, card);
81922113efdSAries Lee 	sg_init_one(&sg, data_buf, len);
82022113efdSAries Lee 	mmc_wait_for_req(host, &mrq);
82122113efdSAries Lee 	err = 0;
82222113efdSAries Lee 	if (opcode == MMC_BUS_TEST_R) {
82322113efdSAries Lee 		for (i = 0; i < len / 4; i++)
82422113efdSAries Lee 			if ((test_buf[i] ^ data_buf[i]) != 0xff) {
82522113efdSAries Lee 				err = -EIO;
82622113efdSAries Lee 				break;
82722113efdSAries Lee 			}
82822113efdSAries Lee 	}
82922113efdSAries Lee 	kfree(data_buf);
83022113efdSAries Lee 
83122113efdSAries Lee 	if (cmd.error)
83222113efdSAries Lee 		return cmd.error;
83322113efdSAries Lee 	if (data.error)
83422113efdSAries Lee 		return data.error;
83522113efdSAries Lee 
83622113efdSAries Lee 	return err;
83722113efdSAries Lee }
83822113efdSAries Lee 
/*
 * mmc_bus_test - verify the data lines for the given bus width.
 * Returns 0 for a 1-bit bus (nothing to test), -EINVAL for an unknown
 * width, otherwise the result of the BUS_TEST_R read-back.
 */
int mmc_bus_test(struct mmc_card *card, u8 bus_width)
{
	int width;

	switch (bus_width) {
	case MMC_BUS_WIDTH_8:
		width = 8;
		break;
	case MMC_BUS_WIDTH_4:
		width = 4;
		break;
	case MMC_BUS_WIDTH_1:
		return 0; /* no need for test */
	default:
		return -EINVAL;
	}

	/*
	 * Ignore errors from BUS_TEST_W.  BUS_TEST_R will fail if there
	 * is a problem.  This improves chances that the test will work.
	 */
	mmc_send_bus_test(card, card->host, MMC_BUS_TEST_W, width);
	return mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
}
859eb0d8f13SJaehoon Chung 
/*
 * mmc_send_hpi_cmd - issue the card's HPI command and wait until it leaves
 * the programming state, either via HW busy detection or CMD13 polling.
 */
static int mmc_send_hpi_cmd(struct mmc_card *card)
{
	unsigned int busy_timeout_ms = card->ext_csd.out_of_int_time;
	struct mmc_host *host = card->host;
	bool use_r1b_resp = false;
	struct mmc_command cmd = {};
	int err;

	/* Argument: RCA in bits [31:16], bit 0 set to request HPI. */
	cmd.opcode = card->ext_csd.hpi_cmd;
	cmd.arg = card->rca << 16 | 1;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	/* A CMD12-style HPI may use an R1B response with a busy timeout. */
	if (cmd.opcode == MMC_STOP_TRANSMISSION)
		use_r1b_resp = mmc_prepare_busy_cmd(host, &cmd,
						    busy_timeout_ms);

	err = mmc_wait_for_cmd(host, &cmd, 0);
	if (err) {
		pr_warn("%s: HPI error %d. Command response %#x\n",
			mmc_hostname(host), err, cmd.resp[0]);
		return err;
	}

	/* No need to poll when using HW busy detection. */
	if (host->caps & MMC_CAP_WAIT_WHILE_BUSY && use_r1b_resp)
		return 0;

	/* Let's poll to find out when the HPI request completes. */
	return mmc_poll_for_busy(card, busy_timeout_ms, false, MMC_BUSY_HPI);
}
890148bcab2SUlf Hansson 
8910f2c0512SUlf Hansson /**
8920f2c0512SUlf Hansson  *	mmc_interrupt_hpi - Issue for High priority Interrupt
8930f2c0512SUlf Hansson  *	@card: the MMC card associated with the HPI transfer
8940f2c0512SUlf Hansson  *
8950f2c0512SUlf Hansson  *	Issued High Priority Interrupt, and check for card status
8960f2c0512SUlf Hansson  *	until out-of prg-state.
8970f2c0512SUlf Hansson  */
mmc_interrupt_hpi(struct mmc_card * card)89844aebc16SJason Yan static int mmc_interrupt_hpi(struct mmc_card *card)
8990f2c0512SUlf Hansson {
9000f2c0512SUlf Hansson 	int err;
9010f2c0512SUlf Hansson 	u32 status;
9020f2c0512SUlf Hansson 
9030f2c0512SUlf Hansson 	if (!card->ext_csd.hpi_en) {
9040f2c0512SUlf Hansson 		pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
9050f2c0512SUlf Hansson 		return 1;
9060f2c0512SUlf Hansson 	}
9070f2c0512SUlf Hansson 
9080f2c0512SUlf Hansson 	err = mmc_send_status(card, &status);
9090f2c0512SUlf Hansson 	if (err) {
9100f2c0512SUlf Hansson 		pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
9110f2c0512SUlf Hansson 		goto out;
9120f2c0512SUlf Hansson 	}
9130f2c0512SUlf Hansson 
9140f2c0512SUlf Hansson 	switch (R1_CURRENT_STATE(status)) {
9150f2c0512SUlf Hansson 	case R1_STATE_IDLE:
9160f2c0512SUlf Hansson 	case R1_STATE_READY:
9170f2c0512SUlf Hansson 	case R1_STATE_STBY:
9180f2c0512SUlf Hansson 	case R1_STATE_TRAN:
9190f2c0512SUlf Hansson 		/*
9200f2c0512SUlf Hansson 		 * In idle and transfer states, HPI is not needed and the caller
9210f2c0512SUlf Hansson 		 * can issue the next intended command immediately
9220f2c0512SUlf Hansson 		 */
9230f2c0512SUlf Hansson 		goto out;
9240f2c0512SUlf Hansson 	case R1_STATE_PRG:
9250f2c0512SUlf Hansson 		break;
9260f2c0512SUlf Hansson 	default:
9270f2c0512SUlf Hansson 		/* In all other states, it's illegal to issue HPI */
9280f2c0512SUlf Hansson 		pr_debug("%s: HPI cannot be sent. Card state=%d\n",
9290f2c0512SUlf Hansson 			mmc_hostname(card->host), R1_CURRENT_STATE(status));
9300f2c0512SUlf Hansson 		err = -EINVAL;
9310f2c0512SUlf Hansson 		goto out;
9320f2c0512SUlf Hansson 	}
9330f2c0512SUlf Hansson 
9349f94d047SUlf Hansson 	err = mmc_send_hpi_cmd(card);
9350f2c0512SUlf Hansson out:
9360f2c0512SUlf Hansson 	return err;
9370f2c0512SUlf Hansson }
9380f2c0512SUlf Hansson 
/* True when the card exists and its CSD spec version is newer than v3,
 * i.e. the card has an EXT_CSD register that can be read. */
int mmc_can_ext_csd(struct mmc_card *card)
{
	return (card && card->csd.mmca_vsn > CSD_SPEC_VER_3);
}
943b658af71SAdrian Hunter 
mmc_read_bkops_status(struct mmc_card * card)9441cf8f7e5SUlf Hansson static int mmc_read_bkops_status(struct mmc_card *card)
9451cf8f7e5SUlf Hansson {
9461cf8f7e5SUlf Hansson 	int err;
9471cf8f7e5SUlf Hansson 	u8 *ext_csd;
9481cf8f7e5SUlf Hansson 
9491cf8f7e5SUlf Hansson 	err = mmc_get_ext_csd(card, &ext_csd);
9501cf8f7e5SUlf Hansson 	if (err)
9511cf8f7e5SUlf Hansson 		return err;
9521cf8f7e5SUlf Hansson 
9531cf8f7e5SUlf Hansson 	card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS];
9541cf8f7e5SUlf Hansson 	card->ext_csd.raw_exception_status = ext_csd[EXT_CSD_EXP_EVENTS_STATUS];
9551cf8f7e5SUlf Hansson 	kfree(ext_csd);
9561cf8f7e5SUlf Hansson 	return 0;
9571cf8f7e5SUlf Hansson }
9581cf8f7e5SUlf Hansson 
9591cf8f7e5SUlf Hansson /**
9600c204979SUlf Hansson  *	mmc_run_bkops - Run BKOPS for supported cards
9610c204979SUlf Hansson  *	@card: MMC card to run BKOPS for
9621cf8f7e5SUlf Hansson  *
9630c204979SUlf Hansson  *	Run background operations synchronously for cards having manual BKOPS
9640c204979SUlf Hansson  *	enabled and in case it reports urgent BKOPS level.
9651cf8f7e5SUlf Hansson */
void mmc_run_bkops(struct mmc_card *card)
{
	int err;

	/* Only act when manual BKOPS control is enabled for this card. */
	if (!card->ext_csd.man_bkops_en)
		return;

	err = mmc_read_bkops_status(card);
	if (err) {
		pr_err("%s: Failed to read bkops status: %d\n",
		       mmc_hostname(card->host), err);
		return;
	}

	/* Nothing to do unless the card reports an urgent BKOPS level. */
	if (!card->ext_csd.raw_bkops_status ||
	    card->ext_csd.raw_bkops_status < EXT_CSD_BKOPS_LEVEL_2)
		return;

	/* Block re-tuning while the card is busy running BKOPS. */
	mmc_retune_hold(card->host);

	/*
	 * For urgent BKOPS status, LEVEL_2 and higher, let's execute
	 * synchronously. Future wise, we may consider to start BKOPS, for less
	 * urgent levels by using an asynchronous background task, when idle.
	 */
	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_BKOPS_START, 1, MMC_BKOPS_TIMEOUT_MS);
	/*
	 * If the BKOPS timed out, the card is probably still busy in the
	 * R1_STATE_PRG. Rather than continue to wait, let's try to abort
	 * it with a HPI command to get back into R1_STATE_TRAN.
	 */
	if (err == -ETIMEDOUT && !mmc_interrupt_hpi(card))
		pr_warn("%s: BKOPS aborted\n", mmc_hostname(card->host));
	else if (err)
		pr_warn("%s: Error %d running bkops\n",
			mmc_hostname(card->host), err);

	mmc_retune_release(card->host);
}
EXPORT_SYMBOL(mmc_run_bkops);
10071cf8f7e5SUlf Hansson 
/*
 * mmc_cmdq_switch - turn the card's command queue mode on or off via CMD6
 * and mirror the new state in card->ext_csd.cmdq_en on success.
 */
static int mmc_cmdq_switch(struct mmc_card *card, bool enable)
{
	int err;
	u8 val;

	if (!card->ext_csd.cmdq_support)
		return -EOPNOTSUPP;

	val = enable ? EXT_CSD_CMDQ_MODE_ENABLED : 0;
	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_CMDQ_MODE_EN,
			 val, card->ext_csd.generic_cmd6_time);
	if (!err)
		card->ext_csd.cmdq_en = enable;

	return err;
}
1023b658af71SAdrian Hunter 
/* Enable the card's command queue; see mmc_cmdq_switch() for details. */
int mmc_cmdq_enable(struct mmc_card *card)
{
	return mmc_cmdq_switch(card, true);
}
EXPORT_SYMBOL_GPL(mmc_cmdq_enable);
1029b658af71SAdrian Hunter 
/* Disable the card's command queue; see mmc_cmdq_switch() for details. */
int mmc_cmdq_disable(struct mmc_card *card)
{
	return mmc_cmdq_switch(card, false);
}
EXPORT_SYMBOL_GPL(mmc_cmdq_disable);
103555c2b8b9SUlf Hansson 
/*
 * mmc_sanitize - start a SANITIZE operation and wait for it to finish.
 * @card: card to sanitize
 * @timeout_ms: busy timeout; 0 selects the default MMC_SANITIZE_TIMEOUT_MS
 *
 * Writes EXT_CSD_SANITIZE_START via __mmc_switch(). On -ETIMEDOUT, attempts
 * to abort the still-busy operation with HPI. Returns the switch result.
 */
int mmc_sanitize(struct mmc_card *card, unsigned int timeout_ms)
{
	struct mmc_host *host = card->host;
	int err;

	if (!mmc_can_sanitize(card)) {
		pr_warn("%s: Sanitize not supported\n", mmc_hostname(host));
		return -EOPNOTSUPP;
	}

	if (!timeout_ms)
		timeout_ms = MMC_SANITIZE_TIMEOUT_MS;

	pr_debug("%s: Sanitize in progress...\n", mmc_hostname(host));

	/* Hold off re-tuning for the duration of the sanitize. */
	mmc_retune_hold(host);

	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_SANITIZE_START,
			   1, timeout_ms, 0, true, false, 0);
	if (err)
		pr_err("%s: Sanitize failed err=%d\n", mmc_hostname(host), err);

	/*
	 * If the sanitize operation timed out, the card is probably still busy
	 * in the R1_STATE_PRG. Rather than continue to wait, let's try to abort
	 * it with a HPI command to get back into R1_STATE_TRAN.
	 */
	if (err == -ETIMEDOUT && !mmc_interrupt_hpi(card))
		pr_warn("%s: Sanitize aborted\n", mmc_hostname(host));

	mmc_retune_release(host);

	pr_debug("%s: Sanitize completed\n", mmc_hostname(host));
	return err;
}
EXPORT_SYMBOL_GPL(mmc_sanitize);
1072