xref: /openbmc/linux/drivers/mmc/core/mmc_ops.c (revision 76bfc7cc)
12874c5fdSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-or-later
2da7fbe58SPierre Ossman /*
370f10482SPierre Ossman  *  linux/drivers/mmc/core/mmc_ops.c
4da7fbe58SPierre Ossman  *
5da7fbe58SPierre Ossman  *  Copyright 2006-2007 Pierre Ossman
6da7fbe58SPierre Ossman  */
7da7fbe58SPierre Ossman 
85a0e3ad6STejun Heo #include <linux/slab.h>
93ef77af1SPaul Gortmaker #include <linux/export.h>
10da7fbe58SPierre Ossman #include <linux/types.h>
11da7fbe58SPierre Ossman #include <linux/scatterlist.h>
12da7fbe58SPierre Ossman 
13da7fbe58SPierre Ossman #include <linux/mmc/host.h>
14da7fbe58SPierre Ossman #include <linux/mmc/card.h>
15da7fbe58SPierre Ossman #include <linux/mmc/mmc.h>
16da7fbe58SPierre Ossman 
17da7fbe58SPierre Ossman #include "core.h"
181cf8f7e5SUlf Hansson #include "card.h"
19c6dbab9cSAdrian Hunter #include "host.h"
20da7fbe58SPierre Ossman #include "mmc_ops.h"
21da7fbe58SPierre Ossman 
2224ed3bd0SUlf Hansson #define MMC_BKOPS_TIMEOUT_MS		(120 * 1000) /* 120s */
2355c2b8b9SUlf Hansson #define MMC_SANITIZE_TIMEOUT_MS		(240 * 1000) /* 240s */
248fee476bSTrey Ramsay 
/*
 * Tuning block pattern sent by the card for 4-bit bus width; selected in
 * mmc_send_tuning() when ios.bus_width == MMC_BUS_WIDTH_4.
 */
static const u8 tuning_blk_pattern_4bit[] = {
	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
};
3504cdbbfaSUlf Hansson 
/*
 * Tuning block pattern for 8-bit bus width; selected in mmc_send_tuning()
 * when ios.bus_width == MMC_BUS_WIDTH_8.
 */
static const u8 tuning_blk_pattern_8bit[] = {
	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
};
5404cdbbfaSUlf Hansson 
/* Context handed to mmc_busy_cb() while polling a card for busy. */
struct mmc_busy_data {
	struct mmc_card *card;		/* card being polled */
	bool retry_crc_err;		/* treat -EILSEQ on CMD13 as "still busy" */
	enum mmc_busy_cmd busy_cmd;	/* operation whose completion we await */
};
6004f967adSUlf Hansson 
/* Context handed to __mmc_send_op_cond_cb() while polling CMD1. */
struct mmc_op_cond_busy_data {
	struct mmc_host *host;		/* host to issue CMD1 on */
	u32 ocr;			/* OCR argument supplied by the caller */
	struct mmc_command *cmd;	/* CMD1 command, re-sent on every poll */
};
66*76bfc7ccSHuijin Park 
672185bc2cSUlf Hansson int __mmc_send_status(struct mmc_card *card, u32 *status, unsigned int retries)
68a27fbf2fSSeungwon Jeon {
69a27fbf2fSSeungwon Jeon 	int err;
70c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
71a27fbf2fSSeungwon Jeon 
72a27fbf2fSSeungwon Jeon 	cmd.opcode = MMC_SEND_STATUS;
73a27fbf2fSSeungwon Jeon 	if (!mmc_host_is_spi(card->host))
74a27fbf2fSSeungwon Jeon 		cmd.arg = card->rca << 16;
75a27fbf2fSSeungwon Jeon 	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
76a27fbf2fSSeungwon Jeon 
772185bc2cSUlf Hansson 	err = mmc_wait_for_cmd(card->host, &cmd, retries);
78a27fbf2fSSeungwon Jeon 	if (err)
79a27fbf2fSSeungwon Jeon 		return err;
80a27fbf2fSSeungwon Jeon 
81a27fbf2fSSeungwon Jeon 	/* NOTE: callers are required to understand the difference
82a27fbf2fSSeungwon Jeon 	 * between "native" and SPI format status words!
83a27fbf2fSSeungwon Jeon 	 */
84a27fbf2fSSeungwon Jeon 	if (status)
85a27fbf2fSSeungwon Jeon 		*status = cmd.resp[0];
86a27fbf2fSSeungwon Jeon 
87a27fbf2fSSeungwon Jeon 	return 0;
88a27fbf2fSSeungwon Jeon }
892185bc2cSUlf Hansson EXPORT_SYMBOL_GPL(__mmc_send_status);
902185bc2cSUlf Hansson 
912185bc2cSUlf Hansson int mmc_send_status(struct mmc_card *card, u32 *status)
922185bc2cSUlf Hansson {
932185bc2cSUlf Hansson 	return __mmc_send_status(card, status, MMC_CMD_RETRIES);
942185bc2cSUlf Hansson }
951bee324aSLinus Walleij EXPORT_SYMBOL_GPL(mmc_send_status);
96a27fbf2fSSeungwon Jeon 
97da7fbe58SPierre Ossman static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card)
98da7fbe58SPierre Ossman {
99c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
100da7fbe58SPierre Ossman 
101da7fbe58SPierre Ossman 	cmd.opcode = MMC_SELECT_CARD;
102da7fbe58SPierre Ossman 
103da7fbe58SPierre Ossman 	if (card) {
104da7fbe58SPierre Ossman 		cmd.arg = card->rca << 16;
105da7fbe58SPierre Ossman 		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
106da7fbe58SPierre Ossman 	} else {
107da7fbe58SPierre Ossman 		cmd.arg = 0;
108da7fbe58SPierre Ossman 		cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
109da7fbe58SPierre Ossman 	}
110da7fbe58SPierre Ossman 
1110899e741SMasahiro Yamada 	return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
112da7fbe58SPierre Ossman }
113da7fbe58SPierre Ossman 
114da7fbe58SPierre Ossman int mmc_select_card(struct mmc_card *card)
115da7fbe58SPierre Ossman {
116da7fbe58SPierre Ossman 
117da7fbe58SPierre Ossman 	return _mmc_select_card(card->host, card);
118da7fbe58SPierre Ossman }
119da7fbe58SPierre Ossman 
120da7fbe58SPierre Ossman int mmc_deselect_cards(struct mmc_host *host)
121da7fbe58SPierre Ossman {
122da7fbe58SPierre Ossman 	return _mmc_select_card(host, NULL);
123da7fbe58SPierre Ossman }
124da7fbe58SPierre Ossman 
1253d705d14SSascha Hauer /*
1263d705d14SSascha Hauer  * Write the value specified in the device tree or board code into the optional
1273d705d14SSascha Hauer  * 16 bit Driver Stage Register. This can be used to tune raise/fall times and
1283d705d14SSascha Hauer  * drive strength of the DAT and CMD outputs. The actual meaning of a given
1293d705d14SSascha Hauer  * value is hardware dependent.
1303d705d14SSascha Hauer  * The presence of the DSR register can be determined from the CSD register,
1313d705d14SSascha Hauer  * bit 76.
1323d705d14SSascha Hauer  */
1333d705d14SSascha Hauer int mmc_set_dsr(struct mmc_host *host)
1343d705d14SSascha Hauer {
135c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
1363d705d14SSascha Hauer 
1373d705d14SSascha Hauer 	cmd.opcode = MMC_SET_DSR;
1383d705d14SSascha Hauer 
1393d705d14SSascha Hauer 	cmd.arg = (host->dsr << 16) | 0xffff;
1403d705d14SSascha Hauer 	cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
1413d705d14SSascha Hauer 
1423d705d14SSascha Hauer 	return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
1433d705d14SSascha Hauer }
1443d705d14SSascha Hauer 
/*
 * Reset the card to idle state with CMD0 (GO_IDLE_STATE).
 * Also clears use_spi_crc, since CMD0 leaves an SPI card with CRC
 * checking disabled again.
 */
int mmc_go_idle(struct mmc_host *host)
{
	int err;
	struct mmc_command cmd = {};

	/*
	 * Non-SPI hosts need to prevent chipselect going active during
	 * GO_IDLE; that would put chips into SPI mode.  Remind them of
	 * that in case of hardware that won't pull up DAT3/nCS otherwise.
	 *
	 * SPI hosts ignore ios.chip_select; it's managed according to
	 * rules that must accommodate non-MMC slaves which this layer
	 * won't even know about.
	 */
	if (!mmc_host_is_spi(host)) {
		mmc_set_chip_select(host, MMC_CS_HIGH);
		mmc_delay(1);
	}

	cmd.opcode = MMC_GO_IDLE_STATE;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC;

	/* No retries: CMD0 gets no native response to verify anyway. */
	err = mmc_wait_for_cmd(host, &cmd, 0);

	/* Give the card a moment to settle after the reset. */
	mmc_delay(1);

	if (!mmc_host_is_spi(host)) {
		mmc_set_chip_select(host, MMC_CS_DONTCARE);
		mmc_delay(1);
	}

	host->use_spi_crc = 0;

	return err;
}
181da7fbe58SPierre Ossman 
/*
 * Busy-poll callback for CMD1 (SEND_OP_COND): re-issues CMD1 and reports
 * through *busy whether the card still signals power-up busy.
 */
static int __mmc_send_op_cond_cb(void *cb_data, bool *busy)
{
	struct mmc_op_cond_busy_data *data = cb_data;
	struct mmc_host *host = data->host;
	struct mmc_command *cmd = data->cmd;
	u32 ocr = data->ocr;
	int err = 0;

	err = mmc_wait_for_cmd(host, cmd, 0);
	if (err)
		return err;

	if (mmc_host_is_spi(host)) {
		/* In SPI mode the card is ready once the idle bit clears. */
		if (!(cmd->resp[0] & R1_SPI_IDLE)) {
			*busy = false;
			return 0;
		}
	} else {
		/* Natively, a set busy bit means power-up has completed. */
		if (cmd->resp[0] & MMC_CARD_BUSY) {
			*busy = false;
			return 0;
		}
	}

	*busy = true;

	/*
	 * According to eMMC specification v5.1 section 6.4.3, we
	 * should issue CMD1 repeatedly in the idle state until
	 * the eMMC is ready. Otherwise some eMMC devices seem to enter
	 * the inactive mode after mmc_init_card() issued CMD0 when
	 * the eMMC device is busy.
	 */
	if (!ocr && !mmc_host_is_spi(host))
		cmd->arg = cmd->resp[0] | BIT(30);

	return 0;
}
220da7fbe58SPierre Ossman 
/*
 * Issue CMD1 (SEND_OP_COND) and poll, for up to one second, until the
 * card no longer reports busy.  For native hosts the resulting OCR is
 * returned through @rocr when requested.
 */
int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
{
	struct mmc_command cmd = {};
	int err = 0;
	struct mmc_op_cond_busy_data cb_data = {
		.host = host,
		.ocr = ocr,
		.cmd = &cmd
	};

	cmd.opcode = MMC_SEND_OP_COND;
	/* SPI hosts probe with a zero argument; native hosts send the OCR. */
	cmd.arg = mmc_host_is_spi(host) ? 0 : ocr;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;

	/* The callback re-sends CMD1 and may rewrite cmd.arg (see above). */
	err = __mmc_poll_for_busy(host, 1000, &__mmc_send_op_cond_cb, &cb_data);
	if (err)
		return err;

	if (rocr && !mmc_host_is_spi(host))
		*rocr = cmd.resp[0];

	return err;
}
244da7fbe58SPierre Ossman 
245da7fbe58SPierre Ossman int mmc_set_relative_addr(struct mmc_card *card)
246da7fbe58SPierre Ossman {
247c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
248da7fbe58SPierre Ossman 
249da7fbe58SPierre Ossman 	cmd.opcode = MMC_SET_RELATIVE_ADDR;
250da7fbe58SPierre Ossman 	cmd.arg = card->rca << 16;
251da7fbe58SPierre Ossman 	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
252da7fbe58SPierre Ossman 
2530899e741SMasahiro Yamada 	return mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
254da7fbe58SPierre Ossman }
255da7fbe58SPierre Ossman 
256af517150SDavid Brownell static int
257af517150SDavid Brownell mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode)
258da7fbe58SPierre Ossman {
259da7fbe58SPierre Ossman 	int err;
260c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
261da7fbe58SPierre Ossman 
262af517150SDavid Brownell 	cmd.opcode = opcode;
263af517150SDavid Brownell 	cmd.arg = arg;
264da7fbe58SPierre Ossman 	cmd.flags = MMC_RSP_R2 | MMC_CMD_AC;
265da7fbe58SPierre Ossman 
266af517150SDavid Brownell 	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
26717b0429dSPierre Ossman 	if (err)
268da7fbe58SPierre Ossman 		return err;
269da7fbe58SPierre Ossman 
270af517150SDavid Brownell 	memcpy(cxd, cmd.resp, sizeof(u32) * 4);
271da7fbe58SPierre Ossman 
27217b0429dSPierre Ossman 	return 0;
273da7fbe58SPierre Ossman }
274da7fbe58SPierre Ossman 
2751a41313eSKyungsik Lee /*
2761a41313eSKyungsik Lee  * NOTE: void *buf, caller for the buf is required to use DMA-capable
2771a41313eSKyungsik Lee  * buffer or on-stack buffer (with some overhead in callee).
2781a41313eSKyungsik Lee  */
/*
 * Issue an ADTC (data transfer) command and read a single block of @len
 * bytes into @buf.  @card may be NULL (e.g. for SPI CSD/CID reads before
 * a card object exists); it is only used to derive the data timeout.
 */
int mmc_send_adtc_data(struct mmc_card *card, struct mmc_host *host, u32 opcode,
		       u32 args, void *buf, unsigned len)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;

	mrq.cmd = &cmd;
	mrq.data = &data;

	cmd.opcode = opcode;
	cmd.arg = args;

	/* NOTE HACK:  the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID.  Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	/* Single-block read of @len bytes through a one-entry sglist. */
	data.blksz = len;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;

	sg_init_one(&sg, buf, len);

	if (opcode == MMC_SEND_CSD || opcode == MMC_SEND_CID) {
		/*
		 * The spec states that CSR and CID accesses have a timeout
		 * of 64 clock cycles.
		 */
		data.timeout_ns = 0;
		data.timeout_clks = 64;
	} else
		mmc_set_data_timeout(&data, card);

	mmc_wait_for_req(host, &mrq);

	/* Command errors take precedence over data errors. */
	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return 0;
}
327da7fbe58SPierre Ossman 
328b53f0beeSYue Hu static int mmc_spi_send_cxd(struct mmc_host *host, u32 *cxd, u32 opcode)
329af517150SDavid Brownell {
33078e48073SPierre Ossman 	int ret, i;
331b53f0beeSYue Hu 	__be32 *cxd_tmp;
33278e48073SPierre Ossman 
333b53f0beeSYue Hu 	cxd_tmp = kzalloc(16, GFP_KERNEL);
334b53f0beeSYue Hu 	if (!cxd_tmp)
3351a41313eSKyungsik Lee 		return -ENOMEM;
3361a41313eSKyungsik Lee 
337cec18ad9SUlf Hansson 	ret = mmc_send_adtc_data(NULL, host, opcode, 0, cxd_tmp, 16);
33878e48073SPierre Ossman 	if (ret)
3391a41313eSKyungsik Lee 		goto err;
34078e48073SPierre Ossman 
34178e48073SPierre Ossman 	for (i = 0; i < 4; i++)
342b53f0beeSYue Hu 		cxd[i] = be32_to_cpu(cxd_tmp[i]);
34378e48073SPierre Ossman 
3441a41313eSKyungsik Lee err:
345b53f0beeSYue Hu 	kfree(cxd_tmp);
3461a41313eSKyungsik Lee 	return ret;
347af517150SDavid Brownell }
348af517150SDavid Brownell 
3490796e439SUlf Hansson int mmc_send_csd(struct mmc_card *card, u32 *csd)
3500796e439SUlf Hansson {
3510796e439SUlf Hansson 	if (mmc_host_is_spi(card->host))
352b53f0beeSYue Hu 		return mmc_spi_send_cxd(card->host, csd, MMC_SEND_CSD);
3530796e439SUlf Hansson 
3540796e439SUlf Hansson 	return mmc_send_cxd_native(card->host, card->rca << 16,	csd,
3550796e439SUlf Hansson 				MMC_SEND_CSD);
3560796e439SUlf Hansson }
3570796e439SUlf Hansson 
358a1473732SUlf Hansson int mmc_send_cid(struct mmc_host *host, u32 *cid)
359a1473732SUlf Hansson {
360a1473732SUlf Hansson 	if (mmc_host_is_spi(host))
361b53f0beeSYue Hu 		return mmc_spi_send_cxd(host, cid, MMC_SEND_CID);
362a1473732SUlf Hansson 
363c92e68d8SUlf Hansson 	return mmc_send_cxd_native(host, 0, cid, MMC_ALL_SEND_CID);
364a1473732SUlf Hansson }
365a1473732SUlf Hansson 
366e21aa519SUlf Hansson int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)
367e21aa519SUlf Hansson {
368e21aa519SUlf Hansson 	int err;
369e21aa519SUlf Hansson 	u8 *ext_csd;
370e21aa519SUlf Hansson 
371e21aa519SUlf Hansson 	if (!card || !new_ext_csd)
372e21aa519SUlf Hansson 		return -EINVAL;
373e21aa519SUlf Hansson 
374e21aa519SUlf Hansson 	if (!mmc_can_ext_csd(card))
375e21aa519SUlf Hansson 		return -EOPNOTSUPP;
376e21aa519SUlf Hansson 
377e21aa519SUlf Hansson 	/*
378e21aa519SUlf Hansson 	 * As the ext_csd is so large and mostly unused, we don't store the
379e21aa519SUlf Hansson 	 * raw block in mmc_card.
380e21aa519SUlf Hansson 	 */
38122b78700SUlf Hansson 	ext_csd = kzalloc(512, GFP_KERNEL);
382e21aa519SUlf Hansson 	if (!ext_csd)
383e21aa519SUlf Hansson 		return -ENOMEM;
384e21aa519SUlf Hansson 
385cec18ad9SUlf Hansson 	err = mmc_send_adtc_data(card, card->host, MMC_SEND_EXT_CSD, 0, ext_csd,
3862fc91e8bSUlf Hansson 				512);
387e21aa519SUlf Hansson 	if (err)
388e21aa519SUlf Hansson 		kfree(ext_csd);
389e21aa519SUlf Hansson 	else
390e21aa519SUlf Hansson 		*new_ext_csd = ext_csd;
391e21aa519SUlf Hansson 
392e21aa519SUlf Hansson 	return err;
393e21aa519SUlf Hansson }
394e21aa519SUlf Hansson EXPORT_SYMBOL_GPL(mmc_get_ext_csd);
395e21aa519SUlf Hansson 
396af517150SDavid Brownell int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp)
397af517150SDavid Brownell {
398c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
399af517150SDavid Brownell 	int err;
400af517150SDavid Brownell 
401af517150SDavid Brownell 	cmd.opcode = MMC_SPI_READ_OCR;
402af517150SDavid Brownell 	cmd.arg = highcap ? (1 << 30) : 0;
403af517150SDavid Brownell 	cmd.flags = MMC_RSP_SPI_R3;
404af517150SDavid Brownell 
405af517150SDavid Brownell 	err = mmc_wait_for_cmd(host, &cmd, 0);
406af517150SDavid Brownell 
407af517150SDavid Brownell 	*ocrp = cmd.resp[1];
408af517150SDavid Brownell 	return err;
409af517150SDavid Brownell }
410af517150SDavid Brownell 
411af517150SDavid Brownell int mmc_spi_set_crc(struct mmc_host *host, int use_crc)
412af517150SDavid Brownell {
413c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
414af517150SDavid Brownell 	int err;
415af517150SDavid Brownell 
416af517150SDavid Brownell 	cmd.opcode = MMC_SPI_CRC_ON_OFF;
417af517150SDavid Brownell 	cmd.flags = MMC_RSP_SPI_R1;
418af517150SDavid Brownell 	cmd.arg = use_crc;
419af517150SDavid Brownell 
420af517150SDavid Brownell 	err = mmc_wait_for_cmd(host, &cmd, 0);
421af517150SDavid Brownell 	if (!err)
422af517150SDavid Brownell 		host->use_spi_crc = use_crc;
423af517150SDavid Brownell 	return err;
424af517150SDavid Brownell }
425af517150SDavid Brownell 
42620348d19SUlf Hansson static int mmc_switch_status_error(struct mmc_host *host, u32 status)
427ed16f58dSAdrian Hunter {
428ed16f58dSAdrian Hunter 	if (mmc_host_is_spi(host)) {
429ed16f58dSAdrian Hunter 		if (status & R1_SPI_ILLEGAL_COMMAND)
430ed16f58dSAdrian Hunter 			return -EBADMSG;
431ed16f58dSAdrian Hunter 	} else {
432a94a7483SShawn Lin 		if (R1_STATUS(status))
433ed16f58dSAdrian Hunter 			pr_warn("%s: unexpected status %#x after switch\n",
434ed16f58dSAdrian Hunter 				mmc_hostname(host), status);
435ed16f58dSAdrian Hunter 		if (status & R1_SWITCH_ERROR)
436ed16f58dSAdrian Hunter 			return -EBADMSG;
437ed16f58dSAdrian Hunter 	}
438ed16f58dSAdrian Hunter 	return 0;
439ed16f58dSAdrian Hunter }
440ed16f58dSAdrian Hunter 
44120348d19SUlf Hansson /* Caller must hold re-tuning */
44260db8a47SUlf Hansson int mmc_switch_status(struct mmc_card *card, bool crc_err_fatal)
44320348d19SUlf Hansson {
44420348d19SUlf Hansson 	u32 status;
44520348d19SUlf Hansson 	int err;
44620348d19SUlf Hansson 
44720348d19SUlf Hansson 	err = mmc_send_status(card, &status);
448ef3d2322SAdrian Hunter 	if (!crc_err_fatal && err == -EILSEQ)
449ef3d2322SAdrian Hunter 		return 0;
45020348d19SUlf Hansson 	if (err)
45120348d19SUlf Hansson 		return err;
45220348d19SUlf Hansson 
45320348d19SUlf Hansson 	return mmc_switch_status_error(card->host, status);
45420348d19SUlf Hansson }
45520348d19SUlf Hansson 
/*
 * Generic busy-poll callback: decides whether the card is still busy,
 * using the host's ->card_busy() hook when available, otherwise CMD13.
 */
static int mmc_busy_cb(void *cb_data, bool *busy)
{
	struct mmc_busy_data *data = cb_data;
	struct mmc_host *host = data->card->host;
	u32 status = 0;
	int err;

	/* Prefer HW busy detection, except for MMC_BUSY_IO. */
	if (data->busy_cmd != MMC_BUSY_IO && host->ops->card_busy) {
		*busy = host->ops->card_busy(host);
		return 0;
	}

	err = mmc_send_status(data->card, &status);
	/* Optionally keep polling across CRC errors on CMD13. */
	if (data->retry_crc_err && err == -EILSEQ) {
		*busy = true;
		return 0;
	}
	if (err)
		return err;

	/* Per-operation interpretation of the status word. */
	switch (data->busy_cmd) {
	case MMC_BUSY_CMD6:
		err = mmc_switch_status_error(host, status);
		break;
	case MMC_BUSY_ERASE:
		err = R1_STATUS(status) ? -EIO : 0;
		break;
	case MMC_BUSY_HPI:
	case MMC_BUSY_EXTR_SINGLE:
	case MMC_BUSY_IO:
		break;
	default:
		err = -EINVAL;
	}

	if (err)
		return err;

	/* Busy until the card reports it is ready for data. */
	*busy = !mmc_ready_for_data(status);
	return 0;
}
4976972096aSUlf Hansson 
/*
 * Poll @busy_cb until the card reports not-busy or @timeout_ms elapses.
 * Returns 0 when done, -ETIMEDOUT on expiry, or the callback's error.
 */
int __mmc_poll_for_busy(struct mmc_host *host, unsigned int timeout_ms,
			int (*busy_cb)(void *cb_data, bool *busy),
			void *cb_data)
{
	int err;
	unsigned long timeout;
	/* Exponential backoff between polls: 32us up to ~32ms. */
	unsigned int udelay = 32, udelay_max = 32768;
	bool expired = false;
	bool busy = false;

	timeout = jiffies + msecs_to_jiffies(timeout_ms) + 1;
	do {
		/*
		 * Due to the possibility of being preempted while polling,
		 * check the expiration time first.
		 */
		expired = time_after(jiffies, timeout);

		err = (*busy_cb)(cb_data, &busy);
		if (err)
			return err;

		/* Timeout if the device still remains busy. */
		if (expired && busy) {
			pr_err("%s: Card stuck being busy! %s\n",
				mmc_hostname(host), __func__);
			return -ETIMEDOUT;
		}

		/* Throttle the polling rate to avoid hogging the CPU. */
		if (busy) {
			usleep_range(udelay, udelay * 2);
			if (udelay < udelay_max)
				udelay *= 2;
		}
	} while (busy);

	return 0;
}
5376966e609SUlf Hansson EXPORT_SYMBOL_GPL(__mmc_poll_for_busy);
538716bdb89SUlf Hansson 
5390d84c3e6SUlf Hansson int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
54004f967adSUlf Hansson 		      bool retry_crc_err, enum mmc_busy_cmd busy_cmd)
5410d84c3e6SUlf Hansson {
5422ebbdaceSHuijin Park 	struct mmc_host *host = card->host;
54304f967adSUlf Hansson 	struct mmc_busy_data cb_data;
54404f967adSUlf Hansson 
54504f967adSUlf Hansson 	cb_data.card = card;
54604f967adSUlf Hansson 	cb_data.retry_crc_err = retry_crc_err;
54704f967adSUlf Hansson 	cb_data.busy_cmd = busy_cmd;
54804f967adSUlf Hansson 
5492ebbdaceSHuijin Park 	return __mmc_poll_for_busy(host, timeout_ms, &mmc_busy_cb, &cb_data);
5500d84c3e6SUlf Hansson }
551972d5084SUlf Hansson EXPORT_SYMBOL_GPL(mmc_poll_for_busy);
5520d84c3e6SUlf Hansson 
553e62f1e0bSUlf Hansson bool mmc_prepare_busy_cmd(struct mmc_host *host, struct mmc_command *cmd,
554e62f1e0bSUlf Hansson 			  unsigned int timeout_ms)
555e62f1e0bSUlf Hansson {
556e62f1e0bSUlf Hansson 	/*
557e62f1e0bSUlf Hansson 	 * If the max_busy_timeout of the host is specified, make sure it's
558e62f1e0bSUlf Hansson 	 * enough to fit the used timeout_ms. In case it's not, let's instruct
559e62f1e0bSUlf Hansson 	 * the host to avoid HW busy detection, by converting to a R1 response
560e62f1e0bSUlf Hansson 	 * instead of a R1B. Note, some hosts requires R1B, which also means
561e62f1e0bSUlf Hansson 	 * they are on their own when it comes to deal with the busy timeout.
562e62f1e0bSUlf Hansson 	 */
563e62f1e0bSUlf Hansson 	if (!(host->caps & MMC_CAP_NEED_RSP_BUSY) && host->max_busy_timeout &&
564e62f1e0bSUlf Hansson 	    (timeout_ms > host->max_busy_timeout)) {
565e62f1e0bSUlf Hansson 		cmd->flags = MMC_CMD_AC | MMC_RSP_SPI_R1 | MMC_RSP_R1;
566e62f1e0bSUlf Hansson 		return false;
567e62f1e0bSUlf Hansson 	}
568e62f1e0bSUlf Hansson 
569e62f1e0bSUlf Hansson 	cmd->flags = MMC_CMD_AC | MMC_RSP_SPI_R1B | MMC_RSP_R1B;
570e62f1e0bSUlf Hansson 	cmd->busy_timeout = timeout_ms;
571e62f1e0bSUlf Hansson 	return true;
572e62f1e0bSUlf Hansson }
573e62f1e0bSUlf Hansson 
574d3a8d95dSAndrei Warkentin /**
575950d56acSJaehoon Chung  *	__mmc_switch - modify EXT_CSD register
576d3a8d95dSAndrei Warkentin  *	@card: the MMC card associated with the data transfer
577d3a8d95dSAndrei Warkentin  *	@set: cmd set values
578d3a8d95dSAndrei Warkentin  *	@index: EXT_CSD register index
579d3a8d95dSAndrei Warkentin  *	@value: value to program into EXT_CSD register
580d3a8d95dSAndrei Warkentin  *	@timeout_ms: timeout (ms) for operation performed by register write,
581d3a8d95dSAndrei Warkentin  *                   timeout of zero implies maximum possible timeout
582aa33ce3cSUlf Hansson  *	@timing: new timing to change to
583878e200bSUlf Hansson  *	@send_status: send status cmd to poll for busy
584625228faSUlf Hansson  *	@retry_crc_err: retry when CRC errors when polling with CMD13 for busy
5855e52a168SBean Huo  *	@retries: number of retries
586d3a8d95dSAndrei Warkentin  *
587d3a8d95dSAndrei Warkentin  *	Modifies the EXT_CSD register for selected card.
588d3a8d95dSAndrei Warkentin  */
int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
		unsigned int timeout_ms, unsigned char timing,
		bool send_status, bool retry_crc_err, unsigned int retries)
{
	struct mmc_host *host = card->host;
	int err;
	struct mmc_command cmd = {};
	bool use_r1b_resp;
	unsigned char old_timing = host->ios.timing;

	/* Hold re-tuning while CMD6 and its busy polling are in flight. */
	mmc_retune_hold(host);

	/* Fall back to the generic CMD6 timeout when none was given. */
	if (!timeout_ms) {
		pr_warn("%s: unspecified timeout for CMD6 - use generic\n",
			mmc_hostname(host));
		timeout_ms = card->ext_csd.generic_cmd6_time;
	}

	/* Encode mode/index/value/set into the CMD6 argument. */
	cmd.opcode = MMC_SWITCH;
	cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
		  (index << 16) |
		  (value << 8) |
		  set;
	use_r1b_resp = mmc_prepare_busy_cmd(host, &cmd, timeout_ms);

	err = mmc_wait_for_cmd(host, &cmd, retries);
	if (err)
		goto out;

	/*If SPI or used HW busy detection above, then we don't need to poll. */
	if (((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp) ||
		mmc_host_is_spi(host))
		goto out_tim;

	/*
	 * If the host doesn't support HW polling via the ->card_busy() ops and
	 * when it's not allowed to poll by using CMD13, then we need to rely on
	 * waiting the stated timeout to be sufficient.
	 */
	if (!send_status && !host->ops->card_busy) {
		mmc_delay(timeout_ms);
		goto out_tim;
	}

	/* Let's try to poll to find out when the command is completed. */
	err = mmc_poll_for_busy(card, timeout_ms, retry_crc_err, MMC_BUSY_CMD6);
	if (err)
		goto out;

out_tim:
	/* Switch to new timing before check switch status. */
	if (timing)
		mmc_set_timing(host, timing);

	if (send_status) {
		err = mmc_switch_status(card, true);
		/* Revert the timing change if the switch did not stick. */
		if (err && timing)
			mmc_set_timing(host, old_timing);
	}
out:
	mmc_retune_release(host);

	return err;
}
653950d56acSJaehoon Chung 
654950d56acSJaehoon Chung int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
655950d56acSJaehoon Chung 		unsigned int timeout_ms)
656950d56acSJaehoon Chung {
657aa33ce3cSUlf Hansson 	return __mmc_switch(card, set, index, value, timeout_ms, 0,
6585e52a168SBean Huo 			    true, false, MMC_CMD_RETRIES);
659950d56acSJaehoon Chung }
660d3a8d95dSAndrei Warkentin EXPORT_SYMBOL_GPL(mmc_switch);
661da7fbe58SPierre Ossman 
6629979dbe5SChaotian Jing int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error)
663996903deSMinda Chen {
664c7836d15SMasahiro Yamada 	struct mmc_request mrq = {};
665c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
666c7836d15SMasahiro Yamada 	struct mmc_data data = {};
667996903deSMinda Chen 	struct scatterlist sg;
668fe5afb13SUlf Hansson 	struct mmc_ios *ios = &host->ios;
669996903deSMinda Chen 	const u8 *tuning_block_pattern;
670996903deSMinda Chen 	int size, err = 0;
671996903deSMinda Chen 	u8 *data_buf;
672996903deSMinda Chen 
673996903deSMinda Chen 	if (ios->bus_width == MMC_BUS_WIDTH_8) {
674996903deSMinda Chen 		tuning_block_pattern = tuning_blk_pattern_8bit;
675996903deSMinda Chen 		size = sizeof(tuning_blk_pattern_8bit);
676996903deSMinda Chen 	} else if (ios->bus_width == MMC_BUS_WIDTH_4) {
677996903deSMinda Chen 		tuning_block_pattern = tuning_blk_pattern_4bit;
678996903deSMinda Chen 		size = sizeof(tuning_blk_pattern_4bit);
679996903deSMinda Chen 	} else
680996903deSMinda Chen 		return -EINVAL;
681996903deSMinda Chen 
682996903deSMinda Chen 	data_buf = kzalloc(size, GFP_KERNEL);
683996903deSMinda Chen 	if (!data_buf)
684996903deSMinda Chen 		return -ENOMEM;
685996903deSMinda Chen 
686996903deSMinda Chen 	mrq.cmd = &cmd;
687996903deSMinda Chen 	mrq.data = &data;
688996903deSMinda Chen 
689996903deSMinda Chen 	cmd.opcode = opcode;
690996903deSMinda Chen 	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
691996903deSMinda Chen 
692996903deSMinda Chen 	data.blksz = size;
693996903deSMinda Chen 	data.blocks = 1;
694996903deSMinda Chen 	data.flags = MMC_DATA_READ;
695996903deSMinda Chen 
696996903deSMinda Chen 	/*
697996903deSMinda Chen 	 * According to the tuning specs, Tuning process
698996903deSMinda Chen 	 * is normally shorter 40 executions of CMD19,
699996903deSMinda Chen 	 * and timeout value should be shorter than 150 ms
700996903deSMinda Chen 	 */
701996903deSMinda Chen 	data.timeout_ns = 150 * NSEC_PER_MSEC;
702996903deSMinda Chen 
703996903deSMinda Chen 	data.sg = &sg;
704996903deSMinda Chen 	data.sg_len = 1;
705996903deSMinda Chen 	sg_init_one(&sg, data_buf, size);
706996903deSMinda Chen 
707fe5afb13SUlf Hansson 	mmc_wait_for_req(host, &mrq);
708996903deSMinda Chen 
7099979dbe5SChaotian Jing 	if (cmd_error)
7109979dbe5SChaotian Jing 		*cmd_error = cmd.error;
7119979dbe5SChaotian Jing 
712996903deSMinda Chen 	if (cmd.error) {
713996903deSMinda Chen 		err = cmd.error;
714996903deSMinda Chen 		goto out;
715996903deSMinda Chen 	}
716996903deSMinda Chen 
717996903deSMinda Chen 	if (data.error) {
718996903deSMinda Chen 		err = data.error;
719996903deSMinda Chen 		goto out;
720996903deSMinda Chen 	}
721996903deSMinda Chen 
722996903deSMinda Chen 	if (memcmp(data_buf, tuning_block_pattern, size))
723996903deSMinda Chen 		err = -EIO;
724996903deSMinda Chen 
725996903deSMinda Chen out:
726996903deSMinda Chen 	kfree(data_buf);
727996903deSMinda Chen 	return err;
728996903deSMinda Chen }
729996903deSMinda Chen EXPORT_SYMBOL_GPL(mmc_send_tuning);
730996903deSMinda Chen 
73121adc2e4SWolfram Sang int mmc_send_abort_tuning(struct mmc_host *host, u32 opcode)
732e711f030SAdrian Hunter {
733c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
734e711f030SAdrian Hunter 
735e711f030SAdrian Hunter 	/*
736e711f030SAdrian Hunter 	 * eMMC specification specifies that CMD12 can be used to stop a tuning
737e711f030SAdrian Hunter 	 * command, but SD specification does not, so do nothing unless it is
738e711f030SAdrian Hunter 	 * eMMC.
739e711f030SAdrian Hunter 	 */
740e711f030SAdrian Hunter 	if (opcode != MMC_SEND_TUNING_BLOCK_HS200)
741e711f030SAdrian Hunter 		return 0;
742e711f030SAdrian Hunter 
743e711f030SAdrian Hunter 	cmd.opcode = MMC_STOP_TRANSMISSION;
744e711f030SAdrian Hunter 	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
745e711f030SAdrian Hunter 
746e711f030SAdrian Hunter 	/*
747e711f030SAdrian Hunter 	 * For drivers that override R1 to R1b, set an arbitrary timeout based
748e711f030SAdrian Hunter 	 * on the tuning timeout i.e. 150ms.
749e711f030SAdrian Hunter 	 */
750e711f030SAdrian Hunter 	cmd.busy_timeout = 150;
751e711f030SAdrian Hunter 
752e711f030SAdrian Hunter 	return mmc_wait_for_cmd(host, &cmd, 0);
753e711f030SAdrian Hunter }
75421adc2e4SWolfram Sang EXPORT_SYMBOL_GPL(mmc_send_abort_tuning);
755e711f030SAdrian Hunter 
75622113efdSAries Lee static int
75722113efdSAries Lee mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
75822113efdSAries Lee 		  u8 len)
75922113efdSAries Lee {
760c7836d15SMasahiro Yamada 	struct mmc_request mrq = {};
761c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
762c7836d15SMasahiro Yamada 	struct mmc_data data = {};
76322113efdSAries Lee 	struct scatterlist sg;
76422113efdSAries Lee 	u8 *data_buf;
76522113efdSAries Lee 	u8 *test_buf;
76622113efdSAries Lee 	int i, err;
76722113efdSAries Lee 	static u8 testdata_8bit[8] = { 0x55, 0xaa, 0, 0, 0, 0, 0, 0 };
76822113efdSAries Lee 	static u8 testdata_4bit[4] = { 0x5a, 0, 0, 0 };
76922113efdSAries Lee 
77022113efdSAries Lee 	/* dma onto stack is unsafe/nonportable, but callers to this
77122113efdSAries Lee 	 * routine normally provide temporary on-stack buffers ...
77222113efdSAries Lee 	 */
77322113efdSAries Lee 	data_buf = kmalloc(len, GFP_KERNEL);
77422113efdSAries Lee 	if (!data_buf)
77522113efdSAries Lee 		return -ENOMEM;
77622113efdSAries Lee 
77722113efdSAries Lee 	if (len == 8)
77822113efdSAries Lee 		test_buf = testdata_8bit;
77922113efdSAries Lee 	else if (len == 4)
78022113efdSAries Lee 		test_buf = testdata_4bit;
78122113efdSAries Lee 	else {
782a3c76eb9SGirish K S 		pr_err("%s: Invalid bus_width %d\n",
78322113efdSAries Lee 		       mmc_hostname(host), len);
78422113efdSAries Lee 		kfree(data_buf);
78522113efdSAries Lee 		return -EINVAL;
78622113efdSAries Lee 	}
78722113efdSAries Lee 
78822113efdSAries Lee 	if (opcode == MMC_BUS_TEST_W)
78922113efdSAries Lee 		memcpy(data_buf, test_buf, len);
79022113efdSAries Lee 
79122113efdSAries Lee 	mrq.cmd = &cmd;
79222113efdSAries Lee 	mrq.data = &data;
79322113efdSAries Lee 	cmd.opcode = opcode;
79422113efdSAries Lee 	cmd.arg = 0;
79522113efdSAries Lee 
79622113efdSAries Lee 	/* NOTE HACK:  the MMC_RSP_SPI_R1 is always correct here, but we
79722113efdSAries Lee 	 * rely on callers to never use this with "native" calls for reading
79822113efdSAries Lee 	 * CSD or CID.  Native versions of those commands use the R2 type,
79922113efdSAries Lee 	 * not R1 plus a data block.
80022113efdSAries Lee 	 */
80122113efdSAries Lee 	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
80222113efdSAries Lee 
80322113efdSAries Lee 	data.blksz = len;
80422113efdSAries Lee 	data.blocks = 1;
80522113efdSAries Lee 	if (opcode == MMC_BUS_TEST_R)
80622113efdSAries Lee 		data.flags = MMC_DATA_READ;
80722113efdSAries Lee 	else
80822113efdSAries Lee 		data.flags = MMC_DATA_WRITE;
80922113efdSAries Lee 
81022113efdSAries Lee 	data.sg = &sg;
81122113efdSAries Lee 	data.sg_len = 1;
81284532e33SMinjian Wu 	mmc_set_data_timeout(&data, card);
81322113efdSAries Lee 	sg_init_one(&sg, data_buf, len);
81422113efdSAries Lee 	mmc_wait_for_req(host, &mrq);
81522113efdSAries Lee 	err = 0;
81622113efdSAries Lee 	if (opcode == MMC_BUS_TEST_R) {
81722113efdSAries Lee 		for (i = 0; i < len / 4; i++)
81822113efdSAries Lee 			if ((test_buf[i] ^ data_buf[i]) != 0xff) {
81922113efdSAries Lee 				err = -EIO;
82022113efdSAries Lee 				break;
82122113efdSAries Lee 			}
82222113efdSAries Lee 	}
82322113efdSAries Lee 	kfree(data_buf);
82422113efdSAries Lee 
82522113efdSAries Lee 	if (cmd.error)
82622113efdSAries Lee 		return cmd.error;
82722113efdSAries Lee 	if (data.error)
82822113efdSAries Lee 		return data.error;
82922113efdSAries Lee 
83022113efdSAries Lee 	return err;
83122113efdSAries Lee }
83222113efdSAries Lee 
83322113efdSAries Lee int mmc_bus_test(struct mmc_card *card, u8 bus_width)
83422113efdSAries Lee {
8350899e741SMasahiro Yamada 	int width;
83622113efdSAries Lee 
83722113efdSAries Lee 	if (bus_width == MMC_BUS_WIDTH_8)
83822113efdSAries Lee 		width = 8;
83922113efdSAries Lee 	else if (bus_width == MMC_BUS_WIDTH_4)
84022113efdSAries Lee 		width = 4;
84122113efdSAries Lee 	else if (bus_width == MMC_BUS_WIDTH_1)
84222113efdSAries Lee 		return 0; /* no need for test */
84322113efdSAries Lee 	else
84422113efdSAries Lee 		return -EINVAL;
84522113efdSAries Lee 
84622113efdSAries Lee 	/*
84722113efdSAries Lee 	 * Ignore errors from BUS_TEST_W.  BUS_TEST_R will fail if there
84822113efdSAries Lee 	 * is a problem.  This improves chances that the test will work.
84922113efdSAries Lee 	 */
85022113efdSAries Lee 	mmc_send_bus_test(card, card->host, MMC_BUS_TEST_W, width);
8510899e741SMasahiro Yamada 	return mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
85222113efdSAries Lee }
853eb0d8f13SJaehoon Chung 
8549f94d047SUlf Hansson static int mmc_send_hpi_cmd(struct mmc_card *card)
855eb0d8f13SJaehoon Chung {
856490ff95fSUlf Hansson 	unsigned int busy_timeout_ms = card->ext_csd.out_of_int_time;
857892bf100SUlf Hansson 	struct mmc_host *host = card->host;
858c7bedef0SUlf Hansson 	bool use_r1b_resp = false;
859c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
860eb0d8f13SJaehoon Chung 	int err;
861eb0d8f13SJaehoon Chung 
862892bf100SUlf Hansson 	cmd.opcode = card->ext_csd.hpi_cmd;
863eb0d8f13SJaehoon Chung 	cmd.arg = card->rca << 16 | 1;
864892bf100SUlf Hansson 	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
865c7bedef0SUlf Hansson 
866c7bedef0SUlf Hansson 	if (cmd.opcode == MMC_STOP_TRANSMISSION)
867c7bedef0SUlf Hansson 		use_r1b_resp = mmc_prepare_busy_cmd(host, &cmd,
868c7bedef0SUlf Hansson 						    busy_timeout_ms);
869892bf100SUlf Hansson 
870892bf100SUlf Hansson 	err = mmc_wait_for_cmd(host, &cmd, 0);
871eb0d8f13SJaehoon Chung 	if (err) {
872892bf100SUlf Hansson 		pr_warn("%s: HPI error %d. Command response %#x\n",
873892bf100SUlf Hansson 			mmc_hostname(host), err, cmd.resp[0]);
874eb0d8f13SJaehoon Chung 		return err;
875eb0d8f13SJaehoon Chung 	}
876eb0d8f13SJaehoon Chung 
877892bf100SUlf Hansson 	/* No need to poll when using HW busy detection. */
878892bf100SUlf Hansson 	if (host->caps & MMC_CAP_WAIT_WHILE_BUSY && use_r1b_resp)
879892bf100SUlf Hansson 		return 0;
880892bf100SUlf Hansson 
881490ff95fSUlf Hansson 	/* Let's poll to find out when the HPI request completes. */
88204f967adSUlf Hansson 	return mmc_poll_for_busy(card, busy_timeout_ms, false, MMC_BUSY_HPI);
883eb0d8f13SJaehoon Chung }
884148bcab2SUlf Hansson 
8850f2c0512SUlf Hansson /**
8860f2c0512SUlf Hansson  *	mmc_interrupt_hpi - Issue for High priority Interrupt
8870f2c0512SUlf Hansson  *	@card: the MMC card associated with the HPI transfer
8880f2c0512SUlf Hansson  *
8890f2c0512SUlf Hansson  *	Issued High Priority Interrupt, and check for card status
8900f2c0512SUlf Hansson  *	until out-of prg-state.
8910f2c0512SUlf Hansson  */
89244aebc16SJason Yan static int mmc_interrupt_hpi(struct mmc_card *card)
8930f2c0512SUlf Hansson {
8940f2c0512SUlf Hansson 	int err;
8950f2c0512SUlf Hansson 	u32 status;
8960f2c0512SUlf Hansson 
8970f2c0512SUlf Hansson 	if (!card->ext_csd.hpi_en) {
8980f2c0512SUlf Hansson 		pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
8990f2c0512SUlf Hansson 		return 1;
9000f2c0512SUlf Hansson 	}
9010f2c0512SUlf Hansson 
9020f2c0512SUlf Hansson 	err = mmc_send_status(card, &status);
9030f2c0512SUlf Hansson 	if (err) {
9040f2c0512SUlf Hansson 		pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
9050f2c0512SUlf Hansson 		goto out;
9060f2c0512SUlf Hansson 	}
9070f2c0512SUlf Hansson 
9080f2c0512SUlf Hansson 	switch (R1_CURRENT_STATE(status)) {
9090f2c0512SUlf Hansson 	case R1_STATE_IDLE:
9100f2c0512SUlf Hansson 	case R1_STATE_READY:
9110f2c0512SUlf Hansson 	case R1_STATE_STBY:
9120f2c0512SUlf Hansson 	case R1_STATE_TRAN:
9130f2c0512SUlf Hansson 		/*
9140f2c0512SUlf Hansson 		 * In idle and transfer states, HPI is not needed and the caller
9150f2c0512SUlf Hansson 		 * can issue the next intended command immediately
9160f2c0512SUlf Hansson 		 */
9170f2c0512SUlf Hansson 		goto out;
9180f2c0512SUlf Hansson 	case R1_STATE_PRG:
9190f2c0512SUlf Hansson 		break;
9200f2c0512SUlf Hansson 	default:
9210f2c0512SUlf Hansson 		/* In all other states, it's illegal to issue HPI */
9220f2c0512SUlf Hansson 		pr_debug("%s: HPI cannot be sent. Card state=%d\n",
9230f2c0512SUlf Hansson 			mmc_hostname(card->host), R1_CURRENT_STATE(status));
9240f2c0512SUlf Hansson 		err = -EINVAL;
9250f2c0512SUlf Hansson 		goto out;
9260f2c0512SUlf Hansson 	}
9270f2c0512SUlf Hansson 
9289f94d047SUlf Hansson 	err = mmc_send_hpi_cmd(card);
9290f2c0512SUlf Hansson out:
9300f2c0512SUlf Hansson 	return err;
9310f2c0512SUlf Hansson }
9320f2c0512SUlf Hansson 
933148bcab2SUlf Hansson int mmc_can_ext_csd(struct mmc_card *card)
934148bcab2SUlf Hansson {
935148bcab2SUlf Hansson 	return (card && card->csd.mmca_vsn > CSD_SPEC_VER_3);
936148bcab2SUlf Hansson }
937b658af71SAdrian Hunter 
9381cf8f7e5SUlf Hansson static int mmc_read_bkops_status(struct mmc_card *card)
9391cf8f7e5SUlf Hansson {
9401cf8f7e5SUlf Hansson 	int err;
9411cf8f7e5SUlf Hansson 	u8 *ext_csd;
9421cf8f7e5SUlf Hansson 
9431cf8f7e5SUlf Hansson 	err = mmc_get_ext_csd(card, &ext_csd);
9441cf8f7e5SUlf Hansson 	if (err)
9451cf8f7e5SUlf Hansson 		return err;
9461cf8f7e5SUlf Hansson 
9471cf8f7e5SUlf Hansson 	card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS];
9481cf8f7e5SUlf Hansson 	card->ext_csd.raw_exception_status = ext_csd[EXT_CSD_EXP_EVENTS_STATUS];
9491cf8f7e5SUlf Hansson 	kfree(ext_csd);
9501cf8f7e5SUlf Hansson 	return 0;
9511cf8f7e5SUlf Hansson }
9521cf8f7e5SUlf Hansson 
9531cf8f7e5SUlf Hansson /**
9540c204979SUlf Hansson  *	mmc_run_bkops - Run BKOPS for supported cards
9550c204979SUlf Hansson  *	@card: MMC card to run BKOPS for
9561cf8f7e5SUlf Hansson  *
9570c204979SUlf Hansson  *	Run background operations synchronously for cards having manual BKOPS
9580c204979SUlf Hansson  *	enabled and in case it reports urgent BKOPS level.
9591cf8f7e5SUlf Hansson */
9600c204979SUlf Hansson void mmc_run_bkops(struct mmc_card *card)
9611cf8f7e5SUlf Hansson {
9621cf8f7e5SUlf Hansson 	int err;
9631cf8f7e5SUlf Hansson 
9640c204979SUlf Hansson 	if (!card->ext_csd.man_bkops_en)
9651cf8f7e5SUlf Hansson 		return;
9661cf8f7e5SUlf Hansson 
9671cf8f7e5SUlf Hansson 	err = mmc_read_bkops_status(card);
9681cf8f7e5SUlf Hansson 	if (err) {
9691cf8f7e5SUlf Hansson 		pr_err("%s: Failed to read bkops status: %d\n",
9701cf8f7e5SUlf Hansson 		       mmc_hostname(card->host), err);
9711cf8f7e5SUlf Hansson 		return;
9721cf8f7e5SUlf Hansson 	}
9731cf8f7e5SUlf Hansson 
9740c204979SUlf Hansson 	if (!card->ext_csd.raw_bkops_status ||
9750c204979SUlf Hansson 	    card->ext_csd.raw_bkops_status < EXT_CSD_BKOPS_LEVEL_2)
9761cf8f7e5SUlf Hansson 		return;
9771cf8f7e5SUlf Hansson 
9781cf8f7e5SUlf Hansson 	mmc_retune_hold(card->host);
9791cf8f7e5SUlf Hansson 
9800c204979SUlf Hansson 	/*
9810c204979SUlf Hansson 	 * For urgent BKOPS status, LEVEL_2 and higher, let's execute
9820c204979SUlf Hansson 	 * synchronously. Future wise, we may consider to start BKOPS, for less
9830c204979SUlf Hansson 	 * urgent levels by using an asynchronous background task, when idle.
9840c204979SUlf Hansson 	 */
9850c204979SUlf Hansson 	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
98624ed3bd0SUlf Hansson 			 EXT_CSD_BKOPS_START, 1, MMC_BKOPS_TIMEOUT_MS);
987fe72d08aSBean Huo 	/*
988fe72d08aSBean Huo 	 * If the BKOPS timed out, the card is probably still busy in the
989fe72d08aSBean Huo 	 * R1_STATE_PRG. Rather than continue to wait, let's try to abort
990fe72d08aSBean Huo 	 * it with a HPI command to get back into R1_STATE_TRAN.
991fe72d08aSBean Huo 	 */
992fe72d08aSBean Huo 	if (err == -ETIMEDOUT && !mmc_interrupt_hpi(card))
993fe72d08aSBean Huo 		pr_warn("%s: BKOPS aborted\n", mmc_hostname(card->host));
994fe72d08aSBean Huo 	else if (err)
995fe72d08aSBean Huo 		pr_warn("%s: Error %d running bkops\n",
9961cf8f7e5SUlf Hansson 			mmc_hostname(card->host), err);
9971cf8f7e5SUlf Hansson 
9981cf8f7e5SUlf Hansson 	mmc_retune_release(card->host);
9991cf8f7e5SUlf Hansson }
10000c204979SUlf Hansson EXPORT_SYMBOL(mmc_run_bkops);
10011cf8f7e5SUlf Hansson 
1002b658af71SAdrian Hunter static int mmc_cmdq_switch(struct mmc_card *card, bool enable)
1003b658af71SAdrian Hunter {
1004b658af71SAdrian Hunter 	u8 val = enable ? EXT_CSD_CMDQ_MODE_ENABLED : 0;
1005b658af71SAdrian Hunter 	int err;
1006b658af71SAdrian Hunter 
1007b658af71SAdrian Hunter 	if (!card->ext_csd.cmdq_support)
1008b658af71SAdrian Hunter 		return -EOPNOTSUPP;
1009b658af71SAdrian Hunter 
1010b658af71SAdrian Hunter 	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_CMDQ_MODE_EN,
1011b658af71SAdrian Hunter 			 val, card->ext_csd.generic_cmd6_time);
1012b658af71SAdrian Hunter 	if (!err)
1013b658af71SAdrian Hunter 		card->ext_csd.cmdq_en = enable;
1014b658af71SAdrian Hunter 
1015b658af71SAdrian Hunter 	return err;
1016b658af71SAdrian Hunter }
1017b658af71SAdrian Hunter 
1018b658af71SAdrian Hunter int mmc_cmdq_enable(struct mmc_card *card)
1019b658af71SAdrian Hunter {
1020b658af71SAdrian Hunter 	return mmc_cmdq_switch(card, true);
1021b658af71SAdrian Hunter }
1022b658af71SAdrian Hunter EXPORT_SYMBOL_GPL(mmc_cmdq_enable);
1023b658af71SAdrian Hunter 
1024b658af71SAdrian Hunter int mmc_cmdq_disable(struct mmc_card *card)
1025b658af71SAdrian Hunter {
1026b658af71SAdrian Hunter 	return mmc_cmdq_switch(card, false);
1027b658af71SAdrian Hunter }
1028b658af71SAdrian Hunter EXPORT_SYMBOL_GPL(mmc_cmdq_disable);
102955c2b8b9SUlf Hansson 
10304f111d04SBean Huo int mmc_sanitize(struct mmc_card *card, unsigned int timeout_ms)
103155c2b8b9SUlf Hansson {
103255c2b8b9SUlf Hansson 	struct mmc_host *host = card->host;
103355c2b8b9SUlf Hansson 	int err;
103455c2b8b9SUlf Hansson 
103555c2b8b9SUlf Hansson 	if (!mmc_can_sanitize(card)) {
103655c2b8b9SUlf Hansson 		pr_warn("%s: Sanitize not supported\n", mmc_hostname(host));
103755c2b8b9SUlf Hansson 		return -EOPNOTSUPP;
103855c2b8b9SUlf Hansson 	}
103955c2b8b9SUlf Hansson 
10404f111d04SBean Huo 	if (!timeout_ms)
10414f111d04SBean Huo 		timeout_ms = MMC_SANITIZE_TIMEOUT_MS;
10424f111d04SBean Huo 
104355c2b8b9SUlf Hansson 	pr_debug("%s: Sanitize in progress...\n", mmc_hostname(host));
104455c2b8b9SUlf Hansson 
104555c2b8b9SUlf Hansson 	mmc_retune_hold(host);
104655c2b8b9SUlf Hansson 
10475b96247cSBean Huo 	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_SANITIZE_START,
10485b96247cSBean Huo 			   1, timeout_ms, 0, true, false, 0);
104955c2b8b9SUlf Hansson 	if (err)
105055c2b8b9SUlf Hansson 		pr_err("%s: Sanitize failed err=%d\n", mmc_hostname(host), err);
105155c2b8b9SUlf Hansson 
105255c2b8b9SUlf Hansson 	/*
105355c2b8b9SUlf Hansson 	 * If the sanitize operation timed out, the card is probably still busy
105455c2b8b9SUlf Hansson 	 * in the R1_STATE_PRG. Rather than continue to wait, let's try to abort
105555c2b8b9SUlf Hansson 	 * it with a HPI command to get back into R1_STATE_TRAN.
105655c2b8b9SUlf Hansson 	 */
105755c2b8b9SUlf Hansson 	if (err == -ETIMEDOUT && !mmc_interrupt_hpi(card))
105855c2b8b9SUlf Hansson 		pr_warn("%s: Sanitize aborted\n", mmc_hostname(host));
105955c2b8b9SUlf Hansson 
106055c2b8b9SUlf Hansson 	mmc_retune_release(host);
106155c2b8b9SUlf Hansson 
106255c2b8b9SUlf Hansson 	pr_debug("%s: Sanitize completed\n", mmc_hostname(host));
106355c2b8b9SUlf Hansson 	return err;
106455c2b8b9SUlf Hansson }
106555c2b8b9SUlf Hansson EXPORT_SYMBOL_GPL(mmc_sanitize);
1066