xref: /openbmc/linux/drivers/mmc/core/mmc_ops.c (revision b658af71)
1da7fbe58SPierre Ossman /*
270f10482SPierre Ossman  *  linux/drivers/mmc/core/mmc_ops.h
3da7fbe58SPierre Ossman  *
4da7fbe58SPierre Ossman  *  Copyright 2006-2007 Pierre Ossman
5da7fbe58SPierre Ossman  *
6da7fbe58SPierre Ossman  * This program is free software; you can redistribute it and/or modify
7da7fbe58SPierre Ossman  * it under the terms of the GNU General Public License as published by
8da7fbe58SPierre Ossman  * the Free Software Foundation; either version 2 of the License, or (at
9da7fbe58SPierre Ossman  * your option) any later version.
10da7fbe58SPierre Ossman  */
11da7fbe58SPierre Ossman 
125a0e3ad6STejun Heo #include <linux/slab.h>
133ef77af1SPaul Gortmaker #include <linux/export.h>
14da7fbe58SPierre Ossman #include <linux/types.h>
15da7fbe58SPierre Ossman #include <linux/scatterlist.h>
16da7fbe58SPierre Ossman 
17da7fbe58SPierre Ossman #include <linux/mmc/host.h>
18da7fbe58SPierre Ossman #include <linux/mmc/card.h>
19da7fbe58SPierre Ossman #include <linux/mmc/mmc.h>
20da7fbe58SPierre Ossman 
21da7fbe58SPierre Ossman #include "core.h"
22c6dbab9cSAdrian Hunter #include "host.h"
23da7fbe58SPierre Ossman #include "mmc_ops.h"
24da7fbe58SPierre Ossman 
258fee476bSTrey Ramsay #define MMC_OPS_TIMEOUT_MS	(10 * 60 * 1000) /* 10 minute timeout */
268fee476bSTrey Ramsay 
2704cdbbfaSUlf Hansson static const u8 tuning_blk_pattern_4bit[] = {
2804cdbbfaSUlf Hansson 	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
2904cdbbfaSUlf Hansson 	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
3004cdbbfaSUlf Hansson 	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
3104cdbbfaSUlf Hansson 	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
3204cdbbfaSUlf Hansson 	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
3304cdbbfaSUlf Hansson 	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
3404cdbbfaSUlf Hansson 	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
3504cdbbfaSUlf Hansson 	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
3604cdbbfaSUlf Hansson };
3704cdbbfaSUlf Hansson 
3804cdbbfaSUlf Hansson static const u8 tuning_blk_pattern_8bit[] = {
3904cdbbfaSUlf Hansson 	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
4004cdbbfaSUlf Hansson 	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
4104cdbbfaSUlf Hansson 	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
4204cdbbfaSUlf Hansson 	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
4304cdbbfaSUlf Hansson 	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
4404cdbbfaSUlf Hansson 	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
4504cdbbfaSUlf Hansson 	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
4604cdbbfaSUlf Hansson 	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
4704cdbbfaSUlf Hansson 	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
4804cdbbfaSUlf Hansson 	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
4904cdbbfaSUlf Hansson 	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
5004cdbbfaSUlf Hansson 	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
5104cdbbfaSUlf Hansson 	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
5204cdbbfaSUlf Hansson 	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
5304cdbbfaSUlf Hansson 	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
5404cdbbfaSUlf Hansson 	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
5504cdbbfaSUlf Hansson };
5604cdbbfaSUlf Hansson 
5789e57aedSUlf Hansson int mmc_send_status(struct mmc_card *card, u32 *status)
58a27fbf2fSSeungwon Jeon {
59a27fbf2fSSeungwon Jeon 	int err;
60c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
61a27fbf2fSSeungwon Jeon 
62a27fbf2fSSeungwon Jeon 	cmd.opcode = MMC_SEND_STATUS;
63a27fbf2fSSeungwon Jeon 	if (!mmc_host_is_spi(card->host))
64a27fbf2fSSeungwon Jeon 		cmd.arg = card->rca << 16;
65a27fbf2fSSeungwon Jeon 	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
66a27fbf2fSSeungwon Jeon 
67a27fbf2fSSeungwon Jeon 	err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
68a27fbf2fSSeungwon Jeon 	if (err)
69a27fbf2fSSeungwon Jeon 		return err;
70a27fbf2fSSeungwon Jeon 
71a27fbf2fSSeungwon Jeon 	/* NOTE: callers are required to understand the difference
72a27fbf2fSSeungwon Jeon 	 * between "native" and SPI format status words!
73a27fbf2fSSeungwon Jeon 	 */
74a27fbf2fSSeungwon Jeon 	if (status)
75a27fbf2fSSeungwon Jeon 		*status = cmd.resp[0];
76a27fbf2fSSeungwon Jeon 
77a27fbf2fSSeungwon Jeon 	return 0;
78a27fbf2fSSeungwon Jeon }
79a27fbf2fSSeungwon Jeon 
80da7fbe58SPierre Ossman static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card)
81da7fbe58SPierre Ossman {
82c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
83da7fbe58SPierre Ossman 
84da7fbe58SPierre Ossman 	cmd.opcode = MMC_SELECT_CARD;
85da7fbe58SPierre Ossman 
86da7fbe58SPierre Ossman 	if (card) {
87da7fbe58SPierre Ossman 		cmd.arg = card->rca << 16;
88da7fbe58SPierre Ossman 		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
89da7fbe58SPierre Ossman 	} else {
90da7fbe58SPierre Ossman 		cmd.arg = 0;
91da7fbe58SPierre Ossman 		cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
92da7fbe58SPierre Ossman 	}
93da7fbe58SPierre Ossman 
940899e741SMasahiro Yamada 	return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
95da7fbe58SPierre Ossman }
96da7fbe58SPierre Ossman 
97da7fbe58SPierre Ossman int mmc_select_card(struct mmc_card *card)
98da7fbe58SPierre Ossman {
99da7fbe58SPierre Ossman 
100da7fbe58SPierre Ossman 	return _mmc_select_card(card->host, card);
101da7fbe58SPierre Ossman }
102da7fbe58SPierre Ossman 
103da7fbe58SPierre Ossman int mmc_deselect_cards(struct mmc_host *host)
104da7fbe58SPierre Ossman {
105da7fbe58SPierre Ossman 	return _mmc_select_card(host, NULL);
106da7fbe58SPierre Ossman }
107da7fbe58SPierre Ossman 
1083d705d14SSascha Hauer /*
1093d705d14SSascha Hauer  * Write the value specified in the device tree or board code into the optional
1103d705d14SSascha Hauer  * 16 bit Driver Stage Register. This can be used to tune raise/fall times and
1113d705d14SSascha Hauer  * drive strength of the DAT and CMD outputs. The actual meaning of a given
1123d705d14SSascha Hauer  * value is hardware dependant.
1133d705d14SSascha Hauer  * The presence of the DSR register can be determined from the CSD register,
1143d705d14SSascha Hauer  * bit 76.
1153d705d14SSascha Hauer  */
1163d705d14SSascha Hauer int mmc_set_dsr(struct mmc_host *host)
1173d705d14SSascha Hauer {
118c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
1193d705d14SSascha Hauer 
1203d705d14SSascha Hauer 	cmd.opcode = MMC_SET_DSR;
1213d705d14SSascha Hauer 
1223d705d14SSascha Hauer 	cmd.arg = (host->dsr << 16) | 0xffff;
1233d705d14SSascha Hauer 	cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
1243d705d14SSascha Hauer 
1253d705d14SSascha Hauer 	return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
1263d705d14SSascha Hauer }
1273d705d14SSascha Hauer 
/*
 * Send CMD0 (GO_IDLE_STATE) to reset the card, returning the command
 * result. As a side effect the host's SPI CRC tracking is cleared.
 */
int mmc_go_idle(struct mmc_host *host)
{
	int err;
	struct mmc_command cmd = {};

	/*
	 * Non-SPI hosts need to prevent chipselect going active during
	 * GO_IDLE; that would put chips into SPI mode.  Remind them of
	 * that in case of hardware that won't pull up DAT3/nCS otherwise.
	 *
	 * SPI hosts ignore ios.chip_select; it's managed according to
	 * rules that must accommodate non-MMC slaves which this layer
	 * won't even know about.
	 */
	if (!mmc_host_is_spi(host)) {
		mmc_set_chip_select(host, MMC_CS_HIGH);
		mmc_delay(1);
	}

	cmd.opcode = MMC_GO_IDLE_STATE;
	cmd.arg = 0;
	/* No response in native mode; R1 when talking over SPI. */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC;

	err = mmc_wait_for_cmd(host, &cmd, 0);

	/* Give the card a moment to complete the reset. */
	mmc_delay(1);

	if (!mmc_host_is_spi(host)) {
		mmc_set_chip_select(host, MMC_CS_DONTCARE);
		mmc_delay(1);
	}

	/* CMD0 returns the card to its power-on CRC-off state on SPI. */
	host->use_spi_crc = 0;

	return err;
}
164da7fbe58SPierre Ossman 
165da7fbe58SPierre Ossman int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
166da7fbe58SPierre Ossman {
167c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
168da7fbe58SPierre Ossman 	int i, err = 0;
169da7fbe58SPierre Ossman 
170da7fbe58SPierre Ossman 	cmd.opcode = MMC_SEND_OP_COND;
171af517150SDavid Brownell 	cmd.arg = mmc_host_is_spi(host) ? 0 : ocr;
172af517150SDavid Brownell 	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;
173da7fbe58SPierre Ossman 
174da7fbe58SPierre Ossman 	for (i = 100; i; i--) {
175da7fbe58SPierre Ossman 		err = mmc_wait_for_cmd(host, &cmd, 0);
17617b0429dSPierre Ossman 		if (err)
177da7fbe58SPierre Ossman 			break;
178da7fbe58SPierre Ossman 
179af517150SDavid Brownell 		/* if we're just probing, do a single pass */
180af517150SDavid Brownell 		if (ocr == 0)
181da7fbe58SPierre Ossman 			break;
182da7fbe58SPierre Ossman 
183af517150SDavid Brownell 		/* otherwise wait until reset completes */
184af517150SDavid Brownell 		if (mmc_host_is_spi(host)) {
185af517150SDavid Brownell 			if (!(cmd.resp[0] & R1_SPI_IDLE))
186af517150SDavid Brownell 				break;
187af517150SDavid Brownell 		} else {
188af517150SDavid Brownell 			if (cmd.resp[0] & MMC_CARD_BUSY)
189af517150SDavid Brownell 				break;
190af517150SDavid Brownell 		}
191af517150SDavid Brownell 
19217b0429dSPierre Ossman 		err = -ETIMEDOUT;
193da7fbe58SPierre Ossman 
194da7fbe58SPierre Ossman 		mmc_delay(10);
195da7fbe58SPierre Ossman 	}
196da7fbe58SPierre Ossman 
197af517150SDavid Brownell 	if (rocr && !mmc_host_is_spi(host))
198da7fbe58SPierre Ossman 		*rocr = cmd.resp[0];
199da7fbe58SPierre Ossman 
200da7fbe58SPierre Ossman 	return err;
201da7fbe58SPierre Ossman }
202da7fbe58SPierre Ossman 
203da7fbe58SPierre Ossman int mmc_all_send_cid(struct mmc_host *host, u32 *cid)
204da7fbe58SPierre Ossman {
205da7fbe58SPierre Ossman 	int err;
206c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
207da7fbe58SPierre Ossman 
208da7fbe58SPierre Ossman 	cmd.opcode = MMC_ALL_SEND_CID;
209da7fbe58SPierre Ossman 	cmd.arg = 0;
210da7fbe58SPierre Ossman 	cmd.flags = MMC_RSP_R2 | MMC_CMD_BCR;
211da7fbe58SPierre Ossman 
212da7fbe58SPierre Ossman 	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
21317b0429dSPierre Ossman 	if (err)
214da7fbe58SPierre Ossman 		return err;
215da7fbe58SPierre Ossman 
216da7fbe58SPierre Ossman 	memcpy(cid, cmd.resp, sizeof(u32) * 4);
217da7fbe58SPierre Ossman 
21817b0429dSPierre Ossman 	return 0;
219da7fbe58SPierre Ossman }
220da7fbe58SPierre Ossman 
221da7fbe58SPierre Ossman int mmc_set_relative_addr(struct mmc_card *card)
222da7fbe58SPierre Ossman {
223c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
224da7fbe58SPierre Ossman 
225da7fbe58SPierre Ossman 	cmd.opcode = MMC_SET_RELATIVE_ADDR;
226da7fbe58SPierre Ossman 	cmd.arg = card->rca << 16;
227da7fbe58SPierre Ossman 	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
228da7fbe58SPierre Ossman 
2290899e741SMasahiro Yamada 	return mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
230da7fbe58SPierre Ossman }
231da7fbe58SPierre Ossman 
232af517150SDavid Brownell static int
233af517150SDavid Brownell mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode)
234da7fbe58SPierre Ossman {
235da7fbe58SPierre Ossman 	int err;
236c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
237da7fbe58SPierre Ossman 
238af517150SDavid Brownell 	cmd.opcode = opcode;
239af517150SDavid Brownell 	cmd.arg = arg;
240da7fbe58SPierre Ossman 	cmd.flags = MMC_RSP_R2 | MMC_CMD_AC;
241da7fbe58SPierre Ossman 
242af517150SDavid Brownell 	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
24317b0429dSPierre Ossman 	if (err)
244da7fbe58SPierre Ossman 		return err;
245da7fbe58SPierre Ossman 
246af517150SDavid Brownell 	memcpy(cxd, cmd.resp, sizeof(u32) * 4);
247da7fbe58SPierre Ossman 
24817b0429dSPierre Ossman 	return 0;
249da7fbe58SPierre Ossman }
250da7fbe58SPierre Ossman 
2511a41313eSKyungsik Lee /*
2521a41313eSKyungsik Lee  * NOTE: void *buf, caller for the buf is required to use DMA-capable
2531a41313eSKyungsik Lee  * buffer or on-stack buffer (with some overhead in callee).
2541a41313eSKyungsik Lee  */
255af517150SDavid Brownell static int
256af517150SDavid Brownell mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
257af517150SDavid Brownell 		u32 opcode, void *buf, unsigned len)
258da7fbe58SPierre Ossman {
259c7836d15SMasahiro Yamada 	struct mmc_request mrq = {};
260c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
261c7836d15SMasahiro Yamada 	struct mmc_data data = {};
262da7fbe58SPierre Ossman 	struct scatterlist sg;
263da7fbe58SPierre Ossman 
264da7fbe58SPierre Ossman 	mrq.cmd = &cmd;
265da7fbe58SPierre Ossman 	mrq.data = &data;
266da7fbe58SPierre Ossman 
267af517150SDavid Brownell 	cmd.opcode = opcode;
268da7fbe58SPierre Ossman 	cmd.arg = 0;
269da7fbe58SPierre Ossman 
270af517150SDavid Brownell 	/* NOTE HACK:  the MMC_RSP_SPI_R1 is always correct here, but we
271af517150SDavid Brownell 	 * rely on callers to never use this with "native" calls for reading
272af517150SDavid Brownell 	 * CSD or CID.  Native versions of those commands use the R2 type,
273af517150SDavid Brownell 	 * not R1 plus a data block.
274af517150SDavid Brownell 	 */
275af517150SDavid Brownell 	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
276af517150SDavid Brownell 
277af517150SDavid Brownell 	data.blksz = len;
278da7fbe58SPierre Ossman 	data.blocks = 1;
279da7fbe58SPierre Ossman 	data.flags = MMC_DATA_READ;
280da7fbe58SPierre Ossman 	data.sg = &sg;
281da7fbe58SPierre Ossman 	data.sg_len = 1;
282da7fbe58SPierre Ossman 
283601ed60cSUlf Hansson 	sg_init_one(&sg, buf, len);
284da7fbe58SPierre Ossman 
285cda56ac2SAdrian Hunter 	if (opcode == MMC_SEND_CSD || opcode == MMC_SEND_CID) {
2860d3e0460SMatthew Fleming 		/*
2870d3e0460SMatthew Fleming 		 * The spec states that CSR and CID accesses have a timeout
2880d3e0460SMatthew Fleming 		 * of 64 clock cycles.
2890d3e0460SMatthew Fleming 		 */
2900d3e0460SMatthew Fleming 		data.timeout_ns = 0;
2910d3e0460SMatthew Fleming 		data.timeout_clks = 64;
292cda56ac2SAdrian Hunter 	} else
293cda56ac2SAdrian Hunter 		mmc_set_data_timeout(&data, card);
294da7fbe58SPierre Ossman 
295af517150SDavid Brownell 	mmc_wait_for_req(host, &mrq);
296af517150SDavid Brownell 
29717b0429dSPierre Ossman 	if (cmd.error)
298da7fbe58SPierre Ossman 		return cmd.error;
29917b0429dSPierre Ossman 	if (data.error)
300da7fbe58SPierre Ossman 		return data.error;
301da7fbe58SPierre Ossman 
30217b0429dSPierre Ossman 	return 0;
303da7fbe58SPierre Ossman }
304da7fbe58SPierre Ossman 
305af517150SDavid Brownell int mmc_send_csd(struct mmc_card *card, u32 *csd)
306af517150SDavid Brownell {
30778e48073SPierre Ossman 	int ret, i;
3081a41313eSKyungsik Lee 	u32 *csd_tmp;
30978e48073SPierre Ossman 
310af517150SDavid Brownell 	if (!mmc_host_is_spi(card->host))
311af517150SDavid Brownell 		return mmc_send_cxd_native(card->host, card->rca << 16,
312af517150SDavid Brownell 				csd, MMC_SEND_CSD);
313af517150SDavid Brownell 
31422b78700SUlf Hansson 	csd_tmp = kzalloc(16, GFP_KERNEL);
3151a41313eSKyungsik Lee 	if (!csd_tmp)
3161a41313eSKyungsik Lee 		return -ENOMEM;
3171a41313eSKyungsik Lee 
3181a41313eSKyungsik Lee 	ret = mmc_send_cxd_data(card, card->host, MMC_SEND_CSD, csd_tmp, 16);
31978e48073SPierre Ossman 	if (ret)
3201a41313eSKyungsik Lee 		goto err;
32178e48073SPierre Ossman 
32278e48073SPierre Ossman 	for (i = 0;i < 4;i++)
3231a41313eSKyungsik Lee 		csd[i] = be32_to_cpu(csd_tmp[i]);
32478e48073SPierre Ossman 
3251a41313eSKyungsik Lee err:
3261a41313eSKyungsik Lee 	kfree(csd_tmp);
3271a41313eSKyungsik Lee 	return ret;
328af517150SDavid Brownell }
329af517150SDavid Brownell 
330af517150SDavid Brownell int mmc_send_cid(struct mmc_host *host, u32 *cid)
331af517150SDavid Brownell {
33278e48073SPierre Ossman 	int ret, i;
3331a41313eSKyungsik Lee 	u32 *cid_tmp;
33478e48073SPierre Ossman 
335af517150SDavid Brownell 	if (!mmc_host_is_spi(host)) {
336af517150SDavid Brownell 		if (!host->card)
337af517150SDavid Brownell 			return -EINVAL;
338af517150SDavid Brownell 		return mmc_send_cxd_native(host, host->card->rca << 16,
339af517150SDavid Brownell 				cid, MMC_SEND_CID);
340af517150SDavid Brownell 	}
341af517150SDavid Brownell 
34222b78700SUlf Hansson 	cid_tmp = kzalloc(16, GFP_KERNEL);
3431a41313eSKyungsik Lee 	if (!cid_tmp)
3441a41313eSKyungsik Lee 		return -ENOMEM;
3451a41313eSKyungsik Lee 
3461a41313eSKyungsik Lee 	ret = mmc_send_cxd_data(NULL, host, MMC_SEND_CID, cid_tmp, 16);
34778e48073SPierre Ossman 	if (ret)
3481a41313eSKyungsik Lee 		goto err;
34978e48073SPierre Ossman 
35078e48073SPierre Ossman 	for (i = 0;i < 4;i++)
3511a41313eSKyungsik Lee 		cid[i] = be32_to_cpu(cid_tmp[i]);
35278e48073SPierre Ossman 
3531a41313eSKyungsik Lee err:
3541a41313eSKyungsik Lee 	kfree(cid_tmp);
3551a41313eSKyungsik Lee 	return ret;
356af517150SDavid Brownell }
357af517150SDavid Brownell 
358e21aa519SUlf Hansson int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)
359e21aa519SUlf Hansson {
360e21aa519SUlf Hansson 	int err;
361e21aa519SUlf Hansson 	u8 *ext_csd;
362e21aa519SUlf Hansson 
363e21aa519SUlf Hansson 	if (!card || !new_ext_csd)
364e21aa519SUlf Hansson 		return -EINVAL;
365e21aa519SUlf Hansson 
366e21aa519SUlf Hansson 	if (!mmc_can_ext_csd(card))
367e21aa519SUlf Hansson 		return -EOPNOTSUPP;
368e21aa519SUlf Hansson 
369e21aa519SUlf Hansson 	/*
370e21aa519SUlf Hansson 	 * As the ext_csd is so large and mostly unused, we don't store the
371e21aa519SUlf Hansson 	 * raw block in mmc_card.
372e21aa519SUlf Hansson 	 */
37322b78700SUlf Hansson 	ext_csd = kzalloc(512, GFP_KERNEL);
374e21aa519SUlf Hansson 	if (!ext_csd)
375e21aa519SUlf Hansson 		return -ENOMEM;
376e21aa519SUlf Hansson 
3772fc91e8bSUlf Hansson 	err = mmc_send_cxd_data(card, card->host, MMC_SEND_EXT_CSD, ext_csd,
3782fc91e8bSUlf Hansson 				512);
379e21aa519SUlf Hansson 	if (err)
380e21aa519SUlf Hansson 		kfree(ext_csd);
381e21aa519SUlf Hansson 	else
382e21aa519SUlf Hansson 		*new_ext_csd = ext_csd;
383e21aa519SUlf Hansson 
384e21aa519SUlf Hansson 	return err;
385e21aa519SUlf Hansson }
386e21aa519SUlf Hansson EXPORT_SYMBOL_GPL(mmc_get_ext_csd);
387e21aa519SUlf Hansson 
388af517150SDavid Brownell int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp)
389af517150SDavid Brownell {
390c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
391af517150SDavid Brownell 	int err;
392af517150SDavid Brownell 
393af517150SDavid Brownell 	cmd.opcode = MMC_SPI_READ_OCR;
394af517150SDavid Brownell 	cmd.arg = highcap ? (1 << 30) : 0;
395af517150SDavid Brownell 	cmd.flags = MMC_RSP_SPI_R3;
396af517150SDavid Brownell 
397af517150SDavid Brownell 	err = mmc_wait_for_cmd(host, &cmd, 0);
398af517150SDavid Brownell 
399af517150SDavid Brownell 	*ocrp = cmd.resp[1];
400af517150SDavid Brownell 	return err;
401af517150SDavid Brownell }
402af517150SDavid Brownell 
403af517150SDavid Brownell int mmc_spi_set_crc(struct mmc_host *host, int use_crc)
404af517150SDavid Brownell {
405c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
406af517150SDavid Brownell 	int err;
407af517150SDavid Brownell 
408af517150SDavid Brownell 	cmd.opcode = MMC_SPI_CRC_ON_OFF;
409af517150SDavid Brownell 	cmd.flags = MMC_RSP_SPI_R1;
410af517150SDavid Brownell 	cmd.arg = use_crc;
411af517150SDavid Brownell 
412af517150SDavid Brownell 	err = mmc_wait_for_cmd(host, &cmd, 0);
413af517150SDavid Brownell 	if (!err)
414af517150SDavid Brownell 		host->use_spi_crc = use_crc;
415af517150SDavid Brownell 	return err;
416af517150SDavid Brownell }
417af517150SDavid Brownell 
41820348d19SUlf Hansson static int mmc_switch_status_error(struct mmc_host *host, u32 status)
419ed16f58dSAdrian Hunter {
420ed16f58dSAdrian Hunter 	if (mmc_host_is_spi(host)) {
421ed16f58dSAdrian Hunter 		if (status & R1_SPI_ILLEGAL_COMMAND)
422ed16f58dSAdrian Hunter 			return -EBADMSG;
423ed16f58dSAdrian Hunter 	} else {
424ed16f58dSAdrian Hunter 		if (status & 0xFDFFA000)
425ed16f58dSAdrian Hunter 			pr_warn("%s: unexpected status %#x after switch\n",
426ed16f58dSAdrian Hunter 				mmc_hostname(host), status);
427ed16f58dSAdrian Hunter 		if (status & R1_SWITCH_ERROR)
428ed16f58dSAdrian Hunter 			return -EBADMSG;
429ed16f58dSAdrian Hunter 	}
430ed16f58dSAdrian Hunter 	return 0;
431ed16f58dSAdrian Hunter }
432ed16f58dSAdrian Hunter 
43320348d19SUlf Hansson /* Caller must hold re-tuning */
434ef3d2322SAdrian Hunter int __mmc_switch_status(struct mmc_card *card, bool crc_err_fatal)
43520348d19SUlf Hansson {
43620348d19SUlf Hansson 	u32 status;
43720348d19SUlf Hansson 	int err;
43820348d19SUlf Hansson 
43920348d19SUlf Hansson 	err = mmc_send_status(card, &status);
440ef3d2322SAdrian Hunter 	if (!crc_err_fatal && err == -EILSEQ)
441ef3d2322SAdrian Hunter 		return 0;
44220348d19SUlf Hansson 	if (err)
44320348d19SUlf Hansson 		return err;
44420348d19SUlf Hansson 
44520348d19SUlf Hansson 	return mmc_switch_status_error(card->host, status);
44620348d19SUlf Hansson }
44720348d19SUlf Hansson 
448ef3d2322SAdrian Hunter int mmc_switch_status(struct mmc_card *card)
449ef3d2322SAdrian Hunter {
450ef3d2322SAdrian Hunter 	return __mmc_switch_status(card, true);
451ef3d2322SAdrian Hunter }
452ef3d2322SAdrian Hunter 
/*
 * Wait for the card to stop signalling busy after a MMC_SWITCH command,
 * preferring the host's ->card_busy() hook and falling back to CMD13
 * polling. When neither polling method is allowed/available, simply
 * sleep for the stated timeout. Returns 0 when the card went idle,
 * -ETIMEDOUT if it stayed busy, or a status/switch error.
 */
static int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
			bool send_status, bool retry_crc_err)
{
	struct mmc_host *host = card->host;
	int err;
	unsigned long timeout;
	u32 status = 0;
	bool expired = false;
	bool busy = false;

	/* We have an unspecified cmd timeout, use the fallback value. */
	if (!timeout_ms)
		timeout_ms = MMC_OPS_TIMEOUT_MS;

	/*
	 * In cases when not allowed to poll by using CMD13 or because we aren't
	 * capable of polling by using ->card_busy(), then rely on waiting the
	 * stated timeout to be sufficient.
	 */
	if (!send_status && !host->ops->card_busy) {
		mmc_delay(timeout_ms);
		return 0;
	}

	timeout = jiffies + msecs_to_jiffies(timeout_ms) + 1;
	do {
		/*
		 * Due to the possibility of being preempted while polling,
		 * check the expiration time first.
		 */
		expired = time_after(jiffies, timeout);

		if (host->ops->card_busy) {
			busy = host->ops->card_busy(host);
		} else {
			err = mmc_send_status(card, &status);
			/* Optionally treat CMD13 CRC errors as "still busy". */
			if (retry_crc_err && err == -EILSEQ) {
				busy = true;
			} else if (err) {
				return err;
			} else {
				err = mmc_switch_status_error(host, status);
				if (err)
					return err;
				/* The card is busy while in the PRG state. */
				busy = R1_CURRENT_STATE(status) == R1_STATE_PRG;
			}
		}

		/* Timeout if the device still remains busy. */
		if (expired && busy) {
			pr_err("%s: Card stuck being busy! %s\n",
				mmc_hostname(host), __func__);
			return -ETIMEDOUT;
		}
	} while (busy);

	return 0;
}
511716bdb89SUlf Hansson 
/**
 *	__mmc_switch - modify EXT_CSD register
 *	@card: the MMC card associated with the data transfer
 *	@set: cmd set values
 *	@index: EXT_CSD register index
 *	@value: value to program into EXT_CSD register
 *	@timeout_ms: timeout (ms) for operation performed by register write,
 *                   timeout of zero implies maximum possible timeout
 *	@timing: new timing to change to, or zero to keep the current timing
 *	@use_busy_signal: use the busy signal as response type
 *	@send_status: send status cmd to poll for busy
 *	@retry_crc_err: retry when CRC errors when polling with CMD13 for busy
 *
 *	Modifies the EXT_CSD register for selected card.
 *	Returns 0 on success or a negative errno.
 */
int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
		unsigned int timeout_ms, unsigned char timing,
		bool use_busy_signal, bool send_status,	bool retry_crc_err)
{
	struct mmc_host *host = card->host;
	int err;
	struct mmc_command cmd = {};
	bool use_r1b_resp = use_busy_signal;
	/* Remember the current timing so we can roll back on failure. */
	unsigned char old_timing = host->ios.timing;

	/* Re-tuning must not run while the switch is in flight. */
	mmc_retune_hold(host);

	/*
	 * If the cmd timeout and the max_busy_timeout of the host are both
	 * specified, let's validate them. A failure means we need to prevent
	 * the host from doing hw busy detection, which is done by converting
	 * to a R1 response instead of a R1B.
	 */
	if (timeout_ms && host->max_busy_timeout &&
		(timeout_ms > host->max_busy_timeout))
		use_r1b_resp = false;

	cmd.opcode = MMC_SWITCH;
	cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
		  (index << 16) |
		  (value << 8) |
		  set;
	cmd.flags = MMC_CMD_AC;
	if (use_r1b_resp) {
		cmd.flags |= MMC_RSP_SPI_R1B | MMC_RSP_R1B;
		/*
		 * A busy_timeout of zero means the host can decide to use
		 * whatever value it finds suitable.
		 */
		cmd.busy_timeout = timeout_ms;
	} else {
		cmd.flags |= MMC_RSP_SPI_R1 | MMC_RSP_R1;
	}

	if (index == EXT_CSD_SANITIZE_START)
		cmd.sanitize_busy = true;

	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
	if (err)
		goto out;

	/* No need to check card status in case of unblocking command */
	if (!use_busy_signal)
		goto out;

	/*If SPI or used HW busy detection above, then we don't need to poll. */
	if (((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp) ||
		mmc_host_is_spi(host))
		goto out_tim;

	/* Let's try to poll to find out when the command is completed. */
	err = mmc_poll_for_busy(card, timeout_ms, send_status, retry_crc_err);
	if (err)
		goto out;

out_tim:
	/* Switch to new timing before check switch status. */
	if (timing)
		mmc_set_timing(host, timing);

	if (send_status) {
		err = mmc_switch_status(card);
		/* Restore the old timing if the switch did not take effect. */
		if (err && timing)
			mmc_set_timing(host, old_timing);
	}
out:
	mmc_retune_release(host);

	return err;
}
602950d56acSJaehoon Chung 
603950d56acSJaehoon Chung int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
604950d56acSJaehoon Chung 		unsigned int timeout_ms)
605950d56acSJaehoon Chung {
606aa33ce3cSUlf Hansson 	return __mmc_switch(card, set, index, value, timeout_ms, 0,
607aa33ce3cSUlf Hansson 			true, true, false);
608950d56acSJaehoon Chung }
609d3a8d95dSAndrei Warkentin EXPORT_SYMBOL_GPL(mmc_switch);
610da7fbe58SPierre Ossman 
/*
 * mmc_send_tuning() - execute one tuning command and verify the result
 * @host: host to tune
 * @opcode: tuning command opcode (e.g. CMD19/CMD21)
 * @cmd_error: optional out parameter receiving the raw command error
 *
 * Reads a tuning block and compares it against the reference pattern
 * for the currently configured bus width. Returns 0 on a pattern match,
 * -EINVAL for unsupported bus widths, -ENOMEM on allocation failure,
 * -EIO on a pattern mismatch, or the command/data error.
 */
int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;
	struct mmc_ios *ios = &host->ios;
	const u8 *tuning_block_pattern;
	int size, err = 0;
	u8 *data_buf;

	/* Select the reference pattern matching the configured bus width. */
	if (ios->bus_width == MMC_BUS_WIDTH_8) {
		tuning_block_pattern = tuning_blk_pattern_8bit;
		size = sizeof(tuning_blk_pattern_8bit);
	} else if (ios->bus_width == MMC_BUS_WIDTH_4) {
		tuning_block_pattern = tuning_blk_pattern_4bit;
		size = sizeof(tuning_blk_pattern_4bit);
	} else
		return -EINVAL;

	data_buf = kzalloc(size, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;

	mrq.cmd = &cmd;
	mrq.data = &data;

	cmd.opcode = opcode;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = size;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	/*
	 * According to the tuning specs, Tuning process
	 * is normally shorter 40 executions of CMD19,
	 * and timeout value should be shorter than 150 ms
	 */
	data.timeout_ns = 150 * NSEC_PER_MSEC;

	data.sg = &sg;
	data.sg_len = 1;
	sg_init_one(&sg, data_buf, size);

	mmc_wait_for_req(host, &mrq);

	/* Report the raw command error to the caller, if requested. */
	if (cmd_error)
		*cmd_error = cmd.error;

	if (cmd.error) {
		err = cmd.error;
		goto out;
	}

	if (data.error) {
		err = data.error;
		goto out;
	}

	/* A mismatch against the reference pattern means tuning failed. */
	if (memcmp(data_buf, tuning_block_pattern, size))
		err = -EIO;

out:
	kfree(data_buf);
	return err;
}
EXPORT_SYMBOL_GPL(mmc_send_tuning);
679996903deSMinda Chen 
/**
 * mmc_abort_tuning - send a stop command to end a tuning operation
 * @host: host the tuning command was issued on
 * @opcode: opcode of the tuning command being aborted
 *
 * Issues CMD12 (STOP_TRANSMISSION) only for the eMMC HS200 tuning
 * opcode; for any other opcode it returns 0 without sending anything.
 * Returns the result of mmc_wait_for_cmd().
 */
int mmc_abort_tuning(struct mmc_host *host, u32 opcode)
{
	struct mmc_command cmd = {};

	/*
	 * eMMC specification specifies that CMD12 can be used to stop a tuning
	 * command, but SD specification does not, so do nothing unless it is
	 * eMMC.
	 */
	if (opcode != MMC_SEND_TUNING_BLOCK_HS200)
		return 0;

	cmd.opcode = MMC_STOP_TRANSMISSION;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;

	/*
	 * For drivers that override R1 to R1b, set an arbitrary timeout based
	 * on the tuning timeout i.e. 150ms.
	 */
	cmd.busy_timeout = 150;

	return mmc_wait_for_cmd(host, &cmd, 0);
}
EXPORT_SYMBOL_GPL(mmc_abort_tuning);
704e711f030SAdrian Hunter 
/*
 * Run a single BUS_TEST_W or BUS_TEST_R transfer of @len bytes.
 *
 * For BUS_TEST_W the host sends a known pattern; for BUS_TEST_R the
 * card is expected to return the bitwise inverse of the pattern it
 * previously received, so a read is verified by checking that
 * test_buf XOR data_buf equals 0xff for the compared bytes.
 *
 * Returns 0 on success, -ENOMEM/-EINVAL on setup failure, the
 * command/data error if the transfer failed, or -EIO on a pattern
 * mismatch.
 */
static int
mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
		  u8 len)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;
	u8 *data_buf;
	u8 *test_buf;
	int i, err;
	/* Only the leading byte(s) carry signal; the rest pad the block. */
	static u8 testdata_8bit[8] = { 0x55, 0xaa, 0, 0, 0, 0, 0, 0 };
	static u8 testdata_4bit[4] = { 0x5a, 0, 0, 0 };

	/* dma onto stack is unsafe/nonportable, but callers to this
	 * routine normally provide temporary on-stack buffers ...
	 */
	data_buf = kmalloc(len, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;

	if (len == 8)
		test_buf = testdata_8bit;
	else if (len == 4)
		test_buf = testdata_4bit;
	else {
		pr_err("%s: Invalid bus_width %d\n",
		       mmc_hostname(host), len);
		kfree(data_buf);
		return -EINVAL;
	}

	if (opcode == MMC_BUS_TEST_W)
		memcpy(data_buf, test_buf, len);

	mrq.cmd = &cmd;
	mrq.data = &data;
	cmd.opcode = opcode;
	cmd.arg = 0;

	/* NOTE HACK:  the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID.  Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = len;
	data.blocks = 1;
	if (opcode == MMC_BUS_TEST_R)
		data.flags = MMC_DATA_READ;
	else
		data.flags = MMC_DATA_WRITE;

	data.sg = &sg;
	data.sg_len = 1;
	mmc_set_data_timeout(&data, card);
	sg_init_one(&sg, data_buf, len);
	mmc_wait_for_req(host, &mrq);
	err = 0;
	if (opcode == MMC_BUS_TEST_R) {
		/*
		 * NOTE(review): only len/4 bytes are compared (2 bytes for an
		 * 8-bit bus, 1 byte for a 4-bit bus) — presumably because only
		 * the leading pattern bytes are meaningful; confirm against
		 * the bus-test description in the eMMC specification.
		 */
		for (i = 0; i < len / 4; i++)
			if ((test_buf[i] ^ data_buf[i]) != 0xff) {
				err = -EIO;
				break;
			}
	}
	kfree(data_buf);

	/* Transfer errors take precedence over a pattern mismatch. */
	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return err;
}
78122113efdSAries Lee 
78222113efdSAries Lee int mmc_bus_test(struct mmc_card *card, u8 bus_width)
78322113efdSAries Lee {
7840899e741SMasahiro Yamada 	int width;
78522113efdSAries Lee 
78622113efdSAries Lee 	if (bus_width == MMC_BUS_WIDTH_8)
78722113efdSAries Lee 		width = 8;
78822113efdSAries Lee 	else if (bus_width == MMC_BUS_WIDTH_4)
78922113efdSAries Lee 		width = 4;
79022113efdSAries Lee 	else if (bus_width == MMC_BUS_WIDTH_1)
79122113efdSAries Lee 		return 0; /* no need for test */
79222113efdSAries Lee 	else
79322113efdSAries Lee 		return -EINVAL;
79422113efdSAries Lee 
79522113efdSAries Lee 	/*
79622113efdSAries Lee 	 * Ignore errors from BUS_TEST_W.  BUS_TEST_R will fail if there
79722113efdSAries Lee 	 * is a problem.  This improves chances that the test will work.
79822113efdSAries Lee 	 */
79922113efdSAries Lee 	mmc_send_bus_test(card, card->host, MMC_BUS_TEST_W, width);
8000899e741SMasahiro Yamada 	return mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
80122113efdSAries Lee }
802eb0d8f13SJaehoon Chung 
803eb0d8f13SJaehoon Chung int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status)
804eb0d8f13SJaehoon Chung {
805c7836d15SMasahiro Yamada 	struct mmc_command cmd = {};
806eb0d8f13SJaehoon Chung 	unsigned int opcode;
807eb0d8f13SJaehoon Chung 	int err;
808eb0d8f13SJaehoon Chung 
8092378975bSJaehoon Chung 	if (!card->ext_csd.hpi) {
8106606110dSJoe Perches 		pr_warn("%s: Card didn't support HPI command\n",
8112378975bSJaehoon Chung 			mmc_hostname(card->host));
8122378975bSJaehoon Chung 		return -EINVAL;
8132378975bSJaehoon Chung 	}
8142378975bSJaehoon Chung 
815eb0d8f13SJaehoon Chung 	opcode = card->ext_csd.hpi_cmd;
816eb0d8f13SJaehoon Chung 	if (opcode == MMC_STOP_TRANSMISSION)
8172378975bSJaehoon Chung 		cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
818eb0d8f13SJaehoon Chung 	else if (opcode == MMC_SEND_STATUS)
8192378975bSJaehoon Chung 		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
820eb0d8f13SJaehoon Chung 
821eb0d8f13SJaehoon Chung 	cmd.opcode = opcode;
822eb0d8f13SJaehoon Chung 	cmd.arg = card->rca << 16 | 1;
823eb0d8f13SJaehoon Chung 
824eb0d8f13SJaehoon Chung 	err = mmc_wait_for_cmd(card->host, &cmd, 0);
825eb0d8f13SJaehoon Chung 	if (err) {
826eb0d8f13SJaehoon Chung 		pr_warn("%s: error %d interrupting operation. "
827eb0d8f13SJaehoon Chung 			"HPI command response %#x\n", mmc_hostname(card->host),
828eb0d8f13SJaehoon Chung 			err, cmd.resp[0]);
829eb0d8f13SJaehoon Chung 		return err;
830eb0d8f13SJaehoon Chung 	}
831eb0d8f13SJaehoon Chung 	if (status)
832eb0d8f13SJaehoon Chung 		*status = cmd.resp[0];
833eb0d8f13SJaehoon Chung 
834eb0d8f13SJaehoon Chung 	return 0;
835eb0d8f13SJaehoon Chung }
836148bcab2SUlf Hansson 
837148bcab2SUlf Hansson int mmc_can_ext_csd(struct mmc_card *card)
838148bcab2SUlf Hansson {
839148bcab2SUlf Hansson 	return (card && card->csd.mmca_vsn > CSD_SPEC_VER_3);
840148bcab2SUlf Hansson }
841b658af71SAdrian Hunter 
842b658af71SAdrian Hunter static int mmc_cmdq_switch(struct mmc_card *card, bool enable)
843b658af71SAdrian Hunter {
844b658af71SAdrian Hunter 	u8 val = enable ? EXT_CSD_CMDQ_MODE_ENABLED : 0;
845b658af71SAdrian Hunter 	int err;
846b658af71SAdrian Hunter 
847b658af71SAdrian Hunter 	if (!card->ext_csd.cmdq_support)
848b658af71SAdrian Hunter 		return -EOPNOTSUPP;
849b658af71SAdrian Hunter 
850b658af71SAdrian Hunter 	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_CMDQ_MODE_EN,
851b658af71SAdrian Hunter 			 val, card->ext_csd.generic_cmd6_time);
852b658af71SAdrian Hunter 	if (!err)
853b658af71SAdrian Hunter 		card->ext_csd.cmdq_en = enable;
854b658af71SAdrian Hunter 
855b658af71SAdrian Hunter 	return err;
856b658af71SAdrian Hunter }
857b658af71SAdrian Hunter 
/**
 * mmc_cmdq_enable - switch the card's command queue engine on
 * @card: card to enable CMDQ for
 */
int mmc_cmdq_enable(struct mmc_card *card)
{
	return mmc_cmdq_switch(card, true);
}
EXPORT_SYMBOL_GPL(mmc_cmdq_enable);
863b658af71SAdrian Hunter 
/**
 * mmc_cmdq_disable - switch the card's command queue engine off
 * @card: card to disable CMDQ for
 */
int mmc_cmdq_disable(struct mmc_card *card)
{
	return mmc_cmdq_switch(card, false);
}
EXPORT_SYMBOL_GPL(mmc_cmdq_disable);
869