xref: /openbmc/linux/drivers/mmc/core/mmc_ops.c (revision a61ad2b4)
1 /*
2  *  linux/drivers/mmc/core/mmc_ops.h
3  *
4  *  Copyright 2006-2007 Pierre Ossman
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation; either version 2 of the License, or (at
9  * your option) any later version.
10  */
11 
12 #include <linux/slab.h>
13 #include <linux/types.h>
14 #include <linux/scatterlist.h>
15 
16 #include <linux/mmc/host.h>
17 #include <linux/mmc/card.h>
18 #include <linux/mmc/mmc.h>
19 
20 #include "core.h"
21 #include "mmc_ops.h"
22 
23 static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card)
24 {
25 	int err;
26 	struct mmc_command cmd = {0};
27 
28 	BUG_ON(!host);
29 
30 	cmd.opcode = MMC_SELECT_CARD;
31 
32 	if (card) {
33 		cmd.arg = card->rca << 16;
34 		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
35 	} else {
36 		cmd.arg = 0;
37 		cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
38 	}
39 
40 	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
41 	if (err)
42 		return err;
43 
44 	return 0;
45 }
46 
47 int mmc_select_card(struct mmc_card *card)
48 {
49 	BUG_ON(!card);
50 
51 	return _mmc_select_card(card->host, card);
52 }
53 
54 int mmc_deselect_cards(struct mmc_host *host)
55 {
56 	return _mmc_select_card(host, NULL);
57 }
58 
/*
 * Issue CMD5 (SLEEP_AWAKE) to move the card between the Standby and
 * Sleep states.  @sleep non-zero puts the card to sleep (deselecting
 * it first, as required before CMD5); @sleep zero awakens the card and
 * re-selects it afterwards.  Returns 0 or a negative error code.
 */
int mmc_card_sleepawake(struct mmc_host *host, int sleep)
{
	struct mmc_command cmd = {0};
	struct mmc_card *card = host->card;
	int err;

	/* The card must be deselected (Standby state) before sleeping. */
	if (sleep)
		mmc_deselect_cards(host);

	cmd.opcode = MMC_SLEEP_AWAKE;
	cmd.arg = card->rca << 16;
	if (sleep)
		cmd.arg |= 1 << 15;	/* bit 15 set = sleep, clear = awake */

	cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
	err = mmc_wait_for_cmd(host, &cmd, 0);
	if (err)
		return err;

	/*
	 * If the host does not wait while the card signals busy, then we
	 * will have to wait the sleep/awake timeout.  Note, we cannot use the
	 * SEND_STATUS command to poll the status because that command (and most
	 * others) is invalid while the card sleeps.
	 */
	if (!(host->caps & MMC_CAP_WAIT_WHILE_BUSY))
		mmc_delay(DIV_ROUND_UP(card->ext_csd.sa_timeout, 10000));

	/* When waking up, put the card back into Transfer state. */
	if (!sleep)
		err = mmc_select_card(card);

	return err;
}
92 
/*
 * Issue CMD0 (GO_IDLE_STATE) to reset all cards on @host to the idle
 * state.  Also resets the SPI CRC bookkeeping, since a freshly reset
 * card starts with CRC checking disabled.
 */
int mmc_go_idle(struct mmc_host *host)
{
	int err;
	struct mmc_command cmd = {0};

	/*
	 * Non-SPI hosts need to prevent chipselect going active during
	 * GO_IDLE; that would put chips into SPI mode.  Remind them of
	 * that in case of hardware that won't pull up DAT3/nCS otherwise.
	 *
	 * SPI hosts ignore ios.chip_select; it's managed according to
	 * rules that must accommodate non-MMC slaves which this layer
	 * won't even know about.
	 */
	if (!mmc_host_is_spi(host)) {
		mmc_set_chip_select(host, MMC_CS_HIGH);
		mmc_delay(1);	/* let the CS line settle before CMD0 */
	}

	cmd.opcode = MMC_GO_IDLE_STATE;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC;

	err = mmc_wait_for_cmd(host, &cmd, 0);

	mmc_delay(1);	/* give cards time to complete the reset */

	/* Restore chip-select handling on non-SPI hosts. */
	if (!mmc_host_is_spi(host)) {
		mmc_set_chip_select(host, MMC_CS_DONTCARE);
		mmc_delay(1);
	}

	/* Reset disables CRC on SPI-mode cards; forget any prior setting. */
	host->use_spi_crc = 0;

	return err;
}
129 
/*
 * Issue CMD1 (SEND_OP_COND), polling until the card reports that its
 * power-up/reset sequence is complete.  @ocr == 0 probes the card's
 * OCR without starting initialization (a single pass).  On non-SPI
 * hosts the card's OCR response is stored via @rocr when non-NULL.
 * Returns 0, -ETIMEDOUT if the card never left busy, or a command error.
 */
int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
{
	struct mmc_command cmd = {0};
	int i, err = 0;

	BUG_ON(!host);

	cmd.opcode = MMC_SEND_OP_COND;
	/* SPI mode: CMD1 takes no OCR argument. */
	cmd.arg = mmc_host_is_spi(host) ? 0 : ocr;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;

	/* Up to 100 polls, 10ms apart: ~1s worst-case init wait. */
	for (i = 100; i; i--) {
		err = mmc_wait_for_cmd(host, &cmd, 0);
		if (err)
			break;

		/* if we're just probing, do a single pass */
		if (ocr == 0)
			break;

		/* otherwise wait until reset completes */
		if (mmc_host_is_spi(host)) {
			/* SPI: in-idle bit clears when init is done. */
			if (!(cmd.resp[0] & R1_SPI_IDLE))
				break;
		} else {
			/* Native: busy bit SET means init is done. */
			if (cmd.resp[0] & MMC_CARD_BUSY)
				break;
		}

		err = -ETIMEDOUT;	/* overwritten on the next pass */

		mmc_delay(10);
	}

	/* SPI R1 responses don't carry the OCR, so only report it native. */
	if (rocr && !mmc_host_is_spi(host))
		*rocr = cmd.resp[0];

	return err;
}
169 
170 int mmc_all_send_cid(struct mmc_host *host, u32 *cid)
171 {
172 	int err;
173 	struct mmc_command cmd = {0};
174 
175 	BUG_ON(!host);
176 	BUG_ON(!cid);
177 
178 	cmd.opcode = MMC_ALL_SEND_CID;
179 	cmd.arg = 0;
180 	cmd.flags = MMC_RSP_R2 | MMC_CMD_BCR;
181 
182 	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
183 	if (err)
184 		return err;
185 
186 	memcpy(cid, cmd.resp, sizeof(u32) * 4);
187 
188 	return 0;
189 }
190 
191 int mmc_set_relative_addr(struct mmc_card *card)
192 {
193 	int err;
194 	struct mmc_command cmd = {0};
195 
196 	BUG_ON(!card);
197 	BUG_ON(!card->host);
198 
199 	cmd.opcode = MMC_SET_RELATIVE_ADDR;
200 	cmd.arg = card->rca << 16;
201 	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
202 
203 	err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
204 	if (err)
205 		return err;
206 
207 	return 0;
208 }
209 
210 static int
211 mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode)
212 {
213 	int err;
214 	struct mmc_command cmd = {0};
215 
216 	BUG_ON(!host);
217 	BUG_ON(!cxd);
218 
219 	cmd.opcode = opcode;
220 	cmd.arg = arg;
221 	cmd.flags = MMC_RSP_R2 | MMC_CMD_AC;
222 
223 	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
224 	if (err)
225 		return err;
226 
227 	memcpy(cxd, cmd.resp, sizeof(u32) * 4);
228 
229 	return 0;
230 }
231 
232 static int
233 mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
234 		u32 opcode, void *buf, unsigned len)
235 {
236 	struct mmc_request mrq;
237 	struct mmc_command cmd = {0};
238 	struct mmc_data data = {0};
239 	struct scatterlist sg;
240 	void *data_buf;
241 
242 	/* dma onto stack is unsafe/nonportable, but callers to this
243 	 * routine normally provide temporary on-stack buffers ...
244 	 */
245 	data_buf = kmalloc(len, GFP_KERNEL);
246 	if (data_buf == NULL)
247 		return -ENOMEM;
248 
249 	memset(&mrq, 0, sizeof(struct mmc_request));
250 
251 	mrq.cmd = &cmd;
252 	mrq.data = &data;
253 
254 	cmd.opcode = opcode;
255 	cmd.arg = 0;
256 
257 	/* NOTE HACK:  the MMC_RSP_SPI_R1 is always correct here, but we
258 	 * rely on callers to never use this with "native" calls for reading
259 	 * CSD or CID.  Native versions of those commands use the R2 type,
260 	 * not R1 plus a data block.
261 	 */
262 	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
263 
264 	data.blksz = len;
265 	data.blocks = 1;
266 	data.flags = MMC_DATA_READ;
267 	data.sg = &sg;
268 	data.sg_len = 1;
269 
270 	sg_init_one(&sg, data_buf, len);
271 
272 	if (opcode == MMC_SEND_CSD || opcode == MMC_SEND_CID) {
273 		/*
274 		 * The spec states that CSR and CID accesses have a timeout
275 		 * of 64 clock cycles.
276 		 */
277 		data.timeout_ns = 0;
278 		data.timeout_clks = 64;
279 	} else
280 		mmc_set_data_timeout(&data, card);
281 
282 	mmc_wait_for_req(host, &mrq);
283 
284 	memcpy(buf, data_buf, len);
285 	kfree(data_buf);
286 
287 	if (cmd.error)
288 		return cmd.error;
289 	if (data.error)
290 		return data.error;
291 
292 	return 0;
293 }
294 
295 int mmc_send_csd(struct mmc_card *card, u32 *csd)
296 {
297 	int ret, i;
298 
299 	if (!mmc_host_is_spi(card->host))
300 		return mmc_send_cxd_native(card->host, card->rca << 16,
301 				csd, MMC_SEND_CSD);
302 
303 	ret = mmc_send_cxd_data(card, card->host, MMC_SEND_CSD, csd, 16);
304 	if (ret)
305 		return ret;
306 
307 	for (i = 0;i < 4;i++)
308 		csd[i] = be32_to_cpu(csd[i]);
309 
310 	return 0;
311 }
312 
313 int mmc_send_cid(struct mmc_host *host, u32 *cid)
314 {
315 	int ret, i;
316 
317 	if (!mmc_host_is_spi(host)) {
318 		if (!host->card)
319 			return -EINVAL;
320 		return mmc_send_cxd_native(host, host->card->rca << 16,
321 				cid, MMC_SEND_CID);
322 	}
323 
324 	ret = mmc_send_cxd_data(NULL, host, MMC_SEND_CID, cid, 16);
325 	if (ret)
326 		return ret;
327 
328 	for (i = 0;i < 4;i++)
329 		cid[i] = be32_to_cpu(cid[i]);
330 
331 	return 0;
332 }
333 
334 int mmc_send_ext_csd(struct mmc_card *card, u8 *ext_csd)
335 {
336 	return mmc_send_cxd_data(card, card->host, MMC_SEND_EXT_CSD,
337 			ext_csd, 512);
338 }
339 
340 int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp)
341 {
342 	struct mmc_command cmd = {0};
343 	int err;
344 
345 	cmd.opcode = MMC_SPI_READ_OCR;
346 	cmd.arg = highcap ? (1 << 30) : 0;
347 	cmd.flags = MMC_RSP_SPI_R3;
348 
349 	err = mmc_wait_for_cmd(host, &cmd, 0);
350 
351 	*ocrp = cmd.resp[1];
352 	return err;
353 }
354 
355 int mmc_spi_set_crc(struct mmc_host *host, int use_crc)
356 {
357 	struct mmc_command cmd = {0};
358 	int err;
359 
360 	cmd.opcode = MMC_SPI_CRC_ON_OFF;
361 	cmd.flags = MMC_RSP_SPI_R1;
362 	cmd.arg = use_crc;
363 
364 	err = mmc_wait_for_cmd(host, &cmd, 0);
365 	if (!err)
366 		host->use_spi_crc = use_crc;
367 	return err;
368 }
369 
370 /**
371  *	mmc_switch - modify EXT_CSD register
372  *	@card: the MMC card associated with the data transfer
373  *	@set: cmd set values
374  *	@index: EXT_CSD register index
375  *	@value: value to program into EXT_CSD register
376  *	@timeout_ms: timeout (ms) for operation performed by register write,
377  *                   timeout of zero implies maximum possible timeout
378  *
379  *	Modifies the EXT_CSD register for selected card.
380  */
381 int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
382 	       unsigned int timeout_ms)
383 {
384 	int err;
385 	struct mmc_command cmd = {0};
386 	u32 status;
387 
388 	BUG_ON(!card);
389 	BUG_ON(!card->host);
390 
391 	cmd.opcode = MMC_SWITCH;
392 	cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
393 		  (index << 16) |
394 		  (value << 8) |
395 		  set;
396 	cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
397 	cmd.cmd_timeout_ms = timeout_ms;
398 
399 	err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
400 	if (err)
401 		return err;
402 
403 	/* Must check status to be sure of no errors */
404 	do {
405 		err = mmc_send_status(card, &status);
406 		if (err)
407 			return err;
408 		if (card->host->caps & MMC_CAP_WAIT_WHILE_BUSY)
409 			break;
410 		if (mmc_host_is_spi(card->host))
411 			break;
412 	} while (R1_CURRENT_STATE(status) == 7);
413 
414 	if (mmc_host_is_spi(card->host)) {
415 		if (status & R1_SPI_ILLEGAL_COMMAND)
416 			return -EBADMSG;
417 	} else {
418 		if (status & 0xFDFFA000)
419 			printk(KERN_WARNING "%s: unexpected status %#x after "
420 			       "switch", mmc_hostname(card->host), status);
421 		if (status & R1_SWITCH_ERROR)
422 			return -EBADMSG;
423 	}
424 
425 	return 0;
426 }
427 EXPORT_SYMBOL_GPL(mmc_switch);
428 
429 int mmc_send_status(struct mmc_card *card, u32 *status)
430 {
431 	int err;
432 	struct mmc_command cmd = {0};
433 
434 	BUG_ON(!card);
435 	BUG_ON(!card->host);
436 
437 	cmd.opcode = MMC_SEND_STATUS;
438 	if (!mmc_host_is_spi(card->host))
439 		cmd.arg = card->rca << 16;
440 	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
441 
442 	err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
443 	if (err)
444 		return err;
445 
446 	/* NOTE: callers are required to understand the difference
447 	 * between "native" and SPI format status words!
448 	 */
449 	if (status)
450 		*status = cmd.resp[0];
451 
452 	return 0;
453 }
454 
/*
 * Run one half of the CMD19/CMD14 bus test: BUS_TEST_W writes a fixed
 * test pattern on @len data lines; BUS_TEST_R reads back the card's
 * reply, which is expected to be the bitwise inverse of the pattern
 * (each compared byte must XOR to 0xff).  @len must be 4 or 8.
 * Returns 0 on success, -EIO on pattern mismatch, -EINVAL for a bad
 * width, -ENOMEM, or a command/data error.
 */
static int
mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
		  u8 len)
{
	struct mmc_request mrq;
	struct mmc_command cmd = {0};
	struct mmc_data data = {0};
	struct scatterlist sg;
	u8 *data_buf;
	u8 *test_buf;
	int i, err;
	static u8 testdata_8bit[8] = { 0x55, 0xaa, 0, 0, 0, 0, 0, 0 };
	static u8 testdata_4bit[4] = { 0x5a, 0, 0, 0 };

	/* dma onto stack is unsafe/nonportable, but callers to this
	 * routine normally provide temporary on-stack buffers ...
	 */
	data_buf = kmalloc(len, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;

	/* Pick the pattern matching the bus width under test. */
	if (len == 8)
		test_buf = testdata_8bit;
	else if (len == 4)
		test_buf = testdata_4bit;
	else {
		printk(KERN_ERR "%s: Invalid bus_width %d\n",
		       mmc_hostname(host), len);
		kfree(data_buf);
		return -EINVAL;
	}

	if (opcode == MMC_BUS_TEST_W)
		memcpy(data_buf, test_buf, len);

	memset(&mrq, 0, sizeof(struct mmc_request));

	mrq.cmd = &cmd;
	mrq.data = &data;
	cmd.opcode = opcode;
	cmd.arg = 0;

	/* NOTE HACK:  the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID.  Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = len;
	data.blocks = 1;
	if (opcode == MMC_BUS_TEST_R)
		data.flags = MMC_DATA_READ;
	else
		data.flags = MMC_DATA_WRITE;

	data.sg = &sg;
	data.sg_len = 1;
	sg_init_one(&sg, data_buf, len);
	mmc_wait_for_req(host, &mrq);
	err = 0;
	if (opcode == MMC_BUS_TEST_R) {
		/*
		 * Only the first len/4 bytes carry the pattern; each must be
		 * the exact bit-inverse of what BUS_TEST_W sent.
		 */
		for (i = 0; i < len / 4; i++)
			if ((test_buf[i] ^ data_buf[i]) != 0xff) {
				err = -EIO;
				break;
			}
	}
	kfree(data_buf);

	/* Command/data transport errors take precedence over -EIO. */
	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return err;
}
532 
533 int mmc_bus_test(struct mmc_card *card, u8 bus_width)
534 {
535 	int err, width;
536 
537 	if (bus_width == MMC_BUS_WIDTH_8)
538 		width = 8;
539 	else if (bus_width == MMC_BUS_WIDTH_4)
540 		width = 4;
541 	else if (bus_width == MMC_BUS_WIDTH_1)
542 		return 0; /* no need for test */
543 	else
544 		return -EINVAL;
545 
546 	/*
547 	 * Ignore errors from BUS_TEST_W.  BUS_TEST_R will fail if there
548 	 * is a problem.  This improves chances that the test will work.
549 	 */
550 	mmc_send_bus_test(card, card->host, MMC_BUS_TEST_W, width);
551 	err = mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
552 	return err;
553 }
554