/*
 * mmc_spi.c - Access SD/MMC cards through SPI master controllers
 *
 * (C) Copyright 2005, Intec Automation,
 *		Mike Lavender (mike@steroidmicros)
 * (C) Copyright 2006-2007, David Brownell
 * (C) Copyright 2007, Axis Communications,
 *		Hans-Peter Nilsson (hp@axis.com)
 * (C) Copyright 2007, ATRON electronic GmbH,
 *		Jan Nikitenko <jan.nikitenko@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/dma-mapping.h>
#include <linux/crc7.h>
#include <linux/crc-itu-t.h>
#include <linux/scatterlist.h>

#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>		/* for R1_SPI_* bit values */

#include <linux/spi/spi.h>
#include <linux/spi/mmc_spi.h>

#include <asm/unaligned.h>

/* NOTES:
 *
 * - For now, we won't try to interoperate with a real mmc/sd/sdio
 *   controller, although some of them do have hardware support for
 *   SPI protocol.  The main reason for such configs would be mmc-ish
 *   cards like DataFlash, which don't support that "native" protocol.
 *
 *   We don't have a "DataFlash/MMC/SD/SDIO card slot" abstraction to
 *   switch between driver stacks, and in any case if "native" mode
 *   is available, it will be faster and hence preferable.
 *
 * - MMC depends on a different chipselect management policy than the
 *   SPI interface currently supports for shared bus segments:  it needs
 *   to issue multiple spi_message requests with the chipselect active,
 *   using the results of one message to decide the next one to issue.
 *
 *   Pending updates to the programming interface, this driver expects
 *   that it not share the bus with other drivers (precluding conflicts).
 *
 * - We tell the controller to keep the chipselect active from the
 *   beginning of an mmc_host_ops.request until the end.  So beware
 *   of SPI controller drivers that mis-handle the cs_change flag!
 *
 *   However, many cards seem OK with chipselect flapping up/down
 *   during that time ... at least on unshared bus segments.
 */


/*
 * Local protocol constants, internal to data block protocols.
 */

/* Response tokens used to ack each block written: */
#define SPI_MMC_RESPONSE_CODE(x)	((x) & 0x1f)
#define SPI_RESPONSE_ACCEPTED		((2 << 1)|1)
#define SPI_RESPONSE_CRC_ERR		((5 << 1)|1)
#define SPI_RESPONSE_WRITE_ERR		((6 << 1)|1)
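/* For illustration: the data-response token has the shape "xxx0sss1",
 * so after SPI_MMC_RESPONSE_CODE() masking, "accepted" is
 * (2 << 1)|1 == 0b00101 == 0x05 -- the low five bits of the 0xe5
 * byte commonly seen on the wire.
 */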

/* Read and write blocks start with these tokens and end with crc;
 * on error, read tokens act like a subset of R2_SPI_* values.
 */
#define SPI_TOKEN_SINGLE	0xfe	/* single block r/w, multiblock read */
#define SPI_TOKEN_MULTI_WRITE	0xfc	/* multiblock write */
#define SPI_TOKEN_STOP_TRAN	0xfd	/* terminate multiblock write */

#define MMC_SPI_BLOCKSIZE	512


/* These fixed timeouts come from the latest SD specs, which say to ignore
 * the CSD values.  The R1B value is for card erase (e.g. the "I forgot the
 * card's password" scenario); it's mostly applied to STOP_TRANSMISSION after
 * reads, which take nowhere near that long.  Older cards may be able to use
 * shorter timeouts ... but why bother?
 */
#define r1b_timeout		(HZ * 3)

/* One of the critical speed parameters is the amount of data that may
 * be transferred in one command.  If this value is too low, the SD card
 * controller has to do multiple partial block writes (argggh!).  With
 * today's (2008) SD cards there is little speed gain if we transfer more
 * than 64 KBytes at a time.  So use this value until there is an
 * indication that we should do more here.
 */
#define MMC_SPI_BLOCKSATONCE	128
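/* 128 blocks of 512 bytes each: the 64 KByte ceiling mentioned above */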

/****************************************************************************/

/*
 * Local Data Structures
 */

/* "scratch" is per-{command,block} data exchanged with the card */
struct scratch {
	u8			status[29];
	u8			data_token;
	__be16			crc_val;
};
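/* The layout above packs into 32 bytes (29 + 1 + 2), and status[] is
 * comfortably larger than the 21-byte worst-case command exchange
 * described in mmc_spi_command_send() below.
 */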

struct mmc_spi_host {
	struct mmc_host		*mmc;
	struct spi_device	*spi;

	unsigned char		power_mode;
	u16			powerup_msecs;

	struct mmc_spi_platform_data	*pdata;

	/* for bulk data transfers */
	struct spi_transfer	token, t, crc, early_status;
	struct spi_message	m;

	/* for status readback */
	struct spi_transfer	status;
	struct spi_message	readback;

	/* underlying DMA-aware controller, or null */
	struct device		*dma_dev;

	/* buffer used for commands and for message "overhead" */
	struct scratch		*data;
	dma_addr_t		data_dma;

	/* Specs say to write ones most of the time, even when the card
	 * has no need to read its input data; and many cards won't care.
	 * This is our source of those ones.
	 */
	void			*ones;
	dma_addr_t		ones_dma;
};


/****************************************************************************/

/*
 * MMC-over-SPI protocol glue, used by the MMC stack interface
 */

static inline int mmc_cs_off(struct mmc_spi_host *host)
{
	/* chipselect will always be inactive after setup() */
	return spi_setup(host->spi);
}

static int
mmc_spi_readbytes(struct mmc_spi_host *host, unsigned len)
{
	int status;

	if (len > sizeof(*host->data)) {
		WARN_ON(1);
		return -EIO;
	}

	host->status.len = len;

	if (host->dma_dev)
		dma_sync_single_for_device(host->dma_dev,
				host->data_dma, sizeof(*host->data),
				DMA_FROM_DEVICE);

	status = spi_sync_locked(host->spi, &host->readback);

	if (host->dma_dev)
		dma_sync_single_for_cpu(host->dma_dev,
				host->data_dma, sizeof(*host->data),
				DMA_FROM_DEVICE);

	return status;
}

static int mmc_spi_skip(struct mmc_spi_host *host, unsigned long timeout,
			unsigned n, u8 byte)
{
	u8		*cp = host->data->status;
	unsigned long start = jiffies;

	while (1) {
		int		status;
		unsigned	i;

		status = mmc_spi_readbytes(host, n);
		if (status < 0)
			return status;

		for (i = 0; i < n; i++) {
			if (cp[i] != byte)
				return cp[i];
		}

		if (time_is_before_jiffies(start + timeout))
			break;

		/* If we need long timeouts, we may release the CPU.
		 * We use jiffies so that yielding is tied to elapsed
		 * time rather than to the number of polling loops.
		 */
		if (time_is_before_jiffies(start+1))
			schedule();
	}
	return -ETIMEDOUT;
}

static inline int
mmc_spi_wait_unbusy(struct mmc_spi_host *host, unsigned long timeout)
{
	return mmc_spi_skip(host, timeout, sizeof(host->data->status), 0);
}

static int mmc_spi_readtoken(struct mmc_spi_host *host, unsigned long timeout)
{
	return mmc_spi_skip(host, timeout, 1, 0xff);
}


/*
 * Note that for SPI, cmd->resp[0] is not the same data as "native" protocol
 * hosts return!  The low byte holds R1_SPI bits.  The next byte may hold
 * R2_SPI bits ... for SEND_STATUS, or after data read errors.
 *
 * cmd->resp[1] holds any four-byte response, for R3 (READ_OCR) and on
 * newer cards R7 (IF_COND).
 */

static const char *maptype(struct mmc_command *cmd)
{
	switch (mmc_spi_resp_type(cmd)) {
	case MMC_RSP_SPI_R1:	return "R1";
	case MMC_RSP_SPI_R1B:	return "R1B";
	case MMC_RSP_SPI_R2:	return "R2/R5";
	case MMC_RSP_SPI_R3:	return "R3/R4/R7";
	default:		return "?";
	}
}

/* return zero, else negative errno after setting cmd->error */
static int mmc_spi_response_get(struct mmc_spi_host *host,
		struct mmc_command *cmd, int cs_on)
{
	u8	*cp = host->data->status;
	u8	*end = cp + host->t.len;
	int	value = 0;
	int	bitshift;
	u8	leftover = 0;
	unsigned short rotator;
	int	i;
	char	tag[32];

	snprintf(tag, sizeof(tag), "  ... CMD%d response SPI_%s",
		cmd->opcode, maptype(cmd));

	/* Except for data block reads, the whole response will already
	 * be stored in the scratch buffer.  It's somewhere after the
	 * command and the first byte we read after it.  We ignore that
	 * first byte.  After STOP_TRANSMISSION command it may include
	 * two data bits, but otherwise it's all ones.
	 */
	cp += 8;
	while (cp < end && *cp == 0xff)
		cp++;

	/* Data block reads (R1 response types) may need more data... */
	if (cp == end) {
		cp = host->data->status;
		end = cp+1;

		/* Card sends N(CR) (== 1..8) bytes of all-ones then one
		 * status byte ... and we already scanned 2 bytes.
		 *
		 * REVISIT block read paths use nasty byte-at-a-time I/O
		 * so it can always DMA directly into the target buffer.
		 * It'd probably be better to memcpy() the first chunk and
		 * avoid extra i/o calls...
		 *
		 * Note we check for more than 8 bytes, because in practice,
		 * some SD cards are slow...
		 */
		for (i = 2; i < 16; i++) {
			value = mmc_spi_readbytes(host, 1);
			if (value < 0)
				goto done;
			if (*cp != 0xff)
				goto checkstatus;
		}
		value = -ETIMEDOUT;
		goto done;
	}

checkstatus:
	bitshift = 0;
	if (*cp & 0x80) {
		/* Houston, we have an ugly card with a bit-shifted response */
		rotator = *cp++ << 8;
		/* read the next byte */
		if (cp == end) {
			value = mmc_spi_readbytes(host, 1);
			if (value < 0)
				goto done;
			cp = host->data->status;
			end = cp+1;
		}
		rotator |= *cp++;
		while (rotator & 0x8000) {
			bitshift++;
			rotator <<= 1;
		}
		cmd->resp[0] = rotator >> 8;
		leftover = rotator;
	} else {
		cmd->resp[0] = *cp++;
	}
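	/*
	 * Worked example of the bit-shifted case: an R1 of 0x05
	 * arriving two bits late reads as 0xc1 0x7f.  rotator starts
	 * as 0xc17f; shifting out the two leading ones leaves 0x05fc,
	 * so resp[0] == 0x05, bitshift == 2, and leftover == 0xfc
	 * carries bits for any later response bytes.
	 */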
	cmd->error = 0;

	/* Status byte: the entire seven-bit R1 response.  */
	if (cmd->resp[0] != 0) {
		if ((R1_SPI_PARAMETER | R1_SPI_ADDRESS)
				& cmd->resp[0])
			value = -EFAULT; /* Bad address */
		else if (R1_SPI_ILLEGAL_COMMAND & cmd->resp[0])
			value = -ENOSYS; /* Function not implemented */
		else if (R1_SPI_COM_CRC & cmd->resp[0])
			value = -EILSEQ; /* Illegal byte sequence */
		else if ((R1_SPI_ERASE_SEQ | R1_SPI_ERASE_RESET)
				& cmd->resp[0])
			value = -EIO;    /* I/O error */
		/* else R1_SPI_IDLE, "it's resetting" */
	}

	switch (mmc_spi_resp_type(cmd)) {

	/* SPI R1B == R1 + busy; STOP_TRANSMISSION (for multiblock reads)
	 * and less-common stuff like various erase operations.
	 */
	case MMC_RSP_SPI_R1B:
		/* maybe we read all the busy tokens already */
		while (cp < end && *cp == 0)
			cp++;
		if (cp == end)
			mmc_spi_wait_unbusy(host, r1b_timeout);
		break;

	/* SPI R2 == R1 + second status byte; SEND_STATUS
	 * SPI R5 == R1 + data byte; IO_RW_DIRECT
	 */
	case MMC_RSP_SPI_R2:
		/* read the next byte */
		if (cp == end) {
			value = mmc_spi_readbytes(host, 1);
			if (value < 0)
				goto done;
			cp = host->data->status;
			end = cp+1;
		}
		if (bitshift) {
			rotator = leftover << 8;
			rotator |= *cp << bitshift;
			cmd->resp[0] |= (rotator & 0xFF00);
		} else {
			cmd->resp[0] |= *cp << 8;
		}
		break;

	/* SPI R3, R4, or R7 == R1 + 4 bytes */
	case MMC_RSP_SPI_R3:
		rotator = leftover << 8;
		cmd->resp[1] = 0;
		for (i = 0; i < 4; i++) {
			cmd->resp[1] <<= 8;
			/* read the next byte */
			if (cp == end) {
				value = mmc_spi_readbytes(host, 1);
				if (value < 0)
					goto done;
				cp = host->data->status;
				end = cp+1;
			}
			if (bitshift) {
				rotator |= *cp++ << bitshift;
				cmd->resp[1] |= (rotator >> 8);
				rotator <<= 8;
			} else {
				cmd->resp[1] |= *cp++;
			}
		}
		break;

	/* SPI R1 == just one status byte */
	case MMC_RSP_SPI_R1:
		break;

	default:
		dev_dbg(&host->spi->dev, "bad response type %04x\n",
				mmc_spi_resp_type(cmd));
		if (value >= 0)
			value = -EINVAL;
		goto done;
	}

	if (value < 0)
		dev_dbg(&host->spi->dev, "%s: resp %04x %08x\n",
			tag, cmd->resp[0], cmd->resp[1]);

	/* disable chipselect on errors and some success cases */
	if (value >= 0 && cs_on)
		return value;
done:
	if (value < 0)
		cmd->error = value;
	mmc_cs_off(host);
	return value;
}

/* Issue command and read its response.
 * Returns zero on success, negative for error.
 *
 * On error, caller must cope with the mmc core retry mechanism.  That
 * means immediate low-level resubmit, which affects the bus lock...
 */
static int
mmc_spi_command_send(struct mmc_spi_host *host,
		struct mmc_request *mrq,
		struct mmc_command *cmd, int cs_on)
{
	struct scratch		*data = host->data;
	u8			*cp = data->status;
	u32			arg = cmd->arg;
	int			status;
	struct spi_transfer	*t;

	/* We can handle most commands (except block reads) in one full
	 * duplex I/O operation before either starting the next transfer
	 * (data block or command) or else deselecting the card.
	 *
	 * First, write 7 bytes:
	 *  - an all-ones byte to ensure the card is ready
	 *  - opcode byte (plus start and transmission bits)
	 *  - four bytes of big-endian argument
	 *  - crc7 (plus end bit) ... always computed, it's cheap
	 *
	 * We init the whole buffer to all-ones, which is what we need
	 * to write while we're reading (later) response data.
	 */
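	/* memset fills the whole buffer with ones; the post-increment
	 * leaves cp pointing just past the leading all-ones ready byte.
	 */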
	memset(cp++, 0xff, sizeof(data->status));

	*cp++ = 0x40 | cmd->opcode;
	*cp++ = (u8)(arg >> 24);
	*cp++ = (u8)(arg >> 16);
	*cp++ = (u8)(arg >> 8);
	*cp++ = (u8)arg;
	*cp++ = (crc7(0, &data->status[1], 5) << 1) | 0x01;

	/* Then, read up to 13 bytes (while writing all-ones):
	 *  - N(CR) (== 1..8) bytes of all-ones
	 *  - status byte (for all response types)
	 *  - the rest of the response, either:
	 *      + nothing, for R1 or R1B responses
	 *      + second status byte, for R2 responses
	 *      + four data bytes, for R3 and R7 responses
	 *
	 * Finally, read some more bytes ... in the nice cases we know in
	 * advance how many, and reading 1 more is always OK:
	 *  - N(EC) (== 0..N) bytes of all-ones, before deselect/finish
	 *  - N(RC) (== 1..N) bytes of all-ones, before next command
	 *  - N(WR) (== 1..N) bytes of all-ones, before data write
	 *
	 * So in those cases one full duplex I/O of at most 21 bytes will
	 * handle the whole command, leaving the card ready to receive a
	 * data block or new command.  We do that whenever we can, shaving
	 * CPU and IRQ costs (especially when using DMA or FIFOs).
	 *
	 * There are two other cases, where it's not generally practical
	 * to rely on a single I/O:
	 *
	 *  - R1B responses need at least N(EC) bytes of all-zeroes.
	 *
	 *    In this case we can *try* to fit it into one I/O, then
	 *    maybe read more data later.
	 *
	 *  - Data block reads are more troublesome, since a variable
	 *    number of padding bytes precede the token and data.
	 *      + N(CX) (== 0..8) bytes of all-ones, before CSD or CID
	 *      + N(AC) (== 1..many) bytes of all-ones
	 *
	 *    In this case we currently only have minimal speedups here:
	 *    when N(CR) == 1 we can avoid I/O in response_get().
	 */
	if (cs_on && (mrq->data->flags & MMC_DATA_READ)) {
		cp += 2;	/* min(N(CR)) + status */
		/* R1 */
	} else {
		cp += 10;	/* max(N(CR)) + status + min(N(RC),N(WR)) */
		if (cmd->flags & MMC_RSP_SPI_S2)	/* R2/R5 */
			cp++;
		else if (cmd->flags & MMC_RSP_SPI_B4)	/* R3/R4/R7 */
			cp += 4;
		else if (cmd->flags & MMC_RSP_BUSY)	/* R1B */
			cp = data->status + sizeof(data->status);
		/* else:  R1 (most commands) */
	}

	dev_dbg(&host->spi->dev, "  mmc_spi: CMD%d, resp %s\n",
		cmd->opcode, maptype(cmd));

	/* send command, leaving chipselect active */
	spi_message_init(&host->m);

	t = &host->t;
	memset(t, 0, sizeof(*t));
	t->tx_buf = t->rx_buf = data->status;
	t->tx_dma = t->rx_dma = host->data_dma;
	t->len = cp - data->status;
	t->cs_change = 1;
	spi_message_add_tail(t, &host->m);

	if (host->dma_dev) {
		host->m.is_dma_mapped = 1;
		dma_sync_single_for_device(host->dma_dev,
				host->data_dma, sizeof(*host->data),
				DMA_BIDIRECTIONAL);
	}
	status = spi_sync_locked(host->spi, &host->m);

	if (host->dma_dev)
		dma_sync_single_for_cpu(host->dma_dev,
				host->data_dma, sizeof(*host->data),
				DMA_BIDIRECTIONAL);
	if (status < 0) {
		dev_dbg(&host->spi->dev, "  ... write returned %d\n", status);
		cmd->error = status;
		return status;
	}

	/* after no-data commands and STOP_TRANSMISSION, chipselect off */
	return mmc_spi_response_get(host, cmd, cs_on);
}
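
/*
 * For illustration, the seven bytes written for CMD0 with a zero
 * argument (the classic "go idle" frame) are:
 *
 *	0xff 0x40 0x00 0x00 0x00 0x00 0x95
 *
 * where 0x95 == (crc7(0, &frame[1], 5) << 1) | 0x01.  Below is a
 * minimal sketch of that frame construction, mirroring the code
 * above; it is kept out of the build, and the helper name is ours,
 * not part of this driver.
 */
#if 0
static void mmc_spi_example_frame(u8 frame[7], u8 opcode, u32 arg)
{
	frame[0] = 0xff;			/* "card ready?" filler */
	frame[1] = 0x40 | opcode;		/* start + transmission bits */
	frame[2] = (u8)(arg >> 24);		/* big-endian argument */
	frame[3] = (u8)(arg >> 16);
	frame[4] = (u8)(arg >> 8);
	frame[5] = (u8)arg;
	frame[6] = (crc7(0, &frame[1], 5) << 1) | 0x01;	/* crc7 + end bit */
}
#endif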

/* Build data message with up to four separate transfers.  For TX, we
 * start by writing the data token.  And in most cases, we finish with
 * a status transfer.
 *
 * We always provide TX data for data and CRC.  The MMC/SD protocol
 * requires us to write ones, but Linux defaults to writing zeroes,
 * so we explicitly initialize it to all ones on RX paths.
 *
 * We also handle DMA mapping, so the underlying SPI controller does
 * not need to (re)do it for each message.
 */
static void
mmc_spi_setup_data_message(
	struct mmc_spi_host	*host,
	int			multiple,
	enum dma_data_direction	direction)
{
	struct spi_transfer	*t;
	struct scratch		*scratch = host->data;
	dma_addr_t		dma = host->data_dma;

	spi_message_init(&host->m);
	if (dma)
		host->m.is_dma_mapped = 1;

	/* for reads, readblock() skips 0xff bytes before finding
	 * the token; for writes, this transfer issues that token.
	 */
	if (direction == DMA_TO_DEVICE) {
		t = &host->token;
		memset(t, 0, sizeof(*t));
		t->len = 1;
		if (multiple)
			scratch->data_token = SPI_TOKEN_MULTI_WRITE;
		else
			scratch->data_token = SPI_TOKEN_SINGLE;
		t->tx_buf = &scratch->data_token;
		if (dma)
			t->tx_dma = dma + offsetof(struct scratch, data_token);
		spi_message_add_tail(t, &host->m);
	}

	/* Body of transfer is buffer, then CRC ...
	 * either TX-only, or RX with TX-ones.
	 */
	t = &host->t;
	memset(t, 0, sizeof(*t));
	t->tx_buf = host->ones;
	t->tx_dma = host->ones_dma;
	/* length and actual buffer info are written later */
	spi_message_add_tail(t, &host->m);

	t = &host->crc;
	memset(t, 0, sizeof(*t));
	t->len = 2;
	if (direction == DMA_TO_DEVICE) {
		/* the actual CRC may get written later */
		t->tx_buf = &scratch->crc_val;
		if (dma)
			t->tx_dma = dma + offsetof(struct scratch, crc_val);
	} else {
		t->tx_buf = host->ones;
		t->tx_dma = host->ones_dma;
		t->rx_buf = &scratch->crc_val;
		if (dma)
			t->rx_dma = dma + offsetof(struct scratch, crc_val);
	}
	spi_message_add_tail(t, &host->m);

	/*
	 * A single block read is followed by N(EC) [0+] all-ones bytes
	 * before deselect ... don't bother.
	 *
	 * Multiblock reads are followed by N(AC) [1+] all-ones bytes before
	 * the next block is read, or a STOP_TRANSMISSION is issued.  We'll
	 * collect that single byte, so readblock() doesn't need to.
	 *
	 * For a write, the one-byte data response follows immediately, then
	 * come zero or more busy bytes, then N(WR) [1+] all-ones bytes.
	 * Then single block writes may deselect, and multiblock ones issue
	 * the next token (next data block, or STOP_TRAN).  We can try to
	 * minimize I/O ops by using a single read to collect end-of-busy.
	 */
	if (multiple || direction == DMA_TO_DEVICE) {
		t = &host->early_status;
		memset(t, 0, sizeof(*t));
		t->len = (direction == DMA_TO_DEVICE)
				? sizeof(scratch->status)
				: 1;
		t->tx_buf = host->ones;
		t->tx_dma = host->ones_dma;
		t->rx_buf = scratch->status;
		if (dma)
			t->rx_dma = dma + offsetof(struct scratch, status);
		t->cs_change = 1;
		spi_message_add_tail(t, &host->m);
	}
}
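
/*
 * To recap the message assembled above, in transfer order:
 *
 *	write:           token (1) | data (t->len) | crc (2) | status (29)
 *	multiblock read:             data (t->len) | crc (2) | status (1)
 *	single read:                 data (t->len) | crc (2)
 */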

/*
 * Write one block:
 *  - caller handled preceding N(WR) [1+] all-ones bytes
 *  - data block
 *	+ token
 *	+ data bytes
 *	+ crc16
 *  - an all-ones byte ... card writes a data-response byte
 *  - followed by N(EC) [0+] all-ones bytes, card writes zero/'busy'
 *
 * Return negative errno, else success.
 */
static int
mmc_spi_writeblock(struct mmc_spi_host *host, struct spi_transfer *t,
	unsigned long timeout)
{
	struct spi_device	*spi = host->spi;
	int			status, i;
	struct scratch		*scratch = host->data;
	u32			pattern;

	if (host->mmc->use_spi_crc)
		scratch->crc_val = cpu_to_be16(
				crc_itu_t(0, t->tx_buf, t->len));
	if (host->dma_dev)
		dma_sync_single_for_device(host->dma_dev,
				host->data_dma, sizeof(*scratch),
				DMA_BIDIRECTIONAL);

	status = spi_sync_locked(spi, &host->m);

	if (status != 0) {
		dev_dbg(&spi->dev, "write error (%d)\n", status);
		return status;
	}

	if (host->dma_dev)
		dma_sync_single_for_cpu(host->dma_dev,
				host->data_dma, sizeof(*scratch),
				DMA_BIDIRECTIONAL);

	/*
	 * Get the transmission data-response reply.  It must follow
	 * immediately after the data block we transferred.  This reply
	 * doesn't necessarily tell whether the write operation succeeded;
	 * it just says if the transmission was ok and whether *earlier*
	 * writes succeeded; see the standard.
	 *
	 * In practice, there are cards (even modern SDHC cards) that are
	 * late in sending the response and miss the time frame by a few
	 * bits, so we have to cope with this situation and check the
	 * response bit-by-bit.  Arggh!!!
	 */
	pattern  = scratch->status[0] << 24;
	pattern |= scratch->status[1] << 16;
	pattern |= scratch->status[2] << 8;
	pattern |= scratch->status[3];

	/* The first 3 bits of the pattern are undefined */
	pattern |= 0xE0000000;

	/* left-adjust to leading 0 bit */
	while (pattern & 0x80000000)
		pattern <<= 1;
	/* right-adjust for pattern matching.  Code is in bits 4..0 now. */
	pattern >>= 27;
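	/*
	 * Worked example: an on-time "accepted" reply gives
	 * status[0] == 0xe5, so pattern == 0xe5ffffff; left-adjusting
	 * past the three leading ones leaves 0x2ffffff8, and the
	 * right shift yields 0b00101 == SPI_RESPONSE_ACCEPTED.
	 */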

	switch (pattern) {
	case SPI_RESPONSE_ACCEPTED:
		status = 0;
		break;
	case SPI_RESPONSE_CRC_ERR:
		/* host shall then issue MMC_STOP_TRANSMISSION */
		status = -EILSEQ;
		break;
	case SPI_RESPONSE_WRITE_ERR:
		/* host shall then issue MMC_STOP_TRANSMISSION,
		 * and should MMC_SEND_STATUS to sort it out
		 */
		status = -EIO;
		break;
	default:
		status = -EPROTO;
		break;
	}
	if (status != 0) {
		dev_dbg(&spi->dev, "write error %02x (%d)\n",
			scratch->status[0], status);
		return status;
	}

	t->tx_buf += t->len;
	if (host->dma_dev)
		t->tx_dma += t->len;

	/* Return when not busy.  If we didn't collect that status yet,
	 * we'll need some more I/O.
	 */
	for (i = 4; i < sizeof(scratch->status); i++) {
		/* card is non-busy if the most recent bit is 1 */
		if (scratch->status[i] & 0x01)
			return 0;
	}
	return mmc_spi_wait_unbusy(host, timeout);
}

/*
 * Read one block:
 *  - skip leading all-ones bytes ... either
 *      + N(AC) [1..f(clock,CSD)] usually, else
 *      + N(CX) [0..8] when reading CSD or CID
 *  - data block
 *	+ token ... if error token, no data or crc
 *	+ data bytes
 *	+ crc16
 *
 * After single block reads, we're done; N(EC) [0+] all-ones bytes follow
 * before dropping chipselect.
 *
 * For multiblock reads, caller either reads the next block or issues a
 * STOP_TRANSMISSION command.
 */
static int
mmc_spi_readblock(struct mmc_spi_host *host, struct spi_transfer *t,
	unsigned long timeout)
{
	struct spi_device	*spi = host->spi;
	int			status;
	struct scratch		*scratch = host->data;
	unsigned int		bitshift;
	u8			leftover;

	/* At least one SD card sends an all-zeroes byte when N(CX)
	 * applies, before the all-ones bytes ... just cope with that.
	 */
	status = mmc_spi_readbytes(host, 1);
	if (status < 0)
		return status;
	status = scratch->status[0];
	if (status == 0xff || status == 0)
		status = mmc_spi_readtoken(host, timeout);

	if (status < 0) {
		dev_dbg(&spi->dev, "read error %02x (%d)\n", status, status);
		return status;
	}

	/* The token may be bit-shifted...
	 * the first 0-bit precedes the data stream.
	 */
	bitshift = 7;
	while (status & 0x80) {
		status <<= 1;
		bitshift--;
	}
	leftover = status << 1;
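	/* For an on-time 0xfe token the loop consumes all seven
	 * leading ones and bitshift ends up zero (no realignment);
	 * otherwise bitshift is the right shift needed to realign
	 * each following byte.
	 */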

	if (host->dma_dev) {
		dma_sync_single_for_device(host->dma_dev,
				host->data_dma, sizeof(*scratch),
				DMA_BIDIRECTIONAL);
		dma_sync_single_for_device(host->dma_dev,
				t->rx_dma, t->len,
				DMA_FROM_DEVICE);
	}

	status = spi_sync_locked(spi, &host->m);

	if (host->dma_dev) {
		dma_sync_single_for_cpu(host->dma_dev,
				host->data_dma, sizeof(*scratch),
				DMA_BIDIRECTIONAL);
		dma_sync_single_for_cpu(host->dma_dev,
				t->rx_dma, t->len,
				DMA_FROM_DEVICE);
	}

	if (bitshift) {
		/* Walk through the data and the crc and do
		 * all the magic to get byte-aligned data.
		 */
		u8 *cp = t->rx_buf;
		unsigned int len;
		unsigned int bitright = 8 - bitshift;
		u8 temp;

		for (len = t->len; len; len--) {
			temp = *cp;
			*cp++ = leftover | (temp >> bitshift);
			leftover = temp << bitright;
		}
		cp = (u8 *) &scratch->crc_val;
		temp = *cp;
		*cp++ = leftover | (temp >> bitshift);
		leftover = temp << bitright;
		temp = *cp;
		*cp = leftover | (temp >> bitshift);
	}

	if (host->mmc->use_spi_crc) {
		u16 crc = crc_itu_t(0, t->rx_buf, t->len);
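		/* crc_itu_t() is the CRC16-CCITT (poly 0x1021, zero
		 * seed) used by SD/MMC; e.g. a 512-byte block of
		 * all-ones data should carry crc 0x7fa1.
		 */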

		be16_to_cpus(&scratch->crc_val);
		if (scratch->crc_val != crc) {
			dev_dbg(&spi->dev,
				"read - crc error: crc_val=0x%04x, computed=0x%04x len=%d\n",
				scratch->crc_val, crc, t->len);
			return -EILSEQ;
		}
	}

	t->rx_buf += t->len;
	if (host->dma_dev)
		t->rx_dma += t->len;

	return 0;
}

/*
 * An MMC/SD data stage includes one or more blocks, optional CRCs,
 * and inline handshaking.  That handshaking makes it unlike most
 * other SPI protocol stacks.
 */
static void
mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd,
		struct mmc_data *data, u32 blk_size)
{
	struct spi_device	*spi = host->spi;
	struct device		*dma_dev = host->dma_dev;
	struct spi_transfer	*t;
	enum dma_data_direction	direction;
	struct scatterlist	*sg;
	unsigned		n_sg;
	int			multiple = (data->blocks > 1);
	u32			clock_rate;
	unsigned long		timeout;

	if (data->flags & MMC_DATA_READ)
		direction = DMA_FROM_DEVICE;
	else
		direction = DMA_TO_DEVICE;
	mmc_spi_setup_data_message(host, multiple, direction);
	t = &host->t;

	if (t->speed_hz)
		clock_rate = t->speed_hz;
	else
		clock_rate = spi->max_speed_hz;
	timeout = data->timeout_ns / 1000 +
		  data->timeout_clks * 1000000 / clock_rate;
	timeout = usecs_to_jiffies((unsigned int)timeout) + 1;
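	/* e.g. 100 ms of timeout_ns and no timeout clocks gives
	 * 100000 usecs, i.e. HZ/10 + 1 jiffies at HZ == 100.
	 */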

	/* Handle scatterlist segments one at a time, with synch for
	 * each 512-byte block
	 */
	for (sg = data->sg, n_sg = data->sg_len; n_sg; n_sg--, sg++) {
		int			status = 0;
		dma_addr_t		dma_addr = 0;
		void			*kmap_addr;
		unsigned		length = sg->length;
		enum dma_data_direction	dir = direction;

		/* set up dma mapping for controller drivers that might
		 * use DMA ... though they may fall back to PIO
		 */
		if (dma_dev) {
			/* never invalidate whole *shared* pages ... */
			if ((sg->offset != 0 || length != PAGE_SIZE)
					&& dir == DMA_FROM_DEVICE)
				dir = DMA_BIDIRECTIONAL;

			dma_addr = dma_map_page(dma_dev, sg_page(sg), 0,
						PAGE_SIZE, dir);
			if (direction == DMA_TO_DEVICE)
				t->tx_dma = dma_addr + sg->offset;
			else
				t->rx_dma = dma_addr + sg->offset;
		}

		/* allow pio too; we don't allow highmem */
		kmap_addr = kmap(sg_page(sg));
		if (direction == DMA_TO_DEVICE)
			t->tx_buf = kmap_addr + sg->offset;
		else
			t->rx_buf = kmap_addr + sg->offset;

		/* transfer each block, and update request status */
		while (length) {
			t->len = min(length, blk_size);

			dev_dbg(&host->spi->dev,
				"    mmc_spi: %s block, %d bytes\n",
				(direction == DMA_TO_DEVICE)
				? "write"
				: "read",
				t->len);

			if (direction == DMA_TO_DEVICE)
				status = mmc_spi_writeblock(host, t, timeout);
			else
				status = mmc_spi_readblock(host, t, timeout);
			if (status < 0)
				break;

			data->bytes_xfered += t->len;
			length -= t->len;

			if (!multiple)
				break;
		}

		/* discard mappings */
		if (direction == DMA_FROM_DEVICE)
			flush_kernel_dcache_page(sg_page(sg));
		kunmap(sg_page(sg));
		if (dma_dev)
			dma_unmap_page(dma_dev, dma_addr, PAGE_SIZE, dir);

		if (status < 0) {
			data->error = status;
			dev_dbg(&spi->dev, "%s status %d\n",
				(direction == DMA_TO_DEVICE)
					? "write" : "read",
				status);
			break;
		}
	}

	/* NOTE some docs describe an MMC-only SET_BLOCK_COUNT (CMD23) that
	 * can be issued before multiblock writes.  Unlike its more widely
	 * documented analogue for SD cards (SET_WR_BLK_ERASE_COUNT, ACMD23),
	 * that can affect the STOP_TRAN logic.  Complete (and current)
	 * MMC specs should sort that out before Linux starts using CMD23.
	 */
	if (direction == DMA_TO_DEVICE && multiple) {
		struct scratch	*scratch = host->data;
		int		tmp;
		const unsigned	statlen = sizeof(scratch->status);

		dev_dbg(&spi->dev, "    mmc_spi: STOP_TRAN\n");

		/* Tweak the per-block message we set up earlier by morphing
		 * it to hold a single buffer with the token followed by some
		 * all-ones bytes ... skip N(BR) (0..1), scan the rest for
		 * "not busy any longer" status, and leave chip selected.
		 */
		INIT_LIST_HEAD(&host->m.transfers);
		list_add(&host->early_status.transfer_list,
				&host->m.transfers);

		memset(scratch->status, 0xff, statlen);
		scratch->status[0] = SPI_TOKEN_STOP_TRAN;

		host->early_status.tx_buf = host->early_status.rx_buf;
		host->early_status.tx_dma = host->early_status.rx_dma;
		host->early_status.len = statlen;

		if (host->dma_dev)
			dma_sync_single_for_device(host->dma_dev,
					host->data_dma, sizeof(*scratch),
					DMA_BIDIRECTIONAL);

		tmp = spi_sync_locked(spi, &host->m);

		if (host->dma_dev)
			dma_sync_single_for_cpu(host->dma_dev,
					host->data_dma, sizeof(*scratch),
					DMA_BIDIRECTIONAL);

		if (tmp < 0) {
			if (!data->error)
				data->error = tmp;
			return;
		}

		/* Ideally we collected "not busy" status with one I/O,
		 * avoiding wasteful byte-at-a-time scanning... but more
		 * I/O is often needed.
		 */
		for (tmp = 2; tmp < statlen; tmp++) {
			if (scratch->status[tmp] != 0)
				return;
		}
		tmp = mmc_spi_wait_unbusy(host, timeout);
		if (tmp < 0 && !data->error)
			data->error = tmp;
	}
}

/****************************************************************************/

/*
 * MMC driver implementation -- the interface to the MMC stack
 */

static void mmc_spi_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mmc_spi_host	*host = mmc_priv(mmc);
	int			status = -EINVAL;
	int			crc_retry = 5;
	struct mmc_command	stop;

#ifdef DEBUG
	/* MMC core and layered drivers *MUST* issue SPI-aware commands */
	{
		struct mmc_command	*cmd;
		int			invalid = 0;

		cmd = mrq->cmd;
		if (!mmc_spi_resp_type(cmd)) {
			dev_dbg(&host->spi->dev, "bogus command\n");
			cmd->error = -EINVAL;
			invalid = 1;
		}

		cmd = mrq->stop;
		if (cmd && !mmc_spi_resp_type(cmd)) {
			dev_dbg(&host->spi->dev, "bogus STOP command\n");
			cmd->error = -EINVAL;
			invalid = 1;
		}

		if (invalid) {
			dump_stack();
			mmc_request_done(host->mmc, mrq);
			return;
		}
	}
#endif

	/* request exclusive bus access */
	spi_bus_lock(host->spi->master);

crc_recover:
	/* issue command; then optionally data and stop */
	status = mmc_spi_command_send(host, mrq, mrq->cmd, mrq->data != NULL);
	if (status == 0 && mrq->data) {
		mmc_spi_data_do(host, mrq->cmd, mrq->data, mrq->data->blksz);

		/*
		 * The SPI bus is not always reliable for large data transfers.
		 * If an occasional crc error is reported by the SD device with
		 * data read/write over SPI, it may be recovered by repeating
		 * the last SD command again.  The retry count is set to 5 to
		 * ensure the driver passes stress tests.
		 */
		if (mrq->data->error == -EILSEQ && crc_retry) {
			stop.opcode = MMC_STOP_TRANSMISSION;
			stop.arg = 0;
			stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
			status = mmc_spi_command_send(host, mrq, &stop, 0);
			crc_retry--;
			mrq->data->error = 0;
			goto crc_recover;
		}

		if (mrq->stop)
			status = mmc_spi_command_send(host, mrq, mrq->stop, 0);
		else
			mmc_cs_off(host);
	}

	/* release the bus */
	spi_bus_unlock(host->spi->master);

	mmc_request_done(host->mmc, mrq);
}

/* See Section 6.4.1 in the SD "Simplified Physical Layer Specification 2.0"
 *
 * NOTE that here we can't know that the card has just been powered up;
 * not all MMC/SD sockets support power switching.
 *
 * FIXME when the card is still in SPI mode, e.g. from a previous kernel,
 * this doesn't seem to do the right thing at all...
 */
static void mmc_spi_initsequence(struct mmc_spi_host *host)
{
	/* Try to be very sure any previous command has completed;
	 * wait till not-busy, skip debris from any old commands.
	 */
	mmc_spi_wait_unbusy(host, r1b_timeout);
	mmc_spi_readbytes(host, 10);

	/*
	 * Do a burst with chipselect active-high.  We need to do this to
	 * meet the requirement of 74 clock cycles with both chipselect
	 * and CMD (MOSI) high before CMD0 ... after the card has been
	 * powered up to Vdd(min), and so is ready to take commands.
	 *
	 * Some cards are particularly needy of this (e.g. Viking "SD256")
	 * while most others don't seem to care.
	 *
	 * Note that this is one of the places MMC/SD plays games with the
	 * SPI protocol.  Another is that when chipselect is released while
	 * the card returns BUSY status, the clock must issue several cycles
	 * with chipselect high before the card will stop driving its output.
	 */
	host->spi->mode |= SPI_CS_HIGH;
	if (spi_setup(host->spi) != 0) {
		/* Just warn; most cards work without it. */
		dev_warn(&host->spi->dev,
				"can't change chip-select polarity\n");
		host->spi->mode &= ~SPI_CS_HIGH;
	} else {
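		/* 18 bytes of ones == 144 clock cycles with chipselect
		 * high, comfortably more than the required 74.
		 */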
		mmc_spi_readbytes(host, 18);

		host->spi->mode &= ~SPI_CS_HIGH;
		if (spi_setup(host->spi) != 0) {
			/* Wot, we can't get the same setup we had before? */
			dev_err(&host->spi->dev,
					"can't restore chip-select polarity\n");
		}
	}
}

static const char *mmc_powerstring(u8 power_mode)
{
	switch (power_mode) {
	case MMC_POWER_OFF: return "off";
	case MMC_POWER_UP:  return "up";
	case MMC_POWER_ON:  return "on";
	}
	return "?";
}

static void mmc_spi_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct mmc_spi_host *host = mmc_priv(mmc);

	if (host->power_mode != ios->power_mode) {
		int		canpower;

		canpower = host->pdata && host->pdata->setpower;

		dev_dbg(&host->spi->dev, "mmc_spi: power %s (%d)%s\n",
				mmc_powerstring(ios->power_mode),
				ios->vdd,
				canpower ? ", can switch" : "");

		/* switch power on/off if possible, accounting for
		 * max 250msec powerup time if needed.
		 */
		if (canpower) {
			switch (ios->power_mode) {
			case MMC_POWER_OFF:
			case MMC_POWER_UP:
				host->pdata->setpower(&host->spi->dev,
						ios->vdd);
				if (ios->power_mode == MMC_POWER_UP)
					msleep(host->powerup_msecs);
			}
		}

		/* See 6.4.1 in the simplified SD card physical spec 2.0 */
		if (ios->power_mode == MMC_POWER_ON)
			mmc_spi_initsequence(host);

		/* If powering down, ground all card inputs to avoid power
		 * delivery from data lines!  On a shared SPI bus, this
		 * will probably be temporary; 6.4.2 of the simplified SD
		 * spec says this must last at least 1msec.
		 *
		 *   - Clock low means CPOL 0, i.e. SPI mode 0
		 *   - MOSI low comes from writing zero
		 *   - Chipselect is usually active low...
		 */
		if (canpower && ios->power_mode == MMC_POWER_OFF) {
			int mres;
			u8 nullbyte = 0;

			host->spi->mode &= ~(SPI_CPOL|SPI_CPHA);
			mres = spi_setup(host->spi);
			if (mres < 0)
				dev_dbg(&host->spi->dev,
					"switch to SPI mode 0 failed\n");

			if (spi_write(host->spi, &nullbyte, 1) < 0)
				dev_dbg(&host->spi->dev,
					"failed to drive SPI signals low\n");

			/*
			 * Now the clock should be low due to SPI mode 0;
			 * MOSI should be low because we wrote 0x00;
			 * chipselect should be low (it is active low);
			 * power supply is off, so now MMC is off too!
			 *
			 * FIXME no, chipselect can be high since the
			 * device is inactive and SPI_CS_HIGH is clear...
			 */
			msleep(10);
			if (mres == 0) {
				host->spi->mode |= (SPI_CPOL|SPI_CPHA);
				mres = spi_setup(host->spi);
				if (mres < 0)
					dev_dbg(&host->spi->dev,
						"switch back to SPI mode 3 failed\n");
			}
		}

		host->power_mode = ios->power_mode;
	}

	if (host->spi->max_speed_hz != ios->clock && ios->clock != 0) {
		int		status;

		host->spi->max_speed_hz = ios->clock;
		status = spi_setup(host->spi);
		dev_dbg(&host->spi->dev,
			"mmc_spi: clock to %d Hz, %d\n",
			host->spi->max_speed_hz, status);
	}
}

static int mmc_spi_get_ro(struct mmc_host *mmc)
{
	struct mmc_spi_host *host = mmc_priv(mmc);

	if (host->pdata && host->pdata->get_ro)
		return !!host->pdata->get_ro(mmc->parent);
	/*
	 * Board doesn't support read only detection; let the mmc core
	 * decide what to do.
	 */
	return -ENOSYS;
}

static int mmc_spi_get_cd(struct mmc_host *mmc)
{
	struct mmc_spi_host *host = mmc_priv(mmc);

	if (host->pdata && host->pdata->get_cd)
		return !!host->pdata->get_cd(mmc->parent);
	return -ENOSYS;
}

static const struct mmc_host_ops mmc_spi_ops = {
	.request	= mmc_spi_request,
	.set_ios	= mmc_spi_set_ios,
	.get_ro		= mmc_spi_get_ro,
	.get_cd		= mmc_spi_get_cd,
};


/****************************************************************************/

/*
 * SPI driver implementation
 */

static irqreturn_t
mmc_spi_detect_irq(int irq, void *mmc)
{
	struct mmc_spi_host *host = mmc_priv(mmc);
	u16 delay_msec = max(host->pdata->detect_delay, (u16)100);

	mmc_detect_change(mmc, msecs_to_jiffies(delay_msec));
	return IRQ_HANDLED;
}

static int mmc_spi_probe(struct spi_device *spi)
{
	void			*ones;
	struct mmc_host		*mmc;
	struct mmc_spi_host	*host;
	int			status;

	/* We rely on full duplex transfers, mostly to reduce
	 * per-transfer overheads (by making fewer transfers).
	 */
	if (spi->master->flags & SPI_MASTER_HALF_DUPLEX)
		return -EINVAL;

	/* MMC and SD specs only seem to care that sampling is on the
	 * rising edge ... meaning SPI modes 0 or 3.  So either SPI mode
	 * should be legit.  We'll use mode 0 since the steady state is 0,
	 * which is appropriate for hotplugging, unless the platform data
	 * specifies mode 3 (for hardware that is not compatible with mode 0).
	 */
	if (spi->mode != SPI_MODE_3)
		spi->mode = SPI_MODE_0;
	spi->bits_per_word = 8;

	status = spi_setup(spi);
	if (status < 0) {
		dev_dbg(&spi->dev, "needs SPI mode %02x, %d kHz; %d\n",
				spi->mode, spi->max_speed_hz / 1000,
				status);
		return status;
	}

	/* We need a supply of ones to transmit.  This is the only time
	 * the CPU touches these, so cache coherency isn't a concern.
	 *
	 * NOTE if many systems use more than one MMC-over-SPI connector
	 * it'd save some memory to share this.  That's evidently rare.
	 */
	status = -ENOMEM;
	ones = kmalloc(MMC_SPI_BLOCKSIZE, GFP_KERNEL);
	if (!ones)
		goto nomem;
	memset(ones, 0xff, MMC_SPI_BLOCKSIZE);

	mmc = mmc_alloc_host(sizeof(*host), &spi->dev);
	if (!mmc)
		goto nomem;

	mmc->ops = &mmc_spi_ops;
	mmc->max_blk_size = MMC_SPI_BLOCKSIZE;
	mmc->max_segs = MMC_SPI_BLOCKSATONCE;
	mmc->max_req_size = MMC_SPI_BLOCKSATONCE * MMC_SPI_BLOCKSIZE;
	mmc->max_blk_count = MMC_SPI_BLOCKSATONCE;

	mmc->caps = MMC_CAP_SPI;

	/* SPI doesn't need the lowspeed device identification thing for
	 * MMC or SD cards, since it never comes up in open drain mode.
	 * That's good; some SPI masters can't handle very low speeds!
	 *
	 * However, low speed SDIO cards need not handle over 400 KHz;
	 * that's the only reason not to use a few MHz for f_min (until
	 * the upper layer reads the target frequency from the CSD).
	 */
	mmc->f_min = 400000;
	mmc->f_max = spi->max_speed_hz;

	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->spi = spi;

	host->ones = ones;

	/* Platform data is used to hook up things like card sensing
	 * and power switching gpios.
	 */
	host->pdata = mmc_spi_get_pdata(spi);
	if (host->pdata)
		mmc->ocr_avail = host->pdata->ocr_mask;
	if (!mmc->ocr_avail) {
		dev_warn(&spi->dev, "ASSUMING 3.2-3.4 V slot power\n");
		mmc->ocr_avail = MMC_VDD_32_33|MMC_VDD_33_34;
	}
	if (host->pdata && host->pdata->setpower) {
		host->powerup_msecs = host->pdata->powerup_msecs;
		if (!host->powerup_msecs || host->powerup_msecs > 250)
			host->powerup_msecs = 250;
	}

	dev_set_drvdata(&spi->dev, mmc);

	/* preallocate dma buffers */
	host->data = kmalloc(sizeof(*host->data), GFP_KERNEL);
	if (!host->data)
		goto fail_nobuf1;

	if (spi->master->dev.parent->dma_mask) {
		struct device	*dev = spi->master->dev.parent;

		host->dma_dev = dev;
		host->ones_dma = dma_map_single(dev, ones,
				MMC_SPI_BLOCKSIZE, DMA_TO_DEVICE);
		host->data_dma = dma_map_single(dev, host->data,
				sizeof(*host->data), DMA_BIDIRECTIONAL);

		/* REVISIT in theory those map operations can fail... */

		dma_sync_single_for_cpu(host->dma_dev,
				host->data_dma, sizeof(*host->data),
				DMA_BIDIRECTIONAL);
	}

	/* setup message for status/busy readback */
	spi_message_init(&host->readback);
	host->readback.is_dma_mapped = (host->dma_dev != NULL);

	spi_message_add_tail(&host->status, &host->readback);
	host->status.tx_buf = host->ones;
	host->status.tx_dma = host->ones_dma;
	host->status.rx_buf = &host->data->status;
	host->status.rx_dma = host->data_dma + offsetof(struct scratch, status);
	host->status.cs_change = 1;

	/* register card detect irq */
	if (host->pdata && host->pdata->init) {
		status = host->pdata->init(&spi->dev, mmc_spi_detect_irq, mmc);
		if (status != 0)
			goto fail_glue_init;
	}

	/* pass platform capabilities, if any */
	if (host->pdata)
		mmc->caps |= host->pdata->caps;

	status = mmc_add_host(mmc);
	if (status != 0)
		goto fail_add_host;

	dev_info(&spi->dev, "SD/MMC host %s%s%s%s%s\n",
			dev_name(&mmc->class_dev),
			host->dma_dev ? "" : ", no DMA",
			(host->pdata && host->pdata->get_ro)
				? "" : ", no WP",
			(host->pdata && host->pdata->setpower)
				? "" : ", no poweroff",
			(mmc->caps & MMC_CAP_NEEDS_POLL)
				? ", cd polling" : "");
	return 0;

fail_add_host:
	mmc_remove_host(mmc);
fail_glue_init:
	if (host->dma_dev)
		dma_unmap_single(host->dma_dev, host->data_dma,
				sizeof(*host->data), DMA_BIDIRECTIONAL);
	kfree(host->data);

fail_nobuf1:
	mmc_free_host(mmc);
	mmc_spi_put_pdata(spi);
	dev_set_drvdata(&spi->dev, NULL);

nomem:
	kfree(ones);
	return status;
}


static int __devexit mmc_spi_remove(struct spi_device *spi)
{
	struct mmc_host		*mmc = dev_get_drvdata(&spi->dev);
	struct mmc_spi_host	*host;

	if (mmc) {
		host = mmc_priv(mmc);

		/* prevent new mmc_detect_change() calls */
		if (host->pdata && host->pdata->exit)
			host->pdata->exit(&spi->dev, mmc);

		mmc_remove_host(mmc);

		if (host->dma_dev) {
			dma_unmap_single(host->dma_dev, host->ones_dma,
				MMC_SPI_BLOCKSIZE, DMA_TO_DEVICE);
			dma_unmap_single(host->dma_dev, host->data_dma,
				sizeof(*host->data), DMA_BIDIRECTIONAL);
		}

		kfree(host->data);
		kfree(host->ones);

		spi->max_speed_hz = mmc->f_max;
		mmc_free_host(mmc);
		mmc_spi_put_pdata(spi);
		dev_set_drvdata(&spi->dev, NULL);
	}
	return 0;
}

static const struct of_device_id mmc_spi_of_match_table[] = {
	{ .compatible = "mmc-spi-slot", },
	{},
};

static struct spi_driver mmc_spi_driver = {
	.driver = {
		.name =		"mmc_spi",
		.bus =		&spi_bus_type,
		.owner =	THIS_MODULE,
		.of_match_table = mmc_spi_of_match_table,
	},
	.probe =	mmc_spi_probe,
	.remove =	__devexit_p(mmc_spi_remove),
};


static int __init mmc_spi_init(void)
{
	return spi_register_driver(&mmc_spi_driver);
}
module_init(mmc_spi_init);


static void __exit mmc_spi_exit(void)
{
	spi_unregister_driver(&mmc_spi_driver);
}
module_exit(mmc_spi_exit);


MODULE_AUTHOR("Mike Lavender, David Brownell, "
		"Hans-Peter Nilsson, Jan Nikitenko");
MODULE_DESCRIPTION("SPI SD/MMC host driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("spi:mmc_spi");