/*
 * mmc_spi.c - Access SD/MMC cards through SPI master controllers
 *
 * (C) Copyright 2005, Intec Automation,
 *		Mike Lavender (mike@steroidmicros)
 * (C) Copyright 2006-2007, David Brownell
 * (C) Copyright 2007, Axis Communications,
 *		Hans-Peter Nilsson (hp@axis.com)
 * (C) Copyright 2007, ATRON electronic GmbH,
 *		Jan Nikitenko <jan.nikitenko@gmail.com>
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/hrtimer.h>
#include <linux/delay.h>
#include <linux/bio.h>
#include <linux/dma-mapping.h>
#include <linux/crc7.h>
#include <linux/crc-itu-t.h>
#include <linux/scatterlist.h>

#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>		/* for R1_SPI_* bit values */

#include <linux/spi/spi.h>
#include <linux/spi/mmc_spi.h>

#include <asm/unaligned.h>


/* NOTES:
 *
 * - For now, we won't try to interoperate with a real mmc/sd/sdio
 *   controller, although some of them do have hardware support for
 *   SPI protocol.  The main reason for such configs would be mmc-ish
 *   cards like DataFlash, which don't support that "native" protocol.
 *
 *   We don't have a "DataFlash/MMC/SD/SDIO card slot" abstraction to
 *   switch between driver stacks, and in any case if "native" mode
 *   is available, it will be faster and hence preferable.
 *
 * - MMC depends on a different chipselect management policy than the
 *   SPI interface currently supports for shared bus segments:  it needs
 *   to issue multiple spi_message requests with the chipselect active,
 *   using the results of one message to decide the next one to issue.
 *
 *   Pending updates to the programming interface, this driver expects
 *   that it not share the bus with other drivers (precluding conflicts).
 *
 * - We tell the controller to keep the chipselect active from the
 *   beginning of an mmc_host_ops.request until the end.  So beware
 *   of SPI controller drivers that mis-handle the cs_change flag!
 *
 *   However, many cards seem OK with chipselect flapping up/down
 *   during that time ... at least on unshared bus segments.
 */


/*
 * Local protocol constants, internal to data block protocols.
 */

/* Response tokens used to ack each block written: */
#define SPI_MMC_RESPONSE_CODE(x)	((x) & 0x1f)
#define SPI_RESPONSE_ACCEPTED		((2 << 1)|1)
#define SPI_RESPONSE_CRC_ERR		((5 << 1)|1)
#define SPI_RESPONSE_WRITE_ERR		((6 << 1)|1)
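/* Per the SD simplified spec, the data response token has the form
 * "xxx0sss1": three don't-care bits, a zero, the three status bits,
 * and a trailing one.  Masking with 0x1f and comparing against the
 * ((code << 1) | 1) values above checks exactly the meaningful bits:
 * accepted == 0x05, CRC error == 0x0b, write error == 0x0d.
 */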

/* Read and write blocks start with these tokens and end with crc;
 * on error, read tokens act like a subset of R2_SPI_* values.
 */
#define SPI_TOKEN_SINGLE	0xfe	/* single block r/w, multiblock read */
#define SPI_TOKEN_MULTI_WRITE	0xfc	/* multiblock write */
#define SPI_TOKEN_STOP_TRAN	0xfd	/* terminate multiblock write */

#define MMC_SPI_BLOCKSIZE	512


/* These fixed timeouts come from the latest SD specs, which say to ignore
 * the CSD values.  The R1B value is for card erase (e.g. the "I forgot the
 * card's password" scenario); it's mostly applied to STOP_TRANSMISSION after
 * reads, which take nowhere near that long.  Older cards may be able to use
 * shorter timeouts ... but why bother?
 */
#define readblock_timeout	ktime_set(0, 100 * 1000 * 1000)
#define writeblock_timeout	ktime_set(0, 250 * 1000 * 1000)
#define r1b_timeout		ktime_set(3, 0)


/****************************************************************************/

/*
 * Local Data Structures
 */

/* "scratch" is per-{command,block} data exchanged with the card */
struct scratch {
	u8			status[29];
	u8			data_token;
	__be16			crc_val;
};
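/* NOTE: this struct is mapped as one DMA region, and code below
 * addresses individual fields with offsetof(); reordering fields
 * means auditing those offsets too.
 */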

struct mmc_spi_host {
	struct mmc_host		*mmc;
	struct spi_device	*spi;

	unsigned char		power_mode;
	u16			powerup_msecs;

	struct mmc_spi_platform_data	*pdata;

	/* for bulk data transfers */
	struct spi_transfer	token, t, crc, early_status;
	struct spi_message	m;

	/* for status readback */
	struct spi_transfer	status;
	struct spi_message	readback;

	/* underlying DMA-aware controller, or null */
	struct device		*dma_dev;

	/* buffer used for commands and for message "overhead" */
	struct scratch		*data;
	dma_addr_t		data_dma;

	/* Specs say to write ones most of the time, even when the card
	 * has no need to read its input data; and many cards won't care.
	 * This is our source of those ones.
	 */
	void			*ones;
	dma_addr_t		ones_dma;
};


/****************************************************************************/

/*
 * MMC-over-SPI protocol glue, used by the MMC stack interface
 */

static inline int mmc_cs_off(struct mmc_spi_host *host)
{
	/* chipselect will always be inactive after setup() */
	return spi_setup(host->spi);
}

static int
mmc_spi_readbytes(struct mmc_spi_host *host, unsigned len)
{
	int status;

	if (len > sizeof(*host->data)) {
		WARN_ON(1);
		return -EIO;
	}

	host->status.len = len;

	if (host->dma_dev)
		dma_sync_single_for_device(host->dma_dev,
				host->data_dma, sizeof(*host->data),
				DMA_FROM_DEVICE);

	status = spi_sync(host->spi, &host->readback);
	if (status == 0)
		status = host->readback.status;

	if (host->dma_dev)
		dma_sync_single_for_cpu(host->dma_dev,
				host->data_dma, sizeof(*host->data),
				DMA_FROM_DEVICE);

	return status;
}

static int
mmc_spi_skip(struct mmc_spi_host *host, ktime_t timeout, unsigned n, u8 byte)
{
	u8		*cp = host->data->status;

	timeout = ktime_add(timeout, ktime_get());

	while (1) {
		int		status;
		unsigned	i;

		status = mmc_spi_readbytes(host, n);
		if (status < 0)
			return status;

		for (i = 0; i < n; i++) {
			if (cp[i] != byte)
				return cp[i];
		}

		/* REVISIT investigate msleep() to avoid busy-wait I/O
		 * in at least some cases.
		 */
		if (ktime_to_ns(ktime_sub(ktime_get(), timeout)) > 0)
			break;
	}
	return -ETIMEDOUT;
}

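/* A card in SPI mode signals busy (programming or erase in progress) by
 * holding its output low, and otherwise idles the bus at all-ones; so
 * "wait for not-busy" scans for a non-zero byte, while "wait for a read
 * token" scans for a non-0xff byte.
 */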
static inline int
mmc_spi_wait_unbusy(struct mmc_spi_host *host, ktime_t timeout)
{
	return mmc_spi_skip(host, timeout, sizeof(host->data->status), 0);
}

static int mmc_spi_readtoken(struct mmc_spi_host *host)
{
	return mmc_spi_skip(host, readblock_timeout, 1, 0xff);
}


/*
 * Note that for SPI, cmd->resp[0] is not the same data as "native" protocol
 * hosts return!  The low byte holds R1_SPI bits.  The next byte may hold
 * R2_SPI bits ... for SEND_STATUS, or after data read errors.
 *
 * cmd->resp[1] holds any four-byte response, for R3 (READ_OCR) and on
 * newer cards R7 (IF_COND).
 */

static char *maptype(struct mmc_command *cmd)
{
	switch (mmc_spi_resp_type(cmd)) {
	case MMC_RSP_SPI_R1:	return "R1";
	case MMC_RSP_SPI_R1B:	return "R1B";
	case MMC_RSP_SPI_R2:	return "R2/R5";
	case MMC_RSP_SPI_R3:	return "R3/R4/R7";
	default:		return "?";
	}
}

/* return zero, else negative errno after setting cmd->error */
static int mmc_spi_response_get(struct mmc_spi_host *host,
		struct mmc_command *cmd, int cs_on)
{
	u8	*cp = host->data->status;
	u8	*end = cp + host->t.len;
	int	value = 0;
	char	tag[32];

	snprintf(tag, sizeof(tag), "  ... CMD%d response SPI_%s",
		cmd->opcode, maptype(cmd));

	/* Except for data block reads, the whole response will already
	 * be stored in the scratch buffer.  It's somewhere after the
	 * command and the first byte we read after it.  We ignore that
	 * first byte.  After STOP_TRANSMISSION command it may include
	 * two data bits, but otherwise it's all ones.
	 */
	cp += 8;
	while (cp < end && *cp == 0xff)
		cp++;

	/* Data block reads (R1 response types) may need more data... */
	if (cp == end) {
		unsigned	i;

		cp = host->data->status;

		/* Card sends N(CR) (== 1..8) bytes of all-ones then one
		 * status byte ... and we already scanned 2 bytes.
		 *
		 * REVISIT block read paths use nasty byte-at-a-time I/O
		 * so they can always DMA directly into the target buffer.
		 * It'd probably be better to memcpy() the first chunk and
		 * avoid extra i/o calls...
		 */
		for (i = 2; i < 9; i++) {
			value = mmc_spi_readbytes(host, 1);
			if (value < 0)
				goto done;
			if (*cp != 0xff)
				goto checkstatus;
		}
		value = -ETIMEDOUT;
		goto done;
	}

checkstatus:
	if (*cp & 0x80) {
		dev_dbg(&host->spi->dev, "%s: INVALID RESPONSE, %02x\n",
					tag, *cp);
		value = -EBADR;
		goto done;
	}

	cmd->resp[0] = *cp++;
	cmd->error = 0;

	/* Status byte: the entire seven-bit R1 response.  */
	if (cmd->resp[0] != 0) {
		if ((R1_SPI_PARAMETER | R1_SPI_ADDRESS
					| R1_SPI_ILLEGAL_COMMAND)
				& cmd->resp[0])
			value = -EINVAL;
		else if (R1_SPI_COM_CRC & cmd->resp[0])
			value = -EILSEQ;
		else if ((R1_SPI_ERASE_SEQ | R1_SPI_ERASE_RESET)
				& cmd->resp[0])
			value = -EIO;
		/* else R1_SPI_IDLE, "it's resetting" */
	}

	switch (mmc_spi_resp_type(cmd)) {

	/* SPI R1B == R1 + busy; STOP_TRANSMISSION (for multiblock reads)
	 * and less-common stuff like various erase operations.
	 */
	case MMC_RSP_SPI_R1B:
		/* maybe we read all the busy tokens already */
		while (cp < end && *cp == 0)
			cp++;
		if (cp == end)
			mmc_spi_wait_unbusy(host, r1b_timeout);
		break;

	/* SPI R2 == R1 + second status byte; SEND_STATUS
	 * SPI R5 == R1 + data byte; IO_RW_DIRECT
	 */
	case MMC_RSP_SPI_R2:
		cmd->resp[0] |= *cp << 8;
		break;

	/* SPI R3, R4, or R7 == R1 + 4 bytes */
	case MMC_RSP_SPI_R3:
		cmd->resp[1] = be32_to_cpu(get_unaligned((u32 *)cp));
		break;

	/* SPI R1 == just one status byte */
	case MMC_RSP_SPI_R1:
		break;

	default:
		dev_dbg(&host->spi->dev, "bad response type %04x\n",
				mmc_spi_resp_type(cmd));
		if (value >= 0)
			value = -EINVAL;
		goto done;
	}

	if (value < 0)
		dev_dbg(&host->spi->dev, "%s: resp %04x %08x\n",
			tag, cmd->resp[0], cmd->resp[1]);

	/* disable chipselect on errors and some success cases */
	if (value >= 0 && cs_on)
		return value;
done:
	if (value < 0)
		cmd->error = value;
	mmc_cs_off(host);
	return value;
}

/* Issue command and read its response.
 * Returns zero on success, negative for error.
 *
 * On error, caller must cope with mmc core retry mechanism.  That
 * means immediate low-level resubmit, which affects the bus lock...
 */
static int
mmc_spi_command_send(struct mmc_spi_host *host,
		struct mmc_request *mrq,
		struct mmc_command *cmd, int cs_on)
{
	struct scratch		*data = host->data;
	u8			*cp = data->status;
	u32			arg = cmd->arg;
	int			status;
	struct spi_transfer	*t;

	/* We can handle most commands (except block reads) in one full
	 * duplex I/O operation before either starting the next transfer
	 * (data block or command) or else deselecting the card.
	 *
	 * First, write 7 bytes:
	 *  - an all-ones byte to ensure the card is ready
	 *  - opcode byte (plus start and transmission bits)
	 *  - four bytes of big-endian argument
	 *  - crc7 (plus end bit) ... always computed, it's cheap
	 *
	 * We init the whole buffer to all-ones, which is what we need
	 * to write while we're reading (later) response data.
	 */
	memset(cp++, 0xff, sizeof(data->status));

	*cp++ = 0x40 | cmd->opcode;
	*cp++ = (u8)(arg >> 24);
	*cp++ = (u8)(arg >> 16);
	*cp++ = (u8)(arg >> 8);
	*cp++ = (u8)arg;
	*cp++ = (crc7(0, &data->status[1], 5) << 1) | 0x01;
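	/* The frame for CMD0 with a zero argument, for example, is the
	 * well-known sequence  ff 40 00 00 00 00 95:  ones byte, then
	 * 0x40|opcode, four argument bytes, then crc7 plus end bit.
	 */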

	/* Then, read up to 13 bytes (while writing all-ones):
	 *  - N(CR) (== 1..8) bytes of all-ones
	 *  - status byte (for all response types)
	 *  - the rest of the response, either:
	 *      + nothing, for R1 or R1B responses
	 *	+ second status byte, for R2 responses
	 *	+ four data bytes, for R3 and R7 responses
	 *
	 * Finally, read some more bytes ... in the nice cases we know in
	 * advance how many, and reading 1 more is always OK:
	 *  - N(EC) (== 0..N) bytes of all-ones, before deselect/finish
	 *  - N(RC) (== 1..N) bytes of all-ones, before next command
	 *  - N(WR) (== 1..N) bytes of all-ones, before data write
	 *
	 * So in those cases one full duplex I/O of at most 21 bytes will
	 * handle the whole command, leaving the card ready to receive a
	 * data block or new command.  We do that whenever we can, shaving
	 * CPU and IRQ costs (especially when using DMA or FIFOs).
	 *
	 * There are two other cases, where it's not generally practical
	 * to rely on a single I/O:
	 *
	 *  - R1B responses need at least N(EC) bytes of all-zeroes.
	 *
	 *    In this case we can *try* to fit it into one I/O, then
	 *    maybe read more data later.
	 *
	 *  - Data block reads are more troublesome, since a variable
	 *    number of padding bytes precede the token and data.
	 *      + N(CX) (== 0..8) bytes of all-ones, before CSD or CID
	 *      + N(AC) (== 1..many) bytes of all-ones
	 *
	 *    In this case we currently only have minimal speedups here:
	 *    when N(CR) == 1 we can avoid I/O in response_get().
	 */
	if (cs_on && (mrq->data->flags & MMC_DATA_READ)) {
		cp += 2;	/* min(N(CR)) + status */
		/* R1 */
	} else {
		cp += 10;	/* max(N(CR)) + status + min(N(RC),N(WR)) */
		if (cmd->flags & MMC_RSP_SPI_S2)	/* R2/R5 */
			cp++;
		else if (cmd->flags & MMC_RSP_SPI_B4)	/* R3/R4/R7 */
			cp += 4;
		else if (cmd->flags & MMC_RSP_BUSY)	/* R1B */
			cp = data->status + sizeof(data->status);
		/* else:  R1 (most commands) */
	}
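	/* cp now marks the end of the full-duplex window:  9 bytes for
	 * the data-read fast path, up to the whole 29-byte status buffer
	 * for R1B.
	 */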

	dev_dbg(&host->spi->dev, "  mmc_spi: CMD%d, resp %s\n",
		cmd->opcode, maptype(cmd));

	/* send command, leaving chipselect active */
	spi_message_init(&host->m);

	t = &host->t;
	memset(t, 0, sizeof(*t));
	t->tx_buf = t->rx_buf = data->status;
	t->tx_dma = t->rx_dma = host->data_dma;
	t->len = cp - data->status;
	t->cs_change = 1;
	spi_message_add_tail(t, &host->m);

	if (host->dma_dev) {
		host->m.is_dma_mapped = 1;
		dma_sync_single_for_device(host->dma_dev,
				host->data_dma, sizeof(*host->data),
				DMA_BIDIRECTIONAL);
	}
	status = spi_sync(host->spi, &host->m);
	if (status == 0)
		status = host->m.status;

	if (host->dma_dev)
		dma_sync_single_for_cpu(host->dma_dev,
				host->data_dma, sizeof(*host->data),
				DMA_BIDIRECTIONAL);
	if (status < 0) {
		dev_dbg(&host->spi->dev, "  ... write returned %d\n", status);
		cmd->error = status;
		return status;
	}

	/* after no-data commands and STOP_TRANSMISSION, chipselect off */
	return mmc_spi_response_get(host, cmd, cs_on);
}

/* Build data message with up to four separate transfers.  For TX, we
 * start by writing the data token.  And in most cases, we finish with
 * a status transfer.
 *
 * We always provide TX data for data and CRC.  The MMC/SD protocol
 * requires us to write ones; but Linux defaults to writing zeroes;
 * so we explicitly initialize it to all ones on RX paths.
 *
 * We also handle DMA mapping, so the underlying SPI controller does
 * not need to (re)do it for each message.
 */
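/* The assembled chain is thus, in order:  an optional one-byte token
 * (writes only), the data buffer, the two CRC bytes, and an optional
 * trailing status read (writes and multiblock reads).
 */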
static void
mmc_spi_setup_data_message(
	struct mmc_spi_host	*host,
	int			multiple,
	enum dma_data_direction	direction)
{
	struct spi_transfer	*t;
	struct scratch		*scratch = host->data;
	dma_addr_t		dma = host->data_dma;

	spi_message_init(&host->m);
	if (dma)
		host->m.is_dma_mapped = 1;

	/* for reads, readblock() skips 0xff bytes before finding
	 * the token; for writes, this transfer issues that token.
	 */
	if (direction == DMA_TO_DEVICE) {
		t = &host->token;
		memset(t, 0, sizeof(*t));
		t->len = 1;
		if (multiple)
			scratch->data_token = SPI_TOKEN_MULTI_WRITE;
		else
			scratch->data_token = SPI_TOKEN_SINGLE;
		t->tx_buf = &scratch->data_token;
		if (dma)
			t->tx_dma = dma + offsetof(struct scratch, data_token);
		spi_message_add_tail(t, &host->m);
	}

	/* Body of transfer is buffer, then CRC ...
	 * either TX-only, or RX with TX-ones.
	 */
	t = &host->t;
	memset(t, 0, sizeof(*t));
	t->tx_buf = host->ones;
	t->tx_dma = host->ones_dma;
	/* length and actual buffer info are written later */
	spi_message_add_tail(t, &host->m);

	t = &host->crc;
	memset(t, 0, sizeof(*t));
	t->len = 2;
	if (direction == DMA_TO_DEVICE) {
		/* the actual CRC may get written later */
		t->tx_buf = &scratch->crc_val;
		if (dma)
			t->tx_dma = dma + offsetof(struct scratch, crc_val);
	} else {
		t->tx_buf = host->ones;
		t->tx_dma = host->ones_dma;
		t->rx_buf = &scratch->crc_val;
		if (dma)
			t->rx_dma = dma + offsetof(struct scratch, crc_val);
	}
	spi_message_add_tail(t, &host->m);

	/*
	 * A single block read is followed by N(EC) [0+] all-ones bytes
	 * before deselect ... don't bother.
	 *
	 * Multiblock reads are followed by N(AC) [1+] all-ones bytes before
	 * the next block is read, or a STOP_TRANSMISSION is issued.  We'll
	 * collect that single byte, so readblock() doesn't need to.
	 *
	 * For a write, the one-byte data response follows immediately, then
	 * come zero or more busy bytes, then N(WR) [1+] all-ones bytes.
	 * Then single block writes may deselect, and multiblock ones issue
	 * the next token (next data block, or STOP_TRAN).  We can try to
	 * minimize I/O ops by using a single read to collect end-of-busy.
	 */
	if (multiple || direction == DMA_TO_DEVICE) {
		t = &host->early_status;
		memset(t, 0, sizeof(*t));
		t->len = (direction == DMA_TO_DEVICE)
				? sizeof(scratch->status)
				: 1;
		t->tx_buf = host->ones;
		t->tx_dma = host->ones_dma;
		t->rx_buf = scratch->status;
		if (dma)
			t->rx_dma = dma + offsetof(struct scratch, status);
		t->cs_change = 1;
		spi_message_add_tail(t, &host->m);
	}
}

/*
 * Write one block:
 *  - caller handled preceding N(WR) [1+] all-ones bytes
 *  - data block
 *	+ token
 *	+ data bytes
 *	+ crc16
 *  - an all-ones byte ... card writes a data-response byte
 *  - followed by N(EC) [0+] all-ones bytes, card writes zero/'busy'
 *
 * Return negative errno, else success.
 */
static int
mmc_spi_writeblock(struct mmc_spi_host *host, struct spi_transfer *t)
{
	struct spi_device	*spi = host->spi;
	int			status, i;
	struct scratch		*scratch = host->data;

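	/* crc_itu_t() computes the CRC16 (CCITT polynomial 0x1021) that
	 * the SD/MMC specs require over each data block; it goes on the
	 * wire big-endian, right after the data.
	 */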
	if (host->mmc->use_spi_crc)
		scratch->crc_val = cpu_to_be16(
				crc_itu_t(0, t->tx_buf, t->len));
	if (host->dma_dev)
		dma_sync_single_for_device(host->dma_dev,
				host->data_dma, sizeof(*scratch),
				DMA_BIDIRECTIONAL);

	status = spi_sync(spi, &host->m);
	if (status == 0)
		status = host->m.status;

	if (status != 0) {
		dev_dbg(&spi->dev, "write error (%d)\n", status);
		return status;
	}

	if (host->dma_dev)
		dma_sync_single_for_cpu(host->dma_dev,
				host->data_dma, sizeof(*scratch),
				DMA_BIDIRECTIONAL);

	/*
	 * Get the transmission data-response reply.  It must follow
	 * immediately after the data block we transferred.  This reply
	 * doesn't necessarily tell whether the write operation succeeded;
	 * it just says if the transmission was ok and whether *earlier*
	 * writes succeeded; see the standard.
	 */
	switch (SPI_MMC_RESPONSE_CODE(scratch->status[0])) {
	case SPI_RESPONSE_ACCEPTED:
		status = 0;
		break;
	case SPI_RESPONSE_CRC_ERR:
		/* host shall then issue MMC_STOP_TRANSMISSION */
		status = -EILSEQ;
		break;
	case SPI_RESPONSE_WRITE_ERR:
		/* host shall then issue MMC_STOP_TRANSMISSION,
		 * and should MMC_SEND_STATUS to sort it out
		 */
		status = -EIO;
		break;
	default:
		status = -EPROTO;
		break;
	}
	if (status != 0) {
		dev_dbg(&spi->dev, "write error %02x (%d)\n",
			scratch->status[0], status);
		return status;
	}

	t->tx_buf += t->len;
	if (host->dma_dev)
		t->tx_dma += t->len;

	/* Return when not busy.  If we didn't collect that status yet,
	 * we'll need some more I/O.
	 */
	for (i = 1; i < sizeof(scratch->status); i++) {
		if (scratch->status[i] != 0)
			return 0;
	}
	return mmc_spi_wait_unbusy(host, writeblock_timeout);
}

/*
 * Read one block:
 *  - skip leading all-ones bytes ... either
 *      + N(AC) [1..f(clock,CSD)] usually, else
 *      + N(CX) [0..8] when reading CSD or CID
 *  - data block
 *	+ token ... if error token, no data or crc
 *	+ data bytes
 *	+ crc16
 *
 * After single block reads, we're done; N(EC) [0+] all-ones bytes follow
 * before dropping chipselect.
 *
 * For multiblock reads, caller either reads the next block or issues a
 * STOP_TRANSMISSION command.
 */
static int
mmc_spi_readblock(struct mmc_spi_host *host, struct spi_transfer *t)
{
	struct spi_device	*spi = host->spi;
	int			status;
	struct scratch		*scratch = host->data;

	/* At least one SD card sends an all-zeroes byte when N(CX)
	 * applies, before the all-ones bytes ... just cope with that.
	 */
	status = mmc_spi_readbytes(host, 1);
	if (status < 0)
		return status;
	status = scratch->status[0];
	if (status == 0xff || status == 0)
		status = mmc_spi_readtoken(host);

	if (status == SPI_TOKEN_SINGLE) {
		if (host->dma_dev) {
			dma_sync_single_for_device(host->dma_dev,
					host->data_dma, sizeof(*scratch),
					DMA_BIDIRECTIONAL);
			dma_sync_single_for_device(host->dma_dev,
					t->rx_dma, t->len,
					DMA_FROM_DEVICE);
		}

		status = spi_sync(spi, &host->m);
		if (status == 0)
			status = host->m.status;

		if (host->dma_dev) {
			dma_sync_single_for_cpu(host->dma_dev,
					host->data_dma, sizeof(*scratch),
					DMA_BIDIRECTIONAL);
			dma_sync_single_for_cpu(host->dma_dev,
					t->rx_dma, t->len,
					DMA_FROM_DEVICE);
		}

	} else {
		dev_dbg(&spi->dev, "read error %02x (%d)\n", status, status);

		/* we've read extra garbage, timed out, etc */
		if (status < 0)
			return status;

		/* low four bits are an R2 subset, fifth seems to be
		 * vendor specific ... map them all to a generic error.
		 */
		return -EIO;
	}

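	/* The two CRC bytes landed in scratch->crc_val as part of the
	 * same message; verify them against a locally computed CRC16.
	 */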
	if (host->mmc->use_spi_crc) {
		u16 crc = crc_itu_t(0, t->rx_buf, t->len);

		be16_to_cpus(&scratch->crc_val);
		if (scratch->crc_val != crc) {
			dev_dbg(&spi->dev, "read - crc error: crc_val=0x%04x, "
					"computed=0x%04x len=%d\n",
					scratch->crc_val, crc, t->len);
			return -EILSEQ;
		}
	}

	t->rx_buf += t->len;
	if (host->dma_dev)
		t->rx_dma += t->len;

	return 0;
}

/*
 * An MMC/SD data stage includes one or more blocks, optional CRCs,
 * and inline handshaking.  That handshaking makes it unlike most
 * other SPI protocol stacks.
 */
static void
mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd,
		struct mmc_data *data, u32 blk_size)
{
	struct spi_device	*spi = host->spi;
	struct device		*dma_dev = host->dma_dev;
	struct spi_transfer	*t;
	enum dma_data_direction	direction;
	struct scatterlist	*sg;
	unsigned		n_sg;
	int			multiple = (data->blocks > 1);

	if (data->flags & MMC_DATA_READ)
		direction = DMA_FROM_DEVICE;
	else
		direction = DMA_TO_DEVICE;
	mmc_spi_setup_data_message(host, multiple, direction);
	t = &host->t;

	/* Handle scatterlist segments one at a time, with synch for
	 * each 512-byte block
	 */
	for (sg = data->sg, n_sg = data->sg_len; n_sg; n_sg--, sg++) {
		int			status = 0;
		dma_addr_t		dma_addr = 0;
		void			*kmap_addr;
		unsigned		length = sg->length;
		enum dma_data_direction	dir = direction;

		/* set up dma mapping for controller drivers that might
		 * use DMA ... though they may fall back to PIO
		 */
		if (dma_dev) {
			/* never invalidate whole *shared* pages ... */
			if ((sg->offset != 0 || length != PAGE_SIZE)
					&& dir == DMA_FROM_DEVICE)
				dir = DMA_BIDIRECTIONAL;

			dma_addr = dma_map_page(dma_dev, sg->page, 0,
						PAGE_SIZE, dir);
			if (direction == DMA_TO_DEVICE)
				t->tx_dma = dma_addr + sg->offset;
			else
				t->rx_dma = dma_addr + sg->offset;
		}

		/* allow pio too; we don't allow highmem */
		kmap_addr = kmap(sg->page);
		if (direction == DMA_TO_DEVICE)
			t->tx_buf = kmap_addr + sg->offset;
		else
			t->rx_buf = kmap_addr + sg->offset;

		/* transfer each block, and update request status */
		while (length) {
			t->len = min(length, blk_size);

			dev_dbg(&host->spi->dev,
				"    mmc_spi: %s block, %d bytes\n",
				(direction == DMA_TO_DEVICE)
				? "write"
				: "read",
				t->len);

			if (direction == DMA_TO_DEVICE)
				status = mmc_spi_writeblock(host, t);
			else
				status = mmc_spi_readblock(host, t);
			if (status < 0)
				break;

			data->bytes_xfered += t->len;
			length -= t->len;

			if (!multiple)
				break;
		}

		/* discard mappings */
		if (direction == DMA_FROM_DEVICE)
			flush_kernel_dcache_page(sg->page);
		kunmap(sg->page);
		if (dma_dev)
			dma_unmap_page(dma_dev, dma_addr, PAGE_SIZE, dir);

		if (status < 0) {
			data->error = status;
			dev_dbg(&spi->dev, "%s status %d\n",
				(direction == DMA_TO_DEVICE)
					? "write" : "read",
				status);
			break;
		}
	}

	/* NOTE some docs describe an MMC-only SET_BLOCK_COUNT (CMD23) that
	 * can be issued before multiblock writes.  Unlike its more widely
	 * documented analogue for SD cards (SET_WR_BLK_ERASE_COUNT, ACMD23),
	 * that can affect the STOP_TRAN logic.  Complete (and current)
	 * MMC specs should sort that out before Linux starts using CMD23.
	 */
	if (direction == DMA_TO_DEVICE && multiple) {
		struct scratch	*scratch = host->data;
		int		tmp;
		const unsigned	statlen = sizeof(scratch->status);

		dev_dbg(&spi->dev, "    mmc_spi: STOP_TRAN\n");

		/* Tweak the per-block message we set up earlier by morphing
		 * it to hold a single buffer with the token followed by some
		 * all-ones bytes ... skip N(BR) (0..1), scan the rest for
		 * "not busy any longer" status, and leave chip selected.
		 */
		INIT_LIST_HEAD(&host->m.transfers);
		list_add(&host->early_status.transfer_list,
				&host->m.transfers);

		memset(scratch->status, 0xff, statlen);
		scratch->status[0] = SPI_TOKEN_STOP_TRAN;

		host->early_status.tx_buf = host->early_status.rx_buf;
		host->early_status.tx_dma = host->early_status.rx_dma;
		host->early_status.len = statlen;

		if (host->dma_dev)
			dma_sync_single_for_device(host->dma_dev,
					host->data_dma, sizeof(*scratch),
					DMA_BIDIRECTIONAL);

		tmp = spi_sync(spi, &host->m);
		if (tmp == 0)
			tmp = host->m.status;

		if (host->dma_dev)
			dma_sync_single_for_cpu(host->dma_dev,
					host->data_dma, sizeof(*scratch),
					DMA_BIDIRECTIONAL);

		if (tmp < 0) {
			if (!data->error)
				data->error = tmp;
			return;
		}

		/* Ideally we collected "not busy" status with one I/O,
		 * avoiding wasteful byte-at-a-time scanning... but more
		 * I/O is often needed.
		 */
		for (tmp = 2; tmp < statlen; tmp++) {
			if (scratch->status[tmp] != 0)
				return;
		}
		tmp = mmc_spi_wait_unbusy(host, writeblock_timeout);
		if (tmp < 0 && !data->error)
			data->error = tmp;
	}
}

/****************************************************************************/

/*
 * MMC driver implementation -- the interface to the MMC stack
 */

static void mmc_spi_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mmc_spi_host	*host = mmc_priv(mmc);
	int			status = -EINVAL;

#ifdef DEBUG
	/* MMC core and layered drivers *MUST* issue SPI-aware commands */
	{
		struct mmc_command	*cmd;
		int			invalid = 0;

		cmd = mrq->cmd;
		if (!mmc_spi_resp_type(cmd)) {
			dev_dbg(&host->spi->dev, "bogus command\n");
			cmd->error = -EINVAL;
			invalid = 1;
		}

		cmd = mrq->stop;
		if (cmd && !mmc_spi_resp_type(cmd)) {
			dev_dbg(&host->spi->dev, "bogus STOP command\n");
			cmd->error = -EINVAL;
			invalid = 1;
		}

		if (invalid) {
			dump_stack();
			mmc_request_done(host->mmc, mrq);
			return;
		}
	}
#endif

	/* issue command; then optionally data and stop */
	status = mmc_spi_command_send(host, mrq, mrq->cmd, mrq->data != NULL);
	if (status == 0 && mrq->data) {
		mmc_spi_data_do(host, mrq->cmd, mrq->data, mrq->data->blksz);
		if (mrq->stop)
			status = mmc_spi_command_send(host, mrq, mrq->stop, 0);
		else
			mmc_cs_off(host);
	}

	mmc_request_done(host->mmc, mrq);
}

/* See Section 6.4.1, in SD "Simplified Physical Layer Specification 2.0"
 *
 * NOTE that here we can't know that the card has just been powered up;
 * not all MMC/SD sockets support power switching.
 *
 * FIXME when the card is still in SPI mode, e.g. from a previous kernel,
 * this doesn't seem to do the right thing at all...
 */
static void mmc_spi_initsequence(struct mmc_spi_host *host)
{
	/* Try to be very sure any previous command has completed;
	 * wait till not-busy, skip debris from any old commands.
	 */
	mmc_spi_wait_unbusy(host, r1b_timeout);
	mmc_spi_readbytes(host, 10);

	/*
	 * Do a burst with chipselect active-high.  We need to do this to
	 * meet the requirement of 74 clock cycles with both chipselect
	 * and CMD (MOSI) high before CMD0 ... after the card has been
	 * powered up to Vdd(min), and so is ready to take commands.
	 *
	 * Some cards are particularly needy of this (e.g. Viking "SD256")
	 * while most others don't seem to care.
	 *
	 * Note that this is one of the places MMC/SD plays games with the
	 * SPI protocol.  Another is that when chipselect is released while
	 * the card returns BUSY status, the clock must issue several cycles
	 * with chipselect high before the card will stop driving its output.
	 */
	host->spi->mode |= SPI_CS_HIGH;
	if (spi_setup(host->spi) != 0) {
		/* Just warn; most cards work without it. */
		dev_warn(&host->spi->dev,
				"can't change chip-select polarity\n");
		host->spi->mode &= ~SPI_CS_HIGH;
	} else {
		mmc_spi_readbytes(host, 18);
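		/* 18 bytes == 144 clocks, comfortably more than the
		 * required 74.
		 */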

		host->spi->mode &= ~SPI_CS_HIGH;
		if (spi_setup(host->spi) != 0) {
			/* Wot, we can't get the same setup we had before? */
			dev_err(&host->spi->dev,
					"can't restore chip-select polarity\n");
		}
	}
}

static char *mmc_powerstring(u8 power_mode)
{
	switch (power_mode) {
	case MMC_POWER_OFF: return "off";
	case MMC_POWER_UP:  return "up";
	case MMC_POWER_ON:  return "on";
	}
	return "?";
}

static void mmc_spi_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct mmc_spi_host *host = mmc_priv(mmc);

	if (host->power_mode != ios->power_mode) {
		int		canpower;

		canpower = host->pdata && host->pdata->setpower;

		dev_dbg(&host->spi->dev, "mmc_spi: power %s (%d)%s\n",
				mmc_powerstring(ios->power_mode),
				ios->vdd,
				canpower ? ", can switch" : "");

		/* switch power on/off if possible, accounting for
		 * max 250msec powerup time if needed.
		 */
		if (canpower) {
			switch (ios->power_mode) {
			case MMC_POWER_OFF:
			case MMC_POWER_UP:
				host->pdata->setpower(&host->spi->dev,
						ios->vdd);
				if (ios->power_mode == MMC_POWER_UP)
					msleep(host->powerup_msecs);
			}
		}

		/* See 6.4.1 in the simplified SD card physical spec 2.0 */
		if (ios->power_mode == MMC_POWER_ON)
			mmc_spi_initsequence(host);

		/* If powering down, ground all card inputs to avoid power
		 * delivery from data lines!  On a shared SPI bus, this
		 * will probably be temporary; 6.4.2 of the simplified SD
		 * spec says this must last at least 1msec.
		 *
		 *   - Clock low means CPOL 0, e.g. mode 0
		 *   - MOSI low comes from writing zero
		 *   - Chipselect is usually active low...
		 */
		if (canpower && ios->power_mode == MMC_POWER_OFF) {
			int mres;

			host->spi->mode &= ~(SPI_CPOL|SPI_CPHA);
			mres = spi_setup(host->spi);
			if (mres < 0)
				dev_dbg(&host->spi->dev,
					"switch to SPI mode 0 failed\n");

			if (spi_w8r8(host->spi, 0x00) < 0)
				dev_dbg(&host->spi->dev,
					"put spi signals to low failed\n");

			/*
			 * Now clock should be low due to spi mode 0;
			 * MOSI should be low because of written 0x00;
			 * chipselect should be low (it is active low)
			 * power supply is off, so now MMC is off too!
			 *
			 * FIXME no, chipselect can be high since the
			 * device is inactive and SPI_CS_HIGH is clear...
			 */
			msleep(10);
			if (mres == 0) {
				host->spi->mode |= (SPI_CPOL|SPI_CPHA);
				mres = spi_setup(host->spi);
				if (mres < 0)
					dev_dbg(&host->spi->dev,
						"switch back to SPI mode 3"
						" failed\n");
			}
		}

		host->power_mode = ios->power_mode;
	}

	if (host->spi->max_speed_hz != ios->clock && ios->clock != 0) {
		int		status;

		host->spi->max_speed_hz = ios->clock;
		status = spi_setup(host->spi);
		dev_dbg(&host->spi->dev,
			"mmc_spi:  clock to %d Hz, %d\n",
			host->spi->max_speed_hz, status);
	}
}

static int mmc_spi_get_ro(struct mmc_host *mmc)
{
	struct mmc_spi_host *host = mmc_priv(mmc);

	if (host->pdata && host->pdata->get_ro)
		return host->pdata->get_ro(mmc->parent);
	/* board doesn't support read only detection; assume writeable */
	return 0;
}


static const struct mmc_host_ops mmc_spi_ops = {
	.request	= mmc_spi_request,
	.set_ios	= mmc_spi_set_ios,
	.get_ro		= mmc_spi_get_ro,
};


/****************************************************************************/

/*
 * SPI driver implementation
 */

static irqreturn_t
mmc_spi_detect_irq(int irq, void *mmc)
{
	struct mmc_spi_host *host = mmc_priv(mmc);
	u16 delay_msec = max(host->pdata->detect_delay, (u16)100);

	mmc_detect_change(mmc, msecs_to_jiffies(delay_msec));
	return IRQ_HANDLED;
}

static int mmc_spi_probe(struct spi_device *spi)
{
	void			*ones;
	struct mmc_host		*mmc;
	struct mmc_spi_host	*host;
	int			status;

	/* MMC and SD specs only seem to care that sampling is on the
	 * rising edge ... meaning SPI modes 0 or 3.  So either SPI mode
	 * should be legit.  We'll use mode 0 since it seems to be a
	 * bit less troublesome on some hardware ... unclear why.
	 */
	spi->mode = SPI_MODE_0;
	spi->bits_per_word = 8;

	status = spi_setup(spi);
	if (status < 0) {
		dev_dbg(&spi->dev, "needs SPI mode %02x, %d KHz; %d\n",
				spi->mode, spi->max_speed_hz / 1000,
				status);
		return status;
	}

	/* We can use the bus safely iff nobody else will interfere with
	 * us.  That is, either we have the experimental exclusive access
	 * primitives ... or else there's nobody to share it with.
	 */
	if (spi->master->num_chipselect > 1) {
		struct device	*parent = spi->dev.parent;

		/* If there are multiple devices on this bus, we
		 * can't proceed.
		 */
		spin_lock(&parent->klist_children.k_lock);
		if (parent->klist_children.k_list.next
				!= parent->klist_children.k_list.prev)
			status = -EMLINK;
		else
			status = 0;
		spin_unlock(&parent->klist_children.k_lock);
		if (status < 0) {
			dev_err(&spi->dev, "can't share SPI bus\n");
			return status;
		}

		/* REVISIT we can't guarantee another device won't
		 * be added later.  It's uncommon though ... for now,
		 * work as if this is safe.
		 */
		dev_warn(&spi->dev, "ASSUMING unshared SPI bus!\n");
	}

	/* We need a supply of ones to transmit.  This is the only time
	 * the CPU touches these, so cache coherency isn't a concern.
	 *
	 * NOTE if many systems use more than one MMC-over-SPI connector
	 * it'd save some memory to share this.  That's evidently rare.
	 */
	status = -ENOMEM;
	ones = kmalloc(MMC_SPI_BLOCKSIZE, GFP_KERNEL);
	if (!ones)
		goto nomem;
	memset(ones, 0xff, MMC_SPI_BLOCKSIZE);

	mmc = mmc_alloc_host(sizeof(*host), &spi->dev);
	if (!mmc)
		goto nomem;

	mmc->ops = &mmc_spi_ops;
	mmc->max_blk_size = MMC_SPI_BLOCKSIZE;

	/* As long as we keep track of the number of successfully
	 * transmitted blocks, we're good for multiwrite.
	 */
	mmc->caps = MMC_CAP_SPI | MMC_CAP_MULTIWRITE;

	/* SPI doesn't need the lowspeed device identification thing for
	 * MMC or SD cards, since it never comes up in open drain mode.
	 * That's good; some SPI masters can't handle very low speeds!
	 *
	 * However, low speed SDIO cards need not handle over 400 KHz;
	 * that's the only reason not to use a few MHz for f_min (until
	 * the upper layer reads the target frequency from the CSD).
	 */
	mmc->f_min = 400000;
	mmc->f_max = spi->max_speed_hz;

	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->spi = spi;

	host->ones = ones;

	/* Platform data is used to hook up things like card sensing
	 * and power switching gpios.
	 */
	host->pdata = spi->dev.platform_data;
	if (host->pdata)
		mmc->ocr_avail = host->pdata->ocr_mask;
	if (!mmc->ocr_avail) {
		dev_warn(&spi->dev, "ASSUMING 3.2-3.4 V slot power\n");
		mmc->ocr_avail = MMC_VDD_32_33|MMC_VDD_33_34;
	}
	if (host->pdata && host->pdata->setpower) {
		host->powerup_msecs = host->pdata->powerup_msecs;
		if (!host->powerup_msecs || host->powerup_msecs > 250)
			host->powerup_msecs = 250;
	}

	dev_set_drvdata(&spi->dev, mmc);

	/* preallocate dma buffers */
	host->data = kmalloc(sizeof(*host->data), GFP_KERNEL);
	if (!host->data)
		goto fail_nobuf1;

	if (spi->master->dev.parent->dma_mask) {
		struct device	*dev = spi->master->dev.parent;

		host->dma_dev = dev;
		host->ones_dma = dma_map_single(dev, ones,
				MMC_SPI_BLOCKSIZE, DMA_TO_DEVICE);
		host->data_dma = dma_map_single(dev, host->data,
				sizeof(*host->data), DMA_BIDIRECTIONAL);

		/* REVISIT in theory those map operations can fail... */

		dma_sync_single_for_cpu(host->dma_dev,
				host->data_dma, sizeof(*host->data),
				DMA_BIDIRECTIONAL);
	}

	/* setup message for status/busy readback */
	spi_message_init(&host->readback);
	host->readback.is_dma_mapped = (host->dma_dev != NULL);

	spi_message_add_tail(&host->status, &host->readback);
	host->status.tx_buf = host->ones;
	host->status.tx_dma = host->ones_dma;
	host->status.rx_buf = &host->data->status;
	host->status.rx_dma = host->data_dma + offsetof(struct scratch, status);
	host->status.cs_change = 1;
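	/* NOTE: cs_change on the last (here, only) transfer of a message
	 * inverts the usual behavior, leaving chipselect asserted after
	 * the message completes; the card stays selected between polls.
	 */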

	/* register card detect irq */
	if (host->pdata && host->pdata->init) {
		status = host->pdata->init(&spi->dev, mmc_spi_detect_irq, mmc);
		if (status != 0)
			goto fail_glue_init;
	}

	status = mmc_add_host(mmc);
	if (status != 0)
		goto fail_add_host;

	dev_info(&spi->dev, "SD/MMC host %s%s%s%s\n",
			mmc->class_dev.bus_id,
			host->dma_dev ? "" : ", no DMA",
			(host->pdata && host->pdata->get_ro)
				? "" : ", no WP",
			(host->pdata && host->pdata->setpower)
				? "" : ", no poweroff");
	return 0;

fail_add_host:
	mmc_remove_host(mmc);
fail_glue_init:
	if (host->dma_dev)
		dma_unmap_single(host->dma_dev, host->data_dma,
				sizeof(*host->data), DMA_BIDIRECTIONAL);
	kfree(host->data);

fail_nobuf1:
	mmc_free_host(mmc);
	dev_set_drvdata(&spi->dev, NULL);

nomem:
	kfree(ones);
	return status;
}


static int __devexit mmc_spi_remove(struct spi_device *spi)
{
	struct mmc_host		*mmc = dev_get_drvdata(&spi->dev);
	struct mmc_spi_host	*host;

	if (mmc) {
		host = mmc_priv(mmc);

		/* prevent new mmc_detect_change() calls */
		if (host->pdata && host->pdata->exit)
			host->pdata->exit(&spi->dev, mmc);

		mmc_remove_host(mmc);

		if (host->dma_dev) {
			dma_unmap_single(host->dma_dev, host->ones_dma,
				MMC_SPI_BLOCKSIZE, DMA_TO_DEVICE);
			dma_unmap_single(host->dma_dev, host->data_dma,
				sizeof(*host->data), DMA_BIDIRECTIONAL);
		}

		kfree(host->data);
		kfree(host->ones);

		spi->max_speed_hz = mmc->f_max;
		mmc_free_host(mmc);
		dev_set_drvdata(&spi->dev, NULL);
	}
	return 0;
}


static struct spi_driver mmc_spi_driver = {
	.driver = {
		.name =		"mmc_spi",
		.bus =		&spi_bus_type,
		.owner =	THIS_MODULE,
	},
	.probe =	mmc_spi_probe,
	.remove =	__devexit_p(mmc_spi_remove),
};


static int __init mmc_spi_init(void)
{
	return spi_register_driver(&mmc_spi_driver);
}
module_init(mmc_spi_init);


static void __exit mmc_spi_exit(void)
{
	spi_unregister_driver(&mmc_spi_driver);
}
module_exit(mmc_spi_exit);


MODULE_AUTHOR("Mike Lavender, David Brownell, "
		"Hans-Peter Nilsson, Jan Nikitenko");
MODULE_DESCRIPTION("SPI SD/MMC host driver");
MODULE_LICENSE("GPL");