// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Access SD/MMC cards through SPI master controllers
 *
 * (C) Copyright 2005, Intec Automation,
 *		Mike Lavender (mike@steroidmicros)
 * (C) Copyright 2006-2007, David Brownell
 * (C) Copyright 2007, Axis Communications,
 *		Hans-Peter Nilsson (hp@axis.com)
 * (C) Copyright 2007, ATRON electronic GmbH,
 *		Jan Nikitenko <jan.nikitenko@gmail.com>
 */
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/dma-direction.h>
#include <linux/crc7.h>
#include <linux/crc-itu-t.h>
#include <linux/scatterlist.h>

#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>		/* for R1_SPI_* bit values */
#include <linux/mmc/slot-gpio.h>

#include <linux/spi/spi.h>
#include <linux/spi/mmc_spi.h>

#include <asm/unaligned.h>


/* NOTES:
 *
 * - For now, we won't try to interoperate with a real mmc/sd/sdio
 *   controller, although some of them do have hardware support for
 *   SPI protocol.  The main reason for such configs would be mmc-ish
 *   cards like DataFlash, which don't support that "native" protocol.
 *
 *   We don't have a "DataFlash/MMC/SD/SDIO card slot" abstraction to
 *   switch between driver stacks, and in any case if "native" mode
 *   is available, it will be faster and hence preferable.
 *
 * - MMC depends on a different chipselect management policy than the
 *   SPI interface currently supports for shared bus segments:  it needs
 *   to issue multiple spi_message requests with the chipselect active,
 *   using the results of one message to decide the next one to issue.
 *
 *   Pending updates to the programming interface, this driver expects
 *   that it not share the bus with other drivers (precluding conflicts).
 *
 * - We tell the controller to keep the chipselect active from the
 *   beginning of an mmc_host_ops.request until the end.  So beware
 *   of SPI controller drivers that mis-handle the cs_change flag!
 *
 *   However, many cards seem OK with chipselect flapping up/down
 *   during that time ... at least on unshared bus segments.
 */


/*
 * Local protocol constants, internal to data block protocols.
 */

/* Response tokens used to ack each block written: */
#define SPI_MMC_RESPONSE_CODE(x)	((x) & 0x1f)
#define SPI_RESPONSE_ACCEPTED		((2 << 1)|1)
#define SPI_RESPONSE_CRC_ERR		((5 << 1)|1)
#define SPI_RESPONSE_WRITE_ERR		((6 << 1)|1)
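/*
 * Worked example (illustrative): a raw data-response byte of 0xe5 masks to
 * SPI_MMC_RESPONSE_CODE(0xe5) == 0x05 == SPI_RESPONSE_ACCEPTED, i.e. the
 * block was accepted; the upper bits are don't-care.
 */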

/* Read and write blocks start with these tokens and end with crc;
 * on error, read tokens act like a subset of R2_SPI_* values.
 */
#define SPI_TOKEN_SINGLE	0xfe	/* single block r/w, multiblock read */
#define SPI_TOKEN_MULTI_WRITE	0xfc	/* multiblock write */
#define SPI_TOKEN_STOP_TRAN	0xfd	/* terminate multiblock write */

#define MMC_SPI_BLOCKSIZE	512

#define MMC_SPI_R1B_TIMEOUT_MS	3000
#define MMC_SPI_INIT_TIMEOUT_MS	3000

/* One of the critical speed parameters is the amount of data which may
 * be transferred in one command. If this value is too low, the SD card
 * controller has to do multiple partial block writes (argggh!). With
 * today's (2008) SD cards there is little speed gain if we transfer more
 * than 64 KBytes at a time. So use this value until there is any indication
 * that we should do more here.
 */
#define MMC_SPI_BLOCKSATONCE	128
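/* 128 blocks * MMC_SPI_BLOCKSIZE (512) == 64 KBytes per request, matching the note above */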

/****************************************************************************/

/*
 * Local Data Structures
 */

/* "scratch" is per-{command,block} data exchanged with the card */
struct scratch {
	u8			status[29];
	u8			data_token;
	__be16			crc_val;
};

struct mmc_spi_host {
	struct mmc_host		*mmc;
	struct spi_device	*spi;

	unsigned char		power_mode;
	u16			powerup_msecs;

	struct mmc_spi_platform_data	*pdata;

	/* for bulk data transfers */
	struct spi_transfer	token, t, crc, early_status;
	struct spi_message	m;

	/* for status readback */
	struct spi_transfer	status;
	struct spi_message	readback;

	/* buffer used for commands and for message "overhead" */
	struct scratch		*data;

	/* Specs say to write ones most of the time, even when the card
	 * has no need to read its input data; and many cards won't care.
	 * This is our source of those ones.
	 */
	void			*ones;
};


/****************************************************************************/

/*
 * MMC-over-SPI protocol glue, used by the MMC stack interface
 */

static inline int mmc_cs_off(struct mmc_spi_host *host)
{
	/* chipselect will always be inactive after setup() */
	return spi_setup(host->spi);
}

static int mmc_spi_readbytes(struct mmc_spi_host *host, unsigned int len)
{
	if (len > sizeof(*host->data)) {
		WARN_ON(1);
		return -EIO;
	}

	host->status.len = len;

	return spi_sync_locked(host->spi, &host->readback);
}

static int mmc_spi_skip(struct mmc_spi_host *host, unsigned long timeout,
			unsigned n, u8 byte)
{
	u8 *cp = host->data->status;
	unsigned long start = jiffies;

	do {
		int		status;
		unsigned	i;

		status = mmc_spi_readbytes(host, n);
		if (status < 0)
			return status;

		for (i = 0; i < n; i++) {
			if (cp[i] != byte)
				return cp[i];
		}

		/* If we need long timeouts, we may release the CPU */
		cond_resched();
	} while (time_is_after_jiffies(start + timeout));
	return -ETIMEDOUT;
}

static inline int
mmc_spi_wait_unbusy(struct mmc_spi_host *host, unsigned long timeout)
{
	return mmc_spi_skip(host, timeout, sizeof(host->data->status), 0);
}

static int mmc_spi_readtoken(struct mmc_spi_host *host, unsigned long timeout)
{
	return mmc_spi_skip(host, timeout, 1, 0xff);
}


/*
 * Note that for SPI, cmd->resp[0] is not the same data as "native" protocol
 * hosts return!  The low byte holds R1_SPI bits.  The next byte may hold
 * R2_SPI bits ... for SEND_STATUS, or after data read errors.
 *
 * cmd->resp[1] holds any four-byte response, for R3 (READ_OCR) and on
 * newer cards R7 (IF_COND).
 */

static char *maptype(struct mmc_command *cmd)
{
	switch (mmc_spi_resp_type(cmd)) {
	case MMC_RSP_SPI_R1:	return "R1";
	case MMC_RSP_SPI_R1B:	return "R1B";
	case MMC_RSP_SPI_R2:	return "R2/R5";
	case MMC_RSP_SPI_R3:	return "R3/R4/R7";
	default:		return "?";
	}
}

/* return zero, else negative errno after setting cmd->error */
static int mmc_spi_response_get(struct mmc_spi_host *host,
		struct mmc_command *cmd, int cs_on)
{
	unsigned long timeout_ms;
	u8	*cp = host->data->status;
	u8	*end = cp + host->t.len;
	int	value = 0;
	int	bitshift;
	u8	leftover = 0;
	unsigned short rotator;
	int	i;

	/* Except for data block reads, the whole response will already
	 * be stored in the scratch buffer.  It's somewhere after the
	 * command and the first byte we read after it.  We ignore that
	 * first byte.  After STOP_TRANSMISSION command it may include
	 * two data bits, but otherwise it's all ones.
	 */
	cp += 8;
	while (cp < end && *cp == 0xff)
		cp++;

	/* Data block reads (R1 response types) may need more data... */
	if (cp == end) {
		cp = host->data->status;
		end = cp+1;

		/* Card sends N(CR) (== 1..8) bytes of all-ones then one
		 * status byte ... and we already scanned 2 bytes.
		 *
		 * REVISIT block read paths use nasty byte-at-a-time I/O
		 * so it can always DMA directly into the target buffer.
		 * It'd probably be better to memcpy() the first chunk and
		 * avoid extra i/o calls...
		 *
		 * Note we check for more than 8 bytes, because in practice,
		 * some SD cards are slow...
		 */
		for (i = 2; i < 16; i++) {
			value = mmc_spi_readbytes(host, 1);
			if (value < 0)
				goto done;
			if (*cp != 0xff)
				goto checkstatus;
		}
		value = -ETIMEDOUT;
		goto done;
	}

checkstatus:
	bitshift = 0;
	if (*cp & 0x80) {
		/* Houston, we have an ugly card with a bit-shifted response */
		rotator = *cp++ << 8;
		/* read the next byte */
		if (cp == end) {
			value = mmc_spi_readbytes(host, 1);
			if (value < 0)
				goto done;
			cp = host->data->status;
			end = cp+1;
		}
		rotator |= *cp++;
		while (rotator & 0x8000) {
			bitshift++;
			rotator <<= 1;
		}
		cmd->resp[0] = rotator >> 8;
		leftover = rotator;
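		/*
		 * Illustrative case (assumed, not from the spec text): an R1
		 * of 0x01 arriving three bit-times late reads as 0xe0 then
		 * 0x3f, so rotator starts as 0xe03f; three shifts clear bit
		 * 15 (bitshift == 3), rotator >> 8 recovers 0x01, and
		 * leftover (0xf8) carries the stray bits into any later
		 * response bytes handled below.
		 */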
	} else {
		cmd->resp[0] = *cp++;
	}
	cmd->error = 0;

	/* Status byte: the entire seven-bit R1 response.  */
	if (cmd->resp[0] != 0) {
		if ((R1_SPI_PARAMETER | R1_SPI_ADDRESS)
				& cmd->resp[0])
			value = -EFAULT; /* Bad address */
		else if (R1_SPI_ILLEGAL_COMMAND & cmd->resp[0])
			value = -ENOSYS; /* Function not implemented */
		else if (R1_SPI_COM_CRC & cmd->resp[0])
			value = -EILSEQ; /* Illegal byte sequence */
		else if ((R1_SPI_ERASE_SEQ | R1_SPI_ERASE_RESET)
				& cmd->resp[0])
			value = -EIO; /* I/O error */
		/* else R1_SPI_IDLE, "it's resetting" */
	}

	switch (mmc_spi_resp_type(cmd)) {

	/* SPI R1B == R1 + busy; STOP_TRANSMISSION (for multiblock reads)
	 * and less-common stuff like various erase operations.
	 */
	case MMC_RSP_SPI_R1B:
		/* maybe we read all the busy tokens already */
		while (cp < end && *cp == 0)
			cp++;
		if (cp == end) {
			timeout_ms = cmd->busy_timeout ? cmd->busy_timeout :
				MMC_SPI_R1B_TIMEOUT_MS;
			mmc_spi_wait_unbusy(host, msecs_to_jiffies(timeout_ms));
		}
		break;

	/* SPI R2 == R1 + second status byte; SEND_STATUS
	 * SPI R5 == R1 + data byte; IO_RW_DIRECT
	 */
	case MMC_RSP_SPI_R2:
		/* read the next byte */
		if (cp == end) {
			value = mmc_spi_readbytes(host, 1);
			if (value < 0)
				goto done;
			cp = host->data->status;
			end = cp+1;
		}
		if (bitshift) {
			rotator = leftover << 8;
			rotator |= *cp << bitshift;
			cmd->resp[0] |= (rotator & 0xFF00);
		} else {
			cmd->resp[0] |= *cp << 8;
		}
		break;

	/* SPI R3, R4, or R7 == R1 + 4 bytes */
	case MMC_RSP_SPI_R3:
		rotator = leftover << 8;
		cmd->resp[1] = 0;
		for (i = 0; i < 4; i++) {
			cmd->resp[1] <<= 8;
			/* read the next byte */
			if (cp == end) {
				value = mmc_spi_readbytes(host, 1);
				if (value < 0)
					goto done;
				cp = host->data->status;
				end = cp+1;
			}
			if (bitshift) {
				rotator |= *cp++ << bitshift;
				cmd->resp[1] |= (rotator >> 8);
				rotator <<= 8;
			} else {
				cmd->resp[1] |= *cp++;
			}
		}
		break;

	/* SPI R1 == just one status byte */
	case MMC_RSP_SPI_R1:
		break;

	default:
		dev_dbg(&host->spi->dev, "bad response type %04x\n",
			mmc_spi_resp_type(cmd));
		if (value >= 0)
			value = -EINVAL;
		goto done;
	}

	if (value < 0)
		dev_dbg(&host->spi->dev,
			"  ... CMD%d response SPI_%s: resp %04x %08x\n",
			cmd->opcode, maptype(cmd), cmd->resp[0], cmd->resp[1]);

	/* disable chipselect on errors and some success cases */
	if (value >= 0 && cs_on)
		return value;
done:
	if (value < 0)
		cmd->error = value;
	mmc_cs_off(host);
	return value;
}

/* Issue command and read its response.
 * Returns zero on success, negative for error.
 *
 * On error, caller must cope with mmc core retry mechanism.  That
 * means immediate low-level resubmit, which affects the bus lock...
 */
static int
mmc_spi_command_send(struct mmc_spi_host *host,
		struct mmc_request *mrq,
		struct mmc_command *cmd, int cs_on)
{
	struct scratch		*data = host->data;
	u8			*cp = data->status;
	int			status;
	struct spi_transfer	*t;

	/* We can handle most commands (except block reads) in one full
	 * duplex I/O operation before either starting the next transfer
	 * (data block or command) or else deselecting the card.
	 *
	 * First, write 7 bytes:
	 *  - an all-ones byte to ensure the card is ready
	 *  - opcode byte (plus start and transmission bits)
	 *  - four bytes of big-endian argument
	 *  - crc7 (plus end bit) ... always computed, it's cheap
	 *
	 * We init the whole buffer to all-ones, which is what we need
	 * to write while we're reading (later) response data.
	 */
	memset(cp, 0xff, sizeof(data->status));

	cp[1] = 0x40 | cmd->opcode;
	put_unaligned_be32(cmd->arg, cp + 2);
	cp[6] = crc7_be(0, cp + 1, 5) | 0x01;
	cp += 7;
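	/*
	 * Example frame (illustrative): CMD0 with a zero argument goes out
	 * as ff 40 00 00 00 00 95 -- 0x40 is the start/transmission bits
	 * plus opcode 0, and 0x95 is its CRC7 shifted left with the end
	 * bit set.
	 */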

	/* Then, read up to 13 bytes (while writing all-ones):
	 *  - N(CR) (== 1..8) bytes of all-ones
	 *  - status byte (for all response types)
	 *  - the rest of the response, either:
	 *      + nothing, for R1 or R1B responses
	 *      + second status byte, for R2 responses
	 *      + four data bytes, for R3 and R7 responses
	 *
	 * Finally, read some more bytes ... in the nice cases we know in
	 * advance how many, and reading 1 more is always OK:
	 *  - N(EC) (== 0..N) bytes of all-ones, before deselect/finish
	 *  - N(RC) (== 1..N) bytes of all-ones, before next command
	 *  - N(WR) (== 1..N) bytes of all-ones, before data write
	 *
	 * So in those cases one full duplex I/O of at most 21 bytes will
	 * handle the whole command, leaving the card ready to receive a
	 * data block or new command.  We do that whenever we can, shaving
	 * CPU and IRQ costs (especially when using DMA or FIFOs).
	 *
	 * There are two other cases, where it's not generally practical
	 * to rely on a single I/O:
	 *
	 *  - R1B responses need at least N(EC) bytes of all-zeroes.
	 *
	 *    In this case we can *try* to fit it into one I/O, then
	 *    maybe read more data later.
	 *
	 *  - Data block reads are more troublesome, since a variable
	 *    number of padding bytes precede the token and data.
	 *      + N(CX) (== 0..8) bytes of all-ones, before CSD or CID
	 *      + N(AC) (== 1..many) bytes of all-ones
	 *
	 *    In this case we currently only have minimal speedups here:
	 *    when N(CR) == 1 we can avoid I/O in response_get().
	 */
	if (cs_on && (mrq->data->flags & MMC_DATA_READ)) {
		cp += 2;	/* min(N(CR)) + status */
		/* R1 */
	} else {
		cp += 10;	/* max(N(CR)) + status + min(N(RC),N(WR)) */
		if (cmd->flags & MMC_RSP_SPI_S2)	/* R2/R5 */
			cp++;
		else if (cmd->flags & MMC_RSP_SPI_B4)	/* R3/R4/R7 */
			cp += 4;
		else if (cmd->flags & MMC_RSP_BUSY)	/* R1B */
			cp = data->status + sizeof(data->status);
		/* else:  R1 (most commands) */
	}
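	/*
	 * Sanity check on the "at most 21 bytes" note above: 7 command
	 * bytes + 8 (max N(CR)) + 1 status + 1 (min N(RC)/N(WR)) + 4
	 * response bytes for R3/R4/R7 == 21, which is what cp points
	 * past in that case.
	 */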

	dev_dbg(&host->spi->dev, "  CMD%d, resp %s\n",
		cmd->opcode, maptype(cmd));

	/* send command, leaving chipselect active */
	spi_message_init(&host->m);

	t = &host->t;
	memset(t, 0, sizeof(*t));
	t->tx_buf = t->rx_buf = data->status;
	t->len = cp - data->status;
	t->cs_change = 1;
	spi_message_add_tail(t, &host->m);

	status = spi_sync_locked(host->spi, &host->m);
	if (status < 0) {
		dev_dbg(&host->spi->dev, "  ... write returned %d\n", status);
		cmd->error = status;
		return status;
	}

	/* after no-data commands and STOP_TRANSMISSION, chipselect off */
	return mmc_spi_response_get(host, cmd, cs_on);
}

/* Build data message with up to four separate transfers.  For TX, we
 * start by writing the data token.  And in most cases, we finish with
 * a status transfer.
 *
 * We always provide TX data for data and CRC.  The MMC/SD protocol
 * requires us to write ones; but Linux defaults to writing zeroes;
 * so we explicitly initialize it to all ones on RX paths.
 */
static void
mmc_spi_setup_data_message(
	struct mmc_spi_host	*host,
	bool			multiple,
	enum dma_data_direction	direction)
{
	struct spi_transfer	*t;
	struct scratch		*scratch = host->data;

	spi_message_init(&host->m);

	/* for reads, readblock() skips 0xff bytes before finding
	 * the token; for writes, this transfer issues that token.
	 */
	if (direction == DMA_TO_DEVICE) {
		t = &host->token;
		memset(t, 0, sizeof(*t));
		t->len = 1;
		if (multiple)
			scratch->data_token = SPI_TOKEN_MULTI_WRITE;
		else
			scratch->data_token = SPI_TOKEN_SINGLE;
		t->tx_buf = &scratch->data_token;
		spi_message_add_tail(t, &host->m);
	}

	/* Body of transfer is buffer, then CRC ...
	 * either TX-only, or RX with TX-ones.
	 */
	t = &host->t;
	memset(t, 0, sizeof(*t));
	t->tx_buf = host->ones;
	/* length and actual buffer info are written later */
	spi_message_add_tail(t, &host->m);

	t = &host->crc;
	memset(t, 0, sizeof(*t));
	t->len = 2;
	if (direction == DMA_TO_DEVICE) {
		/* the actual CRC may get written later */
		t->tx_buf = &scratch->crc_val;
	} else {
		t->tx_buf = host->ones;
		t->rx_buf = &scratch->crc_val;
	}
	spi_message_add_tail(t, &host->m);

	/*
	 * A single block read is followed by N(EC) [0+] all-ones bytes
	 * before deselect ... don't bother.
	 *
	 * Multiblock reads are followed by N(AC) [1+] all-ones bytes before
	 * the next block is read, or a STOP_TRANSMISSION is issued.  We'll
	 * collect that single byte, so readblock() doesn't need to.
	 *
	 * For a write, the one-byte data response follows immediately, then
	 * come zero or more busy bytes, then N(WR) [1+] all-ones bytes.
	 * Then single block reads may deselect, and multiblock ones issue
	 * the next token (next data block, or STOP_TRAN).  We can try to
	 * minimize I/O ops by using a single read to collect end-of-busy.
	 */
	if (multiple || direction == DMA_TO_DEVICE) {
		t = &host->early_status;
		memset(t, 0, sizeof(*t));
		t->len = (direction == DMA_TO_DEVICE) ? sizeof(scratch->status) : 1;
		t->tx_buf = host->ones;
		t->rx_buf = scratch->status;
		t->cs_change = 1;
		spi_message_add_tail(t, &host->m);
	}
}

/*
 * Write one block:
 *  - caller handled preceding N(WR) [1+] all-ones bytes
 *  - data block
 *	+ token
 *	+ data bytes
 *	+ crc16
 *  - an all-ones byte ... card writes a data-response byte
 *  - followed by N(EC) [0+] all-ones bytes, card writes zero/'busy'
 *
 * Return negative errno, else success.
 */
static int
mmc_spi_writeblock(struct mmc_spi_host *host, struct spi_transfer *t,
	unsigned long timeout)
{
	struct spi_device *spi = host->spi;
	int status, i;
	struct scratch *scratch = host->data;
	u32 pattern;

	if (host->mmc->use_spi_crc)
		scratch->crc_val = cpu_to_be16(crc_itu_t(0, t->tx_buf, t->len));

	status = spi_sync_locked(spi, &host->m);
	if (status != 0) {
		dev_dbg(&spi->dev, "write error (%d)\n", status);
		return status;
	}

	/*
	 * Get the transmission data-response reply.  It must follow
	 * immediately after the data block we transferred.  This reply
	 * doesn't necessarily tell whether the write operation succeeded;
	 * it just says if the transmission was ok and whether *earlier*
	 * writes succeeded; see the standard.
	 *
	 * In practice, there are cards (even modern SDHC ones) which are
	 * late in sending the response and miss the time frame by a few
	 * bits, so we have to cope with this situation and check the
	 * response bit-by-bit. Arggh!!!
	 */
	pattern = get_unaligned_be32(scratch->status);

	/* First 3 bits of the pattern are undefined */
	pattern |= 0xE0000000;

	/* left-adjust to leading 0 bit */
	while (pattern & 0x80000000)
		pattern <<= 1;
	/* right-adjust for pattern matching. Code is in bit 4..0 now. */
	pattern >>= 27;
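	/*
	 * Worked example (illustrative): status bytes e5 ff ff ff give
	 * pattern 0xe5ffffff; three left shifts skip the leading ones,
	 * and pattern >> 27 leaves 0b00101 == SPI_RESPONSE_ACCEPTED.
	 */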

	switch (pattern) {
	case SPI_RESPONSE_ACCEPTED:
		status = 0;
		break;
	case SPI_RESPONSE_CRC_ERR:
		/* host shall then issue MMC_STOP_TRANSMISSION */
		status = -EILSEQ;
		break;
	case SPI_RESPONSE_WRITE_ERR:
		/* host shall then issue MMC_STOP_TRANSMISSION,
		 * and should MMC_SEND_STATUS to sort it out
		 */
		status = -EIO;
		break;
	default:
		status = -EPROTO;
		break;
	}
	if (status != 0) {
		dev_dbg(&spi->dev, "write error %02x (%d)\n",
			scratch->status[0], status);
		return status;
	}

	t->tx_buf += t->len;

	/* Return when not busy.  If we didn't collect that status yet,
	 * we'll need some more I/O.
	 */
	for (i = 4; i < sizeof(scratch->status); i++) {
		/* card is non-busy if the most recent bit is 1 */
		if (scratch->status[i] & 0x01)
			return 0;
	}
	return mmc_spi_wait_unbusy(host, timeout);
}

/*
 * Read one block:
 *  - skip leading all-ones bytes ... either
 *	+ N(AC) [1..f(clock,CSD)] usually, else
 *	+ N(CX) [0..8] when reading CSD or CID
 *  - data block
 *	+ token ... if error token, no data or crc
 *	+ data bytes
 *	+ crc16
 *
 * After single block reads, we're done; N(EC) [0+] all-ones bytes follow
 * before dropping chipselect.
 *
 * For multiblock reads, caller either reads the next block or issues a
 * STOP_TRANSMISSION command.
 */
static int
mmc_spi_readblock(struct mmc_spi_host *host, struct spi_transfer *t,
	unsigned long timeout)
{
	struct spi_device	*spi = host->spi;
	int			status;
	struct scratch		*scratch = host->data;
	unsigned int		bitshift;
	u8			leftover;

	/* At least one SD card sends an all-zeroes byte when N(CX)
	 * applies, before the all-ones bytes ... just cope with that.
	 */
	status = mmc_spi_readbytes(host, 1);
	if (status < 0)
		return status;
	status = scratch->status[0];
	if (status == 0xff || status == 0)
		status = mmc_spi_readtoken(host, timeout);

	if (status < 0) {
		dev_dbg(&spi->dev, "read error %02x (%d)\n", status, status);
		return status;
	}

	/* The token may be bit-shifted...
	 * the first 0-bit precedes the data stream.
	 */
	bitshift = 7;
	while (status & 0x80) {
		status <<= 1;
		bitshift--;
	}
	leftover = status << 1;
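	/*
	 * Illustrative case (assumed): a SPI_TOKEN_SINGLE starting two
	 * bit-times late is seen as 0xff (skipped above) followed by
	 * 0b10dddddd; after one shift the token's 0 bit reaches bit 7,
	 * so bitshift ends up 6 and "leftover" holds the six data bits
	 * that already arrived, to be merged with the buffer below.
	 */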

	status = spi_sync_locked(spi, &host->m);
	if (status < 0) {
		dev_dbg(&spi->dev, "read error %d\n", status);
		return status;
	}

	if (bitshift) {
		/* Walk through the data and the crc and do
		 * all the magic to get byte-aligned data.
		 */
		u8 *cp = t->rx_buf;
		unsigned int len;
		unsigned int bitright = 8 - bitshift;
		u8 temp;
		for (len = t->len; len; len--) {
			temp = *cp;
			*cp++ = leftover | (temp >> bitshift);
			leftover = temp << bitright;
		}
		cp = (u8 *) &scratch->crc_val;
		temp = *cp;
		*cp++ = leftover | (temp >> bitshift);
		leftover = temp << bitright;
		temp = *cp;
		*cp = leftover | (temp >> bitshift);
	}

	if (host->mmc->use_spi_crc) {
		u16 crc = crc_itu_t(0, t->rx_buf, t->len);

		be16_to_cpus(&scratch->crc_val);
		if (scratch->crc_val != crc) {
			dev_dbg(&spi->dev,
				"read - crc error: crc_val=0x%04x, computed=0x%04x len=%d\n",
				scratch->crc_val, crc, t->len);
			return -EILSEQ;
		}
	}

	t->rx_buf += t->len;

	return 0;
}

/*
 * An MMC/SD data stage includes one or more blocks, optional CRCs,
 * and inline handshaking.  That handshaking makes it unlike most
 * other SPI protocol stacks.
 */
static void
mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd,
		struct mmc_data *data, u32 blk_size)
{
	struct spi_device	*spi = host->spi;
	struct spi_transfer	*t;
	enum dma_data_direction	direction = mmc_get_dma_dir(data);
	struct scatterlist	*sg;
	unsigned		n_sg;
	bool			multiple = (data->blocks > 1);
	const char		*write_or_read = (direction == DMA_TO_DEVICE) ? "write" : "read";
	u32			clock_rate;
	unsigned long		timeout;

	mmc_spi_setup_data_message(host, multiple, direction);
	t = &host->t;

	if (t->speed_hz)
		clock_rate = t->speed_hz;
	else
		clock_rate = spi->max_speed_hz;

	timeout = data->timeout_ns / 1000 +
		data->timeout_clks * 1000000 / clock_rate;
	timeout = usecs_to_jiffies((unsigned int)timeout) + 1;
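	/*
	 * Both terms above are in microseconds: timeout_ns / 1000, plus
	 * timeout_clks cycles converted at clock_rate Hz; e.g. 100 ms of
	 * timeout_ns alone yields 100000 us before rounding up to jiffies.
	 */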

	/* Handle scatterlist segments one at a time, with synch for
	 * each 512-byte block
	 */
	for_each_sg(data->sg, sg, data->sg_len, n_sg) {
		int			status = 0;
		void			*kmap_addr;
		unsigned		length = sg->length;

		/* allow pio too; we don't allow highmem */
		kmap_addr = kmap(sg_page(sg));
		if (direction == DMA_TO_DEVICE)
			t->tx_buf = kmap_addr + sg->offset;
		else
			t->rx_buf = kmap_addr + sg->offset;

		/* transfer each block, and update request status */
		while (length) {
			t->len = min(length, blk_size);

			dev_dbg(&spi->dev, "    %s block, %d bytes\n", write_or_read, t->len);

			if (direction == DMA_TO_DEVICE)
				status = mmc_spi_writeblock(host, t, timeout);
			else
				status = mmc_spi_readblock(host, t, timeout);
			if (status < 0)
				break;

			data->bytes_xfered += t->len;
			length -= t->len;

			if (!multiple)
				break;
		}

		/* discard mappings */
		if (direction == DMA_FROM_DEVICE)
			flush_dcache_page(sg_page(sg));
		kunmap(sg_page(sg));

		if (status < 0) {
			data->error = status;
			dev_dbg(&spi->dev, "%s status %d\n", write_or_read, status);
			break;
		}
	}

	/* NOTE some docs describe an MMC-only SET_BLOCK_COUNT (CMD23) that
	 * can be issued before multiblock writes.  Unlike its more widely
	 * documented analogue for SD cards (SET_WR_BLK_ERASE_COUNT, ACMD23),
	 * that can affect the STOP_TRAN logic.  Complete (and current)
	 * MMC specs should sort that out before Linux starts using CMD23.
	 */
	if (direction == DMA_TO_DEVICE && multiple) {
		struct scratch	*scratch = host->data;
		int		tmp;
		const unsigned	statlen = sizeof(scratch->status);

		dev_dbg(&spi->dev, "    STOP_TRAN\n");

		/* Tweak the per-block message we set up earlier by morphing
		 * it to hold single buffer with the token followed by some
		 * all-ones bytes ... skip N(BR) (0..1), scan the rest for
		 * "not busy any longer" status, and leave chip selected.
		 */
		INIT_LIST_HEAD(&host->m.transfers);
		list_add(&host->early_status.transfer_list,
			&host->m.transfers);

		memset(scratch->status, 0xff, statlen);
		scratch->status[0] = SPI_TOKEN_STOP_TRAN;

		host->early_status.tx_buf = host->early_status.rx_buf;
		host->early_status.len = statlen;

		tmp = spi_sync_locked(spi, &host->m);
		if (tmp < 0) {
			if (!data->error)
				data->error = tmp;
			return;
		}

		/* Ideally we collected "not busy" status with one I/O,
		 * avoiding wasteful byte-at-a-time scanning... but more
		 * I/O is often needed.
		 */
		for (tmp = 2; tmp < statlen; tmp++) {
			if (scratch->status[tmp] != 0)
				return;
		}
		tmp = mmc_spi_wait_unbusy(host, timeout);
		if (tmp < 0 && !data->error)
			data->error = tmp;
	}
}

/****************************************************************************/

/*
 * MMC driver implementation -- the interface to the MMC stack
 */

static void mmc_spi_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mmc_spi_host	*host = mmc_priv(mmc);
	int			status = -EINVAL;
	int			crc_retry = 5;
	struct mmc_command	stop;

#ifdef DEBUG
	/* MMC core and layered drivers *MUST* issue SPI-aware commands */
	{
		struct mmc_command	*cmd;
		int			invalid = 0;

		cmd = mrq->cmd;
		if (!mmc_spi_resp_type(cmd)) {
			dev_dbg(&host->spi->dev, "bogus command\n");
			cmd->error = -EINVAL;
			invalid = 1;
		}

		cmd = mrq->stop;
		if (cmd && !mmc_spi_resp_type(cmd)) {
			dev_dbg(&host->spi->dev, "bogus STOP command\n");
			cmd->error = -EINVAL;
			invalid = 1;
		}

		if (invalid) {
			dump_stack();
			mmc_request_done(host->mmc, mrq);
			return;
		}
	}
#endif

	/* request exclusive bus access */
	spi_bus_lock(host->spi->master);

crc_recover:
	/* issue command; then optionally data and stop */
	status = mmc_spi_command_send(host, mrq, mrq->cmd, mrq->data != NULL);
	if (status == 0 && mrq->data) {
		mmc_spi_data_do(host, mrq->cmd, mrq->data, mrq->data->blksz);

		/*
		 * The SPI bus is not always reliable for large data transfers.
		 * If an occasional crc error is reported by the SD device with
		 * data read/write over SPI, it may be recovered by repeating
		 * the last SD command again. The retry count is set to 5 to
		 * ensure the driver passes stress tests.
		 */
		if (mrq->data->error == -EILSEQ && crc_retry) {
			stop.opcode = MMC_STOP_TRANSMISSION;
			stop.arg = 0;
			stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
			status = mmc_spi_command_send(host, mrq, &stop, 0);
			crc_retry--;
			mrq->data->error = 0;
			goto crc_recover;
		}

		if (mrq->stop)
			status = mmc_spi_command_send(host, mrq, mrq->stop, 0);
		else
			mmc_cs_off(host);
	}

	/* release the bus */
	spi_bus_unlock(host->spi->master);

	mmc_request_done(host->mmc, mrq);
}


/* See Section 6.4.1, in SD "Simplified Physical Layer Specification 2.0"
 *
 * NOTE that here we can't know that the card has just been powered up;
 * not all MMC/SD sockets support power switching.
 *
 * FIXME when the card is still in SPI mode, e.g. from a previous kernel,
 * this doesn't seem to do the right thing at all...
 */
static void mmc_spi_initsequence(struct mmc_spi_host *host)
{
	/* Try to be very sure any previous command has completed;
	 * wait till not-busy, skip debris from any old commands.
	 */
	mmc_spi_wait_unbusy(host, msecs_to_jiffies(MMC_SPI_INIT_TIMEOUT_MS));
	mmc_spi_readbytes(host, 10);

	/*
	 * Do a burst with chipselect active-high.  We need to do this to
	 * meet the requirement of 74 clock cycles with both chipselect
	 * and CMD (MOSI) high before CMD0 ... after the card has been
	 * powered up to Vdd(min), and so is ready to take commands.
	 *
	 * Some cards are particularly needy of this (e.g. Viking "SD256")
	 * while most others don't seem to care.
	 *
	 * Note that this is one of the places MMC/SD plays games with the
	 * SPI protocol.  Another is that when chipselect is released while
	 * the card returns BUSY status, the clock must issue several cycles
	 * with chipselect high before the card will stop driving its output.
	 *
	 * SPI_CS_HIGH means "asserted" here. In some cases like when using
	 * GPIOs for chip select, SPI_CS_HIGH is set but this will be logically
	 * inverted by gpiolib, so if we want to be certain it is driven high
	 * we should toggle the default with an XOR as we do here.
	 */
	host->spi->mode ^= SPI_CS_HIGH;
	if (spi_setup(host->spi) != 0) {
		/* Just warn; most cards work without it. */
		dev_warn(&host->spi->dev,
			"can't change chip-select polarity\n");
		host->spi->mode ^= SPI_CS_HIGH;
	} else {
		mmc_spi_readbytes(host, 18);
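		/*
		 * 18 bytes of all-ones == 144 clock cycles, comfortably
		 * more than the 74 required above.
		 */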
101515a0580cSDavid Brownell
1016af3ed119SLinus Walleij host->spi->mode ^= SPI_CS_HIGH;
101715a0580cSDavid Brownell if (spi_setup(host->spi) != 0) {
101815a0580cSDavid Brownell /* Wot, we can't get the same setup we had before? */
101915a0580cSDavid Brownell dev_err(&host->spi->dev,
102015a0580cSDavid Brownell "can't restore chip-select polarity\n");
102115a0580cSDavid Brownell }
102215a0580cSDavid Brownell }
102315a0580cSDavid Brownell }
102415a0580cSDavid Brownell
mmc_powerstring(u8 power_mode)102515a0580cSDavid Brownell static char *mmc_powerstring(u8 power_mode)
102615a0580cSDavid Brownell {
102715a0580cSDavid Brownell switch (power_mode) {
102815a0580cSDavid Brownell case MMC_POWER_OFF: return "off";
102915a0580cSDavid Brownell case MMC_POWER_UP: return "up";
103015a0580cSDavid Brownell case MMC_POWER_ON: return "on";
103115a0580cSDavid Brownell }
103215a0580cSDavid Brownell return "?";
103315a0580cSDavid Brownell }
103415a0580cSDavid Brownell
static void mmc_spi_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct mmc_spi_host *host = mmc_priv(mmc);

	if (host->power_mode != ios->power_mode) {
		int canpower;

		canpower = host->pdata && host->pdata->setpower;

		dev_dbg(&host->spi->dev, "power %s (%d)%s\n",
				mmc_powerstring(ios->power_mode),
				ios->vdd,
				canpower ? ", can switch" : "");

		/* switch power on/off if possible, accounting for
		 * max 250 msec powerup time if needed.
		 */
		if (canpower) {
			switch (ios->power_mode) {
			case MMC_POWER_OFF:
			case MMC_POWER_UP:
				host->pdata->setpower(&host->spi->dev,
						ios->vdd);
				if (ios->power_mode == MMC_POWER_UP)
					msleep(host->powerup_msecs);
			}
		}

		/* See 6.4.1 in the simplified SD card physical spec 2.0 */
		if (ios->power_mode == MMC_POWER_ON)
			mmc_spi_initsequence(host);

		/* If powering down, ground all card inputs to avoid power
		 * delivery from data lines!  On a shared SPI bus, this
		 * will probably be temporary; 6.4.2 of the simplified SD
		 * spec says this must last at least 1 msec.
		 *
		 *   - Clock low means CPOL 0, i.e. mode 0
		 *   - MOSI low comes from writing zero
		 *   - Chipselect is usually active low...
		 */
		if (canpower && ios->power_mode == MMC_POWER_OFF) {
			int mres;
			u8 nullbyte = 0;

			host->spi->mode &= ~(SPI_CPOL|SPI_CPHA);
			mres = spi_setup(host->spi);
			if (mres < 0)
				dev_dbg(&host->spi->dev,
					"switch to SPI mode 0 failed\n");

			if (spi_write(host->spi, &nullbyte, 1) < 0)
				dev_dbg(&host->spi->dev,
					"put spi signals to low failed\n");

			/*
			 * Now clock should be low due to spi mode 0;
			 * MOSI should be low because of written 0x00;
			 * chipselect should be low (it is active low);
			 * power supply is off, so now MMC is off too!
			 *
			 * FIXME no, chipselect can be high since the
			 * device is inactive and SPI_CS_HIGH is clear...
			 */
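			/*
			 * Hold the lines low for 10 ms, well past the
			 * 1 ms minimum from section 6.4.2 cited above.
			 */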
			msleep(10);
			if (mres == 0) {
				host->spi->mode |= (SPI_CPOL|SPI_CPHA);
				mres = spi_setup(host->spi);
				if (mres < 0)
					dev_dbg(&host->spi->dev,
						"switch back to SPI mode 3 failed\n");
			}
		}

		host->power_mode = ios->power_mode;
	}

	if (host->spi->max_speed_hz != ios->clock && ios->clock != 0) {
		int status;

		host->spi->max_speed_hz = ios->clock;
		status = spi_setup(host->spi);
		dev_dbg(&host->spi->dev, " clock to %d Hz, %d\n",
			host->spi->max_speed_hz, status);
	}
}

static const struct mmc_host_ops mmc_spi_ops = {
	.request = mmc_spi_request,
	.set_ios = mmc_spi_set_ios,
	.get_ro = mmc_gpio_get_ro,
	.get_cd = mmc_gpio_get_cd,
};


/****************************************************************************/

/*
 * SPI driver implementation
 */

static irqreturn_t
mmc_spi_detect_irq(int irq, void *mmc)
{
	struct mmc_spi_host *host = mmc_priv(mmc);
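	/* Debounce: never schedule the rescan sooner than 100 ms. */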
	u16 delay_msec = max(host->pdata->detect_delay, (u16)100);

	mmc_detect_change(mmc, msecs_to_jiffies(delay_msec));
	return IRQ_HANDLED;
}

static int mmc_spi_probe(struct spi_device *spi)
{
	void *ones;
	struct mmc_host *mmc;
	struct mmc_spi_host *host;
	int status;
	bool has_ro = false;

	/* We rely on full duplex transfers, mostly to reduce
	 * per-transfer overheads (by making fewer transfers).
	 */
	if (spi->master->flags & SPI_MASTER_HALF_DUPLEX)
		return -EINVAL;

	/* MMC and SD specs only seem to care that sampling is on the
	 * rising edge ... meaning SPI modes 0 or 3.  So either SPI mode
	 * should be legit.  We'll use mode 0 since its steady state is 0,
	 * which is appropriate for hotplugging, unless the platform data
	 * specifies mode 3 (for hardware that is not compatible with mode 0).
	 */
	if (spi->mode != SPI_MODE_3)
		spi->mode = SPI_MODE_0;
	spi->bits_per_word = 8;

	status = spi_setup(spi);
	if (status < 0) {
		dev_dbg(&spi->dev, "needs SPI mode %02x, %d KHz; %d\n",
				spi->mode, spi->max_speed_hz / 1000,
				status);
		return status;
	}

	/* We need a supply of ones to transmit.  This is the only time
	 * the CPU touches these, so cache coherency isn't a concern.
	 *
	 * NOTE if many systems use more than one MMC-over-SPI connector
	 * it'd save some memory to share this.  That's evidently rare.
	 */
	status = -ENOMEM;
	ones = kmalloc(MMC_SPI_BLOCKSIZE, GFP_KERNEL);
	if (!ones)
		goto nomem;
	memset(ones, 0xff, MMC_SPI_BLOCKSIZE);

	mmc = mmc_alloc_host(sizeof(*host), &spi->dev);
	if (!mmc)
		goto nomem;

	mmc->ops = &mmc_spi_ops;
	mmc->max_blk_size = MMC_SPI_BLOCKSIZE;
	mmc->max_segs = MMC_SPI_BLOCKSATONCE;
	mmc->max_req_size = MMC_SPI_BLOCKSATONCE * MMC_SPI_BLOCKSIZE;
	mmc->max_blk_count = MMC_SPI_BLOCKSATONCE;

	mmc->caps = MMC_CAP_SPI;

	/* SPI doesn't need the lowspeed device identification thing for
	 * MMC or SD cards, since it never comes up in open drain mode.
	 * That's good; some SPI masters can't handle very low speeds!
	 *
	 * However, low speed SDIO cards need not handle over 400 KHz;
	 * that's the only reason not to use a few MHz for f_min (until
	 * the upper layer reads the target frequency from the CSD).
	 */
	mmc->f_min = 400000;
	mmc->f_max = spi->max_speed_hz;

	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->spi = spi;

	host->ones = ones;

	dev_set_drvdata(&spi->dev, mmc);

	/* Platform data is used to hook up things like card sensing
	 * and power switching gpios.
	 */
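	/*
	 * Illustrative only: a board file might wire this up with
	 * something like the following (field values and callback
	 * names are hypothetical):
	 *
	 *	static struct mmc_spi_platform_data mmc_spi_pdata = {
	 *		.ocr_mask	= MMC_VDD_32_33 | MMC_VDD_33_34,
	 *		.detect_delay	= 100,
	 *		.powerup_msecs	= 100,
	 *		.setpower	= board_mmc_setpower,
	 *		.init		= board_mmc_init,
	 *		.exit		= board_mmc_exit,
	 *	};
	 */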
	host->pdata = mmc_spi_get_pdata(spi);
	if (host->pdata)
		mmc->ocr_avail = host->pdata->ocr_mask;
	if (!mmc->ocr_avail) {
		dev_warn(&spi->dev, "ASSUMING 3.2-3.4 V slot power\n");
		mmc->ocr_avail = MMC_VDD_32_33|MMC_VDD_33_34;
	}
	if (host->pdata && host->pdata->setpower) {
		host->powerup_msecs = host->pdata->powerup_msecs;
		if (!host->powerup_msecs || host->powerup_msecs > 250)
			host->powerup_msecs = 250;
	}

	/* Preallocate buffers */
	host->data = kmalloc(sizeof(*host->data), GFP_KERNEL);
	if (!host->data)
		goto fail_nobuf1;

	/* setup message for status/busy readback */
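	/*
	 * The card expects the host to hold MOSI high while it is being
	 * polled, so the readback transfer transmits from the all-ones
	 * buffer allocated above.
	 */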
	spi_message_init(&host->readback);

	spi_message_add_tail(&host->status, &host->readback);
	host->status.tx_buf = host->ones;
	host->status.rx_buf = &host->data->status;
	host->status.cs_change = 1;
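	/*
	 * cs_change on this (only) transfer keeps chipselect asserted
	 * after the message completes, so repeated status polls do not
	 * deassert the card in between.
	 */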

	/* register card detect irq */
	if (host->pdata && host->pdata->init) {
		status = host->pdata->init(&spi->dev, mmc_spi_detect_irq, mmc);
		if (status != 0)
			goto fail_glue_init;
	}

	/* pass platform capabilities, if any */
	if (host->pdata) {
		mmc->caps |= host->pdata->caps;
		mmc->caps2 |= host->pdata->caps2;
	}

	status = mmc_add_host(mmc);
	if (status != 0)
		goto fail_glue_init;

	/*
	 * Index 0 is card detect.
	 * Old boardfiles were specifying 1 ms as debounce; the debounce
	 * argument below is in microseconds, hence 1000.
	 */
	status = mmc_gpiod_request_cd(mmc, NULL, 0, false, 1000);
	if (status == -EPROBE_DEFER)
		goto fail_gpiod_request;
	if (!status) {
		/*
		 * The platform has a CD GPIO signal that may support
		 * interrupts, so let mmc_gpiod_request_cd_irq() decide
		 * if polling is needed or not.
		 */
		mmc->caps &= ~MMC_CAP_NEEDS_POLL;
		mmc_gpiod_request_cd_irq(mmc);
	}
	mmc_detect_change(mmc, 0);

	/* Index 1 is write protect/read only */
	status = mmc_gpiod_request_ro(mmc, NULL, 1, 0);
	if (status == -EPROBE_DEFER)
		goto fail_gpiod_request;
	if (!status)
		has_ro = true;

	dev_info(&spi->dev, "SD/MMC host %s%s%s%s\n",
			dev_name(&mmc->class_dev),
			has_ro ? "" : ", no WP",
			(host->pdata && host->pdata->setpower)
				? "" : ", no poweroff",
			(mmc->caps & MMC_CAP_NEEDS_POLL)
				? ", cd polling" : "");
	return 0;

fail_gpiod_request:
	mmc_remove_host(mmc);
fail_glue_init:
	kfree(host->data);
fail_nobuf1:
	mmc_spi_put_pdata(spi);
	mmc_free_host(mmc);
nomem:
	kfree(ones);
	return status;
}


static void mmc_spi_remove(struct spi_device *spi)
{
	struct mmc_host *mmc = dev_get_drvdata(&spi->dev);
	struct mmc_spi_host *host = mmc_priv(mmc);

	/* prevent new mmc_detect_change() calls */
	if (host->pdata && host->pdata->exit)
		host->pdata->exit(&spi->dev, mmc);

	mmc_remove_host(mmc);

	kfree(host->data);
	kfree(host->ones);

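	/* Restore the bus speed that mmc_spi_set_ios() may have lowered. */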
	spi->max_speed_hz = mmc->f_max;
	mmc_spi_put_pdata(spi);
	mmc_free_host(mmc);
}

static const struct spi_device_id mmc_spi_dev_ids[] = {
	{ "mmc-spi-slot" },
	{ },
};
MODULE_DEVICE_TABLE(spi, mmc_spi_dev_ids);

static const struct of_device_id mmc_spi_of_match_table[] = {
	{ .compatible = "mmc-spi-slot", },
	{},
};
MODULE_DEVICE_TABLE(of, mmc_spi_of_match_table);

static struct spi_driver mmc_spi_driver = {
	.driver = {
		.name = "mmc_spi",
		.of_match_table = mmc_spi_of_match_table,
	},
	.id_table = mmc_spi_dev_ids,
	.probe = mmc_spi_probe,
	.remove = mmc_spi_remove,
};

module_spi_driver(mmc_spi_driver);

MODULE_AUTHOR("Mike Lavender, David Brownell, Hans-Peter Nilsson, Jan Nikitenko");
MODULE_DESCRIPTION("SPI SD/MMC host driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("spi:mmc_spi");