// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Access SD/MMC cards through SPI master controllers
 *
 * (C) Copyright 2005, Intec Automation,
 *		Mike Lavender (mike@steroidmicros)
 * (C) Copyright 2006-2007, David Brownell
 * (C) Copyright 2007, Axis Communications,
 *		Hans-Peter Nilsson (hp@axis.com)
 * (C) Copyright 2007, ATRON electronic GmbH,
 *		Jan Nikitenko <jan.nikitenko@gmail.com>
 */
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/dma-mapping.h>
#include <linux/crc7.h>
#include <linux/crc-itu-t.h>
#include <linux/scatterlist.h>

#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>		/* for R1_SPI_* bit values */
#include <linux/mmc/slot-gpio.h>

#include <linux/spi/spi.h>
#include <linux/spi/mmc_spi.h>

#include <asm/unaligned.h>


/* NOTES:
 *
 * - For now, we won't try to interoperate with a real mmc/sd/sdio
 *   controller, although some of them do have hardware support for
 *   SPI protocol.  The main reason for such configs would be mmc-ish
 *   cards like DataFlash, which don't support that "native" protocol.
 *
 *   We don't have a "DataFlash/MMC/SD/SDIO card slot" abstraction to
 *   switch between driver stacks, and in any case if "native" mode
 *   is available, it will be faster and hence preferable.
 *
 * - MMC depends on a different chipselect management policy than the
 *   SPI interface currently supports for shared bus segments: it needs
 *   to issue multiple spi_message requests with the chipselect active,
 *   using the results of one message to decide the next one to issue.
 *
 *   Pending updates to the programming interface, this driver expects
 *   that it not share the bus with other drivers (precluding conflicts).
 *
 * - We tell the controller to keep the chipselect active from the
 *   beginning of an mmc_host_ops.request until the end.  So beware
 *   of SPI controller drivers that mis-handle the cs_change flag!
 *
 *   However, many cards seem OK with chipselect flapping up/down
 *   during that time ... at least on unshared bus segments.
 */


/*
 * Local protocol constants, internal to data block protocols.
 */

/* Response tokens used to ack each block written: */
#define SPI_MMC_RESPONSE_CODE(x)	((x) & 0x1f)
#define SPI_RESPONSE_ACCEPTED		((2 << 1)|1)
#define SPI_RESPONSE_CRC_ERR		((5 << 1)|1)
#define SPI_RESPONSE_WRITE_ERR		((6 << 1)|1)
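
/* For illustration (implied by the values above, with made-up wire data):
 * the card's data-response byte has the form xxx0sss1, where "sss" is the
 * status code.  Masking with SPI_MMC_RESPONSE_CODE() keeps the low five
 * bits, so e.g. a raw 0xe5 on the wire decodes as
 * SPI_MMC_RESPONSE_CODE(0xe5) == 0x05 == SPI_RESPONSE_ACCEPTED.
 */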

/* Read and write blocks start with these tokens and end with crc;
 * on error, read tokens act like a subset of R2_SPI_* values.
 */
#define SPI_TOKEN_SINGLE	0xfe	/* single block r/w, multiblock read */
#define SPI_TOKEN_MULTI_WRITE	0xfc	/* multiblock write */
#define SPI_TOKEN_STOP_TRAN	0xfd	/* terminate multiblock write */

#define MMC_SPI_BLOCKSIZE	512

/* These fixed timeouts come from the latest SD specs, which say to ignore
 * the CSD values.  The R1B value is for card erase (e.g. the "I forgot the
 * card's password" scenario); it's mostly applied to STOP_TRANSMISSION after
 * reads, which takes nowhere near that long.  Older cards may be able to use
 * shorter timeouts ... but why bother?
 */
#define r1b_timeout		(HZ * 3)

/* One of the critical speed parameters is the amount of data which may
 * be transferred in one command.  If this value is too low, the SD card
 * controller has to do multiple partial block writes (argggh!).  With
 * today's (2008) SD cards there is little speed gain if we transfer more
 * than 64 KBytes at a time.  So use this value until there is any indication
 * that we should do more here.
 */
#define MMC_SPI_BLOCKSATONCE	128

/****************************************************************************/

/*
 * Local Data Structures
 */

/* "scratch" is per-{command,block} data exchanged with the card */
struct scratch {
	u8			status[29];
	u8			data_token;
	__be16			crc_val;
};

struct mmc_spi_host {
	struct mmc_host		*mmc;
	struct spi_device	*spi;

	unsigned char		power_mode;
	u16			powerup_msecs;

	struct mmc_spi_platform_data	*pdata;

	/* for bulk data transfers */
	struct spi_transfer	token, t, crc, early_status;
	struct spi_message	m;

	/* for status readback */
	struct spi_transfer	status;
	struct spi_message	readback;

	/* underlying DMA-aware controller, or null */
	struct device		*dma_dev;

	/* buffer used for commands and for message "overhead" */
	struct scratch		*data;
	dma_addr_t		data_dma;

	/* Specs say to write ones most of the time, even when the card
	 * has no need to read its input data; and many cards won't care.
	 * This is our source of those ones.
	 */
	void			*ones;
	dma_addr_t		ones_dma;
};


/****************************************************************************/

/*
 * MMC-over-SPI protocol glue, used by the MMC stack interface
 */

static inline int mmc_cs_off(struct mmc_spi_host *host)
{
	/* chipselect will always be inactive after setup() */
	return spi_setup(host->spi);
}

static int
mmc_spi_readbytes(struct mmc_spi_host *host, unsigned len)
{
	int status;

	if (len > sizeof(*host->data)) {
		WARN_ON(1);
		return -EIO;
	}

	host->status.len = len;

	if (host->dma_dev)
		dma_sync_single_for_device(host->dma_dev,
				host->data_dma, sizeof(*host->data),
				DMA_FROM_DEVICE);

	status = spi_sync_locked(host->spi, &host->readback);

	if (host->dma_dev)
		dma_sync_single_for_cpu(host->dma_dev,
				host->data_dma, sizeof(*host->data),
				DMA_FROM_DEVICE);

	return status;
}

static int mmc_spi_skip(struct mmc_spi_host *host, unsigned long timeout,
			unsigned n, u8 byte)
{
	u8 *cp = host->data->status;
	unsigned long start = jiffies;

	while (1) {
		int		status;
		unsigned	i;

		status = mmc_spi_readbytes(host, n);
		if (status < 0)
			return status;

		for (i = 0; i < n; i++) {
			if (cp[i] != byte)
				return cp[i];
		}

		if (time_is_before_jiffies(start + timeout))
			break;

		/* If we need long timeouts, we may release the CPU.
		 * We use jiffies here because we want to have a relation
		 * between elapsed time and the blocking of the scheduler.
		 */
		if (time_is_before_jiffies(start + 1))
			schedule();
	}
	return -ETIMEDOUT;
}

static inline int
mmc_spi_wait_unbusy(struct mmc_spi_host *host, unsigned long timeout)
{
	return mmc_spi_skip(host, timeout, sizeof(host->data->status), 0);
}

static int mmc_spi_readtoken(struct mmc_spi_host *host, unsigned long timeout)
{
	return mmc_spi_skip(host, timeout, 1, 0xff);
}
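
/* A note on the polling granularity above: mmc_spi_wait_unbusy() may scan
 * a full 29-byte status window per I/O because extra busy or filler bytes
 * are harmless to consume, while mmc_spi_readtoken() must read one byte at
 * a time -- reading further would swallow bytes that belong to the data
 * block following the token.  The scratch buffer itself is sized so that
 * status[29] + data_token + crc_val add up to a 32-byte structure.
 */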

/*
 * Note that for SPI, cmd->resp[0] is not the same data as "native" protocol
 * hosts return!  The low byte holds R1_SPI bits.  The next byte may hold
 * R2_SPI bits ... for SEND_STATUS, or after data read errors.
 *
 * cmd->resp[1] holds any four-byte response, for R3 (READ_OCR) and on
 * newer cards R7 (IF_COND).
 */

static char *maptype(struct mmc_command *cmd)
{
	switch (mmc_spi_resp_type(cmd)) {
	case MMC_RSP_SPI_R1:	return "R1";
	case MMC_RSP_SPI_R1B:	return "R1B";
	case MMC_RSP_SPI_R2:	return "R2/R5";
	case MMC_RSP_SPI_R3:	return "R3/R4/R7";
	default:		return "?";
	}
}
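
/* Reminder, from the SD Simplified Physical Layer spec: the R1_SPI_* flags
 * tested below are the individual bits of the one-byte R1 response -- idle,
 * erase reset, illegal command, com CRC error, erase sequence error,
 * address error and parameter error, from LSB upward.  Bit 7 is always zero
 * in a properly aligned response, which is why a set high bit is treated as
 * a bit-shifted reply in mmc_spi_response_get().
 */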

/* return zero, else negative errno after setting cmd->error */
static int mmc_spi_response_get(struct mmc_spi_host *host,
		struct mmc_command *cmd, int cs_on)
{
	u8	*cp = host->data->status;
	u8	*end = cp + host->t.len;
	int	value = 0;
	int	bitshift;
	u8	leftover = 0;
	unsigned short rotator;
	int	i;
	char	tag[32];

	snprintf(tag, sizeof(tag), "  ... CMD%d response SPI_%s",
		cmd->opcode, maptype(cmd));

	/* Except for data block reads, the whole response will already
	 * be stored in the scratch buffer.  It's somewhere after the
	 * command and the first byte we read after it.  We ignore that
	 * first byte.  After STOP_TRANSMISSION command it may include
	 * two data bits, but otherwise it's all ones.
	 */
	cp += 8;
	while (cp < end && *cp == 0xff)
		cp++;

	/* Data block reads (R1 response types) may need more data... */
	if (cp == end) {
		cp = host->data->status;
		end = cp + 1;

		/* Card sends N(CR) (== 1..8) bytes of all-ones then one
		 * status byte ... and we already scanned 2 bytes.
		 *
		 * REVISIT block read paths use nasty byte-at-a-time I/O
		 * so it can always DMA directly into the target buffer.
		 * It'd probably be better to memcpy() the first chunk and
		 * avoid extra i/o calls...
		 *
		 * Note we check for more than 8 bytes, because in practice,
		 * some SD cards are slow...
		 */
		for (i = 2; i < 16; i++) {
			value = mmc_spi_readbytes(host, 1);
			if (value < 0)
				goto done;
			if (*cp != 0xff)
				goto checkstatus;
		}
		value = -ETIMEDOUT;
		goto done;
	}

checkstatus:
	bitshift = 0;
	if (*cp & 0x80) {
		/* Houston, we have an ugly card with a bit-shifted response */
		rotator = *cp++ << 8;
		/* read the next byte */
		if (cp == end) {
			value = mmc_spi_readbytes(host, 1);
			if (value < 0)
				goto done;
			cp = host->data->status;
			end = cp + 1;
		}
		rotator |= *cp++;
		while (rotator & 0x8000) {
			bitshift++;
			rotator <<= 1;
		}
		cmd->resp[0] = rotator >> 8;
		leftover = rotator;
	} else {
		cmd->resp[0] = *cp++;
	}
	cmd->error = 0;

	/* Status byte: the entire seven-bit R1 response.  */
	if (cmd->resp[0] != 0) {
		if ((R1_SPI_PARAMETER | R1_SPI_ADDRESS)
				& cmd->resp[0])
			value = -EFAULT; /* Bad address */
		else if (R1_SPI_ILLEGAL_COMMAND & cmd->resp[0])
			value = -ENOSYS; /* Function not implemented */
		else if (R1_SPI_COM_CRC & cmd->resp[0])
			value = -EILSEQ; /* Illegal byte sequence */
		else if ((R1_SPI_ERASE_SEQ | R1_SPI_ERASE_RESET)
				& cmd->resp[0])
			value = -EIO;    /* I/O error */
		/* else R1_SPI_IDLE, "it's resetting" */
	}

	switch (mmc_spi_resp_type(cmd)) {

	/* SPI R1B == R1 + busy; STOP_TRANSMISSION (for multiblock reads)
	 * and less-common stuff like various erase operations.
	 */
	case MMC_RSP_SPI_R1B:
		/* maybe we read all the busy tokens already */
		while (cp < end && *cp == 0)
			cp++;
		if (cp == end)
			mmc_spi_wait_unbusy(host, r1b_timeout);
		break;

	/* SPI R2 == R1 + second status byte; SEND_STATUS
	 * SPI R5 == R1 + data byte; IO_RW_DIRECT
	 */
	case MMC_RSP_SPI_R2:
		/* read the next byte */
		if (cp == end) {
			value = mmc_spi_readbytes(host, 1);
			if (value < 0)
				goto done;
			cp = host->data->status;
			end = cp + 1;
		}
		if (bitshift) {
			rotator = leftover << 8;
			rotator |= *cp << bitshift;
			cmd->resp[0] |= (rotator & 0xFF00);
		} else {
			cmd->resp[0] |= *cp << 8;
		}
		break;

	/* SPI R3, R4, or R7 == R1 + 4 bytes */
	case MMC_RSP_SPI_R3:
		rotator = leftover << 8;
		cmd->resp[1] = 0;
		for (i = 0; i < 4; i++) {
			cmd->resp[1] <<= 8;
			/* read the next byte */
			if (cp == end) {
				value = mmc_spi_readbytes(host, 1);
				if (value < 0)
					goto done;
				cp = host->data->status;
				end = cp + 1;
			}
			if (bitshift) {
				rotator |= *cp++ << bitshift;
				cmd->resp[1] |= (rotator >> 8);
				rotator <<= 8;
			} else {
				cmd->resp[1] |= *cp++;
			}
		}
		break;

	/* SPI R1 == just one status byte */
	case MMC_RSP_SPI_R1:
		break;

	default:
		dev_dbg(&host->spi->dev, "bad response type %04x\n",
			mmc_spi_resp_type(cmd));
		if (value >= 0)
			value = -EINVAL;
		goto done;
	}

	if (value < 0)
		dev_dbg(&host->spi->dev, "%s: resp %04x %08x\n",
			tag, cmd->resp[0], cmd->resp[1]);

	/* disable chipselect on errors and some success cases */
	if (value >= 0 && cs_on)
		return value;
done:
	if (value < 0)
		cmd->error = value;
	mmc_cs_off(host);
	return value;
}
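
/* A worked example of the "ugly card" path above (illustrative values, not
 * from any particular card): suppose the true R1 response is 0x01 (idle)
 * but it arrives three bit-times late, so the two status bytes read 0xe0
 * and 0x3f.  Then rotator starts as 0xe03f, the while loop shifts left
 * three times (bitshift == 3) leaving 0x01f8, cmd->resp[0] becomes 0x01,
 * and leftover keeps 0xf8 -- the stray low bits that the R2/R3 cases
 * splice onto any following response bytes.
 */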

/* Issue command and read its response.
 * Returns zero on success, negative for error.
 *
 * On error, caller must cope with mmc core retry mechanism.  That
 * means immediate low-level resubmit, which affects the bus lock...
 */
static int
mmc_spi_command_send(struct mmc_spi_host *host,
		struct mmc_request *mrq,
		struct mmc_command *cmd, int cs_on)
{
	struct scratch		*data = host->data;
	u8			*cp = data->status;
	int			status;
	struct spi_transfer	*t;

	/* We can handle most commands (except block reads) in one full
	 * duplex I/O operation before either starting the next transfer
	 * (data block or command) or else deselecting the card.
	 *
	 * First, write 7 bytes:
	 *  - an all-ones byte to ensure the card is ready
	 *  - opcode byte (plus start and transmission bits)
	 *  - four bytes of big-endian argument
	 *  - crc7 (plus end bit) ... always computed, it's cheap
	 *
	 * We init the whole buffer to all-ones, which is what we need
	 * to write while we're reading (later) response data.
	 */
	memset(cp, 0xff, sizeof(data->status));

	cp[1] = 0x40 | cmd->opcode;
	put_unaligned_be32(cmd->arg, cp + 2);
	cp[6] = crc7_be(0, cp + 1, 5) | 0x01;
	cp += 7;

	/* Then, read up to 13 bytes (while writing all-ones):
	 *  - N(CR) (== 1..8) bytes of all-ones
	 *  - status byte (for all response types)
	 *  - the rest of the response, either:
	 *      + nothing, for R1 or R1B responses
	 *      + second status byte, for R2 responses
	 *      + four data bytes, for R3 and R7 responses
	 *
	 * Finally, read some more bytes ... in the nice cases we know in
	 * advance how many, and reading 1 more is always OK:
	 *  - N(EC) (== 0..N) bytes of all-ones, before deselect/finish
	 *  - N(RC) (== 1..N) bytes of all-ones, before next command
	 *  - N(WR) (== 1..N) bytes of all-ones, before data write
	 *
	 * So in those cases one full duplex I/O of at most 21 bytes will
	 * handle the whole command, leaving the card ready to receive a
	 * data block or new command.  We do that whenever we can, shaving
	 * CPU and IRQ costs (especially when using DMA or FIFOs).
	 *
	 * There are two other cases, where it's not generally practical
	 * to rely on a single I/O:
	 *
	 *  - R1B responses need at least N(EC) bytes of all-zeroes.
	 *
	 *    In this case we can *try* to fit it into one I/O, then
	 *    maybe read more data later.
	 *
	 *  - Data block reads are more troublesome, since a variable
	 *    number of padding bytes precede the token and data.
	 *      + N(CX) (== 0..8) bytes of all-ones, before CSD or CID
	 *      + N(AC) (== 1..many) bytes of all-ones
	 *
	 *    In this case we currently only have minimal speedups here:
	 *    when N(CR) == 1 we can avoid I/O in response_get().
	 */
	if (cs_on && (mrq->data->flags & MMC_DATA_READ)) {
		cp += 2;	/* min(N(CR)) + status */
		/* R1 */
	} else {
		cp += 10;	/* max(N(CR)) + status + min(N(RC),N(WR)) */
		if (cmd->flags & MMC_RSP_SPI_S2)	/* R2/R5 */
			cp++;
		else if (cmd->flags & MMC_RSP_SPI_B4)	/* R3/R4/R7 */
			cp += 4;
		else if (cmd->flags & MMC_RSP_BUSY)	/* R1B */
			cp = data->status + sizeof(data->status);
		/* else:  R1 (most commands) */
	}

	dev_dbg(&host->spi->dev, "  mmc_spi: CMD%d, resp %s\n",
		cmd->opcode, maptype(cmd));

	/* send command, leaving chipselect active */
	spi_message_init(&host->m);

	t = &host->t;
	memset(t, 0, sizeof(*t));
	t->tx_buf = t->rx_buf = data->status;
	t->tx_dma = t->rx_dma = host->data_dma;
	t->len = cp - data->status;
	t->cs_change = 1;
	spi_message_add_tail(t, &host->m);

	if (host->dma_dev) {
		host->m.is_dma_mapped = 1;
		dma_sync_single_for_device(host->dma_dev,
				host->data_dma, sizeof(*host->data),
				DMA_BIDIRECTIONAL);
	}
	status = spi_sync_locked(host->spi, &host->m);

	if (host->dma_dev)
		dma_sync_single_for_cpu(host->dma_dev,
				host->data_dma, sizeof(*host->data),
				DMA_BIDIRECTIONAL);
	if (status < 0) {
		dev_dbg(&host->spi->dev, "  ... write returned %d\n", status);
		cmd->error = status;
		return status;
	}

	/* after no-data commands and STOP_TRANSMISSION, chipselect off */
	return mmc_spi_response_get(host, cmd, cs_on);
}
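
/* For reference, the 7-byte frame built above looks like this for CMD0
 * with a zero argument (the canonical example, since its CRC byte is a
 * well-known constant):
 *
 *	ff 40 00 00 00 00 95
 *
 * i.e. one all-ones "ready" byte, 0x40 | opcode, four big-endian argument
 * bytes, then crc7_be() of the five command bytes with the end bit set.
 */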

/* Build data message with up to four separate transfers.  For TX, we
 * start by writing the data token.  And in most cases, we finish with
 * a status transfer.
 *
 * We always provide TX data for data and CRC.  The MMC/SD protocol
 * requires us to write ones; but Linux defaults to writing zeroes;
 * so we explicitly initialize it to all ones on RX paths.
 *
 * We also handle DMA mapping, so the underlying SPI controller does
 * not need to (re)do it for each message.
 */
static void
mmc_spi_setup_data_message(
	struct mmc_spi_host	*host,
	int			multiple,
	enum dma_data_direction	direction)
{
	struct spi_transfer	*t;
	struct scratch		*scratch = host->data;
	dma_addr_t		dma = host->data_dma;

	spi_message_init(&host->m);
	if (dma)
		host->m.is_dma_mapped = 1;

	/* for reads, readblock() skips 0xff bytes before finding
	 * the token; for writes, this transfer issues that token.
	 */
	if (direction == DMA_TO_DEVICE) {
		t = &host->token;
		memset(t, 0, sizeof(*t));
		t->len = 1;
		if (multiple)
			scratch->data_token = SPI_TOKEN_MULTI_WRITE;
		else
			scratch->data_token = SPI_TOKEN_SINGLE;
		t->tx_buf = &scratch->data_token;
		if (dma)
			t->tx_dma = dma + offsetof(struct scratch, data_token);
		spi_message_add_tail(t, &host->m);
	}

	/* Body of transfer is buffer, then CRC ...
	 * either TX-only, or RX with TX-ones.
	 */
	t = &host->t;
	memset(t, 0, sizeof(*t));
	t->tx_buf = host->ones;
	t->tx_dma = host->ones_dma;
	/* length and actual buffer info are written later */
	spi_message_add_tail(t, &host->m);

	t = &host->crc;
	memset(t, 0, sizeof(*t));
	t->len = 2;
	if (direction == DMA_TO_DEVICE) {
		/* the actual CRC may get written later */
		t->tx_buf = &scratch->crc_val;
		if (dma)
			t->tx_dma = dma + offsetof(struct scratch, crc_val);
	} else {
		t->tx_buf = host->ones;
		t->tx_dma = host->ones_dma;
		t->rx_buf = &scratch->crc_val;
		if (dma)
			t->rx_dma = dma + offsetof(struct scratch, crc_val);
	}
	spi_message_add_tail(t, &host->m);

	/*
	 * A single block read is followed by N(EC) [0+] all-ones bytes
	 * before deselect ... don't bother.
	 *
	 * Multiblock reads are followed by N(AC) [1+] all-ones bytes before
	 * the next block is read, or a STOP_TRANSMISSION is issued.  We'll
	 * collect that single byte, so readblock() doesn't need to.
	 *
	 * For a write, the one-byte data response follows immediately, then
	 * come zero or more busy bytes, then N(WR) [1+] all-ones bytes.
	 * Then single block reads may deselect, and multiblock ones issue
	 * the next token (next data block, or STOP_TRAN).  We can try to
	 * minimize I/O ops by using a single read to collect end-of-busy.
	 */
	if (multiple || direction == DMA_TO_DEVICE) {
		t = &host->early_status;
		memset(t, 0, sizeof(*t));
		t->len = (direction == DMA_TO_DEVICE) ? sizeof(scratch->status) : 1;
		t->tx_buf = host->ones;
		t->tx_dma = host->ones_dma;
		t->rx_buf = scratch->status;
		if (dma)
			t->rx_dma = dma + offsetof(struct scratch, status);
		t->cs_change = 1;
		spi_message_add_tail(t, &host->m);
	}
}
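
/* Summarizing the message built above, for quick reference:
 *
 *	write:            [token:1] [data:N] [crc:2] [status:29]
 *	multiblock read:             [data:N] [crc:2] [status:1]
 *	single read:                 [data:N] [crc:2]
 *
 * where the trailing status transfer collects the data response and (for
 * writes) as much end-of-busy polling as fits in the scratch buffer.
 */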

/*
 * Write one block:
 *  - caller handled preceding N(WR) [1+] all-ones bytes
 *  - data block
 *	+ token
 *	+ data bytes
 *	+ crc16
 *  - an all-ones byte ... card writes a data-response byte
 *  - followed by N(EC) [0+] all-ones bytes, card writes zero/'busy'
 *
 * Return negative errno, else success.
 */
static int
mmc_spi_writeblock(struct mmc_spi_host *host, struct spi_transfer *t,
	unsigned long timeout)
{
	struct spi_device *spi = host->spi;
	int status, i;
	struct scratch	*scratch = host->data;
	u32 pattern;

	if (host->mmc->use_spi_crc)
		scratch->crc_val = cpu_to_be16(crc_itu_t(0, t->tx_buf, t->len));
	if (host->dma_dev)
		dma_sync_single_for_device(host->dma_dev,
				host->data_dma, sizeof(*scratch),
				DMA_BIDIRECTIONAL);

	status = spi_sync_locked(spi, &host->m);

	if (status != 0) {
		dev_dbg(&spi->dev, "write error (%d)\n", status);
		return status;
	}

	if (host->dma_dev)
		dma_sync_single_for_cpu(host->dma_dev,
				host->data_dma, sizeof(*scratch),
				DMA_BIDIRECTIONAL);

	/*
	 * Get the transmission data-response reply.  It must follow
	 * immediately after the data block we transferred.  This reply
	 * doesn't necessarily tell whether the write operation succeeded;
	 * it just says if the transmission was ok and whether *earlier*
	 * writes succeeded; see the standard.
	 *
	 * In practice, there are cards (even modern SDHC ones) which are
	 * late in sending the response, and miss the time frame by a few
	 * bits, so we have to cope with this situation and check the
	 * response bit-by-bit.  Arggh!!!
	 */
	pattern = get_unaligned_be32(scratch->status);

	/* The first 3 bits of the pattern are undefined */
	pattern |= 0xE0000000;

	/* left-adjust to leading 0 bit */
	while (pattern & 0x80000000)
		pattern <<= 1;
	/* right-adjust for pattern matching.  Code is in bits 4..0 now. */
	pattern >>= 27;

	switch (pattern) {
	case SPI_RESPONSE_ACCEPTED:
		status = 0;
		break;
	case SPI_RESPONSE_CRC_ERR:
		/* host shall then issue MMC_STOP_TRANSMISSION */
		status = -EILSEQ;
		break;
	case SPI_RESPONSE_WRITE_ERR:
		/* host shall then issue MMC_STOP_TRANSMISSION,
		 * and should MMC_SEND_STATUS to sort it out
		 */
		status = -EIO;
		break;
	default:
		status = -EPROTO;
		break;
	}
	if (status != 0) {
		dev_dbg(&spi->dev, "write error %02x (%d)\n",
			scratch->status[0], status);
		return status;
	}

	t->tx_buf += t->len;
	if (host->dma_dev)
		t->tx_dma += t->len;

	/* Return when not busy.  If we didn't collect that status yet,
	 * we'll need some more I/O.
	 */
	for (i = 4; i < sizeof(scratch->status); i++) {
		/* card is non-busy if the most recent bit is 1 */
		if (scratch->status[i] & 0x01)
			return 0;
	}
	return mmc_spi_wait_unbusy(host, timeout);
}
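
/* Worked examples for the bit-by-bit scan above (illustrative bytes): an
 * on-time "accepted" response gives status bytes e5 00 00 00, so pattern
 * starts as 0xe5000000; ORing in 0xe0000000 changes nothing, three left
 * shifts drop the leading ones, and pattern >> 27 yields 0b00101 ==
 * SPI_RESPONSE_ACCEPTED.  If the same response arrives one bit late, the
 * bytes read f2 ff ff ff instead; four shifts then give 0x2ffffff0, and
 * the extraction still yields 0b00101.
 */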

/*
 * Read one block:
 *  - skip leading all-ones bytes ... either
 *      + N(AC) [1..f(clock,CSD)] usually, else
 *      + N(CX) [0..8] when reading CSD or CID
 *  - data block
 *	+ token ... if error token, no data or crc
 *	+ data bytes
 *	+ crc16
 *
 * After single block reads, we're done; N(EC) [0+] all-ones bytes follow
 * before dropping chipselect.
 *
 * For multiblock reads, caller either reads the next block or issues a
 * STOP_TRANSMISSION command.
 */
static int
mmc_spi_readblock(struct mmc_spi_host *host, struct spi_transfer *t,
	unsigned long timeout)
{
	struct spi_device *spi = host->spi;
	int status;
	struct scratch	*scratch = host->data;
	unsigned int bitshift;
	u8 leftover;

	/* At least one SD card sends an all-zeroes byte when N(CX)
	 * applies, before the all-ones bytes ... just cope with that.
	 */
	status = mmc_spi_readbytes(host, 1);
	if (status < 0)
		return status;
	status = scratch->status[0];
	if (status == 0xff || status == 0)
		status = mmc_spi_readtoken(host, timeout);

	if (status < 0) {
		dev_dbg(&spi->dev, "read error %02x (%d)\n", status, status);
		return status;
	}

	/* The token may be bit-shifted...
	 * the first 0-bit precedes the data stream.
	 */
	bitshift = 7;
	while (status & 0x80) {
		status <<= 1;
		bitshift--;
	}
	leftover = status << 1;

	if (host->dma_dev) {
		dma_sync_single_for_device(host->dma_dev,
				host->data_dma, sizeof(*scratch),
				DMA_BIDIRECTIONAL);
		dma_sync_single_for_device(host->dma_dev,
				t->rx_dma, t->len,
				DMA_FROM_DEVICE);
	}

	status = spi_sync_locked(spi, &host->m);
	if (status < 0) {
		dev_dbg(&spi->dev, "read error %d\n", status);
		return status;
	}

	if (host->dma_dev) {
		dma_sync_single_for_cpu(host->dma_dev,
				host->data_dma, sizeof(*scratch),
				DMA_BIDIRECTIONAL);
		dma_sync_single_for_cpu(host->dma_dev,
				t->rx_dma, t->len,
				DMA_FROM_DEVICE);
	}

	if (bitshift) {
		/* Walk through the data and the crc and do
		 * all the magic to get byte-aligned data.
		 */
		u8 *cp = t->rx_buf;
		unsigned int len;
		unsigned int bitright = 8 - bitshift;
		u8 temp;
		for (len = t->len; len; len--) {
			temp = *cp;
			*cp++ = leftover | (temp >> bitshift);
			leftover = temp << bitright;
		}
		cp = (u8 *) &scratch->crc_val;
		temp = *cp;
		*cp++ = leftover | (temp >> bitshift);
		leftover = temp << bitright;
		temp = *cp;
		*cp = leftover | (temp >> bitshift);
	}

	if (host->mmc->use_spi_crc) {
		u16 crc = crc_itu_t(0, t->rx_buf, t->len);

		be16_to_cpus(&scratch->crc_val);
		if (scratch->crc_val != crc) {
			dev_dbg(&spi->dev,
				"read - crc error: crc_val=0x%04x, computed=0x%04x len=%d\n",
				scratch->crc_val, crc, t->len);
			return -EILSEQ;
		}
	}

	t->rx_buf += t->len;
	if (host->dma_dev)
		t->rx_dma += t->len;

	return 0;
}
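
/* To illustrate the alignment math above (made-up wire bytes): a clean
 * SPI_TOKEN_SINGLE read returns 0xfe, which shifts left seven times, so
 * bitshift ends up 0 and no realignment runs.  A token byte of 0xf8 --
 * five leading ones, then the start bit, then the first two data bits --
 * leaves bitshift == 2, and every realigned byte combines the two bits
 * carried over in "leftover" with the top six bits of the next raw byte.
 */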

/*
 * An MMC/SD data stage includes one or more blocks, optional CRCs,
 * and inline handshaking.  That handshaking makes it unlike most
 * other SPI protocol stacks.
 */
static void
mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd,
		struct mmc_data *data, u32 blk_size)
{
	struct spi_device	*spi = host->spi;
	struct device		*dma_dev = host->dma_dev;
	struct spi_transfer	*t;
	enum dma_data_direction	direction;
	struct scatterlist	*sg;
	unsigned		n_sg;
	int			multiple = (data->blocks > 1);
	u32			clock_rate;
	unsigned long		timeout;

	direction = mmc_get_dma_dir(data);
	mmc_spi_setup_data_message(host, multiple, direction);
	t = &host->t;

	if (t->speed_hz)
		clock_rate = t->speed_hz;
	else
		clock_rate = spi->max_speed_hz;

	timeout = data->timeout_ns +
		  data->timeout_clks * 1000000 / clock_rate;
	timeout = usecs_to_jiffies((unsigned int)(timeout / 1000)) + 1;

	/* Handle scatterlist segments one at a time, with sync for
	 * each 512-byte block
	 */
	for_each_sg(data->sg, sg, data->sg_len, n_sg) {
		int			status = 0;
		dma_addr_t		dma_addr = 0;
		void			*kmap_addr;
		unsigned		length = sg->length;
		enum dma_data_direction	dir = direction;

		/* set up dma mapping for controller drivers that might
		 * use DMA ... though they may fall back to PIO
		 */
		if (dma_dev) {
			/* never invalidate whole *shared* pages ... */
			if ((sg->offset != 0 || length != PAGE_SIZE)
					&& dir == DMA_FROM_DEVICE)
				dir = DMA_BIDIRECTIONAL;

			dma_addr = dma_map_page(dma_dev, sg_page(sg), 0,
						PAGE_SIZE, dir);
			if (dma_mapping_error(dma_dev, dma_addr)) {
				data->error = -EFAULT;
				break;
			}
			if (direction == DMA_TO_DEVICE)
				t->tx_dma = dma_addr + sg->offset;
			else
				t->rx_dma = dma_addr + sg->offset;
		}

		/* allow pio too; we don't allow highmem */
		kmap_addr = kmap(sg_page(sg));
		if (direction == DMA_TO_DEVICE)
			t->tx_buf = kmap_addr + sg->offset;
		else
			t->rx_buf = kmap_addr + sg->offset;

		/* transfer each block, and update request status */
		while (length) {
			t->len = min(length, blk_size);

			dev_dbg(&host->spi->dev,
				"    mmc_spi: %s block, %d bytes\n",
				(direction == DMA_TO_DEVICE) ? "write" : "read",
				t->len);

			if (direction == DMA_TO_DEVICE)
				status = mmc_spi_writeblock(host, t, timeout);
			else
				status = mmc_spi_readblock(host, t, timeout);
			if (status < 0)
				break;

			data->bytes_xfered += t->len;
			length -= t->len;

			if (!multiple)
				break;
		}

		/* discard mappings */
		if (direction == DMA_FROM_DEVICE)
			flush_kernel_dcache_page(sg_page(sg));
		kunmap(sg_page(sg));
		if (dma_dev)
			dma_unmap_page(dma_dev, dma_addr, PAGE_SIZE, dir);

		if (status < 0) {
			data->error = status;
			dev_dbg(&spi->dev, "%s status %d\n",
				(direction == DMA_TO_DEVICE) ? "write" : "read",
				status);
			break;
		}
	}

	/* NOTE some docs describe an MMC-only SET_BLOCK_COUNT (CMD23) that
	 * can be issued before multiblock writes.  Unlike its more widely
	 * documented analogue for SD cards (SET_WR_BLK_ERASE_COUNT, ACMD23),
	 * that can affect the STOP_TRAN logic.  Complete (and current)
	 * MMC specs should sort that out before Linux starts using CMD23.
	 */
	if (direction == DMA_TO_DEVICE && multiple) {
		struct scratch	*scratch = host->data;
		int		tmp;
		const unsigned	statlen = sizeof(scratch->status);

		dev_dbg(&spi->dev, "  mmc_spi: STOP_TRAN\n");

		/* Tweak the per-block message we set up earlier by morphing
		 * it to hold single buffer with the token followed by some
		 * all-ones bytes ... skip N(BR) (0..1), scan the rest for
		 * "not busy any longer" status, and leave chip selected.
		 */
		INIT_LIST_HEAD(&host->m.transfers);
		list_add(&host->early_status.transfer_list,
				&host->m.transfers);

		memset(scratch->status, 0xff, statlen);
		scratch->status[0] = SPI_TOKEN_STOP_TRAN;

		host->early_status.tx_buf = host->early_status.rx_buf;
		host->early_status.tx_dma = host->early_status.rx_dma;
		host->early_status.len = statlen;

		if (host->dma_dev)
			dma_sync_single_for_device(host->dma_dev,
					host->data_dma, sizeof(*scratch),
					DMA_BIDIRECTIONAL);

		tmp = spi_sync_locked(spi, &host->m);

		if (host->dma_dev)
			dma_sync_single_for_cpu(host->dma_dev,
					host->data_dma, sizeof(*scratch),
					DMA_BIDIRECTIONAL);

		if (tmp < 0) {
			if (!data->error)
				data->error = tmp;
			return;
		}

		/* Ideally we collected "not busy" status with one I/O,
		 * avoiding wasteful byte-at-a-time scanning... but more
		 * I/O is often needed.
		 */
		for (tmp = 2; tmp < statlen; tmp++) {
			if (scratch->status[tmp] != 0)
				return;
		}
		tmp = mmc_spi_wait_unbusy(host, timeout);
		if (tmp < 0 && !data->error)
			data->error = tmp;
	}
}
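
/* End-to-end, a three-block write therefore looks like this sketch (using
 * the token names defined at the top of this file):
 *
 *	CMD25 ... fc <512 data bytes> <crc> <response/busy>	(x3)
 *	fd <all-ones scan for end of busy>			(STOP_TRAN)
 *
 * with the per-block message reused as-is for each block, then morphed in
 * place for the stop token as described above.
 */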

/****************************************************************************/

/*
 * MMC driver implementation -- the interface to the MMC stack
 */

static void mmc_spi_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mmc_spi_host	*host = mmc_priv(mmc);
	int			status = -EINVAL;
	int			crc_retry = 5;
	struct mmc_command	stop;

#ifdef DEBUG
	/* MMC core and layered drivers *MUST* issue SPI-aware commands */
	{
		struct mmc_command	*cmd;
		int			invalid = 0;

		cmd = mrq->cmd;
		if (!mmc_spi_resp_type(cmd)) {
			dev_dbg(&host->spi->dev, "bogus command\n");
			cmd->error = -EINVAL;
			invalid = 1;
		}

		cmd = mrq->stop;
		if (cmd && !mmc_spi_resp_type(cmd)) {
			dev_dbg(&host->spi->dev, "bogus STOP command\n");
			cmd->error = -EINVAL;
			invalid = 1;
		}

		if (invalid) {
			dump_stack();
			mmc_request_done(host->mmc, mrq);
			return;
		}
	}
#endif

	/* request exclusive bus access */
	spi_bus_lock(host->spi->master);

crc_recover:
	/* issue command; then optionally data and stop */
	status = mmc_spi_command_send(host, mrq, mrq->cmd, mrq->data != NULL);
	if (status == 0 && mrq->data) {
		mmc_spi_data_do(host, mrq->cmd, mrq->data, mrq->data->blksz);

		/*
		 * The SPI bus is not always reliable for large data transfers.
		 * If an occasional crc error is reported by the SD device with
		 * data read/write over SPI, it may be recovered by repeating
		 * the last SD command again.  The retry count is set to 5 to
		 * ensure the driver passes stress tests.
		 */
		if (mrq->data->error == -EILSEQ && crc_retry) {
			stop.opcode = MMC_STOP_TRANSMISSION;
			stop.arg = 0;
			stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
			status = mmc_spi_command_send(host, mrq, &stop, 0);
			crc_retry--;
			mrq->data->error = 0;
			goto crc_recover;
		}

		if (mrq->stop)
			status = mmc_spi_command_send(host, mrq, mrq->stop, 0);
		else
			mmc_cs_off(host);
	}

	/* release the bus */
	spi_bus_unlock(host->spi->master);

	mmc_request_done(host->mmc, mrq);
}
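
/* A note on the locking above: spi_bus_lock() keeps other SPI devices off
 * the shared bus for the whole request, which is why every transfer in
 * this file goes through spi_sync_locked() rather than spi_sync() -- the
 * card must stay selected across the command, data and busy phases with
 * no interleaved traffic from other chip selects.
 */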

/* See Section 6.4.1, in SD "Simplified Physical Layer Specification 2.0"
 *
 * NOTE that here we can't know that the card has just been powered up;
 * not all MMC/SD sockets support power switching.
 *
 * FIXME when the card is still in SPI mode, e.g. from a previous kernel,
 * this doesn't seem to do the right thing at all...
 */
static void mmc_spi_initsequence(struct mmc_spi_host *host)
{
	/* Try to be very sure any previous command has completed;
	 * wait till not-busy, skip debris from any old commands.
	 */
	mmc_spi_wait_unbusy(host, r1b_timeout);
	mmc_spi_readbytes(host, 10);

	/*
	 * Do a burst with chipselect active-high.  We need to do this to
	 * meet the requirement of 74 clock cycles with both chipselect
	 * and CMD (MOSI) high before CMD0 ... after the card has been
	 * powered up to Vdd(min), and so is ready to take commands.
	 *
	 * Some cards are particularly needy of this (e.g. Viking "SD256")
	 * while most others don't seem to care.
	 *
	 * Note that this is one of the places MMC/SD plays games with the
	 * SPI protocol.  Another is that when chipselect is released while
	 * the card returns BUSY status, the clock must issue several cycles
	 * with chipselect high before the card will stop driving its output.
	 */
	host->spi->mode |= SPI_CS_HIGH;
	if (spi_setup(host->spi) != 0) {
		/* Just warn; most cards work without it. */
		dev_warn(&host->spi->dev,
				"can't change chip-select polarity\n");
		host->spi->mode &= ~SPI_CS_HIGH;
	} else {
		mmc_spi_readbytes(host, 18);

		host->spi->mode &= ~SPI_CS_HIGH;
		if (spi_setup(host->spi) != 0) {
			/* Wot, we can't get the same setup we had before? */
			dev_err(&host->spi->dev,
					"can't restore chip-select polarity\n");
		}
	}
}
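
/* The byte counts above map onto the spec's clock requirements: the
 * initial 10-byte read provides 80 clocks of post-command drain, and the
 * 18-byte burst with chipselect forced high provides 144 clocks, safely
 * more than the 74 that section 6.4.1 demands before CMD0.
 */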

static char *mmc_powerstring(u8 power_mode)
{
	switch (power_mode) {
	case MMC_POWER_OFF: return "off";
	case MMC_POWER_UP:  return "up";
	case MMC_POWER_ON:  return "on";
	}
	return "?";
}

static void mmc_spi_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct mmc_spi_host *host = mmc_priv(mmc);

	if (host->power_mode != ios->power_mode) {
		int		canpower;

		canpower = host->pdata && host->pdata->setpower;

		dev_dbg(&host->spi->dev, "mmc_spi: power %s (%d)%s\n",
				mmc_powerstring(ios->power_mode),
				ios->vdd,
				canpower ? ", can switch" : "");

		/* switch power on/off if possible, accounting for
		 * max 250msec powerup time if needed.
		 */
		if (canpower) {
			switch (ios->power_mode) {
			case MMC_POWER_OFF:
			case MMC_POWER_UP:
				host->pdata->setpower(&host->spi->dev,
						ios->vdd);
				if (ios->power_mode == MMC_POWER_UP)
					msleep(host->powerup_msecs);
			}
		}

		/* See 6.4.1 in the simplified SD card physical spec 2.0 */
		if (ios->power_mode == MMC_POWER_ON)
			mmc_spi_initsequence(host);

		/* If powering down, ground all card inputs to avoid power
		 * delivery from data lines!  On a shared SPI bus, this
		 * will probably be temporary; 6.4.2 of the simplified SD
		 * spec says this must last at least 1msec.
		 *
		 *   - Clock low means CPOL 0, e.g. mode 0
		 *   - MOSI low comes from writing zero
		 *   - Chipselect is usually active low...
		 */
		if (canpower && ios->power_mode == MMC_POWER_OFF) {
			int mres;
			u8 nullbyte = 0;

			host->spi->mode &= ~(SPI_CPOL|SPI_CPHA);
			mres = spi_setup(host->spi);
			if (mres < 0)
				dev_dbg(&host->spi->dev,
					"switch to SPI mode 0 failed\n");

			if (spi_write(host->spi, &nullbyte, 1) < 0)
				dev_dbg(&host->spi->dev,
					"put spi signals to low failed\n");

			/*
			 * Now clock should be low due to spi mode 0;
			 * MOSI should be low because of written 0x00;
			 * chipselect should be low (it is active low);
			 * power supply is off, so now MMC is off too!
			 *
			 * FIXME no, chipselect can be high since the
			 * device is inactive and SPI_CS_HIGH is clear...
			 */
			msleep(10);
			if (mres == 0) {
				host->spi->mode |= (SPI_CPOL|SPI_CPHA);
				mres = spi_setup(host->spi);
				if (mres < 0)
					dev_dbg(&host->spi->dev,
						"switch back to SPI mode 3 failed\n");
			}
		}

		host->power_mode = ios->power_mode;
	}

	if (host->spi->max_speed_hz != ios->clock && ios->clock != 0) {
		int		status;

		host->spi->max_speed_hz = ios->clock;
		status = spi_setup(host->spi);
		dev_dbg(&host->spi->dev,
			"mmc_spi: clock to %d Hz, %d\n",
			host->spi->max_speed_hz, status);
	}
}

static const struct mmc_host_ops mmc_spi_ops = {
	.request	= mmc_spi_request,
	.set_ios	= mmc_spi_set_ios,
	.get_ro		= mmc_gpio_get_ro,
	.get_cd		= mmc_gpio_get_cd,
};
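
/* In practice the clock branch above runs at least twice per card: the MMC
 * core first asks for a rate near 400 kHz (within [f_min, f_max]) for the
 * identification phase, then raises it once the card's own speed limit is
 * known; f_min and f_max themselves are set in mmc_spi_probe() below.
 */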

/****************************************************************************/

/*
 * SPI driver implementation
 */

static irqreturn_t
mmc_spi_detect_irq(int irq, void *mmc)
{
	struct mmc_spi_host *host = mmc_priv(mmc);
	u16 delay_msec = max(host->pdata->detect_delay, (u16)100);

	mmc_detect_change(mmc, msecs_to_jiffies(delay_msec));
	return IRQ_HANDLED;
}

static int mmc_spi_probe(struct spi_device *spi)
{
	void			*ones;
	struct mmc_host		*mmc;
	struct mmc_spi_host	*host;
	int			status;
	bool			has_ro = false;

	/* We rely on full duplex transfers, mostly to reduce
	 * per-transfer overheads (by making fewer transfers).
	 */
	if (spi->master->flags & SPI_MASTER_HALF_DUPLEX)
		return -EINVAL;

	/* MMC and SD specs only seem to care that sampling is on the
	 * rising edge ... meaning SPI modes 0 or 3.  So either SPI mode
	 * should be legit.  We'll use mode 0 since the steady state is 0,
	 * which is appropriate for hotplugging, unless the platform data
	 * specifies mode 3 (for hardware that isn't compatible with mode 0).
	 */
	if (spi->mode != SPI_MODE_3)
		spi->mode = SPI_MODE_0;
	spi->bits_per_word = 8;

	status = spi_setup(spi);
	if (status < 0) {
		dev_dbg(&spi->dev, "needs SPI mode %02x, %d KHz; %d\n",
				spi->mode, spi->max_speed_hz / 1000,
				status);
		return status;
	}

	/* We need a supply of ones to transmit.  This is the only time
	 * the CPU touches these, so cache coherency isn't a concern.
	 *
	 * NOTE if many systems use more than one MMC-over-SPI connector
	 * it'd save some memory to share this.  That's evidently rare.
	 */
	status = -ENOMEM;
	ones = kmalloc(MMC_SPI_BLOCKSIZE, GFP_KERNEL);
	if (!ones)
		goto nomem;
	memset(ones, 0xff, MMC_SPI_BLOCKSIZE);

	mmc = mmc_alloc_host(sizeof(*host), &spi->dev);
	if (!mmc)
		goto nomem;

	mmc->ops = &mmc_spi_ops;
	mmc->max_blk_size = MMC_SPI_BLOCKSIZE;
	mmc->max_segs = MMC_SPI_BLOCKSATONCE;
	mmc->max_req_size = MMC_SPI_BLOCKSATONCE * MMC_SPI_BLOCKSIZE;
	mmc->max_blk_count = MMC_SPI_BLOCKSATONCE;

	mmc->caps = MMC_CAP_SPI;

	/* SPI doesn't need the lowspeed device identification thing for
	 * MMC or SD cards, since it never comes up in open drain mode.
	 * That's good; some SPI masters can't handle very low speeds!
	 *
	 * However, low speed SDIO cards need not handle over 400 KHz;
	 * that's the only reason not to use a few MHz for f_min (until
	 * the upper layer reads the target frequency from the CSD).
	 */
	mmc->f_min = 400000;
	mmc->f_max = spi->max_speed_hz;

	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->spi = spi;

	host->ones = ones;

	/* Platform data is used to hook up things like card sensing
	 * and power switching gpios.
	 */
	host->pdata = mmc_spi_get_pdata(spi);
	if (host->pdata)
		mmc->ocr_avail = host->pdata->ocr_mask;
	if (!mmc->ocr_avail) {
		dev_warn(&spi->dev, "ASSUMING 3.2-3.4 V slot power\n");
		mmc->ocr_avail = MMC_VDD_32_33|MMC_VDD_33_34;
	}
	if (host->pdata && host->pdata->setpower) {
		host->powerup_msecs = host->pdata->powerup_msecs;
		if (!host->powerup_msecs || host->powerup_msecs > 250)
			host->powerup_msecs = 250;
	}

	dev_set_drvdata(&spi->dev, mmc);

	/* preallocate dma buffers */
	host->data = kmalloc(sizeof(*host->data), GFP_KERNEL);
	if (!host->data)
		goto fail_nobuf1;

	if (spi->master->dev.parent->dma_mask) {
		struct device	*dev = spi->master->dev.parent;

		host->dma_dev = dev;
		host->ones_dma = dma_map_single(dev, ones,
				MMC_SPI_BLOCKSIZE, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, host->ones_dma))
			goto fail_ones_dma;
		host->data_dma = dma_map_single(dev, host->data,
				sizeof(*host->data), DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, host->data_dma))
			goto fail_data_dma;

		dma_sync_single_for_cpu(host->dma_dev,
				host->data_dma, sizeof(*host->data),
				DMA_BIDIRECTIONAL);
	}

	/* setup message for status/busy readback */
	spi_message_init(&host->readback);
	host->readback.is_dma_mapped = (host->dma_dev != NULL);

	spi_message_add_tail(&host->status, &host->readback);
	host->status.tx_buf = host->ones;
	host->status.tx_dma = host->ones_dma;
	host->status.rx_buf = &host->data->status;
	host->status.rx_dma = host->data_dma + offsetof(struct scratch, status);
	host->status.cs_change = 1;

	/* register card detect irq */
	if (host->pdata && host->pdata->init) {
		status = host->pdata->init(&spi->dev, mmc_spi_detect_irq, mmc);
		if (status != 0)
			goto fail_glue_init;
	}

	/* pass platform capabilities, if any */
	if (host->pdata) {
		mmc->caps |= host->pdata->caps;
		mmc->caps2 |= host->pdata->caps2;
	}

	status = mmc_add_host(mmc);
	if (status != 0)
		goto fail_add_host;

	/*
	 * Index 0 is card detect
	 * Old boardfiles were specifying 1 ms as debounce
	 */
	status = mmc_gpiod_request_cd(mmc, NULL, 0, false, 1000, NULL);
	if (status == -EPROBE_DEFER)
		goto fail_add_host;
	if (!status) {
		/*
		 * The platform has a CD GPIO signal that may support
		 * interrupts, so let mmc_gpiod_request_cd_irq() decide
		 * if polling is needed or not.
		 */
		mmc->caps &= ~MMC_CAP_NEEDS_POLL;
		mmc_gpiod_request_cd_irq(mmc);
	}
	mmc_detect_change(mmc, 0);

	/* Index 1 is write protect/read only */
	status = mmc_gpiod_request_ro(mmc, NULL, 1, 0, NULL);
	if (status == -EPROBE_DEFER)
		goto fail_add_host;
	if (!status)
		has_ro = true;

	dev_info(&spi->dev, "SD/MMC host %s%s%s%s%s\n",
			dev_name(&mmc->class_dev),
			host->dma_dev ? "" : ", no DMA",
			has_ro ? "" : ", no WP",
			(host->pdata && host->pdata->setpower)
				? "" : ", no poweroff",
			(mmc->caps & MMC_CAP_NEEDS_POLL)
				? ", cd polling" : "");
	return 0;

fail_add_host:
	mmc_remove_host(mmc);
fail_glue_init:
	if (host->dma_dev)
		dma_unmap_single(host->dma_dev, host->data_dma,
				sizeof(*host->data), DMA_BIDIRECTIONAL);
fail_data_dma:
	if (host->dma_dev)
		dma_unmap_single(host->dma_dev, host->ones_dma,
				MMC_SPI_BLOCKSIZE, DMA_TO_DEVICE);
fail_ones_dma:
	kfree(host->data);

fail_nobuf1:
	mmc_free_host(mmc);
	mmc_spi_put_pdata(spi);

nomem:
	kfree(ones);
	return status;
}
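
/* A devicetree node for this driver typically looks something like the
 * abridged example below (adapted from the "mmc-spi-slot" binding
 * document; the GPIO phandles and rate are board-specific and shown only
 * for illustration):
 *
 *	mmc-slot@0 {
 *		compatible = "fsl,mpc8323rdb-mmc-slot", "mmc-spi-slot";
 *		reg = <0>;
 *		gpios = <&qe_pio_d 14 1
 *			 &qe_pio_d 15 0>;
 *		voltage-ranges = <3300 3300>;
 *		spi-max-frequency = <50000000>;
 *	};
 */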

static int mmc_spi_remove(struct spi_device *spi)
{
	struct mmc_host		*mmc = dev_get_drvdata(&spi->dev);
	struct mmc_spi_host	*host = mmc_priv(mmc);

	/* prevent new mmc_detect_change() calls */
	if (host->pdata && host->pdata->exit)
		host->pdata->exit(&spi->dev, mmc);

	mmc_remove_host(mmc);

	if (host->dma_dev) {
		dma_unmap_single(host->dma_dev, host->ones_dma,
			MMC_SPI_BLOCKSIZE, DMA_TO_DEVICE);
		dma_unmap_single(host->dma_dev, host->data_dma,
			sizeof(*host->data), DMA_BIDIRECTIONAL);
	}

	kfree(host->data);
	kfree(host->ones);

	spi->max_speed_hz = mmc->f_max;
	mmc_free_host(mmc);
	mmc_spi_put_pdata(spi);
	return 0;
}

static const struct of_device_id mmc_spi_of_match_table[] = {
	{ .compatible = "mmc-spi-slot", },
	{},
};
MODULE_DEVICE_TABLE(of, mmc_spi_of_match_table);

static struct spi_driver mmc_spi_driver = {
	.driver = {
		.name =		"mmc_spi",
		.of_match_table = mmc_spi_of_match_table,
	},
	.probe =	mmc_spi_probe,
	.remove =	mmc_spi_remove,
};

module_spi_driver(mmc_spi_driver);

MODULE_AUTHOR("Mike Lavender, David Brownell, Hans-Peter Nilsson, Jan Nikitenko");
MODULE_DESCRIPTION("SPI SD/MMC host driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("spi:mmc_spi");