// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * linux/drivers/mmc/core/mmc_ops.c
 *
 * Copyright 2006-2007 Pierre Ossman
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/scatterlist.h>

#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/mmc.h>

#include "core.h"
#include "card.h"
#include "host.h"
#include "mmc_ops.h"

#define MMC_OPS_TIMEOUT_MS		(10 * 60 * 1000) /* 10 min */
#define MMC_BKOPS_TIMEOUT_MS		(120 * 1000) /* 120 s */
#define MMC_CACHE_FLUSH_TIMEOUT_MS	(30 * 1000) /* 30 s */

static const u8 tuning_blk_pattern_4bit[] = {
	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
};

static const u8 tuning_blk_pattern_8bit[] = {
	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
};

int __mmc_send_status(struct mmc_card *card, u32 *status, unsigned int retries)
{
	int err;
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SEND_STATUS;
	if (!mmc_host_is_spi(card->host))
		cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(card->host, &cmd, retries);
	if (err)
		return err;

	/* NOTE: callers are required to understand the difference
	 * between "native" and SPI format status words!
	 */
	if (status)
		*status = cmd.resp[0];

	return 0;
}
EXPORT_SYMBOL_GPL(__mmc_send_status);

int mmc_send_status(struct mmc_card *card, u32 *status)
{
	return __mmc_send_status(card, status, MMC_CMD_RETRIES);
}
EXPORT_SYMBOL_GPL(mmc_send_status);

static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card)
{
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SELECT_CARD;

	if (card) {
		cmd.arg = card->rca << 16;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
	} else {
		cmd.arg = 0;
		cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
	}

	return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
}

int mmc_select_card(struct mmc_card *card)
{
	return _mmc_select_card(card->host, card);
}

int mmc_deselect_cards(struct mmc_host *host)
{
	return _mmc_select_card(host, NULL);
}
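/*
 * Example (illustrative only, not part of the upstream file): a typical
 * caller decoding a native-mode status word returned by mmc_send_status().
 * In SPI mode the response format differs, as noted above.
 *
 *	u32 status;
 *	int err = mmc_send_status(card, &status);
 *
 *	if (!err && R1_CURRENT_STATE(status) == R1_STATE_TRAN)
 *		pr_debug("card is selected and ready for data transfer\n");
 */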
/*
 * Write the value specified in the device tree or board code into the optional
 * 16 bit Driver Stage Register. This can be used to tune rise/fall times and
 * drive strength of the DAT and CMD outputs. The actual meaning of a given
 * value is hardware dependent.
 * The presence of the DSR register can be determined from the CSD register,
 * bit 76.
 */
int mmc_set_dsr(struct mmc_host *host)
{
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SET_DSR;

	cmd.arg = (host->dsr << 16) | 0xffff;
	cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;

	return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
}

int mmc_go_idle(struct mmc_host *host)
{
	int err;
	struct mmc_command cmd = {};

	/*
	 * Non-SPI hosts need to prevent chipselect going active during
	 * GO_IDLE; that would put chips into SPI mode. Remind them of
	 * that in case of hardware that won't pull up DAT3/nCS otherwise.
	 *
	 * SPI hosts ignore ios.chip_select; it's managed according to
	 * rules that must accommodate non-MMC slaves which this layer
	 * won't even know about.
	 */
	if (!mmc_host_is_spi(host)) {
		mmc_set_chip_select(host, MMC_CS_HIGH);
		mmc_delay(1);
	}

	cmd.opcode = MMC_GO_IDLE_STATE;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC;

	err = mmc_wait_for_cmd(host, &cmd, 0);

	mmc_delay(1);

	if (!mmc_host_is_spi(host)) {
		mmc_set_chip_select(host, MMC_CS_DONTCARE);
		mmc_delay(1);
	}

	host->use_spi_crc = 0;

	return err;
}

int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
{
	struct mmc_command cmd = {};
	int i, err = 0;

	cmd.opcode = MMC_SEND_OP_COND;
	cmd.arg = mmc_host_is_spi(host) ? 0 : ocr;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;

	for (i = 100; i; i--) {
		err = mmc_wait_for_cmd(host, &cmd, 0);
		if (err)
			break;

		/* wait until reset completes */
		if (mmc_host_is_spi(host)) {
			if (!(cmd.resp[0] & R1_SPI_IDLE))
				break;
		} else {
			if (cmd.resp[0] & MMC_CARD_BUSY)
				break;
		}

		err = -ETIMEDOUT;

		mmc_delay(10);

		/*
		 * According to eMMC specification v5.1 section 6.4.3, we
		 * should issue CMD1 repeatedly in the idle state until
		 * the eMMC is ready. Otherwise some eMMC devices seem to enter
		 * the inactive mode after mmc_init_card() issued CMD0 when
		 * the eMMC device is busy.
		 */
		if (!ocr && !mmc_host_is_spi(host))
			cmd.arg = cmd.resp[0] | BIT(30);
	}

	if (rocr && !mmc_host_is_spi(host))
		*rocr = cmd.resp[0];

	return err;
}

int mmc_set_relative_addr(struct mmc_card *card)
{
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SET_RELATIVE_ADDR;
	cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	return mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
}

static int
mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode)
{
	int err;
	struct mmc_command cmd = {};

	cmd.opcode = opcode;
	cmd.arg = arg;
	cmd.flags = MMC_RSP_R2 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
	if (err)
		return err;

	memcpy(cxd, cmd.resp, sizeof(u32) * 4);

	return 0;
}
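/*
 * Example (hypothetical init fragment): the power-up handshake that
 * mmc_go_idle() and mmc_send_op_cond() above implement together - CMD0 to
 * reset, then CMD1 polled until the card reports ready. The sector-mode bit
 * (bit 30) shown in the OCR is an assumption borrowed from the core init
 * path.
 *
 *	u32 rocr;
 *	int err = mmc_go_idle(host);
 *
 *	if (!err)
 *		err = mmc_send_op_cond(host, ocr | (1 << 30), &rocr);
 */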
/*
 * NOTE: the caller is required to pass a DMA-capable buffer in @buf, or an
 * on-stack buffer (which incurs some overhead in the callee).
 */
static int
mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
		u32 opcode, void *buf, unsigned len)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;

	mrq.cmd = &cmd;
	mrq.data = &data;

	cmd.opcode = opcode;
	cmd.arg = 0;

	/* NOTE HACK: the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID. Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = len;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;

	sg_init_one(&sg, buf, len);

	if (opcode == MMC_SEND_CSD || opcode == MMC_SEND_CID) {
		/*
		 * The spec states that CSD and CID accesses have a timeout
		 * of 64 clock cycles.
		 */
		data.timeout_ns = 0;
		data.timeout_clks = 64;
	} else
		mmc_set_data_timeout(&data, card);

	mmc_wait_for_req(host, &mrq);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return 0;
}

static int mmc_spi_send_csd(struct mmc_card *card, u32 *csd)
{
	int ret, i;
	__be32 *csd_tmp;

	csd_tmp = kzalloc(16, GFP_KERNEL);
	if (!csd_tmp)
		return -ENOMEM;

	ret = mmc_send_cxd_data(card, card->host, MMC_SEND_CSD, csd_tmp, 16);
	if (ret)
		goto err;

	for (i = 0; i < 4; i++)
		csd[i] = be32_to_cpu(csd_tmp[i]);

err:
	kfree(csd_tmp);
	return ret;
}

int mmc_send_csd(struct mmc_card *card, u32 *csd)
{
	if (mmc_host_is_spi(card->host))
		return mmc_spi_send_csd(card, csd);

	return mmc_send_cxd_native(card->host, card->rca << 16, csd,
				MMC_SEND_CSD);
}

static int mmc_spi_send_cid(struct mmc_host *host, u32 *cid)
{
	int ret, i;
	__be32 *cid_tmp;

	cid_tmp = kzalloc(16, GFP_KERNEL);
	if (!cid_tmp)
		return -ENOMEM;

	ret = mmc_send_cxd_data(NULL, host, MMC_SEND_CID, cid_tmp, 16);
	if (ret)
		goto err;

	for (i = 0; i < 4; i++)
		cid[i] = be32_to_cpu(cid_tmp[i]);

err:
	kfree(cid_tmp);
	return ret;
}

int mmc_send_cid(struct mmc_host *host, u32 *cid)
{
	if (mmc_host_is_spi(host))
		return mmc_spi_send_cid(host, cid);

	return mmc_send_cxd_native(host, 0, cid, MMC_ALL_SEND_CID);
}

int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)
{
	int err;
	u8 *ext_csd;

	if (!card || !new_ext_csd)
		return -EINVAL;

	if (!mmc_can_ext_csd(card))
		return -EOPNOTSUPP;

	/*
	 * As the ext_csd is so large and mostly unused, we don't store the
	 * raw block in mmc_card.
	 */
	ext_csd = kzalloc(512, GFP_KERNEL);
	if (!ext_csd)
		return -ENOMEM;

	err = mmc_send_cxd_data(card, card->host, MMC_SEND_EXT_CSD, ext_csd,
				512);
	if (err)
		kfree(ext_csd);
	else
		*new_ext_csd = ext_csd;

	return err;
}
EXPORT_SYMBOL_GPL(mmc_get_ext_csd);
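/*
 * Example (illustrative caller): fetching the EXT_CSD with the helper above,
 * reading a single field and releasing the buffer, much as
 * mmc_read_bkops_status() later in this file does.
 *
 *	u8 *ext_csd;
 *	int err = mmc_get_ext_csd(card, &ext_csd);
 *
 *	if (!err) {
 *		pr_info("EXT_CSD rev %u\n", ext_csd[EXT_CSD_REV]);
 *		kfree(ext_csd);
 *	}
 */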
int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp)
{
	struct mmc_command cmd = {};
	int err;

	cmd.opcode = MMC_SPI_READ_OCR;
	cmd.arg = highcap ? (1 << 30) : 0;
	cmd.flags = MMC_RSP_SPI_R3;

	err = mmc_wait_for_cmd(host, &cmd, 0);

	*ocrp = cmd.resp[1];
	return err;
}

int mmc_spi_set_crc(struct mmc_host *host, int use_crc)
{
	struct mmc_command cmd = {};
	int err;

	cmd.opcode = MMC_SPI_CRC_ON_OFF;
	cmd.flags = MMC_RSP_SPI_R1;
	cmd.arg = use_crc;

	err = mmc_wait_for_cmd(host, &cmd, 0);
	if (!err)
		host->use_spi_crc = use_crc;
	return err;
}

static int mmc_switch_status_error(struct mmc_host *host, u32 status)
{
	if (mmc_host_is_spi(host)) {
		if (status & R1_SPI_ILLEGAL_COMMAND)
			return -EBADMSG;
	} else {
		if (R1_STATUS(status))
			pr_warn("%s: unexpected status %#x after switch\n",
				mmc_hostname(host), status);
		if (status & R1_SWITCH_ERROR)
			return -EBADMSG;
	}
	return 0;
}

/* Caller must hold re-tuning */
int __mmc_switch_status(struct mmc_card *card, bool crc_err_fatal)
{
	u32 status;
	int err;

	err = mmc_send_status(card, &status);
	if (!crc_err_fatal && err == -EILSEQ)
		return 0;
	if (err)
		return err;

	return mmc_switch_status_error(card->host, status);
}

int mmc_switch_status(struct mmc_card *card)
{
	return __mmc_switch_status(card, true);
}

static int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
			bool send_status, bool retry_crc_err)
{
	struct mmc_host *host = card->host;
	int err;
	unsigned long timeout;
	u32 status = 0;
	bool expired = false;
	bool busy = false;

	/*
	 * When polling with CMD13 isn't allowed and the host can't poll via
	 * ->card_busy() either, rely on waiting out the stated timeout.
	 */
	if (!send_status && !host->ops->card_busy) {
		mmc_delay(timeout_ms);
		return 0;
	}

	timeout = jiffies + msecs_to_jiffies(timeout_ms) + 1;
	do {
		/*
		 * Due to the possibility of being preempted while polling,
		 * check the expiration time first.
		 */
		expired = time_after(jiffies, timeout);

		if (host->ops->card_busy) {
			busy = host->ops->card_busy(host);
		} else {
			err = mmc_send_status(card, &status);
			if (retry_crc_err && err == -EILSEQ) {
				busy = true;
			} else if (err) {
				return err;
			} else {
				err = mmc_switch_status_error(host, status);
				if (err)
					return err;
				busy = R1_CURRENT_STATE(status) == R1_STATE_PRG;
			}
		}

		/* Timeout if the device still remains busy. */
		if (expired && busy) {
			pr_err("%s: Card stuck being busy! %s\n",
				mmc_hostname(host), __func__);
			return -ETIMEDOUT;
		}
	} while (busy);

	return 0;
}
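/*
 * Example (hypothetical host driver, the FOO_* names and register layout are
 * assumptions): the ->card_busy() contract that mmc_poll_for_busy() relies
 * on is simply "return true while DAT0 is pulled low by the card".
 *
 *	static bool foo_card_busy(struct mmc_host *mmc)
 *	{
 *		struct foo_host *host = mmc_priv(mmc);
 *
 *		return !(readl(host->base + FOO_PRESENT_STATE) & FOO_DAT0_LVL);
 *	}
 */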
/**
 * __mmc_switch - modify EXT_CSD register
 * @card: the MMC card associated with the data transfer
 * @set: cmd set values
 * @index: EXT_CSD register index
 * @value: value to program into EXT_CSD register
 * @timeout_ms: timeout (ms) for operation performed by register write,
 *	timeout of zero implies maximum possible timeout
 * @timing: new timing to change to
 * @use_busy_signal: use the busy signal as response type
 * @send_status: send status cmd to poll for busy
 * @retry_crc_err: retry if a CRC error occurs while polling with CMD13 for busy
 *
 * Modifies the EXT_CSD register for the selected card.
 */
int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
		unsigned int timeout_ms, unsigned char timing,
		bool use_busy_signal, bool send_status, bool retry_crc_err)
{
	struct mmc_host *host = card->host;
	int err;
	struct mmc_command cmd = {};
	bool use_r1b_resp = use_busy_signal;
	unsigned char old_timing = host->ios.timing;

	mmc_retune_hold(host);

	if (!timeout_ms) {
		pr_warn("%s: unspecified timeout for CMD6 - use generic\n",
			mmc_hostname(host));
		timeout_ms = card->ext_csd.generic_cmd6_time;
	}

	/*
	 * If the max_busy_timeout of the host is specified, make sure it's
	 * enough to fit the used timeout_ms. In case it's not, let's instruct
	 * the host to avoid HW busy detection, by converting to a R1 response
	 * instead of a R1B. Note, some hosts require R1B, which also means
	 * they are on their own when it comes to dealing with the busy
	 * timeout.
	 */
	if (!(host->caps & MMC_CAP_NEED_RSP_BUSY) && host->max_busy_timeout &&
	    (timeout_ms > host->max_busy_timeout))
		use_r1b_resp = false;

	cmd.opcode = MMC_SWITCH;
	cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
		  (index << 16) |
		  (value << 8) |
		  set;
	cmd.flags = MMC_CMD_AC;
	if (use_r1b_resp) {
		cmd.flags |= MMC_RSP_SPI_R1B | MMC_RSP_R1B;
		cmd.busy_timeout = timeout_ms;
	} else {
		cmd.flags |= MMC_RSP_SPI_R1 | MMC_RSP_R1;
	}

	if (index == EXT_CSD_SANITIZE_START)
		cmd.sanitize_busy = true;

	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
	if (err)
		goto out;

	/* No need to check card status in case of unblocking command */
	if (!use_busy_signal)
		goto out;

	/* If SPI, or if HW busy detection was used above, we don't need to poll. */
	if (((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp) ||
	    mmc_host_is_spi(host))
		goto out_tim;

	/* Let's try to poll to find out when the command is completed. */
	err = mmc_poll_for_busy(card, timeout_ms, send_status, retry_crc_err);
	if (err)
		goto out;

out_tim:
	/* Switch to the new timing before checking the switch status. */
	if (timing)
		mmc_set_timing(host, timing);

	if (send_status) {
		err = mmc_switch_status(card);
		if (err && timing)
			mmc_set_timing(host, old_timing);
	}
out:
	mmc_retune_release(host);

	return err;
}
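/*
 * Example: the CMD6 argument built above, worked through for a concrete
 * switch. Writing 1 to HS_TIMING (EXT_CSD byte 185) with set == 0 encodes
 * as:
 *
 *	(MMC_SWITCH_MODE_WRITE_BYTE << 24) | (185 << 16) | (1 << 8) | 0
 *		== 0x03b90100
 */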
int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
		unsigned int timeout_ms)
{
	return __mmc_switch(card, set, index, value, timeout_ms, 0,
			true, true, false);
}
EXPORT_SYMBOL_GPL(mmc_switch);

int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;
	struct mmc_ios *ios = &host->ios;
	const u8 *tuning_block_pattern;
	int size, err = 0;
	u8 *data_buf;

	if (ios->bus_width == MMC_BUS_WIDTH_8) {
		tuning_block_pattern = tuning_blk_pattern_8bit;
		size = sizeof(tuning_blk_pattern_8bit);
	} else if (ios->bus_width == MMC_BUS_WIDTH_4) {
		tuning_block_pattern = tuning_blk_pattern_4bit;
		size = sizeof(tuning_blk_pattern_4bit);
	} else
		return -EINVAL;

	data_buf = kzalloc(size, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;

	mrq.cmd = &cmd;
	mrq.data = &data;

	cmd.opcode = opcode;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = size;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	/*
	 * According to the tuning specs, the tuning process normally takes
	 * fewer than 40 executions of CMD19, and the timeout should be
	 * shorter than 150 ms.
	 */
	data.timeout_ns = 150 * NSEC_PER_MSEC;

	data.sg = &sg;
	data.sg_len = 1;
	sg_init_one(&sg, data_buf, size);

	mmc_wait_for_req(host, &mrq);

	if (cmd_error)
		*cmd_error = cmd.error;

	if (cmd.error) {
		err = cmd.error;
		goto out;
	}

	if (data.error) {
		err = data.error;
		goto out;
	}

	if (memcmp(data_buf, tuning_block_pattern, size))
		err = -EIO;

out:
	kfree(data_buf);
	return err;
}
EXPORT_SYMBOL_GPL(mmc_send_tuning);

int mmc_abort_tuning(struct mmc_host *host, u32 opcode)
{
	struct mmc_command cmd = {};

	/*
	 * The eMMC specification states that CMD12 can be used to stop a
	 * tuning command, but the SD specification does not, so do nothing
	 * unless it is eMMC.
	 */
	if (opcode != MMC_SEND_TUNING_BLOCK_HS200)
		return 0;

	cmd.opcode = MMC_STOP_TRANSMISSION;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;

	/*
	 * For drivers that override R1 to R1b, set an arbitrary timeout based
	 * on the tuning timeout, i.e. 150 ms.
	 */
	cmd.busy_timeout = 150;

	return mmc_wait_for_cmd(host, &cmd, 0);
}
EXPORT_SYMBOL_GPL(mmc_abort_tuning);
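/*
 * Example (hypothetical ->execute_tuning() fragment, the FOO_* names and
 * foo_set_sample_tap() are assumptions): host drivers typically step a
 * sample delay and use mmc_send_tuning() to score each tap.
 *
 *	for (tap = 0; tap < FOO_MAX_TAPS; tap++) {
 *		foo_set_sample_tap(host, tap);
 *		if (!mmc_send_tuning(mmc, opcode, NULL))
 *			last_good_tap = tap;
 *	}
 */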
static int
mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
		  u8 len)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;
	u8 *data_buf;
	u8 *test_buf;
	int i, err;
	static u8 testdata_8bit[8] = { 0x55, 0xaa, 0, 0, 0, 0, 0, 0 };
	static u8 testdata_4bit[4] = { 0x5a, 0, 0, 0 };

	/* DMA onto the stack is unsafe/nonportable, but callers to this
	 * routine normally provide temporary on-stack buffers ...
	 */
	data_buf = kmalloc(len, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;

	if (len == 8)
		test_buf = testdata_8bit;
	else if (len == 4)
		test_buf = testdata_4bit;
	else {
		pr_err("%s: Invalid bus_width %d\n",
		       mmc_hostname(host), len);
		kfree(data_buf);
		return -EINVAL;
	}

	if (opcode == MMC_BUS_TEST_W)
		memcpy(data_buf, test_buf, len);

	mrq.cmd = &cmd;
	mrq.data = &data;
	cmd.opcode = opcode;
	cmd.arg = 0;

	/* NOTE HACK: the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID. Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = len;
	data.blocks = 1;
	if (opcode == MMC_BUS_TEST_R)
		data.flags = MMC_DATA_READ;
	else
		data.flags = MMC_DATA_WRITE;

	data.sg = &sg;
	data.sg_len = 1;
	mmc_set_data_timeout(&data, card);
	sg_init_one(&sg, data_buf, len);
	mmc_wait_for_req(host, &mrq);
	err = 0;
	if (opcode == MMC_BUS_TEST_R) {
		for (i = 0; i < len / 4; i++)
			if ((test_buf[i] ^ data_buf[i]) != 0xff) {
				err = -EIO;
				break;
			}
	}
	kfree(data_buf);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return err;
}

int mmc_bus_test(struct mmc_card *card, u8 bus_width)
{
	int width;

	if (bus_width == MMC_BUS_WIDTH_8)
		width = 8;
	else if (bus_width == MMC_BUS_WIDTH_4)
		width = 4;
	else if (bus_width == MMC_BUS_WIDTH_1)
		return 0; /* no need for test */
	else
		return -EINVAL;

	/*
	 * Ignore errors from BUS_TEST_W. BUS_TEST_R will fail if there
	 * is a problem. This improves chances that the test will work.
	 */
	mmc_send_bus_test(card, card->host, MMC_BUS_TEST_W, width);
	return mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
}
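/*
 * Example: why the read-back check in mmc_send_bus_test() XORs against 0xff.
 * BUS_TEST_W drives 0x55 on the first byte; per the bus testing procedure
 * the card answers BUS_TEST_R with the inverted pattern, so on a correctly
 * wired bus 0x55 ^ 0xaa == 0xff for every tested lane.
 */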
static int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status)
{
	struct mmc_command cmd = {};
	unsigned int opcode;
	int err;

	opcode = card->ext_csd.hpi_cmd;
	if (opcode == MMC_STOP_TRANSMISSION)
		cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
	else if (opcode == MMC_SEND_STATUS)
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	cmd.opcode = opcode;
	cmd.arg = card->rca << 16 | 1;

	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		pr_warn("%s: error %d interrupting operation. "
			"HPI command response %#x\n", mmc_hostname(card->host),
			err, cmd.resp[0]);
		return err;
	}
	if (status)
		*status = cmd.resp[0];

	return 0;
}

/**
 * mmc_interrupt_hpi - Issue a High Priority Interrupt
 * @card: the MMC card associated with the HPI transfer
 *
 * Issue a High Priority Interrupt, then poll the card status until it has
 * left the programming state.
 */
int mmc_interrupt_hpi(struct mmc_card *card)
{
	int err;
	u32 status;
	unsigned long prg_wait;

	if (!card->ext_csd.hpi_en) {
		pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
		return 1;
	}

	err = mmc_send_status(card, &status);
	if (err) {
		pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
		goto out;
	}

	switch (R1_CURRENT_STATE(status)) {
	case R1_STATE_IDLE:
	case R1_STATE_READY:
	case R1_STATE_STBY:
	case R1_STATE_TRAN:
		/*
		 * In idle and transfer states, HPI is not needed and the caller
		 * can issue the next intended command immediately
		 */
		goto out;
	case R1_STATE_PRG:
		break;
	default:
		/* In all other states, it's illegal to issue HPI */
		pr_debug("%s: HPI cannot be sent. Card state=%d\n",
			 mmc_hostname(card->host), R1_CURRENT_STATE(status));
		err = -EINVAL;
		goto out;
	}

	err = mmc_send_hpi_cmd(card, &status);
	if (err)
		goto out;

	prg_wait = jiffies + msecs_to_jiffies(card->ext_csd.out_of_int_time);
	do {
		err = mmc_send_status(card, &status);

		if (!err && R1_CURRENT_STATE(status) == R1_STATE_TRAN)
			break;
		if (time_after(jiffies, prg_wait))
			err = -ETIMEDOUT;
	} while (!err);

out:
	return err;
}

int mmc_can_ext_csd(struct mmc_card *card)
{
	return (card && card->csd.mmca_vsn > CSD_SPEC_VER_3);
}

static int mmc_read_bkops_status(struct mmc_card *card)
{
	int err;
	u8 *ext_csd;

	err = mmc_get_ext_csd(card, &ext_csd);
	if (err)
		return err;

	card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS];
	card->ext_csd.raw_exception_status = ext_csd[EXT_CSD_EXP_EVENTS_STATUS];
	kfree(ext_csd);
	return 0;
}

/**
 * mmc_run_bkops - Run BKOPS for supported cards
 * @card: MMC card to run BKOPS for
 *
 * Run background operations synchronously for cards having manual BKOPS
 * enabled and in case it reports urgent BKOPS level.
 */
void mmc_run_bkops(struct mmc_card *card)
{
	int err;

	if (!card->ext_csd.man_bkops_en)
		return;

	err = mmc_read_bkops_status(card);
	if (err) {
		pr_err("%s: Failed to read bkops status: %d\n",
		       mmc_hostname(card->host), err);
		return;
	}

	if (!card->ext_csd.raw_bkops_status ||
	    card->ext_csd.raw_bkops_status < EXT_CSD_BKOPS_LEVEL_2)
		return;

	mmc_retune_hold(card->host);

	/*
	 * For urgent BKOPS status, LEVEL_2 and higher, let's execute
	 * synchronously. In the future, we may consider starting BKOPS for
	 * less urgent levels from an asynchronous background task, when idle.
	 */
	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_BKOPS_START, 1, MMC_BKOPS_TIMEOUT_MS);
	if (err)
		pr_warn("%s: Error %d starting bkops\n",
			mmc_hostname(card->host), err);

	mmc_retune_release(card->host);
}
EXPORT_SYMBOL(mmc_run_bkops);
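/*
 * Example: how the level gate in mmc_run_bkops() above reads for each
 * BKOPS_STATUS value (levels as defined by JEDEC):
 *
 *	0 - no operations required              -> return early
 *	1 - operations outstanding, non-critical -> return early
 *	2 - performance being impacted          -> run BKOPS synchronously
 *	3 - critical                            -> run BKOPS synchronously
 */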
/*
 * Flush the cache to the non-volatile storage.
 */
int mmc_flush_cache(struct mmc_card *card)
{
	int err = 0;

	if (mmc_card_mmc(card) &&
	    (card->ext_csd.cache_size > 0) &&
	    (card->ext_csd.cache_ctrl & 1)) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_FLUSH_CACHE, 1,
				 MMC_CACHE_FLUSH_TIMEOUT_MS);
		if (err)
			pr_err("%s: cache flush error %d\n",
			       mmc_hostname(card->host), err);
	}

	return err;
}
EXPORT_SYMBOL(mmc_flush_cache);

static int mmc_cmdq_switch(struct mmc_card *card, bool enable)
{
	u8 val = enable ? EXT_CSD_CMDQ_MODE_ENABLED : 0;
	int err;

	if (!card->ext_csd.cmdq_support)
		return -EOPNOTSUPP;

	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_CMDQ_MODE_EN,
			 val, card->ext_csd.generic_cmd6_time);
	if (!err)
		card->ext_csd.cmdq_en = enable;

	return err;
}

int mmc_cmdq_enable(struct mmc_card *card)
{
	return mmc_cmdq_switch(card, true);
}
EXPORT_SYMBOL_GPL(mmc_cmdq_enable);

int mmc_cmdq_disable(struct mmc_card *card)
{
	return mmc_cmdq_switch(card, false);
}
EXPORT_SYMBOL_GPL(mmc_cmdq_disable);
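/*
 * Example (illustrative caller): command queuing must be turned off around
 * commands that cannot be queued and re-enabled afterwards, roughly as the
 * block layer does when switching partitions.
 *
 *	if (card->ext_csd.cmdq_en)
 *		err = mmc_cmdq_disable(card);
 *	...
 *	if (!err && card->reenable_cmdq && !card->ext_csd.cmdq_en)
 *		err = mmc_cmdq_enable(card);
 */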