// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * linux/drivers/mmc/core/mmc_ops.h
 *
 * Copyright 2006-2007 Pierre Ossman
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/scatterlist.h>

#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/mmc.h>

#include "core.h"
#include "card.h"
#include "host.h"
#include "mmc_ops.h"

/* Worst-case timeouts for operations whose duration the card itself controls. */
#define MMC_BKOPS_TIMEOUT_MS		(120 * 1000) /* 120s */
#define MMC_CACHE_FLUSH_TIMEOUT_MS	(30 * 1000) /* 30s */
#define MMC_SANITIZE_TIMEOUT_MS		(240 * 1000) /* 240s */

/*
 * Expected contents of the tuning block read back by mmc_send_tuning() on a
 * 4-bit bus; the received data is memcmp()'d against this pattern.
 */
static const u8 tuning_blk_pattern_4bit[] = {
	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
};

/* As above, but for an 8-bit bus (twice the block size). */
static const u8 tuning_blk_pattern_8bit[] = {
	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
};

/*
 * Issue CMD13 (SEND_STATUS) and optionally return the raw R1/R2(SPI)
 * response word. @retries is passed straight to mmc_wait_for_cmd().
 * Returns 0 on success or a negative error code.
 */
int __mmc_send_status(struct mmc_card *card, u32 *status, unsigned int retries)
{
	int err;
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SEND_STATUS;
	if (!mmc_host_is_spi(card->host))
		cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(card->host, &cmd, retries);
	if (err)
		return err;

	/* NOTE: callers are required to understand the difference
	 * between "native" and SPI format status words!
	 */
	if (status)
		*status = cmd.resp[0];

	return 0;
}
EXPORT_SYMBOL_GPL(__mmc_send_status);

/* Convenience wrapper: SEND_STATUS with the default retry count. */
int mmc_send_status(struct mmc_card *card, u32 *status)
{
	return __mmc_send_status(card, status, MMC_CMD_RETRIES);
}
EXPORT_SYMBOL_GPL(mmc_send_status);

/*
 * Issue CMD7 (SELECT/DESELECT_CARD). A NULL @card deselects (arg 0, no
 * response expected); otherwise the card's RCA selects it.
 */
static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card)
{
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SELECT_CARD;

	if (card) {
		cmd.arg = card->rca << 16;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
	} else {
		cmd.arg = 0;
		cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
	}

	return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
}

/* Select @card on its host (CMD7 with the card's RCA). */
int mmc_select_card(struct mmc_card *card)
{
	return _mmc_select_card(card->host, card);
}

/* Deselect whatever card is currently selected on @host (CMD7, arg 0). */
int mmc_deselect_cards(struct mmc_host *host)
{
	return _mmc_select_card(host, NULL);
}

/*
 * Write the value specified in the device tree or board code into the optional
 * 16 bit Driver Stage Register. This can be used to tune raise/fall times and
 * drive strength of the DAT and CMD outputs. The actual meaning of a given
 * value is hardware dependent.
 * The presence of the DSR register can be determined from the CSD register,
 * bit 76.
 */
int mmc_set_dsr(struct mmc_host *host)
{
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SET_DSR;

	/* DSR value occupies the upper 16 bits of the CMD4 argument. */
	cmd.arg = (host->dsr << 16) | 0xffff;
	cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;

	return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
}

/*
 * Issue CMD0 (GO_IDLE_STATE) to reset the card, taking care of the
 * chip-select handling that distinguishes native MMC from SPI mode.
 */
int mmc_go_idle(struct mmc_host *host)
{
	int err;
	struct mmc_command cmd = {};

	/*
	 * Non-SPI hosts need to prevent chipselect going active during
	 * GO_IDLE; that would put chips into SPI mode. Remind them of
	 * that in case of hardware that won't pull up DAT3/nCS otherwise.
	 *
	 * SPI hosts ignore ios.chip_select; it's managed according to
	 * rules that must accommodate non-MMC slaves which this layer
	 * won't even know about.
	 */
	if (!mmc_host_is_spi(host)) {
		mmc_set_chip_select(host, MMC_CS_HIGH);
		mmc_delay(1);
	}

	cmd.opcode = MMC_GO_IDLE_STATE;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC;

	err = mmc_wait_for_cmd(host, &cmd, 0);

	mmc_delay(1);

	if (!mmc_host_is_spi(host)) {
		mmc_set_chip_select(host, MMC_CS_DONTCARE);
		mmc_delay(1);
	}

	/* CRC state is reset by CMD0; re-enable it explicitly if needed. */
	host->use_spi_crc = 0;

	return err;
}

/*
 * Issue CMD1 (SEND_OP_COND), polling until the card leaves its busy/idle
 * reset phase or 100 attempts (10 ms apart) have elapsed. On native (non-SPI)
 * hosts the final OCR response is stored through @rocr when non-NULL.
 */
int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
{
	struct mmc_command cmd = {};
	int i, err = 0;

	cmd.opcode = MMC_SEND_OP_COND;
	cmd.arg = mmc_host_is_spi(host) ? 0 : ocr;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;

	for (i = 100; i; i--) {
		err = mmc_wait_for_cmd(host, &cmd, 0);
		if (err)
			break;

		/* wait until reset completes */
		if (mmc_host_is_spi(host)) {
			if (!(cmd.resp[0] & R1_SPI_IDLE))
				break;
		} else {
			if (cmd.resp[0] & MMC_CARD_BUSY)
				break;
		}

		err = -ETIMEDOUT;

		mmc_delay(10);

		/*
		 * According to eMMC specification v5.1 section 6.4.3, we
		 * should issue CMD1 repeatedly in the idle state until
		 * the eMMC is ready. Otherwise some eMMC devices seem to enter
		 * the inactive mode after mmc_init_card() issued CMD0 when
		 * the eMMC device is busy.
		 */
		if (!ocr && !mmc_host_is_spi(host))
			cmd.arg = cmd.resp[0] | BIT(30);
	}

	if (rocr && !mmc_host_is_spi(host))
		*rocr = cmd.resp[0];

	return err;
}

/* Issue CMD3 (SET_RELATIVE_ADDR) to assign the card its RCA. */
int mmc_set_relative_addr(struct mmc_card *card)
{
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SET_RELATIVE_ADDR;
	cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	return mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
}

/*
 * Fetch a CSD/CID register via a native R2-response command (@opcode) and
 * copy the four response words into @cxd.
 */
static int
mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode)
{
	int err;
	struct mmc_command cmd = {};

	cmd.opcode = opcode;
	cmd.arg = arg;
	cmd.flags = MMC_RSP_R2 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
	if (err)
		return err;

	memcpy(cxd, cmd.resp, sizeof(u32) * 4);

	return 0;
}

/*
 * NOTE: void *buf, caller for the buf is required to use DMA-capable
 * buffer or on-stack buffer (with some overhead in callee).
 */
static int
mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
		u32 opcode, void *buf, unsigned len)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;

	mrq.cmd = &cmd;
	mrq.data = &data;

	cmd.opcode = opcode;
	cmd.arg = 0;

	/* NOTE HACK: the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID. Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = len;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;

	sg_init_one(&sg, buf, len);

	if (opcode == MMC_SEND_CSD || opcode == MMC_SEND_CID) {
		/*
		 * The spec states that CSD and CID accesses have a timeout
		 * of 64 clock cycles.
		 */
		data.timeout_ns = 0;
		data.timeout_clks = 64;
	} else
		mmc_set_data_timeout(&data, card);

	mmc_wait_for_req(host, &mrq);

	/* Command error takes precedence over a data-phase error. */
	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return 0;
}

/*
 * SPI-mode CSD/CID read: fetch the 16-byte register as a data block into a
 * DMA-safe bounce buffer and convert the big-endian words to host order.
 */
static int mmc_spi_send_cxd(struct mmc_host *host, u32 *cxd, u32 opcode)
{
	int ret, i;
	__be32 *cxd_tmp;

	cxd_tmp = kzalloc(16, GFP_KERNEL);
	if (!cxd_tmp)
		return -ENOMEM;

	ret = mmc_send_cxd_data(NULL, host, opcode, cxd_tmp, 16);
	if (ret)
		goto err;

	for (i = 0; i < 4; i++)
		cxd[i] = be32_to_cpu(cxd_tmp[i]);

err:
	kfree(cxd_tmp);
	return ret;
}

/* Read the card's CSD register, dispatching on SPI vs. native mode. */
int mmc_send_csd(struct mmc_card *card, u32 *csd)
{
	if (mmc_host_is_spi(card->host))
		return mmc_spi_send_cxd(card->host, csd, MMC_SEND_CSD);

	return mmc_send_cxd_native(card->host, card->rca << 16, csd,
				MMC_SEND_CSD);
}

/* Read a CID register; native mode uses broadcast CMD2 (ALL_SEND_CID). */
int mmc_send_cid(struct mmc_host *host, u32 *cid)
{
	if (mmc_host_is_spi(host))
		return mmc_spi_send_cxd(host, cid, MMC_SEND_CID);

	return mmc_send_cxd_native(host, 0, cid, MMC_ALL_SEND_CID);
}

/*
 * Read the 512-byte EXT_CSD register into a freshly allocated buffer.
 * On success *@new_ext_csd owns the buffer and the caller must kfree() it;
 * on failure the buffer is freed here and *@new_ext_csd is left untouched.
 */
int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)
{
	int err;
	u8 *ext_csd;

	if (!card || !new_ext_csd)
		return -EINVAL;

	if (!mmc_can_ext_csd(card))
		return -EOPNOTSUPP;

	/*
	 * As the ext_csd is so large and mostly unused, we don't store the
	 * raw block in mmc_card.
	 */
	ext_csd = kzalloc(512, GFP_KERNEL);
	if (!ext_csd)
		return -ENOMEM;

	err = mmc_send_cxd_data(card, card->host, MMC_SEND_EXT_CSD, ext_csd,
				512);
	if (err)
		kfree(ext_csd);
	else
		*new_ext_csd = ext_csd;

	return err;
}
EXPORT_SYMBOL_GPL(mmc_get_ext_csd);

/*
 * SPI-only: read the OCR register (CMD58). @highcap selects the HCS bit in
 * the argument. The OCR lands in resp[1] for the SPI R3 format.
 */
int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp)
{
	struct mmc_command cmd = {};
	int err;

	cmd.opcode = MMC_SPI_READ_OCR;
	cmd.arg = highcap ? (1 << 30) : 0;
	cmd.flags = MMC_RSP_SPI_R3;

	err = mmc_wait_for_cmd(host, &cmd, 0);

	*ocrp = cmd.resp[1];
	return err;
}

/*
 * SPI-only: toggle CRC checking on the bus (CMD59) and mirror the new
 * state into host->use_spi_crc on success.
 */
int mmc_spi_set_crc(struct mmc_host *host, int use_crc)
{
	struct mmc_command cmd = {};
	int err;

	cmd.opcode = MMC_SPI_CRC_ON_OFF;
	cmd.flags = MMC_RSP_SPI_R1;
	cmd.arg = use_crc;

	err = mmc_wait_for_cmd(host, &cmd, 0);
	if (!err)
		host->use_spi_crc = use_crc;
	return err;
}

/*
 * Translate a post-CMD6 status word into an error code: -EBADMSG on a
 * switch/illegal-command error, 0 otherwise (unexpected bits only warn).
 */
static int mmc_switch_status_error(struct mmc_host *host, u32 status)
{
	if (mmc_host_is_spi(host)) {
		if (status & R1_SPI_ILLEGAL_COMMAND)
			return -EBADMSG;
	} else {
		if (R1_STATUS(status))
			pr_warn("%s: unexpected status %#x after switch\n",
				mmc_hostname(host), status);
		if (status & R1_SWITCH_ERROR)
			return -EBADMSG;
	}
	return 0;
}

/* Caller must hold re-tuning */
int mmc_switch_status(struct mmc_card *card, bool crc_err_fatal)
{
	u32 status;
	int err;

	err = mmc_send_status(card, &status);
	/* A CRC error may just mean the bus timing changed; optionally ignore. */
	if (!crc_err_fatal && err == -EILSEQ)
		return 0;
	if (err)
		return err;

	return mmc_switch_status_error(card->host, status);
}

/*
 * Determine whether the card is still busy, preferring the host's HW
 * ->card_busy() hook over a CMD13 status poll. @busy_cmd selects how the
 * status word is interpreted for the operation being waited on.
 */
static int mmc_busy_status(struct mmc_card *card, bool retry_crc_err,
			   enum mmc_busy_cmd busy_cmd, bool *busy)
{
	struct mmc_host *host = card->host;
	u32 status = 0;
	int err;

	if (host->ops->card_busy) {
		*busy = host->ops->card_busy(host);
		return 0;
	}

	err = mmc_send_status(card, &status);
	/* Treat a CRC error as "still busy" when the caller allows retries. */
	if (retry_crc_err && err == -EILSEQ) {
		*busy = true;
		return 0;
	}
	if (err)
		return err;

	switch (busy_cmd) {
	case MMC_BUSY_CMD6:
		err = mmc_switch_status_error(card->host, status);
		break;
	case MMC_BUSY_ERASE:
		err = R1_STATUS(status) ? -EIO : 0;
		break;
	case MMC_BUSY_HPI:
		break;
	default:
		err = -EINVAL;
	}

	if (err)
		return err;

	*busy = !mmc_ready_for_data(status);
	return 0;
}

/*
 * Poll mmc_busy_status() with exponential backoff (32us..32768us) until the
 * card reports ready or @timeout_ms expires. Returns 0, a status error, or
 * -ETIMEDOUT if the card never came out of busy.
 */
static int __mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
			       bool retry_crc_err, enum mmc_busy_cmd busy_cmd)
{
	struct mmc_host *host = card->host;
	int err;
	unsigned long timeout;
	unsigned int udelay = 32, udelay_max = 32768;
	bool expired = false;
	bool busy = false;

	timeout = jiffies + msecs_to_jiffies(timeout_ms) + 1;
	do {
		/*
		 * Due to the possibility of being preempted while polling,
		 * check the expiration time first.
		 */
		expired = time_after(jiffies, timeout);

		err = mmc_busy_status(card, retry_crc_err, busy_cmd, &busy);
		if (err)
			return err;

		/* Timeout if the device still remains busy. */
		if (expired && busy) {
			pr_err("%s: Card stuck being busy! %s\n",
				mmc_hostname(host), __func__);
			return -ETIMEDOUT;
		}

		/* Throttle the polling rate to avoid hogging the CPU. */
		if (busy) {
			usleep_range(udelay, udelay * 2);
			if (udelay < udelay_max)
				udelay *= 2;
		}
	} while (busy);

	return 0;
}

/* Public wrapper: poll for busy without the CRC-retry special case. */
int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
		      enum mmc_busy_cmd busy_cmd)
{
	return __mmc_poll_for_busy(card, timeout_ms, false, busy_cmd);
}

/*
 * Fill in @cmd's response flags (and busy timeout) for a busy-signalling
 * command. Returns true when an R1B response (HW busy detection) is used,
 * false when we fell back to plain R1 because the host's max_busy_timeout
 * is too short for @timeout_ms.
 */
bool mmc_prepare_busy_cmd(struct mmc_host *host, struct mmc_command *cmd,
			  unsigned int timeout_ms)
{
	/*
	 * If the max_busy_timeout of the host is specified, make sure it's
	 * enough to fit the used timeout_ms. In case it's not, let's instruct
	 * the host to avoid HW busy detection, by converting to a R1 response
	 * instead of a R1B. Note, some hosts requires R1B, which also means
	 * they are on their own when it comes to deal with the busy timeout.
	 */
	if (!(host->caps & MMC_CAP_NEED_RSP_BUSY) && host->max_busy_timeout &&
	    (timeout_ms > host->max_busy_timeout)) {
		cmd->flags = MMC_CMD_AC | MMC_RSP_SPI_R1 | MMC_RSP_R1;
		return false;
	}

	cmd->flags = MMC_CMD_AC | MMC_RSP_SPI_R1B | MMC_RSP_R1B;
	cmd->busy_timeout = timeout_ms;
	return true;
}

/**
 * __mmc_switch - modify EXT_CSD register
 * @card: the MMC card associated with the data transfer
 * @set: cmd set values
 * @index: EXT_CSD register index
 * @value: value to program into EXT_CSD register
 * @timeout_ms: timeout (ms) for operation performed by register write,
 *              timeout of zero implies maximum possible timeout
 * @timing: new timing to change to
 * @send_status: send status cmd to poll for busy
 * @retry_crc_err: retry when CRC errors when polling with CMD13 for busy
 * @retries: number of retries
 *
 * Modifies the EXT_CSD register for selected card.
 */
int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
		unsigned int timeout_ms, unsigned char timing,
		bool send_status, bool retry_crc_err, unsigned int retries)
{
	struct mmc_host *host = card->host;
	int err;
	struct mmc_command cmd = {};
	bool use_r1b_resp;
	unsigned char old_timing = host->ios.timing;

	/* Hold re-tuning for the whole CMD6 sequence; released at 'out'. */
	mmc_retune_hold(host);

	if (!timeout_ms) {
		pr_warn("%s: unspecified timeout for CMD6 - use generic\n",
			mmc_hostname(host));
		timeout_ms = card->ext_csd.generic_cmd6_time;
	}

	cmd.opcode = MMC_SWITCH;
	cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
		  (index << 16) |
		  (value << 8) |
		  set;
	use_r1b_resp = mmc_prepare_busy_cmd(host, &cmd, timeout_ms);

	err = mmc_wait_for_cmd(host, &cmd, retries);
	if (err)
		goto out;

	/*If SPI or used HW busy detection above, then we don't need to poll. */
	if (((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp) ||
	    mmc_host_is_spi(host))
		goto out_tim;

	/*
	 * If the host doesn't support HW polling via the ->card_busy() ops and
	 * when it's not allowed to poll by using CMD13, then we need to rely on
	 * waiting the stated timeout to be sufficient.
	 */
	if (!send_status && !host->ops->card_busy) {
		mmc_delay(timeout_ms);
		goto out_tim;
	}

	/* Let's try to poll to find out when the command is completed. */
	err = __mmc_poll_for_busy(card, timeout_ms, retry_crc_err,
				  MMC_BUSY_CMD6);
	if (err)
		goto out;

out_tim:
	/* Switch to new timing before check switch status.
	 */
	if (timing)
		mmc_set_timing(host, timing);

	if (send_status) {
		err = mmc_switch_status(card, true);
		/* Roll the timing back if the switch didn't take effect. */
		if (err && timing)
			mmc_set_timing(host, old_timing);
	}
out:
	mmc_retune_release(host);

	return err;
}

/* Common-case CMD6: poll status, no timing change, default retries. */
int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
		unsigned int timeout_ms)
{
	return __mmc_switch(card, set, index, value, timeout_ms, 0,
			    true, false, MMC_CMD_RETRIES);
}
EXPORT_SYMBOL_GPL(mmc_switch);

/*
 * Send a tuning command (@opcode, e.g. CMD19/CMD21), read back the tuning
 * block and compare it against the expected pattern for the current bus
 * width. Returns 0 on match, -EIO on mismatch, or the cmd/data error.
 * The raw command error is also reported through @cmd_error when non-NULL.
 */
int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;
	struct mmc_ios *ios = &host->ios;
	const u8 *tuning_block_pattern;
	int size, err = 0;
	u8 *data_buf;

	if (ios->bus_width == MMC_BUS_WIDTH_8) {
		tuning_block_pattern = tuning_blk_pattern_8bit;
		size = sizeof(tuning_blk_pattern_8bit);
	} else if (ios->bus_width == MMC_BUS_WIDTH_4) {
		tuning_block_pattern = tuning_blk_pattern_4bit;
		size = sizeof(tuning_blk_pattern_4bit);
	} else
		return -EINVAL;

	/* Heap buffer: the receive buffer must be DMA-capable. */
	data_buf = kzalloc(size, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;

	mrq.cmd = &cmd;
	mrq.data = &data;

	cmd.opcode = opcode;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = size;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	/*
	 * According to the tuning specs, Tuning process
	 * is normally shorter 40 executions of CMD19,
	 * and timeout value should be shorter than 150 ms
	 */
	data.timeout_ns = 150 * NSEC_PER_MSEC;

	data.sg = &sg;
	data.sg_len = 1;
	sg_init_one(&sg, data_buf, size);

	mmc_wait_for_req(host, &mrq);

	if (cmd_error)
		*cmd_error = cmd.error;

	if (cmd.error) {
		err = cmd.error;
		goto out;
	}

	if (data.error) {
		err = data.error;
		goto out;
	}

	if (memcmp(data_buf, tuning_block_pattern, size))
		err = -EIO;

out:
	kfree(data_buf);
	return err;
}
EXPORT_SYMBOL_GPL(mmc_send_tuning);

/*
 * Abort an in-progress tuning sequence with CMD12, but only for eMMC HS200
 * tuning (see comment below). Returns 0 when there is nothing to do.
 */
int mmc_abort_tuning(struct mmc_host *host, u32 opcode)
{
	struct mmc_command cmd = {};

	/*
	 * eMMC specification specifies that CMD12 can be used to stop a tuning
	 * command, but SD specification does not, so do nothing unless it is
	 * eMMC.
	 */
	if (opcode != MMC_SEND_TUNING_BLOCK_HS200)
		return 0;

	cmd.opcode = MMC_STOP_TRANSMISSION;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;

	/*
	 * For drivers that override R1 to R1b, set an arbitrary timeout based
	 * on the tuning timeout i.e. 150ms.
	 */
	cmd.busy_timeout = 150;

	return mmc_wait_for_cmd(host, &cmd, 0);
}
EXPORT_SYMBOL_GPL(mmc_abort_tuning);

/*
 * Run one leg of the CMD19/CMD14 bus test: write the test pattern
 * (MMC_BUS_TEST_W) or read it back (MMC_BUS_TEST_R) over a @len-byte-wide
 * transfer. On a read, the card is expected to return the bitwise inverse
 * of the written pattern; any byte whose XOR with the pattern is not 0xff
 * indicates a broken data line.
 */
static int
mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
		  u8 len)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;
	u8 *data_buf;
	u8 *test_buf;
	int i, err;
	static u8 testdata_8bit[8] = { 0x55, 0xaa, 0, 0, 0, 0, 0, 0 };
	static u8 testdata_4bit[4] = { 0x5a, 0, 0, 0 };

	/* dma onto stack is unsafe/nonportable, but callers to this
	 * routine normally provide temporary on-stack buffers ...
	 */
	data_buf = kmalloc(len, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;

	if (len == 8)
		test_buf = testdata_8bit;
	else if (len == 4)
		test_buf = testdata_4bit;
	else {
		pr_err("%s: Invalid bus_width %d\n",
			mmc_hostname(host), len);
		kfree(data_buf);
		return -EINVAL;
	}

	if (opcode == MMC_BUS_TEST_W)
		memcpy(data_buf, test_buf, len);

	mrq.cmd = &cmd;
	mrq.data = &data;
	cmd.opcode = opcode;
	cmd.arg = 0;

	/* NOTE HACK: the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID. Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = len;
	data.blocks = 1;
	if (opcode == MMC_BUS_TEST_R)
		data.flags = MMC_DATA_READ;
	else
		data.flags = MMC_DATA_WRITE;

	data.sg = &sg;
	data.sg_len = 1;
	mmc_set_data_timeout(&data, card);
	sg_init_one(&sg, data_buf, len);
	mmc_wait_for_req(host, &mrq);
	err = 0;
	if (opcode == MMC_BUS_TEST_R) {
		/*
		 * NOTE(review): only the first len/4 bytes are compared
		 * (2 bytes for 8-bit, 1 byte for 4-bit) — presumably only
		 * those bytes carry meaningful pattern data; confirm against
		 * the bus testing procedure in the eMMC spec.
		 */
		for (i = 0; i < len / 4; i++)
			if ((test_buf[i] ^ data_buf[i]) != 0xff) {
				err = -EIO;
				break;
			}
	}
	kfree(data_buf);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return err;
}

/*
 * Verify the data bus at @bus_width using the BUS_TEST_W/BUS_TEST_R pair.
 * A 1-bit bus needs no test; unknown widths are rejected.
 */
int mmc_bus_test(struct mmc_card *card, u8 bus_width)
{
	int width;

	if (bus_width == MMC_BUS_WIDTH_8)
		width = 8;
	else if (bus_width == MMC_BUS_WIDTH_4)
		width = 4;
	else if (bus_width == MMC_BUS_WIDTH_1)
		return 0; /* no need for test */
	else
		return -EINVAL;

	/*
	 * Ignore errors from BUS_TEST_W. BUS_TEST_R will fail if there
	 * is a problem. This improves chances that the test will work.
	 */
	mmc_send_bus_test(card, card->host, MMC_BUS_TEST_W, width);
	return mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
}

/*
 * Issue the card's HPI command (CMD12 or CMD13 per EXT_CSD) to interrupt an
 * ongoing programming operation, then wait — via HW busy detection when
 * possible, otherwise by polling — for the card to leave the busy state.
 */
static int mmc_send_hpi_cmd(struct mmc_card *card)
{
	unsigned int busy_timeout_ms = card->ext_csd.out_of_int_time;
	struct mmc_host *host = card->host;
	bool use_r1b_resp = false;
	struct mmc_command cmd = {};
	int err;

	cmd.opcode = card->ext_csd.hpi_cmd;
	cmd.arg = card->rca << 16 | 1;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	if (cmd.opcode == MMC_STOP_TRANSMISSION)
		use_r1b_resp = mmc_prepare_busy_cmd(host, &cmd,
						    busy_timeout_ms);

	err = mmc_wait_for_cmd(host, &cmd, 0);
	if (err) {
		pr_warn("%s: HPI error %d. Command response %#x\n",
			mmc_hostname(host), err, cmd.resp[0]);
		return err;
	}

	/* No need to poll when using HW busy detection. */
	if (host->caps & MMC_CAP_WAIT_WHILE_BUSY && use_r1b_resp)
		return 0;

	/* Let's poll to find out when the HPI request completes. */
	return mmc_poll_for_busy(card, busy_timeout_ms, MMC_BUSY_HPI);
}

/**
 * mmc_interrupt_hpi - Issue for High priority Interrupt
 * @card: the MMC card associated with the HPI transfer
 *
 * Issued High Priority Interrupt, and check for card status
 * until out-of prg-state.
 */
static int mmc_interrupt_hpi(struct mmc_card *card)
{
	int err;
	u32 status;

	if (!card->ext_csd.hpi_en) {
		pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
		/* NOTE(review): returns positive 1 rather than a -E code. */
		return 1;
	}

	err = mmc_send_status(card, &status);
	if (err) {
		pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
		goto out;
	}

	switch (R1_CURRENT_STATE(status)) {
	case R1_STATE_IDLE:
	case R1_STATE_READY:
	case R1_STATE_STBY:
	case R1_STATE_TRAN:
		/*
		 * In idle and transfer states, HPI is not needed and the caller
		 * can issue the next intended command immediately
		 */
		goto out;
	case R1_STATE_PRG:
		break;
	default:
		/* In all other states, it's illegal to issue HPI */
		pr_debug("%s: HPI cannot be sent. Card state=%d\n",
			 mmc_hostname(card->host), R1_CURRENT_STATE(status));
		err = -EINVAL;
		goto out;
	}

	err = mmc_send_hpi_cmd(card);
out:
	return err;
}

/* EXT_CSD exists only for MMC spec version > 3 (CSD_SPEC_VER_3). */
int mmc_can_ext_csd(struct mmc_card *card)
{
	return (card && card->csd.mmca_vsn > CSD_SPEC_VER_3);
}

/*
 * Refresh the cached BKOPS and exception-event status bytes from EXT_CSD.
 */
static int mmc_read_bkops_status(struct mmc_card *card)
{
	int err;
	u8 *ext_csd;

	err = mmc_get_ext_csd(card, &ext_csd);
	if (err)
		return err;

	card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS];
	card->ext_csd.raw_exception_status = ext_csd[EXT_CSD_EXP_EVENTS_STATUS];
	kfree(ext_csd);
	return 0;
}

/**
 * mmc_run_bkops - Run BKOPS for supported cards
 * @card: MMC card to run BKOPS for
 *
 * Run background operations synchronously for cards having manual BKOPS
 * enabled and in case it reports urgent BKOPS level.
 */
void mmc_run_bkops(struct mmc_card *card)
{
	int err;

	if (!card->ext_csd.man_bkops_en)
		return;

	err = mmc_read_bkops_status(card);
	if (err) {
		pr_err("%s: Failed to read bkops status: %d\n",
		       mmc_hostname(card->host), err);
		return;
	}

	/* Only act on urgent levels (LEVEL_2 and above). */
	if (!card->ext_csd.raw_bkops_status ||
	    card->ext_csd.raw_bkops_status < EXT_CSD_BKOPS_LEVEL_2)
		return;

	mmc_retune_hold(card->host);

	/*
	 * For urgent BKOPS status, LEVEL_2 and higher, let's execute
	 * synchronously. Future wise, we may consider to start BKOPS, for less
	 * urgent levels by using an asynchronous background task, when idle.
	 */
	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_BKOPS_START, 1, MMC_BKOPS_TIMEOUT_MS);
	if (err)
		pr_warn("%s: Error %d starting bkops\n",
			mmc_hostname(card->host), err);

	mmc_retune_release(card->host);
}
EXPORT_SYMBOL(mmc_run_bkops);

/*
 * Flush the cache to the non-volatile storage.
 */
int mmc_flush_cache(struct mmc_card *card)
{
	int err = 0;

	if (mmc_cache_enabled(card->host)) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_FLUSH_CACHE, 1,
				 MMC_CACHE_FLUSH_TIMEOUT_MS);
		if (err)
			pr_err("%s: cache flush error %d\n",
			       mmc_hostname(card->host), err);
	}

	return err;
}
EXPORT_SYMBOL(mmc_flush_cache);

/*
 * Enable or disable the command queue via EXT_CSD_CMDQ_MODE_EN, mirroring
 * the new state into card->ext_csd.cmdq_en on success.
 */
static int mmc_cmdq_switch(struct mmc_card *card, bool enable)
{
	u8 val = enable ? EXT_CSD_CMDQ_MODE_ENABLED : 0;
	int err;

	if (!card->ext_csd.cmdq_support)
		return -EOPNOTSUPP;

	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_CMDQ_MODE_EN,
			 val, card->ext_csd.generic_cmd6_time);
	if (!err)
		card->ext_csd.cmdq_en = enable;

	return err;
}

/* Turn the card's command queue engine on. */
int mmc_cmdq_enable(struct mmc_card *card)
{
	return mmc_cmdq_switch(card, true);
}
EXPORT_SYMBOL_GPL(mmc_cmdq_enable);

/* Turn the card's command queue engine off. */
int mmc_cmdq_disable(struct mmc_card *card)
{
	return mmc_cmdq_switch(card, false);
}
EXPORT_SYMBOL_GPL(mmc_cmdq_disable);

/*
 * Start an EXT_CSD SANITIZE operation and wait up to @timeout_ms (default
 * MMC_SANITIZE_TIMEOUT_MS) for it to finish. On timeout, attempt to abort
 * via HPI so the card returns to the transfer state.
 */
int mmc_sanitize(struct mmc_card *card, unsigned int timeout_ms)
{
	struct mmc_host *host = card->host;
	int err;

	if (!mmc_can_sanitize(card)) {
		pr_warn("%s: Sanitize not supported\n", mmc_hostname(host));
		return -EOPNOTSUPP;
	}

	if (!timeout_ms)
		timeout_ms = MMC_SANITIZE_TIMEOUT_MS;

	pr_debug("%s: Sanitize in progress...\n", mmc_hostname(host));

	mmc_retune_hold(host);

	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_SANITIZE_START,
			   1, timeout_ms, 0, true, false, 0);
	if (err)
		pr_err("%s: Sanitize failed err=%d\n", mmc_hostname(host), err);

	/*
	 * If the sanitize operation timed out, the card is probably still busy
	 * in the R1_STATE_PRG. Rather than continue to wait, let's try to abort
	 * it with a HPI command to get back into R1_STATE_TRAN.
	 */
	if (err == -ETIMEDOUT && !mmc_interrupt_hpi(card))
		pr_warn("%s: Sanitize aborted\n", mmc_hostname(host));

	mmc_retune_release(host);

	pr_debug("%s: Sanitize completed\n", mmc_hostname(host));
	return err;
}
EXPORT_SYMBOL_GPL(mmc_sanitize);