// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * linux/drivers/mmc/core/mmc_ops.h
 *
 * Copyright 2006-2007 Pierre Ossman
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/scatterlist.h>

#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/mmc.h>

#include "core.h"
#include "card.h"
#include "host.h"
#include "mmc_ops.h"

#define MMC_BKOPS_TIMEOUT_MS	(120 * 1000) /* 120s */
#define MMC_SANITIZE_TIMEOUT_MS	(240 * 1000) /* 240s */

/*
 * Expected contents of the tuning block returned by the card during
 * mmc_send_tuning(), for 4-bit and 8-bit bus widths respectively.
 */
static const u8 tuning_blk_pattern_4bit[] = {
	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
};

static const u8 tuning_blk_pattern_8bit[] = {
	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
};

/*
 * Per-call context for mmc_busy_cb(): the card to poll, which command's
 * busy semantics apply, and whether a CMD13 CRC error should be treated
 * as "still busy" rather than a failure.
 */
struct mmc_busy_data {
	struct mmc_card *card;
	bool retry_crc_err;
	enum mmc_busy_cmd busy_cmd;
};

/*
 * Issue MMC_SEND_STATUS (CMD13) with the given number of retries and,
 * when @status is non-NULL, hand back the raw response word.
 */
int __mmc_send_status(struct mmc_card *card, u32 *status, unsigned int retries)
{
	int err;
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SEND_STATUS;
	if (!mmc_host_is_spi(card->host))
		cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(card->host, &cmd, retries);
	if (err)
		return err;

	/* NOTE: callers are required to understand the difference
	 * between "native" and SPI format status words!
	 */
	if (status)
		*status = cmd.resp[0];

	return 0;
}
EXPORT_SYMBOL_GPL(__mmc_send_status);

/* Convenience wrapper around __mmc_send_status() using the default retries. */
int mmc_send_status(struct mmc_card *card, u32 *status)
{
	return __mmc_send_status(card, status, MMC_CMD_RETRIES);
}
EXPORT_SYMBOL_GPL(mmc_send_status);

/*
 * Issue MMC_SELECT_CARD. A NULL @card deselects all cards (arg 0,
 * no response expected).
 */
static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card)
{
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SELECT_CARD;

	if (card) {
		cmd.arg = card->rca << 16;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
	} else {
		cmd.arg = 0;
		cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
	}

	return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
}

int mmc_select_card(struct mmc_card *card)
{

	return _mmc_select_card(card->host, card);
}

int mmc_deselect_cards(struct mmc_host *host)
{
	return _mmc_select_card(host, NULL);
}

/*
 * Write the value specified in the device tree or board code into the optional
 * 16 bit Driver Stage Register. This can be used to tune raise/fall times and
 * drive strength of the DAT and CMD outputs. The actual meaning of a given
 * value is hardware dependant.
 * The presence of the DSR register can be determined from the CSD register,
 * bit 76.
 */
int mmc_set_dsr(struct mmc_host *host)
{
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SET_DSR;

	/* DSR value occupies the upper 16 bits of the argument. */
	cmd.arg = (host->dsr << 16) | 0xffff;
	cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;

	return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
}

/* Send CMD0 (GO_IDLE_STATE) to reset the card to idle state. */
int mmc_go_idle(struct mmc_host *host)
{
	int err;
	struct mmc_command cmd = {};

	/*
	 * Non-SPI hosts need to prevent chipselect going active during
	 * GO_IDLE; that would put chips into SPI mode. Remind them of
	 * that in case of hardware that won't pull up DAT3/nCS otherwise.
	 *
	 * SPI hosts ignore ios.chip_select; it's managed according to
	 * rules that must accommodate non-MMC slaves which this layer
	 * won't even know about.
	 */
	if (!mmc_host_is_spi(host)) {
		mmc_set_chip_select(host, MMC_CS_HIGH);
		mmc_delay(1);
	}

	cmd.opcode = MMC_GO_IDLE_STATE;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC;

	err = mmc_wait_for_cmd(host, &cmd, 0);

	mmc_delay(1);

	if (!mmc_host_is_spi(host)) {
		mmc_set_chip_select(host, MMC_CS_DONTCARE);
		mmc_delay(1);
	}

	/* CMD0 resets the card, so any previous SPI CRC setting is gone. */
	host->use_spi_crc = 0;

	return err;
}

/*
 * Issue CMD1 (SEND_OP_COND) repeatedly, up to 100 times with 10 ms
 * between attempts, until the card reports it is no longer busy.
 * On native (non-SPI) hosts the last OCR response is returned via @rocr.
 */
int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
{
	struct mmc_command cmd = {};
	int i, err = 0;

	cmd.opcode = MMC_SEND_OP_COND;
	cmd.arg = mmc_host_is_spi(host) ? 0 : ocr;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;

	for (i = 100; i; i--) {
		err = mmc_wait_for_cmd(host, &cmd, 0);
		if (err)
			break;

		/* wait until reset completes */
		if (mmc_host_is_spi(host)) {
			if (!(cmd.resp[0] & R1_SPI_IDLE))
				break;
		} else {
			if (cmd.resp[0] & MMC_CARD_BUSY)
				break;
		}

		err = -ETIMEDOUT;

		mmc_delay(10);

		/*
		 * According to eMMC specification v5.1 section 6.4.3, we
		 * should issue CMD1 repeatedly in the idle state until
		 * the eMMC is ready. Otherwise some eMMC devices seem to enter
		 * the inactive mode after mmc_init_card() issued CMD0 when
		 * the eMMC device is busy.
		 */
		if (!ocr && !mmc_host_is_spi(host))
			cmd.arg = cmd.resp[0] | BIT(30);
	}

	if (rocr && !mmc_host_is_spi(host))
		*rocr = cmd.resp[0];

	return err;
}

/* Send CMD3 (SET_RELATIVE_ADDR) to assign the card's RCA. */
int mmc_set_relative_addr(struct mmc_card *card)
{
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SET_RELATIVE_ADDR;
	cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	return mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
}

/*
 * Read the CSD or CID on a native host: these commands carry the register
 * contents in a long (R2) response rather than a data block.
 */
static int
mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode)
{
	int err;
	struct mmc_command cmd = {};

	cmd.opcode = opcode;
	cmd.arg = arg;
	cmd.flags = MMC_RSP_R2 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
	if (err)
		return err;

	memcpy(cxd, cmd.resp, sizeof(u32) * 4);

	return 0;
}

/*
 * NOTE: void *buf, caller for the buf is required to use DMA-capable
 * buffer or on-stack buffer (with some overhead in callee).
 */
int mmc_send_adtc_data(struct mmc_card *card, struct mmc_host *host, u32 opcode,
		       u32 args, void *buf, unsigned len)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;

	mrq.cmd = &cmd;
	mrq.data = &data;

	cmd.opcode = opcode;
	cmd.arg = args;

	/* NOTE HACK: the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID. Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = len;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;

	sg_init_one(&sg, buf, len);

	if (opcode == MMC_SEND_CSD || opcode == MMC_SEND_CID) {
		/*
		 * The spec states that CSR and CID accesses have a timeout
		 * of 64 clock cycles.
		 */
		data.timeout_ns = 0;
		data.timeout_clks = 64;
	} else
		mmc_set_data_timeout(&data, card);

	mmc_wait_for_req(host, &mrq);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return 0;
}

/*
 * In SPI mode the CSD/CID are read as a 16-byte big-endian data block;
 * read it via a DMA-capable bounce buffer and convert to host order.
 */
static int mmc_spi_send_cxd(struct mmc_host *host, u32 *cxd, u32 opcode)
{
	int ret, i;
	__be32 *cxd_tmp;

	cxd_tmp = kzalloc(16, GFP_KERNEL);
	if (!cxd_tmp)
		return -ENOMEM;

	ret = mmc_send_adtc_data(NULL, host, opcode, 0, cxd_tmp, 16);
	if (ret)
		goto err;

	for (i = 0; i < 4; i++)
		cxd[i] = be32_to_cpu(cxd_tmp[i]);

err:
	kfree(cxd_tmp);
	return ret;
}

int mmc_send_csd(struct mmc_card *card, u32 *csd)
{
	if (mmc_host_is_spi(card->host))
		return mmc_spi_send_cxd(card->host, csd, MMC_SEND_CSD);

	return mmc_send_cxd_native(card->host, card->rca << 16, csd,
				   MMC_SEND_CSD);
}

int mmc_send_cid(struct mmc_host *host, u32 *cid)
{
	if (mmc_host_is_spi(host))
		return mmc_spi_send_cxd(host, cid, MMC_SEND_CID);

	/* Native hosts use ALL_SEND_CID (CMD2) with a zero argument. */
	return mmc_send_cxd_native(host, 0, cid, MMC_ALL_SEND_CID);
}

/*
 * Read the 512-byte EXT_CSD register into a freshly allocated buffer.
 * On success the caller owns *@new_ext_csd and must kfree() it.
 */
int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)
{
	int err;
	u8 *ext_csd;

	if (!card || !new_ext_csd)
		return -EINVAL;

	if (!mmc_can_ext_csd(card))
		return -EOPNOTSUPP;

	/*
	 * As the ext_csd is so large and mostly unused, we don't store the
	 * raw block in mmc_card.
	 */
	ext_csd = kzalloc(512, GFP_KERNEL);
	if (!ext_csd)
		return -ENOMEM;

	err = mmc_send_adtc_data(card, card->host, MMC_SEND_EXT_CSD, 0, ext_csd,
				 512);
	if (err)
		kfree(ext_csd);
	else
		*new_ext_csd = ext_csd;

	return err;
}
EXPORT_SYMBOL_GPL(mmc_get_ext_csd);

/* Read the OCR register over SPI (CMD58); @highcap selects the HCS bit. */
int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp)
{
	struct mmc_command cmd = {};
	int err;

	cmd.opcode = MMC_SPI_READ_OCR;
	cmd.arg = highcap ? (1 << 30) : 0;
	cmd.flags = MMC_RSP_SPI_R3;

	err = mmc_wait_for_cmd(host, &cmd, 0);

	/* The OCR is carried in the second response word of SPI R3. */
	*ocrp = cmd.resp[1];
	return err;
}

/* Turn SPI CRC checking on or off (CMD59) and track the new state. */
int mmc_spi_set_crc(struct mmc_host *host, int use_crc)
{
	struct mmc_command cmd = {};
	int err;

	cmd.opcode = MMC_SPI_CRC_ON_OFF;
	cmd.flags = MMC_RSP_SPI_R1;
	cmd.arg = use_crc;

	err = mmc_wait_for_cmd(host, &cmd, 0);
	if (!err)
		host->use_spi_crc = use_crc;
	return err;
}

/*
 * Translate the status word observed after a CMD6 (SWITCH) into an error
 * code; only a switch/illegal-command error is fatal, other error bits
 * are merely reported.
 */
static int mmc_switch_status_error(struct mmc_host *host, u32 status)
{
	if (mmc_host_is_spi(host)) {
		if (status & R1_SPI_ILLEGAL_COMMAND)
			return -EBADMSG;
	} else {
		if (R1_STATUS(status))
			pr_warn("%s: unexpected status %#x after switch\n",
				mmc_hostname(host), status);
		if (status & R1_SWITCH_ERROR)
			return -EBADMSG;
	}
	return 0;
}

/* Caller must hold re-tuning */
int mmc_switch_status(struct mmc_card *card, bool crc_err_fatal)
{
	u32 status;
	int err;

	err = mmc_send_status(card, &status);
	if (!crc_err_fatal && err == -EILSEQ)
		return 0;
	if (err)
		return err;

	return mmc_switch_status_error(card->host, status);
}

/*
 * Busy-detection callback for __mmc_poll_for_busy(): prefer the host's
 * ->card_busy() op when available (except for MMC_BUSY_IO), otherwise
 * poll with CMD13 and interpret the status per the issuing command.
 */
static int mmc_busy_cb(void *cb_data, bool *busy)
{
	struct mmc_busy_data *data = cb_data;
	struct mmc_host *host = data->card->host;
	u32 status = 0;
	int err;

	if (data->busy_cmd != MMC_BUSY_IO && host->ops->card_busy) {
		*busy = host->ops->card_busy(host);
		return 0;
	}

	err = mmc_send_status(data->card, &status);
	if (data->retry_crc_err && err == -EILSEQ) {
		/* Treat a CRC error as "still busy" and keep polling. */
		*busy = true;
		return 0;
	}
	if (err)
		return err;

	switch (data->busy_cmd) {
	case MMC_BUSY_CMD6:
		err = mmc_switch_status_error(host, status);
		break;
	case MMC_BUSY_ERASE:
		err = R1_STATUS(status) ? -EIO : 0;
		break;
	case MMC_BUSY_HPI:
	case MMC_BUSY_EXTR_SINGLE:
	case MMC_BUSY_IO:
		break;
	default:
		err = -EINVAL;
	}

	if (err)
		return err;

	*busy = !mmc_ready_for_data(status);
	return 0;
}

/*
 * Poll @busy_cb until the card reports not-busy or @timeout_ms expires.
 * The poll interval backs off exponentially (32 us doubling up to ~32 ms)
 * to avoid hogging the CPU.
 */
int __mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
			int (*busy_cb)(void *cb_data, bool *busy),
			void *cb_data)
{
	struct mmc_host *host = card->host;
	int err;
	unsigned long timeout;
	unsigned int udelay = 32, udelay_max = 32768;
	bool expired = false;
	bool busy = false;

	timeout = jiffies + msecs_to_jiffies(timeout_ms) + 1;
	do {
		/*
		 * Due to the possibility of being preempted while polling,
		 * check the expiration time first.
		 */
		expired = time_after(jiffies, timeout);

		err = (*busy_cb)(cb_data, &busy);
		if (err)
			return err;

		/* Timeout if the device still remains busy. */
		if (expired && busy) {
			pr_err("%s: Card stuck being busy! %s\n",
			       mmc_hostname(host), __func__);
			return -ETIMEDOUT;
		}

		/* Throttle the polling rate to avoid hogging the CPU. */
		if (busy) {
			usleep_range(udelay, udelay * 2);
			if (udelay < udelay_max)
				udelay *= 2;
		}
	} while (busy);

	return 0;
}

/* Poll for busy using the standard CMD13/->card_busy() callback. */
int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
		      bool retry_crc_err, enum mmc_busy_cmd busy_cmd)
{
	struct mmc_busy_data cb_data;

	cb_data.card = card;
	cb_data.retry_crc_err = retry_crc_err;
	cb_data.busy_cmd = busy_cmd;

	return __mmc_poll_for_busy(card, timeout_ms, &mmc_busy_cb, &cb_data);
}
EXPORT_SYMBOL_GPL(mmc_poll_for_busy);

/*
 * Pick R1B (HW busy detection) or plain R1 for a busy-signalling command.
 * Returns true if R1B was chosen, in which case cmd->busy_timeout is set.
 */
bool mmc_prepare_busy_cmd(struct mmc_host *host, struct mmc_command *cmd,
			  unsigned int timeout_ms)
{
	/*
	 * If the max_busy_timeout of the host is specified, make sure it's
	 * enough to fit the used timeout_ms. In case it's not, let's instruct
	 * the host to avoid HW busy detection, by converting to a R1 response
	 * instead of a R1B. Note, some hosts requires R1B, which also means
	 * they are on their own when it comes to deal with the busy timeout.
	 */
	if (!(host->caps & MMC_CAP_NEED_RSP_BUSY) && host->max_busy_timeout &&
	    (timeout_ms > host->max_busy_timeout)) {
		cmd->flags = MMC_CMD_AC | MMC_RSP_SPI_R1 | MMC_RSP_R1;
		return false;
	}

	cmd->flags = MMC_CMD_AC | MMC_RSP_SPI_R1B | MMC_RSP_R1B;
	cmd->busy_timeout = timeout_ms;
	return true;
}

/**
 * __mmc_switch - modify EXT_CSD register
 * @card: the MMC card associated with the data transfer
 * @set: cmd set values
 * @index: EXT_CSD register index
 * @value: value to program into EXT_CSD register
 * @timeout_ms: timeout (ms) for operation performed by register write,
 *              timeout of zero implies maximum possible timeout
 * @timing: new timing to change to
 * @send_status: send status cmd to poll for busy
 * @retry_crc_err: retry when CRC errors when polling with CMD13 for busy
 * @retries: number of retries
 *
 * Modifies the EXT_CSD register for selected card.
 */
int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
		 unsigned int timeout_ms, unsigned char timing,
		 bool send_status, bool retry_crc_err, unsigned int retries)
{
	struct mmc_host *host = card->host;
	int err;
	struct mmc_command cmd = {};
	bool use_r1b_resp;
	unsigned char old_timing = host->ios.timing;

	mmc_retune_hold(host);

	if (!timeout_ms) {
		pr_warn("%s: unspecified timeout for CMD6 - use generic\n",
			mmc_hostname(host));
		timeout_ms = card->ext_csd.generic_cmd6_time;
	}

	cmd.opcode = MMC_SWITCH;
	cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
		  (index << 16) |
		  (value << 8) |
		  set;
	use_r1b_resp = mmc_prepare_busy_cmd(host, &cmd, timeout_ms);

	err = mmc_wait_for_cmd(host, &cmd, retries);
	if (err)
		goto out;

	/*If SPI or used HW busy detection above, then we don't need to poll. */
	if (((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp) ||
	    mmc_host_is_spi(host))
		goto out_tim;

	/*
	 * If the host doesn't support HW polling via the ->card_busy() ops and
	 * when it's not allowed to poll by using CMD13, then we need to rely on
	 * waiting the stated timeout to be sufficient.
	 */
	if (!send_status && !host->ops->card_busy) {
		mmc_delay(timeout_ms);
		goto out_tim;
	}

	/* Let's try to poll to find out when the command is completed. */
	err = mmc_poll_for_busy(card, timeout_ms, retry_crc_err, MMC_BUSY_CMD6);
	if (err)
		goto out;

out_tim:
	/* Switch to new timing before check switch status. */
	if (timing)
		mmc_set_timing(host, timing);

	if (send_status) {
		err = mmc_switch_status(card, true);
		if (err && timing)
			mmc_set_timing(host, old_timing);
	}
out:
	mmc_retune_release(host);

	return err;
}

int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
	       unsigned int timeout_ms)
{
	return __mmc_switch(card, set, index, value, timeout_ms, 0,
			    true, false, MMC_CMD_RETRIES);
}
EXPORT_SYMBOL_GPL(mmc_switch);

/*
 * Issue a tuning command (CMD19/CMD21) and compare the returned block
 * against the expected pattern for the current bus width. Returns -EIO
 * on a pattern mismatch; the raw command error is reported via @cmd_error.
 */
int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;
	struct mmc_ios *ios = &host->ios;
	const u8 *tuning_block_pattern;
	int size, err = 0;
	u8 *data_buf;

	if (ios->bus_width == MMC_BUS_WIDTH_8) {
		tuning_block_pattern = tuning_blk_pattern_8bit;
		size = sizeof(tuning_blk_pattern_8bit);
	} else if (ios->bus_width == MMC_BUS_WIDTH_4) {
		tuning_block_pattern = tuning_blk_pattern_4bit;
		size = sizeof(tuning_blk_pattern_4bit);
	} else
		return -EINVAL;

	data_buf = kzalloc(size, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;

	mrq.cmd = &cmd;
	mrq.data = &data;

	cmd.opcode = opcode;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = size;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	/*
	 * According to the tuning specs, Tuning process
	 * is normally shorter 40 executions of CMD19,
	 * and timeout value should be shorter than 150 ms
	 */
	data.timeout_ns = 150 * NSEC_PER_MSEC;

	data.sg = &sg;
	data.sg_len = 1;
	sg_init_one(&sg, data_buf, size);

	mmc_wait_for_req(host, &mrq);

	if (cmd_error)
		*cmd_error = cmd.error;

	if (cmd.error) {
		err = cmd.error;
		goto out;
	}

	if (data.error) {
		err = data.error;
		goto out;
	}

	if (memcmp(data_buf, tuning_block_pattern, size))
		err = -EIO;

out:
	kfree(data_buf);
	return err;
}
EXPORT_SYMBOL_GPL(mmc_send_tuning);

int mmc_send_abort_tuning(struct mmc_host *host, u32 opcode)
{
	struct mmc_command cmd = {};

	/*
	 * eMMC specification specifies that CMD12 can be used to stop a tuning
	 * command, but SD specification does not, so do nothing unless it is
	 * eMMC.
	 */
	if (opcode != MMC_SEND_TUNING_BLOCK_HS200)
		return 0;

	cmd.opcode = MMC_STOP_TRANSMISSION;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;

	/*
	 * For drivers that override R1 to R1b, set an arbitrary timeout based
	 * on the tuning timeout i.e. 150ms.
	 */
	cmd.busy_timeout = 150;

	return mmc_wait_for_cmd(host, &cmd, 0);
}
EXPORT_SYMBOL_GPL(mmc_send_abort_tuning);

/*
 * Run one leg of the bus test (CMD19 write / CMD14 read): write a known
 * pattern, then read it back and check the bus lines inverted it.
 */
static int
mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
		  u8 len)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;
	u8 *data_buf;
	u8 *test_buf;
	int i, err;
	static u8 testdata_8bit[8] = { 0x55, 0xaa, 0, 0, 0, 0, 0, 0 };
	static u8 testdata_4bit[4] = { 0x5a, 0, 0, 0 };

	/* dma onto stack is unsafe/nonportable, but callers to this
	 * routine normally provide temporary on-stack buffers ...
	 */
	data_buf = kmalloc(len, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;

	if (len == 8)
		test_buf = testdata_8bit;
	else if (len == 4)
		test_buf = testdata_4bit;
	else {
		pr_err("%s: Invalid bus_width %d\n",
		       mmc_hostname(host), len);
		kfree(data_buf);
		return -EINVAL;
	}

	if (opcode == MMC_BUS_TEST_W)
		memcpy(data_buf, test_buf, len);

	mrq.cmd = &cmd;
	mrq.data = &data;
	cmd.opcode = opcode;
	cmd.arg = 0;

	/* NOTE HACK: the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID. Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = len;
	data.blocks = 1;
	if (opcode == MMC_BUS_TEST_R)
		data.flags = MMC_DATA_READ;
	else
		data.flags = MMC_DATA_WRITE;

	data.sg = &sg;
	data.sg_len = 1;
	mmc_set_data_timeout(&data, card);
	sg_init_one(&sg, data_buf, len);
	mmc_wait_for_req(host, &mrq);
	err = 0;
	if (opcode == MMC_BUS_TEST_R) {
		/*
		 * The card returns the written pattern inverted; note only
		 * the first len / 4 bytes are compared here.
		 */
		for (i = 0; i < len / 4; i++)
			if ((test_buf[i] ^ data_buf[i]) != 0xff) {
				err = -EIO;
				break;
			}
	}
	kfree(data_buf);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return err;
}

int mmc_bus_test(struct mmc_card *card, u8 bus_width)
{
	int width;

	if (bus_width == MMC_BUS_WIDTH_8)
		width = 8;
	else if (bus_width == MMC_BUS_WIDTH_4)
		width = 4;
	else if (bus_width == MMC_BUS_WIDTH_1)
		return 0; /* no need for test */
	else
		return -EINVAL;

	/*
	 * Ignore errors from BUS_TEST_W. BUS_TEST_R will fail if there
	 * is a problem. This improves chances that the test will work.
	 */
	mmc_send_bus_test(card, card->host, MMC_BUS_TEST_W, width);
	return mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
}

/*
 * Issue the card's HPI command and wait until it leaves the programming
 * state, using HW busy detection when the host supports it.
 */
static int mmc_send_hpi_cmd(struct mmc_card *card)
{
	unsigned int busy_timeout_ms = card->ext_csd.out_of_int_time;
	struct mmc_host *host = card->host;
	bool use_r1b_resp = false;
	struct mmc_command cmd = {};
	int err;

	cmd.opcode = card->ext_csd.hpi_cmd;
	cmd.arg = card->rca << 16 | 1;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	if (cmd.opcode == MMC_STOP_TRANSMISSION)
		use_r1b_resp = mmc_prepare_busy_cmd(host, &cmd,
						    busy_timeout_ms);

	err = mmc_wait_for_cmd(host, &cmd, 0);
	if (err) {
		pr_warn("%s: HPI error %d. Command response %#x\n",
			mmc_hostname(host), err, cmd.resp[0]);
		return err;
	}

	/* No need to poll when using HW busy detection. */
	if (host->caps & MMC_CAP_WAIT_WHILE_BUSY && use_r1b_resp)
		return 0;

	/* Let's poll to find out when the HPI request completes. */
	return mmc_poll_for_busy(card, busy_timeout_ms, false, MMC_BUSY_HPI);
}

/**
 * mmc_interrupt_hpi - Issue for High priority Interrupt
 * @card: the MMC card associated with the HPI transfer
 *
 * Issued High Priority Interrupt, and check for card status
 * until out-of prg-state.
 */
static int mmc_interrupt_hpi(struct mmc_card *card)
{
	int err;
	u32 status;

	if (!card->ext_csd.hpi_en) {
		pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
		return 1;
	}

	err = mmc_send_status(card, &status);
	if (err) {
		pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
		goto out;
	}

	switch (R1_CURRENT_STATE(status)) {
	case R1_STATE_IDLE:
	case R1_STATE_READY:
	case R1_STATE_STBY:
	case R1_STATE_TRAN:
		/*
		 * In idle and transfer states, HPI is not needed and the caller
		 * can issue the next intended command immediately
		 */
		goto out;
	case R1_STATE_PRG:
		break;
	default:
		/* In all other states, it's illegal to issue HPI */
		pr_debug("%s: HPI cannot be sent. Card state=%d\n",
			 mmc_hostname(card->host), R1_CURRENT_STATE(status));
		err = -EINVAL;
		goto out;
	}

	err = mmc_send_hpi_cmd(card);
out:
	return err;
}

/* EXT_CSD exists only for cards with CSD spec version above 3 (MMCA 4.0+). */
int mmc_can_ext_csd(struct mmc_card *card)
{
	return (card && card->csd.mmca_vsn > CSD_SPEC_VER_3);
}

/* Refresh the cached BKOPS and exception-event status from EXT_CSD. */
static int mmc_read_bkops_status(struct mmc_card *card)
{
	int err;
	u8 *ext_csd;

	err = mmc_get_ext_csd(card, &ext_csd);
	if (err)
		return err;

	card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS];
	card->ext_csd.raw_exception_status = ext_csd[EXT_CSD_EXP_EVENTS_STATUS];
	kfree(ext_csd);
	return 0;
}

/**
 * mmc_run_bkops - Run BKOPS for supported cards
 * @card: MMC card to run BKOPS for
 *
 * Run background operations synchronously for cards having manual BKOPS
 * enabled and in case it reports urgent BKOPS level.
 */
void mmc_run_bkops(struct mmc_card *card)
{
	int err;

	if (!card->ext_csd.man_bkops_en)
		return;

	err = mmc_read_bkops_status(card);
	if (err) {
		pr_err("%s: Failed to read bkops status: %d\n",
		       mmc_hostname(card->host), err);
		return;
	}

	if (!card->ext_csd.raw_bkops_status ||
	    card->ext_csd.raw_bkops_status < EXT_CSD_BKOPS_LEVEL_2)
		return;

	mmc_retune_hold(card->host);

	/*
	 * For urgent BKOPS status, LEVEL_2 and higher, let's execute
	 * synchronously. Future wise, we may consider to start BKOPS, for less
	 * urgent levels by using an asynchronous background task, when idle.
	 */
	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_BKOPS_START, 1, MMC_BKOPS_TIMEOUT_MS);
	if (err)
		pr_warn("%s: Error %d starting bkops\n",
			mmc_hostname(card->host), err);

	mmc_retune_release(card->host);
}
EXPORT_SYMBOL(mmc_run_bkops);

/*
 * Enable or disable the eMMC command queue via EXT_CSD and mirror the
 * result in card->ext_csd.cmdq_en.
 */
static int mmc_cmdq_switch(struct mmc_card *card, bool enable)
{
	u8 val = enable ? EXT_CSD_CMDQ_MODE_ENABLED : 0;
	int err;

	if (!card->ext_csd.cmdq_support)
		return -EOPNOTSUPP;

	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_CMDQ_MODE_EN,
			 val, card->ext_csd.generic_cmd6_time);
	if (!err)
		card->ext_csd.cmdq_en = enable;

	return err;
}

int mmc_cmdq_enable(struct mmc_card *card)
{
	return mmc_cmdq_switch(card, true);
}
EXPORT_SYMBOL_GPL(mmc_cmdq_enable);

int mmc_cmdq_disable(struct mmc_card *card)
{
	return mmc_cmdq_switch(card, false);
}
EXPORT_SYMBOL_GPL(mmc_cmdq_disable);

/*
 * Start a sanitize operation (EXT_CSD SANITIZE_START) and wait for it
 * to finish; on timeout, attempt an HPI abort to regain the card.
 */
int mmc_sanitize(struct mmc_card *card, unsigned int timeout_ms)
{
	struct mmc_host *host = card->host;
	int err;

	if (!mmc_can_sanitize(card)) {
		pr_warn("%s: Sanitize not supported\n", mmc_hostname(host));
		return -EOPNOTSUPP;
	}

	if (!timeout_ms)
		timeout_ms = MMC_SANITIZE_TIMEOUT_MS;

	pr_debug("%s: Sanitize in progress...\n", mmc_hostname(host));

	mmc_retune_hold(host);

	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_SANITIZE_START,
			   1, timeout_ms, 0, true, false, 0);
	if (err)
		pr_err("%s: Sanitize failed err=%d\n", mmc_hostname(host), err);

	/*
	 * If the sanitize operation timed out, the card is probably still busy
	 * in the R1_STATE_PRG. Rather than continue to wait, let's try to abort
	 * it with a HPI command to get back into R1_STATE_TRAN.
	 */
	if (err == -ETIMEDOUT && !mmc_interrupt_hpi(card))
		pr_warn("%s: Sanitize aborted\n", mmc_hostname(host));

	mmc_retune_release(host);

	pr_debug("%s: Sanitize completed\n", mmc_hostname(host));
	return err;
}
EXPORT_SYMBOL_GPL(mmc_sanitize);