// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * linux/drivers/mmc/core/mmc_ops.c
 *
 * Copyright 2006-2007 Pierre Ossman
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/scatterlist.h>

#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/mmc.h>

#include "core.h"
#include "card.h"
#include "host.h"
#include "mmc_ops.h"

#define MMC_BKOPS_TIMEOUT_MS		(120 * 1000) /* 120s */
#define MMC_SANITIZE_TIMEOUT_MS		(240 * 1000) /* 240s */

static const u8 tuning_blk_pattern_4bit[] = {
	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
};

static const u8 tuning_blk_pattern_8bit[] = {
	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
};

struct mmc_busy_data {
	struct mmc_card *card;
	bool retry_crc_err;
	enum mmc_busy_cmd busy_cmd;
};

int __mmc_send_status(struct mmc_card *card, u32 *status, unsigned int retries)
{
	int err;
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SEND_STATUS;
	if (!mmc_host_is_spi(card->host))
		cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(card->host, &cmd, retries);
	if (err)
		return err;

	/* NOTE: callers are required to understand the difference
	 * between "native" and SPI format status words!
	 */
	if (status)
		*status = cmd.resp[0];

	return 0;
}
EXPORT_SYMBOL_GPL(__mmc_send_status);

int mmc_send_status(struct mmc_card *card, u32 *status)
{
	return __mmc_send_status(card, status, MMC_CMD_RETRIES);
}
EXPORT_SYMBOL_GPL(mmc_send_status);

static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card)
{
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SELECT_CARD;

	if (card) {
		cmd.arg = card->rca << 16;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
	} else {
		cmd.arg = 0;
		cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
	}

	return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
}

int mmc_select_card(struct mmc_card *card)
{
	return _mmc_select_card(card->host, card);
}

int mmc_deselect_cards(struct mmc_host *host)
{
	return _mmc_select_card(host, NULL);
}

/*
 * Write the value specified in the device tree or board code into the optional
 * 16 bit Driver Stage Register.
 * This can be used to tune rise/fall times and drive strength of the DAT and
 * CMD outputs. The actual meaning of a given value is hardware dependent.
 * The presence of the DSR register can be determined from the CSD register,
 * bit 76.
 */
int mmc_set_dsr(struct mmc_host *host)
{
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SET_DSR;

	cmd.arg = (host->dsr << 16) | 0xffff;
	cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;

	return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
}

int mmc_go_idle(struct mmc_host *host)
{
	int err;
	struct mmc_command cmd = {};

	/*
	 * Non-SPI hosts need to prevent chipselect going active during
	 * GO_IDLE; that would put chips into SPI mode. Remind them of
	 * that in case of hardware that won't pull up DAT3/nCS otherwise.
	 *
	 * SPI hosts ignore ios.chip_select; it's managed according to
	 * rules that must accommodate non-MMC slaves which this layer
	 * won't even know about.
	 */
	if (!mmc_host_is_spi(host)) {
		mmc_set_chip_select(host, MMC_CS_HIGH);
		mmc_delay(1);
	}

	cmd.opcode = MMC_GO_IDLE_STATE;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC;

	err = mmc_wait_for_cmd(host, &cmd, 0);

	mmc_delay(1);

	if (!mmc_host_is_spi(host)) {
		mmc_set_chip_select(host, MMC_CS_DONTCARE);
		mmc_delay(1);
	}

	host->use_spi_crc = 0;

	return err;
}

int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
{
	struct mmc_command cmd = {};
	int i, err = 0;

	cmd.opcode = MMC_SEND_OP_COND;
	cmd.arg = mmc_host_is_spi(host) ? 0 : ocr;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;

	for (i = 100; i; i--) {
		err = mmc_wait_for_cmd(host, &cmd, 0);
		if (err)
			break;

		/* wait until reset completes */
		if (mmc_host_is_spi(host)) {
			if (!(cmd.resp[0] & R1_SPI_IDLE))
				break;
		} else {
			if (cmd.resp[0] & MMC_CARD_BUSY)
				break;
		}

		err = -ETIMEDOUT;

		mmc_delay(10);

		/*
		 * According to eMMC specification v5.1 section 6.4.3, we
		 * should issue CMD1 repeatedly in the idle state until
		 * the eMMC is ready. Otherwise some eMMC devices seem to enter
		 * the inactive mode after mmc_init_card() issued CMD0 when
		 * the eMMC device is busy.
		 */
		if (!ocr && !mmc_host_is_spi(host))
			cmd.arg = cmd.resp[0] | BIT(30);
	}

	if (rocr && !mmc_host_is_spi(host))
		*rocr = cmd.resp[0];

	return err;
}

int mmc_set_relative_addr(struct mmc_card *card)
{
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SET_RELATIVE_ADDR;
	cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	return mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
}

static int
mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode)
{
	int err;
	struct mmc_command cmd = {};

	cmd.opcode = opcode;
	cmd.arg = arg;
	cmd.flags = MMC_RSP_R2 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
	if (err)
		return err;

	memcpy(cxd, cmd.resp, sizeof(u32) * 4);

	return 0;
}

/*
 * NOTE: the caller must pass a DMA-capable buffer for @buf, or an on-stack
 * buffer (which incurs some bounce-buffering overhead in the callee).
 */
int mmc_send_adtc_data(struct mmc_card *card, struct mmc_host *host, u32 opcode,
		       u32 args, void *buf, unsigned len)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;

	mrq.cmd = &cmd;
	mrq.data = &data;

	cmd.opcode = opcode;
	cmd.arg = args;

	/* NOTE HACK: the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID. Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = len;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;

	sg_init_one(&sg, buf, len);

	if (opcode == MMC_SEND_CSD || opcode == MMC_SEND_CID) {
		/*
		 * The spec states that CSD and CID accesses have a timeout
		 * of 64 clock cycles.
		 */
		data.timeout_ns = 0;
		data.timeout_clks = 64;
	} else
		mmc_set_data_timeout(&data, card);

	mmc_wait_for_req(host, &mrq);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return 0;
}

static int mmc_spi_send_cxd(struct mmc_host *host, u32 *cxd, u32 opcode)
{
	int ret, i;
	__be32 *cxd_tmp;

	cxd_tmp = kzalloc(16, GFP_KERNEL);
	if (!cxd_tmp)
		return -ENOMEM;

	ret = mmc_send_adtc_data(NULL, host, opcode, 0, cxd_tmp, 16);
	if (ret)
		goto err;

	for (i = 0; i < 4; i++)
		cxd[i] = be32_to_cpu(cxd_tmp[i]);

err:
	kfree(cxd_tmp);
	return ret;
}

int mmc_send_csd(struct mmc_card *card, u32 *csd)
{
	if (mmc_host_is_spi(card->host))
		return mmc_spi_send_cxd(card->host, csd, MMC_SEND_CSD);

	return mmc_send_cxd_native(card->host, card->rca << 16, csd,
				   MMC_SEND_CSD);
}

int mmc_send_cid(struct mmc_host *host, u32 *cid)
{
	if (mmc_host_is_spi(host))
		return mmc_spi_send_cxd(host, cid, MMC_SEND_CID);

	return mmc_send_cxd_native(host, 0, cid, MMC_ALL_SEND_CID);
}

int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)
{
	int err;
	u8 *ext_csd;

	if (!card || !new_ext_csd)
		return -EINVAL;

	if (!mmc_can_ext_csd(card))
		return -EOPNOTSUPP;

	/*
	 * As the ext_csd is so large and mostly unused, we don't store the
	 * raw block in mmc_card.
	 */
	ext_csd = kzalloc(512, GFP_KERNEL);
	if (!ext_csd)
		return -ENOMEM;

	err = mmc_send_adtc_data(card, card->host, MMC_SEND_EXT_CSD, 0, ext_csd,
				 512);
	if (err)
		kfree(ext_csd);
	else
		*new_ext_csd = ext_csd;

	return err;
}
EXPORT_SYMBOL_GPL(mmc_get_ext_csd);

int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp)
{
	struct mmc_command cmd = {};
	int err;

	cmd.opcode = MMC_SPI_READ_OCR;
	cmd.arg = highcap ? (1 << 30) : 0;
	cmd.flags = MMC_RSP_SPI_R3;

	err = mmc_wait_for_cmd(host, &cmd, 0);

	*ocrp = cmd.resp[1];
	return err;
}

int mmc_spi_set_crc(struct mmc_host *host, int use_crc)
{
	struct mmc_command cmd = {};
	int err;

	cmd.opcode = MMC_SPI_CRC_ON_OFF;
	cmd.flags = MMC_RSP_SPI_R1;
	cmd.arg = use_crc;

	err = mmc_wait_for_cmd(host, &cmd, 0);
	if (!err)
		host->use_spi_crc = use_crc;
	return err;
}

static int mmc_switch_status_error(struct mmc_host *host, u32 status)
{
	if (mmc_host_is_spi(host)) {
		if (status & R1_SPI_ILLEGAL_COMMAND)
			return -EBADMSG;
	} else {
		if (R1_STATUS(status))
			pr_warn("%s: unexpected status %#x after switch\n",
				mmc_hostname(host), status);
		if (status & R1_SWITCH_ERROR)
			return -EBADMSG;
	}
	return 0;
}

/* Caller must hold re-tuning */
int mmc_switch_status(struct mmc_card *card, bool crc_err_fatal)
{
	u32 status;
	int err;

	err = mmc_send_status(card, &status);
	if (!crc_err_fatal && err == -EILSEQ)
		return 0;
	if (err)
		return err;

	return mmc_switch_status_error(card->host, status);
}

static int mmc_busy_cb(void *cb_data, bool *busy)
{
	struct mmc_busy_data *data = cb_data;
	struct mmc_host *host = data->card->host;
	u32 status = 0;
	int err;

	if (host->ops->card_busy) {
		*busy = host->ops->card_busy(host);
		return 0;
	}

	err = mmc_send_status(data->card, &status);
	if (data->retry_crc_err && err == -EILSEQ) {
		*busy = true;
		return 0;
	}
	if (err)
		return err;

	switch (data->busy_cmd) {
	case MMC_BUSY_CMD6:
		err = mmc_switch_status_error(host, status);
		break;
	case MMC_BUSY_ERASE:
		err = R1_STATUS(status) ? -EIO : 0;
		break;
	case MMC_BUSY_HPI:
	case MMC_BUSY_EXTR_SINGLE:
		break;
	default:
		err = -EINVAL;
	}

	if (err)
		return err;

	*busy = !mmc_ready_for_data(status);
	return 0;
}

int __mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
			int (*busy_cb)(void *cb_data, bool *busy),
			void *cb_data)
{
	struct mmc_host *host = card->host;
	int err;
	unsigned long timeout;
	unsigned int udelay = 32, udelay_max = 32768;
	bool expired = false;
	bool busy = false;

	timeout = jiffies + msecs_to_jiffies(timeout_ms) + 1;
	do {
		/*
		 * Due to the possibility of being preempted while polling,
		 * check the expiration time first.
		 */
		expired = time_after(jiffies, timeout);

		err = (*busy_cb)(cb_data, &busy);
		if (err)
			return err;

		/* Timeout if the device still remains busy. */
		if (expired && busy) {
			pr_err("%s: Card stuck being busy! %s\n",
			       mmc_hostname(host), __func__);
			return -ETIMEDOUT;
		}

		/* Throttle the polling rate to avoid hogging the CPU. */
		if (busy) {
			usleep_range(udelay, udelay * 2);
			if (udelay < udelay_max)
				udelay *= 2;
		}
	} while (busy);

	return 0;
}

int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
		      bool retry_crc_err, enum mmc_busy_cmd busy_cmd)
{
	struct mmc_busy_data cb_data;

	cb_data.card = card;
	cb_data.retry_crc_err = retry_crc_err;
	cb_data.busy_cmd = busy_cmd;

	return __mmc_poll_for_busy(card, timeout_ms, &mmc_busy_cb, &cb_data);
}

bool mmc_prepare_busy_cmd(struct mmc_host *host, struct mmc_command *cmd,
			  unsigned int timeout_ms)
{
	/*
	 * If the max_busy_timeout of the host is specified, make sure it's
	 * enough to fit the used timeout_ms. In case it's not, let's instruct
	 * the host to avoid HW busy detection, by converting to a R1 response
	 * instead of a R1B. Note that some hosts require R1B, which also means
	 * they are on their own when it comes to dealing with the busy timeout.
	 */
	if (!(host->caps & MMC_CAP_NEED_RSP_BUSY) && host->max_busy_timeout &&
	    (timeout_ms > host->max_busy_timeout)) {
		cmd->flags = MMC_CMD_AC | MMC_RSP_SPI_R1 | MMC_RSP_R1;
		return false;
	}

	cmd->flags = MMC_CMD_AC | MMC_RSP_SPI_R1B | MMC_RSP_R1B;
	cmd->busy_timeout = timeout_ms;
	return true;
}

/**
 * __mmc_switch - modify EXT_CSD register
 * @card: the MMC card associated with the data transfer
 * @set: cmd set values
 * @index: EXT_CSD register index
 * @value: value to program into EXT_CSD register
 * @timeout_ms: timeout (ms) for operation performed by register write,
 *		timeout of zero implies maximum possible timeout
 * @timing: new timing to change to
 * @send_status: send status cmd to poll for busy
 * @retry_crc_err: retry on CRC errors when polling with CMD13 for busy
 * @retries: number of retries
 *
 * Modifies the EXT_CSD register for selected card.
 */
int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
		 unsigned int timeout_ms, unsigned char timing,
		 bool send_status, bool retry_crc_err, unsigned int retries)
{
	struct mmc_host *host = card->host;
	int err;
	struct mmc_command cmd = {};
	bool use_r1b_resp;
	unsigned char old_timing = host->ios.timing;

	mmc_retune_hold(host);

	if (!timeout_ms) {
		pr_warn("%s: unspecified timeout for CMD6 - use generic\n",
			mmc_hostname(host));
		timeout_ms = card->ext_csd.generic_cmd6_time;
	}

	cmd.opcode = MMC_SWITCH;
	cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
		  (index << 16) |
		  (value << 8) |
		  set;
	use_r1b_resp = mmc_prepare_busy_cmd(host, &cmd, timeout_ms);

	err = mmc_wait_for_cmd(host, &cmd, retries);
	if (err)
		goto out;

	/* If SPI or we used HW busy detection above, we don't need to poll. */
	if (((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp) ||
	    mmc_host_is_spi(host))
		goto out_tim;

	/*
	 * If the host doesn't support HW polling via the ->card_busy() op and
	 * polling with CMD13 is not allowed, we must rely on waiting for the
	 * stated timeout to be sufficient.
	 */
	if (!send_status && !host->ops->card_busy) {
		mmc_delay(timeout_ms);
		goto out_tim;
	}

	/* Let's try to poll to find out when the command is completed. */
	err = mmc_poll_for_busy(card, timeout_ms, retry_crc_err, MMC_BUSY_CMD6);
	if (err)
		goto out;

out_tim:
	/* Switch to the new timing before checking the switch status. */
	if (timing)
		mmc_set_timing(host, timing);

	if (send_status) {
		err = mmc_switch_status(card, true);
		if (err && timing)
			mmc_set_timing(host, old_timing);
	}
out:
	mmc_retune_release(host);

	return err;
}

int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
	       unsigned int timeout_ms)
{
	return __mmc_switch(card, set, index, value, timeout_ms, 0,
			    true, false, MMC_CMD_RETRIES);
}
EXPORT_SYMBOL_GPL(mmc_switch);

int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;
	struct mmc_ios *ios = &host->ios;
	const u8 *tuning_block_pattern;
	int size, err = 0;
	u8 *data_buf;

	if (ios->bus_width == MMC_BUS_WIDTH_8) {
		tuning_block_pattern = tuning_blk_pattern_8bit;
		size = sizeof(tuning_blk_pattern_8bit);
	} else if (ios->bus_width == MMC_BUS_WIDTH_4) {
		tuning_block_pattern = tuning_blk_pattern_4bit;
		size = sizeof(tuning_blk_pattern_4bit);
	} else
		return -EINVAL;

	data_buf = kzalloc(size, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;

	mrq.cmd = &cmd;
	mrq.data = &data;

	cmd.opcode = opcode;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = size;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	/*
	 * According to the tuning specs, the tuning process normally takes
	 * fewer than 40 executions of CMD19, and the timeout value should be
	 * shorter than 150 ms.
	 */
	data.timeout_ns = 150 * NSEC_PER_MSEC;

	data.sg = &sg;
	data.sg_len = 1;
	sg_init_one(&sg, data_buf, size);

	mmc_wait_for_req(host, &mrq);

	if (cmd_error)
		*cmd_error = cmd.error;

	if (cmd.error) {
		err = cmd.error;
		goto out;
	}

	if (data.error) {
		err = data.error;
		goto out;
	}

	if (memcmp(data_buf, tuning_block_pattern, size))
		err = -EIO;

out:
	kfree(data_buf);
	return err;
}
EXPORT_SYMBOL_GPL(mmc_send_tuning);

int mmc_send_abort_tuning(struct mmc_host *host, u32 opcode)
{
	struct mmc_command cmd = {};

	/*
	 * The eMMC specification specifies that CMD12 can be used to stop a
	 * tuning command, but the SD specification does not, so do nothing
	 * unless it is eMMC.
	 */
	if (opcode != MMC_SEND_TUNING_BLOCK_HS200)
		return 0;

	cmd.opcode = MMC_STOP_TRANSMISSION;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;

	/*
	 * For drivers that override R1 to R1b, set an arbitrary timeout based
	 * on the tuning timeout, i.e. 150 ms.
	 */
	cmd.busy_timeout = 150;

	return mmc_wait_for_cmd(host, &cmd, 0);
}
EXPORT_SYMBOL_GPL(mmc_send_abort_tuning);

static int
mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
		  u8 len)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;
	u8 *data_buf;
	u8 *test_buf;
	int i, err;
	static u8 testdata_8bit[8] = { 0x55, 0xaa, 0, 0, 0, 0, 0, 0 };
	static u8 testdata_4bit[4] = { 0x5a, 0, 0, 0 };

	/* dma onto stack is unsafe/nonportable, but callers to this
	 * routine normally provide temporary on-stack buffers ...
	 */
	data_buf = kmalloc(len, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;

	if (len == 8)
		test_buf = testdata_8bit;
	else if (len == 4)
		test_buf = testdata_4bit;
	else {
		pr_err("%s: Invalid bus_width %d\n",
		       mmc_hostname(host), len);
		kfree(data_buf);
		return -EINVAL;
	}

	if (opcode == MMC_BUS_TEST_W)
		memcpy(data_buf, test_buf, len);

	mrq.cmd = &cmd;
	mrq.data = &data;
	cmd.opcode = opcode;
	cmd.arg = 0;

	/* NOTE HACK: the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID. Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = len;
	data.blocks = 1;
	if (opcode == MMC_BUS_TEST_R)
		data.flags = MMC_DATA_READ;
	else
		data.flags = MMC_DATA_WRITE;

	data.sg = &sg;
	data.sg_len = 1;
	mmc_set_data_timeout(&data, card);
	sg_init_one(&sg, data_buf, len);
	mmc_wait_for_req(host, &mrq);
	err = 0;
	if (opcode == MMC_BUS_TEST_R) {
		for (i = 0; i < len / 4; i++)
			if ((test_buf[i] ^ data_buf[i]) != 0xff) {
				err = -EIO;
				break;
			}
	}
	kfree(data_buf);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return err;
}

int mmc_bus_test(struct mmc_card *card, u8 bus_width)
{
	int width;

	if (bus_width == MMC_BUS_WIDTH_8)
		width = 8;
	else if (bus_width == MMC_BUS_WIDTH_4)
		width = 4;
	else if (bus_width == MMC_BUS_WIDTH_1)
		return 0; /* no need for test */
	else
		return -EINVAL;

	/*
	 * Ignore errors from BUS_TEST_W. BUS_TEST_R will fail if there
	 * is a problem. This improves chances that the test will work.
	 */
	mmc_send_bus_test(card, card->host, MMC_BUS_TEST_W, width);
	return mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
}

static int mmc_send_hpi_cmd(struct mmc_card *card)
{
	unsigned int busy_timeout_ms = card->ext_csd.out_of_int_time;
	struct mmc_host *host = card->host;
	bool use_r1b_resp = false;
	struct mmc_command cmd = {};
	int err;

	cmd.opcode = card->ext_csd.hpi_cmd;
	cmd.arg = card->rca << 16 | 1;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	if (cmd.opcode == MMC_STOP_TRANSMISSION)
		use_r1b_resp = mmc_prepare_busy_cmd(host, &cmd,
						    busy_timeout_ms);

	err = mmc_wait_for_cmd(host, &cmd, 0);
	if (err) {
		pr_warn("%s: HPI error %d. Command response %#x\n",
			mmc_hostname(host), err, cmd.resp[0]);
		return err;
	}

	/* No need to poll when using HW busy detection. */
	if (host->caps & MMC_CAP_WAIT_WHILE_BUSY && use_r1b_resp)
		return 0;

	/* Let's poll to find out when the HPI request completes. */
	return mmc_poll_for_busy(card, busy_timeout_ms, false, MMC_BUSY_HPI);
}

/**
 * mmc_interrupt_hpi - Issue a High Priority Interrupt
 * @card: the MMC card associated with the HPI transfer
 *
 * Issue a High Priority Interrupt, then poll the card status until it is
 * out of the programming state.
 */
static int mmc_interrupt_hpi(struct mmc_card *card)
{
	int err;
	u32 status;

	if (!card->ext_csd.hpi_en) {
		pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
		return 1;
	}

	err = mmc_send_status(card, &status);
	if (err) {
		pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
		goto out;
	}

	switch (R1_CURRENT_STATE(status)) {
	case R1_STATE_IDLE:
	case R1_STATE_READY:
	case R1_STATE_STBY:
	case R1_STATE_TRAN:
		/*
		 * In idle and transfer states, HPI is not needed and the caller
		 * can issue the next intended command immediately.
		 */
		goto out;
	case R1_STATE_PRG:
		break;
	default:
		/* In all other states, it's illegal to issue HPI */
		pr_debug("%s: HPI cannot be sent. Card state=%d\n",
			 mmc_hostname(card->host), R1_CURRENT_STATE(status));
		err = -EINVAL;
		goto out;
	}

	err = mmc_send_hpi_cmd(card);
out:
	return err;
}

int mmc_can_ext_csd(struct mmc_card *card)
{
	return (card && card->csd.mmca_vsn > CSD_SPEC_VER_3);
}

static int mmc_read_bkops_status(struct mmc_card *card)
{
	int err;
	u8 *ext_csd;

	err = mmc_get_ext_csd(card, &ext_csd);
	if (err)
		return err;

	card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS];
	card->ext_csd.raw_exception_status = ext_csd[EXT_CSD_EXP_EVENTS_STATUS];
	kfree(ext_csd);
	return 0;
}

/**
 * mmc_run_bkops - Run BKOPS for supported cards
 * @card: MMC card to run BKOPS for
 *
 * Run background operations synchronously for cards having manual BKOPS
 * enabled, in case the card reports an urgent BKOPS level.
 */
void mmc_run_bkops(struct mmc_card *card)
{
	int err;

	if (!card->ext_csd.man_bkops_en)
		return;

	err = mmc_read_bkops_status(card);
	if (err) {
		pr_err("%s: Failed to read bkops status: %d\n",
		       mmc_hostname(card->host), err);
		return;
	}

	if (!card->ext_csd.raw_bkops_status ||
	    card->ext_csd.raw_bkops_status < EXT_CSD_BKOPS_LEVEL_2)
		return;

	mmc_retune_hold(card->host);

	/*
	 * For urgent BKOPS status (LEVEL_2 and higher), let's execute it
	 * synchronously. Going forward, we may consider starting BKOPS for
	 * less urgent levels from an asynchronous background task, when idle.
	 */
	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_BKOPS_START, 1, MMC_BKOPS_TIMEOUT_MS);
	if (err)
		pr_warn("%s: Error %d starting bkops\n",
			mmc_hostname(card->host), err);

	mmc_retune_release(card->host);
}
EXPORT_SYMBOL(mmc_run_bkops);

static int mmc_cmdq_switch(struct mmc_card *card, bool enable)
{
	u8 val = enable ? EXT_CSD_CMDQ_MODE_ENABLED : 0;
	int err;

	if (!card->ext_csd.cmdq_support)
		return -EOPNOTSUPP;

	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_CMDQ_MODE_EN,
			 val, card->ext_csd.generic_cmd6_time);
	if (!err)
		card->ext_csd.cmdq_en = enable;

	return err;
}

int mmc_cmdq_enable(struct mmc_card *card)
{
	return mmc_cmdq_switch(card, true);
}
EXPORT_SYMBOL_GPL(mmc_cmdq_enable);

int mmc_cmdq_disable(struct mmc_card *card)
{
	return mmc_cmdq_switch(card, false);
}
EXPORT_SYMBOL_GPL(mmc_cmdq_disable);

int mmc_sanitize(struct mmc_card *card, unsigned int timeout_ms)
{
	struct mmc_host *host = card->host;
	int err;

	if (!mmc_can_sanitize(card)) {
		pr_warn("%s: Sanitize not supported\n", mmc_hostname(host));
		return -EOPNOTSUPP;
	}

	if (!timeout_ms)
		timeout_ms = MMC_SANITIZE_TIMEOUT_MS;

	pr_debug("%s: Sanitize in progress...\n", mmc_hostname(host));

	mmc_retune_hold(host);

	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_SANITIZE_START,
			   1, timeout_ms, 0, true, false, 0);
	if (err)
		pr_err("%s: Sanitize failed err=%d\n", mmc_hostname(host), err);

	/*
	 * If the sanitize operation timed out, the card is probably still busy
	 * in the R1_STATE_PRG. Rather than continue to wait, let's try to abort
	 * it with a HPI command to get back into R1_STATE_TRAN.
	 */
	if (err == -ETIMEDOUT && !mmc_interrupt_hpi(card))
		pr_warn("%s: Sanitize aborted\n", mmc_hostname(host));

	mmc_retune_release(host);

	pr_debug("%s: Sanitize completed\n", mmc_hostname(host));
	return err;
}
EXPORT_SYMBOL_GPL(mmc_sanitize);
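
/*
 * Illustrative sketch only (not part of this file's API): a host controller
 * driver would typically call mmc_send_tuning() from its ->execute_tuning()
 * callback while stepping through its sample-clock phases. The "foo"
 * identifiers below (struct foo_host, FOO_NUM_PHASES, foo_set_sample_phase)
 * are hypothetical placeholders for controller-specific code, so the block
 * is kept compiled out.
 */
#if 0
static int foo_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct foo_host *host = mmc_priv(mmc);	/* hypothetical private data */
	int phase, best_phase = -1;

	for (phase = 0; phase < FOO_NUM_PHASES; phase++) {
		foo_set_sample_phase(host, phase);	/* hypothetical helper */

		/* mmc_send_tuning() returns 0 when the pattern reads back OK */
		if (!mmc_send_tuning(mmc, opcode, NULL))
			best_phase = phase;
	}

	if (best_phase < 0)
		return -EIO;

	foo_set_sample_phase(host, best_phase);
	return 0;
}
#endif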