// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  linux/drivers/mmc/core/mmc_ops.h
 *
 *  Copyright 2006-2007 Pierre Ossman
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/scatterlist.h>

#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/mmc.h>

#include "core.h"
#include "card.h"
#include "host.h"
#include "mmc_ops.h"

#define MMC_BKOPS_TIMEOUT_MS		(120 * 1000) /* 120s */
#define MMC_SANITIZE_TIMEOUT_MS		(240 * 1000) /* 240s */

static const u8 tuning_blk_pattern_4bit[] = {
	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
};

static const u8 tuning_blk_pattern_8bit[] = {
	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
};

struct mmc_busy_data {
	struct mmc_card *card;
	bool retry_crc_err;
	enum mmc_busy_cmd busy_cmd;
};

struct mmc_op_cond_busy_data {
	struct mmc_host *host;
	u32 ocr;
	struct mmc_command *cmd;
};

int __mmc_send_status(struct mmc_card *card, u32 *status, unsigned int retries)
{
	int err;
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SEND_STATUS;
	if (!mmc_host_is_spi(card->host))
		cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(card->host, &cmd, retries);
	if (err)
		return err;

	/* NOTE: callers are required to understand the difference
	 * between "native" and SPI format status words!
	 */
	if (status)
		*status = cmd.resp[0];

	return 0;
}
EXPORT_SYMBOL_GPL(__mmc_send_status);

int mmc_send_status(struct mmc_card *card, u32 *status)
{
	return __mmc_send_status(card, status, MMC_CMD_RETRIES);
}
EXPORT_SYMBOL_GPL(mmc_send_status);

static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card)
{
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SELECT_CARD;

	if (card) {
		cmd.arg = card->rca << 16;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
	} else {
		cmd.arg = 0;
		cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
	}

	return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
}

int mmc_select_card(struct mmc_card *card)
{
	return _mmc_select_card(card->host, card);
}

int mmc_deselect_cards(struct mmc_host *host)
{
	return _mmc_select_card(host, NULL);
}

/*
 * Write the value specified in the device tree or board code into the optional
 * 16 bit Driver Stage Register. This can be used to tune rise/fall times and
 * drive strength of the DAT and CMD outputs. The actual meaning of a given
 * value is hardware dependent.
 * The presence of the DSR register can be determined from the CSD register,
 * bit 76.
 */
int mmc_set_dsr(struct mmc_host *host)
{
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SET_DSR;

	cmd.arg = (host->dsr << 16) | 0xffff;
	cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;

	return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
}

int mmc_go_idle(struct mmc_host *host)
{
	int err;
	struct mmc_command cmd = {};

	/*
	 * Non-SPI hosts need to prevent chipselect going active during
	 * GO_IDLE; that would put chips into SPI mode. Remind them of
	 * that in case of hardware that won't pull up DAT3/nCS otherwise.
	 *
	 * SPI hosts ignore ios.chip_select; it's managed according to
	 * rules that must accommodate non-MMC slaves which this layer
	 * won't even know about.
	 */
	if (!mmc_host_is_spi(host)) {
		mmc_set_chip_select(host, MMC_CS_HIGH);
		mmc_delay(1);
	}

	cmd.opcode = MMC_GO_IDLE_STATE;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC;

	err = mmc_wait_for_cmd(host, &cmd, 0);

	mmc_delay(1);

	if (!mmc_host_is_spi(host)) {
		mmc_set_chip_select(host, MMC_CS_DONTCARE);
		mmc_delay(1);
	}

	host->use_spi_crc = 0;

	return err;
}

static int __mmc_send_op_cond_cb(void *cb_data, bool *busy)
{
	struct mmc_op_cond_busy_data *data = cb_data;
	struct mmc_host *host = data->host;
	struct mmc_command *cmd = data->cmd;
	u32 ocr = data->ocr;
	int err = 0;

	err = mmc_wait_for_cmd(host, cmd, 0);
	if (err)
		return err;

	if (mmc_host_is_spi(host)) {
		if (!(cmd->resp[0] & R1_SPI_IDLE)) {
			*busy = false;
			return 0;
		}
	} else {
		if (cmd->resp[0] & MMC_CARD_BUSY) {
			*busy = false;
			return 0;
		}
	}

	*busy = true;

	/*
	 * According to eMMC specification v5.1 section 6.4.3, we
	 * should issue CMD1 repeatedly in the idle state until
	 * the eMMC is ready. Otherwise some eMMC devices seem to enter
	 * the inactive mode after mmc_init_card() issued CMD0 when
	 * the eMMC device is busy.
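	 *
	 * If the caller passed a zero OCR (a query-only CMD1), re-send CMD1
	 * with the OCR the card reported, keeping the sector access mode
	 * bit (bit 30) set, so a busy card can complete its power-up.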
	 */
	if (!ocr && !mmc_host_is_spi(host))
		cmd->arg = cmd->resp[0] | BIT(30);

	return 0;
}

int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
{
	struct mmc_command cmd = {};
	int err = 0;
	struct mmc_op_cond_busy_data cb_data = {
		.host = host,
		.ocr = ocr,
		.cmd = &cmd
	};

	cmd.opcode = MMC_SEND_OP_COND;
	cmd.arg = mmc_host_is_spi(host) ? 0 : ocr;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;

	err = __mmc_poll_for_busy(host, 1000, &__mmc_send_op_cond_cb, &cb_data);
	if (err)
		return err;

	if (rocr && !mmc_host_is_spi(host))
		*rocr = cmd.resp[0];

	return err;
}

int mmc_set_relative_addr(struct mmc_card *card)
{
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SET_RELATIVE_ADDR;
	cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	return mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
}

static int
mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode)
{
	int err;
	struct mmc_command cmd = {};

	cmd.opcode = opcode;
	cmd.arg = arg;
	cmd.flags = MMC_RSP_R2 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
	if (err)
		return err;

	memcpy(cxd, cmd.resp, sizeof(u32) * 4);

	return 0;
}

/*
 * NOTE: the caller must provide a DMA-capable buffer for buf, or an
 * on-stack buffer (which incurs some overhead in the callee).
 */
int mmc_send_adtc_data(struct mmc_card *card, struct mmc_host *host, u32 opcode,
		       u32 args, void *buf, unsigned len)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;

	mrq.cmd = &cmd;
	mrq.data = &data;

	cmd.opcode = opcode;
	cmd.arg = args;

	/* NOTE HACK: the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID. Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = len;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;

	sg_init_one(&sg, buf, len);

	if (opcode == MMC_SEND_CSD || opcode == MMC_SEND_CID) {
		/*
		 * The spec states that CSD and CID accesses have a timeout
		 * of 64 clock cycles.
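		 * Hence a clock-cycle-based timeout (timeout_clks) is used
		 * here instead of a time-based one.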
		 */
		data.timeout_ns = 0;
		data.timeout_clks = 64;
	} else
		mmc_set_data_timeout(&data, card);

	mmc_wait_for_req(host, &mrq);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return 0;
}

static int mmc_spi_send_cxd(struct mmc_host *host, u32 *cxd, u32 opcode)
{
	int ret, i;
	__be32 *cxd_tmp;

	cxd_tmp = kzalloc(16, GFP_KERNEL);
	if (!cxd_tmp)
		return -ENOMEM;

	ret = mmc_send_adtc_data(NULL, host, opcode, 0, cxd_tmp, 16);
	if (ret)
		goto err;

	for (i = 0; i < 4; i++)
		cxd[i] = be32_to_cpu(cxd_tmp[i]);

err:
	kfree(cxd_tmp);
	return ret;
}

int mmc_send_csd(struct mmc_card *card, u32 *csd)
{
	if (mmc_host_is_spi(card->host))
		return mmc_spi_send_cxd(card->host, csd, MMC_SEND_CSD);

	return mmc_send_cxd_native(card->host, card->rca << 16, csd,
				   MMC_SEND_CSD);
}

int mmc_send_cid(struct mmc_host *host, u32 *cid)
{
	if (mmc_host_is_spi(host))
		return mmc_spi_send_cxd(host, cid, MMC_SEND_CID);

	return mmc_send_cxd_native(host, 0, cid, MMC_ALL_SEND_CID);
}

int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)
{
	int err;
	u8 *ext_csd;

	if (!card || !new_ext_csd)
		return -EINVAL;

	if (!mmc_can_ext_csd(card))
		return -EOPNOTSUPP;

	/*
	 * As the ext_csd is so large and mostly unused, we don't store the
	 * raw block in mmc_card.
	 */
	ext_csd = kzalloc(512, GFP_KERNEL);
	if (!ext_csd)
		return -ENOMEM;

	err = mmc_send_adtc_data(card, card->host, MMC_SEND_EXT_CSD, 0, ext_csd,
				 512);
	if (err)
		kfree(ext_csd);
	else
		*new_ext_csd = ext_csd;

	return err;
}
EXPORT_SYMBOL_GPL(mmc_get_ext_csd);

int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp)
{
	struct mmc_command cmd = {};
	int err;

	cmd.opcode = MMC_SPI_READ_OCR;
	cmd.arg = highcap ? (1 << 30) : 0;
	cmd.flags = MMC_RSP_SPI_R3;

	err = mmc_wait_for_cmd(host, &cmd, 0);

	*ocrp = cmd.resp[1];
	return err;
}

int mmc_spi_set_crc(struct mmc_host *host, int use_crc)
{
	struct mmc_command cmd = {};
	int err;

	cmd.opcode = MMC_SPI_CRC_ON_OFF;
	cmd.flags = MMC_RSP_SPI_R1;
	cmd.arg = use_crc;

	err = mmc_wait_for_cmd(host, &cmd, 0);
	if (!err)
		host->use_spi_crc = use_crc;
	return err;
}

static int mmc_switch_status_error(struct mmc_host *host, u32 status)
{
	if (mmc_host_is_spi(host)) {
		if (status & R1_SPI_ILLEGAL_COMMAND)
			return -EBADMSG;
	} else {
		if (R1_STATUS(status))
			pr_warn("%s: unexpected status %#x after switch\n",
				mmc_hostname(host), status);
		if (status & R1_SWITCH_ERROR)
			return -EBADMSG;
	}
	return 0;
}

/* Caller must hold re-tuning */
int mmc_switch_status(struct mmc_card *card, bool crc_err_fatal)
{
	u32 status;
	int err;

	err = mmc_send_status(card, &status);
	if (!crc_err_fatal && err == -EILSEQ)
		return 0;
	if (err)
		return err;

	return mmc_switch_status_error(card->host, status);
}

static int mmc_busy_cb(void *cb_data, bool *busy)
{
	struct mmc_busy_data *data = cb_data;
	struct mmc_host *host = data->card->host;
	u32 status = 0;
	int err;

	if (data->busy_cmd != MMC_BUSY_IO && host->ops->card_busy) {
		*busy = host->ops->card_busy(host);
		return 0;
	}

	err = mmc_send_status(data->card, &status);
	if (data->retry_crc_err && err == -EILSEQ) {
		*busy = true;
		return 0;
	}
	if (err)
		return err;

	switch (data->busy_cmd) {
	case MMC_BUSY_CMD6:
		err = mmc_switch_status_error(host, status);
		break;
	case MMC_BUSY_ERASE:
		err = R1_STATUS(status) ? -EIO : 0;
		break;
	case MMC_BUSY_HPI:
	case MMC_BUSY_EXTR_SINGLE:
	case MMC_BUSY_IO:
		break;
	default:
		err = -EINVAL;
	}

	if (err)
		return err;

	*busy = !mmc_ready_for_data(status);
	return 0;
}

int __mmc_poll_for_busy(struct mmc_host *host, unsigned int timeout_ms,
			int (*busy_cb)(void *cb_data, bool *busy),
			void *cb_data)
{
	int err;
	unsigned long timeout;
	unsigned int udelay = 32, udelay_max = 32768;
	bool expired = false;
	bool busy = false;

	timeout = jiffies + msecs_to_jiffies(timeout_ms) + 1;
	do {
		/*
		 * Due to the possibility of being preempted while polling,
		 * check the expiration time first.
		 */
		expired = time_after(jiffies, timeout);

		err = (*busy_cb)(cb_data, &busy);
		if (err)
			return err;

		/* Timeout if the device still remains busy. */
		if (expired && busy) {
			pr_err("%s: Card stuck being busy! %s\n",
				mmc_hostname(host), __func__);
			return -ETIMEDOUT;
		}

		/*
		 * Throttle the polling rate to avoid hogging the CPU.
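		 * The sleep interval starts at 32us and doubles on each busy
		 * iteration, capped at 32ms (udelay/udelay_max above).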
		 */
		if (busy) {
			usleep_range(udelay, udelay * 2);
			if (udelay < udelay_max)
				udelay *= 2;
		}
	} while (busy);

	return 0;
}
EXPORT_SYMBOL_GPL(__mmc_poll_for_busy);

int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
		      bool retry_crc_err, enum mmc_busy_cmd busy_cmd)
{
	struct mmc_host *host = card->host;
	struct mmc_busy_data cb_data;

	cb_data.card = card;
	cb_data.retry_crc_err = retry_crc_err;
	cb_data.busy_cmd = busy_cmd;

	return __mmc_poll_for_busy(host, timeout_ms, &mmc_busy_cb, &cb_data);
}
EXPORT_SYMBOL_GPL(mmc_poll_for_busy);

bool mmc_prepare_busy_cmd(struct mmc_host *host, struct mmc_command *cmd,
			  unsigned int timeout_ms)
{
	/*
	 * If the max_busy_timeout of the host is specified, make sure it's
	 * enough to fit the used timeout_ms. In case it's not, let's instruct
	 * the host to avoid HW busy detection, by converting to a R1 response
	 * instead of a R1B. Note, some hosts require R1B, which also means
	 * they are on their own when it comes to dealing with the busy timeout.
	 */
	if (!(host->caps & MMC_CAP_NEED_RSP_BUSY) && host->max_busy_timeout &&
	    (timeout_ms > host->max_busy_timeout)) {
		cmd->flags = MMC_CMD_AC | MMC_RSP_SPI_R1 | MMC_RSP_R1;
		return false;
	}

	cmd->flags = MMC_CMD_AC | MMC_RSP_SPI_R1B | MMC_RSP_R1B;
	cmd->busy_timeout = timeout_ms;
	return true;
}

/**
 * __mmc_switch - modify EXT_CSD register
 * @card: the MMC card associated with the data transfer
 * @set: cmd set values
 * @index: EXT_CSD register index
 * @value: value to program into EXT_CSD register
 * @timeout_ms: timeout (ms) for operation performed by register write,
 *		timeout of zero implies maximum possible timeout
 * @timing: new timing to change to
 * @send_status: send status cmd to poll for busy
 * @retry_crc_err: retry if a CRC error occurs while polling with CMD13 for busy
 * @retries: number of retries
 *
 * Modifies the EXT_CSD register for selected card.
 */
int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
		unsigned int timeout_ms, unsigned char timing,
		bool send_status, bool retry_crc_err, unsigned int retries)
{
	struct mmc_host *host = card->host;
	int err;
	struct mmc_command cmd = {};
	bool use_r1b_resp;
	unsigned char old_timing = host->ios.timing;

	mmc_retune_hold(host);

	if (!timeout_ms) {
		pr_warn("%s: unspecified timeout for CMD6 - use generic\n",
			mmc_hostname(host));
		timeout_ms = card->ext_csd.generic_cmd6_time;
	}

	cmd.opcode = MMC_SWITCH;
	cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
		  (index << 16) |
		  (value << 8) |
		  set;
	use_r1b_resp = mmc_prepare_busy_cmd(host, &cmd, timeout_ms);

	err = mmc_wait_for_cmd(host, &cmd, retries);
	if (err)
		goto out;

	/* If SPI or we used HW busy detection above, we don't need to poll. */
	if (((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp) ||
	    mmc_host_is_spi(host))
		goto out_tim;

	/*
	 * If the host doesn't support HW polling via the ->card_busy() op and
	 * it's not allowed to poll by using CMD13, we must rely on waiting out
	 * the stated timeout.
	 */
	if (!send_status && !host->ops->card_busy) {
		mmc_delay(timeout_ms);
		goto out_tim;
	}

	/*
	 * Let's try to poll to find out when the command is completed.
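	 * mmc_poll_for_busy() prefers the host's ->card_busy() op and falls
	 * back to issuing CMD13, as implemented in mmc_busy_cb().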
	 */
	err = mmc_poll_for_busy(card, timeout_ms, retry_crc_err, MMC_BUSY_CMD6);
	if (err)
		goto out;

out_tim:
	/* Switch to new timing before checking switch status. */
	if (timing)
		mmc_set_timing(host, timing);

	if (send_status) {
		err = mmc_switch_status(card, true);
		if (err && timing)
			mmc_set_timing(host, old_timing);
	}
out:
	mmc_retune_release(host);

	return err;
}

int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
		unsigned int timeout_ms)
{
	return __mmc_switch(card, set, index, value, timeout_ms, 0,
			    true, false, MMC_CMD_RETRIES);
}
EXPORT_SYMBOL_GPL(mmc_switch);

int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;
	struct mmc_ios *ios = &host->ios;
	const u8 *tuning_block_pattern;
	int size, err = 0;
	u8 *data_buf;

	if (ios->bus_width == MMC_BUS_WIDTH_8) {
		tuning_block_pattern = tuning_blk_pattern_8bit;
		size = sizeof(tuning_blk_pattern_8bit);
	} else if (ios->bus_width == MMC_BUS_WIDTH_4) {
		tuning_block_pattern = tuning_blk_pattern_4bit;
		size = sizeof(tuning_blk_pattern_4bit);
	} else
		return -EINVAL;

	data_buf = kzalloc(size, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;

	mrq.cmd = &cmd;
	mrq.data = &data;

	cmd.opcode = opcode;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = size;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	/*
	 * According to the tuning specs, the tuning process normally
	 * completes in fewer than 40 executions of CMD19, and the timeout
	 * value should be shorter than 150 ms.
	 */
	data.timeout_ns = 150 * NSEC_PER_MSEC;

	data.sg = &sg;
	data.sg_len = 1;
	sg_init_one(&sg, data_buf, size);

	mmc_wait_for_req(host, &mrq);

	if (cmd_error)
		*cmd_error = cmd.error;

	if (cmd.error) {
		err = cmd.error;
		goto out;
	}

	if (data.error) {
		err = data.error;
		goto out;
	}

	if (memcmp(data_buf, tuning_block_pattern, size))
		err = -EIO;

out:
	kfree(data_buf);
	return err;
}
EXPORT_SYMBOL_GPL(mmc_send_tuning);

int mmc_send_abort_tuning(struct mmc_host *host, u32 opcode)
{
	struct mmc_command cmd = {};

	/*
	 * The eMMC specification states that CMD12 can be used to stop a
	 * tuning command, but the SD specification does not, so do nothing
	 * unless it is eMMC.
	 */
	if (opcode != MMC_SEND_TUNING_BLOCK_HS200)
		return 0;

	cmd.opcode = MMC_STOP_TRANSMISSION;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;

	/*
	 * For drivers that override R1 to R1b, set an arbitrary timeout based
	 * on the tuning timeout i.e. 150ms.
	 */
	cmd.busy_timeout = 150;

	return mmc_wait_for_cmd(host, &cmd, 0);
}
EXPORT_SYMBOL_GPL(mmc_send_abort_tuning);

static int
mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
		  u8 len)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;
	u8 *data_buf;
	u8 *test_buf;
	int i, err;
	static u8 testdata_8bit[8] = { 0x55, 0xaa, 0, 0, 0, 0, 0, 0 };
	static u8 testdata_4bit[4] = { 0x5a, 0, 0, 0 };

	/* dma onto stack is unsafe/nonportable, but callers to this
	 * routine normally provide temporary on-stack buffers ...
	 */
	data_buf = kmalloc(len, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;

	if (len == 8)
		test_buf = testdata_8bit;
	else if (len == 4)
		test_buf = testdata_4bit;
	else {
		pr_err("%s: Invalid bus_width %d\n",
		       mmc_hostname(host), len);
		kfree(data_buf);
		return -EINVAL;
	}

	if (opcode == MMC_BUS_TEST_W)
		memcpy(data_buf, test_buf, len);

	mrq.cmd = &cmd;
	mrq.data = &data;
	cmd.opcode = opcode;
	cmd.arg = 0;

	/* NOTE HACK: the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID. Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = len;
	data.blocks = 1;
	if (opcode == MMC_BUS_TEST_R)
		data.flags = MMC_DATA_READ;
	else
		data.flags = MMC_DATA_WRITE;

	data.sg = &sg;
	data.sg_len = 1;
	mmc_set_data_timeout(&data, card);
	sg_init_one(&sg, data_buf, len);
	mmc_wait_for_req(host, &mrq);
	err = 0;
	if (opcode == MMC_BUS_TEST_R) {
		for (i = 0; i < len / 4; i++)
			if ((test_buf[i] ^ data_buf[i]) != 0xff) {
				err = -EIO;
				break;
			}
	}
	kfree(data_buf);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return err;
}

int mmc_bus_test(struct mmc_card *card, u8 bus_width)
{
	int width;

	if (bus_width == MMC_BUS_WIDTH_8)
		width = 8;
	else if (bus_width == MMC_BUS_WIDTH_4)
		width = 4;
	else if (bus_width == MMC_BUS_WIDTH_1)
		return 0; /* no need for test */
	else
		return -EINVAL;

	/*
	 * Ignore errors from BUS_TEST_W. BUS_TEST_R will fail if there
	 * is a problem. This improves chances that the test will work.
	 */
	mmc_send_bus_test(card, card->host, MMC_BUS_TEST_W, width);
	return mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
}

static int mmc_send_hpi_cmd(struct mmc_card *card)
{
	unsigned int busy_timeout_ms = card->ext_csd.out_of_int_time;
	struct mmc_host *host = card->host;
	bool use_r1b_resp = false;
	struct mmc_command cmd = {};
	int err;

	cmd.opcode = card->ext_csd.hpi_cmd;
	cmd.arg = card->rca << 16 | 1;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	if (cmd.opcode == MMC_STOP_TRANSMISSION)
		use_r1b_resp = mmc_prepare_busy_cmd(host, &cmd,
						    busy_timeout_ms);

	err = mmc_wait_for_cmd(host, &cmd, 0);
	if (err) {
		pr_warn("%s: HPI error %d. Command response %#x\n",
			mmc_hostname(host), err, cmd.resp[0]);
		return err;
	}

	/*
	 * No need to poll when using HW busy detection.
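	 * With MMC_CAP_WAIT_WHILE_BUSY and an R1B response, the host
	 * controller has already waited for the card to stop signalling busy.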
	 */
	if (host->caps & MMC_CAP_WAIT_WHILE_BUSY && use_r1b_resp)
		return 0;

	/* Let's poll to find out when the HPI request completes. */
	return mmc_poll_for_busy(card, busy_timeout_ms, false, MMC_BUSY_HPI);
}

/**
 * mmc_interrupt_hpi - Issue a High Priority Interrupt
 * @card: the MMC card associated with the HPI transfer
 *
 * Issue a High Priority Interrupt and poll the card status until it
 * leaves the programming state.
 */
static int mmc_interrupt_hpi(struct mmc_card *card)
{
	int err;
	u32 status;

	if (!card->ext_csd.hpi_en) {
		pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
		return 1;
	}

	err = mmc_send_status(card, &status);
	if (err) {
		pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
		goto out;
	}

	switch (R1_CURRENT_STATE(status)) {
	case R1_STATE_IDLE:
	case R1_STATE_READY:
	case R1_STATE_STBY:
	case R1_STATE_TRAN:
		/*
		 * In idle and transfer states, HPI is not needed and the caller
		 * can issue the next intended command immediately.
		 */
		goto out;
	case R1_STATE_PRG:
		break;
	default:
		/* In all other states, it's illegal to issue HPI */
		pr_debug("%s: HPI cannot be sent. Card state=%d\n",
			 mmc_hostname(card->host), R1_CURRENT_STATE(status));
		err = -EINVAL;
		goto out;
	}

	err = mmc_send_hpi_cmd(card);
out:
	return err;
}

int mmc_can_ext_csd(struct mmc_card *card)
{
	return (card && card->csd.mmca_vsn > CSD_SPEC_VER_3);
}

static int mmc_read_bkops_status(struct mmc_card *card)
{
	int err;
	u8 *ext_csd;

	err = mmc_get_ext_csd(card, &ext_csd);
	if (err)
		return err;

	card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS];
	card->ext_csd.raw_exception_status = ext_csd[EXT_CSD_EXP_EVENTS_STATUS];
	kfree(ext_csd);
	return 0;
}

/**
 * mmc_run_bkops - Run BKOPS for supported cards
 * @card: MMC card to run BKOPS for
 *
 * Run background operations synchronously for cards that have manual BKOPS
 * enabled and report an urgent BKOPS level.
 */
void mmc_run_bkops(struct mmc_card *card)
{
	int err;

	if (!card->ext_csd.man_bkops_en)
		return;

	err = mmc_read_bkops_status(card);
	if (err) {
		pr_err("%s: Failed to read bkops status: %d\n",
		       mmc_hostname(card->host), err);
		return;
	}

	if (!card->ext_csd.raw_bkops_status ||
	    card->ext_csd.raw_bkops_status < EXT_CSD_BKOPS_LEVEL_2)
		return;

	mmc_retune_hold(card->host);

	/*
	 * For urgent BKOPS status, LEVEL_2 and higher, let's execute
	 * synchronously. Going forward, we may consider starting BKOPS for
	 * less urgent levels from an asynchronous background task, when idle.
	 */
	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_BKOPS_START, 1, MMC_BKOPS_TIMEOUT_MS);
	/*
	 * If the BKOPS timed out, the card is probably still busy in the
	 * R1_STATE_PRG. Rather than continue to wait, let's try to abort
	 * it with a HPI command to get back into R1_STATE_TRAN.
	 */
	if (err == -ETIMEDOUT && !mmc_interrupt_hpi(card))
		pr_warn("%s: BKOPS aborted\n", mmc_hostname(card->host));
	else if (err)
		pr_warn("%s: Error %d running bkops\n",
			mmc_hostname(card->host), err);

	mmc_retune_release(card->host);
}
EXPORT_SYMBOL(mmc_run_bkops);

static int mmc_cmdq_switch(struct mmc_card *card, bool enable)
{
	u8 val = enable ? EXT_CSD_CMDQ_MODE_ENABLED : 0;
	int err;

	if (!card->ext_csd.cmdq_support)
		return -EOPNOTSUPP;

	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_CMDQ_MODE_EN,
			 val, card->ext_csd.generic_cmd6_time);
	if (!err)
		card->ext_csd.cmdq_en = enable;

	return err;
}

int mmc_cmdq_enable(struct mmc_card *card)
{
	return mmc_cmdq_switch(card, true);
}
EXPORT_SYMBOL_GPL(mmc_cmdq_enable);

int mmc_cmdq_disable(struct mmc_card *card)
{
	return mmc_cmdq_switch(card, false);
}
EXPORT_SYMBOL_GPL(mmc_cmdq_disable);

int mmc_sanitize(struct mmc_card *card, unsigned int timeout_ms)
{
	struct mmc_host *host = card->host;
	int err;

	if (!mmc_can_sanitize(card)) {
		pr_warn("%s: Sanitize not supported\n", mmc_hostname(host));
		return -EOPNOTSUPP;
	}

	if (!timeout_ms)
		timeout_ms = MMC_SANITIZE_TIMEOUT_MS;

	pr_debug("%s: Sanitize in progress...\n", mmc_hostname(host));

	mmc_retune_hold(host);

	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_SANITIZE_START,
			   1, timeout_ms, 0, true, false, 0);
	if (err)
		pr_err("%s: Sanitize failed err=%d\n", mmc_hostname(host), err);

	/*
	 * If the sanitize operation timed out, the card is probably still busy
	 * in the R1_STATE_PRG. Rather than continue to wait, let's try to abort
	 * it with a HPI command to get back into R1_STATE_TRAN.
	 */
	if (err == -ETIMEDOUT && !mmc_interrupt_hpi(card))
		pr_warn("%s: Sanitize aborted\n", mmc_hostname(host));

	mmc_retune_release(host);

	pr_debug("%s: Sanitize completed\n", mmc_hostname(host));
	return err;
}
EXPORT_SYMBOL_GPL(mmc_sanitize);