12874c5fdSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-or-later 2da7fbe58SPierre Ossman /* 370f10482SPierre Ossman * linux/drivers/mmc/core/mmc_ops.h 4da7fbe58SPierre Ossman * 5da7fbe58SPierre Ossman * Copyright 2006-2007 Pierre Ossman 6da7fbe58SPierre Ossman */ 7da7fbe58SPierre Ossman 85a0e3ad6STejun Heo #include <linux/slab.h> 93ef77af1SPaul Gortmaker #include <linux/export.h> 10da7fbe58SPierre Ossman #include <linux/types.h> 11da7fbe58SPierre Ossman #include <linux/scatterlist.h> 12da7fbe58SPierre Ossman 13da7fbe58SPierre Ossman #include <linux/mmc/host.h> 14da7fbe58SPierre Ossman #include <linux/mmc/card.h> 15da7fbe58SPierre Ossman #include <linux/mmc/mmc.h> 16da7fbe58SPierre Ossman 17da7fbe58SPierre Ossman #include "core.h" 181cf8f7e5SUlf Hansson #include "card.h" 19c6dbab9cSAdrian Hunter #include "host.h" 20da7fbe58SPierre Ossman #include "mmc_ops.h" 21da7fbe58SPierre Ossman 2224ed3bd0SUlf Hansson #define MMC_BKOPS_TIMEOUT_MS (120 * 1000) /* 120s */ 2355c2b8b9SUlf Hansson #define MMC_SANITIZE_TIMEOUT_MS (240 * 1000) /* 240s */ 248fee476bSTrey Ramsay 2504cdbbfaSUlf Hansson static const u8 tuning_blk_pattern_4bit[] = { 2604cdbbfaSUlf Hansson 0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc, 2704cdbbfaSUlf Hansson 0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef, 2804cdbbfaSUlf Hansson 0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb, 2904cdbbfaSUlf Hansson 0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef, 3004cdbbfaSUlf Hansson 0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c, 3104cdbbfaSUlf Hansson 0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee, 3204cdbbfaSUlf Hansson 0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff, 3304cdbbfaSUlf Hansson 0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde, 3404cdbbfaSUlf Hansson }; 3504cdbbfaSUlf Hansson 3604cdbbfaSUlf Hansson static const u8 tuning_blk_pattern_8bit[] = { 3704cdbbfaSUlf Hansson 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00, 3804cdbbfaSUlf Hansson 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc, 3904cdbbfaSUlf Hansson 
0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff, 4004cdbbfaSUlf Hansson 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff, 4104cdbbfaSUlf Hansson 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd, 4204cdbbfaSUlf Hansson 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb, 4304cdbbfaSUlf Hansson 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff, 4404cdbbfaSUlf Hansson 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff, 4504cdbbfaSUlf Hansson 0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 4604cdbbfaSUlf Hansson 0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 4704cdbbfaSUlf Hansson 0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 4804cdbbfaSUlf Hansson 0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 4904cdbbfaSUlf Hansson 0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 5004cdbbfaSUlf Hansson 0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 5104cdbbfaSUlf Hansson 0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 5204cdbbfaSUlf Hansson 0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 5304cdbbfaSUlf Hansson }; 5404cdbbfaSUlf Hansson 5504f967adSUlf Hansson struct mmc_busy_data { 5604f967adSUlf Hansson struct mmc_card *card; 5704f967adSUlf Hansson bool retry_crc_err; 5804f967adSUlf Hansson enum mmc_busy_cmd busy_cmd; 5904f967adSUlf Hansson }; 6004f967adSUlf Hansson 612185bc2cSUlf Hansson int __mmc_send_status(struct mmc_card *card, u32 *status, unsigned int retries) 62a27fbf2fSSeungwon Jeon { 63a27fbf2fSSeungwon Jeon int err; 64c7836d15SMasahiro Yamada struct mmc_command cmd = {}; 65a27fbf2fSSeungwon Jeon 66a27fbf2fSSeungwon Jeon cmd.opcode = MMC_SEND_STATUS; 67a27fbf2fSSeungwon Jeon if (!mmc_host_is_spi(card->host)) 68a27fbf2fSSeungwon Jeon cmd.arg = card->rca << 16; 69a27fbf2fSSeungwon Jeon cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC; 70a27fbf2fSSeungwon Jeon 712185bc2cSUlf Hansson err = mmc_wait_for_cmd(card->host, &cmd, retries); 72a27fbf2fSSeungwon Jeon if (err) 73a27fbf2fSSeungwon Jeon return err; 74a27fbf2fSSeungwon Jeon 75a27fbf2fSSeungwon Jeon /* NOTE: callers are required to 
understand the difference 76a27fbf2fSSeungwon Jeon * between "native" and SPI format status words! 77a27fbf2fSSeungwon Jeon */ 78a27fbf2fSSeungwon Jeon if (status) 79a27fbf2fSSeungwon Jeon *status = cmd.resp[0]; 80a27fbf2fSSeungwon Jeon 81a27fbf2fSSeungwon Jeon return 0; 82a27fbf2fSSeungwon Jeon } 832185bc2cSUlf Hansson EXPORT_SYMBOL_GPL(__mmc_send_status); 842185bc2cSUlf Hansson 852185bc2cSUlf Hansson int mmc_send_status(struct mmc_card *card, u32 *status) 862185bc2cSUlf Hansson { 872185bc2cSUlf Hansson return __mmc_send_status(card, status, MMC_CMD_RETRIES); 882185bc2cSUlf Hansson } 891bee324aSLinus Walleij EXPORT_SYMBOL_GPL(mmc_send_status); 90a27fbf2fSSeungwon Jeon 91da7fbe58SPierre Ossman static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card) 92da7fbe58SPierre Ossman { 93c7836d15SMasahiro Yamada struct mmc_command cmd = {}; 94da7fbe58SPierre Ossman 95da7fbe58SPierre Ossman cmd.opcode = MMC_SELECT_CARD; 96da7fbe58SPierre Ossman 97da7fbe58SPierre Ossman if (card) { 98da7fbe58SPierre Ossman cmd.arg = card->rca << 16; 99da7fbe58SPierre Ossman cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; 100da7fbe58SPierre Ossman } else { 101da7fbe58SPierre Ossman cmd.arg = 0; 102da7fbe58SPierre Ossman cmd.flags = MMC_RSP_NONE | MMC_CMD_AC; 103da7fbe58SPierre Ossman } 104da7fbe58SPierre Ossman 1050899e741SMasahiro Yamada return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES); 106da7fbe58SPierre Ossman } 107da7fbe58SPierre Ossman 108da7fbe58SPierre Ossman int mmc_select_card(struct mmc_card *card) 109da7fbe58SPierre Ossman { 110da7fbe58SPierre Ossman 111da7fbe58SPierre Ossman return _mmc_select_card(card->host, card); 112da7fbe58SPierre Ossman } 113da7fbe58SPierre Ossman 114da7fbe58SPierre Ossman int mmc_deselect_cards(struct mmc_host *host) 115da7fbe58SPierre Ossman { 116da7fbe58SPierre Ossman return _mmc_select_card(host, NULL); 117da7fbe58SPierre Ossman } 118da7fbe58SPierre Ossman 1193d705d14SSascha Hauer /* 1203d705d14SSascha Hauer * Write the value specified in the 
device tree or board code into the optional 1213d705d14SSascha Hauer * 16 bit Driver Stage Register. This can be used to tune raise/fall times and 1223d705d14SSascha Hauer * drive strength of the DAT and CMD outputs. The actual meaning of a given 1233d705d14SSascha Hauer * value is hardware dependant. 1243d705d14SSascha Hauer * The presence of the DSR register can be determined from the CSD register, 1253d705d14SSascha Hauer * bit 76. 1263d705d14SSascha Hauer */ 1273d705d14SSascha Hauer int mmc_set_dsr(struct mmc_host *host) 1283d705d14SSascha Hauer { 129c7836d15SMasahiro Yamada struct mmc_command cmd = {}; 1303d705d14SSascha Hauer 1313d705d14SSascha Hauer cmd.opcode = MMC_SET_DSR; 1323d705d14SSascha Hauer 1333d705d14SSascha Hauer cmd.arg = (host->dsr << 16) | 0xffff; 1343d705d14SSascha Hauer cmd.flags = MMC_RSP_NONE | MMC_CMD_AC; 1353d705d14SSascha Hauer 1363d705d14SSascha Hauer return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES); 1373d705d14SSascha Hauer } 1383d705d14SSascha Hauer 139da7fbe58SPierre Ossman int mmc_go_idle(struct mmc_host *host) 140da7fbe58SPierre Ossman { 141da7fbe58SPierre Ossman int err; 142c7836d15SMasahiro Yamada struct mmc_command cmd = {}; 143da7fbe58SPierre Ossman 144af517150SDavid Brownell /* 145af517150SDavid Brownell * Non-SPI hosts need to prevent chipselect going active during 146af517150SDavid Brownell * GO_IDLE; that would put chips into SPI mode. Remind them of 147af517150SDavid Brownell * that in case of hardware that won't pull up DAT3/nCS otherwise. 148af517150SDavid Brownell * 149af517150SDavid Brownell * SPI hosts ignore ios.chip_select; it's managed according to 15025985edcSLucas De Marchi * rules that must accommodate non-MMC slaves which this layer 151af517150SDavid Brownell * won't even know about. 
152af517150SDavid Brownell */ 153af517150SDavid Brownell if (!mmc_host_is_spi(host)) { 154da7fbe58SPierre Ossman mmc_set_chip_select(host, MMC_CS_HIGH); 155da7fbe58SPierre Ossman mmc_delay(1); 156af517150SDavid Brownell } 157da7fbe58SPierre Ossman 158da7fbe58SPierre Ossman cmd.opcode = MMC_GO_IDLE_STATE; 159da7fbe58SPierre Ossman cmd.arg = 0; 160af517150SDavid Brownell cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC; 161da7fbe58SPierre Ossman 162da7fbe58SPierre Ossman err = mmc_wait_for_cmd(host, &cmd, 0); 163da7fbe58SPierre Ossman 164da7fbe58SPierre Ossman mmc_delay(1); 165da7fbe58SPierre Ossman 166af517150SDavid Brownell if (!mmc_host_is_spi(host)) { 167da7fbe58SPierre Ossman mmc_set_chip_select(host, MMC_CS_DONTCARE); 168da7fbe58SPierre Ossman mmc_delay(1); 169af517150SDavid Brownell } 170af517150SDavid Brownell 171af517150SDavid Brownell host->use_spi_crc = 0; 172da7fbe58SPierre Ossman 173da7fbe58SPierre Ossman return err; 174da7fbe58SPierre Ossman } 175da7fbe58SPierre Ossman 176da7fbe58SPierre Ossman int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr) 177da7fbe58SPierre Ossman { 178c7836d15SMasahiro Yamada struct mmc_command cmd = {}; 179da7fbe58SPierre Ossman int i, err = 0; 180da7fbe58SPierre Ossman 181da7fbe58SPierre Ossman cmd.opcode = MMC_SEND_OP_COND; 182af517150SDavid Brownell cmd.arg = mmc_host_is_spi(host) ? 
0 : ocr; 183af517150SDavid Brownell cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR; 184da7fbe58SPierre Ossman 185da7fbe58SPierre Ossman for (i = 100; i; i--) { 186da7fbe58SPierre Ossman err = mmc_wait_for_cmd(host, &cmd, 0); 18717b0429dSPierre Ossman if (err) 188da7fbe58SPierre Ossman break; 189da7fbe58SPierre Ossman 1904c94cb65SYoshihiro Shimoda /* wait until reset completes */ 191af517150SDavid Brownell if (mmc_host_is_spi(host)) { 192af517150SDavid Brownell if (!(cmd.resp[0] & R1_SPI_IDLE)) 193af517150SDavid Brownell break; 194af517150SDavid Brownell } else { 195af517150SDavid Brownell if (cmd.resp[0] & MMC_CARD_BUSY) 196af517150SDavid Brownell break; 197af517150SDavid Brownell } 198af517150SDavid Brownell 19917b0429dSPierre Ossman err = -ETIMEDOUT; 200da7fbe58SPierre Ossman 201da7fbe58SPierre Ossman mmc_delay(10); 2024c94cb65SYoshihiro Shimoda 2034c94cb65SYoshihiro Shimoda /* 2044c94cb65SYoshihiro Shimoda * According to eMMC specification v5.1 section 6.4.3, we 2054c94cb65SYoshihiro Shimoda * should issue CMD1 repeatedly in the idle state until 2064c94cb65SYoshihiro Shimoda * the eMMC is ready. Otherwise some eMMC devices seem to enter 2074c94cb65SYoshihiro Shimoda * the inactive mode after mmc_init_card() issued CMD0 when 2084c94cb65SYoshihiro Shimoda * the eMMC device is busy. 
2094c94cb65SYoshihiro Shimoda */ 2104c94cb65SYoshihiro Shimoda if (!ocr && !mmc_host_is_spi(host)) 2114c94cb65SYoshihiro Shimoda cmd.arg = cmd.resp[0] | BIT(30); 212da7fbe58SPierre Ossman } 213da7fbe58SPierre Ossman 214af517150SDavid Brownell if (rocr && !mmc_host_is_spi(host)) 215da7fbe58SPierre Ossman *rocr = cmd.resp[0]; 216da7fbe58SPierre Ossman 217da7fbe58SPierre Ossman return err; 218da7fbe58SPierre Ossman } 219da7fbe58SPierre Ossman 220da7fbe58SPierre Ossman int mmc_set_relative_addr(struct mmc_card *card) 221da7fbe58SPierre Ossman { 222c7836d15SMasahiro Yamada struct mmc_command cmd = {}; 223da7fbe58SPierre Ossman 224da7fbe58SPierre Ossman cmd.opcode = MMC_SET_RELATIVE_ADDR; 225da7fbe58SPierre Ossman cmd.arg = card->rca << 16; 226da7fbe58SPierre Ossman cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; 227da7fbe58SPierre Ossman 2280899e741SMasahiro Yamada return mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES); 229da7fbe58SPierre Ossman } 230da7fbe58SPierre Ossman 231af517150SDavid Brownell static int 232af517150SDavid Brownell mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode) 233da7fbe58SPierre Ossman { 234da7fbe58SPierre Ossman int err; 235c7836d15SMasahiro Yamada struct mmc_command cmd = {}; 236da7fbe58SPierre Ossman 237af517150SDavid Brownell cmd.opcode = opcode; 238af517150SDavid Brownell cmd.arg = arg; 239da7fbe58SPierre Ossman cmd.flags = MMC_RSP_R2 | MMC_CMD_AC; 240da7fbe58SPierre Ossman 241af517150SDavid Brownell err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES); 24217b0429dSPierre Ossman if (err) 243da7fbe58SPierre Ossman return err; 244da7fbe58SPierre Ossman 245af517150SDavid Brownell memcpy(cxd, cmd.resp, sizeof(u32) * 4); 246da7fbe58SPierre Ossman 24717b0429dSPierre Ossman return 0; 248da7fbe58SPierre Ossman } 249da7fbe58SPierre Ossman 2501a41313eSKyungsik Lee /* 2511a41313eSKyungsik Lee * NOTE: void *buf, caller for the buf is required to use DMA-capable 2521a41313eSKyungsik Lee * buffer or on-stack buffer (with some overhead in 
callee). 2531a41313eSKyungsik Lee */ 254cec18ad9SUlf Hansson int mmc_send_adtc_data(struct mmc_card *card, struct mmc_host *host, u32 opcode, 255cec18ad9SUlf Hansson u32 args, void *buf, unsigned len) 256da7fbe58SPierre Ossman { 257c7836d15SMasahiro Yamada struct mmc_request mrq = {}; 258c7836d15SMasahiro Yamada struct mmc_command cmd = {}; 259c7836d15SMasahiro Yamada struct mmc_data data = {}; 260da7fbe58SPierre Ossman struct scatterlist sg; 261da7fbe58SPierre Ossman 262da7fbe58SPierre Ossman mrq.cmd = &cmd; 263da7fbe58SPierre Ossman mrq.data = &data; 264da7fbe58SPierre Ossman 265af517150SDavid Brownell cmd.opcode = opcode; 266cec18ad9SUlf Hansson cmd.arg = args; 267da7fbe58SPierre Ossman 268af517150SDavid Brownell /* NOTE HACK: the MMC_RSP_SPI_R1 is always correct here, but we 269af517150SDavid Brownell * rely on callers to never use this with "native" calls for reading 270af517150SDavid Brownell * CSD or CID. Native versions of those commands use the R2 type, 271af517150SDavid Brownell * not R1 plus a data block. 272af517150SDavid Brownell */ 273af517150SDavid Brownell cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC; 274af517150SDavid Brownell 275af517150SDavid Brownell data.blksz = len; 276da7fbe58SPierre Ossman data.blocks = 1; 277da7fbe58SPierre Ossman data.flags = MMC_DATA_READ; 278da7fbe58SPierre Ossman data.sg = &sg; 279da7fbe58SPierre Ossman data.sg_len = 1; 280da7fbe58SPierre Ossman 281601ed60cSUlf Hansson sg_init_one(&sg, buf, len); 282da7fbe58SPierre Ossman 283cda56ac2SAdrian Hunter if (opcode == MMC_SEND_CSD || opcode == MMC_SEND_CID) { 2840d3e0460SMatthew Fleming /* 2850d3e0460SMatthew Fleming * The spec states that CSR and CID accesses have a timeout 2860d3e0460SMatthew Fleming * of 64 clock cycles. 
2870d3e0460SMatthew Fleming */ 2880d3e0460SMatthew Fleming data.timeout_ns = 0; 2890d3e0460SMatthew Fleming data.timeout_clks = 64; 290cda56ac2SAdrian Hunter } else 291cda56ac2SAdrian Hunter mmc_set_data_timeout(&data, card); 292da7fbe58SPierre Ossman 293af517150SDavid Brownell mmc_wait_for_req(host, &mrq); 294af517150SDavid Brownell 29517b0429dSPierre Ossman if (cmd.error) 296da7fbe58SPierre Ossman return cmd.error; 29717b0429dSPierre Ossman if (data.error) 298da7fbe58SPierre Ossman return data.error; 299da7fbe58SPierre Ossman 30017b0429dSPierre Ossman return 0; 301da7fbe58SPierre Ossman } 302da7fbe58SPierre Ossman 303b53f0beeSYue Hu static int mmc_spi_send_cxd(struct mmc_host *host, u32 *cxd, u32 opcode) 304af517150SDavid Brownell { 30578e48073SPierre Ossman int ret, i; 306b53f0beeSYue Hu __be32 *cxd_tmp; 30778e48073SPierre Ossman 308b53f0beeSYue Hu cxd_tmp = kzalloc(16, GFP_KERNEL); 309b53f0beeSYue Hu if (!cxd_tmp) 3101a41313eSKyungsik Lee return -ENOMEM; 3111a41313eSKyungsik Lee 312cec18ad9SUlf Hansson ret = mmc_send_adtc_data(NULL, host, opcode, 0, cxd_tmp, 16); 31378e48073SPierre Ossman if (ret) 3141a41313eSKyungsik Lee goto err; 31578e48073SPierre Ossman 31678e48073SPierre Ossman for (i = 0; i < 4; i++) 317b53f0beeSYue Hu cxd[i] = be32_to_cpu(cxd_tmp[i]); 31878e48073SPierre Ossman 3191a41313eSKyungsik Lee err: 320b53f0beeSYue Hu kfree(cxd_tmp); 3211a41313eSKyungsik Lee return ret; 322af517150SDavid Brownell } 323af517150SDavid Brownell 3240796e439SUlf Hansson int mmc_send_csd(struct mmc_card *card, u32 *csd) 3250796e439SUlf Hansson { 3260796e439SUlf Hansson if (mmc_host_is_spi(card->host)) 327b53f0beeSYue Hu return mmc_spi_send_cxd(card->host, csd, MMC_SEND_CSD); 3280796e439SUlf Hansson 3290796e439SUlf Hansson return mmc_send_cxd_native(card->host, card->rca << 16, csd, 3300796e439SUlf Hansson MMC_SEND_CSD); 3310796e439SUlf Hansson } 3320796e439SUlf Hansson 333a1473732SUlf Hansson int mmc_send_cid(struct mmc_host *host, u32 *cid) 334a1473732SUlf Hansson { 
335a1473732SUlf Hansson if (mmc_host_is_spi(host)) 336b53f0beeSYue Hu return mmc_spi_send_cxd(host, cid, MMC_SEND_CID); 337a1473732SUlf Hansson 338c92e68d8SUlf Hansson return mmc_send_cxd_native(host, 0, cid, MMC_ALL_SEND_CID); 339a1473732SUlf Hansson } 340a1473732SUlf Hansson 341e21aa519SUlf Hansson int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd) 342e21aa519SUlf Hansson { 343e21aa519SUlf Hansson int err; 344e21aa519SUlf Hansson u8 *ext_csd; 345e21aa519SUlf Hansson 346e21aa519SUlf Hansson if (!card || !new_ext_csd) 347e21aa519SUlf Hansson return -EINVAL; 348e21aa519SUlf Hansson 349e21aa519SUlf Hansson if (!mmc_can_ext_csd(card)) 350e21aa519SUlf Hansson return -EOPNOTSUPP; 351e21aa519SUlf Hansson 352e21aa519SUlf Hansson /* 353e21aa519SUlf Hansson * As the ext_csd is so large and mostly unused, we don't store the 354e21aa519SUlf Hansson * raw block in mmc_card. 355e21aa519SUlf Hansson */ 35622b78700SUlf Hansson ext_csd = kzalloc(512, GFP_KERNEL); 357e21aa519SUlf Hansson if (!ext_csd) 358e21aa519SUlf Hansson return -ENOMEM; 359e21aa519SUlf Hansson 360cec18ad9SUlf Hansson err = mmc_send_adtc_data(card, card->host, MMC_SEND_EXT_CSD, 0, ext_csd, 3612fc91e8bSUlf Hansson 512); 362e21aa519SUlf Hansson if (err) 363e21aa519SUlf Hansson kfree(ext_csd); 364e21aa519SUlf Hansson else 365e21aa519SUlf Hansson *new_ext_csd = ext_csd; 366e21aa519SUlf Hansson 367e21aa519SUlf Hansson return err; 368e21aa519SUlf Hansson } 369e21aa519SUlf Hansson EXPORT_SYMBOL_GPL(mmc_get_ext_csd); 370e21aa519SUlf Hansson 371af517150SDavid Brownell int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp) 372af517150SDavid Brownell { 373c7836d15SMasahiro Yamada struct mmc_command cmd = {}; 374af517150SDavid Brownell int err; 375af517150SDavid Brownell 376af517150SDavid Brownell cmd.opcode = MMC_SPI_READ_OCR; 377af517150SDavid Brownell cmd.arg = highcap ? 
(1 << 30) : 0; 378af517150SDavid Brownell cmd.flags = MMC_RSP_SPI_R3; 379af517150SDavid Brownell 380af517150SDavid Brownell err = mmc_wait_for_cmd(host, &cmd, 0); 381af517150SDavid Brownell 382af517150SDavid Brownell *ocrp = cmd.resp[1]; 383af517150SDavid Brownell return err; 384af517150SDavid Brownell } 385af517150SDavid Brownell 386af517150SDavid Brownell int mmc_spi_set_crc(struct mmc_host *host, int use_crc) 387af517150SDavid Brownell { 388c7836d15SMasahiro Yamada struct mmc_command cmd = {}; 389af517150SDavid Brownell int err; 390af517150SDavid Brownell 391af517150SDavid Brownell cmd.opcode = MMC_SPI_CRC_ON_OFF; 392af517150SDavid Brownell cmd.flags = MMC_RSP_SPI_R1; 393af517150SDavid Brownell cmd.arg = use_crc; 394af517150SDavid Brownell 395af517150SDavid Brownell err = mmc_wait_for_cmd(host, &cmd, 0); 396af517150SDavid Brownell if (!err) 397af517150SDavid Brownell host->use_spi_crc = use_crc; 398af517150SDavid Brownell return err; 399af517150SDavid Brownell } 400af517150SDavid Brownell 40120348d19SUlf Hansson static int mmc_switch_status_error(struct mmc_host *host, u32 status) 402ed16f58dSAdrian Hunter { 403ed16f58dSAdrian Hunter if (mmc_host_is_spi(host)) { 404ed16f58dSAdrian Hunter if (status & R1_SPI_ILLEGAL_COMMAND) 405ed16f58dSAdrian Hunter return -EBADMSG; 406ed16f58dSAdrian Hunter } else { 407a94a7483SShawn Lin if (R1_STATUS(status)) 408ed16f58dSAdrian Hunter pr_warn("%s: unexpected status %#x after switch\n", 409ed16f58dSAdrian Hunter mmc_hostname(host), status); 410ed16f58dSAdrian Hunter if (status & R1_SWITCH_ERROR) 411ed16f58dSAdrian Hunter return -EBADMSG; 412ed16f58dSAdrian Hunter } 413ed16f58dSAdrian Hunter return 0; 414ed16f58dSAdrian Hunter } 415ed16f58dSAdrian Hunter 41620348d19SUlf Hansson /* Caller must hold re-tuning */ 41760db8a47SUlf Hansson int mmc_switch_status(struct mmc_card *card, bool crc_err_fatal) 41820348d19SUlf Hansson { 41920348d19SUlf Hansson u32 status; 42020348d19SUlf Hansson int err; 42120348d19SUlf Hansson 
42220348d19SUlf Hansson err = mmc_send_status(card, &status); 423ef3d2322SAdrian Hunter if (!crc_err_fatal && err == -EILSEQ) 424ef3d2322SAdrian Hunter return 0; 42520348d19SUlf Hansson if (err) 42620348d19SUlf Hansson return err; 42720348d19SUlf Hansson 42820348d19SUlf Hansson return mmc_switch_status_error(card->host, status); 42920348d19SUlf Hansson } 43020348d19SUlf Hansson 43104f967adSUlf Hansson static int mmc_busy_cb(void *cb_data, bool *busy) 4326972096aSUlf Hansson { 43304f967adSUlf Hansson struct mmc_busy_data *data = cb_data; 43404f967adSUlf Hansson struct mmc_host *host = data->card->host; 4356972096aSUlf Hansson u32 status = 0; 4366972096aSUlf Hansson int err; 4376972096aSUlf Hansson 438972d5084SUlf Hansson if (data->busy_cmd != MMC_BUSY_IO && host->ops->card_busy) { 4396972096aSUlf Hansson *busy = host->ops->card_busy(host); 4406972096aSUlf Hansson return 0; 4416972096aSUlf Hansson } 4426972096aSUlf Hansson 44304f967adSUlf Hansson err = mmc_send_status(data->card, &status); 44404f967adSUlf Hansson if (data->retry_crc_err && err == -EILSEQ) { 4456972096aSUlf Hansson *busy = true; 4466972096aSUlf Hansson return 0; 4476972096aSUlf Hansson } 4486972096aSUlf Hansson if (err) 4496972096aSUlf Hansson return err; 4506972096aSUlf Hansson 45104f967adSUlf Hansson switch (data->busy_cmd) { 4520d84c3e6SUlf Hansson case MMC_BUSY_CMD6: 45304f967adSUlf Hansson err = mmc_switch_status_error(host, status); 4540d84c3e6SUlf Hansson break; 4550d84c3e6SUlf Hansson case MMC_BUSY_ERASE: 4560d84c3e6SUlf Hansson err = R1_STATUS(status) ? 
-EIO : 0; 4570d84c3e6SUlf Hansson break; 458490ff95fSUlf Hansson case MMC_BUSY_HPI: 459130206a6SUlf Hansson case MMC_BUSY_EXTR_SINGLE: 460972d5084SUlf Hansson case MMC_BUSY_IO: 461490ff95fSUlf Hansson break; 4620d84c3e6SUlf Hansson default: 4630d84c3e6SUlf Hansson err = -EINVAL; 4640d84c3e6SUlf Hansson } 4650d84c3e6SUlf Hansson 4666972096aSUlf Hansson if (err) 4676972096aSUlf Hansson return err; 4686972096aSUlf Hansson 4692a1c7cdaSUlf Hansson *busy = !mmc_ready_for_data(status); 4706972096aSUlf Hansson return 0; 4716972096aSUlf Hansson } 4726972096aSUlf Hansson 47304f967adSUlf Hansson int __mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms, 47404f967adSUlf Hansson int (*busy_cb)(void *cb_data, bool *busy), 47504f967adSUlf Hansson void *cb_data) 476716bdb89SUlf Hansson { 477716bdb89SUlf Hansson struct mmc_host *host = card->host; 478716bdb89SUlf Hansson int err; 479716bdb89SUlf Hansson unsigned long timeout; 480d46a24a9SUlf Hansson unsigned int udelay = 32, udelay_max = 32768; 481716bdb89SUlf Hansson bool expired = false; 482716bdb89SUlf Hansson bool busy = false; 483716bdb89SUlf Hansson 484716bdb89SUlf Hansson timeout = jiffies + msecs_to_jiffies(timeout_ms) + 1; 485716bdb89SUlf Hansson do { 486716bdb89SUlf Hansson /* 48770562644SUlf Hansson * Due to the possibility of being preempted while polling, 48870562644SUlf Hansson * check the expiration time first. 489716bdb89SUlf Hansson */ 490716bdb89SUlf Hansson expired = time_after(jiffies, timeout); 49170562644SUlf Hansson 49204f967adSUlf Hansson err = (*busy_cb)(cb_data, &busy); 4935ec32f84SUlf Hansson if (err) 4945ec32f84SUlf Hansson return err; 495716bdb89SUlf Hansson 49670562644SUlf Hansson /* Timeout if the device still remains busy. */ 49770562644SUlf Hansson if (expired && busy) { 49870562644SUlf Hansson pr_err("%s: Card stuck being busy! 
%s\n", 499716bdb89SUlf Hansson mmc_hostname(host), __func__); 500716bdb89SUlf Hansson return -ETIMEDOUT; 501716bdb89SUlf Hansson } 502d46a24a9SUlf Hansson 503d46a24a9SUlf Hansson /* Throttle the polling rate to avoid hogging the CPU. */ 504d46a24a9SUlf Hansson if (busy) { 505d46a24a9SUlf Hansson usleep_range(udelay, udelay * 2); 506d46a24a9SUlf Hansson if (udelay < udelay_max) 507d46a24a9SUlf Hansson udelay *= 2; 508d46a24a9SUlf Hansson } 50970562644SUlf Hansson } while (busy); 510716bdb89SUlf Hansson 5115ec32f84SUlf Hansson return 0; 512716bdb89SUlf Hansson } 513*6966e609SUlf Hansson EXPORT_SYMBOL_GPL(__mmc_poll_for_busy); 514716bdb89SUlf Hansson 5150d84c3e6SUlf Hansson int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms, 51604f967adSUlf Hansson bool retry_crc_err, enum mmc_busy_cmd busy_cmd) 5170d84c3e6SUlf Hansson { 51804f967adSUlf Hansson struct mmc_busy_data cb_data; 51904f967adSUlf Hansson 52004f967adSUlf Hansson cb_data.card = card; 52104f967adSUlf Hansson cb_data.retry_crc_err = retry_crc_err; 52204f967adSUlf Hansson cb_data.busy_cmd = busy_cmd; 52304f967adSUlf Hansson 52404f967adSUlf Hansson return __mmc_poll_for_busy(card, timeout_ms, &mmc_busy_cb, &cb_data); 5250d84c3e6SUlf Hansson } 526972d5084SUlf Hansson EXPORT_SYMBOL_GPL(mmc_poll_for_busy); 5270d84c3e6SUlf Hansson 528e62f1e0bSUlf Hansson bool mmc_prepare_busy_cmd(struct mmc_host *host, struct mmc_command *cmd, 529e62f1e0bSUlf Hansson unsigned int timeout_ms) 530e62f1e0bSUlf Hansson { 531e62f1e0bSUlf Hansson /* 532e62f1e0bSUlf Hansson * If the max_busy_timeout of the host is specified, make sure it's 533e62f1e0bSUlf Hansson * enough to fit the used timeout_ms. In case it's not, let's instruct 534e62f1e0bSUlf Hansson * the host to avoid HW busy detection, by converting to a R1 response 535e62f1e0bSUlf Hansson * instead of a R1B. Note, some hosts requires R1B, which also means 536e62f1e0bSUlf Hansson * they are on their own when it comes to deal with the busy timeout. 
537e62f1e0bSUlf Hansson */ 538e62f1e0bSUlf Hansson if (!(host->caps & MMC_CAP_NEED_RSP_BUSY) && host->max_busy_timeout && 539e62f1e0bSUlf Hansson (timeout_ms > host->max_busy_timeout)) { 540e62f1e0bSUlf Hansson cmd->flags = MMC_CMD_AC | MMC_RSP_SPI_R1 | MMC_RSP_R1; 541e62f1e0bSUlf Hansson return false; 542e62f1e0bSUlf Hansson } 543e62f1e0bSUlf Hansson 544e62f1e0bSUlf Hansson cmd->flags = MMC_CMD_AC | MMC_RSP_SPI_R1B | MMC_RSP_R1B; 545e62f1e0bSUlf Hansson cmd->busy_timeout = timeout_ms; 546e62f1e0bSUlf Hansson return true; 547e62f1e0bSUlf Hansson } 548e62f1e0bSUlf Hansson 549d3a8d95dSAndrei Warkentin /** 550950d56acSJaehoon Chung * __mmc_switch - modify EXT_CSD register 551d3a8d95dSAndrei Warkentin * @card: the MMC card associated with the data transfer 552d3a8d95dSAndrei Warkentin * @set: cmd set values 553d3a8d95dSAndrei Warkentin * @index: EXT_CSD register index 554d3a8d95dSAndrei Warkentin * @value: value to program into EXT_CSD register 555d3a8d95dSAndrei Warkentin * @timeout_ms: timeout (ms) for operation performed by register write, 556d3a8d95dSAndrei Warkentin * timeout of zero implies maximum possible timeout 557aa33ce3cSUlf Hansson * @timing: new timing to change to 558878e200bSUlf Hansson * @send_status: send status cmd to poll for busy 559625228faSUlf Hansson * @retry_crc_err: retry when CRC errors when polling with CMD13 for busy 5605e52a168SBean Huo * @retries: number of retries 561d3a8d95dSAndrei Warkentin * 562d3a8d95dSAndrei Warkentin * Modifies the EXT_CSD register for selected card. 
563d3a8d95dSAndrei Warkentin */ 564950d56acSJaehoon Chung int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value, 565aa33ce3cSUlf Hansson unsigned int timeout_ms, unsigned char timing, 5665e52a168SBean Huo bool send_status, bool retry_crc_err, unsigned int retries) 567da7fbe58SPierre Ossman { 568636bd13cSUlf Hansson struct mmc_host *host = card->host; 569da7fbe58SPierre Ossman int err; 570c7836d15SMasahiro Yamada struct mmc_command cmd = {}; 571e62f1e0bSUlf Hansson bool use_r1b_resp; 572aa33ce3cSUlf Hansson unsigned char old_timing = host->ios.timing; 573b9ec2616SUlf Hansson 574c6dbab9cSAdrian Hunter mmc_retune_hold(host); 575c6dbab9cSAdrian Hunter 576533a6cfeSUlf Hansson if (!timeout_ms) { 577533a6cfeSUlf Hansson pr_warn("%s: unspecified timeout for CMD6 - use generic\n", 578533a6cfeSUlf Hansson mmc_hostname(host)); 579533a6cfeSUlf Hansson timeout_ms = card->ext_csd.generic_cmd6_time; 580533a6cfeSUlf Hansson } 581533a6cfeSUlf Hansson 582da7fbe58SPierre Ossman cmd.opcode = MMC_SWITCH; 583da7fbe58SPierre Ossman cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) | 584da7fbe58SPierre Ossman (index << 16) | 585da7fbe58SPierre Ossman (value << 8) | 586da7fbe58SPierre Ossman set; 587e62f1e0bSUlf Hansson use_r1b_resp = mmc_prepare_busy_cmd(host, &cmd, timeout_ms); 588b9ec2616SUlf Hansson 5895e52a168SBean Huo err = mmc_wait_for_cmd(host, &cmd, retries); 59017b0429dSPierre Ossman if (err) 591c6dbab9cSAdrian Hunter goto out; 592da7fbe58SPierre Ossman 593cb26ce06SUlf Hansson /*If SPI or used HW busy detection above, then we don't need to poll. 
*/ 594cb26ce06SUlf Hansson if (((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp) || 595ee6ff743SUlf Hansson mmc_host_is_spi(host)) 596aa33ce3cSUlf Hansson goto out_tim; 597a27fbf2fSSeungwon Jeon 5981e0b069bSUlf Hansson /* 5991e0b069bSUlf Hansson * If the host doesn't support HW polling via the ->card_busy() ops and 6001e0b069bSUlf Hansson * when it's not allowed to poll by using CMD13, then we need to rely on 6011e0b069bSUlf Hansson * waiting the stated timeout to be sufficient. 6021e0b069bSUlf Hansson */ 6031e0b069bSUlf Hansson if (!send_status && !host->ops->card_busy) { 6041e0b069bSUlf Hansson mmc_delay(timeout_ms); 6051e0b069bSUlf Hansson goto out_tim; 6061e0b069bSUlf Hansson } 6071e0b069bSUlf Hansson 608716bdb89SUlf Hansson /* Let's try to poll to find out when the command is completed. */ 60904f967adSUlf Hansson err = mmc_poll_for_busy(card, timeout_ms, retry_crc_err, MMC_BUSY_CMD6); 610ee6ff743SUlf Hansson if (err) 611ee6ff743SUlf Hansson goto out; 612aa33ce3cSUlf Hansson 613aa33ce3cSUlf Hansson out_tim: 614ee6ff743SUlf Hansson /* Switch to new timing before check switch status. 
 */
	if (timing)
		mmc_set_timing(host, timing);

	if (send_status) {
		err = mmc_switch_status(card, true);
		/* If the status check failed, roll back to the old timing. */
		if (err && timing)
			mmc_set_timing(host, old_timing);
	}
out:
	mmc_retune_release(host);

	return err;
}

/**
 * mmc_switch - modify EXT_CSD register
 * @card: the MMC card associated with the data transfer
 * @set: cmd set values
 * @index: EXT_CSD register index
 * @value: value to program into EXT_CSD register
 * @timeout_ms: timeout (ms) for operation performed by register write,
 *   timeout of zero implies maximum possible timeout
 *
 * Convenience wrapper around __mmc_switch() that waits for busy, checks
 * the switch status and retries the command up to MMC_CMD_RETRIES times.
 */
int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
		unsigned int timeout_ms)
{
	return __mmc_switch(card, set, index, value, timeout_ms, 0,
			    true, false, MMC_CMD_RETRIES);
}
EXPORT_SYMBOL_GPL(mmc_switch);

/**
 * mmc_send_tuning - send a tuning block read and verify the pattern
 * @host: MMC host to issue the command on
 * @opcode: tuning command opcode (e.g. CMD19/CMD21)
 * @cmd_error: if non-NULL, the raw command error is stored here
 *
 * Reads one tuning block sized for the current bus width (8-bit or 4-bit;
 * any other width is -EINVAL) and compares it against the expected tuning
 * pattern.  Returns 0 on match, -EIO on mismatch, or the command/data error.
 */
int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;
	struct mmc_ios *ios = &host->ios;
	const u8 *tuning_block_pattern;
	int size, err = 0;
	u8 *data_buf;

	if (ios->bus_width == MMC_BUS_WIDTH_8) {
		tuning_block_pattern = tuning_blk_pattern_8bit;
		size = sizeof(tuning_blk_pattern_8bit);
	} else if (ios->bus_width == MMC_BUS_WIDTH_4) {
		tuning_block_pattern = tuning_blk_pattern_4bit;
		size = sizeof(tuning_blk_pattern_4bit);
	} else
		return -EINVAL;

	/* DMA-safe bounce buffer; stack memory must not be used for DMA. */
	data_buf = kzalloc(size, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;

	mrq.cmd = &cmd;
	mrq.data = &data;

	cmd.opcode = opcode;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = size;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	/*
	 * According to the tuning specs, the tuning process is normally
	 * shorter than 40 executions of CMD19, and the timeout value
	 * should be shorter than 150 ms.
	 */
	data.timeout_ns = 150 * NSEC_PER_MSEC;

	data.sg = &sg;
	data.sg_len = 1;
	sg_init_one(&sg, data_buf, size);

	mmc_wait_for_req(host, &mrq);

	/* Report the raw command error even when the data phase also failed. */
	if (cmd_error)
		*cmd_error = cmd.error;

	if (cmd.error) {
		err = cmd.error;
		goto out;
	}

	if (data.error) {
		err = data.error;
		goto out;
	}

	if (memcmp(data_buf, tuning_block_pattern, size))
		err = -EIO;

out:
	kfree(data_buf);
	return err;
}
EXPORT_SYMBOL_GPL(mmc_send_tuning);

/**
 * mmc_send_abort_tuning - abort an in-flight eMMC tuning command
 * @host: MMC host to issue the command on
 * @opcode: the tuning opcode that was sent
 *
 * Sends CMD12 (STOP_TRANSMISSION) to stop a tuning command, but only for
 * MMC_SEND_TUNING_BLOCK_HS200 (eMMC); for any other opcode it is a no-op
 * returning 0, since the SD specification does not define this behavior.
 */
int mmc_send_abort_tuning(struct mmc_host *host, u32 opcode)
{
	struct mmc_command cmd = {};

	/*
	 * eMMC specification specifies that CMD12 can be used to stop a tuning
	 * command, but SD specification does not, so do nothing unless it is
	 * eMMC.
	 */
	if (opcode != MMC_SEND_TUNING_BLOCK_HS200)
		return 0;

	cmd.opcode = MMC_STOP_TRANSMISSION;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;

	/*
	 * For drivers that override R1 to R1b, set an arbitrary timeout based
	 * on the tuning timeout i.e. 150ms.
	 */
	cmd.busy_timeout = 150;

	return mmc_wait_for_cmd(host, &cmd, 0);
}
EXPORT_SYMBOL_GPL(mmc_send_abort_tuning);

/*
 * Issue a single bus-test command (MMC_BUS_TEST_W or MMC_BUS_TEST_R) with a
 * pattern of @len bytes (4 for a 4-bit bus, 8 for an 8-bit bus).  For the
 * read direction the returned data is expected to be the bit-inverted test
 * pattern (xor == 0xff); a mismatch yields -EIO.
 */
static int
mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
		  u8 len)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;
	u8 *data_buf;
	u8 *test_buf;
	int i, err;
	static u8 testdata_8bit[8] = { 0x55, 0xaa, 0, 0, 0, 0, 0, 0 };
	static u8 testdata_4bit[4] = { 0x5a, 0, 0, 0 };

	/* dma onto stack is unsafe/nonportable, but callers to this
	 * routine normally provide temporary on-stack buffers ...
	 */
	data_buf = kmalloc(len, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;

	if (len == 8)
		test_buf = testdata_8bit;
	else if (len == 4)
		test_buf = testdata_4bit;
	else {
		pr_err("%s: Invalid bus_width %d\n",
		       mmc_hostname(host), len);
		kfree(data_buf);
		return -EINVAL;
	}

	if (opcode == MMC_BUS_TEST_W)
		memcpy(data_buf, test_buf, len);

	mrq.cmd = &cmd;
	mrq.data = &data;
	cmd.opcode = opcode;
	cmd.arg = 0;

	/* NOTE HACK: the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID.  Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = len;
	data.blocks = 1;
	if (opcode == MMC_BUS_TEST_R)
		data.flags = MMC_DATA_READ;
	else
		data.flags = MMC_DATA_WRITE;

	data.sg = &sg;
	data.sg_len = 1;
	mmc_set_data_timeout(&data, card);
	sg_init_one(&sg, data_buf, len);
	mmc_wait_for_req(host, &mrq);
	err = 0;
	if (opcode == MMC_BUS_TEST_R) {
		/* Only the first len / 4 bytes of the pattern are compared. */
		for (i = 0; i < len / 4; i++)
			if ((test_buf[i] ^ data_buf[i]) != 0xff) {
				err = -EIO;
				break;
			}
	}
	kfree(data_buf);

	/* Transport errors take precedence over the pattern-compare result. */
	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return err;
}

/*
 * Run the bus-test procedure for the given bus width: write the test
 * pattern, then read it back and verify.  A 1-bit bus needs no test and
 * returns 0 immediately.
 */
int mmc_bus_test(struct mmc_card *card, u8 bus_width)
{
	int width;

	if (bus_width == MMC_BUS_WIDTH_8)
		width = 8;
	else if (bus_width == MMC_BUS_WIDTH_4)
		width = 4;
	else if (bus_width == MMC_BUS_WIDTH_1)
		return 0; /* no need for test */
	else
		return -EINVAL;

	/*
	 * Ignore errors from BUS_TEST_W.  BUS_TEST_R will fail if there
	 * is a problem.  This improves chances that the test will work.
	 */
	mmc_send_bus_test(card, card->host, MMC_BUS_TEST_W, width);
	return mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
}

/*
 * Send the HPI command to the card and wait for it to leave the busy
 * state, either via hardware busy detection (R1b + MMC_CAP_WAIT_WHILE_BUSY)
 * or by polling with mmc_poll_for_busy().
 */
static int mmc_send_hpi_cmd(struct mmc_card *card)
{
	unsigned int busy_timeout_ms = card->ext_csd.out_of_int_time;
	struct mmc_host *host = card->host;
	bool use_r1b_resp = false;
	struct mmc_command cmd = {};
	int err;

	cmd.opcode = card->ext_csd.hpi_cmd;
	cmd.arg = card->rca << 16 | 1;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	/*
	 * Only CMD12-based HPI may use an R1b response; whether it actually
	 * does is decided by mmc_prepare_busy_cmd() from the host's limits.
	 */
	if (cmd.opcode == MMC_STOP_TRANSMISSION)
		use_r1b_resp = mmc_prepare_busy_cmd(host, &cmd,
						    busy_timeout_ms);

	err = mmc_wait_for_cmd(host, &cmd, 0);
	if (err) {
		pr_warn("%s: HPI error %d. Command response %#x\n",
			mmc_hostname(host), err, cmd.resp[0]);
		return err;
	}

	/* No need to poll when using HW busy detection. */
	if (host->caps & MMC_CAP_WAIT_WHILE_BUSY && use_r1b_resp)
		return 0;

	/* Let's poll to find out when the HPI request completes. */
	return mmc_poll_for_busy(card, busy_timeout_ms, false, MMC_BUSY_HPI);
}

/**
 * mmc_interrupt_hpi - Issue for High priority Interrupt
 * @card: the MMC card associated with the HPI transfer
 *
 * Issued High Priority Interrupt, and check for card status
 * until out-of prg-state.
 */
static int mmc_interrupt_hpi(struct mmc_card *card)
{
	int err;
	u32 status;

	if (!card->ext_csd.hpi_en) {
		pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
		return 1;
	}

	err = mmc_send_status(card, &status);
	if (err) {
		pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
		goto out;
	}

	switch (R1_CURRENT_STATE(status)) {
	case R1_STATE_IDLE:
	case R1_STATE_READY:
	case R1_STATE_STBY:
	case R1_STATE_TRAN:
		/*
		 * In idle and transfer states, HPI is not needed and the caller
		 * can issue the next intended command immediately
		 */
		goto out;
	case R1_STATE_PRG:
		break;
	default:
		/* In all other states, it's illegal to issue HPI */
		pr_debug("%s: HPI cannot be sent. Card state=%d\n",
			 mmc_hostname(card->host), R1_CURRENT_STATE(status));
		err = -EINVAL;
		goto out;
	}

	err = mmc_send_hpi_cmd(card);
out:
	return err;
}

/* EXT_CSD is only defined for cards reporting an MMCA version above 4.0. */
int mmc_can_ext_csd(struct mmc_card *card)
{
	return (card && card->csd.mmca_vsn > CSD_SPEC_VER_3);
}

/*
 * Refresh the cached BKOPS status and exception-events status bytes from
 * the card's EXT_CSD.  Returns 0 on success or the EXT_CSD read error.
 */
static int mmc_read_bkops_status(struct mmc_card *card)
{
	int err;
	u8 *ext_csd;

	err = mmc_get_ext_csd(card, &ext_csd);
	if (err)
		return err;

	card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS];
	card->ext_csd.raw_exception_status = ext_csd[EXT_CSD_EXP_EVENTS_STATUS];
	kfree(ext_csd);
	return 0;
}

/**
 * mmc_run_bkops - Run BKOPS for supported cards
 * @card: MMC card to run BKOPS for
 *
 * Run background operations synchronously for cards having manual BKOPS
 * enabled and in case it reports urgent BKOPS level.
 */
void mmc_run_bkops(struct mmc_card *card)
{
	int err;

	if (!card->ext_csd.man_bkops_en)
		return;

	err = mmc_read_bkops_status(card);
	if (err) {
		pr_err("%s: Failed to read bkops status: %d\n",
		       mmc_hostname(card->host), err);
		return;
	}

	/* Only act on urgent levels (LEVEL_2 and above). */
	if (!card->ext_csd.raw_bkops_status ||
	    card->ext_csd.raw_bkops_status < EXT_CSD_BKOPS_LEVEL_2)
		return;

	mmc_retune_hold(card->host);

	/*
	 * For urgent BKOPS status, LEVEL_2 and higher, let's execute
	 * synchronously. Future wise, we may consider to start BKOPS, for less
	 * urgent levels by using an asynchronous background task, when idle.
	 */
	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_BKOPS_START, 1, MMC_BKOPS_TIMEOUT_MS);
	if (err)
		pr_warn("%s: Error %d starting bkops\n",
			mmc_hostname(card->host), err);

	mmc_retune_release(card->host);
}
EXPORT_SYMBOL(mmc_run_bkops);

/*
 * Switch command queueing on or off via EXT_CSD_CMDQ_MODE_EN and mirror the
 * new state in card->ext_csd.cmdq_en on success.  Returns -EOPNOTSUPP when
 * the card does not advertise CMDQ support.
 */
static int mmc_cmdq_switch(struct mmc_card *card, bool enable)
{
	u8 val = enable ? EXT_CSD_CMDQ_MODE_ENABLED : 0;
	int err;

	if (!card->ext_csd.cmdq_support)
		return -EOPNOTSUPP;

	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_CMDQ_MODE_EN,
			 val, card->ext_csd.generic_cmd6_time);
	if (!err)
		card->ext_csd.cmdq_en = enable;

	return err;
}

/* Enable command queueing on @card. */
int mmc_cmdq_enable(struct mmc_card *card)
{
	return mmc_cmdq_switch(card, true);
}
EXPORT_SYMBOL_GPL(mmc_cmdq_enable);

/* Disable command queueing on @card. */
int mmc_cmdq_disable(struct mmc_card *card)
{
	return mmc_cmdq_switch(card, false);
}
EXPORT_SYMBOL_GPL(mmc_cmdq_disable);

/**
 * mmc_sanitize - start a SANITIZE operation on the card
 * @card: the MMC card to sanitize
 * @timeout_ms: busy timeout in ms; 0 selects MMC_SANITIZE_TIMEOUT_MS (240 s)
 *
 * Writes EXT_CSD_SANITIZE_START and waits for completion.  On timeout the
 * card is likely still in R1_STATE_PRG, so an HPI is attempted to abort the
 * operation and return the card to R1_STATE_TRAN.  Returns 0 on success,
 * -EOPNOTSUPP if the card cannot sanitize, or the switch error.
 */
int mmc_sanitize(struct mmc_card *card, unsigned int timeout_ms)
{
	struct mmc_host *host = card->host;
	int err;

	if (!mmc_can_sanitize(card)) {
		pr_warn("%s: Sanitize not supported\n", mmc_hostname(host));
		return -EOPNOTSUPP;
	}

	if (!timeout_ms)
		timeout_ms = MMC_SANITIZE_TIMEOUT_MS;

	pr_debug("%s: Sanitize in progress...\n", mmc_hostname(host));

	mmc_retune_hold(host);

	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_SANITIZE_START,
			   1, timeout_ms, 0, true, false, 0);
	if (err)
		pr_err("%s: Sanitize failed err=%d\n", mmc_hostname(host), err);

	/*
	 * If the sanitize operation timed out, the card is probably still busy
	 * in the R1_STATE_PRG. Rather than continue to wait, let's try to abort
	 * it with a HPI command to get back into R1_STATE_TRAN.
	 */
	if (err == -ETIMEDOUT && !mmc_interrupt_hpi(card))
		pr_warn("%s: Sanitize aborted\n", mmc_hostname(host));

	mmc_retune_release(host);

	pr_debug("%s: Sanitize completed\n", mmc_hostname(host));
	return err;
}
EXPORT_SYMBOL_GPL(mmc_sanitize);