12874c5fdSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-or-later 2da7fbe58SPierre Ossman /* 370f10482SPierre Ossman * linux/drivers/mmc/core/mmc_ops.h 4da7fbe58SPierre Ossman * 5da7fbe58SPierre Ossman * Copyright 2006-2007 Pierre Ossman 6da7fbe58SPierre Ossman */ 7da7fbe58SPierre Ossman 85a0e3ad6STejun Heo #include <linux/slab.h> 93ef77af1SPaul Gortmaker #include <linux/export.h> 10da7fbe58SPierre Ossman #include <linux/types.h> 11da7fbe58SPierre Ossman #include <linux/scatterlist.h> 12da7fbe58SPierre Ossman 13da7fbe58SPierre Ossman #include <linux/mmc/host.h> 14da7fbe58SPierre Ossman #include <linux/mmc/card.h> 15da7fbe58SPierre Ossman #include <linux/mmc/mmc.h> 16da7fbe58SPierre Ossman 17da7fbe58SPierre Ossman #include "core.h" 181cf8f7e5SUlf Hansson #include "card.h" 19c6dbab9cSAdrian Hunter #include "host.h" 20da7fbe58SPierre Ossman #include "mmc_ops.h" 21da7fbe58SPierre Ossman 2224ed3bd0SUlf Hansson #define MMC_BKOPS_TIMEOUT_MS (120 * 1000) /* 120s */ 2324ed3bd0SUlf Hansson #define MMC_CACHE_FLUSH_TIMEOUT_MS (30 * 1000) /* 30s */ 2455c2b8b9SUlf Hansson #define MMC_SANITIZE_TIMEOUT_MS (240 * 1000) /* 240s */ 258fee476bSTrey Ramsay 2604cdbbfaSUlf Hansson static const u8 tuning_blk_pattern_4bit[] = { 2704cdbbfaSUlf Hansson 0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc, 2804cdbbfaSUlf Hansson 0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef, 2904cdbbfaSUlf Hansson 0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb, 3004cdbbfaSUlf Hansson 0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef, 3104cdbbfaSUlf Hansson 0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c, 3204cdbbfaSUlf Hansson 0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee, 3304cdbbfaSUlf Hansson 0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff, 3404cdbbfaSUlf Hansson 0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde, 3504cdbbfaSUlf Hansson }; 3604cdbbfaSUlf Hansson 3704cdbbfaSUlf Hansson static const u8 tuning_blk_pattern_8bit[] = { 3804cdbbfaSUlf Hansson 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00, 3904cdbbfaSUlf 
Hansson 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc, 4004cdbbfaSUlf Hansson 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff, 4104cdbbfaSUlf Hansson 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff, 4204cdbbfaSUlf Hansson 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd, 4304cdbbfaSUlf Hansson 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb, 4404cdbbfaSUlf Hansson 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff, 4504cdbbfaSUlf Hansson 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff, 4604cdbbfaSUlf Hansson 0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 4704cdbbfaSUlf Hansson 0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 4804cdbbfaSUlf Hansson 0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 4904cdbbfaSUlf Hansson 0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 5004cdbbfaSUlf Hansson 0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 5104cdbbfaSUlf Hansson 0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 5204cdbbfaSUlf Hansson 0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 5304cdbbfaSUlf Hansson 0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 5404cdbbfaSUlf Hansson }; 5504cdbbfaSUlf Hansson 562185bc2cSUlf Hansson int __mmc_send_status(struct mmc_card *card, u32 *status, unsigned int retries) 57a27fbf2fSSeungwon Jeon { 58a27fbf2fSSeungwon Jeon int err; 59c7836d15SMasahiro Yamada struct mmc_command cmd = {}; 60a27fbf2fSSeungwon Jeon 61a27fbf2fSSeungwon Jeon cmd.opcode = MMC_SEND_STATUS; 62a27fbf2fSSeungwon Jeon if (!mmc_host_is_spi(card->host)) 63a27fbf2fSSeungwon Jeon cmd.arg = card->rca << 16; 64a27fbf2fSSeungwon Jeon cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC; 65a27fbf2fSSeungwon Jeon 662185bc2cSUlf Hansson err = mmc_wait_for_cmd(card->host, &cmd, retries); 67a27fbf2fSSeungwon Jeon if (err) 68a27fbf2fSSeungwon Jeon return err; 69a27fbf2fSSeungwon Jeon 70a27fbf2fSSeungwon Jeon /* NOTE: callers are required to understand the difference 71a27fbf2fSSeungwon Jeon * between "native" and SPI format status words! 
72a27fbf2fSSeungwon Jeon */ 73a27fbf2fSSeungwon Jeon if (status) 74a27fbf2fSSeungwon Jeon *status = cmd.resp[0]; 75a27fbf2fSSeungwon Jeon 76a27fbf2fSSeungwon Jeon return 0; 77a27fbf2fSSeungwon Jeon } 782185bc2cSUlf Hansson EXPORT_SYMBOL_GPL(__mmc_send_status); 792185bc2cSUlf Hansson 802185bc2cSUlf Hansson int mmc_send_status(struct mmc_card *card, u32 *status) 812185bc2cSUlf Hansson { 822185bc2cSUlf Hansson return __mmc_send_status(card, status, MMC_CMD_RETRIES); 832185bc2cSUlf Hansson } 841bee324aSLinus Walleij EXPORT_SYMBOL_GPL(mmc_send_status); 85a27fbf2fSSeungwon Jeon 86da7fbe58SPierre Ossman static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card) 87da7fbe58SPierre Ossman { 88c7836d15SMasahiro Yamada struct mmc_command cmd = {}; 89da7fbe58SPierre Ossman 90da7fbe58SPierre Ossman cmd.opcode = MMC_SELECT_CARD; 91da7fbe58SPierre Ossman 92da7fbe58SPierre Ossman if (card) { 93da7fbe58SPierre Ossman cmd.arg = card->rca << 16; 94da7fbe58SPierre Ossman cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; 95da7fbe58SPierre Ossman } else { 96da7fbe58SPierre Ossman cmd.arg = 0; 97da7fbe58SPierre Ossman cmd.flags = MMC_RSP_NONE | MMC_CMD_AC; 98da7fbe58SPierre Ossman } 99da7fbe58SPierre Ossman 1000899e741SMasahiro Yamada return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES); 101da7fbe58SPierre Ossman } 102da7fbe58SPierre Ossman 103da7fbe58SPierre Ossman int mmc_select_card(struct mmc_card *card) 104da7fbe58SPierre Ossman { 105da7fbe58SPierre Ossman 106da7fbe58SPierre Ossman return _mmc_select_card(card->host, card); 107da7fbe58SPierre Ossman } 108da7fbe58SPierre Ossman 109da7fbe58SPierre Ossman int mmc_deselect_cards(struct mmc_host *host) 110da7fbe58SPierre Ossman { 111da7fbe58SPierre Ossman return _mmc_select_card(host, NULL); 112da7fbe58SPierre Ossman } 113da7fbe58SPierre Ossman 1143d705d14SSascha Hauer /* 1153d705d14SSascha Hauer * Write the value specified in the device tree or board code into the optional 1163d705d14SSascha Hauer * 16 bit Driver Stage Register. 
This can be used to tune raise/fall times and 1173d705d14SSascha Hauer * drive strength of the DAT and CMD outputs. The actual meaning of a given 1183d705d14SSascha Hauer * value is hardware dependant. 1193d705d14SSascha Hauer * The presence of the DSR register can be determined from the CSD register, 1203d705d14SSascha Hauer * bit 76. 1213d705d14SSascha Hauer */ 1223d705d14SSascha Hauer int mmc_set_dsr(struct mmc_host *host) 1233d705d14SSascha Hauer { 124c7836d15SMasahiro Yamada struct mmc_command cmd = {}; 1253d705d14SSascha Hauer 1263d705d14SSascha Hauer cmd.opcode = MMC_SET_DSR; 1273d705d14SSascha Hauer 1283d705d14SSascha Hauer cmd.arg = (host->dsr << 16) | 0xffff; 1293d705d14SSascha Hauer cmd.flags = MMC_RSP_NONE | MMC_CMD_AC; 1303d705d14SSascha Hauer 1313d705d14SSascha Hauer return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES); 1323d705d14SSascha Hauer } 1333d705d14SSascha Hauer 134da7fbe58SPierre Ossman int mmc_go_idle(struct mmc_host *host) 135da7fbe58SPierre Ossman { 136da7fbe58SPierre Ossman int err; 137c7836d15SMasahiro Yamada struct mmc_command cmd = {}; 138da7fbe58SPierre Ossman 139af517150SDavid Brownell /* 140af517150SDavid Brownell * Non-SPI hosts need to prevent chipselect going active during 141af517150SDavid Brownell * GO_IDLE; that would put chips into SPI mode. Remind them of 142af517150SDavid Brownell * that in case of hardware that won't pull up DAT3/nCS otherwise. 143af517150SDavid Brownell * 144af517150SDavid Brownell * SPI hosts ignore ios.chip_select; it's managed according to 14525985edcSLucas De Marchi * rules that must accommodate non-MMC slaves which this layer 146af517150SDavid Brownell * won't even know about. 
147af517150SDavid Brownell */ 148af517150SDavid Brownell if (!mmc_host_is_spi(host)) { 149da7fbe58SPierre Ossman mmc_set_chip_select(host, MMC_CS_HIGH); 150da7fbe58SPierre Ossman mmc_delay(1); 151af517150SDavid Brownell } 152da7fbe58SPierre Ossman 153da7fbe58SPierre Ossman cmd.opcode = MMC_GO_IDLE_STATE; 154da7fbe58SPierre Ossman cmd.arg = 0; 155af517150SDavid Brownell cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC; 156da7fbe58SPierre Ossman 157da7fbe58SPierre Ossman err = mmc_wait_for_cmd(host, &cmd, 0); 158da7fbe58SPierre Ossman 159da7fbe58SPierre Ossman mmc_delay(1); 160da7fbe58SPierre Ossman 161af517150SDavid Brownell if (!mmc_host_is_spi(host)) { 162da7fbe58SPierre Ossman mmc_set_chip_select(host, MMC_CS_DONTCARE); 163da7fbe58SPierre Ossman mmc_delay(1); 164af517150SDavid Brownell } 165af517150SDavid Brownell 166af517150SDavid Brownell host->use_spi_crc = 0; 167da7fbe58SPierre Ossman 168da7fbe58SPierre Ossman return err; 169da7fbe58SPierre Ossman } 170da7fbe58SPierre Ossman 171da7fbe58SPierre Ossman int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr) 172da7fbe58SPierre Ossman { 173c7836d15SMasahiro Yamada struct mmc_command cmd = {}; 174da7fbe58SPierre Ossman int i, err = 0; 175da7fbe58SPierre Ossman 176da7fbe58SPierre Ossman cmd.opcode = MMC_SEND_OP_COND; 177af517150SDavid Brownell cmd.arg = mmc_host_is_spi(host) ? 
0 : ocr; 178af517150SDavid Brownell cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR; 179da7fbe58SPierre Ossman 180da7fbe58SPierre Ossman for (i = 100; i; i--) { 181da7fbe58SPierre Ossman err = mmc_wait_for_cmd(host, &cmd, 0); 18217b0429dSPierre Ossman if (err) 183da7fbe58SPierre Ossman break; 184da7fbe58SPierre Ossman 1854c94cb65SYoshihiro Shimoda /* wait until reset completes */ 186af517150SDavid Brownell if (mmc_host_is_spi(host)) { 187af517150SDavid Brownell if (!(cmd.resp[0] & R1_SPI_IDLE)) 188af517150SDavid Brownell break; 189af517150SDavid Brownell } else { 190af517150SDavid Brownell if (cmd.resp[0] & MMC_CARD_BUSY) 191af517150SDavid Brownell break; 192af517150SDavid Brownell } 193af517150SDavid Brownell 19417b0429dSPierre Ossman err = -ETIMEDOUT; 195da7fbe58SPierre Ossman 196da7fbe58SPierre Ossman mmc_delay(10); 1974c94cb65SYoshihiro Shimoda 1984c94cb65SYoshihiro Shimoda /* 1994c94cb65SYoshihiro Shimoda * According to eMMC specification v5.1 section 6.4.3, we 2004c94cb65SYoshihiro Shimoda * should issue CMD1 repeatedly in the idle state until 2014c94cb65SYoshihiro Shimoda * the eMMC is ready. Otherwise some eMMC devices seem to enter 2024c94cb65SYoshihiro Shimoda * the inactive mode after mmc_init_card() issued CMD0 when 2034c94cb65SYoshihiro Shimoda * the eMMC device is busy. 
2044c94cb65SYoshihiro Shimoda */ 2054c94cb65SYoshihiro Shimoda if (!ocr && !mmc_host_is_spi(host)) 2064c94cb65SYoshihiro Shimoda cmd.arg = cmd.resp[0] | BIT(30); 207da7fbe58SPierre Ossman } 208da7fbe58SPierre Ossman 209af517150SDavid Brownell if (rocr && !mmc_host_is_spi(host)) 210da7fbe58SPierre Ossman *rocr = cmd.resp[0]; 211da7fbe58SPierre Ossman 212da7fbe58SPierre Ossman return err; 213da7fbe58SPierre Ossman } 214da7fbe58SPierre Ossman 215da7fbe58SPierre Ossman int mmc_set_relative_addr(struct mmc_card *card) 216da7fbe58SPierre Ossman { 217c7836d15SMasahiro Yamada struct mmc_command cmd = {}; 218da7fbe58SPierre Ossman 219da7fbe58SPierre Ossman cmd.opcode = MMC_SET_RELATIVE_ADDR; 220da7fbe58SPierre Ossman cmd.arg = card->rca << 16; 221da7fbe58SPierre Ossman cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; 222da7fbe58SPierre Ossman 2230899e741SMasahiro Yamada return mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES); 224da7fbe58SPierre Ossman } 225da7fbe58SPierre Ossman 226af517150SDavid Brownell static int 227af517150SDavid Brownell mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode) 228da7fbe58SPierre Ossman { 229da7fbe58SPierre Ossman int err; 230c7836d15SMasahiro Yamada struct mmc_command cmd = {}; 231da7fbe58SPierre Ossman 232af517150SDavid Brownell cmd.opcode = opcode; 233af517150SDavid Brownell cmd.arg = arg; 234da7fbe58SPierre Ossman cmd.flags = MMC_RSP_R2 | MMC_CMD_AC; 235da7fbe58SPierre Ossman 236af517150SDavid Brownell err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES); 23717b0429dSPierre Ossman if (err) 238da7fbe58SPierre Ossman return err; 239da7fbe58SPierre Ossman 240af517150SDavid Brownell memcpy(cxd, cmd.resp, sizeof(u32) * 4); 241da7fbe58SPierre Ossman 24217b0429dSPierre Ossman return 0; 243da7fbe58SPierre Ossman } 244da7fbe58SPierre Ossman 2451a41313eSKyungsik Lee /* 2461a41313eSKyungsik Lee * NOTE: void *buf, caller for the buf is required to use DMA-capable 2471a41313eSKyungsik Lee * buffer or on-stack buffer (with some overhead in 
callee). 2481a41313eSKyungsik Lee */ 249af517150SDavid Brownell static int 250af517150SDavid Brownell mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host, 251af517150SDavid Brownell u32 opcode, void *buf, unsigned len) 252da7fbe58SPierre Ossman { 253c7836d15SMasahiro Yamada struct mmc_request mrq = {}; 254c7836d15SMasahiro Yamada struct mmc_command cmd = {}; 255c7836d15SMasahiro Yamada struct mmc_data data = {}; 256da7fbe58SPierre Ossman struct scatterlist sg; 257da7fbe58SPierre Ossman 258da7fbe58SPierre Ossman mrq.cmd = &cmd; 259da7fbe58SPierre Ossman mrq.data = &data; 260da7fbe58SPierre Ossman 261af517150SDavid Brownell cmd.opcode = opcode; 262da7fbe58SPierre Ossman cmd.arg = 0; 263da7fbe58SPierre Ossman 264af517150SDavid Brownell /* NOTE HACK: the MMC_RSP_SPI_R1 is always correct here, but we 265af517150SDavid Brownell * rely on callers to never use this with "native" calls for reading 266af517150SDavid Brownell * CSD or CID. Native versions of those commands use the R2 type, 267af517150SDavid Brownell * not R1 plus a data block. 268af517150SDavid Brownell */ 269af517150SDavid Brownell cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC; 270af517150SDavid Brownell 271af517150SDavid Brownell data.blksz = len; 272da7fbe58SPierre Ossman data.blocks = 1; 273da7fbe58SPierre Ossman data.flags = MMC_DATA_READ; 274da7fbe58SPierre Ossman data.sg = &sg; 275da7fbe58SPierre Ossman data.sg_len = 1; 276da7fbe58SPierre Ossman 277601ed60cSUlf Hansson sg_init_one(&sg, buf, len); 278da7fbe58SPierre Ossman 279cda56ac2SAdrian Hunter if (opcode == MMC_SEND_CSD || opcode == MMC_SEND_CID) { 2800d3e0460SMatthew Fleming /* 2810d3e0460SMatthew Fleming * The spec states that CSR and CID accesses have a timeout 2820d3e0460SMatthew Fleming * of 64 clock cycles. 
2830d3e0460SMatthew Fleming */ 2840d3e0460SMatthew Fleming data.timeout_ns = 0; 2850d3e0460SMatthew Fleming data.timeout_clks = 64; 286cda56ac2SAdrian Hunter } else 287cda56ac2SAdrian Hunter mmc_set_data_timeout(&data, card); 288da7fbe58SPierre Ossman 289af517150SDavid Brownell mmc_wait_for_req(host, &mrq); 290af517150SDavid Brownell 29117b0429dSPierre Ossman if (cmd.error) 292da7fbe58SPierre Ossman return cmd.error; 29317b0429dSPierre Ossman if (data.error) 294da7fbe58SPierre Ossman return data.error; 295da7fbe58SPierre Ossman 29617b0429dSPierre Ossman return 0; 297da7fbe58SPierre Ossman } 298da7fbe58SPierre Ossman 2990796e439SUlf Hansson static int mmc_spi_send_csd(struct mmc_card *card, u32 *csd) 300af517150SDavid Brownell { 30178e48073SPierre Ossman int ret, i; 30206c9ccb7SWinkler, Tomas __be32 *csd_tmp; 30378e48073SPierre Ossman 30422b78700SUlf Hansson csd_tmp = kzalloc(16, GFP_KERNEL); 3051a41313eSKyungsik Lee if (!csd_tmp) 3061a41313eSKyungsik Lee return -ENOMEM; 3071a41313eSKyungsik Lee 3081a41313eSKyungsik Lee ret = mmc_send_cxd_data(card, card->host, MMC_SEND_CSD, csd_tmp, 16); 30978e48073SPierre Ossman if (ret) 3101a41313eSKyungsik Lee goto err; 31178e48073SPierre Ossman 31278e48073SPierre Ossman for (i = 0; i < 4; i++) 3131a41313eSKyungsik Lee csd[i] = be32_to_cpu(csd_tmp[i]); 31478e48073SPierre Ossman 3151a41313eSKyungsik Lee err: 3161a41313eSKyungsik Lee kfree(csd_tmp); 3171a41313eSKyungsik Lee return ret; 318af517150SDavid Brownell } 319af517150SDavid Brownell 3200796e439SUlf Hansson int mmc_send_csd(struct mmc_card *card, u32 *csd) 3210796e439SUlf Hansson { 3220796e439SUlf Hansson if (mmc_host_is_spi(card->host)) 3230796e439SUlf Hansson return mmc_spi_send_csd(card, csd); 3240796e439SUlf Hansson 3250796e439SUlf Hansson return mmc_send_cxd_native(card->host, card->rca << 16, csd, 3260796e439SUlf Hansson MMC_SEND_CSD); 3270796e439SUlf Hansson } 3280796e439SUlf Hansson 329a1473732SUlf Hansson static int mmc_spi_send_cid(struct mmc_host *host, u32 *cid) 
330af517150SDavid Brownell { 33178e48073SPierre Ossman int ret, i; 33206c9ccb7SWinkler, Tomas __be32 *cid_tmp; 33378e48073SPierre Ossman 33422b78700SUlf Hansson cid_tmp = kzalloc(16, GFP_KERNEL); 3351a41313eSKyungsik Lee if (!cid_tmp) 3361a41313eSKyungsik Lee return -ENOMEM; 3371a41313eSKyungsik Lee 3381a41313eSKyungsik Lee ret = mmc_send_cxd_data(NULL, host, MMC_SEND_CID, cid_tmp, 16); 33978e48073SPierre Ossman if (ret) 3401a41313eSKyungsik Lee goto err; 34178e48073SPierre Ossman 34278e48073SPierre Ossman for (i = 0; i < 4; i++) 3431a41313eSKyungsik Lee cid[i] = be32_to_cpu(cid_tmp[i]); 34478e48073SPierre Ossman 3451a41313eSKyungsik Lee err: 3461a41313eSKyungsik Lee kfree(cid_tmp); 3471a41313eSKyungsik Lee return ret; 348af517150SDavid Brownell } 349af517150SDavid Brownell 350a1473732SUlf Hansson int mmc_send_cid(struct mmc_host *host, u32 *cid) 351a1473732SUlf Hansson { 352a1473732SUlf Hansson if (mmc_host_is_spi(host)) 353a1473732SUlf Hansson return mmc_spi_send_cid(host, cid); 354a1473732SUlf Hansson 355c92e68d8SUlf Hansson return mmc_send_cxd_native(host, 0, cid, MMC_ALL_SEND_CID); 356a1473732SUlf Hansson } 357a1473732SUlf Hansson 358e21aa519SUlf Hansson int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd) 359e21aa519SUlf Hansson { 360e21aa519SUlf Hansson int err; 361e21aa519SUlf Hansson u8 *ext_csd; 362e21aa519SUlf Hansson 363e21aa519SUlf Hansson if (!card || !new_ext_csd) 364e21aa519SUlf Hansson return -EINVAL; 365e21aa519SUlf Hansson 366e21aa519SUlf Hansson if (!mmc_can_ext_csd(card)) 367e21aa519SUlf Hansson return -EOPNOTSUPP; 368e21aa519SUlf Hansson 369e21aa519SUlf Hansson /* 370e21aa519SUlf Hansson * As the ext_csd is so large and mostly unused, we don't store the 371e21aa519SUlf Hansson * raw block in mmc_card. 
372e21aa519SUlf Hansson */ 37322b78700SUlf Hansson ext_csd = kzalloc(512, GFP_KERNEL); 374e21aa519SUlf Hansson if (!ext_csd) 375e21aa519SUlf Hansson return -ENOMEM; 376e21aa519SUlf Hansson 3772fc91e8bSUlf Hansson err = mmc_send_cxd_data(card, card->host, MMC_SEND_EXT_CSD, ext_csd, 3782fc91e8bSUlf Hansson 512); 379e21aa519SUlf Hansson if (err) 380e21aa519SUlf Hansson kfree(ext_csd); 381e21aa519SUlf Hansson else 382e21aa519SUlf Hansson *new_ext_csd = ext_csd; 383e21aa519SUlf Hansson 384e21aa519SUlf Hansson return err; 385e21aa519SUlf Hansson } 386e21aa519SUlf Hansson EXPORT_SYMBOL_GPL(mmc_get_ext_csd); 387e21aa519SUlf Hansson 388af517150SDavid Brownell int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp) 389af517150SDavid Brownell { 390c7836d15SMasahiro Yamada struct mmc_command cmd = {}; 391af517150SDavid Brownell int err; 392af517150SDavid Brownell 393af517150SDavid Brownell cmd.opcode = MMC_SPI_READ_OCR; 394af517150SDavid Brownell cmd.arg = highcap ? (1 << 30) : 0; 395af517150SDavid Brownell cmd.flags = MMC_RSP_SPI_R3; 396af517150SDavid Brownell 397af517150SDavid Brownell err = mmc_wait_for_cmd(host, &cmd, 0); 398af517150SDavid Brownell 399af517150SDavid Brownell *ocrp = cmd.resp[1]; 400af517150SDavid Brownell return err; 401af517150SDavid Brownell } 402af517150SDavid Brownell 403af517150SDavid Brownell int mmc_spi_set_crc(struct mmc_host *host, int use_crc) 404af517150SDavid Brownell { 405c7836d15SMasahiro Yamada struct mmc_command cmd = {}; 406af517150SDavid Brownell int err; 407af517150SDavid Brownell 408af517150SDavid Brownell cmd.opcode = MMC_SPI_CRC_ON_OFF; 409af517150SDavid Brownell cmd.flags = MMC_RSP_SPI_R1; 410af517150SDavid Brownell cmd.arg = use_crc; 411af517150SDavid Brownell 412af517150SDavid Brownell err = mmc_wait_for_cmd(host, &cmd, 0); 413af517150SDavid Brownell if (!err) 414af517150SDavid Brownell host->use_spi_crc = use_crc; 415af517150SDavid Brownell return err; 416af517150SDavid Brownell } 417af517150SDavid Brownell 
41820348d19SUlf Hansson static int mmc_switch_status_error(struct mmc_host *host, u32 status) 419ed16f58dSAdrian Hunter { 420ed16f58dSAdrian Hunter if (mmc_host_is_spi(host)) { 421ed16f58dSAdrian Hunter if (status & R1_SPI_ILLEGAL_COMMAND) 422ed16f58dSAdrian Hunter return -EBADMSG; 423ed16f58dSAdrian Hunter } else { 424a94a7483SShawn Lin if (R1_STATUS(status)) 425ed16f58dSAdrian Hunter pr_warn("%s: unexpected status %#x after switch\n", 426ed16f58dSAdrian Hunter mmc_hostname(host), status); 427ed16f58dSAdrian Hunter if (status & R1_SWITCH_ERROR) 428ed16f58dSAdrian Hunter return -EBADMSG; 429ed16f58dSAdrian Hunter } 430ed16f58dSAdrian Hunter return 0; 431ed16f58dSAdrian Hunter } 432ed16f58dSAdrian Hunter 43320348d19SUlf Hansson /* Caller must hold re-tuning */ 43460db8a47SUlf Hansson int mmc_switch_status(struct mmc_card *card, bool crc_err_fatal) 43520348d19SUlf Hansson { 43620348d19SUlf Hansson u32 status; 43720348d19SUlf Hansson int err; 43820348d19SUlf Hansson 43920348d19SUlf Hansson err = mmc_send_status(card, &status); 440ef3d2322SAdrian Hunter if (!crc_err_fatal && err == -EILSEQ) 441ef3d2322SAdrian Hunter return 0; 44220348d19SUlf Hansson if (err) 44320348d19SUlf Hansson return err; 44420348d19SUlf Hansson 44520348d19SUlf Hansson return mmc_switch_status_error(card->host, status); 44620348d19SUlf Hansson } 44720348d19SUlf Hansson 4486972096aSUlf Hansson static int mmc_busy_status(struct mmc_card *card, bool retry_crc_err, 4490d84c3e6SUlf Hansson enum mmc_busy_cmd busy_cmd, bool *busy) 4506972096aSUlf Hansson { 4516972096aSUlf Hansson struct mmc_host *host = card->host; 4526972096aSUlf Hansson u32 status = 0; 4536972096aSUlf Hansson int err; 4546972096aSUlf Hansson 4556972096aSUlf Hansson if (host->ops->card_busy) { 4566972096aSUlf Hansson *busy = host->ops->card_busy(host); 4576972096aSUlf Hansson return 0; 4586972096aSUlf Hansson } 4596972096aSUlf Hansson 4606972096aSUlf Hansson err = mmc_send_status(card, &status); 4616972096aSUlf Hansson if (retry_crc_err 
&& err == -EILSEQ) { 4626972096aSUlf Hansson *busy = true; 4636972096aSUlf Hansson return 0; 4646972096aSUlf Hansson } 4656972096aSUlf Hansson if (err) 4666972096aSUlf Hansson return err; 4676972096aSUlf Hansson 4680d84c3e6SUlf Hansson switch (busy_cmd) { 4690d84c3e6SUlf Hansson case MMC_BUSY_CMD6: 4706972096aSUlf Hansson err = mmc_switch_status_error(card->host, status); 4710d84c3e6SUlf Hansson break; 4720d84c3e6SUlf Hansson case MMC_BUSY_ERASE: 4730d84c3e6SUlf Hansson err = R1_STATUS(status) ? -EIO : 0; 4740d84c3e6SUlf Hansson break; 475490ff95fSUlf Hansson case MMC_BUSY_HPI: 476490ff95fSUlf Hansson break; 4770d84c3e6SUlf Hansson default: 4780d84c3e6SUlf Hansson err = -EINVAL; 4790d84c3e6SUlf Hansson } 4800d84c3e6SUlf Hansson 4816972096aSUlf Hansson if (err) 4826972096aSUlf Hansson return err; 4836972096aSUlf Hansson 4842a1c7cdaSUlf Hansson *busy = !mmc_ready_for_data(status); 4856972096aSUlf Hansson return 0; 4866972096aSUlf Hansson } 4876972096aSUlf Hansson 4880d84c3e6SUlf Hansson static int __mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms, 4890d84c3e6SUlf Hansson bool send_status, bool retry_crc_err, 4900d84c3e6SUlf Hansson enum mmc_busy_cmd busy_cmd) 491716bdb89SUlf Hansson { 492716bdb89SUlf Hansson struct mmc_host *host = card->host; 493716bdb89SUlf Hansson int err; 494716bdb89SUlf Hansson unsigned long timeout; 495d46a24a9SUlf Hansson unsigned int udelay = 32, udelay_max = 32768; 496716bdb89SUlf Hansson bool expired = false; 497716bdb89SUlf Hansson bool busy = false; 498716bdb89SUlf Hansson 499716bdb89SUlf Hansson /* 500716bdb89SUlf Hansson * In cases when not allowed to poll by using CMD13 or because we aren't 501716bdb89SUlf Hansson * capable of polling by using ->card_busy(), then rely on waiting the 502716bdb89SUlf Hansson * stated timeout to be sufficient. 
503716bdb89SUlf Hansson */ 504716bdb89SUlf Hansson if (!send_status && !host->ops->card_busy) { 505716bdb89SUlf Hansson mmc_delay(timeout_ms); 506716bdb89SUlf Hansson return 0; 507716bdb89SUlf Hansson } 508716bdb89SUlf Hansson 509716bdb89SUlf Hansson timeout = jiffies + msecs_to_jiffies(timeout_ms) + 1; 510716bdb89SUlf Hansson do { 511716bdb89SUlf Hansson /* 51270562644SUlf Hansson * Due to the possibility of being preempted while polling, 51370562644SUlf Hansson * check the expiration time first. 514716bdb89SUlf Hansson */ 515716bdb89SUlf Hansson expired = time_after(jiffies, timeout); 51670562644SUlf Hansson 5170d84c3e6SUlf Hansson err = mmc_busy_status(card, retry_crc_err, busy_cmd, &busy); 5185ec32f84SUlf Hansson if (err) 5195ec32f84SUlf Hansson return err; 520716bdb89SUlf Hansson 52170562644SUlf Hansson /* Timeout if the device still remains busy. */ 52270562644SUlf Hansson if (expired && busy) { 52370562644SUlf Hansson pr_err("%s: Card stuck being busy! %s\n", 524716bdb89SUlf Hansson mmc_hostname(host), __func__); 525716bdb89SUlf Hansson return -ETIMEDOUT; 526716bdb89SUlf Hansson } 527d46a24a9SUlf Hansson 528d46a24a9SUlf Hansson /* Throttle the polling rate to avoid hogging the CPU. 
*/ 529d46a24a9SUlf Hansson if (busy) { 530d46a24a9SUlf Hansson usleep_range(udelay, udelay * 2); 531d46a24a9SUlf Hansson if (udelay < udelay_max) 532d46a24a9SUlf Hansson udelay *= 2; 533d46a24a9SUlf Hansson } 53470562644SUlf Hansson } while (busy); 535716bdb89SUlf Hansson 5365ec32f84SUlf Hansson return 0; 537716bdb89SUlf Hansson } 538716bdb89SUlf Hansson 5390d84c3e6SUlf Hansson int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms, 5400d84c3e6SUlf Hansson enum mmc_busy_cmd busy_cmd) 5410d84c3e6SUlf Hansson { 5420d84c3e6SUlf Hansson return __mmc_poll_for_busy(card, timeout_ms, true, false, busy_cmd); 5430d84c3e6SUlf Hansson } 5440d84c3e6SUlf Hansson 545d3a8d95dSAndrei Warkentin /** 546950d56acSJaehoon Chung * __mmc_switch - modify EXT_CSD register 547d3a8d95dSAndrei Warkentin * @card: the MMC card associated with the data transfer 548d3a8d95dSAndrei Warkentin * @set: cmd set values 549d3a8d95dSAndrei Warkentin * @index: EXT_CSD register index 550d3a8d95dSAndrei Warkentin * @value: value to program into EXT_CSD register 551d3a8d95dSAndrei Warkentin * @timeout_ms: timeout (ms) for operation performed by register write, 552d3a8d95dSAndrei Warkentin * timeout of zero implies maximum possible timeout 553aa33ce3cSUlf Hansson * @timing: new timing to change to 554878e200bSUlf Hansson * @send_status: send status cmd to poll for busy 555625228faSUlf Hansson * @retry_crc_err: retry when CRC errors when polling with CMD13 for busy 556d3a8d95dSAndrei Warkentin * 557d3a8d95dSAndrei Warkentin * Modifies the EXT_CSD register for selected card. 
558d3a8d95dSAndrei Warkentin */ 559950d56acSJaehoon Chung int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value, 560aa33ce3cSUlf Hansson unsigned int timeout_ms, unsigned char timing, 56102098ccdSUlf Hansson bool send_status, bool retry_crc_err) 562da7fbe58SPierre Ossman { 563636bd13cSUlf Hansson struct mmc_host *host = card->host; 564da7fbe58SPierre Ossman int err; 565c7836d15SMasahiro Yamada struct mmc_command cmd = {}; 56602098ccdSUlf Hansson bool use_r1b_resp = true; 567aa33ce3cSUlf Hansson unsigned char old_timing = host->ios.timing; 568b9ec2616SUlf Hansson 569c6dbab9cSAdrian Hunter mmc_retune_hold(host); 570c6dbab9cSAdrian Hunter 571533a6cfeSUlf Hansson if (!timeout_ms) { 572533a6cfeSUlf Hansson pr_warn("%s: unspecified timeout for CMD6 - use generic\n", 573533a6cfeSUlf Hansson mmc_hostname(host)); 574533a6cfeSUlf Hansson timeout_ms = card->ext_csd.generic_cmd6_time; 575533a6cfeSUlf Hansson } 576533a6cfeSUlf Hansson 577b9ec2616SUlf Hansson /* 578533a6cfeSUlf Hansson * If the max_busy_timeout of the host is specified, make sure it's 579533a6cfeSUlf Hansson * enough to fit the used timeout_ms. In case it's not, let's instruct 580533a6cfeSUlf Hansson * the host to avoid HW busy detection, by converting to a R1 response 5811292e3efSUlf Hansson * instead of a R1B. Note, some hosts requires R1B, which also means 5821292e3efSUlf Hansson * they are on their own when it comes to deal with the busy timeout. 
583b9ec2616SUlf Hansson */ 5841292e3efSUlf Hansson if (!(host->caps & MMC_CAP_NEED_RSP_BUSY) && host->max_busy_timeout && 5851292e3efSUlf Hansson (timeout_ms > host->max_busy_timeout)) 586b9ec2616SUlf Hansson use_r1b_resp = false; 587da7fbe58SPierre Ossman 588da7fbe58SPierre Ossman cmd.opcode = MMC_SWITCH; 589da7fbe58SPierre Ossman cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) | 590da7fbe58SPierre Ossman (index << 16) | 591da7fbe58SPierre Ossman (value << 8) | 592da7fbe58SPierre Ossman set; 593950d56acSJaehoon Chung cmd.flags = MMC_CMD_AC; 594b9ec2616SUlf Hansson if (use_r1b_resp) { 595950d56acSJaehoon Chung cmd.flags |= MMC_RSP_SPI_R1B | MMC_RSP_R1B; 5961d4d7744SUlf Hansson cmd.busy_timeout = timeout_ms; 597b9ec2616SUlf Hansson } else { 598b9ec2616SUlf Hansson cmd.flags |= MMC_RSP_SPI_R1 | MMC_RSP_R1; 599b9ec2616SUlf Hansson } 600b9ec2616SUlf Hansson 6018ad8e02cSJan Kaisrlik err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES); 60217b0429dSPierre Ossman if (err) 603c6dbab9cSAdrian Hunter goto out; 604da7fbe58SPierre Ossman 605cb26ce06SUlf Hansson /*If SPI or used HW busy detection above, then we don't need to poll. */ 606cb26ce06SUlf Hansson if (((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp) || 607ee6ff743SUlf Hansson mmc_host_is_spi(host)) 608aa33ce3cSUlf Hansson goto out_tim; 609a27fbf2fSSeungwon Jeon 610716bdb89SUlf Hansson /* Let's try to poll to find out when the command is completed. */ 6110d84c3e6SUlf Hansson err = __mmc_poll_for_busy(card, timeout_ms, send_status, retry_crc_err, 6120d84c3e6SUlf Hansson MMC_BUSY_CMD6); 613ee6ff743SUlf Hansson if (err) 614ee6ff743SUlf Hansson goto out; 615aa33ce3cSUlf Hansson 616aa33ce3cSUlf Hansson out_tim: 617ee6ff743SUlf Hansson /* Switch to new timing before check switch status. 
 */
	if (timing)
		mmc_set_timing(host, timing);

	if (send_status) {
		err = mmc_switch_status(card, true);
		if (err && timing)
			mmc_set_timing(host, old_timing);
	}
out:
	mmc_retune_release(host);

	return err;
}

/**
 * mmc_switch - issue a CMD6 SWITCH with default polling behaviour
 * @card: the MMC card to switch
 * @set: cmd set values
 * @index: EXT_CSD register index
 * @value: value to program into the EXT_CSD register
 * @timeout_ms: busy timeout for the switch operation
 *
 * Convenience wrapper around __mmc_switch() that uses the default
 * timing (0), verifies the switch via a status poll (send_status =
 * true) and does not retry on CRC errors (retry_crc_err = false).
 */
int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
		unsigned int timeout_ms)
{
	return __mmc_switch(card, set, index, value, timeout_ms, 0,
			true, false);
}
EXPORT_SYMBOL_GPL(mmc_switch);

/*
 * Send a tuning command (e.g. CMD19/CMD21) and compare the block the card
 * returns against the expected tuning pattern for the current bus width.
 * Returns 0 on a matching block, -EIO on a pattern mismatch, -EINVAL for an
 * unsupported (1-bit) bus width, or the command/data error. If @cmd_error is
 * non-NULL it receives the raw command error even on success.
 */
int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;
	struct mmc_ios *ios = &host->ios;
	const u8 *tuning_block_pattern;
	int size, err = 0;
	u8 *data_buf;

	/* Pick the reference pattern matching the configured bus width. */
	if (ios->bus_width == MMC_BUS_WIDTH_8) {
		tuning_block_pattern = tuning_blk_pattern_8bit;
		size = sizeof(tuning_blk_pattern_8bit);
	} else if (ios->bus_width == MMC_BUS_WIDTH_4) {
		tuning_block_pattern = tuning_blk_pattern_4bit;
		size = sizeof(tuning_blk_pattern_4bit);
	} else
		return -EINVAL;

	/* Heap buffer: DMA onto the stack is unsafe/nonportable. */
	data_buf = kzalloc(size, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;

	mrq.cmd = &cmd;
	mrq.data = &data;

	cmd.opcode = opcode;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = size;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	/*
	 * According to the tuning specs, Tuning process
	 * is normally shorter 40 executions of CMD19,
	 * and timeout value should be shorter than 150 ms
	 */
	data.timeout_ns = 150 * NSEC_PER_MSEC;

	data.sg = &sg;
	data.sg_len = 1;
	sg_init_one(&sg, data_buf, size);

	mmc_wait_for_req(host, &mrq);

	/* Report the raw command error to the caller if requested. */
	if (cmd_error)
		*cmd_error = cmd.error;

	if (cmd.error) {
		err = cmd.error;
		goto out;
	}

	if (data.error) {
		err = data.error;
		goto out;
	}

	/* The received block must match the spec-defined pattern exactly. */
	if (memcmp(data_buf, tuning_block_pattern, size))
		err = -EIO;

out:
	kfree(data_buf);
	return err;
}
EXPORT_SYMBOL_GPL(mmc_send_tuning);

/*
 * Abort an in-progress tuning sequence by sending CMD12. Only valid for
 * eMMC HS200 tuning (CMD21); for anything else this is a no-op returning 0.
 */
int mmc_abort_tuning(struct mmc_host *host, u32 opcode)
{
	struct mmc_command cmd = {};

	/*
	 * eMMC specification specifies that CMD12 can be used to stop a tuning
	 * command, but SD specification does not, so do nothing unless it is
	 * eMMC.
	 */
	if (opcode != MMC_SEND_TUNING_BLOCK_HS200)
		return 0;

	cmd.opcode = MMC_STOP_TRANSMISSION;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;

	/*
	 * For drivers that override R1 to R1b, set an arbitrary timeout based
	 * on the tuning timeout i.e. 150ms.
	 */
	cmd.busy_timeout = 150;

	return mmc_wait_for_cmd(host, &cmd, 0);
}
EXPORT_SYMBOL_GPL(mmc_abort_tuning);

/*
 * Issue a single BUS_TEST_W or BUS_TEST_R data transfer of @len bytes
 * (8 for an 8-bit bus, 4 for a 4-bit bus). For the read leg, the card is
 * expected to return the bit-inverse of the written test pattern; a byte
 * that is not the exact inverse yields -EIO.
 */
static int
mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
		  u8 len)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;
	u8 *data_buf;
	u8 *test_buf;
	int i, err;
	static u8 testdata_8bit[8] = { 0x55, 0xaa, 0, 0, 0, 0, 0, 0 };
	static u8 testdata_4bit[4] = { 0x5a, 0, 0, 0 };

	/* dma onto stack is unsafe/nonportable, but callers to this
	 * routine normally provide temporary on-stack buffers ...
	 */
	data_buf = kmalloc(len, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;

	if (len == 8)
		test_buf = testdata_8bit;
	else if (len == 4)
		test_buf = testdata_4bit;
	else {
		pr_err("%s: Invalid bus_width %d\n",
		       mmc_hostname(host), len);
		kfree(data_buf);
		return -EINVAL;
	}

	if (opcode == MMC_BUS_TEST_W)
		memcpy(data_buf, test_buf, len);

	mrq.cmd = &cmd;
	mrq.data = &data;
	cmd.opcode = opcode;
	cmd.arg = 0;

	/* NOTE HACK: the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID. Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = len;
	data.blocks = 1;
	if (opcode == MMC_BUS_TEST_R)
		data.flags = MMC_DATA_READ;
	else
		data.flags = MMC_DATA_WRITE;

	data.sg = &sg;
	data.sg_len = 1;
	mmc_set_data_timeout(&data, card);
	sg_init_one(&sg, data_buf, len);
	mmc_wait_for_req(host, &mrq);
	err = 0;
	if (opcode == MMC_BUS_TEST_R) {
		/*
		 * Only the first len/4 bytes are compared (2 of 8, or 1 of 4);
		 * those are exactly the non-zero bytes of the test patterns
		 * above — the remaining pattern bytes are zero padding.
		 */
		for (i = 0; i < len / 4; i++)
			if ((test_buf[i] ^ data_buf[i]) != 0xff) {
				err = -EIO;
				break;
			}
	}
	kfree(data_buf);

	/* Transport errors take precedence over a pattern mismatch. */
	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return err;
}

/**
 * mmc_bus_test - verify the data bus width with BUS_TEST_W / BUS_TEST_R
 * @card: card to test
 * @bus_width: MMC_BUS_WIDTH_* value to verify
 *
 * Returns 0 for a working bus (or trivially for a 1-bit bus, which needs
 * no test), -EINVAL for an unknown width, or the BUS_TEST_R result.
 */
int mmc_bus_test(struct mmc_card *card, u8 bus_width)
{
	int width;

	if (bus_width == MMC_BUS_WIDTH_8)
		width = 8;
	else if (bus_width == MMC_BUS_WIDTH_4)
		width = 4;
	else if (bus_width == MMC_BUS_WIDTH_1)
		return 0; /* no need for test */
	else
		return -EINVAL;

	/*
	 * Ignore errors from BUS_TEST_W.  BUS_TEST_R will fail if there
	 * is a problem.  This improves chances that the test will work.
	 */
	mmc_send_bus_test(card, card->host, MMC_BUS_TEST_W, width);
	return mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
}

/*
 * Send the card's HPI command (from EXT_CSD) to interrupt an ongoing
 * programming operation, then wait until the card leaves the busy state —
 * either via HW busy detection (R1B) or by software status polling.
 */
static int mmc_send_hpi_cmd(struct mmc_card *card)
{
	/* OUT_OF_INTERRUPT_TIME from EXT_CSD bounds how long HPI may take. */
	unsigned int busy_timeout_ms = card->ext_csd.out_of_int_time;
	struct mmc_host *host = card->host;
	bool use_r1b_resp = true;
	struct mmc_command cmd = {};
	int err;

	cmd.opcode = card->ext_csd.hpi_cmd;
	cmd.arg = card->rca << 16 | 1;

	/*
	 * Make sure the host's max_busy_timeout fit the needed timeout for HPI.
	 * In case it doesn't, let's instruct the host to avoid HW busy
	 * detection, by using a R1 response instead of R1B.
	 */
	if (host->max_busy_timeout && busy_timeout_ms > host->max_busy_timeout)
		use_r1b_resp = false;

	/* R1B is only used when HPI is implemented as CMD12 (STOP). */
	if (cmd.opcode == MMC_STOP_TRANSMISSION && use_r1b_resp) {
		cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
		cmd.busy_timeout = busy_timeout_ms;
	} else {
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
		use_r1b_resp = false;
	}

	err = mmc_wait_for_cmd(host, &cmd, 0);
	if (err) {
		pr_warn("%s: HPI error %d. Command response %#x\n",
			mmc_hostname(host), err, cmd.resp[0]);
		return err;
	}

	/* No need to poll when using HW busy detection. */
	if (host->caps & MMC_CAP_WAIT_WHILE_BUSY && use_r1b_resp)
		return 0;

	/* Let's poll to find out when the HPI request completes. */
	return mmc_poll_for_busy(card, busy_timeout_ms, MMC_BUSY_HPI);
}

/**
 * mmc_interrupt_hpi - Issue for High priority Interrupt
 * @card: the MMC card associated with the HPI transfer
 *
 * Issued High Priority Interrupt, and check for card status
 * until out-of prg-state.
 *
 * NOTE(review): returns 1 (not a negative errno) when HPI is not enabled
 * on the card — long-standing behaviour callers may rely on.
 */
int mmc_interrupt_hpi(struct mmc_card *card)
{
	int err;
	u32 status;

	if (!card->ext_csd.hpi_en) {
		pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
		return 1;
	}

	err = mmc_send_status(card, &status);
	if (err) {
		pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
		goto out;
	}

	/* HPI is only meaningful while the card is programming. */
	switch (R1_CURRENT_STATE(status)) {
	case R1_STATE_IDLE:
	case R1_STATE_READY:
	case R1_STATE_STBY:
	case R1_STATE_TRAN:
		/*
		 * In idle and transfer states, HPI is not needed and the caller
		 * can issue the next intended command immediately
		 */
		goto out;
	case R1_STATE_PRG:
		break;
	default:
		/* In all other states, it's illegal to issue HPI */
		pr_debug("%s: HPI cannot be sent. Card state=%d\n",
			 mmc_hostname(card->host), R1_CURRENT_STATE(status));
		err = -EINVAL;
		goto out;
	}

	err = mmc_send_hpi_cmd(card);
out:
	return err;
}

/*
 * An EXT_CSD register only exists for MMCA spec version 4 and later.
 */
int mmc_can_ext_csd(struct mmc_card *card)
{
	return (card && card->csd.mmca_vsn > CSD_SPEC_VER_3);
}

/*
 * Refresh the cached BKOPS and exception-event status bytes from EXT_CSD.
 */
static int mmc_read_bkops_status(struct mmc_card *card)
{
	int err;
	u8 *ext_csd;

	err = mmc_get_ext_csd(card, &ext_csd);
	if (err)
		return err;

	card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS];
	card->ext_csd.raw_exception_status = ext_csd[EXT_CSD_EXP_EVENTS_STATUS];
	kfree(ext_csd);
	return 0;
}

/**
 * mmc_run_bkops - Run BKOPS for supported cards
 * @card: MMC card to run BKOPS for
 *
 * Run background operations synchronously for cards having manual BKOPS
 * enabled and in case it reports urgent BKOPS level.
*/
void mmc_run_bkops(struct mmc_card *card)
{
	int err;

	if (!card->ext_csd.man_bkops_en)
		return;

	err = mmc_read_bkops_status(card);
	if (err) {
		pr_err("%s: Failed to read bkops status: %d\n",
		       mmc_hostname(card->host), err);
		return;
	}

	/* Only act on urgent levels (LEVEL_2 and above). */
	if (!card->ext_csd.raw_bkops_status ||
	    card->ext_csd.raw_bkops_status < EXT_CSD_BKOPS_LEVEL_2)
		return;

	/* Hold re-tuning while the card is busy with BKOPS. */
	mmc_retune_hold(card->host);

	/*
	 * For urgent BKOPS status, LEVEL_2 and higher, let's execute
	 * synchronously. Future wise, we may consider to start BKOPS, for less
	 * urgent levels by using an asynchronous background task, when idle.
	 */
	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_BKOPS_START, 1, MMC_BKOPS_TIMEOUT_MS);
	if (err)
		pr_warn("%s: Error %d starting bkops\n",
			mmc_hostname(card->host), err);

	mmc_retune_release(card->host);
}
EXPORT_SYMBOL(mmc_run_bkops);

/*
 * Flush the cache to the non-volatile storage. Only acts on eMMC cards
 * that report a cache and have it enabled (CACHE_CTRL bit 0); otherwise
 * returns 0 without issuing anything.
 */
int mmc_flush_cache(struct mmc_card *card)
{
	int err = 0;

	if (mmc_card_mmc(card) &&
			(card->ext_csd.cache_size > 0) &&
			(card->ext_csd.cache_ctrl & 1)) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_FLUSH_CACHE, 1,
				 MMC_CACHE_FLUSH_TIMEOUT_MS);
		if (err)
			pr_err("%s: cache flush error %d\n",
			       mmc_hostname(card->host), err);
	}

	return err;
}
EXPORT_SYMBOL(mmc_flush_cache);

/*
 * Enable or disable the command queue via EXT_CSD CMDQ_MODE_EN and mirror
 * the resulting state in card->ext_csd.cmdq_en on success.
 */
static int mmc_cmdq_switch(struct mmc_card *card, bool enable)
{
	u8 val = enable ? EXT_CSD_CMDQ_MODE_ENABLED : 0;
	int err;

	if (!card->ext_csd.cmdq_support)
		return -EOPNOTSUPP;

	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_CMDQ_MODE_EN,
			 val, card->ext_csd.generic_cmd6_time);
	if (!err)
		card->ext_csd.cmdq_en = enable;

	return err;
}

/* Turn the eMMC command queue on. */
int mmc_cmdq_enable(struct mmc_card *card)
{
	return mmc_cmdq_switch(card, true);
}
EXPORT_SYMBOL_GPL(mmc_cmdq_enable);

/* Turn the eMMC command queue off. */
int mmc_cmdq_disable(struct mmc_card *card)
{
	return mmc_cmdq_switch(card, false);
}
EXPORT_SYMBOL_GPL(mmc_cmdq_disable);

/*
 * Start an eMMC SANITIZE operation (EXT_CSD SANITIZE_START) and wait up to
 * MMC_SANITIZE_TIMEOUT_MS for it to finish. On timeout, an HPI is attempted
 * to get the card out of the programming state. Returns the switch result
 * (still the original error even if the HPI abort succeeded).
 */
int mmc_sanitize(struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	int err;

	if (!mmc_can_sanitize(card)) {
		pr_warn("%s: Sanitize not supported\n", mmc_hostname(host));
		return -EOPNOTSUPP;
	}

	pr_debug("%s: Sanitize in progress...\n", mmc_hostname(host));

	/* Hold re-tuning for the duration of the (long) sanitize. */
	mmc_retune_hold(host);

	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_SANITIZE_START,
			 1, MMC_SANITIZE_TIMEOUT_MS);
	if (err)
		pr_err("%s: Sanitize failed err=%d\n", mmc_hostname(host), err);

	/*
	 * If the sanitize operation timed out, the card is probably still busy
	 * in the R1_STATE_PRG. Rather than continue to wait, let's try to abort
	 * it with a HPI command to get back into R1_STATE_TRAN.
	 */
	if (err == -ETIMEDOUT && !mmc_interrupt_hpi(card))
		pr_warn("%s: Sanitize aborted\n", mmc_hostname(host));

	mmc_retune_release(host);

	pr_debug("%s: Sanitize completed\n", mmc_hostname(host));
	return err;
}
EXPORT_SYMBOL_GPL(mmc_sanitize);