/*
 * linux/drivers/mmc/core/mmc.c
 *
 * Copyright (C) 2003-2004 Russell King, All Rights Reserved.
 * Copyright (C) 2005-2007 Pierre Ossman, All Rights Reserved.
 * MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/pm_runtime.h>

#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/mmc.h>

#include "core.h"
#include "bus.h"
#include "mmc_ops.h"
#include "sd_ops.h"

static const unsigned int tran_exp[] = {
	10000,		100000,		1000000,	10000000,
	0,		0,		0,		0
};

static const unsigned char tran_mant[] = {
	0,	10,	12,	13,	15,	20,	25,	30,
	35,	40,	45,	50,	55,	60,	70,	80,
};

static const unsigned int tacc_exp[] = {
	1,	10,	100,	1000,	10000,	100000,	1000000, 10000000,
};

static const unsigned int tacc_mant[] = {
	0,	10,	12,	13,	15,	20,	25,	30,
	35,	40,	45,	50,	55,	60,	70,	80,
};

#define UNSTUFF_BITS(resp,start,size)					\
	({								\
		const int __size = size;				\
		const u32 __mask = (__size < 32 ? 1 << __size : 0) - 1; \
		const int __off = 3 - ((start) / 32);			\
		const int __shft = (start) & 31;			\
		u32 __res;						\
									\
		__res = resp[__off] >> __shft;				\
		if (__size + __shft > 32)				\
			__res |= resp[__off-1] << ((32 - __shft) % 32);	\
		__res & __mask;						\
	})

/*
 * Given the decoded CSD structure, decode the raw CID to our CID structure.
 */
static int mmc_decode_cid(struct mmc_card *card)
{
	u32 *resp = card->raw_cid;

	/*
	 * The selection of the format here is based upon published
	 * specs from sandisk and from what people have reported.
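	 * Cards reporting MMCA version 0 or 1 use the original layout
	 * (24-bit manufacturer ID, 7-character product name, separate
	 * hardware/firmware revision nibbles); version 2.0 and later use
	 * the layout with an OEM ID, product revision and 32-bit serial.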
	 */
	switch (card->csd.mmca_vsn) {
	case 0: /* MMC v1.0 - v1.2 */
	case 1: /* MMC v1.4 */
		card->cid.manfid = UNSTUFF_BITS(resp, 104, 24);
		card->cid.prod_name[0] = UNSTUFF_BITS(resp, 96, 8);
		card->cid.prod_name[1] = UNSTUFF_BITS(resp, 88, 8);
		card->cid.prod_name[2] = UNSTUFF_BITS(resp, 80, 8);
		card->cid.prod_name[3] = UNSTUFF_BITS(resp, 72, 8);
		card->cid.prod_name[4] = UNSTUFF_BITS(resp, 64, 8);
		card->cid.prod_name[5] = UNSTUFF_BITS(resp, 56, 8);
		card->cid.prod_name[6] = UNSTUFF_BITS(resp, 48, 8);
		card->cid.hwrev = UNSTUFF_BITS(resp, 44, 4);
		card->cid.fwrev = UNSTUFF_BITS(resp, 40, 4);
		card->cid.serial = UNSTUFF_BITS(resp, 16, 24);
		card->cid.month = UNSTUFF_BITS(resp, 12, 4);
		card->cid.year = UNSTUFF_BITS(resp, 8, 4) + 1997;
		break;

	case 2: /* MMC v2.0 - v2.2 */
	case 3: /* MMC v3.1 - v3.3 */
	case 4: /* MMC v4 */
		card->cid.manfid = UNSTUFF_BITS(resp, 120, 8);
		card->cid.oemid = UNSTUFF_BITS(resp, 104, 16);
		card->cid.prod_name[0] = UNSTUFF_BITS(resp, 96, 8);
		card->cid.prod_name[1] = UNSTUFF_BITS(resp, 88, 8);
		card->cid.prod_name[2] = UNSTUFF_BITS(resp, 80, 8);
		card->cid.prod_name[3] = UNSTUFF_BITS(resp, 72, 8);
		card->cid.prod_name[4] = UNSTUFF_BITS(resp, 64, 8);
		card->cid.prod_name[5] = UNSTUFF_BITS(resp, 56, 8);
		card->cid.prv = UNSTUFF_BITS(resp, 48, 8);
		card->cid.serial = UNSTUFF_BITS(resp, 16, 32);
		card->cid.month = UNSTUFF_BITS(resp, 12, 4);
		card->cid.year = UNSTUFF_BITS(resp, 8, 4) + 1997;
		break;

	default:
		pr_err("%s: card has unknown MMCA version %d\n",
			mmc_hostname(card->host), card->csd.mmca_vsn);
		return -EINVAL;
	}

	return 0;
}

static void mmc_set_erase_size(struct mmc_card *card)
{
	if (card->ext_csd.erase_group_def & 1)
		card->erase_size = card->ext_csd.hc_erase_size;
	else
		card->erase_size = card->csd.erase_size;

	mmc_init_erase(card);
}

/*
 * Given a 128-bit response, decode to our card CSD structure.
 */
static int mmc_decode_csd(struct mmc_card *card)
{
	struct mmc_csd *csd = &card->csd;
	unsigned int e, m, a, b;
	u32 *resp = card->raw_csd;

	/*
	 * We only understand CSD structure v1.1 and v1.2.
	 * v1.2 has extra information in bits 15, 11 and 10.
	 * We also support eMMC v4.4 & v4.41.
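	 * The raw CSD is a 128-bit register delivered as four 32-bit
	 * response words; UNSTUFF_BITS() extracts a field by bit offset
	 * and width, e.g. UNSTUFF_BITS(resp, 122, 4) reads CSD bits
	 * [125:122], which live in bits [29:26] of resp[0].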
	 */
	csd->structure = UNSTUFF_BITS(resp, 126, 2);
	if (csd->structure == 0) {
		pr_err("%s: unrecognised CSD structure version %d\n",
			mmc_hostname(card->host), csd->structure);
		return -EINVAL;
	}

	csd->mmca_vsn = UNSTUFF_BITS(resp, 122, 4);
	m = UNSTUFF_BITS(resp, 115, 4);
	e = UNSTUFF_BITS(resp, 112, 3);
	csd->tacc_ns = (tacc_exp[e] * tacc_mant[m] + 9) / 10;
	csd->tacc_clks = UNSTUFF_BITS(resp, 104, 8) * 100;

	m = UNSTUFF_BITS(resp, 99, 4);
	e = UNSTUFF_BITS(resp, 96, 3);
	csd->max_dtr = tran_exp[e] * tran_mant[m];
	csd->cmdclass = UNSTUFF_BITS(resp, 84, 12);

	e = UNSTUFF_BITS(resp, 47, 3);
	m = UNSTUFF_BITS(resp, 62, 12);
	csd->capacity = (1 + m) << (e + 2);

	csd->read_blkbits = UNSTUFF_BITS(resp, 80, 4);
	csd->read_partial = UNSTUFF_BITS(resp, 79, 1);
	csd->write_misalign = UNSTUFF_BITS(resp, 78, 1);
	csd->read_misalign = UNSTUFF_BITS(resp, 77, 1);
	csd->r2w_factor = UNSTUFF_BITS(resp, 26, 3);
	csd->write_blkbits = UNSTUFF_BITS(resp, 22, 4);
	csd->write_partial = UNSTUFF_BITS(resp, 21, 1);

	if (csd->write_blkbits >= 9) {
		a = UNSTUFF_BITS(resp, 42, 5);
		b = UNSTUFF_BITS(resp, 37, 5);
		csd->erase_size = (a + 1) * (b + 1);
		csd->erase_size <<= csd->write_blkbits - 9;
	}

	return 0;
}

/*
 * Read extended CSD.
 */
static int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)
{
	int err;
	u8 *ext_csd;

	BUG_ON(!card);
	BUG_ON(!new_ext_csd);

	*new_ext_csd = NULL;

	if (card->csd.mmca_vsn < CSD_SPEC_VER_4)
		return 0;

	/*
	 * As the ext_csd is so large and mostly unused, we don't store the
	 * raw block in mmc_card.
	 */
	ext_csd = kmalloc(512, GFP_KERNEL);
	if (!ext_csd) {
		pr_err("%s: could not allocate a buffer to "
			"receive the ext_csd.\n", mmc_hostname(card->host));
		return -ENOMEM;
	}

	err = mmc_send_ext_csd(card, ext_csd);
	if (err) {
		kfree(ext_csd);
		*new_ext_csd = NULL;

		/* If the host or the card can't do the switch,
		 * fail more gracefully. */
		if ((err != -EINVAL)
		 && (err != -ENOSYS)
		 && (err != -EFAULT))
			return err;

		/*
		 * High capacity cards should have this "magic" size
		 * stored in their CSD.
		 */
		if (card->csd.capacity == (4096 * 512)) {
			pr_err("%s: unable to read EXT_CSD "
				"on a possible high capacity card. "
" 225 "Card will be ignored.\n", 226 mmc_hostname(card->host)); 227 } else { 228 pr_warning("%s: unable to read " 229 "EXT_CSD, performance might " 230 "suffer.\n", 231 mmc_hostname(card->host)); 232 err = 0; 233 } 234 } else 235 *new_ext_csd = ext_csd; 236 237 return err; 238 } 239 240 static void mmc_select_card_type(struct mmc_card *card) 241 { 242 struct mmc_host *host = card->host; 243 u8 card_type = card->ext_csd.raw_card_type & EXT_CSD_CARD_TYPE_MASK; 244 u32 caps = host->caps, caps2 = host->caps2; 245 unsigned int hs_max_dtr = 0; 246 247 if (card_type & EXT_CSD_CARD_TYPE_26) 248 hs_max_dtr = MMC_HIGH_26_MAX_DTR; 249 250 if (caps & MMC_CAP_MMC_HIGHSPEED && 251 card_type & EXT_CSD_CARD_TYPE_52) 252 hs_max_dtr = MMC_HIGH_52_MAX_DTR; 253 254 if ((caps & MMC_CAP_1_8V_DDR && 255 card_type & EXT_CSD_CARD_TYPE_DDR_1_8V) || 256 (caps & MMC_CAP_1_2V_DDR && 257 card_type & EXT_CSD_CARD_TYPE_DDR_1_2V)) 258 hs_max_dtr = MMC_HIGH_DDR_MAX_DTR; 259 260 if ((caps2 & MMC_CAP2_HS200_1_8V_SDR && 261 card_type & EXT_CSD_CARD_TYPE_SDR_1_8V) || 262 (caps2 & MMC_CAP2_HS200_1_2V_SDR && 263 card_type & EXT_CSD_CARD_TYPE_SDR_1_2V)) 264 hs_max_dtr = MMC_HS200_MAX_DTR; 265 266 card->ext_csd.hs_max_dtr = hs_max_dtr; 267 card->ext_csd.card_type = card_type; 268 } 269 270 /* 271 * Decode extended CSD. 272 */ 273 static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd) 274 { 275 int err = 0, idx; 276 unsigned int part_size; 277 u8 hc_erase_grp_sz = 0, hc_wp_grp_sz = 0; 278 279 BUG_ON(!card); 280 281 if (!ext_csd) 282 return 0; 283 284 /* Version is coded in the CSD_STRUCTURE byte in the EXT_CSD register */ 285 card->ext_csd.raw_ext_csd_structure = ext_csd[EXT_CSD_STRUCTURE]; 286 if (card->csd.structure == 3) { 287 if (card->ext_csd.raw_ext_csd_structure > 2) { 288 pr_err("%s: unrecognised EXT_CSD structure " 289 "version %d\n", mmc_hostname(card->host), 290 card->ext_csd.raw_ext_csd_structure); 291 err = -EINVAL; 292 goto out; 293 } 294 } 295 296 card->ext_csd.rev = ext_csd[EXT_CSD_REV]; 297 if (card->ext_csd.rev > 7) { 298 pr_err("%s: unrecognised EXT_CSD revision %d\n", 299 mmc_hostname(card->host), card->ext_csd.rev); 300 err = -EINVAL; 301 goto out; 302 } 303 304 card->ext_csd.raw_sectors[0] = ext_csd[EXT_CSD_SEC_CNT + 0]; 305 card->ext_csd.raw_sectors[1] = ext_csd[EXT_CSD_SEC_CNT + 1]; 306 card->ext_csd.raw_sectors[2] = ext_csd[EXT_CSD_SEC_CNT + 2]; 307 card->ext_csd.raw_sectors[3] = ext_csd[EXT_CSD_SEC_CNT + 3]; 308 if (card->ext_csd.rev >= 2) { 309 card->ext_csd.sectors = 310 ext_csd[EXT_CSD_SEC_CNT + 0] << 0 | 311 ext_csd[EXT_CSD_SEC_CNT + 1] << 8 | 312 ext_csd[EXT_CSD_SEC_CNT + 2] << 16 | 313 ext_csd[EXT_CSD_SEC_CNT + 3] << 24; 314 315 /* Cards with density > 2GiB are sector addressed */ 316 if (card->ext_csd.sectors > (2u * 1024 * 1024 * 1024) / 512) 317 mmc_card_set_blockaddr(card); 318 } 319 320 card->ext_csd.raw_card_type = ext_csd[EXT_CSD_CARD_TYPE]; 321 mmc_select_card_type(card); 322 323 card->ext_csd.raw_s_a_timeout = ext_csd[EXT_CSD_S_A_TIMEOUT]; 324 card->ext_csd.raw_erase_timeout_mult = 325 ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT]; 326 card->ext_csd.raw_hc_erase_grp_size = 327 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]; 328 if (card->ext_csd.rev >= 3) { 329 u8 sa_shift = ext_csd[EXT_CSD_S_A_TIMEOUT]; 330 card->ext_csd.part_config = ext_csd[EXT_CSD_PART_CONFIG]; 331 332 /* EXT_CSD value is in units of 10ms, but we store in ms */ 333 card->ext_csd.part_time = 10 * ext_csd[EXT_CSD_PART_SWITCH_TIME]; 334 335 /* Sleep / awake timeout in 100ns units */ 336 if (sa_shift > 0 && sa_shift <= 0x17) 337 
			card->ext_csd.sa_timeout =
					1 << ext_csd[EXT_CSD_S_A_TIMEOUT];
		card->ext_csd.erase_group_def =
			ext_csd[EXT_CSD_ERASE_GROUP_DEF];
		card->ext_csd.hc_erase_timeout = 300 *
			ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT];
		card->ext_csd.hc_erase_size =
			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] << 10;

		card->ext_csd.rel_sectors = ext_csd[EXT_CSD_REL_WR_SEC_C];

		/*
		 * There are two boot regions of equal size, defined in
		 * multiples of 128K.
		 */
		if (ext_csd[EXT_CSD_BOOT_MULT] && mmc_boot_partition_access(card->host)) {
			for (idx = 0; idx < MMC_NUM_BOOT_PARTITION; idx++) {
				part_size = ext_csd[EXT_CSD_BOOT_MULT] << 17;
				mmc_part_add(card, part_size,
					EXT_CSD_PART_CONFIG_ACC_BOOT0 + idx,
					"boot%d", idx, true,
					MMC_BLK_DATA_AREA_BOOT);
			}
		}
	}

	card->ext_csd.raw_hc_erase_gap_size =
		ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
	card->ext_csd.raw_sec_trim_mult =
		ext_csd[EXT_CSD_SEC_TRIM_MULT];
	card->ext_csd.raw_sec_erase_mult =
		ext_csd[EXT_CSD_SEC_ERASE_MULT];
	card->ext_csd.raw_sec_feature_support =
		ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT];
	card->ext_csd.raw_trim_mult =
		ext_csd[EXT_CSD_TRIM_MULT];
	card->ext_csd.raw_partition_support = ext_csd[EXT_CSD_PARTITION_SUPPORT];
	if (card->ext_csd.rev >= 4) {
		/*
		 * Enhanced area feature support -- check whether the eMMC
		 * card has the Enhanced area enabled.  If so, export enhanced
		 * area offset and size to user by adding sysfs interface.
		 */
		if ((ext_csd[EXT_CSD_PARTITION_SUPPORT] & 0x2) &&
		    (ext_csd[EXT_CSD_PARTITION_ATTRIBUTE] & 0x1)) {
			hc_erase_grp_sz =
				ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
			hc_wp_grp_sz =
				ext_csd[EXT_CSD_HC_WP_GRP_SIZE];

			card->ext_csd.enhanced_area_en = 1;
			/*
			 * calculate the enhanced data area offset, in bytes
			 */
			card->ext_csd.enhanced_area_offset =
				(ext_csd[139] << 24) + (ext_csd[138] << 16) +
				(ext_csd[137] << 8) + ext_csd[136];
			if (mmc_card_blockaddr(card))
				card->ext_csd.enhanced_area_offset <<= 9;
			/*
			 * calculate the enhanced data area size, in kilobytes
			 */
			card->ext_csd.enhanced_area_size =
				(ext_csd[142] << 16) + (ext_csd[141] << 8) +
				ext_csd[140];
			card->ext_csd.enhanced_area_size *=
				(size_t)(hc_erase_grp_sz * hc_wp_grp_sz);
			card->ext_csd.enhanced_area_size <<= 9;
		} else {
			/*
			 * If the enhanced area is not enabled, disable these
			 * device attributes.
			 */
			card->ext_csd.enhanced_area_offset = -EINVAL;
			card->ext_csd.enhanced_area_size = -EINVAL;
		}

		/*
		 * General purpose partition feature support --
		 * If ext_csd has the size of general purpose partitions,
		 * set size, part_cfg, partition name in mmc_part.
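		 * Each partition size is a 3-byte multiplier stored
		 * little-endian at EXT_CSD_GP_SIZE_MULT, scaled by the
		 * high-capacity erase group and write protect group sizes.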
		 */
		if (ext_csd[EXT_CSD_PARTITION_SUPPORT] &
			EXT_CSD_PART_SUPPORT_PART_EN) {
			if (card->ext_csd.enhanced_area_en != 1) {
				hc_erase_grp_sz =
					ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
				hc_wp_grp_sz =
					ext_csd[EXT_CSD_HC_WP_GRP_SIZE];

				card->ext_csd.enhanced_area_en = 1;
			}

			for (idx = 0; idx < MMC_NUM_GP_PARTITION; idx++) {
				if (!ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3] &&
				!ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 1] &&
				!ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 2])
					continue;
				part_size =
				(ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 2]
					<< 16) +
				(ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 1]
					<< 8) +
				ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3];
				part_size *= (size_t)(hc_erase_grp_sz *
					hc_wp_grp_sz);
				mmc_part_add(card, part_size << 19,
					EXT_CSD_PART_CONFIG_ACC_GP0 + idx,
					"gp%d", idx, false,
					MMC_BLK_DATA_AREA_GP);
			}
		}
		card->ext_csd.sec_trim_mult =
			ext_csd[EXT_CSD_SEC_TRIM_MULT];
		card->ext_csd.sec_erase_mult =
			ext_csd[EXT_CSD_SEC_ERASE_MULT];
		card->ext_csd.sec_feature_support =
			ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT];
		card->ext_csd.trim_timeout = 300 *
			ext_csd[EXT_CSD_TRIM_MULT];

		/*
		 * Note that the call to mmc_part_add above defaults to read
		 * only. If this default assumption is changed, the call must
		 * take into account the value of boot_locked below.
		 */
		card->ext_csd.boot_ro_lock = ext_csd[EXT_CSD_BOOT_WP];
		card->ext_csd.boot_ro_lockable = true;

		/* Save power class values */
		card->ext_csd.raw_pwr_cl_52_195 =
			ext_csd[EXT_CSD_PWR_CL_52_195];
		card->ext_csd.raw_pwr_cl_26_195 =
			ext_csd[EXT_CSD_PWR_CL_26_195];
		card->ext_csd.raw_pwr_cl_52_360 =
			ext_csd[EXT_CSD_PWR_CL_52_360];
		card->ext_csd.raw_pwr_cl_26_360 =
			ext_csd[EXT_CSD_PWR_CL_26_360];
		card->ext_csd.raw_pwr_cl_200_195 =
			ext_csd[EXT_CSD_PWR_CL_200_195];
		card->ext_csd.raw_pwr_cl_200_360 =
			ext_csd[EXT_CSD_PWR_CL_200_360];
		card->ext_csd.raw_pwr_cl_ddr_52_195 =
			ext_csd[EXT_CSD_PWR_CL_DDR_52_195];
		card->ext_csd.raw_pwr_cl_ddr_52_360 =
			ext_csd[EXT_CSD_PWR_CL_DDR_52_360];
	}

	if (card->ext_csd.rev >= 5) {
		/* Adjust production date as per JEDEC JESD84-B451 */
		if (card->cid.year < 2010)
			card->cid.year += 16;

		/* check whether the eMMC card supports BKOPS */
		if (ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1) {
			card->ext_csd.bkops = 1;
			card->ext_csd.bkops_en = ext_csd[EXT_CSD_BKOPS_EN];
			card->ext_csd.raw_bkops_status =
				ext_csd[EXT_CSD_BKOPS_STATUS];
			if (!card->ext_csd.bkops_en)
				pr_info("%s: BKOPS_EN bit is not set\n",
					mmc_hostname(card->host));
		}

		/* check whether the eMMC card supports HPI */
		if (ext_csd[EXT_CSD_HPI_FEATURES] & 0x1) {
			card->ext_csd.hpi = 1;
			if (ext_csd[EXT_CSD_HPI_FEATURES] & 0x2)
				card->ext_csd.hpi_cmd = MMC_STOP_TRANSMISSION;
			else
				card->ext_csd.hpi_cmd = MMC_SEND_STATUS;
			/*
			 * Indicate the maximum timeout to close
			 * a command interrupted by HPI
			 */
			card->ext_csd.out_of_int_time =
				ext_csd[EXT_CSD_OUT_OF_INTERRUPT_TIME] * 10;
		}

		card->ext_csd.rel_param = ext_csd[EXT_CSD_WR_REL_PARAM];
		card->ext_csd.rst_n_function = ext_csd[EXT_CSD_RST_N_FUNCTION];

		/*
		 * RPMB regions are defined in multiples of 128K.
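		 * The RPMB partition is only registered when the host
		 * supports CMD23 (SET_BLOCK_COUNT), which RPMB accesses
		 * rely on.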
		 */
		card->ext_csd.raw_rpmb_size_mult = ext_csd[EXT_CSD_RPMB_MULT];
		if (ext_csd[EXT_CSD_RPMB_MULT] && mmc_host_cmd23(card->host)) {
			mmc_part_add(card, ext_csd[EXT_CSD_RPMB_MULT] << 17,
				EXT_CSD_PART_CONFIG_ACC_RPMB,
				"rpmb", 0, false,
				MMC_BLK_DATA_AREA_RPMB);
		}
	}

	card->ext_csd.raw_erased_mem_count = ext_csd[EXT_CSD_ERASED_MEM_CONT];
	if (ext_csd[EXT_CSD_ERASED_MEM_CONT])
		card->erased_byte = 0xFF;
	else
		card->erased_byte = 0x0;

	/* eMMC v4.5 or later */
	if (card->ext_csd.rev >= 6) {
		card->ext_csd.feature_support |= MMC_DISCARD_FEATURE;

		card->ext_csd.generic_cmd6_time = 10 *
			ext_csd[EXT_CSD_GENERIC_CMD6_TIME];
		card->ext_csd.power_off_longtime = 10 *
			ext_csd[EXT_CSD_POWER_OFF_LONG_TIME];

		card->ext_csd.cache_size =
			ext_csd[EXT_CSD_CACHE_SIZE + 0] << 0 |
			ext_csd[EXT_CSD_CACHE_SIZE + 1] << 8 |
			ext_csd[EXT_CSD_CACHE_SIZE + 2] << 16 |
			ext_csd[EXT_CSD_CACHE_SIZE + 3] << 24;

		if (ext_csd[EXT_CSD_DATA_SECTOR_SIZE] == 1)
			card->ext_csd.data_sector_size = 4096;
		else
			card->ext_csd.data_sector_size = 512;

		if ((ext_csd[EXT_CSD_DATA_TAG_SUPPORT] & 1) &&
		    (ext_csd[EXT_CSD_TAG_UNIT_SIZE] <= 8)) {
			card->ext_csd.data_tag_unit_size =
			((unsigned int) 1 << ext_csd[EXT_CSD_TAG_UNIT_SIZE]) *
			(card->ext_csd.data_sector_size);
		} else {
			card->ext_csd.data_tag_unit_size = 0;
		}

		card->ext_csd.max_packed_writes =
			ext_csd[EXT_CSD_MAX_PACKED_WRITES];
		card->ext_csd.max_packed_reads =
			ext_csd[EXT_CSD_MAX_PACKED_READS];
	} else {
		card->ext_csd.data_sector_size = 512;
	}

out:
	return err;
}

static inline void mmc_free_ext_csd(u8 *ext_csd)
{
	kfree(ext_csd);
}


static int mmc_compare_ext_csds(struct mmc_card *card, unsigned bus_width)
{
	u8 *bw_ext_csd;
	int err;

	if (bus_width == MMC_BUS_WIDTH_1)
		return 0;

	err = mmc_get_ext_csd(card, &bw_ext_csd);

	if (err || bw_ext_csd == NULL) {
		err = -EINVAL;
		goto out;
	}

	/* only compare read only fields */
	err = !((card->ext_csd.raw_partition_support ==
			bw_ext_csd[EXT_CSD_PARTITION_SUPPORT]) &&
		(card->ext_csd.raw_erased_mem_count ==
			bw_ext_csd[EXT_CSD_ERASED_MEM_CONT]) &&
		(card->ext_csd.rev ==
			bw_ext_csd[EXT_CSD_REV]) &&
		(card->ext_csd.raw_ext_csd_structure ==
			bw_ext_csd[EXT_CSD_STRUCTURE]) &&
		(card->ext_csd.raw_card_type ==
			bw_ext_csd[EXT_CSD_CARD_TYPE]) &&
		(card->ext_csd.raw_s_a_timeout ==
			bw_ext_csd[EXT_CSD_S_A_TIMEOUT]) &&
		(card->ext_csd.raw_hc_erase_gap_size ==
			bw_ext_csd[EXT_CSD_HC_WP_GRP_SIZE]) &&
		(card->ext_csd.raw_erase_timeout_mult ==
			bw_ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT]) &&
		(card->ext_csd.raw_hc_erase_grp_size ==
			bw_ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]) &&
		(card->ext_csd.raw_sec_trim_mult ==
			bw_ext_csd[EXT_CSD_SEC_TRIM_MULT]) &&
		(card->ext_csd.raw_sec_erase_mult ==
			bw_ext_csd[EXT_CSD_SEC_ERASE_MULT]) &&
		(card->ext_csd.raw_sec_feature_support ==
			bw_ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT]) &&
		(card->ext_csd.raw_trim_mult ==
			bw_ext_csd[EXT_CSD_TRIM_MULT]) &&
		(card->ext_csd.raw_sectors[0] ==
			bw_ext_csd[EXT_CSD_SEC_CNT + 0]) &&
		(card->ext_csd.raw_sectors[1] ==
			bw_ext_csd[EXT_CSD_SEC_CNT + 1]) &&
		(card->ext_csd.raw_sectors[2] ==
			bw_ext_csd[EXT_CSD_SEC_CNT + 2]) &&
		(card->ext_csd.raw_sectors[3] ==
			bw_ext_csd[EXT_CSD_SEC_CNT + 3]) &&
		(card->ext_csd.raw_pwr_cl_52_195 ==
			bw_ext_csd[EXT_CSD_PWR_CL_52_195]) &&
		(card->ext_csd.raw_pwr_cl_26_195 ==
			bw_ext_csd[EXT_CSD_PWR_CL_26_195]) &&
		(card->ext_csd.raw_pwr_cl_52_360 ==
			bw_ext_csd[EXT_CSD_PWR_CL_52_360]) &&
		(card->ext_csd.raw_pwr_cl_26_360 ==
			bw_ext_csd[EXT_CSD_PWR_CL_26_360]) &&
		(card->ext_csd.raw_pwr_cl_200_195 ==
			bw_ext_csd[EXT_CSD_PWR_CL_200_195]) &&
		(card->ext_csd.raw_pwr_cl_200_360 ==
			bw_ext_csd[EXT_CSD_PWR_CL_200_360]) &&
		(card->ext_csd.raw_pwr_cl_ddr_52_195 ==
			bw_ext_csd[EXT_CSD_PWR_CL_DDR_52_195]) &&
		(card->ext_csd.raw_pwr_cl_ddr_52_360 ==
			bw_ext_csd[EXT_CSD_PWR_CL_DDR_52_360]));
	if (err)
		err = -EINVAL;

out:
	mmc_free_ext_csd(bw_ext_csd);
	return err;
}

MMC_DEV_ATTR(cid, "%08x%08x%08x%08x\n", card->raw_cid[0], card->raw_cid[1],
	card->raw_cid[2], card->raw_cid[3]);
MMC_DEV_ATTR(csd, "%08x%08x%08x%08x\n", card->raw_csd[0], card->raw_csd[1],
	card->raw_csd[2], card->raw_csd[3]);
MMC_DEV_ATTR(date, "%02d/%04d\n", card->cid.month, card->cid.year);
MMC_DEV_ATTR(erase_size, "%u\n", card->erase_size << 9);
MMC_DEV_ATTR(preferred_erase_size, "%u\n", card->pref_erase << 9);
MMC_DEV_ATTR(fwrev, "0x%x\n", card->cid.fwrev);
MMC_DEV_ATTR(hwrev, "0x%x\n", card->cid.hwrev);
MMC_DEV_ATTR(manfid, "0x%06x\n", card->cid.manfid);
MMC_DEV_ATTR(name, "%s\n", card->cid.prod_name);
MMC_DEV_ATTR(oemid, "0x%04x\n", card->cid.oemid);
MMC_DEV_ATTR(prv, "0x%x\n", card->cid.prv);
MMC_DEV_ATTR(serial, "0x%08x\n", card->cid.serial);
MMC_DEV_ATTR(enhanced_area_offset, "%llu\n",
		card->ext_csd.enhanced_area_offset);
MMC_DEV_ATTR(enhanced_area_size, "%u\n", card->ext_csd.enhanced_area_size);
MMC_DEV_ATTR(raw_rpmb_size_mult, "%#x\n", card->ext_csd.raw_rpmb_size_mult);
MMC_DEV_ATTR(rel_sectors, "%#x\n", card->ext_csd.rel_sectors);

static struct attribute *mmc_std_attrs[] = {
	&dev_attr_cid.attr,
	&dev_attr_csd.attr,
	&dev_attr_date.attr,
	&dev_attr_erase_size.attr,
	&dev_attr_preferred_erase_size.attr,
	&dev_attr_fwrev.attr,
	&dev_attr_hwrev.attr,
	&dev_attr_manfid.attr,
	&dev_attr_name.attr,
	&dev_attr_oemid.attr,
	&dev_attr_prv.attr,
	&dev_attr_serial.attr,
	&dev_attr_enhanced_area_offset.attr,
	&dev_attr_enhanced_area_size.attr,
	&dev_attr_raw_rpmb_size_mult.attr,
	&dev_attr_rel_sectors.attr,
	NULL,
};

static struct attribute_group mmc_std_attr_group = {
	.attrs = mmc_std_attrs,
};

static const struct attribute_group *mmc_attr_groups[] = {
	&mmc_std_attr_group,
	NULL,
};

static struct device_type mmc_type = {
	.groups = mmc_attr_groups,
};

/*
 * Select the PowerClass for the current bus width
 * If power class is defined for 4/8 bit bus in the
 * extended CSD register, select it by executing the
 * mmc_switch command.
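 * The power class byte read from EXT_CSD packs the 4-bit-bus value in
 * the low nibble and the 8-bit-bus value in the high nibble; the field
 * consulted depends on the current VDD range and clock frequency.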
 */
static int mmc_select_powerclass(struct mmc_card *card,
		unsigned int bus_width)
{
	int err = 0;
	unsigned int pwrclass_val = 0;
	struct mmc_host *host;

	BUG_ON(!card);

	host = card->host;
	BUG_ON(!host);

	/* Power class selection is supported for versions >= 4.0 */
	if (card->csd.mmca_vsn < CSD_SPEC_VER_4)
		return 0;

	/* Power class values are defined only for 4/8 bit bus */
	if (bus_width == EXT_CSD_BUS_WIDTH_1)
		return 0;

	switch (1 << host->ios.vdd) {
	case MMC_VDD_165_195:
		if (host->ios.clock <= 26000000)
			pwrclass_val = card->ext_csd.raw_pwr_cl_26_195;
		else if (host->ios.clock <= 52000000)
			pwrclass_val = (bus_width <= EXT_CSD_BUS_WIDTH_8) ?
				card->ext_csd.raw_pwr_cl_52_195 :
				card->ext_csd.raw_pwr_cl_ddr_52_195;
		else if (host->ios.clock <= 200000000)
			pwrclass_val = card->ext_csd.raw_pwr_cl_200_195;
		break;
	case MMC_VDD_27_28:
	case MMC_VDD_28_29:
	case MMC_VDD_29_30:
	case MMC_VDD_30_31:
	case MMC_VDD_31_32:
	case MMC_VDD_32_33:
	case MMC_VDD_33_34:
	case MMC_VDD_34_35:
	case MMC_VDD_35_36:
		if (host->ios.clock <= 26000000)
			pwrclass_val = card->ext_csd.raw_pwr_cl_26_360;
		else if (host->ios.clock <= 52000000)
			pwrclass_val = (bus_width <= EXT_CSD_BUS_WIDTH_8) ?
				card->ext_csd.raw_pwr_cl_52_360 :
				card->ext_csd.raw_pwr_cl_ddr_52_360;
		else if (host->ios.clock <= 200000000)
			pwrclass_val = card->ext_csd.raw_pwr_cl_200_360;
		break;
	default:
		pr_warning("%s: Voltage range not supported "
			   "for power class.\n", mmc_hostname(host));
		return -EINVAL;
	}

	if (bus_width & (EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_BUS_WIDTH_8))
		pwrclass_val = (pwrclass_val & EXT_CSD_PWR_CL_8BIT_MASK) >>
				EXT_CSD_PWR_CL_8BIT_SHIFT;
	else
		pwrclass_val = (pwrclass_val & EXT_CSD_PWR_CL_4BIT_MASK) >>
				EXT_CSD_PWR_CL_4BIT_SHIFT;

	/* If the power class is different from the default value */
	if (pwrclass_val > 0) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_POWER_CLASS,
				 pwrclass_val,
				 card->ext_csd.generic_cmd6_time);
	}

	return err;
}

/*
 * Selects the desired bus width and switches to HS200 mode
 * if the bus width was set without error.
 */
static int mmc_select_hs200(struct mmc_card *card)
{
	int idx, err = -EINVAL;
	struct mmc_host *host;
	static unsigned ext_csd_bits[] = {
		EXT_CSD_BUS_WIDTH_4,
		EXT_CSD_BUS_WIDTH_8,
	};
	static unsigned bus_widths[] = {
		MMC_BUS_WIDTH_4,
		MMC_BUS_WIDTH_8,
	};

	BUG_ON(!card);

	host = card->host;

	if (card->ext_csd.card_type & EXT_CSD_CARD_TYPE_SDR_1_2V &&
			host->caps2 & MMC_CAP2_HS200_1_2V_SDR)
		err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120);

	if (err && card->ext_csd.card_type & EXT_CSD_CARD_TYPE_SDR_1_8V &&
			host->caps2 & MMC_CAP2_HS200_1_8V_SDR)
		err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180);

	/* If it fails, try again during the next card power cycle */
	if (err)
		goto err;

	idx = (host->caps & MMC_CAP_8_BIT_DATA) ? 1 : 0;

	/*
	 * Unlike SD, MMC cards don't have a configuration register to notify
	 * supported bus width. So bus test command should be run to identify
	 * the supported bus width or compare the ext csd values of current
	 * bus width and ext csd values of 1 bit mode read earlier.
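	 * The loop below therefore tries 8-bit first (when the host
	 * advertises MMC_CAP_8_BIT_DATA) and falls back to 4-bit if the
	 * switch or the verification step fails.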
	 */
	for (; idx >= 0; idx--) {

		/*
		 * If the host is capable of 8-bit transfer, switch
		 * the device to work in 8-bit transfer mode. If the
		 * mmc switch command returns an error, switch to
		 * 4-bit transfer mode. On success set the corresponding
		 * bus width on the host.
		 */
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_BUS_WIDTH,
				 ext_csd_bits[idx],
				 card->ext_csd.generic_cmd6_time);
		if (err)
			continue;

		mmc_set_bus_width(card->host, bus_widths[idx]);

		if (!(host->caps & MMC_CAP_BUS_WIDTH_TEST))
			err = mmc_compare_ext_csds(card, bus_widths[idx]);
		else
			err = mmc_bus_test(card, bus_widths[idx]);
		if (!err)
			break;
	}

	/* switch to HS200 mode if bus width set successfully */
	if (!err)
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_HS_TIMING, 2, 0);
err:
	return err;
}

/*
 * Handle the detection and initialisation of a card.
 *
 * In the case of a resume, "oldcard" will contain the card
 * we're trying to reinitialise.
 */
static int mmc_init_card(struct mmc_host *host, u32 ocr,
	struct mmc_card *oldcard)
{
	struct mmc_card *card;
	int err, ddr = 0;
	u32 cid[4];
	unsigned int max_dtr;
	u32 rocr;
	u8 *ext_csd = NULL;

	BUG_ON(!host);
	WARN_ON(!host->claimed);

	/* Set correct bus mode for MMC before attempting init */
	if (!mmc_host_is_spi(host))
		mmc_set_bus_mode(host, MMC_BUSMODE_OPENDRAIN);

	/*
	 * Since we're changing the OCR value, we seem to
	 * need to tell some cards to go back to the idle
	 * state.  We wait 1ms to give cards time to
	 * respond.
	 * mmc_go_idle is needed for eMMCs that are asleep
	 */
	mmc_go_idle(host);

	/* The extra bit indicates that we support high capacity */
	err = mmc_send_op_cond(host, ocr | (1 << 30), &rocr);
	if (err)
		goto err;

	/*
	 * For SPI, enable CRC as appropriate.
	 */
	if (mmc_host_is_spi(host)) {
		err = mmc_spi_set_crc(host, use_spi_crc);
		if (err)
			goto err;
	}

	/*
	 * Fetch CID from card.
	 */
	if (mmc_host_is_spi(host))
		err = mmc_send_cid(host, cid);
	else
		err = mmc_all_send_cid(host, cid);
	if (err)
		goto err;

	if (oldcard) {
		if (memcmp(cid, oldcard->raw_cid, sizeof(cid)) != 0) {
			err = -ENOENT;
			goto err;
		}

		card = oldcard;
	} else {
		/*
		 * Allocate card structure.
		 */
		card = mmc_alloc_card(host, &mmc_type);
		if (IS_ERR(card)) {
			err = PTR_ERR(card);
			goto err;
		}

		card->ocr = ocr;
		card->type = MMC_TYPE_MMC;
		card->rca = 1;
		memcpy(card->raw_cid, cid, sizeof(card->raw_cid));
	}

	/*
	 * For native busses: set card RCA and quit open drain mode.
	 */
	if (!mmc_host_is_spi(host)) {
		err = mmc_set_relative_addr(card);
		if (err)
			goto free_card;

		mmc_set_bus_mode(host, MMC_BUSMODE_PUSHPULL);
	}

	if (!oldcard) {
		/*
		 * Fetch CSD from card.
		 */
		err = mmc_send_csd(card, card->raw_csd);
		if (err)
			goto free_card;

		err = mmc_decode_csd(card);
		if (err)
			goto free_card;
		err = mmc_decode_cid(card);
		if (err)
			goto free_card;
	}

	/*
	 * Select card, as all following commands rely on that.
	 */
	if (!mmc_host_is_spi(host)) {
		err = mmc_select_card(card);
		if (err)
			goto free_card;
	}

	if (!oldcard) {
		/*
		 * Fetch and process extended CSD.
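		 * This is only done on first-time initialisation; on a
		 * resume the EXT_CSD fields already cached in the card
		 * structure are reused.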
		 */

		err = mmc_get_ext_csd(card, &ext_csd);
		if (err)
			goto free_card;
		err = mmc_read_ext_csd(card, ext_csd);
		if (err)
			goto free_card;

		/* If doing byte addressing, check if required to do sector
		 * addressing.  Handle the case of <2GB cards needing sector
		 * addressing.  See section 8.1 JEDEC Standard JED84-A441;
		 * ocr register has bit 30 set for sector addressing.
		 */
		if (!(mmc_card_blockaddr(card)) && (rocr & (1<<30)))
			mmc_card_set_blockaddr(card);

		/* Erase size depends on CSD and Extended CSD */
		mmc_set_erase_size(card);
	}

	/*
	 * If enhanced_area_en is TRUE, host needs to enable ERASE_GRP_DEF
	 * bit.  This bit will be lost every time after a reset or power off.
	 */
	if (card->ext_csd.enhanced_area_en ||
	    (card->ext_csd.rev >= 3 && (host->caps2 & MMC_CAP2_HC_ERASE_SZ))) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ERASE_GROUP_DEF, 1,
				 card->ext_csd.generic_cmd6_time);

		if (err && err != -EBADMSG)
			goto free_card;

		if (err) {
			err = 0;
			/*
			 * Just disable the enhanced area offset & size;
			 * we will try to enable ERASE_GROUP_DEF again at
			 * the next reinit.
			 */
			card->ext_csd.enhanced_area_offset = -EINVAL;
			card->ext_csd.enhanced_area_size = -EINVAL;
		} else {
			card->ext_csd.erase_group_def = 1;
			/*
			 * ERASE_GROUP_DEF was enabled successfully. This
			 * affects the erase size, so the erase size needs
			 * to be reset here.
			 */
			mmc_set_erase_size(card);
		}
	}

	/*
	 * Ensure eMMC user default partition is enabled
	 */
	if (card->ext_csd.part_config & EXT_CSD_PART_CONFIG_ACC_MASK) {
		card->ext_csd.part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONFIG,
				 card->ext_csd.part_config,
				 card->ext_csd.part_time);
		if (err && err != -EBADMSG)
			goto free_card;
	}

	/*
	 * Enable power_off_notification byte in the ext_csd register
	 */
	if (card->ext_csd.rev >= 6) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_POWER_OFF_NOTIFICATION,
				 EXT_CSD_POWER_ON,
				 card->ext_csd.generic_cmd6_time);
		if (err && err != -EBADMSG)
			goto free_card;

		/*
		 * The err can be -EBADMSG or 0,
		 * so check for success and update the flag
		 */
		if (!err)
			card->ext_csd.power_off_notification = EXT_CSD_POWER_ON;
	}

	/*
	 * Activate high speed (if supported)
	 */
	if (card->ext_csd.hs_max_dtr != 0) {
		err = 0;
		if (card->ext_csd.hs_max_dtr > 52000000 &&
		    host->caps2 & MMC_CAP2_HS200)
			err = mmc_select_hs200(card);
		else if (host->caps & MMC_CAP_MMC_HIGHSPEED)
			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
					 EXT_CSD_HS_TIMING, 1,
					 card->ext_csd.generic_cmd6_time);

		if (err && err != -EBADMSG)
			goto free_card;

		if (err) {
			pr_warning("%s: switch to highspeed failed\n",
				mmc_hostname(card->host));
			err = 0;
		} else {
			if (card->ext_csd.hs_max_dtr > 52000000 &&
			    host->caps2 & MMC_CAP2_HS200) {
				mmc_card_set_hs200(card);
				mmc_set_timing(card->host,
					       MMC_TIMING_MMC_HS200);
			} else {
				mmc_card_set_highspeed(card);
				mmc_set_timing(card->host, MMC_TIMING_MMC_HS);
			}
		}
	}

	/*
	 * Compute bus speed.
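	 * The clock is capped at the card's advertised high-speed maximum
	 * (and at 52 MHz for plain high-speed mode), or at the legacy CSD
	 * maximum otherwise.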
	 */
	max_dtr = (unsigned int)-1;

	if (mmc_card_highspeed(card) || mmc_card_hs200(card)) {
		if (max_dtr > card->ext_csd.hs_max_dtr)
			max_dtr = card->ext_csd.hs_max_dtr;
		if (mmc_card_highspeed(card) && (max_dtr > 52000000))
			max_dtr = 52000000;
	} else if (max_dtr > card->csd.max_dtr) {
		max_dtr = card->csd.max_dtr;
	}

	mmc_set_clock(host, max_dtr);

	/*
	 * Indicate DDR mode (if supported).
	 */
	if (mmc_card_highspeed(card)) {
		if ((card->ext_csd.card_type & EXT_CSD_CARD_TYPE_DDR_1_8V)
			&& ((host->caps & (MMC_CAP_1_8V_DDR |
			     MMC_CAP_UHS_DDR50))
				== (MMC_CAP_1_8V_DDR | MMC_CAP_UHS_DDR50)))
			ddr = MMC_1_8V_DDR_MODE;
		else if ((card->ext_csd.card_type & EXT_CSD_CARD_TYPE_DDR_1_2V)
			&& ((host->caps & (MMC_CAP_1_2V_DDR |
			     MMC_CAP_UHS_DDR50))
				== (MMC_CAP_1_2V_DDR | MMC_CAP_UHS_DDR50)))
			ddr = MMC_1_2V_DDR_MODE;
	}

	/*
	 * Indicate HS200 SDR mode (if supported).
	 */
	if (mmc_card_hs200(card)) {
		u32 ext_csd_bits;
		u32 bus_width = card->host->ios.bus_width;

		/*
		 * For devices supporting HS200 mode, the bus width has
		 * to be set before executing the tuning function. If
		 * tuning is attempted before the bus width is set, the
		 * device will respond with CRC errors for responses on
		 * the CMD line. So for HS200 the sequence will be
		 * 1. set bus width 4 bit / 8 bit (1 bit not supported)
		 * 2. switch to HS200 mode
		 * 3. set the clock to > 52 MHz and <= 200 MHz
		 * 4. execute tuning for HS200
		 */
		if ((host->caps2 & MMC_CAP2_HS200) &&
		    card->host->ops->execute_tuning) {
			mmc_host_clk_hold(card->host);
			err = card->host->ops->execute_tuning(card->host,
				MMC_SEND_TUNING_BLOCK_HS200);
			mmc_host_clk_release(card->host);
		}
		if (err) {
			pr_warning("%s: tuning execution failed\n",
				   mmc_hostname(card->host));
			goto err;
		}

		ext_csd_bits = (bus_width == MMC_BUS_WIDTH_8) ?
				EXT_CSD_BUS_WIDTH_8 : EXT_CSD_BUS_WIDTH_4;
		err = mmc_select_powerclass(card, ext_csd_bits);
		if (err)
			pr_warning("%s: power class selection to bus width %d"
				   " failed\n", mmc_hostname(card->host),
				   1 << bus_width);
	}

	/*
	 * Activate wide bus and DDR (if supported).
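	 * HS200 cards have already negotiated their bus width above, so
	 * the widest supported width is only probed here for non-HS200
	 * cards (spec version >= 4 with a 4-bit or 8-bit capable host).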
	 */
	if (!mmc_card_hs200(card) &&
	    (card->csd.mmca_vsn >= CSD_SPEC_VER_4) &&
	    (host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA))) {
		static unsigned ext_csd_bits[][2] = {
			{ EXT_CSD_BUS_WIDTH_8, EXT_CSD_DDR_BUS_WIDTH_8 },
			{ EXT_CSD_BUS_WIDTH_4, EXT_CSD_DDR_BUS_WIDTH_4 },
			{ EXT_CSD_BUS_WIDTH_1, EXT_CSD_BUS_WIDTH_1 },
		};
		static unsigned bus_widths[] = {
			MMC_BUS_WIDTH_8,
			MMC_BUS_WIDTH_4,
			MMC_BUS_WIDTH_1
		};
		unsigned idx, bus_width = 0;

		if (host->caps & MMC_CAP_8_BIT_DATA)
			idx = 0;
		else
			idx = 1;
		for (; idx < ARRAY_SIZE(bus_widths); idx++) {
			bus_width = bus_widths[idx];
			if (bus_width == MMC_BUS_WIDTH_1)
				ddr = 0; /* no DDR for 1-bit width */
			err = mmc_select_powerclass(card, ext_csd_bits[idx][0]);
			if (err)
				pr_warning("%s: power class selection to "
					   "bus width %d failed\n",
					   mmc_hostname(card->host),
					   1 << bus_width);

			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
					 EXT_CSD_BUS_WIDTH,
					 ext_csd_bits[idx][0],
					 card->ext_csd.generic_cmd6_time);
			if (!err) {
				mmc_set_bus_width(card->host, bus_width);

				/*
				 * If controller can't handle bus width test,
				 * compare ext_csd previously read in 1 bit mode
				 * against ext_csd at new bus width
				 */
				if (!(host->caps & MMC_CAP_BUS_WIDTH_TEST))
					err = mmc_compare_ext_csds(card,
						bus_width);
				else
					err = mmc_bus_test(card, bus_width);
				if (!err)
					break;
			}
		}

		if (!err && ddr) {
			err = mmc_select_powerclass(card, ext_csd_bits[idx][1]);
			if (err)
				pr_warning("%s: power class selection to "
					   "bus width %d ddr %d failed\n",
					   mmc_hostname(card->host),
					   1 << bus_width, ddr);

			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
					 EXT_CSD_BUS_WIDTH,
					 ext_csd_bits[idx][1],
					 card->ext_csd.generic_cmd6_time);
		}
		if (err) {
			pr_warning("%s: switch to bus width %d ddr %d "
				   "failed\n", mmc_hostname(card->host),
				   1 << bus_width, ddr);
			goto free_card;
		} else if (ddr) {
			/*
			 * eMMC cards can support 3.3V to 1.2V i/o (vccq)
			 * signaling.
			 *
			 * EXT_CSD_CARD_TYPE_DDR_1_8V means 3.3V or 1.8V vccq.
			 *
			 * 1.8V vccq at 3.3V core voltage (vcc) is not required
			 * in the JEDEC spec for DDR.
			 *
			 * Do not force change in vccq since we are obviously
			 * working and no change to vccq is needed.
			 *
			 * WARNING: eMMC rules are NOT the same as SD DDR
			 */
			if (ddr == MMC_1_2V_DDR_MODE) {
				err = __mmc_set_signal_voltage(host,
					MMC_SIGNAL_VOLTAGE_120);
				if (err)
					goto err;
			}
			mmc_card_set_ddr_mode(card);
			mmc_set_timing(card->host, MMC_TIMING_UHS_DDR50);
			mmc_set_bus_width(card->host, bus_width);
		}
	}

	/*
	 * Enable HPI feature (if supported)
	 */
	if (card->ext_csd.hpi) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_HPI_MGMT, 1,
				 card->ext_csd.generic_cmd6_time);
		if (err && err != -EBADMSG)
			goto free_card;
		if (err) {
			pr_warning("%s: Enabling HPI failed\n",
				   mmc_hostname(card->host));
			err = 0;
		} else
			card->ext_csd.hpi_en = 1;
	}

	/*
	 * If cache size is higher than 0, this indicates
	 * the existence of cache and it can be turned on.
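	 * The cache is only switched on when the host also advertises
	 * MMC_CAP2_CACHE_CTRL.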
	 */
	if ((host->caps2 & MMC_CAP2_CACHE_CTRL) &&
			card->ext_csd.cache_size > 0) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				EXT_CSD_CACHE_CTRL, 1,
				card->ext_csd.generic_cmd6_time);
		if (err && err != -EBADMSG)
			goto free_card;

		/*
		 * Only if there is no error is the cache considered to be
		 * turned on successfully.
		 */
		if (err) {
			pr_warning("%s: Cache is supported, "
					"but failed to turn on (%d)\n",
					mmc_hostname(card->host), err);
			card->ext_csd.cache_ctrl = 0;
			err = 0;
		} else {
			card->ext_csd.cache_ctrl = 1;
		}
	}

	/*
	 * The mandatory minimum values are defined for packed command.
	 * read: 5, write: 3
	 */
	if (card->ext_csd.max_packed_writes >= 3 &&
	    card->ext_csd.max_packed_reads >= 5 &&
	    host->caps2 & MMC_CAP2_PACKED_CMD) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				EXT_CSD_EXP_EVENTS_CTRL,
				EXT_CSD_PACKED_EVENT_EN,
				card->ext_csd.generic_cmd6_time);
		if (err && err != -EBADMSG)
			goto free_card;
		if (err) {
			pr_warn("%s: Enabling packed event failed\n",
				mmc_hostname(card->host));
			card->ext_csd.packed_event_en = 0;
			err = 0;
		} else {
			card->ext_csd.packed_event_en = 1;
		}
	}

	if (!oldcard)
		host->card = card;

	mmc_free_ext_csd(ext_csd);
	return 0;

free_card:
	if (!oldcard)
		mmc_remove_card(card);
err:
	mmc_free_ext_csd(ext_csd);

	return err;
}

static int mmc_can_sleep(struct mmc_card *card)
{
	return (card && card->ext_csd.rev >= 3);
}

static int mmc_sleep(struct mmc_host *host)
{
	struct mmc_command cmd = {0};
	struct mmc_card *card = host->card;
	int err;

	if (host->caps2 & MMC_CAP2_NO_SLEEP_CMD)
		return 0;

	err = mmc_deselect_cards(host);
	if (err)
		return err;

	cmd.opcode = MMC_SLEEP_AWAKE;
	cmd.arg = card->rca << 16;
	cmd.arg |= 1 << 15;

	cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
	err = mmc_wait_for_cmd(host, &cmd, 0);
	if (err)
		return err;

	/*
	 * If the host does not wait while the card signals busy, then we will
	 * have to wait the sleep/awake timeout.  Note, we cannot use the
	 * SEND_STATUS command to poll the status because that command (and
	 * most others) is invalid while the card sleeps.
	 */
	if (!(host->caps & MMC_CAP_WAIT_WHILE_BUSY))
		mmc_delay(DIV_ROUND_UP(card->ext_csd.sa_timeout, 10000));

	return err;
}

static int mmc_can_poweroff_notify(const struct mmc_card *card)
{
	return card &&
		mmc_card_mmc(card) &&
		(card->ext_csd.power_off_notification == EXT_CSD_POWER_ON);
}

static int mmc_poweroff_notify(struct mmc_card *card, unsigned int notify_type)
{
	unsigned int timeout = card->ext_csd.generic_cmd6_time;
	int err;

	/* Use EXT_CSD_POWER_OFF_SHORT as default notification type. */
	if (notify_type == EXT_CSD_POWER_OFF_LONG)
		timeout = card->ext_csd.power_off_longtime;

	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			EXT_CSD_POWER_OFF_NOTIFICATION,
			notify_type, timeout, true, false);
	if (err)
		pr_err("%s: Power Off Notification timed out, %u\n",
		       mmc_hostname(card->host), timeout);

	/* Disable the power off notification after the switch operation. */
	card->ext_csd.power_off_notification = EXT_CSD_NO_POWER_NOTIFICATION;

	return err;
}

/*
 * Host is being removed. Free up the current card.
 */
static void mmc_remove(struct mmc_host *host)
{
	BUG_ON(!host);
	BUG_ON(!host->card);

	mmc_remove_card(host->card);
	host->card = NULL;
}

/*
 * Card detection - card is alive.
 */
static int mmc_alive(struct mmc_host *host)
{
	return mmc_send_status(host->card, NULL);
}

/*
 * Card detection callback from host.
 */
static void mmc_detect(struct mmc_host *host)
{
	int err;

	BUG_ON(!host);
	BUG_ON(!host->card);

	mmc_get_card(host->card);

	/*
	 * Just check if our card has been removed.
	 */
	err = _mmc_detect_card_removed(host);

	mmc_put_card(host->card);

	if (err) {
		mmc_remove(host);

		mmc_claim_host(host);
		mmc_detach_bus(host);
		mmc_power_off(host);
		mmc_release_host(host);
	}
}

static int _mmc_suspend(struct mmc_host *host, bool is_suspend)
{
	int err = 0;
	unsigned int notify_type = is_suspend ? EXT_CSD_POWER_OFF_SHORT :
					EXT_CSD_POWER_OFF_LONG;

	BUG_ON(!host);
	BUG_ON(!host->card);

	mmc_claim_host(host);

	if (mmc_card_suspended(host->card))
		goto out;

	if (mmc_card_doing_bkops(host->card)) {
		err = mmc_stop_bkops(host->card);
		if (err)
			goto out;
	}

	err = mmc_cache_ctrl(host, 0);
	if (err)
		goto out;

	if (mmc_can_poweroff_notify(host->card) &&
		((host->caps2 & MMC_CAP2_FULL_PWR_CYCLE) || !is_suspend))
		err = mmc_poweroff_notify(host->card, notify_type);
	else if (mmc_can_sleep(host->card))
		err = mmc_sleep(host);
	else if (!mmc_host_is_spi(host))
		err = mmc_deselect_cards(host);
	host->card->state &= ~(MMC_STATE_HIGHSPEED | MMC_STATE_HIGHSPEED_200);

	if (!err) {
		mmc_power_off(host);
		mmc_card_set_suspended(host->card);
	}
out:
	mmc_release_host(host);
	return err;
}

/*
 * Suspend callback
 */
static int mmc_suspend(struct mmc_host *host)
{
	int err;

	err = _mmc_suspend(host, true);
	if (!err) {
		pm_runtime_disable(&host->card->dev);
		pm_runtime_set_suspended(&host->card->dev);
	}

	return err;
}

/*
 * This function tries to determine if the same card is still present
 * and, if so, restore all state to it.
 */
static int _mmc_resume(struct mmc_host *host)
{
	int err = 0;

	BUG_ON(!host);
	BUG_ON(!host->card);

	mmc_claim_host(host);

	if (!mmc_card_suspended(host->card))
		goto out;

	mmc_power_up(host, host->card->ocr);
	err = mmc_init_card(host, host->card->ocr, host->card);
	mmc_card_clr_suspended(host->card);

out:
	mmc_release_host(host);
	return err;
}

/*
 * Shutdown callback
 */
static int mmc_shutdown(struct mmc_host *host)
{
	int err = 0;

	/*
	 * In a specific case for poweroff notify, we need to resume the card
	 * before we can shut it down properly.
	 */
	if (mmc_can_poweroff_notify(host->card) &&
		!(host->caps2 & MMC_CAP2_FULL_PWR_CYCLE))
		err = _mmc_resume(host);

	if (!err)
		err = _mmc_suspend(host, false);

	return err;
}

/*
 * Callback for resume.
 */
static int mmc_resume(struct mmc_host *host)
{
	int err = 0;

	if (!(host->caps & MMC_CAP_RUNTIME_RESUME)) {
		err = _mmc_resume(host);
		pm_runtime_set_active(&host->card->dev);
		pm_runtime_mark_last_busy(&host->card->dev);
	}
	pm_runtime_enable(&host->card->dev);

	return err;
}

/*
 * Callback for runtime_suspend.
 */
static int mmc_runtime_suspend(struct mmc_host *host)
{
	int err;

	if (!(host->caps & MMC_CAP_AGGRESSIVE_PM))
		return 0;

	err = _mmc_suspend(host, true);
	if (err)
		pr_err("%s: error %d doing aggressive suspend\n",
			mmc_hostname(host), err);

	return err;
}

/*
 * Callback for runtime_resume.
 */
static int mmc_runtime_resume(struct mmc_host *host)
{
	int err;

	if (!(host->caps & (MMC_CAP_AGGRESSIVE_PM | MMC_CAP_RUNTIME_RESUME)))
		return 0;

	err = _mmc_resume(host);
	if (err)
		pr_err("%s: error %d doing aggressive resume\n",
			mmc_hostname(host), err);

	return 0;
}

static int mmc_power_restore(struct mmc_host *host)
{
	int ret;

	host->card->state &= ~(MMC_STATE_HIGHSPEED | MMC_STATE_HIGHSPEED_200);
	mmc_claim_host(host);
	ret = mmc_init_card(host, host->card->ocr, host->card);
	mmc_release_host(host);

	return ret;
}

static const struct mmc_bus_ops mmc_ops = {
	.remove = mmc_remove,
	.detect = mmc_detect,
	.suspend = NULL,
	.resume = NULL,
	.power_restore = mmc_power_restore,
	.alive = mmc_alive,
	.shutdown = mmc_shutdown,
};

static const struct mmc_bus_ops mmc_ops_unsafe = {
	.remove = mmc_remove,
	.detect = mmc_detect,
	.suspend = mmc_suspend,
	.resume = mmc_resume,
	.runtime_suspend = mmc_runtime_suspend,
	.runtime_resume = mmc_runtime_resume,
	.power_restore = mmc_power_restore,
	.alive = mmc_alive,
	.shutdown = mmc_shutdown,
};

static void mmc_attach_bus_ops(struct mmc_host *host)
{
	const struct mmc_bus_ops *bus_ops;

	if (!mmc_card_is_removable(host))
		bus_ops = &mmc_ops_unsafe;
	else
		bus_ops = &mmc_ops;
	mmc_attach_bus(host, bus_ops);
}

/*
 * Starting point for MMC card init.
 */
int mmc_attach_mmc(struct mmc_host *host)
{
	int err;
	u32 ocr, rocr;

	BUG_ON(!host);
	WARN_ON(!host->claimed);

	/* Set correct bus mode for MMC before attempting attach */
	if (!mmc_host_is_spi(host))
		mmc_set_bus_mode(host, MMC_BUSMODE_OPENDRAIN);

	err = mmc_send_op_cond(host, 0, &ocr);
	if (err)
		return err;

	mmc_attach_bus_ops(host);
	if (host->ocr_avail_mmc)
		host->ocr_avail = host->ocr_avail_mmc;

	/*
	 * We need to get OCR a different way for SPI.
	 */
	if (mmc_host_is_spi(host)) {
		err = mmc_spi_read_ocr(host, 1, &ocr);
		if (err)
			goto err;
	}

	rocr = mmc_select_voltage(host, ocr);

	/*
	 * Can we support the voltage of the card?
	 */
	if (!rocr) {
		err = -EINVAL;
		goto err;
	}

	/*
	 * Detect and init the card.
	 */
	err = mmc_init_card(host, rocr, NULL);
	if (err)
		goto err;

	mmc_release_host(host);
	err = mmc_add_card(host->card);
	mmc_claim_host(host);
	if (err)
		goto remove_card;

	return 0;

remove_card:
	mmc_release_host(host);
	mmc_remove_card(host->card);
	mmc_claim_host(host);
	host->card = NULL;
err:
	mmc_detach_bus(host);

	pr_err("%s: error %d whilst initialising MMC card\n",
		mmc_hostname(host), err);

	return err;
}