/*
 * linux/drivers/mmc/core/mmc.c
 *
 * Copyright (C) 2003-2004 Russell King, All Rights Reserved.
 * Copyright (C) 2005-2007 Pierre Ossman, All Rights Reserved.
 * MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/stat.h>

#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/mmc.h>

#include "core.h"
#include "bus.h"
#include "mmc_ops.h"
#include "sd_ops.h"

static const unsigned int tran_exp[] = {
	10000, 100000, 1000000, 10000000,
	0, 0, 0, 0
};

static const unsigned char tran_mant[] = {
	0,  10, 12, 13, 15, 20, 25, 30,
	35, 40, 45, 50, 55, 60, 70, 80,
};

static const unsigned int tacc_exp[] = {
	1, 10, 100, 1000, 10000, 100000, 1000000, 10000000,
};

static const unsigned int tacc_mant[] = {
	0,  10, 12, 13, 15, 20, 25, 30,
	35, 40, 45, 50, 55, 60, 70, 80,
};

#define UNSTUFF_BITS(resp,start,size)					\
	({								\
		const int __size = size;				\
		const u32 __mask = (__size < 32 ? 1 << __size : 0) - 1;	\
		const int __off = 3 - ((start) / 32);			\
		const int __shft = (start) & 31;			\
		u32 __res;						\
									\
		__res = resp[__off] >> __shft;				\
		if (__size + __shft > 32)				\
			__res |= resp[__off-1] << ((32 - __shft) % 32);	\
		__res & __mask;						\
	})

/*
 * Given the decoded CSD structure, decode the raw CID to our CID structure.
 */
static int mmc_decode_cid(struct mmc_card *card)
{
	u32 *resp = card->raw_cid;

	/*
	 * The selection of the format here is based upon published
	 * specs from sandisk and from what people have reported.
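	 *
	 * Note: the (offset, width) pairs passed to UNSTUFF_BITS() below are
	 * bit positions within the 128-bit CID; e.g. the manufacturer ID sits
	 * in bits [127:104] on pre-v2.0 cards and in bits [127:120] from
	 * v2.0 onwards.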
	 */
	switch (card->csd.mmca_vsn) {
	case 0: /* MMC v1.0 - v1.2 */
	case 1: /* MMC v1.4 */
		card->cid.manfid = UNSTUFF_BITS(resp, 104, 24);
		card->cid.prod_name[0] = UNSTUFF_BITS(resp, 96, 8);
		card->cid.prod_name[1] = UNSTUFF_BITS(resp, 88, 8);
		card->cid.prod_name[2] = UNSTUFF_BITS(resp, 80, 8);
		card->cid.prod_name[3] = UNSTUFF_BITS(resp, 72, 8);
		card->cid.prod_name[4] = UNSTUFF_BITS(resp, 64, 8);
		card->cid.prod_name[5] = UNSTUFF_BITS(resp, 56, 8);
		card->cid.prod_name[6] = UNSTUFF_BITS(resp, 48, 8);
		card->cid.hwrev = UNSTUFF_BITS(resp, 44, 4);
		card->cid.fwrev = UNSTUFF_BITS(resp, 40, 4);
		card->cid.serial = UNSTUFF_BITS(resp, 16, 24);
		card->cid.month = UNSTUFF_BITS(resp, 12, 4);
		card->cid.year = UNSTUFF_BITS(resp, 8, 4) + 1997;
		break;

	case 2: /* MMC v2.0 - v2.2 */
	case 3: /* MMC v3.1 - v3.3 */
	case 4: /* MMC v4 */
		card->cid.manfid = UNSTUFF_BITS(resp, 120, 8);
		card->cid.oemid = UNSTUFF_BITS(resp, 104, 16);
		card->cid.prod_name[0] = UNSTUFF_BITS(resp, 96, 8);
		card->cid.prod_name[1] = UNSTUFF_BITS(resp, 88, 8);
		card->cid.prod_name[2] = UNSTUFF_BITS(resp, 80, 8);
		card->cid.prod_name[3] = UNSTUFF_BITS(resp, 72, 8);
		card->cid.prod_name[4] = UNSTUFF_BITS(resp, 64, 8);
		card->cid.prod_name[5] = UNSTUFF_BITS(resp, 56, 8);
		card->cid.serial = UNSTUFF_BITS(resp, 16, 32);
		card->cid.month = UNSTUFF_BITS(resp, 12, 4);
		card->cid.year = UNSTUFF_BITS(resp, 8, 4) + 1997;
		break;

	default:
		pr_err("%s: card has unknown MMCA version %d\n",
			mmc_hostname(card->host), card->csd.mmca_vsn);
		return -EINVAL;
	}

	return 0;
}

static void mmc_set_erase_size(struct mmc_card *card)
{
	if (card->ext_csd.erase_group_def & 1)
		card->erase_size = card->ext_csd.hc_erase_size;
	else
		card->erase_size = card->csd.erase_size;

	mmc_init_erase(card);
}

/*
 * Given a 128-bit response, decode to our card CSD structure.
 */
static int mmc_decode_csd(struct mmc_card *card)
{
	struct mmc_csd *csd = &card->csd;
	unsigned int e, m, a, b;
	u32 *resp = card->raw_csd;

	/*
	 * We only understand CSD structure v1.1 and v1.2.
	 * v1.2 has extra information in bits 15, 11 and 10.
	 * We also support eMMC v4.4 & v4.41.
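	 *
	 * Note that the capacity decoded below is only meaningful for
	 * densities up to 2GB; larger (sector-addressed) devices program
	 * the maximum C_SIZE value and report their real size via the
	 * EXT_CSD sector count instead.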
	 */
	csd->structure = UNSTUFF_BITS(resp, 126, 2);
	if (csd->structure == 0) {
		pr_err("%s: unrecognised CSD structure version %d\n",
			mmc_hostname(card->host), csd->structure);
		return -EINVAL;
	}

	csd->mmca_vsn = UNSTUFF_BITS(resp, 122, 4);
	m = UNSTUFF_BITS(resp, 115, 4);
	e = UNSTUFF_BITS(resp, 112, 3);
	csd->tacc_ns = (tacc_exp[e] * tacc_mant[m] + 9) / 10;
	csd->tacc_clks = UNSTUFF_BITS(resp, 104, 8) * 100;

	m = UNSTUFF_BITS(resp, 99, 4);
	e = UNSTUFF_BITS(resp, 96, 3);
	csd->max_dtr = tran_exp[e] * tran_mant[m];
	csd->cmdclass = UNSTUFF_BITS(resp, 84, 12);

	e = UNSTUFF_BITS(resp, 47, 3);
	m = UNSTUFF_BITS(resp, 62, 12);
	csd->capacity = (1 + m) << (e + 2);

	csd->read_blkbits = UNSTUFF_BITS(resp, 80, 4);
	csd->read_partial = UNSTUFF_BITS(resp, 79, 1);
	csd->write_misalign = UNSTUFF_BITS(resp, 78, 1);
	csd->read_misalign = UNSTUFF_BITS(resp, 77, 1);
	csd->r2w_factor = UNSTUFF_BITS(resp, 26, 3);
	csd->write_blkbits = UNSTUFF_BITS(resp, 22, 4);
	csd->write_partial = UNSTUFF_BITS(resp, 21, 1);

	if (csd->write_blkbits >= 9) {
		a = UNSTUFF_BITS(resp, 42, 5);
		b = UNSTUFF_BITS(resp, 37, 5);
		csd->erase_size = (a + 1) * (b + 1);
		csd->erase_size <<= csd->write_blkbits - 9;
	}

	return 0;
}

/*
 * Read extended CSD.
 */
static int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)
{
	int err;
	u8 *ext_csd;

	BUG_ON(!card);
	BUG_ON(!new_ext_csd);

	*new_ext_csd = NULL;

	if (card->csd.mmca_vsn < CSD_SPEC_VER_4)
		return 0;

	/*
	 * As the ext_csd is so large and mostly unused, we don't store the
	 * raw block in mmc_card.
	 */
	ext_csd = kmalloc(512, GFP_KERNEL);
	if (!ext_csd) {
		pr_err("%s: could not allocate a buffer to "
			"receive the ext_csd.\n", mmc_hostname(card->host));
		return -ENOMEM;
	}

	err = mmc_send_ext_csd(card, ext_csd);
	if (err) {
		kfree(ext_csd);
		*new_ext_csd = NULL;

		/* If the host or the card can't do the switch,
		 * fail more gracefully. */
		if ((err != -EINVAL)
		 && (err != -ENOSYS)
		 && (err != -EFAULT))
			return err;

		/*
		 * High capacity cards should have this "magic" size
		 * stored in their CSD.
		 */
		if (card->csd.capacity == (4096 * 512)) {
			pr_err("%s: unable to read EXT_CSD "
				"on a possible high capacity card. "
" 223 "Card will be ignored.\n", 224 mmc_hostname(card->host)); 225 } else { 226 pr_warning("%s: unable to read " 227 "EXT_CSD, performance might " 228 "suffer.\n", 229 mmc_hostname(card->host)); 230 err = 0; 231 } 232 } else 233 *new_ext_csd = ext_csd; 234 235 return err; 236 } 237 238 static void mmc_select_card_type(struct mmc_card *card) 239 { 240 struct mmc_host *host = card->host; 241 u8 card_type = card->ext_csd.raw_card_type & EXT_CSD_CARD_TYPE_MASK; 242 u32 caps = host->caps, caps2 = host->caps2; 243 unsigned int hs_max_dtr = 0; 244 245 if (card_type & EXT_CSD_CARD_TYPE_26) 246 hs_max_dtr = MMC_HIGH_26_MAX_DTR; 247 248 if (caps & MMC_CAP_MMC_HIGHSPEED && 249 card_type & EXT_CSD_CARD_TYPE_52) 250 hs_max_dtr = MMC_HIGH_52_MAX_DTR; 251 252 if ((caps & MMC_CAP_1_8V_DDR && 253 card_type & EXT_CSD_CARD_TYPE_DDR_1_8V) || 254 (caps & MMC_CAP_1_2V_DDR && 255 card_type & EXT_CSD_CARD_TYPE_DDR_1_2V)) 256 hs_max_dtr = MMC_HIGH_DDR_MAX_DTR; 257 258 if ((caps2 & MMC_CAP2_HS200_1_8V_SDR && 259 card_type & EXT_CSD_CARD_TYPE_SDR_1_8V) || 260 (caps2 & MMC_CAP2_HS200_1_2V_SDR && 261 card_type & EXT_CSD_CARD_TYPE_SDR_1_2V)) 262 hs_max_dtr = MMC_HS200_MAX_DTR; 263 264 card->ext_csd.hs_max_dtr = hs_max_dtr; 265 card->ext_csd.card_type = card_type; 266 } 267 268 /* 269 * Decode extended CSD. 270 */ 271 static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd) 272 { 273 int err = 0, idx; 274 unsigned int part_size; 275 u8 hc_erase_grp_sz = 0, hc_wp_grp_sz = 0; 276 277 BUG_ON(!card); 278 279 if (!ext_csd) 280 return 0; 281 282 /* Version is coded in the CSD_STRUCTURE byte in the EXT_CSD register */ 283 card->ext_csd.raw_ext_csd_structure = ext_csd[EXT_CSD_STRUCTURE]; 284 if (card->csd.structure == 3) { 285 if (card->ext_csd.raw_ext_csd_structure > 2) { 286 pr_err("%s: unrecognised EXT_CSD structure " 287 "version %d\n", mmc_hostname(card->host), 288 card->ext_csd.raw_ext_csd_structure); 289 err = -EINVAL; 290 goto out; 291 } 292 } 293 294 card->ext_csd.rev = ext_csd[EXT_CSD_REV]; 295 if (card->ext_csd.rev > 6) { 296 pr_err("%s: unrecognised EXT_CSD revision %d\n", 297 mmc_hostname(card->host), card->ext_csd.rev); 298 err = -EINVAL; 299 goto out; 300 } 301 302 card->ext_csd.raw_sectors[0] = ext_csd[EXT_CSD_SEC_CNT + 0]; 303 card->ext_csd.raw_sectors[1] = ext_csd[EXT_CSD_SEC_CNT + 1]; 304 card->ext_csd.raw_sectors[2] = ext_csd[EXT_CSD_SEC_CNT + 2]; 305 card->ext_csd.raw_sectors[3] = ext_csd[EXT_CSD_SEC_CNT + 3]; 306 if (card->ext_csd.rev >= 2) { 307 card->ext_csd.sectors = 308 ext_csd[EXT_CSD_SEC_CNT + 0] << 0 | 309 ext_csd[EXT_CSD_SEC_CNT + 1] << 8 | 310 ext_csd[EXT_CSD_SEC_CNT + 2] << 16 | 311 ext_csd[EXT_CSD_SEC_CNT + 3] << 24; 312 313 /* Cards with density > 2GiB are sector addressed */ 314 if (card->ext_csd.sectors > (2u * 1024 * 1024 * 1024) / 512) 315 mmc_card_set_blockaddr(card); 316 } 317 318 card->ext_csd.raw_card_type = ext_csd[EXT_CSD_CARD_TYPE]; 319 mmc_select_card_type(card); 320 321 card->ext_csd.raw_s_a_timeout = ext_csd[EXT_CSD_S_A_TIMEOUT]; 322 card->ext_csd.raw_erase_timeout_mult = 323 ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT]; 324 card->ext_csd.raw_hc_erase_grp_size = 325 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]; 326 if (card->ext_csd.rev >= 3) { 327 u8 sa_shift = ext_csd[EXT_CSD_S_A_TIMEOUT]; 328 card->ext_csd.part_config = ext_csd[EXT_CSD_PART_CONFIG]; 329 330 /* EXT_CSD value is in units of 10ms, but we store in ms */ 331 card->ext_csd.part_time = 10 * ext_csd[EXT_CSD_PART_SWITCH_TIME]; 332 333 /* Sleep / awake timeout in 100ns units */ 334 if (sa_shift > 0 && sa_shift <= 0x17) 335 
			card->ext_csd.sa_timeout =
				1 << ext_csd[EXT_CSD_S_A_TIMEOUT];
		card->ext_csd.erase_group_def =
			ext_csd[EXT_CSD_ERASE_GROUP_DEF];
		card->ext_csd.hc_erase_timeout = 300 *
			ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT];
		card->ext_csd.hc_erase_size =
			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] << 10;

		card->ext_csd.rel_sectors = ext_csd[EXT_CSD_REL_WR_SEC_C];

		/*
		 * There are two boot regions of equal size, defined in
		 * multiples of 128K.
		 */
		if (ext_csd[EXT_CSD_BOOT_MULT] && mmc_boot_partition_access(card->host)) {
			for (idx = 0; idx < MMC_NUM_BOOT_PARTITION; idx++) {
				part_size = ext_csd[EXT_CSD_BOOT_MULT] << 17;
				mmc_part_add(card, part_size,
					EXT_CSD_PART_CONFIG_ACC_BOOT0 + idx,
					"boot%d", idx, true,
					MMC_BLK_DATA_AREA_BOOT);
			}
		}
	}

	card->ext_csd.raw_hc_erase_gap_size =
		ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
	card->ext_csd.raw_sec_trim_mult =
		ext_csd[EXT_CSD_SEC_TRIM_MULT];
	card->ext_csd.raw_sec_erase_mult =
		ext_csd[EXT_CSD_SEC_ERASE_MULT];
	card->ext_csd.raw_sec_feature_support =
		ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT];
	card->ext_csd.raw_trim_mult =
		ext_csd[EXT_CSD_TRIM_MULT];
	if (card->ext_csd.rev >= 4) {
		/*
		 * Enhanced area feature support -- check whether the eMMC
		 * card has the Enhanced area enabled. If so, export enhanced
		 * area offset and size to user by adding sysfs interface.
		 */
		card->ext_csd.raw_partition_support = ext_csd[EXT_CSD_PARTITION_SUPPORT];
		if ((ext_csd[EXT_CSD_PARTITION_SUPPORT] & 0x2) &&
		    (ext_csd[EXT_CSD_PARTITION_ATTRIBUTE] & 0x1)) {
			hc_erase_grp_sz =
				ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
			hc_wp_grp_sz =
				ext_csd[EXT_CSD_HC_WP_GRP_SIZE];

			card->ext_csd.enhanced_area_en = 1;
			/*
			 * calculate the enhanced data area offset, in bytes
			 */
			card->ext_csd.enhanced_area_offset =
				(ext_csd[139] << 24) + (ext_csd[138] << 16) +
				(ext_csd[137] << 8) + ext_csd[136];
			if (mmc_card_blockaddr(card))
				card->ext_csd.enhanced_area_offset <<= 9;
			/*
			 * calculate the enhanced data area size, in kilobytes
			 */
			card->ext_csd.enhanced_area_size =
				(ext_csd[142] << 16) + (ext_csd[141] << 8) +
				ext_csd[140];
			card->ext_csd.enhanced_area_size *=
				(size_t)(hc_erase_grp_sz * hc_wp_grp_sz);
			card->ext_csd.enhanced_area_size <<= 9;
		} else {
			/*
			 * If the enhanced area is not enabled, disable these
			 * device attributes.
			 */
			card->ext_csd.enhanced_area_offset = -EINVAL;
			card->ext_csd.enhanced_area_size = -EINVAL;
		}

		/*
		 * General purpose partition feature support --
		 * If ext_csd has the size of general purpose partitions,
		 * set size, part_cfg, partition name in mmc_part.
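		 *
		 * Each GP_SIZE_MULT value is scaled by the high-capacity
		 * write-protect and erase group sizes, giving a size in
		 * 512 KiB units (hence the "part_size << 19" conversion to
		 * bytes in the mmc_part_add() call below).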
		 */
		if (ext_csd[EXT_CSD_PARTITION_SUPPORT] &
			EXT_CSD_PART_SUPPORT_PART_EN) {
			if (card->ext_csd.enhanced_area_en != 1) {
				hc_erase_grp_sz =
					ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
				hc_wp_grp_sz =
					ext_csd[EXT_CSD_HC_WP_GRP_SIZE];

				card->ext_csd.enhanced_area_en = 1;
			}

			for (idx = 0; idx < MMC_NUM_GP_PARTITION; idx++) {
				if (!ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3] &&
				!ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 1] &&
				!ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 2])
					continue;
				part_size =
				(ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 2]
					<< 16) +
				(ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 1]
					<< 8) +
				ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3];
				part_size *= (size_t)(hc_erase_grp_sz *
					hc_wp_grp_sz);
				mmc_part_add(card, part_size << 19,
					EXT_CSD_PART_CONFIG_ACC_GP0 + idx,
					"gp%d", idx, false,
					MMC_BLK_DATA_AREA_GP);
			}
		}
		card->ext_csd.sec_trim_mult =
			ext_csd[EXT_CSD_SEC_TRIM_MULT];
		card->ext_csd.sec_erase_mult =
			ext_csd[EXT_CSD_SEC_ERASE_MULT];
		card->ext_csd.sec_feature_support =
			ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT];
		card->ext_csd.trim_timeout = 300 *
			ext_csd[EXT_CSD_TRIM_MULT];

		/*
		 * Note that the call to mmc_part_add above defaults to read
		 * only. If this default assumption is changed, the call must
		 * take into account the value of boot_locked below.
		 */
		card->ext_csd.boot_ro_lock = ext_csd[EXT_CSD_BOOT_WP];
		card->ext_csd.boot_ro_lockable = true;
	}

	if (card->ext_csd.rev >= 5) {
		/* check whether the eMMC card supports BKOPS */
		if (ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1) {
			card->ext_csd.bkops = 1;
			card->ext_csd.bkops_en = ext_csd[EXT_CSD_BKOPS_EN];
			card->ext_csd.raw_bkops_status =
				ext_csd[EXT_CSD_BKOPS_STATUS];
			if (!card->ext_csd.bkops_en)
				pr_info("%s: BKOPS_EN bit is not set\n",
					mmc_hostname(card->host));
		}

		/* check whether the eMMC card supports HPI */
		if (ext_csd[EXT_CSD_HPI_FEATURES] & 0x1) {
			card->ext_csd.hpi = 1;
			if (ext_csd[EXT_CSD_HPI_FEATURES] & 0x2)
				card->ext_csd.hpi_cmd = MMC_STOP_TRANSMISSION;
			else
				card->ext_csd.hpi_cmd = MMC_SEND_STATUS;
			/*
			 * Indicate the maximum timeout to close
			 * a command interrupted by HPI
			 */
			card->ext_csd.out_of_int_time =
				ext_csd[EXT_CSD_OUT_OF_INTERRUPT_TIME] * 10;
		}

		card->ext_csd.rel_param = ext_csd[EXT_CSD_WR_REL_PARAM];
		card->ext_csd.rst_n_function = ext_csd[EXT_CSD_RST_N_FUNCTION];

		/*
		 * RPMB regions are defined in multiples of 128K.
		 */
		card->ext_csd.raw_rpmb_size_mult = ext_csd[EXT_CSD_RPMB_MULT];
		if (ext_csd[EXT_CSD_RPMB_MULT] && mmc_host_cmd23(card->host)) {
			mmc_part_add(card, ext_csd[EXT_CSD_RPMB_MULT] << 17,
				EXT_CSD_PART_CONFIG_ACC_RPMB,
				"rpmb", 0, false,
				MMC_BLK_DATA_AREA_RPMB);
		}
	}

	card->ext_csd.raw_erased_mem_count = ext_csd[EXT_CSD_ERASED_MEM_CONT];
	if (ext_csd[EXT_CSD_ERASED_MEM_CONT])
		card->erased_byte = 0xFF;
	else
		card->erased_byte = 0x0;

	/* eMMC v4.5 or later */
	if (card->ext_csd.rev >= 6) {
		card->ext_csd.feature_support |= MMC_DISCARD_FEATURE;

		card->ext_csd.generic_cmd6_time = 10 *
			ext_csd[EXT_CSD_GENERIC_CMD6_TIME];
		card->ext_csd.power_off_longtime = 10 *
			ext_csd[EXT_CSD_POWER_OFF_LONG_TIME];

		card->ext_csd.cache_size =
			ext_csd[EXT_CSD_CACHE_SIZE + 0] << 0 |
			ext_csd[EXT_CSD_CACHE_SIZE + 1] << 8 |
			ext_csd[EXT_CSD_CACHE_SIZE + 2] << 16 |
			ext_csd[EXT_CSD_CACHE_SIZE + 3] << 24;

		if (ext_csd[EXT_CSD_DATA_SECTOR_SIZE] == 1)
			card->ext_csd.data_sector_size = 4096;
		else
			card->ext_csd.data_sector_size = 512;

		if ((ext_csd[EXT_CSD_DATA_TAG_SUPPORT] & 1) &&
		    (ext_csd[EXT_CSD_TAG_UNIT_SIZE] <= 8)) {
			card->ext_csd.data_tag_unit_size =
			((unsigned int) 1 << ext_csd[EXT_CSD_TAG_UNIT_SIZE]) *
			(card->ext_csd.data_sector_size);
		} else {
			card->ext_csd.data_tag_unit_size = 0;
		}

		card->ext_csd.max_packed_writes =
			ext_csd[EXT_CSD_MAX_PACKED_WRITES];
		card->ext_csd.max_packed_reads =
			ext_csd[EXT_CSD_MAX_PACKED_READS];
	} else {
		card->ext_csd.data_sector_size = 512;
	}

out:
	return err;
}

static inline void mmc_free_ext_csd(u8 *ext_csd)
{
	kfree(ext_csd);
}


static int mmc_compare_ext_csds(struct mmc_card *card, unsigned bus_width)
{
	u8 *bw_ext_csd;
	int err;

	if (bus_width == MMC_BUS_WIDTH_1)
		return 0;

	err = mmc_get_ext_csd(card, &bw_ext_csd);

	if (err || bw_ext_csd == NULL) {
		err = -EINVAL;
		goto out;
	}

	/* only compare read only fields */
	err = !((card->ext_csd.raw_partition_support ==
			bw_ext_csd[EXT_CSD_PARTITION_SUPPORT]) &&
		(card->ext_csd.raw_erased_mem_count ==
			bw_ext_csd[EXT_CSD_ERASED_MEM_CONT]) &&
		(card->ext_csd.rev ==
			bw_ext_csd[EXT_CSD_REV]) &&
		(card->ext_csd.raw_ext_csd_structure ==
			bw_ext_csd[EXT_CSD_STRUCTURE]) &&
		(card->ext_csd.raw_card_type ==
			bw_ext_csd[EXT_CSD_CARD_TYPE]) &&
		(card->ext_csd.raw_s_a_timeout ==
			bw_ext_csd[EXT_CSD_S_A_TIMEOUT]) &&
		(card->ext_csd.raw_hc_erase_gap_size ==
			bw_ext_csd[EXT_CSD_HC_WP_GRP_SIZE]) &&
		(card->ext_csd.raw_erase_timeout_mult ==
			bw_ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT]) &&
		(card->ext_csd.raw_hc_erase_grp_size ==
			bw_ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]) &&
		(card->ext_csd.raw_sec_trim_mult ==
			bw_ext_csd[EXT_CSD_SEC_TRIM_MULT]) &&
		(card->ext_csd.raw_sec_erase_mult ==
			bw_ext_csd[EXT_CSD_SEC_ERASE_MULT]) &&
		(card->ext_csd.raw_sec_feature_support ==
			bw_ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT]) &&
		(card->ext_csd.raw_trim_mult ==
			bw_ext_csd[EXT_CSD_TRIM_MULT]) &&
		(card->ext_csd.raw_sectors[0] ==
			bw_ext_csd[EXT_CSD_SEC_CNT + 0]) &&
		(card->ext_csd.raw_sectors[1] ==
			bw_ext_csd[EXT_CSD_SEC_CNT + 1]) &&
		(card->ext_csd.raw_sectors[2] ==
			bw_ext_csd[EXT_CSD_SEC_CNT + 2]) &&
		(card->ext_csd.raw_sectors[3] ==
			bw_ext_csd[EXT_CSD_SEC_CNT + 3]));
	if (err)
		err = -EINVAL;

out:
	mmc_free_ext_csd(bw_ext_csd);
	return err;
}

MMC_DEV_ATTR(cid, "%08x%08x%08x%08x\n", card->raw_cid[0], card->raw_cid[1],
	card->raw_cid[2], card->raw_cid[3]);
MMC_DEV_ATTR(csd, "%08x%08x%08x%08x\n", card->raw_csd[0], card->raw_csd[1],
	card->raw_csd[2], card->raw_csd[3]);
MMC_DEV_ATTR(date, "%02d/%04d\n", card->cid.month, card->cid.year);
MMC_DEV_ATTR(erase_size, "%u\n", card->erase_size << 9);
MMC_DEV_ATTR(preferred_erase_size, "%u\n", card->pref_erase << 9);
MMC_DEV_ATTR(fwrev, "0x%x\n", card->cid.fwrev);
MMC_DEV_ATTR(hwrev, "0x%x\n", card->cid.hwrev);
MMC_DEV_ATTR(manfid, "0x%06x\n", card->cid.manfid);
MMC_DEV_ATTR(name, "%s\n", card->cid.prod_name);
MMC_DEV_ATTR(oemid, "0x%04x\n", card->cid.oemid);
MMC_DEV_ATTR(serial, "0x%08x\n", card->cid.serial);
MMC_DEV_ATTR(enhanced_area_offset, "%llu\n",
		card->ext_csd.enhanced_area_offset);
MMC_DEV_ATTR(enhanced_area_size, "%u\n", card->ext_csd.enhanced_area_size);
MMC_DEV_ATTR(raw_rpmb_size_mult, "%#x\n", card->ext_csd.raw_rpmb_size_mult);
MMC_DEV_ATTR(rel_sectors, "%#x\n", card->ext_csd.rel_sectors);

static struct attribute *mmc_std_attrs[] = {
	&dev_attr_cid.attr,
	&dev_attr_csd.attr,
	&dev_attr_date.attr,
	&dev_attr_erase_size.attr,
	&dev_attr_preferred_erase_size.attr,
	&dev_attr_fwrev.attr,
	&dev_attr_hwrev.attr,
	&dev_attr_manfid.attr,
	&dev_attr_name.attr,
	&dev_attr_oemid.attr,
	&dev_attr_serial.attr,
	&dev_attr_enhanced_area_offset.attr,
	&dev_attr_enhanced_area_size.attr,
	&dev_attr_raw_rpmb_size_mult.attr,
	&dev_attr_rel_sectors.attr,
	NULL,
};

static struct attribute_group mmc_std_attr_group = {
	.attrs = mmc_std_attrs,
};

static const struct attribute_group *mmc_attr_groups[] = {
	&mmc_std_attr_group,
	NULL,
};

static struct device_type mmc_type = {
	.groups = mmc_attr_groups,
};

/*
 * Select the PowerClass for the current bus width
 * If power class is defined for 4/8 bit bus in the
 * extended CSD register, select it by executing the
 * mmc_switch command.
 */
static int mmc_select_powerclass(struct mmc_card *card,
		unsigned int bus_width, u8 *ext_csd)
{
	int err = 0;
	unsigned int pwrclass_val;
	unsigned int index = 0;
	struct mmc_host *host;

	BUG_ON(!card);

	host = card->host;
	BUG_ON(!host);

	if (ext_csd == NULL)
		return 0;

	/* Power class selection is supported for versions >= 4.0 */
	if (card->csd.mmca_vsn < CSD_SPEC_VER_4)
		return 0;

	/* Power class values are defined only for 4/8 bit bus */
	if (bus_width == EXT_CSD_BUS_WIDTH_1)
		return 0;

	switch (1 << host->ios.vdd) {
	case MMC_VDD_165_195:
		if (host->ios.clock <= 26000000)
			index = EXT_CSD_PWR_CL_26_195;
		else if (host->ios.clock <= 52000000)
			index = (bus_width <= EXT_CSD_BUS_WIDTH_8) ?
				EXT_CSD_PWR_CL_52_195 :
				EXT_CSD_PWR_CL_DDR_52_195;
		else if (host->ios.clock <= 200000000)
			index = EXT_CSD_PWR_CL_200_195;
		break;
	case MMC_VDD_27_28:
	case MMC_VDD_28_29:
	case MMC_VDD_29_30:
	case MMC_VDD_30_31:
	case MMC_VDD_31_32:
	case MMC_VDD_32_33:
	case MMC_VDD_33_34:
	case MMC_VDD_34_35:
	case MMC_VDD_35_36:
		if (host->ios.clock <= 26000000)
			index = EXT_CSD_PWR_CL_26_360;
		else if (host->ios.clock <= 52000000)
			index = (bus_width <= EXT_CSD_BUS_WIDTH_8) ?
				EXT_CSD_PWR_CL_52_360 :
				EXT_CSD_PWR_CL_DDR_52_360;
		else if (host->ios.clock <= 200000000)
			index = EXT_CSD_PWR_CL_200_360;
		break;
	default:
		pr_warning("%s: Voltage range not supported "
			   "for power class.\n", mmc_hostname(host));
		return -EINVAL;
	}

	pwrclass_val = ext_csd[index];

	if (bus_width & (EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_BUS_WIDTH_8))
		pwrclass_val = (pwrclass_val & EXT_CSD_PWR_CL_8BIT_MASK) >>
				EXT_CSD_PWR_CL_8BIT_SHIFT;
	else
		pwrclass_val = (pwrclass_val & EXT_CSD_PWR_CL_4BIT_MASK) >>
				EXT_CSD_PWR_CL_4BIT_SHIFT;

	/* If the power class is different from the default value */
	if (pwrclass_val > 0) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_POWER_CLASS,
				 pwrclass_val,
				 card->ext_csd.generic_cmd6_time);
	}

	return err;
}

/*
 * Select the desired bus width and switch to HS200 mode
 * if the bus width was set without error.
 */
static int mmc_select_hs200(struct mmc_card *card)
{
	int idx, err = -EINVAL;
	struct mmc_host *host;
	static unsigned ext_csd_bits[] = {
		EXT_CSD_BUS_WIDTH_4,
		EXT_CSD_BUS_WIDTH_8,
	};
	static unsigned bus_widths[] = {
		MMC_BUS_WIDTH_4,
		MMC_BUS_WIDTH_8,
	};

	BUG_ON(!card);

	host = card->host;

	if (card->ext_csd.card_type & EXT_CSD_CARD_TYPE_SDR_1_2V &&
	    host->caps2 & MMC_CAP2_HS200_1_2V_SDR)
		err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120);

	if (err && card->ext_csd.card_type & EXT_CSD_CARD_TYPE_SDR_1_8V &&
	    host->caps2 & MMC_CAP2_HS200_1_8V_SDR)
		err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180);

	/* If it fails, try again during the next card power cycle */
	if (err)
		goto err;

	idx = (host->caps & MMC_CAP_8_BIT_DATA) ? 1 : 0;

	/*
	 * Unlike SD, MMC cards don't have a configuration register to notify
	 * supported bus width. So bus test command should be run to identify
	 * the supported bus width or compare the ext csd values of current
	 * bus width and ext csd values of 1 bit mode read earlier.
	 */
	for (; idx >= 0; idx--) {

		/*
		 * If the host is capable of 8-bit transfer, first try to
		 * switch the device to 8-bit transfer mode. If the mmc
		 * switch command returns an error, fall back to 4-bit
		 * transfer mode. On success set the corresponding bus
		 * width on the host.
		 */
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_BUS_WIDTH,
				 ext_csd_bits[idx],
				 card->ext_csd.generic_cmd6_time);
		if (err)
			continue;

		mmc_set_bus_width(card->host, bus_widths[idx]);

		if (!(host->caps & MMC_CAP_BUS_WIDTH_TEST))
			err = mmc_compare_ext_csds(card, bus_widths[idx]);
		else
			err = mmc_bus_test(card, bus_widths[idx]);
		if (!err)
			break;
	}

	/* switch to HS200 mode if bus width set successfully */
	if (!err)
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_HS_TIMING, 2, 0);
err:
	return err;
}

/*
 * Handle the detection and initialisation of a card.
 *
 * In the case of a resume, "oldcard" will contain the card
 * we're trying to reinitialise.
 */
static int mmc_init_card(struct mmc_host *host, u32 ocr,
	struct mmc_card *oldcard)
{
	struct mmc_card *card;
	int err, ddr = 0;
	u32 cid[4];
	unsigned int max_dtr;
	u32 rocr;
	u8 *ext_csd = NULL;

	BUG_ON(!host);
	WARN_ON(!host->claimed);

	/* Set correct bus mode for MMC before attempting init */
	if (!mmc_host_is_spi(host))
		mmc_set_bus_mode(host, MMC_BUSMODE_OPENDRAIN);

	/*
	 * Since we're changing the OCR value, we seem to
	 * need to tell some cards to go back to the idle
	 * state. We wait 1ms to give cards time to
	 * respond.
	 * mmc_go_idle is needed for eMMC that are asleep
	 */
	mmc_go_idle(host);

	/* The extra bit indicates that we support high capacity */
	err = mmc_send_op_cond(host, ocr | (1 << 30), &rocr);
	if (err)
		goto err;

	/*
	 * For SPI, enable CRC as appropriate.
	 */
	if (mmc_host_is_spi(host)) {
		err = mmc_spi_set_crc(host, use_spi_crc);
		if (err)
			goto err;
	}

	/*
	 * Fetch CID from card.
	 */
	if (mmc_host_is_spi(host))
		err = mmc_send_cid(host, cid);
	else
		err = mmc_all_send_cid(host, cid);
	if (err)
		goto err;

	if (oldcard) {
		if (memcmp(cid, oldcard->raw_cid, sizeof(cid)) != 0) {
			err = -ENOENT;
			goto err;
		}

		card = oldcard;
	} else {
		/*
		 * Allocate card structure.
		 */
		card = mmc_alloc_card(host, &mmc_type);
		if (IS_ERR(card)) {
			err = PTR_ERR(card);
			goto err;
		}

		card->type = MMC_TYPE_MMC;
		card->rca = 1;
		memcpy(card->raw_cid, cid, sizeof(card->raw_cid));
	}

	/*
	 * For native busses: set card RCA and quit open drain mode.
	 */
	if (!mmc_host_is_spi(host)) {
		err = mmc_set_relative_addr(card);
		if (err)
			goto free_card;

		mmc_set_bus_mode(host, MMC_BUSMODE_PUSHPULL);
	}

	if (!oldcard) {
		/*
		 * Fetch CSD from card.
		 */
		err = mmc_send_csd(card, card->raw_csd);
		if (err)
			goto free_card;

		err = mmc_decode_csd(card);
		if (err)
			goto free_card;
		err = mmc_decode_cid(card);
		if (err)
			goto free_card;
	}

	/*
	 * Select card, as all following commands rely on that.
	 */
	if (!mmc_host_is_spi(host)) {
		err = mmc_select_card(card);
		if (err)
			goto free_card;
	}

	if (!oldcard) {
		/*
		 * Fetch and process extended CSD.
		 */

		err = mmc_get_ext_csd(card, &ext_csd);
		if (err)
			goto free_card;
		err = mmc_read_ext_csd(card, ext_csd);
		if (err)
			goto free_card;

		/* If doing byte addressing, check if required to do sector
		 * addressing. Handle the case of <2GB cards needing sector
		 * addressing. See section 8.1 JEDEC Standard JED84-A441;
		 * ocr register has bit 30 set for sector addressing.
		 */
		if (!(mmc_card_blockaddr(card)) && (rocr & (1<<30)))
			mmc_card_set_blockaddr(card);

		/* Erase size depends on CSD and Extended CSD */
		mmc_set_erase_size(card);
	}

	/*
	 * If enhanced_area_en is TRUE, host needs to enable ERASE_GRP_DEF
	 * bit. This bit will be lost every time after a reset or power off.
	 */
	if (card->ext_csd.enhanced_area_en ||
	    (card->ext_csd.rev >= 3 && (host->caps2 & MMC_CAP2_HC_ERASE_SZ))) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ERASE_GROUP_DEF, 1,
				 card->ext_csd.generic_cmd6_time);

		if (err && err != -EBADMSG)
			goto free_card;

		if (err) {
			err = 0;
			/*
			 * Leave the enhanced area offset/size disabled;
			 * we will try to enable ERASE_GROUP_DEF again on
			 * the next reinit.
			 */
			card->ext_csd.enhanced_area_offset = -EINVAL;
			card->ext_csd.enhanced_area_size = -EINVAL;
		} else {
			card->ext_csd.erase_group_def = 1;
			/*
			 * ERASE_GROUP_DEF was enabled successfully.
			 * This affects the erase size, so recalculate
			 * it here.
			 */
			mmc_set_erase_size(card);
		}
	}

	/*
	 * Ensure eMMC user default partition is enabled
	 */
	if (card->ext_csd.part_config & EXT_CSD_PART_CONFIG_ACC_MASK) {
		card->ext_csd.part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONFIG,
				 card->ext_csd.part_config,
				 card->ext_csd.part_time);
		if (err && err != -EBADMSG)
			goto free_card;
	}

	/*
	 * If the host supports the power_off_notify capability then
	 * set the notification byte in the ext_csd register of device
	 */
	if ((host->caps2 & MMC_CAP2_POWEROFF_NOTIFY) &&
	    (card->ext_csd.rev >= 6)) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_POWER_OFF_NOTIFICATION,
				 EXT_CSD_POWER_ON,
				 card->ext_csd.generic_cmd6_time);
		if (err && err != -EBADMSG)
			goto free_card;

		/*
		 * The err can be -EBADMSG or 0,
		 * so check for success and update the flag
		 */
		if (!err)
			card->ext_csd.power_off_notification = EXT_CSD_POWER_ON;
	}

	/*
	 * Activate high speed (if supported)
	 */
	if (card->ext_csd.hs_max_dtr != 0) {
		err = 0;
		if (card->ext_csd.hs_max_dtr > 52000000 &&
		    host->caps2 & MMC_CAP2_HS200)
			err = mmc_select_hs200(card);
		else if (host->caps & MMC_CAP_MMC_HIGHSPEED)
			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
					 EXT_CSD_HS_TIMING, 1,
					 card->ext_csd.generic_cmd6_time);

		if (err && err != -EBADMSG)
			goto free_card;

		if (err) {
			pr_warning("%s: switch to highspeed failed\n",
				   mmc_hostname(card->host));
			err = 0;
		} else {
			if (card->ext_csd.hs_max_dtr > 52000000 &&
			    host->caps2 & MMC_CAP2_HS200) {
				mmc_card_set_hs200(card);
				mmc_set_timing(card->host,
					       MMC_TIMING_MMC_HS200);
			} else {
				mmc_card_set_highspeed(card);
				mmc_set_timing(card->host, MMC_TIMING_MMC_HS);
			}
		}
	}

	/*
	 * Compute bus speed.
	 */
	max_dtr = (unsigned int)-1;

	if (mmc_card_highspeed(card) || mmc_card_hs200(card)) {
		if (max_dtr > card->ext_csd.hs_max_dtr)
			max_dtr = card->ext_csd.hs_max_dtr;
		if (mmc_card_highspeed(card) && (max_dtr > 52000000))
			max_dtr = 52000000;
	} else if (max_dtr > card->csd.max_dtr) {
		max_dtr = card->csd.max_dtr;
	}

	mmc_set_clock(host, max_dtr);

	/*
	 * Indicate DDR mode (if supported).
	 */
	if (mmc_card_highspeed(card)) {
		if ((card->ext_csd.card_type & EXT_CSD_CARD_TYPE_DDR_1_8V)
			&& ((host->caps & (MMC_CAP_1_8V_DDR |
			     MMC_CAP_UHS_DDR50))
				== (MMC_CAP_1_8V_DDR | MMC_CAP_UHS_DDR50)))
				ddr = MMC_1_8V_DDR_MODE;
		else if ((card->ext_csd.card_type & EXT_CSD_CARD_TYPE_DDR_1_2V)
			&& ((host->caps & (MMC_CAP_1_2V_DDR |
			     MMC_CAP_UHS_DDR50))
				== (MMC_CAP_1_2V_DDR | MMC_CAP_UHS_DDR50)))
				ddr = MMC_1_2V_DDR_MODE;
	}

	/*
	 * Indicate HS200 SDR mode (if supported).
	 */
	if (mmc_card_hs200(card)) {
		u32 ext_csd_bits;
		u32 bus_width = card->host->ios.bus_width;

		/*
		 * For devices supporting HS200 mode, the bus width has
		 * to be set before executing the tuning function. If
		 * tuning is attempted before the bus width is set, the
		 * device will respond with CRC errors on the CMD line.
		 * So for HS200 the sequence will be
		 * 1. set bus width 4 bit / 8 bit (1 bit not supported)
		 * 2. switch to HS200 mode
		 * 3. set the clock to > 52MHz and <= 200MHz and
		 * 4. execute tuning for HS200
		 */
		if ((host->caps2 & MMC_CAP2_HS200) &&
		    card->host->ops->execute_tuning) {
			mmc_host_clk_hold(card->host);
			err = card->host->ops->execute_tuning(card->host,
				MMC_SEND_TUNING_BLOCK_HS200);
			mmc_host_clk_release(card->host);
		}
		if (err) {
			pr_warning("%s: tuning execution failed\n",
				   mmc_hostname(card->host));
			goto err;
		}

		ext_csd_bits = (bus_width == MMC_BUS_WIDTH_8) ?
				EXT_CSD_BUS_WIDTH_8 : EXT_CSD_BUS_WIDTH_4;
		err = mmc_select_powerclass(card, ext_csd_bits, ext_csd);
		if (err)
			pr_warning("%s: power class selection to bus width %d"
				   " failed\n", mmc_hostname(card->host),
				   1 << bus_width);
	}

	/*
	 * Activate wide bus and DDR (if supported).
	 */
	if (!mmc_card_hs200(card) &&
	    (card->csd.mmca_vsn >= CSD_SPEC_VER_4) &&
	    (host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA))) {
		static unsigned ext_csd_bits[][2] = {
			{ EXT_CSD_BUS_WIDTH_8, EXT_CSD_DDR_BUS_WIDTH_8 },
			{ EXT_CSD_BUS_WIDTH_4, EXT_CSD_DDR_BUS_WIDTH_4 },
			{ EXT_CSD_BUS_WIDTH_1, EXT_CSD_BUS_WIDTH_1 },
		};
		static unsigned bus_widths[] = {
			MMC_BUS_WIDTH_8,
			MMC_BUS_WIDTH_4,
			MMC_BUS_WIDTH_1
		};
		unsigned idx, bus_width = 0;

		if (host->caps & MMC_CAP_8_BIT_DATA)
			idx = 0;
		else
			idx = 1;
		for (; idx < ARRAY_SIZE(bus_widths); idx++) {
			bus_width = bus_widths[idx];
			if (bus_width == MMC_BUS_WIDTH_1)
				ddr = 0; /* no DDR for 1-bit width */
			err = mmc_select_powerclass(card, ext_csd_bits[idx][0],
						    ext_csd);
			if (err)
				pr_warning("%s: power class selection to "
					   "bus width %d failed\n",
					   mmc_hostname(card->host),
					   1 << bus_width);

			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
					 EXT_CSD_BUS_WIDTH,
					 ext_csd_bits[idx][0],
					 card->ext_csd.generic_cmd6_time);
			if (!err) {
				mmc_set_bus_width(card->host, bus_width);

				/*
				 * If controller can't handle bus width test,
				 * compare ext_csd previously read in 1 bit mode
				 * against ext_csd at new bus width
				 */
				if (!(host->caps & MMC_CAP_BUS_WIDTH_TEST))
					err = mmc_compare_ext_csds(card,
						bus_width);
				else
					err = mmc_bus_test(card, bus_width);
				if (!err)
					break;
			}
		}

		if (!err && ddr) {
			err = mmc_select_powerclass(card, ext_csd_bits[idx][1],
						    ext_csd);
			if (err)
				pr_warning("%s: power class selection to "
					   "bus width %d ddr %d failed\n",
					   mmc_hostname(card->host),
					   1 << bus_width, ddr);

			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
					 EXT_CSD_BUS_WIDTH,
					 ext_csd_bits[idx][1],
					 card->ext_csd.generic_cmd6_time);
		}
		if (err) {
			pr_warning("%s: switch to bus width %d ddr %d "
				   "failed\n", mmc_hostname(card->host),
				   1 << bus_width, ddr);
			goto free_card;
		} else if (ddr) {
			/*
			 * eMMC cards can support 3.3V to 1.2V i/o (vccq)
			 * signaling.
			 *
			 * EXT_CSD_CARD_TYPE_DDR_1_8V means 3.3V or 1.8V vccq.
			 *
			 * 1.8V vccq at 3.3V core voltage (vcc) is not required
			 * in the JEDEC spec for DDR.
			 *
			 * Do not force change in vccq since we are obviously
			 * working and no change to vccq is needed.
			 *
			 * WARNING: eMMC rules are NOT the same as SD DDR
			 */
			if (ddr == MMC_1_2V_DDR_MODE) {
				err = __mmc_set_signal_voltage(host,
					MMC_SIGNAL_VOLTAGE_120);
				if (err)
					goto err;
			}
			mmc_card_set_ddr_mode(card);
			mmc_set_timing(card->host, MMC_TIMING_UHS_DDR50);
			mmc_set_bus_width(card->host, bus_width);
		}
	}

	/*
	 * Enable HPI feature (if supported)
	 */
	if (card->ext_csd.hpi) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_HPI_MGMT, 1,
				 card->ext_csd.generic_cmd6_time);
		if (err && err != -EBADMSG)
			goto free_card;
		if (err) {
			pr_warning("%s: Enabling HPI failed\n",
				   mmc_hostname(card->host));
			err = 0;
		} else
			card->ext_csd.hpi_en = 1;
	}

	/*
	 * If cache size is higher than 0, this indicates
	 * the existence of cache and it can be turned on.
	 */
	if ((host->caps2 & MMC_CAP2_CACHE_CTRL) &&
	    card->ext_csd.cache_size > 0) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_CACHE_CTRL, 1,
				 card->ext_csd.generic_cmd6_time);
		if (err && err != -EBADMSG)
			goto free_card;

		/*
		 * Only if no error, cache is turned on successfully.
		 */
		if (err) {
			pr_warning("%s: Cache is supported, "
				   "but failed to turn on (%d)\n",
				   mmc_hostname(card->host), err);
			card->ext_csd.cache_ctrl = 0;
			err = 0;
		} else {
			card->ext_csd.cache_ctrl = 1;
		}
	}

	/*
	 * The mandatory minimum values are defined for packed command.
	 * read: 5, write: 3
	 */
	if (card->ext_csd.max_packed_writes >= 3 &&
	    card->ext_csd.max_packed_reads >= 5 &&
	    host->caps2 & MMC_CAP2_PACKED_CMD) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_EXP_EVENTS_CTRL,
				 EXT_CSD_PACKED_EVENT_EN,
				 card->ext_csd.generic_cmd6_time);
		if (err && err != -EBADMSG)
			goto free_card;
		if (err) {
			pr_warn("%s: Enabling packed event failed\n",
				mmc_hostname(card->host));
			card->ext_csd.packed_event_en = 0;
			err = 0;
		} else {
			card->ext_csd.packed_event_en = 1;
		}
	}

	if (!oldcard)
		host->card = card;

	mmc_free_ext_csd(ext_csd);
	return 0;

free_card:
	if (!oldcard)
		mmc_remove_card(card);
err:
	mmc_free_ext_csd(ext_csd);

	return err;
}

static int mmc_can_poweroff_notify(const struct mmc_card *card)
{
	return card &&
		mmc_card_mmc(card) &&
		(card->ext_csd.power_off_notification == EXT_CSD_POWER_ON);
}

static int mmc_poweroff_notify(struct mmc_card *card, unsigned int notify_type)
{
	unsigned int timeout = card->ext_csd.generic_cmd6_time;
	int err;

	/* Use EXT_CSD_POWER_OFF_SHORT as default notification type. */
	if (notify_type == EXT_CSD_POWER_OFF_LONG)
		timeout = card->ext_csd.power_off_longtime;

	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_POWER_OFF_NOTIFICATION,
			 notify_type, timeout);
	if (err)
		pr_err("%s: Power Off Notification timed out, %u\n",
		       mmc_hostname(card->host), timeout);

	/* Disable the power off notification after the switch operation. */
	card->ext_csd.power_off_notification = EXT_CSD_NO_POWER_NOTIFICATION;

	return err;
}

/*
 * Host is being removed. Free up the current card.
 */
static void mmc_remove(struct mmc_host *host)
{
	BUG_ON(!host);
	BUG_ON(!host->card);

	mmc_remove_card(host->card);
	host->card = NULL;
}

/*
 * Card detection - card is alive.
 */
static int mmc_alive(struct mmc_host *host)
{
	return mmc_send_status(host->card, NULL);
}

/*
 * Card detection callback from host.
 */
static void mmc_detect(struct mmc_host *host)
{
	int err;

	BUG_ON(!host);
	BUG_ON(!host->card);

	mmc_claim_host(host);

	/*
	 * Just check if our card has been removed.
	 */
	err = _mmc_detect_card_removed(host);

	mmc_release_host(host);

	if (err) {
		mmc_remove(host);

		mmc_claim_host(host);
		mmc_detach_bus(host);
		mmc_power_off(host);
		mmc_release_host(host);
	}
}

/*
 * Suspend callback from host.
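 *
 * The card cache is switched off first; the card is then sent a
 * POWER_OFF_NOTIFICATION, put into its sleep state, or simply
 * deselected, in that order of preference.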
 */
static int mmc_suspend(struct mmc_host *host)
{
	int err = 0;

	BUG_ON(!host);
	BUG_ON(!host->card);

	mmc_claim_host(host);

	err = mmc_cache_ctrl(host, 0);
	if (err)
		goto out;

	if (mmc_can_poweroff_notify(host->card))
		err = mmc_poweroff_notify(host->card, EXT_CSD_POWER_OFF_SHORT);
	else if (mmc_card_can_sleep(host))
		err = mmc_card_sleep(host);
	else if (!mmc_host_is_spi(host))
		err = mmc_deselect_cards(host);
	host->card->state &= ~(MMC_STATE_HIGHSPEED | MMC_STATE_HIGHSPEED_200);

out:
	mmc_release_host(host);
	return err;
}

/*
 * Resume callback from host.
 *
 * This function tries to determine if the same card is still present
 * and, if so, restore all state to it.
 */
static int mmc_resume(struct mmc_host *host)
{
	int err;

	BUG_ON(!host);
	BUG_ON(!host->card);

	mmc_claim_host(host);
	err = mmc_init_card(host, host->ocr, host->card);
	mmc_release_host(host);

	return err;
}

static int mmc_power_restore(struct mmc_host *host)
{
	int ret;

	host->card->state &= ~(MMC_STATE_HIGHSPEED | MMC_STATE_HIGHSPEED_200);
	mmc_claim_host(host);
	ret = mmc_init_card(host, host->ocr, host->card);
	mmc_release_host(host);

	return ret;
}

static int mmc_sleep(struct mmc_host *host)
{
	struct mmc_card *card = host->card;
	int err = -ENOSYS;

	if (card && card->ext_csd.rev >= 3) {
		err = mmc_card_sleepawake(host, 1);
		if (err < 0)
			pr_debug("%s: Error %d while putting card into sleep",
				 mmc_hostname(host), err);
	}

	return err;
}

static int mmc_awake(struct mmc_host *host)
{
	struct mmc_card *card = host->card;
	int err = -ENOSYS;

	if (card && card->ext_csd.rev >= 3) {
		err = mmc_card_sleepawake(host, 0);
		if (err < 0)
			pr_debug("%s: Error %d while awaking sleeping card",
				 mmc_hostname(host), err);
	}

	return err;
}

static const struct mmc_bus_ops mmc_ops = {
	.awake = mmc_awake,
	.sleep = mmc_sleep,
	.remove = mmc_remove,
	.detect = mmc_detect,
	.suspend = NULL,
	.resume = NULL,
	.power_restore = mmc_power_restore,
	.alive = mmc_alive,
};

static const struct mmc_bus_ops mmc_ops_unsafe = {
	.awake = mmc_awake,
	.sleep = mmc_sleep,
	.remove = mmc_remove,
	.detect = mmc_detect,
	.suspend = mmc_suspend,
	.resume = mmc_resume,
	.power_restore = mmc_power_restore,
	.alive = mmc_alive,
};

static void mmc_attach_bus_ops(struct mmc_host *host)
{
	const struct mmc_bus_ops *bus_ops;

	if (!mmc_card_is_removable(host))
		bus_ops = &mmc_ops_unsafe;
	else
		bus_ops = &mmc_ops;
	mmc_attach_bus(host, bus_ops);
}

/*
 * Starting point for MMC card init.
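 *
 * Called with the host claimed: probes the card's operating voltage
 * range with CMD1, attaches the bus operations and then hands over to
 * mmc_init_card() for the actual initialisation.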
 */
int mmc_attach_mmc(struct mmc_host *host)
{
	int err;
	u32 ocr;

	BUG_ON(!host);
	WARN_ON(!host->claimed);

	/* Set correct bus mode for MMC before attempting attach */
	if (!mmc_host_is_spi(host))
		mmc_set_bus_mode(host, MMC_BUSMODE_OPENDRAIN);

	err = mmc_send_op_cond(host, 0, &ocr);
	if (err)
		return err;

	mmc_attach_bus_ops(host);
	if (host->ocr_avail_mmc)
		host->ocr_avail = host->ocr_avail_mmc;

	/*
	 * We need to get OCR a different way for SPI.
	 */
	if (mmc_host_is_spi(host)) {
		err = mmc_spi_read_ocr(host, 1, &ocr);
		if (err)
			goto err;
	}

	/*
	 * Sanity check the voltages that the card claims to
	 * support.
	 */
	if (ocr & 0x7F) {
		pr_warning("%s: card claims to support voltages "
		       "below the defined range. These will be ignored.\n",
		       mmc_hostname(host));
		ocr &= ~0x7F;
	}

	host->ocr = mmc_select_voltage(host, ocr);

	/*
	 * Can we support the voltage of the card?
	 */
	if (!host->ocr) {
		err = -EINVAL;
		goto err;
	}

	/*
	 * Detect and init the card.
	 */
	err = mmc_init_card(host, host->ocr, NULL);
	if (err)
		goto err;

	mmc_release_host(host);
	err = mmc_add_card(host->card);
	mmc_claim_host(host);
	if (err)
		goto remove_card;

	return 0;

remove_card:
	mmc_release_host(host);
	mmc_remove_card(host->card);
	mmc_claim_host(host);
	host->card = NULL;
err:
	mmc_detach_bus(host);

	pr_err("%s: error %d whilst initialising MMC card\n",
		mmc_hostname(host), err);

	return err;
}