1 /* 2 * SD Association Host Standard Specification v2.0 controller emulation 3 * 4 * Datasheet: PartA2_SD_Host_Controller_Simplified_Specification_Ver2.00.pdf 5 * 6 * Copyright (c) 2011 Samsung Electronics Co., Ltd. 7 * Mitsyanko Igor <i.mitsyanko@samsung.com> 8 * Peter A.G. Crosthwaite <peter.crosthwaite@petalogix.com> 9 * 10 * Based on MMC controller for Samsung S5PC1xx-based board emulation 11 * by Alexey Merkulov and Vladimir Monakhov. 12 * 13 * This program is free software; you can redistribute it and/or modify it 14 * under the terms of the GNU General Public License as published by the 15 * Free Software Foundation; either version 2 of the License, or (at your 16 * option) any later version. 17 * 18 * This program is distributed in the hope that it will be useful, 19 * but WITHOUT ANY WARRANTY; without even the implied warranty of 20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 21 * See the GNU General Public License for more details. 22 * 23 * You should have received a copy of the GNU General Public License along 24 * with this program; if not, see <http://www.gnu.org/licenses/>. 25 */ 26 27 #include "qemu/osdep.h" 28 #include "qemu/units.h" 29 #include "qemu/error-report.h" 30 #include "qapi/error.h" 31 #include "hw/irq.h" 32 #include "hw/qdev-properties.h" 33 #include "sysemu/dma.h" 34 #include "qemu/timer.h" 35 #include "qemu/bitops.h" 36 #include "hw/sd/sdhci.h" 37 #include "migration/vmstate.h" 38 #include "sdhci-internal.h" 39 #include "qemu/log.h" 40 #include "qemu/module.h" 41 #include "trace.h" 42 #include "qom/object.h" 43 44 #define TYPE_SDHCI_BUS "sdhci-bus" 45 /* This is reusing the SDBus typedef from SD_BUS */ 46 DECLARE_INSTANCE_CHECKER(SDBus, SDHCI_BUS, 47 TYPE_SDHCI_BUS) 48 49 #define MASKED_WRITE(reg, mask, val) (reg = (reg & (mask)) | (val)) 50 51 static inline unsigned int sdhci_get_fifolen(SDHCIState *s) 52 { 53 return 1 << (9 + FIELD_EX32(s->capareg, SDHC_CAPAB, MAXBLOCKLENGTH)); 54 } 55 56 /* return true on error */ 57 static bool sdhci_check_capab_freq_range(SDHCIState *s, const char *desc, 58 uint8_t freq, Error **errp) 59 { 60 if (s->sd_spec_version >= 3) { 61 return false; 62 } 63 switch (freq) { 64 case 0: 65 case 10 ... 63: 66 break; 67 default: 68 error_setg(errp, "SD %s clock frequency can have value" 69 "in range 0-63 only", desc); 70 return true; 71 } 72 return false; 73 } 74 75 static void sdhci_check_capareg(SDHCIState *s, Error **errp) 76 { 77 uint64_t msk = s->capareg; 78 uint32_t val; 79 bool y; 80 81 switch (s->sd_spec_version) { 82 case 4: 83 val = FIELD_EX64(s->capareg, SDHC_CAPAB, BUS64BIT_V4); 84 trace_sdhci_capareg("64-bit system bus (v4)", val); 85 msk = FIELD_DP64(msk, SDHC_CAPAB, BUS64BIT_V4, 0); 86 87 val = FIELD_EX64(s->capareg, SDHC_CAPAB, UHS_II); 88 trace_sdhci_capareg("UHS-II", val); 89 msk = FIELD_DP64(msk, SDHC_CAPAB, UHS_II, 0); 90 91 val = FIELD_EX64(s->capareg, SDHC_CAPAB, ADMA3); 92 trace_sdhci_capareg("ADMA3", val); 93 msk = FIELD_DP64(msk, SDHC_CAPAB, ADMA3, 0); 94 95 /* fallthrough */ 96 case 3: 97 val = FIELD_EX64(s->capareg, SDHC_CAPAB, ASYNC_INT); 98 trace_sdhci_capareg("async interrupt", val); 99 msk = FIELD_DP64(msk, SDHC_CAPAB, ASYNC_INT, 0); 100 101 val = FIELD_EX64(s->capareg, SDHC_CAPAB, SLOT_TYPE); 102 if (val) { 103 error_setg(errp, "slot-type not supported"); 104 return; 105 } 106 trace_sdhci_capareg("slot type", val); 107 msk = FIELD_DP64(msk, SDHC_CAPAB, SLOT_TYPE, 0); 108 109 if (val != 2) { 110 val = FIELD_EX64(s->capareg, SDHC_CAPAB, EMBEDDED_8BIT); 111 trace_sdhci_capareg("8-bit bus", val); 112 } 113 msk = FIELD_DP64(msk, SDHC_CAPAB, EMBEDDED_8BIT, 0); 114 115 val = FIELD_EX64(s->capareg, SDHC_CAPAB, BUS_SPEED); 116 trace_sdhci_capareg("bus speed mask", val); 117 msk = FIELD_DP64(msk, SDHC_CAPAB, BUS_SPEED, 0); 118 119 val = FIELD_EX64(s->capareg, SDHC_CAPAB, DRIVER_STRENGTH); 120 trace_sdhci_capareg("driver strength mask", val); 121 msk = FIELD_DP64(msk, SDHC_CAPAB, DRIVER_STRENGTH, 0); 122 123 val = FIELD_EX64(s->capareg, SDHC_CAPAB, TIMER_RETUNING); 124 trace_sdhci_capareg("timer re-tuning", val); 125 msk = FIELD_DP64(msk, SDHC_CAPAB, TIMER_RETUNING, 0); 126 127 val = FIELD_EX64(s->capareg, SDHC_CAPAB, SDR50_TUNING); 128 trace_sdhci_capareg("use SDR50 tuning", val); 129 msk = FIELD_DP64(msk, SDHC_CAPAB, SDR50_TUNING, 0); 130 131 val = FIELD_EX64(s->capareg, SDHC_CAPAB, RETUNING_MODE); 132 trace_sdhci_capareg("re-tuning mode", val); 133 msk = FIELD_DP64(msk, SDHC_CAPAB, RETUNING_MODE, 0); 134 135 val = FIELD_EX64(s->capareg, SDHC_CAPAB, CLOCK_MULT); 136 trace_sdhci_capareg("clock multiplier", val); 137 msk = FIELD_DP64(msk, SDHC_CAPAB, CLOCK_MULT, 0); 138 139 /* fallthrough */ 140 case 2: /* default version */ 141 val = FIELD_EX64(s->capareg, SDHC_CAPAB, ADMA2); 142 trace_sdhci_capareg("ADMA2", val); 143 msk = FIELD_DP64(msk, SDHC_CAPAB, ADMA2, 0); 144 145 val = FIELD_EX64(s->capareg, SDHC_CAPAB, ADMA1); 146 trace_sdhci_capareg("ADMA1", val); 147 msk = FIELD_DP64(msk, SDHC_CAPAB, ADMA1, 0); 148 149 val = FIELD_EX64(s->capareg, SDHC_CAPAB, BUS64BIT); 150 trace_sdhci_capareg("64-bit system bus (v3)", val); 151 msk = FIELD_DP64(msk, SDHC_CAPAB, BUS64BIT, 0); 152 153 /* fallthrough */ 154 case 1: 155 y = FIELD_EX64(s->capareg, SDHC_CAPAB, TOUNIT); 156 msk = FIELD_DP64(msk, SDHC_CAPAB, TOUNIT, 0); 157 158 val = FIELD_EX64(s->capareg, SDHC_CAPAB, TOCLKFREQ); 159 trace_sdhci_capareg(y ? "timeout (MHz)" : "Timeout (KHz)", val); 160 if (sdhci_check_capab_freq_range(s, "timeout", val, errp)) { 161 return; 162 } 163 msk = FIELD_DP64(msk, SDHC_CAPAB, TOCLKFREQ, 0); 164 165 val = FIELD_EX64(s->capareg, SDHC_CAPAB, BASECLKFREQ); 166 trace_sdhci_capareg(y ? "base (MHz)" : "Base (KHz)", val); 167 if (sdhci_check_capab_freq_range(s, "base", val, errp)) { 168 return; 169 } 170 msk = FIELD_DP64(msk, SDHC_CAPAB, BASECLKFREQ, 0); 171 172 val = FIELD_EX64(s->capareg, SDHC_CAPAB, MAXBLOCKLENGTH); 173 if (val >= 3) { 174 error_setg(errp, "block size can be 512, 1024 or 2048 only"); 175 return; 176 } 177 trace_sdhci_capareg("max block length", sdhci_get_fifolen(s)); 178 msk = FIELD_DP64(msk, SDHC_CAPAB, MAXBLOCKLENGTH, 0); 179 180 val = FIELD_EX64(s->capareg, SDHC_CAPAB, HIGHSPEED); 181 trace_sdhci_capareg("high speed", val); 182 msk = FIELD_DP64(msk, SDHC_CAPAB, HIGHSPEED, 0); 183 184 val = FIELD_EX64(s->capareg, SDHC_CAPAB, SDMA); 185 trace_sdhci_capareg("SDMA", val); 186 msk = FIELD_DP64(msk, SDHC_CAPAB, SDMA, 0); 187 188 val = FIELD_EX64(s->capareg, SDHC_CAPAB, SUSPRESUME); 189 trace_sdhci_capareg("suspend/resume", val); 190 msk = FIELD_DP64(msk, SDHC_CAPAB, SUSPRESUME, 0); 191 192 val = FIELD_EX64(s->capareg, SDHC_CAPAB, V33); 193 trace_sdhci_capareg("3.3v", val); 194 msk = FIELD_DP64(msk, SDHC_CAPAB, V33, 0); 195 196 val = FIELD_EX64(s->capareg, SDHC_CAPAB, V30); 197 trace_sdhci_capareg("3.0v", val); 198 msk = FIELD_DP64(msk, SDHC_CAPAB, V30, 0); 199 200 val = FIELD_EX64(s->capareg, SDHC_CAPAB, V18); 201 trace_sdhci_capareg("1.8v", val); 202 msk = FIELD_DP64(msk, SDHC_CAPAB, V18, 0); 203 break; 204 205 default: 206 error_setg(errp, "Unsupported spec version: %u", s->sd_spec_version); 207 } 208 if (msk) { 209 qemu_log_mask(LOG_UNIMP, 210 "SDHCI: unknown CAPAB mask: 0x%016" PRIx64 "\n", msk); 211 } 212 } 213 214 static uint8_t sdhci_slotint(SDHCIState *s) 215 { 216 return (s->norintsts & s->norintsigen) || (s->errintsts & s->errintsigen) || 217 ((s->norintsts & SDHC_NIS_INSERT) && (s->wakcon & SDHC_WKUP_ON_INS)) || 218 ((s->norintsts & SDHC_NIS_REMOVE) && (s->wakcon & SDHC_WKUP_ON_RMV)); 219 } 220 221 /* Return true if IRQ was pending and delivered */ 222 static bool sdhci_update_irq(SDHCIState *s) 223 { 224 bool pending = sdhci_slotint(s); 225 226 qemu_set_irq(s->irq, pending); 227 228 return pending; 229 } 230 231 static void sdhci_raise_insertion_irq(void *opaque) 232 { 233 SDHCIState *s = (SDHCIState *)opaque; 234 235 if (s->norintsts & SDHC_NIS_REMOVE) { 236 timer_mod(s->insert_timer, 237 qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + SDHC_INSERTION_DELAY); 238 } else { 239 s->prnsts = 0x1ff0000; 240 if (s->norintstsen & SDHC_NISEN_INSERT) { 241 s->norintsts |= SDHC_NIS_INSERT; 242 } 243 sdhci_update_irq(s); 244 } 245 } 246 247 static void sdhci_set_inserted(DeviceState *dev, bool level) 248 { 249 SDHCIState *s = (SDHCIState *)dev; 250 251 trace_sdhci_set_inserted(level ? "insert" : "eject"); 252 if ((s->norintsts & SDHC_NIS_REMOVE) && level) { 253 /* Give target some time to notice card ejection */ 254 timer_mod(s->insert_timer, 255 qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + SDHC_INSERTION_DELAY); 256 } else { 257 if (level) { 258 s->prnsts = 0x1ff0000; 259 if (s->norintstsen & SDHC_NISEN_INSERT) { 260 s->norintsts |= SDHC_NIS_INSERT; 261 } 262 } else { 263 s->prnsts = 0x1fa0000; 264 s->pwrcon &= ~SDHC_POWER_ON; 265 s->clkcon &= ~SDHC_CLOCK_SDCLK_EN; 266 if (s->norintstsen & SDHC_NISEN_REMOVE) { 267 s->norintsts |= SDHC_NIS_REMOVE; 268 } 269 } 270 sdhci_update_irq(s); 271 } 272 } 273 274 static void sdhci_set_readonly(DeviceState *dev, bool level) 275 { 276 SDHCIState *s = (SDHCIState *)dev; 277 278 if (level) { 279 s->prnsts &= ~SDHC_WRITE_PROTECT; 280 } else { 281 /* Write enabled */ 282 s->prnsts |= SDHC_WRITE_PROTECT; 283 } 284 } 285 286 static void sdhci_reset(SDHCIState *s) 287 { 288 DeviceState *dev = DEVICE(s); 289 290 timer_del(s->insert_timer); 291 timer_del(s->transfer_timer); 292 293 /* Set all registers to 0. Capabilities/Version registers are not cleared 294 * and assumed to always preserve their value, given to them during 295 * initialization */ 296 memset(&s->sdmasysad, 0, (uintptr_t)&s->capareg - (uintptr_t)&s->sdmasysad); 297 298 /* Reset other state based on current card insertion/readonly status */ 299 sdhci_set_inserted(dev, sdbus_get_inserted(&s->sdbus)); 300 sdhci_set_readonly(dev, sdbus_get_readonly(&s->sdbus)); 301 302 s->data_count = 0; 303 s->stopped_state = sdhc_not_stopped; 304 s->pending_insert_state = false; 305 } 306 307 static void sdhci_poweron_reset(DeviceState *dev) 308 { 309 /* QOM (ie power-on) reset. This is identical to reset 310 * commanded via device register apart from handling of the 311 * 'pending insert on powerup' quirk. 312 */ 313 SDHCIState *s = (SDHCIState *)dev; 314 315 sdhci_reset(s); 316 317 if (s->pending_insert_quirk) { 318 s->pending_insert_state = true; 319 } 320 } 321 322 static void sdhci_data_transfer(void *opaque); 323 324 static void sdhci_send_command(SDHCIState *s) 325 { 326 SDRequest request; 327 uint8_t response[16]; 328 int rlen; 329 330 s->errintsts = 0; 331 s->acmd12errsts = 0; 332 request.cmd = s->cmdreg >> 8; 333 request.arg = s->argument; 334 335 trace_sdhci_send_command(request.cmd, request.arg); 336 rlen = sdbus_do_command(&s->sdbus, &request, response); 337 338 if (s->cmdreg & SDHC_CMD_RESPONSE) { 339 if (rlen == 4) { 340 s->rspreg[0] = ldl_be_p(response); 341 s->rspreg[1] = s->rspreg[2] = s->rspreg[3] = 0; 342 trace_sdhci_response4(s->rspreg[0]); 343 } else if (rlen == 16) { 344 s->rspreg[0] = ldl_be_p(&response[11]); 345 s->rspreg[1] = ldl_be_p(&response[7]); 346 s->rspreg[2] = ldl_be_p(&response[3]); 347 s->rspreg[3] = (response[0] << 16) | (response[1] << 8) | 348 response[2]; 349 trace_sdhci_response16(s->rspreg[3], s->rspreg[2], 350 s->rspreg[1], s->rspreg[0]); 351 } else { 352 trace_sdhci_error("timeout waiting for command response"); 353 if (s->errintstsen & SDHC_EISEN_CMDTIMEOUT) { 354 s->errintsts |= SDHC_EIS_CMDTIMEOUT; 355 s->norintsts |= SDHC_NIS_ERR; 356 } 357 } 358 359 if (!(s->quirks & SDHCI_QUIRK_NO_BUSY_IRQ) && 360 (s->norintstsen & SDHC_NISEN_TRSCMP) && 361 (s->cmdreg & SDHC_CMD_RESPONSE) == SDHC_CMD_RSP_WITH_BUSY) { 362 s->norintsts |= SDHC_NIS_TRSCMP; 363 } 364 } 365 366 if (s->norintstsen & SDHC_NISEN_CMDCMP) { 367 s->norintsts |= SDHC_NIS_CMDCMP; 368 } 369 370 sdhci_update_irq(s); 371 372 if (s->blksize && (s->cmdreg & SDHC_CMD_DATA_PRESENT)) { 373 s->data_count = 0; 374 sdhci_data_transfer(s); 375 } 376 } 377 378 static void sdhci_end_transfer(SDHCIState *s) 379 { 380 /* Automatically send CMD12 to stop transfer if AutoCMD12 enabled */ 381 if ((s->trnmod & SDHC_TRNS_ACMD12) != 0) { 382 SDRequest request; 383 uint8_t response[16]; 384 385 request.cmd = 0x0C; 386 request.arg = 0; 387 trace_sdhci_end_transfer(request.cmd, request.arg); 388 sdbus_do_command(&s->sdbus, &request, response); 389 /* Auto CMD12 response goes to the upper Response register */ 390 s->rspreg[3] = ldl_be_p(response); 391 } 392 393 s->prnsts &= ~(SDHC_DOING_READ | SDHC_DOING_WRITE | 394 SDHC_DAT_LINE_ACTIVE | SDHC_DATA_INHIBIT | 395 SDHC_SPACE_AVAILABLE | SDHC_DATA_AVAILABLE); 396 397 if (s->norintstsen & SDHC_NISEN_TRSCMP) { 398 s->norintsts |= SDHC_NIS_TRSCMP; 399 } 400 401 sdhci_update_irq(s); 402 } 403 404 /* 405 * Programmed i/o data transfer 406 */ 407 #define BLOCK_SIZE_MASK (4 * KiB - 1) 408 409 /* Fill host controller's read buffer with BLKSIZE bytes of data from card */ 410 static void sdhci_read_block_from_card(SDHCIState *s) 411 { 412 const uint16_t blk_size = s->blksize & BLOCK_SIZE_MASK; 413 414 if ((s->trnmod & SDHC_TRNS_MULTI) && 415 (s->trnmod & SDHC_TRNS_BLK_CNT_EN) && (s->blkcnt == 0)) { 416 return; 417 } 418 419 if (!FIELD_EX32(s->hostctl2, SDHC_HOSTCTL2, EXECUTE_TUNING)) { 420 /* Device is not in tuning */ 421 sdbus_read_data(&s->sdbus, s->fifo_buffer, blk_size); 422 } 423 424 if (FIELD_EX32(s->hostctl2, SDHC_HOSTCTL2, EXECUTE_TUNING)) { 425 /* Device is in tuning */ 426 s->hostctl2 &= ~R_SDHC_HOSTCTL2_EXECUTE_TUNING_MASK; 427 s->hostctl2 |= R_SDHC_HOSTCTL2_SAMPLING_CLKSEL_MASK; 428 s->prnsts &= ~(SDHC_DAT_LINE_ACTIVE | SDHC_DOING_READ | 429 SDHC_DATA_INHIBIT); 430 goto read_done; 431 } 432 433 /* New data now available for READ through Buffer Port Register */ 434 s->prnsts |= SDHC_DATA_AVAILABLE; 435 if (s->norintstsen & SDHC_NISEN_RBUFRDY) { 436 s->norintsts |= SDHC_NIS_RBUFRDY; 437 } 438 439 /* Clear DAT line active status if that was the last block */ 440 if ((s->trnmod & SDHC_TRNS_MULTI) == 0 || 441 ((s->trnmod & SDHC_TRNS_MULTI) && s->blkcnt == 1)) { 442 s->prnsts &= ~SDHC_DAT_LINE_ACTIVE; 443 } 444 445 /* If stop at block gap request was set and it's not the last block of 446 * data - generate Block Event interrupt */ 447 if (s->stopped_state == sdhc_gap_read && (s->trnmod & SDHC_TRNS_MULTI) && 448 s->blkcnt != 1) { 449 s->prnsts &= ~SDHC_DAT_LINE_ACTIVE; 450 if (s->norintstsen & SDHC_EISEN_BLKGAP) { 451 s->norintsts |= SDHC_EIS_BLKGAP; 452 } 453 } 454 455 read_done: 456 sdhci_update_irq(s); 457 } 458 459 /* Read @size byte of data from host controller @s BUFFER DATA PORT register */ 460 static uint32_t sdhci_read_dataport(SDHCIState *s, unsigned size) 461 { 462 uint32_t value = 0; 463 int i; 464 465 /* first check that a valid data exists in host controller input buffer */ 466 if ((s->prnsts & SDHC_DATA_AVAILABLE) == 0) { 467 trace_sdhci_error("read from empty buffer"); 468 return 0; 469 } 470 471 for (i = 0; i < size; i++) { 472 value |= s->fifo_buffer[s->data_count] << i * 8; 473 s->data_count++; 474 /* check if we've read all valid data (blksize bytes) from buffer */ 475 if ((s->data_count) >= (s->blksize & BLOCK_SIZE_MASK)) { 476 trace_sdhci_read_dataport(s->data_count); 477 s->prnsts &= ~SDHC_DATA_AVAILABLE; /* no more data in a buffer */ 478 s->data_count = 0; /* next buff read must start at position [0] */ 479 480 if (s->trnmod & SDHC_TRNS_BLK_CNT_EN) { 481 s->blkcnt--; 482 } 483 484 /* if that was the last block of data */ 485 if ((s->trnmod & SDHC_TRNS_MULTI) == 0 || 486 ((s->trnmod & SDHC_TRNS_BLK_CNT_EN) && (s->blkcnt == 0)) || 487 /* stop at gap request */ 488 (s->stopped_state == sdhc_gap_read && 489 !(s->prnsts & SDHC_DAT_LINE_ACTIVE))) { 490 sdhci_end_transfer(s); 491 } else { /* if there are more data, read next block from card */ 492 sdhci_read_block_from_card(s); 493 } 494 break; 495 } 496 } 497 498 return value; 499 } 500 501 /* Write data from host controller FIFO to card */ 502 static void sdhci_write_block_to_card(SDHCIState *s) 503 { 504 if (s->prnsts & SDHC_SPACE_AVAILABLE) { 505 if (s->norintstsen & SDHC_NISEN_WBUFRDY) { 506 s->norintsts |= SDHC_NIS_WBUFRDY; 507 } 508 sdhci_update_irq(s); 509 return; 510 } 511 512 if (s->trnmod & SDHC_TRNS_BLK_CNT_EN) { 513 if (s->blkcnt == 0) { 514 return; 515 } else { 516 s->blkcnt--; 517 } 518 } 519 520 sdbus_write_data(&s->sdbus, s->fifo_buffer, s->blksize & BLOCK_SIZE_MASK); 521 522 /* Next data can be written through BUFFER DATORT register */ 523 s->prnsts |= SDHC_SPACE_AVAILABLE; 524 525 /* Finish transfer if that was the last block of data */ 526 if ((s->trnmod & SDHC_TRNS_MULTI) == 0 || 527 ((s->trnmod & SDHC_TRNS_MULTI) && 528 (s->trnmod & SDHC_TRNS_BLK_CNT_EN) && (s->blkcnt == 0))) { 529 sdhci_end_transfer(s); 530 } else if (s->norintstsen & SDHC_NISEN_WBUFRDY) { 531 s->norintsts |= SDHC_NIS_WBUFRDY; 532 } 533 534 /* Generate Block Gap Event if requested and if not the last block */ 535 if (s->stopped_state == sdhc_gap_write && (s->trnmod & SDHC_TRNS_MULTI) && 536 s->blkcnt > 0) { 537 s->prnsts &= ~SDHC_DOING_WRITE; 538 if (s->norintstsen & SDHC_EISEN_BLKGAP) { 539 s->norintsts |= SDHC_EIS_BLKGAP; 540 } 541 sdhci_end_transfer(s); 542 } 543 544 sdhci_update_irq(s); 545 } 546 547 /* Write @size bytes of @value data to host controller @s Buffer Data Port 548 * register */ 549 static void sdhci_write_dataport(SDHCIState *s, uint32_t value, unsigned size) 550 { 551 unsigned i; 552 553 /* Check that there is free space left in a buffer */ 554 if (!(s->prnsts & SDHC_SPACE_AVAILABLE)) { 555 trace_sdhci_error("Can't write to data buffer: buffer full"); 556 return; 557 } 558 559 for (i = 0; i < size; i++) { 560 s->fifo_buffer[s->data_count] = value & 0xFF; 561 s->data_count++; 562 value >>= 8; 563 if (s->data_count >= (s->blksize & BLOCK_SIZE_MASK)) { 564 trace_sdhci_write_dataport(s->data_count); 565 s->data_count = 0; 566 s->prnsts &= ~SDHC_SPACE_AVAILABLE; 567 if (s->prnsts & SDHC_DOING_WRITE) { 568 sdhci_write_block_to_card(s); 569 } 570 } 571 } 572 } 573 574 /* 575 * Single DMA data transfer 576 */ 577 578 /* Multi block SDMA transfer */ 579 static void sdhci_sdma_transfer_multi_blocks(SDHCIState *s) 580 { 581 bool page_aligned = false; 582 unsigned int begin; 583 const uint16_t block_size = s->blksize & BLOCK_SIZE_MASK; 584 uint32_t boundary_chk = 1 << (((s->blksize & ~BLOCK_SIZE_MASK) >> 12) + 12); 585 uint32_t boundary_count = boundary_chk - (s->sdmasysad % boundary_chk); 586 587 if (!(s->trnmod & SDHC_TRNS_BLK_CNT_EN) || !s->blkcnt) { 588 qemu_log_mask(LOG_UNIMP, "infinite transfer is not supported\n"); 589 return; 590 } 591 592 /* XXX: Some sd/mmc drivers (for example, u-boot-slp) do not account for 593 * possible stop at page boundary if initial address is not page aligned, 594 * allow them to work properly */ 595 if ((s->sdmasysad % boundary_chk) == 0) { 596 page_aligned = true; 597 } 598 599 if (s->trnmod & SDHC_TRNS_READ) { 600 s->prnsts |= SDHC_DOING_READ | SDHC_DATA_INHIBIT | 601 SDHC_DAT_LINE_ACTIVE; 602 while (s->blkcnt) { 603 if (s->data_count == 0) { 604 sdbus_read_data(&s->sdbus, s->fifo_buffer, block_size); 605 } 606 begin = s->data_count; 607 if (((boundary_count + begin) < block_size) && page_aligned) { 608 s->data_count = boundary_count + begin; 609 boundary_count = 0; 610 } else { 611 s->data_count = block_size; 612 boundary_count -= block_size - begin; 613 if (s->trnmod & SDHC_TRNS_BLK_CNT_EN) { 614 s->blkcnt--; 615 } 616 } 617 dma_memory_write(s->dma_as, s->sdmasysad, 618 &s->fifo_buffer[begin], s->data_count - begin); 619 s->sdmasysad += s->data_count - begin; 620 if (s->data_count == block_size) { 621 s->data_count = 0; 622 } 623 if (page_aligned && boundary_count == 0) { 624 break; 625 } 626 } 627 } else { 628 s->prnsts |= SDHC_DOING_WRITE | SDHC_DATA_INHIBIT | 629 SDHC_DAT_LINE_ACTIVE; 630 while (s->blkcnt) { 631 begin = s->data_count; 632 if (((boundary_count + begin) < block_size) && page_aligned) { 633 s->data_count = boundary_count + begin; 634 boundary_count = 0; 635 } else { 636 s->data_count = block_size; 637 boundary_count -= block_size - begin; 638 } 639 dma_memory_read(s->dma_as, s->sdmasysad, 640 &s->fifo_buffer[begin], s->data_count - begin); 641 s->sdmasysad += s->data_count - begin; 642 if (s->data_count == block_size) { 643 sdbus_write_data(&s->sdbus, s->fifo_buffer, block_size); 644 s->data_count = 0; 645 if (s->trnmod & SDHC_TRNS_BLK_CNT_EN) { 646 s->blkcnt--; 647 } 648 } 649 if (page_aligned && boundary_count == 0) { 650 break; 651 } 652 } 653 } 654 655 if (s->blkcnt == 0) { 656 sdhci_end_transfer(s); 657 } else { 658 if (s->norintstsen & SDHC_NISEN_DMA) { 659 s->norintsts |= SDHC_NIS_DMA; 660 } 661 sdhci_update_irq(s); 662 } 663 } 664 665 /* single block SDMA transfer */ 666 static void sdhci_sdma_transfer_single_block(SDHCIState *s) 667 { 668 uint32_t datacnt = s->blksize & BLOCK_SIZE_MASK; 669 670 if (s->trnmod & SDHC_TRNS_READ) { 671 sdbus_read_data(&s->sdbus, s->fifo_buffer, datacnt); 672 dma_memory_write(s->dma_as, s->sdmasysad, s->fifo_buffer, datacnt); 673 } else { 674 dma_memory_read(s->dma_as, s->sdmasysad, s->fifo_buffer, datacnt); 675 sdbus_write_data(&s->sdbus, s->fifo_buffer, datacnt); 676 } 677 s->blkcnt--; 678 679 sdhci_end_transfer(s); 680 } 681 682 typedef struct ADMADescr { 683 hwaddr addr; 684 uint16_t length; 685 uint8_t attr; 686 uint8_t incr; 687 } ADMADescr; 688 689 static void get_adma_description(SDHCIState *s, ADMADescr *dscr) 690 { 691 uint32_t adma1 = 0; 692 uint64_t adma2 = 0; 693 hwaddr entry_addr = (hwaddr)s->admasysaddr; 694 switch (SDHC_DMA_TYPE(s->hostctl1)) { 695 case SDHC_CTRL_ADMA2_32: 696 dma_memory_read(s->dma_as, entry_addr, &adma2, sizeof(adma2)); 697 adma2 = le64_to_cpu(adma2); 698 /* The spec does not specify endianness of descriptor table. 699 * We currently assume that it is LE. 700 */ 701 dscr->addr = (hwaddr)extract64(adma2, 32, 32) & ~0x3ull; 702 dscr->length = (uint16_t)extract64(adma2, 16, 16); 703 dscr->attr = (uint8_t)extract64(adma2, 0, 7); 704 dscr->incr = 8; 705 break; 706 case SDHC_CTRL_ADMA1_32: 707 dma_memory_read(s->dma_as, entry_addr, &adma1, sizeof(adma1)); 708 adma1 = le32_to_cpu(adma1); 709 dscr->addr = (hwaddr)(adma1 & 0xFFFFF000); 710 dscr->attr = (uint8_t)extract32(adma1, 0, 7); 711 dscr->incr = 4; 712 if ((dscr->attr & SDHC_ADMA_ATTR_ACT_MASK) == SDHC_ADMA_ATTR_SET_LEN) { 713 dscr->length = (uint16_t)extract32(adma1, 12, 16); 714 } else { 715 dscr->length = 4 * KiB; 716 } 717 break; 718 case SDHC_CTRL_ADMA2_64: 719 dma_memory_read(s->dma_as, entry_addr, &dscr->attr, 1); 720 dma_memory_read(s->dma_as, entry_addr + 2, &dscr->length, 2); 721 dscr->length = le16_to_cpu(dscr->length); 722 dma_memory_read(s->dma_as, entry_addr + 4, &dscr->addr, 8); 723 dscr->addr = le64_to_cpu(dscr->addr); 724 dscr->attr &= (uint8_t) ~0xC0; 725 dscr->incr = 12; 726 break; 727 } 728 } 729 730 /* Advanced DMA data transfer */ 731 732 static void sdhci_do_adma(SDHCIState *s) 733 { 734 unsigned int begin, length; 735 const uint16_t block_size = s->blksize & BLOCK_SIZE_MASK; 736 ADMADescr dscr = {}; 737 int i; 738 739 if (s->trnmod & SDHC_TRNS_BLK_CNT_EN && !s->blkcnt) { 740 /* Stop Multiple Transfer */ 741 sdhci_end_transfer(s); 742 return; 743 } 744 745 for (i = 0; i < SDHC_ADMA_DESCS_PER_DELAY; ++i) { 746 s->admaerr &= ~SDHC_ADMAERR_LENGTH_MISMATCH; 747 748 get_adma_description(s, &dscr); 749 trace_sdhci_adma_loop(dscr.addr, dscr.length, dscr.attr); 750 751 if ((dscr.attr & SDHC_ADMA_ATTR_VALID) == 0) { 752 /* Indicate that error occurred in ST_FDS state */ 753 s->admaerr &= ~SDHC_ADMAERR_STATE_MASK; 754 s->admaerr |= SDHC_ADMAERR_STATE_ST_FDS; 755 756 /* Generate ADMA error interrupt */ 757 if (s->errintstsen & SDHC_EISEN_ADMAERR) { 758 s->errintsts |= SDHC_EIS_ADMAERR; 759 s->norintsts |= SDHC_NIS_ERR; 760 } 761 762 sdhci_update_irq(s); 763 return; 764 } 765 766 length = dscr.length ? dscr.length : 64 * KiB; 767 768 switch (dscr.attr & SDHC_ADMA_ATTR_ACT_MASK) { 769 case SDHC_ADMA_ATTR_ACT_TRAN: /* data transfer */ 770 if (s->trnmod & SDHC_TRNS_READ) { 771 while (length) { 772 if (s->data_count == 0) { 773 sdbus_read_data(&s->sdbus, s->fifo_buffer, block_size); 774 } 775 begin = s->data_count; 776 if ((length + begin) < block_size) { 777 s->data_count = length + begin; 778 length = 0; 779 } else { 780 s->data_count = block_size; 781 length -= block_size - begin; 782 } 783 dma_memory_write(s->dma_as, dscr.addr, 784 &s->fifo_buffer[begin], 785 s->data_count - begin); 786 dscr.addr += s->data_count - begin; 787 if (s->data_count == block_size) { 788 s->data_count = 0; 789 if (s->trnmod & SDHC_TRNS_BLK_CNT_EN) { 790 s->blkcnt--; 791 if (s->blkcnt == 0) { 792 break; 793 } 794 } 795 } 796 } 797 } else { 798 while (length) { 799 begin = s->data_count; 800 if ((length + begin) < block_size) { 801 s->data_count = length + begin; 802 length = 0; 803 } else { 804 s->data_count = block_size; 805 length -= block_size - begin; 806 } 807 dma_memory_read(s->dma_as, dscr.addr, 808 &s->fifo_buffer[begin], 809 s->data_count - begin); 810 dscr.addr += s->data_count - begin; 811 if (s->data_count == block_size) { 812 sdbus_write_data(&s->sdbus, s->fifo_buffer, block_size); 813 s->data_count = 0; 814 if (s->trnmod & SDHC_TRNS_BLK_CNT_EN) { 815 s->blkcnt--; 816 if (s->blkcnt == 0) { 817 break; 818 } 819 } 820 } 821 } 822 } 823 s->admasysaddr += dscr.incr; 824 break; 825 case SDHC_ADMA_ATTR_ACT_LINK: /* link to next descriptor table */ 826 s->admasysaddr = dscr.addr; 827 trace_sdhci_adma("link", s->admasysaddr); 828 break; 829 default: 830 s->admasysaddr += dscr.incr; 831 break; 832 } 833 834 if (dscr.attr & SDHC_ADMA_ATTR_INT) { 835 trace_sdhci_adma("interrupt", s->admasysaddr); 836 if (s->norintstsen & SDHC_NISEN_DMA) { 837 s->norintsts |= SDHC_NIS_DMA; 838 } 839 840 if (sdhci_update_irq(s) && !(dscr.attr & SDHC_ADMA_ATTR_END)) { 841 /* IRQ delivered, reschedule current transfer */ 842 break; 843 } 844 } 845 846 /* ADMA transfer terminates if blkcnt == 0 or by END attribute */ 847 if (((s->trnmod & SDHC_TRNS_BLK_CNT_EN) && 848 (s->blkcnt == 0)) || (dscr.attr & SDHC_ADMA_ATTR_END)) { 849 trace_sdhci_adma_transfer_completed(); 850 if (length || ((dscr.attr & SDHC_ADMA_ATTR_END) && 851 (s->trnmod & SDHC_TRNS_BLK_CNT_EN) && 852 s->blkcnt != 0)) { 853 trace_sdhci_error("SD/MMC host ADMA length mismatch"); 854 s->admaerr |= SDHC_ADMAERR_LENGTH_MISMATCH | 855 SDHC_ADMAERR_STATE_ST_TFR; 856 if (s->errintstsen & SDHC_EISEN_ADMAERR) { 857 trace_sdhci_error("Set ADMA error flag"); 858 s->errintsts |= SDHC_EIS_ADMAERR; 859 s->norintsts |= SDHC_NIS_ERR; 860 } 861 862 sdhci_update_irq(s); 863 } 864 sdhci_end_transfer(s); 865 return; 866 } 867 868 } 869 870 /* we have unfinished business - reschedule to continue ADMA */ 871 timer_mod(s->transfer_timer, 872 qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + SDHC_TRANSFER_DELAY); 873 } 874 875 /* Perform data transfer according to controller configuration */ 876 877 static void sdhci_data_transfer(void *opaque) 878 { 879 SDHCIState *s = (SDHCIState *)opaque; 880 881 if (s->trnmod & SDHC_TRNS_DMA) { 882 switch (SDHC_DMA_TYPE(s->hostctl1)) { 883 case SDHC_CTRL_SDMA: 884 if ((s->blkcnt == 1) || !(s->trnmod & SDHC_TRNS_MULTI)) { 885 sdhci_sdma_transfer_single_block(s); 886 } else { 887 sdhci_sdma_transfer_multi_blocks(s); 888 } 889 890 break; 891 case SDHC_CTRL_ADMA1_32: 892 if (!(s->capareg & R_SDHC_CAPAB_ADMA1_MASK)) { 893 trace_sdhci_error("ADMA1 not supported"); 894 break; 895 } 896 897 sdhci_do_adma(s); 898 break; 899 case SDHC_CTRL_ADMA2_32: 900 if (!(s->capareg & R_SDHC_CAPAB_ADMA2_MASK)) { 901 trace_sdhci_error("ADMA2 not supported"); 902 break; 903 } 904 905 sdhci_do_adma(s); 906 break; 907 case SDHC_CTRL_ADMA2_64: 908 if (!(s->capareg & R_SDHC_CAPAB_ADMA2_MASK) || 909 !(s->capareg & R_SDHC_CAPAB_BUS64BIT_MASK)) { 910 trace_sdhci_error("64 bit ADMA not supported"); 911 break; 912 } 913 914 sdhci_do_adma(s); 915 break; 916 default: 917 trace_sdhci_error("Unsupported DMA type"); 918 break; 919 } 920 } else { 921 if ((s->trnmod & SDHC_TRNS_READ) && sdbus_data_ready(&s->sdbus)) { 922 s->prnsts |= SDHC_DOING_READ | SDHC_DATA_INHIBIT | 923 SDHC_DAT_LINE_ACTIVE; 924 sdhci_read_block_from_card(s); 925 } else { 926 s->prnsts |= SDHC_DOING_WRITE | SDHC_DAT_LINE_ACTIVE | 927 SDHC_SPACE_AVAILABLE | SDHC_DATA_INHIBIT; 928 sdhci_write_block_to_card(s); 929 } 930 } 931 } 932 933 static bool sdhci_can_issue_command(SDHCIState *s) 934 { 935 if (!SDHC_CLOCK_IS_ON(s->clkcon) || 936 (((s->prnsts & SDHC_DATA_INHIBIT) || s->stopped_state) && 937 ((s->cmdreg & SDHC_CMD_DATA_PRESENT) || 938 ((s->cmdreg & SDHC_CMD_RESPONSE) == SDHC_CMD_RSP_WITH_BUSY && 939 !(SDHC_COMMAND_TYPE(s->cmdreg) == SDHC_CMD_ABORT))))) { 940 return false; 941 } 942 943 return true; 944 } 945 946 /* The Buffer Data Port register must be accessed in sequential and 947 * continuous manner */ 948 static inline bool 949 sdhci_buff_access_is_sequential(SDHCIState *s, unsigned byte_num) 950 { 951 if ((s->data_count & 0x3) != byte_num) { 952 trace_sdhci_error("Non-sequential access to Buffer Data Port register" 953 "is prohibited\n"); 954 return false; 955 } 956 return true; 957 } 958 959 static void sdhci_resume_pending_transfer(SDHCIState *s) 960 { 961 timer_del(s->transfer_timer); 962 sdhci_data_transfer(s); 963 } 964 965 static uint64_t sdhci_read(void *opaque, hwaddr offset, unsigned size) 966 { 967 SDHCIState *s = (SDHCIState *)opaque; 968 uint32_t ret = 0; 969 970 if (timer_pending(s->transfer_timer)) { 971 sdhci_resume_pending_transfer(s); 972 } 973 974 switch (offset & ~0x3) { 975 case SDHC_SYSAD: 976 ret = s->sdmasysad; 977 break; 978 case SDHC_BLKSIZE: 979 ret = s->blksize | (s->blkcnt << 16); 980 break; 981 case SDHC_ARGUMENT: 982 ret = s->argument; 983 break; 984 case SDHC_TRNMOD: 985 ret = s->trnmod | (s->cmdreg << 16); 986 break; 987 case SDHC_RSPREG0 ... SDHC_RSPREG3: 988 ret = s->rspreg[((offset & ~0x3) - SDHC_RSPREG0) >> 2]; 989 break; 990 case SDHC_BDATA: 991 if (sdhci_buff_access_is_sequential(s, offset - SDHC_BDATA)) { 992 ret = sdhci_read_dataport(s, size); 993 trace_sdhci_access("rd", size << 3, offset, "->", ret, ret); 994 return ret; 995 } 996 break; 997 case SDHC_PRNSTS: 998 ret = s->prnsts; 999 ret = FIELD_DP32(ret, SDHC_PRNSTS, DAT_LVL, 1000 sdbus_get_dat_lines(&s->sdbus)); 1001 ret = FIELD_DP32(ret, SDHC_PRNSTS, CMD_LVL, 1002 sdbus_get_cmd_line(&s->sdbus)); 1003 break; 1004 case SDHC_HOSTCTL: 1005 ret = s->hostctl1 | (s->pwrcon << 8) | (s->blkgap << 16) | 1006 (s->wakcon << 24); 1007 break; 1008 case SDHC_CLKCON: 1009 ret = s->clkcon | (s->timeoutcon << 16); 1010 break; 1011 case SDHC_NORINTSTS: 1012 ret = s->norintsts | (s->errintsts << 16); 1013 break; 1014 case SDHC_NORINTSTSEN: 1015 ret = s->norintstsen | (s->errintstsen << 16); 1016 break; 1017 case SDHC_NORINTSIGEN: 1018 ret = s->norintsigen | (s->errintsigen << 16); 1019 break; 1020 case SDHC_ACMD12ERRSTS: 1021 ret = s->acmd12errsts | (s->hostctl2 << 16); 1022 break; 1023 case SDHC_CAPAB: 1024 ret = (uint32_t)s->capareg; 1025 break; 1026 case SDHC_CAPAB + 4: 1027 ret = (uint32_t)(s->capareg >> 32); 1028 break; 1029 case SDHC_MAXCURR: 1030 ret = (uint32_t)s->maxcurr; 1031 break; 1032 case SDHC_MAXCURR + 4: 1033 ret = (uint32_t)(s->maxcurr >> 32); 1034 break; 1035 case SDHC_ADMAERR: 1036 ret = s->admaerr; 1037 break; 1038 case SDHC_ADMASYSADDR: 1039 ret = (uint32_t)s->admasysaddr; 1040 break; 1041 case SDHC_ADMASYSADDR + 4: 1042 ret = (uint32_t)(s->admasysaddr >> 32); 1043 break; 1044 case SDHC_SLOT_INT_STATUS: 1045 ret = (s->version << 16) | sdhci_slotint(s); 1046 break; 1047 default: 1048 qemu_log_mask(LOG_UNIMP, "SDHC rd_%ub @0x%02" HWADDR_PRIx " " 1049 "not implemented\n", size, offset); 1050 break; 1051 } 1052 1053 ret >>= (offset & 0x3) * 8; 1054 ret &= (1ULL << (size * 8)) - 1; 1055 trace_sdhci_access("rd", size << 3, offset, "->", ret, ret); 1056 return ret; 1057 } 1058 1059 static inline void sdhci_blkgap_write(SDHCIState *s, uint8_t value) 1060 { 1061 if ((value & SDHC_STOP_AT_GAP_REQ) && (s->blkgap & SDHC_STOP_AT_GAP_REQ)) { 1062 return; 1063 } 1064 s->blkgap = value & SDHC_STOP_AT_GAP_REQ; 1065 1066 if ((value & SDHC_CONTINUE_REQ) && s->stopped_state && 1067 (s->blkgap & SDHC_STOP_AT_GAP_REQ) == 0) { 1068 if (s->stopped_state == sdhc_gap_read) { 1069 s->prnsts |= SDHC_DAT_LINE_ACTIVE | SDHC_DOING_READ; 1070 sdhci_read_block_from_card(s); 1071 } else { 1072 s->prnsts |= SDHC_DAT_LINE_ACTIVE | SDHC_DOING_WRITE; 1073 sdhci_write_block_to_card(s); 1074 } 1075 s->stopped_state = sdhc_not_stopped; 1076 } else if (!s->stopped_state && (value & SDHC_STOP_AT_GAP_REQ)) { 1077 if (s->prnsts & SDHC_DOING_READ) { 1078 s->stopped_state = sdhc_gap_read; 1079 } else if (s->prnsts & SDHC_DOING_WRITE) { 1080 s->stopped_state = sdhc_gap_write; 1081 } 1082 } 1083 } 1084 1085 static inline void sdhci_reset_write(SDHCIState *s, uint8_t value) 1086 { 1087 switch (value) { 1088 case SDHC_RESET_ALL: 1089 sdhci_reset(s); 1090 break; 1091 case SDHC_RESET_CMD: 1092 s->prnsts &= ~SDHC_CMD_INHIBIT; 1093 s->norintsts &= ~SDHC_NIS_CMDCMP; 1094 break; 1095 case SDHC_RESET_DATA: 1096 s->data_count = 0; 1097 s->prnsts &= ~(SDHC_SPACE_AVAILABLE | SDHC_DATA_AVAILABLE | 1098 SDHC_DOING_READ | SDHC_DOING_WRITE | 1099 SDHC_DATA_INHIBIT | SDHC_DAT_LINE_ACTIVE); 1100 s->blkgap &= ~(SDHC_STOP_AT_GAP_REQ | SDHC_CONTINUE_REQ); 1101 s->stopped_state = sdhc_not_stopped; 1102 s->norintsts &= ~(SDHC_NIS_WBUFRDY | SDHC_NIS_RBUFRDY | 1103 SDHC_NIS_DMA | SDHC_NIS_TRSCMP | SDHC_NIS_BLKGAP); 1104 break; 1105 } 1106 } 1107 1108 static void 1109 sdhci_write(void *opaque, hwaddr offset, uint64_t val, unsigned size) 1110 { 1111 SDHCIState *s = (SDHCIState *)opaque; 1112 unsigned shift = 8 * (offset & 0x3); 1113 uint32_t mask = ~(((1ULL << (size * 8)) - 1) << shift); 1114 uint32_t value = val; 1115 value <<= shift; 1116 1117 if (timer_pending(s->transfer_timer)) { 1118 sdhci_resume_pending_transfer(s); 1119 } 1120 1121 switch (offset & ~0x3) { 1122 case SDHC_SYSAD: 1123 s->sdmasysad = (s->sdmasysad & mask) | value; 1124 MASKED_WRITE(s->sdmasysad, mask, value); 1125 /* Writing to last byte of sdmasysad might trigger transfer */ 1126 if (!(mask & 0xFF000000) && TRANSFERRING_DATA(s->prnsts) && s->blkcnt && 1127 s->blksize && SDHC_DMA_TYPE(s->hostctl1) == SDHC_CTRL_SDMA) { 1128 if (s->trnmod & SDHC_TRNS_MULTI) { 1129 sdhci_sdma_transfer_multi_blocks(s); 1130 } else { 1131 sdhci_sdma_transfer_single_block(s); 1132 } 1133 } 1134 break; 1135 case SDHC_BLKSIZE: 1136 if (!TRANSFERRING_DATA(s->prnsts)) { 1137 MASKED_WRITE(s->blksize, mask, extract32(value, 0, 12)); 1138 MASKED_WRITE(s->blkcnt, mask >> 16, value >> 16); 1139 } 1140 1141 /* Limit block size to the maximum buffer size */ 1142 if (extract32(s->blksize, 0, 12) > s->buf_maxsz) { 1143 qemu_log_mask(LOG_GUEST_ERROR, "%s: Size 0x%x is larger than " 1144 "the maximum buffer 0x%x\n", __func__, s->blksize, 1145 s->buf_maxsz); 1146 1147 s->blksize = deposit32(s->blksize, 0, 12, s->buf_maxsz); 1148 } 1149 1150 break; 1151 case SDHC_ARGUMENT: 1152 MASKED_WRITE(s->argument, mask, value); 1153 break; 1154 case SDHC_TRNMOD: 1155 /* DMA can be enabled only if it is supported as indicated by 1156 * capabilities register */ 1157 if (!(s->capareg & R_SDHC_CAPAB_SDMA_MASK)) { 1158 value &= ~SDHC_TRNS_DMA; 1159 } 1160 MASKED_WRITE(s->trnmod, mask, value & SDHC_TRNMOD_MASK); 1161 MASKED_WRITE(s->cmdreg, mask >> 16, value >> 16); 1162 1163 /* Writing to the upper byte of CMDREG triggers SD command generation */ 1164 if ((mask & 0xFF000000) || !sdhci_can_issue_command(s)) { 1165 break; 1166 } 1167 1168 sdhci_send_command(s); 1169 break; 1170 case SDHC_BDATA: 1171 if (sdhci_buff_access_is_sequential(s, offset - SDHC_BDATA)) { 1172 sdhci_write_dataport(s, value >> shift, size); 1173 } 1174 break; 1175 case SDHC_HOSTCTL: 1176 if (!(mask & 0xFF0000)) { 1177 sdhci_blkgap_write(s, value >> 16); 1178 } 1179 MASKED_WRITE(s->hostctl1, mask, value); 1180 MASKED_WRITE(s->pwrcon, mask >> 8, value >> 8); 1181 MASKED_WRITE(s->wakcon, mask >> 24, value >> 24); 1182 if (!(s->prnsts & SDHC_CARD_PRESENT) || ((s->pwrcon >> 1) & 0x7) < 5 || 1183 !(s->capareg & (1 << (31 - ((s->pwrcon >> 1) & 0x7))))) { 1184 s->pwrcon &= ~SDHC_POWER_ON; 1185 } 1186 break; 1187 case SDHC_CLKCON: 1188 if (!(mask & 0xFF000000)) { 1189 sdhci_reset_write(s, value >> 24); 1190 } 1191 MASKED_WRITE(s->clkcon, mask, value); 1192 MASKED_WRITE(s->timeoutcon, mask >> 16, value >> 16); 1193 if (s->clkcon & SDHC_CLOCK_INT_EN) { 1194 s->clkcon |= SDHC_CLOCK_INT_STABLE; 1195 } else { 1196 s->clkcon &= ~SDHC_CLOCK_INT_STABLE; 1197 } 1198 break; 1199 case SDHC_NORINTSTS: 1200 if (s->norintstsen & SDHC_NISEN_CARDINT) { 1201 value &= ~SDHC_NIS_CARDINT; 1202 } 1203 s->norintsts &= mask | ~value; 1204 s->errintsts &= (mask >> 16) | ~(value >> 16); 1205 if (s->errintsts) { 1206 s->norintsts |= SDHC_NIS_ERR; 1207 } else { 1208 s->norintsts &= ~SDHC_NIS_ERR; 1209 } 1210 sdhci_update_irq(s); 1211 break; 1212 case SDHC_NORINTSTSEN: 1213 MASKED_WRITE(s->norintstsen, mask, value); 1214 MASKED_WRITE(s->errintstsen, mask >> 16, value >> 16); 1215 s->norintsts &= s->norintstsen; 1216 s->errintsts &= s->errintstsen; 1217 if (s->errintsts) { 1218 s->norintsts |= SDHC_NIS_ERR; 1219 } else { 1220 s->norintsts &= ~SDHC_NIS_ERR; 1221 } 1222 /* Quirk for Raspberry Pi: pending card insert interrupt 1223 * appears when first enabled after power on */ 1224 if ((s->norintstsen & SDHC_NISEN_INSERT) && s->pending_insert_state) { 1225 assert(s->pending_insert_quirk); 1226 s->norintsts |= SDHC_NIS_INSERT; 1227 s->pending_insert_state = false; 1228 } 1229 sdhci_update_irq(s); 1230 break; 1231 case SDHC_NORINTSIGEN: 1232 MASKED_WRITE(s->norintsigen, mask, value); 1233 MASKED_WRITE(s->errintsigen, mask >> 16, value >> 16); 1234 sdhci_update_irq(s); 1235 break; 1236 case SDHC_ADMAERR: 1237 MASKED_WRITE(s->admaerr, mask, value); 1238 break; 1239 case SDHC_ADMASYSADDR: 1240 s->admasysaddr = (s->admasysaddr & (0xFFFFFFFF00000000ULL | 1241 (uint64_t)mask)) | (uint64_t)value; 1242 break; 1243 case SDHC_ADMASYSADDR + 4: 1244 s->admasysaddr = (s->admasysaddr & (0x00000000FFFFFFFFULL | 1245 ((uint64_t)mask << 32))) | ((uint64_t)value << 32); 1246 break; 1247 case SDHC_FEAER: 1248 s->acmd12errsts |= value; 1249 s->errintsts |= (value >> 16) & s->errintstsen; 1250 if (s->acmd12errsts) { 1251 s->errintsts |= SDHC_EIS_CMD12ERR; 1252 } 1253 if (s->errintsts) { 1254 s->norintsts |= SDHC_NIS_ERR; 1255 } 1256 sdhci_update_irq(s); 1257 break; 1258 case SDHC_ACMD12ERRSTS: 1259 MASKED_WRITE(s->acmd12errsts, mask, value & UINT16_MAX); 1260 if (s->uhs_mode >= UHS_I) { 1261 MASKED_WRITE(s->hostctl2, mask >> 16, value >> 16); 1262 1263 if (FIELD_EX32(s->hostctl2, SDHC_HOSTCTL2, V18_ENA)) { 1264 sdbus_set_voltage(&s->sdbus, SD_VOLTAGE_1_8V); 1265 } else { 1266 sdbus_set_voltage(&s->sdbus, SD_VOLTAGE_3_3V); 1267 } 1268 } 1269 break; 1270 1271 case SDHC_CAPAB: 1272 case SDHC_CAPAB + 4: 1273 case SDHC_MAXCURR: 1274 case SDHC_MAXCURR + 4: 1275 qemu_log_mask(LOG_GUEST_ERROR, "SDHC wr_%ub @0x%02" HWADDR_PRIx 1276 " <- 0x%08x read-only\n", size, offset, value >> shift); 1277 break; 1278 1279 default: 1280 qemu_log_mask(LOG_UNIMP, "SDHC wr_%ub @0x%02" HWADDR_PRIx " <- 0x%08x " 1281 "not implemented\n", size, offset, value >> shift); 1282 break; 1283 } 1284 trace_sdhci_access("wr", size << 3, offset, "<-", 1285 value >> shift, value >> shift); 1286 } 1287 1288 static const MemoryRegionOps sdhci_mmio_ops = { 1289 .read = sdhci_read, 1290 .write = sdhci_write, 1291 .valid = { 1292 .min_access_size = 1, 1293 .max_access_size = 4, 1294 .unaligned = false 1295 }, 1296 .endianness = DEVICE_LITTLE_ENDIAN, 1297 }; 1298 1299 static void sdhci_init_readonly_registers(SDHCIState *s, Error **errp) 1300 { 1301 ERRP_GUARD(); 1302 1303 switch (s->sd_spec_version) { 1304 case 2 ... 3: 1305 break; 1306 default: 1307 error_setg(errp, "Only Spec v2/v3 are supported"); 1308 return; 1309 } 1310 s->version = (SDHC_HCVER_VENDOR << 8) | (s->sd_spec_version - 1); 1311 1312 sdhci_check_capareg(s, errp); 1313 if (*errp) { 1314 return; 1315 } 1316 } 1317 1318 /* --- qdev common --- */ 1319 1320 void sdhci_initfn(SDHCIState *s) 1321 { 1322 qbus_create_inplace(&s->sdbus, sizeof(s->sdbus), 1323 TYPE_SDHCI_BUS, DEVICE(s), "sd-bus"); 1324 1325 s->insert_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, sdhci_raise_insertion_irq, s); 1326 s->transfer_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, sdhci_data_transfer, s); 1327 1328 s->io_ops = &sdhci_mmio_ops; 1329 } 1330 1331 void sdhci_uninitfn(SDHCIState *s) 1332 { 1333 timer_free(s->insert_timer); 1334 timer_free(s->transfer_timer); 1335 1336 g_free(s->fifo_buffer); 1337 s->fifo_buffer = NULL; 1338 } 1339 1340 void sdhci_common_realize(SDHCIState *s, Error **errp) 1341 { 1342 ERRP_GUARD(); 1343 1344 sdhci_init_readonly_registers(s, errp); 1345 if (*errp) { 1346 return; 1347 } 1348 s->buf_maxsz = sdhci_get_fifolen(s); 1349 s->fifo_buffer = g_malloc0(s->buf_maxsz); 1350 1351 memory_region_init_io(&s->iomem, OBJECT(s), s->io_ops, s, "sdhci", 1352 SDHC_REGISTERS_MAP_SIZE); 1353 } 1354 1355 void sdhci_common_unrealize(SDHCIState *s) 1356 { 1357 /* This function is expected to be called only once for each class: 1358 * - SysBus: via DeviceClass->unrealize(), 1359 * - PCI: via PCIDeviceClass->exit(). 1360 * However to avoid double-free and/or use-after-free we still nullify 1361 * this variable (better safe than sorry!). */ 1362 g_free(s->fifo_buffer); 1363 s->fifo_buffer = NULL; 1364 } 1365 1366 static bool sdhci_pending_insert_vmstate_needed(void *opaque) 1367 { 1368 SDHCIState *s = opaque; 1369 1370 return s->pending_insert_state; 1371 } 1372 1373 static const VMStateDescription sdhci_pending_insert_vmstate = { 1374 .name = "sdhci/pending-insert", 1375 .version_id = 1, 1376 .minimum_version_id = 1, 1377 .needed = sdhci_pending_insert_vmstate_needed, 1378 .fields = (VMStateField[]) { 1379 VMSTATE_BOOL(pending_insert_state, SDHCIState), 1380 VMSTATE_END_OF_LIST() 1381 }, 1382 }; 1383 1384 const VMStateDescription sdhci_vmstate = { 1385 .name = "sdhci", 1386 .version_id = 1, 1387 .minimum_version_id = 1, 1388 .fields = (VMStateField[]) { 1389 VMSTATE_UINT32(sdmasysad, SDHCIState), 1390 VMSTATE_UINT16(blksize, SDHCIState), 1391 VMSTATE_UINT16(blkcnt, SDHCIState), 1392 VMSTATE_UINT32(argument, SDHCIState), 1393 VMSTATE_UINT16(trnmod, SDHCIState), 1394 VMSTATE_UINT16(cmdreg, SDHCIState), 1395 VMSTATE_UINT32_ARRAY(rspreg, SDHCIState, 4), 1396 VMSTATE_UINT32(prnsts, SDHCIState), 1397 VMSTATE_UINT8(hostctl1, SDHCIState), 1398 VMSTATE_UINT8(pwrcon, SDHCIState), 1399 VMSTATE_UINT8(blkgap, SDHCIState), 1400 VMSTATE_UINT8(wakcon, SDHCIState), 1401 VMSTATE_UINT16(clkcon, SDHCIState), 1402 VMSTATE_UINT8(timeoutcon, SDHCIState), 1403 VMSTATE_UINT8(admaerr, SDHCIState), 1404 VMSTATE_UINT16(norintsts, SDHCIState), 1405 VMSTATE_UINT16(errintsts, SDHCIState), 1406 VMSTATE_UINT16(norintstsen, SDHCIState), 1407 VMSTATE_UINT16(errintstsen, SDHCIState), 1408 VMSTATE_UINT16(norintsigen, SDHCIState), 1409 VMSTATE_UINT16(errintsigen, SDHCIState), 1410 VMSTATE_UINT16(acmd12errsts, SDHCIState), 1411 VMSTATE_UINT16(data_count, SDHCIState), 1412 VMSTATE_UINT64(admasysaddr, SDHCIState), 1413 VMSTATE_UINT8(stopped_state, SDHCIState), 1414 VMSTATE_VBUFFER_UINT32(fifo_buffer, SDHCIState, 1, NULL, buf_maxsz), 1415 VMSTATE_TIMER_PTR(insert_timer, SDHCIState), 1416 VMSTATE_TIMER_PTR(transfer_timer, SDHCIState), 1417 VMSTATE_END_OF_LIST() 1418 }, 1419 .subsections = (const VMStateDescription*[]) { 1420 &sdhci_pending_insert_vmstate, 1421 NULL 1422 }, 1423 }; 1424 1425 void sdhci_common_class_init(ObjectClass *klass, void *data) 1426 { 1427 DeviceClass *dc = DEVICE_CLASS(klass); 1428 1429 set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); 1430 dc->vmsd = &sdhci_vmstate; 1431 dc->reset = sdhci_poweron_reset; 1432 } 1433 1434 /* --- qdev SysBus --- */ 1435 1436 static Property sdhci_sysbus_properties[] = { 1437 DEFINE_SDHCI_COMMON_PROPERTIES(SDHCIState), 1438 DEFINE_PROP_BOOL("pending-insert-quirk", SDHCIState, pending_insert_quirk, 1439 false), 1440 DEFINE_PROP_LINK("dma", SDHCIState, 1441 dma_mr, TYPE_MEMORY_REGION, MemoryRegion *), 1442 DEFINE_PROP_END_OF_LIST(), 1443 }; 1444 1445 static void sdhci_sysbus_init(Object *obj) 1446 { 1447 SDHCIState *s = SYSBUS_SDHCI(obj); 1448 1449 sdhci_initfn(s); 1450 } 1451 1452 static void sdhci_sysbus_finalize(Object *obj) 1453 { 1454 SDHCIState *s = SYSBUS_SDHCI(obj); 1455 1456 if (s->dma_mr) { 1457 object_unparent(OBJECT(s->dma_mr)); 1458 } 1459 1460 sdhci_uninitfn(s); 1461 } 1462 1463 static void sdhci_sysbus_realize(DeviceState *dev, Error **errp) 1464 { 1465 ERRP_GUARD(); 1466 SDHCIState *s = SYSBUS_SDHCI(dev); 1467 SysBusDevice *sbd = SYS_BUS_DEVICE(dev); 1468 1469 sdhci_common_realize(s, errp); 1470 if (*errp) { 1471 return; 1472 } 1473 1474 if (s->dma_mr) { 1475 s->dma_as = &s->sysbus_dma_as; 1476 address_space_init(s->dma_as, s->dma_mr, "sdhci-dma"); 1477 } else { 1478 /* use system_memory() if property "dma" not set */ 1479 s->dma_as = &address_space_memory; 1480 } 1481 1482 sysbus_init_irq(sbd, &s->irq); 1483 1484 sysbus_init_mmio(sbd, &s->iomem); 1485 } 1486 1487 static void sdhci_sysbus_unrealize(DeviceState *dev) 1488 { 1489 SDHCIState *s = SYSBUS_SDHCI(dev); 1490 1491 sdhci_common_unrealize(s); 1492 1493 if (s->dma_mr) { 1494 address_space_destroy(s->dma_as); 1495 } 1496 } 1497 1498 static void sdhci_sysbus_class_init(ObjectClass *klass, void *data) 1499 { 1500 DeviceClass *dc = DEVICE_CLASS(klass); 1501 1502 device_class_set_props(dc, sdhci_sysbus_properties); 1503 dc->realize = sdhci_sysbus_realize; 1504 dc->unrealize = sdhci_sysbus_unrealize; 1505 1506 sdhci_common_class_init(klass, data); 1507 } 1508 1509 static const TypeInfo sdhci_sysbus_info = { 1510 .name = TYPE_SYSBUS_SDHCI, 1511 .parent = TYPE_SYS_BUS_DEVICE, 1512 .instance_size = sizeof(SDHCIState), 1513 .instance_init = sdhci_sysbus_init, 1514 .instance_finalize = sdhci_sysbus_finalize, 1515 .class_init = sdhci_sysbus_class_init, 1516 }; 1517 1518 /* --- qdev bus master --- */ 1519 1520 static void sdhci_bus_class_init(ObjectClass *klass, void *data) 1521 { 1522 SDBusClass *sbc = SD_BUS_CLASS(klass); 1523 1524 sbc->set_inserted = sdhci_set_inserted; 1525 sbc->set_readonly = sdhci_set_readonly; 1526 } 1527 1528 static const TypeInfo sdhci_bus_info = { 1529 .name = TYPE_SDHCI_BUS, 1530 .parent = TYPE_SD_BUS, 1531 .instance_size = sizeof(SDBus), 1532 .class_init = sdhci_bus_class_init, 1533 }; 1534 1535 /* --- qdev i.MX eSDHC --- */ 1536 1537 static uint64_t usdhc_read(void *opaque, hwaddr offset, unsigned size) 1538 { 1539 SDHCIState *s = SYSBUS_SDHCI(opaque); 1540 uint32_t ret; 1541 uint16_t hostctl1; 1542 1543 switch (offset) { 1544 default: 1545 return sdhci_read(opaque, offset, size); 1546 1547 case SDHC_HOSTCTL: 1548 /* 1549 * For a detailed explanation on the following bit 1550 * manipulation code see comments in a similar part of 1551 * usdhc_write() 1552 */ 1553 hostctl1 = SDHC_DMA_TYPE(s->hostctl1) << (8 - 3); 1554 1555 if (s->hostctl1 & SDHC_CTRL_8BITBUS) { 1556 hostctl1 |= ESDHC_CTRL_8BITBUS; 1557 } 1558 1559 if (s->hostctl1 & SDHC_CTRL_4BITBUS) { 1560 hostctl1 |= ESDHC_CTRL_4BITBUS; 1561 } 1562 1563 ret = hostctl1; 1564 ret |= (uint32_t)s->blkgap << 16; 1565 ret |= (uint32_t)s->wakcon << 24; 1566 1567 break; 1568 1569 case SDHC_PRNSTS: 1570 /* Add SDSTB (SD Clock Stable) bit to PRNSTS */ 1571 ret = sdhci_read(opaque, offset, size) & ~ESDHC_PRNSTS_SDSTB; 1572 if (s->clkcon & SDHC_CLOCK_INT_STABLE) { 1573 ret |= ESDHC_PRNSTS_SDSTB; 1574 } 1575 break; 1576 1577 case ESDHC_VENDOR_SPEC: 1578 ret = s->vendor_spec; 1579 break; 1580 case ESDHC_DLL_CTRL: 1581 case ESDHC_TUNE_CTRL_STATUS: 1582 case ESDHC_UNDOCUMENTED_REG27: 1583 case ESDHC_TUNING_CTRL: 1584 case ESDHC_MIX_CTRL: 1585 case ESDHC_WTMK_LVL: 1586 ret = 0; 1587 break; 1588 } 1589 1590 return ret; 1591 } 1592 1593 static void 1594 usdhc_write(void *opaque, hwaddr offset, uint64_t val, unsigned size) 1595 { 1596 SDHCIState *s = SYSBUS_SDHCI(opaque); 1597 uint8_t hostctl1; 1598 uint32_t value = (uint32_t)val; 1599 1600 switch (offset) { 1601 case ESDHC_DLL_CTRL: 1602 case ESDHC_TUNE_CTRL_STATUS: 1603 case ESDHC_UNDOCUMENTED_REG27: 1604 case ESDHC_TUNING_CTRL: 1605 case ESDHC_WTMK_LVL: 1606 break; 1607 1608 case ESDHC_VENDOR_SPEC: 1609 s->vendor_spec = value; 1610 switch (s->vendor) { 1611 case SDHCI_VENDOR_IMX: 1612 if (value & ESDHC_IMX_FRC_SDCLK_ON) { 1613 s->prnsts &= ~SDHC_IMX_CLOCK_GATE_OFF; 1614 } else { 1615 s->prnsts |= SDHC_IMX_CLOCK_GATE_OFF; 1616 } 1617 break; 1618 default: 1619 break; 1620 } 1621 break; 1622 1623 case SDHC_HOSTCTL: 1624 /* 1625 * Here's What ESDHCI has at offset 0x28 (SDHC_HOSTCTL) 1626 * 1627 * 7 6 5 4 3 2 1 0 1628 * |-----------+--------+--------+-----------+----------+---------| 1629 * | Card | Card | Endian | DATA3 | Data | Led | 1630 * | Detect | Detect | Mode | as Card | Transfer | Control | 1631 * | Signal | Test | | Detection | Width | | 1632 * | Selection | Level | | Pin | | | 1633 * |-----------+--------+--------+-----------+----------+---------| 1634 * 1635 * and 0x29 1636 * 1637 * 15 10 9 8 1638 * |----------+------| 1639 * | Reserved | DMA | 1640 * | | Sel. | 1641 * | | | 1642 * |----------+------| 1643 * 1644 * and here's what SDCHI spec expects those offsets to be: 1645 * 1646 * 0x28 (Host Control Register) 1647 * 1648 * 7 6 5 4 3 2 1 0 1649 * |--------+--------+----------+------+--------+----------+---------| 1650 * | Card | Card | Extended | DMA | High | Data | LED | 1651 * | Detect | Detect | Data | Sel. | Speed | Transfer | Control | 1652 * | Signal | Test | Transfer | | Enable | Width | | 1653 * | Sel. | Level | Width | | | | | 1654 * |--------+--------+----------+------+--------+----------+---------| 1655 * 1656 * and 0x29 (Power Control Register) 1657 * 1658 * |----------------------------------| 1659 * | Power Control Register | 1660 * | | 1661 * | Description omitted, | 1662 * | since it has no analog in ESDHCI | 1663 * | | 1664 * |----------------------------------| 1665 * 1666 * Since offsets 0x2A and 0x2B should be compatible between 1667 * both IP specs we only need to reconcile least 16-bit of the 1668 * word we've been given. 1669 */ 1670 1671 /* 1672 * First, save bits 7 6 and 0 since they are identical 1673 */ 1674 hostctl1 = value & (SDHC_CTRL_LED | 1675 SDHC_CTRL_CDTEST_INS | 1676 SDHC_CTRL_CDTEST_EN); 1677 /* 1678 * Second, split "Data Transfer Width" from bits 2 and 1 in to 1679 * bits 5 and 1 1680 */ 1681 if (value & ESDHC_CTRL_8BITBUS) { 1682 hostctl1 |= SDHC_CTRL_8BITBUS; 1683 } 1684 1685 if (value & ESDHC_CTRL_4BITBUS) { 1686 hostctl1 |= ESDHC_CTRL_4BITBUS; 1687 } 1688 1689 /* 1690 * Third, move DMA select from bits 9 and 8 to bits 4 and 3 1691 */ 1692 hostctl1 |= SDHC_DMA_TYPE(value >> (8 - 3)); 1693 1694 /* 1695 * Now place the corrected value into low 16-bit of the value 1696 * we are going to give standard SDHCI write function 1697 * 1698 * NOTE: This transformation should be the inverse of what can 1699 * be found in drivers/mmc/host/sdhci-esdhc-imx.c in Linux 1700 * kernel 1701 */ 1702 value &= ~UINT16_MAX; 1703 value |= hostctl1; 1704 value |= (uint16_t)s->pwrcon << 8; 1705 1706 sdhci_write(opaque, offset, value, size); 1707 break; 1708 1709 case ESDHC_MIX_CTRL: 1710 /* 1711 * So, when SD/MMC stack in Linux tries to write to "Transfer 1712 * Mode Register", ESDHC i.MX quirk code will translate it 1713 * into a write to ESDHC_MIX_CTRL, so we do the opposite in 1714 * order to get where we started 1715 * 1716 * Note that Auto CMD23 Enable bit is located in a wrong place 1717 * on i.MX, but since it is not used by QEMU we do not care. 1718 * 1719 * We don't want to call sdhci_write(.., SDHC_TRNMOD, ...) 1720 * here becuase it will result in a call to 1721 * sdhci_send_command(s) which we don't want. 1722 * 1723 */ 1724 s->trnmod = value & UINT16_MAX; 1725 break; 1726 case SDHC_TRNMOD: 1727 /* 1728 * Similar to above, but this time a write to "Command 1729 * Register" will be translated into a 4-byte write to 1730 * "Transfer Mode register" where lower 16-bit of value would 1731 * be set to zero. So what we do is fill those bits with 1732 * cached value from s->trnmod and let the SDHCI 1733 * infrastructure handle the rest 1734 */ 1735 sdhci_write(opaque, offset, val | s->trnmod, size); 1736 break; 1737 case SDHC_BLKSIZE: 1738 /* 1739 * ESDHCI does not implement "Host SDMA Buffer Boundary", and 1740 * Linux driver will try to zero this field out which will 1741 * break the rest of SDHCI emulation. 1742 * 1743 * Linux defaults to maximum possible setting (512K boundary) 1744 * and it seems to be the only option that i.MX IP implements, 1745 * so we artificially set it to that value. 1746 */ 1747 val |= 0x7 << 12; 1748 /* FALLTHROUGH */ 1749 default: 1750 sdhci_write(opaque, offset, val, size); 1751 break; 1752 } 1753 } 1754 1755 static const MemoryRegionOps usdhc_mmio_ops = { 1756 .read = usdhc_read, 1757 .write = usdhc_write, 1758 .valid = { 1759 .min_access_size = 1, 1760 .max_access_size = 4, 1761 .unaligned = false 1762 }, 1763 .endianness = DEVICE_LITTLE_ENDIAN, 1764 }; 1765 1766 static void imx_usdhc_init(Object *obj) 1767 { 1768 SDHCIState *s = SYSBUS_SDHCI(obj); 1769 1770 s->io_ops = &usdhc_mmio_ops; 1771 s->quirks = SDHCI_QUIRK_NO_BUSY_IRQ; 1772 } 1773 1774 static const TypeInfo imx_usdhc_info = { 1775 .name = TYPE_IMX_USDHC, 1776 .parent = TYPE_SYSBUS_SDHCI, 1777 .instance_init = imx_usdhc_init, 1778 }; 1779 1780 /* --- qdev Samsung s3c --- */ 1781 1782 #define S3C_SDHCI_CONTROL2 0x80 1783 #define S3C_SDHCI_CONTROL3 0x84 1784 #define S3C_SDHCI_CONTROL4 0x8c 1785 1786 static uint64_t sdhci_s3c_read(void *opaque, hwaddr offset, unsigned size) 1787 { 1788 uint64_t ret; 1789 1790 switch (offset) { 1791 case S3C_SDHCI_CONTROL2: 1792 case S3C_SDHCI_CONTROL3: 1793 case S3C_SDHCI_CONTROL4: 1794 /* ignore */ 1795 ret = 0; 1796 break; 1797 default: 1798 ret = sdhci_read(opaque, offset, size); 1799 break; 1800 } 1801 1802 return ret; 1803 } 1804 1805 static void sdhci_s3c_write(void *opaque, hwaddr offset, uint64_t val, 1806 unsigned size) 1807 { 1808 switch (offset) { 1809 case S3C_SDHCI_CONTROL2: 1810 case S3C_SDHCI_CONTROL3: 1811 case S3C_SDHCI_CONTROL4: 1812 /* ignore */ 1813 break; 1814 default: 1815 sdhci_write(opaque, offset, val, size); 1816 break; 1817 } 1818 } 1819 1820 static const MemoryRegionOps sdhci_s3c_mmio_ops = { 1821 .read = sdhci_s3c_read, 1822 .write = sdhci_s3c_write, 1823 .valid = { 1824 .min_access_size = 1, 1825 .max_access_size = 4, 1826 .unaligned = false 1827 }, 1828 .endianness = DEVICE_LITTLE_ENDIAN, 1829 }; 1830 1831 static void sdhci_s3c_init(Object *obj) 1832 { 1833 SDHCIState *s = SYSBUS_SDHCI(obj); 1834 1835 s->io_ops = &sdhci_s3c_mmio_ops; 1836 } 1837 1838 static const TypeInfo sdhci_s3c_info = { 1839 .name = TYPE_S3C_SDHCI , 1840 .parent = TYPE_SYSBUS_SDHCI, 1841 .instance_init = sdhci_s3c_init, 1842 }; 1843 1844 static void sdhci_register_types(void) 1845 { 1846 type_register_static(&sdhci_sysbus_info); 1847 type_register_static(&sdhci_bus_info); 1848 type_register_static(&imx_usdhc_info); 1849 type_register_static(&sdhci_s3c_info); 1850 } 1851 1852 type_init(sdhci_register_types) 1853