/*
 * ASPEED AST2400 SMC Controller (SPI Flash Only)
 *
 * Copyright (C) 2016 IBM Corp.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "hw/sysbus.h"
#include "migration/vmstate.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "exec/address-spaces.h"
#include "qemu/units.h"
#include "trace.h"

#include "hw/irq.h"
#include "hw/qdev-properties.h"
#include "hw/ssi/aspeed_smc.h"

/* CE Type Setting Register */
#define R_CONF            (0x00 / 4)
#define   CONF_LEGACY_DISABLE  (1 << 31)
#define   CONF_ENABLE_W4       20
#define   CONF_ENABLE_W3       19
#define   CONF_ENABLE_W2       18
#define   CONF_ENABLE_W1       17
#define   CONF_ENABLE_W0       16
#define   CONF_FLASH_TYPE4     8
#define   CONF_FLASH_TYPE3     6
#define   CONF_FLASH_TYPE2     4
#define   CONF_FLASH_TYPE1     2
#define   CONF_FLASH_TYPE0     0
#define      CONF_FLASH_TYPE_NOR   0x0
#define      CONF_FLASH_TYPE_NAND  0x1
#define      CONF_FLASH_TYPE_SPI   0x2 /* AST2600 is SPI only */

/* CE Control Register */
#define R_CE_CTRL         (0x04 / 4)
#define   CTRL_EXTENDED4       4  /* 32 bit addressing for SPI */
#define   CTRL_EXTENDED3       3  /* 32 bit addressing for SPI */
#define   CTRL_EXTENDED2       2  /* 32 bit addressing for SPI */
#define   CTRL_EXTENDED1       1  /* 32 bit addressing for SPI */
#define   CTRL_EXTENDED0       0  /* 32 bit addressing for SPI */

/* Interrupt Control and Status Register */
#define R_INTR_CTRL       (0x08 / 4)
#define   INTR_CTRL_DMA_STATUS            (1 << 11)
#define   INTR_CTRL_CMD_ABORT_STATUS      (1 << 10)
#define   INTR_CTRL_WRITE_PROTECT_STATUS  (1 << 9)
#define   INTR_CTRL_DMA_EN                (1 << 3)
#define   INTR_CTRL_CMD_ABORT_EN          (1 << 2)
#define   INTR_CTRL_WRITE_PROTECT_EN      (1 << 1)

/* Command Control Register */
#define R_CE_CMD_CTRL     (0x0C / 4)
#define   CTRL_ADDR_BYTE0_DISABLE_SHIFT   4
#define   CTRL_DATA_BYTE0_DISABLE_SHIFT   0

#define aspeed_smc_addr_byte_enabled(s, i)                               \
    (!((s)->regs[R_CE_CMD_CTRL] & (1 << (CTRL_ADDR_BYTE0_DISABLE_SHIFT + (i)))))
#define aspeed_smc_data_byte_enabled(s, i)                               \
    (!((s)->regs[R_CE_CMD_CTRL] & (1 << (CTRL_DATA_BYTE0_DISABLE_SHIFT + (i)))))

/* CEx Control Register */
#define R_CTRL0           (0x10 / 4)
#define   CTRL_IO_QPI              (1 << 31)
#define   CTRL_IO_QUAD_DATA        (1 << 30)
#define   CTRL_IO_DUAL_DATA        (1 << 29)
#define   CTRL_IO_DUAL_ADDR_DATA   (1 << 28) /* Includes dummies */
#define   CTRL_IO_QUAD_ADDR_DATA   (1 << 28) /* Includes dummies */
#define   CTRL_CMD_SHIFT           16
#define   CTRL_CMD_MASK            0xff
#define   CTRL_DUMMY_HIGH_SHIFT    14
#define   CTRL_AST2400_SPI_4BYTE   (1 << 13)
#define CE_CTRL_CLOCK_FREQ_SHIFT   8
#define CE_CTRL_CLOCK_FREQ_MASK    0xf
#define CE_CTRL_CLOCK_FREQ(div)                                        \
    (((div) & CE_CTRL_CLOCK_FREQ_MASK) << CE_CTRL_CLOCK_FREQ_SHIFT)
#define   CTRL_DUMMY_LOW_SHIFT     6 /* 2 bits [7:6] */
#define   CTRL_CE_STOP_ACTIVE      (1 << 2)
#define   CTRL_CMD_MODE_MASK       0x3
#define     CTRL_READMODE          0x0
#define     CTRL_FREADMODE         0x1
#define     CTRL_WRITEMODE         0x2
#define     CTRL_USERMODE          0x3
#define R_CTRL1           (0x14 / 4)
#define R_CTRL2           (0x18 / 4)
#define R_CTRL3           (0x1C / 4)
#define R_CTRL4           (0x20 / 4)

/* CEx Segment Address Register */
#define R_SEG_ADDR0       (0x30 / 4)
#define   SEG_END_SHIFT        24   /* 8MB units */
#define   SEG_END_MASK         0xff
#define   SEG_START_SHIFT      16   /* address bit [A29-A23] */
#define   SEG_START_MASK       0xff
#define R_SEG_ADDR1       (0x34 / 4)
#define R_SEG_ADDR2       (0x38 / 4)
#define R_SEG_ADDR3       (0x3C / 4)
#define R_SEG_ADDR4       (0x40 / 4)

/* Misc Control Register #1 */
#define R_MISC_CTRL1      (0x50 / 4)

/* SPI dummy cycle data */
#define R_DUMMY_DATA      (0x54 / 4)

/* DMA Control/Status Register */
#define R_DMA_CTRL        (0x80 / 4)
#define   DMA_CTRL_DELAY_MASK   0xf
#define   DMA_CTRL_DELAY_SHIFT  8
#define   DMA_CTRL_FREQ_MASK    0xf
#define   DMA_CTRL_FREQ_SHIFT   4
#define   DMA_CTRL_CALIB        (1 << 3)
#define   DMA_CTRL_CKSUM        (1 << 2)
#define   DMA_CTRL_WRITE        (1 << 1)
#define   DMA_CTRL_ENABLE       (1 << 0)

/* DMA Flash Side Address */
#define R_DMA_FLASH_ADDR  (0x84 / 4)

/* DMA DRAM Side Address */
#define R_DMA_DRAM_ADDR   (0x88 / 4)

/* DMA Length Register */
#define R_DMA_LEN         (0x8C / 4)

/* Checksum Calculation Result */
#define R_DMA_CHECKSUM    (0x90 / 4)

/* Read Timing Compensation Register */
#define R_TIMINGS         (0x94 / 4)

/* SPI controller registers and bits (AST2400) */
#define R_SPI_CONF        (0x00 / 4)
#define   SPI_CONF_ENABLE_W0   0
#define R_SPI_CTRL0       (0x4 / 4)
#define R_SPI_MISC_CTRL   (0x10 / 4)
#define R_SPI_TIMINGS     (0x14 / 4)

#define ASPEED_SMC_R_SPI_MAX (0x20 / 4)
#define ASPEED_SMC_R_SMC_MAX (0x20 / 4)

#define ASPEED_SOC_SMC_FLASH_BASE   0x10000000
#define ASPEED_SOC_FMC_FLASH_BASE   0x20000000
#define ASPEED_SOC_SPI_FLASH_BASE   0x30000000
#define ASPEED_SOC_SPI2_FLASH_BASE  0x38000000

/*
 * DMA DRAM addresses should be 4 bytes aligned and the valid address
 * range is 0x40000000 - 0x5FFFFFFF (AST2400)
 *          0x80000000 - 0xBFFFFFFF (AST2500)
 *
 * DMA flash addresses should be 4 bytes aligned and the valid address
 * range is 0x20000000 - 0x2FFFFFFF.
 *
 * DMA length is from 4 bytes to 32MB
 *   0: 4 bytes
 *   0x7FFFFF: 32M bytes
 */
#define DMA_DRAM_ADDR(s, val)   ((s)->sdram_base | \
                                 ((val) & (s)->ctrl->dma_dram_mask))
#define DMA_FLASH_ADDR(s, val)  ((s)->ctrl->flash_window_base | \
                                 ((val) & (s)->ctrl->dma_flash_mask))
#define DMA_LENGTH(val)         ((val) & 0x01FFFFFC)

/* Flash opcodes. */
#define SPI_OP_READ       0x03    /* Read data bytes (low frequency) */

#define SNOOP_OFF         0xFF
#define SNOOP_START       0x0
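
/*
 * snoop_index records how many bytes of the current user mode
 * transfer have been seen: SNOOP_START when the next byte is expected
 * to be a command, SNOOP_OFF when snooping is disabled. snoop_dummies
 * counts the dummy cycles still to be faked. See aspeed_smc_do_snoop().
 */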

/*
 * Default segments mapping addresses and size for each peripheral per
 * controller. These can be changed when board is initialized with the
 * Segment Address Registers.
 */
static const AspeedSegments aspeed_segments_legacy[] = {
    { 0x10000000, 32 * 1024 * 1024 },
};

static const AspeedSegments aspeed_segments_fmc[] = {
    { 0x20000000, 64 * 1024 * 1024 }, /* start address is readonly */
    { 0x24000000, 32 * 1024 * 1024 },
    { 0x26000000, 32 * 1024 * 1024 },
    { 0x28000000, 32 * 1024 * 1024 },
    { 0x2A000000, 32 * 1024 * 1024 }
};

static const AspeedSegments aspeed_segments_spi[] = {
    { 0x30000000, 64 * 1024 * 1024 },
};

static const AspeedSegments aspeed_segments_ast2500_fmc[] = {
    { 0x20000000, 128 * 1024 * 1024 }, /* start address is readonly */
    { 0x28000000, 32 * 1024 * 1024 },
    { 0x2A000000, 32 * 1024 * 1024 },
};

static const AspeedSegments aspeed_segments_ast2500_spi1[] = {
    { 0x30000000, 32 * 1024 * 1024 }, /* start address is readonly */
    { 0x32000000, 96 * 1024 * 1024 }, /* end address is readonly */
};

static const AspeedSegments aspeed_segments_ast2500_spi2[] = {
    { 0x38000000, 32 * 1024 * 1024 }, /* start address is readonly */
    { 0x3A000000, 96 * 1024 * 1024 }, /* end address is readonly */
};

static uint32_t aspeed_smc_segment_to_reg(const AspeedSMCState *s,
                                          const AspeedSegments *seg);
static void aspeed_smc_reg_to_segment(const AspeedSMCState *s, uint32_t reg,
                                      AspeedSegments *seg);

/*
 * AST2600 definitions
 */
#define ASPEED26_SOC_FMC_FLASH_BASE    0x20000000
#define ASPEED26_SOC_SPI_FLASH_BASE    0x30000000
#define ASPEED26_SOC_SPI2_FLASH_BASE   0x50000000

static const AspeedSegments aspeed_segments_ast2600_fmc[] = {
    { 0x0, 128 * MiB }, /* start address is readonly */
    { 128 * MiB, 128 * MiB }, /* default is disabled but needed for -kernel */
    { 0x0, 0 }, /* disabled */
};

static const AspeedSegments aspeed_segments_ast2600_spi1[] = {
    { 0x0, 128 * MiB }, /* start address is readonly */
    { 0x0, 0 }, /* disabled */
};

static const AspeedSegments aspeed_segments_ast2600_spi2[] = {
    { 0x0, 128 * MiB }, /* start address is readonly */
    { 0x0, 0 }, /* disabled */
    { 0x0, 0 }, /* disabled */
};

static uint32_t aspeed_2600_smc_segment_to_reg(const AspeedSMCState *s,
                                               const AspeedSegments *seg);
static void aspeed_2600_smc_reg_to_segment(const AspeedSMCState *s,
                                           uint32_t reg, AspeedSegments *seg);
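
/*
 * One AspeedSMCController entry per controller model. Each entry
 * describes the register layout, the number of supported peripherals
 * (CS lines), the flash mapping window and the segment encoding
 * helpers of that controller. aspeed_smc_register_types() registers a
 * QOM type per entry, using the entry as class data.
 */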

static const AspeedSMCController controllers[] = {
    {
        .name              = "aspeed.smc-ast2400",
        .r_conf            = R_CONF,
        .r_ce_ctrl         = R_CE_CTRL,
        .r_ctrl0           = R_CTRL0,
        .r_timings         = R_TIMINGS,
        .nregs_timings     = 1,
        .conf_enable_w0    = CONF_ENABLE_W0,
        .max_peripherals   = 1,
        .segments          = aspeed_segments_legacy,
        .flash_window_base = ASPEED_SOC_SMC_FLASH_BASE,
        .flash_window_size = 0x6000000,
        .has_dma           = false,
        .nregs             = ASPEED_SMC_R_SMC_MAX,
        .segment_to_reg    = aspeed_smc_segment_to_reg,
        .reg_to_segment    = aspeed_smc_reg_to_segment,
    }, {
        .name              = "aspeed.fmc-ast2400",
        .r_conf            = R_CONF,
        .r_ce_ctrl         = R_CE_CTRL,
        .r_ctrl0           = R_CTRL0,
        .r_timings         = R_TIMINGS,
        .nregs_timings     = 1,
        .conf_enable_w0    = CONF_ENABLE_W0,
        .max_peripherals   = 5,
        .segments          = aspeed_segments_fmc,
        .flash_window_base = ASPEED_SOC_FMC_FLASH_BASE,
        .flash_window_size = 0x10000000,
        .has_dma           = true,
        .dma_flash_mask    = 0x0FFFFFFC,
        .dma_dram_mask     = 0x1FFFFFFC,
        .nregs             = ASPEED_SMC_R_MAX,
        .segment_to_reg    = aspeed_smc_segment_to_reg,
        .reg_to_segment    = aspeed_smc_reg_to_segment,
    }, {
        .name              = "aspeed.spi1-ast2400",
        .r_conf            = R_SPI_CONF,
        .r_ce_ctrl         = 0xff,
        .r_ctrl0           = R_SPI_CTRL0,
        .r_timings         = R_SPI_TIMINGS,
        .nregs_timings     = 1,
        .conf_enable_w0    = SPI_CONF_ENABLE_W0,
        .max_peripherals   = 1,
        .segments          = aspeed_segments_spi,
        .flash_window_base = ASPEED_SOC_SPI_FLASH_BASE,
        .flash_window_size = 0x10000000,
        .has_dma           = false,
        .nregs             = ASPEED_SMC_R_SPI_MAX,
        .segment_to_reg    = aspeed_smc_segment_to_reg,
        .reg_to_segment    = aspeed_smc_reg_to_segment,
    }, {
        .name              = "aspeed.fmc-ast2500",
        .r_conf            = R_CONF,
        .r_ce_ctrl         = R_CE_CTRL,
        .r_ctrl0           = R_CTRL0,
        .r_timings         = R_TIMINGS,
        .nregs_timings     = 1,
        .conf_enable_w0    = CONF_ENABLE_W0,
        .max_peripherals   = 3,
        .segments          = aspeed_segments_ast2500_fmc,
        .flash_window_base = ASPEED_SOC_FMC_FLASH_BASE,
        .flash_window_size = 0x10000000,
        .has_dma           = true,
        .dma_flash_mask    = 0x0FFFFFFC,
        .dma_dram_mask     = 0x3FFFFFFC,
        .nregs             = ASPEED_SMC_R_MAX,
        .segment_to_reg    = aspeed_smc_segment_to_reg,
        .reg_to_segment    = aspeed_smc_reg_to_segment,
    }, {
        .name              = "aspeed.spi1-ast2500",
        .r_conf            = R_CONF,
        .r_ce_ctrl         = R_CE_CTRL,
        .r_ctrl0           = R_CTRL0,
        .r_timings         = R_TIMINGS,
        .nregs_timings     = 1,
        .conf_enable_w0    = CONF_ENABLE_W0,
        .max_peripherals   = 2,
        .segments          = aspeed_segments_ast2500_spi1,
        .flash_window_base = ASPEED_SOC_SPI_FLASH_BASE,
        .flash_window_size = 0x8000000,
        .has_dma           = false,
        .nregs             = ASPEED_SMC_R_MAX,
        .segment_to_reg    = aspeed_smc_segment_to_reg,
        .reg_to_segment    = aspeed_smc_reg_to_segment,
    }, {
        .name              = "aspeed.spi2-ast2500",
        .r_conf            = R_CONF,
        .r_ce_ctrl         = R_CE_CTRL,
        .r_ctrl0           = R_CTRL0,
        .r_timings         = R_TIMINGS,
        .nregs_timings     = 1,
        .conf_enable_w0    = CONF_ENABLE_W0,
        .max_peripherals   = 2,
        .segments          = aspeed_segments_ast2500_spi2,
        .flash_window_base = ASPEED_SOC_SPI2_FLASH_BASE,
        .flash_window_size = 0x8000000,
        .has_dma           = false,
        .nregs             = ASPEED_SMC_R_MAX,
        .segment_to_reg    = aspeed_smc_segment_to_reg,
        .reg_to_segment    = aspeed_smc_reg_to_segment,
    }, {
        .name              = "aspeed.fmc-ast2600",
        .r_conf            = R_CONF,
        .r_ce_ctrl         = R_CE_CTRL,
        .r_ctrl0           = R_CTRL0,
        .r_timings         = R_TIMINGS,
        .nregs_timings     = 1,
        .conf_enable_w0    = CONF_ENABLE_W0,
        .max_peripherals   = 3,
        .segments          = aspeed_segments_ast2600_fmc,
        .flash_window_base = ASPEED26_SOC_FMC_FLASH_BASE,
        .flash_window_size = 0x10000000,
        .has_dma           = true,
        .dma_flash_mask    = 0x0FFFFFFC,
        .dma_dram_mask     = 0x3FFFFFFC,
        .nregs             = ASPEED_SMC_R_MAX,
        .segment_to_reg    = aspeed_2600_smc_segment_to_reg,
        .reg_to_segment    = aspeed_2600_smc_reg_to_segment,
    }, {
        .name              = "aspeed.spi1-ast2600",
        .r_conf            = R_CONF,
        .r_ce_ctrl         = R_CE_CTRL,
        .r_ctrl0           = R_CTRL0,
        .r_timings         = R_TIMINGS,
        .nregs_timings     = 2,
        .conf_enable_w0    = CONF_ENABLE_W0,
        .max_peripherals   = 2,
        .segments          = aspeed_segments_ast2600_spi1,
        .flash_window_base = ASPEED26_SOC_SPI_FLASH_BASE,
        .flash_window_size = 0x10000000,
        .has_dma           = true,
        .dma_flash_mask    = 0x0FFFFFFC,
        .dma_dram_mask     = 0x3FFFFFFC,
        .nregs             = ASPEED_SMC_R_MAX,
        .segment_to_reg    = aspeed_2600_smc_segment_to_reg,
        .reg_to_segment    = aspeed_2600_smc_reg_to_segment,
    }, {
        .name              = "aspeed.spi2-ast2600",
        .r_conf            = R_CONF,
        .r_ce_ctrl         = R_CE_CTRL,
        .r_ctrl0           = R_CTRL0,
        .r_timings         = R_TIMINGS,
        .nregs_timings     = 3,
        .conf_enable_w0    = CONF_ENABLE_W0,
        .max_peripherals   = 3,
        .segments          = aspeed_segments_ast2600_spi2,
        .flash_window_base = ASPEED26_SOC_SPI2_FLASH_BASE,
        .flash_window_size = 0x10000000,
        .has_dma           = true,
        .dma_flash_mask    = 0x0FFFFFFC,
        .dma_dram_mask     = 0x3FFFFFFC,
        .nregs             = ASPEED_SMC_R_MAX,
        .segment_to_reg    = aspeed_2600_smc_segment_to_reg,
        .reg_to_segment    = aspeed_2600_smc_reg_to_segment,
    },
};

/*
 * The Segment Registers of the AST2400 and AST2500 have a 8MB
 * unit. The address range of a flash SPI peripheral is encoded with
 * absolute addresses which should be part of the overall controller
 * window.
 */
static uint32_t aspeed_smc_segment_to_reg(const AspeedSMCState *s,
                                          const AspeedSegments *seg)
{
    uint32_t reg = 0;
    reg |= ((seg->addr >> 23) & SEG_START_MASK) << SEG_START_SHIFT;
    reg |= (((seg->addr + seg->size) >> 23) & SEG_END_MASK) << SEG_END_SHIFT;
    return reg;
}

static void aspeed_smc_reg_to_segment(const AspeedSMCState *s,
                                      uint32_t reg, AspeedSegments *seg)
{
    seg->addr = ((reg >> SEG_START_SHIFT) & SEG_START_MASK) << 23;
    seg->size = (((reg >> SEG_END_SHIFT) & SEG_END_MASK) << 23) - seg->addr;
}

/*
 * The Segment Registers of the AST2600 have a 1MB unit. The address
 * range of a flash SPI peripheral is encoded with offsets in the overall
 * controller window. The previous SoC AST2400 and AST2500 used
 * absolute addresses. Only bits [27:20] are relevant and the end
 * address is an upper bound limit.
 */
#define AST2600_SEG_ADDR_MASK 0x0ff00000

static uint32_t aspeed_2600_smc_segment_to_reg(const AspeedSMCState *s,
                                               const AspeedSegments *seg)
{
    uint32_t reg = 0;

    /* Disabled segments have a nil register */
    if (!seg->size) {
        return 0;
    }

    reg |= (seg->addr & AST2600_SEG_ADDR_MASK) >> 16; /* start offset */
    reg |= (seg->addr + seg->size - 1) & AST2600_SEG_ADDR_MASK; /* end offset */
    return reg;
}

static void aspeed_2600_smc_reg_to_segment(const AspeedSMCState *s,
                                           uint32_t reg, AspeedSegments *seg)
{
    uint32_t start_offset = (reg << 16) & AST2600_SEG_ADDR_MASK;
    uint32_t end_offset = reg & AST2600_SEG_ADDR_MASK;

    if (reg) {
        seg->addr = s->ctrl->flash_window_base + start_offset;
        seg->size = end_offset + MiB - start_offset;
    } else {
        seg->addr = s->ctrl->flash_window_base;
        seg->size = 0;
    }
}
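
/*
 * Encoding examples derived from the helpers above (illustrative
 * values only):
 *
 * - AST2400/AST2500: a 64MB segment at 0x20000000 ends at 0x24000000,
 *   i.e. 0x40 and 0x48 in 8MB units, and encodes as 0x48400000.
 *
 * - AST2600: a 128MB segment at offset 0x0 of the FMC window has an
 *   upper bound of 0x07FFFFFF and encodes as 0x07F00000 (end offset
 *   in bits [27:20], start offset in bits [11:4]).
 */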

static bool aspeed_smc_flash_overlap(const AspeedSMCState *s,
                                     const AspeedSegments *new,
                                     int cs)
{
    AspeedSegments seg;
    int i;

    for (i = 0; i < s->ctrl->max_peripherals; i++) {
        if (i == cs) {
            continue;
        }

        s->ctrl->reg_to_segment(s, s->regs[R_SEG_ADDR0 + i], &seg);

        if (new->addr + new->size > seg.addr &&
            new->addr < seg.addr + seg.size) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: new segment CS%d [ 0x%"
                          HWADDR_PRIx" - 0x%"HWADDR_PRIx" ] overlaps with "
                          "CS%d [ 0x%"HWADDR_PRIx" - 0x%"HWADDR_PRIx" ]\n",
                          s->ctrl->name, cs, new->addr, new->addr + new->size,
                          i, seg.addr, seg.addr + seg.size);
            return true;
        }
    }
    return false;
}

static void aspeed_smc_flash_set_segment_region(AspeedSMCState *s, int cs,
                                                uint64_t regval)
{
    AspeedSMCFlash *fl = &s->flashes[cs];
    AspeedSegments seg;

    s->ctrl->reg_to_segment(s, regval, &seg);

    memory_region_transaction_begin();
    memory_region_set_size(&fl->mmio, seg.size);
    memory_region_set_address(&fl->mmio,
                              seg.addr - s->ctrl->flash_window_base);
    memory_region_set_enabled(&fl->mmio, !!seg.size);
    memory_region_transaction_commit();

    s->regs[R_SEG_ADDR0 + cs] = regval;
}

static void aspeed_smc_flash_set_segment(AspeedSMCState *s, int cs,
                                         uint64_t new)
{
    AspeedSegments seg;

    s->ctrl->reg_to_segment(s, new, &seg);

    trace_aspeed_smc_flash_set_segment(cs, new, seg.addr, seg.addr + seg.size);

    /* The start address of CS0 is read-only */
    if (cs == 0 && seg.addr != s->ctrl->flash_window_base) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: Tried to change CS0 start address to 0x%"
                      HWADDR_PRIx "\n", s->ctrl->name, seg.addr);
        seg.addr = s->ctrl->flash_window_base;
        new = s->ctrl->segment_to_reg(s, &seg);
    }

    /*
     * The end address of the AST2500 spi controllers is also
     * read-only.
     */
    if ((s->ctrl->segments == aspeed_segments_ast2500_spi1 ||
         s->ctrl->segments == aspeed_segments_ast2500_spi2) &&
        cs == s->ctrl->max_peripherals &&
        seg.addr + seg.size != s->ctrl->segments[cs].addr +
            s->ctrl->segments[cs].size) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: Tried to change CS%d end address to 0x%"
                      HWADDR_PRIx "\n", s->ctrl->name, cs,
                      seg.addr + seg.size);
        seg.size = s->ctrl->segments[cs].addr + s->ctrl->segments[cs].size -
            seg.addr;
        new = s->ctrl->segment_to_reg(s, &seg);
    }

    /* Keep the segment in the overall flash window */
    if (seg.size &&
        (seg.addr + seg.size <= s->ctrl->flash_window_base ||
         seg.addr > s->ctrl->flash_window_base + s->ctrl->flash_window_size)) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: new segment for CS%d is invalid : "
                      "[ 0x%"HWADDR_PRIx" - 0x%"HWADDR_PRIx" ]\n",
                      s->ctrl->name, cs, seg.addr, seg.addr + seg.size);
        return;
    }

    /* Check start address vs. alignment */
    if (seg.size && !QEMU_IS_ALIGNED(seg.addr, seg.size)) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: new segment for CS%d is not "
                      "aligned : [ 0x%"HWADDR_PRIx" - 0x%"HWADDR_PRIx" ]\n",
                      s->ctrl->name, cs, seg.addr, seg.addr + seg.size);
    }

    /* And segments should not overlap (in the specs) */
    aspeed_smc_flash_overlap(s, &seg, cs);

    /* All should be fine now to move the region */
    aspeed_smc_flash_set_segment_region(s, cs, new);
}

static uint64_t aspeed_smc_flash_default_read(void *opaque, hwaddr addr,
                                              unsigned size)
{
    qemu_log_mask(LOG_GUEST_ERROR, "%s: read at 0x%" HWADDR_PRIx
                  " of size %u\n", __func__, addr, size);
    return 0;
}

static void aspeed_smc_flash_default_write(void *opaque, hwaddr addr,
                                           uint64_t data, unsigned size)
{
    qemu_log_mask(LOG_GUEST_ERROR, "%s: To 0x%" HWADDR_PRIx " of size %u: 0x%"
                  PRIx64 "\n", __func__, addr, size, data);
}

static const MemoryRegionOps aspeed_smc_flash_default_ops = {
    .read = aspeed_smc_flash_default_read,
    .write = aspeed_smc_flash_default_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 4,
    },
};

static inline int aspeed_smc_flash_mode(const AspeedSMCFlash *fl)
{
    const AspeedSMCState *s = fl->controller;

    return s->regs[s->r_ctrl0 + fl->id] & CTRL_CMD_MODE_MASK;
}

static inline bool aspeed_smc_is_writable(const AspeedSMCFlash *fl)
{
    const AspeedSMCState *s = fl->controller;

    return s->regs[s->r_conf] & (1 << (s->conf_enable_w0 + fl->id));
}

static inline int aspeed_smc_flash_cmd(const AspeedSMCFlash *fl)
{
    const AspeedSMCState *s = fl->controller;
    int cmd = (s->regs[s->r_ctrl0 + fl->id] >> CTRL_CMD_SHIFT) & CTRL_CMD_MASK;

    /*
     * In read mode, the default SPI command is READ (0x3). In other
     * modes, the command should necessarily be defined
     *
     * TODO: add support for READ4 (0x13) on AST2600
     */
    if (aspeed_smc_flash_mode(fl) == CTRL_READMODE) {
        cmd = SPI_OP_READ;
    }

    if (!cmd) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: no command defined for mode %d\n",
                      __func__, aspeed_smc_flash_mode(fl));
    }

    return cmd;
}

static inline int aspeed_smc_flash_is_4byte(const AspeedSMCFlash *fl)
{
    const AspeedSMCState *s = fl->controller;

    if (s->ctrl->segments == aspeed_segments_spi) {
        return s->regs[s->r_ctrl0] & CTRL_AST2400_SPI_4BYTE;
    } else {
        return s->regs[s->r_ce_ctrl] & (1 << (CTRL_EXTENDED0 + fl->id));
    }
}
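
/*
 * The chip select lines are modeled as outgoing GPIO lines, typically
 * wired to the flash devices by the SoC/board code. The line is
 * driven high (true) to unselect a peripheral and low (false) to
 * select it.
 */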
"un" : ""); 663 664 qemu_set_irq(s->cs_lines[fl->id], unselect); 665 } 666 667 static void aspeed_smc_flash_select(AspeedSMCFlash *fl) 668 { 669 aspeed_smc_flash_do_select(fl, false); 670 } 671 672 static void aspeed_smc_flash_unselect(AspeedSMCFlash *fl) 673 { 674 aspeed_smc_flash_do_select(fl, true); 675 } 676 677 static uint32_t aspeed_smc_check_segment_addr(const AspeedSMCFlash *fl, 678 uint32_t addr) 679 { 680 const AspeedSMCState *s = fl->controller; 681 AspeedSegments seg; 682 683 s->ctrl->reg_to_segment(s, s->regs[R_SEG_ADDR0 + fl->id], &seg); 684 if ((addr % seg.size) != addr) { 685 qemu_log_mask(LOG_GUEST_ERROR, 686 "%s: invalid address 0x%08x for CS%d segment : " 687 "[ 0x%"HWADDR_PRIx" - 0x%"HWADDR_PRIx" ]\n", 688 s->ctrl->name, addr, fl->id, seg.addr, 689 seg.addr + seg.size); 690 addr %= seg.size; 691 } 692 693 return addr; 694 } 695 696 static int aspeed_smc_flash_dummies(const AspeedSMCFlash *fl) 697 { 698 const AspeedSMCState *s = fl->controller; 699 uint32_t r_ctrl0 = s->regs[s->r_ctrl0 + fl->id]; 700 uint32_t dummy_high = (r_ctrl0 >> CTRL_DUMMY_HIGH_SHIFT) & 0x1; 701 uint32_t dummy_low = (r_ctrl0 >> CTRL_DUMMY_LOW_SHIFT) & 0x3; 702 uint32_t dummies = ((dummy_high << 2) | dummy_low) * 8; 703 704 if (r_ctrl0 & CTRL_IO_DUAL_ADDR_DATA) { 705 dummies /= 2; 706 } 707 708 return dummies; 709 } 710 711 static void aspeed_smc_flash_setup(AspeedSMCFlash *fl, uint32_t addr) 712 { 713 const AspeedSMCState *s = fl->controller; 714 uint8_t cmd = aspeed_smc_flash_cmd(fl); 715 int i = aspeed_smc_flash_is_4byte(fl) ? 4 : 3; 716 717 /* Flash access can not exceed CS segment */ 718 addr = aspeed_smc_check_segment_addr(fl, addr); 719 720 ssi_transfer(s->spi, cmd); 721 while (i--) { 722 if (aspeed_smc_addr_byte_enabled(s, i)) { 723 ssi_transfer(s->spi, (addr >> (i * 8)) & 0xff); 724 } 725 } 726 727 /* 728 * Use fake transfers to model dummy bytes. The value should 729 * be configured to some non-zero value in fast read mode and 730 * zero in read mode. But, as the HW allows inconsistent 731 * settings, let's check for fast read mode. 732 */ 733 if (aspeed_smc_flash_mode(fl) == CTRL_FREADMODE) { 734 for (i = 0; i < aspeed_smc_flash_dummies(fl); i++) { 735 ssi_transfer(fl->controller->spi, s->regs[R_DUMMY_DATA] & 0xff); 736 } 737 } 738 } 739 740 static uint64_t aspeed_smc_flash_read(void *opaque, hwaddr addr, unsigned size) 741 { 742 AspeedSMCFlash *fl = opaque; 743 AspeedSMCState *s = fl->controller; 744 uint64_t ret = 0; 745 int i; 746 747 switch (aspeed_smc_flash_mode(fl)) { 748 case CTRL_USERMODE: 749 for (i = 0; i < size; i++) { 750 ret |= ssi_transfer(s->spi, 0x0) << (8 * i); 751 } 752 break; 753 case CTRL_READMODE: 754 case CTRL_FREADMODE: 755 aspeed_smc_flash_select(fl); 756 aspeed_smc_flash_setup(fl, addr); 757 758 for (i = 0; i < size; i++) { 759 ret |= ssi_transfer(s->spi, 0x0) << (8 * i); 760 } 761 762 aspeed_smc_flash_unselect(fl); 763 break; 764 default: 765 qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid flash mode %d\n", 766 __func__, aspeed_smc_flash_mode(fl)); 767 } 768 769 trace_aspeed_smc_flash_read(fl->id, addr, size, ret, 770 aspeed_smc_flash_mode(fl)); 771 return ret; 772 } 773 774 /* 775 * TODO (clg@kaod.org): stolen from xilinx_spips.c. Should move to a 776 * common include header. 

/*
 * TODO (clg@kaod.org): stolen from xilinx_spips.c. Should move to a
 * common include header.
 */
typedef enum {
    READ = 0x3,         READ_4 = 0x13,
    FAST_READ = 0xb,    FAST_READ_4 = 0x0c,
    DOR = 0x3b,         DOR_4 = 0x3c,
    QOR = 0x6b,         QOR_4 = 0x6c,
    DIOR = 0xbb,        DIOR_4 = 0xbc,
    QIOR = 0xeb,        QIOR_4 = 0xec,

    PP = 0x2,           PP_4 = 0x12,
    DPP = 0xa2,
    QPP = 0x32,         QPP_4 = 0x34,
} FlashCMD;

static int aspeed_smc_num_dummies(uint8_t command)
{
    switch (command) { /* check for dummies */
    case READ: /* no dummy bytes/cycles */
    case PP:
    case DPP:
    case QPP:
    case READ_4:
    case PP_4:
    case QPP_4:
        return 0;
    case FAST_READ:
    case DOR:
    case QOR:
    case FAST_READ_4:
    case DOR_4:
    case QOR_4:
        return 1;
    case DIOR:
    case DIOR_4:
        return 2;
    case QIOR:
    case QIOR_4:
        return 4;
    default:
        return -1;
    }
}
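
/*
 * In user mode, the controller has no knowledge of where the dummy
 * cycles of a command sequence are. Writes to the flash window are
 * therefore snooped: the first byte is interpreted as a command to
 * deduce the number of dummy bytes, the address bytes that follow are
 * counted, and the dummy cycles are then replaced with fake transfers
 * of the DUMMY DATA register value so that the flash model stays in
 * sync with the controller.
 */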

static bool aspeed_smc_do_snoop(AspeedSMCFlash *fl, uint64_t data,
                                unsigned size)
{
    AspeedSMCState *s = fl->controller;
    uint8_t addr_width = aspeed_smc_flash_is_4byte(fl) ? 4 : 3;

    trace_aspeed_smc_do_snoop(fl->id, s->snoop_index, s->snoop_dummies,
                              (uint8_t) data & 0xff);

    if (s->snoop_index == SNOOP_OFF) {
        return false; /* Do nothing */

    } else if (s->snoop_index == SNOOP_START) {
        uint8_t cmd = data & 0xff;
        int ndummies = aspeed_smc_num_dummies(cmd);

        /*
         * No dummy cycles are expected with the current command. Turn
         * off snooping and let the transfer proceed normally.
         */
        if (ndummies <= 0) {
            s->snoop_index = SNOOP_OFF;
            return false;
        }

        s->snoop_dummies = ndummies * 8;

    } else if (s->snoop_index >= addr_width + 1) {

        /* The SPI transfer has reached the dummy cycles sequence */
        for (; s->snoop_dummies; s->snoop_dummies--) {
            ssi_transfer(s->spi, s->regs[R_DUMMY_DATA] & 0xff);
        }

        /* If no more dummy cycles are expected, turn off snooping */
        if (!s->snoop_dummies) {
            s->snoop_index = SNOOP_OFF;
        } else {
            s->snoop_index += size;
        }

        /*
         * Dummy cycles have been faked already. Ignore the current
         * SPI transfer
         */
        return true;
    }

    s->snoop_index += size;
    return false;
}

static void aspeed_smc_flash_write(void *opaque, hwaddr addr, uint64_t data,
                                   unsigned size)
{
    AspeedSMCFlash *fl = opaque;
    AspeedSMCState *s = fl->controller;
    int i;

    trace_aspeed_smc_flash_write(fl->id, addr, size, data,
                                 aspeed_smc_flash_mode(fl));

    if (!aspeed_smc_is_writable(fl)) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: flash is not writable at 0x%"
                      HWADDR_PRIx "\n", __func__, addr);
        return;
    }

    switch (aspeed_smc_flash_mode(fl)) {
    case CTRL_USERMODE:
        if (aspeed_smc_do_snoop(fl, data, size)) {
            break;
        }

        for (i = 0; i < size; i++) {
            ssi_transfer(s->spi, (data >> (8 * i)) & 0xff);
        }
        break;
    case CTRL_WRITEMODE:
        aspeed_smc_flash_select(fl);
        aspeed_smc_flash_setup(fl, addr);

        for (i = 0; i < size; i++) {
            ssi_transfer(s->spi, (data >> (8 * i)) & 0xff);
        }

        aspeed_smc_flash_unselect(fl);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid flash mode %d\n",
                      __func__, aspeed_smc_flash_mode(fl));
    }
}

static const MemoryRegionOps aspeed_smc_flash_ops = {
    .read = aspeed_smc_flash_read,
    .write = aspeed_smc_flash_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 4,
    },
};

static void aspeed_smc_flash_update_ctrl(AspeedSMCFlash *fl, uint32_t value)
{
    AspeedSMCState *s = fl->controller;
    bool unselect;

    /* User mode selects the CS, other modes unselect */
    unselect = (value & CTRL_CMD_MODE_MASK) != CTRL_USERMODE;

    /* A change of CTRL_CE_STOP_ACTIVE from 0 to 1, unselects the CS */
    if (!(s->regs[s->r_ctrl0 + fl->id] & CTRL_CE_STOP_ACTIVE) &&
        value & CTRL_CE_STOP_ACTIVE) {
        unselect = true;
    }

    s->regs[s->r_ctrl0 + fl->id] = value;

    s->snoop_index = unselect ? SNOOP_OFF : SNOOP_START;

    aspeed_smc_flash_do_select(fl, unselect);
}

static void aspeed_smc_reset(DeviceState *d)
{
    AspeedSMCState *s = ASPEED_SMC(d);
    int i;

    memset(s->regs, 0, sizeof s->regs);

    /* Unselect all peripherals */
    for (i = 0; i < s->num_cs; ++i) {
        s->regs[s->r_ctrl0 + i] |= CTRL_CE_STOP_ACTIVE;
        qemu_set_irq(s->cs_lines[i], true);
    }

    /* setup the default segment register values and regions for all */
    for (i = 0; i < s->ctrl->max_peripherals; ++i) {
        aspeed_smc_flash_set_segment_region(s, i,
                    s->ctrl->segment_to_reg(s, &s->ctrl->segments[i]));
    }

    /* HW strapping flash type for the AST2600 controllers */
    if (s->ctrl->segments == aspeed_segments_ast2600_fmc) {
        /* flash type is fixed to SPI for all */
        s->regs[s->r_conf] |= (CONF_FLASH_TYPE_SPI << CONF_FLASH_TYPE0);
        s->regs[s->r_conf] |= (CONF_FLASH_TYPE_SPI << CONF_FLASH_TYPE1);
        s->regs[s->r_conf] |= (CONF_FLASH_TYPE_SPI << CONF_FLASH_TYPE2);
    }

    /* HW strapping flash type for FMC controllers */
    if (s->ctrl->segments == aspeed_segments_ast2500_fmc) {
        /* flash type is fixed to SPI for CE0 and CE1 */
        s->regs[s->r_conf] |= (CONF_FLASH_TYPE_SPI << CONF_FLASH_TYPE0);
        s->regs[s->r_conf] |= (CONF_FLASH_TYPE_SPI << CONF_FLASH_TYPE1);
    }

    /*
     * HW strapping for AST2400 FMC controllers (SCU70). Let's use the
     * configuration of the palmetto-bmc machine.
     */
    if (s->ctrl->segments == aspeed_segments_fmc) {
        s->regs[s->r_conf] |= (CONF_FLASH_TYPE_SPI << CONF_FLASH_TYPE0);
    }

    s->snoop_index = SNOOP_OFF;
    s->snoop_dummies = 0;
}

static uint64_t aspeed_smc_read(void *opaque, hwaddr addr, unsigned int size)
{
    AspeedSMCState *s = ASPEED_SMC(opaque);

    addr >>= 2;

    if (addr == s->r_conf ||
        (addr >= s->r_timings &&
         addr < s->r_timings + s->ctrl->nregs_timings) ||
        addr == s->r_ce_ctrl ||
        addr == R_CE_CMD_CTRL ||
        addr == R_INTR_CTRL ||
        addr == R_DUMMY_DATA ||
        (s->ctrl->has_dma && addr == R_DMA_CTRL) ||
        (s->ctrl->has_dma && addr == R_DMA_FLASH_ADDR) ||
        (s->ctrl->has_dma && addr == R_DMA_DRAM_ADDR) ||
        (s->ctrl->has_dma && addr == R_DMA_LEN) ||
        (s->ctrl->has_dma && addr == R_DMA_CHECKSUM) ||
        (addr >= R_SEG_ADDR0 &&
         addr < R_SEG_ADDR0 + s->ctrl->max_peripherals) ||
        (addr >= s->r_ctrl0 && addr < s->r_ctrl0 + s->ctrl->max_peripherals)) {

        trace_aspeed_smc_read(addr, size, s->regs[addr]);

        return s->regs[addr];
    } else {
        qemu_log_mask(LOG_UNIMP, "%s: not implemented: 0x%" HWADDR_PRIx "\n",
                      __func__, addr);
        return -1;
    }
}

static uint8_t aspeed_smc_hclk_divisor(uint8_t hclk_mask)
{
    /* HCLK/1 .. HCLK/16 */
    const uint8_t hclk_divisors[] = {
        15, 7, 14, 6, 13, 5, 12, 4, 11, 3, 10, 2, 9, 1, 8, 0
    };
    int i;

    for (i = 0; i < ARRAY_SIZE(hclk_divisors); i++) {
        if (hclk_mask == hclk_divisors[i]) {
            return i + 1;
        }
    }

    qemu_log_mask(LOG_GUEST_ERROR, "invalid HCLK mask %x\n", hclk_mask);
    return 0;
}

/*
 * When doing calibration, the SPI clock rate in the CE0 Control
 * Register and the read delay cycles in the Read Timing Compensation
 * Register are set using bit[11:4] of the DMA Control Register.
 */
static void aspeed_smc_dma_calibration(AspeedSMCState *s)
{
    uint8_t delay =
        (s->regs[R_DMA_CTRL] >> DMA_CTRL_DELAY_SHIFT) & DMA_CTRL_DELAY_MASK;
    uint8_t hclk_mask =
        (s->regs[R_DMA_CTRL] >> DMA_CTRL_FREQ_SHIFT) & DMA_CTRL_FREQ_MASK;
    uint8_t hclk_div = aspeed_smc_hclk_divisor(hclk_mask);
    uint32_t hclk_shift = (hclk_div - 1) << 2;
    uint8_t cs;

    /*
     * The Read Timing Compensation Register values apply to all CS on
     * the SPI bus and only HCLK/1 - HCLK/5 can have tunable delays
     */
    if (hclk_div && hclk_div < 6) {
        s->regs[s->r_timings] &= ~(0xf << hclk_shift);
        s->regs[s->r_timings] |= delay << hclk_shift;
    }

    /*
     * TODO: compute the CS from the DMA address and the segment
     * registers. This is not really a problem for now because the
     * Timing Register values apply to all CS and software uses CS0 to
     * do calibration.
     */
    cs = 0;
    s->regs[s->r_ctrl0 + cs] &=
        ~(CE_CTRL_CLOCK_FREQ_MASK << CE_CTRL_CLOCK_FREQ_SHIFT);
    s->regs[s->r_ctrl0 + cs] |= CE_CTRL_CLOCK_FREQ(hclk_div);
}
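
/*
 * For example, with the definitions above, HCLK/2 (hclk_div = 2)
 * gives hclk_shift = 4, so the delay value from the DMA Control
 * Register is copied into bits [7:4] of the Read Timing Compensation
 * register, and the clock frequency field of the CE0 Control Register
 * is updated accordingly.
 */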

/*
 * Emulate read errors in the DMA Checksum Register for high
 * frequencies and optimistic settings of the Read Timing Compensation
 * Register. This will help in tuning the SPI timing calibration
 * algorithm.
 */
static bool aspeed_smc_inject_read_failure(AspeedSMCState *s)
{
    uint8_t delay =
        (s->regs[R_DMA_CTRL] >> DMA_CTRL_DELAY_SHIFT) & DMA_CTRL_DELAY_MASK;
    uint8_t hclk_mask =
        (s->regs[R_DMA_CTRL] >> DMA_CTRL_FREQ_SHIFT) & DMA_CTRL_FREQ_MASK;

    /*
     * Typical values of a palmetto-bmc machine.
     */
    switch (aspeed_smc_hclk_divisor(hclk_mask)) {
    case 4 ... 16:
        return false;
    case 3: /* at least one HCLK cycle delay */
        return (delay & 0x7) < 1;
    case 2: /* at least two HCLK cycle delay */
        return (delay & 0x7) < 2;
    case 1: /* (> 100MHz) is above the max freq of the controller */
        return true;
    default:
        g_assert_not_reached();
    }
}

/*
 * Accumulate the result of the reads to provide a checksum that will
 * be used to validate the read timing settings.
 */
static void aspeed_smc_dma_checksum(AspeedSMCState *s)
{
    MemTxResult result;
    uint32_t data;

    if (s->regs[R_DMA_CTRL] & DMA_CTRL_WRITE) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid direction for DMA checksum\n", __func__);
        return;
    }

    if (s->regs[R_DMA_CTRL] & DMA_CTRL_CALIB) {
        aspeed_smc_dma_calibration(s);
    }

    while (s->regs[R_DMA_LEN]) {
        data = address_space_ldl_le(&s->flash_as, s->regs[R_DMA_FLASH_ADDR],
                                    MEMTXATTRS_UNSPECIFIED, &result);
        if (result != MEMTX_OK) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: Flash read failed @%08x\n",
                          __func__, s->regs[R_DMA_FLASH_ADDR]);
            return;
        }
        trace_aspeed_smc_dma_checksum(s->regs[R_DMA_FLASH_ADDR], data);

        /*
         * When the DMA is on-going, the DMA registers are updated
         * with the current working addresses and length.
         */
        s->regs[R_DMA_CHECKSUM] += data;
        s->regs[R_DMA_FLASH_ADDR] += 4;
        s->regs[R_DMA_LEN] -= 4;
    }

    if (s->inject_failure && aspeed_smc_inject_read_failure(s)) {
        s->regs[R_DMA_CHECKSUM] = 0xbadc0de;
    }
}

static void aspeed_smc_dma_rw(AspeedSMCState *s)
{
    MemTxResult result;
    uint32_t data;

    trace_aspeed_smc_dma_rw(s->regs[R_DMA_CTRL] & DMA_CTRL_WRITE ?
                            "write" : "read",
                            s->regs[R_DMA_FLASH_ADDR],
                            s->regs[R_DMA_DRAM_ADDR],
                            s->regs[R_DMA_LEN]);
    while (s->regs[R_DMA_LEN]) {
        if (s->regs[R_DMA_CTRL] & DMA_CTRL_WRITE) {
            data = address_space_ldl_le(&s->dram_as, s->regs[R_DMA_DRAM_ADDR],
                                        MEMTXATTRS_UNSPECIFIED, &result);
            if (result != MEMTX_OK) {
                qemu_log_mask(LOG_GUEST_ERROR, "%s: DRAM read failed @%08x\n",
                              __func__, s->regs[R_DMA_DRAM_ADDR]);
                return;
            }

            address_space_stl_le(&s->flash_as, s->regs[R_DMA_FLASH_ADDR],
                                 data, MEMTXATTRS_UNSPECIFIED, &result);
            if (result != MEMTX_OK) {
                qemu_log_mask(LOG_GUEST_ERROR, "%s: Flash write failed @%08x\n",
                              __func__, s->regs[R_DMA_FLASH_ADDR]);
                return;
            }
        } else {
            data = address_space_ldl_le(&s->flash_as, s->regs[R_DMA_FLASH_ADDR],
                                        MEMTXATTRS_UNSPECIFIED, &result);
            if (result != MEMTX_OK) {
                qemu_log_mask(LOG_GUEST_ERROR, "%s: Flash read failed @%08x\n",
                              __func__, s->regs[R_DMA_FLASH_ADDR]);
                return;
            }

            address_space_stl_le(&s->dram_as, s->regs[R_DMA_DRAM_ADDR],
                                 data, MEMTXATTRS_UNSPECIFIED, &result);
            if (result != MEMTX_OK) {
                qemu_log_mask(LOG_GUEST_ERROR, "%s: DRAM write failed @%08x\n",
                              __func__, s->regs[R_DMA_DRAM_ADDR]);
                return;
            }
        }

        /*
         * When the DMA is on-going, the DMA registers are updated
         * with the current working addresses and length.
         */
        s->regs[R_DMA_FLASH_ADDR] += 4;
        s->regs[R_DMA_DRAM_ADDR] += 4;
        s->regs[R_DMA_LEN] -= 4;
        s->regs[R_DMA_CHECKSUM] += data;
    }
}

static void aspeed_smc_dma_stop(AspeedSMCState *s)
{
    /*
     * When the DMA is disabled, INTR_CTRL_DMA_STATUS=0 means the
     * engine is idle
     */
    s->regs[R_INTR_CTRL] &= ~INTR_CTRL_DMA_STATUS;
    s->regs[R_DMA_CHECKSUM] = 0;

    /*
     * Lower the DMA irq in any case. The IRQ control register could
     * have been cleared before disabling the DMA.
     */
    qemu_irq_lower(s->irq);
}

/*
 * When INTR_CTRL_DMA_STATUS=1, the DMA has completed and a new DMA
 * can start even if the result of the previous was not collected.
 */
static bool aspeed_smc_dma_in_progress(AspeedSMCState *s)
{
    return s->regs[R_DMA_CTRL] & DMA_CTRL_ENABLE &&
        !(s->regs[R_INTR_CTRL] & INTR_CTRL_DMA_STATUS);
}

static void aspeed_smc_dma_done(AspeedSMCState *s)
{
    s->regs[R_INTR_CTRL] |= INTR_CTRL_DMA_STATUS;
    if (s->regs[R_INTR_CTRL] & INTR_CTRL_DMA_EN) {
        qemu_irq_raise(s->irq);
    }
}

static void aspeed_smc_dma_ctrl(AspeedSMCState *s, uint64_t dma_ctrl)
{
    if (!(dma_ctrl & DMA_CTRL_ENABLE)) {
        s->regs[R_DMA_CTRL] = dma_ctrl;

        aspeed_smc_dma_stop(s);
        return;
    }

    if (aspeed_smc_dma_in_progress(s)) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: DMA in progress\n", __func__);
        return;
    }

    s->regs[R_DMA_CTRL] = dma_ctrl;

    if (s->regs[R_DMA_CTRL] & DMA_CTRL_CKSUM) {
        aspeed_smc_dma_checksum(s);
    } else {
        aspeed_smc_dma_rw(s);
    }

    aspeed_smc_dma_done(s);
}

static void aspeed_smc_write(void *opaque, hwaddr addr, uint64_t data,
                             unsigned int size)
{
    AspeedSMCState *s = ASPEED_SMC(opaque);
    uint32_t value = data;

    addr >>= 2;

    trace_aspeed_smc_write(addr, size, data);

    if (addr == s->r_conf ||
        (addr >= s->r_timings &&
         addr < s->r_timings + s->ctrl->nregs_timings) ||
        addr == s->r_ce_ctrl) {
        s->regs[addr] = value;
    } else if (addr >= s->r_ctrl0 && addr < s->r_ctrl0 + s->num_cs) {
        int cs = addr - s->r_ctrl0;
        aspeed_smc_flash_update_ctrl(&s->flashes[cs], value);
    } else if (addr >= R_SEG_ADDR0 &&
               addr < R_SEG_ADDR0 + s->ctrl->max_peripherals) {
        int cs = addr - R_SEG_ADDR0;

        if (value != s->regs[R_SEG_ADDR0 + cs]) {
            aspeed_smc_flash_set_segment(s, cs, value);
        }
    } else if (addr == R_CE_CMD_CTRL) {
        s->regs[addr] = value & 0xff;
    } else if (addr == R_DUMMY_DATA) {
        s->regs[addr] = value & 0xff;
    } else if (addr == R_INTR_CTRL) {
        s->regs[addr] = value;
    } else if (s->ctrl->has_dma && addr == R_DMA_CTRL) {
        aspeed_smc_dma_ctrl(s, value);
    } else if (s->ctrl->has_dma && addr == R_DMA_DRAM_ADDR) {
        s->regs[addr] = DMA_DRAM_ADDR(s, value);
    } else if (s->ctrl->has_dma && addr == R_DMA_FLASH_ADDR) {
        s->regs[addr] = DMA_FLASH_ADDR(s, value);
    } else if (s->ctrl->has_dma && addr == R_DMA_LEN) {
        s->regs[addr] = DMA_LENGTH(value);
    } else {
        qemu_log_mask(LOG_UNIMP, "%s: not implemented: 0x%" HWADDR_PRIx "\n",
                      __func__, addr);
        return;
    }
}

static const MemoryRegionOps aspeed_smc_ops = {
    .read = aspeed_smc_read,
    .write = aspeed_smc_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

/*
 * Initialize the custom address spaces for DMAs
 */
static void aspeed_smc_dma_setup(AspeedSMCState *s, Error **errp)
{
    char *name;

    if (!s->dram_mr) {
        error_setg(errp, TYPE_ASPEED_SMC ": 'dram' link not set");
        return;
    }

    name = g_strdup_printf("%s-dma-flash", s->ctrl->name);
    address_space_init(&s->flash_as, &s->mmio_flash, name);
    g_free(name);

    name = g_strdup_printf("%s-dma-dram", s->ctrl->name);
    address_space_init(&s->dram_as, s->dram_mr, name);
    g_free(name);
}

static void aspeed_smc_realize(DeviceState *dev, Error **errp)
{
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
    AspeedSMCState *s = ASPEED_SMC(dev);
    AspeedSMCClass *mc = ASPEED_SMC_GET_CLASS(s);
    int i;
    char name[32];
    hwaddr offset = 0;

    s->ctrl = mc->ctrl;

    /* keep a copy under AspeedSMCState to speed up accesses */
    s->r_conf = s->ctrl->r_conf;
    s->r_ce_ctrl = s->ctrl->r_ce_ctrl;
    s->r_ctrl0 = s->ctrl->r_ctrl0;
    s->r_timings = s->ctrl->r_timings;
    s->conf_enable_w0 = s->ctrl->conf_enable_w0;

    /* Enforce some real HW limits */
    if (s->num_cs > s->ctrl->max_peripherals) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: num_cs cannot exceed: %d\n",
                      __func__, s->ctrl->max_peripherals);
        s->num_cs = s->ctrl->max_peripherals;
    }

    /* DMA irq. Keep it first for the initialization in the SoC */
    sysbus_init_irq(sbd, &s->irq);

    s->spi = ssi_create_bus(dev, "spi");

    /* Setup cs_lines for peripherals */
    s->cs_lines = g_new0(qemu_irq, s->num_cs);

    for (i = 0; i < s->num_cs; ++i) {
        sysbus_init_irq(sbd, &s->cs_lines[i]);
    }

    /* The memory region for the controller registers */
    memory_region_init_io(&s->mmio, OBJECT(s), &aspeed_smc_ops, s,
                          s->ctrl->name, s->ctrl->nregs * 4);
    sysbus_init_mmio(sbd, &s->mmio);

    /*
     * The container memory region representing the address space
     * window in which the flash modules are mapped. The size and
     * address depends on the SoC model and controller type.
     */
    snprintf(name, sizeof(name), "%s.flash", s->ctrl->name);

    memory_region_init_io(&s->mmio_flash, OBJECT(s),
                          &aspeed_smc_flash_default_ops, s, name,
                          s->ctrl->flash_window_size);
    sysbus_init_mmio(sbd, &s->mmio_flash);

    s->flashes = g_new0(AspeedSMCFlash, s->ctrl->max_peripherals);

    /*
     * Let's create a sub memory region for each possible peripheral. All
     * have a configurable memory segment in the overall flash mapping
     * window of the controller but, there is not necessarily a flash
     * module behind to handle the memory accesses. This depends on
     * the board configuration.
     */
    for (i = 0; i < s->ctrl->max_peripherals; ++i) {
        AspeedSMCFlash *fl = &s->flashes[i];

        snprintf(name, sizeof(name), "%s.%d", s->ctrl->name, i);

        fl->id = i;
        fl->controller = s;
        fl->size = s->ctrl->segments[i].size;
        memory_region_init_io(&fl->mmio, OBJECT(s), &aspeed_smc_flash_ops,
                              fl, name, fl->size);
        memory_region_add_subregion(&s->mmio_flash, offset, &fl->mmio);
        offset += fl->size;
    }

    /* DMA support */
    if (s->ctrl->has_dma) {
        aspeed_smc_dma_setup(s, errp);
    }
}

static const VMStateDescription vmstate_aspeed_smc = {
    .name = "aspeed.smc",
    .version_id = 2,
    .minimum_version_id = 2,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_ARRAY(regs, AspeedSMCState, ASPEED_SMC_R_MAX),
        VMSTATE_UINT8(snoop_index, AspeedSMCState),
        VMSTATE_UINT8(snoop_dummies, AspeedSMCState),
        VMSTATE_END_OF_LIST()
    }
};

static Property aspeed_smc_properties[] = {
    DEFINE_PROP_UINT32("num-cs", AspeedSMCState, num_cs, 1),
    DEFINE_PROP_BOOL("inject-failure", AspeedSMCState, inject_failure, false),
    DEFINE_PROP_UINT64("sdram-base", AspeedSMCState, sdram_base, 0),
    DEFINE_PROP_LINK("dram", AspeedSMCState, dram_mr,
                     TYPE_MEMORY_REGION, MemoryRegion *),
    DEFINE_PROP_END_OF_LIST(),
};

static void aspeed_smc_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    AspeedSMCClass *mc = ASPEED_SMC_CLASS(klass);

    dc->realize = aspeed_smc_realize;
    dc->reset = aspeed_smc_reset;
    device_class_set_props(dc, aspeed_smc_properties);
    dc->vmsd = &vmstate_aspeed_smc;
    mc->ctrl = data;
}

static const TypeInfo aspeed_smc_info = {
    .name           = TYPE_ASPEED_SMC,
    .parent         = TYPE_SYS_BUS_DEVICE,
    .instance_size  = sizeof(AspeedSMCState),
    .class_size     = sizeof(AspeedSMCClass),
    .abstract       = true,
};

static void aspeed_smc_register_types(void)
{
    int i;

    type_register_static(&aspeed_smc_info);
    for (i = 0; i < ARRAY_SIZE(controllers); ++i) {
        TypeInfo ti = {
            .name       = controllers[i].name,
            .parent     = TYPE_ASPEED_SMC,
            .class_init = aspeed_smc_class_init,
            .class_data = (void *)&controllers[i],
        };
        type_register(&ti);
    }
}

type_init(aspeed_smc_register_types)