/*
 * ASPEED AST2400 SMC Controller (SPI Flash Only)
 *
 * Copyright (C) 2016 IBM Corp.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "hw/sysbus.h"
#include "migration/vmstate.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "exec/address-spaces.h"
#include "qemu/units.h"
#include "trace.h"

#include "hw/irq.h"
#include "hw/qdev-properties.h"
#include "hw/ssi/aspeed_smc.h"

/* CE Type Setting Register */
#define R_CONF            (0x00 / 4)
#define   CONF_LEGACY_DISABLE  (1 << 31)
#define   CONF_ENABLE_W4       20
#define   CONF_ENABLE_W3       19
#define   CONF_ENABLE_W2       18
#define   CONF_ENABLE_W1       17
#define   CONF_ENABLE_W0       16
#define   CONF_FLASH_TYPE4     8
#define   CONF_FLASH_TYPE3     6
#define   CONF_FLASH_TYPE2     4
#define   CONF_FLASH_TYPE1     2
#define   CONF_FLASH_TYPE0     0
#define     CONF_FLASH_TYPE_NOR   0x0
#define     CONF_FLASH_TYPE_NAND  0x1
#define     CONF_FLASH_TYPE_SPI   0x2 /* AST2600 is SPI only */

/* CE Control Register */
#define R_CE_CTRL         (0x04 / 4)
#define   CTRL_EXTENDED4       4  /* 32 bit addressing for SPI */
#define   CTRL_EXTENDED3       3  /* 32 bit addressing for SPI */
#define   CTRL_EXTENDED2       2  /* 32 bit addressing for SPI */
#define   CTRL_EXTENDED1       1  /* 32 bit addressing for SPI */
#define   CTRL_EXTENDED0       0  /* 32 bit addressing for SPI */

/* Interrupt Control and Status Register */
#define R_INTR_CTRL       (0x08 / 4)
#define   INTR_CTRL_DMA_STATUS            (1 << 11)
#define   INTR_CTRL_CMD_ABORT_STATUS      (1 << 10)
#define   INTR_CTRL_WRITE_PROTECT_STATUS  (1 << 9)
#define   INTR_CTRL_DMA_EN                (1 << 3)
#define   INTR_CTRL_CMD_ABORT_EN          (1 << 2)
#define   INTR_CTRL_WRITE_PROTECT_EN      (1 << 1)

/* Command Control Register */
#define R_CE_CMD_CTRL     (0x0C / 4)
#define   CTRL_ADDR_BYTE0_DISABLE_SHIFT   4
#define   CTRL_DATA_BYTE0_DISABLE_SHIFT   0

#define aspeed_smc_addr_byte_enabled(s, i)                               \
    (!((s)->regs[R_CE_CMD_CTRL] & (1 << (CTRL_ADDR_BYTE0_DISABLE_SHIFT + (i)))))
#define aspeed_smc_data_byte_enabled(s, i)                               \
    (!((s)->regs[R_CE_CMD_CTRL] & (1 << (CTRL_DATA_BYTE0_DISABLE_SHIFT + (i)))))
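/*
 * For example, a value of 0x30 in R_CE_CMD_CTRL sets the disable bits
 * of address bytes 0 and 1, and aspeed_smc_flash_setup() below will
 * then skip those bytes when shifting out the address of a command.
 */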
/* CEx Control Register */
#define R_CTRL0           (0x10 / 4)
#define   CTRL_IO_QPI              (1 << 31)
#define   CTRL_IO_QUAD_DATA        (1 << 30)
#define   CTRL_IO_DUAL_DATA        (1 << 29)
#define   CTRL_IO_DUAL_ADDR_DATA   (1 << 28) /* Includes dummies */
#define   CTRL_IO_QUAD_ADDR_DATA   (1 << 28) /* Includes dummies */
#define   CTRL_CMD_SHIFT           16
#define   CTRL_CMD_MASK            0xff
#define   CTRL_DUMMY_HIGH_SHIFT    14
#define   CTRL_AST2400_SPI_4BYTE   (1 << 13)
#define CE_CTRL_CLOCK_FREQ_SHIFT   8
#define CE_CTRL_CLOCK_FREQ_MASK    0xf
#define CE_CTRL_CLOCK_FREQ(div)                                          \
    (((div) & CE_CTRL_CLOCK_FREQ_MASK) << CE_CTRL_CLOCK_FREQ_SHIFT)
#define   CTRL_DUMMY_LOW_SHIFT     6 /* 2 bits [7:6] */
#define   CTRL_CE_STOP_ACTIVE      (1 << 2)
#define   CTRL_CMD_MODE_MASK       0x3
#define     CTRL_READMODE          0x0
#define     CTRL_FREADMODE         0x1
#define     CTRL_WRITEMODE         0x2
#define     CTRL_USERMODE          0x3
#define R_CTRL1           (0x14 / 4)
#define R_CTRL2           (0x18 / 4)
#define R_CTRL3           (0x1C / 4)
#define R_CTRL4           (0x20 / 4)

/* CEx Segment Address Register */
#define R_SEG_ADDR0       (0x30 / 4)
#define   SEG_END_SHIFT        24   /* 8MB units */
#define   SEG_END_MASK         0xff
#define   SEG_START_SHIFT      16   /* address bit [A29-A23] */
#define   SEG_START_MASK       0xff
#define R_SEG_ADDR1       (0x34 / 4)
#define R_SEG_ADDR2       (0x38 / 4)
#define R_SEG_ADDR3       (0x3C / 4)
#define R_SEG_ADDR4       (0x40 / 4)

/* Misc Control Register #1 */
#define R_MISC_CTRL1      (0x50 / 4)

/* SPI dummy cycle data */
#define R_DUMMY_DATA      (0x54 / 4)

/* DMA Control/Status Register */
#define R_DMA_CTRL        (0x80 / 4)
#define   DMA_CTRL_DELAY_MASK   0xf
#define   DMA_CTRL_DELAY_SHIFT  8
#define   DMA_CTRL_FREQ_MASK    0xf
#define   DMA_CTRL_FREQ_SHIFT   4
#define   DMA_CTRL_CALIB        (1 << 3)
#define   DMA_CTRL_CKSUM        (1 << 2)
#define   DMA_CTRL_WRITE        (1 << 1)
#define   DMA_CTRL_ENABLE       (1 << 0)

/* DMA Flash Side Address */
#define R_DMA_FLASH_ADDR  (0x84 / 4)

/* DMA DRAM Side Address */
#define R_DMA_DRAM_ADDR   (0x88 / 4)

/* DMA Length Register */
#define R_DMA_LEN         (0x8C / 4)

/* Checksum Calculation Result */
#define R_DMA_CHECKSUM    (0x90 / 4)

/* Read Timing Compensation Register */
#define R_TIMINGS         (0x94 / 4)

/* SPI controller registers and bits (AST2400) */
#define R_SPI_CONF        (0x00 / 4)
#define   SPI_CONF_ENABLE_W0   0
#define R_SPI_CTRL0       (0x4 / 4)
#define R_SPI_MISC_CTRL   (0x10 / 4)
#define R_SPI_TIMINGS     (0x14 / 4)

#define ASPEED_SMC_R_SPI_MAX (0x20 / 4)
#define ASPEED_SMC_R_SMC_MAX (0x20 / 4)

#define ASPEED_SOC_SMC_FLASH_BASE   0x10000000
#define ASPEED_SOC_FMC_FLASH_BASE   0x20000000
#define ASPEED_SOC_SPI_FLASH_BASE   0x30000000
#define ASPEED_SOC_SPI2_FLASH_BASE  0x38000000

/*
 * DMA DRAM addresses should be 4 bytes aligned and the valid address
 * range is 0x40000000 - 0x5FFFFFFF (AST2400)
 *          0x80000000 - 0xBFFFFFFF (AST2500)
 *
 * DMA flash addresses should be 4 bytes aligned and the valid address
 * range is 0x20000000 - 0x2FFFFFFF.
 *
 * DMA length is from 4 bytes to 32MB
 *   0: 4 bytes
 *   0x7FFFFF: 32M bytes
 */
#define DMA_DRAM_ADDR(s, val)   ((val) & (s)->ctrl->dma_dram_mask)
#define DMA_FLASH_ADDR(s, val)  ((s)->ctrl->flash_window_base | \
                                 ((val) & (s)->ctrl->dma_flash_mask))
#define DMA_LENGTH(val)         ((val) & 0x01FFFFFC)

/* Flash opcodes. */
#define SPI_OP_READ       0x03    /* Read data bytes (low frequency) */

#define SNOOP_OFF         0xFF
#define SNOOP_START       0x0
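/*
 * snoop_index is set to SNOOP_START when a CS control register selects
 * user mode and then advances with each byte written by the guest;
 * SNOOP_OFF disables the tracking. See aspeed_smc_do_snoop() below.
 */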
/*
 * Default segments mapping addresses and size for each peripheral per
 * controller. These can be changed when the board is initialized with
 * the Segment Address Registers.
 */
static const AspeedSegments aspeed_segments_legacy[] = {
    { 0x10000000, 32 * 1024 * 1024 },
};

static const AspeedSegments aspeed_segments_fmc[] = {
    { 0x20000000, 64 * 1024 * 1024 }, /* start address is readonly */
    { 0x24000000, 32 * 1024 * 1024 },
    { 0x26000000, 32 * 1024 * 1024 },
    { 0x28000000, 32 * 1024 * 1024 },
    { 0x2A000000, 32 * 1024 * 1024 }
};

static const AspeedSegments aspeed_segments_spi[] = {
    { 0x30000000, 64 * 1024 * 1024 },
};

static const AspeedSegments aspeed_segments_ast2500_fmc[] = {
    { 0x20000000, 128 * 1024 * 1024 }, /* start address is readonly */
    { 0x28000000, 32 * 1024 * 1024 },
    { 0x2A000000, 32 * 1024 * 1024 },
};

static const AspeedSegments aspeed_segments_ast2500_spi1[] = {
    { 0x30000000, 32 * 1024 * 1024 }, /* start address is readonly */
    { 0x32000000, 96 * 1024 * 1024 }, /* end address is readonly */
};

static const AspeedSegments aspeed_segments_ast2500_spi2[] = {
    { 0x38000000, 32 * 1024 * 1024 }, /* start address is readonly */
    { 0x3A000000, 96 * 1024 * 1024 }, /* end address is readonly */
};

static uint32_t aspeed_smc_segment_to_reg(const AspeedSMCState *s,
                                          const AspeedSegments *seg);
static void aspeed_smc_reg_to_segment(const AspeedSMCState *s, uint32_t reg,
                                      AspeedSegments *seg);

/*
 * AST2600 definitions
 */
#define ASPEED26_SOC_FMC_FLASH_BASE    0x20000000
#define ASPEED26_SOC_SPI_FLASH_BASE    0x30000000
#define ASPEED26_SOC_SPI2_FLASH_BASE   0x50000000

static const AspeedSegments aspeed_segments_ast2600_fmc[] = {
    { 0x0, 128 * MiB }, /* start address is readonly */
    { 128 * MiB, 128 * MiB }, /* default is disabled but needed for -kernel */
    { 0x0, 0 }, /* disabled */
};

static const AspeedSegments aspeed_segments_ast2600_spi1[] = {
    { 0x0, 128 * MiB }, /* start address is readonly */
    { 0x0, 0 }, /* disabled */
};

static const AspeedSegments aspeed_segments_ast2600_spi2[] = {
    { 0x0, 128 * MiB }, /* start address is readonly */
    { 0x0, 0 }, /* disabled */
    { 0x0, 0 }, /* disabled */
};

static uint32_t aspeed_2600_smc_segment_to_reg(const AspeedSMCState *s,
                                               const AspeedSegments *seg);
static void aspeed_2600_smc_reg_to_segment(const AspeedSMCState *s,
                                           uint32_t reg, AspeedSegments *seg);

static const AspeedSMCController controllers[] = {
    {
        .name = "aspeed.smc-ast2400",
        .r_conf = R_CONF,
        .r_ce_ctrl = R_CE_CTRL,
        .r_ctrl0 = R_CTRL0,
        .r_timings = R_TIMINGS,
        .nregs_timings = 1,
        .conf_enable_w0 = CONF_ENABLE_W0,
        .max_peripherals = 1,
        .segments = aspeed_segments_legacy,
        .flash_window_base = ASPEED_SOC_SMC_FLASH_BASE,
        .flash_window_size = 0x6000000,
        .has_dma = false,
        .nregs = ASPEED_SMC_R_SMC_MAX,
        .segment_to_reg = aspeed_smc_segment_to_reg,
        .reg_to_segment = aspeed_smc_reg_to_segment,
    }, {
        .name = "aspeed.fmc-ast2400",
        .r_conf = R_CONF,
        .r_ce_ctrl = R_CE_CTRL,
        .r_ctrl0 = R_CTRL0,
        .r_timings = R_TIMINGS,
        .nregs_timings = 1,
        .conf_enable_w0 = CONF_ENABLE_W0,
        .max_peripherals = 5,
        .segments = aspeed_segments_fmc,
        .flash_window_base = ASPEED_SOC_FMC_FLASH_BASE,
        .flash_window_size = 0x10000000,
        .has_dma = true,
        .dma_flash_mask = 0x0FFFFFFC,
        .dma_dram_mask = 0x1FFFFFFC,
        .nregs = ASPEED_SMC_R_MAX,
        .segment_to_reg = aspeed_smc_segment_to_reg,
        .reg_to_segment = aspeed_smc_reg_to_segment,
    }, {
        .name = "aspeed.spi1-ast2400",
        .r_conf = R_SPI_CONF,
        .r_ce_ctrl = 0xff,
        .r_ctrl0 = R_SPI_CTRL0,
        .r_timings = R_SPI_TIMINGS,
        .nregs_timings = 1,
        .conf_enable_w0 = SPI_CONF_ENABLE_W0,
        .max_peripherals = 1,
        .segments = aspeed_segments_spi,
        .flash_window_base = ASPEED_SOC_SPI_FLASH_BASE,
        .flash_window_size = 0x10000000,
        .has_dma = false,
        .nregs = ASPEED_SMC_R_SPI_MAX,
        .segment_to_reg = aspeed_smc_segment_to_reg,
        .reg_to_segment = aspeed_smc_reg_to_segment,
    }, {
        .name = "aspeed.fmc-ast2500",
        .r_conf = R_CONF,
        .r_ce_ctrl = R_CE_CTRL,
        .r_ctrl0 = R_CTRL0,
        .r_timings = R_TIMINGS,
        .nregs_timings = 1,
        .conf_enable_w0 = CONF_ENABLE_W0,
        .max_peripherals = 3,
        .segments = aspeed_segments_ast2500_fmc,
        .flash_window_base = ASPEED_SOC_FMC_FLASH_BASE,
        .flash_window_size = 0x10000000,
        .has_dma = true,
        .dma_flash_mask = 0x0FFFFFFC,
        .dma_dram_mask = 0x3FFFFFFC,
        .nregs = ASPEED_SMC_R_MAX,
        .segment_to_reg = aspeed_smc_segment_to_reg,
        .reg_to_segment = aspeed_smc_reg_to_segment,
    }, {
        .name = "aspeed.spi1-ast2500",
        .r_conf = R_CONF,
        .r_ce_ctrl = R_CE_CTRL,
        .r_ctrl0 = R_CTRL0,
        .r_timings = R_TIMINGS,
        .nregs_timings = 1,
        .conf_enable_w0 = CONF_ENABLE_W0,
        .max_peripherals = 2,
        .segments = aspeed_segments_ast2500_spi1,
        .flash_window_base = ASPEED_SOC_SPI_FLASH_BASE,
        .flash_window_size = 0x8000000,
        .has_dma = false,
        .nregs = ASPEED_SMC_R_MAX,
        .segment_to_reg = aspeed_smc_segment_to_reg,
        .reg_to_segment = aspeed_smc_reg_to_segment,
    }, {
        .name = "aspeed.spi2-ast2500",
        .r_conf = R_CONF,
        .r_ce_ctrl = R_CE_CTRL,
        .r_ctrl0 = R_CTRL0,
        .r_timings = R_TIMINGS,
        .nregs_timings = 1,
        .conf_enable_w0 = CONF_ENABLE_W0,
        .max_peripherals = 2,
        .segments = aspeed_segments_ast2500_spi2,
        .flash_window_base = ASPEED_SOC_SPI2_FLASH_BASE,
        .flash_window_size = 0x8000000,
        .has_dma = false,
        .nregs = ASPEED_SMC_R_MAX,
        .segment_to_reg = aspeed_smc_segment_to_reg,
        .reg_to_segment = aspeed_smc_reg_to_segment,
    }, {
        .name = "aspeed.fmc-ast2600",
        .r_conf = R_CONF,
        .r_ce_ctrl = R_CE_CTRL,
        .r_ctrl0 = R_CTRL0,
        .r_timings = R_TIMINGS,
        .nregs_timings = 1,
        .conf_enable_w0 = CONF_ENABLE_W0,
        .max_peripherals = 3,
        .segments = aspeed_segments_ast2600_fmc,
        .flash_window_base = ASPEED26_SOC_FMC_FLASH_BASE,
        .flash_window_size = 0x10000000,
        .has_dma = true,
        .dma_flash_mask = 0x0FFFFFFC,
        .dma_dram_mask = 0x3FFFFFFC,
        .nregs = ASPEED_SMC_R_MAX,
        .segment_to_reg = aspeed_2600_smc_segment_to_reg,
        .reg_to_segment = aspeed_2600_smc_reg_to_segment,
    }, {
        .name = "aspeed.spi1-ast2600",
        .r_conf = R_CONF,
        .r_ce_ctrl = R_CE_CTRL,
        .r_ctrl0 = R_CTRL0,
        .r_timings = R_TIMINGS,
        .nregs_timings = 2,
        .conf_enable_w0 = CONF_ENABLE_W0,
        .max_peripherals = 2,
        .segments = aspeed_segments_ast2600_spi1,
        .flash_window_base = ASPEED26_SOC_SPI_FLASH_BASE,
        .flash_window_size = 0x10000000,
        .has_dma = true,
        .dma_flash_mask = 0x0FFFFFFC,
        .dma_dram_mask = 0x3FFFFFFC,
        .nregs = ASPEED_SMC_R_MAX,
        .segment_to_reg = aspeed_2600_smc_segment_to_reg,
        .reg_to_segment = aspeed_2600_smc_reg_to_segment,
    }, {
        .name = "aspeed.spi2-ast2600",
        .r_conf = R_CONF,
        .r_ce_ctrl = R_CE_CTRL,
        .r_ctrl0 = R_CTRL0,
        .r_timings = R_TIMINGS,
        .nregs_timings = 3,
        .conf_enable_w0 = CONF_ENABLE_W0,
        .max_peripherals = 3,
        .segments = aspeed_segments_ast2600_spi2,
        .flash_window_base = ASPEED26_SOC_SPI2_FLASH_BASE,
        .flash_window_size = 0x10000000,
        .has_dma = true,
        .dma_flash_mask = 0x0FFFFFFC,
        .dma_dram_mask = 0x3FFFFFFC,
        .nregs = ASPEED_SMC_R_MAX,
        .segment_to_reg = aspeed_2600_smc_segment_to_reg,
        .reg_to_segment = aspeed_2600_smc_reg_to_segment,
    },
};
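/*
 * Each controller definition above is registered as a QOM type of its
 * own in aspeed_smc_register_types() below: the entry is attached to
 * the class as class_data and cached in the AspeedSMCState at realize
 * time.
 */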
/*
 * The Segment Registers of the AST2400 and AST2500 have an 8MB
 * unit. The address range of a flash SPI peripheral is encoded with
 * absolute addresses which should be part of the overall controller
 * window.
 */
static uint32_t aspeed_smc_segment_to_reg(const AspeedSMCState *s,
                                          const AspeedSegments *seg)
{
    uint32_t reg = 0;
    reg |= ((seg->addr >> 23) & SEG_START_MASK) << SEG_START_SHIFT;
    reg |= (((seg->addr + seg->size) >> 23) & SEG_END_MASK) << SEG_END_SHIFT;
    return reg;
}

static void aspeed_smc_reg_to_segment(const AspeedSMCState *s,
                                      uint32_t reg, AspeedSegments *seg)
{
    seg->addr = ((reg >> SEG_START_SHIFT) & SEG_START_MASK) << 23;
    seg->size = (((reg >> SEG_END_SHIFT) & SEG_END_MASK) << 23) - seg->addr;
}
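/*
 * For example, the default AST2400 FMC CS1 segment { 0x24000000, 32MB }
 * encodes as ((0x24000000 >> 23) << SEG_START_SHIFT) |
 * ((0x26000000 >> 23) << SEG_END_SHIFT), i.e. 0x4C480000.
 */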
/*
 * The Segment Registers of the AST2600 have a 1MB unit. The address
 * range of a flash SPI peripheral is encoded with offsets in the
 * overall controller window. The previous SoCs, AST2400 and AST2500,
 * used absolute addresses. Only bits [27:20] are relevant and the end
 * address is an upper bound limit.
 */
#define AST2600_SEG_ADDR_MASK 0x0ff00000

static uint32_t aspeed_2600_smc_segment_to_reg(const AspeedSMCState *s,
                                               const AspeedSegments *seg)
{
    uint32_t reg = 0;

    /* Disabled segments have a nil register */
    if (!seg->size) {
        return 0;
    }

    reg |= (seg->addr & AST2600_SEG_ADDR_MASK) >> 16; /* start offset */
    reg |= (seg->addr + seg->size - 1) & AST2600_SEG_ADDR_MASK; /* end offset */
    return reg;
}

static void aspeed_2600_smc_reg_to_segment(const AspeedSMCState *s,
                                           uint32_t reg, AspeedSegments *seg)
{
    uint32_t start_offset = (reg << 16) & AST2600_SEG_ADDR_MASK;
    uint32_t end_offset = reg & AST2600_SEG_ADDR_MASK;

    if (reg) {
        seg->addr = s->ctrl->flash_window_base + start_offset;
        seg->size = end_offset + MiB - start_offset;
    } else {
        seg->addr = s->ctrl->flash_window_base;
        seg->size = 0;
    }
}
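/*
 * For example, the default AST2600 FMC CS0 segment { 0x0, 128 * MiB }
 * encodes as 0x07F00000 (start offset 0, end offset 0x07F00000), which
 * decodes back to a size of 0x07F00000 + MiB = 128MB.
 */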
static bool aspeed_smc_flash_overlap(const AspeedSMCState *s,
                                     const AspeedSegments *new,
                                     int cs)
{
    AspeedSegments seg;
    int i;

    for (i = 0; i < s->ctrl->max_peripherals; i++) {
        if (i == cs) {
            continue;
        }

        s->ctrl->reg_to_segment(s, s->regs[R_SEG_ADDR0 + i], &seg);

        if (new->addr + new->size > seg.addr &&
            new->addr < seg.addr + seg.size) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: new segment CS%d [ 0x%"
                          HWADDR_PRIx" - 0x%"HWADDR_PRIx" ] overlaps with "
                          "CS%d [ 0x%"HWADDR_PRIx" - 0x%"HWADDR_PRIx" ]\n",
                          s->ctrl->name, cs, new->addr, new->addr + new->size,
                          i, seg.addr, seg.addr + seg.size);
            return true;
        }
    }
    return false;
}

static void aspeed_smc_flash_set_segment_region(AspeedSMCState *s, int cs,
                                                uint64_t regval)
{
    AspeedSMCFlash *fl = &s->flashes[cs];
    AspeedSegments seg;

    s->ctrl->reg_to_segment(s, regval, &seg);

    memory_region_transaction_begin();
    memory_region_set_size(&fl->mmio, seg.size);
    memory_region_set_address(&fl->mmio, seg.addr - s->ctrl->flash_window_base);
    memory_region_set_enabled(&fl->mmio, !!seg.size);
    memory_region_transaction_commit();

    s->regs[R_SEG_ADDR0 + cs] = regval;
}

static void aspeed_smc_flash_set_segment(AspeedSMCState *s, int cs,
                                         uint64_t new)
{
    AspeedSegments seg;

    s->ctrl->reg_to_segment(s, new, &seg);

    trace_aspeed_smc_flash_set_segment(cs, new, seg.addr, seg.addr + seg.size);

    /* The start address of CS0 is read-only */
    if (cs == 0 && seg.addr != s->ctrl->flash_window_base) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: Tried to change CS0 start address to 0x%"
                      HWADDR_PRIx "\n", s->ctrl->name, seg.addr);
        seg.addr = s->ctrl->flash_window_base;
        new = s->ctrl->segment_to_reg(s, &seg);
    }

    /*
     * The end address of the AST2500 spi controllers is also
     * read-only.
     */
    if ((s->ctrl->segments == aspeed_segments_ast2500_spi1 ||
         s->ctrl->segments == aspeed_segments_ast2500_spi2) &&
        cs == s->ctrl->max_peripherals &&
        seg.addr + seg.size != s->ctrl->segments[cs].addr +
        s->ctrl->segments[cs].size) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: Tried to change CS%d end address to 0x%"
                      HWADDR_PRIx "\n", s->ctrl->name, cs, seg.addr + seg.size);
        seg.size = s->ctrl->segments[cs].addr + s->ctrl->segments[cs].size -
            seg.addr;
        new = s->ctrl->segment_to_reg(s, &seg);
    }

    /* Keep the segment in the overall flash window */
    if (seg.size &&
        (seg.addr + seg.size <= s->ctrl->flash_window_base ||
         seg.addr > s->ctrl->flash_window_base + s->ctrl->flash_window_size)) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: new segment for CS%d is invalid : "
                      "[ 0x%"HWADDR_PRIx" - 0x%"HWADDR_PRIx" ]\n",
                      s->ctrl->name, cs, seg.addr, seg.addr + seg.size);
        return;
    }

    /* Check start address vs. alignment */
    if (seg.size && !QEMU_IS_ALIGNED(seg.addr, seg.size)) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: new segment for CS%d is not "
                      "aligned : [ 0x%"HWADDR_PRIx" - 0x%"HWADDR_PRIx" ]\n",
                      s->ctrl->name, cs, seg.addr, seg.addr + seg.size);
    }

    /* And segments should not overlap (in the specs) */
    aspeed_smc_flash_overlap(s, &seg, cs);

    /* All should be fine now to move the region */
    aspeed_smc_flash_set_segment_region(s, cs, new);
}

static uint64_t aspeed_smc_flash_default_read(void *opaque, hwaddr addr,
                                              unsigned size)
{
    qemu_log_mask(LOG_GUEST_ERROR, "%s: To 0x%" HWADDR_PRIx " of size %u\n",
                  __func__, addr, size);
    return 0;
}

static void aspeed_smc_flash_default_write(void *opaque, hwaddr addr,
                                           uint64_t data, unsigned size)
{
    qemu_log_mask(LOG_GUEST_ERROR, "%s: To 0x%" HWADDR_PRIx " of size %u: 0x%"
                  PRIx64 "\n", __func__, addr, size, data);
}

static const MemoryRegionOps aspeed_smc_flash_default_ops = {
    .read = aspeed_smc_flash_default_read,
    .write = aspeed_smc_flash_default_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 4,
    },
};

static inline int aspeed_smc_flash_mode(const AspeedSMCFlash *fl)
{
    const AspeedSMCState *s = fl->controller;

    return s->regs[s->r_ctrl0 + fl->id] & CTRL_CMD_MODE_MASK;
}

static inline bool aspeed_smc_is_writable(const AspeedSMCFlash *fl)
{
    const AspeedSMCState *s = fl->controller;

    return s->regs[s->r_conf] & (1 << (s->conf_enable_w0 + fl->id));
}

static inline int aspeed_smc_flash_cmd(const AspeedSMCFlash *fl)
{
    const AspeedSMCState *s = fl->controller;
    int cmd = (s->regs[s->r_ctrl0 + fl->id] >> CTRL_CMD_SHIFT) & CTRL_CMD_MASK;

    /*
     * In read mode, the default SPI command is READ (0x3). In other
     * modes, the command should necessarily be defined
     *
     * TODO: add support for READ4 (0x13) on AST2600
     */
    if (aspeed_smc_flash_mode(fl) == CTRL_READMODE) {
        cmd = SPI_OP_READ;
    }

    if (!cmd) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: no command defined for mode %d\n",
                      __func__, aspeed_smc_flash_mode(fl));
    }

    return cmd;
}

static inline int aspeed_smc_flash_is_4byte(const AspeedSMCFlash *fl)
{
    const AspeedSMCState *s = fl->controller;

    if (s->ctrl->segments == aspeed_segments_spi) {
        return s->regs[s->r_ctrl0] & CTRL_AST2400_SPI_4BYTE;
    } else {
        return s->regs[s->r_ce_ctrl] & (1 << (CTRL_EXTENDED0 + fl->id));
    }
}

static void aspeed_smc_flash_do_select(AspeedSMCFlash *fl, bool unselect)
{
    AspeedSMCState *s = fl->controller;

    trace_aspeed_smc_flash_select(fl->id, unselect ? "un" : "");

    qemu_set_irq(s->cs_lines[fl->id], unselect);
}

static void aspeed_smc_flash_select(AspeedSMCFlash *fl)
{
    aspeed_smc_flash_do_select(fl, false);
}

static void aspeed_smc_flash_unselect(AspeedSMCFlash *fl)
{
    aspeed_smc_flash_do_select(fl, true);
}

static uint32_t aspeed_smc_check_segment_addr(const AspeedSMCFlash *fl,
                                              uint32_t addr)
{
    const AspeedSMCState *s = fl->controller;
    AspeedSegments seg;

    s->ctrl->reg_to_segment(s, s->regs[R_SEG_ADDR0 + fl->id], &seg);
    if ((addr % seg.size) != addr) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid address 0x%08x for CS%d segment : "
                      "[ 0x%"HWADDR_PRIx" - 0x%"HWADDR_PRIx" ]\n",
                      s->ctrl->name, addr, fl->id, seg.addr,
                      seg.addr + seg.size);
        addr %= seg.size;
    }

    return addr;
}

static int aspeed_smc_flash_dummies(const AspeedSMCFlash *fl)
{
    const AspeedSMCState *s = fl->controller;
    uint32_t r_ctrl0 = s->regs[s->r_ctrl0 + fl->id];
    uint32_t dummy_high = (r_ctrl0 >> CTRL_DUMMY_HIGH_SHIFT) & 0x1;
    uint32_t dummy_low = (r_ctrl0 >> CTRL_DUMMY_LOW_SHIFT) & 0x3;
    uint32_t dummies = ((dummy_high << 2) | dummy_low) * 8;

    if (r_ctrl0 & CTRL_IO_DUAL_ADDR_DATA) {
        dummies /= 2;
    }

    return dummies;
}

static void aspeed_smc_flash_setup(AspeedSMCFlash *fl, uint32_t addr)
{
    const AspeedSMCState *s = fl->controller;
    uint8_t cmd = aspeed_smc_flash_cmd(fl);
    int i = aspeed_smc_flash_is_4byte(fl) ? 4 : 3;

    /* Flash access can not exceed CS segment */
    addr = aspeed_smc_check_segment_addr(fl, addr);

    ssi_transfer(s->spi, cmd);
    while (i--) {
        if (aspeed_smc_addr_byte_enabled(s, i)) {
            ssi_transfer(s->spi, (addr >> (i * 8)) & 0xff);
        }
    }

    /*
     * Use fake transfers to model dummy bytes. The value should
     * be configured to some non-zero value in fast read mode and
     * zero in read mode. But, as the HW allows inconsistent
     * settings, let's check for fast read mode.
     */
    if (aspeed_smc_flash_mode(fl) == CTRL_FREADMODE) {
        for (i = 0; i < aspeed_smc_flash_dummies(fl); i++) {
            ssi_transfer(fl->controller->spi, s->regs[R_DUMMY_DATA] & 0xff);
        }
    }
}
static uint64_t aspeed_smc_flash_read(void *opaque, hwaddr addr, unsigned size)
{
    AspeedSMCFlash *fl = opaque;
    AspeedSMCState *s = fl->controller;
    uint64_t ret = 0;
    int i;

    switch (aspeed_smc_flash_mode(fl)) {
    case CTRL_USERMODE:
        for (i = 0; i < size; i++) {
            ret |= ssi_transfer(s->spi, 0x0) << (8 * i);
        }
        break;
    case CTRL_READMODE:
    case CTRL_FREADMODE:
        aspeed_smc_flash_select(fl);
        aspeed_smc_flash_setup(fl, addr);

        for (i = 0; i < size; i++) {
            ret |= ssi_transfer(s->spi, 0x0) << (8 * i);
        }

        aspeed_smc_flash_unselect(fl);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid flash mode %d\n",
                      __func__, aspeed_smc_flash_mode(fl));
    }

    trace_aspeed_smc_flash_read(fl->id, addr, size, ret,
                                aspeed_smc_flash_mode(fl));
    return ret;
}

/*
 * TODO (clg@kaod.org): stolen from xilinx_spips.c. Should move to a
 * common include header.
 */
typedef enum {
    READ = 0x3,         READ_4 = 0x13,
    FAST_READ = 0xb,    FAST_READ_4 = 0x0c,
    DOR = 0x3b,         DOR_4 = 0x3c,
    QOR = 0x6b,         QOR_4 = 0x6c,
    DIOR = 0xbb,        DIOR_4 = 0xbc,
    QIOR = 0xeb,        QIOR_4 = 0xec,

    PP = 0x2,           PP_4 = 0x12,
    DPP = 0xa2,
    QPP = 0x32,         QPP_4 = 0x34,
} FlashCMD;

static int aspeed_smc_num_dummies(uint8_t command)
{
    switch (command) { /* check for dummies */
    case READ: /* no dummy bytes/cycles */
    case PP:
    case DPP:
    case QPP:
    case READ_4:
    case PP_4:
    case QPP_4:
        return 0;
    case FAST_READ:
    case DOR:
    case QOR:
    case FAST_READ_4:
    case DOR_4:
    case QOR_4:
        return 1;
    case DIOR:
    case DIOR_4:
        return 2;
    case QIOR:
    case QIOR_4:
        return 4;
    default:
        return -1;
    }
}
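/*
 * User mode transfers are snooped so that dummy cycles can be faked
 * for the flash model: the first byte gives the command, the address
 * bytes are then counted, and once they have gone by the number of
 * dummy transfers expected for that command (aspeed_smc_num_dummies()
 * * 8) is injected on the bus using the R_DUMMY_DATA pattern, the
 * byte written by the guest at that point being dropped.
 */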
static bool aspeed_smc_do_snoop(AspeedSMCFlash *fl, uint64_t data,
                                unsigned size)
{
    AspeedSMCState *s = fl->controller;
    uint8_t addr_width = aspeed_smc_flash_is_4byte(fl) ? 4 : 3;

    trace_aspeed_smc_do_snoop(fl->id, s->snoop_index, s->snoop_dummies,
                              (uint8_t) data & 0xff);

    if (s->snoop_index == SNOOP_OFF) {
        return false; /* Do nothing */

    } else if (s->snoop_index == SNOOP_START) {
        uint8_t cmd = data & 0xff;
        int ndummies = aspeed_smc_num_dummies(cmd);

        /*
         * No dummy cycles are expected with the current command. Turn
         * off snooping and let the transfer proceed normally.
         */
        if (ndummies <= 0) {
            s->snoop_index = SNOOP_OFF;
            return false;
        }

        s->snoop_dummies = ndummies * 8;

    } else if (s->snoop_index >= addr_width + 1) {

        /* The SPI transfer has reached the dummy cycles sequence */
        for (; s->snoop_dummies; s->snoop_dummies--) {
            ssi_transfer(s->spi, s->regs[R_DUMMY_DATA] & 0xff);
        }

        /* If no more dummy cycles are expected, turn off snooping */
        if (!s->snoop_dummies) {
            s->snoop_index = SNOOP_OFF;
        } else {
            s->snoop_index += size;
        }

        /*
         * Dummy cycles have been faked already. Ignore the current
         * SPI transfer
         */
        return true;
    }

    s->snoop_index += size;
    return false;
}

static void aspeed_smc_flash_write(void *opaque, hwaddr addr, uint64_t data,
                                   unsigned size)
{
    AspeedSMCFlash *fl = opaque;
    AspeedSMCState *s = fl->controller;
    int i;

    trace_aspeed_smc_flash_write(fl->id, addr, size, data,
                                 aspeed_smc_flash_mode(fl));

    if (!aspeed_smc_is_writable(fl)) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: flash is not writable at 0x%"
                      HWADDR_PRIx "\n", __func__, addr);
        return;
    }

    switch (aspeed_smc_flash_mode(fl)) {
    case CTRL_USERMODE:
        if (aspeed_smc_do_snoop(fl, data, size)) {
            break;
        }

        for (i = 0; i < size; i++) {
            ssi_transfer(s->spi, (data >> (8 * i)) & 0xff);
        }
        break;
    case CTRL_WRITEMODE:
        aspeed_smc_flash_select(fl);
        aspeed_smc_flash_setup(fl, addr);

        for (i = 0; i < size; i++) {
            ssi_transfer(s->spi, (data >> (8 * i)) & 0xff);
        }

        aspeed_smc_flash_unselect(fl);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid flash mode %d\n",
                      __func__, aspeed_smc_flash_mode(fl));
    }
}

static const MemoryRegionOps aspeed_smc_flash_ops = {
    .read = aspeed_smc_flash_read,
    .write = aspeed_smc_flash_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 4,
    },
};

static void aspeed_smc_flash_update_ctrl(AspeedSMCFlash *fl, uint32_t value)
{
    AspeedSMCState *s = fl->controller;
    bool unselect;

    /* User mode selects the CS, other modes unselect */
    unselect = (value & CTRL_CMD_MODE_MASK) != CTRL_USERMODE;

    /* A change of CTRL_CE_STOP_ACTIVE from 0 to 1 unselects the CS */
    if (!(s->regs[s->r_ctrl0 + fl->id] & CTRL_CE_STOP_ACTIVE) &&
        value & CTRL_CE_STOP_ACTIVE) {
        unselect = true;
    }

    s->regs[s->r_ctrl0 + fl->id] = value;

    s->snoop_index = unselect ? SNOOP_OFF : SNOOP_START;

    aspeed_smc_flash_do_select(fl, unselect);
}

static void aspeed_smc_reset(DeviceState *d)
{
    AspeedSMCState *s = ASPEED_SMC(d);
    int i;

    memset(s->regs, 0, sizeof s->regs);

    /* Unselect all peripherals */
    for (i = 0; i < s->num_cs; ++i) {
        s->regs[s->r_ctrl0 + i] |= CTRL_CE_STOP_ACTIVE;
        qemu_set_irq(s->cs_lines[i], true);
    }

    /* setup the default segment register values and regions for all */
    for (i = 0; i < s->ctrl->max_peripherals; ++i) {
        aspeed_smc_flash_set_segment_region(s, i,
                    s->ctrl->segment_to_reg(s, &s->ctrl->segments[i]));
    }

    /* HW strapping flash type for the AST2600 controllers */
    if (s->ctrl->segments == aspeed_segments_ast2600_fmc) {
        /* flash type is fixed to SPI for all */
        s->regs[s->r_conf] |= (CONF_FLASH_TYPE_SPI << CONF_FLASH_TYPE0);
        s->regs[s->r_conf] |= (CONF_FLASH_TYPE_SPI << CONF_FLASH_TYPE1);
        s->regs[s->r_conf] |= (CONF_FLASH_TYPE_SPI << CONF_FLASH_TYPE2);
    }

    /* HW strapping flash type for FMC controllers */
    if (s->ctrl->segments == aspeed_segments_ast2500_fmc) {
        /* flash type is fixed to SPI for CE0 and CE1 */
        s->regs[s->r_conf] |= (CONF_FLASH_TYPE_SPI << CONF_FLASH_TYPE0);
        s->regs[s->r_conf] |= (CONF_FLASH_TYPE_SPI << CONF_FLASH_TYPE1);
    }
    /*
     * HW strapping for AST2400 FMC controllers (SCU70). Let's use the
     * configuration of the palmetto-bmc machine
     */
    if (s->ctrl->segments == aspeed_segments_fmc) {
        s->regs[s->r_conf] |= (CONF_FLASH_TYPE_SPI << CONF_FLASH_TYPE0);
    }

    s->snoop_index = SNOOP_OFF;
    s->snoop_dummies = 0;
}

static uint64_t aspeed_smc_read(void *opaque, hwaddr addr, unsigned int size)
{
    AspeedSMCState *s = ASPEED_SMC(opaque);

    addr >>= 2;

    if (addr == s->r_conf ||
        (addr >= s->r_timings &&
         addr < s->r_timings + s->ctrl->nregs_timings) ||
        addr == s->r_ce_ctrl ||
        addr == R_CE_CMD_CTRL ||
        addr == R_INTR_CTRL ||
        addr == R_DUMMY_DATA ||
        (s->ctrl->has_dma && addr == R_DMA_CTRL) ||
        (s->ctrl->has_dma && addr == R_DMA_FLASH_ADDR) ||
        (s->ctrl->has_dma && addr == R_DMA_DRAM_ADDR) ||
        (s->ctrl->has_dma && addr == R_DMA_LEN) ||
        (s->ctrl->has_dma && addr == R_DMA_CHECKSUM) ||
        (addr >= R_SEG_ADDR0 &&
         addr < R_SEG_ADDR0 + s->ctrl->max_peripherals) ||
        (addr >= s->r_ctrl0 && addr < s->r_ctrl0 + s->ctrl->max_peripherals)) {

        trace_aspeed_smc_read(addr, size, s->regs[addr]);

        return s->regs[addr];
    } else {
        qemu_log_mask(LOG_UNIMP, "%s: not implemented: 0x%" HWADDR_PRIx "\n",
                      __func__, addr);
        return -1;
    }
}

static uint8_t aspeed_smc_hclk_divisor(uint8_t hclk_mask)
{
    /* HCLK/1 .. HCLK/16 */
    const uint8_t hclk_divisors[] = {
        15, 7, 14, 6, 13, 5, 12, 4, 11, 3, 10, 2, 9, 1, 8, 0
    };
    int i;

    for (i = 0; i < ARRAY_SIZE(hclk_divisors); i++) {
        if (hclk_mask == hclk_divisors[i]) {
            return i + 1;
        }
    }

    qemu_log_mask(LOG_GUEST_ERROR, "invalid HCLK mask %x\n", hclk_mask);
    return 0;
}
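/*
 * For example, a mask of 0xf selects HCLK/1 and a mask of 0x0 selects
 * HCLK/16, following the encoding of the hclk_divisors[] table above.
 */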
/*
 * When doing calibration, the SPI clock rate in the CE0 Control
 * Register and the read delay cycles in the Read Timing Compensation
 * Register are set using bit[11:4] of the DMA Control Register.
 */
static void aspeed_smc_dma_calibration(AspeedSMCState *s)
{
    uint8_t delay =
        (s->regs[R_DMA_CTRL] >> DMA_CTRL_DELAY_SHIFT) & DMA_CTRL_DELAY_MASK;
    uint8_t hclk_mask =
        (s->regs[R_DMA_CTRL] >> DMA_CTRL_FREQ_SHIFT) & DMA_CTRL_FREQ_MASK;
    uint8_t hclk_div = aspeed_smc_hclk_divisor(hclk_mask);
    uint32_t hclk_shift = (hclk_div - 1) << 2;
    uint8_t cs;

    /*
     * The Read Timing Compensation Register values apply to all CS on
     * the SPI bus and only HCLK/1 - HCLK/5 can have tunable delays
     */
    if (hclk_div && hclk_div < 6) {
        s->regs[s->r_timings] &= ~(0xf << hclk_shift);
        s->regs[s->r_timings] |= delay << hclk_shift;
    }

    /*
     * TODO: compute the CS from the DMA address and the segment
     * registers. This is not really a problem for now because the
     * Timing Register values apply to all CS and software uses CS0 to
     * do calibration.
     */
    cs = 0;
    s->regs[s->r_ctrl0 + cs] &=
        ~(CE_CTRL_CLOCK_FREQ_MASK << CE_CTRL_CLOCK_FREQ_SHIFT);
    s->regs[s->r_ctrl0 + cs] |= CE_CTRL_CLOCK_FREQ(hclk_div);
}

/*
 * Emulate read errors in the DMA Checksum Register for high
 * frequencies and optimistic settings of the Read Timing Compensation
 * Register. This will help in tuning the SPI timing calibration
 * algorithm.
 */
static bool aspeed_smc_inject_read_failure(AspeedSMCState *s)
{
    uint8_t delay =
        (s->regs[R_DMA_CTRL] >> DMA_CTRL_DELAY_SHIFT) & DMA_CTRL_DELAY_MASK;
    uint8_t hclk_mask =
        (s->regs[R_DMA_CTRL] >> DMA_CTRL_FREQ_SHIFT) & DMA_CTRL_FREQ_MASK;

    /*
     * Typical values of a palmetto-bmc machine.
     */
    switch (aspeed_smc_hclk_divisor(hclk_mask)) {
    case 4 ... 16:
        return false;
    case 3: /* at least one HCLK cycle delay */
        return (delay & 0x7) < 1;
    case 2: /* at least two HCLK cycle delay */
        return (delay & 0x7) < 2;
    case 1: /* (> 100MHz) is above the max freq of the controller */
        return true;
    default:
        g_assert_not_reached();
    }
}

/*
 * Accumulate the result of the reads to provide a checksum that will
 * be used to validate the read timing settings.
 */
static void aspeed_smc_dma_checksum(AspeedSMCState *s)
{
    MemTxResult result;
    uint32_t data;

    if (s->regs[R_DMA_CTRL] & DMA_CTRL_WRITE) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid direction for DMA checksum\n", __func__);
        return;
    }

    if (s->regs[R_DMA_CTRL] & DMA_CTRL_CALIB) {
        aspeed_smc_dma_calibration(s);
    }

    while (s->regs[R_DMA_LEN]) {
        data = address_space_ldl_le(&s->flash_as, s->regs[R_DMA_FLASH_ADDR],
                                    MEMTXATTRS_UNSPECIFIED, &result);
        if (result != MEMTX_OK) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: Flash read failed @%08x\n",
                          __func__, s->regs[R_DMA_FLASH_ADDR]);
            return;
        }
        trace_aspeed_smc_dma_checksum(s->regs[R_DMA_FLASH_ADDR], data);

        /*
         * When the DMA is on-going, the DMA registers are updated
         * with the current working addresses and length.
         */
        s->regs[R_DMA_CHECKSUM] += data;
        s->regs[R_DMA_FLASH_ADDR] += 4;
        s->regs[R_DMA_LEN] -= 4;
    }

    if (s->inject_failure && aspeed_smc_inject_read_failure(s)) {
        s->regs[R_DMA_CHECKSUM] = 0xbadc0de;
    }
}

static void aspeed_smc_dma_rw(AspeedSMCState *s)
{
    MemTxResult result;
    uint32_t data;

    trace_aspeed_smc_dma_rw(s->regs[R_DMA_CTRL] & DMA_CTRL_WRITE ?
                            "write" : "read",
                            s->regs[R_DMA_FLASH_ADDR],
                            s->regs[R_DMA_DRAM_ADDR],
                            s->regs[R_DMA_LEN]);
    while (s->regs[R_DMA_LEN]) {
        if (s->regs[R_DMA_CTRL] & DMA_CTRL_WRITE) {
            data = address_space_ldl_le(&s->dram_as, s->regs[R_DMA_DRAM_ADDR],
                                        MEMTXATTRS_UNSPECIFIED, &result);
            if (result != MEMTX_OK) {
                qemu_log_mask(LOG_GUEST_ERROR, "%s: DRAM read failed @%08x\n",
                              __func__, s->regs[R_DMA_DRAM_ADDR]);
                return;
            }

            address_space_stl_le(&s->flash_as, s->regs[R_DMA_FLASH_ADDR],
                                 data, MEMTXATTRS_UNSPECIFIED, &result);
            if (result != MEMTX_OK) {
                qemu_log_mask(LOG_GUEST_ERROR, "%s: Flash write failed @%08x\n",
                              __func__, s->regs[R_DMA_FLASH_ADDR]);
                return;
            }
        } else {
            data = address_space_ldl_le(&s->flash_as, s->regs[R_DMA_FLASH_ADDR],
                                        MEMTXATTRS_UNSPECIFIED, &result);
            if (result != MEMTX_OK) {
                qemu_log_mask(LOG_GUEST_ERROR, "%s: Flash read failed @%08x\n",
                              __func__, s->regs[R_DMA_FLASH_ADDR]);
                return;
            }

            address_space_stl_le(&s->dram_as, s->regs[R_DMA_DRAM_ADDR],
                                 data, MEMTXATTRS_UNSPECIFIED, &result);
            if (result != MEMTX_OK) {
                qemu_log_mask(LOG_GUEST_ERROR, "%s: DRAM write failed @%08x\n",
                              __func__, s->regs[R_DMA_DRAM_ADDR]);
                return;
            }
        }

        /*
         * When the DMA is on-going, the DMA registers are updated
         * with the current working addresses and length.
         */
        s->regs[R_DMA_FLASH_ADDR] += 4;
        s->regs[R_DMA_DRAM_ADDR] += 4;
        s->regs[R_DMA_LEN] -= 4;
        s->regs[R_DMA_CHECKSUM] += data;
    }
}

static void aspeed_smc_dma_stop(AspeedSMCState *s)
{
    /*
     * When the DMA is disabled, INTR_CTRL_DMA_STATUS=0 means the
     * engine is idle
     */
    s->regs[R_INTR_CTRL] &= ~INTR_CTRL_DMA_STATUS;
    s->regs[R_DMA_CHECKSUM] = 0;

    /*
     * Lower the DMA irq in any case. The IRQ control register could
     * have been cleared before disabling the DMA.
     */
    qemu_irq_lower(s->irq);
}
/*
 * When INTR_CTRL_DMA_STATUS=1, the DMA has completed and a new DMA
 * can start even if the result of the previous was not collected.
 */
static bool aspeed_smc_dma_in_progress(AspeedSMCState *s)
{
    return s->regs[R_DMA_CTRL] & DMA_CTRL_ENABLE &&
        !(s->regs[R_INTR_CTRL] & INTR_CTRL_DMA_STATUS);
}

static void aspeed_smc_dma_done(AspeedSMCState *s)
{
    s->regs[R_INTR_CTRL] |= INTR_CTRL_DMA_STATUS;
    if (s->regs[R_INTR_CTRL] & INTR_CTRL_DMA_EN) {
        qemu_irq_raise(s->irq);
    }
}

static void aspeed_smc_dma_ctrl(AspeedSMCState *s, uint64_t dma_ctrl)
{
    if (!(dma_ctrl & DMA_CTRL_ENABLE)) {
        s->regs[R_DMA_CTRL] = dma_ctrl;

        aspeed_smc_dma_stop(s);
        return;
    }

    if (aspeed_smc_dma_in_progress(s)) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: DMA in progress\n", __func__);
        return;
    }

    s->regs[R_DMA_CTRL] = dma_ctrl;

    if (s->regs[R_DMA_CTRL] & DMA_CTRL_CKSUM) {
        aspeed_smc_dma_checksum(s);
    } else {
        aspeed_smc_dma_rw(s);
    }

    aspeed_smc_dma_done(s);
}
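/*
 * For example, a checksum DMA is started by programming
 * R_DMA_FLASH_ADDR and R_DMA_LEN and then writing DMA_CTRL_ENABLE |
 * DMA_CTRL_CKSUM to R_DMA_CTRL; completion sets INTR_CTRL_DMA_STATUS
 * and the result can be read back from R_DMA_CHECKSUM. Writing 0 to
 * R_DMA_CTRL stops the engine and clears the checksum.
 */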
static void aspeed_smc_write(void *opaque, hwaddr addr, uint64_t data,
                             unsigned int size)
{
    AspeedSMCState *s = ASPEED_SMC(opaque);
    uint32_t value = data;

    addr >>= 2;

    trace_aspeed_smc_write(addr, size, data);

    if (addr == s->r_conf ||
        (addr >= s->r_timings &&
         addr < s->r_timings + s->ctrl->nregs_timings) ||
        addr == s->r_ce_ctrl) {
        s->regs[addr] = value;
    } else if (addr >= s->r_ctrl0 && addr < s->r_ctrl0 + s->num_cs) {
        int cs = addr - s->r_ctrl0;
        aspeed_smc_flash_update_ctrl(&s->flashes[cs], value);
    } else if (addr >= R_SEG_ADDR0 &&
               addr < R_SEG_ADDR0 + s->ctrl->max_peripherals) {
        int cs = addr - R_SEG_ADDR0;

        if (value != s->regs[R_SEG_ADDR0 + cs]) {
            aspeed_smc_flash_set_segment(s, cs, value);
        }
    } else if (addr == R_CE_CMD_CTRL) {
        s->regs[addr] = value & 0xff;
    } else if (addr == R_DUMMY_DATA) {
        s->regs[addr] = value & 0xff;
    } else if (addr == R_INTR_CTRL) {
        s->regs[addr] = value;
    } else if (s->ctrl->has_dma && addr == R_DMA_CTRL) {
        aspeed_smc_dma_ctrl(s, value);
    } else if (s->ctrl->has_dma && addr == R_DMA_DRAM_ADDR) {
        s->regs[addr] = DMA_DRAM_ADDR(s, value);
    } else if (s->ctrl->has_dma && addr == R_DMA_FLASH_ADDR) {
        s->regs[addr] = DMA_FLASH_ADDR(s, value);
    } else if (s->ctrl->has_dma && addr == R_DMA_LEN) {
        s->regs[addr] = DMA_LENGTH(value);
    } else {
        qemu_log_mask(LOG_UNIMP, "%s: not implemented: 0x%" HWADDR_PRIx "\n",
                      __func__, addr);
        return;
    }
}

static const MemoryRegionOps aspeed_smc_ops = {
    .read = aspeed_smc_read,
    .write = aspeed_smc_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

/*
 * Initialize the custom address spaces for DMAs
 */
static void aspeed_smc_dma_setup(AspeedSMCState *s, Error **errp)
{
    char *name;

    if (!s->dram_mr) {
        error_setg(errp, TYPE_ASPEED_SMC ": 'dram' link not set");
        return;
    }

    name = g_strdup_printf("%s-dma-flash", s->ctrl->name);
    address_space_init(&s->flash_as, &s->mmio_flash, name);
    g_free(name);

    name = g_strdup_printf("%s-dma-dram", s->ctrl->name);
    address_space_init(&s->dram_as, s->dram_mr, name);
    g_free(name);
}

static void aspeed_smc_realize(DeviceState *dev, Error **errp)
{
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
    AspeedSMCState *s = ASPEED_SMC(dev);
    AspeedSMCClass *mc = ASPEED_SMC_GET_CLASS(s);
    int i;
    char name[32];
    hwaddr offset = 0;

    s->ctrl = mc->ctrl;

    /* keep a copy under AspeedSMCState to speed up accesses */
    s->r_conf = s->ctrl->r_conf;
    s->r_ce_ctrl = s->ctrl->r_ce_ctrl;
    s->r_ctrl0 = s->ctrl->r_ctrl0;
    s->r_timings = s->ctrl->r_timings;
    s->conf_enable_w0 = s->ctrl->conf_enable_w0;

    /* Enforce some real HW limits */
    if (s->num_cs > s->ctrl->max_peripherals) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: num_cs cannot exceed: %d\n",
                      __func__, s->ctrl->max_peripherals);
        s->num_cs = s->ctrl->max_peripherals;
    }

    /* DMA irq. Keep it first for the initialization in the SoC */
    sysbus_init_irq(sbd, &s->irq);

    s->spi = ssi_create_bus(dev, "spi");

    /* Setup cs_lines for peripherals */
    s->cs_lines = g_new0(qemu_irq, s->num_cs);

    for (i = 0; i < s->num_cs; ++i) {
        sysbus_init_irq(sbd, &s->cs_lines[i]);
    }

    /* The memory region for the controller registers */
    memory_region_init_io(&s->mmio, OBJECT(s), &aspeed_smc_ops, s,
                          s->ctrl->name, s->ctrl->nregs * 4);
    sysbus_init_mmio(sbd, &s->mmio);

    /*
     * The container memory region representing the address space
     * window in which the flash modules are mapped. The size and
     * address depend on the SoC model and controller type.
     */
    snprintf(name, sizeof(name), "%s.flash", s->ctrl->name);

    memory_region_init_io(&s->mmio_flash, OBJECT(s),
                          &aspeed_smc_flash_default_ops, s, name,
                          s->ctrl->flash_window_size);
    sysbus_init_mmio(sbd, &s->mmio_flash);

    s->flashes = g_new0(AspeedSMCFlash, s->ctrl->max_peripherals);

    /*
     * Let's create a sub memory region for each possible peripheral. All
     * have a configurable memory segment in the overall flash mapping
     * window of the controller, but there is not necessarily a flash
     * module behind to handle the memory accesses. This depends on
     * the board configuration.
     */
    for (i = 0; i < s->ctrl->max_peripherals; ++i) {
        AspeedSMCFlash *fl = &s->flashes[i];

        snprintf(name, sizeof(name), "%s.%d", s->ctrl->name, i);

        fl->id = i;
        fl->controller = s;
        fl->size = s->ctrl->segments[i].size;
        memory_region_init_io(&fl->mmio, OBJECT(s), &aspeed_smc_flash_ops,
                              fl, name, fl->size);
        memory_region_add_subregion(&s->mmio_flash, offset, &fl->mmio);
        offset += fl->size;
    }

    /* DMA support */
    if (s->ctrl->has_dma) {
        aspeed_smc_dma_setup(s, errp);
    }
}
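/*
 * A sketch of the expected board wiring (the Aspeed SoC code is the
 * authoritative reference): the controller is realized with "num-cs"
 * set, MMIO region 0 (registers) and region 1 (flash window) are
 * mapped, and one flash device is attached to the "spi" bus per chip
 * select, its CS line wired to sysbus output IRQ 1 + cs (IRQ 0 being
 * the DMA interrupt).
 */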
static const VMStateDescription vmstate_aspeed_smc = {
    .name = "aspeed.smc",
    .version_id = 2,
    .minimum_version_id = 2,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_ARRAY(regs, AspeedSMCState, ASPEED_SMC_R_MAX),
        VMSTATE_UINT8(snoop_index, AspeedSMCState),
        VMSTATE_UINT8(snoop_dummies, AspeedSMCState),
        VMSTATE_END_OF_LIST()
    }
};

static Property aspeed_smc_properties[] = {
    DEFINE_PROP_UINT32("num-cs", AspeedSMCState, num_cs, 1),
    DEFINE_PROP_BOOL("inject-failure", AspeedSMCState, inject_failure, false),
    DEFINE_PROP_LINK("dram", AspeedSMCState, dram_mr,
                     TYPE_MEMORY_REGION, MemoryRegion *),
    DEFINE_PROP_END_OF_LIST(),
};

static void aspeed_smc_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    AspeedSMCClass *mc = ASPEED_SMC_CLASS(klass);

    dc->realize = aspeed_smc_realize;
    dc->reset = aspeed_smc_reset;
    device_class_set_props(dc, aspeed_smc_properties);
    dc->vmsd = &vmstate_aspeed_smc;
    mc->ctrl = data;
}

static const TypeInfo aspeed_smc_info = {
    .name = TYPE_ASPEED_SMC,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(AspeedSMCState),
    .class_size = sizeof(AspeedSMCClass),
    .abstract = true,
};

static void aspeed_smc_register_types(void)
{
    int i;

    type_register_static(&aspeed_smc_info);
    for (i = 0; i < ARRAY_SIZE(controllers); ++i) {
        TypeInfo ti = {
            .name = controllers[i].name,
            .parent = TYPE_ASPEED_SMC,
            .class_init = aspeed_smc_class_init,
            .class_data = (void *)&controllers[i],
        };
        type_register(&ti);
    }
}

type_init(aspeed_smc_register_types)