/*
 * ASPEED AST2400 SMC Controller (SPI Flash Only)
 *
 * Copyright (C) 2016 IBM Corp.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "hw/sysbus.h"
#include "migration/vmstate.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "exec/address-spaces.h"
#include "qemu/units.h"
#include "trace.h"

#include "hw/irq.h"
#include "hw/qdev-properties.h"
#include "hw/ssi/aspeed_smc.h"

/* CE Type Setting Register */
#define R_CONF (0x00 / 4)
#define CONF_LEGACY_DISABLE (1 << 31)
#define CONF_ENABLE_W4 20
#define CONF_ENABLE_W3 19
#define CONF_ENABLE_W2 18
#define CONF_ENABLE_W1 17
#define CONF_ENABLE_W0 16
#define CONF_FLASH_TYPE4 8
#define CONF_FLASH_TYPE3 6
#define CONF_FLASH_TYPE2 4
#define CONF_FLASH_TYPE1 2
#define CONF_FLASH_TYPE0 0
#define CONF_FLASH_TYPE_NOR 0x0
#define CONF_FLASH_TYPE_NAND 0x1
#define CONF_FLASH_TYPE_SPI 0x2 /* AST2600 is SPI only */

/* CE Control Register */
#define R_CE_CTRL (0x04 / 4)
#define CTRL_EXTENDED4 4 /* 32 bit addressing for SPI */
#define CTRL_EXTENDED3 3 /* 32 bit addressing for SPI */
#define CTRL_EXTENDED2 2 /* 32 bit addressing for SPI */
#define CTRL_EXTENDED1 1 /* 32 bit addressing for SPI */
#define CTRL_EXTENDED0 0 /* 32 bit addressing for SPI */

/* Interrupt Control and Status Register */
#define R_INTR_CTRL (0x08 / 4)
#define INTR_CTRL_DMA_STATUS (1 << 11)
#define INTR_CTRL_CMD_ABORT_STATUS (1 << 10)
#define INTR_CTRL_WRITE_PROTECT_STATUS (1 << 9)
#define INTR_CTRL_DMA_EN (1 << 3)
#define INTR_CTRL_CMD_ABORT_EN (1 << 2)
#define INTR_CTRL_WRITE_PROTECT_EN (1 << 1)

/* Command Control Register */
#define R_CE_CMD_CTRL (0x0C / 4)
#define CTRL_ADDR_BYTE0_DISABLE_SHIFT 4
#define CTRL_DATA_BYTE0_DISABLE_SHIFT 0

#define aspeed_smc_addr_byte_enabled(s, i)                                    \
    (!((s)->regs[R_CE_CMD_CTRL] & (1 << (CTRL_ADDR_BYTE0_DISABLE_SHIFT + (i)))))
#define aspeed_smc_data_byte_enabled(s, i)                                    \
    (!((s)->regs[R_CE_CMD_CTRL] & (1 << (CTRL_DATA_BYTE0_DISABLE_SHIFT + (i)))))

/* CEx Control Register */
#define R_CTRL0 (0x10 / 4)
#define CTRL_IO_QPI (1 << 31)
#define CTRL_IO_QUAD_DATA (1 << 30)
#define CTRL_IO_DUAL_DATA (1 << 29)
#define CTRL_IO_DUAL_ADDR_DATA (1 << 28) /* Includes dummies */
#define CTRL_IO_QUAD_ADDR_DATA (1 << 28) /* Includes dummies */
#define CTRL_CMD_SHIFT 16
#define CTRL_CMD_MASK 0xff
#define CTRL_DUMMY_HIGH_SHIFT 14
#define CTRL_AST2400_SPI_4BYTE (1 << 13)
#define CE_CTRL_CLOCK_FREQ_SHIFT 8
#define CE_CTRL_CLOCK_FREQ_MASK 0xf
#define CE_CTRL_CLOCK_FREQ(div)                                               \
    (((div) & CE_CTRL_CLOCK_FREQ_MASK) << CE_CTRL_CLOCK_FREQ_SHIFT)
#define CTRL_DUMMY_LOW_SHIFT 6 /* 2 bits [7:6] */
#define CTRL_CE_STOP_ACTIVE (1 << 2)
#define CTRL_CMD_MODE_MASK 0x3
#define CTRL_READMODE 0x0
#define CTRL_FREADMODE 0x1
#define CTRL_WRITEMODE 0x2
#define CTRL_USERMODE 0x3
#define R_CTRL1 (0x14 / 4)
#define R_CTRL2 (0x18 / 4)
#define R_CTRL3 (0x1C / 4)
#define R_CTRL4 (0x20 / 4)

/* CEx Segment Address Register */
#define R_SEG_ADDR0 (0x30 / 4)
#define SEG_END_SHIFT 24 /* 8MB units */
#define SEG_END_MASK 0xff
#define SEG_START_SHIFT 16 /* address bit [A29-A23] */
#define SEG_START_MASK 0xff
#define R_SEG_ADDR1 (0x34 / 4)
#define R_SEG_ADDR2 (0x38 / 4)
#define R_SEG_ADDR3 (0x3C / 4)
#define R_SEG_ADDR4 (0x40 / 4)

/* Misc Control Register #1 */
#define R_MISC_CTRL1 (0x50 / 4)

/* SPI dummy cycle data */
#define R_DUMMY_DATA (0x54 / 4)

/* DMA Control/Status Register */
#define R_DMA_CTRL (0x80 / 4)
#define DMA_CTRL_REQUEST (1 << 31)
#define DMA_CTRL_GRANT (1 << 30)
#define DMA_CTRL_DELAY_MASK 0xf
#define DMA_CTRL_DELAY_SHIFT 8
#define DMA_CTRL_FREQ_MASK 0xf
#define DMA_CTRL_FREQ_SHIFT 4
#define DMA_CTRL_CALIB (1 << 3)
#define DMA_CTRL_CKSUM (1 << 2)
#define DMA_CTRL_WRITE (1 << 1)
#define DMA_CTRL_ENABLE (1 << 0)

/* DMA Flash Side Address */
#define R_DMA_FLASH_ADDR (0x84 / 4)

/* DMA DRAM Side Address */
#define R_DMA_DRAM_ADDR (0x88 / 4)

/* DMA Length Register */
#define R_DMA_LEN (0x8C / 4)

/* Checksum Calculation Result */
#define R_DMA_CHECKSUM (0x90 / 4)

/* Read Timing Compensation Register */
#define R_TIMINGS (0x94 / 4)

/* SPI controller registers and bits (AST2400) */
#define R_SPI_CONF (0x00 / 4)
#define SPI_CONF_ENABLE_W0 0
#define R_SPI_CTRL0 (0x4 / 4)
#define R_SPI_MISC_CTRL (0x10 / 4)
#define R_SPI_TIMINGS (0x14 / 4)

#define ASPEED_SMC_R_SPI_MAX (0x20 / 4)
#define ASPEED_SMC_R_SMC_MAX (0x20 / 4)

#define ASPEED_SOC_SMC_FLASH_BASE 0x10000000
#define ASPEED_SOC_FMC_FLASH_BASE 0x20000000
#define ASPEED_SOC_SPI_FLASH_BASE 0x30000000
#define ASPEED_SOC_SPI2_FLASH_BASE 0x38000000

/*
 * DMA DRAM addresses should be 4 bytes aligned and the valid address
 * range is 0x40000000 - 0x5FFFFFFF (AST2400)
 *          0x80000000 - 0xBFFFFFFF (AST2500)
 *
 * DMA flash addresses should be 4 bytes aligned and the valid address
 * range is 0x20000000 - 0x2FFFFFFF.
 *
 * DMA length is from 4 bytes to 32MB
 *   0: 4 bytes
 *   0x7FFFFF: 32M bytes
 */
#define DMA_DRAM_ADDR(s, val) ((val) & (s)->ctrl->dma_dram_mask)
#define DMA_FLASH_ADDR(s, val) ((val) & (s)->ctrl->dma_flash_mask)
#define DMA_LENGTH(val) ((val) & 0x01FFFFFC)
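/*
 * Illustration (not from the datasheet, derived from the masks above):
 * DMA_LENGTH() only keeps bits [24:2], so a guest write of 0x12345677
 * to the DMA Length Register is stored as 0x00345674, i.e. truncated
 * to the 32MB range and rounded down to a 4-byte multiple.
 */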
/* Flash opcodes. */
#define SPI_OP_READ 0x03 /* Read data bytes (low frequency) */

#define SNOOP_OFF 0xFF
#define SNOOP_START 0x0

/*
 * Default segments mapping addresses and size for each peripheral per
 * controller. These can be changed when board is initialized with the
 * Segment Address Registers.
 */
static const AspeedSegments aspeed_segments_legacy[] = {
    { 0x10000000, 32 * 1024 * 1024 },
};

static const AspeedSegments aspeed_segments_fmc[] = {
    { 0x20000000, 64 * 1024 * 1024 }, /* start address is readonly */
    { 0x24000000, 32 * 1024 * 1024 },
    { 0x26000000, 32 * 1024 * 1024 },
    { 0x28000000, 32 * 1024 * 1024 },
    { 0x2A000000, 32 * 1024 * 1024 }
};

static const AspeedSegments aspeed_segments_spi[] = {
    { 0x30000000, 64 * 1024 * 1024 },
};

static const AspeedSegments aspeed_segments_ast2500_fmc[] = {
    { 0x20000000, 128 * 1024 * 1024 }, /* start address is readonly */
    { 0x28000000, 32 * 1024 * 1024 },
    { 0x2A000000, 32 * 1024 * 1024 },
};

static const AspeedSegments aspeed_segments_ast2500_spi1[] = {
    { 0x30000000, 32 * 1024 * 1024 }, /* start address is readonly */
    { 0x32000000, 96 * 1024 * 1024 }, /* end address is readonly */
};

static const AspeedSegments aspeed_segments_ast2500_spi2[] = {
    { 0x38000000, 32 * 1024 * 1024 }, /* start address is readonly */
    { 0x3A000000, 96 * 1024 * 1024 }, /* end address is readonly */
};
static uint32_t aspeed_smc_segment_to_reg(const AspeedSMCState *s,
                                          const AspeedSegments *seg);
static void aspeed_smc_reg_to_segment(const AspeedSMCState *s, uint32_t reg,
                                      AspeedSegments *seg);
static void aspeed_smc_dma_ctrl(AspeedSMCState *s, uint32_t value);

/*
 * AST2600 definitions
 */
#define ASPEED26_SOC_FMC_FLASH_BASE 0x20000000
#define ASPEED26_SOC_SPI_FLASH_BASE 0x30000000
#define ASPEED26_SOC_SPI2_FLASH_BASE 0x50000000

static const AspeedSegments aspeed_segments_ast2600_fmc[] = {
    { 0x0, 128 * MiB }, /* start address is readonly */
    { 128 * MiB, 128 * MiB }, /* default is disabled but needed for -kernel */
    { 0x0, 0 }, /* disabled */
};

static const AspeedSegments aspeed_segments_ast2600_spi1[] = {
    { 0x0, 128 * MiB }, /* start address is readonly */
    { 0x0, 0 }, /* disabled */
};

static const AspeedSegments aspeed_segments_ast2600_spi2[] = {
    { 0x0, 128 * MiB }, /* start address is readonly */
    { 0x0, 0 }, /* disabled */
    { 0x0, 0 }, /* disabled */
};

static uint32_t aspeed_2600_smc_segment_to_reg(const AspeedSMCState *s,
                                               const AspeedSegments *seg);
static void aspeed_2600_smc_reg_to_segment(const AspeedSMCState *s,
                                           uint32_t reg, AspeedSegments *seg);
static void aspeed_2600_smc_dma_ctrl(AspeedSMCState *s, uint32_t value);

#define ASPEED_SMC_FEATURE_DMA 0x1
#define ASPEED_SMC_FEATURE_DMA_GRANT 0x2

static inline bool aspeed_smc_has_dma(const AspeedSMCState *s)
{
    return !!(s->ctrl->features & ASPEED_SMC_FEATURE_DMA);
}

static const AspeedSMCController controllers[] = {
    {
        .name = "aspeed.smc-ast2400",
        .r_conf = R_CONF,
        .r_ce_ctrl = R_CE_CTRL,
        .r_ctrl0 = R_CTRL0,
        .r_timings = R_TIMINGS,
        .nregs_timings = 1,
        .conf_enable_w0 = CONF_ENABLE_W0,
        .max_peripherals = 1,
        .segments = aspeed_segments_legacy,
        .flash_window_base = ASPEED_SOC_SMC_FLASH_BASE,
        .flash_window_size = 0x6000000,
        .features = 0x0,
        .nregs = ASPEED_SMC_R_SMC_MAX,
        .segment_to_reg = aspeed_smc_segment_to_reg,
        .reg_to_segment = aspeed_smc_reg_to_segment,
        .dma_ctrl = aspeed_smc_dma_ctrl,
    }, {
        .name = "aspeed.fmc-ast2400",
        .r_conf = R_CONF,
        .r_ce_ctrl = R_CE_CTRL,
        .r_ctrl0 = R_CTRL0,
        .r_timings = R_TIMINGS,
        .nregs_timings = 1,
        .conf_enable_w0 = CONF_ENABLE_W0,
        .max_peripherals = 5,
        .segments = aspeed_segments_fmc,
        .flash_window_base = ASPEED_SOC_FMC_FLASH_BASE,
        .flash_window_size = 0x10000000,
        .features = ASPEED_SMC_FEATURE_DMA,
        .dma_flash_mask = 0x0FFFFFFC,
        .dma_dram_mask = 0x1FFFFFFC,
        .nregs = ASPEED_SMC_R_MAX,
        .segment_to_reg = aspeed_smc_segment_to_reg,
        .reg_to_segment = aspeed_smc_reg_to_segment,
        .dma_ctrl = aspeed_smc_dma_ctrl,
    }, {
        .name = "aspeed.spi1-ast2400",
        .r_conf = R_SPI_CONF,
        .r_ce_ctrl = 0xff,
        .r_ctrl0 = R_SPI_CTRL0,
        .r_timings = R_SPI_TIMINGS,
        .nregs_timings = 1,
        .conf_enable_w0 = SPI_CONF_ENABLE_W0,
        .max_peripherals = 1,
        .segments = aspeed_segments_spi,
        .flash_window_base = ASPEED_SOC_SPI_FLASH_BASE,
        .flash_window_size = 0x10000000,
        .features = 0x0,
        .nregs = ASPEED_SMC_R_SPI_MAX,
        .segment_to_reg = aspeed_smc_segment_to_reg,
        .reg_to_segment = aspeed_smc_reg_to_segment,
        .dma_ctrl = aspeed_smc_dma_ctrl,
    }, {
        .name = "aspeed.fmc-ast2500",
        .r_conf = R_CONF,
        .r_ce_ctrl = R_CE_CTRL,
        .r_ctrl0 = R_CTRL0,
        .r_timings = R_TIMINGS,
        .nregs_timings = 1,
        .conf_enable_w0 = CONF_ENABLE_W0,
        .max_peripherals = 3,
        .segments = aspeed_segments_ast2500_fmc,
        .flash_window_base = ASPEED_SOC_FMC_FLASH_BASE,
        .flash_window_size = 0x10000000,
        .features = ASPEED_SMC_FEATURE_DMA,
        .dma_flash_mask = 0x0FFFFFFC,
        .dma_dram_mask = 0x3FFFFFFC,
        .nregs = ASPEED_SMC_R_MAX,
        .segment_to_reg = aspeed_smc_segment_to_reg,
        .reg_to_segment = aspeed_smc_reg_to_segment,
        .dma_ctrl = aspeed_smc_dma_ctrl,
    }, {
        .name = "aspeed.spi1-ast2500",
        .r_conf = R_CONF,
        .r_ce_ctrl = R_CE_CTRL,
        .r_ctrl0 = R_CTRL0,
        .r_timings = R_TIMINGS,
        .nregs_timings = 1,
        .conf_enable_w0 = CONF_ENABLE_W0,
        .max_peripherals = 2,
        .segments = aspeed_segments_ast2500_spi1,
        .flash_window_base = ASPEED_SOC_SPI_FLASH_BASE,
        .flash_window_size = 0x8000000,
        .features = 0x0,
        .nregs = ASPEED_SMC_R_MAX,
        .segment_to_reg = aspeed_smc_segment_to_reg,
        .reg_to_segment = aspeed_smc_reg_to_segment,
        .dma_ctrl = aspeed_smc_dma_ctrl,
    }, {
        .name = "aspeed.spi2-ast2500",
        .r_conf = R_CONF,
        .r_ce_ctrl = R_CE_CTRL,
        .r_ctrl0 = R_CTRL0,
        .r_timings = R_TIMINGS,
        .nregs_timings = 1,
        .conf_enable_w0 = CONF_ENABLE_W0,
        .max_peripherals = 2,
        .segments = aspeed_segments_ast2500_spi2,
        .flash_window_base = ASPEED_SOC_SPI2_FLASH_BASE,
        .flash_window_size = 0x8000000,
        .features = 0x0,
        .nregs = ASPEED_SMC_R_MAX,
        .segment_to_reg = aspeed_smc_segment_to_reg,
        .reg_to_segment = aspeed_smc_reg_to_segment,
        .dma_ctrl = aspeed_smc_dma_ctrl,
    }, {
        .name = "aspeed.fmc-ast2600",
        .r_conf = R_CONF,
        .r_ce_ctrl = R_CE_CTRL,
        .r_ctrl0 = R_CTRL0,
        .r_timings = R_TIMINGS,
        .nregs_timings = 1,
        .conf_enable_w0 = CONF_ENABLE_W0,
        .max_peripherals = 3,
        .segments = aspeed_segments_ast2600_fmc,
        .flash_window_base = ASPEED26_SOC_FMC_FLASH_BASE,
        .flash_window_size = 0x10000000,
        .features = ASPEED_SMC_FEATURE_DMA,
        .dma_flash_mask = 0x0FFFFFFC,
        .dma_dram_mask = 0x3FFFFFFC,
        .nregs = ASPEED_SMC_R_MAX,
        .segment_to_reg = aspeed_2600_smc_segment_to_reg,
        .reg_to_segment = aspeed_2600_smc_reg_to_segment,
        .dma_ctrl = aspeed_2600_smc_dma_ctrl,
    }, {
        .name = "aspeed.spi1-ast2600",
        .r_conf = R_CONF,
        .r_ce_ctrl = R_CE_CTRL,
        .r_ctrl0 = R_CTRL0,
        .r_timings = R_TIMINGS,
        .nregs_timings = 2,
        .conf_enable_w0 = CONF_ENABLE_W0,
        .max_peripherals = 2,
        .segments = aspeed_segments_ast2600_spi1,
        .flash_window_base = ASPEED26_SOC_SPI_FLASH_BASE,
        .flash_window_size = 0x10000000,
        .features = ASPEED_SMC_FEATURE_DMA |
                    ASPEED_SMC_FEATURE_DMA_GRANT,
        .dma_flash_mask = 0x0FFFFFFC,
        .dma_dram_mask = 0x3FFFFFFC,
        .nregs = ASPEED_SMC_R_MAX,
        .segment_to_reg = aspeed_2600_smc_segment_to_reg,
        .reg_to_segment = aspeed_2600_smc_reg_to_segment,
        .dma_ctrl = aspeed_2600_smc_dma_ctrl,
    }, {
        .name = "aspeed.spi2-ast2600",
        .r_conf = R_CONF,
        .r_ce_ctrl = R_CE_CTRL,
        .r_ctrl0 = R_CTRL0,
        .r_timings = R_TIMINGS,
        .nregs_timings = 3,
        .conf_enable_w0 = CONF_ENABLE_W0,
        .max_peripherals = 3,
        .segments = aspeed_segments_ast2600_spi2,
        .flash_window_base = ASPEED26_SOC_SPI2_FLASH_BASE,
        .flash_window_size = 0x10000000,
        .features = ASPEED_SMC_FEATURE_DMA |
                    ASPEED_SMC_FEATURE_DMA_GRANT,
        .dma_flash_mask = 0x0FFFFFFC,
        .dma_dram_mask = 0x3FFFFFFC,
        .nregs = ASPEED_SMC_R_MAX,
        .segment_to_reg = aspeed_2600_smc_segment_to_reg,
        .reg_to_segment = aspeed_2600_smc_reg_to_segment,
        .dma_ctrl = aspeed_2600_smc_dma_ctrl,
    },
};

/*
 * The Segment Registers of the AST2400 and AST2500 have an 8MB
 * unit. The address range of a flash SPI peripheral is encoded with
 * absolute addresses which should be part of the overall controller
 * window.
 */
static uint32_t aspeed_smc_segment_to_reg(const AspeedSMCState *s,
                                          const AspeedSegments *seg)
{
    uint32_t reg = 0;
    reg |= ((seg->addr >> 23) & SEG_START_MASK) << SEG_START_SHIFT;
    reg |= (((seg->addr + seg->size) >> 23) & SEG_END_MASK) << SEG_END_SHIFT;
    return reg;
}

static void aspeed_smc_reg_to_segment(const AspeedSMCState *s,
                                      uint32_t reg, AspeedSegments *seg)
{
    seg->addr = ((reg >> SEG_START_SHIFT) & SEG_START_MASK) << 23;
    seg->size = (((reg >> SEG_END_SHIFT) & SEG_END_MASK) << 23) - seg->addr;
}
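/*
 * Worked example (derived from the default aspeed_segments_fmc table
 * above): the AST2400 FMC CS0 segment [ 0x20000000 - 0x24000000 ] is
 * encoded as start = 0x20000000 >> 23 = 0x40 and end = 0x24000000 >> 23
 * = 0x48, i.e. a Segment Address Register value of 0x48400000.
 */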
/*
 * The Segment Registers of the AST2600 have a 1MB unit. The address
 * range of a flash SPI peripheral is encoded with offsets in the overall
 * controller window. The previous SoC AST2400 and AST2500 used
 * absolute addresses. Only bits [27:20] are relevant and the end
 * address is an upper bound limit.
 */
#define AST2600_SEG_ADDR_MASK 0x0ff00000

static uint32_t aspeed_2600_smc_segment_to_reg(const AspeedSMCState *s,
                                               const AspeedSegments *seg)
{
    uint32_t reg = 0;

    /* Disabled segments have a nil register */
    if (!seg->size) {
        return 0;
    }

    reg |= (seg->addr & AST2600_SEG_ADDR_MASK) >> 16; /* start offset */
    reg |= (seg->addr + seg->size - 1) & AST2600_SEG_ADDR_MASK; /* end offset */
    return reg;
}

static void aspeed_2600_smc_reg_to_segment(const AspeedSMCState *s,
                                           uint32_t reg, AspeedSegments *seg)
{
    uint32_t start_offset = (reg << 16) & AST2600_SEG_ADDR_MASK;
    uint32_t end_offset = reg & AST2600_SEG_ADDR_MASK;

    if (reg) {
        seg->addr = s->ctrl->flash_window_base + start_offset;
        seg->size = end_offset + MiB - start_offset;
    } else {
        seg->addr = s->ctrl->flash_window_base;
        seg->size = 0;
    }
}
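/*
 * Worked example (derived from the default aspeed_segments_ast2600_fmc
 * table above): the 128MB CS0 segment at offset 0x0 is encoded with a
 * start field of 0x0000 and an end field of (128 * MiB - 1) &
 * AST2600_SEG_ADDR_MASK = 0x07F00000, i.e. a register value of
 * 0x07F00000. Decoding adds flash_window_base back to the start offset
 * and recovers the size as end_offset + MiB - start_offset.
 */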
static bool aspeed_smc_flash_overlap(const AspeedSMCState *s,
                                     const AspeedSegments *new,
                                     int cs)
{
    AspeedSegments seg;
    int i;

    for (i = 0; i < s->ctrl->max_peripherals; i++) {
        if (i == cs) {
            continue;
        }

        s->ctrl->reg_to_segment(s, s->regs[R_SEG_ADDR0 + i], &seg);

        if (new->addr + new->size > seg.addr &&
            new->addr < seg.addr + seg.size) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: new segment CS%d [ 0x%"
                          HWADDR_PRIx" - 0x%"HWADDR_PRIx" ] overlaps with "
                          "CS%d [ 0x%"HWADDR_PRIx" - 0x%"HWADDR_PRIx" ]\n",
                          s->ctrl->name, cs, new->addr, new->addr + new->size,
                          i, seg.addr, seg.addr + seg.size);
            return true;
        }
    }
    return false;
}

static void aspeed_smc_flash_set_segment_region(AspeedSMCState *s, int cs,
                                                uint64_t regval)
{
    AspeedSMCFlash *fl = &s->flashes[cs];
    AspeedSegments seg;

    s->ctrl->reg_to_segment(s, regval, &seg);

    memory_region_transaction_begin();
    memory_region_set_size(&fl->mmio, seg.size);
    memory_region_set_address(&fl->mmio, seg.addr - s->ctrl->flash_window_base);
    memory_region_set_enabled(&fl->mmio, !!seg.size);
    memory_region_transaction_commit();

    s->regs[R_SEG_ADDR0 + cs] = regval;
}

static void aspeed_smc_flash_set_segment(AspeedSMCState *s, int cs,
                                         uint64_t new)
{
    AspeedSegments seg;

    s->ctrl->reg_to_segment(s, new, &seg);

    trace_aspeed_smc_flash_set_segment(cs, new, seg.addr, seg.addr + seg.size);

    /* The start address of CS0 is read-only */
    if (cs == 0 && seg.addr != s->ctrl->flash_window_base) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: Tried to change CS0 start address to 0x%"
                      HWADDR_PRIx "\n", s->ctrl->name, seg.addr);
        seg.addr = s->ctrl->flash_window_base;
        new = s->ctrl->segment_to_reg(s, &seg);
    }

    /*
     * The end address of the AST2500 spi controllers is also
     * read-only.
     */
    if ((s->ctrl->segments == aspeed_segments_ast2500_spi1 ||
         s->ctrl->segments == aspeed_segments_ast2500_spi2) &&
        cs == s->ctrl->max_peripherals &&
        seg.addr + seg.size != s->ctrl->segments[cs].addr +
                               s->ctrl->segments[cs].size) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: Tried to change CS%d end address to 0x%"
                      HWADDR_PRIx "\n", s->ctrl->name, cs, seg.addr + seg.size);
        seg.size = s->ctrl->segments[cs].addr + s->ctrl->segments[cs].size -
                   seg.addr;
        new = s->ctrl->segment_to_reg(s, &seg);
    }

    /* Keep the segment in the overall flash window */
    if (seg.size &&
        (seg.addr + seg.size <= s->ctrl->flash_window_base ||
         seg.addr > s->ctrl->flash_window_base + s->ctrl->flash_window_size)) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: new segment for CS%d is invalid : "
                      "[ 0x%"HWADDR_PRIx" - 0x%"HWADDR_PRIx" ]\n",
                      s->ctrl->name, cs, seg.addr, seg.addr + seg.size);
        return;
    }

    /* Check start address vs. alignment */
    if (seg.size && !QEMU_IS_ALIGNED(seg.addr, seg.size)) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: new segment for CS%d is not "
                      "aligned : [ 0x%"HWADDR_PRIx" - 0x%"HWADDR_PRIx" ]\n",
                      s->ctrl->name, cs, seg.addr, seg.addr + seg.size);
    }

    /* And segments should not overlap (in the specs) */
    aspeed_smc_flash_overlap(s, &seg, cs);

    /* All should be fine now to move the region */
    aspeed_smc_flash_set_segment_region(s, cs, new);
}

static uint64_t aspeed_smc_flash_default_read(void *opaque, hwaddr addr,
                                              unsigned size)
{
    qemu_log_mask(LOG_GUEST_ERROR, "%s: To 0x%" HWADDR_PRIx " of size %u\n",
                  __func__, addr, size);
    return 0;
}

static void aspeed_smc_flash_default_write(void *opaque, hwaddr addr,
                                           uint64_t data, unsigned size)
{
    qemu_log_mask(LOG_GUEST_ERROR, "%s: To 0x%" HWADDR_PRIx " of size %u: 0x%"
                  PRIx64 "\n", __func__, addr, size, data);
}

static const MemoryRegionOps aspeed_smc_flash_default_ops = {
    .read = aspeed_smc_flash_default_read,
    .write = aspeed_smc_flash_default_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 4,
    },
};

static inline int aspeed_smc_flash_mode(const AspeedSMCFlash *fl)
{
    const AspeedSMCState *s = fl->controller;

    return s->regs[s->r_ctrl0 + fl->id] & CTRL_CMD_MODE_MASK;
}

static inline bool aspeed_smc_is_writable(const AspeedSMCFlash *fl)
{
    const AspeedSMCState *s = fl->controller;

    return s->regs[s->r_conf] & (1 << (s->conf_enable_w0 + fl->id));
}

static inline int aspeed_smc_flash_cmd(const AspeedSMCFlash *fl)
{
    const AspeedSMCState *s = fl->controller;
    int cmd = (s->regs[s->r_ctrl0 + fl->id] >> CTRL_CMD_SHIFT) & CTRL_CMD_MASK;

    /*
     * In read mode, the default SPI command is READ (0x3). In other
     * modes, the command should necessarily be defined
     *
     * TODO: add support for READ4 (0x13) on AST2600
     */
    if (aspeed_smc_flash_mode(fl) == CTRL_READMODE) {
        cmd = SPI_OP_READ;
    }

    if (!cmd) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: no command defined for mode %d\n",
                      __func__, aspeed_smc_flash_mode(fl));
    }

    return cmd;
}
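/*
 * Illustration (hypothetical register value, not a reset default): a CE
 * Control Register value of 0x000B0041 selects fast read mode (bits
 * [1:0] = 0x1), SPI command 0x0B (bits [23:16]) and one dummy byte
 * (bits [7:6] = 0x1), which aspeed_smc_flash_dummies() below turns into
 * 8 dummy cycles.
 */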
static inline int aspeed_smc_flash_is_4byte(const AspeedSMCFlash *fl)
{
    const AspeedSMCState *s = fl->controller;

    if (s->ctrl->segments == aspeed_segments_spi) {
        return s->regs[s->r_ctrl0] & CTRL_AST2400_SPI_4BYTE;
    } else {
        return s->regs[s->r_ce_ctrl] & (1 << (CTRL_EXTENDED0 + fl->id));
    }
}

static void aspeed_smc_flash_do_select(AspeedSMCFlash *fl, bool unselect)
{
    AspeedSMCState *s = fl->controller;

    trace_aspeed_smc_flash_select(fl->id, unselect ? "un" : "");

    qemu_set_irq(s->cs_lines[fl->id], unselect);
}

static void aspeed_smc_flash_select(AspeedSMCFlash *fl)
{
    aspeed_smc_flash_do_select(fl, false);
}

static void aspeed_smc_flash_unselect(AspeedSMCFlash *fl)
{
    aspeed_smc_flash_do_select(fl, true);
}

static uint32_t aspeed_smc_check_segment_addr(const AspeedSMCFlash *fl,
                                              uint32_t addr)
{
    const AspeedSMCState *s = fl->controller;
    AspeedSegments seg;

    s->ctrl->reg_to_segment(s, s->regs[R_SEG_ADDR0 + fl->id], &seg);
    if ((addr % seg.size) != addr) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid address 0x%08x for CS%d segment : "
                      "[ 0x%"HWADDR_PRIx" - 0x%"HWADDR_PRIx" ]\n",
                      s->ctrl->name, addr, fl->id, seg.addr,
                      seg.addr + seg.size);
        addr %= seg.size;
    }

    return addr;
}

static int aspeed_smc_flash_dummies(const AspeedSMCFlash *fl)
{
    const AspeedSMCState *s = fl->controller;
    uint32_t r_ctrl0 = s->regs[s->r_ctrl0 + fl->id];
    uint32_t dummy_high = (r_ctrl0 >> CTRL_DUMMY_HIGH_SHIFT) & 0x1;
    uint32_t dummy_low = (r_ctrl0 >> CTRL_DUMMY_LOW_SHIFT) & 0x3;
    uint32_t dummies = ((dummy_high << 2) | dummy_low) * 8;

    if (r_ctrl0 & CTRL_IO_DUAL_ADDR_DATA) {
        dummies /= 2;
    }

    return dummies;
}
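/*
 * Note on the halving above: e.g. two dummy bytes (16 cycles on a
 * single line) are reduced to 8 cycles when CTRL_IO_DUAL_ADDR_DATA is
 * set.
 */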
static void aspeed_smc_flash_setup(AspeedSMCFlash *fl, uint32_t addr)
{
    const AspeedSMCState *s = fl->controller;
    uint8_t cmd = aspeed_smc_flash_cmd(fl);
    int i = aspeed_smc_flash_is_4byte(fl) ? 4 : 3;

    /* Flash access can not exceed CS segment */
    addr = aspeed_smc_check_segment_addr(fl, addr);

    ssi_transfer(s->spi, cmd);
    while (i--) {
        if (aspeed_smc_addr_byte_enabled(s, i)) {
            ssi_transfer(s->spi, (addr >> (i * 8)) & 0xff);
        }
    }

    /*
     * Use fake transfers to model dummy bytes. The value should
     * be configured to some non-zero value in fast read mode and
     * zero in read mode. But, as the HW allows inconsistent
     * settings, let's check for fast read mode.
     */
    if (aspeed_smc_flash_mode(fl) == CTRL_FREADMODE) {
        for (i = 0; i < aspeed_smc_flash_dummies(fl); i++) {
            ssi_transfer(fl->controller->spi, s->regs[R_DUMMY_DATA] & 0xff);
        }
    }
}

static uint64_t aspeed_smc_flash_read(void *opaque, hwaddr addr, unsigned size)
{
    AspeedSMCFlash *fl = opaque;
    AspeedSMCState *s = fl->controller;
    uint64_t ret = 0;
    int i;

    switch (aspeed_smc_flash_mode(fl)) {
    case CTRL_USERMODE:
        for (i = 0; i < size; i++) {
            ret |= ssi_transfer(s->spi, 0x0) << (8 * i);
        }
        break;
    case CTRL_READMODE:
    case CTRL_FREADMODE:
        aspeed_smc_flash_select(fl);
        aspeed_smc_flash_setup(fl, addr);

        for (i = 0; i < size; i++) {
            ret |= ssi_transfer(s->spi, 0x0) << (8 * i);
        }

        aspeed_smc_flash_unselect(fl);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid flash mode %d\n",
                      __func__, aspeed_smc_flash_mode(fl));
    }

    trace_aspeed_smc_flash_read(fl->id, addr, size, ret,
                                aspeed_smc_flash_mode(fl));
    return ret;
}

/*
 * TODO (clg@kaod.org): stolen from xilinx_spips.c. Should move to a
 * common include header.
 */
typedef enum {
    READ = 0x3, READ_4 = 0x13,
    FAST_READ = 0xb, FAST_READ_4 = 0x0c,
    DOR = 0x3b, DOR_4 = 0x3c,
    QOR = 0x6b, QOR_4 = 0x6c,
    DIOR = 0xbb, DIOR_4 = 0xbc,
    QIOR = 0xeb, QIOR_4 = 0xec,

    PP = 0x2, PP_4 = 0x12,
    DPP = 0xa2,
    QPP = 0x32, QPP_4 = 0x34,
} FlashCMD;

static int aspeed_smc_num_dummies(uint8_t command)
{
    switch (command) { /* check for dummies */
    case READ: /* no dummy bytes/cycles */
    case PP:
    case DPP:
    case QPP:
    case READ_4:
    case PP_4:
    case QPP_4:
        return 0;
    case FAST_READ:
    case DOR:
    case QOR:
    case FAST_READ_4:
    case DOR_4:
    case QOR_4:
        return 1;
    case DIOR:
    case DIOR_4:
        return 2;
    case QIOR:
    case QIOR_4:
        return 4;
    default:
        return -1;
    }
}

static bool aspeed_smc_do_snoop(AspeedSMCFlash *fl, uint64_t data,
                                unsigned size)
{
    AspeedSMCState *s = fl->controller;
    uint8_t addr_width = aspeed_smc_flash_is_4byte(fl) ? 4 : 3;

    trace_aspeed_smc_do_snoop(fl->id, s->snoop_index, s->snoop_dummies,
                              (uint8_t) data & 0xff);

    if (s->snoop_index == SNOOP_OFF) {
        return false; /* Do nothing */

    } else if (s->snoop_index == SNOOP_START) {
        uint8_t cmd = data & 0xff;
        int ndummies = aspeed_smc_num_dummies(cmd);

        /*
         * No dummy cycles are expected with the current command. Turn
         * off snooping and let the transfer proceed normally.
         */
        if (ndummies <= 0) {
            s->snoop_index = SNOOP_OFF;
            return false;
        }

        s->snoop_dummies = ndummies * 8;

    } else if (s->snoop_index >= addr_width + 1) {

        /* The SPI transfer has reached the dummy cycles sequence */
        for (; s->snoop_dummies; s->snoop_dummies--) {
            ssi_transfer(s->spi, s->regs[R_DUMMY_DATA] & 0xff);
        }

        /* If no more dummy cycles are expected, turn off snooping */
        if (!s->snoop_dummies) {
            s->snoop_index = SNOOP_OFF;
        } else {
            s->snoop_index += size;
        }

        /*
         * Dummy cycles have been faked already. Ignore the current
         * SPI transfer
         */
        return true;
    }

    s->snoop_index += size;
    return false;
}
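/*
 * Rough user mode sequence as modeled above: the first byte written by
 * the guest is snooped as the SPI command; if that command expects
 * dummy cycles, the state machine counts the command and address bytes
 * and then injects the dummy transfers itself (using R_DUMMY_DATA)
 * while discarding the guest's own dummy byte writes, so the flash
 * model sees command, address, dummies and then data in order.
 */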
static void aspeed_smc_flash_write(void *opaque, hwaddr addr, uint64_t data,
                                   unsigned size)
{
    AspeedSMCFlash *fl = opaque;
    AspeedSMCState *s = fl->controller;
    int i;

    trace_aspeed_smc_flash_write(fl->id, addr, size, data,
                                 aspeed_smc_flash_mode(fl));

    if (!aspeed_smc_is_writable(fl)) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: flash is not writable at 0x%"
                      HWADDR_PRIx "\n", __func__, addr);
        return;
    }

    switch (aspeed_smc_flash_mode(fl)) {
    case CTRL_USERMODE:
        if (aspeed_smc_do_snoop(fl, data, size)) {
            break;
        }

        for (i = 0; i < size; i++) {
            ssi_transfer(s->spi, (data >> (8 * i)) & 0xff);
        }
        break;
    case CTRL_WRITEMODE:
        aspeed_smc_flash_select(fl);
        aspeed_smc_flash_setup(fl, addr);

        for (i = 0; i < size; i++) {
            ssi_transfer(s->spi, (data >> (8 * i)) & 0xff);
        }

        aspeed_smc_flash_unselect(fl);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid flash mode %d\n",
                      __func__, aspeed_smc_flash_mode(fl));
    }
}

static const MemoryRegionOps aspeed_smc_flash_ops = {
    .read = aspeed_smc_flash_read,
    .write = aspeed_smc_flash_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 4,
    },
};

static void aspeed_smc_flash_update_ctrl(AspeedSMCFlash *fl, uint32_t value)
{
    AspeedSMCState *s = fl->controller;
    bool unselect;

    /* User mode selects the CS, other modes unselect */
    unselect = (value & CTRL_CMD_MODE_MASK) != CTRL_USERMODE;

    /* A change of CTRL_CE_STOP_ACTIVE from 0 to 1 unselects the CS */
    if (!(s->regs[s->r_ctrl0 + fl->id] & CTRL_CE_STOP_ACTIVE) &&
        value & CTRL_CE_STOP_ACTIVE) {
        unselect = true;
    }

    s->regs[s->r_ctrl0 + fl->id] = value;

    s->snoop_index = unselect ? SNOOP_OFF : SNOOP_START;

    aspeed_smc_flash_do_select(fl, unselect);
}

static void aspeed_smc_reset(DeviceState *d)
{
    AspeedSMCState *s = ASPEED_SMC(d);
    int i;

    memset(s->regs, 0, sizeof s->regs);

    /* Unselect all peripherals */
    for (i = 0; i < s->num_cs; ++i) {
        s->regs[s->r_ctrl0 + i] |= CTRL_CE_STOP_ACTIVE;
        qemu_set_irq(s->cs_lines[i], true);
    }

    /* setup the default segment register values and regions for all */
    for (i = 0; i < s->ctrl->max_peripherals; ++i) {
        aspeed_smc_flash_set_segment_region(s, i,
                    s->ctrl->segment_to_reg(s, &s->ctrl->segments[i]));
    }

    /* HW strapping flash type for the AST2600 controllers */
    if (s->ctrl->segments == aspeed_segments_ast2600_fmc) {
        /* flash type is fixed to SPI for all */
        s->regs[s->r_conf] |= (CONF_FLASH_TYPE_SPI << CONF_FLASH_TYPE0);
        s->regs[s->r_conf] |= (CONF_FLASH_TYPE_SPI << CONF_FLASH_TYPE1);
        s->regs[s->r_conf] |= (CONF_FLASH_TYPE_SPI << CONF_FLASH_TYPE2);
    }

    /* HW strapping flash type for FMC controllers */
    if (s->ctrl->segments == aspeed_segments_ast2500_fmc) {
        /* flash type is fixed to SPI for CE0 and CE1 */
        s->regs[s->r_conf] |= (CONF_FLASH_TYPE_SPI << CONF_FLASH_TYPE0);
        s->regs[s->r_conf] |= (CONF_FLASH_TYPE_SPI << CONF_FLASH_TYPE1);
    }
    /* HW strapping for AST2400 FMC controllers (SCU70). Let's use the
     * configuration of the palmetto-bmc machine */
    if (s->ctrl->segments == aspeed_segments_fmc) {
        s->regs[s->r_conf] |= (CONF_FLASH_TYPE_SPI << CONF_FLASH_TYPE0);
    }

    s->snoop_index = SNOOP_OFF;
    s->snoop_dummies = 0;
}

static uint64_t aspeed_smc_read(void *opaque, hwaddr addr, unsigned int size)
{
    AspeedSMCState *s = ASPEED_SMC(opaque);

    addr >>= 2;

    if (addr == s->r_conf ||
        (addr >= s->r_timings &&
         addr < s->r_timings + s->ctrl->nregs_timings) ||
        addr == s->r_ce_ctrl ||
        addr == R_CE_CMD_CTRL ||
        addr == R_INTR_CTRL ||
        addr == R_DUMMY_DATA ||
        (aspeed_smc_has_dma(s) && addr == R_DMA_CTRL) ||
        (aspeed_smc_has_dma(s) && addr == R_DMA_FLASH_ADDR) ||
        (aspeed_smc_has_dma(s) && addr == R_DMA_DRAM_ADDR) ||
        (aspeed_smc_has_dma(s) && addr == R_DMA_LEN) ||
        (aspeed_smc_has_dma(s) && addr == R_DMA_CHECKSUM) ||
        (addr >= R_SEG_ADDR0 &&
         addr < R_SEG_ADDR0 + s->ctrl->max_peripherals) ||
        (addr >= s->r_ctrl0 && addr < s->r_ctrl0 + s->ctrl->max_peripherals)) {

        trace_aspeed_smc_read(addr, size, s->regs[addr]);

        return s->regs[addr];
    } else {
        qemu_log_mask(LOG_UNIMP, "%s: not implemented: 0x%" HWADDR_PRIx "\n",
                      __func__, addr);
        return -1;
    }
}

static uint8_t aspeed_smc_hclk_divisor(uint8_t hclk_mask)
{
    /* HCLK/1 .. HCLK/16 */
    const uint8_t hclk_divisors[] = {
        15, 7, 14, 6, 13, 5, 12, 4, 11, 3, 10, 2, 9, 1, 8, 0
    };
    int i;

    for (i = 0; i < ARRAY_SIZE(hclk_divisors); i++) {
        if (hclk_mask == hclk_divisors[i]) {
            return i + 1;
        }
    }

    qemu_log_mask(LOG_GUEST_ERROR, "invalid HCLK mask %x", hclk_mask);
    return 0;
}
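/*
 * The table above maps the 4-bit clock selection field to a divisor:
 * e.g. a mask of 0xf encodes HCLK/1, 0x7 encodes HCLK/2 and 0x0
 * encodes HCLK/16 (the return value is the divisor itself).
 */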
/*
 * When doing calibration, the SPI clock rate in the CE0 Control
 * Register and the read delay cycles in the Read Timing Compensation
 * Register are set using bit[11:4] of the DMA Control Register.
 */
static void aspeed_smc_dma_calibration(AspeedSMCState *s)
{
    uint8_t delay =
        (s->regs[R_DMA_CTRL] >> DMA_CTRL_DELAY_SHIFT) & DMA_CTRL_DELAY_MASK;
    uint8_t hclk_mask =
        (s->regs[R_DMA_CTRL] >> DMA_CTRL_FREQ_SHIFT) & DMA_CTRL_FREQ_MASK;
    uint8_t hclk_div = aspeed_smc_hclk_divisor(hclk_mask);
    uint32_t hclk_shift = (hclk_div - 1) << 2;
    uint8_t cs;

    /*
     * The Read Timing Compensation Register values apply to all CS on
     * the SPI bus and only HCLK/1 - HCLK/5 can have tunable delays
     */
    if (hclk_div && hclk_div < 6) {
        s->regs[s->r_timings] &= ~(0xf << hclk_shift);
        s->regs[s->r_timings] |= delay << hclk_shift;
    }

    /*
     * TODO: compute the CS from the DMA address and the segment
     * registers. This is not really a problem for now because the
     * Timing Register values apply to all CS and software uses CS0 to
     * do calibration.
     */
    cs = 0;
    s->regs[s->r_ctrl0 + cs] &=
        ~(CE_CTRL_CLOCK_FREQ_MASK << CE_CTRL_CLOCK_FREQ_SHIFT);
    s->regs[s->r_ctrl0 + cs] |= CE_CTRL_CLOCK_FREQ(hclk_div);
}

/*
 * Emulate read errors in the DMA Checksum Register for high
 * frequencies and optimistic settings of the Read Timing Compensation
 * Register. This will help in tuning the SPI timing calibration
 * algorithm.
 */
static bool aspeed_smc_inject_read_failure(AspeedSMCState *s)
{
    uint8_t delay =
        (s->regs[R_DMA_CTRL] >> DMA_CTRL_DELAY_SHIFT) & DMA_CTRL_DELAY_MASK;
    uint8_t hclk_mask =
        (s->regs[R_DMA_CTRL] >> DMA_CTRL_FREQ_SHIFT) & DMA_CTRL_FREQ_MASK;

    /*
     * Typical values of a palmetto-bmc machine.
     */
    switch (aspeed_smc_hclk_divisor(hclk_mask)) {
    case 4 ... 16:
        return false;
    case 3: /* at least one HCLK cycle delay */
        return (delay & 0x7) < 1;
    case 2: /* at least two HCLK cycle delay */
        return (delay & 0x7) < 2;
    case 1: /* (> 100MHz) is above the max freq of the controller */
        return true;
    default:
        g_assert_not_reached();
    }
}

/*
 * Accumulate the result of the reads to provide a checksum that will
 * be used to validate the read timing settings.
 */
static void aspeed_smc_dma_checksum(AspeedSMCState *s)
{
    MemTxResult result;
    uint32_t data;

    if (s->regs[R_DMA_CTRL] & DMA_CTRL_WRITE) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid direction for DMA checksum\n", __func__);
        return;
    }

    if (s->regs[R_DMA_CTRL] & DMA_CTRL_CALIB) {
        aspeed_smc_dma_calibration(s);
    }

    while (s->regs[R_DMA_LEN]) {
        data = address_space_ldl_le(&s->flash_as, s->regs[R_DMA_FLASH_ADDR],
                                    MEMTXATTRS_UNSPECIFIED, &result);
        if (result != MEMTX_OK) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: Flash read failed @%08x\n",
                          __func__, s->regs[R_DMA_FLASH_ADDR]);
            return;
        }
        trace_aspeed_smc_dma_checksum(s->regs[R_DMA_FLASH_ADDR], data);

        /*
         * When the DMA is on-going, the DMA registers are updated
         * with the current working addresses and length.
         */
        s->regs[R_DMA_CHECKSUM] += data;
        s->regs[R_DMA_FLASH_ADDR] += 4;
        s->regs[R_DMA_LEN] -= 4;
    }

    if (s->inject_failure && aspeed_smc_inject_read_failure(s)) {
        s->regs[R_DMA_CHECKSUM] = 0xbadc0de;
    }

}
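/*
 * Worked example of the calibration path above: firmware runs a
 * checksum DMA with DMA_CTRL_CALIB | DMA_CTRL_CKSUM and a (freq, delay)
 * pair. With a freq field of 0x6 and a delay of 0x2, the model selects
 * HCLK/4, stores the delay in bits [15:12] of the Read Timing
 * Compensation register, sets the CE0 clock divisor to 4 and then sums
 * the flash words into R_DMA_CHECKSUM for the firmware to compare.
 */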
static void aspeed_smc_dma_rw(AspeedSMCState *s)
{
    MemTxResult result;
    uint32_t data;

    trace_aspeed_smc_dma_rw(s->regs[R_DMA_CTRL] & DMA_CTRL_WRITE ?
                            "write" : "read",
                            s->regs[R_DMA_FLASH_ADDR],
                            s->regs[R_DMA_DRAM_ADDR],
                            s->regs[R_DMA_LEN]);
    while (s->regs[R_DMA_LEN]) {
        if (s->regs[R_DMA_CTRL] & DMA_CTRL_WRITE) {
            data = address_space_ldl_le(&s->dram_as, s->regs[R_DMA_DRAM_ADDR],
                                        MEMTXATTRS_UNSPECIFIED, &result);
            if (result != MEMTX_OK) {
                qemu_log_mask(LOG_GUEST_ERROR, "%s: DRAM read failed @%08x\n",
                              __func__, s->regs[R_DMA_DRAM_ADDR]);
                return;
            }

            address_space_stl_le(&s->flash_as, s->regs[R_DMA_FLASH_ADDR],
                                 data, MEMTXATTRS_UNSPECIFIED, &result);
            if (result != MEMTX_OK) {
                qemu_log_mask(LOG_GUEST_ERROR, "%s: Flash write failed @%08x\n",
                              __func__, s->regs[R_DMA_FLASH_ADDR]);
                return;
            }
        } else {
            data = address_space_ldl_le(&s->flash_as, s->regs[R_DMA_FLASH_ADDR],
                                        MEMTXATTRS_UNSPECIFIED, &result);
            if (result != MEMTX_OK) {
                qemu_log_mask(LOG_GUEST_ERROR, "%s: Flash read failed @%08x\n",
                              __func__, s->regs[R_DMA_FLASH_ADDR]);
                return;
            }

            address_space_stl_le(&s->dram_as, s->regs[R_DMA_DRAM_ADDR],
                                 data, MEMTXATTRS_UNSPECIFIED, &result);
            if (result != MEMTX_OK) {
                qemu_log_mask(LOG_GUEST_ERROR, "%s: DRAM write failed @%08x\n",
                              __func__, s->regs[R_DMA_DRAM_ADDR]);
                return;
            }
        }

        /*
         * When the DMA is on-going, the DMA registers are updated
         * with the current working addresses and length.
         */
        s->regs[R_DMA_FLASH_ADDR] += 4;
        s->regs[R_DMA_DRAM_ADDR] += 4;
        s->regs[R_DMA_LEN] -= 4;
        s->regs[R_DMA_CHECKSUM] += data;
    }
}

static void aspeed_smc_dma_stop(AspeedSMCState *s)
{
    /*
     * When the DMA is disabled, INTR_CTRL_DMA_STATUS=0 means the
     * engine is idle
     */
    s->regs[R_INTR_CTRL] &= ~INTR_CTRL_DMA_STATUS;
    s->regs[R_DMA_CHECKSUM] = 0;

    /*
     * Lower the DMA irq in any case. The IRQ control register could
     * have been cleared before disabling the DMA.
     */
    qemu_irq_lower(s->irq);
}
/*
 * When INTR_CTRL_DMA_STATUS=1, the DMA has completed and a new DMA
 * can start even if the result of the previous was not collected.
 */
static bool aspeed_smc_dma_in_progress(AspeedSMCState *s)
{
    return s->regs[R_DMA_CTRL] & DMA_CTRL_ENABLE &&
        !(s->regs[R_INTR_CTRL] & INTR_CTRL_DMA_STATUS);
}

static void aspeed_smc_dma_done(AspeedSMCState *s)
{
    s->regs[R_INTR_CTRL] |= INTR_CTRL_DMA_STATUS;
    if (s->regs[R_INTR_CTRL] & INTR_CTRL_DMA_EN) {
        qemu_irq_raise(s->irq);
    }
}

static void aspeed_smc_dma_ctrl(AspeedSMCState *s, uint32_t dma_ctrl)
{
    if (!(dma_ctrl & DMA_CTRL_ENABLE)) {
        s->regs[R_DMA_CTRL] = dma_ctrl;

        aspeed_smc_dma_stop(s);
        return;
    }

    if (aspeed_smc_dma_in_progress(s)) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: DMA in progress\n", __func__);
        return;
    }

    s->regs[R_DMA_CTRL] = dma_ctrl;

    if (s->regs[R_DMA_CTRL] & DMA_CTRL_CKSUM) {
        aspeed_smc_dma_checksum(s);
    } else {
        aspeed_smc_dma_rw(s);
    }

    aspeed_smc_dma_done(s);
}

static inline bool aspeed_smc_dma_granted(AspeedSMCState *s)
{
    if (!(s->ctrl->features & ASPEED_SMC_FEATURE_DMA_GRANT)) {
        return true;
    }

    if (!(s->regs[R_DMA_CTRL] & DMA_CTRL_GRANT)) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: DMA not granted\n", __func__);
        return false;
    }

    return true;
}

static void aspeed_2600_smc_dma_ctrl(AspeedSMCState *s, uint32_t dma_ctrl)
{
    /* Preserve DMA bits */
    dma_ctrl |= s->regs[R_DMA_CTRL] & (DMA_CTRL_REQUEST | DMA_CTRL_GRANT);

    if (dma_ctrl == 0xAEED0000) {
        /* automatically grant request */
        s->regs[R_DMA_CTRL] |= (DMA_CTRL_REQUEST | DMA_CTRL_GRANT);
        return;
    }

    /* clear request */
    if (dma_ctrl == 0xDEEA0000) {
        s->regs[R_DMA_CTRL] &= ~(DMA_CTRL_REQUEST | DMA_CTRL_GRANT);
        return;
    }

    if (!aspeed_smc_dma_granted(s)) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: DMA not granted\n", __func__);
        return;
    }

    aspeed_smc_dma_ctrl(s, dma_ctrl);
    s->regs[R_DMA_CTRL] &= ~(DMA_CTRL_REQUEST | DMA_CTRL_GRANT);
}
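/*
 * Guest-visible handshake modeled above: on the AST2600 the guest
 * first writes the magic value 0xAEED0000 to the DMA Control Register
 * to request the engine (the model grants it immediately), then
 * programs the DMA registers and the real control value. Writing
 * 0xDEEA0000 explicitly releases the request; the model also drops the
 * request/grant bits once the transfer has run.
 */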
static void aspeed_smc_write(void *opaque, hwaddr addr, uint64_t data,
                             unsigned int size)
{
    AspeedSMCState *s = ASPEED_SMC(opaque);
    uint32_t value = data;

    addr >>= 2;

    trace_aspeed_smc_write(addr, size, data);

    if (addr == s->r_conf ||
        (addr >= s->r_timings &&
         addr < s->r_timings + s->ctrl->nregs_timings) ||
        addr == s->r_ce_ctrl) {
        s->regs[addr] = value;
    } else if (addr >= s->r_ctrl0 && addr < s->r_ctrl0 + s->num_cs) {
        int cs = addr - s->r_ctrl0;
        aspeed_smc_flash_update_ctrl(&s->flashes[cs], value);
    } else if (addr >= R_SEG_ADDR0 &&
               addr < R_SEG_ADDR0 + s->ctrl->max_peripherals) {
        int cs = addr - R_SEG_ADDR0;

        if (value != s->regs[R_SEG_ADDR0 + cs]) {
            aspeed_smc_flash_set_segment(s, cs, value);
        }
    } else if (addr == R_CE_CMD_CTRL) {
        s->regs[addr] = value & 0xff;
    } else if (addr == R_DUMMY_DATA) {
        s->regs[addr] = value & 0xff;
    } else if (addr == R_INTR_CTRL) {
        s->regs[addr] = value;
    } else if (aspeed_smc_has_dma(s) && addr == R_DMA_CTRL) {
        s->ctrl->dma_ctrl(s, value);
    } else if (aspeed_smc_has_dma(s) && addr == R_DMA_DRAM_ADDR &&
               aspeed_smc_dma_granted(s)) {
        s->regs[addr] = DMA_DRAM_ADDR(s, value);
    } else if (aspeed_smc_has_dma(s) && addr == R_DMA_FLASH_ADDR &&
               aspeed_smc_dma_granted(s)) {
        s->regs[addr] = DMA_FLASH_ADDR(s, value);
    } else if (aspeed_smc_has_dma(s) && addr == R_DMA_LEN &&
               aspeed_smc_dma_granted(s)) {
        s->regs[addr] = DMA_LENGTH(value);
    } else {
        qemu_log_mask(LOG_UNIMP, "%s: not implemented: 0x%" HWADDR_PRIx "\n",
                      __func__, addr);
        return;
    }
}

static const MemoryRegionOps aspeed_smc_ops = {
    .read = aspeed_smc_read,
    .write = aspeed_smc_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

/*
 * Initialize the custom address spaces for DMAs
 */
static void aspeed_smc_dma_setup(AspeedSMCState *s, Error **errp)
{
    char *name;

    if (!s->dram_mr) {
        error_setg(errp, TYPE_ASPEED_SMC ": 'dram' link not set");
        return;
    }

    name = g_strdup_printf("%s-dma-flash", s->ctrl->name);
    address_space_init(&s->flash_as, &s->mmio_flash, name);
    g_free(name);

    name = g_strdup_printf("%s-dma-dram", s->ctrl->name);
    address_space_init(&s->dram_as, s->dram_mr, name);
    g_free(name);
}

static void aspeed_smc_realize(DeviceState *dev, Error **errp)
{
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
    AspeedSMCState *s = ASPEED_SMC(dev);
    AspeedSMCClass *mc = ASPEED_SMC_GET_CLASS(s);
    int i;
    char name[32];
    hwaddr offset = 0;

    s->ctrl = mc->ctrl;

    /* keep a copy under AspeedSMCState to speed up accesses */
    s->r_conf = s->ctrl->r_conf;
    s->r_ce_ctrl = s->ctrl->r_ce_ctrl;
    s->r_ctrl0 = s->ctrl->r_ctrl0;
    s->r_timings = s->ctrl->r_timings;
    s->conf_enable_w0 = s->ctrl->conf_enable_w0;

    /* Enforce some real HW limits */
    if (s->num_cs > s->ctrl->max_peripherals) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: num_cs cannot exceed: %d\n",
                      __func__, s->ctrl->max_peripherals);
        s->num_cs = s->ctrl->max_peripherals;
    }

    /* DMA irq. Keep it first for the initialization in the SoC */
    sysbus_init_irq(sbd, &s->irq);

    s->spi = ssi_create_bus(dev, "spi");

    /* Setup cs_lines for peripherals */
    s->cs_lines = g_new0(qemu_irq, s->num_cs);

    for (i = 0; i < s->num_cs; ++i) {
        sysbus_init_irq(sbd, &s->cs_lines[i]);
    }

    /* The memory region for the controller registers */
    memory_region_init_io(&s->mmio, OBJECT(s), &aspeed_smc_ops, s,
                          s->ctrl->name, s->ctrl->nregs * 4);
    sysbus_init_mmio(sbd, &s->mmio);

    /*
     * The container memory region representing the address space
     * window in which the flash modules are mapped. The size and
     * address depend on the SoC model and controller type.
     */
    snprintf(name, sizeof(name), "%s.flash", s->ctrl->name);

    memory_region_init_io(&s->mmio_flash, OBJECT(s),
                          &aspeed_smc_flash_default_ops, s, name,
                          s->ctrl->flash_window_size);
    memory_region_init_alias(&s->mmio_flash_alias, OBJECT(s), name,
                             &s->mmio_flash, 0, s->ctrl->flash_window_size);
    sysbus_init_mmio(sbd, &s->mmio_flash_alias);

    s->flashes = g_new0(AspeedSMCFlash, s->ctrl->max_peripherals);
    /*
     * Let's create a sub memory region for each possible peripheral. All
     * have a configurable memory segment in the overall flash mapping
     * window of the controller, but there is not necessarily a flash
     * module behind to handle the memory accesses. This depends on
     * the board configuration.
     */
    for (i = 0; i < s->ctrl->max_peripherals; ++i) {
        AspeedSMCFlash *fl = &s->flashes[i];

        snprintf(name, sizeof(name), "%s.%d", s->ctrl->name, i);

        fl->id = i;
        fl->controller = s;
        fl->size = s->ctrl->segments[i].size;
        memory_region_init_io(&fl->mmio, OBJECT(s), &aspeed_smc_flash_ops,
                              fl, name, fl->size);
        memory_region_add_subregion(&s->mmio_flash, offset, &fl->mmio);
        offset += fl->size;
    }

    /* DMA support */
    if (aspeed_smc_has_dma(s)) {
        aspeed_smc_dma_setup(s, errp);
    }
}

static const VMStateDescription vmstate_aspeed_smc = {
    .name = "aspeed.smc",
    .version_id = 2,
    .minimum_version_id = 2,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_ARRAY(regs, AspeedSMCState, ASPEED_SMC_R_MAX),
        VMSTATE_UINT8(snoop_index, AspeedSMCState),
        VMSTATE_UINT8(snoop_dummies, AspeedSMCState),
        VMSTATE_END_OF_LIST()
    }
};

static Property aspeed_smc_properties[] = {
    DEFINE_PROP_UINT32("num-cs", AspeedSMCState, num_cs, 1),
    DEFINE_PROP_BOOL("inject-failure", AspeedSMCState, inject_failure, false),
    DEFINE_PROP_LINK("dram", AspeedSMCState, dram_mr,
                     TYPE_MEMORY_REGION, MemoryRegion *),
    DEFINE_PROP_END_OF_LIST(),
};

static void aspeed_smc_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    AspeedSMCClass *mc = ASPEED_SMC_CLASS(klass);

    dc->realize = aspeed_smc_realize;
    dc->reset = aspeed_smc_reset;
    device_class_set_props(dc, aspeed_smc_properties);
    dc->vmsd = &vmstate_aspeed_smc;
    mc->ctrl = data;
}

static const TypeInfo aspeed_smc_info = {
    .name = TYPE_ASPEED_SMC,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(AspeedSMCState),
    .class_size = sizeof(AspeedSMCClass),
    .abstract = true,
};

static void aspeed_smc_register_types(void)
{
    int i;

    type_register_static(&aspeed_smc_info);
    for (i = 0; i < ARRAY_SIZE(controllers); ++i) {
        TypeInfo ti = {
            .name = controllers[i].name,
            .parent = TYPE_ASPEED_SMC,
            .class_init = aspeed_smc_class_init,
            .class_data = (void *)&controllers[i],
        };
        type_register(&ti);
    }
}

type_init(aspeed_smc_register_types)