/*
 * TI OMAP general purpose memory controller emulation.
 *
 * Copyright (C) 2007-2009 Nokia Corporation
 * Original code written by Andrzej Zaborowski <andrew@openedhand.com>
 * Enhancements for OMAP3 and NAND support written by Juha Riihimäki
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 or
 * (at your option) any later version of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "hw/irq.h"
#include "hw/block/flash.h"
#include "hw/arm/omap.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"

/* General-Purpose Memory Controller */
struct omap_gpmc_s {
    qemu_irq irq;
    qemu_irq drq;
    MemoryRegion iomem;
    int accept_256;

    uint8_t revision;
    uint8_t sysconfig;
    uint16_t irqst;
    uint16_t irqen;
    uint16_t lastirq;
    uint16_t timeout;
    uint16_t config;
    struct omap_gpmc_cs_file_s {
        uint32_t config[7];
        MemoryRegion *iomem;
        MemoryRegion container;
        MemoryRegion nandiomem;
        DeviceState *dev;
    } cs_file[8];
    int ecc_cs;
    int ecc_ptr;
    uint32_t ecc_cfg;
    ECCState ecc[9];
    struct prefetch {
        uint32_t config1; /* GPMC_PREFETCH_CONFIG1 */
        uint32_t transfercount; /* GPMC_PREFETCH_CONFIG2:TRANSFERCOUNT */
        int startengine; /* GPMC_PREFETCH_CONTROL:STARTENGINE */
        int fifopointer; /* GPMC_PREFETCH_STATUS:FIFOPOINTER */
        int count; /* GPMC_PREFETCH_STATUS:COUNTVALUE */
        MemoryRegion iomem;
        uint8_t fifo[64];
    } prefetch;
};

#define OMAP_GPMC_8BIT 0
#define OMAP_GPMC_16BIT 1
#define OMAP_GPMC_NOR 0
#define OMAP_GPMC_NAND 2

static int omap_gpmc_devtype(struct omap_gpmc_cs_file_s *f)
{
    return (f->config[0] >> 10) & 3;
}

static int omap_gpmc_devsize(struct omap_gpmc_cs_file_s *f)
{
    /* devsize field is really 2 bits but we ignore the high
     * bit to ensure consistent behaviour if the guest sets
     * it (values 2 and 3 are reserved in the TRM)
     */
    return (f->config[0] >> 12) & 1;
}

/* Extract the chip-select value from the prefetch config1 register */
static int prefetch_cs(uint32_t config1)
{
    return (config1 >> 24) & 7;
}

static int prefetch_threshold(uint32_t config1)
{
    return (config1 >> 8) & 0x7f;
}
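/* Field layout of GPMC_PREFETCH_CONFIG1 as this model uses it (an
 * illustrative summary inferred from the masks used below; field names
 * follow the OMAP TRM conventions used elsewhere in this file):
 *   bit  0       ACCESSMODE       0 = prefetch (read), 1 = write-posting
 *   bit  2       DMAMODE          assert the DRQ line for FIFO servicing
 *   bit  7       ENABLEENGINE     map the FIFO over the chip-select region
 *   bits 8..14   FIFOTHRESHOLD    byte threshold used by prefetch_threshold()
 *   bits 24..26  ENGINECSSELECTOR chip-select used by prefetch_cs()
 */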
static void omap_gpmc_int_update(struct omap_gpmc_s *s)
{
    /* The TRM is a bit unclear, but it seems to say that
     * the TERMINALCOUNTSTATUS bit is set only on the
     * transition when the prefetch engine goes from
     * active to inactive, whereas the FIFOEVENTSTATUS
     * bit is held high as long as the fifo has at
     * least THRESHOLD bytes available.
     * So we do the latter here, but TERMINALCOUNTSTATUS
     * is set elsewhere.
     */
    if (s->prefetch.fifopointer >= prefetch_threshold(s->prefetch.config1)) {
        s->irqst |= 1;
    }
    if ((s->irqen & s->irqst) != s->lastirq) {
        s->lastirq = s->irqen & s->irqst;
        qemu_set_irq(s->irq, s->lastirq);
    }
}

static void omap_gpmc_dma_update(struct omap_gpmc_s *s, int value)
{
    if (s->prefetch.config1 & 4) {
        qemu_set_irq(s->drq, value);
    }
}

/* Access functions for when a NAND-like device is mapped into memory:
 * all addresses in the region behave like accesses to the relevant
 * GPMC_NAND_DATA_i register (which is actually implemented to call these)
 */
static uint64_t omap_nand_read(void *opaque, hwaddr addr,
                               unsigned size)
{
    struct omap_gpmc_cs_file_s *f = opaque;
    uint64_t v;
    nand_setpins(f->dev, 0, 0, 0, 1, 0);
    switch (omap_gpmc_devsize(f)) {
    case OMAP_GPMC_8BIT:
        v = nand_getio(f->dev);
        if (size == 1) {
            return v;
        }
        v |= (nand_getio(f->dev) << 8);
        if (size == 2) {
            return v;
        }
        v |= (nand_getio(f->dev) << 16);
        v |= (nand_getio(f->dev) << 24);
        return v;
    case OMAP_GPMC_16BIT:
        v = nand_getio(f->dev);
        if (size == 1) {
            /* 8 bit read from 16 bit device : probably a guest bug */
            return v & 0xff;
        }
        if (size == 2) {
            return v;
        }
        v |= (nand_getio(f->dev) << 16);
        return v;
    default:
        abort();
    }
}

static void omap_nand_setio(DeviceState *dev, uint64_t value,
                            int nandsize, int size)
{
    /* Write the specified value to the NAND device, respecting
     * both size of the NAND device and size of the write access.
     */
    switch (nandsize) {
    case OMAP_GPMC_8BIT:
        switch (size) {
        case 1:
            nand_setio(dev, value & 0xff);
            break;
        case 2:
            nand_setio(dev, value & 0xff);
            nand_setio(dev, (value >> 8) & 0xff);
            break;
        case 4:
        default:
            nand_setio(dev, value & 0xff);
            nand_setio(dev, (value >> 8) & 0xff);
            nand_setio(dev, (value >> 16) & 0xff);
            nand_setio(dev, (value >> 24) & 0xff);
            break;
        }
        break;
    case OMAP_GPMC_16BIT:
        switch (size) {
        case 1:
            /* writing to a 16bit device with 8bit access is probably a guest
             * bug; pass the value through anyway.
             */
        case 2:
            nand_setio(dev, value & 0xffff);
            break;
        case 4:
        default:
            nand_setio(dev, value & 0xffff);
            nand_setio(dev, (value >> 16) & 0xffff);
            break;
        }
        break;
    }
}

static void omap_nand_write(void *opaque, hwaddr addr,
                            uint64_t value, unsigned size)
{
    struct omap_gpmc_cs_file_s *f = opaque;
    nand_setpins(f->dev, 0, 0, 0, 1, 0);
    omap_nand_setio(f->dev, value, omap_gpmc_devsize(f), size);
}

static const MemoryRegionOps omap_nand_ops = {
    .read = omap_nand_read,
    .write = omap_nand_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
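/* Worked example (illustrative): for an 8-bit NAND part, a 32-bit read of
 * the data register above is modelled as four successive nand_getio() calls
 * packed little-endian, i.e. b0 | (b1 << 8) | (b2 << 16) | (b3 << 24); a
 * 16-bit part returns 16 bits per call, so a 32-bit read needs only two
 * calls, and writes are split into bytes or halfwords in the same way.
 */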
static void fill_prefetch_fifo(struct omap_gpmc_s *s)
{
    /* Fill the prefetch FIFO by reading data from NAND.
     * We do this synchronously, unlike the hardware which
     * will do this asynchronously. We refill when the
     * FIFO has THRESHOLD bytes free, and we always refill
     * as much data as possible starting at the top end
     * of the FIFO.
     * (We have to refill at THRESHOLD rather than waiting
     * for the FIFO to empty to allow for the case where
     * the FIFO size isn't an exact multiple of THRESHOLD
     * and we're doing DMA transfers.)
     * This means we never need to handle wrap-around in
     * the fifo-reading code, and the next byte of data
     * to read is always fifo[63 - fifopointer].
     */
    int fptr;
    int cs = prefetch_cs(s->prefetch.config1);
    int is16bit = (((s->cs_file[cs].config[0] >> 12) & 3) != 0);
    int bytes;
    /* Don't believe the bit of the OMAP TRM that says that COUNTVALUE
     * and TRANSFERCOUNT are in units of 16 bit words for 16 bit NAND.
     * Instead believe the bit that says it is always a byte count.
     */
    bytes = 64 - s->prefetch.fifopointer;
    if (bytes > s->prefetch.count) {
        bytes = s->prefetch.count;
    }
    if (is16bit) {
        bytes &= ~1;
    }

    s->prefetch.count -= bytes;
    s->prefetch.fifopointer += bytes;
    fptr = 64 - s->prefetch.fifopointer;
    /* Move the existing data in the FIFO so it sits just
     * before what we're about to read in
     */
    while (fptr < (64 - bytes)) {
        s->prefetch.fifo[fptr] = s->prefetch.fifo[fptr + bytes];
        fptr++;
    }
    while (fptr < 64) {
        if (is16bit) {
            uint32_t v = omap_nand_read(&s->cs_file[cs], 0, 2);
            s->prefetch.fifo[fptr++] = v & 0xff;
            s->prefetch.fifo[fptr++] = (v >> 8) & 0xff;
        } else {
            s->prefetch.fifo[fptr++] = omap_nand_read(&s->cs_file[cs], 0, 1);
        }
    }
    if (s->prefetch.startengine && (s->prefetch.count == 0)) {
        /* This was the final transfer: raise TERMINALCOUNTSTATUS */
        s->irqst |= 2;
        s->prefetch.startengine = 0;
    }
    /* If there are any bytes in the FIFO at this point then
     * we must raise a DMA request (either this is a final part
     * transfer, or we filled the FIFO in which case we certainly
     * have THRESHOLD bytes available)
     */
    if (s->prefetch.fifopointer != 0) {
        omap_gpmc_dma_update(s, 1);
    }
    omap_gpmc_int_update(s);
}

/* Access functions for a NAND-like device when the prefetch/postwrite
 * engine is enabled -- all addresses in the region behave alike:
 * data is read or written to the FIFO.
 */
static uint64_t omap_gpmc_prefetch_read(void *opaque, hwaddr addr,
                                        unsigned size)
{
    struct omap_gpmc_s *s = opaque;
    uint32_t data;
    if (s->prefetch.config1 & 1) {
        /* The TRM doesn't define the behaviour if you read from the
         * FIFO when the prefetch engine is in write mode. We choose
         * to always return zero.
         */
        return 0;
    }
    /* Note that trying to read an empty fifo repeats the last byte */
    if (s->prefetch.fifopointer) {
        s->prefetch.fifopointer--;
    }
    data = s->prefetch.fifo[63 - s->prefetch.fifopointer];
    if (s->prefetch.fifopointer ==
        (64 - prefetch_threshold(s->prefetch.config1))) {
        /* We've drained THRESHOLD bytes now. So deassert the
         * DMA request, then refill the FIFO (which will probably
         * assert it again.)
         */
        omap_gpmc_dma_update(s, 0);
        fill_prefetch_fifo(s);
    }
    omap_gpmc_int_update(s);
    return data;
}
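/* Illustrative walk-through (assumed numbers, not taken from the TRM): with
 * FIFOTHRESHOLD = 16 and TRANSFERCOUNT = 100, starting the engine loads the
 * full 64-byte FIFO, leaving count = 36. Once 16 bytes have been drained,
 * fifopointer reaches 64 - 16 = 48, the DMA request is dropped and
 * fill_prefetch_fifo() tops the FIFO back up to 64, leaving count = 20.
 */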
static void omap_gpmc_prefetch_write(void *opaque, hwaddr addr,
                                     uint64_t value, unsigned size)
{
    struct omap_gpmc_s *s = opaque;
    int cs = prefetch_cs(s->prefetch.config1);
    if ((s->prefetch.config1 & 1) == 0) {
        /* The TRM doesn't define the behaviour of writing to the
         * FIFO when the prefetch engine is in read mode. We
         * choose to ignore the write.
         */
        return;
    }
    if (s->prefetch.count == 0) {
        /* The TRM doesn't define the behaviour of writing to the
         * FIFO if the transfer is complete. We choose to ignore.
         */
        return;
    }
    /* The only reason we do any data buffering in postwrite
     * mode is if we are talking to a 16 bit NAND device, in
     * which case we need to buffer the first byte of the
     * 16 bit word until the other byte arrives.
     */
    int is16bit = (((s->cs_file[cs].config[0] >> 12) & 3) != 0);
    if (is16bit) {
        /* fifopointer alternates between 64 (waiting for first
         * byte of word) and 63 (waiting for second byte)
         */
        if (s->prefetch.fifopointer == 64) {
            s->prefetch.fifo[0] = value;
            s->prefetch.fifopointer--;
        } else {
            value = (value << 8) | s->prefetch.fifo[0];
            omap_nand_write(&s->cs_file[cs], 0, value, 2);
            s->prefetch.count--;
            s->prefetch.fifopointer = 64;
        }
    } else {
        /* Just write the byte : fifopointer remains 64 at all times */
        omap_nand_write(&s->cs_file[cs], 0, value, 1);
        s->prefetch.count--;
    }
    if (s->prefetch.count == 0) {
        /* Final transfer: raise TERMINALCOUNTSTATUS */
        s->irqst |= 2;
        s->prefetch.startengine = 0;
    }
    omap_gpmc_int_update(s);
}

static const MemoryRegionOps omap_prefetch_ops = {
    .read = omap_gpmc_prefetch_read,
    .write = omap_gpmc_prefetch_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .impl.min_access_size = 1,
    .impl.max_access_size = 1,
};

static MemoryRegion *omap_gpmc_cs_memregion(struct omap_gpmc_s *s, int cs)
{
    /* Return the MemoryRegion* to map/unmap for this chipselect */
    struct omap_gpmc_cs_file_s *f = &s->cs_file[cs];
    if (omap_gpmc_devtype(f) == OMAP_GPMC_NOR) {
        return f->iomem;
    }
    if ((s->prefetch.config1 & 0x80) &&
        (prefetch_cs(s->prefetch.config1) == cs)) {
        /* The prefetch engine is enabled for this CS: map the FIFO */
        return &s->prefetch.iomem;
    }
    return &f->nandiomem;
}
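/* Worked example for the CONFIG7 decode below (illustrative value, not from
 * any particular board): writing 0x00000f48 gives BASEADDRESS = 0x08
 * (bits 5:0), CSVALID = 1 (bit 6) and MASKADDRESS = 0xf (bits 11:8), so the
 * chip-select window is mapped at 0x08000000 with size
 * (0x0fffffff & ~0x0f000000) + 1 = 0x01000000, i.e. 16MB.
 */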
static void omap_gpmc_cs_map(struct omap_gpmc_s *s, int cs)
{
    struct omap_gpmc_cs_file_s *f = &s->cs_file[cs];
    uint32_t mask = (f->config[6] >> 8) & 0xf;
    uint32_t base = f->config[6] & 0x3f;
    uint32_t size;

    if (!f->iomem && !f->dev) {
        return;
    }

    if (!(f->config[6] & (1 << 6))) {
        /* Do nothing unless CSVALID */
        return;
    }

    /* TODO: check for overlapping regions and report access errors */
    if (mask != 0x8 && mask != 0xc && mask != 0xe && mask != 0xf
        && !(s->accept_256 && !mask)) {
        fprintf(stderr, "%s: invalid chip-select mask address (0x%x)\n",
                __func__, mask);
    }

    base <<= 24;
    size = (0x0fffffff & ~(mask << 24)) + 1;
    /* TODO: rather than setting the size of the mapping (which should be
     * constant), the mask should cause wrapping of the address space, so
     * that the same memory becomes accessible at every <i>size</i> bytes
     * starting from <i>base</i>.
     */
    memory_region_init(&f->container, NULL, "omap-gpmc-file", size);
    memory_region_add_subregion(&f->container, 0,
                                omap_gpmc_cs_memregion(s, cs));
    memory_region_add_subregion(get_system_memory(), base,
                                &f->container);
}

static void omap_gpmc_cs_unmap(struct omap_gpmc_s *s, int cs)
{
    struct omap_gpmc_cs_file_s *f = &s->cs_file[cs];
    if (!(f->config[6] & (1 << 6))) {
        /* Do nothing unless CSVALID */
        return;
    }
    if (!f->iomem && !f->dev) {
        return;
    }
    memory_region_del_subregion(get_system_memory(), &f->container);
    memory_region_del_subregion(&f->container, omap_gpmc_cs_memregion(s, cs));
    object_unparent(OBJECT(&f->container));
}

void omap_gpmc_reset(struct omap_gpmc_s *s)
{
    int i;

    s->sysconfig = 0;
    s->irqst = 0;
    s->irqen = 0;
    omap_gpmc_int_update(s);
    for (i = 0; i < 8; i++) {
        /* This has to happen before we change any of the config
         * used to determine which memory regions are mapped or unmapped.
         */
        omap_gpmc_cs_unmap(s, i);
    }
    s->timeout = 0;
    s->config = 0xa00;
    s->prefetch.config1 = 0x00004000;
    s->prefetch.transfercount = 0x00000000;
    s->prefetch.startengine = 0;
    s->prefetch.fifopointer = 0;
    s->prefetch.count = 0;
    for (i = 0; i < 8; i++) {
        s->cs_file[i].config[1] = 0x101001;
        s->cs_file[i].config[2] = 0x020201;
        s->cs_file[i].config[3] = 0x10031003;
        s->cs_file[i].config[4] = 0x10f1111;
        s->cs_file[i].config[5] = 0;
        s->cs_file[i].config[6] = 0xf00;
        /* In theory we could probe attached devices for some CFG1
         * bits here, but we just retain them across resets as they
         * were set initially by omap_gpmc_attach().
         */
        if (i == 0) {
            s->cs_file[i].config[0] &= 0x00433e00;
            s->cs_file[i].config[6] |= 1 << 6; /* CSVALID */
            omap_gpmc_cs_map(s, i);
        } else {
            s->cs_file[i].config[0] &= 0x00403c00;
        }
    }
    s->ecc_cs = 0;
    s->ecc_ptr = 0;
    s->ecc_cfg = 0x3fcff000;
    for (i = 0; i < 9; i++) {
        ecc_reset(&s->ecc[i]);
    }
}

static int gpmc_wordaccess_only(hwaddr addr)
{
    /* Return true if the register offset is to a register that
     * only permits word width accesses.
     * Non-word accesses are only OK for GPMC_NAND_DATA/ADDRESS/COMMAND
     * for any chipselect.
     */
    if (addr >= 0x60 && addr <= 0x1d4) {
        int cs = (addr - 0x60) / 0x30;
        addr -= cs * 0x30;
        if (addr >= 0x7c && addr < 0x88) {
            /* GPMC_NAND_COMMAND, GPMC_NAND_ADDRESS, GPMC_NAND_DATA */
            return 0;
        }
    }
    return 1;
}
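/* Layout reminder (derived from the decode below rather than quoted from the
 * TRM): each chip-select owns a 0x30-byte bank of registers starting at
 * offset 0x60 + cs * 0x30, so GPMC_NAND_COMMAND/ADDRESS/DATA for cs 0 live
 * at 0x7c/0x80/0x84 and the same registers for cs 1 live at 0xac/0xb0/0xb4.
 */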
static uint64_t omap_gpmc_read(void *opaque, hwaddr addr,
                               unsigned size)
{
    struct omap_gpmc_s *s = opaque;
    int cs;
    struct omap_gpmc_cs_file_s *f;

    if (size != 4 && gpmc_wordaccess_only(addr)) {
        return omap_badwidth_read32(opaque, addr);
    }

    switch (addr) {
    case 0x000: /* GPMC_REVISION */
        return s->revision;

    case 0x010: /* GPMC_SYSCONFIG */
        return s->sysconfig;

    case 0x014: /* GPMC_SYSSTATUS */
        return 1; /* RESETDONE */

    case 0x018: /* GPMC_IRQSTATUS */
        return s->irqst;

    case 0x01c: /* GPMC_IRQENABLE */
        return s->irqen;

    case 0x040: /* GPMC_TIMEOUT_CONTROL */
        return s->timeout;

    case 0x044: /* GPMC_ERR_ADDRESS */
    case 0x048: /* GPMC_ERR_TYPE */
        return 0;

    case 0x050: /* GPMC_CONFIG */
        return s->config;

    case 0x054: /* GPMC_STATUS */
        return 0x001;

    case 0x060 ... 0x1d4:
        cs = (addr - 0x060) / 0x30;
        addr -= cs * 0x30;
        f = s->cs_file + cs;
        switch (addr) {
        case 0x60: /* GPMC_CONFIG1 */
            return f->config[0];
        case 0x64: /* GPMC_CONFIG2 */
            return f->config[1];
        case 0x68: /* GPMC_CONFIG3 */
            return f->config[2];
        case 0x6c: /* GPMC_CONFIG4 */
            return f->config[3];
        case 0x70: /* GPMC_CONFIG5 */
            return f->config[4];
        case 0x74: /* GPMC_CONFIG6 */
            return f->config[5];
        case 0x78: /* GPMC_CONFIG7 */
            return f->config[6];
        case 0x84 ... 0x87: /* GPMC_NAND_DATA */
            if (omap_gpmc_devtype(f) == OMAP_GPMC_NAND) {
                return omap_nand_read(f, 0, size);
            }
            return 0;
        }
        break;

    case 0x1e0: /* GPMC_PREFETCH_CONFIG1 */
        return s->prefetch.config1;
    case 0x1e4: /* GPMC_PREFETCH_CONFIG2 */
        return s->prefetch.transfercount;
    case 0x1ec: /* GPMC_PREFETCH_CONTROL */
        return s->prefetch.startengine;
    case 0x1f0: /* GPMC_PREFETCH_STATUS */
        /* NB: The OMAP3 TRM is inconsistent about whether the GPMC
         * FIFOTHRESHOLDSTATUS bit should be set when
         * FIFOPOINTER > FIFOTHRESHOLD or when it is >= FIFOTHRESHOLD.
         * Apparently the underlying functional spec from which the TRM was
         * created states that the behaviour is ">=", and this also
         * makes more conceptual sense.
         */
        return (s->prefetch.fifopointer << 24) |
               ((s->prefetch.fifopointer >=
                 ((s->prefetch.config1 >> 8) & 0x7f) ? 1 : 0) << 16) |
               s->prefetch.count;

    case 0x1f4: /* GPMC_ECC_CONFIG */
        return s->ecc_cs;
    case 0x1f8: /* GPMC_ECC_CONTROL */
        return s->ecc_ptr;
    case 0x1fc: /* GPMC_ECC_SIZE_CONFIG */
        return s->ecc_cfg;
    case 0x200 ... 0x220: /* GPMC_ECC_RESULT */
        cs = (addr & 0x1f) >> 2;
        /* TODO: check correctness */
        return
            ((s->ecc[cs].cp & 0x07) << 0) |
            ((s->ecc[cs].cp & 0x38) << 13) |
            ((s->ecc[cs].lp[0] & 0x1ff) << 3) |
            ((s->ecc[cs].lp[1] & 0x1ff) << 19);

    case 0x230: /* GPMC_TESTMODE_CTRL */
        return 0;
    case 0x234: /* GPMC_PSA_LSB */
    case 0x238: /* GPMC_PSA_MSB */
        return 0x00000000;
    }

    OMAP_BAD_REG(addr);
    return 0;
}
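/* Typical guest sequence for a prefetch read, as handled by the register
 * writes below (an illustrative sketch, not a TRM quotation):
 *   1. program GPMC_PREFETCH_CONFIG1 (0x1e0): NAND chip-select,
 *      FIFOTHRESHOLD and ENABLEENGINE, with ACCESSMODE = 0 (read);
 *   2. write the byte count to GPMC_PREFETCH_CONFIG2 (0x1e4);
 *   3. set STARTENGINE in GPMC_PREFETCH_CONTROL (0x1ec), which fills the
 *      FIFO from NAND;
 *   4. drain the data through the chip-select window (or via DMA), using
 *      GPMC_PREFETCH_STATUS (0x1f0) or the FIFOEVENT interrupt for pacing.
 */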
static void omap_gpmc_write(void *opaque, hwaddr addr,
                            uint64_t value, unsigned size)
{
    struct omap_gpmc_s *s = opaque;
    int cs;
    struct omap_gpmc_cs_file_s *f;

    if (size != 4 && gpmc_wordaccess_only(addr)) {
        omap_badwidth_write32(opaque, addr, value);
        return;
    }

    switch (addr) {
    case 0x000: /* GPMC_REVISION */
    case 0x014: /* GPMC_SYSSTATUS */
    case 0x054: /* GPMC_STATUS */
    case 0x1f0: /* GPMC_PREFETCH_STATUS */
    case 0x200 ... 0x220: /* GPMC_ECC_RESULT */
    case 0x234: /* GPMC_PSA_LSB */
    case 0x238: /* GPMC_PSA_MSB */
        OMAP_RO_REG(addr);
        break;

    case 0x010: /* GPMC_SYSCONFIG */
        if ((value >> 3) == 0x3) {
            fprintf(stderr, "%s: bad SDRAM idle mode %"PRIi64"\n",
                    __func__, value >> 3);
        }
        if (value & 2) {
            omap_gpmc_reset(s);
        }
        s->sysconfig = value & 0x19;
        break;

    case 0x018: /* GPMC_IRQSTATUS */
        s->irqst &= ~value;
        omap_gpmc_int_update(s);
        break;

    case 0x01c: /* GPMC_IRQENABLE */
        s->irqen = value & 0xf03;
        omap_gpmc_int_update(s);
        break;

    case 0x040: /* GPMC_TIMEOUT_CONTROL */
        s->timeout = value & 0x1ff1;
        break;

    case 0x044: /* GPMC_ERR_ADDRESS */
    case 0x048: /* GPMC_ERR_TYPE */
        break;

    case 0x050: /* GPMC_CONFIG */
        s->config = value & 0xf13;
        break;

    case 0x060 ... 0x1d4:
        cs = (addr - 0x060) / 0x30;
        addr -= cs * 0x30;
        f = s->cs_file + cs;
        switch (addr) {
        case 0x60: /* GPMC_CONFIG1 */
            f->config[0] = value & 0xffef3e13;
            break;
        case 0x64: /* GPMC_CONFIG2 */
            f->config[1] = value & 0x001f1f8f;
            break;
        case 0x68: /* GPMC_CONFIG3 */
            f->config[2] = value & 0x001f1f8f;
            break;
        case 0x6c: /* GPMC_CONFIG4 */
            f->config[3] = value & 0x1f8f1f8f;
            break;
        case 0x70: /* GPMC_CONFIG5 */
            f->config[4] = value & 0x0f1f1f1f;
            break;
        case 0x74: /* GPMC_CONFIG6 */
            f->config[5] = value & 0x00000fcf;
            break;
        case 0x78: /* GPMC_CONFIG7 */
            if ((f->config[6] ^ value) & 0xf7f) {
                omap_gpmc_cs_unmap(s, cs);
                f->config[6] = value & 0x00000f7f;
                omap_gpmc_cs_map(s, cs);
            }
            break;
        case 0x7c ... 0x7f: /* GPMC_NAND_COMMAND */
            if (omap_gpmc_devtype(f) == OMAP_GPMC_NAND) {
                nand_setpins(f->dev, 1, 0, 0, 1, 0); /* CLE */
                omap_nand_setio(f->dev, value, omap_gpmc_devsize(f), size);
            }
            break;
        case 0x80 ... 0x83: /* GPMC_NAND_ADDRESS */
            if (omap_gpmc_devtype(f) == OMAP_GPMC_NAND) {
                nand_setpins(f->dev, 0, 1, 0, 1, 0); /* ALE */
                omap_nand_setio(f->dev, value, omap_gpmc_devsize(f), size);
            }
            break;
        case 0x84 ... 0x87: /* GPMC_NAND_DATA */
            if (omap_gpmc_devtype(f) == OMAP_GPMC_NAND) {
                omap_nand_write(f, 0, value, size);
            }
            break;
        default:
            goto bad_reg;
        }
        break;

    case 0x1e0: /* GPMC_PREFETCH_CONFIG1 */
        if (!s->prefetch.startengine) {
            uint32_t newconfig1 = value & 0x7f8f7fbf;
            uint32_t changed;
            changed = newconfig1 ^ s->prefetch.config1;
            if (changed & (0x80 | 0x7000000)) {
                /* Turning the engine on or off, or mapping it somewhere else.
                 * cs_map() and cs_unmap() check the prefetch config and
                 * overall CSVALID bits, so it is sufficient to unmap-and-map
                 * both the old cs and the new one. Note that we adhere to
                 * the "unmap/change config/map" order (and not unmap twice
                 * if newcs == oldcs), otherwise we'll try to delete the wrong
                 * memory region.
                 */
                int oldcs = prefetch_cs(s->prefetch.config1);
                int newcs = prefetch_cs(newconfig1);
                omap_gpmc_cs_unmap(s, oldcs);
                if (oldcs != newcs) {
                    omap_gpmc_cs_unmap(s, newcs);
                }
                s->prefetch.config1 = newconfig1;
                omap_gpmc_cs_map(s, oldcs);
                if (oldcs != newcs) {
                    omap_gpmc_cs_map(s, newcs);
                }
            } else {
                s->prefetch.config1 = newconfig1;
            }
        }
        break;

    case 0x1e4: /* GPMC_PREFETCH_CONFIG2 */
        if (!s->prefetch.startengine) {
            s->prefetch.transfercount = value & 0x3fff;
        }
        break;

    case 0x1ec: /* GPMC_PREFETCH_CONTROL */
        if (s->prefetch.startengine != (value & 1)) {
            s->prefetch.startengine = value & 1;
            if (s->prefetch.startengine) {
                /* Prefetch engine start */
                s->prefetch.count = s->prefetch.transfercount;
                if (s->prefetch.config1 & 1) {
                    /* Write */
                    s->prefetch.fifopointer = 64;
                } else {
                    /* Read */
                    s->prefetch.fifopointer = 0;
                    fill_prefetch_fifo(s);
                }
            } else {
                /* Prefetch engine forcibly stopped. The TRM
                 * doesn't define the behaviour if you do this.
                 * We clear the prefetch count, which means that
                 * we permit no more writes, and don't read any
                 * more data from NAND. The CPU can still drain
                 * the FIFO of unread data.
                 */
                s->prefetch.count = 0;
            }
            omap_gpmc_int_update(s);
        }
        break;

    case 0x1f4: /* GPMC_ECC_CONFIG */
        s->ecc_cs = 0x8f;
        break;
    case 0x1f8: /* GPMC_ECC_CONTROL */
        if (value & (1 << 8)) {
            for (cs = 0; cs < 9; cs++) {
                ecc_reset(&s->ecc[cs]);
            }
        }
        s->ecc_ptr = value & 0xf;
        if (s->ecc_ptr == 0 || s->ecc_ptr > 9) {
            s->ecc_ptr = 0;
            s->ecc_cs &= ~1;
        }
        break;
    case 0x1fc: /* GPMC_ECC_SIZE_CONFIG */
        s->ecc_cfg = value & 0x3fcff1ff;
        break;
    case 0x230: /* GPMC_TESTMODE_CTRL */
        if (value & 7) {
            fprintf(stderr, "%s: test mode enable attempt\n", __func__);
        }
        break;

    default:
    bad_reg:
        OMAP_BAD_REG(addr);
        return;
    }
}
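/* Note on access widths (an observation about this model, not a TRM
 * statement): instead of restricting omap_gpmc_ops with .valid/.impl access
 * sizes, the handlers above call gpmc_wordaccess_only() themselves, so byte
 * and halfword accesses are honoured only for the per-chip-select NAND
 * command/address/data registers and are routed through
 * omap_badwidth_read32()/omap_badwidth_write32() everywhere else.
 */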
static const MemoryRegionOps omap_gpmc_ops = {
    .read = omap_gpmc_read,
    .write = omap_gpmc_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

struct omap_gpmc_s *omap_gpmc_init(struct omap_mpu_state_s *mpu,
                                   hwaddr base,
                                   qemu_irq irq, qemu_irq drq)
{
    int cs;
    struct omap_gpmc_s *s = g_new0(struct omap_gpmc_s, 1);

    memory_region_init_io(&s->iomem, NULL, &omap_gpmc_ops, s, "omap-gpmc",
                          0x1000);
    memory_region_add_subregion(get_system_memory(), base, &s->iomem);

    s->irq = irq;
    s->drq = drq;
    s->accept_256 = cpu_is_omap3630(mpu);
    s->revision = cpu_class_omap3(mpu) ? 0x50 : 0x20;
    s->lastirq = 0;
    omap_gpmc_reset(s);

    /* We have to register a different IO memory handler for each
     * chip select region in case a NAND device is mapped there. We
     * make the region the worst-case size of 256MB and rely on the
     * container memory region in cs_map to chop it down to the actual
     * guest-requested size.
     */
    for (cs = 0; cs < 8; cs++) {
        memory_region_init_io(&s->cs_file[cs].nandiomem, NULL,
                              &omap_nand_ops,
                              &s->cs_file[cs],
                              "omap-nand",
                              256 * 1024 * 1024);
    }

    memory_region_init_io(&s->prefetch.iomem, NULL, &omap_prefetch_ops, s,
                          "omap-gpmc-prefetch", 256 * 1024 * 1024);
    return s;
}

void omap_gpmc_attach(struct omap_gpmc_s *s, int cs, MemoryRegion *iomem)
{
    struct omap_gpmc_cs_file_s *f;
    assert(iomem);

    if (cs < 0 || cs >= 8) {
        fprintf(stderr, "%s: bad chip-select %i\n", __func__, cs);
        exit(-1);
    }
    f = &s->cs_file[cs];

    omap_gpmc_cs_unmap(s, cs);
    f->config[0] &= ~(0xf << 10);
    f->iomem = iomem;
    omap_gpmc_cs_map(s, cs);
}

void omap_gpmc_attach_nand(struct omap_gpmc_s *s, int cs, DeviceState *nand)
{
    struct omap_gpmc_cs_file_s *f;
    assert(nand);

    if (cs < 0 || cs >= 8) {
        fprintf(stderr, "%s: bad chip-select %i\n", __func__, cs);
        exit(-1);
    }
    f = &s->cs_file[cs];

    omap_gpmc_cs_unmap(s, cs);
    f->config[0] &= ~(0xf << 10);
    f->config[0] |= (OMAP_GPMC_NAND << 10);
    f->dev = nand;
    if (nand_getbuswidth(f->dev) == 16) {
        f->config[0] |= OMAP_GPMC_16BIT << 12;
    }
    omap_gpmc_cs_map(s, cs);
}
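/* Example board wiring (a hypothetical sketch; the helpers named below are
 * defined in this file, but the base address, IRQ/DRQ lines and NAND chip
 * ID are illustrative only):
 *
 *   struct omap_gpmc_s *gpmc = omap_gpmc_init(mpu, 0x6e000000,
 *                                             gpmc_irq, gpmc_drq);
 *   omap_gpmc_attach_nand(gpmc, 0, nand_init(blk, NAND_MFR_SAMSUNG, 0xba));
 *
 * A NOR-style device that provides its own MemoryRegion would be hooked up
 * with omap_gpmc_attach() instead.
 */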