/*
 * TI OMAP general purpose memory controller emulation.
 *
 * Copyright (C) 2007-2009 Nokia Corporation
 * Original code written by Andrzej Zaborowski <andrew@openedhand.com>
 * Enhancements for OMAP3 and NAND support written by Juha Riihimäki
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 or
 * (at your option) any later version of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "hw/hw.h"
#include "hw/block/flash.h"
#include "hw/arm/omap.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"

/* General-Purpose Memory Controller */
struct omap_gpmc_s {
    qemu_irq irq;
    qemu_irq drq;
    MemoryRegion iomem;
    int accept_256;

    uint8_t revision;
    uint8_t sysconfig;
    uint16_t irqst;
    uint16_t irqen;
    uint16_t lastirq;
    uint16_t timeout;
    uint16_t config;
    struct omap_gpmc_cs_file_s {
        uint32_t config[7];
        MemoryRegion *iomem;
        MemoryRegion container;
        MemoryRegion nandiomem;
        DeviceState *dev;
    } cs_file[8];
    int ecc_cs;
    int ecc_ptr;
    uint32_t ecc_cfg;
    ECCState ecc[9];
    struct prefetch {
        uint32_t config1; /* GPMC_PREFETCH_CONFIG1 */
        uint32_t transfercount; /* GPMC_PREFETCH_CONFIG2:TRANSFERCOUNT */
        int startengine; /* GPMC_PREFETCH_CONTROL:STARTENGINE */
        int fifopointer; /* GPMC_PREFETCH_STATUS:FIFOPOINTER */
        int count; /* GPMC_PREFETCH_STATUS:COUNTVALUE */
        MemoryRegion iomem;
        uint8_t fifo[64];
    } prefetch;
};

#define OMAP_GPMC_8BIT 0
#define OMAP_GPMC_16BIT 1
#define OMAP_GPMC_NOR 0
#define OMAP_GPMC_NAND 2

static int omap_gpmc_devtype(struct omap_gpmc_cs_file_s *f)
{
    return (f->config[0] >> 10) & 3;
}

static int omap_gpmc_devsize(struct omap_gpmc_cs_file_s *f)
{
    /* devsize field is really 2 bits but we ignore the high
     * bit to ensure consistent behaviour if the guest sets
     * it (values 2 and 3 are reserved in the TRM)
     */
    return (f->config[0] >> 12) & 1;
}

/* Extract the chip-select value from the prefetch config1 register */
static int prefetch_cs(uint32_t config1)
{
    return (config1 >> 24) & 7;
}

static int prefetch_threshold(uint32_t config1)
{
    return (config1 >> 8) & 0x7f;
}

static void omap_gpmc_int_update(struct omap_gpmc_s *s)
{
    /* The TRM is a bit unclear, but it seems to say that
     * the TERMINALCOUNTSTATUS bit is set only on the
     * transition when the prefetch engine goes from
     * active to inactive, whereas the FIFOEVENTSTATUS
     * bit is held high as long as the fifo has at
     * least THRESHOLD bytes available.
     * So we do the latter here, but TERMINALCOUNTSTATUS
     * is set elsewhere.
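     * (In GPMC_IRQSTATUS, bit 0 is FIFOEVENTSTATUS, which we raise here;
     * bit 1 is TERMINALCOUNTSTATUS, raised in fill_prefetch_fifo() and
     * in the postwrite path.)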
     */
    if (s->prefetch.fifopointer >= prefetch_threshold(s->prefetch.config1)) {
        s->irqst |= 1;
    }
    if ((s->irqen & s->irqst) != s->lastirq) {
        s->lastirq = s->irqen & s->irqst;
        qemu_set_irq(s->irq, s->lastirq);
    }
}

static void omap_gpmc_dma_update(struct omap_gpmc_s *s, int value)
{
    if (s->prefetch.config1 & 4) {
        qemu_set_irq(s->drq, value);
    }
}

/* Access functions for when a NAND-like device is mapped into memory:
 * all addresses in the region behave like accesses to the relevant
 * GPMC_NAND_DATA_i register (which is actually implemented to call these)
 */
static uint64_t omap_nand_read(void *opaque, hwaddr addr,
                               unsigned size)
{
    struct omap_gpmc_cs_file_s *f = (struct omap_gpmc_cs_file_s *)opaque;
    uint64_t v;
    nand_setpins(f->dev, 0, 0, 0, 1, 0);
    switch (omap_gpmc_devsize(f)) {
    case OMAP_GPMC_8BIT:
        v = nand_getio(f->dev);
        if (size == 1) {
            return v;
        }
        v |= (nand_getio(f->dev) << 8);
        if (size == 2) {
            return v;
        }
        v |= (nand_getio(f->dev) << 16);
        v |= (nand_getio(f->dev) << 24);
        return v;
    case OMAP_GPMC_16BIT:
        v = nand_getio(f->dev);
        if (size == 1) {
            /* 8 bit read from 16 bit device : probably a guest bug */
            return v & 0xff;
        }
        if (size == 2) {
            return v;
        }
        v |= (nand_getio(f->dev) << 16);
        return v;
    default:
        abort();
    }
}

static void omap_nand_setio(DeviceState *dev, uint64_t value,
                            int nandsize, int size)
{
    /* Write the specified value to the NAND device, respecting
     * both size of the NAND device and size of the write access.
     */
    switch (nandsize) {
    case OMAP_GPMC_8BIT:
        switch (size) {
        case 1:
            nand_setio(dev, value & 0xff);
            break;
        case 2:
            nand_setio(dev, value & 0xff);
            nand_setio(dev, (value >> 8) & 0xff);
            break;
        case 4:
        default:
            nand_setio(dev, value & 0xff);
            nand_setio(dev, (value >> 8) & 0xff);
            nand_setio(dev, (value >> 16) & 0xff);
            nand_setio(dev, (value >> 24) & 0xff);
            break;
        }
        break;
    case OMAP_GPMC_16BIT:
        switch (size) {
        case 1:
            /* writing to a 16bit device with 8bit access is probably a guest
             * bug; pass the value through anyway.
             */
        case 2:
            nand_setio(dev, value & 0xffff);
            break;
        case 4:
        default:
            nand_setio(dev, value & 0xffff);
            nand_setio(dev, (value >> 16) & 0xffff);
            break;
        }
        break;
    }
}

static void omap_nand_write(void *opaque, hwaddr addr,
                            uint64_t value, unsigned size)
{
    struct omap_gpmc_cs_file_s *f = (struct omap_gpmc_cs_file_s *)opaque;
    nand_setpins(f->dev, 0, 0, 0, 1, 0);
    omap_nand_setio(f->dev, value, omap_gpmc_devsize(f), size);
}

static const MemoryRegionOps omap_nand_ops = {
    .read = omap_nand_read,
    .write = omap_nand_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static void fill_prefetch_fifo(struct omap_gpmc_s *s)
{
    /* Fill the prefetch FIFO by reading data from NAND.
     * We do this synchronously, unlike the hardware which
     * will do this asynchronously. We refill when the
     * FIFO has THRESHOLD bytes free, and we always refill
     * as much data as possible starting at the top end
     * of the FIFO.
     * (We have to refill at THRESHOLD rather than waiting
     * for the FIFO to empty to allow for the case where
     * the FIFO size isn't an exact multiple of THRESHOLD
     * and we're doing DMA transfers.)
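     * Because data is always packed towards the top of the FIFO, the
     * valid bytes at any moment are fifo[64 - fifopointer] .. fifo[63].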
     * This means we never need to handle wrap-around in
     * the fifo-reading code, and the next byte of data
     * to read is always fifo[63 - fifopointer].
     */
    int fptr;
    int cs = prefetch_cs(s->prefetch.config1);
    int is16bit = (((s->cs_file[cs].config[0] >> 12) & 3) != 0);
    int bytes;
    /* Don't believe the bit of the OMAP TRM that says that COUNTVALUE
     * and TRANSFERCOUNT are in units of 16 bit words for 16 bit NAND.
     * Instead believe the bit that says it is always a byte count.
     */
    bytes = 64 - s->prefetch.fifopointer;
    if (bytes > s->prefetch.count) {
        bytes = s->prefetch.count;
    }
    if (is16bit) {
        bytes &= ~1;
    }

    s->prefetch.count -= bytes;
    s->prefetch.fifopointer += bytes;
    fptr = 64 - s->prefetch.fifopointer;
    /* Move the existing data in the FIFO so it sits just
     * before what we're about to read in
     */
    while (fptr < (64 - bytes)) {
        s->prefetch.fifo[fptr] = s->prefetch.fifo[fptr + bytes];
        fptr++;
    }
    while (fptr < 64) {
        if (is16bit) {
            uint32_t v = omap_nand_read(&s->cs_file[cs], 0, 2);
            s->prefetch.fifo[fptr++] = v & 0xff;
            s->prefetch.fifo[fptr++] = (v >> 8) & 0xff;
        } else {
            s->prefetch.fifo[fptr++] = omap_nand_read(&s->cs_file[cs], 0, 1);
        }
    }
    if (s->prefetch.startengine && (s->prefetch.count == 0)) {
        /* This was the final transfer: raise TERMINALCOUNTSTATUS */
        s->irqst |= 2;
        s->prefetch.startengine = 0;
    }
    /* If there are any bytes in the FIFO at this point then
     * we must raise a DMA request (either this is a final part
     * transfer, or we filled the FIFO in which case we certainly
     * have THRESHOLD bytes available)
     */
    if (s->prefetch.fifopointer != 0) {
        omap_gpmc_dma_update(s, 1);
    }
    omap_gpmc_int_update(s);
}

/* Access functions for a NAND-like device when the prefetch/postwrite
 * engine is enabled -- all addresses in the region behave alike:
 * data is read or written to the FIFO.
 */
static uint64_t omap_gpmc_prefetch_read(void *opaque, hwaddr addr,
                                        unsigned size)
{
    struct omap_gpmc_s *s = (struct omap_gpmc_s *) opaque;
    uint32_t data;
    if (s->prefetch.config1 & 1) {
        /* The TRM doesn't define the behaviour if you read from the
         * FIFO when the prefetch engine is in write mode. We choose
         * to always return zero.
         */
        return 0;
    }
    /* Note that trying to read an empty fifo repeats the last byte */
    if (s->prefetch.fifopointer) {
        s->prefetch.fifopointer--;
    }
    data = s->prefetch.fifo[63 - s->prefetch.fifopointer];
    if (s->prefetch.fifopointer ==
        (64 - prefetch_threshold(s->prefetch.config1))) {
        /* We've drained THRESHOLD bytes now. So deassert the
         * DMA request, then refill the FIFO (which will probably
         * assert it again.)
         */
        omap_gpmc_dma_update(s, 0);
        fill_prefetch_fifo(s);
    }
    omap_gpmc_int_update(s);
    return data;
}

static void omap_gpmc_prefetch_write(void *opaque, hwaddr addr,
                                     uint64_t value, unsigned size)
{
    struct omap_gpmc_s *s = (struct omap_gpmc_s *) opaque;
    int cs = prefetch_cs(s->prefetch.config1);
    if ((s->prefetch.config1 & 1) == 0) {
        /* The TRM doesn't define the behaviour of writing to the
         * FIFO when the prefetch engine is in read mode. We
         * choose to ignore the write.
         */
        return;
    }
    if (s->prefetch.count == 0) {
        /* The TRM doesn't define the behaviour of writing to the
         * FIFO if the transfer is complete.
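         * (That is, COUNTVALUE -- mirrored in s->prefetch.count -- has
         * already been decremented to zero.)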
         * We choose to ignore.
         */
        return;
    }
    /* The only reason we do any data buffering in postwrite
     * mode is if we are talking to a 16 bit NAND device, in
     * which case we need to buffer the first byte of the
     * 16 bit word until the other byte arrives.
     */
    int is16bit = (((s->cs_file[cs].config[0] >> 12) & 3) != 0);
    if (is16bit) {
        /* fifopointer alternates between 64 (waiting for first
         * byte of word) and 63 (waiting for second byte)
         */
        if (s->prefetch.fifopointer == 64) {
            s->prefetch.fifo[0] = value;
            s->prefetch.fifopointer--;
        } else {
            value = (value << 8) | s->prefetch.fifo[0];
            omap_nand_write(&s->cs_file[cs], 0, value, 2);
            s->prefetch.count--;
            s->prefetch.fifopointer = 64;
        }
    } else {
        /* Just write the byte : fifopointer remains 64 at all times */
        omap_nand_write(&s->cs_file[cs], 0, value, 1);
        s->prefetch.count--;
    }
    if (s->prefetch.count == 0) {
        /* Final transfer: raise TERMINALCOUNTSTATUS */
        s->irqst |= 2;
        s->prefetch.startengine = 0;
    }
    omap_gpmc_int_update(s);
}

static const MemoryRegionOps omap_prefetch_ops = {
    .read = omap_gpmc_prefetch_read,
    .write = omap_gpmc_prefetch_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .impl.min_access_size = 1,
    .impl.max_access_size = 1,
};

static MemoryRegion *omap_gpmc_cs_memregion(struct omap_gpmc_s *s, int cs)
{
    /* Return the MemoryRegion* to map/unmap for this chipselect */
    struct omap_gpmc_cs_file_s *f = &s->cs_file[cs];
    if (omap_gpmc_devtype(f) == OMAP_GPMC_NOR) {
        return f->iomem;
    }
    if ((s->prefetch.config1 & 0x80) &&
        (prefetch_cs(s->prefetch.config1) == cs)) {
        /* The prefetch engine is enabled for this CS: map the FIFO */
        return &s->prefetch.iomem;
    }
    return &f->nandiomem;
}

static void omap_gpmc_cs_map(struct omap_gpmc_s *s, int cs)
{
    struct omap_gpmc_cs_file_s *f = &s->cs_file[cs];
    uint32_t mask = (f->config[6] >> 8) & 0xf;
    uint32_t base = f->config[6] & 0x3f;
    uint32_t size;

    if (!f->iomem && !f->dev) {
        return;
    }

    if (!(f->config[6] & (1 << 6))) {
        /* Do nothing unless CSVALID */
        return;
    }

    /* TODO: check for overlapping regions and report access errors */
    if (mask != 0x8 && mask != 0xc && mask != 0xe && mask != 0xf
        && !(s->accept_256 && !mask)) {
        fprintf(stderr, "%s: invalid chip-select mask address (0x%x)\n",
                __func__, mask);
    }

    base <<= 24;
    size = (0x0fffffff & ~(mask << 24)) + 1;
    /* TODO: rather than setting the size of the mapping (which should be
     * constant), the mask should cause wrapping of the address space, so
     * that the same memory becomes accessible at every <i>size</i> bytes
     * starting from <i>base</i>.
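     * (For the mask values checked above, 0x8 decodes 128MB, 0xc 64MB,
     * 0xe 32MB, 0xf 16MB, and 0 -- accepted only on OMAP3630 -- the
     * full 256MB.)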
     */
    memory_region_init(&f->container, NULL, "omap-gpmc-file", size);
    memory_region_add_subregion(&f->container, 0,
                                omap_gpmc_cs_memregion(s, cs));
    memory_region_add_subregion(get_system_memory(), base,
                                &f->container);
}

static void omap_gpmc_cs_unmap(struct omap_gpmc_s *s, int cs)
{
    struct omap_gpmc_cs_file_s *f = &s->cs_file[cs];
    if (!(f->config[6] & (1 << 6))) {
        /* Do nothing unless CSVALID */
        return;
    }
    if (!f->iomem && !f->dev) {
        return;
    }
    memory_region_del_subregion(get_system_memory(), &f->container);
    memory_region_del_subregion(&f->container, omap_gpmc_cs_memregion(s, cs));
    object_unparent(OBJECT(&f->container));
}

void omap_gpmc_reset(struct omap_gpmc_s *s)
{
    int i;

    s->sysconfig = 0;
    s->irqst = 0;
    s->irqen = 0;
    omap_gpmc_int_update(s);
    for (i = 0; i < 8; i++) {
        /* This has to happen before we change any of the config
         * used to determine which memory regions are mapped or unmapped.
         */
        omap_gpmc_cs_unmap(s, i);
    }
    s->timeout = 0;
    s->config = 0xa00;
    s->prefetch.config1 = 0x00004000;
    s->prefetch.transfercount = 0x00000000;
    s->prefetch.startengine = 0;
    s->prefetch.fifopointer = 0;
    s->prefetch.count = 0;
    for (i = 0; i < 8; i ++) {
        s->cs_file[i].config[1] = 0x101001;
        s->cs_file[i].config[2] = 0x020201;
        s->cs_file[i].config[3] = 0x10031003;
        s->cs_file[i].config[4] = 0x10f1111;
        s->cs_file[i].config[5] = 0;
        s->cs_file[i].config[6] = 0xf00;
        /* In theory we could probe attached devices for some CFG1
         * bits here, but we just retain them across resets as they
         * were set initially by omap_gpmc_attach().
         */
        if (i == 0) {
            s->cs_file[i].config[0] &= 0x00433e00;
            s->cs_file[i].config[6] |= 1 << 6; /* CSVALID */
            omap_gpmc_cs_map(s, i);
        } else {
            s->cs_file[i].config[0] &= 0x00403c00;
        }
    }
    s->ecc_cs = 0;
    s->ecc_ptr = 0;
    s->ecc_cfg = 0x3fcff000;
    for (i = 0; i < 9; i ++)
        ecc_reset(&s->ecc[i]);
}

static int gpmc_wordaccess_only(hwaddr addr)
{
    /* Return true if the register offset is to a register that
     * only permits word width accesses.
     * Non-word accesses are only OK for GPMC_NAND_DATA/ADDRESS/COMMAND
     * for any chipselect.
     */
    if (addr >= 0x60 && addr <= 0x1d4) {
        int cs = (addr - 0x60) / 0x30;
        addr -= cs * 0x30;
        if (addr >= 0x7c && addr < 0x88) {
            /* GPMC_NAND_COMMAND, GPMC_NAND_ADDRESS, GPMC_NAND_DATA */
            return 0;
        }
    }
    return 1;
}

static uint64_t omap_gpmc_read(void *opaque, hwaddr addr,
                               unsigned size)
{
    struct omap_gpmc_s *s = (struct omap_gpmc_s *) opaque;
    int cs;
    struct omap_gpmc_cs_file_s *f;

    if (size != 4 && gpmc_wordaccess_only(addr)) {
        return omap_badwidth_read32(opaque, addr);
    }

    switch (addr) {
    case 0x000: /* GPMC_REVISION */
        return s->revision;

    case 0x010: /* GPMC_SYSCONFIG */
        return s->sysconfig;

    case 0x014: /* GPMC_SYSSTATUS */
        return 1; /* RESETDONE */

    case 0x018: /* GPMC_IRQSTATUS */
        return s->irqst;

    case 0x01c: /* GPMC_IRQENABLE */
        return s->irqen;

    case 0x040: /* GPMC_TIMEOUT_CONTROL */
        return s->timeout;

    case 0x044: /* GPMC_ERR_ADDRESS */
    case 0x048: /* GPMC_ERR_TYPE */
        return 0;

    case 0x050: /* GPMC_CONFIG */
        return s->config;

    case 0x054: /* GPMC_STATUS */
        return 0x001;

    case 0x060 ... 0x1d4:
        cs = (addr - 0x060) / 0x30;
        addr -= cs * 0x30;
        f = s->cs_file + cs;
        switch (addr) {
        case 0x60: /* GPMC_CONFIG1 */
            return f->config[0];
        case 0x64: /* GPMC_CONFIG2 */
            return f->config[1];
        case 0x68: /* GPMC_CONFIG3 */
            return f->config[2];
        case 0x6c: /* GPMC_CONFIG4 */
            return f->config[3];
        case 0x70: /* GPMC_CONFIG5 */
            return f->config[4];
        case 0x74: /* GPMC_CONFIG6 */
            return f->config[5];
        case 0x78: /* GPMC_CONFIG7 */
            return f->config[6];
        case 0x84 ... 0x87: /* GPMC_NAND_DATA */
            if (omap_gpmc_devtype(f) == OMAP_GPMC_NAND) {
                return omap_nand_read(f, 0, size);
            }
            return 0;
        }
        break;

    case 0x1e0: /* GPMC_PREFETCH_CONFIG1 */
        return s->prefetch.config1;
    case 0x1e4: /* GPMC_PREFETCH_CONFIG2 */
        return s->prefetch.transfercount;
    case 0x1ec: /* GPMC_PREFETCH_CONTROL */
        return s->prefetch.startengine;
    case 0x1f0: /* GPMC_PREFETCH_STATUS */
        /* NB: The OMAP3 TRM is inconsistent about whether the GPMC
         * FIFOTHRESHOLDSTATUS bit should be set when
         * FIFOPOINTER > FIFOTHRESHOLD or when it is >= FIFOTHRESHOLD.
         * Apparently the underlying functional spec from which the TRM was
         * created states that the behaviour is ">=", and this also
         * makes more conceptual sense.
         */
        return (s->prefetch.fifopointer << 24) |
               ((s->prefetch.fifopointer >=
                 ((s->prefetch.config1 >> 8) & 0x7f) ? 1 : 0) << 16) |
               s->prefetch.count;

    case 0x1f4: /* GPMC_ECC_CONFIG */
        return s->ecc_cs;
    case 0x1f8: /* GPMC_ECC_CONTROL */
        return s->ecc_ptr;
    case 0x1fc: /* GPMC_ECC_SIZE_CONFIG */
        return s->ecc_cfg;
    case 0x200 ... 0x220: /* GPMC_ECC_RESULT */
        cs = (addr & 0x1f) >> 2;
        /* TODO: check correctness */
        return
            ((s->ecc[cs].cp & 0x07) << 0) |
            ((s->ecc[cs].cp & 0x38) << 13) |
            ((s->ecc[cs].lp[0] & 0x1ff) << 3) |
            ((s->ecc[cs].lp[1] & 0x1ff) << 19);

    case 0x230: /* GPMC_TESTMODE_CTRL */
        return 0;
    case 0x234: /* GPMC_PSA_LSB */
    case 0x238: /* GPMC_PSA_MSB */
        return 0x00000000;
    }

    OMAP_BAD_REG(addr);
    return 0;
}

static void omap_gpmc_write(void *opaque, hwaddr addr,
                            uint64_t value, unsigned size)
{
    struct omap_gpmc_s *s = (struct omap_gpmc_s *) opaque;
    int cs;
    struct omap_gpmc_cs_file_s *f;

    if (size != 4 && gpmc_wordaccess_only(addr)) {
        omap_badwidth_write32(opaque, addr, value);
        return;
    }

    switch (addr) {
    case 0x000: /* GPMC_REVISION */
    case 0x014: /* GPMC_SYSSTATUS */
    case 0x054: /* GPMC_STATUS */
    case 0x1f0: /* GPMC_PREFETCH_STATUS */
    case 0x200 ... 0x220: /* GPMC_ECC_RESULT */
    case 0x234: /* GPMC_PSA_LSB */
    case 0x238: /* GPMC_PSA_MSB */
        OMAP_RO_REG(addr);
        break;

    case 0x010: /* GPMC_SYSCONFIG */
        if ((value >> 3) == 0x3)
            fprintf(stderr, "%s: bad SDRAM idle mode %"PRIi64"\n",
                    __func__, value >> 3);
        if (value & 2)
            omap_gpmc_reset(s);
        s->sysconfig = value & 0x19;
        break;

    case 0x018: /* GPMC_IRQSTATUS */
        s->irqst &= ~value;
        omap_gpmc_int_update(s);
        break;

    case 0x01c: /* GPMC_IRQENABLE */
        s->irqen = value & 0xf03;
        omap_gpmc_int_update(s);
        break;

    case 0x040: /* GPMC_TIMEOUT_CONTROL */
        s->timeout = value & 0x1ff1;
        break;

    case 0x044: /* GPMC_ERR_ADDRESS */
    case 0x048: /* GPMC_ERR_TYPE */
        break;

    case 0x050: /* GPMC_CONFIG */
        s->config = value & 0xf13;
        break;

    case 0x060 ... 0x1d4:
        cs = (addr - 0x060) / 0x30;
        addr -= cs * 0x30;
        f = s->cs_file + cs;
        switch (addr) {
        case 0x60: /* GPMC_CONFIG1 */
            f->config[0] = value & 0xffef3e13;
            break;
        case 0x64: /* GPMC_CONFIG2 */
            f->config[1] = value & 0x001f1f8f;
            break;
        case 0x68: /* GPMC_CONFIG3 */
            f->config[2] = value & 0x001f1f8f;
            break;
        case 0x6c: /* GPMC_CONFIG4 */
            f->config[3] = value & 0x1f8f1f8f;
            break;
        case 0x70: /* GPMC_CONFIG5 */
            f->config[4] = value & 0x0f1f1f1f;
            break;
        case 0x74: /* GPMC_CONFIG6 */
            f->config[5] = value & 0x00000fcf;
            break;
        case 0x78: /* GPMC_CONFIG7 */
            if ((f->config[6] ^ value) & 0xf7f) {
                omap_gpmc_cs_unmap(s, cs);
                f->config[6] = value & 0x00000f7f;
                omap_gpmc_cs_map(s, cs);
            }
            break;
        case 0x7c ... 0x7f: /* GPMC_NAND_COMMAND */
            if (omap_gpmc_devtype(f) == OMAP_GPMC_NAND) {
                nand_setpins(f->dev, 1, 0, 0, 1, 0); /* CLE */
                omap_nand_setio(f->dev, value, omap_gpmc_devsize(f), size);
            }
            break;
        case 0x80 ... 0x83: /* GPMC_NAND_ADDRESS */
            if (omap_gpmc_devtype(f) == OMAP_GPMC_NAND) {
                nand_setpins(f->dev, 0, 1, 0, 1, 0); /* ALE */
                omap_nand_setio(f->dev, value, omap_gpmc_devsize(f), size);
            }
            break;
        case 0x84 ... 0x87: /* GPMC_NAND_DATA */
            if (omap_gpmc_devtype(f) == OMAP_GPMC_NAND) {
                omap_nand_write(f, 0, value, size);
            }
            break;
        default:
            goto bad_reg;
        }
        break;

    case 0x1e0: /* GPMC_PREFETCH_CONFIG1 */
        if (!s->prefetch.startengine) {
            uint32_t newconfig1 = value & 0x7f8f7fbf;
            uint32_t changed;
            changed = newconfig1 ^ s->prefetch.config1;
            if (changed & (0x80 | 0x7000000)) {
                /* Turning the engine on or off, or mapping it somewhere else.
                 * cs_map() and cs_unmap() check the prefetch config and
                 * overall CSVALID bits, so it is sufficient to unmap-and-map
                 * both the old cs and the new one. Note that we adhere to
                 * the "unmap/change config/map" order (and not unmap twice
                 * if newcs == oldcs), otherwise we'll try to delete the wrong
                 * memory region.
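                 * (In GPMC_PREFETCH_CONFIG1, bit 7 is the engine enable
                 * checked by omap_gpmc_cs_memregion() and bits 26:24 select
                 * the chipselect; see prefetch_cs().)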
                 */
                int oldcs = prefetch_cs(s->prefetch.config1);
                int newcs = prefetch_cs(newconfig1);
                omap_gpmc_cs_unmap(s, oldcs);
                if (oldcs != newcs) {
                    omap_gpmc_cs_unmap(s, newcs);
                }
                s->prefetch.config1 = newconfig1;
                omap_gpmc_cs_map(s, oldcs);
                if (oldcs != newcs) {
                    omap_gpmc_cs_map(s, newcs);
                }
            } else {
                s->prefetch.config1 = newconfig1;
            }
        }
        break;

    case 0x1e4: /* GPMC_PREFETCH_CONFIG2 */
        if (!s->prefetch.startengine) {
            s->prefetch.transfercount = value & 0x3fff;
        }
        break;

    case 0x1ec: /* GPMC_PREFETCH_CONTROL */
        if (s->prefetch.startengine != (value & 1)) {
            s->prefetch.startengine = value & 1;
            if (s->prefetch.startengine) {
                /* Prefetch engine start */
                s->prefetch.count = s->prefetch.transfercount;
                if (s->prefetch.config1 & 1) {
                    /* Write */
                    s->prefetch.fifopointer = 64;
                } else {
                    /* Read */
                    s->prefetch.fifopointer = 0;
                    fill_prefetch_fifo(s);
                }
            } else {
                /* Prefetch engine forcibly stopped. The TRM
                 * doesn't define the behaviour if you do this.
                 * We clear the prefetch count, which means that
                 * we permit no more writes, and don't read any
                 * more data from NAND. The CPU can still drain
                 * the FIFO of unread data.
                 */
                s->prefetch.count = 0;
            }
            omap_gpmc_int_update(s);
        }
        break;

    case 0x1f4: /* GPMC_ECC_CONFIG */
        s->ecc_cs = 0x8f;
        break;
    case 0x1f8: /* GPMC_ECC_CONTROL */
        if (value & (1 << 8))
            for (cs = 0; cs < 9; cs ++)
                ecc_reset(&s->ecc[cs]);
        s->ecc_ptr = value & 0xf;
        if (s->ecc_ptr == 0 || s->ecc_ptr > 9) {
            s->ecc_ptr = 0;
            s->ecc_cs &= ~1;
        }
        break;
    case 0x1fc: /* GPMC_ECC_SIZE_CONFIG */
        s->ecc_cfg = value & 0x3fcff1ff;
        break;
    case 0x230: /* GPMC_TESTMODE_CTRL */
        if (value & 7)
            fprintf(stderr, "%s: test mode enable attempt\n", __func__);
        break;

    default:
    bad_reg:
        OMAP_BAD_REG(addr);
        return;
    }
}

static const MemoryRegionOps omap_gpmc_ops = {
    .read = omap_gpmc_read,
    .write = omap_gpmc_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

struct omap_gpmc_s *omap_gpmc_init(struct omap_mpu_state_s *mpu,
                                   hwaddr base,
                                   qemu_irq irq, qemu_irq drq)
{
    int cs;
    struct omap_gpmc_s *s = g_new0(struct omap_gpmc_s, 1);

    memory_region_init_io(&s->iomem, NULL, &omap_gpmc_ops, s, "omap-gpmc",
                          0x1000);
    memory_region_add_subregion(get_system_memory(), base, &s->iomem);

    s->irq = irq;
    s->drq = drq;
    s->accept_256 = cpu_is_omap3630(mpu);
    s->revision = cpu_class_omap3(mpu) ? 0x50 : 0x20;
    s->lastirq = 0;
    omap_gpmc_reset(s);

    /* We have to register a different IO memory handler for each
     * chip select region in case a NAND device is mapped there. We
     * make the region the worst-case size of 256MB and rely on the
     * container memory region in cs_map to chop it down to the actual
     * guest-requested size.
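     * (The container's size is derived from the CONFIG7 MASKADDRESS
     * field in omap_gpmc_cs_map(); 256MB corresponds to a mask of zero.)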
     */
    for (cs = 0; cs < 8; cs++) {
        memory_region_init_io(&s->cs_file[cs].nandiomem, NULL,
                              &omap_nand_ops,
                              &s->cs_file[cs],
                              "omap-nand",
                              256 * 1024 * 1024);
    }

    memory_region_init_io(&s->prefetch.iomem, NULL, &omap_prefetch_ops, s,
                          "omap-gpmc-prefetch", 256 * 1024 * 1024);
    return s;
}

void omap_gpmc_attach(struct omap_gpmc_s *s, int cs, MemoryRegion *iomem)
{
    struct omap_gpmc_cs_file_s *f;
    assert(iomem);

    if (cs < 0 || cs >= 8) {
        fprintf(stderr, "%s: bad chip-select %i\n", __func__, cs);
        exit(-1);
    }
    f = &s->cs_file[cs];

    omap_gpmc_cs_unmap(s, cs);
    f->config[0] &= ~(0xf << 10);
    f->iomem = iomem;
    omap_gpmc_cs_map(s, cs);
}

void omap_gpmc_attach_nand(struct omap_gpmc_s *s, int cs, DeviceState *nand)
{
    struct omap_gpmc_cs_file_s *f;
    assert(nand);

    if (cs < 0 || cs >= 8) {
        fprintf(stderr, "%s: bad chip-select %i\n", __func__, cs);
        exit(-1);
    }
    f = &s->cs_file[cs];

    omap_gpmc_cs_unmap(s, cs);
    f->config[0] &= ~(0xf << 10);
    f->config[0] |= (OMAP_GPMC_NAND << 10);
    f->dev = nand;
    if (nand_getbuswidth(f->dev) == 16) {
        f->config[0] |= OMAP_GPMC_16BIT << 12;
    }
    omap_gpmc_cs_map(s, cs);
}