/*
 * TI OMAP general purpose memory controller emulation.
 *
 * Copyright (C) 2007-2009 Nokia Corporation
 * Original code written by Andrzej Zaborowski <andrew@openedhand.com>
 * Enhancements for OMAP3 and NAND support written by Juha Riihimäki
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 or
 * (at your option) any later version of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "hw/hw.h"
#include "hw/block/flash.h"
#include "hw/arm/omap.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"

/* General-Purpose Memory Controller */
struct omap_gpmc_s {
    qemu_irq irq;
    qemu_irq drq;
    MemoryRegion iomem;
    int accept_256;

    uint8_t revision;
    uint8_t sysconfig;
    uint16_t irqst;
    uint16_t irqen;
    uint16_t lastirq;
    uint16_t timeout;
    uint16_t config;
    struct omap_gpmc_cs_file_s {
        uint32_t config[7];
        MemoryRegion *iomem;
        MemoryRegion container;
        MemoryRegion nandiomem;
        DeviceState *dev;
    } cs_file[8];
    int ecc_cs;
    int ecc_ptr;
    uint32_t ecc_cfg;
    ECCState ecc[9];
    struct prefetch {
        uint32_t config1; /* GPMC_PREFETCH_CONFIG1 */
        uint32_t transfercount; /* GPMC_PREFETCH_CONFIG2:TRANSFERCOUNT */
        int startengine; /* GPMC_PREFETCH_CONTROL:STARTENGINE */
        int fifopointer; /* GPMC_PREFETCH_STATUS:FIFOPOINTER */
        int count; /* GPMC_PREFETCH_STATUS:COUNTVALUE */
        MemoryRegion iomem;
        uint8_t fifo[64];
    } prefetch;
};

#define OMAP_GPMC_8BIT 0
#define OMAP_GPMC_16BIT 1
#define OMAP_GPMC_NOR 0
#define OMAP_GPMC_NAND 2

static int omap_gpmc_devtype(struct omap_gpmc_cs_file_s *f)
{
    return (f->config[0] >> 10) & 3;
}

static int omap_gpmc_devsize(struct omap_gpmc_cs_file_s *f)
{
    /* devsize field is really 2 bits but we ignore the high
     * bit to ensure consistent behaviour if the guest sets
     * it (values 2 and 3 are reserved in the TRM)
     */
    return (f->config[0] >> 12) & 1;
}

/* Extract the chip-select value from the prefetch config1 register */
static int prefetch_cs(uint32_t config1)
{
    return (config1 >> 24) & 7;
}

static int prefetch_threshold(uint32_t config1)
{
    return (config1 >> 8) & 0x7f;
}

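/* Interrupt bits used below, named per the GPMC_IRQSTATUS/GPMC_IRQENABLE
 * layout in the TRM: bit 0 is FIFOEVENTSTATUS and bit 1 is
 * TERMINALCOUNTSTATUS; the remaining bits in the 0xf03 mask applied by the
 * IRQENABLE write handler are the WAITn edge-detection bits.
 */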
static void omap_gpmc_int_update(struct omap_gpmc_s *s)
{
    /* The TRM is a bit unclear, but it seems to say that
     * the TERMINALCOUNTSTATUS bit is set only on the
     * transition when the prefetch engine goes from
     * active to inactive, whereas the FIFOEVENTSTATUS
     * bit is held high as long as the fifo has at
     * least THRESHOLD bytes available.
     * So we do the latter here, but TERMINALCOUNTSTATUS
     * is set elsewhere.
     */
    if (s->prefetch.fifopointer >= prefetch_threshold(s->prefetch.config1)) {
        s->irqst |= 1;
    }
    if ((s->irqen & s->irqst) != s->lastirq) {
        s->lastirq = s->irqen & s->irqst;
        qemu_set_irq(s->irq, s->lastirq);
    }
}

static void omap_gpmc_dma_update(struct omap_gpmc_s *s, int value)
{
    if (s->prefetch.config1 & 4) {
        qemu_set_irq(s->drq, value);
    }
}

/* Access functions for when a NAND-like device is mapped into memory:
 * all addresses in the region behave like accesses to the relevant
 * GPMC_NAND_DATA_i register (which is actually implemented to call these)
 */
static uint64_t omap_nand_read(void *opaque, hwaddr addr,
                               unsigned size)
{
    struct omap_gpmc_cs_file_s *f = (struct omap_gpmc_cs_file_s *)opaque;
    uint64_t v;
    nand_setpins(f->dev, 0, 0, 0, 1, 0);
    switch (omap_gpmc_devsize(f)) {
    case OMAP_GPMC_8BIT:
        v = nand_getio(f->dev);
        if (size == 1) {
            return v;
        }
        v |= (nand_getio(f->dev) << 8);
        if (size == 2) {
            return v;
        }
        v |= (nand_getio(f->dev) << 16);
        v |= (nand_getio(f->dev) << 24);
        return v;
    case OMAP_GPMC_16BIT:
        v = nand_getio(f->dev);
        if (size == 1) {
            /* 8 bit read from 16 bit device : probably a guest bug */
            return v & 0xff;
        }
        if (size == 2) {
            return v;
        }
        v |= (nand_getio(f->dev) << 16);
        return v;
    default:
        abort();
    }
}

static void omap_nand_setio(DeviceState *dev, uint64_t value,
                            int nandsize, int size)
{
    /* Write the specified value to the NAND device, respecting
     * both size of the NAND device and size of the write access.
     */
    switch (nandsize) {
    case OMAP_GPMC_8BIT:
        switch (size) {
        case 1:
            nand_setio(dev, value & 0xff);
            break;
        case 2:
            nand_setio(dev, value & 0xff);
            nand_setio(dev, (value >> 8) & 0xff);
            break;
        case 4:
        default:
            nand_setio(dev, value & 0xff);
            nand_setio(dev, (value >> 8) & 0xff);
            nand_setio(dev, (value >> 16) & 0xff);
            nand_setio(dev, (value >> 24) & 0xff);
            break;
        }
        break;
    case OMAP_GPMC_16BIT:
        switch (size) {
        case 1:
            /* writing to a 16bit device with 8bit access is probably a guest
             * bug; pass the value through anyway.
             */
        case 2:
            nand_setio(dev, value & 0xffff);
            break;
        case 4:
        default:
            nand_setio(dev, value & 0xffff);
            nand_setio(dev, (value >> 16) & 0xffff);
            break;
        }
        break;
    }
}

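/* Writes to the NAND data window: drive a data cycle (CLE and ALE both
 * low, as in omap_nand_read above) and forward the value, honouring both
 * the device bus width and the width of the guest access.
 */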
static void omap_nand_write(void *opaque, hwaddr addr,
                            uint64_t value, unsigned size)
{
    struct omap_gpmc_cs_file_s *f = (struct omap_gpmc_cs_file_s *)opaque;
    nand_setpins(f->dev, 0, 0, 0, 1, 0);
    omap_nand_setio(f->dev, value, omap_gpmc_devsize(f), size);
}

static const MemoryRegionOps omap_nand_ops = {
    .read = omap_nand_read,
    .write = omap_nand_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static void fill_prefetch_fifo(struct omap_gpmc_s *s)
{
    /* Fill the prefetch FIFO by reading data from NAND.
     * We do this synchronously, unlike the hardware which
     * will do this asynchronously. We refill when the
     * FIFO has THRESHOLD bytes free, and we always refill
     * as much data as possible starting at the top end
     * of the FIFO.
     * (We have to refill at THRESHOLD rather than waiting
     * for the FIFO to empty to allow for the case where
     * the FIFO size isn't an exact multiple of THRESHOLD
     * and we're doing DMA transfers.)
     * This means we never need to handle wrap-around in
     * the fifo-reading code, and the next byte of data
     * to read is always fifo[63 - fifopointer].
     */
    int fptr;
    int cs = prefetch_cs(s->prefetch.config1);
    int is16bit = (((s->cs_file[cs].config[0] >> 12) & 3) != 0);
    int bytes;
    /* Don't believe the bit of the OMAP TRM that says that COUNTVALUE
     * and TRANSFERCOUNT are in units of 16 bit words for 16 bit NAND.
     * Instead believe the bit that says it is always a byte count.
     */
    bytes = 64 - s->prefetch.fifopointer;
    if (bytes > s->prefetch.count) {
        bytes = s->prefetch.count;
    }
    if (is16bit) {
        bytes &= ~1;
    }

    s->prefetch.count -= bytes;
    s->prefetch.fifopointer += bytes;
    fptr = 64 - s->prefetch.fifopointer;
    /* Move the existing data in the FIFO so it sits just
     * before what we're about to read in
     */
    while (fptr < (64 - bytes)) {
        s->prefetch.fifo[fptr] = s->prefetch.fifo[fptr + bytes];
        fptr++;
    }
    while (fptr < 64) {
        if (is16bit) {
            uint32_t v = omap_nand_read(&s->cs_file[cs], 0, 2);
            s->prefetch.fifo[fptr++] = v & 0xff;
            s->prefetch.fifo[fptr++] = (v >> 8) & 0xff;
        } else {
            s->prefetch.fifo[fptr++] = omap_nand_read(&s->cs_file[cs], 0, 1);
        }
    }
    if (s->prefetch.startengine && (s->prefetch.count == 0)) {
        /* This was the final transfer: raise TERMINALCOUNTSTATUS */
        s->irqst |= 2;
        s->prefetch.startengine = 0;
    }
    /* If there are any bytes in the FIFO at this point then
     * we must raise a DMA request (either this is a final part
     * transfer, or we filled the FIFO in which case we certainly
     * have THRESHOLD bytes available)
     */
    if (s->prefetch.fifopointer != 0) {
        omap_gpmc_dma_update(s, 1);
    }
    omap_gpmc_int_update(s);
}

/* Access functions for a NAND-like device when the prefetch/postwrite
 * engine is enabled -- all addresses in the region behave alike:
 * data is read or written to the FIFO.
 */
static uint64_t omap_gpmc_prefetch_read(void *opaque, hwaddr addr,
                                        unsigned size)
{
    struct omap_gpmc_s *s = (struct omap_gpmc_s *) opaque;
    uint32_t data;
    if (s->prefetch.config1 & 1) {
        /* The TRM doesn't define the behaviour if you read from the
         * FIFO when the prefetch engine is in write mode. We choose
         * to always return zero.
         */
        return 0;
    }
    /* Note that trying to read an empty fifo repeats the last byte */
    if (s->prefetch.fifopointer) {
        s->prefetch.fifopointer--;
    }
    data = s->prefetch.fifo[63 - s->prefetch.fifopointer];
    if (s->prefetch.fifopointer ==
        (64 - prefetch_threshold(s->prefetch.config1))) {
        /* We've drained THRESHOLD bytes now. So deassert the
         * DMA request, then refill the FIFO (which will probably
         * assert it again.)
         */
        omap_gpmc_dma_update(s, 0);
        fill_prefetch_fifo(s);
    }
    omap_gpmc_int_update(s);
    return data;
}

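/* Guest writes to the FIFO window in postwrite mode are forwarded to the
 * NAND device; for a 16-bit device the first byte of each pair is held in
 * fifo[0] until its partner arrives.
 */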
static void omap_gpmc_prefetch_write(void *opaque, hwaddr addr,
                                     uint64_t value, unsigned size)
{
    struct omap_gpmc_s *s = (struct omap_gpmc_s *) opaque;
    int cs = prefetch_cs(s->prefetch.config1);
    if ((s->prefetch.config1 & 1) == 0) {
        /* The TRM doesn't define the behaviour of writing to the
         * FIFO when the prefetch engine is in read mode. We
         * choose to ignore the write.
         */
        return;
    }
    if (s->prefetch.count == 0) {
        /* The TRM doesn't define the behaviour of writing to the
         * FIFO if the transfer is complete. We choose to ignore.
         */
        return;
    }
    /* The only reason we do any data buffering in postwrite
     * mode is if we are talking to a 16 bit NAND device, in
     * which case we need to buffer the first byte of the
     * 16 bit word until the other byte arrives.
     */
    int is16bit = (((s->cs_file[cs].config[0] >> 12) & 3) != 0);
    if (is16bit) {
        /* fifopointer alternates between 64 (waiting for first
         * byte of word) and 63 (waiting for second byte)
         */
        if (s->prefetch.fifopointer == 64) {
            s->prefetch.fifo[0] = value;
            s->prefetch.fifopointer--;
        } else {
            value = (value << 8) | s->prefetch.fifo[0];
            omap_nand_write(&s->cs_file[cs], 0, value, 2);
            s->prefetch.count--;
            s->prefetch.fifopointer = 64;
        }
    } else {
        /* Just write the byte : fifopointer remains 64 at all times */
        omap_nand_write(&s->cs_file[cs], 0, value, 1);
        s->prefetch.count--;
    }
    if (s->prefetch.count == 0) {
        /* Final transfer: raise TERMINALCOUNTSTATUS */
        s->irqst |= 2;
        s->prefetch.startengine = 0;
    }
    omap_gpmc_int_update(s);
}

static const MemoryRegionOps omap_prefetch_ops = {
    .read = omap_gpmc_prefetch_read,
    .write = omap_gpmc_prefetch_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .impl.min_access_size = 1,
    .impl.max_access_size = 1,
};

static MemoryRegion *omap_gpmc_cs_memregion(struct omap_gpmc_s *s, int cs)
{
    /* Return the MemoryRegion* to map/unmap for this chipselect */
    struct omap_gpmc_cs_file_s *f = &s->cs_file[cs];
    if (omap_gpmc_devtype(f) == OMAP_GPMC_NOR) {
        return f->iomem;
    }
    if ((s->prefetch.config1 & 0x80) &&
        (prefetch_cs(s->prefetch.config1) == cs)) {
        /* The prefetch engine is enabled for this CS: map the FIFO */
        return &s->prefetch.iomem;
    }
    return &f->nandiomem;
}

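/* GPMC_CONFIG7_i layout as used here: BASEADDRESS in bits 5:0 (units of
 * 16MB), CSVALID in bit 6 and MASKADDRESS in bits 11:8.  The mask values
 * accepted below select 128/64/32/16MB windows (plus 256MB on chips where
 * accept_256 is set).
 */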
static void omap_gpmc_cs_map(struct omap_gpmc_s *s, int cs)
{
    struct omap_gpmc_cs_file_s *f = &s->cs_file[cs];
    uint32_t mask = (f->config[6] >> 8) & 0xf;
    uint32_t base = f->config[6] & 0x3f;
    uint32_t size;

    if (!f->iomem && !f->dev) {
        return;
    }

    if (!(f->config[6] & (1 << 6))) {
        /* Do nothing unless CSVALID */
        return;
    }

    /* TODO: check for overlapping regions and report access errors */
    if (mask != 0x8 && mask != 0xc && mask != 0xe && mask != 0xf
        && !(s->accept_256 && !mask)) {
        fprintf(stderr, "%s: invalid chip-select mask address (0x%x)\n",
                __func__, mask);
    }

    base <<= 24;
    size = (0x0fffffff & ~(mask << 24)) + 1;
    /* TODO: rather than setting the size of the mapping (which should be
     * constant), the mask should cause wrapping of the address space, so
     * that the same memory becomes accessible at every <i>size</i> bytes
     * starting from <i>base</i>.
     */
    memory_region_init(&f->container, NULL, "omap-gpmc-file", size);
    memory_region_add_subregion(&f->container, 0,
                                omap_gpmc_cs_memregion(s, cs));
    memory_region_add_subregion(get_system_memory(), base,
                                &f->container);
}

static void omap_gpmc_cs_unmap(struct omap_gpmc_s *s, int cs)
{
    struct omap_gpmc_cs_file_s *f = &s->cs_file[cs];
    if (!(f->config[6] & (1 << 6))) {
        /* Do nothing unless CSVALID */
        return;
    }
    if (!f->iomem && !f->dev) {
        return;
    }
    memory_region_del_subregion(get_system_memory(), &f->container);
    memory_region_del_subregion(&f->container, omap_gpmc_cs_memregion(s, cs));
    object_unparent(OBJECT(&f->container));
}

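/* Reset restores the documented power-on register defaults.  The
 * chip-selects are unmapped before any CONFIG7 values change, and only
 * CS0 comes back up with CSVALID set, so it is the only region mapped
 * after reset.
 */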
void omap_gpmc_reset(struct omap_gpmc_s *s)
{
    int i;

    s->sysconfig = 0;
    s->irqst = 0;
    s->irqen = 0;
    omap_gpmc_int_update(s);
    for (i = 0; i < 8; i++) {
        /* This has to happen before we change any of the config
         * used to determine which memory regions are mapped or unmapped.
         */
        omap_gpmc_cs_unmap(s, i);
    }
    s->timeout = 0;
    s->config = 0xa00;
    s->prefetch.config1 = 0x00004000;
    s->prefetch.transfercount = 0x00000000;
    s->prefetch.startengine = 0;
    s->prefetch.fifopointer = 0;
    s->prefetch.count = 0;
    for (i = 0; i < 8; i++) {
        s->cs_file[i].config[1] = 0x101001;
        s->cs_file[i].config[2] = 0x020201;
        s->cs_file[i].config[3] = 0x10031003;
        s->cs_file[i].config[4] = 0x10f1111;
        s->cs_file[i].config[5] = 0;
        s->cs_file[i].config[6] = 0xf00;
        /* In theory we could probe attached devices for some CFG1
         * bits here, but we just retain them across resets as they
         * were set initially by omap_gpmc_attach().
         */
        if (i == 0) {
            s->cs_file[i].config[0] &= 0x00433e00;
            s->cs_file[i].config[6] |= 1 << 6; /* CSVALID */
            omap_gpmc_cs_map(s, i);
        } else {
            s->cs_file[i].config[0] &= 0x00403c00;
        }
    }
    s->ecc_cs = 0;
    s->ecc_ptr = 0;
    s->ecc_cfg = 0x3fcff000;
    for (i = 0; i < 9; i++) {
        ecc_reset(&s->ecc[i]);
    }
}

static int gpmc_wordaccess_only(hwaddr addr)
{
    /* Return true if the register offset is to a register that
     * only permits word width accesses.
     * Non-word accesses are only OK for GPMC_NAND_DATA/ADDRESS/COMMAND
     * for any chipselect.
     */
    if (addr >= 0x60 && addr <= 0x1d4) {
        int cs = (addr - 0x60) / 0x30;
        addr -= cs * 0x30;
        if (addr >= 0x7c && addr < 0x88) {
            /* GPMC_NAND_COMMAND, GPMC_NAND_ADDRESS, GPMC_NAND_DATA */
            return 0;
        }
    }
    return 1;
}

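/* Register read handler.  Narrow accesses are only allowed to the
 * per-chip-select NAND_COMMAND/ADDRESS/DATA windows; everywhere else a
 * non-32-bit access is diverted to omap_badwidth_read32().
 */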
static uint64_t omap_gpmc_read(void *opaque, hwaddr addr,
                               unsigned size)
{
    struct omap_gpmc_s *s = (struct omap_gpmc_s *) opaque;
    int cs;
    struct omap_gpmc_cs_file_s *f;

    if (size != 4 && gpmc_wordaccess_only(addr)) {
        return omap_badwidth_read32(opaque, addr);
    }

    switch (addr) {
    case 0x000: /* GPMC_REVISION */
        return s->revision;

    case 0x010: /* GPMC_SYSCONFIG */
        return s->sysconfig;

    case 0x014: /* GPMC_SYSSTATUS */
        return 1; /* RESETDONE */

    case 0x018: /* GPMC_IRQSTATUS */
        return s->irqst;

    case 0x01c: /* GPMC_IRQENABLE */
        return s->irqen;

    case 0x040: /* GPMC_TIMEOUT_CONTROL */
        return s->timeout;

    case 0x044: /* GPMC_ERR_ADDRESS */
    case 0x048: /* GPMC_ERR_TYPE */
        return 0;

    case 0x050: /* GPMC_CONFIG */
        return s->config;

    case 0x054: /* GPMC_STATUS */
        return 0x001;

    case 0x060 ... 0x1d4:
        cs = (addr - 0x060) / 0x30;
        addr -= cs * 0x30;
        f = s->cs_file + cs;
        switch (addr) {
        case 0x60: /* GPMC_CONFIG1 */
            return f->config[0];
        case 0x64: /* GPMC_CONFIG2 */
            return f->config[1];
        case 0x68: /* GPMC_CONFIG3 */
            return f->config[2];
        case 0x6c: /* GPMC_CONFIG4 */
            return f->config[3];
        case 0x70: /* GPMC_CONFIG5 */
            return f->config[4];
        case 0x74: /* GPMC_CONFIG6 */
            return f->config[5];
        case 0x78: /* GPMC_CONFIG7 */
            return f->config[6];
        case 0x84 ... 0x87: /* GPMC_NAND_DATA */
            if (omap_gpmc_devtype(f) == OMAP_GPMC_NAND) {
                return omap_nand_read(f, 0, size);
            }
            return 0;
        }
        break;

    case 0x1e0: /* GPMC_PREFETCH_CONFIG1 */
        return s->prefetch.config1;
    case 0x1e4: /* GPMC_PREFETCH_CONFIG2 */
        return s->prefetch.transfercount;
    case 0x1ec: /* GPMC_PREFETCH_CONTROL */
        return s->prefetch.startengine;
    case 0x1f0: /* GPMC_PREFETCH_STATUS */
        /* NB: The OMAP3 TRM is inconsistent about whether the GPMC
         * FIFOTHRESHOLDSTATUS bit should be set when
         * FIFOPOINTER > FIFOTHRESHOLD or when it is >= FIFOTHRESHOLD.
         * Apparently the underlying functional spec from which the TRM was
         * created states that the behaviour is ">=", and this also
         * makes more conceptual sense.
         */
        return (s->prefetch.fifopointer << 24) |
               ((s->prefetch.fifopointer >=
                 ((s->prefetch.config1 >> 8) & 0x7f) ? 1 : 0) << 16) |
               s->prefetch.count;

    case 0x1f4: /* GPMC_ECC_CONFIG */
        return s->ecc_cs;
    case 0x1f8: /* GPMC_ECC_CONTROL */
        return s->ecc_ptr;
    case 0x1fc: /* GPMC_ECC_SIZE_CONFIG */
        return s->ecc_cfg;
    case 0x200 ... 0x220: /* GPMC_ECC_RESULT */
        cs = (addr & 0x1f) >> 2;
        /* TODO: check correctness */
        return
            ((s->ecc[cs].cp & 0x07) << 0) |
            ((s->ecc[cs].cp & 0x38) << 13) |
            ((s->ecc[cs].lp[0] & 0x1ff) << 3) |
            ((s->ecc[cs].lp[1] & 0x1ff) << 19);

    case 0x230: /* GPMC_TESTMODE_CTRL */
        return 0;
    case 0x234: /* GPMC_PSA_LSB */
    case 0x238: /* GPMC_PSA_MSB */
        return 0x00000000;
    }

    OMAP_BAD_REG(addr);
    return 0;
}

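/* Register write handler.  GPMC_IRQSTATUS is write-one-to-clear, and any
 * change to CONFIG7 or PREFETCH_CONFIG1 that affects the address map goes
 * through an unmap/update/remap sequence so the currently mapped
 * MemoryRegion is always the one that gets removed.
 */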
static void omap_gpmc_write(void *opaque, hwaddr addr,
                            uint64_t value, unsigned size)
{
    struct omap_gpmc_s *s = (struct omap_gpmc_s *) opaque;
    int cs;
    struct omap_gpmc_cs_file_s *f;

    if (size != 4 && gpmc_wordaccess_only(addr)) {
        omap_badwidth_write32(opaque, addr, value);
        return;
    }

    switch (addr) {
    case 0x000: /* GPMC_REVISION */
    case 0x014: /* GPMC_SYSSTATUS */
    case 0x054: /* GPMC_STATUS */
    case 0x1f0: /* GPMC_PREFETCH_STATUS */
    case 0x200 ... 0x220: /* GPMC_ECC_RESULT */
    case 0x234: /* GPMC_PSA_LSB */
    case 0x238: /* GPMC_PSA_MSB */
        OMAP_RO_REG(addr);
        break;

    case 0x010: /* GPMC_SYSCONFIG */
        if ((value >> 3) == 0x3) {
            fprintf(stderr, "%s: bad SDRAM idle mode %"PRIi64"\n",
                    __func__, value >> 3);
        }
        if (value & 2) {
            omap_gpmc_reset(s);
        }
        s->sysconfig = value & 0x19;
        break;

    case 0x018: /* GPMC_IRQSTATUS */
        s->irqst &= ~value;
        omap_gpmc_int_update(s);
        break;

    case 0x01c: /* GPMC_IRQENABLE */
        s->irqen = value & 0xf03;
        omap_gpmc_int_update(s);
        break;

    case 0x040: /* GPMC_TIMEOUT_CONTROL */
        s->timeout = value & 0x1ff1;
        break;

    case 0x044: /* GPMC_ERR_ADDRESS */
    case 0x048: /* GPMC_ERR_TYPE */
        break;

    case 0x050: /* GPMC_CONFIG */
        s->config = value & 0xf13;
        break;

    case 0x060 ... 0x1d4:
        cs = (addr - 0x060) / 0x30;
        addr -= cs * 0x30;
        f = s->cs_file + cs;
        switch (addr) {
        case 0x60: /* GPMC_CONFIG1 */
            f->config[0] = value & 0xffef3e13;
            break;
        case 0x64: /* GPMC_CONFIG2 */
            f->config[1] = value & 0x001f1f8f;
            break;
        case 0x68: /* GPMC_CONFIG3 */
            f->config[2] = value & 0x001f1f8f;
            break;
        case 0x6c: /* GPMC_CONFIG4 */
            f->config[3] = value & 0x1f8f1f8f;
            break;
        case 0x70: /* GPMC_CONFIG5 */
            f->config[4] = value & 0x0f1f1f1f;
            break;
        case 0x74: /* GPMC_CONFIG6 */
            f->config[5] = value & 0x00000fcf;
            break;
        case 0x78: /* GPMC_CONFIG7 */
            if ((f->config[6] ^ value) & 0xf7f) {
                omap_gpmc_cs_unmap(s, cs);
                f->config[6] = value & 0x00000f7f;
                omap_gpmc_cs_map(s, cs);
            }
            break;
        case 0x7c ... 0x7f: /* GPMC_NAND_COMMAND */
            if (omap_gpmc_devtype(f) == OMAP_GPMC_NAND) {
                nand_setpins(f->dev, 1, 0, 0, 1, 0); /* CLE */
                omap_nand_setio(f->dev, value, omap_gpmc_devsize(f), size);
            }
            break;
        case 0x80 ... 0x83: /* GPMC_NAND_ADDRESS */
            if (omap_gpmc_devtype(f) == OMAP_GPMC_NAND) {
                nand_setpins(f->dev, 0, 1, 0, 1, 0); /* ALE */
                omap_nand_setio(f->dev, value, omap_gpmc_devsize(f), size);
            }
            break;
        case 0x84 ... 0x87: /* GPMC_NAND_DATA */
            if (omap_gpmc_devtype(f) == OMAP_GPMC_NAND) {
                omap_nand_write(f, 0, value, size);
            }
            break;
        default:
            goto bad_reg;
        }
        break;

    case 0x1e0: /* GPMC_PREFETCH_CONFIG1 */
        if (!s->prefetch.startengine) {
            uint32_t newconfig1 = value & 0x7f8f7fbf;
            uint32_t changed;
            changed = newconfig1 ^ s->prefetch.config1;
            if (changed & (0x80 | 0x7000000)) {
                /* Turning the engine on or off, or mapping it somewhere else.
                 * cs_map() and cs_unmap() check the prefetch config and
                 * overall CSVALID bits, so it is sufficient to unmap-and-map
                 * both the old cs and the new one. Note that we adhere to
                 * the "unmap/change config/map" order (and not unmap twice
                 * if newcs == oldcs), otherwise we'll try to delete the wrong
                 * memory region.
                 */
                int oldcs = prefetch_cs(s->prefetch.config1);
                int newcs = prefetch_cs(newconfig1);
                omap_gpmc_cs_unmap(s, oldcs);
                if (oldcs != newcs) {
                    omap_gpmc_cs_unmap(s, newcs);
                }
                s->prefetch.config1 = newconfig1;
                omap_gpmc_cs_map(s, oldcs);
                if (oldcs != newcs) {
                    omap_gpmc_cs_map(s, newcs);
                }
            } else {
                s->prefetch.config1 = newconfig1;
            }
        }
        break;

    case 0x1e4: /* GPMC_PREFETCH_CONFIG2 */
        if (!s->prefetch.startengine) {
            s->prefetch.transfercount = value & 0x3fff;
        }
        break;

    case 0x1ec: /* GPMC_PREFETCH_CONTROL */
        if (s->prefetch.startengine != (value & 1)) {
            s->prefetch.startengine = value & 1;
            if (s->prefetch.startengine) {
                /* Prefetch engine start */
                s->prefetch.count = s->prefetch.transfercount;
                if (s->prefetch.config1 & 1) {
                    /* Write */
                    s->prefetch.fifopointer = 64;
                } else {
                    /* Read */
                    s->prefetch.fifopointer = 0;
                    fill_prefetch_fifo(s);
                }
            } else {
                /* Prefetch engine forcibly stopped. The TRM
                 * doesn't define the behaviour if you do this.
                 * We clear the prefetch count, which means that
                 * we permit no more writes, and don't read any
                 * more data from NAND. The CPU can still drain
                 * the FIFO of unread data.
                 */
                s->prefetch.count = 0;
            }
            omap_gpmc_int_update(s);
        }
        break;

    case 0x1f4: /* GPMC_ECC_CONFIG */
        s->ecc_cs = 0x8f;
        break;
    case 0x1f8: /* GPMC_ECC_CONTROL */
        if (value & (1 << 8)) {
            for (cs = 0; cs < 9; cs++) {
                ecc_reset(&s->ecc[cs]);
            }
        }
        s->ecc_ptr = value & 0xf;
        if (s->ecc_ptr == 0 || s->ecc_ptr > 9) {
            s->ecc_ptr = 0;
            s->ecc_cs &= ~1;
        }
        break;
    case 0x1fc: /* GPMC_ECC_SIZE_CONFIG */
        s->ecc_cfg = value & 0x3fcff1ff;
        break;
    case 0x230: /* GPMC_TESTMODE_CTRL */
        if (value & 7) {
            fprintf(stderr, "%s: test mode enable attempt\n", __func__);
        }
        break;

    default:
    bad_reg:
        OMAP_BAD_REG(addr);
        return;
    }
}

static const MemoryRegionOps omap_gpmc_ops = {
    .read = omap_gpmc_read,
    .write = omap_gpmc_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

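/* Creation and wiring, as done by the SoC/board code (a sketch only: the
 * variable names, base address and chip-select assignments below are the
 * caller's own choices):
 *
 *     struct omap_gpmc_s *gpmc;
 *
 *     gpmc = omap_gpmc_init(mpu, base, gpmc_irq, gpmc_drq);
 *     omap_gpmc_attach_nand(gpmc, 0, nand_dev);    -- NAND flash on CS0
 *     omap_gpmc_attach(gpmc, 1, sram_region);      -- NOR/SRAM-like on CS1
 */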
struct omap_gpmc_s *omap_gpmc_init(struct omap_mpu_state_s *mpu,
                                   hwaddr base,
                                   qemu_irq irq, qemu_irq drq)
{
    int cs;
    struct omap_gpmc_s *s = (struct omap_gpmc_s *)
            g_malloc0(sizeof(struct omap_gpmc_s));

    memory_region_init_io(&s->iomem, NULL, &omap_gpmc_ops, s, "omap-gpmc", 0x1000);
    memory_region_add_subregion(get_system_memory(), base, &s->iomem);

    s->irq = irq;
    s->drq = drq;
    s->accept_256 = cpu_is_omap3630(mpu);
    s->revision = cpu_class_omap3(mpu) ? 0x50 : 0x20;
    s->lastirq = 0;
    omap_gpmc_reset(s);

    /* We have to register a different IO memory handler for each
     * chip select region in case a NAND device is mapped there. We
     * make the region the worst-case size of 256MB and rely on the
     * container memory region in cs_map to chop it down to the actual
     * guest-requested size.
     */
    for (cs = 0; cs < 8; cs++) {
        memory_region_init_io(&s->cs_file[cs].nandiomem, NULL,
                              &omap_nand_ops,
                              &s->cs_file[cs],
                              "omap-nand",
                              256 * 1024 * 1024);
    }

    memory_region_init_io(&s->prefetch.iomem, NULL, &omap_prefetch_ops, s,
                          "omap-gpmc-prefetch", 256 * 1024 * 1024);
    return s;
}

void omap_gpmc_attach(struct omap_gpmc_s *s, int cs, MemoryRegion *iomem)
{
    struct omap_gpmc_cs_file_s *f;
    assert(iomem);

    if (cs < 0 || cs >= 8) {
        fprintf(stderr, "%s: bad chip-select %i\n", __func__, cs);
        exit(-1);
    }
    f = &s->cs_file[cs];

    omap_gpmc_cs_unmap(s, cs);
    f->config[0] &= ~(0xf << 10);
    f->iomem = iomem;
    omap_gpmc_cs_map(s, cs);
}

void omap_gpmc_attach_nand(struct omap_gpmc_s *s, int cs, DeviceState *nand)
{
    struct omap_gpmc_cs_file_s *f;
    assert(nand);

    if (cs < 0 || cs >= 8) {
        fprintf(stderr, "%s: bad chip-select %i\n", __func__, cs);
        exit(-1);
    }
    f = &s->cs_file[cs];

    omap_gpmc_cs_unmap(s, cs);
    f->config[0] &= ~(0xf << 10);
    f->config[0] |= (OMAP_GPMC_NAND << 10);
    f->dev = nand;
    if (nand_getbuswidth(f->dev) == 16) {
        f->config[0] |= OMAP_GPMC_16BIT << 12;
    }
    omap_gpmc_cs_map(s, cs);
}