/*
 * QEMU IDE Emulation: MacIO support.
 *
 * Copyright (c) 2003 Fabrice Bellard
 * Copyright (c) 2006 Openedhand Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "hw/hw.h"
#include "hw/ppc/mac.h"
#include "hw/ppc/mac_dbdma.h"
#include "block/block.h"
#include "sysemu/dma.h"

#include <hw/ide/internal.h>

/* debug MACIO */
// #define DEBUG_MACIO

#ifdef DEBUG_MACIO
static const int debug_macio = 1;
#else
static const int debug_macio = 0;
#endif

#define MACIO_DPRINTF(fmt, ...) do { \
        if (debug_macio) { \
            printf(fmt , ## __VA_ARGS__); \
        } \
    } while (0)


/***********************************************************/
/* MacIO based PowerPC IDE */

#define MACIO_PAGE_SIZE 4096

static void pmac_ide_atapi_transfer_cb(void *opaque, int ret)
{
    DBDMA_io *io = opaque;
    MACIOIDEState *m = io->opaque;
    IDEState *s = idebus_active_if(&m->bus);
    int unaligned;

    if (ret < 0) {
        m->aiocb = NULL;
        qemu_sglist_destroy(&s->sg);
        ide_atapi_io_error(s, ret);
        io->remainder_len = 0;
        goto done;
    }

    if (!m->dma_active) {
        MACIO_DPRINTF("waiting for data (%#x - %#x - %x)\n",
                      s->nsector, io->len, s->status);
        /* data not ready yet, wait for the channel to get restarted */
        io->processing = false;
        return;
    }

    MACIO_DPRINTF("io_buffer_size = %#x\n", s->io_buffer_size);

    if (s->io_buffer_size > 0) {
        m->aiocb = NULL;
        qemu_sglist_destroy(&s->sg);

        s->packet_transfer_size -= s->io_buffer_size;

        s->io_buffer_index += s->io_buffer_size;
        s->lba += s->io_buffer_index >> 11;
        s->io_buffer_index &= 0x7ff;
    }

    s->io_buffer_size = MIN(io->len, s->packet_transfer_size);

    MACIO_DPRINTF("remainder: %d io->len: %d size: %d\n", io->remainder_len,
                  io->len, s->packet_transfer_size);
    if (io->remainder_len && io->len) {
        /* guest wants the rest of its previous transfer */
        int remainder_len = MIN(io->remainder_len, io->len);

        MACIO_DPRINTF("copying remainder %d bytes\n", remainder_len);

        cpu_physical_memory_write(io->addr, io->remainder + 0x200 -
                                  remainder_len, remainder_len);

        io->addr += remainder_len;
        io->len -= remainder_len;
        s->io_buffer_size = remainder_len;
        io->remainder_len -= remainder_len;
        /* treat remainder as individual transfer, start again */
        qemu_sglist_init(&s->sg, DEVICE(m), io->len / MACIO_PAGE_SIZE + 1,
                         &address_space_memory);
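        /* Re-enter this callback with ret == 0 so the remainder copy above is
         * accounted for before the next chunk of the ATAPI transfer is set
         * up. */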
        pmac_ide_atapi_transfer_cb(opaque, 0);
        return;
    }

    if (!s->packet_transfer_size) {
        MACIO_DPRINTF("end of transfer\n");
        ide_atapi_cmd_ok(s);
        m->dma_active = false;
    }

    if (io->len == 0) {
        MACIO_DPRINTF("end of DMA\n");
        goto done;
    }

    /* launch next transfer */

    /* handle unaligned accesses first, get them over with and only do the
       remaining bulk transfer using our async DMA helpers */
    unaligned = io->len & 0x1ff;
    if (unaligned) {
        int sector_num = (s->lba << 2) + (s->io_buffer_index >> 9);
        int nsector = io->len >> 9;

        MACIO_DPRINTF("precopying unaligned %d bytes to %#" HWADDR_PRIx "\n",
                      unaligned, io->addr + io->len - unaligned);

        bdrv_read(s->bs, sector_num + nsector, io->remainder, 1);
        cpu_physical_memory_write(io->addr + io->len - unaligned,
                                  io->remainder, unaligned);

        io->len -= unaligned;
    }

    MACIO_DPRINTF("io->len = %#x\n", io->len);

    qemu_sglist_init(&s->sg, DEVICE(m), io->len / MACIO_PAGE_SIZE + 1,
                     &address_space_memory);
    qemu_sglist_add(&s->sg, io->addr, io->len);
    io->addr += s->io_buffer_size;
    io->remainder_len = MIN(s->packet_transfer_size - s->io_buffer_size,
                            (0x200 - unaligned) & 0x1ff);
    MACIO_DPRINTF("set remainder to: %d\n", io->remainder_len);

    /* We would read no data from the block layer, thus not get a callback.
       Just fake completion manually. */
    if (!io->len) {
        pmac_ide_atapi_transfer_cb(opaque, 0);
        return;
    }

    io->len = 0;

    MACIO_DPRINTF("sector_num=%d size=%d, cmd_cmd=%d\n",
                  (s->lba << 2) + (s->io_buffer_index >> 9),
                  s->packet_transfer_size, s->dma_cmd);

    m->aiocb = dma_bdrv_read(s->bs, &s->sg,
                             (int64_t)(s->lba << 2) + (s->io_buffer_index >> 9),
                             pmac_ide_atapi_transfer_cb, io);
    return;

done:
    MACIO_DPRINTF("done DMA\n");
    bdrv_acct_done(s->bs, &s->acct);
    io->dma_end(opaque);
}

static void pmac_ide_transfer_cb(void *opaque, int ret)
{
    DBDMA_io *io = opaque;
    MACIOIDEState *m = io->opaque;
    IDEState *s = idebus_active_if(&m->bus);
    int n = 0;
    int64_t sector_num;
    int unaligned;

    if (ret < 0) {
        MACIO_DPRINTF("DMA error\n");
        m->aiocb = NULL;
        qemu_sglist_destroy(&s->sg);
        ide_dma_error(s);
        io->remainder_len = 0;
        goto done;
    }

    if (--io->requests) {
        /* More requests still in flight */
        return;
    }

    if (!m->dma_active) {
        MACIO_DPRINTF("waiting for data (%#x - %#x - %x)\n",
                      s->nsector, io->len, s->status);
        /* data not ready yet, wait for the channel to get restarted */
        io->processing = false;
        return;
    }

    sector_num = ide_get_sector(s);
    MACIO_DPRINTF("io_buffer_size = %#x\n", s->io_buffer_size);
    if (s->io_buffer_size > 0) {
        m->aiocb = NULL;
        qemu_sglist_destroy(&s->sg);
        n = (s->io_buffer_size + 0x1ff) >> 9;
        sector_num += n;
        ide_set_sector(s, sector_num);
        s->nsector -= n;
    }

    if (io->finish_remain_read) {
        /* Finish a stale read from the last iteration */
        io->finish_remain_read = false;
        cpu_physical_memory_write(io->finish_addr, io->remainder,
                                  io->finish_len);
    }

    MACIO_DPRINTF("remainder: %d io->len: %d nsector: %d "
                  "sector_num: %" PRId64 "\n",
                  io->remainder_len, io->len, s->nsector, sector_num);
    if (io->remainder_len && io->len) {
        /* guest wants the rest of its previous transfer */
        int remainder_len = MIN(io->remainder_len, io->len);
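        /* io->remainder spans a full 512-byte sector; the part still owed to
         * the guest is its tail, starting at offset 0x200 - remainder_len. */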
        uint8_t *p = &io->remainder[0x200 - remainder_len];

        MACIO_DPRINTF("copying remainder %d bytes at %#" HWADDR_PRIx "\n",
                      remainder_len, io->addr);

        switch (s->dma_cmd) {
        case IDE_DMA_READ:
            cpu_physical_memory_write(io->addr, p, remainder_len);
            break;
        case IDE_DMA_WRITE:
            cpu_physical_memory_read(io->addr, p, remainder_len);
            break;
        case IDE_DMA_TRIM:
            break;
        }
        io->addr += remainder_len;
        io->len -= remainder_len;
        io->remainder_len -= remainder_len;

        if (s->dma_cmd == IDE_DMA_WRITE && !io->remainder_len) {
            io->requests++;
            qemu_iovec_reset(&io->iov);
            qemu_iovec_add(&io->iov, io->remainder, 0x200);

            m->aiocb = bdrv_aio_writev(s->bs, sector_num - 1, &io->iov, 1,
                                       pmac_ide_transfer_cb, io);
        }
    }

    if (s->nsector == 0 && !io->remainder_len) {
        MACIO_DPRINTF("end of transfer\n");
        s->status = READY_STAT | SEEK_STAT;
        ide_set_irq(s->bus);
        m->dma_active = false;
    }

    if (io->len == 0) {
        MACIO_DPRINTF("end of DMA\n");
        goto done;
    }

    /* launch next transfer */

    s->io_buffer_index = 0;
    s->io_buffer_size = MIN(io->len, s->nsector * 512);

    /* handle unaligned accesses first, get them over with and only do the
       remaining bulk transfer using our async DMA helpers */
    unaligned = io->len & 0x1ff;
    if (unaligned) {
        int nsector = io->len >> 9;

        MACIO_DPRINTF("precopying unaligned %d bytes to %#" HWADDR_PRIx "\n",
                      unaligned, io->addr + io->len - unaligned);

        switch (s->dma_cmd) {
        case IDE_DMA_READ:
            io->requests++;
            io->finish_addr = io->addr + io->len - unaligned;
            io->finish_len = unaligned;
            io->finish_remain_read = true;
            qemu_iovec_reset(&io->iov);
            qemu_iovec_add(&io->iov, io->remainder, 0x200);

            m->aiocb = bdrv_aio_readv(s->bs, sector_num + nsector, &io->iov, 1,
                                      pmac_ide_transfer_cb, io);
            break;
        case IDE_DMA_WRITE:
            /* cache the contents in our io struct */
            cpu_physical_memory_read(io->addr + io->len - unaligned,
                                     io->remainder + io->remainder_len,
                                     unaligned);
            break;
        case IDE_DMA_TRIM:
            break;
        }
    }

    MACIO_DPRINTF("io->len = %#x\n", io->len);

    qemu_sglist_init(&s->sg, DEVICE(m), io->len / MACIO_PAGE_SIZE + 1,
                     &address_space_memory);
    qemu_sglist_add(&s->sg, io->addr, io->len);
    io->addr += io->len + unaligned;
    io->remainder_len = (0x200 - unaligned) & 0x1ff;
    MACIO_DPRINTF("set remainder to: %d\n", io->remainder_len);

    /* Only subsector reads happening */
    if (!io->len) {
        if (!io->requests) {
            io->requests++;
            pmac_ide_transfer_cb(opaque, ret);
        }
        return;
    }

    io->len = 0;

    MACIO_DPRINTF("sector_num=%" PRId64 " n=%d, nsector=%d, cmd_cmd=%d\n",
                  sector_num, n, s->nsector, s->dma_cmd);

    switch (s->dma_cmd) {
    case IDE_DMA_READ:
        m->aiocb = dma_bdrv_read(s->bs, &s->sg, sector_num,
                                 pmac_ide_transfer_cb, io);
        break;
    case IDE_DMA_WRITE:
        m->aiocb = dma_bdrv_write(s->bs, &s->sg, sector_num,
                                  pmac_ide_transfer_cb, io);
        break;
    case IDE_DMA_TRIM:
        m->aiocb = dma_bdrv_io(s->bs, &s->sg, sector_num,
                               ide_issue_trim, pmac_ide_transfer_cb, io,
                               DMA_DIRECTION_TO_DEVICE);
        break;
    }

    io->requests++;
    return;

done:
    if (s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) {
        bdrv_acct_done(s->bs, &s->acct);
    }
    io->dma_end(io);
}

static void pmac_ide_transfer(DBDMA_io *io)
{
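    /* DBDMA entry point for this channel: ATAPI requests are handed to
     * pmac_ide_atapi_transfer_cb(), disk reads/writes/trims to
     * pmac_ide_transfer_cb(). */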
    MACIOIDEState *m = io->opaque;
    IDEState *s = idebus_active_if(&m->bus);

    MACIO_DPRINTF("\n");

    s->io_buffer_size = 0;
    if (s->drive_kind == IDE_CD) {

        /* Handle non-block ATAPI DMA transfers */
        if (s->lba == -1) {
            s->io_buffer_size = MIN(io->len, s->packet_transfer_size);
            bdrv_acct_start(s->bs, &s->acct, s->io_buffer_size,
                            BDRV_ACCT_READ);
            MACIO_DPRINTF("non-block ATAPI DMA transfer size: %d\n",
                          s->io_buffer_size);

            /* Copy ATAPI buffer directly to RAM and finish */
            cpu_physical_memory_write(io->addr, s->io_buffer,
                                      s->io_buffer_size);
            ide_atapi_cmd_ok(s);
            m->dma_active = false;

            MACIO_DPRINTF("end of non-block ATAPI DMA transfer\n");
            bdrv_acct_done(s->bs, &s->acct);
            io->dma_end(io);
            return;
        }

        bdrv_acct_start(s->bs, &s->acct, io->len, BDRV_ACCT_READ);
        pmac_ide_atapi_transfer_cb(io, 0);
        return;
    }

    switch (s->dma_cmd) {
    case IDE_DMA_READ:
        bdrv_acct_start(s->bs, &s->acct, io->len, BDRV_ACCT_READ);
        break;
    case IDE_DMA_WRITE:
        bdrv_acct_start(s->bs, &s->acct, io->len, BDRV_ACCT_WRITE);
        break;
    default:
        break;
    }

    io->requests++;
    pmac_ide_transfer_cb(io, 0);
}

static void pmac_ide_flush(DBDMA_io *io)
{
    MACIOIDEState *m = io->opaque;

    if (m->aiocb) {
        bdrv_drain_all();
    }
}

/* PowerMac IDE memory IO: the IDE registers are exposed with a 16-byte
   stride inside a 4KiB window, so convert the byte offset back into a
   register index before dispatching to the core IDE handlers. */
static void pmac_ide_writeb(void *opaque,
                            hwaddr addr, uint32_t val)
{
    MACIOIDEState *d = opaque;

    addr = (addr & 0xFFF) >> 4;
    switch (addr) {
    case 1 ... 7:
        ide_ioport_write(&d->bus, addr, val);
        break;
    case 8:
    case 22:
        ide_cmd_write(&d->bus, 0, val);
        break;
    default:
        break;
    }
}

static uint32_t pmac_ide_readb(void *opaque, hwaddr addr)
{
    uint8_t retval;
    MACIOIDEState *d = opaque;

    addr = (addr & 0xFFF) >> 4;
    switch (addr) {
    case 1 ... 7:
        retval = ide_ioport_read(&d->bus, addr);
        break;
    case 8:
    case 22:
        retval = ide_status_read(&d->bus, 0);
        break;
    default:
        retval = 0xFF;
        break;
    }
    return retval;
}

static void pmac_ide_writew(void *opaque,
                            hwaddr addr, uint32_t val)
{
    MACIOIDEState *d = opaque;

    addr = (addr & 0xFFF) >> 4;
    val = bswap16(val);
    if (addr == 0) {
        ide_data_writew(&d->bus, 0, val);
    }
}

static uint32_t pmac_ide_readw(void *opaque, hwaddr addr)
{
    uint16_t retval;
    MACIOIDEState *d = opaque;

    addr = (addr & 0xFFF) >> 4;
    if (addr == 0) {
        retval = ide_data_readw(&d->bus, 0);
    } else {
        retval = 0xFFFF;
    }
    retval = bswap16(retval);
    return retval;
}

static void pmac_ide_writel(void *opaque,
                            hwaddr addr, uint32_t val)
{
    MACIOIDEState *d = opaque;

    addr = (addr & 0xFFF) >> 4;
    val = bswap32(val);
    if (addr == 0) {
        ide_data_writel(&d->bus, 0, val);
    }
}

static uint32_t pmac_ide_readl(void *opaque, hwaddr addr)
{
    uint32_t retval;
    MACIOIDEState *d = opaque;

    addr = (addr & 0xFFF) >> 4;
    if (addr == 0) {
        retval = ide_data_readl(&d->bus, 0);
    } else {
        retval = 0xFFFFFFFF;
    }
    retval = bswap32(retval);
    return retval;
}

static const MemoryRegionOps pmac_ide_ops = {
    .old_mmio = {
        .write = {
            pmac_ide_writeb,
            pmac_ide_writew,
            pmac_ide_writel,
        },
        .read = {
            pmac_ide_readb,
            pmac_ide_readw,
            pmac_ide_readl,
        },
    },
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static const VMStateDescription vmstate_pmac = {
    .name = "ide",
    .version_id = 3,
    .minimum_version_id = 0,
    .fields = (VMStateField[]) {
        VMSTATE_IDE_BUS(bus, MACIOIDEState),
        VMSTATE_IDE_DRIVES(bus.ifs, MACIOIDEState),
        VMSTATE_END_OF_LIST()
    }
};

static void macio_ide_reset(DeviceState *dev)
{
    MACIOIDEState *d = MACIO_IDE(dev);

    ide_bus_reset(&d->bus);
}

static int ide_nop(IDEDMA *dma)
{
    return 0;
}

static int ide_nop_int(IDEDMA *dma, int x)
{
    return 0;
}

static void ide_nop_restart(void *opaque, int x, RunState y)
{
}

static void ide_dbdma_start(IDEDMA *dma, IDEState *s,
                            BlockDriverCompletionFunc *cb)
{
    MACIOIDEState *m = container_of(dma, MACIOIDEState, dma);

    MACIO_DPRINTF("\n");
    m->dma_active = true;
    DBDMA_kick(m->dbdma);
}

static const IDEDMAOps dbdma_ops = {
    .start_dma = ide_dbdma_start,
    .start_transfer = ide_nop,
    .prepare_buf = ide_nop_int,
    .rw_buf = ide_nop_int,
    .set_unit = ide_nop_int,
    .add_status = ide_nop_int,
    .set_inactive = ide_nop,
    .restart_cb = ide_nop_restart,
    .reset = ide_nop,
};

static void macio_ide_realizefn(DeviceState *dev, Error **errp)
{
    MACIOIDEState *s = MACIO_IDE(dev);

    ide_init2(&s->bus, s->irq);

    /* Register DMA callbacks */
    s->dma.ops = &dbdma_ops;
    s->bus.dma = &s->dma;
}

static void macio_ide_initfn(Object *obj)
{
    SysBusDevice *d = SYS_BUS_DEVICE(obj);
    MACIOIDEState *s = MACIO_IDE(obj);

    ide_bus_new(&s->bus, sizeof(s->bus), DEVICE(obj), 0, 2);
    memory_region_init_io(&s->mem, obj, &pmac_ide_ops, s, "pmac-ide", 0x1000);
    sysbus_init_mmio(d, &s->mem);
    sysbus_init_irq(d, &s->irq);
    sysbus_init_irq(d, &s->dma_irq);
}

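/* QOM glue: the class hooks up realize/reset/vmstate and the TypeInfo below
 * registers the MacIO IDE sysbus device. */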
static void macio_ide_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);

    dc->realize = macio_ide_realizefn;
    dc->reset = macio_ide_reset;
    dc->vmsd = &vmstate_pmac;
}

static const TypeInfo macio_ide_type_info = {
    .name = TYPE_MACIO_IDE,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(MACIOIDEState),
    .instance_init = macio_ide_initfn,
    .class_init = macio_ide_class_init,
};

static void macio_ide_register_types(void)
{
    type_register_static(&macio_ide_type_info);
}

/* hd_table must contain 2 block drivers */
void macio_ide_init_drives(MACIOIDEState *s, DriveInfo **hd_table)
{
    int i;

    for (i = 0; i < 2; i++) {
        if (hd_table[i]) {
            ide_create_drive(&s->bus, i, hd_table[i]);
        }
    }
}

void macio_ide_register_dma(MACIOIDEState *s, void *dbdma, int channel)
{
    s->dbdma = dbdma;
    DBDMA_register_channel(dbdma, channel, s->dma_irq,
                           pmac_ide_transfer, pmac_ide_flush, s);
}

type_init(macio_ide_register_types)