/*
 * QEMU IDE Emulation: PCI Bus support.
 *
 * Copyright (c) 2003 Fabrice Bellard
 * Copyright (c) 2006 Openedhand Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "hw/irq.h"
#include "hw/pci/pci.h"
#include "migration/vmstate.h"
#include "sysemu/dma.h"
#include "qemu/error-report.h"
#include "qemu/module.h"
#include "hw/ide/pci.h"
#include "trace.h"

#define BMDMA_PAGE_SIZE 4096

#define BM_MIGRATION_COMPAT_STATUS_BITS \
    (IDE_RETRY_DMA | IDE_RETRY_PIO | \
     IDE_RETRY_READ | IDE_RETRY_FLUSH)

static uint64_t pci_ide_status_read(void *opaque, hwaddr addr, unsigned size)
{
    IDEBus *bus = opaque;

    if (addr != 2 || size != 1) {
        return ((uint64_t)1 << (size * 8)) - 1;
    }
    return ide_status_read(bus, addr + 2);
}

static void pci_ide_ctrl_write(void *opaque, hwaddr addr,
                               uint64_t data, unsigned size)
{
    IDEBus *bus = opaque;

    if (addr != 2 || size != 1) {
        return;
    }
    ide_ctrl_write(bus, addr + 2, data);
}

const MemoryRegionOps pci_ide_cmd_le_ops = {
    .read = pci_ide_status_read,
    .write = pci_ide_ctrl_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

static uint64_t pci_ide_data_read(void *opaque, hwaddr addr, unsigned size)
{
    IDEBus *bus = opaque;

    if (size == 1) {
        return ide_ioport_read(bus, addr);
    } else if (addr == 0) {
        if (size == 2) {
            return ide_data_readw(bus, addr);
        } else {
            return ide_data_readl(bus, addr);
        }
    }
    return ((uint64_t)1 << (size * 8)) - 1;
}

static void pci_ide_data_write(void *opaque, hwaddr addr,
                               uint64_t data, unsigned size)
{
    IDEBus *bus = opaque;

    if (size == 1) {
        ide_ioport_write(bus, addr, data);
    } else if (addr == 0) {
        if (size == 2) {
            ide_data_writew(bus, addr, data);
        } else {
            ide_data_writel(bus, addr, data);
        }
    }
}

const MemoryRegionOps pci_ide_data_le_ops = {
    .read = pci_ide_data_read,
    .write = pci_ide_data_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

void pci_ide_update_mode(PCIIDEState *s)
{
    PCIDevice *d = PCI_DEVICE(s);
    uint8_t mode = d->config[PCI_CLASS_PROG];

    /*
     * This function only configures the BARs/ioports for now: PCI IDE
     * controllers must manage their own IRQ routing
     */

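    /*
     * The low nibble of the programming interface byte encodes the channel
     * modes (per the PCI IDE controller specification): bits 0/2 select
     * native mode for the primary/secondary channel and bits 1/3 mark the
     * mode as programmable.  Only the all-legacy (0xa) and all-native (0xf)
     * combinations are handled here.
     */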
    switch (mode & 0xf) {
    case 0xa:
        /* Both channels legacy mode */

        /*
         * TODO: according to the PCI IDE specification the BARs should
         * be completely disabled, however Linux for the pegasos2
         * machine still accesses the BAR addresses after switching to legacy
         * mode. Hence we leave them active for now.
         */

        /* Clear interrupt pin */
        pci_config_set_interrupt_pin(d->config, 0);

        /* Add legacy IDE ports */
        if (!s->bus[0].portio_list.owner) {
            portio_list_init(&s->bus[0].portio_list, OBJECT(d),
                             ide_portio_list, &s->bus[0], "ide");
            portio_list_add(&s->bus[0].portio_list,
                            pci_address_space_io(d), 0x1f0);
        }

        if (!s->bus[0].portio2_list.owner) {
            portio_list_init(&s->bus[0].portio2_list, OBJECT(d),
                             ide_portio2_list, &s->bus[0], "ide");
            portio_list_add(&s->bus[0].portio2_list,
                            pci_address_space_io(d), 0x3f6);
        }

        if (!s->bus[1].portio_list.owner) {
            portio_list_init(&s->bus[1].portio_list, OBJECT(d),
                             ide_portio_list, &s->bus[1], "ide");
            portio_list_add(&s->bus[1].portio_list,
                            pci_address_space_io(d), 0x170);
        }

        if (!s->bus[1].portio2_list.owner) {
            portio_list_init(&s->bus[1].portio2_list, OBJECT(d),
                             ide_portio2_list, &s->bus[1], "ide");
            portio_list_add(&s->bus[1].portio2_list,
                            pci_address_space_io(d), 0x376);
        }
        break;

    case 0xf:
        /* Both channels native mode */

        /* Set interrupt pin */
        pci_config_set_interrupt_pin(d->config, 1);

        /* Remove legacy IDE ports */
        if (s->bus[0].portio_list.owner) {
            portio_list_del(&s->bus[0].portio_list);
            portio_list_destroy(&s->bus[0].portio_list);
        }

        if (s->bus[0].portio2_list.owner) {
            portio_list_del(&s->bus[0].portio2_list);
            portio_list_destroy(&s->bus[0].portio2_list);
        }

        if (s->bus[1].portio_list.owner) {
            portio_list_del(&s->bus[1].portio_list);
            portio_list_destroy(&s->bus[1].portio_list);
        }

        if (s->bus[1].portio2_list.owner) {
            portio_list_del(&s->bus[1].portio2_list);
            portio_list_destroy(&s->bus[1].portio2_list);
        }
        break;
    }
}

static IDEState *bmdma_active_if(BMDMAState *bmdma)
{
    assert(bmdma->bus->retry_unit != (uint8_t)-1);
    return bmdma->bus->ifs + bmdma->bus->retry_unit;
}

static void bmdma_start_dma(const IDEDMA *dma, IDEState *s,
                            BlockCompletionFunc *dma_cb)
{
    BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);

    bm->dma_cb = dma_cb;
    bm->cur_prd_last = 0;
    bm->cur_prd_addr = 0;
    bm->cur_prd_len = 0;

    if (bm->status & BM_STATUS_DMAING) {
        bm->dma_cb(bmdma_active_if(bm), 0);
    }
}

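/*
 * Each PRD (physical region descriptor) is 8 bytes: a 32-bit physical base
 * address followed by a 16-bit byte count (0 means 64 KiB) and an
 * end-of-table flag in bit 31 of the second dword.  bmdma_prepare_buf()
 * and bmdma_rw_buf() below walk the guest's PRD table one descriptor at a
 * time, with the cur_prd_* fields tracking the partially consumed entry.
 */
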
/**
 * Prepare an sglist based on available PRDs.
 * @limit: How many bytes to prepare total.
 *
 * Returns the number of bytes prepared, -1 on error.
 * IDEState.io_buffer_size will contain the number of bytes described
 * by the PRDs, whether or not we added them to the sglist.
 */
static int32_t bmdma_prepare_buf(const IDEDMA *dma, int32_t limit)
{
    BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);
    IDEState *s = bmdma_active_if(bm);
    PCIDevice *pci_dev = PCI_DEVICE(bm->pci_dev);
    struct {
        uint32_t addr;
        uint32_t size;
    } prd;
    int l, len;

    pci_dma_sglist_init(&s->sg, pci_dev,
                        s->nsector / (BMDMA_PAGE_SIZE / BDRV_SECTOR_SIZE) + 1);
    s->io_buffer_size = 0;
    for(;;) {
        if (bm->cur_prd_len == 0) {
            /* end of table (with a fail safe of one page) */
            if (bm->cur_prd_last ||
                (bm->cur_addr - bm->addr) >= BMDMA_PAGE_SIZE) {
                return s->sg.size;
            }
            pci_dma_read(pci_dev, bm->cur_addr, &prd, 8);
            bm->cur_addr += 8;
            prd.addr = le32_to_cpu(prd.addr);
            prd.size = le32_to_cpu(prd.size);
            len = prd.size & 0xfffe;
            if (len == 0)
                len = 0x10000;
            bm->cur_prd_len = len;
            bm->cur_prd_addr = prd.addr;
            bm->cur_prd_last = (prd.size & 0x80000000);
        }
        l = bm->cur_prd_len;
        if (l > 0) {
            uint64_t sg_len;

            /* Don't add extra bytes to the SGList; consume any remaining
             * PRDs from the guest, but ignore them. */
            sg_len = MIN(limit - s->sg.size, bm->cur_prd_len);
            if (sg_len) {
                qemu_sglist_add(&s->sg, bm->cur_prd_addr, sg_len);
            }

            bm->cur_prd_addr += l;
            bm->cur_prd_len -= l;
            s->io_buffer_size += l;
        }
    }

    qemu_sglist_destroy(&s->sg);
    s->io_buffer_size = 0;
    return -1;
}

/* return 0 if buffer completed */
static int bmdma_rw_buf(const IDEDMA *dma, bool is_write)
{
    BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);
    IDEState *s = bmdma_active_if(bm);
    PCIDevice *pci_dev = PCI_DEVICE(bm->pci_dev);
    struct {
        uint32_t addr;
        uint32_t size;
    } prd;
    int l, len;

    for(;;) {
        l = s->io_buffer_size - s->io_buffer_index;
        if (l <= 0)
            break;
        if (bm->cur_prd_len == 0) {
            /* end of table (with a fail safe of one page) */
            if (bm->cur_prd_last ||
                (bm->cur_addr - bm->addr) >= BMDMA_PAGE_SIZE)
                return 0;
            pci_dma_read(pci_dev, bm->cur_addr, &prd, 8);
            bm->cur_addr += 8;
            prd.addr = le32_to_cpu(prd.addr);
            prd.size = le32_to_cpu(prd.size);
            len = prd.size & 0xfffe;
            if (len == 0)
                len = 0x10000;
            bm->cur_prd_len = len;
            bm->cur_prd_addr = prd.addr;
            bm->cur_prd_last = (prd.size & 0x80000000);
        }
        if (l > bm->cur_prd_len)
            l = bm->cur_prd_len;
        if (l > 0) {
            if (is_write) {
                pci_dma_write(pci_dev, bm->cur_prd_addr,
                              s->io_buffer + s->io_buffer_index, l);
            } else {
                pci_dma_read(pci_dev, bm->cur_prd_addr,
                             s->io_buffer + s->io_buffer_index, l);
            }
            bm->cur_prd_addr += l;
            bm->cur_prd_len -= l;
            s->io_buffer_index += l;
        }
    }
    return 1;
}

static void bmdma_set_inactive(const IDEDMA *dma, bool more)
{
    BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);

    bm->dma_cb = NULL;
    if (more) {
        bm->status |= BM_STATUS_DMAING;
    } else {
        bm->status &= ~BM_STATUS_DMAING;
    }
}

static void bmdma_restart_dma(const IDEDMA *dma)
{
    BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);

    bm->cur_addr = bm->addr;
}

static void bmdma_cancel(BMDMAState *bm)
{
    if (bm->status & BM_STATUS_DMAING) {
        /* cancel DMA request */
        bmdma_set_inactive(&bm->dma, false);
    }
}

static void bmdma_reset(const IDEDMA *dma)
{
    BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);

    trace_bmdma_reset();
    bmdma_cancel(bm);
    bm->cmd = 0;
    bm->status = 0;
    bm->addr = 0;
    bm->cur_addr = 0;
    bm->cur_prd_last = 0;
    bm->cur_prd_addr = 0;
    bm->cur_prd_len = 0;
}

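/*
 * bmdma_irq() is interposed on the IDE bus IRQ line (see bmdma_init()):
 * a rising edge latches BM_STATUS_INT in the status register before the
 * level is forwarded to the original IRQ, while a falling edge is passed
 * through unchanged.
 */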
static void bmdma_irq(void *opaque, int n, int level)
{
    BMDMAState *bm = opaque;

    if (!level) {
        /* pass through lower */
        qemu_set_irq(bm->irq, level);
        return;
    }

    bm->status |= BM_STATUS_INT;

    /* trigger the real irq */
    qemu_set_irq(bm->irq, level);
}

void bmdma_cmd_writeb(BMDMAState *bm, uint32_t val)
{
    trace_bmdma_cmd_writeb(val);

    /* Ignore writes to SSBM if it keeps the old value */
    if ((val & BM_CMD_START) != (bm->cmd & BM_CMD_START)) {
        if (!(val & BM_CMD_START)) {
            ide_cancel_dma_sync(ide_bus_active_if(bm->bus));
            bm->status &= ~BM_STATUS_DMAING;
        } else {
            bm->cur_addr = bm->addr;
            if (!(bm->status & BM_STATUS_DMAING)) {
                bm->status |= BM_STATUS_DMAING;
                /* start dma transfer if possible */
                if (bm->dma_cb)
                    bm->dma_cb(bmdma_active_if(bm), 0);
            }
        }
    }

    /* Only the start/stop (bit 0) and read/write control (bit 3) bits are
     * stored */
    bm->cmd = val & 0x09;
}

void bmdma_status_writeb(BMDMAState *bm, uint32_t val)
{
    /* Bits 5 and 6 (drive DMA capable) are writable, bit 0 (active) is
     * read-only here, and bits 1 (error) and 2 (interrupt) are cleared by
     * writing 1 to them */
    bm->status = (val & 0x60) | (bm->status & BM_STATUS_DMAING)
        | (bm->status & ~val & (BM_STATUS_ERROR | BM_STATUS_INT));
}

static uint64_t bmdma_addr_read(void *opaque, hwaddr addr,
                                unsigned width)
{
    BMDMAState *bm = opaque;
    uint32_t mask = (1ULL << (width * 8)) - 1;
    uint64_t data;

    data = (bm->addr >> (addr * 8)) & mask;
    trace_bmdma_addr_read(data);
    return data;
}

static void bmdma_addr_write(void *opaque, hwaddr addr,
                             uint64_t data, unsigned width)
{
    BMDMAState *bm = opaque;
    int shift = addr * 8;
    uint32_t mask = (1ULL << (width * 8)) - 1;

    trace_bmdma_addr_write(data);
    bm->addr &= ~(mask << shift);
    /* The PRD table base is dword aligned, so bits 1:0 are never stored */
    bm->addr |= ((data & mask) << shift) & ~3;
}

MemoryRegionOps bmdma_addr_ioport_ops = {
    .read = bmdma_addr_read,
    .write = bmdma_addr_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

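/*
 * Migration: the BMDMA state is saved per channel.  The two subsections
 * below are optional: one carries the in-flight PRD cursor, the other the
 * raw status byte when it contains bits that older QEMU versions reused
 * for internal retry state (see BM_MIGRATION_COMPAT_STATUS_BITS).
 */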
static bool ide_bmdma_current_needed(void *opaque)
{
    BMDMAState *bm = opaque;

    return (bm->cur_prd_len != 0);
}

static bool ide_bmdma_status_needed(void *opaque)
{
    BMDMAState *bm = opaque;

    /* Older versions abused some bits in the status register for internal
     * error state. If any of these bits are set, we must add a subsection to
     * transfer the real status register */
    uint8_t abused_bits = BM_MIGRATION_COMPAT_STATUS_BITS;

    return ((bm->status & abused_bits) != 0);
}

static int ide_bmdma_pre_save(void *opaque)
{
    BMDMAState *bm = opaque;
    uint8_t abused_bits = BM_MIGRATION_COMPAT_STATUS_BITS;

    if (!(bm->status & BM_STATUS_DMAING) && bm->dma_cb) {
        bm->bus->error_status =
            ide_dma_cmd_to_retry(bmdma_active_if(bm)->dma_cmd);
    }
    bm->migration_retry_unit = bm->bus->retry_unit;
    bm->migration_retry_sector_num = bm->bus->retry_sector_num;
    bm->migration_retry_nsector = bm->bus->retry_nsector;
    bm->migration_compat_status =
        (bm->status & ~abused_bits) | (bm->bus->error_status & abused_bits);

    return 0;
}

/* This function accesses bm->bus->error_status which is loaded only after
 * BMDMA itself. This is why the function is called from ide_pci_post_load
 * instead of being registered with VMState where it would run too early. */
static int ide_bmdma_post_load(void *opaque, int version_id)
{
    BMDMAState *bm = opaque;
    uint8_t abused_bits = BM_MIGRATION_COMPAT_STATUS_BITS;

    if (bm->status == 0) {
        bm->status = bm->migration_compat_status & ~abused_bits;
        bm->bus->error_status |= bm->migration_compat_status & abused_bits;
    }
    if (bm->bus->error_status) {
        bm->bus->retry_sector_num = bm->migration_retry_sector_num;
        bm->bus->retry_nsector = bm->migration_retry_nsector;
        bm->bus->retry_unit = bm->migration_retry_unit;
    }

    return 0;
}

static const VMStateDescription vmstate_bmdma_current = {
    .name = "ide bmdma_current",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = ide_bmdma_current_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(cur_addr, BMDMAState),
        VMSTATE_UINT32(cur_prd_last, BMDMAState),
        VMSTATE_UINT32(cur_prd_addr, BMDMAState),
        VMSTATE_UINT32(cur_prd_len, BMDMAState),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_bmdma_status = {
    .name = "ide bmdma/status",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = ide_bmdma_status_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(status, BMDMAState),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_bmdma = {
    .name = "ide bmdma",
    .version_id = 3,
    .minimum_version_id = 0,
    .pre_save = ide_bmdma_pre_save,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(cmd, BMDMAState),
        VMSTATE_UINT8(migration_compat_status, BMDMAState),
        VMSTATE_UINT32(addr, BMDMAState),
        VMSTATE_INT64(migration_retry_sector_num, BMDMAState),
        VMSTATE_UINT32(migration_retry_nsector, BMDMAState),
        VMSTATE_UINT8(migration_retry_unit, BMDMAState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_bmdma_current,
        &vmstate_bmdma_status,
        NULL
    }
};

static int ide_pci_post_load(void *opaque, int version_id)
{
    PCIIDEState *d = opaque;
    int i;

    for(i = 0; i < 2; i++) {
        /* current versions always store 0/1, but older versions stored
           bigger values; we only need the last bit */
        d->bmdma[i].migration_retry_unit &= 1;
        ide_bmdma_post_load(&d->bmdma[i], -1);
    }

    return 0;
}

const VMStateDescription vmstate_ide_pci = {
    .name = "ide",
    .version_id = 3,
    .minimum_version_id = 0,
    .post_load = ide_pci_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_PCI_DEVICE(parent_obj, PCIIDEState),
        VMSTATE_STRUCT_ARRAY(bmdma, PCIIDEState, 2, 0,
                             vmstate_bmdma, BMDMAState),
        VMSTATE_IDE_BUS_ARRAY(bus, PCIIDEState, 2),
        VMSTATE_IDE_DRIVES(bus[0].ifs, PCIIDEState),
        VMSTATE_IDE_DRIVES(bus[1].ifs, PCIIDEState),
        VMSTATE_END_OF_LIST()
    }
};

/* hd_table must contain 4 block drivers */
void pci_ide_create_devs(PCIDevice *dev)
{
    PCIIDEState *d = PCI_IDE(dev);
    DriveInfo *hd_table[2 * MAX_IDE_DEVS];
    static const int bus[4] = { 0, 0, 1, 1 };
    static const int unit[4] = { 0, 1, 0, 1 };
    int i;

    ide_drive_get(hd_table, ARRAY_SIZE(hd_table));
    for (i = 0; i < 4; i++) {
        if (hd_table[i]) {
            ide_bus_create_drive(d->bus + bus[i], unit[i], hd_table[i]);
        }
    }
}

static const struct IDEDMAOps bmdma_ops = {
    .start_dma = bmdma_start_dma,
    .prepare_buf = bmdma_prepare_buf,
    .rw_buf = bmdma_rw_buf,
    .restart_dma = bmdma_restart_dma,
    .set_inactive = bmdma_set_inactive,
    .reset = bmdma_reset,
};

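/*
 * Wire one BMDMA channel into its IDE bus: install the bmdma_ops DMA
 * callbacks and interpose bmdma_irq() on the bus IRQ line so that
 * BM_STATUS_INT is latched before the interrupt reaches the original
 * handler saved in bm->irq.
 */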
void bmdma_init(IDEBus *bus, BMDMAState *bm, PCIIDEState *d)
{
    if (bus->dma == &bm->dma) {
        return;
    }

    bm->dma.ops = &bmdma_ops;
    bus->dma = &bm->dma;
    bm->irq = bus->irq;
    bus->irq = qemu_allocate_irq(bmdma_irq, bm, 0);
    bm->bus = bus;
    bm->pci_dev = d;
}

static void pci_ide_init(Object *obj)
{
    PCIIDEState *d = PCI_IDE(obj);

    qdev_init_gpio_out_named(DEVICE(d), d->isa_irq, "isa-irq",
                             ARRAY_SIZE(d->isa_irq));
}

static const TypeInfo pci_ide_type_info = {
    .name = TYPE_PCI_IDE,
    .parent = TYPE_PCI_DEVICE,
    .instance_size = sizeof(PCIIDEState),
    .instance_init = pci_ide_init,
    .abstract = true,
    .interfaces = (InterfaceInfo[]) {
        { INTERFACE_CONVENTIONAL_PCI_DEVICE },
        { },
    },
};

static void pci_ide_register_types(void)
{
    type_register_static(&pci_ide_type_info);
}

type_init(pci_ide_register_types)