1 /* 2 * QEMU AHCI Emulation 3 * 4 * Copyright (c) 2010 qiaochong@loongson.cn 5 * Copyright (c) 2010 Roland Elek <elek.roland@gmail.com> 6 * Copyright (c) 2010 Sebastian Herbszt <herbszt@gmx.de> 7 * Copyright (c) 2010 Alexander Graf <agraf@suse.de> 8 * 9 * This library is free software; you can redistribute it and/or 10 * modify it under the terms of the GNU Lesser General Public 11 * License as published by the Free Software Foundation; either 12 * version 2 of the License, or (at your option) any later version. 13 * 14 * This library is distributed in the hope that it will be useful, 15 * but WITHOUT ANY WARRANTY; without even the implied warranty of 16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 17 * Lesser General Public License for more details. 18 * 19 * You should have received a copy of the GNU Lesser General Public 20 * License along with this library; if not, see <http://www.gnu.org/licenses/>. 21 * 22 */ 23 24 #include <hw/hw.h> 25 #include <hw/pci/msi.h> 26 #include <hw/i386/pc.h> 27 #include <hw/pci/pci.h> 28 #include <hw/sysbus.h> 29 30 #include "monitor/monitor.h" 31 #include "sysemu/block-backend.h" 32 #include "sysemu/dma.h" 33 #include "internal.h" 34 #include <hw/ide/pci.h> 35 #include <hw/ide/ahci.h> 36 37 #define DEBUG_AHCI 0 38 39 #define DPRINTF(port, fmt, ...) 
\ 40 do { \ 41 if (DEBUG_AHCI) { \ 42 fprintf(stderr, "ahci: %s: [%d] ", __func__, port); \ 43 fprintf(stderr, fmt, ## __VA_ARGS__); \ 44 } \ 45 } while (0) 46 47 static void check_cmd(AHCIState *s, int port); 48 static int handle_cmd(AHCIState *s,int port,int slot); 49 static void ahci_reset_port(AHCIState *s, int port); 50 static void ahci_write_fis_d2h(AHCIDevice *ad, uint8_t *cmd_fis); 51 static void ahci_init_d2h(AHCIDevice *ad); 52 static int ahci_dma_prepare_buf(IDEDMA *dma, int is_write); 53 static void ahci_commit_buf(IDEDMA *dma, uint32_t tx_bytes); 54 static bool ahci_map_clb_address(AHCIDevice *ad); 55 static bool ahci_map_fis_address(AHCIDevice *ad); 56 static void ahci_unmap_clb_address(AHCIDevice *ad); 57 static void ahci_unmap_fis_address(AHCIDevice *ad); 58 59 60 static uint32_t ahci_port_read(AHCIState *s, int port, int offset) 61 { 62 uint32_t val; 63 AHCIPortRegs *pr; 64 pr = &s->dev[port].port_regs; 65 66 switch (offset) { 67 case PORT_LST_ADDR: 68 val = pr->lst_addr; 69 break; 70 case PORT_LST_ADDR_HI: 71 val = pr->lst_addr_hi; 72 break; 73 case PORT_FIS_ADDR: 74 val = pr->fis_addr; 75 break; 76 case PORT_FIS_ADDR_HI: 77 val = pr->fis_addr_hi; 78 break; 79 case PORT_IRQ_STAT: 80 val = pr->irq_stat; 81 break; 82 case PORT_IRQ_MASK: 83 val = pr->irq_mask; 84 break; 85 case PORT_CMD: 86 val = pr->cmd; 87 break; 88 case PORT_TFDATA: 89 val = pr->tfdata; 90 break; 91 case PORT_SIG: 92 val = pr->sig; 93 break; 94 case PORT_SCR_STAT: 95 if (s->dev[port].port.ifs[0].blk) { 96 val = SATA_SCR_SSTATUS_DET_DEV_PRESENT_PHY_UP | 97 SATA_SCR_SSTATUS_SPD_GEN1 | SATA_SCR_SSTATUS_IPM_ACTIVE; 98 } else { 99 val = SATA_SCR_SSTATUS_DET_NODEV; 100 } 101 break; 102 case PORT_SCR_CTL: 103 val = pr->scr_ctl; 104 break; 105 case PORT_SCR_ERR: 106 val = pr->scr_err; 107 break; 108 case PORT_SCR_ACT: 109 pr->scr_act &= ~s->dev[port].finished; 110 s->dev[port].finished = 0; 111 val = pr->scr_act; 112 break; 113 case PORT_CMD_ISSUE: 114 val = pr->cmd_issue; 115 break; 116 
case PORT_RESERVED: 117 default: 118 val = 0; 119 } 120 DPRINTF(port, "offset: 0x%x val: 0x%x\n", offset, val); 121 return val; 122 123 } 124 125 static void ahci_irq_raise(AHCIState *s, AHCIDevice *dev) 126 { 127 AHCIPCIState *d = container_of(s, AHCIPCIState, ahci); 128 PCIDevice *pci_dev = 129 (PCIDevice *)object_dynamic_cast(OBJECT(d), TYPE_PCI_DEVICE); 130 131 DPRINTF(0, "raise irq\n"); 132 133 if (pci_dev && msi_enabled(pci_dev)) { 134 msi_notify(pci_dev, 0); 135 } else { 136 qemu_irq_raise(s->irq); 137 } 138 } 139 140 static void ahci_irq_lower(AHCIState *s, AHCIDevice *dev) 141 { 142 AHCIPCIState *d = container_of(s, AHCIPCIState, ahci); 143 PCIDevice *pci_dev = 144 (PCIDevice *)object_dynamic_cast(OBJECT(d), TYPE_PCI_DEVICE); 145 146 DPRINTF(0, "lower irq\n"); 147 148 if (!pci_dev || !msi_enabled(pci_dev)) { 149 qemu_irq_lower(s->irq); 150 } 151 } 152 153 static void ahci_check_irq(AHCIState *s) 154 { 155 int i; 156 157 DPRINTF(-1, "check irq %#x\n", s->control_regs.irqstatus); 158 159 s->control_regs.irqstatus = 0; 160 for (i = 0; i < s->ports; i++) { 161 AHCIPortRegs *pr = &s->dev[i].port_regs; 162 if (pr->irq_stat & pr->irq_mask) { 163 s->control_regs.irqstatus |= (1 << i); 164 } 165 } 166 167 if (s->control_regs.irqstatus && 168 (s->control_regs.ghc & HOST_CTL_IRQ_EN)) { 169 ahci_irq_raise(s, NULL); 170 } else { 171 ahci_irq_lower(s, NULL); 172 } 173 } 174 175 static void ahci_trigger_irq(AHCIState *s, AHCIDevice *d, 176 int irq_type) 177 { 178 DPRINTF(d->port_no, "trigger irq %#x -> %x\n", 179 irq_type, d->port_regs.irq_mask & irq_type); 180 181 d->port_regs.irq_stat |= irq_type; 182 ahci_check_irq(s); 183 } 184 185 static void map_page(AddressSpace *as, uint8_t **ptr, uint64_t addr, 186 uint32_t wanted) 187 { 188 hwaddr len = wanted; 189 190 if (*ptr) { 191 dma_memory_unmap(as, *ptr, len, DMA_DIRECTION_FROM_DEVICE, len); 192 } 193 194 *ptr = dma_memory_map(as, addr, &len, DMA_DIRECTION_FROM_DEVICE); 195 if (len < wanted) { 196 dma_memory_unmap(as, 
*ptr, len, DMA_DIRECTION_FROM_DEVICE, len); 197 *ptr = NULL; 198 } 199 } 200 201 /** 202 * Check the cmd register to see if we should start or stop 203 * the DMA or FIS RX engines. 204 * 205 * @ad: Device to engage. 206 * @allow_stop: Allow device to transition from started to stopped? 207 * 'no' is useful for migration post_load, which does not expect a transition. 208 * 209 * @return 0 on success, -1 on error. 210 */ 211 static int ahci_cond_start_engines(AHCIDevice *ad, bool allow_stop) 212 { 213 AHCIPortRegs *pr = &ad->port_regs; 214 215 if (pr->cmd & PORT_CMD_START) { 216 if (ahci_map_clb_address(ad)) { 217 pr->cmd |= PORT_CMD_LIST_ON; 218 } else { 219 error_report("AHCI: Failed to start DMA engine: " 220 "bad command list buffer address"); 221 return -1; 222 } 223 } else if (pr->cmd & PORT_CMD_LIST_ON) { 224 if (allow_stop) { 225 ahci_unmap_clb_address(ad); 226 pr->cmd = pr->cmd & ~(PORT_CMD_LIST_ON); 227 } else { 228 error_report("AHCI: DMA engine should be off, " 229 "but appears to still be running"); 230 return -1; 231 } 232 } 233 234 if (pr->cmd & PORT_CMD_FIS_RX) { 235 if (ahci_map_fis_address(ad)) { 236 pr->cmd |= PORT_CMD_FIS_ON; 237 } else { 238 error_report("AHCI: Failed to start FIS receive engine: " 239 "bad FIS receive buffer address"); 240 return -1; 241 } 242 } else if (pr->cmd & PORT_CMD_FIS_ON) { 243 if (allow_stop) { 244 ahci_unmap_fis_address(ad); 245 pr->cmd = pr->cmd & ~(PORT_CMD_FIS_ON); 246 } else { 247 error_report("AHCI: FIS receive engine should be off, " 248 "but appears to still be running"); 249 return -1; 250 } 251 } 252 253 return 0; 254 } 255 256 static void ahci_port_write(AHCIState *s, int port, int offset, uint32_t val) 257 { 258 AHCIPortRegs *pr = &s->dev[port].port_regs; 259 260 DPRINTF(port, "offset: 0x%x val: 0x%x\n", offset, val); 261 switch (offset) { 262 case PORT_LST_ADDR: 263 pr->lst_addr = val; 264 break; 265 case PORT_LST_ADDR_HI: 266 pr->lst_addr_hi = val; 267 break; 268 case PORT_FIS_ADDR: 269 pr->fis_addr = 
val; 270 break; 271 case PORT_FIS_ADDR_HI: 272 pr->fis_addr_hi = val; 273 break; 274 case PORT_IRQ_STAT: 275 pr->irq_stat &= ~val; 276 ahci_check_irq(s); 277 break; 278 case PORT_IRQ_MASK: 279 pr->irq_mask = val & 0xfdc000ff; 280 ahci_check_irq(s); 281 break; 282 case PORT_CMD: 283 /* Block any Read-only fields from being set; 284 * including LIST_ON and FIS_ON. */ 285 pr->cmd = (pr->cmd & PORT_CMD_RO_MASK) | (val & ~PORT_CMD_RO_MASK); 286 287 /* Check FIS RX and CLB engines, allow transition to false: */ 288 ahci_cond_start_engines(&s->dev[port], true); 289 290 /* XXX usually the FIS would be pending on the bus here and 291 issuing deferred until the OS enables FIS receival. 292 Instead, we only submit it once - which works in most 293 cases, but is a hack. */ 294 if ((pr->cmd & PORT_CMD_FIS_ON) && 295 !s->dev[port].init_d2h_sent) { 296 ahci_init_d2h(&s->dev[port]); 297 s->dev[port].init_d2h_sent = true; 298 } 299 300 check_cmd(s, port); 301 break; 302 case PORT_TFDATA: 303 /* Read Only. 
*/ 304 break; 305 case PORT_SIG: 306 /* Read Only */ 307 break; 308 case PORT_SCR_STAT: 309 /* Read Only */ 310 break; 311 case PORT_SCR_CTL: 312 if (((pr->scr_ctl & AHCI_SCR_SCTL_DET) == 1) && 313 ((val & AHCI_SCR_SCTL_DET) == 0)) { 314 ahci_reset_port(s, port); 315 } 316 pr->scr_ctl = val; 317 break; 318 case PORT_SCR_ERR: 319 pr->scr_err &= ~val; 320 break; 321 case PORT_SCR_ACT: 322 /* RW1 */ 323 pr->scr_act |= val; 324 break; 325 case PORT_CMD_ISSUE: 326 pr->cmd_issue |= val; 327 check_cmd(s, port); 328 break; 329 default: 330 break; 331 } 332 } 333 334 static uint64_t ahci_mem_read(void *opaque, hwaddr addr, 335 unsigned size) 336 { 337 AHCIState *s = opaque; 338 uint32_t val = 0; 339 340 if (addr < AHCI_GENERIC_HOST_CONTROL_REGS_MAX_ADDR) { 341 switch (addr) { 342 case HOST_CAP: 343 val = s->control_regs.cap; 344 break; 345 case HOST_CTL: 346 val = s->control_regs.ghc; 347 break; 348 case HOST_IRQ_STAT: 349 val = s->control_regs.irqstatus; 350 break; 351 case HOST_PORTS_IMPL: 352 val = s->control_regs.impl; 353 break; 354 case HOST_VERSION: 355 val = s->control_regs.version; 356 break; 357 } 358 359 DPRINTF(-1, "(addr 0x%08X), val 0x%08X\n", (unsigned) addr, val); 360 } else if ((addr >= AHCI_PORT_REGS_START_ADDR) && 361 (addr < (AHCI_PORT_REGS_START_ADDR + 362 (s->ports * AHCI_PORT_ADDR_OFFSET_LEN)))) { 363 val = ahci_port_read(s, (addr - AHCI_PORT_REGS_START_ADDR) >> 7, 364 addr & AHCI_PORT_ADDR_OFFSET_MASK); 365 } 366 367 return val; 368 } 369 370 371 372 static void ahci_mem_write(void *opaque, hwaddr addr, 373 uint64_t val, unsigned size) 374 { 375 AHCIState *s = opaque; 376 377 /* Only aligned reads are allowed on AHCI */ 378 if (addr & 3) { 379 fprintf(stderr, "ahci: Mis-aligned write to addr 0x" 380 TARGET_FMT_plx "\n", addr); 381 return; 382 } 383 384 if (addr < AHCI_GENERIC_HOST_CONTROL_REGS_MAX_ADDR) { 385 DPRINTF(-1, "(addr 0x%08X), val 0x%08"PRIX64"\n", (unsigned) addr, val); 386 387 switch (addr) { 388 case HOST_CAP: /* R/WO, RO */ 389 /* FIXME 
handle R/WO */ 390 break; 391 case HOST_CTL: /* R/W */ 392 if (val & HOST_CTL_RESET) { 393 DPRINTF(-1, "HBA Reset\n"); 394 ahci_reset(s); 395 } else { 396 s->control_regs.ghc = (val & 0x3) | HOST_CTL_AHCI_EN; 397 ahci_check_irq(s); 398 } 399 break; 400 case HOST_IRQ_STAT: /* R/WC, RO */ 401 s->control_regs.irqstatus &= ~val; 402 ahci_check_irq(s); 403 break; 404 case HOST_PORTS_IMPL: /* R/WO, RO */ 405 /* FIXME handle R/WO */ 406 break; 407 case HOST_VERSION: /* RO */ 408 /* FIXME report write? */ 409 break; 410 default: 411 DPRINTF(-1, "write to unknown register 0x%x\n", (unsigned)addr); 412 } 413 } else if ((addr >= AHCI_PORT_REGS_START_ADDR) && 414 (addr < (AHCI_PORT_REGS_START_ADDR + 415 (s->ports * AHCI_PORT_ADDR_OFFSET_LEN)))) { 416 ahci_port_write(s, (addr - AHCI_PORT_REGS_START_ADDR) >> 7, 417 addr & AHCI_PORT_ADDR_OFFSET_MASK, val); 418 } 419 420 } 421 422 static const MemoryRegionOps ahci_mem_ops = { 423 .read = ahci_mem_read, 424 .write = ahci_mem_write, 425 .endianness = DEVICE_LITTLE_ENDIAN, 426 }; 427 428 static uint64_t ahci_idp_read(void *opaque, hwaddr addr, 429 unsigned size) 430 { 431 AHCIState *s = opaque; 432 433 if (addr == s->idp_offset) { 434 /* index register */ 435 return s->idp_index; 436 } else if (addr == s->idp_offset + 4) { 437 /* data register - do memory read at location selected by index */ 438 return ahci_mem_read(opaque, s->idp_index, size); 439 } else { 440 return 0; 441 } 442 } 443 444 static void ahci_idp_write(void *opaque, hwaddr addr, 445 uint64_t val, unsigned size) 446 { 447 AHCIState *s = opaque; 448 449 if (addr == s->idp_offset) { 450 /* index register - mask off reserved bits */ 451 s->idp_index = (uint32_t)val & ((AHCI_MEM_BAR_SIZE - 1) & ~3); 452 } else if (addr == s->idp_offset + 4) { 453 /* data register - do memory write at location selected by index */ 454 ahci_mem_write(opaque, s->idp_index, val, size); 455 } 456 } 457 458 static const MemoryRegionOps ahci_idp_ops = { 459 .read = ahci_idp_read, 460 .write = 
ahci_idp_write, 461 .endianness = DEVICE_LITTLE_ENDIAN, 462 }; 463 464 465 static void ahci_reg_init(AHCIState *s) 466 { 467 int i; 468 469 s->control_regs.cap = (s->ports - 1) | 470 (AHCI_NUM_COMMAND_SLOTS << 8) | 471 (AHCI_SUPPORTED_SPEED_GEN1 << AHCI_SUPPORTED_SPEED) | 472 HOST_CAP_NCQ | HOST_CAP_AHCI; 473 474 s->control_regs.impl = (1 << s->ports) - 1; 475 476 s->control_regs.version = AHCI_VERSION_1_0; 477 478 for (i = 0; i < s->ports; i++) { 479 s->dev[i].port_state = STATE_RUN; 480 } 481 } 482 483 static void check_cmd(AHCIState *s, int port) 484 { 485 AHCIPortRegs *pr = &s->dev[port].port_regs; 486 int slot; 487 488 if ((pr->cmd & PORT_CMD_START) && pr->cmd_issue) { 489 for (slot = 0; (slot < 32) && pr->cmd_issue; slot++) { 490 if ((pr->cmd_issue & (1U << slot)) && 491 !handle_cmd(s, port, slot)) { 492 pr->cmd_issue &= ~(1U << slot); 493 } 494 } 495 } 496 } 497 498 static void ahci_check_cmd_bh(void *opaque) 499 { 500 AHCIDevice *ad = opaque; 501 502 qemu_bh_delete(ad->check_bh); 503 ad->check_bh = NULL; 504 505 if ((ad->busy_slot != -1) && 506 !(ad->port.ifs[0].status & (BUSY_STAT|DRQ_STAT))) { 507 /* no longer busy */ 508 ad->port_regs.cmd_issue &= ~(1 << ad->busy_slot); 509 ad->busy_slot = -1; 510 } 511 512 check_cmd(ad->hba, ad->port_no); 513 } 514 515 static void ahci_init_d2h(AHCIDevice *ad) 516 { 517 uint8_t init_fis[20]; 518 IDEState *ide_state = &ad->port.ifs[0]; 519 520 memset(init_fis, 0, sizeof(init_fis)); 521 522 init_fis[4] = 1; 523 init_fis[12] = 1; 524 525 if (ide_state->drive_kind == IDE_CD) { 526 init_fis[5] = ide_state->lcyl; 527 init_fis[6] = ide_state->hcyl; 528 } 529 530 ahci_write_fis_d2h(ad, init_fis); 531 } 532 533 static void ahci_reset_port(AHCIState *s, int port) 534 { 535 AHCIDevice *d = &s->dev[port]; 536 AHCIPortRegs *pr = &d->port_regs; 537 IDEState *ide_state = &d->port.ifs[0]; 538 int i; 539 540 DPRINTF(port, "reset port\n"); 541 542 ide_bus_reset(&d->port); 543 ide_state->ncq_queues = AHCI_MAX_CMDS; 544 545 pr->scr_stat = 
0; 546 pr->scr_err = 0; 547 pr->scr_act = 0; 548 pr->tfdata = 0x7F; 549 pr->sig = 0xFFFFFFFF; 550 d->busy_slot = -1; 551 d->init_d2h_sent = false; 552 553 ide_state = &s->dev[port].port.ifs[0]; 554 if (!ide_state->blk) { 555 return; 556 } 557 558 /* reset ncq queue */ 559 for (i = 0; i < AHCI_MAX_CMDS; i++) { 560 NCQTransferState *ncq_tfs = &s->dev[port].ncq_tfs[i]; 561 if (!ncq_tfs->used) { 562 continue; 563 } 564 565 if (ncq_tfs->aiocb) { 566 blk_aio_cancel(ncq_tfs->aiocb); 567 ncq_tfs->aiocb = NULL; 568 } 569 570 /* Maybe we just finished the request thanks to blk_aio_cancel() */ 571 if (!ncq_tfs->used) { 572 continue; 573 } 574 575 qemu_sglist_destroy(&ncq_tfs->sglist); 576 ncq_tfs->used = 0; 577 } 578 579 s->dev[port].port_state = STATE_RUN; 580 if (!ide_state->blk) { 581 pr->sig = 0; 582 ide_state->status = SEEK_STAT | WRERR_STAT; 583 } else if (ide_state->drive_kind == IDE_CD) { 584 pr->sig = SATA_SIGNATURE_CDROM; 585 ide_state->lcyl = 0x14; 586 ide_state->hcyl = 0xeb; 587 DPRINTF(port, "set lcyl = %d\n", ide_state->lcyl); 588 ide_state->status = SEEK_STAT | WRERR_STAT | READY_STAT; 589 } else { 590 pr->sig = SATA_SIGNATURE_DISK; 591 ide_state->status = SEEK_STAT | WRERR_STAT; 592 } 593 594 ide_state->error = 1; 595 ahci_init_d2h(d); 596 } 597 598 static void debug_print_fis(uint8_t *fis, int cmd_len) 599 { 600 #if DEBUG_AHCI 601 int i; 602 603 fprintf(stderr, "fis:"); 604 for (i = 0; i < cmd_len; i++) { 605 if ((i & 0xf) == 0) { 606 fprintf(stderr, "\n%02x:",i); 607 } 608 fprintf(stderr, "%02x ",fis[i]); 609 } 610 fprintf(stderr, "\n"); 611 #endif 612 } 613 614 static bool ahci_map_fis_address(AHCIDevice *ad) 615 { 616 AHCIPortRegs *pr = &ad->port_regs; 617 map_page(ad->hba->as, &ad->res_fis, 618 ((uint64_t)pr->fis_addr_hi << 32) | pr->fis_addr, 256); 619 return ad->res_fis != NULL; 620 } 621 622 static void ahci_unmap_fis_address(AHCIDevice *ad) 623 { 624 dma_memory_unmap(ad->hba->as, ad->res_fis, 256, 625 DMA_DIRECTION_FROM_DEVICE, 256); 626 ad->res_fis = 
NULL; 627 } 628 629 static bool ahci_map_clb_address(AHCIDevice *ad) 630 { 631 AHCIPortRegs *pr = &ad->port_regs; 632 ad->cur_cmd = NULL; 633 map_page(ad->hba->as, &ad->lst, 634 ((uint64_t)pr->lst_addr_hi << 32) | pr->lst_addr, 1024); 635 return ad->lst != NULL; 636 } 637 638 static void ahci_unmap_clb_address(AHCIDevice *ad) 639 { 640 dma_memory_unmap(ad->hba->as, ad->lst, 1024, 641 DMA_DIRECTION_FROM_DEVICE, 1024); 642 ad->lst = NULL; 643 } 644 645 static void ahci_write_fis_sdb(AHCIState *s, int port, uint32_t finished) 646 { 647 AHCIDevice *ad = &s->dev[port]; 648 AHCIPortRegs *pr = &ad->port_regs; 649 IDEState *ide_state; 650 SDBFIS *sdb_fis; 651 652 if (!s->dev[port].res_fis || 653 !(pr->cmd & PORT_CMD_FIS_RX)) { 654 return; 655 } 656 657 sdb_fis = (SDBFIS *)&ad->res_fis[RES_FIS_SDBFIS]; 658 ide_state = &ad->port.ifs[0]; 659 660 sdb_fis->type = SATA_FIS_TYPE_SDB; 661 /* Interrupt pending & Notification bit */ 662 sdb_fis->flags = (ad->hba->control_regs.irqstatus ? (1 << 6) : 0); 663 sdb_fis->status = ide_state->status & 0x77; 664 sdb_fis->error = ide_state->error; 665 /* update SAct field in SDB_FIS */ 666 s->dev[port].finished |= finished; 667 sdb_fis->payload = cpu_to_le32(ad->finished); 668 669 /* Update shadow registers (except BSY 0x80 and DRQ 0x08) */ 670 pr->tfdata = (ad->port.ifs[0].error << 8) | 671 (ad->port.ifs[0].status & 0x77) | 672 (pr->tfdata & 0x88); 673 674 ahci_trigger_irq(s, ad, PORT_IRQ_SDB_FIS); 675 } 676 677 static void ahci_write_fis_pio(AHCIDevice *ad, uint16_t len) 678 { 679 AHCIPortRegs *pr = &ad->port_regs; 680 uint8_t *pio_fis, *cmd_fis; 681 uint64_t tbl_addr; 682 dma_addr_t cmd_len = 0x80; 683 IDEState *s = &ad->port.ifs[0]; 684 685 if (!ad->res_fis || !(pr->cmd & PORT_CMD_FIS_RX)) { 686 return; 687 } 688 689 /* map cmd_fis */ 690 tbl_addr = le64_to_cpu(ad->cur_cmd->tbl_addr); 691 cmd_fis = dma_memory_map(ad->hba->as, tbl_addr, &cmd_len, 692 DMA_DIRECTION_TO_DEVICE); 693 694 if (cmd_fis == NULL) { 695 DPRINTF(ad->port_no, 
"dma_memory_map failed in ahci_write_fis_pio"); 696 ahci_trigger_irq(ad->hba, ad, PORT_IRQ_HBUS_ERR); 697 return; 698 } 699 700 if (cmd_len != 0x80) { 701 DPRINTF(ad->port_no, 702 "dma_memory_map mapped too few bytes in ahci_write_fis_pio"); 703 dma_memory_unmap(ad->hba->as, cmd_fis, cmd_len, 704 DMA_DIRECTION_TO_DEVICE, cmd_len); 705 ahci_trigger_irq(ad->hba, ad, PORT_IRQ_HBUS_ERR); 706 return; 707 } 708 709 pio_fis = &ad->res_fis[RES_FIS_PSFIS]; 710 711 pio_fis[0] = SATA_FIS_TYPE_PIO_SETUP; 712 pio_fis[1] = (ad->hba->control_regs.irqstatus ? (1 << 6) : 0); 713 pio_fis[2] = s->status; 714 pio_fis[3] = s->error; 715 716 pio_fis[4] = s->sector; 717 pio_fis[5] = s->lcyl; 718 pio_fis[6] = s->hcyl; 719 pio_fis[7] = s->select; 720 pio_fis[8] = s->hob_sector; 721 pio_fis[9] = s->hob_lcyl; 722 pio_fis[10] = s->hob_hcyl; 723 pio_fis[11] = 0; 724 pio_fis[12] = cmd_fis[12]; 725 pio_fis[13] = cmd_fis[13]; 726 pio_fis[14] = 0; 727 pio_fis[15] = s->status; 728 pio_fis[16] = len & 255; 729 pio_fis[17] = len >> 8; 730 pio_fis[18] = 0; 731 pio_fis[19] = 0; 732 733 /* Update shadow registers: */ 734 pr->tfdata = (ad->port.ifs[0].error << 8) | 735 ad->port.ifs[0].status; 736 737 if (pio_fis[2] & ERR_STAT) { 738 ahci_trigger_irq(ad->hba, ad, PORT_IRQ_TF_ERR); 739 } 740 741 ahci_trigger_irq(ad->hba, ad, PORT_IRQ_PIOS_FIS); 742 743 dma_memory_unmap(ad->hba->as, cmd_fis, cmd_len, 744 DMA_DIRECTION_TO_DEVICE, cmd_len); 745 } 746 747 static void ahci_write_fis_d2h(AHCIDevice *ad, uint8_t *cmd_fis) 748 { 749 AHCIPortRegs *pr = &ad->port_regs; 750 uint8_t *d2h_fis; 751 int i; 752 dma_addr_t cmd_len = 0x80; 753 int cmd_mapped = 0; 754 IDEState *s = &ad->port.ifs[0]; 755 756 if (!ad->res_fis || !(pr->cmd & PORT_CMD_FIS_RX)) { 757 return; 758 } 759 760 if (!cmd_fis) { 761 /* map cmd_fis */ 762 uint64_t tbl_addr = le64_to_cpu(ad->cur_cmd->tbl_addr); 763 cmd_fis = dma_memory_map(ad->hba->as, tbl_addr, &cmd_len, 764 DMA_DIRECTION_TO_DEVICE); 765 cmd_mapped = 1; 766 } 767 768 d2h_fis = 
&ad->res_fis[RES_FIS_RFIS]; 769 770 d2h_fis[0] = SATA_FIS_TYPE_REGISTER_D2H; 771 d2h_fis[1] = (ad->hba->control_regs.irqstatus ? (1 << 6) : 0); 772 d2h_fis[2] = s->status; 773 d2h_fis[3] = s->error; 774 775 d2h_fis[4] = s->sector; 776 d2h_fis[5] = s->lcyl; 777 d2h_fis[6] = s->hcyl; 778 d2h_fis[7] = s->select; 779 d2h_fis[8] = s->hob_sector; 780 d2h_fis[9] = s->hob_lcyl; 781 d2h_fis[10] = s->hob_hcyl; 782 d2h_fis[11] = 0; 783 d2h_fis[12] = cmd_fis[12]; 784 d2h_fis[13] = cmd_fis[13]; 785 for (i = 14; i < 20; i++) { 786 d2h_fis[i] = 0; 787 } 788 789 /* Update shadow registers: */ 790 pr->tfdata = (ad->port.ifs[0].error << 8) | 791 ad->port.ifs[0].status; 792 793 if (d2h_fis[2] & ERR_STAT) { 794 ahci_trigger_irq(ad->hba, ad, PORT_IRQ_TF_ERR); 795 } 796 797 ahci_trigger_irq(ad->hba, ad, PORT_IRQ_D2H_REG_FIS); 798 799 if (cmd_mapped) { 800 dma_memory_unmap(ad->hba->as, cmd_fis, cmd_len, 801 DMA_DIRECTION_TO_DEVICE, cmd_len); 802 } 803 } 804 805 static int prdt_tbl_entry_size(const AHCI_SG *tbl) 806 { 807 return (le32_to_cpu(tbl->flags_size) & AHCI_PRDT_SIZE_MASK) + 1; 808 } 809 810 static int ahci_populate_sglist(AHCIDevice *ad, QEMUSGList *sglist, 811 int32_t offset) 812 { 813 AHCICmdHdr *cmd = ad->cur_cmd; 814 uint32_t opts = le32_to_cpu(cmd->opts); 815 uint64_t prdt_addr = le64_to_cpu(cmd->tbl_addr) + 0x80; 816 int sglist_alloc_hint = opts >> AHCI_CMD_HDR_PRDT_LEN; 817 dma_addr_t prdt_len = (sglist_alloc_hint * sizeof(AHCI_SG)); 818 dma_addr_t real_prdt_len = prdt_len; 819 uint8_t *prdt; 820 int i; 821 int r = 0; 822 uint64_t sum = 0; 823 int off_idx = -1; 824 int64_t off_pos = -1; 825 int tbl_entry_size; 826 IDEBus *bus = &ad->port; 827 BusState *qbus = BUS(bus); 828 829 /* 830 * Note: AHCI PRDT can describe up to 256GiB. SATA/ATA only support 831 * transactions of up to 32MiB as of ATA8-ACS3 rev 1b, assuming a 832 * 512 byte sector size. 
We limit the PRDT in this implementation to 833 * a reasonably large 2GiB, which can accommodate the maximum transfer 834 * request for sector sizes up to 32K. 835 */ 836 837 if (!sglist_alloc_hint) { 838 DPRINTF(ad->port_no, "no sg list given by guest: 0x%08x\n", opts); 839 return -1; 840 } 841 842 /* map PRDT */ 843 if (!(prdt = dma_memory_map(ad->hba->as, prdt_addr, &prdt_len, 844 DMA_DIRECTION_TO_DEVICE))){ 845 DPRINTF(ad->port_no, "map failed\n"); 846 return -1; 847 } 848 849 if (prdt_len < real_prdt_len) { 850 DPRINTF(ad->port_no, "mapped less than expected\n"); 851 r = -1; 852 goto out; 853 } 854 855 /* Get entries in the PRDT, init a qemu sglist accordingly */ 856 if (sglist_alloc_hint > 0) { 857 AHCI_SG *tbl = (AHCI_SG *)prdt; 858 sum = 0; 859 for (i = 0; i < sglist_alloc_hint; i++) { 860 /* flags_size is zero-based */ 861 tbl_entry_size = prdt_tbl_entry_size(&tbl[i]); 862 if (offset <= (sum + tbl_entry_size)) { 863 off_idx = i; 864 off_pos = offset - sum; 865 break; 866 } 867 sum += tbl_entry_size; 868 } 869 if ((off_idx == -1) || (off_pos < 0) || (off_pos > tbl_entry_size)) { 870 DPRINTF(ad->port_no, "%s: Incorrect offset! 
" 871 "off_idx: %d, off_pos: %"PRId64"\n", 872 __func__, off_idx, off_pos); 873 r = -1; 874 goto out; 875 } 876 877 qemu_sglist_init(sglist, qbus->parent, (sglist_alloc_hint - off_idx), 878 ad->hba->as); 879 qemu_sglist_add(sglist, le64_to_cpu(tbl[off_idx].addr) + off_pos, 880 prdt_tbl_entry_size(&tbl[off_idx]) - off_pos); 881 882 for (i = off_idx + 1; i < sglist_alloc_hint; i++) { 883 /* flags_size is zero-based */ 884 qemu_sglist_add(sglist, le64_to_cpu(tbl[i].addr), 885 prdt_tbl_entry_size(&tbl[i])); 886 if (sglist->size > INT32_MAX) { 887 error_report("AHCI Physical Region Descriptor Table describes " 888 "more than 2 GiB.\n"); 889 qemu_sglist_destroy(sglist); 890 r = -1; 891 goto out; 892 } 893 } 894 } 895 896 out: 897 dma_memory_unmap(ad->hba->as, prdt, prdt_len, 898 DMA_DIRECTION_TO_DEVICE, prdt_len); 899 return r; 900 } 901 902 static void ncq_cb(void *opaque, int ret) 903 { 904 NCQTransferState *ncq_tfs = (NCQTransferState *)opaque; 905 IDEState *ide_state = &ncq_tfs->drive->port.ifs[0]; 906 907 if (ret == -ECANCELED) { 908 return; 909 } 910 /* Clear bit for this tag in SActive */ 911 ncq_tfs->drive->port_regs.scr_act &= ~(1 << ncq_tfs->tag); 912 913 if (ret < 0) { 914 /* error */ 915 ide_state->error = ABRT_ERR; 916 ide_state->status = READY_STAT | ERR_STAT; 917 ncq_tfs->drive->port_regs.scr_err |= (1 << ncq_tfs->tag); 918 } else { 919 ide_state->status = READY_STAT | SEEK_STAT; 920 } 921 922 ahci_write_fis_sdb(ncq_tfs->drive->hba, ncq_tfs->drive->port_no, 923 (1 << ncq_tfs->tag)); 924 925 DPRINTF(ncq_tfs->drive->port_no, "NCQ transfer tag %d finished\n", 926 ncq_tfs->tag); 927 928 block_acct_done(blk_get_stats(ncq_tfs->drive->port.ifs[0].blk), 929 &ncq_tfs->acct); 930 qemu_sglist_destroy(&ncq_tfs->sglist); 931 ncq_tfs->used = 0; 932 } 933 934 static int is_ncq(uint8_t ata_cmd) 935 { 936 /* Based on SATA 3.2 section 13.6.3.2 */ 937 switch (ata_cmd) { 938 case READ_FPDMA_QUEUED: 939 case WRITE_FPDMA_QUEUED: 940 case NCQ_NON_DATA: 941 case 
RECEIVE_FPDMA_QUEUED: 942 case SEND_FPDMA_QUEUED: 943 return 1; 944 default: 945 return 0; 946 } 947 } 948 949 static void process_ncq_command(AHCIState *s, int port, uint8_t *cmd_fis, 950 int slot) 951 { 952 NCQFrame *ncq_fis = (NCQFrame*)cmd_fis; 953 uint8_t tag = ncq_fis->tag >> 3; 954 NCQTransferState *ncq_tfs = &s->dev[port].ncq_tfs[tag]; 955 956 if (ncq_tfs->used) { 957 /* error - already in use */ 958 fprintf(stderr, "%s: tag %d already used\n", __FUNCTION__, tag); 959 return; 960 } 961 962 ncq_tfs->used = 1; 963 ncq_tfs->drive = &s->dev[port]; 964 ncq_tfs->slot = slot; 965 ncq_tfs->lba = ((uint64_t)ncq_fis->lba5 << 40) | 966 ((uint64_t)ncq_fis->lba4 << 32) | 967 ((uint64_t)ncq_fis->lba3 << 24) | 968 ((uint64_t)ncq_fis->lba2 << 16) | 969 ((uint64_t)ncq_fis->lba1 << 8) | 970 (uint64_t)ncq_fis->lba0; 971 972 /* Note: We calculate the sector count, but don't currently rely on it. 973 * The total size of the DMA buffer tells us the transfer size instead. */ 974 ncq_tfs->sector_count = ((uint16_t)ncq_fis->sector_count_high << 8) | 975 ncq_fis->sector_count_low; 976 977 DPRINTF(port, "NCQ transfer LBA from %"PRId64" to %"PRId64", " 978 "drive max %"PRId64"\n", 979 ncq_tfs->lba, ncq_tfs->lba + ncq_tfs->sector_count - 2, 980 s->dev[port].port.ifs[0].nb_sectors - 1); 981 982 ahci_populate_sglist(&s->dev[port], &ncq_tfs->sglist, 0); 983 ncq_tfs->tag = tag; 984 985 switch(ncq_fis->command) { 986 case READ_FPDMA_QUEUED: 987 DPRINTF(port, "NCQ reading %d sectors from LBA %"PRId64", " 988 "tag %d\n", 989 ncq_tfs->sector_count-1, ncq_tfs->lba, ncq_tfs->tag); 990 991 DPRINTF(port, "tag %d aio read %"PRId64"\n", 992 ncq_tfs->tag, ncq_tfs->lba); 993 994 dma_acct_start(ncq_tfs->drive->port.ifs[0].blk, &ncq_tfs->acct, 995 &ncq_tfs->sglist, BLOCK_ACCT_READ); 996 ncq_tfs->aiocb = dma_blk_read(ncq_tfs->drive->port.ifs[0].blk, 997 &ncq_tfs->sglist, ncq_tfs->lba, 998 ncq_cb, ncq_tfs); 999 break; 1000 case WRITE_FPDMA_QUEUED: 1001 DPRINTF(port, "NCQ writing %d sectors to LBA 
%"PRId64", tag %d\n", 1002 ncq_tfs->sector_count-1, ncq_tfs->lba, ncq_tfs->tag); 1003 1004 DPRINTF(port, "tag %d aio write %"PRId64"\n", 1005 ncq_tfs->tag, ncq_tfs->lba); 1006 1007 dma_acct_start(ncq_tfs->drive->port.ifs[0].blk, &ncq_tfs->acct, 1008 &ncq_tfs->sglist, BLOCK_ACCT_WRITE); 1009 ncq_tfs->aiocb = dma_blk_write(ncq_tfs->drive->port.ifs[0].blk, 1010 &ncq_tfs->sglist, ncq_tfs->lba, 1011 ncq_cb, ncq_tfs); 1012 break; 1013 default: 1014 if (is_ncq(cmd_fis[2])) { 1015 DPRINTF(port, 1016 "error: unsupported NCQ command (0x%02x) received\n", 1017 cmd_fis[2]); 1018 } else { 1019 DPRINTF(port, 1020 "error: tried to process non-NCQ command as NCQ\n"); 1021 } 1022 qemu_sglist_destroy(&ncq_tfs->sglist); 1023 } 1024 } 1025 1026 static void handle_reg_h2d_fis(AHCIState *s, int port, 1027 int slot, uint8_t *cmd_fis) 1028 { 1029 IDEState *ide_state = &s->dev[port].port.ifs[0]; 1030 AHCICmdHdr *cmd = s->dev[port].cur_cmd; 1031 uint32_t opts = le32_to_cpu(cmd->opts); 1032 1033 if (cmd_fis[1] & 0x0F) { 1034 DPRINTF(port, "Port Multiplier not supported." 1035 " cmd_fis[0]=%02x cmd_fis[1]=%02x cmd_fis[2]=%02x\n", 1036 cmd_fis[0], cmd_fis[1], cmd_fis[2]); 1037 return; 1038 } 1039 1040 if (cmd_fis[1] & 0x70) { 1041 DPRINTF(port, "Reserved flags set in H2D Register FIS." 
1042 " cmd_fis[0]=%02x cmd_fis[1]=%02x cmd_fis[2]=%02x\n", 1043 cmd_fis[0], cmd_fis[1], cmd_fis[2]); 1044 return; 1045 } 1046 1047 if (!(cmd_fis[1] & SATA_FIS_REG_H2D_UPDATE_COMMAND_REGISTER)) { 1048 switch (s->dev[port].port_state) { 1049 case STATE_RUN: 1050 if (cmd_fis[15] & ATA_SRST) { 1051 s->dev[port].port_state = STATE_RESET; 1052 } 1053 break; 1054 case STATE_RESET: 1055 if (!(cmd_fis[15] & ATA_SRST)) { 1056 ahci_reset_port(s, port); 1057 } 1058 break; 1059 } 1060 return; 1061 } 1062 1063 /* Check for NCQ command */ 1064 if (is_ncq(cmd_fis[2])) { 1065 process_ncq_command(s, port, cmd_fis, slot); 1066 return; 1067 } 1068 1069 /* Decompose the FIS: 1070 * AHCI does not interpret FIS packets, it only forwards them. 1071 * SATA 1.0 describes how to decode LBA28 and CHS FIS packets. 1072 * Later specifications, e.g, SATA 3.2, describe LBA48 FIS packets. 1073 * 1074 * ATA4 describes sector number for LBA28/CHS commands. 1075 * ATA6 describes sector number for LBA48 commands. 1076 * ATA8 deprecates CHS fully, describing only LBA28/48. 1077 * 1078 * We dutifully convert the FIS into IDE registers, and allow the 1079 * core layer to interpret them as needed. */ 1080 ide_state->feature = cmd_fis[3]; 1081 ide_state->sector = cmd_fis[4]; /* LBA 7:0 */ 1082 ide_state->lcyl = cmd_fis[5]; /* LBA 15:8 */ 1083 ide_state->hcyl = cmd_fis[6]; /* LBA 23:16 */ 1084 ide_state->select = cmd_fis[7]; /* LBA 27:24 (LBA28) */ 1085 ide_state->hob_sector = cmd_fis[8]; /* LBA 31:24 */ 1086 ide_state->hob_lcyl = cmd_fis[9]; /* LBA 39:32 */ 1087 ide_state->hob_hcyl = cmd_fis[10]; /* LBA 47:40 */ 1088 ide_state->hob_feature = cmd_fis[11]; 1089 ide_state->nsector = (int64_t)((cmd_fis[13] << 8) | cmd_fis[12]); 1090 /* 14, 16, 17, 18, 19: Reserved (SATA 1.0) */ 1091 /* 15: Only valid when UPDATE_COMMAND not set. 
*/ 1092 1093 /* Copy the ACMD field (ATAPI packet, if any) from the AHCI command 1094 * table to ide_state->io_buffer */ 1095 if (opts & AHCI_CMD_ATAPI) { 1096 memcpy(ide_state->io_buffer, &cmd_fis[AHCI_COMMAND_TABLE_ACMD], 0x10); 1097 debug_print_fis(ide_state->io_buffer, 0x10); 1098 s->dev[port].done_atapi_packet = false; 1099 /* XXX send PIO setup FIS */ 1100 } 1101 1102 ide_state->error = 0; 1103 1104 /* Reset transferred byte counter */ 1105 cmd->status = 0; 1106 1107 /* We're ready to process the command in FIS byte 2. */ 1108 ide_exec_cmd(&s->dev[port].port, cmd_fis[2]); 1109 } 1110 1111 static int handle_cmd(AHCIState *s, int port, int slot) 1112 { 1113 IDEState *ide_state; 1114 uint64_t tbl_addr; 1115 AHCICmdHdr *cmd; 1116 uint8_t *cmd_fis; 1117 dma_addr_t cmd_len; 1118 1119 if (s->dev[port].port.ifs[0].status & (BUSY_STAT|DRQ_STAT)) { 1120 /* Engine currently busy, try again later */ 1121 DPRINTF(port, "engine busy\n"); 1122 return -1; 1123 } 1124 1125 if (!s->dev[port].lst) { 1126 DPRINTF(port, "error: lst not given but cmd handled"); 1127 return -1; 1128 } 1129 cmd = &((AHCICmdHdr *)s->dev[port].lst)[slot]; 1130 /* remember current slot handle for later */ 1131 s->dev[port].cur_cmd = cmd; 1132 1133 /* The device we are working for */ 1134 ide_state = &s->dev[port].port.ifs[0]; 1135 if (!ide_state->blk) { 1136 DPRINTF(port, "error: guest accessed unused port"); 1137 return -1; 1138 } 1139 1140 tbl_addr = le64_to_cpu(cmd->tbl_addr); 1141 cmd_len = 0x80; 1142 cmd_fis = dma_memory_map(s->as, tbl_addr, &cmd_len, 1143 DMA_DIRECTION_FROM_DEVICE); 1144 if (!cmd_fis) { 1145 DPRINTF(port, "error: guest passed us an invalid cmd fis\n"); 1146 return -1; 1147 } else if (cmd_len != 0x80) { 1148 ahci_trigger_irq(s, &s->dev[port], PORT_IRQ_HBUS_ERR); 1149 DPRINTF(port, "error: dma_memory_map failed: " 1150 "(len(%02"PRIx64") != 0x80)\n", 1151 cmd_len); 1152 goto out; 1153 } 1154 debug_print_fis(cmd_fis, 0x80); 1155 1156 switch (cmd_fis[0]) { 1157 case 
SATA_FIS_TYPE_REGISTER_H2D: 1158 handle_reg_h2d_fis(s, port, slot, cmd_fis); 1159 break; 1160 default: 1161 DPRINTF(port, "unknown command cmd_fis[0]=%02x cmd_fis[1]=%02x " 1162 "cmd_fis[2]=%02x\n", cmd_fis[0], cmd_fis[1], 1163 cmd_fis[2]); 1164 break; 1165 } 1166 1167 out: 1168 dma_memory_unmap(s->as, cmd_fis, cmd_len, DMA_DIRECTION_FROM_DEVICE, 1169 cmd_len); 1170 1171 if (s->dev[port].port.ifs[0].status & (BUSY_STAT|DRQ_STAT)) { 1172 /* async command, complete later */ 1173 s->dev[port].busy_slot = slot; 1174 return -1; 1175 } 1176 1177 /* done handling the command */ 1178 return 0; 1179 } 1180 1181 /* DMA dev <-> ram */ 1182 static void ahci_start_transfer(IDEDMA *dma) 1183 { 1184 AHCIDevice *ad = DO_UPCAST(AHCIDevice, dma, dma); 1185 IDEState *s = &ad->port.ifs[0]; 1186 uint32_t size = (uint32_t)(s->data_end - s->data_ptr); 1187 /* write == ram -> device */ 1188 uint32_t opts = le32_to_cpu(ad->cur_cmd->opts); 1189 int is_write = opts & AHCI_CMD_WRITE; 1190 int is_atapi = opts & AHCI_CMD_ATAPI; 1191 int has_sglist = 0; 1192 1193 if (is_atapi && !ad->done_atapi_packet) { 1194 /* already prepopulated iobuffer */ 1195 ad->done_atapi_packet = true; 1196 size = 0; 1197 goto out; 1198 } 1199 1200 if (ahci_dma_prepare_buf(dma, is_write)) { 1201 has_sglist = 1; 1202 } 1203 1204 DPRINTF(ad->port_no, "%sing %d bytes on %s w/%s sglist\n", 1205 is_write ? "writ" : "read", size, is_atapi ? "atapi" : "ata", 1206 has_sglist ? 
            "" : "o");

    if (has_sglist && size) {
        if (is_write) {
            dma_buf_write(s->data_ptr, size, &s->sg);
        } else {
            dma_buf_read(s->data_ptr, size, &s->sg);
        }
    }

out:
    /* declare that we processed everything */
    s->data_ptr = s->data_end;

    /* Update number of transferred bytes, destroy sglist */
    ahci_commit_buf(dma, size);

    s->end_transfer_func(s);

    if (!(s->status & DRQ_STAT)) {
        /* done with PIO send/receive */
        ahci_write_fis_pio(ad, le32_to_cpu(ad->cur_cmd->status));
    }
}

/* Begin a DMA transfer: reset the transfer offset and invoke the IDE
 * core's completion callback immediately to drive the state machine. */
static void ahci_start_dma(IDEDMA *dma, IDEState *s,
                           BlockCompletionFunc *dma_cb)
{
    AHCIDevice *ad = DO_UPCAST(AHCIDevice, dma, dma);
    DPRINTF(ad->port_no, "\n");
    s->io_buffer_offset = 0;
    dma_cb(s, 0);
}

static void ahci_restart_dma(IDEDMA *dma)
{
    /* Nothing to do, ahci_start_dma already resets s->io_buffer_offset. */
}

/**
 * Called in DMA R/W chains to read the PRDT, utilizing ahci_populate_sglist.
 * Not currently invoked by PIO R/W chains,
 * which invoke ahci_populate_sglist via ahci_start_transfer.
 *
 * Returns the prepared sglist size in bytes, or -1 on failure.
 *
 * NOTE(review): the forward declaration near the top of this file uses a
 * plain 'int' return type; int32_t aliases int on supported hosts, but the
 * two declarations should be kept in sync — confirm.
 */
static int32_t ahci_dma_prepare_buf(IDEDMA *dma, int is_write)
{
    AHCIDevice *ad = DO_UPCAST(AHCIDevice, dma, dma);
    IDEState *s = &ad->port.ifs[0];

    if (ahci_populate_sglist(ad, &s->sg, s->io_buffer_offset) == -1) {
        DPRINTF(ad->port_no, "ahci_dma_prepare_buf failed.\n");
        return -1;
    }
    s->io_buffer_size = s->sg.size;

    DPRINTF(ad->port_no, "len=%#x\n", s->io_buffer_size);
    return s->io_buffer_size;
}

/**
 * Destroys the scatter-gather list,
 * and updates the command header with a bytes-read value.
 * called explicitly via ahci_dma_rw_buf (ATAPI DMA),
 * and ahci_start_transfer (PIO R/W),
 * and called via callback from ide_dma_cb for DMA R/W paths.
 */
static void ahci_commit_buf(IDEDMA *dma, uint32_t tx_bytes)
{
    AHCIDevice *ad = DO_UPCAST(AHCIDevice, dma, dma);
    IDEState *s = &ad->port.ifs[0];

    /* Accumulate into the guest-visible byte counter in the command header
     * (stored little-endian). */
    tx_bytes += le32_to_cpu(ad->cur_cmd->status);
    ad->cur_cmd->status = cpu_to_le32(tx_bytes);

    qemu_sglist_destroy(&s->sg);
}

/* Copy one chunk between the guest's PRDT-described buffer and io_buffer.
 * For a write, data flows guest memory -> io_buffer; for a read, the
 * reverse. Returns 1 on success, 0 if the sglist could not be built. */
static int ahci_dma_rw_buf(IDEDMA *dma, int is_write)
{
    AHCIDevice *ad = DO_UPCAST(AHCIDevice, dma, dma);
    IDEState *s = &ad->port.ifs[0];
    uint8_t *p = s->io_buffer + s->io_buffer_index;
    int l = s->io_buffer_size - s->io_buffer_index;

    if (ahci_populate_sglist(ad, &s->sg, s->io_buffer_offset)) {
        return 0;
    }

    if (is_write) {
        dma_buf_read(p, l, &s->sg);
    } else {
        dma_buf_write(p, l, &s->sg);
    }

    /* free sglist, update byte count */
    ahci_commit_buf(dma, l);

    s->io_buffer_index += l;
    s->io_buffer_offset += l;

    DPRINTF(ad->port_no, "len=%#x\n", l);

    return 1;
}

/* Command-completion hook: publish D2H status to the guest and schedule
 * a bottom half to look for further queued commands. */
static void ahci_cmd_done(IDEDMA *dma)
{
    AHCIDevice *ad = DO_UPCAST(AHCIDevice, dma, dma);

    DPRINTF(ad->port_no, "cmd done\n");

    /* update d2h status */
    ahci_write_fis_d2h(ad, NULL);

    if (!ad->check_bh) {
        /* maybe we still have something to process, check later */
        ad->check_bh = qemu_bh_new(ahci_check_cmd_bh, ad);
        qemu_bh_schedule(ad->check_bh);
    }
}

/* Dummy handler for the per-port qemu_irq lines allocated in ahci_init();
 * intentionally a no-op. */
static void ahci_irq_set(void *opaque, int n, int level)
{
}

/* Callbacks wiring the AHCI port into the generic IDE DMA engine. */
static const IDEDMAOps ahci_dma_ops = {
    .start_dma = ahci_start_dma,
    .restart_dma = ahci_restart_dma,
    .start_transfer = ahci_start_transfer,
    .prepare_buf = ahci_dma_prepare_buf,
    .commit_buf = ahci_commit_buf,
    .rw_buf = ahci_dma_rw_buf,
    .cmd_done = ahci_cmd_done,
};

/**
 * Initialize an AHCI HBA instance.
 * @s:     HBA state to initialize
 * @qdev:  owning device; parent for the memory regions and IDE buses
 * @as:    address space used for guest DMA accesses
 * @ports: number of SATA ports to create
 */
void ahci_init(AHCIState *s, DeviceState *qdev, AddressSpace *as, int ports)
{
    qemu_irq *irqs;
    int i;

    s->as = as;
    s->ports = ports;
    s->dev = g_new0(AHCIDevice, ports);
    ahci_reg_init(s);
    /* XXX BAR size should be 1k, but that breaks, so bump it to 4k for now */
    memory_region_init_io(&s->mem, OBJECT(qdev), &ahci_mem_ops, s,
                          "ahci", AHCI_MEM_BAR_SIZE);
    memory_region_init_io(&s->idp, OBJECT(qdev), &ahci_idp_ops, s,
                          "ahci-idp", 32);

    irqs = qemu_allocate_irqs(ahci_irq_set, s, s->ports);

    /* Wire each port's IDE bus and DMA hooks back to this HBA. */
    for (i = 0; i < s->ports; i++) {
        AHCIDevice *ad = &s->dev[i];

        ide_bus_new(&ad->port, sizeof(ad->port), qdev, i, 1);
        ide_init2(&ad->port, irqs[i]);

        ad->hba = s;
        ad->port_no = i;
        ad->port.dma = &ad->dma;
        ad->port.dma->ops = &ahci_dma_ops;
        ide_register_restart_cb(&ad->port);
    }
}

/* Free the per-port device array allocated in ahci_init(). */
void ahci_uninit(AHCIState *s)
{
    g_free(s->dev);
}

/**
 * HBA-level reset: clear global interrupt status, force the AHCI-enable
 * bit in GHC (this model advertises HOST_CAP_AHCI), and reset every port.
 */
void ahci_reset(AHCIState *s)
{
    AHCIPortRegs *pr;
    int i;

    s->control_regs.irqstatus = 0;
    /* AHCI Enable (AE)
     * The implementation of this bit is dependent upon the value of the
     * CAP.SAM bit. If CAP.SAM is '0', then GHC.AE shall be read-write and
     * shall have a reset value of '0'. If CAP.SAM is '1', then AE shall be
     * read-only and shall have a reset value of '1'.
     *
     * We set HOST_CAP_AHCI so we must enable AHCI at reset.
     */
    s->control_regs.ghc = HOST_CTL_AHCI_EN;

    for (i = 0; i < s->ports; i++) {
        pr = &s->dev[i].port_regs;
        pr->irq_stat = 0;
        pr->irq_mask = 0;
        pr->scr_ctl = 0;
        pr->cmd = PORT_CMD_SPIN_UP | PORT_CMD_POWER_ON;
        ahci_reset_port(s, i);
    }
}

/* Migration state for a single AHCI port: the IDE bus/drive plus the raw
 * port registers and per-port engine bookkeeping. */
static const VMStateDescription vmstate_ahci_device = {
    .name = "ahci port",
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_IDE_BUS(port, AHCIDevice),
        VMSTATE_IDE_DRIVE(port.ifs[0], AHCIDevice),
        VMSTATE_UINT32(port_state, AHCIDevice),
        VMSTATE_UINT32(finished, AHCIDevice),
        VMSTATE_UINT32(port_regs.lst_addr, AHCIDevice),
        VMSTATE_UINT32(port_regs.lst_addr_hi, AHCIDevice),
        VMSTATE_UINT32(port_regs.fis_addr, AHCIDevice),
        VMSTATE_UINT32(port_regs.fis_addr_hi, AHCIDevice),
        VMSTATE_UINT32(port_regs.irq_stat, AHCIDevice),
        VMSTATE_UINT32(port_regs.irq_mask, AHCIDevice),
        VMSTATE_UINT32(port_regs.cmd, AHCIDevice),
        VMSTATE_UINT32(port_regs.tfdata, AHCIDevice),
        VMSTATE_UINT32(port_regs.sig, AHCIDevice),
        VMSTATE_UINT32(port_regs.scr_stat, AHCIDevice),
        VMSTATE_UINT32(port_regs.scr_ctl, AHCIDevice),
        VMSTATE_UINT32(port_regs.scr_err, AHCIDevice),
        VMSTATE_UINT32(port_regs.scr_act, AHCIDevice),
        VMSTATE_UINT32(port_regs.cmd_issue, AHCIDevice),
        VMSTATE_BOOL(done_atapi_packet, AHCIDevice),
        VMSTATE_INT32(busy_slot, AHCIDevice),
        VMSTATE_BOOL(init_d2h_sent, AHCIDevice),
        VMSTATE_END_OF_LIST()
    },
};

/**
 * Post-migration fixup: re-map each port's command list / FIS areas and
 * resume or re-check command processing.
 * Returns 0 on success, -1 if the restored state is inconsistent.
 */
static int ahci_state_post_load(void *opaque, int version_id)
{
    int i;
    struct AHCIDevice *ad;
    AHCIState *s = opaque;

    for (i = 0; i < s->ports; i++) {
        ad = &s->dev[i];

        /* Only remap the CLB address if appropriate, disallowing a state
         * transition from 'on' to 'off' it should be consistent here.
         */
        if (ahci_cond_start_engines(ad, false) != 0) {
            return -1;
        }

        /*
         * If an error is present, ad->busy_slot will be valid and not -1.
         * In this case, an operation is waiting to resume and will re-check
         * for additional AHCI commands to execute upon completion.
         *
         * In the case where no error was present, busy_slot will be -1,
         * and we should check to see if there are additional commands waiting.
         */
        if (ad->busy_slot == -1) {
            check_cmd(s, i);
        } else {
            /* We are in the middle of a command, and may need to access
             * the command header in guest memory again. */
            if (ad->busy_slot < 0 || ad->busy_slot >= AHCI_MAX_CMDS) {
                return -1;
            }
            ad->cur_cmd = &((AHCICmdHdr *)ad->lst)[ad->busy_slot];
        }
    }

    return 0;
}

/* Migration state for the whole HBA: per-port array plus the global
 * control registers; the port count must match on load. */
const VMStateDescription vmstate_ahci = {
    .name = "ahci",
    .version_id = 1,
    .post_load = ahci_state_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT_VARRAY_POINTER_INT32(dev, AHCIState, ports,
                                            vmstate_ahci_device, AHCIDevice),
        VMSTATE_UINT32(control_regs.cap, AHCIState),
        VMSTATE_UINT32(control_regs.ghc, AHCIState),
        VMSTATE_UINT32(control_regs.irqstatus, AHCIState),
        VMSTATE_UINT32(control_regs.impl, AHCIState),
        VMSTATE_UINT32(control_regs.version, AHCIState),
        VMSTATE_UINT32(idp_index, AHCIState),
        VMSTATE_INT32_EQUAL(ports, AHCIState),
        VMSTATE_END_OF_LIST()
    },
};

#define TYPE_SYSBUS_AHCI "sysbus-ahci"
#define SYSBUS_AHCI(obj) OBJECT_CHECK(SysbusAHCIState, (obj), TYPE_SYSBUS_AHCI)

/* Sysbus wrapper exposing the AHCI HBA as a memory-mapped device. */
typedef struct SysbusAHCIState {
    /*< private >*/
    SysBusDevice parent_obj;
    /*< public >*/

    AHCIState ahci;       /* embedded HBA state */
    uint32_t num_ports;   /* "num-ports" property, default 1 */
} SysbusAHCIState;

static const VMStateDescription vmstate_sysbus_ahci = {
    .name = "sysbus-ahci",
    .fields = (VMStateField[]) {
        VMSTATE_AHCI(ahci, SysbusAHCIState),
        VMSTATE_END_OF_LIST()
    },
};

/* DeviceClass reset hook: reset the embedded HBA. */
static void sysbus_ahci_reset(DeviceState *dev)
{
    SysbusAHCIState *s = SYSBUS_AHCI(dev);

    ahci_reset(&s->ahci);
}

/* Realize: initialize the HBA against the system address space and export
 * its MMIO region and interrupt line through the sysbus. */
static void sysbus_ahci_realize(DeviceState *dev, Error **errp)
{
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
    SysbusAHCIState *s = SYSBUS_AHCI(dev);

    ahci_init(&s->ahci, dev, &address_space_memory, s->num_ports);

    sysbus_init_mmio(sbd, &s->ahci.mem);
    sysbus_init_irq(sbd, &s->ahci.irq);
}

static Property sysbus_ahci_properties[] = {
    DEFINE_PROP_UINT32("num-ports", SysbusAHCIState, num_ports, 1),
    DEFINE_PROP_END_OF_LIST(),
};

/* QOM class initialization for the sysbus AHCI device. */
static void sysbus_ahci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = sysbus_ahci_realize;
    dc->vmsd = &vmstate_sysbus_ahci;
    dc->props = sysbus_ahci_properties;
    dc->reset = sysbus_ahci_reset;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}

static const TypeInfo sysbus_ahci_info = {
    .name = TYPE_SYSBUS_AHCI,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(SysbusAHCIState),
    .class_init = sysbus_ahci_class_init,
};

static void sysbus_ahci_register_types(void)
{
    type_register_static(&sysbus_ahci_info);
}

type_init(sysbus_ahci_register_types)

/**
 * Attach the drives in @hd to the ports of an ICH AHCI PCI device.
 * NULL entries are skipped; each drive becomes device 0 on its port's bus.
 */
void ahci_ide_create_devs(PCIDevice *dev, DriveInfo **hd)
{
    AHCIPCIState *d = ICH_AHCI(dev);
    AHCIState *ahci = &d->ahci;
    int i;

    for (i = 0; i < ahci->ports; i++) {
        if (hd[i] == NULL) {
            continue;
        }
        ide_create_drive(&ahci->dev[i].port, 0, hd[i]);
    }

}